-rw-r--r--Documentation/devicetree/bindings/leds/leds-qpnp-flash.txt180
-rw-r--r--Documentation/devicetree/bindings/media/video/msm-cci.txt32
-rw-r--r--Documentation/devicetree/bindings/net/wireless/qcom,wcn3990-wifi.txt17
-rw-r--r--Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt275
-rw-r--r--Documentation/devicetree/bindings/power/supply/qcom/qpnp-smbcharger.txt394
-rw-r--r--Documentation/devicetree/bindings/sound/qcom-audio-dev.txt20
-rw-r--r--Documentation/devicetree/bindings/usb/msm-ssusb.txt2
-rw-r--r--Documentation/filesystems/f2fs.txt18
-rw-r--r--Makefile2
-rw-r--r--android/configs/README15
-rw-r--r--android/configs/android-base-arm64.cfg5
-rw-r--r--android/configs/android-base.cfg164
-rw-r--r--android/configs/android-recommended.cfg140
-rw-r--r--arch/alpha/include/asm/types.h2
-rw-r--r--arch/alpha/include/uapi/asm/types.h12
-rw-r--r--arch/arc/kernel/entry.S6
-rw-r--r--arch/arc/mm/tlb.c3
-rw-r--r--arch/arm/Kconfig2
-rw-r--r--arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom/msm-audio.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom/msm-smb138x.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi10
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi8
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-cv2x.dtsi11
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-sde.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-v3.dtsi65
-rw-r--r--arch/arm/boot/dts/qcom/msm8996.dtsi37
-rw-r--r--arch/arm/boot/dts/qcom/msm8996pro-auto-adp-lite.dts13
-rw-r--r--arch/arm/boot/dts/qcom/msm8996pro-auto.dtsi6
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-camera-sensor-mtp.dtsi8
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-mdss.dtsi8
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom/msm8998.dtsi10
-rw-r--r--arch/arm/boot/dts/qcom/sda636-pm660a-qrd-hdk.dts2
-rw-r--r--arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts2
-rw-r--r--arch/arm/boot/dts/qcom/sdm630-qrd.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/sdm630.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/sdm660.dtsi3
-rw-r--r--arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts65
-rw-r--r--arch/arm/kvm/mmu.c23
-rw-r--r--arch/arm/mm/fault.c5
-rw-r--r--arch/arm64/configs/msm-auto-gvm-perf_defconfig2
-rw-r--r--arch/arm64/configs/msm-auto-gvm_defconfig2
-rw-r--r--arch/arm64/configs/msm-auto-perf_defconfig4
-rw-r--r--arch/arm64/configs/msm-auto_defconfig24
-rw-r--r--arch/arm64/configs/msm-perf_defconfig4
-rw-r--r--arch/arm64/configs/msm_defconfig4
-rw-r--r--arch/arm64/kernel/fpsimd.c2
-rw-r--r--arch/arm64/kernel/stacktrace.c18
-rw-r--r--arch/arm64/mm/fault.c5
-rw-r--r--arch/mips/math-emu/dp_fmax.c84
-rw-r--r--arch/mips/math-emu/dp_fmin.c86
-rw-r--r--arch/mips/math-emu/sp_fmax.c84
-rw-r--r--arch/mips/math-emu/sp_fmin.c86
-rw-r--r--arch/powerpc/kernel/align.c119
-rw-r--r--arch/x86/include/asm/elf.h5
-rw-r--r--arch/x86/include/asm/io.h4
-rw-r--r--block/blk-cgroup.c6
-rw-r--r--block/blk-core.c39
-rw-r--r--block/blk-integrity.c4
-rw-r--r--block/blk-sysfs.c6
-rw-r--r--block/genhd.c6
-rw-r--r--crypto/algif_skcipher.c13
-rw-r--r--drivers/android/Kconfig10
-rw-r--r--drivers/android/Makefile1
-rw-r--r--drivers/android/binder.c8
-rw-r--r--drivers/android/binder_alloc.c425
-rw-r--r--drivers/android/binder_alloc.h33
-rw-r--r--drivers/android/binder_alloc_selftest.c310
-rw-r--r--drivers/android/binder_trace.h55
-rw-r--r--drivers/ata/pata_amd.c1
-rw-r--r--drivers/ata/pata_cs5536.c1
-rw-r--r--drivers/base/bus.c2
-rw-r--r--drivers/block/aoe/aoeblk.c4
-rw-r--r--drivers/block/drbd/drbd_main.c6
-rw-r--r--drivers/block/drbd/drbd_nl.c8
-rw-r--r--drivers/block/drbd/drbd_proc.c2
-rw-r--r--drivers/block/drbd/drbd_req.c2
-rw-r--r--drivers/block/pktcdvd.c4
-rw-r--r--drivers/block/rbd.c2
-rw-r--r--drivers/block/skd_main.c21
-rw-r--r--drivers/bluetooth/btusb.c1
-rw-r--r--drivers/char/diag/diag_debugfs.c7
-rw-r--r--drivers/char/diag/diag_masks.c128
-rw-r--r--drivers/char/diag/diag_memorydevice.c13
-rw-r--r--drivers/char/diag/diagfwd.c3
-rw-r--r--drivers/char/diag/diagfwd_peripheral.c124
-rw-r--r--drivers/char/diag/diagfwd_peripheral.h2
-rw-r--r--drivers/clk/msm/clock-dummy.c9
-rw-r--r--drivers/clk/msm/mdss/mdss-pll.h1
-rw-r--r--drivers/cpufreq/cpufreq-dt.c7
-rw-r--r--drivers/cpufreq/cpufreq_interactive.c8
-rw-r--r--drivers/crypto/msm/ice.c14
-rw-r--r--drivers/crypto/msm/qcedev.c2
-rw-r--r--drivers/gpu/drm/i2c/adv7511.c71
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c3
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_counters.c406
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c74
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c36
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c73
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h8
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h23
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c7
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h4
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c14
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h1
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c117
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h12
-rw-r--r--drivers/gpu/drm/msm/sde/sde_connector.c12
-rw-r--r--drivers/gpu/drm/msm/sde/sde_connector.h17
-rw-r--r--drivers/gpu/drm/msm/sde/sde_core_perf.c112
-rw-r--r--drivers/gpu/drm/msm/sde/sde_crtc.c41
-rw-r--r--drivers/gpu/drm/msm/sde/sde_crtc.h7
-rw-r--r--drivers/gpu/drm/msm/sde/sde_kms.c2
-rw-r--r--drivers/gpu/drm/msm/sde/sde_plane.c22
-rw-r--r--drivers/gpu/drm/msm/sde_hdcp_1x.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c2
-rw-r--r--drivers/gpu/msm/adreno_a5xx.c25
-rw-r--r--drivers/gpu/msm/adreno_a5xx_snapshot.c5
-rw-r--r--drivers/gpu/msm/kgsl.c18
-rw-r--r--drivers/gpu/msm/kgsl_pwrctrl.c8
-rw-r--r--drivers/hwtracing/intel_th/pci.c10
-rw-r--r--drivers/i2c/busses/i2c-ismt.c6
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c4
-rw-r--r--drivers/iio/adc/qcom-rradc.c53
-rw-r--r--drivers/input/mouse/trackpoint.c4
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h7
-rw-r--r--drivers/input/touchscreen/st/fts.c28
-rw-r--r--drivers/iommu/arm-smmu.c15
-rw-r--r--drivers/irqchip/irq-mips-gic.c5
-rw-r--r--drivers/leds/Kconfig11
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-qpnp-flash.c2683
-rw-r--r--drivers/md/bcache/bcache.h1
-rw-r--r--drivers/md/bcache/request.c16
-rw-r--r--drivers/md/bcache/super.c15
-rw-r--r--drivers/md/bcache/sysfs.c4
-rw-r--r--drivers/md/bcache/util.c50
-rw-r--r--drivers/md/bcache/writeback.c20
-rw-r--r--drivers/md/bcache/writeback.h21
-rw-r--r--drivers/md/bitmap.c5
-rw-r--r--drivers/md/dm-cache-target.c2
-rw-r--r--drivers/md/dm-era-target.c2
-rw-r--r--drivers/md/dm-table.c2
-rw-r--r--drivers/md/dm-thin.c2
-rw-r--r--drivers/md/dm.c6
-rw-r--r--drivers/md/linear.c2
-rw-r--r--drivers/md/md.c6
-rw-r--r--drivers/md/multipath.c2
-rw-r--r--drivers/md/raid0.c6
-rw-r--r--drivers/md/raid1.c4
-rw-r--r--drivers/md/raid10.c10
-rw-r--r--drivers/md/raid5.c14
-rw-r--r--drivers/media/platform/msm/ais/msm.c11
-rw-r--r--drivers/media/platform/msm/ais/msm_vb2/msm_vb2.c83
-rw-r--r--drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c4
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp40.c9
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c11
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/Makefile1
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c3
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c3
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/laser_led/Makefile5
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/laser_led/msm_laser_led.c573
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/laser_led/msm_laser_led.h57
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c44
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c83
-rw-r--r--drivers/media/platform/msm/vidc/hfi_packetization.c9
-rw-r--r--drivers/media/platform/msm/vidc/msm_venc.c30
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc.c39
-rw-r--r--drivers/media/platform/msm/vidc/vidc_hfi_api.h1
-rw-r--r--drivers/media/platform/msm/vidc/vidc_hfi_helper.h2
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c7
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c4
-rw-r--r--drivers/misc/hdcp.c47
-rw-r--r--drivers/misc/qcom/qdsp6v2/audio_aac.c6
-rw-r--r--drivers/misc/qcom/qdsp6v2/audio_multi_aac.c8
-rw-r--r--drivers/misc/qseecom.c40
-rw-r--r--drivers/mmc/card/block.c10
-rw-r--r--drivers/mmc/core/core.c24
-rw-r--r--drivers/mmc/core/sd.c5
-rw-r--r--drivers/mmc/host/sdhci-msm.c3
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c2
-rw-r--r--drivers/net/ethernet/msm/msm_rmnet_mhi.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c2
-rw-r--r--drivers/net/phy/phy.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c28
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c355
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.h35
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h2
-rw-r--r--drivers/net/wireless/ath/wil6210/ftm.c47
-rw-r--r--drivers/net/wireless/ath/wil6210/ftm.h2
-rw-r--r--drivers/net/wireless/cnss/Kconfig2
-rw-r--r--drivers/net/wireless/cnss2/Makefile1
-rw-r--r--drivers/net/wireless/cnss2/debug.c1
-rw-r--r--drivers/net/wireless/cnss2/main.c73
-rw-r--r--drivers/net/wireless/cnss2/main.h11
-rw-r--r--drivers/net/wireless/cnss2/pci.c44
-rw-r--r--drivers/net/wireless/cnss2/pci.h2
-rw-r--r--drivers/net/wireless/cnss2/utils.c129
-rw-r--r--drivers/net/wireless/cnss_genl/cnss_nl.c2
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c2
-rw-r--r--drivers/net/wireless/mwifiex/scan.c6
-rw-r--r--drivers/net/wireless/p54/fwio.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c1
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c2
-rw-r--r--drivers/platform/msm/gpio-usbdetect.c10
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa.c7
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c18
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c28
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa.c53
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_client.c61
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c11
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_flt.c7
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_i.h4
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c152
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h184
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_rt.c87
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c528
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c153
-rw-r--r--drivers/platform/msm/mhi/mhi_states.c35
-rw-r--r--drivers/power/supply/qcom/Kconfig18
-rw-r--r--drivers/power/supply/qcom/Makefile2
-rw-r--r--drivers/power/supply/qcom/battery.c10
-rw-r--r--drivers/power/supply/qcom/fg-core.h3
-rw-r--r--drivers/power/supply/qcom/fg-util.c29
-rw-r--r--drivers/power/supply/qcom/qpnp-fg-gen3.c135
-rw-r--r--drivers/power/supply/qcom/qpnp-fg.c7051
-rw-r--r--drivers/power/supply/qcom/qpnp-smb2.c18
-rw-r--r--drivers/power/supply/qcom/qpnp-smbcharger.c8472
-rw-r--r--drivers/power/supply/qcom/smb-lib.c178
-rw-r--r--drivers/power/supply/qcom/smb-lib.h9
-rw-r--r--drivers/power/supply/qcom/smb-reg.h1
-rw-r--r--drivers/power/supply/qcom/step-chg-jeita.c14
-rw-r--r--drivers/pwm/pwm-qpnp.c2
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c33
-rw-r--r--drivers/s390/scsi/zfcp_dbf.h17
-rw-r--r--drivers/s390/scsi/zfcp_fc.h6
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c7
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c16
-rw-r--r--drivers/scsi/isci/remote_node_context.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c5
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c8
-rw-r--r--drivers/scsi/sg.c233
-rw-r--r--drivers/scsi/storvsc_drv.c2
-rw-r--r--drivers/scsi/ufs/ufs-qcom-debugfs.c12
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c41
-rw-r--r--drivers/scsi/ufs/ufs-qcom.h4
-rw-r--r--drivers/scsi/ufs/ufshcd.c58
-rw-r--r--drivers/scsi/ufs/ufshcd.h4
-rw-r--r--drivers/soc/qcom/Kconfig12
-rw-r--r--drivers/soc/qcom/glink.c4
-rw-r--r--drivers/soc/qcom/icnss.c29
-rw-r--r--drivers/soc/qcom/ipc_router_glink_xprt.c2
-rw-r--r--drivers/soc/qcom/peripheral-loader.c27
-rw-r--r--drivers/soc/qcom/peripheral-loader.h6
-rw-r--r--drivers/soc/qcom/pil-msa.c7
-rw-r--r--drivers/soc/qcom/pil-msa.h1
-rw-r--r--drivers/soc/qcom/pil-q6v5-mss.c13
-rw-r--r--drivers/soc/qcom/qdsp6v2/Makefile2
-rw-r--r--drivers/soc/qcom/qdsp6v2/apr_vm.c1270
-rw-r--r--drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c838
-rw-r--r--drivers/soc/qcom/spcom.c16
-rw-r--r--drivers/soc/qcom/subsys-pil-tz.c2
-rw-r--r--drivers/soc/qcom/wcd-dsp-glink.c19
-rw-r--r--drivers/soundwire/swr-wcd-ctrl.c24
-rwxr-xr-xdrivers/soundwire/swr-wcd-ctrl.h1
-rw-r--r--drivers/staging/android/fiq_debugger/fiq_debugger.c2
-rw-r--r--drivers/staging/android/ion/ion_cma_secure_heap.c11
-rw-r--r--drivers/staging/android/ion/ion_page_pool.c2
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.c2
-rw-r--r--drivers/tty/serial/msm_serial_hs.c120
-rw-r--r--drivers/tty/tty_buffer.c26
-rw-r--r--drivers/usb/core/devio.c4
-rw-r--r--drivers/usb/core/quirks.c6
-rw-r--r--drivers/usb/dwc3/dwc3-msm.c74
-rw-r--r--drivers/usb/gadget/function/f_ccid.c106
-rw-r--r--drivers/usb/gadget/function/f_gsi.c235
-rw-r--r--drivers/usb/gadget/function/f_qdss.c13
-rw-r--r--drivers/usb/host/pci-quirks.c35
-rw-r--r--drivers/usb/host/xhci-plat.c30
-rw-r--r--drivers/usb/host/xhci.c6
-rw-r--r--drivers/usb/host/xhci.h1
-rw-r--r--drivers/usb/pd/policy_engine.c603
-rw-r--r--drivers/usb/serial/option.c1
-rw-r--r--drivers/video/fbdev/msm/mdss_dp.c121
-rw-r--r--drivers/video/fbdev/msm/mdss_dp.h8
-rw-r--r--drivers/video/fbdev/msm/mdss_dp_aux.c51
-rw-r--r--drivers/video/fbdev/msm/mdss_dp_util.c2
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi.c2
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi_host.c54
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_tx.c144
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_tx.h4
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_util.c49
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_util.h10
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp.h2
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_layer.c10
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_overlay.c3
-rw-r--r--drivers/video/fbdev/msm/mdss_rotator.c4
-rw-r--r--fs/Kconfig2
-rw-r--r--fs/Makefile1
-rw-r--r--fs/block_dev.c25
-rw-r--r--fs/btrfs/super.c2
-rw-r--r--fs/btrfs/volumes.c2
-rw-r--r--fs/ceph/addr.c24
-rw-r--r--fs/ceph/cache.c12
-rw-r--r--fs/cifs/dir.c2
-rw-r--r--fs/cifs/smb2pdu.h4
-rw-r--r--fs/crypto/Kconfig18
-rw-r--r--fs/crypto/Makefile3
-rw-r--r--fs/crypto/crypto.c568
-rw-r--r--fs/crypto/fname.c414
-rw-r--r--fs/crypto/keyinfo.c333
-rw-r--r--fs/crypto/policy.c250
-rw-r--r--fs/dlm/user.c4
-rw-r--r--fs/eventpoll.c37
-rw-r--r--fs/ext4/crypto.c6
-rw-r--r--fs/ext4/ext4.h5
-rw-r--r--fs/ext4/extents.c12
-rw-r--r--fs/ext4/inode.c15
-rw-r--r--fs/ext4/super.c38
-rw-r--r--fs/f2fs/Kconfig21
-rw-r--r--fs/f2fs/Makefile2
-rw-r--r--fs/f2fs/acl.c23
-rw-r--r--fs/f2fs/acl.h3
-rw-r--r--fs/f2fs/checkpoint.c547
-rw-r--r--fs/f2fs/crypto.c491
-rw-r--r--fs/f2fs/crypto_fname.c440
-rw-r--r--fs/f2fs/data.c1330
-rw-r--r--fs/f2fs/debug.c88
-rw-r--r--fs/f2fs/dir.c491
-rw-r--r--fs/f2fs/extent_cache.c315
-rw-r--r--fs/f2fs/f2fs.h1112
-rw-r--r--fs/f2fs/file.c1331
-rw-r--r--fs/f2fs/gc.c382
-rw-r--r--fs/f2fs/gc.h8
-rw-r--r--fs/f2fs/hash.c7
-rw-r--r--fs/f2fs/inline.c280
-rw-r--r--fs/f2fs/inode.c199
-rw-r--r--fs/f2fs/namei.c411
-rw-r--r--fs/f2fs/node.c927
-rw-r--r--fs/f2fs/node.h134
-rw-r--r--fs/f2fs/recovery.c258
-rw-r--r--fs/f2fs/segment.c907
-rw-r--r--fs/f2fs/segment.h65
-rw-r--r--fs/f2fs/shrinker.c14
-rw-r--r--fs/f2fs/super.c991
-rw-r--r--fs/f2fs/trace.c6
-rw-r--r--fs/f2fs/xattr.c71
-rw-r--r--fs/f2fs/xattr.h3
-rw-r--r--fs/gfs2/ops_fstype.c2
-rw-r--r--fs/namespace.c4
-rw-r--r--fs/nfs/internal.h1
-rw-r--r--fs/nfs/pagelist.c26
-rw-r--r--fs/nfs/pnfs.c2
-rw-r--r--fs/nfsd/nfs4state.c10
-rw-r--r--fs/nilfs2/super.c2
-rw-r--r--fs/sdcardfs/derived_perm.c3
-rw-r--r--fs/sdcardfs/inode.c12
-rw-r--r--fs/sdcardfs/main.c7
-rw-r--r--fs/sdcardfs/sdcardfs.h1
-rw-r--r--fs/sdcardfs/super.c2
-rw-r--r--fs/super.c2
-rw-r--r--fs/xfs/xfs_linux.h9
-rw-r--r--include/asm-generic/topology.h6
-rw-r--r--include/linux/backing-dev-defs.h2
-rw-r--r--include/linux/backing-dev.h10
-rw-r--r--include/linux/blkdev.h2
-rw-r--r--include/linux/cpufreq.h10
-rw-r--r--include/linux/dcache.h1
-rw-r--r--include/linux/f2fs_fs.h54
-rw-r--r--include/linux/fs.h9
-rw-r--r--include/linux/fscrypto.h411
-rw-r--r--include/linux/lightnvm.h1
-rw-r--r--include/linux/mm_types.h2
-rw-r--r--include/linux/mmc/core.h1
-rw-r--r--include/linux/mmc/host.h1
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/qdsp6v2/apr.h6
-rw-r--r--include/linux/qdsp6v2/aprv2_vm.h116
-rw-r--r--include/linux/rcutree.h2
-rw-r--r--include/linux/sched.h5
-rw-r--r--include/linux/stacktrace.h2
-rw-r--r--include/linux/tty_flip.h3
-rw-r--r--include/linux/workqueue.h2
-rw-r--r--include/media/msm_cam_sensor.h14
-rw-r--r--include/net/cnss2.h6
-rw-r--r--include/net/cnss_nl.h4
-rw-r--r--include/net/inet_frag.h41
-rw-r--r--include/net/ip6_fib.h32
-rw-r--r--include/sound/apr_audio-v2.h67
-rw-r--r--include/sound/q6core.h6
-rw-r--r--include/trace/events/f2fs.h85
-rw-r--r--include/uapi/linux/fs.h18
-rw-r--r--include/uapi/linux/msm_ipa.h111
-rw-r--r--include/uapi/linux/msm_mdp_ext.h22
-rw-r--r--include/uapi/linux/rmnet_ipa_fd_ioctl.h80
-rw-r--r--include/uapi/linux/v4l2-controls.h7
-rw-r--r--include/uapi/linux/videodev2.h7
-rw-r--r--include/uapi/media/msm_cam_sensor.h15
-rw-r--r--include/uapi/media/msm_camera.h2
-rw-r--r--include/uapi/media/msm_camsensor_sdk.h9
-rw-r--r--include/uapi/media/msmb_camera.h1
-rw-r--r--kernel/cgroup.c5
-rw-r--r--kernel/cpuset.c1
-rw-r--r--kernel/fork.c53
-rw-r--r--kernel/gcov/base.c6
-rw-r--r--kernel/gcov/gcc_4_7.c4
-rw-r--r--kernel/locking/locktorture.c4
-rw-r--r--kernel/locking/osq_lock.c29
-rw-r--r--kernel/rcu/tree.c44
-rw-r--r--kernel/rcu/tree_plugin.h14
-rw-r--r--kernel/sched/core.c17
-rw-r--r--kernel/sched/cpufreq_sched.c14
-rw-r--r--kernel/sched/cpufreq_schedutil.c62
-rw-r--r--kernel/sched/deadline.c2
-rw-r--r--kernel/sched/fair.c44
-rw-r--r--kernel/sched/sched.h19
-rw-r--r--kernel/sched/tune.c1
-rw-r--r--kernel/sched/walt.c8
-rw-r--r--kernel/trace/ftrace.c10
-rw-r--r--kernel/trace/trace.c2
-rw-r--r--kernel/trace/trace_selftest.c2
-rw-r--r--mm/backing-dev.c34
-rw-r--r--mm/page-writeback.c4
-rw-r--r--mm/page_owner.c2
-rw-r--r--net/bluetooth/l2cap_core.c80
-rw-r--r--net/ieee802154/6lowpan/reassembly.c11
-rw-r--r--net/ipv4/inet_fragment.c4
-rw-r--r--net/ipv4/ip_fragment.c12
-rw-r--r--net/ipv4/tcp.c4
-rw-r--r--net/ipv6/addrconf.c2
-rw-r--r--net/ipv6/ip6_fib.c56
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c12
-rw-r--r--net/ipv6/output_core.c6
-rw-r--r--net/ipv6/reassembly.c12
-rw-r--r--net/ipv6/route.c17
-rw-r--r--net/ipv6/udp.c1
-rw-r--r--net/netfilter/xt_qtaguid.c20
-rw-r--r--net/netfilter/xt_socket.c4
-rw-r--r--net/wireless/db.txt34
-rw-r--r--net/xfrm/xfrm_policy.c6
-rw-r--r--sound/core/info.c5
-rw-r--r--sound/isa/msnd/msnd_midi.c30
-rw-r--r--sound/isa/msnd/msnd_pinnacle.c23
-rw-r--r--sound/pci/au88x0/au88x0_core.c14
-rw-r--r--sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c4
-rw-r--r--sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c32
-rw-r--r--sound/soc/codecs/wcd-dsp-mgr.c3
-rw-r--r--sound/soc/codecs/wcd9335.c26
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x.c28
-rw-r--r--sound/soc/msm/Kconfig29
-rw-r--r--sound/soc/msm/Makefile6
-rw-r--r--sound/soc/msm/apq8096-auto.c1792
-rw-r--r--sound/soc/msm/msm-dai-fe.c48
-rw-r--r--sound/soc/msm/qdsp6v2/Makefile7
-rw-r--r--sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c9
-rw-r--r--sound/soc/msm/qdsp6v2/msm-lsm-client.c2
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c7
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c329
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c235
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h6
-rw-r--r--sound/soc/msm/qdsp6v2/q6asm.c61
-rw-r--r--sound/soc/msm/qdsp6v2/q6core.c136
-rw-r--r--sound/soc/msm/qdsp6v2/rtac.c32
-rw-r--r--sound/usb/mixer.c1
476 files changed, 41240 insertions, 7403 deletions
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-flash.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-flash.txt
new file mode 100644
index 000000000000..ed1ddf597016
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp-flash.txt
@@ -0,0 +1,180 @@
+Qualcomm Technologies PNP Flash LED
+
+QPNP (Qualcomm Technologies Plug N Play) Flash LED (Light
+Emitting Diode) driver is used to provide illumination to
+camera sensor when background light is dim to capture good
+picture. It can also be used for flashlight/torch application.
+It is part of PMIC on Qualcomm Technologies reference platforms.
+The PMIC is connected to the host processor via SPMI bus.
+
+Required properties:
+- compatible : should be "qcom,qpnp-flash-led"
+- reg : base address and size for flash LED modules
+
+Optional properties:
+- qcom,headroom : headroom to use. Values should be 250, 300,
+ 400 and 500 in mV.
+- qcom,startup-dly : delay before flashing after the flash is executed.
+			Values should be 10, 32, 64, and 128 in us.
+- qcom,clamp-curr : current to clamp at when voltage droop happens.
+ Values are integers from 0 to 1000 inclusive,
+ indicating 0 to 1000 mA.
+- qcom,self-check-enabled : boolean type. self fault check enablement
+- qcom,thermal-derate-enabled : boolean type. derate enablement when module
+ temperature reaches threshold
+- qcom,thermal-derate-threshold : thermal threshold for derate. Values
+ should be 95, 105, 115, 125 in C.
+- qcom,thermal-derate-rate : derate rate when module temperature
+ reaches threshold. Values should be
+ "1_PERCENT", "1P25_PERCENT", "2_PERCENT",
+ "2P5_PERCENT", "5_PERCENT" in string.
+- qcom,current-ramp-enabled : boolean type. stepped current ramp enablement
+- qcom,ramp-up-step : current ramp up rate. Values should be
+ "0P2US", "0P4US", "0P8US", "1P6US", "3P3US",
+ "6P7US", "13P5US", "27US".
+- qcom,ramp-dn-step : current ramp down rate. Values should be
+ "0P2US", "0P4US", "0P8US", "1P6US", "3P3US",
+ "6P7US", "13P5US", "27US".
+- qcom,vph-pwr-droop-enabled : boolean type. VPH power droop enablement. Enablement
+ allows current clamp when phone power drops below
+ pre-determined threshold
+- qcom,vph-pwr-droop-threshold : VPH power threshold for module to clamp current.
+ Values are 2500 - 3200 in mV with 100 mV steps.
+- qcom,vph-pwr-droop-debounce-time : debounce time for module to confirm a voltage
+ droop is happening. Values are 0, 10, 32, 64
+ in us.
+- qcom,pmic-charger-support : Boolean type. This tells if flash utilizes charger boost
+ support
+- qcom,headroom-sense-ch0-enabled: Boolean type. This configures headroom sensing enablement
+ for LED channel 0
+- qcom,headroom-sense-ch1-enabled: Boolean type. This configures headroom sensing enablement
+ for LED channel 1
+- qcom,power-detect-enabled : Boolean type. This enables driver to get maximum flash LED
+ current at current battery level to avoid intensity clamp
+ when battery voltage is low
+- qcom,otst2-moduled-enabled : Boolean type. This enables driver to enable MASK to support
+ OTST2 connection.
+- qcom,follow-otst2-rb-disabled : Boolean type. This allows driver to reset/deset module.
+ By default, driver resets module. This entry allows driver to
+ bypass reset module sequence.
+- qcom,die-current-derate-enabled: Boolean type. This enables driver to get maximum flash LED
+ current, based on PMIC die temperature threshold to
+ avoid significant current derate from hardware. This property
+ is not needed if PMIC is older than PMI8994v2.0.
+- qcom,die-temp-vadc : VADC channel source for flash LED. This property is not
+ needed if PMIC is older than PMI8994v2.0.
+- qcom,die-temp-threshold : Integer type array for PMIC die temperature threshold.
+ Array should have at least one value. Values should be in
+ Celsius. This property is not needed if PMIC is older than
+ PMI8994v2.0.
+- qcom,die-temp-derate-current : Integer type array for PMIC die temperature derate
+ current. Array should have at least one value. Values
+ should be in mA. This property is not needed if PMIC is older
+ than PMI8994v2.0.
+
+Required properties inside child node. The child node contains settings for each individual LED.
+Each LED hardware needs a node for itself and a switch node to control brightness.
+For the purpose of turning the LED on/off and better regulator control, a "led:switch" node
+is introduced. "led:switch" acquires several existing properties from other nodes for
+operational simplification. For backward compatibility purposes, the switch node can be optional:
+- label : type of led that will be used, either "flash" or "torch".
+- qcom,led-name : name of the LED. Accepted values are "led:flash_0",
+ "led:flash_1", "led:torch_0", "led:torch_1"
+- qcom,default-led-trigger : trigger for the camera flash and torch. Accepted values are
+ "flash0_trigger", "flash1_trigger", "torch0_trigger", torch1_trigger"
+- qcom,id : enumerated ID for each physical LED. Accepted values are "0",
+ "1", etc..
+- qcom,max-current : maximum current allowed on this LED. Valid values should be
+ integers from 0 to 1000 inclusive, indicating 0 to 1000 mA.
+- qcom,pmic-revid : PMIC revision id source. This property is needed for PMI8996
+ revision check.
+
+Optional properties inside child node:
+- qcom,current : default current intensity for LED. Accepted values should be
+ integers from 0 to 1000 inclusive, indicating 0 to 1000 mA.
+- qcom,duration : Duration for flash LED. When duration time expires, hardware will turn off
+ flash LED. Values should be from 10 ms to 1280 ms with 10 ms incremental
+ step. Not applicable to torch. It is required for the LED:SWITCH node to
+ handle LEDs used as flash.
+- reg<n> : reg<n> (<n> represents a number, e.g. 0, 1, 2, ...) property is to add support
+ for multiple power sources. It includes two properties: regulator-name and max-voltage.
+ Required property inside regulator node:
+ - regulator-name : This denotes this node is a regulator node and which
+ regulator to use.
+ Optional property inside regulator node:
+ - max-voltage : This specifies max voltage of regulator. Some switch
+ or boost regulators do not need this property.
+
+Example:
+ qcom,leds@d300 {
+ compatible = "qcom,qpnp-flash-led";
+ status = "okay";
+ reg = <0xd300 0x100>;
+ label = "flash";
+ qcom,headroom = <500>;
+ qcom,startup-dly = <128>;
+ qcom,clamp-curr = <200>;
+ qcom,pmic-charger-support;
+ qcom,self-check-enabled;
+ qcom,thermal-derate-enabled;
+ qcom,thermal-derate-threshold = <95>;
+ qcom,thermal-derate-rate = "5_PERCENT";
+ qcom,current-ramp-enabled;
+ qcom,ramp-up-step = "27US";
+ qcom,ramp-dn-step = "27US";
+ qcom,vph-pwr-droop-enabled;
+ qcom,vph-pwr-droop-threshold = <3200>;
+ qcom,vph-pwr-droop-debounce-time = <10>;
+ qcom,headroom-sense-ch0-enabled;
+ qcom,headroom-sense-ch1-enabled;
+ qcom,die-current-derate-enabled;
+ qcom,die-temp-vadc = <&pmi8994_vadc>;
+ qcom,die-temp-threshold = <85 80 75 70 65>;
+ qcom,die-temp-derate-current = <400 800 1200 1600 2000>;
+ qcom,pmic-revid = <&pmi8994_revid>;
+
+ pm8226_flash0: qcom,flash_0 {
+ label = "flash";
+ qcom,led-name = "led:flash_0";
+ qcom,default-led-trigger =
+ "flash0_trigger";
+ qcom,max-current = <1000>;
+ qcom,id = <0>;
+ qcom,duration = <1280>;
+ qcom,current = <625>;
+ };
+
+ pm8226_torch: qcom,torch_0 {
+ label = "torch";
+ qcom,led-name = "led:torch_0";
+ qcom,default-led-trigger =
+ "torch0_trigger";
+ boost-supply = <&pm8226_chg_boost>;
+ qcom,max-current = <200>;
+ qcom,id = <0>;
+ qcom,current = <120>;
+ reg0 {
+ regulator-name =
+ "pm8226_chg_boost";
+ max-voltage = <3600000>;
+ };
+ };
+
+ pm8226_switch: qcom,switch {
+ label = "switch";
+ qcom,led-name = "led:switch";
+ qcom,default-led-trigger =
+ "switch_trigger";
+ qcom,id = <2>;
+ qcom,current = <625>;
+ qcom,duration = <1280>;
+ qcom,max-current = <1000>;
+ reg0 {
+ regulator-name =
+ "pm8226_chg_boost";
+ max-voltage = <3600000>;
+ };
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/media/video/msm-cci.txt b/Documentation/devicetree/bindings/media/video/msm-cci.txt
index bb413af4b54d..c5c82a89f662 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cci.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cci.txt
@@ -205,6 +205,31 @@ Optional properties:
(in the same order).
- cam_vaf-supply : should contain regulator from which AF voltage is supplied
+* Qualcomm Technologies, Inc. MSM LASER LED
+
+Required properties:
+- cell-index : should contain unique identifier to differentiate
+ between multiple laser led modules
+- reg : should contain i2c slave address of the laser led and length of
+ data field which is 0x0
+- compatible :
+ - "qcom,laser-led"
+- qcom,cci-master : should contain the i2c master id to be used for this
+ laser led
+ - 0 -> MASTER 0
+ - 1 -> MASTER 1
+
+Optional properties:
+- qcom,cam-vreg-name : should contain names of all regulators needed by this
+ laser led
+- qcom,cam-vreg-min-voltage : should contain minimum voltage level in microvolts
+ for regulators mentioned in qcom,cam-vreg-name property (in the same order)
+- qcom,cam-vreg-max-voltage : should contain maximum voltage level in microvolts
+ for regulators mentioned in qcom,cam-vreg-name property (in the same order)
+- qcom,cam-vreg-op-mode : should contain the maximum current in microamps
+ required from the regulators mentioned in the qcom,cam-vreg-name property
+ (in the same order).
+
* Qualcomm Technologies, Inc. MSM OIS
Required properties:
@@ -277,6 +302,13 @@ Example:
qcom,cam-vreg-op-mode = <100000>;
};
+ laserled0: qcom,laserled@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,laser-led";
+ qcom,cci-master = <1>;
+ };
+
qcom,camera@0 {
cell-index = <0>;
compatible = "qcom,camera";
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,wcn3990-wifi.txt b/Documentation/devicetree/bindings/net/wireless/qcom,wcn3990-wifi.txt
index acc850773210..c1a8d1bd697d 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,wcn3990-wifi.txt
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,wcn3990-wifi.txt
@@ -11,13 +11,24 @@ Required properties:
- compatible: "qcom,wcn3990-wifi";
- reg: Memory regions defined as starting address and size
- reg-names: Names of the memory regions defined in reg entry
+ - clocks: List of clock phandles
+ - clock-names: List of clock names corresponding to the "clocks" property
- interrupts: Copy engine interrupt table
+Optional properties:
+ - <supply-name>-supply: phandle to the regulator device tree node;
+ an optional "supply-name" is "vdd-0.8-cx-mx".
+ - qcom,<supply>-config: Specifies voltage levels for the supply. Should be
+ specified in pairs (min, max) in uV. An optional
+ load in uA and a regulator settle delay in uS can
+ follow.
Example:
msm_ath10k_wlan: qcom,msm_ath10k_wlan@18800000 {
compatible = "qcom,wcn3990-wifi";
reg = <0x18800000 0x800000>;
reg-names = "membase";
+ clocks = <&clock_gcc clk_aggre2_noc_clk>;
+ clock-names = "smmu_aggre2_noc_clk";
interrupts =
<0 130 0 /* CE0 */ >,
<0 131 0 /* CE1 */ >,
@@ -31,4 +42,10 @@ Example:
<0 139 0 /* CE9 */ >,
<0 140 0 /* CE10 */ >,
<0 141 0 /* CE11 */ >;
+ vdd-0.8-cx-mx-supply = <&pm8998_l5>;
+ vdd-1.8-xo-supply = <&pm8998_l7_pin_ctrl>;
+ vdd-1.3-rfa-supply = <&pm8998_l17_pin_ctrl>;
+ vdd-3.3-ch0-supply = <&pm8998_l25_pin_ctrl>;
+ qcom,vdd-0.8-cx-mx-config = <800000 800000>;
+ qcom,vdd-3.3-ch0-config = <3104000 3312000>;
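+ /* illustrative only: an optional load (uA) and regulator
+ * settle delay (uS) may follow the (min, max) voltage pair
+ */
+ qcom,vdd-1.3-rfa-config = <1200000 1370000 0 120>;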
};
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt
new file mode 100644
index 000000000000..f6a7a1ba3005
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg.txt
@@ -0,0 +1,275 @@
+QTI's QPNP PMIC Fuel Gauge Device
+
+QPNP PMIC FG provides an interface for clients to read properties related
+to the battery. Its main function is to retrieve the State of Charge (SOC),
+a 0-100 percentage representing the amount of charge left in the battery.
+
+There are two required peripherals in the FG driver, both implemented as
+subnodes in the example. These peripherals must not be disabled if the FG
+device is to enabled:
+
+- qcom,fg-soc : The main FG device. Supports battery fuel gauge controls and
+ sensors.
+- qcom,fg-batt : The FG battery device supports interrupts and controls with
+ respect to the state of the connected battery. For example, the
+ peripheral informs the driver if the battery has been identified
+ by the fuel gauge based on a given battery resistance range.
+
+Optionally, ADC nodes can be added:
+- qcom,revid-tp-rev: A subnode with a register address for the TP_REV register
+ in the REVID peripheral. This is used to apply workarounds that
+ may depend on the trim program.
+- qcom,fg-adc-vbat : A subnode with a register address for the FG_ADC_USR
+ peripheral which is used mainly for battery current limiting (BCL).
+ This node maps out the VBAT reading register, which allows
+ a +/- 32 mV accurate reading of VBAT.
+- qcom,fg-adc-ibat : A subnode with a register address for the FG_ADC_USR
+ peripheral which is used mainly for battery current limiting (BCL).
+ This node maps out the IBAT current reading register, which
+ allows a +/- 32 mA accurate reading of IBAT.
+
+Parent node required properties:
+- compatible : should be "qcom,qpnp-fg" for the FG driver.
+- qcom,pmic-revid : Should specify the phandle of PMIC
+ revid module. This is used to identify
+ the PMIC subtype.
+
+Parent node optional properties:
+- qcom,warm-bat-decidegc: Warm battery temperature in decidegC.
+- qcom,cool-bat-decidegc: Cool battery temperature in decidegC.
+- qcom,hot-bat-decidegc: Hot battery temperature in decidegC.
+- qcom,cold-bat-decidegc: Cold battery temperature in decidegC.
+- qcom,cold-hot-jeita-hysteresis: A tuple of 2. Index[0] is cold
+ hysteresis and index[1] is hot
+ hysteresis (in decidegC).
+- qcom,ext-sense-type: Current sense channel used by the FG.
+ Set this to use external rsense.
+- qcom,thermal-coefficients: Byte array of thermal coefficients for
+ reading battery thermistor. This should
+ be exactly 6 bytes in length.
+ Example: [01 02 03 04 05 06]
+- qcom,resume-soc: soc to resume charging in percentage.
+- qcom,resume-soc-raw: soc to resume charging in the scale of
+ [0-255]. This overrides qcom,resume-soc
+ if defined.
+- qcom,hold-soc-while-full: A boolean property that when defined
+ holds SOC at 100% when the battery is
+ full.
+- qcom,bcl-lm-threshold-ma: BCL LPM to MPM mode transition threshold
+ in milliAmpere.
+- qcom,bcl-mh-threshold-ma: BCL MPM to HPM mode transition threshold
+ in milliAmpere.
+- qcom,use-otp-profile: Specify this flag to avoid RAM loading
+ any battery profile.
+- qcom,sw-rbias-control: Boolean property which defines whether
+ the Rbias needs to be controlled by
+ software. If this is not set, it will
+ be controlled by hardware (default).
+- qcom,fg-iterm-ma: Battery current at which the fuel gauge
+ will try to scale 100% towards. When
+ the charge current goes above this, the
+ SoC should be at 100%.
+- qcom,fg-chg-iterm-ma: Battery current at which the fuel gauge
+ will issue end of charge if the charger
+ is configured to use the fuel gauge
+ ADCs for end of charge detection. This
+ property is in milliamps and should be
+ positive (e.g. 100mA to terminate at
+ -100mA).
+- qcom,irq-volt-empty-mv: The voltage threshold that the empty
+ soc interrupt will be triggered. When
+ the empty soc interrupt fires, battery
+ soc will be pulled to 0 and the
+ userspace will be notified via the
+ power supply framework. The userspace
+ will read 0% soc and immediately
+ shut down.
+- qcom,fg-cutoff-voltage-mv: The voltage where the fuel gauge will
+ steer the SOC to be zero. For example,
+ if the cutoff voltage is set to 3400mv,
+ the fuel gauge will try to count SoC so
+ that the battery SoC will be 0 when it
+ is 3400mV.
+- qcom,fg-vbat-estimate-diff-mv: If the estimated voltage based on SoC
+ and battery current/resistance differs
+ from the actual voltage by more than
+ this amount, the fuel gauge will
+ redo the first SoC estimate when the
+ driver probes.
+- qcom,fg-delta-soc: How many percent the monotonic SoC must
+ change before a new delta_soc interrupt
+ is asserted. If this value is raised
+ above 3-4, some period workarounds may
+ not function well, so it's best to
+ leave this at 1 or 2%.
+- qcom,fg-vbatt-low-threshold: Voltage (in mV) which, when set, will be
+ used for configuring the low battery
+ voltage threshold. Interrupt will be
+ asserted and handled based upon
+ this. If this property is not specified,
+ low battery voltage threshold will be
+ configured to 4200 mV.
+- qcom,cycle-counter-en: Boolean property which enables the cycle
+ counter feature. If this property is
+ present, then the following properties
+ to specify low and high soc thresholds
+ should be defined.
+- qcom,capacity-learning-on: A boolean property to have the fuel
+ gauge driver attempt to learn the
+ battery capacity when charging. Takes
+ precedence over capacity-estimation-on.
+- qcom,capacity-learning-feedback: A boolean property to have the fuel
+ gauge driver feed back the learned
+ capacity into the capacity learning
+ algorithm. This has to be used only if
+ the property "qcom,capacity-learning-on"
+ is specified.
+- qcom,cl-max-increment-deciperc: The maximum percent that the capacity
+ can rise as the result of a single
+ charge cycle. This property corresponds
+ to .1% increments.
+- qcom,cl-max-decrement-deciperc: The maximum percent that the capacity
+ can fall as the result of a single
+ charge cycle. This property corresponds
+ to .1% decrements.
+- qcom,cl-max-temp-decidegc: Above this temperature, capacity
+ learning will be canceled.
+- qcom,cl-min-temp-decidegc: Below this temperature, capacity
+ learning will be canceled.
+- qcom,cl-max-start-soc: The battery soc has to be below this
+ value at the start of a charge cycle
+ for capacity learning to be run.
+- qcom,cl-vbat-est-thr-uv: The maximum difference between the
+ battery voltage shadow and the current
+ predicted voltage in uV to initiate
+ capacity learning.
+- qcom,capacity-estimation-on: A boolean property to have the fuel
+ gauge driver attempt to estimate the
+ battery capacity using battery
+ resistance.
+- qcom,aging-eval-current-ma: Current used to evaluate battery aging.
+ This value should be around the steady
+ state current drawn from the battery
+ when the phone is low on battery.
+- qcom,fg-cc-cv-threshold-mv: Voltage threshold in mV for configuring
+ constant charge (CC) to constant
+ voltage (CV) setpoint in FG upon
+ which the battery EOC status will
+ be determined. This value should be
+ 10 mV less than the float voltage
+ configured in the charger.
+ This property should only be specified
+ if "qcom,autoadjust-vfloat" property is
+ specified in the charger driver to
+ ensure a proper operation.
+- qcom,bad-battery-detection-enable: A boolean property to enable the fuel
+ gauge driver to detect a damaged battery
+ when the safety-timer expires by using the
+ coulomb count.
+- qcom,fg-therm-delay-us: The time in microseconds to delay battery
+ thermistor biasing.
+- qcom,esr-pulse-tuning-en: A boolean property to enable ESR pulse
+ tuning feature. If this is enabled,
+ ESR pulse extraction will be disabled
+ when state of charge (SOC) is less than
+ 2%. It will be enabled back when SOC
+ gets above 2%. In addition, for SOC
+ between 2% and 5%, ESR pulse timing
+ settings will be different from default.
+ Once SOC crosses 5%, ESR pulse timings
+ will be restored back to default.
+
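+An illustrative combination of parent node optional properties (the values
+are placeholders, not recommendations):
+
+	qcom,resume-soc = <95>;
+	qcom,warm-bat-decidegc = <450>;
+	qcom,cool-bat-decidegc = <100>;
+	qcom,fg-iterm-ma = <100>;
+	qcom,fg-cutoff-voltage-mv = <3400>;
+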
+qcom,fg-soc node required properties:
+- reg : offset and length of the PMIC peripheral register map.
+- interrupts : the interrupt mappings.
+ The format should be
+ <slave-id peripheral-id interrupt-number>.
+- interrupt-names : names for the mapped fg soc interrupts
+ The following interrupts are required:
+ 0: high-soc
+ 1: low-soc
+ 2: full-soc
+ 3: empty-soc
+ 4: delta-soc
+ 5: first-est-done
+ 6: sw-fallbk-ocv
+ 7: sw-fallbk-new-batt
+
+qcom,fg-memif node required properties:
+- reg : offset and length of the PMIC peripheral register map.
+- interrupts : the interrupt mappings.
+ The format should be
+ <slave-id peripheral-id interrupt-number>.
+- interrupt-names : names for the mapped fg adc interrupts
+ The following interrupts are required:
+ 0: mem-avail
+
+Example:
+pmi8994_fg: qcom,fg {
+ compatible = "qcom,qpnp-fg";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ status = "disabled";
+ qcom,pmic-revid = <&pmi8994_revid>;
+ qcom,cold-hot-jeita-hysteresis = <30 50>;
+
+ qcom,fg-soc@4000 {
+ reg = <0x4000 0x100>;
+ interrupts = <0x2 0x40 0x0>,
+ <0x2 0x40 0x1>,
+ <0x2 0x40 0x2>,
+ <0x2 0x40 0x3>,
+ <0x2 0x40 0x4>,
+ <0x2 0x40 0x5>,
+ <0x2 0x40 0x6>,
+ <0x2 0x40 0x7>;
+
+ interrupt-names = "high-soc",
+ "low-soc",
+ "full-soc",
+ "empty-soc",
+ "delta-soc",
+ "first-est-done",
+ "sw-fallbk-ocv",
+ "sw-fallbk-new-batt";
+ };
+
+ qcom,fg-batt@4100 {
+ reg = <0x4100 0x100>;
+ interrupts = <0x2 0x41 0x0>,
+ <0x2 0x41 0x1>,
+ <0x2 0x41 0x2>,
+ <0x2 0x41 0x3>,
+ <0x2 0x41 0x4>,
+ <0x2 0x41 0x5>,
+ <0x2 0x41 0x6>,
+ <0x2 0x41 0x7>;
+
+ interrupt-names = "soft-cold",
+ "soft-hot",
+ "vbatt-low",
+ "batt-ided",
+ "batt-id-req",
+ "batt-unknown",
+ "batt-missing",
+ "batt-match";
+ };
+
+ qcom,fg-adc-vbat@4254 {
+ reg = <0x4254 0x1>;
+ };
+
+ qcom,fg-adc-ibat@4255 {
+ reg = <0x4255 0x1>;
+ };
+
+ qcom,fg-memif@4400 {
+ reg = <0x4400 0x100>;
+ interrupts = <0x2 0x44 0x0>,
+ <0x2 0x44 0x1>;
+
+ interrupt-names = "mem-avail",
+ "data-rcvry-sug";
+ };
+};
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smbcharger.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smbcharger.txt
new file mode 100644
index 000000000000..efd64cd90878
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smbcharger.txt
@@ -0,0 +1,394 @@
+QPNP SMB Battery Charger
+
+QPNP SMB Charger is a single-cell switching mode battery charger. It can charge
+the battery and power the system via the USB and AC adapter input.
+
+The QPNP SMB Charger interfaces via the SPMI bus.
+
+There are six different peripherals adding the following functionality.
+Each of these peripherals is implemented as a subnode in the example at the
+end of this file.
+
+- qcom,chgr: Supports charging control and status
+ reporting.
+- qcom,bat-if: Battery status reporting such as presence,
+ temperature reporting and voltage collapse
+ protection.
+- qcom,usb-chgpth: USB charge path detection and input current
+ limiting configuration.
+- qcom,dc-chgpth: DC charge path detection and input current
+ limiting configuration.
+- qcom,chg-misc: Miscellaneous features such as watchdog timers
+ and SYSOK pin control
+- qcom,chg-otg: OTG configuration control.
+
+Parent node required properties:
+- compatible: Must be "qcom,qpnp-smbcharger"
+- #address-cells: Must be <1>
+- #size-cells: Must be <1>
+- qcom,pmic-revid: Should specify the phandle of PMIC
+ revid module. This is used to identify
+ the PMIC subtype.
+
+Sub node required properties:
+- reg: The SPMI address for this peripheral
+- interrupts: Specifies the interrupt associated with the peripheral.
+- interrupt-names: Specifies the interrupt names for the peripheral. Every
+ available interrupt needs to have an associated name
+ with it to identify its purpose.
+
+ The following lists each subnode and their corresponding
+ required interrupt names:
+
+ qcom,chgr:
+ - chg-tcc-thr: Triggers on charge completion.
+ - chg-taper-thr: Triggers on the taper charge
+ transition.
+ - chg-inhibit: Notifies on battery voltage
+ being too high to resume
+ charging.
+ - chg-p2f-thr: Triggers on transitioning from
+ precharge to fastcharge.
+ - chg-rechg-thr: Triggers on battery voltage
+ falling below the resume
+ threshold.
+
+ qcom,bat-if:
+ - batt-hot: Triggers on battery temperature
+ hitting the hot threshold.
+ Charging stops.
+ - batt-warm: Triggers on battery temperature
+ hitting the warm threshold.
+ Charging current is reduced.
+ - batt-cool: Triggers on battery temperature
+ hitting the cool threshold.
+ Charging current is reduced.
+ - batt-cold: Triggers on battery temperature
+ hitting the cold threshold.
+ Charging stops.
+ - batt-missing: Battery missing status
+ interrupt.
+ - batt-low: Triggers on battery voltage
+ falling across a low threshold.
+
+ qcom,usb-chgpth:
+ - usbin-uv: USB input voltage falls below a
+ valid threshold.
+ - usbin-src-det: USB automatic source detection
+ finishes.
+
+ qcom,dc-chgpth:
+ - dcin-uv: DC input voltage falls below a
+ valid threshold.
+
+ qcom,chgr-misc:
+ - wdog-timeout-mins: Charger watchdog timer
+ interrupt.
+ - temp-shutdown: Triggers when charger goes
+ overtemp and causes a shutdown.
+ - power-ok: Triggers when the charger
+ switcher turns on or off.
+
+Regulator Subnodes:
+- qcom,smbcharger-boost-otg A subnode for a regulator device that turns on
+ the charger boost for OTG operation.
+- qcom,smbcharger-external-otg A subnode for a regulator device that switches
+ off charging and the USB input charge path
+ in order to allow an external regulator to
+ operate. This can be used in place of the
+ qcom,smbcharger-boost-otg if an external boost
+ is available.
+
+Regulator Sub node required properties:
+- regulator-name A name string for the regulator in question
+
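+A minimal subnode sketch (the regulator name below is illustrative of the
+boost variant, not a value mandated by the binding):
+
+	qcom,smbcharger-boost-otg {
+		regulator-name = "smbcharger_charger_otg";
+	};
+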
+Optional Properties:
+- qcom,battery-psy-name The name of the main battery power supply that
+ the charger will register. Failing to define
+ this property will default the name to
+ "battery".
+- qcom,bms-psy-name The psy name to use for reporting battery
+ capacity. If left unspecified the capacity uses
+ a preprogrammed default value of 50.
+- qcom,float-voltage-mv Float Voltage in mV - the maximum voltage up
+ to which the battery is charged. Supported
+ range is 3600mV to 4500mV.
+- qcom,float-voltage-comp Specifies the JEITA float voltage compensation.
+ Value ranges from 0 to 63.
+- qcom,fastchg-current-ma Specifies the fast charge current in mA. Supported
+ range is from 300mA to 3000mA.
+- qcom,fastchg-current-comp Specifies the fast charge current compensation in
+ mA. Supported values are 250, 700, 900 and 1200mA.
+- qcom,charging-timeout-mins Maximum duration in minutes that a single
+ charge cycle may last. Supported values are:
+ 0, 192, 384, 768, and 1536. A value of 0
+ means that no charge cycle timeout is used and
+ charging can continue indefinitely.
+- qcom,precharging-timeout-mins Maximum duration in minutes that a single
+ precharge cycle may last. Supported values
+ are: 0, 24, 48, 96, 192. A value of 0 means
+ that no precharge cycle timeout is used and
+ charging can continue indefinitely. Note that
+ the qcom,charging-timeout-mins property must
+ be specified in order for this to take effect.
+- qcom,dc-psy-type The type of charger connected to the DC path.
+ Can be "Mains", "Wireless" or "Wipower"
+- qcom,dc-psy-ma The current in mA dc path can support. Must be
+ specified if dc-psy-type is specified. Valid
+ range 300mA to 2000mA.
+- qcom,dcin-vadc The phandle to pmi8994 voltage adc. The ADC is
+ used to get notifications when the DCIN voltage
+ crosses a programmed min/max threshold. This is
+ used to make configurations for optimized power
+ draw for Wipower.
+- qcom,wipower-div2-ilim-map
+- qcom,wipower-pt-ilim-map
+- qcom,wipower-default-ilim-map
+ Array of 5 elements to indicate the voltage ranges and their corresponding
+ current limits. The 5 elements with index [0..4] are:
+ [0] => voltage_low in uV
+ [1] => voltage_high in uV
+ [2] => current limit for pass through in mA
+ [3] => current limit for div2 mode dcin low voltage in mA
+ [4] => current limit for div2 mode dcin high voltage in mA
+ The div2 and pt tables indicate the current limits
+ to use when Wipower is operating in divide_by_2 mode
+ and pass through mode respectively.
+ The default table is used when the voltage ranges
+ are beyond the ones specified in the mapping table.
+ Note that if dcin-vadc or any of these mapping
+ tables are not specified, dynamic dcin input
+ is disabled.
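+ An illustrative single entry (placeholder values,
+ in the element order listed above):
+ qcom,wipower-pt-ilim-map =
+ <5000000 5800000 600 600 600>;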
+- qcom,charging-disabled Set this if charging should be disabled in the
+ build by default.
+- qcom,resume-delta-mv Specifies the minimum voltage drop in
+ millivolts below the float voltage that is
+ required in order to initiate a new charging
+ cycle. Supported values are: 50, 100, 200 and
+ 300mV.
+- qcom,chg-inhibit-en Boolean that indicates whether the charge inhibit
+ feature needs to be enabled. If this is not set,
+ charge inhibit feature is disabled by default.
+- qcom,chg-inhibit-fg Indicates if the recharge threshold source has
+ to be Fuel gauge ADC. If this is not set, it
+ will be analog sensor by default.
+- qcom,bmd-algo-disabled Indicates if the battery missing detection
+ algorithm is disabled. If this node is present
+ SMB uses the THERM pin for battery missing
+ detection.
+- qcom,charge-unknown-battery Boolean that indicates whether an unknown
+ battery without a matching profile will be
+ charged. If this is not set, if the fuel gauge
+ does not recognize the battery based on its
+ battery ID, the charger will not start
+ charging.
+- qcom,bmd-pin-src A string that indicates the source pin for the
+ battery missing detection. This can be either:
+ - "bpd_none"
+ battery is considered always present
+ - "bpd_id"
+ battery id pin is used
+ - "bpd_thm"
+ battery therm pin is used
+ - "bpd_thm_id"
+ both pins are used (battery is
+ considered missing if either pin is
+ floating).
+- qcom,iterm-ma Specifies the termination current to indicate
+ end-of-charge. Possible values in mA:
+ 50, 100, 150, 200, 250, 300, 500, 600.
+- qcom,iterm-disabled Disables the termination current feature. This
+ is a boolean property.
+- otg-parent-supply A phandle to an external boost regulator for
+ OTG if it exists.
+- qcom,thermal-mitigation: Array of input current limit values for
+ different system thermal mitigation levels.
+ This should be a flat array that denotes the
+ maximum charge current in mA for each thermal
+ level.
+- qcom,rparasitics-uohm: The parasitic resistance of the board following
+ the line from the battery connectors through
+ vph_power. This is used to calculate maximum
+ available current of the battery.
+- qcom,vled-max-uv: The maximum input voltage of the flash leds.
+ This is used to calculate maximum available
+ current of the battery.
+- qcom,autoadjust-vfloat A boolean property that when set, makes the
+ driver automatically readjust vfloat using the
+ fuel gauge ADC readings to make charging more
+ accurate.
+- qcom,jeita-temp-hard-limit When present, this property enables or disables
+ the jeita temperature hard limit based on the
+ value 1 or 0. Specify 0 if the jeita temp hard
+ limit needs to be disabled. If it is not present,
+ the jeita temperature hard limit will be based on
+ what the bootloader had set earlier.
+- qcom,low-volt-dcin: A boolean property which, when set, enables the
+ AICL deglitch configuration dynamically. This needs
+ to be set if the DCIN supply is going to be less
+ than or equal to 5V.
+- qcom,force-aicl-rerun: A boolean property which, when set, enables the
+ AICL rerun by default along with the deglitch time
+ configured to long interval (20 ms). Also, specifying
+ this property will not adjust the AICL deglitch time
+ dynamically for handling the battery over-voltage
+ oscillations when the charger is headroom limited.
+- qcom,aicl-rerun-period-s If force-aicl-rerun is on, this property dictates
+ how often AICL is rerun, in seconds. Possible values
+ are 45, 90, 180, and 360.
+- qcom,ibat-ocp-threshold-ua Maximum current before the battery will trigger
+ overcurrent protection. Use the recommended
+ battery pack value minus some margin.
+- qcom,soft-vfloat-comp-disabled Set this property when the battery is
+ powered via an external source and could
+ go above the float voltage.
+- qcom,parallel-usb-min-current-ma Minimum current drawn by the primary
+ charger before enabling the parallel
+ charger if one exists. Do not define
+ this property if no parallel chargers
+ exist.
+- qcom,parallel-usb-9v-min-current-ma Minimum current drawn by the primary
+ charger before enabling the parallel
+ charger if one exists. This property
+ applies only for 9V chargers.
+- qcom,parallel-allowed-lowering-ma Acceptable current drop from the initial limit
+ to keep parallel charger activated. If the
+ charger current reduces beyond this threshold
+ parallel charger is disabled. Must be specified
+ if parallel charger is used.
+- qcom,parallel-main-chg-fcc-percent Percentage of the fast charge current allotted to the
+ main charger when parallel charging is enabled and
+ operational. If this property is not defined, the
+ driver defaults to a 50%/50% split between the main
+ and parallel charger.
+- qcom,parallel-main-chg-icl-percent Percentage of the input current allotted to the
+ main charger when parallel charging is enabled and
+ operational. If this property is not defined, the
+ driver defaults to a 60%/40% split between the main
+ and parallel charger.
+- qcom,battery-data Points to the phandle of node which
+ contains the battery-profiles supported
+ by the charger/FG.
+- qcom,chg-led-support A bool property to support the charger led feature.
+- qcom,chg-led-sw-controls A bool property to allow the software to control
+ the charger led without a valid charger.
+- qcom,skip-usb-notification A boolean property to be used when USB presence
+ and type are obtained by other means. Especially true
+ on liquid hardware, where USB presence is detected
+ based on a GPIO.
+- qcom,skip-usb-suspend-for-fake-battery A boolean property to skip
+ suspending USB path for fake
+ battery.
+- qcom,vchg_sns-vadc Phandle of the VADC node.
+- qcom,vchg-adc-channel-id The ADC channel to which the VCHG is routed.
+
+Example:
+ qcom,qpnp-smbcharger {
+ compatible = "qcom,qpnp-smbcharger";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ qcom,iterm-ma = <100>;
+ qcom,float-voltage-mv = <4200>;
+ qcom,resume-delta-mv = <100>;
+ qcom,bmd-pin-src = "bpd_thm_id";
+ qcom,dc-psy-type = "Mains";
+ qcom,dc-psy-ma = <1500>;
+ qcom,bms-psy-name = "bms";
+ qcom,battery-psy-name = "battery";
+ qcom,thermal-mitigation = <1500 700 600 325>;
+ qcom,vchg_sns-vadc = <&pmi8950_vadc>;
+ qcom,vchg-adc-channel-id = <3>;
+
+ qcom,chgr@1000 {
+ reg = <0x1000 0x100>;
+ interrupts = <0x2 0x10 0x0>,
+ <0x2 0x10 0x1>,
+ <0x2 0x10 0x2>,
+ <0x2 0x10 0x3>,
+ <0x2 0x10 0x4>,
+ <0x2 0x10 0x5>,
+ <0x2 0x10 0x6>,
+ <0x2 0x10 0x7>;
+
+ interrupt-names = "chg-error",
+ "chg-inhibit",
+ "chg-prechg-sft",
+ "chg-complete-chg-sft",
+ "chg-p2f-thr",
+ "chg-rechg-thr",
+ "chg-taper-thr",
+ "chg-tcc-thr";
+ };
+
+ qcom,otg@1100 {
+ reg = <0x1100 0x100>;
+ };
+
+ qcom,bat-if@1200 {
+ reg = <0x1200 0x100>;
+ interrupts = <0x2 0x12 0x0>,
+ <0x2 0x12 0x1>,
+ <0x2 0x12 0x2>,
+ <0x2 0x12 0x3>,
+ <0x2 0x12 0x4>,
+ <0x2 0x12 0x5>,
+ <0x2 0x12 0x6>,
+ <0x2 0x12 0x7>;
+
+ interrupt-names = "batt-hot",
+ "batt-warm",
+ "batt-cold",
+ "batt-cool",
+ "batt-ov",
+ "batt-low",
+ "batt-missing",
+ "batt-term-missing";
+ };
+
+ qcom,usb-chgpth@1300 {
+ reg = <0x1300 0x100>;
+ interrupts = <0x2 0x13 0x0>,
+ <0x2 0x13 0x1>,
+ <0x2 0x13 0x2>,
+ <0x2 0x13 0x3>,
+ <0x2 0x13 0x4>,
+ <0x2 0x13 0x5>,
+ <0x2 0x13 0x6>;
+
+ interrupt-names = "usbin-uv",
+ "usbin-ov",
+ "usbin-src-det",
+ "otg-fail",
+ "otg-oc",
+ "aicl-done",
+ "usbid-change";
+ };
+
+ qcom,dc-chgpth@1400 {
+ reg = <0x1400 0x100>;
+ interrupts = <0x2 0x14 0x0>,
+ <0x2 0x14 0x1>;
+
+ interrupt-names = "dcin-uv",
+ "dcin-ov";
+ };
+
+ qcom,chgr-misc@1600 {
+ reg = <0x1600 0x100>;
+ interrupts = <0x2 0x16 0x0>,
+ <0x2 0x16 0x1>,
+ <0x2 0x16 0x2>,
+ <0x2 0x16 0x3>,
+ <0x2 0x16 0x4>,
+ <0x2 0x16 0x5>;
+
+ interrupt-names = "power-ok",
+ "temp-shutdown",
+ "wdog-timeout",
+ "flash-fail",
+ "otst2",
+ "otst3";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index db21a2b58c2b..7820562d17ae 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -1096,6 +1096,26 @@ qcom,msm-audio-ion {
qcom,smmu-enabled;
};
+* msm-audio-ion-vm
+
+Required properties:
+ - compatible : "qcom,msm-audio-ion-vm"
+
+Optional properties:
+ - qcom,smmu-enabled:
+ Some MSM targets have an SMMU in the ADSP, while others
+ do not. The audio library introduces a wrapper for the
+ ION APIs, and the wrapper needs to know whether an SMMU
+ is present in the ADSP to handle the ION APIs
+ differently. Presence of this property means the ADSP
+ has an SMMU in it.
+
+Example:
+
+qcom,msm-audio-ion-vm {
+ compatible = "qcom,msm-audio-ion-vm;
+ qcom,smmu-enabled;
+};
+
* MSM8994 ASoC Machine driver
Required properties:
diff --git a/Documentation/devicetree/bindings/usb/msm-ssusb.txt b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
index 47fad8aa4a1a..54792335e67e 100644
--- a/Documentation/devicetree/bindings/usb/msm-ssusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
@@ -64,6 +64,8 @@ Optional properties :
device provides both "USB" and "USB-HOST" events.
- qcom,pm-qos-latency: This represents max tolerable CPU latency in microsecs,
which is used as a vote by driver to get max performance in perf mode.
+- qcom,no-wakeup-src-in-hostmode: If present, the driver does not use wakeup_source APIs
+ in host mode. This allows PM suspend to happen irrespective of the runtime PM state of the host.
Sub nodes:
- Sub node for "DWC3- USB3 controller".
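
For illustration, a minimal sketch of how a driver could consume this flag at
probe time, assuming a valid struct device_node pointer; the helper name
msm_ssusb_parse_wakeup_flag and the cached state variable are hypothetical,
while of_property_read_bool() is the standard kernel accessor for boolean DT
properties:

    #include <linux/of.h>
    #include <linux/types.h>

    static bool no_wakeup_src_in_hostmode; /* hypothetical driver state */

    /* Boolean DT properties carry no value; presence alone enables them. */
    static void msm_ssusb_parse_wakeup_flag(struct device_node *node)
    {
            no_wakeup_src_in_hostmode =
                    of_property_read_bool(node, "qcom,no-wakeup-src-in-hostmode");
    }

With the flag cached this way, the host-mode paths can simply skip the
wakeup_source calls when it is set.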
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index b102b436563e..753dd4f96afe 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -102,14 +102,16 @@ background_gc=%s Turn on/off cleaning operations, namely garbage
collection, triggered in background when I/O subsystem is
idle. If background_gc=on, it will turn on the garbage
collection and if background_gc=off, garbage collection
- will be truned off. If background_gc=sync, it will turn
+ will be turned off. If background_gc=sync, it will turn
on synchronous garbage collection running in background.
Default value for this option is on. So garbage
collection is on by default.
disable_roll_forward Disable the roll-forward recovery routine
norecovery Disable the roll-forward recovery routine, mounted read-
only (i.e., -o ro,disable_roll_forward)
-discard Issue discard/TRIM commands when a segment is cleaned.
+discard/nodiscard Enable/disable real-time discard in f2fs. If discard is
+ enabled, f2fs will issue discard/TRIM commands when a
+ segment is cleaned.
no_heap Disable heap-style segment allocation which finds free
segments for data from the beginning of main area, while
for node from the end of main area.
@@ -129,6 +131,7 @@ inline_dentry Enable the inline dir feature: data in new created
directory entries can be written into inode block. The
space of inode block which is used to store inline
dentries is limited to ~3.4k.
+noinline_dentry Disable the inline dentry feature.
flush_merge Merge concurrent cache_flush commands as much as possible
to eliminate redundant command issues. If the underlying
device handles the cache_flush command relatively slowly,
@@ -145,10 +148,15 @@ extent_cache Enable an extent cache based on rb-tree, it can cache
as many as extent which map between contiguous logical
address and physical address per inode, resulting in
increasing the cache hit ratio. Set by default.
-noextent_cache Diable an extent cache based on rb-tree explicitly, see
+noextent_cache Disable an extent cache based on rb-tree explicitly, see
the above extent_cache mount option.
noinline_data Disable the inline data feature, inline data feature is
enabled by default.
+data_flush Enable data flushing before checkpoint in order to
+ persist the data of regular files and symlinks.
+mode=%s Control the block allocation mode, which supports
+ "adaptive" and "lfs". In "lfs" mode, there should be
+ no random writes towards the main area.
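
As a hedged user-space illustration of combining these options, the snippet
below mounts an f2fs volume with synchronous background GC, real-time discard,
and adaptive allocation; the device node and mount point are placeholders:

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* The option string is parsed by f2fs as documented above. */
            if (mount("/dev/sdb1", "/mnt/f2fs", "f2fs", 0,
                      "background_gc=sync,discard,mode=adaptive") != 0) {
                    perror("mount");
                    return 1;
            }
            return 0;
    }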
================================================================================
DEBUGFS ENTRIES
@@ -192,7 +200,7 @@ Files in /sys/fs/f2fs/<devname>
policy for garbage collection. Setting gc_idle = 0
(default) will disable this option. Setting
gc_idle = 1 will select the Cost Benefit approach
- & setting gc_idle = 2 will select the greedy aproach.
+ & setting gc_idle = 2 will select the greedy approach.
reclaim_segments This parameter controls the number of prefree
segments to be reclaimed. If the number of prefree
@@ -298,7 +306,7 @@ The dump.f2fs shows the information of specific inode and dumps SSA and SIT to
file. Each file is dump_ssa and dump_sit.
The dump.f2fs is used to debug on-disk data structures of the f2fs filesystem.
-It shows on-disk inode information reconized by a given inode number, and is
+It shows on-disk inode information recognized by a given inode number, and is
able to dump all the SSA and SIT entries into predefined files, ./dump_ssa and
./dump_sit respectively.
diff --git a/Makefile b/Makefile
index 919851a86eaa..6229b4d49877 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 85
+SUBLEVEL = 89
EXTRAVERSION =
NAME = Blurry Fish Butt
diff --git a/android/configs/README b/android/configs/README
deleted file mode 100644
index 8798731f8904..000000000000
--- a/android/configs/README
+++ /dev/null
@@ -1,15 +0,0 @@
-The files in this directory are meant to be used as a base for an Android
-kernel config. All devices should have the options in android-base.cfg enabled.
-While not mandatory, the options in android-recommended.cfg enable advanced
-Android features.
-
-Assuming you already have a minimalist defconfig for your device, a possible
-way to enable these options would be:
-
- ARCH=<arch> scripts/kconfig/merge_config.sh <path_to>/<device>_defconfig android/configs/android-base.cfg android/configs/android-recommended.cfg
-
-This will generate a .config that can then be used to save a new defconfig or
-compile a new kernel with Android features enabled.
-
-Because there is no tool to consistently generate these config fragments,
-lets keep them alphabetically sorted instead of random.
diff --git a/android/configs/android-base-arm64.cfg b/android/configs/android-base-arm64.cfg
deleted file mode 100644
index 43f23d6b5391..000000000000
--- a/android/configs/android-base-arm64.cfg
+++ /dev/null
@@ -1,5 +0,0 @@
-# KEEP ALPHABETICALLY SORTED
-CONFIG_ARMV8_DEPRECATED=y
-CONFIG_CP15_BARRIER_EMULATION=y
-CONFIG_SETEND_EMULATION=y
-CONFIG_SWP_EMULATION=y
diff --git a/android/configs/android-base.cfg b/android/configs/android-base.cfg
deleted file mode 100644
index 419dca62542a..000000000000
--- a/android/configs/android-base.cfg
+++ /dev/null
@@ -1,164 +0,0 @@
-# KEEP ALPHABETICALLY SORTED
-# CONFIG_DEVKMEM is not set
-# CONFIG_DEVMEM is not set
-# CONFIG_FHANDLE is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_NFSD is not set
-# CONFIG_NFS_FS is not set
-# CONFIG_OABI_COMPAT is not set
-# CONFIG_SYSVIPC is not set
-# CONFIG_USELIB is not set
-CONFIG_ANDROID=y
-CONFIG_ANDROID_BINDER_IPC=y
-CONFIG_ANDROID_BINDER_DEVICES=binder,hwbinder,vndbinder
-CONFIG_ANDROID_LOW_MEMORY_KILLER=y
-CONFIG_ASHMEM=y
-CONFIG_AUDIT=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_CGROUPS=y
-CONFIG_CGROUP_CPUACCT=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_SCHED=y
-CONFIG_DEFAULT_SECURITY_SELINUX=y
-CONFIG_EMBEDDED=y
-CONFIG_FB=y
-CONFIG_HARDENED_USERCOPY=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_INET6_AH=y
-CONFIG_INET6_ESP=y
-CONFIG_INET6_IPCOMP=y
-CONFIG_INET=y
-CONFIG_INET_DIAG_DESTROY=y
-CONFIG_INET_ESP=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_IP6_NF_FILTER=y
-CONFIG_IP6_NF_IPTABLES=y
-CONFIG_IP6_NF_MANGLE=y
-CONFIG_IP6_NF_RAW=y
-CONFIG_IP6_NF_TARGET_REJECT=y
-CONFIG_IPV6=y
-CONFIG_IPV6_MIP6=y
-CONFIG_IPV6_MULTIPLE_TABLES=y
-CONFIG_IPV6_OPTIMISTIC_DAD=y
-CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_NF_ARPFILTER=y
-CONFIG_IP_NF_ARPTABLES=y
-CONFIG_IP_NF_ARP_MANGLE=y
-CONFIG_IP_NF_FILTER=y
-CONFIG_IP_NF_IPTABLES=y
-CONFIG_IP_NF_MANGLE=y
-CONFIG_IP_NF_MATCH_AH=y
-CONFIG_IP_NF_MATCH_ECN=y
-CONFIG_IP_NF_MATCH_TTL=y
-CONFIG_IP_NF_NAT=y
-CONFIG_IP_NF_RAW=y
-CONFIG_IP_NF_SECURITY=y
-CONFIG_IP_NF_TARGET_MASQUERADE=y
-CONFIG_IP_NF_TARGET_NETMAP=y
-CONFIG_IP_NF_TARGET_REDIRECT=y
-CONFIG_IP_NF_TARGET_REJECT=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_NET=y
-CONFIG_NETDEVICES=y
-CONFIG_NETFILTER=y
-CONFIG_NETFILTER_XT_MATCH_COMMENT=y
-CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
-CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
-CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
-CONFIG_NETFILTER_XT_MATCH_HELPER=y
-CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
-CONFIG_NETFILTER_XT_MATCH_LENGTH=y
-CONFIG_NETFILTER_XT_MATCH_LIMIT=y
-CONFIG_NETFILTER_XT_MATCH_MAC=y
-CONFIG_NETFILTER_XT_MATCH_MARK=y
-CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
-CONFIG_NETFILTER_XT_MATCH_POLICY=y
-CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
-CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
-CONFIG_NETFILTER_XT_MATCH_QUOTA=y
-CONFIG_NETFILTER_XT_MATCH_SOCKET=y
-CONFIG_NETFILTER_XT_MATCH_STATE=y
-CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
-CONFIG_NETFILTER_XT_MATCH_STRING=y
-CONFIG_NETFILTER_XT_MATCH_TIME=y
-CONFIG_NETFILTER_XT_MATCH_U32=y
-CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
-CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
-CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
-CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
-CONFIG_NETFILTER_XT_TARGET_MARK=y
-CONFIG_NETFILTER_XT_TARGET_NFLOG=y
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
-CONFIG_NETFILTER_XT_TARGET_SECMARK=y
-CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
-CONFIG_NETFILTER_XT_TARGET_TPROXY=y
-CONFIG_NETFILTER_XT_TARGET_TRACE=y
-CONFIG_NET_CLS_ACT=y
-CONFIG_NET_CLS_U32=y
-CONFIG_NET_EMATCH=y
-CONFIG_NET_EMATCH_U32=y
-CONFIG_NET_KEY=y
-CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_HTB=y
-CONFIG_NF_CONNTRACK=y
-CONFIG_NF_CONNTRACK_AMANDA=y
-CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CONNTRACK_FTP=y
-CONFIG_NF_CONNTRACK_H323=y
-CONFIG_NF_CONNTRACK_IPV4=y
-CONFIG_NF_CONNTRACK_IPV6=y
-CONFIG_NF_CONNTRACK_IRC=y
-CONFIG_NF_CONNTRACK_NETBIOS_NS=y
-CONFIG_NF_CONNTRACK_PPTP=y
-CONFIG_NF_CONNTRACK_SANE=y
-CONFIG_NF_CONNTRACK_SECMARK=y
-CONFIG_NF_CONNTRACK_TFTP=y
-CONFIG_NF_CT_NETLINK=y
-CONFIG_NF_CT_PROTO_DCCP=y
-CONFIG_NF_CT_PROTO_SCTP=y
-CONFIG_NF_CT_PROTO_UDPLITE=y
-CONFIG_NF_NAT=y
-CONFIG_NO_HZ=y
-CONFIG_PACKET=y
-CONFIG_PM_AUTOSLEEP=y
-CONFIG_PM_WAKELOCKS=y
-CONFIG_PPP=y
-CONFIG_PPPOLAC=y
-CONFIG_PPPOPNS=y
-CONFIG_PPP_BSDCOMP=y
-CONFIG_PPP_DEFLATE=y
-CONFIG_PPP_MPPE=y
-CONFIG_PREEMPT=y
-CONFIG_PROFILING=y
-CONFIG_RANDOMIZE_BASE=y
-CONFIG_RTC_CLASS=y
-CONFIG_RT_GROUP_SCHED=y
-CONFIG_SECCOMP=y
-CONFIG_SECURITY=y
-CONFIG_SECURITY_NETWORK=y
-CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
-CONFIG_SECURITY_SELINUX=y
-CONFIG_STAGING=y
-CONFIG_SYNC=y
-CONFIG_TUN=y
-CONFIG_UID_SYS_STATS=y
-CONFIG_UNIX=y
-CONFIG_USB_CONFIGFS=y
-CONFIG_USB_CONFIGFS_F_ACC=y
-CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
-CONFIG_USB_CONFIGFS_F_FS=y
-CONFIG_USB_CONFIGFS_F_MIDI=y
-CONFIG_USB_CONFIGFS_F_MTP=y
-CONFIG_USB_CONFIGFS_F_PTP=y
-CONFIG_USB_CONFIGFS_UEVENT=y
-CONFIG_USB_GADGET=y
-CONFIG_XFRM_USER=y
diff --git a/android/configs/android-recommended.cfg b/android/configs/android-recommended.cfg
deleted file mode 100644
index 6550d0423f50..000000000000
--- a/android/configs/android-recommended.cfg
+++ /dev/null
@@ -1,140 +0,0 @@
-# KEEP ALPHABETICALLY SORTED
-# CONFIG_AIO is not set
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_NF_CONNTRACK_SIP is not set
-# CONFIG_PM_WAKELOCKS_GC is not set
-# CONFIG_VT is not set
-CONFIG_ANDROID_TIMED_GPIO=y
-CONFIG_ARM64_SW_TTBR0_PAN=y
-CONFIG_ARM_KERNMEM_PERMS=y
-CONFIG_ARM64_SW_TTBR0_PAN=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_BLK_DEV_DM=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_CC_STACKPROTECTOR_STRONG=y
-CONFIG_COMPACTION=y
-CONFIG_CPU_SW_DOMAIN_PAN=y
-CONFIG_DEBUG_RODATA=y
-CONFIG_DM_CRYPT=y
-CONFIG_DM_UEVENT=y
-CONFIG_DM_VERITY=y
-CONFIG_DM_VERITY_FEC=y
-CONFIG_DRAGONRISE_FF=y
-CONFIG_ENABLE_DEFAULT_TRACERS=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_SECURITY=y
-CONFIG_FUSE_FS=y
-CONFIG_GREENASIA_FF=y
-CONFIG_HIDRAW=y
-CONFIG_HID_A4TECH=y
-CONFIG_HID_ACRUX=y
-CONFIG_HID_ACRUX_FF=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_BELKIN=y
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-CONFIG_HID_CYPRESS=y
-CONFIG_HID_DRAGONRISE=y
-CONFIG_HID_ELECOM=y
-CONFIG_HID_EMS_FF=y
-CONFIG_HID_EZKEY=y
-CONFIG_HID_GREENASIA=y
-CONFIG_HID_GYRATION=y
-CONFIG_HID_HOLTEK=y
-CONFIG_HID_KENSINGTON=y
-CONFIG_HID_KEYTOUCH=y
-CONFIG_HID_KYE=y
-CONFIG_HID_LCPOWER=y
-CONFIG_HID_LOGITECH=y
-CONFIG_HID_LOGITECH_DJ=y
-CONFIG_HID_MAGICMOUSE=y
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
-CONFIG_HID_MULTITOUCH=y
-CONFIG_HID_NTRIG=y
-CONFIG_HID_ORTEK=y
-CONFIG_HID_PANTHERLORD=y
-CONFIG_HID_PETALYNX=y
-CONFIG_HID_PICOLCD=y
-CONFIG_HID_PRIMAX=y
-CONFIG_HID_PRODIKEYS=y
-CONFIG_HID_ROCCAT=y
-CONFIG_HID_SAITEK=y
-CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SMARTJOYPLUS=y
-CONFIG_HID_SONY=y
-CONFIG_HID_SPEEDLINK=y
-CONFIG_HID_SUNPLUS=y
-CONFIG_HID_THRUSTMASTER=y
-CONFIG_HID_TIVO=y
-CONFIG_HID_TOPSEED=y
-CONFIG_HID_TWINHAN=y
-CONFIG_HID_UCLOGIC=y
-CONFIG_HID_WACOM=y
-CONFIG_HID_WALTOP=y
-CONFIG_HID_WIIMOTE=y
-CONFIG_HID_ZEROPLUS=y
-CONFIG_HID_ZYDACRON=y
-CONFIG_INPUT_EVDEV=y
-CONFIG_INPUT_GPIO=y
-CONFIG_INPUT_JOYSTICK=y
-CONFIG_INPUT_KEYCHORD=y
-CONFIG_INPUT_KEYRESET=y
-CONFIG_INPUT_MISC=y
-CONFIG_INPUT_TABLET=y
-CONFIG_INPUT_UINPUT=y
-CONFIG_ION=y
-CONFIG_JOYSTICK_XPAD=y
-CONFIG_JOYSTICK_XPAD_FF=y
-CONFIG_JOYSTICK_XPAD_LEDS=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_KSM=y
-CONFIG_LOGIG940_FF=y
-CONFIG_LOGIRUMBLEPAD2_FF=y
-CONFIG_LOGITECH_FF=y
-CONFIG_MD=y
-CONFIG_MEDIA_SUPPORT=y
-CONFIG_MEMORY_STATE_TIME=y
-CONFIG_MSDOS_FS=y
-CONFIG_PANIC_TIMEOUT=5
-CONFIG_PANTHERLORD_FF=y
-CONFIG_PERF_EVENTS=y
-CONFIG_PM_DEBUG=y
-CONFIG_PM_RUNTIME=y
-CONFIG_PM_WAKELOCKS_LIMIT=0
-CONFIG_POWER_SUPPLY=y
-CONFIG_PSTORE=y
-CONFIG_PSTORE_CONSOLE=y
-CONFIG_PSTORE_RAM=y
-CONFIG_QFMT_V2=y
-CONFIG_QUOTA=y
-CONFIG_QUOTACTL=y
-CONFIG_QUOTA_NETLINK_INTERFACE=y
-CONFIG_QUOTA_TREE=y
-CONFIG_SCHEDSTATS=y
-CONFIG_SMARTJOYPLUS_FF=y
-CONFIG_SND=y
-CONFIG_SOUND=y
-CONFIG_SUSPEND_TIME=y
-CONFIG_TABLET_USB_ACECAD=y
-CONFIG_TABLET_USB_AIPTEK=y
-CONFIG_TABLET_USB_GTCO=y
-CONFIG_TABLET_USB_HANWANG=y
-CONFIG_TABLET_USB_KBTAB=y
-CONFIG_TASKSTATS=y
-CONFIG_TASK_DELAY_ACCT=y
-CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_TASK_XACCT=y
-CONFIG_TIMER_STATS=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_UHID=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_HIDDEV=y
-CONFIG_USB_USBNET=y
-CONFIG_VFAT_FS=y
diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h
index 4cb4b6d3452c..0bc66e1d3a7e 100644
--- a/arch/alpha/include/asm/types.h
+++ b/arch/alpha/include/asm/types.h
@@ -1,6 +1,6 @@
#ifndef _ALPHA_TYPES_H
#define _ALPHA_TYPES_H
-#include <asm-generic/int-ll64.h>
+#include <uapi/asm/types.h>
#endif /* _ALPHA_TYPES_H */
diff --git a/arch/alpha/include/uapi/asm/types.h b/arch/alpha/include/uapi/asm/types.h
index 9fd3cd459777..8d1024d7be05 100644
--- a/arch/alpha/include/uapi/asm/types.h
+++ b/arch/alpha/include/uapi/asm/types.h
@@ -9,8 +9,18 @@
 * need to be careful to avoid name clashes.
*/
-#ifndef __KERNEL__
+/*
+ * This is here because we used to use l64 for alpha
+ * and we don't want to impact user mode with our change to ll64
+ * in the kernel.
+ *
+ * However, some user programs are fine with this. They can
+ * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here.
+ */
+#if !defined(__SANE_USERSPACE_TYPES__) && !defined(__KERNEL__)
#include <asm-generic/int-l64.h>
+#else
+#include <asm-generic/int-ll64.h>
#endif
#endif /* _UAPI_ALPHA_TYPES_H */
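
As a hedged illustration of the escape hatch described in the comment above, a
user-space program opting into the ll64 types would define the macro before
including the uapi header; beyond the macro and header names, everything below
is illustrative:

    /* Must be defined before any kernel uapi header is pulled in. */
    #define __SANE_USERSPACE_TYPES__
    #include <asm/types.h>
    #include <stdio.h>

    int main(void)
    {
            __u64 v = 1;    /* unsigned long long with the macro defined */

            printf("%llu\n", (unsigned long long)v);
            return 0;
    }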
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 2efb0625331d..db1eee5fe502 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -104,6 +104,12 @@ ENTRY(EV_MachineCheck)
lr r0, [efa]
mov r1, sp
+ ; hardware auto-disables MMU, re-enable it to allow kernel vaddr
+ ; access for say stack unwinding of modules for crash dumps
+ lr r3, [ARC_REG_PID]
+ or r3, r3, MMU_ENABLE
+ sr r3, [ARC_REG_PID]
+
lsr r3, r2, 8
bmsk r3, r3, 7
brne r3, ECR_C_MCHK_DUP_TLB, 1f
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index daf2bf52b984..97e9582dcf99 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -885,9 +885,6 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
local_irq_save(flags);
- /* re-enable the MMU */
- write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));
-
/* loop thru all sets of TLB */
for (set = 0; set < mmu->sets; set++) {
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 588393412271..22b546e0f845 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1776,7 +1776,7 @@ source "mm/Kconfig"
choice
prompt "Virtual Memory Reclaim"
- default NO_VM_RECLAIM
+ default ENABLE_VMALLOC_SAVING
help
Select the method of reclaiming virtual memory
diff --git a/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi b/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi
index db33594d3827..80901ddcf7d1 100644
--- a/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi
+++ b/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi
@@ -717,6 +717,8 @@
<&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>,
<&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>,
<&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>,
+ <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_rx_1>,
+ <&dai_sec_tdm_rx_2>, <&dai_sec_tdm_rx_3>,
<&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>,
<&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>,
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>,
@@ -739,6 +741,8 @@
"msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871",
"msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866",
"msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870",
+ "msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36882",
+ "msm-dai-q6-tdm.36884", "msm-dai-q6-tdm.36886",
"msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883",
"msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887",
"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898",
diff --git a/arch/arm/boot/dts/qcom/msm-audio.dtsi b/arch/arm/boot/dts/qcom/msm-audio.dtsi
index 3a7514397139..75aea7280e6c 100644
--- a/arch/arm/boot/dts/qcom/msm-audio.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-audio.dtsi
@@ -383,6 +383,7 @@
qcom,msm-cpudai-auxpcm-data = <0>, <0>;
qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>, <2048000>;
qcom,msm-auxpcm-interface = "primary";
+ qcom,msm-cpudai-afe-clk-ver = <2>;
};
dai_sec_auxpcm: qcom,msm-sec-auxpcm {
diff --git a/arch/arm/boot/dts/qcom/msm-smb138x.dtsi b/arch/arm/boot/dts/qcom/msm-smb138x.dtsi
index c156e91dfcf9..fa21dd7995eb 100644
--- a/arch/arm/boot/dts/qcom/msm-smb138x.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-smb138x.dtsi
@@ -88,7 +88,7 @@
};
};
- smb138x_parallel_slave: qcom,smb138x-parallel-slave@1000 {
+ smb1381_charger: qcom,smb1381-charger@1000 {
compatible = "qcom,smb138x-parallel-slave";
qcom,pmic-revid = <&smb138x_revid>;
reg = <0x1000 0x700>;
@@ -129,7 +129,7 @@
};
};
-&smb138x_parallel_slave {
+&smb1381_charger {
smb138x_vbus: qcom,smb138x-vbus {
status = "disabled";
regulator-name = "smb138x-vbus";
diff --git a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
index 1283cdddc2db..343c6a2ee2da 100644
--- a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
@@ -1104,6 +1104,8 @@
<&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>,
<&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>,
<&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>,
+ <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_rx_1>,
+ <&dai_sec_tdm_rx_2>, <&dai_sec_tdm_rx_3>,
<&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>,
<&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>,
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>,
@@ -1126,6 +1128,8 @@
"msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871",
"msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866",
"msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870",
+ "msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36882",
+ "msm-dai-q6-tdm.36884", "msm-dai-q6-tdm.36886",
"msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883",
"msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887",
"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898",
@@ -1381,6 +1385,7 @@
&usb2s {
status = "ok";
+ qcom,no-wakeup-src-in-hostmode;
};
&usb3 {
@@ -1388,6 +1393,7 @@
vbus_dwc3-supply = <&usb_otg_switch>;
vdda33-supply = <&pm8994_l24>;
vdda18-supply = <&pm8994_l12>;
+ qcom,no-wakeup-src-in-hostmode;
};
&blsp1_uart2 {
@@ -1557,5 +1563,9 @@
reg = <0 0xb3fff000 0 0x800000>;
label = "early_camera_mem";
};
+ early_audio_mem: early_audio_mem@0xb5fff000 {
+ reg = <0x0 0xb5fff000 0x0 0x3FFFFC>;
+ label = "early_audio_mem";
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
index c3b986786034..682a745b30e8 100644
--- a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
@@ -926,6 +926,8 @@
<&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>,
<&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>,
<&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>,
+ <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_rx_1>,
+ <&dai_sec_tdm_rx_2>, <&dai_sec_tdm_rx_3>,
<&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>,
<&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>,
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>,
@@ -948,6 +950,8 @@
"msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871",
"msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866",
"msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870",
+ "msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36882",
+ "msm-dai-q6-tdm.36884", "msm-dai-q6-tdm.36886",
"msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883",
"msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887",
"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898",
@@ -1331,6 +1335,10 @@
reg = <0 0xb3fff000 0 0x800000>;
label = "early_camera_mem";
};
+ early_audio_mem: early_audio_mem@0xb5fff000 {
+ reg = <0x0 0xb5fff000 0x0 0x3FFFFC>;
+ label = "early_audio_mem";
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-cv2x.dtsi b/arch/arm/boot/dts/qcom/msm8996-cv2x.dtsi
index f1cf3136dbd0..d8835c40e3d8 100644
--- a/arch/arm/boot/dts/qcom/msm8996-cv2x.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-cv2x.dtsi
@@ -342,6 +342,8 @@
&pcie2 {
/* Enumerate MDM on wake interrupt */
qcom,boot-option = <0x0>;
+ /delete-property/ qcom,l1-supported;
+ /delete-property/ qcom,l1ss-supported;
};
&mdm3 {
@@ -477,6 +479,15 @@
qcom,invert = <1>; /* Output high */
status = "okay";
};
+
+ mpp@a500 { /* MPP 6 */
+ qcom,mode = <1>; /* Digital output */
+ qcom,output-type = <0>; /* CMOS logic */
+ qcom,vin-sel = <2>; /* S4 1.8V */
+ qcom,src-sel = <0>; /* Constant */
+ qcom,master-en = <1>; /* Enable GPIO */
+ status = "okay";
+ };
};
&pm8994_vadc {
diff --git a/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi
index c8898ec01992..d8770a738422 100644
--- a/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi
@@ -542,6 +542,8 @@
<&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>,
<&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>,
<&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>,
+ <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_rx_1>,
+ <&dai_sec_tdm_rx_2>, <&dai_sec_tdm_rx_3>,
<&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>,
<&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>,
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>,
@@ -564,6 +566,8 @@
"msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871",
"msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866",
"msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870",
+ "msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36882",
+ "msm-dai-q6-tdm.36884", "msm-dai-q6-tdm.36886",
"msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883",
"msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887",
"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898",
diff --git a/arch/arm/boot/dts/qcom/msm8996-sde.dtsi b/arch/arm/boot/dts/qcom/msm8996-sde.dtsi
index b0688668e667..11c45606f6c2 100644
--- a/arch/arm/boot/dts/qcom/msm8996-sde.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-sde.dtsi
@@ -183,7 +183,7 @@
};
smmu_kms_unsec: qcom,smmu_kms_unsec_cb {
- compatible = "qcom,smmu_kms_unsec";
+ compatible = "qcom,smmu_sde_unsec";
iommus = <&mdp_smmu 0>;
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-v3.dtsi b/arch/arm/boot/dts/qcom/msm8996-v3.dtsi
index 7e5fa8a495c9..8e46ce5277b3 100644
--- a/arch/arm/boot/dts/qcom/msm8996-v3.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-v3.dtsi
@@ -259,6 +259,71 @@
};
};
+ qcom,gpu-pwrlevels-2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,speed-bin = <2>;
+
+ qcom,initial-pwrlevel = <4>;
+
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <560000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <11>;
+ qcom,bus-max = <11>;
+ };
+
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gpu-freq = <510000000>;
+ qcom,bus-freq = <9>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <10>;
+ };
+
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gpu-freq = <401800000>;
+ qcom,bus-freq = <8>;
+ qcom,bus-min = <7>;
+ qcom,bus-max = <9>;
+ };
+
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
+ qcom,gpu-freq = <315000000>;
+ qcom,bus-freq = <6>;
+ qcom,bus-min = <5>;
+ qcom,bus-max = <7>;
+ };
+
+ qcom,gpu-pwrlevel@4 {
+ reg = <4>;
+ qcom,gpu-freq = <214000000>;
+ qcom,bus-freq = <4>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <5>;
+ };
+
+ qcom,gpu-pwrlevel@5 {
+ reg = <5>;
+ qcom,gpu-freq = <133000000>;
+ qcom,bus-freq = <3>;
+ qcom,bus-min = <2>;
+ qcom,bus-max = <4>;
+ };
+
+ qcom,gpu-pwrlevel@6 {
+ reg = <6>;
+ qcom,gpu-freq = <27000000>;
+ qcom,bus-freq = <0>;
+ qcom,bus-min = <0>;
+ qcom,bus-max = <0>;
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/msm8996.dtsi b/arch/arm/boot/dts/qcom/msm8996.dtsi
index b3c355481238..34d93a473645 100644
--- a/arch/arm/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996.dtsi
@@ -3450,6 +3450,43 @@
};
};
+ qcom,msm-dai-tdm-sec-rx {
+ compatible = "qcom,msm-dai-tdm";
+ qcom,msm-cpudai-tdm-group-id = <37136>;
+ qcom,msm-cpudai-tdm-group-num-ports = <4>;
+ qcom,msm-cpudai-tdm-group-port-id = <36880 36882 36884 36886>;
+ qcom,msm-cpudai-tdm-clk-rate = <12288000>;
+ qcom,msm-cpudai-tdm-clk-internal = <0>;
+ qcom,msm-cpudai-tdm-sync-mode = <1>;
+ qcom,msm-cpudai-tdm-sync-src = <0>;
+ qcom,msm-cpudai-tdm-data-out = <0>;
+ qcom,msm-cpudai-tdm-invert-sync = <0>;
+ qcom,msm-cpudai-tdm-data-delay = <0>;
+ dai_sec_tdm_rx_0: qcom,msm-dai-q6-tdm-sec-rx-0 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36880>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_sec_tdm_rx_1: qcom,msm-dai-q6-tdm-sec-rx-1 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36882>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_sec_tdm_rx_2: qcom,msm-dai-q6-tdm-sec-rx-2 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36884>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_sec_tdm_rx_3: qcom,msm-dai-q6-tdm-sec-rx-3 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36886>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+ };
+
qcom,msm-dai-tdm-sec-tx {
compatible = "qcom,msm-dai-tdm";
qcom,msm-cpudai-tdm-group-id = <37137>;
diff --git a/arch/arm/boot/dts/qcom/msm8996pro-auto-adp-lite.dts b/arch/arm/boot/dts/qcom/msm8996pro-auto-adp-lite.dts
index f5c33063643d..0126081e4b03 100644
--- a/arch/arm/boot/dts/qcom/msm8996pro-auto-adp-lite.dts
+++ b/arch/arm/boot/dts/qcom/msm8996pro-auto-adp-lite.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -42,9 +42,6 @@
i2c@75b6000 { /* BLSP8 */
/* ADV7533 HDMI Bridge Chip removed on ADP Lite */
- adv7533@3d {
- status = "disabled";
- };
adv7533@39 {
status = "disabled";
};
@@ -59,6 +56,14 @@
};
};
+&dsi_adv_7533_2 {
+ /delete-property/ qcom,dsi-display-active;
+};
+
+&sde_kms {
+ connectors = <&sde_hdmi_tx &sde_hdmi &dsi_adv_7533_1>;
+};
+
&pil_modem {
pinctrl-names = "default";
pinctrl-0 = <&modem_mux>;
diff --git a/arch/arm/boot/dts/qcom/msm8996pro-auto.dtsi b/arch/arm/boot/dts/qcom/msm8996pro-auto.dtsi
index 15295639e361..f0fade10633e 100644
--- a/arch/arm/boot/dts/qcom/msm8996pro-auto.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996pro-auto.dtsi
@@ -466,5 +466,11 @@
qcom,gpu-pwrlevels-0 {
qcom,initial-pwrlevel = <1>;
};
+
+ qcom,gpu-pwrlevels-2 {
+ qcom,initial-pwrlevel = <2>;
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-mtp.dtsi b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-mtp.dtsi
index 2095b4e07069..86b68b2440a9 100644
--- a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-mtp.dtsi
@@ -54,6 +54,13 @@
qcom,cam-vreg-op-mode = <0>;
};
+ laserled0: qcom,laserled@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,laser-led";
+ qcom,cci-master = <1>;
+ };
+
actuator1: qcom,actuator@1 {
cell-index = <1>;
reg = <0x1>;
@@ -322,6 +329,7 @@
qcom,eeprom-src = <&eeprom2>;
qcom,led-flash-src = <&led_flash1>;
qcom,actuator-src = <&actuator1>;
+ qcom,laserled-src = <&laserled0>;
cam_vio-supply = <&pm8998_lvs1>;
cam_vana-supply = <&pm8998_l22>;
cam_vdig-supply = <&pm8998_s3>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-mdss.dtsi b/arch/arm/boot/dts/qcom/msm8998-mdss.dtsi
index 5708fce44378..fbde8d21f22c 100644
--- a/arch/arm/boot/dts/qcom/msm8998-mdss.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-mdss.dtsi
@@ -497,12 +497,16 @@
<&clock_mmss clk_mmss_mdss_dp_link_clk>,
<&clock_mmss clk_mmss_mdss_dp_link_intf_clk>,
<&clock_mmss clk_mmss_mdss_dp_crypto_clk>,
- <&clock_mmss clk_mmss_mdss_dp_pixel_clk>;
+ <&clock_mmss clk_mmss_mdss_dp_pixel_clk>,
+ <&mdss_dp_pll clk_vco_divided_clk_src_mux>,
+ <&mdss_dp_pll clk_vco_divsel_two_clk_src>,
+ <&mdss_dp_pll clk_vco_divsel_four_clk_src>;
clock-names = "core_mnoc_clk", "core_iface_clk", "core_bus_clk",
"core_mdp_core_clk", "core_alt_iface_clk",
"core_aux_clk", "core_ref_clk_src", "core_ref_clk",
"ctrl_link_clk", "ctrl_link_iface_clk",
- "ctrl_crypto_clk", "ctrl_pixel_clk";
+ "ctrl_crypto_clk", "ctrl_pixel_clk", "pixel_parent",
+ "pixel_clk_two_div", "pixel_clk_four_div";
qcom,dp-usbpd-detection = <&pmi8998_pdphy>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi b/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi
index ed1259918620..1abb28897fbd 100644
--- a/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi
@@ -582,7 +582,7 @@
config {
pins = "gpio37";
drive-strength = <2>;
- bias-pull-down;
+ bias-pull-up;
};
};
@@ -595,7 +595,7 @@
config {
pins = "gpio37";
drive-strength = <2>;
- bias-disable;
+ bias-pull-up;
};
};
};
diff --git a/arch/arm/boot/dts/qcom/msm8998.dtsi b/arch/arm/boot/dts/qcom/msm8998.dtsi
index eafa6b841c17..132f3e7ce332 100644
--- a/arch/arm/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998.dtsi
@@ -292,7 +292,7 @@
dev = "/dev/block/platform/soc/1da4000.ufshc/by-name/vendor";
type = "ext4";
mnt_flags = "ro,barrier=1,discard";
- fsmgr_flags = "wait,slotselect";
+ fsmgr_flags = "wait,slotselect,verify";
status = "ok";
};
};
@@ -3106,6 +3106,8 @@
compatible = "qcom,wcn3990-wifi";
reg = <0x18800000 0x800000>;
reg-names = "membase";
+ clocks = <&clock_gcc clk_rf_clk2_pin>;
+ clock-names = "cxo_ref_clk_pin";
interrupts =
<0 413 0 /* CE0 */ >,
<0 414 0 /* CE1 */ >,
@@ -3119,6 +3121,12 @@
<0 423 0 /* CE9 */ >,
<0 424 0 /* CE10 */ >,
<0 425 0 /* CE11 */ >;
+ vdd-0.8-cx-mx-supply = <&pm8998_l5>;
+ vdd-1.8-xo-supply = <&pm8998_l7_pin_ctrl>;
+ vdd-1.3-rfa-supply = <&pm8998_l17_pin_ctrl>;
+ vdd-3.3-ch0-supply = <&pm8998_l25_pin_ctrl>;
+ qcom,vdd-0.8-cx-mx-config = <800000 800000>;
+ qcom,vdd-3.3-ch0-config = <3104000 3312000>;
};
qcom,icnss@18800000 {
diff --git a/arch/arm/boot/dts/qcom/sda636-pm660a-qrd-hdk.dts b/arch/arm/boot/dts/qcom/sda636-pm660a-qrd-hdk.dts
index f4a9592bf4ff..ccc1be75f39b 100644
--- a/arch/arm/boot/dts/qcom/sda636-pm660a-qrd-hdk.dts
+++ b/arch/arm/boot/dts/qcom/sda636-pm660a-qrd-hdk.dts
@@ -98,7 +98,7 @@
};
};
- smb138x_parallel_slave: qcom,smb138x-parallel-slave@1000 {
+ smb1381_charger: qcom,smb1381-charger@1000 {
compatible = "qcom,smb138x-parallel-slave";
qcom,pmic-revid = <&smb138x_revid>;
reg = <0x1000 0x700>;
diff --git a/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts b/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts
index 5f44b4c32c98..0d7b6c0341b5 100644
--- a/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts
+++ b/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts
@@ -98,7 +98,7 @@
};
};
- smb138x_parallel_slave: qcom,smb138x-parallel-slave@1000 {
+ smb1381_charger: qcom,smb1381-charger@1000 {
compatible = "qcom,smb138x-parallel-slave";
qcom,pmic-revid = <&smb138x_revid>;
reg = <0x1000 0x700>;
diff --git a/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi b/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi
index af3c5d1b51da..384e24d221c4 100644
--- a/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi
@@ -92,7 +92,7 @@
};
};
- smb138x_parallel_slave: qcom,smb138x-parallel-slave@1000 {
+ smb1381_charger: qcom,smb1381-charger@1000 {
compatible = "qcom,smb138x-parallel-slave";
qcom,pmic-revid = <&smb138x_revid>;
reg = <0x1000 0x700>;
diff --git a/arch/arm/boot/dts/qcom/sdm630.dtsi b/arch/arm/boot/dts/qcom/sdm630.dtsi
index e918864a3df7..ae110fe22535 100644
--- a/arch/arm/boot/dts/qcom/sdm630.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630.dtsi
@@ -309,7 +309,7 @@
dev = "/dev/block/platform/soc/c0c4000.sdhci/by-name/vendor";
type = "ext4";
mnt_flags = "ro,barrier=1,discard";
- fsmgr_flags = "wait,slotselect";
+ fsmgr_flags = "wait,slotselect,verify";
status = "ok";
};
};
diff --git a/arch/arm/boot/dts/qcom/sdm660.dtsi b/arch/arm/boot/dts/qcom/sdm660.dtsi
index c436ce643091..48437ac9d31a 100644
--- a/arch/arm/boot/dts/qcom/sdm660.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660.dtsi
@@ -307,7 +307,7 @@
dev = "/dev/block/platform/soc/c0c4000.sdhci/by-name/vendor";
type = "ext4";
mnt_flags = "ro,barrier=1,discard";
- fsmgr_flags = "wait,slotselect";
+ fsmgr_flags = "wait,slotselect,verify";
status = "ok";
};
};
@@ -1606,6 +1606,7 @@
qcom,msm_fastrpc {
compatible = "qcom,msm-fastrpc-adsp";
qcom,fastrpc-glink;
+ qcom,fastrpc-vmid-heap-shared;
qcom,msm_fastrpc_compute_cb1 {
compatible = "qcom,msm-fastrpc-compute-cb";
diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts
index e6d9f7b7d2f2..4a8cef21e90e 100644
--- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts
+++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts
@@ -91,6 +91,25 @@
ranges = <0 0 0 0xffffffff>;
compatible = "simple-bus";
+ qcom,mpm2-sleep-counter@4a3000 {
+ compatible = "qcom,mpm2-sleep-counter";
+ reg = <0x004a3000 0x1000>;
+ clock-frequency = <32768>;
+ };
+
+ qcom,msm-imem@66bf000 {
+ compatible = "qcom,msm-imem";
+ reg = <0x66bf000 0x1000>; /* Address and size of IMEM */
+ ranges = <0x0 0x66bf000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ boot_stats@6b0 {
+ compatible = "qcom,msm-imem-boot_stats";
+ reg = <0x6b0 32>;
+ };
+ };
+
sound-adp-agave {
compatible = "qcom,apq8096-asoc-snd-adp-agave";
qcom,model = "apq8096-adp-agave-snd-card";
@@ -116,6 +135,8 @@
<&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>,
<&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>,
<&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>,
+ <&dai_sec_tdm_rx_0>, <&dai_sec_tdm_rx_1>,
+ <&dai_sec_tdm_rx_2>, <&dai_sec_tdm_rx_3>,
<&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>,
<&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>,
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>,
@@ -138,6 +159,8 @@
"msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871",
"msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866",
"msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870",
+ "msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36882",
+ "msm-dai-q6-tdm.36884", "msm-dai-q6-tdm.36886",
"msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883",
"msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887",
"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898",
@@ -165,6 +188,11 @@
iommus = <&lpass_q6_smmu 1>;
};
+ qcom,msm-audio-ion-vm {
+ compatible = "qcom,msm-audio-ion-vm";
+ qcom,smmu-enabled;
+ };
+
pcm0: qcom,msm-pcm {
compatible = "qcom,msm-pcm-dsp";
qcom,msm-pcm-dsp-id = <0>;
@@ -409,6 +437,43 @@
};
};
+ qcom,msm-dai-tdm-sec-rx {
+ compatible = "qcom,msm-dai-tdm";
+ qcom,msm-cpudai-tdm-group-id = <37136>;
+ qcom,msm-cpudai-tdm-group-num-ports = <4>;
+ qcom,msm-cpudai-tdm-group-port-id = <36880 36882 36884 36886>;
+ qcom,msm-cpudai-tdm-clk-rate = <12288000>;
+ qcom,msm-cpudai-tdm-clk-internal = <0>;
+ qcom,msm-cpudai-tdm-sync-mode = <1>;
+ qcom,msm-cpudai-tdm-sync-src = <0>;
+ qcom,msm-cpudai-tdm-data-out = <0>;
+ qcom,msm-cpudai-tdm-invert-sync = <0>;
+ qcom,msm-cpudai-tdm-data-delay = <0>;
+ dai_sec_tdm_rx_0: qcom,msm-dai-q6-tdm-sec-rx-0 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36880>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_sec_tdm_rx_1: qcom,msm-dai-q6-tdm-sec-rx-1 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36882>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_sec_tdm_rx_2: qcom,msm-dai-q6-tdm-sec-rx-2 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36884>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+
+ dai_sec_tdm_rx_3: qcom,msm-dai-q6-tdm-sec-rx-3 {
+ compatible = "qcom,msm-dai-q6-tdm";
+ qcom,msm-cpudai-tdm-dev-id = <36886>;
+ qcom,msm-cpudai-tdm-data-align = <0>;
+ };
+ };
+
qcom,msm-dai-tdm-sec-tx {
compatible = "qcom,msm-dai-tdm";
qcom,msm-cpudai-tdm-group-id = <37137>;
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 360cea172b06..4c055a63c9c6 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -831,24 +831,25 @@ void stage2_unmap_vm(struct kvm *kvm)
* Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
* underlying level-2 and level-3 tables before freeing the actual level-1 table
* and setting the struct pointer to NULL.
- *
- * Note we don't need locking here as this is only called when the VM is
- * destroyed, which can only be done once.
*/
void kvm_free_stage2_pgd(struct kvm *kvm)
{
- if (kvm->arch.pgd == NULL)
- return;
+ void *pgd = NULL;
+ void *hwpgd = NULL;
spin_lock(&kvm->mmu_lock);
- unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+ if (kvm->arch.pgd) {
+ unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+ pgd = READ_ONCE(kvm->arch.pgd);
+ hwpgd = kvm_get_hwpgd(kvm);
+ kvm->arch.pgd = NULL;
+ }
spin_unlock(&kvm->mmu_lock);
- kvm_free_hwpgd(kvm_get_hwpgd(kvm));
- if (KVM_PREALLOC_LEVEL > 0)
- kfree(kvm->arch.pgd);
-
- kvm->arch.pgd = NULL;
+ if (hwpgd)
+ kvm_free_hwpgd(hwpgd);
+ if (KVM_PREALLOC_LEVEL > 0 && pgd)
+ kfree(pgd);
}
static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index c73f10c1984f..83519afe3254 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -314,8 +314,11 @@ retry:
* signal first. We do not need to release the mmap_sem because
* it would already be released in __lock_page_or_retry in
* mm/filemap.c. */
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+ if (!user_mode(regs))
+ goto no_context;
return 0;
+ }
/*
* Major/minor page fault accounting is only done on the
diff --git a/arch/arm64/configs/msm-auto-gvm-perf_defconfig b/arch/arm64/configs/msm-auto-gvm-perf_defconfig
index 2e551218af2d..650b9d7bc127 100644
--- a/arch/arm64/configs/msm-auto-gvm-perf_defconfig
+++ b/arch/arm64/configs/msm-auto-gvm-perf_defconfig
@@ -256,6 +256,8 @@ CONFIG_MSM_SMP2P_TEST=y
CONFIG_MSM_QMI_INTERFACE=y
CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_MSM_BOOT_TIME_MARKER=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_PIL=y
CONFIG_MSM_PIL_MSS_QDSP6V5=y
diff --git a/arch/arm64/configs/msm-auto-gvm_defconfig b/arch/arm64/configs/msm-auto-gvm_defconfig
index a6d36c314a4a..5b0fb5cd910b 100644
--- a/arch/arm64/configs/msm-auto-gvm_defconfig
+++ b/arch/arm64/configs/msm-auto-gvm_defconfig
@@ -261,6 +261,8 @@ CONFIG_MSM_SMP2P_TEST=y
CONFIG_MSM_QMI_INTERFACE=y
CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_MSM_BOOT_TIME_MARKER=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_PIL=y
CONFIG_MSM_PIL_MSS_QDSP6V5=y
diff --git a/arch/arm64/configs/msm-auto-perf_defconfig b/arch/arm64/configs/msm-auto-perf_defconfig
index 7e3bf18b06f7..e55ebfc79ddb 100644
--- a/arch/arm64/configs/msm-auto-perf_defconfig
+++ b/arch/arm64/configs/msm-auto-perf_defconfig
@@ -277,10 +277,10 @@ CONFIG_WCNSS_MEM_PRE_ALLOC=y
CONFIG_CNSS_CRYPTO=y
CONFIG_ATH_CARDS=y
CONFIG_WIL6210=m
-CONFIG_CNSS=y
-CONFIG_CNSS_ASYNC=y
CONFIG_CLD_LL_CORE=y
CONFIG_BUS_AUTO_SUSPEND=y
+CONFIG_CNSS2=y
+CONFIG_CNSS2_DEBUG=y
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_KEYRESET=y
CONFIG_KEYBOARD_GPIO=y
diff --git a/arch/arm64/configs/msm-auto_defconfig b/arch/arm64/configs/msm-auto_defconfig
index 92fc522c11ed..8f8e696f8866 100644
--- a/arch/arm64/configs/msm-auto_defconfig
+++ b/arch/arm64/configs/msm-auto_defconfig
@@ -1,4 +1,5 @@
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_FHANDLE=y
CONFIG_AUDIT=y
# CONFIG_AUDITSYSCALL is not set
CONFIG_NO_HZ=y
@@ -232,6 +233,8 @@ CONFIG_CFG80211_INTERNAL_REGDB=y
CONFIG_RFKILL=y
CONFIG_IPC_ROUTER=y
CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
CONFIG_DMA_CMA=y
CONFIG_ZRAM=y
@@ -278,10 +281,10 @@ CONFIG_WCNSS_MEM_PRE_ALLOC=y
CONFIG_CNSS_CRYPTO=y
CONFIG_ATH_CARDS=y
CONFIG_WIL6210=m
-CONFIG_CNSS=y
-CONFIG_CNSS_ASYNC=y
CONFIG_CLD_LL_CORE=y
CONFIG_BUS_AUTO_SUSPEND=y
+CONFIG_CNSS2=y
+CONFIG_CNSS2_DEBUG=y
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_KEYRESET=y
CONFIG_KEYBOARD_GPIO=y
@@ -311,7 +314,6 @@ CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_MSM_ADSPRPC=y
CONFIG_MSM_RDBG=m
-CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MUX=y
CONFIG_I2C_QUP=y
@@ -348,7 +350,6 @@ CONFIG_THERMAL_TSENS8974=y
CONFIG_THERMAL_QPNP_ADC_TM=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_WCD9335_CODEC=y
-CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_FAN53555=y
CONFIG_REGULATOR_MAX20010=y
@@ -380,15 +381,11 @@ CONFIG_MSM_AIS_CAMERA_SENSOR=y
# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
CONFIG_VIDEO_ADV7481=y
CONFIG_QCOM_KGSL=y
+CONFIG_DRM=y
CONFIG_MSM_BA_V4L2=y
-CONFIG_FB=y
-CONFIG_FB_MSM=y
-CONFIG_FB_MSM_MDSS=y
-CONFIG_FB_MSM_MDSS_WRITEBACK=y
-CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
-CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
+CONFIG_MSM_DBA=y
+CONFIG_MSM_DBA_ADV7533=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_BACKLIGHT_GENERIC=m
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
@@ -472,7 +469,7 @@ CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ANDROID_TIMED_GPIO=y
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
-CONFIG_SW_SYNC_USER=y
+CONFIG_SYNC=y
CONFIG_ION=y
CONFIG_ION_MSM=y
CONFIG_QPNP_REVID=y
@@ -521,7 +518,6 @@ CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
CONFIG_MSM_GLINK_PKT=y
CONFIG_MSM_SPM=y
CONFIG_MSM_L2_SPM=y
-CONFIG_QCOM_SCM=y
CONFIG_QCOM_SCM_XPU=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
@@ -576,7 +572,6 @@ CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_FUSE_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_ECRYPT_FS=y
CONFIG_ECRYPT_FS_MESSAGING=y
@@ -593,6 +588,7 @@ CONFIG_DEBUG_OBJECTS_TIMERS=y
CONFIG_DEBUG_OBJECTS_WORK=y
CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_SLUB_DEBUG_ON=y
CONFIG_DEBUG_KMEMLEAK=y
CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
CONFIG_DEBUG_STACK_USAGE=y
diff --git a/arch/arm64/configs/msm-perf_defconfig b/arch/arm64/configs/msm-perf_defconfig
index 61418724b897..842f495bb17e 100644
--- a/arch/arm64/configs/msm-perf_defconfig
+++ b/arch/arm64/configs/msm-perf_defconfig
@@ -330,6 +330,8 @@ CONFIG_POWER_RESET_QCOM=y
CONFIG_QCOM_DLOAD_MODE=y
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_SMBCHARGER=y
+CONFIG_QPNP_FG=y
CONFIG_SMB135X_CHARGER=y
CONFIG_SMB1351_USB_CHARGER=y
CONFIG_MSM_BCL_CTL=y
@@ -461,7 +463,7 @@ CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_LEDS_QPNP=y
-CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_FLASH=y
CONFIG_LEDS_QPNP_WLED=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_SWITCH=y
diff --git a/arch/arm64/configs/msm_defconfig b/arch/arm64/configs/msm_defconfig
index ee2b9fa628ff..38f8092e7d8a 100644
--- a/arch/arm64/configs/msm_defconfig
+++ b/arch/arm64/configs/msm_defconfig
@@ -317,6 +317,8 @@ CONFIG_POWER_RESET_QCOM=y
CONFIG_QCOM_DLOAD_MODE=y
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_SMBCHARGER=y
+CONFIG_QPNP_FG=y
CONFIG_SMB135X_CHARGER=y
CONFIG_SMB1351_USB_CHARGER=y
CONFIG_MSM_BCL_CTL=y
@@ -449,7 +451,7 @@ CONFIG_MMC_SPI=y
CONFIG_MMC_DW=y
CONFIG_MMC_DW_EXYNOS=y
CONFIG_LEDS_QPNP=y
-CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_FLASH=y
CONFIG_LEDS_QPNP_WLED=y
CONFIG_LEDS_SYSCON=y
CONFIG_LEDS_TRIGGERS=y
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 790d27e3b997..7950df171d86 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -202,9 +202,11 @@ void fpsimd_thread_switch(struct task_struct *next)
void fpsimd_flush_thread(void)
{
+ preempt_disable();
memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
fpsimd_flush_task_state(current);
set_thread_flag(TIF_FOREIGN_FPSTATE);
+ preempt_enable();
}
/*
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index ea3e453fdd14..2ac2abe8a494 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -157,7 +157,8 @@ static int save_trace(struct stackframe *frame, void *d)
return trace->nr_entries >= trace->max_entries;
}
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+static noinline void __save_stack_trace(struct task_struct *tsk,
+ struct stack_trace *trace, unsigned int nosched)
{
struct stack_trace_data data;
struct stackframe frame;
@@ -167,17 +168,18 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
data.trace = trace;
data.skip = trace->skip;
+ data.no_sched_functions = nosched;
if (tsk != current) {
- data.no_sched_functions = 1;
frame.fp = thread_saved_fp(tsk);
frame.sp = thread_saved_sp(tsk);
frame.pc = thread_saved_pc(tsk);
} else {
- data.no_sched_functions = 0;
+ /* We don't want this function nor the caller */
+ data.skip += 2;
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_stack_pointer;
- frame.pc = (unsigned long)save_stack_trace_tsk;
+ frame.pc = (unsigned long)__save_stack_trace;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = tsk->curr_ret_stack;
@@ -191,9 +193,15 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
}
EXPORT_SYMBOL(save_stack_trace_tsk);
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ __save_stack_trace(tsk, trace, 1);
+}
+
void save_stack_trace(struct stack_trace *trace)
{
- save_stack_trace_tsk(current, trace);
+ __save_stack_trace(current, trace, 0);
}
+
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 2720d47da366..1f9a2620c692 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -370,8 +370,11 @@ retry:
* signal first. We do not need to release the mmap_sem because it
* would already be released in __lock_page_or_retry in mm/filemap.c.
*/
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+ if (!user_mode(regs))
+ goto no_context;
return 0;
+ }
/*
* Major/minor page fault accounting is only done on the initial
diff --git a/arch/mips/math-emu/dp_fmax.c b/arch/mips/math-emu/dp_fmax.c
index fd71b8daaaf2..5bec64f2884e 100644
--- a/arch/mips/math-emu/dp_fmax.c
+++ b/arch/mips/math-emu/dp_fmax.c
@@ -47,14 +47,26 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754dp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -80,9 +92,7 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
return ys ? x : y;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754dp_zero(1);
+ return ieee754dp_zero(xs & ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
@@ -106,16 +116,32 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
else if (xs < ys)
return x;
- /* Compare exponent */
- if (xe > ye)
- return x;
- else if (xe < ye)
- return y;
+ /* Signs of inputs are equal, let's compare exponents */
+ if (xs == 0) {
+ /* Inputs are both positive */
+ if (xe > ye)
+ return x;
+ else if (xe < ye)
+ return y;
+ } else {
+ /* Inputs are both negative */
+ if (xe > ye)
+ return y;
+ else if (xe < ye)
+ return x;
+ }
- /* Compare mantissa */
+ /* Signs and exponents of inputs are equal, let's compare mantissas */
+ if (xs == 0) {
+ /* Inputs are both positive, with equal signs and exponents */
+ if (xm <= ym)
+ return y;
+ return x;
+ }
+ /* Inputs are both negative, with equal signs and exponents */
if (xm <= ym)
- return y;
- return x;
+ return x;
+ return y;
}
union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
@@ -147,14 +173,26 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754dp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -164,6 +202,9 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
/*
* Infinity and zero handling
*/
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ return ieee754dp_inf(xs & ys);
+
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
@@ -171,7 +212,6 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
return x;
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
@@ -180,9 +220,7 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
return y;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754dp_zero(1);
+ return ieee754dp_zero(xs & ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
@@ -207,7 +245,11 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
return y;
/* Compare mantissa */
- if (xm <= ym)
+ if (xm < ym)
return y;
- return x;
+ else if (xm > ym)
+ return x;
+ else if (xs == 0)
+ return x;
+ return y;
}
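
As a hedged user-space probe of the maxNum semantics this patch implements,
the snippet below exercises C99 fmax(). C99 guarantees the NaN case (return
the non-NaN operand); the signed-zero result shown is what the fixed emulation
returns, though a host libm is not required to match it:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
            double qnan = NAN;

            printf("fmax(NaN, 2.0)   = %g\n", fmax(qnan, 2.0));   /* 2 */
            printf("fmax(-0.0, +0.0) = %g\n", fmax(-0.0, 0.0));   /* +0 intended */
            printf("fmax(-1.0, -2.0) = %g\n", fmax(-1.0, -2.0));  /* -1 */
            return 0;
    }

Build with -lm; the same sign/exponent/mantissa reasoning applies to the
fmin/fmina changes in the following hunks.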
diff --git a/arch/mips/math-emu/dp_fmin.c b/arch/mips/math-emu/dp_fmin.c
index c1072b0dfb95..a287b23818d8 100644
--- a/arch/mips/math-emu/dp_fmin.c
+++ b/arch/mips/math-emu/dp_fmin.c
@@ -47,14 +47,26 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754dp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -80,9 +92,7 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
return ys ? y : x;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754dp_zero(1);
+ return ieee754dp_zero(xs | ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
@@ -106,16 +116,32 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
else if (xs < ys)
return y;
- /* Compare exponent */
- if (xe > ye)
- return y;
- else if (xe < ye)
- return x;
+ /* Signs of inputs are the same, let's compare exponents */
+ if (xs == 0) {
+ /* Inputs are both positive */
+ if (xe > ye)
+ return y;
+ else if (xe < ye)
+ return x;
+ } else {
+ /* Inputs are both negative */
+ if (xe > ye)
+ return x;
+ else if (xe < ye)
+ return y;
+ }
- /* Compare mantissa */
+ /* Signs and exponents of inputs are equal, let's compare mantissas */
+ if (xs == 0) {
+ /* Inputs are both positive, with equal signs and exponents */
+ if (xm <= ym)
+ return x;
+ return y;
+ }
+ /* Inputs are both negative, with equal signs and exponents */
if (xm <= ym)
- return x;
- return y;
+ return y;
+ return x;
}
union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
@@ -147,14 +173,26 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754dp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -164,25 +202,25 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
/*
* Infinity and zero handling
*/
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ return ieee754dp_inf(xs | ys);
+
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
- return x;
+ return y;
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
- return y;
+ return x;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754dp_zero(1);
+ return ieee754dp_zero(xs | ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
@@ -207,7 +245,11 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
return x;
/* Compare mantissa */
- if (xm <= ym)
+ if (xm < ym)
+ return x;
+ else if (xm > ym)
+ return y;
+ else if (xs == 1)
return x;
return y;
}
diff --git a/arch/mips/math-emu/sp_fmax.c b/arch/mips/math-emu/sp_fmax.c
index 4d000844e48e..74a5a00d2f22 100644
--- a/arch/mips/math-emu/sp_fmax.c
+++ b/arch/mips/math-emu/sp_fmax.c
@@ -47,14 +47,26 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754sp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are here preferred as returned values to NaNs)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -80,9 +92,7 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
return ys ? x : y;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754sp_zero(1);
+ return ieee754sp_zero(xs & ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
SPDNORMX;
@@ -106,16 +116,32 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
else if (xs < ys)
return x;
- /* Compare exponent */
- if (xe > ye)
- return x;
- else if (xe < ye)
- return y;
+ /* Signs of inputs are equal, let's compare exponents */
+ if (xs == 0) {
+ /* Inputs are both positive */
+ if (xe > ye)
+ return x;
+ else if (xe < ye)
+ return y;
+ } else {
+ /* Inputs are both negative */
+ if (xe > ye)
+ return y;
+ else if (xe < ye)
+ return x;
+ }
- /* Compare mantissa */
+ /* Signs and exponents of inputs are equal, let's compare mantissas */
+ if (xs == 0) {
+ /* Inputs are both positive, with equal signs and exponents */
+ if (xm <= ym)
+ return y;
+ return x;
+ }
+ /* Inputs are both negative, with equal signs and exponents */
if (xm <= ym)
- return y;
- return x;
+ return x;
+ return y;
}
union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
@@ -147,14 +173,26 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754sp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are preferred to NaNs as return values)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -164,6 +202,9 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
/*
* Infinity and zero handling
*/
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ return ieee754sp_inf(xs & ys);
+
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
@@ -171,7 +212,6 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
return x;
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
@@ -180,9 +220,7 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
return y;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754sp_zero(1);
+ return ieee754sp_zero(xs & ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
SPDNORMX;
@@ -207,7 +245,11 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
return y;
/* Compare mantissa */
- if (xm <= ym)
+ if (xm < ym)
return y;
- return x;
+ else if (xm > ym)
+ return x;
+ else if (xs == 0)
+ return x;
+ return y;
}
diff --git a/arch/mips/math-emu/sp_fmin.c b/arch/mips/math-emu/sp_fmin.c
index 4eb1bb9e9dec..c51385f46b09 100644
--- a/arch/mips/math-emu/sp_fmin.c
+++ b/arch/mips/math-emu/sp_fmin.c
@@ -47,14 +47,26 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754sp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are preferred to NaNs as return values)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -80,9 +92,7 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
return ys ? y : x;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754sp_zero(1);
+ return ieee754sp_zero(xs | ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
SPDNORMX;
@@ -106,16 +116,32 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
else if (xs < ys)
return y;
- /* Compare exponent */
- if (xe > ye)
- return y;
- else if (xe < ye)
- return x;
+ /* Signs of inputs are equal, let's compare exponents */
+ if (xs == 0) {
+ /* Inputs are both positive */
+ if (xe > ye)
+ return y;
+ else if (xe < ye)
+ return x;
+ } else {
+ /* Inputs are both negative */
+ if (xe > ye)
+ return x;
+ else if (xe < ye)
+ return y;
+ }
- /* Compare mantissa */
+ /* Signs and exponents of inputs are equal, let's compare mantissas */
+ if (xs == 0) {
+ /* Inputs are both positive, with equal signs and exponents */
+ if (xm <= ym)
+ return x;
+ return y;
+ }
+ /* Inputs are both negative, with equal signs and exponents */
if (xm <= ym)
- return x;
- return y;
+ return y;
+ return x;
}
union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
@@ -147,14 +173,26 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754sp_nanxcpt(x);
- /* numbers are preferred to NaNs */
+ /*
+ * Quiet NaN handling
+ */
+
+ /*
+ * The case of both inputs quiet NaNs
+ */
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ return x;
+
+ /*
+ * The cases of exactly one input quiet NaN (numbers
+ * are preferred to NaNs as return values)
+ */
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@@ -164,25 +202,25 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
/*
* Infinity and zero handling
*/
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ return ieee754sp_inf(xs | ys);
+
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
- return x;
+ return y;
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
- return y;
+ return x;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- if (xs == ys)
- return x;
- return ieee754sp_zero(1);
+ return ieee754sp_zero(xs | ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
SPDNORMX;
@@ -207,7 +245,11 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
return x;
/* Compare mantissa */
- if (xm <= ym)
+ if (xm < ym)
+ return x;
+ else if (xm > ym)
+ return y;
+ else if (xs == 1)
return x;
return y;
}
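The four MIPS hunks above share one decision tree: with NaNs, infinities, and zeros dispatched, compare sign first, then exponent, then mantissa, inverting the exponent/mantissa comparison when both inputs are negative. A minimal user-space model of the fixed MAX.D comparison for finite nonzero doubles (split() and emu_fmax() are invented here for illustration, not emulator API):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Decompose a double into the fields the emulator compares:
	 * sign bit, biased exponent, 52-bit mantissa. */
	static void split(double d, int *s, int *e, uint64_t *m)
	{
		uint64_t bits;

		memcpy(&bits, &d, sizeof(bits));
		*s = bits >> 63;
		*e = (bits >> 52) & 0x7ff;
		*m = bits & ((1ULL << 52) - 1);
	}

	/* The fixed decision tree for two finite nonzero inputs. */
	static double emu_fmax(double x, double y)
	{
		int xs, ys, xe, ye;
		uint64_t xm, ym;

		split(x, &xs, &xe, &xm);
		split(y, &ys, &ye, &ym);

		if (xs != ys)		/* differing signs: positive wins */
			return xs ? y : x;
		if (xe != ye)		/* equal signs: compare exponents,   */
			return ((xe > ye) ^ xs) ? x : y;  /* inverted if negative */
		if (xm != ym)		/* equal exponents: compare mantissas */
			return ((xm > ym) ^ xs) ? x : y;
		return x;		/* identical values */
	}

	int main(void)
	{
		assert(emu_fmax(-2.0, -8.0) == -2.0);	/* both negative */
		assert(emu_fmax(3.0, 5.0) == 5.0);	/* both positive */
		assert(emu_fmax(-1.0, 4.0) == 4.0);	/* mixed signs */
		puts("ok");
		return 0;
	}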
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 91e5c1758b5c..64e016abb2a5 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -236,6 +236,28 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
#define SWIZ_PTR(p) ((unsigned char __user *)((p) ^ swiz))
+#define __get_user_or_set_dar(_regs, _dest, _addr) \
+ ({ \
+ int rc = 0; \
+ typeof(_addr) __addr = (_addr); \
+ if (__get_user_inatomic(_dest, __addr)) { \
+ _regs->dar = (unsigned long)__addr; \
+ rc = -EFAULT; \
+ } \
+ rc; \
+ })
+
+#define __put_user_or_set_dar(_regs, _src, _addr) \
+ ({ \
+ int rc = 0; \
+ typeof(_addr) __addr = (_addr); \
+ if (__put_user_inatomic(_src, __addr)) { \
+ _regs->dar = (unsigned long)__addr; \
+ rc = -EFAULT; \
+ } \
+ rc; \
+ })
+
static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
unsigned int reg, unsigned int nb,
unsigned int flags, unsigned int instr,
@@ -264,9 +286,10 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
} else {
unsigned long pc = regs->nip ^ (swiz & 4);
- if (__get_user_inatomic(instr,
- (unsigned int __user *)pc))
+ if (__get_user_or_set_dar(regs, instr,
+ (unsigned int __user *)pc))
return -EFAULT;
+
if (swiz == 0 && (flags & SW))
instr = cpu_to_le32(instr);
nb = (instr >> 11) & 0x1f;
@@ -310,31 +333,31 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
((nb0 + 3) / 4) * sizeof(unsigned long));
for (i = 0; i < nb; ++i, ++p)
- if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
- SWIZ_PTR(p)))
+ if (__get_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
+ SWIZ_PTR(p)))
return -EFAULT;
if (nb0 > 0) {
rptr = &regs->gpr[0];
addr += nb;
for (i = 0; i < nb0; ++i, ++p)
- if (__get_user_inatomic(REG_BYTE(rptr,
- i ^ bswiz),
- SWIZ_PTR(p)))
+ if (__get_user_or_set_dar(regs,
+ REG_BYTE(rptr, i ^ bswiz),
+ SWIZ_PTR(p)))
return -EFAULT;
}
} else {
for (i = 0; i < nb; ++i, ++p)
- if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
- SWIZ_PTR(p)))
+ if (__put_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
+ SWIZ_PTR(p)))
return -EFAULT;
if (nb0 > 0) {
rptr = &regs->gpr[0];
addr += nb;
for (i = 0; i < nb0; ++i, ++p)
- if (__put_user_inatomic(REG_BYTE(rptr,
- i ^ bswiz),
- SWIZ_PTR(p)))
+ if (__put_user_or_set_dar(regs,
+ REG_BYTE(rptr, i ^ bswiz),
+ SWIZ_PTR(p)))
return -EFAULT;
}
}
@@ -346,29 +369,32 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
* Only POWER6 has these instructions, and it does true little-endian,
* so we don't need the address swizzling.
*/
-static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
- unsigned int flags)
+static int emulate_fp_pair(struct pt_regs *regs, unsigned char __user *addr,
+ unsigned int reg, unsigned int flags)
{
char *ptr0 = (char *) &current->thread.TS_FPR(reg);
char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
- int i, ret, sw = 0;
+ int i, sw = 0;
if (reg & 1)
return 0; /* invalid form: FRS/FRT must be even */
if (flags & SW)
sw = 7;
- ret = 0;
+
for (i = 0; i < 8; ++i) {
if (!(flags & ST)) {
- ret |= __get_user(ptr0[i^sw], addr + i);
- ret |= __get_user(ptr1[i^sw], addr + i + 8);
+ if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
+ return -EFAULT;
+ if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
+ return -EFAULT;
} else {
- ret |= __put_user(ptr0[i^sw], addr + i);
- ret |= __put_user(ptr1[i^sw], addr + i + 8);
+ if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
+ return -EFAULT;
+ if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
+ return -EFAULT;
}
}
- if (ret)
- return -EFAULT;
+
return 1; /* exception handled and fixed up */
}
@@ -378,24 +404,27 @@ static int emulate_lq_stq(struct pt_regs *regs, unsigned char __user *addr,
{
char *ptr0 = (char *)&regs->gpr[reg];
char *ptr1 = (char *)&regs->gpr[reg+1];
- int i, ret, sw = 0;
+ int i, sw = 0;
if (reg & 1)
return 0; /* invalid form: GPR must be even */
if (flags & SW)
sw = 7;
- ret = 0;
+
for (i = 0; i < 8; ++i) {
if (!(flags & ST)) {
- ret |= __get_user(ptr0[i^sw], addr + i);
- ret |= __get_user(ptr1[i^sw], addr + i + 8);
+ if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
+ return -EFAULT;
+ if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
+ return -EFAULT;
} else {
- ret |= __put_user(ptr0[i^sw], addr + i);
- ret |= __put_user(ptr1[i^sw], addr + i + 8);
+ if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
+ return -EFAULT;
+ if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
+ return -EFAULT;
}
}
- if (ret)
- return -EFAULT;
+
return 1; /* exception handled and fixed up */
}
#endif /* CONFIG_PPC64 */
@@ -688,9 +717,14 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
for (j = 0; j < length; j += elsize) {
for (i = 0; i < elsize; ++i) {
if (flags & ST)
- ret |= __put_user(ptr[i^sw], addr + i);
+ ret = __put_user_or_set_dar(regs, ptr[i^sw],
+ addr + i);
else
- ret |= __get_user(ptr[i^sw], addr + i);
+ ret = __get_user_or_set_dar(regs, ptr[i^sw],
+ addr + i);
+
+ if (ret)
+ return ret;
}
ptr += elsize;
#ifdef __LITTLE_ENDIAN__
@@ -740,7 +774,7 @@ int fix_alignment(struct pt_regs *regs)
unsigned int dsisr;
unsigned char __user *addr;
unsigned long p, swiz;
- int ret, i;
+ int i;
union data {
u64 ll;
double dd;
@@ -923,7 +957,7 @@ int fix_alignment(struct pt_regs *regs)
if (flags & F) {
/* Special case for 16-byte FP loads and stores */
PPC_WARN_ALIGNMENT(fp_pair, regs);
- return emulate_fp_pair(addr, reg, flags);
+ return emulate_fp_pair(regs, addr, reg, flags);
} else {
#ifdef CONFIG_PPC64
/* Special case for 16-byte loads and stores */
@@ -953,15 +987,12 @@ int fix_alignment(struct pt_regs *regs)
}
data.ll = 0;
- ret = 0;
p = (unsigned long)addr;
for (i = 0; i < nb; i++)
- ret |= __get_user_inatomic(data.v[start + i],
- SWIZ_PTR(p++));
-
- if (unlikely(ret))
- return -EFAULT;
+ if (__get_user_or_set_dar(regs, data.v[start + i],
+ SWIZ_PTR(p++)))
+ return -EFAULT;
} else if (flags & F) {
data.ll = current->thread.TS_FPR(reg);
@@ -1031,15 +1062,13 @@ int fix_alignment(struct pt_regs *regs)
break;
}
- ret = 0;
p = (unsigned long)addr;
for (i = 0; i < nb; i++)
- ret |= __put_user_inatomic(data.v[start + i],
- SWIZ_PTR(p++));
+ if (__put_user_or_set_dar(regs, data.v[start + i],
+ SWIZ_PTR(p++)))
+ return -EFAULT;
- if (unlikely(ret))
- return -EFAULT;
} else if (flags & F)
current->thread.TS_FPR(reg) = data.ll;
else
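__get_user_or_set_dar()/__put_user_or_set_dar() fold the repeated "do the user access, and on fault record the faulting address in regs->dar" sequence into one GNU statement expression. A stripped-down user-space analogue, assuming a fake_get_user() that "faults" on odd addresses (struct fake_regs and both helpers are invented for illustration):

	#include <stdio.h>

	struct fake_regs {
		unsigned long dar;	/* data address register, as on powerpc */
	};

	/* Stand-in for __get_user_inatomic(): pretend odd addresses fault. */
	#define fake_get_user(dest, addr) \
		(((unsigned long)(addr) & 1) ? 1 : ((dest) = *(addr), 0))

	/* GNU statement expression: evaluates to 0 or -14 (-EFAULT) and
	 * records the faulting address as a side effect, like the patch. */
	#define get_user_or_set_dar(regs, dest, addr)			\
		({							\
			int rc = 0;					\
			typeof(addr) __addr = (addr);			\
			if (fake_get_user(dest, __addr)) {		\
				(regs)->dar = (unsigned long)__addr;	\
				rc = -14;				\
			}						\
			rc;						\
		})

	int main(void)
	{
		struct fake_regs regs = { 0 };
		int v = 0, src[2] = { 42, 43 };

		if (get_user_or_set_dar(&regs, v, &src[0]) == 0)
			printf("read %d, dar=%#lx\n", v, regs.dar);
		if (get_user_or_set_dar(&regs, v, (int *)((char *)src + 1)))
			printf("fault recorded, dar=%#lx\n", regs.dar);
		return 0;
	}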
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index bcd3d6199464..bb16a58cf7e4 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -204,6 +204,7 @@ void set_personality_ia32(bool);
#define ELF_CORE_COPY_REGS(pr_reg, regs) \
do { \
+ unsigned long base; \
unsigned v; \
(pr_reg)[0] = (regs)->r15; \
(pr_reg)[1] = (regs)->r14; \
@@ -226,8 +227,8 @@ do { \
(pr_reg)[18] = (regs)->flags; \
(pr_reg)[19] = (regs)->sp; \
(pr_reg)[20] = (regs)->ss; \
- (pr_reg)[21] = current->thread.fs; \
- (pr_reg)[22] = current->thread.gs; \
+ rdmsrl(MSR_FS_BASE, base); (pr_reg)[21] = base; \
+ rdmsrl(MSR_KERNEL_GS_BASE, base); (pr_reg)[22] = base; \
asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \
asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \
asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index de25aad07853..9016b4b70375 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -304,13 +304,13 @@ static inline unsigned type in##bwl##_p(int port) \
static inline void outs##bwl(int port, const void *addr, unsigned long count) \
{ \
asm volatile("rep; outs" #bwl \
- : "+S"(addr), "+c"(count) : "d"(port)); \
+ : "+S"(addr), "+c"(count) : "d"(port) : "memory"); \
} \
\
static inline void ins##bwl(int port, void *addr, unsigned long count) \
{ \
asm volatile("rep; ins" #bwl \
- : "+D"(addr), "+c"(count) : "d"(port)); \
+ : "+D"(addr), "+c"(count) : "d"(port) : "memory"); \
}
BUILDIO(b, b, char)
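The added "memory" clobber tells the compiler that the string I/O instructions access memory it cannot see through the operand list, so it must not cache or reorder loads and stores of the target buffer across the asm. A hypothetical illustration of the barrier effect (fake_insb() only models the constraint/clobber shape and performs no real I/O):

	#include <stdio.h>

	static unsigned char buf[4];

	/* Model of insb: memory is written behind the compiler's back.
	 * Without the "memory" clobber, the compiler may keep buf[0] in
	 * a register across the asm and print stale data. */
	static inline void fake_insb(void *addr, unsigned long count)
	{
		asm volatile("" /* stands in for "rep; insb" */
			     : "+D" (addr), "+c" (count)
			     :
			     : "memory");	/* forces reload of buf */
	}

	int main(void)
	{
		buf[0] = 0;
		fake_insb(buf, sizeof(buf));
		printf("%u\n", buf[0]);	/* load must happen after the asm */
		return 0;
	}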
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 8161090a1970..37ade035c956 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -184,7 +184,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
goto err_free_blkg;
}
- wb_congested = wb_congested_get_create(&q->backing_dev_info,
+ wb_congested = wb_congested_get_create(q->backing_dev_info,
blkcg->css.id, GFP_NOWAIT);
if (!wb_congested) {
ret = -ENOMEM;
@@ -468,8 +468,8 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
const char *blkg_dev_name(struct blkcg_gq *blkg)
{
/* some drivers (floppy) instantiate a queue w/o disk registered */
- if (blkg->q->backing_dev_info.dev)
- return dev_name(blkg->q->backing_dev_info.dev);
+ if (blkg->q->backing_dev_info->dev)
+ return dev_name(blkg->q->backing_dev_info->dev);
return NULL;
}
EXPORT_SYMBOL_GPL(blkg_dev_name);
diff --git a/block/blk-core.c b/block/blk-core.c
index 3115494c4cb2..56652cd209db 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -87,7 +87,7 @@ static void blk_clear_congested(struct request_list *rl, int sync)
* flip its congestion state for events on other blkcgs.
*/
if (rl == &rl->q->root_rl)
- clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+ clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}
@@ -98,7 +98,7 @@ static void blk_set_congested(struct request_list *rl, int sync)
#else
/* see blk_clear_congested() */
if (rl == &rl->q->root_rl)
- set_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+ set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}
@@ -122,14 +122,12 @@ void blk_queue_congestion_threshold(struct request_queue *q)
* @bdev: device
*
* Locates the passed device's request queue and returns the address of its
- * backing_dev_info. This function can only be called if @bdev is opened
- * and the return value is never NULL.
+ * backing_dev_info. The return value is never NULL; however, we may return
+ * &noop_backing_dev_info if the bdev is not currently open.
*/
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- return &q->backing_dev_info;
+ return bdev->bd_bdi;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);
@@ -247,7 +245,7 @@ EXPORT_SYMBOL(blk_start_queue_async);
**/
void blk_start_queue(struct request_queue *q)
{
- WARN_ON(!irqs_disabled());
+ WARN_ON(!in_interrupt() && !irqs_disabled());
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
__blk_run_queue(q);
@@ -597,7 +595,7 @@ void blk_cleanup_queue(struct request_queue *q)
blk_flush_integrity();
/* @q won't process any more request, flush async actions */
- del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+ del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
blk_sync_queue(q);
if (q->mq_ops)
@@ -697,7 +695,6 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
struct request_queue *q;
- int err;
q = kmem_cache_alloc_node(blk_requestq_cachep,
gfp_mask | __GFP_ZERO, node_id);
@@ -712,17 +709,17 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
if (!q->bio_split)
goto fail_id;
- q->backing_dev_info.ra_pages =
+ q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
+ if (!q->backing_dev_info)
+ goto fail_split;
+
+ q->backing_dev_info->ra_pages =
(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
- q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
- q->backing_dev_info.name = "block";
+ q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
+ q->backing_dev_info->name = "block";
q->node = node_id;
- err = bdi_init(&q->backing_dev_info);
- if (err)
- goto fail_split;
-
- setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
+ setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
laptop_mode_timer_fn, (unsigned long) q);
setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
INIT_LIST_HEAD(&q->queue_head);
@@ -772,7 +769,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
fail_ref:
percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
- bdi_destroy(&q->backing_dev_info);
+ bdi_put(q->backing_dev_info);
fail_split:
bioset_free(q->bio_split);
fail_id:
@@ -1195,7 +1192,7 @@ fail_elvpriv:
* disturb iosched and blkcg but weird is better than dead.
*/
printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
- __func__, dev_name(q->backing_dev_info.dev));
+ __func__, dev_name(q->backing_dev_info->dev));
rq->cmd_flags &= ~REQ_ELVPRIV;
rq->elv.icq = NULL;
@@ -3251,7 +3248,7 @@ void blk_finish_request(struct request *req, int error)
BUG_ON(blk_queued_rq(req));
if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
- laptop_io_completion(&req->q->backing_dev_info);
+ laptop_io_completion(req->q->backing_dev_info);
blk_delete_timer(req);
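These block hunks convert the backing_dev_info embedded in the request queue into a separately allocated, reference-counted object: bdi_alloc_node() at queue setup, bdi_put() on the error and release paths, so the bdi can outlive its queue. A generic sketch of that embed-to-refcount conversion, with invented names (not the kernel bdi API):

	#include <stdio.h>
	#include <stdlib.h>

	struct bdi {
		int refcnt;
		unsigned long ra_pages;
	};

	static struct bdi *bdi_alloc(void)
	{
		struct bdi *b = calloc(1, sizeof(*b));

		if (b)
			b->refcnt = 1;
		return b;
	}

	static struct bdi *bdi_get(struct bdi *b) { b->refcnt++; return b; }

	static void bdi_put(struct bdi *b)
	{
		if (--b->refcnt == 0)
			free(b);
	}

	struct queue {
		struct bdi *bdi;	/* was: struct bdi bdi; (embedded) */
	};

	int main(void)
	{
		struct queue q = { .bdi = bdi_alloc() };
		struct bdi *held = bdi_get(q.bdi);	/* e.g. an open bdev */

		bdi_put(q.bdi);		/* queue torn down... */
		q.bdi = NULL;
		printf("still readable: %lu\n", held->ra_pages);
		bdi_put(held);		/* ...bdi freed only now */
		return 0;
	}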
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 478f572cb1e7..e4ebd79de679 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -418,7 +418,7 @@ void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template
bi->tuple_size = template->tuple_size;
bi->tag_size = template->tag_size;
- disk->queue->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
+ disk->queue->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
}
EXPORT_SYMBOL(blk_integrity_register);
@@ -431,7 +431,7 @@ EXPORT_SYMBOL(blk_integrity_register);
*/
void blk_integrity_unregister(struct gendisk *disk)
{
- disk->queue->backing_dev_info.capabilities &= ~BDI_CAP_STABLE_WRITES;
+ disk->queue->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
}
EXPORT_SYMBOL(blk_integrity_unregister);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index e140cc487ce1..a113dc1e3eb7 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -75,7 +75,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
- unsigned long ra_kb = q->backing_dev_info.ra_pages <<
+ unsigned long ra_kb = q->backing_dev_info->ra_pages <<
(PAGE_CACHE_SHIFT - 10);
return queue_var_show(ra_kb, (page));
@@ -90,7 +90,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
if (ret < 0)
return ret;
- q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+ q->backing_dev_info->ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
return ret;
}
@@ -578,7 +578,7 @@ static void blk_release_queue(struct kobject *kobj)
struct request_queue *q =
container_of(kobj, struct request_queue, kobj);
- bdi_exit(&q->backing_dev_info);
+ bdi_put(q->backing_dev_info);
blkcg_exit_queue(q);
if (q->elevator) {
diff --git a/block/genhd.c b/block/genhd.c
index de2a6162e3f3..53a931b30d78 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -611,7 +611,7 @@ void add_disk(struct gendisk *disk)
disk_alloc_events(disk);
/* Register BDI before referencing it from bdev */
- bdi = &disk->queue->backing_dev_info;
+ bdi = disk->queue->backing_dev_info;
bdi_register_owner(bdi, disk_to_dev(disk));
blk_register_region(disk_devt(disk), disk->minors, NULL,
@@ -646,6 +646,8 @@ void del_gendisk(struct gendisk *disk)
disk_part_iter_init(&piter, disk,
DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
while ((part = disk_part_iter_next(&piter))) {
+ bdev_unhash_inode(MKDEV(disk->major,
+ disk->first_minor + part->partno));
invalidate_partition(disk, part->partno);
delete_partition(disk, part->partno);
}
@@ -661,7 +663,7 @@ void del_gendisk(struct gendisk *disk)
* Unregister bdi before releasing device numbers (as they can
* get reused and we'd get clashes in sysfs).
*/
- bdi_unregister(&disk->queue->backing_dev_info);
+ bdi_unregister(disk->queue->backing_dev_info);
blk_unregister_queue(disk);
} else {
WARN_ON(1);
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index f5e9f9310b48..d12782dc9683 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -86,8 +86,13 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
}
sgl = sreq->tsg;
n = sg_nents(sgl);
- for_each_sg(sgl, sg, n, i)
- put_page(sg_page(sg));
+ for_each_sg(sgl, sg, n, i) {
+ struct page *page = sg_page(sg);
+
+ /* some SGs may not have a page mapped */
+ if (page && atomic_read(&page->_count))
+ put_page(page);
+ }
kfree(sreq->tsg);
}
@@ -138,8 +143,10 @@ static int skcipher_alloc_sgl(struct sock *sk)
sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
sgl->cur = 0;
- if (sg)
+ if (sg) {
sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
+ sg_unmark_end(sg + (MAX_SGL_ENTS - 1));
+ }
list_add_tail(&sgl->list, &ctx->tsgl);
}
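The skcipher fix guards put_page() because a tx scatterlist slot may have no page attached at all, and an unreferenced page must not be put again. A toy model of the guard, assuming page structs persist at refcount 0 (as the kernel's struct page array does); all names here are invented:

	#include <stdio.h>

	struct page { int refcount; };

	/* Like the kernel's mem_map, page structs outlive a refcount of 0,
	 * so inspecting them in the guard is safe. */
	static struct page pages[2] = { { .refcount = 1 }, { .refcount = 0 } };

	static void put_page(struct page *p)
	{
		p->refcount--;	/* reaching 0 means the page is now free */
	}

	/* Defensive release mirroring the patch: skip empty slots and
	 * pages that hold no reference. */
	static void release_slots(struct page **slots, int n)
	{
		for (int i = 0; i < n; i++) {
			struct page *page = slots[i];

			if (page && page->refcount)
				put_page(page);
		}
	}

	int main(void)
	{
		struct page *slots[3] = { &pages[0], NULL, &pages[1] };

		release_slots(slots, 3);
		printf("refcounts after: %d %d\n",
		       pages[0].refcount, pages[1].refcount);	/* 0 0 */
		return 0;
	}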
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 4d4cdc1a6e25..01de42c8b74b 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -44,6 +44,16 @@ config ANDROID_BINDER_IPC_32BIT
Note that enabling this will break newer Android user-space.
+config ANDROID_BINDER_IPC_SELFTEST
+ bool "Android Binder IPC Driver Selftest"
+ depends on ANDROID_BINDER_IPC
+ ---help---
+ This feature allows the binder selftest to run.
+
+ The binder selftest exhaustively checks binder buffer allocation
+ and freeing across combinations of various buffer sizes and
+ alignments.
+
endif # if ANDROID
endmenu
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index 4b7c726bb560..a01254c43ee3 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -1,3 +1,4 @@
ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
+obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 13598d807de0..3419cb0b4447 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2481,7 +2481,6 @@ static int binder_translate_handle(struct flat_binder_object *fp,
(u64)node->ptr);
binder_node_unlock(node);
} else {
- int ret;
struct binder_ref_data dest_rdata;
binder_node_unlock(node);
@@ -3253,6 +3252,7 @@ static void binder_transaction(struct binder_proc *proc,
err_dead_proc_or_thread:
return_error = BR_DEAD_REPLY;
return_error_line = __LINE__;
+ binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
@@ -4580,6 +4580,8 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/*pr_info("binder_ioctl: %d:%d %x %lx\n",
proc->pid, current->pid, cmd, arg);*/
+ binder_selftest_alloc(&proc->alloc);
+
trace_binder_ioctl(cmd, arg);
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
@@ -5425,6 +5427,8 @@ static void print_binder_proc_stats(struct seq_file *m,
count = binder_alloc_get_allocated_count(&proc->alloc);
seq_printf(m, " buffers: %d\n", count);
+ binder_alloc_print_pages(m, &proc->alloc);
+
count = 0;
binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry) {
@@ -5621,6 +5625,8 @@ static int __init binder_init(void)
struct binder_device *device;
struct hlist_node *tmp;
+ binder_alloc_shrinker_init();
+
atomic_set(&binder_transaction_log.cur, ~0U);
atomic_set(&binder_transaction_log_failed.cur, ~0U);
binder_deferred_workqueue = create_singlethread_workqueue("binder");
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index aabfebac6e57..b95da16fd938 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -27,9 +27,12 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/list_lru.h>
#include "binder_alloc.h"
#include "binder_trace.h"
+struct list_lru binder_alloc_lru;
+
static DEFINE_MUTEX(binder_alloc_mmap_lock);
enum {
@@ -48,14 +51,23 @@ module_param_named(debug_mask, binder_alloc_debug_mask,
pr_info(x); \
} while (0)
+static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
+{
+ return list_entry(buffer->entry.next, struct binder_buffer, entry);
+}
+
+static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
+{
+ return list_entry(buffer->entry.prev, struct binder_buffer, entry);
+}
+
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
if (list_is_last(&buffer->entry, &alloc->buffers))
- return alloc->buffer +
- alloc->buffer_size - (void *)buffer->data;
- return (size_t)list_entry(buffer->entry.next,
- struct binder_buffer, entry) - (size_t)buffer->data;
+ return (u8 *)alloc->buffer +
+ alloc->buffer_size - (u8 *)buffer->data;
+ return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}
static void binder_insert_free_buffer(struct binder_alloc *alloc,
@@ -105,9 +117,9 @@ static void binder_insert_allocated_buffer_locked(
buffer = rb_entry(parent, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
- if (new_buffer < buffer)
+ if (new_buffer->data < buffer->data)
p = &parent->rb_left;
- else if (new_buffer > buffer)
+ else if (new_buffer->data > buffer->data)
p = &parent->rb_right;
else
BUG();
@@ -122,18 +134,17 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
{
struct rb_node *n = alloc->allocated_buffers.rb_node;
struct binder_buffer *buffer;
- struct binder_buffer *kern_ptr;
+ void *kern_ptr;
- kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
- - offsetof(struct binder_buffer, data));
+ kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
- if (kern_ptr < buffer)
+ if (kern_ptr < buffer->data)
n = n->rb_left;
- else if (kern_ptr > buffer)
+ else if (kern_ptr > buffer->data)
n = n->rb_right;
else {
/*
@@ -175,13 +186,14 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
}
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
- void *start, void *end,
- struct vm_area_struct *vma)
+ void *start, void *end)
{
void *page_addr;
unsigned long user_page_addr;
- struct page **page;
- struct mm_struct *mm;
+ struct binder_lru_page *page;
+ struct vm_area_struct *vma = NULL;
+ struct mm_struct *mm = NULL;
+ bool need_mm = false;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: %s pages %pK-%pK\n", alloc->pid,
@@ -192,25 +204,27 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
trace_binder_update_page_range(alloc, allocate, start, end);
- if (vma)
- mm = NULL;
- else
- mm = get_task_mm(alloc->tsk);
+ if (allocate == 0)
+ goto free_range;
+
+ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+ if (!page->page_ptr) {
+ need_mm = true;
+ break;
+ }
+ }
+
+ /* Same as mmget_not_zero() in later kernel versions */
+ if (need_mm && atomic_inc_not_zero(&alloc->vma_vm_mm->mm_users))
+ mm = alloc->vma_vm_mm;
if (mm) {
down_write(&mm->mmap_sem);
vma = alloc->vma;
- if (vma && mm != alloc->vma_vm_mm) {
- pr_err("%d: vma mm and task mm mismatch\n",
- alloc->pid);
- vma = NULL;
- }
}
- if (allocate == 0)
- goto free_range;
-
- if (vma == NULL) {
+ if (!vma && need_mm) {
pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
alloc->pid);
goto err_no_vma;
@@ -218,18 +232,40 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
int ret;
+ bool on_lru;
+ size_t index;
- page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+ index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ page = &alloc->pages[index];
- BUG_ON(*page);
- *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
- if (*page == NULL) {
+ if (page->page_ptr) {
+ trace_binder_alloc_lru_start(alloc, index);
+
+ on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
+ WARN_ON(!on_lru);
+
+ trace_binder_alloc_lru_end(alloc, index);
+ continue;
+ }
+
+ if (WARN_ON(!vma))
+ goto err_page_ptr_cleared;
+
+ trace_binder_alloc_page_start(alloc, index);
+ page->page_ptr = alloc_page(GFP_KERNEL |
+ __GFP_HIGHMEM |
+ __GFP_ZERO);
+ if (!page->page_ptr) {
pr_err("%d: binder_alloc_buf failed for page at %pK\n",
alloc->pid, page_addr);
goto err_alloc_page_failed;
}
+ page->alloc = alloc;
+ INIT_LIST_HEAD(&page->lru);
+
ret = map_kernel_range_noflush((unsigned long)page_addr,
- PAGE_SIZE, PAGE_KERNEL, page);
+ PAGE_SIZE, PAGE_KERNEL,
+ &page->page_ptr);
flush_cache_vmap((unsigned long)page_addr,
(unsigned long)page_addr + PAGE_SIZE);
if (ret != 1) {
@@ -239,12 +275,14 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
user_page_addr =
(uintptr_t)page_addr + alloc->user_buffer_offset;
- ret = vm_insert_page(vma, user_page_addr, page[0]);
+ ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
if (ret) {
pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
alloc->pid, user_page_addr);
goto err_vm_insert_page_failed;
}
+
+ trace_binder_alloc_page_end(alloc, index);
/* vm_insert_page does not seem to increment the refcount */
}
if (mm) {
@@ -256,16 +294,27 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
free_range:
for (page_addr = end - PAGE_SIZE; page_addr >= start;
page_addr -= PAGE_SIZE) {
- page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
- if (vma)
- zap_page_range(vma, (uintptr_t)page_addr +
- alloc->user_buffer_offset, PAGE_SIZE, NULL);
+ bool ret;
+ size_t index;
+
+ index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ page = &alloc->pages[index];
+
+ trace_binder_free_lru_start(alloc, index);
+
+ ret = list_lru_add(&binder_alloc_lru, &page->lru);
+ WARN_ON(!ret);
+
+ trace_binder_free_lru_end(alloc, index);
+ continue;
+
err_vm_insert_page_failed:
unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
- __free_page(*page);
- *page = NULL;
+ __free_page(page->page_ptr);
+ page->page_ptr = NULL;
err_alloc_page_failed:
+err_page_ptr_cleared:
;
}
err_no_vma:
@@ -321,6 +370,9 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
return ERR_PTR(-ENOSPC);
}
+ /* Pad 0-size buffers so they get assigned unique addresses */
+ size = max(size, sizeof(void *));
+
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(!buffer->free);
@@ -380,32 +432,35 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
has_page_addr =
(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
- if (n == NULL) {
- if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
- buffer_size = size; /* no room for other buffers */
- else
- buffer_size = size + sizeof(struct binder_buffer);
- }
+ WARN_ON(n && buffer_size != size);
end_page_addr =
- (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
if (end_page_addr > has_page_addr)
end_page_addr = has_page_addr;
ret = binder_update_page_range(alloc, 1,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
if (ret)
return ERR_PTR(ret);
- rb_erase(best_fit, &alloc->free_buffers);
- buffer->free = 0;
- buffer->free_in_progress = 0;
- binder_insert_allocated_buffer_locked(alloc, buffer);
if (buffer_size != size) {
- struct binder_buffer *new_buffer = (void *)buffer->data + size;
+ struct binder_buffer *new_buffer;
+ new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!new_buffer) {
+ pr_err("%s: %d failed to alloc new buffer struct\n",
+ __func__, alloc->pid);
+ goto err_alloc_buf_struct_failed;
+ }
+ new_buffer->data = (u8 *)buffer->data + size;
list_add(&new_buffer->entry, &buffer->entry);
new_buffer->free = 1;
binder_insert_free_buffer(alloc, new_buffer);
}
+
+ rb_erase(best_fit, &alloc->free_buffers);
+ buffer->free = 0;
+ buffer->free_in_progress = 0;
+ binder_insert_allocated_buffer_locked(alloc, buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_alloc_buf size %zd got %pK\n",
alloc->pid, size, buffer);
@@ -420,6 +475,12 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
alloc->pid, size, alloc->free_async_space);
}
return buffer;
+
+err_alloc_buf_struct_failed:
+ binder_update_page_range(alloc, 0,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ end_page_addr);
+ return ERR_PTR(-ENOMEM);
}
/**
@@ -454,57 +515,58 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
static void *buffer_start_page(struct binder_buffer *buffer)
{
- return (void *)((uintptr_t)buffer & PAGE_MASK);
+ return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}
-static void *buffer_end_page(struct binder_buffer *buffer)
+static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
- return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+ return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
}
static void binder_delete_free_buffer(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
struct binder_buffer *prev, *next = NULL;
- int free_page_end = 1;
- int free_page_start = 1;
-
+ bool to_free = true;
BUG_ON(alloc->buffers.next == &buffer->entry);
- prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
+ prev = binder_buffer_prev(buffer);
BUG_ON(!prev->free);
- if (buffer_end_page(prev) == buffer_start_page(buffer)) {
- free_page_start = 0;
- if (buffer_end_page(prev) == buffer_end_page(buffer))
- free_page_end = 0;
+ if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
+ to_free = false;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %pK share page with %pK\n",
- alloc->pid, buffer, prev);
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid, buffer->data, prev->data);
}
if (!list_is_last(&buffer->entry, &alloc->buffers)) {
- next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
- if (buffer_start_page(next) == buffer_end_page(buffer)) {
- free_page_end = 0;
- if (buffer_start_page(next) ==
- buffer_start_page(buffer))
- free_page_start = 0;
+ next = binder_buffer_next(buffer);
+ if (buffer_start_page(next) == buffer_start_page(buffer)) {
+ to_free = false;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %pK share page with %pK\n",
- alloc->pid, buffer, prev);
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid,
+ buffer->data,
+ next->data);
}
}
- list_del(&buffer->entry);
- if (free_page_start || free_page_end) {
+
+ if (PAGE_ALIGNED(buffer->data)) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
- alloc->pid, buffer, free_page_start ? "" : " end",
- free_page_end ? "" : " start", prev, next);
- binder_update_page_range(alloc, 0, free_page_start ?
- buffer_start_page(buffer) : buffer_end_page(buffer),
- (free_page_end ? buffer_end_page(buffer) :
- buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+ "%d: merge free, buffer start %pK is page aligned\n",
+ alloc->pid, buffer->data);
+ to_free = false;
}
+
+ if (to_free) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
+ alloc->pid, buffer->data,
+ prev->data, next->data);
+ binder_update_page_range(alloc, 0, buffer_start_page(buffer),
+ buffer_start_page(buffer) + PAGE_SIZE);
+ }
+ list_del(&buffer->entry);
+ kfree(buffer);
}
static void binder_free_buf_locked(struct binder_alloc *alloc,
@@ -525,8 +587,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->free);
BUG_ON(size > buffer_size);
BUG_ON(buffer->transaction != NULL);
- BUG_ON((void *)buffer < alloc->buffer);
- BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);
+ BUG_ON(buffer->data < alloc->buffer);
+ BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
if (buffer->async_transaction) {
alloc->free_async_space += size + sizeof(struct binder_buffer);
@@ -538,14 +600,12 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
binder_update_page_range(alloc, 0,
(void *)PAGE_ALIGN((uintptr_t)buffer->data),
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
- NULL);
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));
rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
buffer->free = 1;
if (!list_is_last(&buffer->entry, &alloc->buffers)) {
- struct binder_buffer *next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
+ struct binder_buffer *next = binder_buffer_next(buffer);
if (next->free) {
rb_erase(&next->rb_node, &alloc->free_buffers);
@@ -553,8 +613,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
}
}
if (alloc->buffers.next != &buffer->entry) {
- struct binder_buffer *prev = list_entry(buffer->entry.prev,
- struct binder_buffer, entry);
+ struct binder_buffer *prev = binder_buffer_prev(buffer);
if (prev->free) {
binder_delete_free_buffer(alloc, buffer);
@@ -640,14 +699,14 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
}
alloc->buffer_size = vma->vm_end - vma->vm_start;
- if (binder_update_page_range(alloc, 1, alloc->buffer,
- alloc->buffer + PAGE_SIZE, vma)) {
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
ret = -ENOMEM;
- failure_string = "alloc small buf";
- goto err_alloc_small_buf_failed;
+ failure_string = "alloc buffer struct";
+ goto err_alloc_buf_struct_failed;
}
- buffer = alloc->buffer;
- INIT_LIST_HEAD(&alloc->buffers);
+
+ buffer->data = alloc->buffer;
list_add(&buffer->entry, &alloc->buffers);
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
@@ -655,10 +714,12 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
barrier();
alloc->vma = vma;
alloc->vma_vm_mm = vma->vm_mm;
+ /* Same as mmgrab() in later kernel versions */
+ atomic_inc(&alloc->vma_vm_mm->mm_count);
return 0;
-err_alloc_small_buf_failed:
+err_alloc_buf_struct_failed:
kfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
@@ -678,14 +739,13 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
struct rb_node *n;
int buffers, page_count;
+ struct binder_buffer *buffer;
BUG_ON(alloc->vma);
buffers = 0;
mutex_lock(&alloc->mutex);
while ((n = rb_first(&alloc->allocated_buffers))) {
- struct binder_buffer *buffer;
-
buffer = rb_entry(n, struct binder_buffer, rb_node);
/* Transaction should already have been freed */
@@ -695,28 +755,44 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
buffers++;
}
+ while (!list_empty(&alloc->buffers)) {
+ buffer = list_first_entry(&alloc->buffers,
+ struct binder_buffer, entry);
+ WARN_ON(!buffer->free);
+
+ list_del(&buffer->entry);
+ WARN_ON_ONCE(!list_empty(&alloc->buffers));
+ kfree(buffer);
+ }
+
page_count = 0;
if (alloc->pages) {
int i;
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
void *page_addr;
+ bool on_lru;
- if (!alloc->pages[i])
+ if (!alloc->pages[i].page_ptr)
continue;
+ on_lru = list_lru_del(&binder_alloc_lru,
+ &alloc->pages[i].lru);
page_addr = alloc->buffer + i * PAGE_SIZE;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%s: %d: page %d at %pK not freed\n",
- __func__, alloc->pid, i, page_addr);
+ "%s: %d: page %d at %pK %s\n",
+ __func__, alloc->pid, i, page_addr,
+ on_lru ? "on lru" : "active");
unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
- __free_page(alloc->pages[i]);
+ __free_page(alloc->pages[i].page_ptr);
page_count++;
}
kfree(alloc->pages);
vfree(alloc->buffer);
}
mutex_unlock(&alloc->mutex);
+ if (alloc->vma_vm_mm)
+ mmdrop(alloc->vma_vm_mm);
binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d buffers %d, pages %d\n",
@@ -754,6 +830,34 @@ void binder_alloc_print_allocated(struct seq_file *m,
}
/**
+ * binder_alloc_print_pages() - print page usage
+ * @m: seq_file for output via seq_printf()
+ * @alloc: binder_alloc for this proc
+ */
+void binder_alloc_print_pages(struct seq_file *m,
+ struct binder_alloc *alloc)
+{
+ struct binder_lru_page *page;
+ int i;
+ int active = 0;
+ int lru = 0;
+ int free = 0;
+
+ mutex_lock(&alloc->mutex);
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ page = &alloc->pages[i];
+ if (!page->page_ptr)
+ free++;
+ else if (list_empty(&page->lru))
+ active++;
+ else
+ lru++;
+ }
+ mutex_unlock(&alloc->mutex);
+ seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
+}
+
+/**
* binder_alloc_get_allocated_count() - return count of buffers
* @alloc: binder_alloc for this proc
*
@@ -783,10 +887,112 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
WRITE_ONCE(alloc->vma, NULL);
- WRITE_ONCE(alloc->vma_vm_mm, NULL);
}
/**
+ * binder_alloc_free_page() - shrinker callback to free pages
+ * @item: item to free
+ * @lru: list_lru the item belongs to
+ * @lock: lock protecting the item
+ * @cb_arg: callback argument
+ *
+ * Called from list_lru_walk() in binder_shrink_scan() to free
+ * up pages when the system is under memory pressure.
+ */
+enum lru_status binder_alloc_free_page(struct list_head *item,
+ struct list_lru_one *lru,
+ spinlock_t *lock,
+ void *cb_arg)
+{
+ struct mm_struct *mm = NULL;
+ struct binder_lru_page *page = container_of(item,
+ struct binder_lru_page,
+ lru);
+ struct binder_alloc *alloc;
+ uintptr_t page_addr;
+ size_t index;
+ struct vm_area_struct *vma;
+
+ alloc = page->alloc;
+ if (!mutex_trylock(&alloc->mutex))
+ goto err_get_alloc_mutex_failed;
+
+ if (!page->page_ptr)
+ goto err_page_already_freed;
+
+ index = page - alloc->pages;
+ page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+ vma = alloc->vma;
+ if (vma) {
+ /* Same as mmget_not_zero() in later kernel versions */
+ if (!atomic_inc_not_zero(&alloc->vma_vm_mm->mm_users))
+ goto err_mmget;
+ mm = alloc->vma_vm_mm;
+ if (!down_write_trylock(&mm->mmap_sem))
+ goto err_down_write_mmap_sem_failed;
+ }
+
+ list_lru_isolate(lru, item);
+ spin_unlock(lock);
+
+ if (vma) {
+ trace_binder_unmap_user_start(alloc, index);
+
+ zap_page_range(vma,
+ page_addr +
+ alloc->user_buffer_offset,
+ PAGE_SIZE, NULL);
+
+ trace_binder_unmap_user_end(alloc, index);
+
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+
+ trace_binder_unmap_kernel_start(alloc, index);
+
+ unmap_kernel_range(page_addr, PAGE_SIZE);
+ __free_page(page->page_ptr);
+ page->page_ptr = NULL;
+
+ trace_binder_unmap_kernel_end(alloc, index);
+
+ spin_lock(lock);
+ mutex_unlock(&alloc->mutex);
+ return LRU_REMOVED_RETRY;
+
+err_down_write_mmap_sem_failed:
+ mmput_async(mm);
+err_mmget:
+err_page_already_freed:
+ mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
+ return LRU_SKIP;
+}
+
+static unsigned long
+binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ unsigned long ret = list_lru_count(&binder_alloc_lru);
+ return ret;
+}
+
+static unsigned long
+binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+ unsigned long ret;
+
+ ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+ NULL, sc->nr_to_scan);
+ return ret;
+}
+
+struct shrinker binder_shrinker = {
+ .count_objects = binder_shrink_count,
+ .scan_objects = binder_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
+/**
* binder_alloc_init() - called by binder_open() for per-proc initialization
* @alloc: binder_alloc for this proc
*
@@ -795,8 +1001,13 @@ void binder_alloc_vma_close(struct binder_alloc *alloc)
*/
void binder_alloc_init(struct binder_alloc *alloc)
{
- alloc->tsk = current->group_leader;
alloc->pid = current->group_leader->pid;
mutex_init(&alloc->mutex);
+ INIT_LIST_HEAD(&alloc->buffers);
}
+void binder_alloc_shrinker_init(void)
+{
+ list_lru_init(&binder_alloc_lru);
+ register_shrinker(&binder_shrinker);
+}
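binder_alloc_shrinker_init() ties the allocator to memory pressure: pages of freed buffers go onto the global binder_alloc_lru instead of being freed eagerly, binder_shrink_count() reports the lru size, and binder_shrink_scan() walks it with binder_alloc_free_page(). A toy user-space model of that count/scan contract (a singly linked list stands in for list_lru; all names invented):

	#include <stdio.h>
	#include <stdlib.h>

	struct lru_page {
		struct lru_page *next;
		void *mem;
	};

	static struct lru_page *lru_head;
	static unsigned long lru_count;

	/* Buffer freed: its page becomes reclaimable, not freed. */
	static void lru_add(void *mem)
	{
		struct lru_page *p = malloc(sizeof(*p));

		p->mem = mem;
		p->next = lru_head;
		lru_head = p;
		lru_count++;
	}

	static unsigned long shrink_count(void) { return lru_count; }

	static unsigned long shrink_scan(unsigned long nr_to_scan)
	{
		unsigned long freed = 0;

		while (lru_head && freed < nr_to_scan) {	/* like list_lru_walk */
			struct lru_page *p = lru_head;

			lru_head = p->next;
			free(p->mem);	/* binder_alloc_free_page() analogue */
			free(p);
			lru_count--;
			freed++;
		}
		return freed;
	}

	int main(void)
	{
		for (int i = 0; i < 4; i++)
			lru_add(malloc(4096));
		printf("reclaimable: %lu\n", shrink_count());
		printf("freed under pressure: %lu\n", shrink_scan(2));
		printf("reclaimable now: %lu\n", shrink_count());
		return 0;
	}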
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 088e4ffc6230..2dd33b6df104 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -21,7 +21,9 @@
#include <linux/rtmutex.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/list_lru.h>
+extern struct list_lru binder_alloc_lru;
struct binder_transaction;
/**
@@ -57,7 +59,19 @@ struct binder_buffer {
size_t data_size;
size_t offsets_size;
size_t extra_buffers_size;
- uint8_t data[0];
+ void *data;
+};
+
+/**
+ * struct binder_lru_page - page object used for binder shrinker
+ * @page_ptr: pointer to physical page in mmap'd space
+ * @lru: entry in binder_alloc_lru
+ * @alloc: binder_alloc for a proc
+ */
+struct binder_lru_page {
+ struct list_head lru;
+ struct page *page_ptr;
+ struct binder_alloc *alloc;
};
/**
@@ -75,8 +89,7 @@ struct binder_buffer {
* @allocated_buffers: rb tree of allocated buffers sorted by address
* @free_async_space: VA space available for async buffers. This is
* initialized at mmap time to 1/2 the full VA space
- * @pages: array of physical page addresses for each
- * page of mmap'd space
+ * @pages: array of binder_lru_page
* @buffer_size: size of address space specified via mmap
* @pid: pid for associated binder_proc (invariant after init)
*
@@ -87,7 +100,6 @@ struct binder_buffer {
*/
struct binder_alloc {
struct mutex mutex;
- struct task_struct *tsk;
struct vm_area_struct *vma;
struct mm_struct *vma_vm_mm;
void *buffer;
@@ -96,18 +108,27 @@ struct binder_alloc {
struct rb_root free_buffers;
struct rb_root allocated_buffers;
size_t free_async_space;
- struct page **pages;
+ struct binder_lru_page *pages;
size_t buffer_size;
uint32_t buffer_free;
int pid;
};
+#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
+void binder_selftest_alloc(struct binder_alloc *alloc);
+#else
+static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
+#endif
+enum lru_status binder_alloc_free_page(struct list_head *item,
+ struct list_lru_one *lru,
+ spinlock_t *lock, void *cb_arg);
extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
int is_async);
extern void binder_alloc_init(struct binder_alloc *alloc);
+void binder_alloc_shrinker_init(void);
extern void binder_alloc_vma_close(struct binder_alloc *alloc);
extern struct binder_buffer *
binder_alloc_prepare_to_free(struct binder_alloc *alloc,
@@ -120,6 +141,8 @@ extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
extern void binder_alloc_print_allocated(struct seq_file *m,
struct binder_alloc *alloc);
+void binder_alloc_print_pages(struct seq_file *m,
+ struct binder_alloc *alloc);
/**
* binder_alloc_get_free_async_space() - get free space available for async
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
new file mode 100644
index 000000000000..8bd7bcef967d
--- /dev/null
+++ b/drivers/android/binder_alloc_selftest.c
@@ -0,0 +1,310 @@
+/* binder_alloc_selftest.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm_types.h>
+#include <linux/err.h>
+#include "binder_alloc.h"
+
+#define BUFFER_NUM 5
+#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)
+
+static bool binder_selftest_run = true;
+static int binder_selftest_failures;
+static DEFINE_MUTEX(binder_selftest_lock);
+
+/**
+ * enum buf_end_align_type - Page alignment of a buffer
+ * end with regard to the end of the previous buffer.
+ *
+ * In the pictures below, buf2 refers to the buffer we
+ * are aligning. buf1 refers to previous buffer by addr.
+ * Symbol [ means the start of a buffer, ] means the end
+ * of a buffer, and | means page boundaries.
+ */
+enum buf_end_align_type {
+ /**
+ * @SAME_PAGE_UNALIGNED: The end of this buffer is on
+ * the same page as the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 ][ ...
+ * buf1 ]|[ buf2 ][ ...
+ */
+ SAME_PAGE_UNALIGNED = 0,
+ /**
+ * @SAME_PAGE_ALIGNED: When the end of the previous buffer
+ * is not page aligned, the end of this buffer is on the
+ * same page as the end of the previous buffer and is page
+ * aligned. When the previous buffer is page aligned, the
+ * end of this buffer is aligned to the next page boundary.
+ * Examples:
+ * buf1 ][ buf2 ]| ...
+ * buf1 ]|[ buf2 ]| ...
+ */
+ SAME_PAGE_ALIGNED,
+ /**
+ * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 ][ ...
+ */
+ NEXT_PAGE_UNALIGNED,
+ /**
+ * @NEXT_PAGE_ALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ]| ...
+ * buf1 ]|[ buf2 | buf2 ]| ...
+ */
+ NEXT_PAGE_ALIGNED,
+ /**
+ * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
+ * the page that follows the page after the end of the
+ * previous buffer and is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
+ */
+ NEXT_NEXT_UNALIGNED,
+ LOOP_END,
+};
+
+static void pr_err_size_seq(size_t *sizes, int *seq)
+{
+ int i;
+
+ pr_err("alloc sizes: ");
+ for (i = 0; i < BUFFER_NUM; i++)
+ pr_cont("[%zu]", sizes[i]);
+ pr_cont("\n");
+ pr_err("free seq: ");
+ for (i = 0; i < BUFFER_NUM; i++)
+ pr_cont("[%d]", seq[i]);
+ pr_cont("\n");
+}
+
+static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ size_t size)
+{
+ void *page_addr, *end;
+ int page_index;
+
+ end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+ page_addr = buffer->data;
+ for (; page_addr < end; page_addr += PAGE_SIZE) {
+ page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ if (!alloc->pages[page_index].page_ptr ||
+ !list_empty(&alloc->pages[page_index].lru)) {
+ pr_err("expect alloc but is %s at page index %d\n",
+ alloc->pages[page_index].page_ptr ?
+ "lru" : "free", page_index);
+ return false;
+ }
+ }
+ return true;
+}
+
+static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++) {
+ buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
+ if (IS_ERR(buffers[i]) ||
+ !check_buffer_pages_allocated(alloc, buffers[i],
+ sizes[i])) {
+ pr_err_size_seq(sizes, seq);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq, size_t end)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++)
+ binder_alloc_free_buf(alloc, buffers[seq[i]]);
+
+ for (i = 0; i < end / PAGE_SIZE; i++) {
+ /*
+ * An error message on a free page can be a false positive
+ * if the binder shrinker ran during the binder_alloc_free_buf
+ * calls above.
+ */
+ if (list_empty(&alloc->pages[i].lru)) {
+ pr_err_size_seq(sizes, seq);
+ pr_err("expect lru but is %s at page index %d\n",
+ alloc->pages[i].page_ptr ? "alloc" : "free", i);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_free_page(struct binder_alloc *alloc)
+{
+ int i;
+ unsigned long count;
+
+ while ((count = list_lru_count(&binder_alloc_lru))) {
+ list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+ NULL, count);
+ }
+
+ for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
+ if (alloc->pages[i].page_ptr) {
+ pr_err("expect free but is %s at page index %d\n",
+ list_empty(&alloc->pages[i].lru) ?
+ "alloc" : "lru", i);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_alloc_free(struct binder_alloc *alloc,
+ size_t *sizes, int *seq, size_t end)
+{
+ struct binder_buffer *buffers[BUFFER_NUM];
+
+ binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+ binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+
+ /* Allocate from lru. */
+ binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+ if (list_lru_count(&binder_alloc_lru))
+ pr_err("lru list should be empty but is not\n");
+
+ binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+ binder_selftest_free_page(alloc);
+}
+
+static bool is_dup(int *seq, int index, int val)
+{
+ int i;
+
+ for (i = 0; i < index; i++) {
+ if (seq[i] == val)
+ return true;
+ }
+ return false;
+}
+
+/* Generate BUFFER_NUM factorial free orders. */
+static void binder_selftest_free_seq(struct binder_alloc *alloc,
+ size_t *sizes, int *seq,
+ int index, size_t end)
+{
+ int i;
+
+ if (index == BUFFER_NUM) {
+ binder_selftest_alloc_free(alloc, sizes, seq, end);
+ return;
+ }
+ for (i = 0; i < BUFFER_NUM; i++) {
+ if (is_dup(seq, index, i))
+ continue;
+ seq[index] = i;
+ binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
+ }
+}
+
+static void binder_selftest_alloc_size(struct binder_alloc *alloc,
+ size_t *end_offset)
+{
+ int i;
+ int seq[BUFFER_NUM] = {0};
+ size_t front_sizes[BUFFER_NUM];
+ size_t back_sizes[BUFFER_NUM];
+ size_t last_offset, offset = 0;
+
+ for (i = 0; i < BUFFER_NUM; i++) {
+ last_offset = offset;
+ offset = end_offset[i];
+ front_sizes[i] = offset - last_offset;
+ back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
+ }
+ /*
+ * Buffers share the first or last few pages.
+ * Only BUFFER_NUM - 1 buffer sizes are adjustable since
+ * we need one giant buffer before getting to the last page.
+ */
+ back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
+ binder_selftest_free_seq(alloc, front_sizes, seq, 0,
+ end_offset[BUFFER_NUM - 1]);
+ binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
+}
+
+static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
+ size_t *end_offset, int index)
+{
+ int align;
+ size_t end, prev;
+
+ if (index == BUFFER_NUM) {
+ binder_selftest_alloc_size(alloc, end_offset);
+ return;
+ }
+ prev = index == 0 ? 0 : end_offset[index - 1];
+ end = prev;
+
+ BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
+
+ for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
+ if (align % 2)
+ end = ALIGN(end, PAGE_SIZE);
+ else
+ end += BUFFER_MIN_SIZE;
+ end_offset[index] = end;
+ binder_selftest_alloc_offset(alloc, end_offset, index + 1);
+ }
+}
+
+/**
+ * binder_selftest_alloc() - Test alloc and free of buffer pages.
+ * @alloc: Pointer to alloc struct.
+ *
+ * Allocate BUFFER_NUM buffers to cover all page alignment cases,
+ * then free them in all orders possible. Check that pages are
+ * correctly allocated, put onto lru when buffers are freed, and
+ * are freed when binder_alloc_free_page is called.
+ */
+void binder_selftest_alloc(struct binder_alloc *alloc)
+{
+ size_t end_offset[BUFFER_NUM];
+
+ if (!binder_selftest_run)
+ return;
+ mutex_lock(&binder_selftest_lock);
+ if (!binder_selftest_run || !alloc->vma)
+ goto done;
+ pr_info("STARTED\n");
+ binder_selftest_alloc_offset(alloc, end_offset, 0);
+ binder_selftest_run = false;
+ if (binder_selftest_failures > 0)
+ pr_info("%d tests FAILED\n", binder_selftest_failures);
+ else
+ pr_info("PASSED\n");
+
+done:
+ mutex_unlock(&binder_selftest_lock);
+}
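
The free-order enumeration in binder_selftest_free_seq() above is a plain recursive permutation walk: at each depth it tries every buffer index not already used earlier in seq[], so BUFFER_NUM buffers yield BUFFER_NUM! distinct free orders. A minimal standalone sketch of the same pattern (hypothetical userspace code, not part of this patch; BUFFER_NUM below is a local stand-in, not the kernel's value):

#include <stdio.h>

#define BUFFER_NUM 5

static int is_dup(const int *seq, int index, int val)
{
        int i;

        for (i = 0; i < index; i++)
                if (seq[i] == val)
                        return 1;
        return 0;
}

/* Visit all BUFFER_NUM! orders; where this prints, the selftest
 * instead runs a full alloc/free cycle for the sequence. */
static void walk_orders(int *seq, int index)
{
        int i;

        if (index == BUFFER_NUM) {
                for (i = 0; i < BUFFER_NUM; i++)
                        printf("%d ", seq[i]);
                printf("\n");
                return;
        }
        for (i = 0; i < BUFFER_NUM; i++) {
                if (is_dup(seq, index, i))
                        continue;
                seq[index] = i;
                walk_orders(seq, index + 1);
        }
}

int main(void)
{
        int seq[BUFFER_NUM] = {0};

        walk_orders(seq, 0);    /* 120 orders when BUFFER_NUM == 5 */
        return 0;
}
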
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 7967db16ba5a..76e3b9c8a8a2 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -291,6 +291,61 @@ TRACE_EVENT(binder_update_page_range,
__entry->offset, __entry->size)
);
+DECLARE_EVENT_CLASS(binder_lru_page_class,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index),
+ TP_STRUCT__entry(
+ __field(int, proc)
+ __field(size_t, page_index)
+ ),
+ TP_fast_assign(
+ __entry->proc = alloc->pid;
+ __entry->page_index = page_index;
+ ),
+ TP_printk("proc=%d page_index=%zu",
+ __entry->proc, __entry->page_index)
+);
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
TRACE_EVENT(binder_command,
TP_PROTO(uint32_t cmd),
TP_ARGS(cmd),
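
The DECLARE_EVENT_CLASS()/DEFINE_EVENT() pair above stamps out ten page-level tracepoints from a single template, so the record layout and print format live in one place. A loose userspace analogy of that macro factoring (illustrative only; the real macros also generate the ring-buffer and format plumbing):

#include <stdio.h>

/* One shared body ... */
#define DECLARE_CLASS(class)                                          \
        static void class##_body(int proc, size_t page_index)        \
        {                                                             \
                printf("proc=%d page_index=%zu\n", proc, page_index); \
        }

/* ... many named entry points that reuse it. */
#define DEFINE_TRACE(class, name)                                     \
        static void trace_##name(int proc, size_t page_index)         \
        {                                                             \
                class##_body(proc, page_index);                       \
        }

DECLARE_CLASS(binder_lru_page_class)
DEFINE_TRACE(binder_lru_page_class, binder_alloc_lru_start)
DEFINE_TRACE(binder_lru_page_class, binder_alloc_lru_end)

int main(void)
{
        trace_binder_alloc_lru_start(42, 3);
        trace_binder_alloc_lru_end(42, 3);
        return 0;
}
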
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 8d4d959a821c..8706533db57b 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -616,6 +616,7 @@ static const struct pci_device_id amd[] = {
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 8 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 8 },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 9 },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), 9 },
{ },
};
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 6c15a554efbe..dc1255294628 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -289,6 +289,7 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
static const struct pci_device_id cs5536[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), },
{ },
};
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 500592486e88..0346e46e2871 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -737,7 +737,7 @@ int bus_add_driver(struct device_driver *drv)
out_unregister:
kobject_put(&priv->kobj);
- kfree(drv->p);
+ /* drv->p is freed in driver_release() */
drv->p = NULL;
out_put_bus:
bus_put(bus);
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index dd73e1ff1759..aadab0381e0d 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -396,8 +396,8 @@ aoeblk_gdalloc(void *vp)
WARN_ON(d->gd);
WARN_ON(d->flags & DEVFL_UP);
blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
- q->backing_dev_info.name = "aoe";
- q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
+ q->backing_dev_info->name = "aoe";
+ q->backing_dev_info->ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
d->bufpool = mp;
d->blkq = gd->queue = q;
q->queuedata = d;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 1d58854c4a9f..1f9c77609dd1 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2393,7 +2393,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
if (get_ldev(device)) {
q = bdev_get_queue(device->ldev->backing_bdev);
- r = bdi_congested(&q->backing_dev_info, bdi_bits);
+ r = bdi_congested(q->backing_dev_info, bdi_bits);
put_ldev(device);
if (r)
reason = 'b';
@@ -2765,8 +2765,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
/* we have no partitions. we contain only ourselves. */
device->this_bdev->bd_contains = device->this_bdev;
- q->backing_dev_info.congested_fn = drbd_congested;
- q->backing_dev_info.congested_data = device;
+ q->backing_dev_info->congested_fn = drbd_congested;
+ q->backing_dev_info->congested_data = device;
blk_queue_make_request(q, drbd_make_request);
blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index e80cbefbc2b5..ef03cb25f5bf 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1170,11 +1170,11 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
blk_queue_stack_limits(q, b);
- if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
+ if (q->backing_dev_info->ra_pages != b->backing_dev_info->ra_pages) {
drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
- q->backing_dev_info.ra_pages,
- b->backing_dev_info.ra_pages);
- q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
+ q->backing_dev_info->ra_pages,
+ b->backing_dev_info->ra_pages);
+ q->backing_dev_info->ra_pages = b->backing_dev_info->ra_pages;
}
}
}
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 3b10fa6cb039..7a6b9f3e1a9f 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -288,7 +288,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "%2d: cs:Unconfigured\n", i);
} else {
/* reset device->congestion_reason */
- bdi_rw_congested(&device->rq_queue->backing_dev_info);
+ bdi_rw_congested(device->rq_queue->backing_dev_info);
nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 3ae2c0086563..17ae4e1ab358 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -937,7 +937,7 @@ static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t se
switch (rbm) {
case RB_CONGESTED_REMOTE:
- bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
+ bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
return bdi_read_congested(bdi);
case RB_LEAST_PENDING:
return atomic_read(&device->local_cnt) >
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index d06c62eccdf0..f018318d4466 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1276,7 +1276,7 @@ try_next_bio:
&& pd->bio_queue_size <= pd->write_congestion_off);
spin_unlock(&pd->lock);
if (wakeup) {
- clear_bdi_congested(&pd->disk->queue->backing_dev_info,
+ clear_bdi_congested(pd->disk->queue->backing_dev_info,
BLK_RW_ASYNC);
}
@@ -2405,7 +2405,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
spin_lock(&pd->lock);
if (pd->write_congestion_on > 0
&& pd->bio_queue_size >= pd->write_congestion_on) {
- set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
+ set_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC);
do {
spin_unlock(&pd->lock);
congestion_wait(BLK_RW_ASYNC, HZ);
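
Note the two watermarks in play above: pktcdvd marks the queue congested once bio_queue_size reaches write_congestion_on and clears the state only after the backlog drains to write_congestion_off, so the congested bit does not flap around a single threshold. The hysteresis reduced to a sketch (hypothetical names):

#include <stdbool.h>

static bool congested;

/* Callers must keep off_thresh below on_thresh. */
void update_congestion(int queued, int on_thresh, int off_thresh)
{
        if (!congested && queued >= on_thresh)
                congested = true;       /* backlog built up */
        else if (congested && queued <= off_thresh)
                congested = false;      /* drained far enough */
}
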
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index fbdddd6f94b8..55a8671f1979 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3780,7 +3780,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
q->limits.discard_zeroes_data = 1;
if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
- q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
+ q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
disk->queue = q;
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 586f9168ffa4..47d1e834f3f4 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -2214,6 +2214,9 @@ static void skd_send_fitmsg(struct skd_device *skdev,
*/
qcmd |= FIT_QCMD_MSGSIZE_64;
+ /* Make sure skd_msg_buf is written before the doorbell is triggered. */
+ smp_wmb();
+
SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
@@ -2260,6 +2263,9 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
qcmd = skspcl->mb_dma_address;
qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
+ /* Make sure skd_msg_buf is written before the doorbell is triggered. */
+ smp_wmb();
+
SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
@@ -4679,15 +4685,16 @@ static void skd_free_disk(struct skd_device *skdev)
{
struct gendisk *disk = skdev->disk;
- if (disk != NULL) {
- struct request_queue *q = disk->queue;
+ if (disk && (disk->flags & GENHD_FL_UP))
+ del_gendisk(disk);
- if (disk->flags & GENHD_FL_UP)
- del_gendisk(disk);
- if (q)
- blk_cleanup_queue(q);
- put_disk(disk);
+ if (skdev->queue) {
+ blk_cleanup_queue(skdev->queue);
+ skdev->queue = NULL;
+ disk->queue = NULL;
}
+
+ put_disk(disk);
skdev->disk = NULL;
}
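
Both smp_wmb() hunks above order the FIT message writes ahead of the SKD_WRITEQ() doorbell write, so a device that wakes on the doorbell never fetches a half-written message. A rough userspace analogy, with a C11 release store standing in for the barrier-plus-MMIO pair (struct layout and names hypothetical):

#include <stdatomic.h>
#include <stdint.h>

struct ring {
        uint64_t msg[64];
        _Atomic uint32_t doorbell;
};

/* The release store publishes msg[slot]: a consumer that acquires
 * doorbell is guaranteed to also observe the payload written first. */
void ring_send(struct ring *r, uint32_t slot, uint64_t payload)
{
        r->msg[slot] = payload;
        atomic_store_explicit(&r->doorbell, slot, memory_order_release);
}
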
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index cd6b141b9825..7bb8055bd10c 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -333,6 +333,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3410), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3416), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8821AE Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c
index c963e4658c07..5e455878ac3e 100644
--- a/drivers/char/diag/diag_debugfs.c
+++ b/drivers/char/diag/diag_debugfs.c
@@ -52,7 +52,7 @@ static int diag_dbgfs_bridgeinfo_index;
static int diag_dbgfs_finished;
static int diag_dbgfs_dci_data_index;
static int diag_dbgfs_dci_finished;
-
+static struct mutex diag_dci_dbgfs_mutex;
static ssize_t diag_dbgfs_read_status(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
@@ -159,6 +159,7 @@ static ssize_t diag_dbgfs_read_dcistats(struct file *file,
buf_size = ksize(buf);
bytes_remaining = buf_size;
+ mutex_lock(&diag_dci_dbgfs_mutex);
if (diag_dbgfs_dci_data_index == 0) {
bytes_written =
scnprintf(buf, buf_size,
@@ -214,8 +215,8 @@ static ssize_t diag_dbgfs_read_dcistats(struct file *file,
}
temp_data++;
}
-
diag_dbgfs_dci_data_index = (i >= DIAG_DCI_DEBUG_CNT) ? 0 : i + 1;
+ mutex_unlock(&diag_dci_dbgfs_mutex);
bytes_written = simple_read_from_buffer(ubuf, count, ppos, buf,
bytes_in_buf);
kfree(buf);
@@ -1186,6 +1187,7 @@ int diag_debugfs_init(void)
pr_warn("diag: could not allocate memory for dci debug info\n");
mutex_init(&dci_stat_mutex);
+ mutex_init(&diag_dci_dbgfs_mutex);
return 0;
err:
kfree(dci_traffic);
@@ -1202,6 +1204,7 @@ void diag_debugfs_cleanup(void)
kfree(dci_traffic);
mutex_destroy(&dci_stat_mutex);
+ mutex_destroy(&diag_dci_dbgfs_mutex);
}
#else
int diag_debugfs_init(void) { return 0; }
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index e206d9db4d7d..e1e86f6e74dc 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -555,6 +555,11 @@ static int diag_cmd_get_ssid_range(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
if (!diag_apps_responds())
return 0;
@@ -656,7 +661,11 @@ static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
-
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
if (!diag_apps_responds())
return 0;
@@ -669,6 +678,12 @@ static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len,
rsp.status = MSG_STATUS_FAIL;
rsp.padding = 0;
mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->msg_mask_lock);
+ return -EINVAL;
+ }
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
if ((req->ssid_first < mask->ssid_first) ||
(req->ssid_first > mask->ssid_last_tools)) {
@@ -714,11 +729,23 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
req = (struct diag_msg_build_mask_t *)src_buf;
mutex_lock(&mask_info->lock);
mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
+ return -EINVAL;
+ }
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
if (i < (driver->msg_mask_tbl_count - 1)) {
mask_next = mask;
@@ -831,6 +858,11 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
req = (struct diag_msg_config_rsp_t *)src_buf;
@@ -838,6 +870,13 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
+ return -EINVAL;
+ }
mask_info->status = (req->rt_mask) ? DIAG_CTRL_MASK_ALL_ENABLED :
DIAG_CTRL_MASK_ALL_DISABLED;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
@@ -931,7 +970,11 @@ static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
-
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
req = (struct diag_event_mask_config_t *)src_buf;
mask_len = EVENT_COUNT_TO_BYTES(req->num_bits);
if (mask_len <= 0 || mask_len > event_mask.mask_len) {
@@ -989,6 +1032,11 @@ static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
toggle = *(src_buf + 1);
mutex_lock(&mask_info->lock);
@@ -1046,6 +1094,11 @@ static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
if (!diag_apps_responds())
return 0;
@@ -1065,6 +1118,11 @@ static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len,
write_len += rsp_header_len;
log_item = (struct diag_log_mask_t *)mask_info->ptr;
+ if (!log_item->ptr) {
+ pr_err("diag: Invalid input in %s, mask: %pK\n",
+ __func__, log_item);
+ return -EINVAL;
+ }
for (i = 0; i < MAX_EQUIP_ID; i++, log_item++) {
if (log_item->equip_id != req->equip_id)
continue;
@@ -1172,11 +1230,20 @@ static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
req = (struct diag_log_config_req_t *)src_buf;
read_len += req_header_len;
mask = (struct diag_log_mask_t *)mask_info->ptr;
-
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ return -EINVAL;
+ }
if (req->equip_id >= MAX_EQUIP_ID) {
pr_err("diag: In %s, Invalid logging mask request, equip_id: %d\n",
__func__, req->equip_id);
@@ -1294,9 +1361,17 @@ static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
-
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
mask = (struct diag_log_mask_t *)mask_info->ptr;
-
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ return -EINVAL;
+ }
for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
mutex_lock(&mask->lock);
memset(mask->ptr, 0, mask->range);
@@ -1562,7 +1637,7 @@ static int __diag_mask_init(struct diag_mask_info *mask_info, int mask_len,
static void __diag_mask_exit(struct diag_mask_info *mask_info)
{
- if (!mask_info)
+ if (!mask_info || !mask_info->ptr)
return;
mutex_lock(&mask_info->lock);
@@ -1619,11 +1694,17 @@ void diag_log_mask_free(struct diag_mask_info *mask_info)
int i;
struct diag_log_mask_t *mask = NULL;
- if (!mask_info)
+ if (!mask_info || !mask_info->ptr)
return;
mutex_lock(&mask_info->lock);
mask = (struct diag_log_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&mask_info->lock);
+ return;
+ }
for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
kfree(mask->ptr);
mask->ptr = NULL;
@@ -1698,11 +1779,18 @@ void diag_msg_mask_free(struct diag_mask_info *mask_info)
int i;
struct diag_msg_mask_t *mask = NULL;
- if (!mask_info)
+ if (!mask_info || !mask_info->ptr)
return;
mutex_lock(&mask_info->lock);
mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
+ return;
+ }
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
kfree(mask->ptr);
mask->ptr = NULL;
@@ -1869,6 +1957,11 @@ int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
if (!mask_info)
return -EIO;
+ if (!mask_info->ptr || !mask_info->update_buf) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK, mask_info->update_buf: %pK\n",
+ __func__, mask_info->ptr, mask_info->update_buf);
+ return -EINVAL;
+ }
mutex_lock(&driver->diag_maskclear_mutex);
if (driver->mask_clear) {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
@@ -1881,6 +1974,13 @@ int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)(mask_info->ptr);
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
+ return -EINVAL;
+ }
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
ptr = mask_info->update_buf;
len = 0;
@@ -1941,8 +2041,20 @@ int diag_copy_to_user_log_mask(char __user *buf, size_t count,
if (!mask_info)
return -EIO;
+ if (!mask_info->ptr || !mask_info->update_buf) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK, mask_info->update_buf: %pK\n",
+ __func__, mask_info->ptr, mask_info->update_buf);
+ return -EINVAL;
+ }
+
mutex_lock(&mask_info->lock);
mask = (struct diag_log_mask_t *)(mask_info->ptr);
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&mask_info->lock);
+ return -EINVAL;
+ }
for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
ptr = mask_info->update_buf;
len = 0;
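
The recurring hunk shape in diag_masks.c is worth calling out: reject a NULL mask_info->ptr before taking any lock, then re-check mask->ptr once the locks are held and, on failure, release them in reverse acquisition order. The pattern condensed, with pthreads standing in for the kernel mutexes (all names hypothetical):

#include <errno.h>
#include <pthread.h>

struct msg_mask { void *ptr; };
struct mask_info {
        pthread_mutex_t lock;
        struct msg_mask *ptr;
};

static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;

int update_masks(struct mask_info *info)
{
        struct msg_mask *mask;

        if (!info || !info->ptr)        /* cheap checks before locking */
                return -EINVAL;

        pthread_mutex_lock(&info->lock);
        pthread_mutex_lock(&tbl_lock);
        mask = info->ptr;
        if (!mask->ptr) {               /* only visible under the locks */
                pthread_mutex_unlock(&tbl_lock);
                pthread_mutex_unlock(&info->lock);
                return -EINVAL;
        }
        /* ... modify the mask table ... */
        pthread_mutex_unlock(&tbl_lock);
        pthread_mutex_unlock(&info->lock);
        return 0;
}
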
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index a27f12883c8d..986aeed169f5 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -155,15 +155,20 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx)
return -EIO;
ch = &diag_md[id];
+ if (!ch)
+ return -EINVAL;
spin_lock_irqsave(&ch->lock, flags);
for (i = 0; i < ch->num_tbl_entries && !found; i++) {
if (ch->tbl[i].buf != buf)
continue;
found = 1;
- pr_err_ratelimited("diag: trying to write the same buffer buf: %pK, ctxt: %d len: %d at i: %d back to the table, proc: %d, mode: %d\n",
- buf, ctx, ch->tbl[i].len,
- i, id, driver->logging_mode);
+ pr_err_ratelimited("diag: trying to write the same buffer buf: %pK, len: %d, back to the table for p: %d, t: %d, buf_num: %d, proc: %d, i: %d\n",
+ buf, ch->tbl[i].len, GET_BUF_PERIPHERAL(ctx),
+ GET_BUF_TYPE(ctx), GET_BUF_NUM(ctx), id, i);
+ ch->tbl[i].buf = NULL;
+ ch->tbl[i].len = 0;
+ ch->tbl[i].ctx = 0;
}
spin_unlock_irqrestore(&ch->lock, flags);
@@ -227,7 +232,7 @@ int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size,
ch = &diag_md[i];
for (j = 0; j < ch->num_tbl_entries && !err; j++) {
entry = &ch->tbl[j];
- if (entry->len <= 0)
+ if (entry->len <= 0 || entry->buf == NULL)
continue;
peripheral = diag_md_get_peripheral(entry->ctx);
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 7dc2eabf1bb9..ef08f939c36e 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1572,6 +1572,9 @@ static int diagfwd_mux_write_done(unsigned char *buf, int len, int buf_ctxt,
switch (type) {
case TYPE_DATA:
if (peripheral >= 0 && peripheral < NUM_PERIPHERALS) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Marking buffer as free after write done p: %d, t: %d, buf_num: %d\n",
+ peripheral, type, num);
diagfwd_write_done(peripheral, type, num);
diag_ws_on_copy(DIAG_WS_MUX);
} else if (peripheral == APPS_DATA) {
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index 0f94bab3bf84..b7dff47623de 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -31,6 +31,7 @@
#include "diag_mux.h"
#include "diag_ipc_logging.h"
#include "diagfwd_glink.h"
+#include "diag_memorydevice.h"
struct data_header {
uint8_t control_char;
@@ -188,8 +189,10 @@ static int diag_add_hdlc_encoding(unsigned char *dest_buf, int *dest_len,
static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len)
{
+ int i, ctx = 0;
uint32_t max_size = 0;
unsigned char *temp_buf = NULL;
+ struct diag_md_info *ch = NULL;
if (!buf || len == 0)
return -EINVAL;
@@ -203,11 +206,31 @@ static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len)
}
if (buf->len < max_size) {
+ if (driver->logging_mode == DIAG_MEMORY_DEVICE_MODE) {
+ ch = &diag_md[DIAG_LOCAL_PROC];
+ for (i = 0; ch != NULL &&
+ i < ch->num_tbl_entries; i++) {
+ if (ch->tbl[i].buf == buf->data) {
+ ctx = ch->tbl[i].ctx;
+ ch->tbl[i].buf = NULL;
+ ch->tbl[i].len = 0;
+ ch->tbl[i].ctx = 0;
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Flushed mdlog table entries before reallocating data buffer, p:%d, t:%d\n",
+ GET_BUF_PERIPHERAL(ctx),
+ GET_BUF_TYPE(ctx));
+ break;
+ }
+ }
+ }
temp_buf = krealloc(buf->data, max_size +
APF_DIAG_PADDING,
GFP_KERNEL);
if (!temp_buf)
return -ENOMEM;
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Reallocated data buffer: %pK with size: %d\n",
+ temp_buf, max_size);
buf->data = temp_buf;
buf->len = max_size;
}
@@ -360,6 +383,10 @@ end:
mutex_unlock(&fwd_info->data_mutex);
mutex_unlock(&driver->hdlc_disable_mutex);
if (buf) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Marking buffer as free p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type,
+ GET_BUF_NUM(buf->ctxt));
diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
GET_BUF_NUM(buf->ctxt));
}
@@ -572,6 +599,10 @@ static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
end:
diag_ws_release();
if (temp_ptr_cpd) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Marking buffer as free p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type,
+ GET_BUF_NUM(temp_ptr_cpd->ctxt));
diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
GET_BUF_NUM(temp_ptr_cpd->ctxt));
}
@@ -692,6 +723,10 @@ end:
mutex_unlock(&fwd_info->data_mutex);
mutex_unlock(&driver->hdlc_disable_mutex);
if (temp_buf) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Marking buffer as free p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type,
+ GET_BUF_NUM(temp_buf->ctxt));
diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
GET_BUF_NUM(temp_buf->ctxt));
}
@@ -772,6 +807,16 @@ static void diagfwd_reset_buffers(struct diagfwd_info *fwd_info,
else if (fwd_info->buf_2 && fwd_info->buf_2->data_raw == buf)
atomic_set(&fwd_info->buf_2->in_busy, 0);
}
+ if (fwd_info->buf_1 && !atomic_read(&(fwd_info->buf_1->in_busy))) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
+ if (fwd_info->buf_2 && !atomic_read(&(fwd_info->buf_2->in_busy))) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 2 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
}
int diagfwd_peripheral_init(void)
@@ -1158,10 +1203,18 @@ static void __diag_fwd_open(struct diagfwd_info *fwd_info)
if ((driver->logging_mode != DIAG_USB_MODE) ||
driver->usb_connected) {
- if (fwd_info->buf_1)
+ if (fwd_info->buf_1) {
atomic_set(&fwd_info->buf_1->in_busy, 0);
- if (fwd_info->buf_2)
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
+ if (fwd_info->buf_2) {
atomic_set(&fwd_info->buf_2->in_busy, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 2 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
}
if (fwd_info->p_ops && fwd_info->p_ops->open)
@@ -1285,10 +1338,18 @@ int diagfwd_channel_close(struct diagfwd_info *fwd_info)
if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->close)
fwd_info->c_ops->close(fwd_info);
- if (fwd_info->buf_1 && fwd_info->buf_1->data)
+ if (fwd_info->buf_1 && fwd_info->buf_1->data) {
atomic_set(&fwd_info->buf_1->in_busy, 0);
- if (fwd_info->buf_2 && fwd_info->buf_2->data)
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
+ if (fwd_info->buf_2 && fwd_info->buf_2->data) {
atomic_set(&fwd_info->buf_2->in_busy, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 2 for core PD is marked free, p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
+ }
for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
if (fwd_info->buf_ptr[i])
@@ -1314,6 +1375,9 @@ int diagfwd_channel_read_done(struct diagfwd_info *fwd_info,
* in_busy flags. No need to queue read in this case.
*/
if (len == 0) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Read Length is 0, resetting the diag buffers p: %d, t: %d\n",
+ fwd_info->peripheral, fwd_info->type);
diagfwd_reset_buffers(fwd_info, buf);
diag_ws_release();
return 0;
@@ -1326,7 +1390,7 @@ int diagfwd_channel_read_done(struct diagfwd_info *fwd_info,
return 0;
}
-void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
+void diagfwd_write_done(uint8_t peripheral, uint8_t type, int buf_num)
{
struct diagfwd_info *fwd_info = NULL;
@@ -1334,8 +1398,10 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
return;
fwd_info = &peripheral_info[type][peripheral];
+ if (!fwd_info)
+ return;
- if (ctxt == 1 && fwd_info->buf_1) {
+ if (buf_num == 1 && fwd_info->buf_1) {
/* Buffer 1 for core PD is freed */
fwd_info->cpd_len_1 = 0;
@@ -1349,7 +1415,12 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
} else {
atomic_set(&fwd_info->buf_1->in_busy, 0);
}
- } else if (ctxt == 2 && fwd_info->buf_2) {
+ if (!atomic_read(&(fwd_info->buf_1->in_busy))) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type, buf_num);
+ }
+ } else if (buf_num == 2 && fwd_info->buf_2) {
/* Buffer 2 for core PD is freed */
fwd_info->cpd_len_2 = 0;
@@ -1363,8 +1434,12 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
} else {
atomic_set(&fwd_info->buf_2->in_busy, 0);
}
-
- } else if (ctxt == 3 && fwd_info->buf_upd_1_a) {
+ if (!atomic_read(&(fwd_info->buf_2->in_busy))) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 2 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type, buf_num);
+ }
+ } else if (buf_num == 3 && fwd_info->buf_upd_1_a && fwd_info->buf_1) {
/* Buffer 1 for user pd 1 is freed */
atomic_set(&fwd_info->buf_upd_1_a->in_busy, 0);
@@ -1382,9 +1457,14 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
if (!fwd_info->cpd_len_1)
atomic_set(&fwd_info->buf_1->in_busy, 0);
}
+ if (!atomic_read(&(fwd_info->buf_1->in_busy))) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type, buf_num);
+ }
fwd_info->upd_len_1_a = 0;
- } else if (ctxt == 4 && fwd_info->buf_upd_1_b) {
+ } else if (buf_num == 4 && fwd_info->buf_upd_1_b && fwd_info->buf_2) {
/* Buffer 2 for user pd 1 is freed */
atomic_set(&fwd_info->buf_upd_1_b->in_busy, 0);
if (peripheral == PERIPHERAL_LPASS) {
@@ -1401,34 +1481,46 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
if (!fwd_info->cpd_len_2)
atomic_set(&fwd_info->buf_2->in_busy, 0);
}
+ if (!atomic_read(&(fwd_info->buf_2->in_busy))) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 2 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type, buf_num);
+ }
fwd_info->upd_len_1_b = 0;
- } else if (ctxt == 5 && fwd_info->buf_upd_2_a) {
+ } else if (buf_num == 5 && fwd_info->buf_upd_2_a && fwd_info->buf_1) {
/* Buffer 1 for user pd 2 is freed */
atomic_set(&fwd_info->buf_upd_2_a->in_busy, 0);
/* if no data is pending in the cpd and the other user pd,
* free the core pd buffer for LPASS
*/
if (!fwd_info->cpd_len_1 &&
- !fwd_info->upd_len_1_a)
+ !fwd_info->upd_len_1_a) {
atomic_set(&fwd_info->buf_1->in_busy, 0);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 1 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type, buf_num);
+ }
fwd_info->upd_len_2_a = 0;
- } else if (ctxt == 6 && fwd_info->buf_upd_2_b) {
+ } else if (buf_num == 6 && fwd_info->buf_upd_2_b && fwd_info->buf_2) {
/* Buffer 2 for user pd 2 is freed */
atomic_set(&fwd_info->buf_upd_2_b->in_busy, 0);
/* if no data is pending in the cpd and the other user pd,
* free the core pd buffer for LPASS
*/
if (!fwd_info->cpd_len_2 &&
- !fwd_info->upd_len_1_b)
+ !fwd_info->upd_len_1_b) {
atomic_set(&fwd_info->buf_2->in_busy, 0);
-
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "Buffer 2 for core PD is marked free, p: %d, t: %d, buf_num: %d\n",
+ fwd_info->peripheral, fwd_info->type, buf_num);
+ }
fwd_info->upd_len_2_b = 0;
} else
- pr_err("diag: In %s, invalid ctxt %d\n", __func__, ctxt);
+ pr_err("diag: In %s, invalid buf_num: %d\n", __func__, buf_num);
diagfwd_queue_read(fwd_info);
}
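
A note on the check_bufsize_for_encoding() hunk above: the diag_md table entries that still reference buf->data must be scrubbed before krealloc() is allowed to move the allocation, or the table would be left holding a dangling pointer into freed memory. The core idiom in standard C (names hypothetical):

#include <stdlib.h>

struct tbl_entry { void *buf; int len; };

/* Drop table references to the old address, then grow the buffer.
 * A NULL return means the old buffer is still valid and untouched. */
void *grow_buf(struct tbl_entry *tbl, int n, void *buf, size_t new_size)
{
        int i;

        for (i = 0; i < n; i++) {
                if (tbl[i].buf == buf) {
                        tbl[i].buf = NULL;
                        tbl[i].len = 0;
                }
        }
        return realloc(buf, new_size);
}
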
diff --git a/drivers/char/diag/diagfwd_peripheral.h b/drivers/char/diag/diagfwd_peripheral.h
index eda70dcfdcd9..00621c178906 100644
--- a/drivers/char/diag/diagfwd_peripheral.h
+++ b/drivers/char/diag/diagfwd_peripheral.h
@@ -117,7 +117,7 @@ int diagfwd_cntl_register(uint8_t transport, uint8_t peripheral, void *ctxt,
void diagfwd_deregister(uint8_t peripheral, uint8_t type, void *ctxt);
int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len);
-void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt);
+void diagfwd_write_done(uint8_t peripheral, uint8_t type, int buf_num);
void diagfwd_buffers_init(struct diagfwd_info *fwd_info);
/*
diff --git a/drivers/clk/msm/clock-dummy.c b/drivers/clk/msm/clock-dummy.c
index caa6a6ab7565..e874fccc7f6c 100644
--- a/drivers/clk/msm/clock-dummy.c
+++ b/drivers/clk/msm/clock-dummy.c
@@ -64,7 +64,6 @@ struct clk dummy_clk = {
static void *dummy_clk_dt_parser(struct device *dev, struct device_node *np)
{
struct clk *c;
- u32 rate;
c = devm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c) {
@@ -73,9 +72,6 @@ static void *dummy_clk_dt_parser(struct device *dev, struct device_node *np)
}
c->ops = &clk_ops_dummy;
- if (!of_property_read_u32(np, "clock-frequency", &rate))
- c->rate = rate;
-
return msmclk_generic_clk_init(dev, np, c);
}
MSMCLK_PARSER(dummy_clk_dt_parser, "qcom,dummy-clk", 0);
@@ -83,6 +79,11 @@ MSMCLK_PARSER(dummy_clk_dt_parser, "qcom,dummy-clk", 0);
static struct clk *of_dummy_get(struct of_phandle_args *clkspec,
void *data)
{
+ u32 rate;
+
+ if (!of_property_read_u32(clkspec->np, "clock-frequency", &rate))
+ dummy_clk.rate = rate;
+
return &dummy_clk;
}
diff --git a/drivers/clk/msm/mdss/mdss-pll.h b/drivers/clk/msm/mdss/mdss-pll.h
index 0120d71f0daf..7aa8b0d6c051 100644
--- a/drivers/clk/msm/mdss/mdss-pll.h
+++ b/drivers/clk/msm/mdss/mdss-pll.h
@@ -70,6 +70,7 @@ struct dfps_info {
struct dfps_panel_info panel_dfps;
struct dfps_codes_info codes_dfps[DFPS_MAX_NUM_OF_FRAME_RATES];
void *dfps_fb_base;
+ uint32_t chip_serial;
};
struct mdss_pll_resources {
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index f951f911786e..a72ae98b4838 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -279,6 +279,13 @@ static int cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = transition_latency;
+ /*
+ * Android: set default parameters for parity between schedutil and
+ * schedfreq
+ */
+ policy->up_transition_delay_us = transition_latency / NSEC_PER_USEC;
+ policy->down_transition_delay_us = 50000; /* 50ms */
+
return 0;
out_free_cpufreq_table:
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index abbee61c99c8..ae65fbc3ceac 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -1825,6 +1825,7 @@ struct cpufreq_governor cpufreq_gov_interactive = {
static int __init cpufreq_interactive_init(void)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+ int ret = 0;
spin_lock_init(&speedchange_cpumask_lock);
mutex_init(&gov_lock);
@@ -1841,7 +1842,12 @@ static int __init cpufreq_interactive_init(void)
/* NB: wake up so the thread does not look hung to the freezer */
wake_up_process_no_notif(speedchange_task);
- return cpufreq_register_governor(&cpufreq_gov_interactive);
+ ret = cpufreq_register_governor(&cpufreq_gov_interactive);
+ if (ret) {
+ kthread_stop(speedchange_task);
+ put_task_struct(speedchange_task);
+ }
+ return ret;
}
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
index 490f8d9ddb9f..68b6a26f00b8 100644
--- a/drivers/crypto/msm/ice.c
+++ b/drivers/crypto/msm/ice.c
@@ -869,7 +869,7 @@ static int qcom_ice_restore_key_config(struct ice_device *ice_dev)
static int qcom_ice_init_clocks(struct ice_device *ice)
{
int ret = -EINVAL;
- struct ice_clk_info *clki;
+ struct ice_clk_info *clki = NULL;
struct device *dev = ice->pdev;
struct list_head *head = &ice->clk_list_head;
@@ -913,7 +913,7 @@ out:
static int qcom_ice_enable_clocks(struct ice_device *ice, bool enable)
{
int ret = 0;
- struct ice_clk_info *clki;
+ struct ice_clk_info *clki = NULL;
struct device *dev = ice->pdev;
struct list_head *head = &ice->clk_list_head;
@@ -1590,12 +1590,14 @@ struct platform_device *qcom_ice_get_pdevice(struct device_node *node)
if (ice_dev->pdev->of_node == node) {
pr_info("%s: found ice device %pK\n", __func__,
ice_dev);
+ ice_pdev = to_platform_device(ice_dev->pdev);
break;
}
}
- ice_pdev = to_platform_device(ice_dev->pdev);
- pr_info("%s: matching platform device %pK\n", __func__, ice_pdev);
+ if (ice_pdev)
+ pr_info("%s: matching platform device %pK\n", __func__,
+ ice_pdev);
out:
return ice_pdev;
}
@@ -1615,11 +1617,11 @@ static struct ice_device *get_ice_device_from_storage_type
if (!strcmp(ice_dev->ice_instance_type, storage_type)) {
pr_debug("%s: found ice device %pK\n",
__func__, ice_dev);
- break;
+ return ice_dev;
}
}
out:
- return ice_dev;
+ return NULL;
}
static int enable_ice_setup(struct ice_device *ice_dev)
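
The get_ice_device_from_storage_type() hunk fixes a classic list_for_each_entry() pitfall: the cursor is never NULL after the loop ends, it points at a bogus container_of() of the list head, so the old break-then-return handed back garbage whenever nothing matched. Returning the match from inside the loop and NULL afterwards is the safe shape; the same rule on a plain linked list:

#include <stddef.h>
#include <string.h>

struct node {
        const char *type;
        struct node *next;
};

/* Return from inside the loop on a hit; never reuse the cursor
 * after the loop as a "found" indicator. */
struct node *find_node(struct node *head, const char *type)
{
        struct node *n;

        for (n = head; n; n = n->next)
                if (!strcmp(n->type, type))
                        return n;
        return NULL;
}
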
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index 20bf034bb193..a55f236961b8 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -273,8 +273,6 @@ void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
if (authdata) {
handle->sha_ctxt.auth_data[0] = auth32[0];
handle->sha_ctxt.auth_data[1] = auth32[1];
- handle->sha_ctxt.auth_data[2] = auth32[2];
- handle->sha_ctxt.auth_data[3] = auth32[3];
}
tasklet_schedule(&pdev->done_tasklet);
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index 00416f23b5cb..dba5c0ea0827 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -36,7 +36,10 @@ struct adv7511 {
bool edid_read;
wait_queue_head_t wq;
+ struct work_struct hpd_work;
+
struct drm_encoder *encoder;
+ struct drm_connector connector;
bool embedded_sync;
enum adv7511_sync_polarity vsync_polarity;
@@ -48,6 +51,10 @@ struct adv7511 {
struct gpio_desc *gpio_pd;
};
+static const int edid_i2c_addr = 0x7e;
+static const int packet_i2c_addr = 0x70;
+static const int cec_i2c_addr = 0x78;
+
static struct adv7511 *encoder_to_adv7511(struct drm_encoder *encoder)
{
return to_encoder_slave(encoder)->slave_priv;
@@ -362,12 +369,19 @@ static void adv7511_power_on(struct adv7511 *adv7511)
{
adv7511->current_edid_segment = -1;
- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
- ADV7511_INT0_EDID_READY);
- regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
- ADV7511_INT1_DDC_ERROR);
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN, 0);
+ if (adv7511->i2c_main->irq) {
+ /*
+ * Documentation says the INT_ENABLE registers are reset in
+ * POWER_DOWN mode. My 7511w preserved the bits, however.
+ * Still, let's be safe and stick to the documentation.
+ */
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+ ADV7511_INT1_DDC_ERROR);
+ }
/*
* Per spec it is allowed to pulse the HDP signal to indicate that the
@@ -422,7 +436,27 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
return false;
}
-static int adv7511_irq_process(struct adv7511 *adv7511)
+static void adv7511_hpd_work(struct work_struct *work)
+{
+ struct adv7511 *adv7511 = container_of(work, struct adv7511, hpd_work);
+ enum drm_connector_status status;
+ unsigned int val;
+ int ret;
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val);
+ if (ret < 0)
+ status = connector_status_disconnected;
+ else if (val & ADV7511_STATUS_HPD)
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+
+ if (adv7511->connector.status != status) {
+ adv7511->connector.status = status;
+ drm_kms_helper_hotplug_event(adv7511->connector.dev);
+ }
+}
+
+static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
{
unsigned int irq0, irq1;
int ret;
@@ -438,8 +472,8 @@ static int adv7511_irq_process(struct adv7511 *adv7511)
regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
- if (irq0 & ADV7511_INT0_HDP && adv7511->encoder)
- drm_helper_hpd_irq_event(adv7511->encoder->dev);
+ if (process_hpd && irq0 & ADV7511_INT0_HDP && adv7511->encoder)
+ schedule_work(&adv7511->hpd_work);
if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
adv7511->edid_read = true;
@@ -456,7 +490,7 @@ static irqreturn_t adv7511_irq_handler(int irq, void *devid)
struct adv7511 *adv7511 = devid;
int ret;
- ret = adv7511_irq_process(adv7511);
+ ret = adv7511_irq_process(adv7511, true);
return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
}
@@ -473,7 +507,7 @@ static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
adv7511->edid_read, msecs_to_jiffies(timeout));
} else {
for (; timeout > 0; timeout -= 25) {
- ret = adv7511_irq_process(adv7511);
+ ret = adv7511_irq_process(adv7511, false);
if (ret < 0)
break;
@@ -567,13 +601,18 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
/* Reading the EDID only works if the device is powered */
if (!adv7511->powered) {
- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
- ADV7511_INT0_EDID_READY);
- regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
- ADV7511_INT1_DDC_ERROR);
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN, 0);
+ if (adv7511->i2c_main->irq) {
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+ ADV7511_INT1_DDC_ERROR);
+ }
adv7511->current_edid_segment = -1;
+ /* Reset the EDID_I2C_ADDR register as it might be cleared */
+ regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR,
+ edid_i2c_addr);
}
edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
@@ -849,10 +888,6 @@ static int adv7511_parse_dt(struct device_node *np,
return 0;
}
-static const int edid_i2c_addr = 0x7e;
-static const int packet_i2c_addr = 0x70;
-static const int cec_i2c_addr = 0x78;
-
static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
{
struct adv7511_link_config link_config;
@@ -913,6 +948,8 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
if (!adv7511->i2c_edid)
return -ENOMEM;
+ INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work);
+
if (i2c->irq) {
init_waitqueue_head(&adv7511->wq);
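
The hpd_work added above exists because drm_kms_helper_hotplug_event() and the regmap I2C status read may sleep, which is not allowed in hard-IRQ context; the interrupt handler therefore only schedules a work item. The bare bones of that deferral, as a kernel-style sketch (the context struct and names are assumed, not this driver's actual layout; INIT_WORK() must run at probe time, as the patch does):

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct ctx {
        struct work_struct hpd_work;
};

static void hpd_work_fn(struct work_struct *work)
{
        struct ctx *c = container_of(work, struct ctx, hpd_work);

        /* safe to sleep here: I2C status reads, hotplug notification */
        (void)c;
}

static irqreturn_t irq_handler(int irq, void *devid)
{
        struct ctx *c = devid;

        schedule_work(&c->hpd_work);    /* no sleeping in IRQ context */
        return IRQ_HANDLED;
}
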
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index cc91ae832ffb..6fd7b50c5747 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -635,7 +635,8 @@ hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
"enabling oneshot unclaimed register reporting. "
"Please use i915.mmio_debug=N for more information.\n");
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
- i915.mmio_debug = mmio_debug_once--;
+ i915.mmio_debug = mmio_debug_once;
+ mmio_debug_once = false;
}
}
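
The intel_uncore.c change replaces "i915.mmio_debug = mmio_debug_once--;". Since the new code assigns mmio_debug_once = false, the flag is a bool, and post-decrement on C's _Bool wraps instead of saturating, so the "one shot" quietly re-armed itself on every other pass. A two-line demo of the wrap:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        bool once = true;

        printf("%d", once--);   /* prints 1; once becomes 0       */
        printf("%d", once--);   /* prints 0; once wraps back to 1 */
        printf("%d\n", once);   /* prints 1: the flag is re-armed */
        return 0;
}
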
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index c085e173232b..049478fd9bcb 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -406,11 +406,9 @@ static const unsigned int a3xx_registers[] = {
#ifdef CONFIG_DEBUG_FS
static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
- gpu->funcs->pm_resume(gpu);
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A3XX_RBBM_STATUS));
adreno_show(gpu, m);
- gpu->funcs->pm_suspend(gpu);
}
#endif
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 624c2a87d593..45c83fbe20e1 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -443,13 +443,9 @@ static const unsigned int a4xx_registers[] = {
#ifdef CONFIG_DEBUG_FS
static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
- gpu->funcs->pm_resume(gpu);
-
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A4XX_RBBM_STATUS));
-
adreno_show(gpu, m);
- gpu->funcs->pm_suspend(gpu);
}
#endif
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_counters.c b/drivers/gpu/drm/msm/adreno/a5xx_counters.c
index bc442039c308..1d5e61daca47 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_counters.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_counters.c
@@ -86,15 +86,15 @@ static int a5xx_counter_get(struct msm_gpu *gpu,
spin_unlock(&group->lock);
- if (group->funcs.enable)
- group->funcs.enable(gpu, group, empty);
+ if (pm_runtime_active(&gpu->pdev->dev) && group->funcs.enable)
+ group->funcs.enable(gpu, group, empty, false);
return empty;
}
/* The majority of the non-fixed counter selects can be programmed by the CPU */
static void a5xx_counter_enable_cpu(struct msm_gpu *gpu,
- struct adreno_counter_group *group, int counterid)
+ struct adreno_counter_group *group, int counterid, bool restore)
{
struct adreno_counter *counter = &group->counters[counterid];
@@ -102,15 +102,36 @@ static void a5xx_counter_enable_cpu(struct msm_gpu *gpu,
}
static void a5xx_counter_enable_pm4(struct msm_gpu *gpu,
- struct adreno_counter_group *group, int counterid)
+ struct adreno_counter_group *group, int counterid, bool restore)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = gpu->rb[0];
struct adreno_counter *counter = &group->counters[counterid];
+ /*
+ * If we are restoring the counters after a power cycle we can safely
+ * use AHB to enable the counters because we know SP/TP power collapse
+ * isn't active
+ */
+ if (restore) {
+ a5xx_counter_enable_cpu(gpu, group, counterid, true);
+ return;
+ }
+
mutex_lock(&gpu->dev->struct_mutex);
+ /*
+ * If HW init hasn't run yet we can use the CPU to program the counter
+ * (and indeed we must because we can't submit commands to the
+ * GPU if it isn't initialized)
+ */
+ if (gpu->needs_hw_init) {
+ a5xx_counter_enable_cpu(gpu, group, counterid, true);
+ mutex_unlock(&gpu->dev->struct_mutex);
+ return;
+ }
+
/* Turn off preemption for the duration of this command */
OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
OUT_RING(ring, 0x02);
@@ -168,7 +189,7 @@ static void a5xx_counter_enable_pm4(struct msm_gpu *gpu,
* registers
*/
static void a5xx_counter_enable_gpmu(struct msm_gpu *gpu,
- struct adreno_counter_group *group, int counterid)
+ struct adreno_counter_group *group, int counterid, bool restore)
{
struct adreno_counter *counter = &group->counters[counterid];
u32 reg;
@@ -192,7 +213,7 @@ static void a5xx_counter_enable_gpmu(struct msm_gpu *gpu,
/* VBIF counters are selectable but have their own programming process */
static void a5xx_counter_enable_vbif(struct msm_gpu *gpu,
- struct adreno_counter_group *group, int counterid)
+ struct adreno_counter_group *group, int counterid, bool restore)
{
struct adreno_counter *counter = &group->counters[counterid];
@@ -208,7 +229,7 @@ static void a5xx_counter_enable_vbif(struct msm_gpu *gpu,
* use
*/
static void a5xx_counter_enable_vbif_power(struct msm_gpu *gpu,
- struct adreno_counter_group *group, int counterid)
+ struct adreno_counter_group *group, int counterid, bool restore)
{
gpu_write(gpu, REG_A5XX_VBIF_PERF_PWR_CNT_CLR(counterid), 1);
gpu_write(gpu, REG_A5XX_VBIF_PERF_PWR_CNT_CLR(counterid), 0);
@@ -217,7 +238,7 @@ static void a5xx_counter_enable_vbif_power(struct msm_gpu *gpu,
/* GPMU always on counter needs to be enabled before use */
static void a5xx_counter_enable_alwayson_power(struct msm_gpu *gpu,
- struct adreno_counter_group *group, int counterid)
+ struct adreno_counter_group *group, int counterid, bool restore)
{
gpu_write(gpu, REG_A5XX_GPMU_ALWAYS_ON_COUNTER_RESET, 1);
}
@@ -228,6 +249,10 @@ static u64 a5xx_counter_read(struct msm_gpu *gpu,
if (counterid >= group->nr_counters)
return 0;
+ /* If the power is off, return the shadow value */
+ if (!pm_runtime_active(&gpu->pdev->dev))
+ return group->counters[counterid].value;
+
return gpu_read64(gpu, group->counters[counterid].lo,
group->counters[counterid].hi);
}
@@ -252,6 +277,77 @@ static void a5xx_counter_put(struct msm_gpu *gpu,
spin_unlock(&group->lock);
}
+static void a5xx_counter_group_enable(struct msm_gpu *gpu,
+ struct adreno_counter_group *group, bool restore)
+{
+ int i;
+
+ if (!group || !group->funcs.enable)
+ return;
+
+ spin_lock(&group->lock);
+
+ for (i = 0; i < group->nr_counters; i++) {
+ if (!group->counters[i].refcount)
+ continue;
+
+ group->funcs.enable(gpu, group, i, restore);
+ }
+ spin_unlock(&group->lock);
+}
+
+static void a5xx_counter_restore(struct msm_gpu *gpu,
+ struct adreno_counter_group *group)
+{
+ int i;
+
+ spin_lock(&group->lock);
+ for (i = 0; i < group->nr_counters; i++) {
+ struct adreno_counter *counter = &group->counters[i];
+ uint32_t bit, offset = counter->load_bit;
+
+ /* Don't load if the counter isn't active or can't be loaded */
+ if (!counter->refcount)
+ continue;
+
+ /*
+ * Each counter has a specific bit in one of four load command
+ * registers. Figure out which register / relative bit to use
+ * for the counter
+ */
+ bit = do_div(offset, 32);
+
+ /* Write the counter value */
+ gpu_write64(gpu, REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_LO,
+ REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_HI,
+ counter->value);
+
+ /*
+ * Write the load bit to load the counter - the command register
+ * will get reset to 0 after the operation completes
+ */
+ gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_LOAD_CMD0 + offset,
+ (1 << bit));
+ }
+ spin_unlock(&group->lock);
+}
+
+static void a5xx_counter_save(struct msm_gpu *gpu,
+ struct adreno_counter_group *group)
+{
+ int i;
+
+ spin_lock(&group->lock);
+ for (i = 0; i < group->nr_counters; i++) {
+ struct adreno_counter *counter = &group->counters[i];
+
+ if (counter->refcount > 0)
+ counter->value = gpu_read64(gpu, counter->lo,
+ counter->hi);
+ }
+ spin_unlock(&group->lock);
+}
+
static struct adreno_counter a5xx_counters_alwayson[1] = {
{ REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
REG_A5XX_RBBM_ALWAYSON_COUNTER_HI },
@@ -270,242 +366,242 @@ static struct adreno_counter a5xx_counters_ccu[] = {
static struct adreno_counter a5xx_counters_cmp[] = {
{ REG_A5XX_RBBM_PERFCTR_CMP_0_LO, REG_A5XX_RBBM_PERFCTR_CMP_0_HI,
- REG_A5XX_RB_PERFCTR_CMP_SEL_0 },
+ REG_A5XX_RB_PERFCTR_CMP_SEL_0, 94 },
{ REG_A5XX_RBBM_PERFCTR_CMP_1_LO, REG_A5XX_RBBM_PERFCTR_CMP_1_HI,
- REG_A5XX_RB_PERFCTR_CMP_SEL_1 },
+ REG_A5XX_RB_PERFCTR_CMP_SEL_1, 95 },
{ REG_A5XX_RBBM_PERFCTR_CMP_2_LO, REG_A5XX_RBBM_PERFCTR_CMP_2_HI,
- REG_A5XX_RB_PERFCTR_CMP_SEL_2 },
+ REG_A5XX_RB_PERFCTR_CMP_SEL_2, 96 },
{ REG_A5XX_RBBM_PERFCTR_CMP_3_LO, REG_A5XX_RBBM_PERFCTR_CMP_3_HI,
- REG_A5XX_RB_PERFCTR_CMP_SEL_3 },
+ REG_A5XX_RB_PERFCTR_CMP_SEL_3, 97 },
};
static struct adreno_counter a5xx_counters_cp[] = {
{ REG_A5XX_RBBM_PERFCTR_CP_0_LO, REG_A5XX_RBBM_PERFCTR_CP_0_HI,
- REG_A5XX_CP_PERFCTR_CP_SEL_0 },
+ REG_A5XX_CP_PERFCTR_CP_SEL_0, 0 },
{ REG_A5XX_RBBM_PERFCTR_CP_1_LO, REG_A5XX_RBBM_PERFCTR_CP_1_HI,
- REG_A5XX_CP_PERFCTR_CP_SEL_1 },
+ REG_A5XX_CP_PERFCTR_CP_SEL_1, 1},
{ REG_A5XX_RBBM_PERFCTR_CP_2_LO, REG_A5XX_RBBM_PERFCTR_CP_2_HI,
- REG_A5XX_CP_PERFCTR_CP_SEL_2 },
+ REG_A5XX_CP_PERFCTR_CP_SEL_2, 2 },
{ REG_A5XX_RBBM_PERFCTR_CP_3_LO, REG_A5XX_RBBM_PERFCTR_CP_3_HI,
- REG_A5XX_CP_PERFCTR_CP_SEL_3 },
+ REG_A5XX_CP_PERFCTR_CP_SEL_3, 3 },
{ REG_A5XX_RBBM_PERFCTR_CP_4_LO, REG_A5XX_RBBM_PERFCTR_CP_4_HI,
- REG_A5XX_CP_PERFCTR_CP_SEL_4 },
+ REG_A5XX_CP_PERFCTR_CP_SEL_4, 4 },
{ REG_A5XX_RBBM_PERFCTR_CP_5_LO, REG_A5XX_RBBM_PERFCTR_CP_5_HI,
- REG_A5XX_CP_PERFCTR_CP_SEL_5 },
+ REG_A5XX_CP_PERFCTR_CP_SEL_5, 5 },
{ REG_A5XX_RBBM_PERFCTR_CP_6_LO, REG_A5XX_RBBM_PERFCTR_CP_6_HI,
- REG_A5XX_CP_PERFCTR_CP_SEL_6 },
+ REG_A5XX_CP_PERFCTR_CP_SEL_6, 6 },
{ REG_A5XX_RBBM_PERFCTR_CP_7_LO, REG_A5XX_RBBM_PERFCTR_CP_7_HI,
- REG_A5XX_CP_PERFCTR_CP_SEL_7 },
+ REG_A5XX_CP_PERFCTR_CP_SEL_7, 7 },
};
static struct adreno_counter a5xx_counters_hlsq[] = {
{ REG_A5XX_RBBM_PERFCTR_HLSQ_0_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_0_HI,
- REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_0 },
+ REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_0, 28 },
{ REG_A5XX_RBBM_PERFCTR_HLSQ_1_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_1_HI,
- REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_1 },
+ REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_1, 29 },
{ REG_A5XX_RBBM_PERFCTR_HLSQ_2_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_2_HI,
- REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_2 },
+ REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_2, 30 },
{ REG_A5XX_RBBM_PERFCTR_HLSQ_3_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_3_HI,
- REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_3 },
+ REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_3, 31 },
{ REG_A5XX_RBBM_PERFCTR_HLSQ_4_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_4_HI,
- REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_4 },
+ REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_4, 32 },
{ REG_A5XX_RBBM_PERFCTR_HLSQ_5_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_5_HI,
- REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_5 },
+ REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_5, 33 },
{ REG_A5XX_RBBM_PERFCTR_HLSQ_6_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_6_HI,
- REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_6 },
+ REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_6, 34 },
{ REG_A5XX_RBBM_PERFCTR_HLSQ_7_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_7_HI,
- REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_7 },
+ REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_7, 35 },
};
static struct adreno_counter a5xx_counters_lrz[] = {
{ REG_A5XX_RBBM_PERFCTR_LRZ_0_LO, REG_A5XX_RBBM_PERFCTR_LRZ_0_HI,
- REG_A5XX_GRAS_PERFCTR_LRZ_SEL_0 },
+ REG_A5XX_GRAS_PERFCTR_LRZ_SEL_0, 90 },
{ REG_A5XX_RBBM_PERFCTR_LRZ_1_LO, REG_A5XX_RBBM_PERFCTR_LRZ_1_HI,
- REG_A5XX_GRAS_PERFCTR_LRZ_SEL_1 },
+ REG_A5XX_GRAS_PERFCTR_LRZ_SEL_1, 91 },
{ REG_A5XX_RBBM_PERFCTR_LRZ_2_LO, REG_A5XX_RBBM_PERFCTR_LRZ_2_HI,
- REG_A5XX_GRAS_PERFCTR_LRZ_SEL_2 },
+ REG_A5XX_GRAS_PERFCTR_LRZ_SEL_2, 92 },
{ REG_A5XX_RBBM_PERFCTR_LRZ_3_LO, REG_A5XX_RBBM_PERFCTR_LRZ_3_HI,
- REG_A5XX_GRAS_PERFCTR_LRZ_SEL_3 },
+ REG_A5XX_GRAS_PERFCTR_LRZ_SEL_3, 93 },
};
static struct adreno_counter a5xx_counters_pc[] = {
{ REG_A5XX_RBBM_PERFCTR_PC_0_LO, REG_A5XX_RBBM_PERFCTR_PC_0_HI,
- REG_A5XX_PC_PERFCTR_PC_SEL_0 },
+ REG_A5XX_PC_PERFCTR_PC_SEL_0, 12 },
{ REG_A5XX_RBBM_PERFCTR_PC_1_LO, REG_A5XX_RBBM_PERFCTR_PC_1_HI,
- REG_A5XX_PC_PERFCTR_PC_SEL_1 },
+ REG_A5XX_PC_PERFCTR_PC_SEL_1, 13 },
{ REG_A5XX_RBBM_PERFCTR_PC_2_LO, REG_A5XX_RBBM_PERFCTR_PC_2_HI,
- REG_A5XX_PC_PERFCTR_PC_SEL_2 },
+ REG_A5XX_PC_PERFCTR_PC_SEL_2, 14 },
{ REG_A5XX_RBBM_PERFCTR_PC_3_LO, REG_A5XX_RBBM_PERFCTR_PC_3_HI,
- REG_A5XX_PC_PERFCTR_PC_SEL_3 },
+ REG_A5XX_PC_PERFCTR_PC_SEL_3, 15 },
{ REG_A5XX_RBBM_PERFCTR_PC_4_LO, REG_A5XX_RBBM_PERFCTR_PC_4_HI,
- REG_A5XX_PC_PERFCTR_PC_SEL_4 },
+ REG_A5XX_PC_PERFCTR_PC_SEL_4, 16 },
{ REG_A5XX_RBBM_PERFCTR_PC_5_LO, REG_A5XX_RBBM_PERFCTR_PC_5_HI,
- REG_A5XX_PC_PERFCTR_PC_SEL_5 },
+ REG_A5XX_PC_PERFCTR_PC_SEL_5, 17 },
{ REG_A5XX_RBBM_PERFCTR_PC_6_LO, REG_A5XX_RBBM_PERFCTR_PC_6_HI,
- REG_A5XX_PC_PERFCTR_PC_SEL_6 },
+ REG_A5XX_PC_PERFCTR_PC_SEL_6, 18 },
{ REG_A5XX_RBBM_PERFCTR_PC_7_LO, REG_A5XX_RBBM_PERFCTR_PC_7_HI,
- REG_A5XX_PC_PERFCTR_PC_SEL_7 },
+ REG_A5XX_PC_PERFCTR_PC_SEL_7, 19 },
};
static struct adreno_counter a5xx_counters_ras[] = {
{ REG_A5XX_RBBM_PERFCTR_RAS_0_LO, REG_A5XX_RBBM_PERFCTR_RAS_0_HI,
- REG_A5XX_GRAS_PERFCTR_RAS_SEL_0 },
+ REG_A5XX_GRAS_PERFCTR_RAS_SEL_0, 48 },
{ REG_A5XX_RBBM_PERFCTR_RAS_1_LO, REG_A5XX_RBBM_PERFCTR_RAS_1_HI,
- REG_A5XX_GRAS_PERFCTR_RAS_SEL_1 },
+ REG_A5XX_GRAS_PERFCTR_RAS_SEL_1, 49 },
{ REG_A5XX_RBBM_PERFCTR_RAS_2_LO, REG_A5XX_RBBM_PERFCTR_RAS_2_HI,
- REG_A5XX_GRAS_PERFCTR_RAS_SEL_2 },
+ REG_A5XX_GRAS_PERFCTR_RAS_SEL_2, 50 },
{ REG_A5XX_RBBM_PERFCTR_RAS_3_LO, REG_A5XX_RBBM_PERFCTR_RAS_3_HI,
- REG_A5XX_GRAS_PERFCTR_RAS_SEL_3 },
+ REG_A5XX_GRAS_PERFCTR_RAS_SEL_3, 51 },
};
static struct adreno_counter a5xx_counters_rb[] = {
{ REG_A5XX_RBBM_PERFCTR_RB_0_LO, REG_A5XX_RBBM_PERFCTR_RB_0_HI,
- REG_A5XX_RB_PERFCTR_RB_SEL_0 },
+ REG_A5XX_RB_PERFCTR_RB_SEL_0, 80 },
{ REG_A5XX_RBBM_PERFCTR_RB_1_LO, REG_A5XX_RBBM_PERFCTR_RB_1_HI,
- REG_A5XX_RB_PERFCTR_RB_SEL_1 },
+ REG_A5XX_RB_PERFCTR_RB_SEL_1, 81 },
{ REG_A5XX_RBBM_PERFCTR_RB_2_LO, REG_A5XX_RBBM_PERFCTR_RB_2_HI,
- REG_A5XX_RB_PERFCTR_RB_SEL_2 },
+ REG_A5XX_RB_PERFCTR_RB_SEL_2, 82 },
{ REG_A5XX_RBBM_PERFCTR_RB_3_LO, REG_A5XX_RBBM_PERFCTR_RB_3_HI,
- REG_A5XX_RB_PERFCTR_RB_SEL_3 },
+ REG_A5XX_RB_PERFCTR_RB_SEL_3, 83 },
{ REG_A5XX_RBBM_PERFCTR_RB_4_LO, REG_A5XX_RBBM_PERFCTR_RB_4_HI,
- REG_A5XX_RB_PERFCTR_RB_SEL_4 },
+ REG_A5XX_RB_PERFCTR_RB_SEL_4, 84 },
{ REG_A5XX_RBBM_PERFCTR_RB_5_LO, REG_A5XX_RBBM_PERFCTR_RB_5_HI,
- REG_A5XX_RB_PERFCTR_RB_SEL_5 },
+ REG_A5XX_RB_PERFCTR_RB_SEL_5, 85 },
{ REG_A5XX_RBBM_PERFCTR_RB_6_LO, REG_A5XX_RBBM_PERFCTR_RB_6_HI,
- REG_A5XX_RB_PERFCTR_RB_SEL_6 },
+ REG_A5XX_RB_PERFCTR_RB_SEL_6, 86 },
{ REG_A5XX_RBBM_PERFCTR_RB_7_LO, REG_A5XX_RBBM_PERFCTR_RB_7_HI,
- REG_A5XX_RB_PERFCTR_RB_SEL_7 },
+ REG_A5XX_RB_PERFCTR_RB_SEL_7, 87 },
};
static struct adreno_counter a5xx_counters_rbbm[] = {
{ REG_A5XX_RBBM_PERFCTR_RBBM_0_LO, REG_A5XX_RBBM_PERFCTR_RBBM_0_HI,
- REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0 },
+ REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0, 8 },
{ REG_A5XX_RBBM_PERFCTR_RBBM_1_LO, REG_A5XX_RBBM_PERFCTR_RBBM_1_HI,
- REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1 },
+ REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1, 9 },
{ REG_A5XX_RBBM_PERFCTR_RBBM_2_LO, REG_A5XX_RBBM_PERFCTR_RBBM_2_HI,
- REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2 },
+ REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2, 10 },
{ REG_A5XX_RBBM_PERFCTR_RBBM_3_LO, REG_A5XX_RBBM_PERFCTR_RBBM_3_HI,
- REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3 },
+ REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3, 11 },
};
static struct adreno_counter a5xx_counters_sp[] = {
{ REG_A5XX_RBBM_PERFCTR_SP_0_LO, REG_A5XX_RBBM_PERFCTR_SP_0_HI,
- REG_A5XX_SP_PERFCTR_SP_SEL_0 },
+ REG_A5XX_SP_PERFCTR_SP_SEL_0, 68 },
{ REG_A5XX_RBBM_PERFCTR_SP_1_LO, REG_A5XX_RBBM_PERFCTR_SP_1_HI,
- REG_A5XX_SP_PERFCTR_SP_SEL_1 },
+ REG_A5XX_SP_PERFCTR_SP_SEL_1, 69 },
{ REG_A5XX_RBBM_PERFCTR_SP_2_LO, REG_A5XX_RBBM_PERFCTR_SP_2_HI,
- REG_A5XX_SP_PERFCTR_SP_SEL_2 },
+ REG_A5XX_SP_PERFCTR_SP_SEL_2, 70 },
{ REG_A5XX_RBBM_PERFCTR_SP_3_LO, REG_A5XX_RBBM_PERFCTR_SP_3_HI,
- REG_A5XX_SP_PERFCTR_SP_SEL_3 },
+ REG_A5XX_SP_PERFCTR_SP_SEL_3, 71 },
{ REG_A5XX_RBBM_PERFCTR_SP_4_LO, REG_A5XX_RBBM_PERFCTR_SP_4_HI,
- REG_A5XX_SP_PERFCTR_SP_SEL_4 },
+ REG_A5XX_SP_PERFCTR_SP_SEL_4, 72 },
{ REG_A5XX_RBBM_PERFCTR_SP_5_LO, REG_A5XX_RBBM_PERFCTR_SP_5_HI,
- REG_A5XX_SP_PERFCTR_SP_SEL_5 },
+ REG_A5XX_SP_PERFCTR_SP_SEL_5, 73 },
{ REG_A5XX_RBBM_PERFCTR_SP_6_LO, REG_A5XX_RBBM_PERFCTR_SP_6_HI,
- REG_A5XX_SP_PERFCTR_SP_SEL_6 },
+ REG_A5XX_SP_PERFCTR_SP_SEL_6, 74 },
{ REG_A5XX_RBBM_PERFCTR_SP_7_LO, REG_A5XX_RBBM_PERFCTR_SP_7_HI,
- REG_A5XX_SP_PERFCTR_SP_SEL_7 },
+ REG_A5XX_SP_PERFCTR_SP_SEL_7, 75 },
{ REG_A5XX_RBBM_PERFCTR_SP_8_LO, REG_A5XX_RBBM_PERFCTR_SP_8_HI,
- REG_A5XX_SP_PERFCTR_SP_SEL_8 },
+ REG_A5XX_SP_PERFCTR_SP_SEL_8, 76 },
{ REG_A5XX_RBBM_PERFCTR_SP_9_LO, REG_A5XX_RBBM_PERFCTR_SP_9_HI,
- REG_A5XX_SP_PERFCTR_SP_SEL_9 },
+ REG_A5XX_SP_PERFCTR_SP_SEL_9, 77 },
{ REG_A5XX_RBBM_PERFCTR_SP_10_LO, REG_A5XX_RBBM_PERFCTR_SP_10_HI,
- REG_A5XX_SP_PERFCTR_SP_SEL_10 },
+ REG_A5XX_SP_PERFCTR_SP_SEL_10, 78 },
{ REG_A5XX_RBBM_PERFCTR_SP_11_LO, REG_A5XX_RBBM_PERFCTR_SP_11_HI,
- REG_A5XX_SP_PERFCTR_SP_SEL_11 },
+ REG_A5XX_SP_PERFCTR_SP_SEL_11, 79 },
};
static struct adreno_counter a5xx_counters_tp[] = {
{ REG_A5XX_RBBM_PERFCTR_TP_0_LO, REG_A5XX_RBBM_PERFCTR_TP_0_HI,
- REG_A5XX_TPL1_PERFCTR_TP_SEL_0 },
+ REG_A5XX_TPL1_PERFCTR_TP_SEL_0, 60 },
{ REG_A5XX_RBBM_PERFCTR_TP_1_LO, REG_A5XX_RBBM_PERFCTR_TP_1_HI,
- REG_A5XX_TPL1_PERFCTR_TP_SEL_1 },
+ REG_A5XX_TPL1_PERFCTR_TP_SEL_1, 61 },
{ REG_A5XX_RBBM_PERFCTR_TP_2_LO, REG_A5XX_RBBM_PERFCTR_TP_2_HI,
- REG_A5XX_TPL1_PERFCTR_TP_SEL_2 },
+ REG_A5XX_TPL1_PERFCTR_TP_SEL_2, 62 },
{ REG_A5XX_RBBM_PERFCTR_TP_3_LO, REG_A5XX_RBBM_PERFCTR_TP_3_HI,
- REG_A5XX_TPL1_PERFCTR_TP_SEL_3 },
+ REG_A5XX_TPL1_PERFCTR_TP_SEL_3, 63 },
{ REG_A5XX_RBBM_PERFCTR_TP_4_LO, REG_A5XX_RBBM_PERFCTR_TP_4_HI,
- REG_A5XX_TPL1_PERFCTR_TP_SEL_4 },
+ REG_A5XX_TPL1_PERFCTR_TP_SEL_4, 64 },
{ REG_A5XX_RBBM_PERFCTR_TP_5_LO, REG_A5XX_RBBM_PERFCTR_TP_5_HI,
- REG_A5XX_TPL1_PERFCTR_TP_SEL_5 },
+ REG_A5XX_TPL1_PERFCTR_TP_SEL_5, 65 },
{ REG_A5XX_RBBM_PERFCTR_TP_6_LO, REG_A5XX_RBBM_PERFCTR_TP_6_HI,
- REG_A5XX_TPL1_PERFCTR_TP_SEL_6 },
+ REG_A5XX_TPL1_PERFCTR_TP_SEL_6, 66 },
{ REG_A5XX_RBBM_PERFCTR_TP_7_LO, REG_A5XX_RBBM_PERFCTR_TP_7_HI,
- REG_A5XX_TPL1_PERFCTR_TP_SEL_7 },
+ REG_A5XX_TPL1_PERFCTR_TP_SEL_7, 67 },
};
static struct adreno_counter a5xx_counters_tse[] = {
{ REG_A5XX_RBBM_PERFCTR_TSE_0_LO, REG_A5XX_RBBM_PERFCTR_TSE_0_HI,
- REG_A5XX_GRAS_PERFCTR_TSE_SEL_0 },
+ REG_A5XX_GRAS_PERFCTR_TSE_SEL_0, 44 },
{ REG_A5XX_RBBM_PERFCTR_TSE_1_LO, REG_A5XX_RBBM_PERFCTR_TSE_1_HI,
- REG_A5XX_GRAS_PERFCTR_TSE_SEL_1 },
+ REG_A5XX_GRAS_PERFCTR_TSE_SEL_1, 45 },
{ REG_A5XX_RBBM_PERFCTR_TSE_2_LO, REG_A5XX_RBBM_PERFCTR_TSE_2_HI,
- REG_A5XX_GRAS_PERFCTR_TSE_SEL_2 },
+ REG_A5XX_GRAS_PERFCTR_TSE_SEL_2, 46 },
{ REG_A5XX_RBBM_PERFCTR_TSE_3_LO, REG_A5XX_RBBM_PERFCTR_TSE_3_HI,
- REG_A5XX_GRAS_PERFCTR_TSE_SEL_3 },
+ REG_A5XX_GRAS_PERFCTR_TSE_SEL_3, 47 },
};
static struct adreno_counter a5xx_counters_uche[] = {
{ REG_A5XX_RBBM_PERFCTR_UCHE_0_LO, REG_A5XX_RBBM_PERFCTR_UCHE_0_HI,
- REG_A5XX_UCHE_PERFCTR_UCHE_SEL_0 },
+ REG_A5XX_UCHE_PERFCTR_UCHE_SEL_0, 52 },
{ REG_A5XX_RBBM_PERFCTR_UCHE_1_LO, REG_A5XX_RBBM_PERFCTR_UCHE_1_HI,
- REG_A5XX_UCHE_PERFCTR_UCHE_SEL_1 },
+ REG_A5XX_UCHE_PERFCTR_UCHE_SEL_1, 53 },
{ REG_A5XX_RBBM_PERFCTR_UCHE_2_LO, REG_A5XX_RBBM_PERFCTR_UCHE_2_HI,
- REG_A5XX_UCHE_PERFCTR_UCHE_SEL_2 },
+ REG_A5XX_UCHE_PERFCTR_UCHE_SEL_2, 54 },
{ REG_A5XX_RBBM_PERFCTR_UCHE_3_LO, REG_A5XX_RBBM_PERFCTR_UCHE_3_HI,
- REG_A5XX_UCHE_PERFCTR_UCHE_SEL_3 },
+ REG_A5XX_UCHE_PERFCTR_UCHE_SEL_3, 55 },
{ REG_A5XX_RBBM_PERFCTR_UCHE_4_LO, REG_A5XX_RBBM_PERFCTR_UCHE_4_HI,
- REG_A5XX_UCHE_PERFCTR_UCHE_SEL_4 },
+ REG_A5XX_UCHE_PERFCTR_UCHE_SEL_4, 56 },
{ REG_A5XX_RBBM_PERFCTR_UCHE_5_LO, REG_A5XX_RBBM_PERFCTR_UCHE_5_HI,
- REG_A5XX_UCHE_PERFCTR_UCHE_SEL_5 },
+ REG_A5XX_UCHE_PERFCTR_UCHE_SEL_5, 57 },
{ REG_A5XX_RBBM_PERFCTR_UCHE_6_LO, REG_A5XX_RBBM_PERFCTR_UCHE_6_HI,
- REG_A5XX_UCHE_PERFCTR_UCHE_SEL_6 },
+ REG_A5XX_UCHE_PERFCTR_UCHE_SEL_6, 58 },
{ REG_A5XX_RBBM_PERFCTR_UCHE_7_LO, REG_A5XX_RBBM_PERFCTR_UCHE_7_HI,
- REG_A5XX_UCHE_PERFCTR_UCHE_SEL_7 },
+ REG_A5XX_UCHE_PERFCTR_UCHE_SEL_7, 59 },
};
static struct adreno_counter a5xx_counters_vfd[] = {
{ REG_A5XX_RBBM_PERFCTR_VFD_0_LO, REG_A5XX_RBBM_PERFCTR_VFD_0_HI,
- REG_A5XX_VFD_PERFCTR_VFD_SEL_0 },
+ REG_A5XX_VFD_PERFCTR_VFD_SEL_0, 20 },
{ REG_A5XX_RBBM_PERFCTR_VFD_1_LO, REG_A5XX_RBBM_PERFCTR_VFD_1_HI,
- REG_A5XX_VFD_PERFCTR_VFD_SEL_1 },
+ REG_A5XX_VFD_PERFCTR_VFD_SEL_1, 21 },
{ REG_A5XX_RBBM_PERFCTR_VFD_2_LO, REG_A5XX_RBBM_PERFCTR_VFD_2_HI,
- REG_A5XX_VFD_PERFCTR_VFD_SEL_2 },
+ REG_A5XX_VFD_PERFCTR_VFD_SEL_2, 22 },
{ REG_A5XX_RBBM_PERFCTR_VFD_3_LO, REG_A5XX_RBBM_PERFCTR_VFD_3_HI,
- REG_A5XX_VFD_PERFCTR_VFD_SEL_3 },
+ REG_A5XX_VFD_PERFCTR_VFD_SEL_3, 23 },
{ REG_A5XX_RBBM_PERFCTR_VFD_4_LO, REG_A5XX_RBBM_PERFCTR_VFD_4_HI,
- REG_A5XX_VFD_PERFCTR_VFD_SEL_4 },
+ REG_A5XX_VFD_PERFCTR_VFD_SEL_4, 24 },
{ REG_A5XX_RBBM_PERFCTR_VFD_5_LO, REG_A5XX_RBBM_PERFCTR_VFD_5_HI,
- REG_A5XX_VFD_PERFCTR_VFD_SEL_5 },
+ REG_A5XX_VFD_PERFCTR_VFD_SEL_5, 25 },
{ REG_A5XX_RBBM_PERFCTR_VFD_6_LO, REG_A5XX_RBBM_PERFCTR_VFD_6_HI,
- REG_A5XX_VFD_PERFCTR_VFD_SEL_6 },
+ REG_A5XX_VFD_PERFCTR_VFD_SEL_6, 26 },
{ REG_A5XX_RBBM_PERFCTR_VFD_7_LO, REG_A5XX_RBBM_PERFCTR_VFD_7_HI,
- REG_A5XX_VFD_PERFCTR_VFD_SEL_7 },
+ REG_A5XX_VFD_PERFCTR_VFD_SEL_7, 27 },
};
static struct adreno_counter a5xx_counters_vpc[] = {
{ REG_A5XX_RBBM_PERFCTR_VPC_0_LO, REG_A5XX_RBBM_PERFCTR_VPC_0_HI,
- REG_A5XX_VPC_PERFCTR_VPC_SEL_0 },
+ REG_A5XX_VPC_PERFCTR_VPC_SEL_0, 36 },
{ REG_A5XX_RBBM_PERFCTR_VPC_1_LO, REG_A5XX_RBBM_PERFCTR_VPC_1_HI,
- REG_A5XX_VPC_PERFCTR_VPC_SEL_1 },
+ REG_A5XX_VPC_PERFCTR_VPC_SEL_1, 37 },
{ REG_A5XX_RBBM_PERFCTR_VPC_2_LO, REG_A5XX_RBBM_PERFCTR_VPC_2_HI,
- REG_A5XX_VPC_PERFCTR_VPC_SEL_2 },
+ REG_A5XX_VPC_PERFCTR_VPC_SEL_2, 38 },
{ REG_A5XX_RBBM_PERFCTR_VPC_3_LO, REG_A5XX_RBBM_PERFCTR_VPC_3_HI,
- REG_A5XX_VPC_PERFCTR_VPC_SEL_3 },
+ REG_A5XX_VPC_PERFCTR_VPC_SEL_3, 39 },
};
static struct adreno_counter a5xx_counters_vsc[] = {
{ REG_A5XX_RBBM_PERFCTR_VSC_0_LO, REG_A5XX_RBBM_PERFCTR_VSC_0_HI,
- REG_A5XX_VSC_PERFCTR_VSC_SEL_0 },
+ REG_A5XX_VSC_PERFCTR_VSC_SEL_0, 88 },
{ REG_A5XX_RBBM_PERFCTR_VSC_1_LO, REG_A5XX_RBBM_PERFCTR_VSC_1_HI,
- REG_A5XX_VSC_PERFCTR_VSC_SEL_1 },
+ REG_A5XX_VSC_PERFCTR_VSC_SEL_1, 89 },
};
static struct adreno_counter a5xx_counters_power_ccu[] = {
{ REG_A5XX_CCU_POWER_COUNTER_0_LO, REG_A5XX_CCU_POWER_COUNTER_0_HI,
- REG_A5XX_RB_POWERCTR_CCU_SEL_0 },
+ REG_A5XX_RB_POWERCTR_CCU_SEL_0, 40 },
{ REG_A5XX_CCU_POWER_COUNTER_1_LO, REG_A5XX_CCU_POWER_COUNTER_1_HI,
- REG_A5XX_RB_POWERCTR_CCU_SEL_1 },
+ REG_A5XX_RB_POWERCTR_CCU_SEL_1, 41 },
};
static struct adreno_counter a5xx_counters_power_cp[] = {
@@ -590,39 +686,47 @@ static struct adreno_counter a5xx_counters_alwayson_power[] = {
REG_A5XX_GPMU_ALWAYS_ON_COUNTER_HI },
};
-#define DEFINE_COUNTER_GROUP(_name, _array, _get, _enable, _put) \
-static struct adreno_counter_group _name = { \
- .counters = _array, \
- .nr_counters = ARRAY_SIZE(_array), \
+#define DEFINE_COUNTER_GROUP(_n, _a, _get, _enable, _put, _save, _restore) \
+static struct adreno_counter_group _n = { \
+ .counters = _a, \
+ .nr_counters = ARRAY_SIZE(_a), \
-	.lock = __SPIN_LOCK_UNLOCKED(_name.lock), \
+	.lock = __SPIN_LOCK_UNLOCKED(_n.lock), \
.funcs = { \
.get = _get, \
.enable = _enable, \
.read = a5xx_counter_read, \
.put = _put, \
+ .save = _save, \
+ .restore = _restore \
}, \
}
-#define DEFAULT_COUNTER_GROUP(_name, _array) DEFINE_COUNTER_GROUP(_name, \
- _array, a5xx_counter_get, a5xx_counter_enable_cpu, a5xx_counter_put)
+#define COUNTER_GROUP(_name, _array) DEFINE_COUNTER_GROUP(_name, \
+ _array, a5xx_counter_get, a5xx_counter_enable_cpu, a5xx_counter_put, \
+ a5xx_counter_save, a5xx_counter_restore)
#define SPTP_COUNTER_GROUP(_name, _array) DEFINE_COUNTER_GROUP(_name, \
- _array, a5xx_counter_get, a5xx_counter_enable_pm4, a5xx_counter_put)
+ _array, a5xx_counter_get, a5xx_counter_enable_pm4, a5xx_counter_put, \
+ a5xx_counter_save, a5xx_counter_restore)
+
+#define POWER_COUNTER_GROUP(_name, _array) DEFINE_COUNTER_GROUP(_name, \
+ _array, a5xx_counter_get, a5xx_counter_enable_cpu, a5xx_counter_put, \
+ NULL, NULL)
/* "standard" counters */
-DEFAULT_COUNTER_GROUP(a5xx_counter_group_cp, a5xx_counters_cp);
-DEFAULT_COUNTER_GROUP(a5xx_counter_group_rbbm, a5xx_counters_rbbm);
-DEFAULT_COUNTER_GROUP(a5xx_counter_group_pc, a5xx_counters_pc);
-DEFAULT_COUNTER_GROUP(a5xx_counter_group_vfd, a5xx_counters_vfd);
-DEFAULT_COUNTER_GROUP(a5xx_counter_group_vpc, a5xx_counters_vpc);
-DEFAULT_COUNTER_GROUP(a5xx_counter_group_ccu, a5xx_counters_ccu);
-DEFAULT_COUNTER_GROUP(a5xx_counter_group_cmp, a5xx_counters_cmp);
-DEFAULT_COUNTER_GROUP(a5xx_counter_group_tse, a5xx_counters_tse);
-DEFAULT_COUNTER_GROUP(a5xx_counter_group_ras, a5xx_counters_ras);
-DEFAULT_COUNTER_GROUP(a5xx_counter_group_uche, a5xx_counters_uche);
-DEFAULT_COUNTER_GROUP(a5xx_counter_group_rb, a5xx_counters_rb);
-DEFAULT_COUNTER_GROUP(a5xx_counter_group_vsc, a5xx_counters_vsc);
-DEFAULT_COUNTER_GROUP(a5xx_counter_group_lrz, a5xx_counters_lrz);
+COUNTER_GROUP(a5xx_counter_group_cp, a5xx_counters_cp);
+COUNTER_GROUP(a5xx_counter_group_rbbm, a5xx_counters_rbbm);
+COUNTER_GROUP(a5xx_counter_group_pc, a5xx_counters_pc);
+COUNTER_GROUP(a5xx_counter_group_vfd, a5xx_counters_vfd);
+COUNTER_GROUP(a5xx_counter_group_vpc, a5xx_counters_vpc);
+COUNTER_GROUP(a5xx_counter_group_ccu, a5xx_counters_ccu);
+COUNTER_GROUP(a5xx_counter_group_cmp, a5xx_counters_cmp);
+COUNTER_GROUP(a5xx_counter_group_tse, a5xx_counters_tse);
+COUNTER_GROUP(a5xx_counter_group_ras, a5xx_counters_ras);
+COUNTER_GROUP(a5xx_counter_group_uche, a5xx_counters_uche);
+COUNTER_GROUP(a5xx_counter_group_rb, a5xx_counters_rb);
+COUNTER_GROUP(a5xx_counter_group_vsc, a5xx_counters_vsc);
+COUNTER_GROUP(a5xx_counter_group_lrz, a5xx_counters_lrz);
/* SP/TP counters */
SPTP_COUNTER_GROUP(a5xx_counter_group_hlsq, a5xx_counters_hlsq);
@@ -630,24 +734,27 @@ SPTP_COUNTER_GROUP(a5xx_counter_group_tp, a5xx_counters_tp);
SPTP_COUNTER_GROUP(a5xx_counter_group_sp, a5xx_counters_sp);
/* Power counters */
-DEFAULT_COUNTER_GROUP(a5xx_counter_group_power_ccu, a5xx_counters_power_ccu);
-DEFAULT_COUNTER_GROUP(a5xx_counter_group_power_cp, a5xx_counters_power_cp);
-DEFAULT_COUNTER_GROUP(a5xx_counter_group_power_rb, a5xx_counters_power_rb);
-DEFAULT_COUNTER_GROUP(a5xx_counter_group_power_sp, a5xx_counters_power_sp);
-DEFAULT_COUNTER_GROUP(a5xx_counter_group_power_tp, a5xx_counters_power_tp);
-DEFAULT_COUNTER_GROUP(a5xx_counter_group_power_uche, a5xx_counters_power_uche);
+POWER_COUNTER_GROUP(a5xx_counter_group_power_ccu, a5xx_counters_power_ccu);
+POWER_COUNTER_GROUP(a5xx_counter_group_power_cp, a5xx_counters_power_cp);
+POWER_COUNTER_GROUP(a5xx_counter_group_power_rb, a5xx_counters_power_rb);
+POWER_COUNTER_GROUP(a5xx_counter_group_power_sp, a5xx_counters_power_sp);
+POWER_COUNTER_GROUP(a5xx_counter_group_power_tp, a5xx_counters_power_tp);
+POWER_COUNTER_GROUP(a5xx_counter_group_power_uche, a5xx_counters_power_uche);
DEFINE_COUNTER_GROUP(a5xx_counter_group_alwayson, a5xx_counters_alwayson,
- a5xx_counter_get_fixed, NULL, NULL);
+ a5xx_counter_get_fixed, NULL, NULL, NULL, NULL);
DEFINE_COUNTER_GROUP(a5xx_counter_group_vbif, a5xx_counters_vbif,
- a5xx_counter_get, a5xx_counter_enable_vbif, a5xx_counter_put);
+ a5xx_counter_get, a5xx_counter_enable_vbif, a5xx_counter_put,
+ NULL, NULL);
DEFINE_COUNTER_GROUP(a5xx_counter_group_gpmu, a5xx_counters_gpmu,
- a5xx_counter_get, a5xx_counter_enable_gpmu, a5xx_counter_put);
+ a5xx_counter_get, a5xx_counter_enable_gpmu, a5xx_counter_put,
+ NULL, NULL);
DEFINE_COUNTER_GROUP(a5xx_counter_group_vbif_power, a5xx_counters_vbif_power,
- a5xx_counter_get_fixed, a5xx_counter_enable_vbif_power, NULL);
+ a5xx_counter_get_fixed, a5xx_counter_enable_vbif_power, NULL, NULL,
+ NULL);
DEFINE_COUNTER_GROUP(a5xx_counter_group_alwayson_power,
a5xx_counters_alwayson_power, a5xx_counter_get_fixed,
- a5xx_counter_enable_alwayson_power, NULL);
+ a5xx_counter_enable_alwayson_power, NULL, NULL, NULL);
static const struct adreno_counter_group *a5xx_counter_groups[] = {
[MSM_COUNTER_GROUP_ALWAYSON] = &a5xx_counter_group_alwayson,
@@ -680,6 +787,35 @@ static const struct adreno_counter_group *a5xx_counter_groups[] = {
&a5xx_counter_group_alwayson_power,
};
+void a5xx_counters_restore(struct msm_gpu *gpu)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(a5xx_counter_groups); i++) {
+ struct adreno_counter_group *group =
+ (struct adreno_counter_group *) a5xx_counter_groups[i];
+
+		if (!group)
+			continue;
+
+		if (group->funcs.restore)
+			group->funcs.restore(gpu, group);
+
+		a5xx_counter_group_enable(gpu, group, true);
+ }
+}
+
+void a5xx_counters_save(struct msm_gpu *gpu)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(a5xx_counter_groups); i++) {
+ struct adreno_counter_group *group =
+ (struct adreno_counter_group *) a5xx_counter_groups[i];
+
+ if (group && group->funcs.save)
+ group->funcs.save(gpu, group);
+ }
+}
+
int a5xx_counters_init(struct adreno_gpu *adreno_gpu)
{
adreno_gpu->counter_groups = a5xx_counter_groups;
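The counter groups above now carry save/restore hooks, but the bodies of a5xx_counter_save()/a5xx_counter_restore() for the individual groups fall outside this excerpt. A minimal sketch of what the save side might look like, assuming the new value field added to struct adreno_counter further down and the existing gpu_read() accessor; the real callbacks may differ:

/* Hypothetical sketch -- the real a5xx_counter_save() is outside this
 * excerpt.  Latches each claimed counter's 64-bit value into the new
 * ->value field so it survives power collapse. */
static void a5xx_counter_save(struct msm_gpu *gpu,
		struct adreno_counter_group *group)
{
	int i;

	for (i = 0; i < group->nr_counters; i++) {
		struct adreno_counter *counter = &group->counters[i];

		if (!counter->refcount)
			continue;

		counter->value = ((u64) gpu_read(gpu, counter->hi) << 32) |
			gpu_read(gpu, counter->lo);
	}
}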
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 765c1c087c76..e493c2fee762 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -15,7 +15,6 @@
#include "msm_iommu.h"
#include "msm_trace.h"
#include "a5xx_gpu.h"
-#include <linux/clk/msm-clk.h>
#define SECURE_VA_START 0xc0000000
#define SECURE_VA_SIZE SZ_256M
@@ -1170,25 +1169,14 @@ static int a5xx_pm_resume(struct msm_gpu *gpu)
{
int ret;
- /*
- * Between suspend/resumes the GPU clocks need to be turned off
- * but not a complete power down, typically between frames. Set the
- * memory retention flags on the GPU core clock to retain memory
- * across clock toggles.
- */
- if (gpu->core_clk) {
- clk_set_flags(gpu->core_clk, CLKFLAG_RETAIN_PERIPH);
- clk_set_flags(gpu->core_clk, CLKFLAG_RETAIN_MEM);
- }
-
/* Turn on the core power */
ret = msm_gpu_pm_resume(gpu);
if (ret)
return ret;
- /* If we are already up, don't mess with what works */
- if (gpu->active_cnt > 1)
- return 0;
+
+ /* Restore all the counters before turning on the GPMU */
+ a5xx_counters_restore(gpu);
/* Turn the RBCCU domain first to limit the chances of voltage droop */
gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);
@@ -1220,33 +1208,26 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- /* Turn off the memory retention flag when not necessary */
- if (gpu->core_clk) {
- clk_set_flags(gpu->core_clk, CLKFLAG_NORETAIN_PERIPH);
- clk_set_flags(gpu->core_clk, CLKFLAG_NORETAIN_MEM);
- }
-
- /* Only do this next bit if we are about to go down */
- if (gpu->active_cnt == 1) {
- /* Clear the VBIF pipe before shutting down */
-
- gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
- spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF)
+ /* Clear the VBIF pipe before shutting down */
+ gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
+ spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF)
== 0xF);
- gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);
+ gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);
- /*
- * Reset the VBIF before power collapse to avoid issue with FIFO
- * entries
- */
- if (adreno_is_a530(adreno_gpu)) {
- /* These only need to be done for A530 */
- gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD,
+ /* Save the counters before going down */
+ a5xx_counters_save(gpu);
+
+ /*
+ * Reset the VBIF before power collapse to avoid issue with FIFO
+ * entries
+ */
+ if (adreno_is_a530(adreno_gpu)) {
+ /* These only need to be done for A530 */
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD,
0x003C0000);
- gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD,
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD,
0x00000000);
- }
}
return msm_gpu_pm_suspend(gpu);
@@ -1266,29 +1247,10 @@ static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
#ifdef CONFIG_DEBUG_FS
static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
- bool enabled = test_bit(A5XX_HWCG_ENABLED, &a5xx_gpu->flags);
-
- gpu->funcs->pm_resume(gpu);
-
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A5XX_RBBM_STATUS));
-
- /*
- * Temporarily disable hardware clock gating before going into
- * adreno_show to avoid issues while reading the registers
- */
-
- if (enabled)
- a5xx_set_hwcg(gpu, false);
-
adreno_show(gpu, m);
- if (enabled)
- a5xx_set_hwcg(gpu, true);
-
- gpu->funcs->pm_suspend(gpu);
}
#endif
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index c30b65785ab6..9c62f861136d 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -194,5 +194,7 @@ static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
}
int a5xx_counters_init(struct adreno_gpu *adreno_gpu);
+void a5xx_counters_save(struct msm_gpu *gpu);
+void a5xx_counters_restore(struct msm_gpu *gpu);
#endif /* __A5XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 4e4709d6172f..4ecc3ad762ef 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -164,13 +164,10 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
if (gpu) {
int ret;
- mutex_lock(&dev->struct_mutex);
- gpu->funcs->pm_resume(gpu);
- mutex_unlock(&dev->struct_mutex);
- disable_irq(gpu->irq);
-
- ret = gpu->funcs->hw_init(gpu);
+ pm_runtime_get_sync(&pdev->dev);
+ ret = msm_gpu_hw_init(gpu);
+ pm_runtime_put_sync_autosuspend(&pdev->dev);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
mutex_lock(&dev->struct_mutex);
@@ -178,10 +175,6 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
gpu->funcs->destroy(gpu);
gpu = NULL;
- } else {
- enable_irq(gpu->irq);
- /* give inactive pm a chance to kick in: */
- msm_gpu_retire(gpu);
}
}
@@ -250,12 +243,35 @@ static const struct of_device_id dt_match[] = {
{}
};
+#ifdef CONFIG_PM
+static int adreno_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_gpu *gpu = platform_get_drvdata(pdev);
+
+ return gpu->funcs->pm_resume(gpu);
+}
+
+static int adreno_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_gpu *gpu = platform_get_drvdata(pdev);
+
+ return gpu->funcs->pm_suspend(gpu);
+}
+#endif
+
+static const struct dev_pm_ops adreno_pm_ops = {
+ SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
+};
+
static struct platform_driver adreno_driver = {
.probe = adreno_probe,
.remove = adreno_remove,
.driver = {
.name = "adreno",
.of_match_table = dt_match,
+ .pm = &adreno_pm_ops,
},
};
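With adreno_pm_ops registered, any code path that touches GPU registers takes a runtime-PM reference rather than calling pm_resume() directly, which is the pattern the later msm_gpu.c hunks adopt. A sketch of that pattern; example_read_status() is a made-up helper, not part of the patch:

/* Illustrative only: wake the device (this invokes adreno_resume() if
 * it was suspended), touch registers, then let autosuspend idle it. */
static int example_read_status(struct msm_gpu *gpu, u32 *status)
{
	int ret;

	ret = pm_runtime_get_sync(&gpu->pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&gpu->pdev->dev);
		return ret;
	}

	*status = gpu_read(gpu, REG_A5XX_RBBM_STATUS);

	pm_runtime_mark_last_busy(&gpu->pdev->dev);
	pm_runtime_put_autosuspend(&gpu->pdev->dev);
	return 0;
}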
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 04e0056f2a49..d397c44f1203 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -48,8 +48,15 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
*value = gpu->gpufreq[gpu->active_level];
return 0;
case MSM_PARAM_TIMESTAMP:
- if (adreno_gpu->funcs->get_timestamp)
- return adreno_gpu->funcs->get_timestamp(gpu, value);
+ if (adreno_gpu->funcs->get_timestamp) {
+ int ret;
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ ret = adreno_gpu->funcs->get_timestamp(gpu, value);
+ pm_runtime_put_autosuspend(&gpu->pdev->dev);
+
+ return ret;
+ }
return -EINVAL;
case MSM_PARAM_NR_RINGS:
*value = gpu->nr_rings;
@@ -68,14 +75,25 @@ int adreno_hw_init(struct msm_gpu *gpu)
DBG("%s", gpu->name);
for (i = 0; i < gpu->nr_rings; i++) {
- int ret = msm_gem_get_iova(gpu->rb[i]->bo, gpu->aspace,
- &gpu->rb[i]->iova);
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ int ret = msm_gem_get_iova(ring->bo, gpu->aspace,
+ &ring->iova);
if (ret) {
- gpu->rb[i]->iova = 0;
+ ring->iova = 0;
dev_err(gpu->dev->dev,
"could not map ringbuffer %d: %d\n", i, ret);
return ret;
}
+
+ /* reset ringbuffer(s): */
+ /* No need for a lock here, nobody else is peeking in */
+ ring->cur = ring->start;
+ ring->next = ring->start;
+
+ /* reset completed fence seqno, discard anything pending: */
+ ring->memptrs->fence = adreno_submitted_fence(gpu, ring);
+ ring->memptrs->rptr = 0;
}
/*
@@ -133,35 +151,22 @@ uint32_t adreno_submitted_fence(struct msm_gpu *gpu,
void adreno_recover(struct msm_gpu *gpu)
{
struct drm_device *dev = gpu->dev;
- struct msm_ringbuffer *ring;
- int ret, i;
-
- gpu->funcs->pm_suspend(gpu);
-
- /* reset ringbuffer(s): */
-
- FOR_EACH_RING(gpu, ring, i) {
- if (!ring)
- continue;
+ int ret;
- /* No need for a lock here, nobody else is peeking in */
- ring->cur = ring->start;
- ring->next = ring->start;
+ /*
+ * XXX pm-runtime?? we *need* the device to be off after this
+ * so maybe continuing to call ->pm_suspend/resume() is better?
+ */
- /* reset completed fence seqno, discard anything pending: */
- ring->memptrs->fence = adreno_submitted_fence(gpu, ring);
- ring->memptrs->rptr = 0;
- }
+ gpu->funcs->pm_suspend(gpu);
gpu->funcs->pm_resume(gpu);
- disable_irq(gpu->irq);
- ret = gpu->funcs->hw_init(gpu);
+ ret = msm_gpu_hw_init(gpu);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
/* hmm, oh well? */
}
- enable_irq(gpu->irq);
}
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
@@ -520,6 +525,10 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (ret)
return ret;
+ pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
if (ret) {
dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
@@ -535,12 +544,18 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret;
}
-void adreno_gpu_cleanup(struct adreno_gpu *gpu)
+void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
- release_firmware(gpu->pm4);
- release_firmware(gpu->pfp);
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+
+ release_firmware(adreno_gpu->pm4);
+ release_firmware(adreno_gpu->pfp);
- msm_gpu_cleanup(&gpu->base);
+ pm_runtime_disable(&pdev->dev);
+ msm_gpu_cleanup(gpu);
}
static void adreno_snapshot_os(struct msm_gpu *gpu,
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index c96189fb805b..462352f7fc9a 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -87,8 +87,10 @@ struct adreno_counter {
u32 lo;
u32 hi;
u32 sel;
+ int load_bit;
u32 countable;
u32 refcount;
+ u64 value;
};
struct adreno_counter_group {
@@ -99,11 +101,15 @@ struct adreno_counter_group {
int (*get)(struct msm_gpu *,
struct adreno_counter_group *, u32, u32 *, u32 *);
void (*enable)(struct msm_gpu *,
- struct adreno_counter_group *, int);
+ struct adreno_counter_group *, int, bool);
u64 (*read)(struct msm_gpu *,
struct adreno_counter_group *, int);
void (*put)(struct msm_gpu *,
struct adreno_counter_group *, int);
+ void (*save)(struct msm_gpu *,
+ struct adreno_counter_group *);
+ void (*restore)(struct msm_gpu *,
+ struct adreno_counter_group *);
} funcs;
};
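The enable() hook gains a bool parameter that this excerpt never names; judging from a5xx_counters_restore() above, it plausibly distinguishes re-arming a counter after power collapse from a first-time claim. A sketch under that assumption only:

/* Assumption: the new bool tells enable() it is re-arming an already
 * claimed counter after power collapse rather than claiming it anew. */
static void example_group_restore(struct msm_gpu *gpu,
		struct adreno_counter_group *group)
{
	int i;

	for (i = 0; i < group->nr_counters; i++)
		if (group->counters[i].refcount)
			group->funcs.enable(gpu, group, i, true);
}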
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
index c98f4511d644..fa111d581529 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
@@ -2153,6 +2153,8 @@ int sde_hdmi_get_property(struct drm_connector *connector,
mutex_lock(&hdmi_display->display_lock);
if (property_index == CONNECTOR_PROP_PLL_ENABLE)
*value = hdmi_display->pll_update_enable ? 1 : 0;
+ if (property_index == CONNECTOR_PROP_HDCP_VERSION)
+ *value = hdmi_display->sink_hdcp_ver;
mutex_unlock(&hdmi_display->display_lock);
return rc;
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
index 672a9f188d27..865998c6a126 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
@@ -108,14 +108,34 @@ enum hdmi_tx_feature_type {
* @mode: Current display mode.
* @connected: If HDMI display is connected.
* @is_tpg_enabled: TPG state.
+ * @hdmi_tx_version: HDMI TX version
+ * @hdmi_tx_major_version: HDMI TX major version
+ * @max_pclk_khz: Max pixel clock supported
+ * @hdcp1_use_sw_keys: If HDCP1 engine uses SW keys
+ * @hdcp14_present: If the sink supports HDCP 1.4
+ * @hdcp22_present: If the sink supports HDCP 2.2
+ * @hdcp_status: Current HDCP status
+ * @sink_hdcp_ver: HDCP version of the sink
+ * @enc_lvl: Current encryption level
+ * @curr_hdr_state: Current HDR state of the HDMI connector
+ * @auth_state: Current authentication state of HDCP
+ * @sink_hdcp22_support: If the sink supports HDCP 2.2
+ * @src_hdcp22_support: If the source supports HDCP 2.2
+ * @hdcp_data: Call back data registered by the client with HDCP lib
+ * @hdcp_feat_data: Handle to HDCP feature data
+ * @hdcp_ops: Function ops registered by the client with the HDCP lib
+ * @ddc_ctrl: Handle to HDMI DDC Controller
* @hpd_work: HPD work structure.
* @codec_ready: If audio codec is ready.
* @client_notify_pending: If there is client notification pending.
* @irq_domain: IRQ domain structure.
+ * @notifier: CEC notifier to convey physical address information.
* @pll_update_enable: if it's allowed to update HDMI PLL ppm.
* @dc_enable: If deep color is enabled. Only DC_30 so far.
* @dc_feature_supported: If deep color feature is supported.
- * @notifier: CEC notifider to convey physical address information.
+ * @bt2020_colorimetry: If BT2020 colorimetry is supported by sink
+ * @hdcp_cb_work: Work item for the HDCP callback
+ * @io: Handle to IO base addresses for HDMI
* @root: Debug fs root entry.
*/
struct sde_hdmi {
@@ -146,6 +166,7 @@ struct sde_hdmi {
u32 hdcp14_present;
u32 hdcp22_present;
u8 hdcp_status;
+ u8 sink_hdcp_ver;
u32 enc_lvl;
u8 curr_hdr_state;
bool auth_state;
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
index e6b6d15b5fb7..0d93edb9201f 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
@@ -511,6 +511,11 @@ static void sde_hdmi_update_hdcp_info(struct drm_connector *connector)
}
}
+ if (display->sink_hdcp22_support)
+ display->sink_hdcp_ver = SDE_HDMI_HDCP_22;
+ else
+ display->sink_hdcp_ver = SDE_HDMI_HDCP_14;
+
/* update internal data about hdcp */
display->hdcp_data = fd;
display->hdcp_ops = ops;
@@ -543,6 +548,8 @@ static void _sde_hdmi_bridge_disable(struct drm_bridge *bridge)
mutex_lock(&display->display_lock);
display->pll_update_enable = false;
+ display->sink_hdcp_ver = SDE_HDMI_HDCP_NONE;
+ display->sink_hdcp22_support = false;
mutex_unlock(&display->display_lock);
}
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h
index 3c6b0f1b9dd4..421bdf7643ca 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h
@@ -105,6 +105,10 @@
#define SDE_HDMI_USE_EXTENDED_COLORIMETRY 0x3
#define SDE_HDMI_BT2020_COLORIMETRY 0x6
+#define SDE_HDMI_HDCP_22 0x22
+#define SDE_HDMI_HDCP_14 0x14
+#define SDE_HDMI_HDCP_NONE 0x0
+
/*
* Bits 1:0 in HDMI_HW_DDC_CTRL that dictate how the HDCP 2.2 RxStatus will be
* read by the hardware
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 7d660ba56594..9dbd86eff816 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -424,7 +424,7 @@ static struct hdmi_platform_config hdmi_tx_8994_config = {
static struct hdmi_platform_config hdmi_tx_8996_config = {
.phy_init = NULL,
HDMI_CFG(pwr_reg, none),
- HDMI_CFG(hpd_reg, none),
+ HDMI_CFG(hpd_reg, 8x74),
HDMI_CFG(pwr_clk, 8x74),
HDMI_CFG(hpd_clk, 8x74),
.hpd_freq = hpd_clk_freq_8x74,
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 969af4c6f0c0..b245a4c7c826 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -280,6 +280,10 @@ static int msm_unload(struct drm_device *dev)
if (gpu) {
mutex_lock(&dev->struct_mutex);
+ /*
+ * XXX what do we do here?
+ * pm_runtime_enable(&pdev->dev);
+ */
gpu->funcs->pm_suspend(gpu);
mutex_unlock(&dev->struct_mutex);
gpu->funcs->destroy(gpu);
@@ -669,10 +673,10 @@ static int msm_open(struct drm_device *dev, struct drm_file *file)
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- if (ctx)
+ if (ctx) {
INIT_LIST_HEAD(&ctx->counters);
-
- msm_submitqueue_init(ctx);
+ msm_submitqueue_init(ctx);
+ }
file->driver_priv = ctx;
@@ -906,7 +910,9 @@ static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
if (gpu) {
seq_printf(m, "%s Status:\n", gpu->name);
+ pm_runtime_get_sync(&gpu->pdev->dev);
gpu->funcs->show(gpu, m);
+ pm_runtime_put_sync(&gpu->pdev->dev);
}
return 0;
@@ -2140,7 +2146,9 @@ static int msm_pdev_probe(struct platform_device *pdev)
#ifdef CONFIG_OF
add_components(&pdev->dev, &match, "connectors");
+#ifndef CONFIG_QCOM_KGSL
add_components(&pdev->dev, &match, "gpus");
+#endif
#else
/* For non-DT case, it kinda sucks. We don't actually have a way
* to know whether or not we are waiting for certain devices (or if
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index ae3a930005b6..25dc5f9ef561 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -158,6 +158,7 @@ enum msm_mdp_conn_property {
CONNECTOR_PROP_DST_H,
CONNECTOR_PROP_PLL_DELTA,
CONNECTOR_PROP_PLL_ENABLE,
+ CONNECTOR_PROP_HDCP_VERSION,
/* enum/bitmask properties */
CONNECTOR_PROP_TOPOLOGY_NAME,
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 7c109fdab545..44d9784d1bd7 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -154,22 +154,9 @@ static int disable_axi(struct msm_gpu *gpu)
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
- struct drm_device *dev = gpu->dev;
- struct msm_drm_private *priv = dev->dev_private;
- struct platform_device *pdev = priv->gpu_pdev;
int ret;
- DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- if (gpu->active_cnt++ > 0)
- return 0;
-
- if (WARN_ON(gpu->active_cnt <= 0))
- return -EINVAL;
-
- WARN_ON(pm_runtime_get_sync(&pdev->dev) < 0);
+ DBG("%s", gpu->name);
ret = enable_pwrrail(gpu);
if (ret)
@@ -186,25 +173,16 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
if (gpu->aspace && gpu->aspace->mmu)
msm_mmu_enable(gpu->aspace->mmu);
+ gpu->needs_hw_init = true;
+
return 0;
}
int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
- struct drm_device *dev = gpu->dev;
- struct msm_drm_private *priv = dev->dev_private;
- struct platform_device *pdev = priv->gpu_pdev;
int ret;
- DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- if (--gpu->active_cnt > 0)
- return 0;
-
- if (WARN_ON(gpu->active_cnt < 0))
- return -EINVAL;
+ DBG("%s", gpu->name);
if (gpu->aspace && gpu->aspace->mmu)
msm_mmu_disable(gpu->aspace->mmu);
@@ -221,57 +199,23 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
if (ret)
return ret;
- pm_runtime_put(&pdev->dev);
return 0;
}
-/*
- * Inactivity detection (for suspend):
- */
-
-static void inactive_worker(struct work_struct *work)
+int msm_gpu_hw_init(struct msm_gpu *gpu)
{
- struct msm_gpu *gpu = container_of(work, struct msm_gpu, inactive_work);
- struct drm_device *dev = gpu->dev;
-
- if (gpu->inactive)
- return;
-
- DBG("%s: inactive!\n", gpu->name);
- mutex_lock(&dev->struct_mutex);
- if (!(msm_gpu_active(gpu) || gpu->inactive)) {
- disable_axi(gpu);
- disable_clk(gpu);
- gpu->inactive = true;
- }
- mutex_unlock(&dev->struct_mutex);
-}
-
-static void inactive_handler(unsigned long data)
-{
- struct msm_gpu *gpu = (struct msm_gpu *)data;
- struct msm_drm_private *priv = gpu->dev->dev_private;
+ int ret;
- queue_work(priv->wq, &gpu->inactive_work);
-}
+ if (!gpu->needs_hw_init)
+ return 0;
-/* cancel inactive timer and make sure we are awake: */
-static void inactive_cancel(struct msm_gpu *gpu)
-{
- DBG("%s", gpu->name);
- del_timer(&gpu->inactive_timer);
- if (gpu->inactive) {
- enable_clk(gpu);
- enable_axi(gpu);
- gpu->inactive = false;
- }
-}
+ disable_irq(gpu->irq);
+ ret = gpu->funcs->hw_init(gpu);
+ if (!ret)
+ gpu->needs_hw_init = false;
+ enable_irq(gpu->irq);
-static void inactive_start(struct msm_gpu *gpu)
-{
- DBG("%s", gpu->name);
- mod_timer(&gpu->inactive_timer,
- round_jiffies_up(jiffies + DRM_MSM_INACTIVE_JIFFIES));
+ return ret;
}
static void retire_guilty_submit(struct msm_gpu *gpu,
@@ -306,8 +250,6 @@ static void recover_worker(struct work_struct *work)
struct msm_ringbuffer *ring;
int i;
- inactive_cancel(gpu);
-
/* Retire all events that have already passed */
FOR_EACH_RING(gpu, ring, i)
retire_submits(gpu, ring, ring->memptrs->fence);
@@ -316,6 +258,8 @@ static void recover_worker(struct work_struct *work)
/* Recover the GPU */
gpu->funcs->recover(gpu);
+ /* Decrement the device usage count for the guilty submit */
+ pm_runtime_put_sync_autosuspend(&gpu->pdev->dev);
/* Replay the remaining on all rings, highest priority first */
for (i = 0; i < gpu->nr_rings; i++) {
@@ -438,6 +382,8 @@ void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
unsigned long flags;
+ pm_runtime_get_sync(&gpu->pdev->dev);
+
spin_lock_irqsave(&gpu->perf_lock, flags);
/* we could dynamically enable/disable perfcntr registers too.. */
gpu->last_sample.active = msm_gpu_active(gpu);
@@ -451,6 +397,7 @@ void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
gpu->perfcntr_active = false;
+ pm_runtime_put_sync(&gpu->pdev->dev);
}
/* returns -errno or # of cntrs sampled */
@@ -505,6 +452,8 @@ static void retire_submits(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
trace_msm_retired(submit, ticks->started, ticks->retired);
+ pm_runtime_mark_last_busy(&gpu->pdev->dev);
+ pm_runtime_put_autosuspend(&gpu->pdev->dev);
msm_gem_submit_free(submit);
}
}
@@ -550,9 +499,6 @@ static void retire_worker(struct work_struct *work)
_retire_ring(gpu, ring, ring->memptrs->fence);
mutex_unlock(&dev->struct_mutex);
}
-
- if (!msm_gpu_active(gpu))
- inactive_start(gpu);
}
/* call from irq handler to schedule work to retire bo's */
@@ -574,7 +520,9 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
submit->fence = FENCE(submit->ring, ++ring->seqno);
- inactive_cancel(gpu);
+ pm_runtime_get_sync(&gpu->pdev->dev);
+
+ msm_gpu_hw_init(gpu);
list_add_tail(&submit->node, &ring->submits);
@@ -863,23 +811,12 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->dev = drm;
gpu->funcs = funcs;
gpu->name = name;
- /*
- * Set the inactive flag to false, so that when the retire worker
- * kicks in from the init path, it knows that it has to turn off the
- * clocks. This should be fine to do since this is the init sequence
- * and we have an init_lock in msm_open() to protect against bad things
- * from happening.
- */
- gpu->inactive = false;
INIT_LIST_HEAD(&gpu->active_list);
INIT_WORK(&gpu->retire_work, retire_worker);
- INIT_WORK(&gpu->inactive_work, inactive_worker);
INIT_WORK(&gpu->recover_work, recover_worker);
- setup_timer(&gpu->inactive_timer, inactive_handler,
- (unsigned long)gpu);
setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
(unsigned long)gpu);
@@ -909,8 +846,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
goto fail;
}
- pm_runtime_enable(&pdev->dev);
-
ret = get_clocks(pdev, gpu);
if (ret)
goto fail;
@@ -979,6 +914,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
pm_qos_add_request(&gpu->pm_qos_req_dma, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
+ gpu->pdev = pdev;
+ platform_set_drvdata(pdev, gpu);
bs_init(gpu);
@@ -1000,7 +937,6 @@ fail:
msm_gpu_destroy_address_space(gpu->aspace);
msm_gpu_destroy_address_space(gpu->secure_aspace);
- pm_runtime_disable(&pdev->dev);
return ret;
}
@@ -1031,7 +967,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
}
msm_snapshot_destroy(gpu, gpu->snapshot);
- pm_runtime_disable(&pdev->dev);
msm_gpu_destroy_address_space(gpu->aspace);
msm_gpu_destroy_address_space(gpu->secure_aspace);
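The submit and retire hunks above pair one runtime-PM reference per in-flight submission: msm_gpu_submit() takes it, and retire_submits() (or the recovery path, for a guilty submit) drops it, so autosuspend can only fire once the GPU is truly idle. Compressed to its essentials, the pairing looks roughly like this:

/* Sketch of the per-submit reference pairing, not driver code. */
static void example_submit_side(struct msm_gpu *gpu)
{
	pm_runtime_get_sync(&gpu->pdev->dev);	/* taken in msm_gpu_submit() */
	/* ... lazy hw init, then kick the ring ... */
}

static void example_retire_side(struct msm_gpu *gpu)
{
	/* ... per retired submit, in retire_submits() ... */
	pm_runtime_mark_last_busy(&gpu->pdev->dev);
	pm_runtime_put_autosuspend(&gpu->pdev->dev);
}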
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index eeebfb746f7f..deb12aed5b28 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -83,6 +83,7 @@ struct msm_gpu_funcs {
struct msm_gpu {
const char *name;
struct drm_device *dev;
+ struct platform_device *pdev;
const struct msm_gpu_funcs *funcs;
/* performance counters (hw & sw): */
@@ -103,9 +104,8 @@ struct msm_gpu {
/* list of GEM active objects: */
struct list_head active_list;
- /* is gpu powered/active? */
- int active_cnt;
- bool inactive;
+ /* does gpu need hw_init? */
+ bool needs_hw_init;
/* worker for handling active-list retiring: */
struct work_struct retire_work;
@@ -139,9 +139,7 @@ struct msm_gpu {
/* Hang and Inactivity Detection:
*/
#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
-#define DRM_MSM_INACTIVE_JIFFIES msecs_to_jiffies(DRM_MSM_INACTIVE_PERIOD)
- struct timer_list inactive_timer;
- struct work_struct inactive_work;
+
#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
@@ -255,6 +253,8 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
+int msm_gpu_hw_init(struct msm_gpu *gpu);
+
void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 2ca91674a15a..6a741a7ce0f6 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -534,13 +534,7 @@ static int sde_connector_atomic_get_property(struct drm_connector *connector,
idx = msm_property_index(&c_conn->property_info, property);
if (idx == CONNECTOR_PROP_RETIRE_FENCE)
- /*
- * Set a fence offset if not a virtual connector, so that the
- * fence signals after one additional commit rather than at the
- * end of the current one.
- */
- rc = sde_fence_create(&c_conn->retire_fence, val,
- c_conn->connector_type != DRM_MODE_CONNECTOR_VIRTUAL);
+ rc = sde_fence_create(&c_conn->retire_fence, val, 0);
else
/* get cached property value */
rc = msm_property_atomic_get(&c_conn->property_info,
@@ -933,6 +927,10 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
"PLL_ENABLE", 0x0, 0, 1, 0,
CONNECTOR_PROP_PLL_ENABLE);
+ msm_property_install_volatile_range(&c_conn->property_info,
+ "HDCP_VERSION", 0x0, 0, U8_MAX, 0,
+ CONNECTOR_PROP_HDCP_VERSION);
+
/* enum/bitmask properties */
msm_property_install_enum(&c_conn->property_info, "topology_name",
DRM_MODE_PROP_IMMUTABLE, 0, e_topology_name,
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index f9b8c3966d74..0f563ac25da8 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -390,5 +390,22 @@ enum sde_csc_type sde_connector_get_csc_type(struct drm_connector *conn);
*/
int sde_connector_get_dpms(struct drm_connector *connector);
+/**
+ * sde_connector_needs_offset - adjust the output fence offset based on
+ * display type
+ * @connector: Pointer to drm connector object
+ * Returns: true if offset is required, false for all other cases.
+ */
+static inline bool sde_connector_needs_offset(struct drm_connector *connector)
+{
+ struct sde_connector *c_conn;
+
+ if (!connector)
+ return false;
+
+ c_conn = to_sde_connector(connector);
+ return (c_conn->connector_type != DRM_MODE_CONNECTOR_VIRTUAL);
+}
+
#endif /* _SDE_CONNECTOR_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.c b/drivers/gpu/drm/msm/sde/sde_core_perf.c
index 0ba644d5519d..29e746e1fdf5 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -75,6 +75,31 @@ static bool _sde_core_video_mode_intf_connected(struct drm_crtc *crtc)
return false;
}
+static void _sde_core_perf_calc_crtc(struct drm_crtc *crtc,
+ struct drm_crtc_state *state,
+ struct sde_core_perf_params *perf)
+{
+ struct sde_crtc_state *sde_cstate;
+
+ if (!crtc || !state || !perf) {
+ SDE_ERROR("invalid parameters\n");
+ return;
+ }
+
+ sde_cstate = to_sde_crtc_state(state);
+ memset(perf, 0, sizeof(struct sde_core_perf_params));
+
+ perf->bw_ctl = sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
+ perf->max_per_pipe_ib =
+ sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_IB);
+ perf->core_clk_rate =
+ sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_CLK);
+
+ SDE_DEBUG("crtc=%d clk_rate=%u ib=%llu ab=%llu\n",
+ crtc->base.id, perf->core_clk_rate,
+ perf->max_per_pipe_ib, perf->bw_ctl);
+}
+
int sde_core_perf_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -102,7 +127,9 @@ int sde_core_perf_crtc_check(struct drm_crtc *crtc,
sde_cstate = to_sde_crtc_state(state);
- bw_sum_of_intfs = sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
+ _sde_core_perf_calc_crtc(crtc, state, &sde_cstate->new_perf);
+
+ bw_sum_of_intfs = sde_cstate->new_perf.bw_ctl;
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
@@ -110,7 +137,7 @@ int sde_core_perf_crtc_check(struct drm_crtc *crtc,
struct sde_crtc_state *tmp_cstate =
to_sde_crtc_state(tmp_crtc->state);
- bw_sum_of_intfs += tmp_cstate->cur_perf.bw_ctl;
+ bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
}
}
@@ -126,11 +153,9 @@ int sde_core_perf_crtc_check(struct drm_crtc *crtc,
SDE_DEBUG("final threshold bw limit = %d\n", threshold);
if (!threshold) {
- sde_cstate->cur_perf.bw_ctl = 0;
SDE_ERROR("no bandwidth limits specified\n");
return -E2BIG;
} else if (bw > threshold) {
- sde_cstate->cur_perf.bw_ctl = 0;
SDE_DEBUG("exceeds bandwidth: %ukb > %ukb\n", bw, threshold);
return -E2BIG;
}
@@ -138,26 +163,6 @@ int sde_core_perf_crtc_check(struct drm_crtc *crtc,
return 0;
}
-static void _sde_core_perf_calc_crtc(struct sde_kms *kms,
- struct drm_crtc *crtc,
- struct sde_core_perf_params *perf)
-{
- struct sde_crtc_state *sde_cstate;
-
- sde_cstate = to_sde_crtc_state(crtc->state);
- memset(perf, 0, sizeof(struct sde_core_perf_params));
-
- perf->bw_ctl = sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
- perf->max_per_pipe_ib =
- sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_IB);
- perf->core_clk_rate =
- sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_CLK);
-
- SDE_DEBUG("crtc=%d clk_rate=%u ib=%llu ab=%llu\n",
- crtc->base.id, perf->core_clk_rate,
- perf->max_per_pipe_ib, perf->bw_ctl);
-}
-
static u64 _sde_core_perf_crtc_calc_client_vote(struct sde_kms *kms,
struct drm_crtc *crtc, struct sde_core_perf_params *perf,
bool nrt_client, u32 core_clk)
@@ -175,13 +180,13 @@ static u64 _sde_core_perf_crtc_calc_client_vote(struct sde_kms *kms,
to_sde_crtc_state(tmp_crtc->state);
perf->max_per_pipe_ib = max(perf->max_per_pipe_ib,
- sde_cstate->cur_perf.max_per_pipe_ib);
+ sde_cstate->new_perf.max_per_pipe_ib);
- bw_sum_of_intfs += sde_cstate->cur_perf.bw_ctl;
+ bw_sum_of_intfs += sde_cstate->new_perf.bw_ctl;
SDE_DEBUG("crtc=%d bw=%llu\n",
tmp_crtc->base.id,
- sde_cstate->cur_perf.bw_ctl);
+ sde_cstate->new_perf.bw_ctl);
}
}
@@ -249,6 +254,7 @@ static void _sde_core_perf_crtc_update_bus(struct sde_kms *kms,
void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc)
{
struct drm_crtc *tmp_crtc;
+ struct sde_crtc *sde_crtc;
struct sde_crtc_state *sde_cstate;
struct sde_kms *kms;
@@ -263,6 +269,7 @@ void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc)
return;
}
+ sde_crtc = to_sde_crtc(crtc);
sde_cstate = to_sde_crtc_state(crtc->state);
/* only do this for command panel or writeback */
@@ -285,8 +292,7 @@ void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc)
/* Release the bandwidth */
if (kms->perf.enable_bw_release) {
trace_sde_cmd_release_bw(crtc->base.id);
- sde_cstate->cur_perf.bw_ctl = 0;
- sde_cstate->new_perf.bw_ctl = 0;
+ sde_crtc->cur_perf.bw_ctl = 0;
SDE_DEBUG("Release BW crtc=%d\n", crtc->base.id);
_sde_core_perf_crtc_update_bus(kms, crtc, 0);
}
@@ -298,18 +304,27 @@ static int _sde_core_select_clk_lvl(struct sde_kms *kms,
return clk_round_rate(kms->perf.core_clk, clk_rate);
}
-static u32 _sde_core_perf_get_core_clk_rate(struct sde_kms *kms)
+static u32 _sde_core_perf_get_core_clk_rate(struct sde_kms *kms,
+ struct sde_core_perf_params *crct_perf, struct drm_crtc *crtc)
{
u32 clk_rate = 0;
- struct drm_crtc *crtc;
+ struct drm_crtc *tmp_crtc;
struct sde_crtc_state *sde_cstate;
int ncrtc = 0;
+ u32 tmp_rate;

-	drm_for_each_crtc(crtc, kms->dev) {
-		if (_sde_core_perf_crtc_is_power_on(crtc)) {
-			sde_cstate = to_sde_crtc_state(crtc->state);
-			clk_rate = max(sde_cstate->cur_perf.core_clk_rate,
-					clk_rate);
+	drm_for_each_crtc(tmp_crtc, kms->dev) {
+		if (_sde_core_perf_crtc_is_power_on(tmp_crtc)) {
+ /* for current CRTC, use the cached value */
+ tmp_rate = crct_perf->core_clk_rate;
+ } else {
+ sde_cstate = to_sde_crtc_state(tmp_crtc->state);
+ tmp_rate = sde_cstate->new_perf.core_clk_rate;
+ }
+
+ clk_rate = max(tmp_rate, clk_rate);
clk_rate = clk_round_rate(kms->perf.core_clk, clk_rate);
}
ncrtc++;
@@ -353,13 +368,20 @@ void sde_core_perf_crtc_update(struct drm_crtc *crtc,
SDE_ATRACE_BEGIN(__func__);
- old = &sde_cstate->cur_perf;
- new = &sde_cstate->new_perf;
+ /*
+ * cache the performance numbers in the crtc prior to the
+ * crtc kickoff, so the same numbers are used during the
+ * perf update that happens post kickoff.
+ */
+
+ if (params_changed)
+ memcpy(&sde_crtc->new_perf, &sde_cstate->new_perf,
+ sizeof(struct sde_core_perf_params));
- if (_sde_core_perf_crtc_is_power_on(crtc) && !stop_req) {
- if (params_changed)
- _sde_core_perf_calc_crtc(kms, crtc, new);
+ old = &sde_crtc->cur_perf;
+ new = &sde_crtc->new_perf;
+ if (_sde_core_perf_crtc_is_power_on(crtc) && !stop_req) {
/*
* cases for bus bandwidth update.
* 1. new bandwidth vote or writeback output vote
@@ -398,7 +420,7 @@ void sde_core_perf_crtc_update(struct drm_crtc *crtc,
* use the new clock for the rotator bw calculation.
*/
if (update_clk)
- clk_rate = _sde_core_perf_get_core_clk_rate(kms);
+ clk_rate = _sde_core_perf_get_core_clk_rate(kms, old, crtc);
if (update_bus)
_sde_core_perf_crtc_update_bus(kms, crtc, clk_rate);
@@ -409,7 +431,9 @@ void sde_core_perf_crtc_update(struct drm_crtc *crtc,
*/
if (update_clk) {
SDE_ATRACE_INT(kms->perf.clk_name, clk_rate);
- SDE_EVT32(kms->dev, stop_req, clk_rate);
+ SDE_EVT32(kms->dev, stop_req, clk_rate, params_changed,
+ old->core_clk_rate, new->core_clk_rate);
+
ret = sde_power_clk_set_rate(&priv->phandle,
kms->perf.clk_name, clk_rate);
if (ret) {
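Taken together, the sde_core_perf hunks split performance state into a per-commit request (in sde_crtc_state) and a per-crtc commitment (in sde_crtc). A compressed view of the resulting lifecycle, reconstructed from the hunks above rather than quoted from the source:

/*
 * Reconstructed lifecycle (names from the hunks above, abbreviated):
 *
 *  atomic check:  _sde_core_perf_calc_crtc(crtc, state, &cstate->new_perf)
 *                 -> request computed and validated against the threshold
 *  crtc kickoff:  sde_crtc->new_perf = cstate->new_perf  (params_changed)
 *  frame done:    sde_core_perf_crtc_update(crtc, 0, false)
 *                 -> votes bus/clock, folds new_perf into sde_crtc->cur_perf
 */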
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index a0417a0dd12e..2a31bc7fedc7 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -204,10 +204,15 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
idx = left_crtc_zpos_cnt[pstate->stage]++;
}
+ /* stage plane on right LM if it crosses the boundary */
+ lm_right = (lm_idx == LEFT_MIXER) &&
+ (plane->state->crtc_x + plane->state->crtc_w >
+ crtc_split_width);
+
/*
* program each mixer with two hw pipes in dual mixer mode,
*/
- if (sde_crtc->num_mixers == CRTC_DUAL_MIXERS) {
+ if (sde_crtc->num_mixers == CRTC_DUAL_MIXERS && lm_right) {
stage_cfg->stage[LEFT_MIXER][pstate->stage][1] =
sde_plane_pipe(plane, 1);
@@ -218,10 +223,7 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
flush_mask |= ctl->ops.get_bitmask_sspp(ctl,
sde_plane_pipe(plane, lm_idx ? 1 : 0));
- /* stage plane on right LM if it crosses the boundary */
- lm_right = (lm_idx == LEFT_MIXER) &&
- (plane->state->crtc_x + plane->state->crtc_w >
- crtc_split_width);
+
stage_cfg->stage[lm_idx][pstate->stage][idx] =
sde_plane_pipe(plane, lm_idx ? 1 : 0);
@@ -471,6 +473,7 @@ static void sde_crtc_frame_event_work(struct kthread_work *work)
struct sde_crtc_frame_event *fevent;
struct drm_crtc *crtc;
struct sde_crtc *sde_crtc;
+ struct sde_crtc_state *cstate;
struct sde_kms *sde_kms;
unsigned long flags;
@@ -480,13 +483,14 @@ static void sde_crtc_frame_event_work(struct kthread_work *work)
}
fevent = container_of(work, struct sde_crtc_frame_event, work);
- if (!fevent->crtc) {
+ if (!fevent->crtc || !fevent->crtc->state) {
SDE_ERROR("invalid crtc\n");
return;
}
crtc = fevent->crtc;
sde_crtc = to_sde_crtc(crtc);
+ cstate = to_sde_crtc_state(crtc->state);
sde_kms = _sde_crtc_get_kms(crtc);
if (!sde_kms) {
@@ -520,6 +524,9 @@ static void sde_crtc_frame_event_work(struct kthread_work *work)
} else {
SDE_EVT32(DRMID(crtc), fevent->event, 2);
}
+
+ if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE)
+ sde_core_perf_crtc_update(crtc, 0, false);
} else {
SDE_ERROR("crtc%d ts:%lld unknown event %u\n", crtc->base.id,
ktime_to_ns(fevent->ts),
@@ -1674,19 +1681,28 @@ static int sde_crtc_atomic_get_property(struct drm_crtc *crtc,
struct sde_crtc *sde_crtc;
struct sde_crtc_state *cstate;
int i, ret = -EINVAL;
+	bool conn_offset = false;
if (!crtc || !state) {
SDE_ERROR("invalid argument(s)\n");
} else {
sde_crtc = to_sde_crtc(crtc);
cstate = to_sde_crtc_state(state);
+
+ for (i = 0; i < cstate->num_connectors; ++i) {
+ conn_offset = sde_connector_needs_offset(
+ cstate->connectors[i]);
+ if (conn_offset)
+ break;
+ }
+
i = msm_property_index(&sde_crtc->property_info, property);
if (i == CRTC_PROP_OUTPUT_FENCE) {
int offset = sde_crtc_get_property(cstate,
CRTC_PROP_OUTPUT_FENCE_OFFSET);
- ret = sde_fence_create(
- &sde_crtc->output_fence, val, offset);
+ ret = sde_fence_create(&sde_crtc->output_fence, val,
+ offset + conn_offset);
if (ret)
SDE_ERROR("fence create failed\n");
} else {
@@ -1872,15 +1888,18 @@ static const struct file_operations __prefix ## _fops = { \
static int sde_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
struct drm_crtc *crtc = (struct drm_crtc *) s->private;
+ struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
struct sde_crtc_state *cstate = to_sde_crtc_state(crtc->state);
seq_printf(s, "num_connectors: %d\n", cstate->num_connectors);
seq_printf(s, "is_rt: %d\n", cstate->is_rt);
seq_printf(s, "intf_mode: %d\n", sde_crtc_get_intf_mode(crtc));
- seq_printf(s, "bw_ctl: %llu\n", cstate->cur_perf.bw_ctl);
- seq_printf(s, "core_clk_rate: %u\n", cstate->cur_perf.core_clk_rate);
+
+ seq_printf(s, "bw_ctl: %llu\n", sde_crtc->cur_perf.bw_ctl);
+ seq_printf(s, "core_clk_rate: %u\n",
+ sde_crtc->cur_perf.core_clk_rate);
seq_printf(s, "max_per_pipe_ib: %llu\n",
- cstate->cur_perf.max_per_pipe_ib);
+ sde_crtc->cur_perf.max_per_pipe_ib);
return 0;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 0eed61580cd8..200073995d43 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -95,6 +95,8 @@ struct sde_crtc_frame_event {
* @frame_event_list : available frame event list
* @pending : Whether any page-flip events are pending signal
* @spin_lock : spin lock for frame event, transaction status, etc...
+ * @cur_perf : current performance committed to clock/bandwidth driver
+ * @new_perf : new performance committed to clock/bandwidth driver
*/
struct sde_crtc {
struct drm_crtc base;
@@ -134,6 +136,9 @@ struct sde_crtc {
struct sde_crtc_frame_event frame_events[SDE_CRTC_FRAME_EVENT_SIZE];
struct list_head frame_event_list;
spinlock_t spin_lock;
+
+ struct sde_core_perf_params cur_perf;
+ struct sde_core_perf_params new_perf;
};
#define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
@@ -148,6 +153,7 @@ struct sde_crtc {
* @property_values: Current crtc property values
* @input_fence_timeout_ns : Cached input fence timeout, in ns
* @property_blobs: Reference pointers for blob properties
+ * @new_perf: new performance state being requested
*/
struct sde_crtc_state {
struct drm_crtc_state base;
@@ -161,7 +167,6 @@ struct sde_crtc_state {
uint64_t input_fence_timeout_ns;
struct drm_property_blob *property_blobs[CRTC_PROP_COUNT];
- struct sde_core_perf_params cur_perf;
struct sde_core_perf_params new_perf;
};
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 8d821e43afa5..34a32d79f22c 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -69,7 +69,7 @@
*
* This is disabled by default.
*/
-static bool sdecustom;
+static bool sdecustom = true;
module_param(sdecustom, bool, 0400);
MODULE_PARM_DESC(sdecustom, "Enable customizations for sde clients");
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 6e2ccfa8e428..9cbee5243e6d 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -1237,8 +1237,10 @@ static int _sde_plane_mode_set(struct drm_plane *plane,
bool q16_data = true;
int idx;
struct sde_phy_plane *pp;
- uint32_t num_of_phy_planes = 0, maxlinewidth = 0xFFFF;
+ uint32_t num_of_phy_planes = 0;
int mode = 0;
+ uint32_t crtc_split_width;
+ bool is_across_mixer_boundary = false;
if (!plane) {
SDE_ERROR("invalid plane\n");
@@ -1252,6 +1254,7 @@ static int _sde_plane_mode_set(struct drm_plane *plane,
pstate = to_sde_plane_state(plane->state);
crtc = state->crtc;
+ crtc_split_width = get_crtc_split_width(crtc);
fb = state->fb;
if (!crtc || !fb) {
SDE_ERROR_PLANE(psde, "invalid crtc %d or fb %d\n",
@@ -1348,17 +1351,17 @@ static int _sde_plane_mode_set(struct drm_plane *plane,
}
}
- list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
- if (maxlinewidth > pp->pipe_sblk->maxlinewidth)
- maxlinewidth = pp->pipe_sblk->maxlinewidth;
+ list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list)
num_of_phy_planes++;
- }
/*
* Only need to use one physical plane if the plane still fits within
* one mixer's width and does not straddle the mixer boundary.
*/
- if (maxlinewidth >= (src.x + src.w))
+ is_across_mixer_boundary = (plane->state->crtc_x < crtc_split_width) &&
+ (plane->state->crtc_x + plane->state->crtc_w >
+ crtc_split_width);
+ if (crtc_split_width >= (src.x + src.w) && !is_across_mixer_boundary)
num_of_phy_planes = 1;
if (num_of_phy_planes > 1) {
@@ -1369,9 +1372,10 @@ static int _sde_plane_mode_set(struct drm_plane *plane,
list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
/* Adjust offset for multi-pipe */
- src.x += src.w * pp->index;
- dst.x += dst.w * pp->index;
-
+ if (num_of_phy_planes > 1) {
+ src.x += src.w * pp->index;
+ dst.x += dst.w * pp->index;
+ }
pp->pipe_cfg.src_rect = src;
pp->pipe_cfg.dst_rect = dst;
diff --git a/drivers/gpu/drm/msm/sde_hdcp_1x.c b/drivers/gpu/drm/msm/sde_hdcp_1x.c
index d08cf13c448d..c2d29a084c7f 100644
--- a/drivers/gpu/drm/msm/sde_hdcp_1x.c
+++ b/drivers/gpu/drm/msm/sde_hdcp_1x.c
@@ -1201,9 +1201,16 @@ static int sde_hdcp_1x_authentication_part2(struct sde_hdcp_1x *hdcp)
if (rc)
goto error;
- /* do not proceed further if no device connected */
- if (!hdcp->current_tp.dev_count)
+ /*
+ * Do not proceed further if no device is connected: if no
+ * downstream devices are attached to the repeater, part II
+ * fails.
+ */
+
+ if (!hdcp->current_tp.dev_count) {
+ rc = -EINVAL;
goto error;
+ }
rc = sde_hdcp_1x_write_ksv_fifo(hdcp);
} while (--v_retry && rc);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index d671dcfaff3c..4896474da320 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -180,6 +180,10 @@ nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
}
}
+#ifdef __BIG_ENDIAN
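+ /* default MSI off on big-endian hosts; the NvMSI config option below can still enable it */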
+ pci->msi = false;
+#endif
+
pci->msi = nvkm_boolopt(device->cfgopt, "NvMSI", pci->msi);
if (pci->msi && func->msi_rearm) {
pci->msi = pci_enable_msi(pci->pdev) == 0;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 025c429050c0..5d8dfe027b30 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -612,7 +612,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
} else {
pr_err("Failed to fill pool (%p)\n", pool);
/* If we have any pages left put them to the pool. */
- list_for_each_entry(p, &pool->list, lru) {
+ list_for_each_entry(p, &new_pages, lru) {
++cpages;
}
list_splice(&new_pages, &pool->list);
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 3fb13c7a0814..78f74b883877 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -65,8 +65,8 @@ static const struct adreno_vbif_platform a5xx_vbif_platforms[] = {
};
static void a5xx_irq_storm_worker(struct work_struct *work);
-static int _read_fw2_block_header(uint32_t *header, uint32_t id,
- uint32_t major, uint32_t minor);
+static int _read_fw2_block_header(uint32_t *header, uint32_t remain,
+ uint32_t id, uint32_t major, uint32_t minor);
static void a5xx_gpmu_reset(struct work_struct *work);
static int a5xx_gpmu_init(struct adreno_device *adreno_dev);
@@ -709,6 +709,7 @@ static int _load_gpmu_firmware(struct adreno_device *adreno_dev)
if (data[1] != GPMU_FIRMWARE_ID)
goto err;
ret = _read_fw2_block_header(&data[2],
+ data[0] - 2,
GPMU_FIRMWARE_ID,
adreno_dev->gpucore->gpmu_major,
adreno_dev->gpucore->gpmu_minor);
@@ -1231,8 +1232,8 @@ void a5xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
kgsl_regwrite(device, A5XX_RBBM_ISDB_CNT, on ? 0x00000182 : 0x00000180);
}
-static int _read_fw2_block_header(uint32_t *header, uint32_t id,
- uint32_t major, uint32_t minor)
+static int _read_fw2_block_header(uint32_t *header, uint32_t remain,
+ uint32_t id, uint32_t major, uint32_t minor)
{
uint32_t header_size;
int i = 1;
@@ -1242,7 +1243,8 @@ static int _read_fw2_block_header(uint32_t *header, uint32_t id,
header_size = header[0];
/* Headers are non-empty, bounded in size, must fit within the remaining data, and always occur as pairs of words */
- if (header_size > MAX_HEADER_SIZE || header_size % 2)
+ if (header_size > MAX_HEADER_SIZE || header_size >= remain ||
+ header_size % 2 || header_size == 0)
return -EINVAL;
/* Sequences must have an identifying id first thing in their header */
if (id == GPMU_SEQUENCE_ID) {
@@ -1306,8 +1308,8 @@ static void _load_regfile(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
const struct firmware *fw;
- uint32_t block_size = 0, block_total = 0, fw_size;
- uint32_t *block;
+ uint64_t block_size = 0, block_total = 0;
+ uint32_t fw_size, *block;
int ret = -EINVAL;
if (!adreno_dev->gpucore->regfw_name)
@@ -1329,7 +1331,8 @@ static void _load_regfile(struct adreno_device *adreno_dev)
/* All offset numbers calculated from file description */
while (block_total < fw_size) {
block_size = block[0];
- if (block_size >= fw_size || block_size < 2)
+ if (((block_total + block_size) >= fw_size)
+ || block_size < 5)
goto err;
if (block[1] != GPMU_SEQUENCE_ID)
goto err;
@@ -1337,6 +1340,7 @@ static void _load_regfile(struct adreno_device *adreno_dev)
/* For now ignore blocks other than the LM sequence */
if (block[4] == LM_SEQUENCE_ID) {
ret = _read_fw2_block_header(&block[2],
+ block_size - 2,
GPMU_SEQUENCE_ID,
adreno_dev->gpucore->lm_major,
adreno_dev->gpucore->lm_minor);
@@ -1344,6 +1348,9 @@ static void _load_regfile(struct adreno_device *adreno_dev)
goto err;
adreno_dev->lm_fw = fw;
+
+ if (block[2] > (block_size - 2))
+ goto err;
adreno_dev->lm_sequence = block + block[2] + 3;
adreno_dev->lm_size = block_size - block[2] - 2;
}
@@ -1356,7 +1363,7 @@ static void _load_regfile(struct adreno_device *adreno_dev)
err:
release_firmware(fw);
KGSL_PWR_ERR(device,
- "Register file failed to load sz=%d bsz=%d header=%d\n",
+ "Register file failed to load sz=%d bsz=%llu header=%d\n",
fw_size, block_size, ret);
return;
}
diff --git a/drivers/gpu/msm/adreno_a5xx_snapshot.c b/drivers/gpu/msm/adreno_a5xx_snapshot.c
index 496fc6a9248e..09effbd39a9c 100644
--- a/drivers/gpu/msm/adreno_a5xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a5xx_snapshot.c
@@ -765,6 +765,8 @@ static void _a5xx_do_crashdump(struct kgsl_device *device)
crash_dump_valid = false;
+ if (!device->snapshot_crashdumper)
+ return;
if (capturescript.gpuaddr == 0 || registers.gpuaddr == 0)
return;
@@ -870,8 +872,7 @@ void a5xx_snapshot(struct adreno_device *adreno_dev,
ARRAY_SIZE(a5xx_vbif_snapshot_registers));
/* Try to run the crash dumper */
- if (device->snapshot_crashdumper)
- _a5xx_do_crashdump(device);
+ _a5xx_do_crashdump(device);
kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
snapshot, a5xx_snapshot_registers, NULL);
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index c46d5ee3c468..de4ba83903f9 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -2742,6 +2742,10 @@ static int _kgsl_gpumem_sync_cache(struct kgsl_mem_entry *entry,
int cacheop;
int mode;
+ /* Cache ops are not allowed on secure memory */
+ if (entry->memdesc.flags & KGSL_MEMFLAGS_SECURE)
+ return 0;
+
/*
* Flush is defined as (clean | invalidate). If both bits are set, then
* do a flush, otherwise check for the individual bits and clean or inv
@@ -3439,6 +3443,7 @@ long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv,
return 0;
}
+/* entry->bind_lock must be held by the caller */
static int _sparse_add_to_bind_tree(struct kgsl_mem_entry *entry,
uint64_t v_offset,
struct kgsl_memdesc *memdesc,
@@ -3467,10 +3472,16 @@ static int _sparse_add_to_bind_tree(struct kgsl_mem_entry *entry,
parent = *node;
this = rb_entry(parent, struct sparse_bind_object, node);
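+ /* any range overlap with an existing bind entry is rejected as in-use */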
- if (new->v_off < this->v_off)
+ if ((new->v_off < this->v_off) &&
+ ((new->v_off + new->size) <= this->v_off))
node = &parent->rb_left;
- else if (new->v_off > this->v_off)
+ else if ((new->v_off > this->v_off) &&
+ (new->v_off >= (this->v_off + this->size)))
node = &parent->rb_right;
+ else {
+ kfree(new);
+ return -EADDRINUSE;
+ }
}
rb_link_node(&new->node, parent, node);
@@ -3691,8 +3702,11 @@ static int _sparse_bind(struct kgsl_process_private *process,
return ret;
}
+ spin_lock(&virt_entry->bind_lock);
ret = _sparse_add_to_bind_tree(virt_entry, v_offset, memdesc,
p_offset, size, flags);
+ spin_unlock(&virt_entry->bind_lock);
+
if (ret == 0)
memdesc->cur_bindings += size / PAGE_SIZE;
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 8c998a5d791b..c3b3ccf48a7b 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -2181,9 +2181,11 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[0],
pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq, clocks[0]);
- kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[6],
- clk_round_rate(pwr->grp_clks[6], KGSL_RBBMTIMER_CLK_FREQ),
- clocks[6]);
+ if (pwr->grp_clks[6] != NULL)
+ kgsl_pwrctrl_clk_set_rate(pwr->grp_clks[6],
+ clk_round_rate(pwr->grp_clks[6],
+ KGSL_RBBMTIMER_CLK_FREQ),
+ clocks[6]);
_isense_clk_set_rate(pwr, pwr->num_pwrlevels - 1);
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index d57a2f75dccf..32c6a40a408f 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -72,6 +72,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
.driver_data = (kernel_ulong_t)0,
},
+ {
+ /* Cannon Lake H */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa326),
+ .driver_data = (kernel_ulong_t)0,
+ },
+ {
+ /* Cannon Lake LP */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6),
+ .driver_data = (kernel_ulong_t)0,
+ },
{ 0 },
};
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 7ba795b24e75..639d1a9c8793 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -339,8 +339,10 @@ static int ismt_process_desc(const struct ismt_desc *desc,
break;
case I2C_SMBUS_BLOCK_DATA:
case I2C_SMBUS_I2C_BLOCK_DATA:
- memcpy(&data->block[1], dma_buffer, desc->rxbytes);
- data->block[0] = desc->rxbytes;
+ if (desc->rxbytes != dma_buffer[0] + 1)
+ return -EMSGSIZE;
+
+ memcpy(data->block, dma_buffer, desc->rxbytes);
break;
}
return 0;
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index f325663c27c5..4b58e8aaf5c5 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -786,10 +786,6 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0);
- i2c->cmd = 0;
- memset(i2c->cmd_buf, 0, BUFSIZE);
- memset(i2c->data_buf, 0, BUFSIZE);
-
i2c->irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
dev_name(&pdev->dev), i2c);
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index 28ab4e52dab5..b3aa73f1a5a1 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -22,6 +22,7 @@
#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/qpnp/qpnp-revid.h>
+#include <linux/power_supply.h>
#define FG_ADC_RR_EN_CTL 0x46
#define FG_ADC_RR_SKIN_TEMP_LSB 0x50
@@ -192,8 +193,7 @@
#define FG_RR_ADC_STS_CHANNEL_READING_MASK 0x3
#define FG_RR_ADC_STS_CHANNEL_STS 0x2
-#define FG_RR_CONV_CONTINUOUS_TIME_MIN_US 50000
-#define FG_RR_CONV_CONTINUOUS_TIME_MAX_US 51000
+#define FG_RR_CONV_CONTINUOUS_TIME_MIN_MS 50
#define FG_RR_CONV_MAX_RETRY_CNT 50
#define FG_RR_TP_REV_VERSION1 21
#define FG_RR_TP_REV_VERSION2 29
@@ -235,6 +235,7 @@ struct rradc_chip {
struct device_node *revid_dev_node;
struct pmic_revid_data *pmic_fab_id;
int volt;
+ struct power_supply *usb_trig;
};
struct rradc_channels {
@@ -726,6 +727,24 @@ static int rradc_disable_continuous_mode(struct rradc_chip *chip)
return rc;
}
+static bool rradc_is_usb_present(struct rradc_chip *chip)
+{
+ union power_supply_propval pval;
+ int rc;
+ bool usb_present = false;
+
+ if (!chip->usb_trig) {
+ pr_debug("USB property not present\n");
+ return usb_present;
+ }
+
+ rc = power_supply_get_property(chip->usb_trig,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ usb_present = (rc < 0) ? 0 : pval.intval;
+
+ return usb_present;
+}
+
static int rradc_check_status_ready_with_retry(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u8 *buf, u16 status)
{
@@ -745,8 +764,18 @@ static int rradc_check_status_ready_with_retry(struct rradc_chip *chip,
(retry_cnt < FG_RR_CONV_MAX_RETRY_CNT)) {
pr_debug("%s is not ready; nothing to read:0x%x\n",
rradc_chans[prop->channel].datasheet_name, buf[0]);
- usleep_range(FG_RR_CONV_CONTINUOUS_TIME_MIN_US,
- FG_RR_CONV_CONTINUOUS_TIME_MAX_US);
+
+ if (((prop->channel == RR_ADC_CHG_TEMP) ||
+ (prop->channel == RR_ADC_SKIN_TEMP) ||
+ (prop->channel == RR_ADC_USBIN_I) ||
+ (prop->channel == RR_ADC_DIE_TEMP)) &&
+ ((!rradc_is_usb_present(chip)))) {
+ pr_debug("USB not present for %d\n", prop->channel);
+ rc = -ENODATA;
+ break;
+ }
+
+ msleep(FG_RR_CONV_CONTINUOUS_TIME_MIN_MS);
retry_cnt++;
rc = rradc_read(chip, status, buf, 1);
if (rc < 0) {
@@ -764,7 +793,7 @@ static int rradc_check_status_ready_with_retry(struct rradc_chip *chip,
static int rradc_read_channel_with_continuous_mode(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u8 *buf)
{
- int rc = 0;
+ int rc = 0, ret = 0;
u16 status = 0;
rc = rradc_enable_continuous_mode(chip);
@@ -777,23 +806,25 @@ static int rradc_read_channel_with_continuous_mode(struct rradc_chip *chip,
rc = rradc_read(chip, status, buf, 1);
if (rc < 0) {
pr_err("status read failed:%d\n", rc);
- return rc;
+ ret = rc;
+ goto disable;
}
rc = rradc_check_status_ready_with_retry(chip, prop,
buf, status);
if (rc < 0) {
pr_err("Status read failed:%d\n", rc);
- return rc;
+ ret = rc;
}
+disable:
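+ /* always switch back to non-continuous mode; a failure here sets ret */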
rc = rradc_disable_continuous_mode(chip);
if (rc < 0) {
pr_err("Failed to switch to non continuous mode\n");
- return rc;
+ ret = rc;
}
- return rc;
+ return ret;
}
static int rradc_enable_batt_id_channel(struct rradc_chip *chip, bool enable)
@@ -1149,6 +1180,10 @@ static int rradc_probe(struct platform_device *pdev)
indio_dev->channels = chip->iio_chans;
indio_dev->num_channels = chip->nchannels;
+ chip->usb_trig = power_supply_get_by_name("usb");
+ if (!chip->usb_trig)
+ pr_debug("Error obtaining usb power supply\n");
+
return devm_iio_device_register(dev, indio_dev);
}
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index ce6ff9b301bb..7e2dc5e56632 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -381,8 +381,8 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
return 0;
if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) {
- psmouse_warn(psmouse, "failed to get extended button data\n");
- button_info = 0;
+ psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
+ button_info = 0x33;
}
psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 5be14ad29d46..dbf09836ff30 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -905,6 +905,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
},
},
{
+ /* Gigabyte P57 - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P57"),
+ },
+ },
+ {
/* Schenker XMG C504 - Elantech touchpad */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
diff --git a/drivers/input/touchscreen/st/fts.c b/drivers/input/touchscreen/st/fts.c
index 08bfb83a9447..06f7f569f417 100644
--- a/drivers/input/touchscreen/st/fts.c
+++ b/drivers/input/touchscreen/st/fts.c
@@ -105,7 +105,7 @@ static void fts_interrupt_enable(struct fts_ts_info *info);
static int fts_init_hw(struct fts_ts_info *info);
static int fts_mode_handler(struct fts_ts_info *info, int force);
static int fts_command(struct fts_ts_info *info, unsigned char cmd);
-
+static void fts_unblank(struct fts_ts_info *info);
static int fts_chip_initialization(struct fts_ts_info *info);
void touch_callback(unsigned int status)
@@ -1487,8 +1487,12 @@ static int fts_init(struct fts_ts_info *info)
error = fts_interrupt_install(info);
- if (error != OK)
+ if (error != OK) {
logError(1, "%s Init (1) error (ERROR = %08X)\n", error);
+ return error;
+ }
+
+ fts_unblank(info);
return error;
}
@@ -1773,6 +1777,26 @@ static int fts_fb_state_chg_callback(struct notifier_block *nb, unsigned long va
}
+static void fts_unblank(struct fts_ts_info *info)
+{
+ int i;
+
+ for (i = 0; i < TOUCH_ID_MAX; i++) {
+ input_mt_slot(info->input_dev, i);
+ input_mt_report_slot_state(info->input_dev,
+ (i < FINGER_MAX) ? MT_TOOL_FINGER : MT_TOOL_PEN, 0);
+ }
+ input_sync(info->input_dev);
+
+ info->resume_bit = 1;
+
+ fts_mode_handler(info, 0);
+
+ info->sensor_sleep = false;
+
+ fts_enableInterrupt();
+}
+
static struct notifier_block fts_noti_block = {
.notifier_call = fts_fb_state_chg_callback,
};
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index b30739de79e7..6317478916ef 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1243,6 +1243,7 @@ static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
/* pages will be freed later (after being unassigned) */
+ list_del(&it->list);
kfree(it);
}
}
@@ -1956,10 +1957,20 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
- arm_smmu_tlb_inv_context(smmu_domain);
-
arm_smmu_disable_clocks(smmu_domain->smmu);
+ if (smmu_domain->pgtbl_ops) {
+ free_io_pgtable_ops(smmu_domain->pgtbl_ops);
+ /* unassign any freed page table memory */
+ if (arm_smmu_is_master_side_secure(smmu_domain)) {
+ arm_smmu_secure_domain_lock(smmu_domain);
+ arm_smmu_secure_pool_destroy(smmu_domain);
+ arm_smmu_unassign_table(smmu_domain);
+ arm_smmu_secure_domain_unlock(smmu_domain);
+ }
+ smmu_domain->pgtbl_ops = NULL;
+ }
+
free_irqs:
if (cfg->irptndx != INVALID_IRPTNDX) {
irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 9e17ef27a183..6f1dbd52ec91 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -915,8 +915,11 @@ static int __init gic_of_init(struct device_node *node,
gic_len = resource_size(&res);
}
- if (mips_cm_present())
+ if (mips_cm_present()) {
write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
+ /* Ensure GIC region is enabled before trying to access it */
+ __sync();
+ }
gic_present = true;
__gic_init(gic_base, gic_len, cpu_vec, 0, node);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 620268b63b2a..966227a3df1a 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -596,9 +596,18 @@ config LEDS_QPNP
LEDs in both PWM and light pattern generator (LPG) modes. For older
PMICs, it also supports WLEDs and flash LEDs.
+config LEDS_QPNP_FLASH
+ tristate "Support for QPNP Flash LEDs"
+ depends on LEDS_CLASS && SPMI
+ help
+ This driver supports the flash LED functionality of Qualcomm
+ Technologies, Inc. QPNP PMICs. This driver supports PMICs up through
+ PM8994. It can configure the flash LED target current for several
+ independent channels.
+
config LEDS_QPNP_FLASH_V2
tristate "Support for QPNP V2 Flash LEDs"
- depends on LEDS_CLASS && MFD_SPMI_PMIC
+ depends on LEDS_CLASS && MFD_SPMI_PMIC && !LEDS_QPNP_FLASH
help
This driver supports the flash V2 LED functionality of Qualcomm
Technologies, Inc. QPNP PMICs. This driver supports PMICs starting
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index aa5ba0cf4de6..8d8ba9175810 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_LEDS_MAX8997) += leds-max8997.o
obj-$(CONFIG_LEDS_LM355x) += leds-lm355x.o
obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o
obj-$(CONFIG_LEDS_QPNP) += leds-qpnp.o
+obj-$(CONFIG_LEDS_QPNP_FLASH) += leds-qpnp-flash.o
obj-$(CONFIG_LEDS_QPNP_FLASH_V2) += leds-qpnp-flash-v2.o
obj-$(CONFIG_LEDS_QPNP_WLED) += leds-qpnp-wled.o
obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o
diff --git a/drivers/leds/leds-qpnp-flash.c b/drivers/leds/leds-qpnp-flash.c
new file mode 100644
index 000000000000..cd76941b87ca
--- /dev/null
+++ b/drivers/leds/leds-qpnp-flash.c
@@ -0,0 +1,2683 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/errno.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
+#include <linux/power_supply.h>
+#include <linux/leds-qpnp-flash.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include "leds.h"
+
+#define FLASH_LED_PERIPHERAL_SUBTYPE(base) (base + 0x05)
+#define FLASH_SAFETY_TIMER(base) (base + 0x40)
+#define FLASH_MAX_CURRENT(base) (base + 0x41)
+#define FLASH_LED0_CURRENT(base) (base + 0x42)
+#define FLASH_LED1_CURRENT(base) (base + 0x43)
+#define FLASH_CLAMP_CURRENT(base) (base + 0x44)
+#define FLASH_MODULE_ENABLE_CTRL(base) (base + 0x46)
+#define FLASH_LED_STROBE_CTRL(base) (base + 0x47)
+#define FLASH_LED_TMR_CTRL(base) (base + 0x48)
+#define FLASH_HEADROOM(base) (base + 0x4A)
+#define FLASH_STARTUP_DELAY(base) (base + 0x4B)
+#define FLASH_MASK_ENABLE(base) (base + 0x4C)
+#define FLASH_VREG_OK_FORCE(base) (base + 0x4F)
+#define FLASH_FAULT_DETECT(base) (base + 0x51)
+#define FLASH_THERMAL_DRATE(base) (base + 0x52)
+#define FLASH_CURRENT_RAMP(base) (base + 0x54)
+#define FLASH_VPH_PWR_DROOP(base) (base + 0x5A)
+#define FLASH_HDRM_SNS_ENABLE_CTRL0(base) (base + 0x5C)
+#define FLASH_HDRM_SNS_ENABLE_CTRL1(base) (base + 0x5D)
+#define FLASH_LED_UNLOCK_SECURE(base) (base + 0xD0)
+#define FLASH_PERPH_RESET_CTRL(base) (base + 0xDA)
+#define FLASH_TORCH(base) (base + 0xE4)
+
+#define FLASH_STATUS_REG_MASK 0xFF
+#define FLASH_LED_FAULT_STATUS(base) (base + 0x08)
+#define INT_LATCHED_STS(base) (base + 0x18)
+#define IN_POLARITY_HIGH(base) (base + 0x12)
+#define INT_SET_TYPE(base) (base + 0x11)
+#define INT_EN_SET(base) (base + 0x15)
+#define INT_LATCHED_CLR(base) (base + 0x14)
+
+#define FLASH_HEADROOM_MASK 0x03
+#define FLASH_STARTUP_DLY_MASK 0x03
+#define FLASH_VREG_OK_FORCE_MASK 0xC0
+#define FLASH_FAULT_DETECT_MASK 0x80
+#define FLASH_THERMAL_DERATE_MASK 0xBF
+#define FLASH_SECURE_MASK 0xFF
+#define FLASH_TORCH_MASK 0x03
+#define FLASH_CURRENT_MASK 0x7F
+#define FLASH_TMR_MASK 0x03
+#define FLASH_TMR_SAFETY 0x00
+#define FLASH_SAFETY_TIMER_MASK 0x7F
+#define FLASH_MODULE_ENABLE_MASK 0xE0
+#define FLASH_STROBE_MASK 0xC0
+#define FLASH_CURRENT_RAMP_MASK 0xBF
+#define FLASH_VPH_PWR_DROOP_MASK 0xF3
+#define FLASH_LED_HDRM_SNS_ENABLE_MASK 0x81
+#define FLASH_MASK_MODULE_CONTRL_MASK 0xE0
+#define FLASH_FOLLOW_OTST2_RB_MASK 0x08
+
+#define FLASH_LED_TRIGGER_DEFAULT "none"
+#define FLASH_LED_HEADROOM_DEFAULT_MV 500
+#define FLASH_LED_STARTUP_DELAY_DEFAULT_US 128
+#define FLASH_LED_CLAMP_CURRENT_DEFAULT_MA 200
+#define FLASH_LED_THERMAL_DERATE_THRESHOLD_DEFAULT_C 80
+#define FLASH_LED_RAMP_UP_STEP_DEFAULT_US 3
+#define FLASH_LED_RAMP_DN_STEP_DEFAULT_US 3
+#define FLASH_LED_VPH_PWR_DROOP_THRESHOLD_DEFAULT_MV 3200
+#define FLASH_LED_VPH_PWR_DROOP_DEBOUNCE_TIME_DEFAULT_US 10
+#define FLASH_LED_THERMAL_DERATE_RATE_DEFAULT_PERCENT 2
+#define FLASH_RAMP_UP_DELAY_US_MIN 1000
+#define FLASH_RAMP_UP_DELAY_US_MAX 1001
+#define FLASH_RAMP_DN_DELAY_US_MIN 2160
+#define FLASH_RAMP_DN_DELAY_US_MAX 2161
+#define FLASH_BOOST_REGULATOR_PROBE_DELAY_MS 2000
+#define FLASH_TORCH_MAX_LEVEL 0x0F
+#define FLASH_MAX_LEVEL 0x4F
+#define FLASH_LED_FLASH_HW_VREG_OK 0x40
+#define FLASH_LED_FLASH_SW_VREG_OK 0x80
+#define FLASH_LED_STROBE_TYPE_HW 0x04
+#define FLASH_DURATION_DIVIDER 10
+#define FLASH_LED_HEADROOM_DIVIDER 100
+#define FLASH_LED_HEADROOM_OFFSET 2
+#define FLASH_LED_MAX_CURRENT_MA 1000
+#define FLASH_LED_THERMAL_THRESHOLD_MIN 95
+#define FLASH_LED_THERMAL_DEVIDER 10
+#define FLASH_LED_VPH_DROOP_THRESHOLD_MIN_MV 2500
+#define FLASH_LED_VPH_DROOP_THRESHOLD_DIVIDER 100
+#define FLASH_LED_HDRM_SNS_ENABLE 0x81
+#define FLASH_LED_HDRM_SNS_DISABLE 0x01
+#define FLASH_LED_UA_PER_MA 1000
+#define FLASH_LED_MASK_MODULE_MASK2_ENABLE 0x20
+#define FLASH_LED_MASK3_ENABLE_SHIFT 7
+#define FLASH_LED_MODULE_CTRL_DEFAULT 0x60
+#define FLASH_LED_CURRENT_READING_DELAY_MIN 5000
+#define FLASH_LED_CURRENT_READING_DELAY_MAX 5001
+#define FLASH_LED_OPEN_FAULT_DETECTED 0xC
+
+#define FLASH_UNLOCK_SECURE 0xA5
+#define FLASH_LED_TORCH_ENABLE 0x00
+#define FLASH_LED_TORCH_DISABLE 0x03
+#define FLASH_MODULE_ENABLE 0x80
+#define FLASH_LED0_TRIGGER 0x80
+#define FLASH_LED1_TRIGGER 0x40
+#define FLASH_LED0_ENABLEMENT 0x40
+#define FLASH_LED1_ENABLEMENT 0x20
+#define FLASH_LED_DISABLE 0x00
+#define FLASH_LED_MIN_CURRENT_MA 13
+#define FLASH_SUBTYPE_DUAL 0x01
+#define FLASH_SUBTYPE_SINGLE 0x02
+
+/*
+ * ID represents physical LEDs for individual control purpose.
+ */
+enum flash_led_id {
+ FLASH_LED_0 = 0,
+ FLASH_LED_1,
+ FLASH_LED_SWITCH,
+};
+
+enum flash_led_type {
+ FLASH = 0,
+ TORCH,
+ SWITCH,
+};
+
+enum thermal_derate_rate {
+ RATE_1_PERCENT = 0,
+ RATE_1P25_PERCENT,
+ RATE_2_PERCENT,
+ RATE_2P5_PERCENT,
+ RATE_5_PERCENT,
+};
+
+enum current_ramp_steps {
+ RAMP_STEP_0P2_US = 0,
+ RAMP_STEP_0P4_US,
+ RAMP_STEP_0P8_US,
+ RAMP_STEP_1P6_US,
+ RAMP_STEP_3P3_US,
+ RAMP_STEP_6P7_US,
+ RAMP_STEP_13P5_US,
+ RAMP_STEP_27US,
+};
+
+struct flash_regulator_data {
+ struct regulator *regs;
+ const char *reg_name;
+ u32 max_volt_uv;
+};
+
+/*
+ * Configurations for each individual LED
+ */
+struct flash_node_data {
+ struct platform_device *pdev;
+ struct regmap *regmap;
+ struct led_classdev cdev;
+ struct work_struct work;
+ struct flash_regulator_data *reg_data;
+ u16 max_current;
+ u16 prgm_current;
+ u16 prgm_current2;
+ u16 duration;
+ u8 id;
+ u8 type;
+ u8 trigger;
+ u8 enable;
+ u8 num_regulators;
+ bool flash_on;
+};
+
+/*
+ * Flash LED configuration read from device tree
+ */
+struct flash_led_platform_data {
+ unsigned int temp_threshold_num;
+ unsigned int temp_derate_curr_num;
+ unsigned int *die_temp_derate_curr_ma;
+ unsigned int *die_temp_threshold_degc;
+ u16 ramp_up_step;
+ u16 ramp_dn_step;
+ u16 vph_pwr_droop_threshold;
+ u16 headroom;
+ u16 clamp_current;
+ u8 thermal_derate_threshold;
+ u8 vph_pwr_droop_debounce_time;
+ u8 startup_dly;
+ u8 thermal_derate_rate;
+ bool pmic_charger_support;
+ bool self_check_en;
+ bool thermal_derate_en;
+ bool current_ramp_en;
+ bool vph_pwr_droop_en;
+ bool hdrm_sns_ch0_en;
+ bool hdrm_sns_ch1_en;
+ bool power_detect_en;
+ bool mask3_en;
+ bool follow_rb_disable;
+ bool die_current_derate_en;
+};
+
+struct qpnp_flash_led_buffer {
+ struct mutex debugfs_lock; /* Prevent thread concurrency */
+ size_t rpos;
+ size_t wpos;
+ size_t len;
+ char data[0];
+};
+
+/*
+ * Flash LED data structure containing flash LED attributes
+ */
+struct qpnp_flash_led {
+ struct pmic_revid_data *revid_data;
+ struct platform_device *pdev;
+ struct regmap *regmap;
+ struct flash_led_platform_data *pdata;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *gpio_state_active;
+ struct pinctrl_state *gpio_state_suspend;
+ struct flash_node_data *flash_node;
+ struct power_supply *battery_psy;
+ struct workqueue_struct *ordered_workq;
+ struct qpnp_vadc_chip *vadc_dev;
+ struct mutex flash_led_lock;
+ struct qpnp_flash_led_buffer *log;
+ struct dentry *dbgfs_root;
+ int num_leds;
+ u32 buffer_cnt;
+ u16 base;
+ u16 current_addr;
+ u16 current2_addr;
+ u8 peripheral_type;
+ u8 fault_reg;
+ bool gpio_enabled;
+ bool charging_enabled;
+ bool strobe_debug;
+ bool dbg_feature_en;
+ bool open_fault;
+};
+
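+/* control-block register offsets dumped by the reg_dump sysfs attribute */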
+static u8 qpnp_flash_led_ctrl_dbg_regs[] = {
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
+ 0x4A, 0x4B, 0x4C, 0x4F, 0x51, 0x52, 0x54, 0x55, 0x5A, 0x5C, 0x5D,
+};
+
+static int flash_led_dbgfs_file_open(struct qpnp_flash_led *led,
+ struct file *file)
+{
+ struct qpnp_flash_led_buffer *log;
+ size_t logbufsize = SZ_4K;
+
+ log = kzalloc(logbufsize, GFP_KERNEL);
+ if (!log)
+ return -ENOMEM;
+
+ log->rpos = 0;
+ log->wpos = 0;
+ log->len = logbufsize - sizeof(*log);
+ mutex_init(&log->debugfs_lock);
+ led->log = log;
+
+ led->buffer_cnt = 1;
+ file->private_data = led;
+
+ return 0;
+}
+
+static int flash_led_dfs_open(struct inode *inode, struct file *file)
+{
+ struct qpnp_flash_led *led = inode->i_private;
+
+ return flash_led_dbgfs_file_open(led, file);
+}
+
+static int flash_led_dfs_close(struct inode *inode, struct file *file)
+{
+ struct qpnp_flash_led *led = file->private_data;
+
+ if (led && led->log) {
+ file->private_data = NULL;
+ mutex_destroy(&led->log->debugfs_lock);
+ kfree(led->log);
+ }
+
+ return 0;
+}
+
+#define MIN_BUFFER_WRITE_LEN 20
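+/* Append formatted text at the log write position; returns bytes written, or 0 if fewer than MIN_BUFFER_WRITE_LEN bytes remain. */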
+static int print_to_log(struct qpnp_flash_led_buffer *log,
+ const char *fmt, ...)
+{
+ va_list args;
+ int cnt;
+ char *log_buf;
+ size_t size = log->len - log->wpos;
+
+ if (size < MIN_BUFFER_WRITE_LEN)
+ return 0; /* not enough buffer left */
+
+ log_buf = &log->data[log->wpos];
+ va_start(args, fmt);
+ cnt = vscnprintf(log_buf, size, fmt, args);
+ va_end(args);
+
+ log->wpos += cnt;
+ return cnt;
+}
+
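+/* debugfs read: report the latched interrupt status register; buffer_cnt limits output to one reading per open */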
+static ssize_t flash_led_dfs_latched_reg_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *ppos) {
+ struct qpnp_flash_led *led = fp->private_data;
+ struct qpnp_flash_led_buffer *log = led->log;
+ uint val;
+ int rc = 0;
+ size_t len;
+ size_t ret;
+
+ mutex_lock(&log->debugfs_lock);
+ if ((log->rpos >= log->wpos && led->buffer_cnt == 0) ||
+ ((log->len - log->wpos) < MIN_BUFFER_WRITE_LEN))
+ goto unlock_mutex;
+
+ rc = regmap_read(led->regmap, INT_LATCHED_STS(led->base), &val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Unable to read from address %x, rc(%d)\n",
+ INT_LATCHED_STS(led->base), rc);
+ goto unlock_mutex;
+ }
+ led->buffer_cnt--;
+
+ rc = print_to_log(log, "0x%05X ", INT_LATCHED_STS(led->base));
+ if (rc == 0)
+ goto unlock_mutex;
+
+ rc = print_to_log(log, "0x%02X ", val);
+ if (rc == 0)
+ goto unlock_mutex;
+
+ if (log->wpos > 0 && log->data[log->wpos - 1] == ' ')
+ log->data[log->wpos - 1] = '\n';
+
+ len = min(count, log->wpos - log->rpos);
+
+ ret = copy_to_user(buf, &log->data[log->rpos], len);
+ if (ret) {
+ pr_err("error copy register value to user\n");
+ rc = -EFAULT;
+ goto unlock_mutex;
+ }
+
+ len -= ret;
+ *ppos += len;
+ log->rpos += len;
+
+ rc = len;
+
+unlock_mutex:
+ mutex_unlock(&log->debugfs_lock);
+ return rc;
+}
+
+static ssize_t flash_led_dfs_fault_reg_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *ppos) {
+ struct qpnp_flash_led *led = fp->private_data;
+ struct qpnp_flash_led_buffer *log = led->log;
+ int rc = 0;
+ size_t len;
+ size_t ret;
+
+ mutex_lock(&log->debugfs_lock);
+ if ((log->rpos >= log->wpos && led->buffer_cnt == 0) ||
+ ((log->len - log->wpos) < MIN_BUFFER_WRITE_LEN))
+ goto unlock_mutex;
+
+ led->buffer_cnt--;
+
+ rc = print_to_log(log, "0x%05X ", FLASH_LED_FAULT_STATUS(led->base));
+ if (rc == 0)
+ goto unlock_mutex;
+
+ rc = print_to_log(log, "0x%02X ", led->fault_reg);
+ if (rc == 0)
+ goto unlock_mutex;
+
+ if (log->wpos > 0 && log->data[log->wpos - 1] == ' ')
+ log->data[log->wpos - 1] = '\n';
+
+ len = min(count, log->wpos - log->rpos);
+
+ ret = copy_to_user(buf, &log->data[log->rpos], len);
+ if (ret) {
+ pr_err("error copy register value to user\n");
+ rc = -EFAULT;
+ goto unlock_mutex;
+ }
+
+ len -= ret;
+ *ppos += len;
+ log->rpos += len;
+
+ rc = len;
+
+unlock_mutex:
+ mutex_unlock(&log->debugfs_lock);
+ return rc;
+}
+
+static ssize_t flash_led_dfs_fault_reg_enable(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos) {
+
+ u8 *val;
+ int pos = 0;
+ int cnt = 0;
+ int data;
+ size_t ret = 0;
+
+ struct qpnp_flash_led *led = file->private_data;
+ char *kbuf;
+
+ mutex_lock(&led->log->debugfs_lock);
+ kbuf = kmalloc(count + 1, GFP_KERNEL);
+ if (!kbuf) {
+ ret = -ENOMEM;
+ goto unlock_mutex;
+ }
+
+ ret = copy_from_user(kbuf, buf, count);
+ if (ret == count) {
+ pr_err("failed to copy data from user\n");
+ ret = -EFAULT;
+ goto free_buf;
+ }
+
+ count -= ret;
+ *ppos += count;
+ kbuf[count] = '\0';
+ val = kbuf;
+ while (sscanf(kbuf + pos, "%i", &data) == 1) {
+ pos++;
+ val[cnt++] = data & 0xff;
+ }
+
+ if (!cnt)
+ goto free_buf;
+
+ ret = count;
+ if (*val == 1)
+ led->strobe_debug = true;
+ else
+ led->strobe_debug = false;
+
+free_buf:
+ kfree(kbuf);
+unlock_mutex:
+ mutex_unlock(&led->log->debugfs_lock);
+ return ret;
+}
+
+static ssize_t flash_led_dfs_dbg_enable(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos) {
+
+ u8 *val;
+ int pos = 0;
+ int cnt = 0;
+ int data;
+ size_t ret = 0;
+ struct qpnp_flash_led *led = file->private_data;
+ char *kbuf;
+
+ mutex_lock(&led->log->debugfs_lock);
+ kbuf = kmalloc(count + 1, GFP_KERNEL);
+ if (!kbuf) {
+ ret = -ENOMEM;
+ goto unlock_mutex;
+ }
+
+ ret = copy_from_user(kbuf, buf, count);
+ if (ret == count) {
+ pr_err("failed to copy data from user\n");
+ ret = -EFAULT;
+ goto free_buf;
+ }
+ count -= ret;
+ *ppos += count;
+ kbuf[count] = '\0';
+ val = kbuf;
+ while (sscanf(kbuf + pos, "%i", &data) == 1) {
+ pos++;
+ val[cnt++] = data & 0xff;
+ }
+
+ if (!cnt)
+ goto free_buf;
+
+ ret = count;
+ if (*val == 1)
+ led->dbg_feature_en = true;
+ else
+ led->dbg_feature_en = false;
+
+free_buf:
+ kfree(kbuf);
+unlock_mutex:
+ mutex_unlock(&led->log->debugfs_lock);
+ return ret;
+}
+
+static const struct file_operations flash_led_dfs_latched_reg_fops = {
+ .open = flash_led_dfs_open,
+ .release = flash_led_dfs_close,
+ .read = flash_led_dfs_latched_reg_read,
+};
+
+static const struct file_operations flash_led_dfs_strobe_reg_fops = {
+ .open = flash_led_dfs_open,
+ .release = flash_led_dfs_close,
+ .read = flash_led_dfs_fault_reg_read,
+ .write = flash_led_dfs_fault_reg_enable,
+};
+
+static const struct file_operations flash_led_dfs_dbg_feature_fops = {
+ .open = flash_led_dfs_open,
+ .release = flash_led_dfs_close,
+ .write = flash_led_dfs_dbg_enable,
+};
+
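+/* masked read-modify-write of a flash LED register via regmap */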
+static int
+qpnp_led_masked_write(struct qpnp_flash_led *led, u16 addr, u8 mask, u8 val)
+{
+ int rc;
+
+ rc = regmap_update_bits(led->regmap, addr, mask, val);
+ if (rc)
+ dev_err(&led->pdev->dev,
+ "Unable to update_bits to addr=%x, rc(%d)\n", addr, rc);
+
+ dev_dbg(&led->pdev->dev, "Write 0x%02X to addr 0x%02X\n", val, addr);
+
+ return rc;
+}
+
+static int qpnp_flash_led_get_allowed_die_temp_curr(struct qpnp_flash_led *led,
+ int64_t die_temp_degc)
+{
+ int die_temp_curr_ma;
+
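+ /* walk thresholds from hottest to coolest: the hotter the die, the less current is allowed */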
+ if (die_temp_degc >= led->pdata->die_temp_threshold_degc[0])
+ die_temp_curr_ma = 0;
+ else if (die_temp_degc >= led->pdata->die_temp_threshold_degc[1])
+ die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[0];
+ else if (die_temp_degc >= led->pdata->die_temp_threshold_degc[2])
+ die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[1];
+ else if (die_temp_degc >= led->pdata->die_temp_threshold_degc[3])
+ die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[2];
+ else if (die_temp_degc >= led->pdata->die_temp_threshold_degc[4])
+ die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[3];
+ else
+ die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[4];
+
+ return die_temp_curr_ma;
+}
+
+static int64_t qpnp_flash_led_get_die_temp(struct qpnp_flash_led *led)
+{
+ struct qpnp_vadc_result die_temp_result;
+ int rc;
+
+ rc = qpnp_vadc_read(led->vadc_dev, SPARE2, &die_temp_result);
+ if (rc) {
+ pr_err("failed to read the die temp\n");
+ return -EINVAL;
+ }
+
+ return die_temp_result.physical;
+}
+
+static int qpnp_get_pmic_revid(struct qpnp_flash_led *led)
+{
+ struct device_node *revid_dev_node;
+
+ revid_dev_node = of_parse_phandle(led->pdev->dev.of_node,
+ "qcom,pmic-revid", 0);
+ if (!revid_dev_node) {
+ dev_err(&led->pdev->dev,
+ "qcom,pmic-revid property missing\n");
+ return -EINVAL;
+ }
+
+ led->revid_data = get_revid_data(revid_dev_node);
+ if (IS_ERR(led->revid_data)) {
+ pr_err("Couldn't get revid data rc = %ld\n",
+ PTR_ERR(led->revid_data));
+ return PTR_ERR(led->revid_data);
+ }
+
+ return 0;
+}
+
+static int
+qpnp_flash_led_get_max_avail_current(struct flash_node_data *flash_node,
+ struct qpnp_flash_led *led)
+{
+ union power_supply_propval prop;
+ int64_t chg_temp_milidegc, die_temp_degc;
+ int max_curr_avail_ma = 2000;
+ int allowed_die_temp_curr_ma = 2000;
+ int rc;
+
+ if (led->pdata->power_detect_en) {
+ if (!led->battery_psy) {
+ dev_err(&led->pdev->dev,
+ "Failed to query power supply\n");
+ return -EINVAL;
+ }
+
+ /*
+ * When charging is enabled, enforce this new enablement
+ * sequence to reduce fuel gauge reading resolution.
+ */
+ if (led->charging_enabled) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_MODULE_ENABLE_CTRL(led->base),
+ FLASH_MODULE_ENABLE, FLASH_MODULE_ENABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Module enable reg write failed\n");
+ return -EINVAL;
+ }
+
+ usleep_range(FLASH_LED_CURRENT_READING_DELAY_MIN,
+ FLASH_LED_CURRENT_READING_DELAY_MAX);
+ }
+
+ power_supply_get_property(led->battery_psy,
+ POWER_SUPPLY_PROP_FLASH_CURRENT_MAX, &prop);
+ if (!prop.intval) {
+ dev_err(&led->pdev->dev,
+ "battery too low for flash\n");
+ return -EINVAL;
+ }
+
+ max_curr_avail_ma = (prop.intval / FLASH_LED_UA_PER_MA);
+ }
+
+ /*
+ * When thermal mitigation is available, this logic will execute to
+ * derate current based upon the PMIC die temperature.
+ */
+ if (led->pdata->die_current_derate_en) {
+ chg_temp_milidegc = qpnp_flash_led_get_die_temp(led);
+ if (chg_temp_milidegc < 0)
+ return -EINVAL;
+
+ die_temp_degc = div_s64(chg_temp_milidegc, 1000);
+ allowed_die_temp_curr_ma =
+ qpnp_flash_led_get_allowed_die_temp_curr(led,
+ die_temp_degc);
+ if (allowed_die_temp_curr_ma < 0)
+ return -EINVAL;
+ }
+
+ max_curr_avail_ma = (max_curr_avail_ma >= allowed_die_temp_curr_ma)
+ ? allowed_die_temp_curr_ma : max_curr_avail_ma;
+
+ return max_curr_avail_ma;
+}
+
+static ssize_t qpnp_flash_led_die_temp_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct qpnp_flash_led *led;
+ struct flash_node_data *flash_node;
+ unsigned long val;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->pdev->dev);
+
+ /* '0' disables the die_temp derate feature; non-zero enables it */
+ if (val == 0)
+ led->pdata->die_current_derate_en = false;
+ else
+ led->pdata->die_current_derate_en = true;
+
+ return count;
+}
+
+static ssize_t qpnp_led_strobe_type_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct flash_node_data *flash_node;
+ unsigned long state;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ ssize_t ret = -EINVAL;
+
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+
+ /* '0' for sw strobe; '1' for hw strobe */
+ if (state == 1)
+ flash_node->trigger |= FLASH_LED_STROBE_TYPE_HW;
+ else
+ flash_node->trigger &= ~FLASH_LED_STROBE_TYPE_HW;
+
+ return count;
+}
+
+static ssize_t qpnp_flash_led_dump_regs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qpnp_flash_led *led;
+ struct flash_node_data *flash_node;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ int rc, i, count = 0;
+ u16 addr;
+ uint val;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->pdev->dev);
+ for (i = 0; i < ARRAY_SIZE(qpnp_flash_led_ctrl_dbg_regs); i++) {
+ addr = led->base + qpnp_flash_led_ctrl_dbg_regs[i];
+ rc = regmap_read(led->regmap, addr, &val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Unable to read from addr=%x, rc(%d)\n",
+ addr, rc);
+ return -EINVAL;
+ }
+
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "REG_0x%x = 0x%02x\n", addr, val);
+
+ if (count >= PAGE_SIZE)
+ return PAGE_SIZE - 1;
+ }
+
+ return count;
+}
+
+static ssize_t qpnp_flash_led_current_derate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct qpnp_flash_led *led;
+ struct flash_node_data *flash_node;
+ unsigned long val;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->pdev->dev);
+
+ /* '0' disables the power-detect derate feature; non-zero enables it */
+ if (val == 0)
+ led->pdata->power_detect_en = false;
+ else
+ led->pdata->power_detect_en = true;
+
+ return count;
+}
+
+static ssize_t qpnp_flash_led_max_current_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qpnp_flash_led *led;
+ struct flash_node_data *flash_node;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ int max_curr_avail_ma = 0;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->pdev->dev);
+
+ if (led->flash_node[0].flash_on)
+ max_curr_avail_ma += led->flash_node[0].max_current;
+ if (led->flash_node[1].flash_on)
+ max_curr_avail_ma += led->flash_node[1].max_current;
+
+ if (led->pdata->power_detect_en ||
+ led->pdata->die_current_derate_en) {
+ max_curr_avail_ma =
+ qpnp_flash_led_get_max_avail_current(flash_node, led);
+
+ if (max_curr_avail_ma < 0)
+ return -EINVAL;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", max_curr_avail_ma);
+}
+
+static struct device_attribute qpnp_flash_led_attrs[] = {
+ __ATTR(strobe, 0664, NULL, qpnp_led_strobe_type_store),
+ __ATTR(reg_dump, 0664, qpnp_flash_led_dump_regs_show, NULL),
+ __ATTR(enable_current_derate, 0664, NULL,
+ qpnp_flash_led_current_derate_store),
+ __ATTR(max_allowed_current, 0664, qpnp_flash_led_max_current_show,
+ NULL),
+ __ATTR(enable_die_temp_current_derate, 0664, NULL,
+ qpnp_flash_led_die_temp_store),
+};
+
+static int qpnp_flash_led_get_thermal_derate_rate(const char *rate)
+{
+ /*
+ * Return the 5% derate rate as the default if the user
+ * specifies an unsupported value.
+ */
+ if (strcmp(rate, "1_PERCENT") == 0)
+ return RATE_1_PERCENT;
+ else if (strcmp(rate, "1P25_PERCENT") == 0)
+ return RATE_1P25_PERCENT;
+ else if (strcmp(rate, "2_PERCENT") == 0)
+ return RATE_2_PERCENT;
+ else if (strcmp(rate, "2P5_PERCENT") == 0)
+ return RATE_2P5_PERCENT;
+ else if (strcmp(rate, "5_PERCENT") == 0)
+ return RATE_5_PERCENT;
+ else
+ return RATE_5_PERCENT;
+}
+
+static int qpnp_flash_led_get_ramp_step(const char *step)
+{
+ /*
+ * Return the 27 us ramp step as the default if the user
+ * specifies an unsupported value.
+ */
+ if (strcmp(step, "0P2_US") == 0)
+ return RAMP_STEP_0P2_US;
+ else if (strcmp(step, "0P4_US") == 0)
+ return RAMP_STEP_0P4_US;
+ else if (strcmp(step, "0P8_US") == 0)
+ return RAMP_STEP_0P8_US;
+ else if (strcmp(step, "1P6_US") == 0)
+ return RAMP_STEP_1P6_US;
+ else if (strcmp(step, "3P3_US") == 0)
+ return RAMP_STEP_3P3_US;
+ else if (strcmp(step, "6P7_US") == 0)
+ return RAMP_STEP_6P7_US;
+ else if (strcmp(step, "13P5_US") == 0)
+ return RAMP_STEP_13P5_US;
+ else
+ return RAMP_STEP_27US;
+}
+
+static u8 qpnp_flash_led_get_droop_debounce_time(u8 val)
+{
+ /*
+ * Return the 10 us debounce time as the default if the user
+ * specifies an unsupported value.
+ */
+ switch (val) {
+ case 0:
+ return 0;
+ case 10:
+ return 1;
+ case 32:
+ return 2;
+ case 64:
+ return 3;
+ default:
+ return 1;
+ }
+}
+
+static u8 qpnp_flash_led_get_startup_dly(u8 val)
+{
+ /*
+ * Return the 128 us startup delay as the default if the user
+ * specifies an unsupported value.
+ */
+ switch (val) {
+ case 10:
+ return 0;
+ case 32:
+ return 1;
+ case 64:
+ return 2;
+ case 128:
+ return 3;
+ default:
+ return 3;
+ }
+}
+
+static int
+qpnp_flash_led_get_peripheral_type(struct qpnp_flash_led *led)
+{
+ int rc;
+ uint val;
+
+ rc = regmap_read(led->regmap,
+ FLASH_LED_PERIPHERAL_SUBTYPE(led->base), &val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Unable to read peripheral subtype\n");
+ return -EINVAL;
+ }
+
+ return val;
+}
+
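+/* disable the module once no other channel still holds a strobe trigger */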
+static int qpnp_flash_led_module_disable(struct qpnp_flash_led *led,
+ struct flash_node_data *flash_node)
+{
+ union power_supply_propval psy_prop;
+ int rc;
+ uint val, tmp;
+
+ rc = regmap_read(led->regmap, FLASH_LED_STROBE_CTRL(led->base), &val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Unable to read strobe reg\n");
+ return -EINVAL;
+ }
+
+ tmp = (~flash_node->trigger) & val;
+ if (!tmp) {
+ if (flash_node->type == TORCH) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_UNLOCK_SECURE(led->base),
+ FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Secure reg write failed\n");
+ return -EINVAL;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_TORCH(led->base),
+ FLASH_TORCH_MASK, FLASH_LED_TORCH_DISABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Torch reg write failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (led->battery_psy &&
+ led->revid_data->pmic_subtype == PMI8996_SUBTYPE &&
+ !led->revid_data->rev3) {
+ psy_prop.intval = false;
+ rc = power_supply_set_property(led->battery_psy,
+ POWER_SUPPLY_PROP_FLASH_TRIGGER,
+ &psy_prop);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Failed to enble charger i/p current limit\n");
+ return -EINVAL;
+ }
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_MODULE_ENABLE_CTRL(led->base),
+ FLASH_MODULE_ENABLE_MASK,
+ FLASH_LED_MODULE_CTRL_DEFAULT);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Module disable failed\n");
+ return -EINVAL;
+ }
+
+ if (led->pinctrl) {
+ rc = pinctrl_select_state(led->pinctrl,
+ led->gpio_state_suspend);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "failed to disable GPIO\n");
+ return -EINVAL;
+ }
+ led->gpio_enabled = false;
+ }
+
+ if (led->battery_psy) {
+ psy_prop.intval = false;
+ rc = power_supply_set_property(led->battery_psy,
+ POWER_SUPPLY_PROP_FLASH_ACTIVE,
+ &psy_prop);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Failed to setup OTG pulse skip enable\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ if (flash_node->trigger & FLASH_LED0_TRIGGER) {
+ rc = qpnp_led_masked_write(led,
+ led->current_addr,
+ FLASH_CURRENT_MASK, 0x00);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "current register write failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (flash_node->trigger & FLASH_LED1_TRIGGER) {
+ rc = qpnp_led_masked_write(led,
+ led->current2_addr,
+ FLASH_CURRENT_MASK, 0x00);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "current register write failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (flash_node->id == FLASH_LED_SWITCH)
+ flash_node->trigger &= FLASH_LED_STROBE_TYPE_HW;
+
+ return 0;
+}
+
+static enum
+led_brightness qpnp_flash_led_brightness_get(struct led_classdev *led_cdev)
+{
+ return led_cdev->brightness;
+}
+
+static int flash_regulator_parse_dt(struct qpnp_flash_led *led,
+ struct flash_node_data *flash_node) {
+
+ int i = 0, rc;
+ struct device_node *node = flash_node->cdev.dev->of_node;
+ struct device_node *temp = NULL;
+ const char *temp_string;
+ u32 val;
+
+ flash_node->reg_data = devm_kzalloc(&led->pdev->dev,
+ sizeof(struct flash_regulator_data *) *
+ flash_node->num_regulators,
+ GFP_KERNEL);
+ if (!flash_node->reg_data) {
+ dev_err(&led->pdev->dev,
+ "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ for_each_child_of_node(node, temp) {
+ rc = of_property_read_string(temp, "regulator-name",
+ &temp_string);
+ if (!rc)
+ flash_node->reg_data[i].reg_name = temp_string;
+ else {
+ dev_err(&led->pdev->dev,
+ "Unable to read regulator name\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(temp, "max-voltage", &val);
+ if (!rc) {
+ flash_node->reg_data[i].max_volt_uv = val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read max voltage\n");
+ return rc;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
+static int flash_regulator_setup(struct qpnp_flash_led *led,
+ struct flash_node_data *flash_node, bool on)
+{
+ int i, rc = 0;
+
+ if (on == false) {
+ i = flash_node->num_regulators;
+ goto error_regulator_setup;
+ }
+
+ for (i = 0; i < flash_node->num_regulators; i++) {
+ flash_node->reg_data[i].regs =
+ regulator_get(flash_node->cdev.dev,
+ flash_node->reg_data[i].reg_name);
+ if (IS_ERR(flash_node->reg_data[i].regs)) {
+ rc = PTR_ERR(flash_node->reg_data[i].regs);
+ dev_err(&led->pdev->dev,
+ "Failed to get regulator\n");
+ goto error_regulator_setup;
+ }
+
+ if (regulator_count_voltages(flash_node->reg_data[i].regs)
+ > 0) {
+ rc = regulator_set_voltage(flash_node->reg_data[i].regs,
+ flash_node->reg_data[i].max_volt_uv,
+ flash_node->reg_data[i].max_volt_uv);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "regulator set voltage failed\n");
+ regulator_put(flash_node->reg_data[i].regs);
+ goto error_regulator_setup;
+ }
+ }
+ }
+
+ return rc;
+
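+/* unwind in reverse: restore voltage requests and release each regulator acquired so far */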
+error_regulator_setup:
+ while (i--) {
+ if (regulator_count_voltages(flash_node->reg_data[i].regs)
+ > 0) {
+ regulator_set_voltage(flash_node->reg_data[i].regs,
+ 0, flash_node->reg_data[i].max_volt_uv);
+ }
+
+ regulator_put(flash_node->reg_data[i].regs);
+ }
+
+ return rc;
+}
+
+static int flash_regulator_enable(struct qpnp_flash_led *led,
+ struct flash_node_data *flash_node, bool on)
+{
+ int i, rc = 0;
+
+ if (on == false) {
+ i = flash_node->num_regulators;
+ goto error_regulator_enable;
+ }
+
+ for (i = 0; i < flash_node->num_regulators; i++) {
+ rc = regulator_enable(flash_node->reg_data[i].regs);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "regulator enable failed\n");
+ goto error_regulator_enable;
+ }
+ }
+
+ return rc;
+
+error_regulator_enable:
+ while (i--)
+ regulator_disable(flash_node->reg_data[i].regs);
+
+ return rc;
+}
+
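+/* Trigger-facing helper: options is a bitmask of ENABLE_REGULATOR, DISABLE_REGULATOR and QUERY_MAX_CURRENT; the queried limit is returned through *max_current. */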
+int qpnp_flash_led_prepare(struct led_trigger *trig, int options,
+ int *max_current)
+{
+ struct led_classdev *led_cdev = trigger_to_lcdev(trig);
+ struct flash_node_data *flash_node;
+ struct qpnp_flash_led *led;
+ int rc;
+
+ if (!led_cdev) {
+ pr_err("Invalid led_trigger provided\n");
+ return -EINVAL;
+ }
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->pdev->dev);
+
+ if (!(options & FLASH_LED_PREPARE_OPTIONS_MASK)) {
+ dev_err(&led->pdev->dev, "Invalid options %d\n", options);
+ return -EINVAL;
+ }
+
+ if (options & ENABLE_REGULATOR) {
+ rc = flash_regulator_enable(led, flash_node, true);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev,
+ "enable regulator failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ if (options & DISABLE_REGULATOR) {
+ rc = flash_regulator_enable(led, flash_node, false);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev,
+ "disable regulator failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ if (options & QUERY_MAX_CURRENT) {
+ rc = qpnp_flash_led_get_max_avail_current(flash_node, led);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev,
+ "query max current failed, rc=%d\n", rc);
+ return rc;
+ }
+ *max_current = rc;
+ }
+
+ return 0;
+}
+
+static void qpnp_flash_led_work(struct work_struct *work)
+{
+ struct flash_node_data *flash_node = container_of(work,
+ struct flash_node_data, work);
+ struct qpnp_flash_led *led = dev_get_drvdata(&flash_node->pdev->dev);
+ union power_supply_propval psy_prop;
+ int rc, brightness = flash_node->cdev.brightness;
+ int max_curr_avail_ma = 0;
+ int total_curr_ma = 0;
+ int i;
+ u8 val;
+ uint temp;
+
+ mutex_lock(&led->flash_led_lock);
+
+ if (!brightness)
+ goto turn_off;
+
+ if (led->open_fault) {
+ dev_err(&led->pdev->dev, "Open fault detected\n");
+ mutex_unlock(&led->flash_led_lock);
+ return;
+ }
+
+ if (!flash_node->flash_on && flash_node->num_regulators > 0) {
+ rc = flash_regulator_enable(led, flash_node, true);
+ if (rc) {
+ mutex_unlock(&led->flash_led_lock);
+ return;
+ }
+ }
+
+ if (!led->gpio_enabled && led->pinctrl) {
+ rc = pinctrl_select_state(led->pinctrl,
+ led->gpio_state_active);
+ if (rc) {
+ dev_err(&led->pdev->dev, "failed to enable GPIO\n");
+ goto error_enable_gpio;
+ }
+ led->gpio_enabled = true;
+ }
+
+ if (led->dbg_feature_en) {
+ rc = qpnp_led_masked_write(led,
+ INT_SET_TYPE(led->base),
+ FLASH_STATUS_REG_MASK, 0x1F);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "INT_SET_TYPE write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ IN_POLARITY_HIGH(led->base),
+ FLASH_STATUS_REG_MASK, 0x1F);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "IN_POLARITY_HIGH write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ INT_EN_SET(led->base),
+ FLASH_STATUS_REG_MASK, 0x1F);
+ if (rc) {
+ dev_err(&led->pdev->dev, "INT_EN_SET write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ INT_LATCHED_CLR(led->base),
+ FLASH_STATUS_REG_MASK, 0x1F);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "INT_LATCHED_CLR write failed\n");
+ goto exit_flash_led_work;
+ }
+ }
+
+ if (led->flash_node[led->num_leds - 1].id == FLASH_LED_SWITCH &&
+ flash_node->id != FLASH_LED_SWITCH) {
+ led->flash_node[led->num_leds - 1].trigger |=
+ (0x80 >> flash_node->id);
+ if (flash_node->id == FLASH_LED_0)
+ led->flash_node[led->num_leds - 1].prgm_current =
+ flash_node->prgm_current;
+ else if (flash_node->id == FLASH_LED_1)
+ led->flash_node[led->num_leds - 1].prgm_current2 =
+ flash_node->prgm_current;
+ }
+
+ if (flash_node->type == TORCH) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_UNLOCK_SECURE(led->base),
+ FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Secure reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_TORCH(led->base),
+ FLASH_TORCH_MASK, FLASH_LED_TORCH_ENABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Torch reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ if (flash_node->id == FLASH_LED_SWITCH) {
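+ /* scale the request into the 4-bit torch range: e.g. 100 mA with a 200 mA max gives (100 * 15) / 200 = 7 */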
+ val = (u8)(flash_node->prgm_current *
+ FLASH_TORCH_MAX_LEVEL
+ / flash_node->max_current);
+ rc = qpnp_led_masked_write(led,
+ led->current_addr,
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Torch reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ val = (u8)(flash_node->prgm_current2 *
+ FLASH_TORCH_MAX_LEVEL
+ / flash_node->max_current);
+ rc = qpnp_led_masked_write(led,
+ led->current2_addr,
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Torch reg write failed\n");
+ goto exit_flash_led_work;
+ }
+ } else {
+ val = (u8)(flash_node->prgm_current *
+ FLASH_TORCH_MAX_LEVEL /
+ flash_node->max_current);
+ if (flash_node->id == FLASH_LED_0) {
+ rc = qpnp_led_masked_write(led,
+ led->current_addr,
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "current reg write failed\n");
+ goto exit_flash_led_work;
+ }
+ } else {
+ rc = qpnp_led_masked_write(led,
+ led->current2_addr,
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "current reg write failed\n");
+ goto exit_flash_led_work;
+ }
+ }
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_MAX_CURRENT(led->base),
+ FLASH_CURRENT_MASK, FLASH_TORCH_MAX_LEVEL);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Max current reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_MODULE_ENABLE_CTRL(led->base),
+ FLASH_MODULE_ENABLE_MASK, FLASH_MODULE_ENABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Module enable reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ if (led->pdata->hdrm_sns_ch0_en ||
+ led->pdata->hdrm_sns_ch1_en) {
+ if (flash_node->id == FLASH_LED_SWITCH) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL0(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ flash_node->trigger &
+ FLASH_LED0_TRIGGER ?
+ FLASH_LED_HDRM_SNS_ENABLE :
+ FLASH_LED_HDRM_SNS_DISABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense enable failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL1(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ flash_node->trigger &
+ FLASH_LED1_TRIGGER ?
+ FLASH_LED_HDRM_SNS_ENABLE :
+ FLASH_LED_HDRM_SNS_DISABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense enable failed\n");
+ goto exit_flash_led_work;
+ }
+ } else if (flash_node->id == FLASH_LED_0) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL0(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ FLASH_LED_HDRM_SNS_ENABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense disable failed\n");
+ goto exit_flash_led_work;
+ }
+ } else if (flash_node->id == FLASH_LED_1) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL1(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ FLASH_LED_HDRM_SNS_ENABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense disable failed\n");
+ goto exit_flash_led_work;
+ }
+ }
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_STROBE_CTRL(led->base),
+ (flash_node->id == FLASH_LED_SWITCH ? FLASH_STROBE_MASK
+ | FLASH_LED_STROBE_TYPE_HW
+ : flash_node->trigger |
+ FLASH_LED_STROBE_TYPE_HW),
+ flash_node->trigger);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Strobe reg write failed\n");
+ goto exit_flash_led_work;
+ }
+ } else if (flash_node->type == FLASH) {
+ if (flash_node->trigger & FLASH_LED0_TRIGGER)
+ max_curr_avail_ma += flash_node->max_current;
+ if (flash_node->trigger & FLASH_LED1_TRIGGER)
+ max_curr_avail_ma += flash_node->max_current;
+
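+ /* Flag flash as active to the charger so it can set up OTG pulse skipping. */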
+ psy_prop.intval = true;
+ rc = power_supply_set_property(led->battery_psy,
+ POWER_SUPPLY_PROP_FLASH_ACTIVE,
+ &psy_prop);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Failed to setup OTG pulse skip enable\n");
+ goto exit_flash_led_work;
+ }
+
+ if (led->pdata->power_detect_en ||
+ led->pdata->die_current_derate_en) {
+ if (led->battery_psy) {
+ power_supply_get_property(led->battery_psy,
+ POWER_SUPPLY_PROP_STATUS,
+ &psy_prop);
+ if (psy_prop.intval < 0) {
+ dev_err(&led->pdev->dev,
+ "Invalid battery status\n");
+ goto exit_flash_led_work;
+ }
+
+ if (psy_prop.intval ==
+ POWER_SUPPLY_STATUS_CHARGING)
+ led->charging_enabled = true;
+ else if (psy_prop.intval ==
+ POWER_SUPPLY_STATUS_DISCHARGING
+ || psy_prop.intval ==
+ POWER_SUPPLY_STATUS_NOT_CHARGING)
+ led->charging_enabled = false;
+ }
+ max_curr_avail_ma =
+ qpnp_flash_led_get_max_avail_current
+ (flash_node, led);
+ if (max_curr_avail_ma < 0) {
+ dev_err(&led->pdev->dev,
+ "Failed to get max avail curr\n");
+ goto exit_flash_led_work;
+ }
+ }
+
+ if (flash_node->id == FLASH_LED_SWITCH) {
+ if (flash_node->trigger & FLASH_LED0_TRIGGER)
+ total_curr_ma += flash_node->prgm_current;
+ if (flash_node->trigger & FLASH_LED1_TRIGGER)
+ total_curr_ma += flash_node->prgm_current2;
+
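+ /*
+ * If the battery cannot source the combined request, derate both
+ * channels proportionally so their sum fits max_curr_avail_ma.
+ */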
+ if (max_curr_avail_ma < total_curr_ma) {
+ flash_node->prgm_current =
+ (flash_node->prgm_current *
+ max_curr_avail_ma) / total_curr_ma;
+ flash_node->prgm_current2 =
+ (flash_node->prgm_current2 *
+ max_curr_avail_ma) / total_curr_ma;
+ }
+
+ val = (u8)(flash_node->prgm_current *
+ FLASH_MAX_LEVEL / flash_node->max_current);
+ rc = qpnp_led_masked_write(led,
+ led->current_addr, FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Current register write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ val = (u8)(flash_node->prgm_current2 *
+ FLASH_MAX_LEVEL / flash_node->max_current);
+ rc = qpnp_led_masked_write(led,
+ led->current2_addr, FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Current register write failed\n");
+ goto exit_flash_led_work;
+ }
+ } else {
+ if (max_curr_avail_ma < flash_node->prgm_current) {
+ dev_err(&led->pdev->dev,
+ "battery only supprots %d mA\n",
+ max_curr_avail_ma);
+ flash_node->prgm_current =
+ (u16)max_curr_avail_ma;
+ }
+
+ val = (u8)(flash_node->prgm_current *
+ FLASH_MAX_LEVEL
+ / flash_node->max_current);
+ if (flash_node->id == FLASH_LED_0) {
+ rc = qpnp_led_masked_write(
+ led,
+ led->current_addr,
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "current reg write failed\n");
+ goto exit_flash_led_work;
+ }
+ } else if (flash_node->id == FLASH_LED_1) {
+ rc = qpnp_led_masked_write(
+ led,
+ led->current2_addr,
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "current reg write failed\n");
+ goto exit_flash_led_work;
+ }
+ }
+ }
+
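+ /* Safety timer code is zero-based in steps of FLASH_DURATION_DIVIDER ms. */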
+ val = (u8)((flash_node->duration - FLASH_DURATION_DIVIDER)
+ / FLASH_DURATION_DIVIDER);
+ rc = qpnp_led_masked_write(led,
+ FLASH_SAFETY_TIMER(led->base),
+ FLASH_SAFETY_TIMER_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Safety timer reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_MAX_CURRENT(led->base),
+ FLASH_CURRENT_MASK, FLASH_MAX_LEVEL);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Max current reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ if (!led->charging_enabled) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_MODULE_ENABLE_CTRL(led->base),
+ FLASH_MODULE_ENABLE, FLASH_MODULE_ENABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Module enable reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ usleep_range(FLASH_RAMP_UP_DELAY_US_MIN,
+ FLASH_RAMP_UP_DELAY_US_MAX);
+ }
+
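+ /*
+ * Pre-rev3 PMI8996 parts need the charger input current limit
+ * relaxed via the FLASH_TRIGGER property before strobing.
+ */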
+ if (led->revid_data->pmic_subtype == PMI8996_SUBTYPE &&
+ !led->revid_data->rev3) {
+ rc = power_supply_set_property(led->battery_psy,
+ POWER_SUPPLY_PROP_FLASH_TRIGGER,
+ &psy_prop);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Failed to disable charger i/p curr limit\n");
+ goto exit_flash_led_work;
+ }
+ }
+
+ if (led->pdata->hdrm_sns_ch0_en ||
+ led->pdata->hdrm_sns_ch1_en) {
+ if (flash_node->id == FLASH_LED_SWITCH) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL0(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ (flash_node->trigger &
+ FLASH_LED0_TRIGGER ?
+ FLASH_LED_HDRM_SNS_ENABLE :
+ FLASH_LED_HDRM_SNS_DISABLE));
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense enable failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL1(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ (flash_node->trigger &
+ FLASH_LED1_TRIGGER ?
+ FLASH_LED_HDRM_SNS_ENABLE :
+ FLASH_LED_HDRM_SNS_DISABLE));
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense enable failed\n");
+ goto exit_flash_led_work;
+ }
+ } else if (flash_node->id == FLASH_LED_0) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL0(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ FLASH_LED_HDRM_SNS_ENABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense disable failed\n");
+ goto exit_flash_led_work;
+ }
+ } else if (flash_node->id == FLASH_LED_1) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL1(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ FLASH_LED_HDRM_SNS_ENABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense disable failed\n");
+ goto exit_flash_led_work;
+ }
+ }
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_STROBE_CTRL(led->base),
+ (flash_node->id == FLASH_LED_SWITCH ? FLASH_STROBE_MASK
+ | FLASH_LED_STROBE_TYPE_HW
+ : flash_node->trigger |
+ FLASH_LED_STROBE_TYPE_HW),
+ flash_node->trigger);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Strobe reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ if (led->strobe_debug && led->dbg_feature_en) {
+ udelay(2000);
+ rc = regmap_read(led->regmap,
+ FLASH_LED_FAULT_STATUS(led->base),
+ &temp);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Unable to read from addr= %x, rc(%d)\n",
+ FLASH_LED_FAULT_STATUS(led->base), rc);
+ goto exit_flash_led_work;
+ }
+ led->fault_reg = temp;
+ }
+ } else {
+ pr_err("Both Torch and Flash cannot be select at same time\n");
+ for (i = 0; i < led->num_leds; i++)
+ led->flash_node[i].flash_on = false;
+ goto turn_off;
+ }
+
+ flash_node->flash_on = true;
+ mutex_unlock(&led->flash_led_lock);
+
+ return;
+
+turn_off:
+ if (led->flash_node[led->num_leds - 1].id == FLASH_LED_SWITCH &&
+ flash_node->id != FLASH_LED_SWITCH)
+ led->flash_node[led->num_leds - 1].trigger &=
+ ~(0x80 >> flash_node->id);
+ if (flash_node->type == TORCH) {
+ /*
+ * Checking LED fault status detects hardware open fault.
+ * If fault occurs, all subsequent LED enablement requests
+ * will be rejected to protect hardware.
+ */
+ rc = regmap_read(led->regmap,
+ FLASH_LED_FAULT_STATUS(led->base), &temp);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Failed to read out fault status register\n");
+ goto exit_flash_led_work;
+ }
+
+ led->open_fault |= (val & FLASH_LED_OPEN_FAULT_DETECTED);
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_STROBE_CTRL(led->base),
+ (flash_node->id == FLASH_LED_SWITCH ? FLASH_STROBE_MASK
+ | FLASH_LED_STROBE_TYPE_HW
+ : flash_node->trigger
+ | FLASH_LED_STROBE_TYPE_HW),
+ FLASH_LED_DISABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Strobe disable failed\n");
+ goto exit_flash_led_work;
+ }
+
+ usleep_range(FLASH_RAMP_DN_DELAY_US_MIN, FLASH_RAMP_DN_DELAY_US_MAX);
+exit_flash_hdrm_sns:
+ if (led->pdata->hdrm_sns_ch0_en) {
+ if (flash_node->id == FLASH_LED_0 ||
+ flash_node->id == FLASH_LED_SWITCH) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL0(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ FLASH_LED_HDRM_SNS_DISABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense disable failed\n");
+ goto exit_flash_hdrm_sns;
+ }
+ }
+ }
+
+ if (led->pdata->hdrm_sns_ch1_en) {
+ if (flash_node->id == FLASH_LED_1 ||
+ flash_node->id == FLASH_LED_SWITCH) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL1(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ FLASH_LED_HDRM_SNS_DISABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense disable failed\n");
+ goto exit_flash_hdrm_sns;
+ }
+ }
+ }
+exit_flash_led_work:
+ rc = qpnp_flash_led_module_disable(led, flash_node);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Module disable failed\n");
+ goto exit_flash_led_work;
+ }
+error_enable_gpio:
+ if (flash_node->flash_on && flash_node->num_regulators > 0)
+ flash_regulator_enable(led, flash_node, false);
+
+ flash_node->flash_on = false;
+ mutex_unlock(&led->flash_led_lock);
+}
+
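+ /*
+ * Brightness is interpreted as LED current in mA; max_brightness was
+ * populated from the qcom,max-current DT property at probe time.
+ */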
+static void qpnp_flash_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct flash_node_data *flash_node;
+ struct qpnp_flash_led *led;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->pdev->dev);
+
+ if (value < LED_OFF) {
+ pr_err("Invalid brightness value\n");
+ return;
+ }
+
+ if (value > flash_node->cdev.max_brightness)
+ value = flash_node->cdev.max_brightness;
+
+ flash_node->cdev.brightness = value;
+ if (led->flash_node[led->num_leds - 1].id ==
+ FLASH_LED_SWITCH) {
+ if (flash_node->type == TORCH)
+ led->flash_node[led->num_leds - 1].type = TORCH;
+ else if (flash_node->type == FLASH)
+ led->flash_node[led->num_leds - 1].type = FLASH;
+
+ led->flash_node[led->num_leds - 1].max_current
+ = flash_node->max_current;
+
+ if (flash_node->id == FLASH_LED_0 ||
+ flash_node->id == FLASH_LED_1) {
+ if (value < FLASH_LED_MIN_CURRENT_MA && value != 0)
+ value = FLASH_LED_MIN_CURRENT_MA;
+
+ flash_node->prgm_current = value;
+ flash_node->flash_on = value ? true : false;
+ } else if (flash_node->id == FLASH_LED_SWITCH) {
+ if (!value) {
+ flash_node->prgm_current = 0;
+ flash_node->prgm_current2 = 0;
+ }
+ }
+ } else {
+ if (value < FLASH_LED_MIN_CURRENT_MA && value != 0)
+ value = FLASH_LED_MIN_CURRENT_MA;
+ flash_node->prgm_current = value;
+ }
+
+ queue_work(led->ordered_workq, &flash_node->work);
+}
+
+static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led)
+{
+ int rc;
+ u8 val, temp_val;
+ uint val_int;
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_MODULE_ENABLE_CTRL(led->base),
+ FLASH_MODULE_ENABLE_MASK,
+ FLASH_LED_MODULE_CTRL_DEFAULT);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Module disable failed\n");
+ return rc;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_STROBE_CTRL(led->base),
+ FLASH_STROBE_MASK, FLASH_LED_DISABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Strobe disable failed\n");
+ return rc;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_TMR_CTRL(led->base),
+ FLASH_TMR_MASK, FLASH_TMR_SAFETY);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "LED timer ctrl reg write failed(%d)\n", rc);
+ return rc;
+ }
+
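+ /* Headroom (mV) to register code: divider-sized steps minus a fixed offset. */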
+ val = (u8)(led->pdata->headroom / FLASH_LED_HEADROOM_DIVIDER -
+ FLASH_LED_HEADROOM_OFFSET);
+ rc = qpnp_led_masked_write(led,
+ FLASH_HEADROOM(led->base),
+ FLASH_HEADROOM_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Headroom reg write failed\n");
+ return rc;
+ }
+
+ val = qpnp_flash_led_get_startup_dly(led->pdata->startup_dly);
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_STARTUP_DELAY(led->base),
+ FLASH_STARTUP_DLY_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Startup delay reg write failed\n");
+ return rc;
+ }
+
+ val = (u8)(led->pdata->clamp_current * FLASH_MAX_LEVEL /
+ FLASH_LED_MAX_CURRENT_MA);
+ rc = qpnp_led_masked_write(led,
+ FLASH_CLAMP_CURRENT(led->base),
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Clamp current reg write failed\n");
+ return rc;
+ }
+
+ if (led->pdata->pmic_charger_support)
+ val = FLASH_LED_FLASH_HW_VREG_OK;
+ else
+ val = FLASH_LED_FLASH_SW_VREG_OK;
+ rc = qpnp_led_masked_write(led,
+ FLASH_VREG_OK_FORCE(led->base),
+ FLASH_VREG_OK_FORCE_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "VREG OK force reg write failed\n");
+ return rc;
+ }
+
+ if (led->pdata->self_check_en)
+ val = FLASH_MODULE_ENABLE;
+ else
+ val = FLASH_LED_DISABLE;
+ rc = qpnp_led_masked_write(led,
+ FLASH_FAULT_DETECT(led->base),
+ FLASH_FAULT_DETECT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Fault detect reg write failed\n");
+ return rc;
+ }
+
+ val = 0x0;
+ val |= led->pdata->mask3_en << FLASH_LED_MASK3_ENABLE_SHIFT;
+ val |= FLASH_LED_MASK_MODULE_MASK2_ENABLE;
+ rc = qpnp_led_masked_write(led, FLASH_MASK_ENABLE(led->base),
+ FLASH_MASK_MODULE_CONTRL_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Mask module enable failed\n");
+ return rc;
+ }
+
+ rc = regmap_read(led->regmap, FLASH_PERPH_RESET_CTRL(led->base),
+ &val_int);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Unable to read from address %x, rc(%d)\n",
+ FLASH_PERPH_RESET_CTRL(led->base), rc);
+ return -EINVAL;
+ }
+ val = (u8)val_int;
+
+ if (led->pdata->follow_rb_disable) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_UNLOCK_SECURE(led->base),
+ FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Secure reg write failed\n");
+ return -EINVAL;
+ }
+
+ val |= FLASH_FOLLOW_OTST2_RB_MASK;
+ rc = qpnp_led_masked_write(led,
+ FLASH_PERPH_RESET_CTRL(led->base),
+ FLASH_FOLLOW_OTST2_RB_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "failed to reset OTST2_RB bit\n");
+ return rc;
+ }
+ } else {
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_UNLOCK_SECURE(led->base),
+ FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Secure reg write failed\n");
+ return -EINVAL;
+ }
+
+ val &= ~FLASH_FOLLOW_OTST2_RB_MASK;
+ rc = qpnp_led_masked_write(led,
+ FLASH_PERPH_RESET_CTRL(led->base),
+ FLASH_FOLLOW_OTST2_RB_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "failed to reset OTST2_RB bit\n");
+ return rc;
+ }
+ }
+
+ if (!led->pdata->thermal_derate_en)
+ val = 0x0;
+ else {
+ val = led->pdata->thermal_derate_en << 7;
+ val |= led->pdata->thermal_derate_rate << 3;
+ val |= (led->pdata->thermal_derate_threshold -
+ FLASH_LED_THERMAL_THRESHOLD_MIN) /
+ FLASH_LED_THERMAL_DEVIDER;
+ }
+ rc = qpnp_led_masked_write(led,
+ FLASH_THERMAL_DRATE(led->base),
+ FLASH_THERMAL_DERATE_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Thermal derate reg write failed\n");
+ return rc;
+ }
+
+ if (!led->pdata->current_ramp_en)
+ val = 0x0;
+ else {
+ val = led->pdata->current_ramp_en << 7;
+ val |= led->pdata->ramp_up_step << 3;
+ val |= led->pdata->ramp_dn_step;
+ }
+ rc = qpnp_led_masked_write(led,
+ FLASH_CURRENT_RAMP(led->base),
+ FLASH_CURRENT_RAMP_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Current ramp reg write failed\n");
+ return rc;
+ }
+
+ if (!led->pdata->vph_pwr_droop_en)
+ val = 0x0;
+ else {
+ val = led->pdata->vph_pwr_droop_en << 7;
+ val |= ((led->pdata->vph_pwr_droop_threshold -
+ FLASH_LED_VPH_DROOP_THRESHOLD_MIN_MV) /
+ FLASH_LED_VPH_DROOP_THRESHOLD_DIVIDER) << 4;
+ temp_val =
+ qpnp_flash_led_get_droop_debounce_time(
+ led->pdata->vph_pwr_droop_debounce_time);
+ if (temp_val == 0xFF) {
+ dev_err(&led->pdev->dev, "Invalid debounce time\n");
+ return -EINVAL;
+ }
+
+ val |= temp_val;
+ }
+ rc = qpnp_led_masked_write(led,
+ FLASH_VPH_PWR_DROOP(led->base),
+ FLASH_VPH_PWR_DROOP_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "VPH PWR droop reg write failed\n");
+ return rc;
+ }
+
+ led->battery_psy = power_supply_get_by_name("battery");
+ if (!led->battery_psy) {
+ dev_err(&led->pdev->dev,
+ "Failed to get battery power supply\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led,
+ struct flash_node_data *flash_node)
+{
+ const char *temp_string;
+ struct device_node *node = flash_node->cdev.dev->of_node;
+ struct device_node *temp = NULL;
+ int rc = 0, num_regs = 0;
+ u32 val;
+
+ rc = of_property_read_string(node, "label", &temp_string);
+ if (!rc) {
+ if (strcmp(temp_string, "flash") == 0)
+ flash_node->type = FLASH;
+ else if (strcmp(temp_string, "torch") == 0)
+ flash_node->type = TORCH;
+ else if (strcmp(temp_string, "switch") == 0)
+ flash_node->type = SWITCH;
+ else {
+ dev_err(&led->pdev->dev, "Wrong flash LED type\n");
+ return -EINVAL;
+ }
+ } else if (rc < 0) {
+ dev_err(&led->pdev->dev, "Unable to read flash type\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(node, "qcom,current", &val);
+ if (!rc) {
+ if (val < FLASH_LED_MIN_CURRENT_MA)
+ val = FLASH_LED_MIN_CURRENT_MA;
+ flash_node->prgm_current = val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to read current\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(node, "qcom,id", &val);
+ if (!rc)
+ flash_node->id = (u8)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to read led ID\n");
+ return rc;
+ }
+
+ if (flash_node->type == SWITCH || flash_node->type == FLASH) {
+ rc = of_property_read_u32(node, "qcom,duration", &val);
+ if (!rc)
+ flash_node->duration = (u16)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to read duration\n");
+ return rc;
+ }
+ }
+
+ switch (led->peripheral_type) {
+ case FLASH_SUBTYPE_SINGLE:
+ flash_node->trigger = FLASH_LED0_TRIGGER;
+ break;
+ case FLASH_SUBTYPE_DUAL:
+ if (flash_node->id == FLASH_LED_0)
+ flash_node->trigger = FLASH_LED0_TRIGGER;
+ else if (flash_node->id == FLASH_LED_1)
+ flash_node->trigger = FLASH_LED1_TRIGGER;
+ break;
+ default:
+ dev_err(&led->pdev->dev, "Invalid peripheral type\n");
+ }
+
+ while ((temp = of_get_next_child(node, temp))) {
+ if (of_find_property(temp, "regulator-name", NULL))
+ num_regs++;
+ }
+
+ if (num_regs)
+ flash_node->num_regulators = num_regs;
+
+ return rc;
+}
+
+static int qpnp_flash_led_parse_common_dt(
+ struct qpnp_flash_led *led,
+ struct device_node *node)
+{
+ int rc;
+ u32 val;
+ int temp_val;
+ const char *temp;
+
+ led->pdata->headroom = FLASH_LED_HEADROOM_DEFAULT_MV;
+ rc = of_property_read_u32(node, "qcom,headroom", &val);
+ if (!rc)
+ led->pdata->headroom = (u16)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to read headroom\n");
+ return rc;
+ }
+
+ led->pdata->startup_dly = FLASH_LED_STARTUP_DELAY_DEFAULT_US;
+ rc = of_property_read_u32(node, "qcom,startup-dly", &val);
+ if (!rc)
+ led->pdata->startup_dly = (u8)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to read startup delay\n");
+ return rc;
+ }
+
+ led->pdata->clamp_current = FLASH_LED_CLAMP_CURRENT_DEFAULT_MA;
+ rc = of_property_read_u32(node, "qcom,clamp-current", &val);
+ if (!rc) {
+ if (val < FLASH_LED_MIN_CURRENT_MA)
+ val = FLASH_LED_MIN_CURRENT_MA;
+ led->pdata->clamp_current = (u16)val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to read clamp current\n");
+ return rc;
+ }
+
+ led->pdata->pmic_charger_support =
+ of_property_read_bool(node,
+ "qcom,pmic-charger-support");
+
+ led->pdata->self_check_en =
+ of_property_read_bool(node, "qcom,self-check-enabled");
+
+ led->pdata->thermal_derate_en =
+ of_property_read_bool(node,
+ "qcom,thermal-derate-enabled");
+
+ if (led->pdata->thermal_derate_en) {
+ led->pdata->thermal_derate_rate =
+ FLASH_LED_THERMAL_DERATE_RATE_DEFAULT_PERCENT;
+ rc = of_property_read_string(node, "qcom,thermal-derate-rate",
+ &temp);
+ if (!rc) {
+ temp_val =
+ qpnp_flash_led_get_thermal_derate_rate(temp);
+ if (temp_val < 0) {
+ dev_err(&led->pdev->dev,
+ "Invalid thermal derate rate\n");
+ return -EINVAL;
+ }
+
+ led->pdata->thermal_derate_rate = (u8)temp_val;
+ } else {
+ dev_err(&led->pdev->dev,
+ "Unable to read thermal derate rate\n");
+ return -EINVAL;
+ }
+
+ led->pdata->thermal_derate_threshold =
+ FLASH_LED_THERMAL_DERATE_THRESHOLD_DEFAULT_C;
+ rc = of_property_read_u32(node, "qcom,thermal-derate-threshold",
+ &val);
+ if (!rc)
+ led->pdata->thermal_derate_threshold = (u8)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read thermal derate threshold\n");
+ return rc;
+ }
+ }
+
+ led->pdata->current_ramp_en =
+ of_property_read_bool(node,
+ "qcom,current-ramp-enabled");
+ if (led->pdata->current_ramp_en) {
+ led->pdata->ramp_up_step = FLASH_LED_RAMP_UP_STEP_DEFAULT_US;
+ rc = of_property_read_string(node, "qcom,ramp_up_step", &temp);
+ if (!rc) {
+ temp_val = qpnp_flash_led_get_ramp_step(temp);
+ if (temp_val < 0) {
+ dev_err(&led->pdev->dev,
+ "Invalid ramp up step values\n");
+ return -EINVAL;
+ }
+ led->pdata->ramp_up_step = (u8)temp_val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read ramp up steps\n");
+ return rc;
+ }
+
+ led->pdata->ramp_dn_step = FLASH_LED_RAMP_DN_STEP_DEFAULT_US;
+ rc = of_property_read_string(node, "qcom,ramp_dn_step", &temp);
+ if (!rc) {
+ temp_val = qpnp_flash_led_get_ramp_step(temp);
+ if (temp_val < 0) {
+ dev_err(&led->pdev->dev,
+ "Invalid ramp down step values\n");
+ return -EINVAL;
+ }
+ led->pdata->ramp_dn_step = (u8)temp_val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read ramp down steps\n");
+ return rc;
+ }
+ }
+
+ led->pdata->vph_pwr_droop_en = of_property_read_bool(node,
+ "qcom,vph-pwr-droop-enabled");
+ if (led->pdata->vph_pwr_droop_en) {
+ led->pdata->vph_pwr_droop_threshold =
+ FLASH_LED_VPH_PWR_DROOP_THRESHOLD_DEFAULT_MV;
+ rc = of_property_read_u32(node,
+ "qcom,vph-pwr-droop-threshold", &val);
+ if (!rc) {
+ led->pdata->vph_pwr_droop_threshold = (u16)val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read VPH PWR droop threshold\n");
+ return rc;
+ }
+
+ led->pdata->vph_pwr_droop_debounce_time =
+ FLASH_LED_VPH_PWR_DROOP_DEBOUNCE_TIME_DEFAULT_US;
+ rc = of_property_read_u32(node,
+ "qcom,vph-pwr-droop-debounce-time", &val);
+ if (!rc)
+ led->pdata->vph_pwr_droop_debounce_time = (u8)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read VPH PWR droop debounce time\n");
+ return rc;
+ }
+ }
+
+ led->pdata->hdrm_sns_ch0_en = of_property_read_bool(node,
+ "qcom,headroom-sense-ch0-enabled");
+
+ led->pdata->hdrm_sns_ch1_en = of_property_read_bool(node,
+ "qcom,headroom-sense-ch1-enabled");
+
+ led->pdata->power_detect_en = of_property_read_bool(node,
+ "qcom,power-detect-enabled");
+
+ led->pdata->mask3_en = of_property_read_bool(node,
+ "qcom,otst2-module-enabled");
+
+ led->pdata->follow_rb_disable = of_property_read_bool(node,
+ "qcom,follow-otst2-rb-disabled");
+
+ led->pdata->die_current_derate_en = of_property_read_bool(node,
+ "qcom,die-current-derate-enabled");
+
+ if (led->pdata->die_current_derate_en) {
+ led->vadc_dev = qpnp_get_vadc(&led->pdev->dev, "die-temp");
+ if (IS_ERR(led->vadc_dev)) {
+ pr_err("VADC channel property Missing\n");
+ return -EINVAL;
+ }
+
+ if (of_find_property(node, "qcom,die-temp-threshold",
+ &led->pdata->temp_threshold_num)) {
+ if (led->pdata->temp_threshold_num > 0) {
+ led->pdata->die_temp_threshold_degc =
+ devm_kzalloc(&led->pdev->dev,
+ led->pdata->temp_threshold_num,
+ GFP_KERNEL);
+
+ if (led->pdata->die_temp_threshold_degc
+ == NULL) {
+ dev_err(&led->pdev->dev,
+ "failed to allocate die temp array\n");
+ return -ENOMEM;
+ }
+ led->pdata->temp_threshold_num /=
+ sizeof(unsigned int);
+
+ rc = of_property_read_u32_array(node,
+ "qcom,die-temp-threshold",
+ led->pdata->die_temp_threshold_degc,
+ led->pdata->temp_threshold_num);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "couldn't read temp threshold rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+ }
+
+ if (of_find_property(node, "qcom,die-temp-derate-current",
+ &led->pdata->temp_derate_curr_num)) {
+ if (led->pdata->temp_derate_curr_num > 0) {
+ led->pdata->die_temp_derate_curr_ma =
+ devm_kzalloc(&led->pdev->dev,
+ led->pdata->temp_derate_curr_num,
+ GFP_KERNEL);
+ if (led->pdata->die_temp_derate_curr_ma
+ == NULL) {
+ dev_err(&led->pdev->dev,
+ "failed to allocate die derate current array\n");
+ return -ENOMEM;
+ }
+ led->pdata->temp_derate_curr_num /=
+ sizeof(unsigned int);
+
+ rc = of_property_read_u32_array(node,
+ "qcom,die-temp-derate-current",
+ led->pdata->die_temp_derate_curr_ma,
+ led->pdata->temp_derate_curr_num);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "couldn't read temp limits rc =%d\n",
+ rc);
+ return rc;
+ }
+ }
+ }
+ if (led->pdata->temp_threshold_num !=
+ led->pdata->temp_derate_curr_num) {
+ pr_err("Both array size are not same\n");
+ return -EINVAL;
+ }
+ }
+
+ led->pinctrl = devm_pinctrl_get(&led->pdev->dev);
+ if (IS_ERR_OR_NULL(led->pinctrl)) {
+ dev_err(&led->pdev->dev, "Unable to acquire pinctrl\n");
+ led->pinctrl = NULL;
+ return 0;
+ }
+
+ led->gpio_state_active = pinctrl_lookup_state(led->pinctrl,
+ "flash_led_enable");
+ if (IS_ERR_OR_NULL(led->gpio_state_active)) {
+ dev_err(&led->pdev->dev, "Cannot lookup LED active state\n");
+ devm_pinctrl_put(led->pinctrl);
+ led->pinctrl = NULL;
+ return PTR_ERR(led->gpio_state_active);
+ }
+
+ led->gpio_state_suspend = pinctrl_lookup_state(led->pinctrl,
+ "flash_led_disable");
+ if (IS_ERR_OR_NULL(led->gpio_state_suspend)) {
+ dev_err(&led->pdev->dev, "Cannot lookup LED disable state\n");
+ devm_pinctrl_put(led->pinctrl);
+ led->pinctrl = NULL;
+ return PTR_ERR(led->gpio_state_suspend);
+ }
+
+ return 0;
+}
+
+static int qpnp_flash_led_probe(struct platform_device *pdev)
+{
+ struct qpnp_flash_led *led;
+ unsigned int base;
+ struct device_node *node, *temp;
+ struct dentry *root, *file;
+ int rc, i = 0, j, num_leds = 0;
+ u32 val;
+
+ root = NULL;
+ node = pdev->dev.of_node;
+ if (node == NULL) {
+ dev_info(&pdev->dev, "No flash device defined\n");
+ return -ENODEV;
+ }
+
+ rc = of_property_read_u32(pdev->dev.of_node, "reg", &base);
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "Couldn't find reg in node = %s rc = %d\n",
+ pdev->dev.of_node->full_name, rc);
+ return rc;
+ }
+
+ led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ led->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!led->regmap) {
+ dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+ return -EINVAL;
+ }
+
+ led->base = base;
+ led->pdev = pdev;
+ led->current_addr = FLASH_LED0_CURRENT(led->base);
+ led->current2_addr = FLASH_LED1_CURRENT(led->base);
+
+ led->pdata = devm_kzalloc(&pdev->dev, sizeof(*led->pdata), GFP_KERNEL);
+ if (!led->pdata)
+ return -ENOMEM;
+
+ led->peripheral_type = (u8)qpnp_flash_led_get_peripheral_type(led);
+ if (led->peripheral_type < 0) {
+ dev_err(&pdev->dev, "Failed to get peripheral type\n");
+ return rc;
+ }
+
+ rc = qpnp_flash_led_parse_common_dt(led, node);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Failed to get common config for flash LEDs\n");
+ return rc;
+ }
+
+ rc = qpnp_flash_led_init_settings(led);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to initialize flash LED\n");
+ return rc;
+ }
+
+ rc = qpnp_get_pmic_revid(led);
+ if (rc)
+ return rc;
+
+ temp = NULL;
+ while ((temp = of_get_next_child(node, temp)))
+ num_leds++;
+
+ if (!num_leds)
+ return -ECHILD;
+
+ led->flash_node = devm_kzalloc(&pdev->dev,
+ (sizeof(struct flash_node_data) * num_leds),
+ GFP_KERNEL);
+ if (!led->flash_node) {
+ dev_err(&pdev->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&led->flash_led_lock);
+
+ led->ordered_workq = alloc_ordered_workqueue("flash_led_workqueue", 0);
+ if (!led->ordered_workq) {
+ dev_err(&pdev->dev, "Failed to allocate ordered workqueue\n");
+ return -ENOMEM;
+ }
+
+ for_each_child_of_node(node, temp) {
+ led->flash_node[i].cdev.brightness_set =
+ qpnp_flash_led_brightness_set;
+ led->flash_node[i].cdev.brightness_get =
+ qpnp_flash_led_brightness_get;
+ led->flash_node[i].pdev = pdev;
+
+ INIT_WORK(&led->flash_node[i].work, qpnp_flash_led_work);
+ rc = of_property_read_string(temp, "qcom,led-name",
+ &led->flash_node[i].cdev.name);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev,
+ "Unable to read flash name\n");
+ return rc;
+ }
+
+ rc = of_property_read_string(temp, "qcom,default-led-trigger",
+ &led->flash_node[i].cdev.default_trigger);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev,
+ "Unable to read trigger name\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(temp, "qcom,max-current", &val);
+ if (!rc) {
+ if (val < FLASH_LED_MIN_CURRENT_MA)
+ val = FLASH_LED_MIN_CURRENT_MA;
+ led->flash_node[i].max_current = (u16)val;
+ led->flash_node[i].cdev.max_brightness = val;
+ } else {
+ dev_err(&led->pdev->dev,
+ "Unable to read max current\n");
+ return rc;
+ }
+ rc = led_classdev_register(&pdev->dev,
+ &led->flash_node[i].cdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to register led\n");
+ goto error_led_register;
+ }
+
+ led->flash_node[i].cdev.dev->of_node = temp;
+
+ rc = qpnp_flash_led_parse_each_led_dt(led, &led->flash_node[i]);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Failed to parse config for each LED\n");
+ goto error_led_register;
+ }
+
+ if (led->flash_node[i].num_regulators) {
+ rc = flash_regulator_parse_dt(led, &led->flash_node[i]);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Unable to parse regulator data\n");
+ goto error_led_register;
+ }
+
+ rc = flash_regulator_setup(led, &led->flash_node[i],
+ true);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Unable to set up regulator\n");
+ goto error_led_register;
+ }
+ }
+
+ for (j = 0; j < ARRAY_SIZE(qpnp_flash_led_attrs); j++) {
+ rc =
+ sysfs_create_file(&led->flash_node[i].cdev.dev->kobj,
+ &qpnp_flash_led_attrs[j].attr);
+ if (rc)
+ goto error_led_register;
+ }
+
+ i++;
+ }
+
+ led->num_leds = i;
+
+ root = debugfs_create_dir("flashLED", NULL);
+ if (IS_ERR_OR_NULL(root)) {
+ pr_err("Error creating top level directory err%ld",
+ (long)root);
+ if (PTR_ERR(root) == -ENODEV)
+ pr_err("debugfs is not enabled in kernel");
+ goto error_led_debugfs;
+ }
+
+ led->dbgfs_root = root;
+ file = debugfs_create_file("enable_debug", 0600, root, led,
+ &flash_led_dfs_dbg_feature_fops);
+ if (!file) {
+ pr_err("error creating 'enable_debug' entry\n");
+ goto error_led_debugfs;
+ }
+
+ file = debugfs_create_file("latched", 0600, root, led,
+ &flash_led_dfs_latched_reg_fops);
+ if (!file) {
+ pr_err("error creating 'latched' entry\n");
+ goto error_led_debugfs;
+ }
+
+ file = debugfs_create_file("strobe", 0600, root, led,
+ &flash_led_dfs_strobe_reg_fops);
+ if (!file) {
+ pr_err("error creating 'strobe' entry\n");
+ goto error_led_debugfs;
+ }
+
+ dev_set_drvdata(&pdev->dev, led);
+
+ return 0;
+
+error_led_debugfs:
+ i = led->num_leds - 1;
+ j = ARRAY_SIZE(qpnp_flash_led_attrs) - 1;
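+ /* fall through to unwind sysfs attrs and classdevs for all registered nodes */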
+error_led_register:
+ for (; i >= 0; i--) {
+ for (; j >= 0; j--)
+ sysfs_remove_file(&led->flash_node[i].cdev.dev->kobj,
+ &qpnp_flash_led_attrs[j].attr);
+ j = ARRAY_SIZE(qpnp_flash_led_attrs) - 1;
+ led_classdev_unregister(&led->flash_node[i].cdev);
+ }
+ debugfs_remove_recursive(root);
+ mutex_destroy(&led->flash_led_lock);
+ destroy_workqueue(led->ordered_workq);
+
+ return rc;
+}
+
+static int qpnp_flash_led_remove(struct platform_device *pdev)
+{
+ struct qpnp_flash_led *led = dev_get_drvdata(&pdev->dev);
+ int i, j;
+
+ for (i = led->num_leds - 1; i >= 0; i--) {
+ if (led->flash_node[i].reg_data) {
+ if (led->flash_node[i].flash_on)
+ flash_regulator_enable(led,
+ &led->flash_node[i], false);
+ flash_regulator_setup(led, &led->flash_node[i],
+ false);
+ }
+ for (j = 0; j < ARRAY_SIZE(qpnp_flash_led_attrs); j++)
+ sysfs_remove_file(&led->flash_node[i].cdev.dev->kobj,
+ &qpnp_flash_led_attrs[j].attr);
+ led_classdev_unregister(&led->flash_node[i].cdev);
+ }
+ debugfs_remove_recursive(led->dbgfs_root);
+ mutex_destroy(&led->flash_led_lock);
+ destroy_workqueue(led->ordered_workq);
+
+ return 0;
+}
+
+static const struct of_device_id spmi_match_table[] = {
+ { .compatible = "qcom,qpnp-flash-led",},
+ { },
+};
+
+static struct platform_driver qpnp_flash_led_driver = {
+ .driver = {
+ .name = "qcom,qpnp-flash-led",
+ .of_match_table = spmi_match_table,
+ },
+ .probe = qpnp_flash_led_probe,
+ .remove = qpnp_flash_led_remove,
+};
+
+static int __init qpnp_flash_led_init(void)
+{
+ return platform_driver_register(&qpnp_flash_led_driver);
+}
+late_initcall(qpnp_flash_led_init);
+
+static void __exit qpnp_flash_led_exit(void)
+{
+ platform_driver_unregister(&qpnp_flash_led_driver);
+}
+module_exit(qpnp_flash_led_exit);
+
+MODULE_DESCRIPTION("QPNP Flash LED driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("leds:leds-qpnp-flash");
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index c3ea03c9a1a8..02619cabda8b 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -333,6 +333,7 @@ struct cached_dev {
/* Limit number of writeback bios in flight */
struct semaphore in_flight;
struct task_struct *writeback_thread;
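+ /* Dedicated WQ_MEM_RECLAIM workqueue for writeback writes */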
+ struct workqueue_struct *writeback_write_wq;
struct keybuf writeback_keys;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 2410df1c2a05..0ee41fd9d850 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -196,12 +196,12 @@ static void bch_data_insert_start(struct closure *cl)
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
struct bio *bio = op->bio, *n;
- if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
- wake_up_gc(op->c);
-
if (op->bypass)
return bch_data_invalidate(cl);
+ if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
+ wake_up_gc(op->c);
+
/*
* Journal writes are marked REQ_FLUSH; if the original write was a
* flush, it'll wait on the journal write.
@@ -1014,7 +1014,7 @@ static int cached_dev_congested(void *data, int bits)
struct request_queue *q = bdev_get_queue(dc->bdev);
int ret = 0;
- if (bdi_congested(&q->backing_dev_info, bits))
+ if (bdi_congested(q->backing_dev_info, bits))
return 1;
if (cached_dev_get(dc)) {
@@ -1023,7 +1023,7 @@ static int cached_dev_congested(void *data, int bits)
for_each_cache(ca, d->c, i) {
q = bdev_get_queue(ca->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
}
cached_dev_put(dc);
@@ -1037,7 +1037,7 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
struct gendisk *g = dc->disk.disk;
g->queue->make_request_fn = cached_dev_make_request;
- g->queue->backing_dev_info.congested_fn = cached_dev_congested;
+ g->queue->backing_dev_info->congested_fn = cached_dev_congested;
dc->disk.cache_miss = cached_dev_cache_miss;
dc->disk.ioctl = cached_dev_ioctl;
}
@@ -1130,7 +1130,7 @@ static int flash_dev_congested(void *data, int bits)
for_each_cache(ca, d->c, i) {
q = bdev_get_queue(ca->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
}
return ret;
@@ -1141,7 +1141,7 @@ void bch_flash_dev_request_init(struct bcache_device *d)
struct gendisk *g = d->disk;
g->queue->make_request_fn = flash_dev_make_request;
- g->queue->backing_dev_info.congested_fn = flash_dev_congested;
+ g->queue->backing_dev_info->congested_fn = flash_dev_congested;
d->cache_miss = flash_dev_cache_miss;
d->ioctl = flash_dev_ioctl;
}
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 7b5880b8874c..13acf48c5210 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -802,7 +802,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
blk_queue_make_request(q, NULL);
d->disk->queue = q;
q->queuedata = d;
- q->backing_dev_info.congested_data = d;
+ q->backing_dev_info->congested_data = d;
q->limits.max_hw_sectors = UINT_MAX;
q->limits.max_sectors = UINT_MAX;
q->limits.max_segment_size = UINT_MAX;
@@ -1023,7 +1023,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
}
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
- bch_sectors_dirty_init(dc);
+ bch_sectors_dirty_init(&dc->disk);
atomic_set(&dc->has_dirty, 1);
atomic_inc(&dc->count);
bch_writeback_queue(dc);
@@ -1056,6 +1056,8 @@ static void cached_dev_free(struct closure *cl)
cancel_delayed_work_sync(&dc->writeback_rate_update);
if (!IS_ERR_OR_NULL(dc->writeback_thread))
kthread_stop(dc->writeback_thread);
+ if (dc->writeback_write_wq)
+ destroy_workqueue(dc->writeback_write_wq);
mutex_lock(&bch_register_lock);
@@ -1127,9 +1129,9 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
set_capacity(dc->disk.disk,
dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
- dc->disk.disk->queue->backing_dev_info.ra_pages =
- max(dc->disk.disk->queue->backing_dev_info.ra_pages,
- q->backing_dev_info.ra_pages);
+ dc->disk.disk->queue->backing_dev_info->ra_pages =
+ max(dc->disk.disk->queue->backing_dev_info->ra_pages,
+ q->backing_dev_info->ra_pages);
bch_cached_dev_request_init(dc);
bch_cached_dev_writeback_init(dc);
@@ -1227,6 +1229,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
goto err;
bcache_device_attach(d, c, u - c->uuids);
+ bch_sectors_dirty_init(d);
bch_flash_dev_request_init(d);
add_disk(d->disk);
@@ -1959,6 +1962,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
else
err = "device busy";
mutex_unlock(&bch_register_lock);
+ if (!IS_ERR(bdev))
+ bdput(bdev);
if (attr == &ksysfs_register_quiet)
goto out;
}
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index b3ff57d61dde..4fbb5532f24c 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -191,7 +191,7 @@ STORE(__cached_dev)
{
struct cached_dev *dc = container_of(kobj, struct cached_dev,
disk.kobj);
- unsigned v = size;
+ ssize_t v = size;
struct cache_set *c;
struct kobj_uevent_env *env;
@@ -226,7 +226,7 @@ STORE(__cached_dev)
bch_cached_dev_run(dc);
if (attr == &sysfs_cache_mode) {
- ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);
+ v = bch_read_string_list(buf, bch_cache_modes + 1);
if (v < 0)
return v;
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index db3ae4c2b223..6c18e3ec3e48 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -73,24 +73,44 @@ STRTO_H(strtouint, unsigned int)
STRTO_H(strtoll, long long)
STRTO_H(strtoull, unsigned long long)
+/**
+ * bch_hprint() - formats @v to human readable string for sysfs.
+ *
+ * @v - signed 64 bit integer
+ * @buf - the (at least 8 byte) buffer to format the result into.
+ *
+ * Returns the number of bytes used by format.
+ */
ssize_t bch_hprint(char *buf, int64_t v)
{
static const char units[] = "?kMGTPEZY";
- char dec[4] = "";
- int u, t = 0;
-
- for (u = 0; v >= 1024 || v <= -1024; u++) {
- t = v & ~(~0 << 10);
- v >>= 10;
- }
-
- if (!u)
- return sprintf(buf, "%llu", v);
-
- if (v < 100 && v > -100)
- snprintf(dec, sizeof(dec), ".%i", t / 100);
-
- return sprintf(buf, "%lli%s%c", v, dec, units[u]);
+ int u = 0, t;
+
+ uint64_t q;
+
+ if (v < 0)
+ q = -v;
+ else
+ q = v;
+
+ /* For as long as the number is more than 3 digits, but at least
+ * once, shift right / divide by 1024. Keep the remainder for
+ * a digit after the decimal point.
+ */
+ do {
+ u++;
+
+ t = q & ~(~0 << 10);
+ q >>= 10;
+ } while (q >= 1000);
+
+ if (v < 0)
+ /* '-', up to 3 digits, '.', 1 digit, 1 character, null;
+ * yields 8 bytes.
+ */
+ return sprintf(buf, "-%llu.%i%c", q, t * 10 / 1024, units[u]);
+ else
+ return sprintf(buf, "%llu.%i%c", q, t * 10 / 1024, units[u]);
}
ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index b9346cd9cda1..bbb1dc9e1639 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -21,7 +21,8 @@
static void __update_writeback_rate(struct cached_dev *dc)
{
struct cache_set *c = dc->disk.c;
- uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
+ uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
+ bcache_flash_devs_sectors_dirty(c);
uint64_t cache_dirty_target =
div_u64(cache_sectors * dc->writeback_percent, 100);
@@ -190,7 +191,7 @@ static void write_dirty(struct closure *cl)
closure_bio_submit(&io->bio, cl);
- continue_at(cl, write_dirty_finish, system_wq);
+ continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}
static void read_dirty_endio(struct bio *bio)
@@ -210,7 +211,7 @@ static void read_dirty_submit(struct closure *cl)
closure_bio_submit(&io->bio, cl);
- continue_at(cl, write_dirty, system_wq);
+ continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}
static void read_dirty(struct cached_dev *dc)
@@ -488,17 +489,17 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
return MAP_CONTINUE;
}
-void bch_sectors_dirty_init(struct cached_dev *dc)
+void bch_sectors_dirty_init(struct bcache_device *d)
{
struct sectors_dirty_init op;
bch_btree_op_init(&op.op, -1);
- op.inode = dc->disk.id;
+ op.inode = d->id;
- bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
+ bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
sectors_dirty_init_fn, 0);
- dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
+ d->sectors_dirty_last = bcache_dev_sectors_dirty(d);
}
void bch_cached_dev_writeback_init(struct cached_dev *dc)
@@ -522,6 +523,11 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
+ dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
+ WQ_MEM_RECLAIM, 0);
+ if (!dc->writeback_write_wq)
+ return -ENOMEM;
+
dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
"bcache_writeback");
if (IS_ERR(dc->writeback_thread))
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 073a042aed24..daec4fd782ea 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -14,6 +14,25 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
return ret;
}
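+/*
+ * Sum dirty sectors over all flash-only volumes so the writeback
+ * target calculation can exclude them from usable cache_sectors.
+ */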
+static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
+{
+ uint64_t i, ret = 0;
+
+ mutex_lock(&bch_register_lock);
+
+ for (i = 0; i < c->nr_uuids; i++) {
+ struct bcache_device *d = c->devices[i];
+
+ if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
+ continue;
+ ret += bcache_dev_sectors_dirty(d);
+ }
+
+ mutex_unlock(&bch_register_lock);
+
+ return ret;
+}
+
static inline unsigned offset_to_stripe(struct bcache_device *d,
uint64_t offset)
{
@@ -85,7 +104,7 @@ static inline void bch_writeback_add(struct cached_dev *dc)
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
-void bch_sectors_dirty_init(struct cached_dev *dc);
+void bch_sectors_dirty_init(struct bcache_device *);
void bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 4f22e919787a..7a50728b9389 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1960,6 +1960,11 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
long pages;
struct bitmap_page *new_bp;
+ if (bitmap->storage.file && !init) {
+ pr_info("md: cannot resize file-based bitmap\n");
+ return -EINVAL;
+ }
+
if (chunksize == 0) {
/* If there is enough space, leave the chunk size unchanged,
* else increase by factor of two until there is enough space.
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 0da5efaad85c..54e50fc908e9 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2288,7 +2288,7 @@ static void do_waker(struct work_struct *ws)
static int is_congested(struct dm_dev *dev, int bdi_bits)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return bdi_congested(&q->backing_dev_info, bdi_bits);
+ return bdi_congested(q->backing_dev_info, bdi_bits);
}
static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 32e76c5ee741..11c52567304f 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1379,7 +1379,7 @@ static void stop_worker(struct era *era)
static int dev_is_congested(struct dm_dev *dev, int bdi_bits)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return bdi_congested(&q->backing_dev_info, bdi_bits);
+ return bdi_congested(q->backing_dev_info, bdi_bits);
}
static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index b3d78bba3a79..9411deaaddf9 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1660,7 +1660,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
char b[BDEVNAME_SIZE];
if (likely(q))
- r |= bdi_congested(&q->backing_dev_info, bdi_bits);
+ r |= bdi_congested(q->backing_dev_info, bdi_bits);
else
DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
dm_device_name(t->md),
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index a1cc797fe88f..5f1a943d9e81 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2634,7 +2634,7 @@ static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
return 1;
q = bdev_get_queue(pt->data_dev->bdev);
- return bdi_congested(&q->backing_dev_info, bdi_bits);
+ return bdi_congested(q->backing_dev_info, bdi_bits);
}
static void requeue_bios(struct pool *pool)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 1a7b11d57256..47ac131099d9 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2220,7 +2220,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
* the query about congestion status of request_queue
*/
if (dm_request_based(md))
- r = md->queue->backing_dev_info.wb.state &
+ r = md->queue->backing_dev_info->wb.state &
bdi_bits;
else
r = dm_table_any_congested(map, bdi_bits);
@@ -2302,7 +2302,7 @@ static void dm_init_md_queue(struct mapped_device *md)
* - must do so here (in alloc_dev callchain) before queue is used
*/
md->queue->queuedata = md;
- md->queue->backing_dev_info.congested_data = md;
+ md->queue->backing_dev_info->congested_data = md;
}
static void dm_init_old_md_queue(struct mapped_device *md)
@@ -2313,7 +2313,7 @@ static void dm_init_old_md_queue(struct mapped_device *md)
/*
* Initialize aspects of queue that aren't relevant for blk-mq
*/
- md->queue->backing_dev_info.congested_fn = dm_any_congested;
+ md->queue->backing_dev_info->congested_fn = dm_any_congested;
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
}
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 6ba3227e29b2..79223dceb1c2 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -68,7 +68,7 @@ static int linear_congested(struct mddev *mddev, int bits)
for (i = 0; i < conf->raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
}
rcu_read_unlock();
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0a856cb181e9..1cd819202553 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5284,8 +5284,8 @@ int md_run(struct mddev *mddev)
return err;
}
if (mddev->queue) {
- mddev->queue->backing_dev_info.congested_data = mddev;
- mddev->queue->backing_dev_info.congested_fn = md_congested;
+ mddev->queue->backing_dev_info->congested_data = mddev;
+ mddev->queue->backing_dev_info->congested_fn = md_congested;
}
if (pers->sync_request) {
if (mddev->kobj.sd &&
@@ -5642,7 +5642,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
__md_stop_writes(mddev);
__md_stop(mddev);
- mddev->queue->backing_dev_info.congested_fn = NULL;
+ mddev->queue->backing_dev_info->congested_fn = NULL;
/* tell userspace to handle 'inactive' */
sysfs_notify_dirent_safe(mddev->sysfs_state);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index dd483bb2e111..fb03ed86d57a 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -166,7 +166,7 @@ static int multipath_congested(struct mddev *mddev, int bits)
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct request_queue *q = bdev_get_queue(rdev->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
/* Just like multipath_map, we just check the
* first available device
*/
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index f8e5db0cb5aa..7a67e7dcf546 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -35,7 +35,7 @@ static int raid0_congested(struct mddev *mddev, int bits)
for (i = 0; i < raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
}
return ret;
}
@@ -415,8 +415,8 @@ static int raid0_run(struct mddev *mddev)
*/
int stripe = mddev->raid_disks *
(mddev->chunk_sectors << 9) / PAGE_SIZE;
- if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
- mddev->queue->backing_dev_info.ra_pages = 2* stripe;
+ if (mddev->queue->backing_dev_info->ra_pages < 2* stripe)
+ mddev->queue->backing_dev_info->ra_pages = 2* stripe;
}
dump_zones(mddev);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f24a9e14021d..a3ec3c5a8ee9 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -730,9 +730,9 @@ static int raid1_congested(struct mddev *mddev, int bits)
* non-congested targets, it can be removed
*/
if ((bits & (1 << WB_async_congested)) || 1)
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
else
- ret &= bdi_congested(&q->backing_dev_info, bits);
+ ret &= bdi_congested(q->backing_dev_info, bits);
}
}
rcu_read_unlock();
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e5ee4e9e0ea5..186d753b7fdb 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -838,7 +838,7 @@ static int raid10_congested(struct mddev *mddev, int bits)
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct request_queue *q = bdev_get_queue(rdev->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
}
}
rcu_read_unlock();
@@ -3698,8 +3698,8 @@ static int run(struct mddev *mddev)
* maybe...
*/
stripe /= conf->geo.near_copies;
- if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
- mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+ if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+ mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
}
if (md_integrity_register(mddev))
@@ -4493,8 +4493,8 @@ static void end_reshape(struct r10conf *conf)
int stripe = conf->geo.raid_disks *
((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
stripe /= conf->geo.near_copies;
- if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
- conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+ if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+ conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
}
conf->fullsync = 0;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8f60520c8392..165da5b94999 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5822,6 +5822,8 @@ static void raid5_do_work(struct work_struct *work)
spin_unlock_irq(&conf->device_lock);
+ r5l_flush_stripe_to_raid(conf->log);
+
async_tx_issue_pending_all();
blk_finish_plug(&plug);
@@ -6121,10 +6123,10 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
mddev_suspend(mddev);
conf->skip_copy = new;
if (new)
- mddev->queue->backing_dev_info.capabilities |=
+ mddev->queue->backing_dev_info->capabilities |=
BDI_CAP_STABLE_WRITES;
else
- mddev->queue->backing_dev_info.capabilities &=
+ mddev->queue->backing_dev_info->capabilities &=
~BDI_CAP_STABLE_WRITES;
mddev_resume(mddev);
}
@@ -6968,8 +6970,8 @@ static int run(struct mddev *mddev)
int data_disks = conf->previous_raid_disks - conf->max_degraded;
int stripe = data_disks *
((mddev->chunk_sectors << 9) / PAGE_SIZE);
- if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
- mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+ if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+ mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size);
@@ -7550,8 +7552,8 @@ static void end_reshape(struct r5conf *conf)
int data_disks = conf->raid_disks - conf->max_degraded;
int stripe = data_disks * ((conf->chunk_sectors << 9)
/ PAGE_SIZE);
- if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
- conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+ if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+ conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
}
}
}
diff --git a/drivers/media/platform/msm/ais/msm.c b/drivers/media/platform/msm/ais/msm.c
index ccfdfba2ceee..902e05b3329b 100644
--- a/drivers/media/platform/msm/ais/msm.c
+++ b/drivers/media/platform/msm/ais/msm.c
@@ -292,6 +292,7 @@ void msm_delete_stream(unsigned int session_id, unsigned int stream_id)
return;
while (1) {
+ unsigned long wl_flags;
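+ /* use irqsave variants: the stream rwlock may also be taken from IRQ context */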
if (try_count > 5) {
pr_err("%s : not able to delete stream %d\n",
@@ -299,18 +300,20 @@ void msm_delete_stream(unsigned int session_id, unsigned int stream_id)
break;
}
- write_lock(&session->stream_rwlock);
+ write_lock_irqsave(&session->stream_rwlock, wl_flags);
try_count++;
stream = msm_queue_find(&session->stream_q, struct msm_stream,
list, __msm_queue_find_stream, &stream_id);
if (!stream) {
- write_unlock(&session->stream_rwlock);
+ write_unlock_irqrestore(&session->stream_rwlock,
+ wl_flags);
return;
}
if (msm_vb2_get_stream_state(stream) != 1) {
- write_unlock(&session->stream_rwlock);
+ write_unlock_irqrestore(&session->stream_rwlock,
+ wl_flags);
continue;
}
@@ -320,7 +323,7 @@ void msm_delete_stream(unsigned int session_id, unsigned int stream_id)
kfree(stream);
stream = NULL;
spin_unlock_irqrestore(&(session->stream_q.lock), flags);
- write_unlock(&session->stream_rwlock);
+ write_unlock_irqrestore(&session->stream_rwlock, wl_flags);
break;
}
diff --git a/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.c b/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.c
index 36aa3f62fbec..1cbc49c8485c 100644
--- a/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.c
+++ b/drivers/media/platform/msm/ais/msm_vb2/msm_vb2.c
@@ -47,22 +47,23 @@ static int msm_vb2_buf_init(struct vb2_buffer *vb)
struct msm_session *session;
struct msm_vb2_buffer *msm_vb2_buf;
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ unsigned long rl_flags;
session = msm_get_session_from_vb2q(vb->vb2_queue);
if (IS_ERR_OR_NULL(session))
return -EINVAL;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream_from_vb2q(vb->vb2_queue);
if (!stream) {
pr_err("%s: Couldn't find stream\n", __func__);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return -EINVAL;
}
msm_vb2_buf = container_of(vbuf, struct msm_vb2_buffer, vb2_v4l2_buf);
msm_vb2_buf->in_freeq = 0;
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return 0;
}
@@ -71,7 +72,7 @@ static void msm_vb2_buf_queue(struct vb2_buffer *vb)
struct msm_vb2_buffer *msm_vb2;
struct msm_stream *stream;
struct msm_session *session;
- unsigned long flags;
+ unsigned long flags, rl_flags;
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
msm_vb2 = container_of(vbuf, struct msm_vb2_buffer, vb2_v4l2_buf);
@@ -84,19 +85,19 @@ static void msm_vb2_buf_queue(struct vb2_buffer *vb)
if (IS_ERR_OR_NULL(session))
return;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream_from_vb2q(vb->vb2_queue);
if (!stream) {
pr_err("%s:%d] NULL stream", __func__, __LINE__);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return;
}
spin_lock_irqsave(&stream->stream_lock, flags);
list_add_tail(&msm_vb2->list, &stream->queued_list);
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
}
static void msm_vb2_buf_finish(struct vb2_buffer *vb)
@@ -104,7 +105,7 @@ static void msm_vb2_buf_finish(struct vb2_buffer *vb)
struct msm_vb2_buffer *msm_vb2;
struct msm_stream *stream;
struct msm_session *session;
- unsigned long flags;
+ unsigned long flags, rl_flags;
struct msm_vb2_buffer *msm_vb2_entry, *temp;
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
@@ -118,12 +119,12 @@ static void msm_vb2_buf_finish(struct vb2_buffer *vb)
if (IS_ERR_OR_NULL(session))
return;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream_from_vb2q(vb->vb2_queue);
if (!stream) {
pr_err("%s:%d] NULL stream", __func__, __LINE__);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return;
}
@@ -136,7 +137,7 @@ static void msm_vb2_buf_finish(struct vb2_buffer *vb)
}
}
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
}
static void msm_vb2_stop_stream(struct vb2_queue *q)
@@ -144,19 +145,19 @@ static void msm_vb2_stop_stream(struct vb2_queue *q)
struct msm_vb2_buffer *msm_vb2, *temp;
struct msm_stream *stream;
struct msm_session *session;
- unsigned long flags;
+ unsigned long flags, rl_flags;
struct vb2_v4l2_buffer *vb2_v4l2_buf;
session = msm_get_session_from_vb2q(q);
if (IS_ERR_OR_NULL(session))
return;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream_from_vb2q(q);
if (!stream) {
pr_err_ratelimited("%s:%d] NULL stream", __func__, __LINE__);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return;
}
@@ -176,7 +177,7 @@ static void msm_vb2_stop_stream(struct vb2_queue *q)
msm_vb2->in_freeq = 0;
}
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
}
int msm_vb2_get_stream_state(struct msm_stream *stream)
@@ -255,17 +256,17 @@ static struct vb2_v4l2_buffer *msm_vb2_get_buf(int session_id,
struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL;
struct msm_session *session;
struct msm_vb2_buffer *msm_vb2 = NULL;
- unsigned long flags;
+ unsigned long flags, rl_flags;
session = msm_get_session(session_id);
if (IS_ERR_OR_NULL(session))
return NULL;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream(session, stream_id);
if (IS_ERR_OR_NULL(stream)) {
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return NULL;
}
@@ -291,7 +292,7 @@ static struct vb2_v4l2_buffer *msm_vb2_get_buf(int session_id,
vb2_v4l2_buf = NULL;
end:
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return vb2_v4l2_buf;
}
@@ -302,18 +303,18 @@ static struct vb2_v4l2_buffer *msm_vb2_get_buf_by_idx(int session_id,
struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL;
struct msm_session *session;
struct msm_vb2_buffer *msm_vb2 = NULL;
- unsigned long flags;
+ unsigned long flags, rl_flags;
session = msm_get_session(session_id);
if (IS_ERR_OR_NULL(session))
return NULL;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream(session, stream_id);
if (IS_ERR_OR_NULL(stream)) {
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return NULL;
}
@@ -337,7 +338,7 @@ static struct vb2_v4l2_buffer *msm_vb2_get_buf_by_idx(int session_id,
vb2_v4l2_buf = NULL;
end:
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return vb2_v4l2_buf;
}
@@ -349,17 +350,17 @@ static int msm_vb2_put_buf(struct vb2_v4l2_buffer *vb, int session_id,
struct msm_vb2_buffer *msm_vb2;
struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL;
int rc = 0;
- unsigned long flags;
+ unsigned long flags, rl_flags;
session = msm_get_session(session_id);
if (IS_ERR_OR_NULL(session))
return -EINVAL;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream(session, stream_id);
if (IS_ERR_OR_NULL(stream)) {
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return -EINVAL;
}
@@ -374,6 +375,8 @@ static int msm_vb2_put_buf(struct vb2_v4l2_buffer *vb, int session_id,
pr_err("VB buffer is INVALID vb=%pK, ses_id=%d, str_id=%d\n",
vb, session_id, stream_id);
spin_unlock_irqrestore(&stream->stream_lock, flags);
+ read_unlock_irqrestore(&session->stream_rwlock,
+ rl_flags);
return -EINVAL;
}
msm_vb2 =
@@ -390,7 +393,7 @@ static int msm_vb2_put_buf(struct vb2_v4l2_buffer *vb, int session_id,
rc = -EINVAL;
}
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return rc;
}
@@ -398,7 +401,7 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id,
unsigned int stream_id, uint32_t sequence,
struct timeval *ts, uint32_t reserved)
{
- unsigned long flags;
+ unsigned long flags, rl_flags;
struct msm_vb2_buffer *msm_vb2;
struct msm_stream *stream;
struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL;
@@ -409,11 +412,11 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id,
if (IS_ERR_OR_NULL(session))
return -EINVAL;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream(session, stream_id);
if (IS_ERR_OR_NULL(stream)) {
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return -EINVAL;
}
@@ -428,6 +431,8 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id,
pr_err("VB buffer is INVALID ses_id=%d, str_id=%d, vb=%pK\n",
session_id, stream_id, vb);
spin_unlock_irqrestore(&stream->stream_lock, flags);
+ read_unlock_irqrestore(&session->stream_rwlock,
+ rl_flags);
return -EINVAL;
}
msm_vb2 = container_of(vb2_v4l2_buf,
@@ -448,7 +453,7 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id,
rc = -EINVAL;
}
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return rc;
}
@@ -459,18 +464,18 @@ long msm_vb2_return_buf_by_idx(int session_id, unsigned int stream_id,
struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL;
struct msm_session *session;
struct msm_vb2_buffer *msm_vb2 = NULL;
- unsigned long flags;
+ unsigned long flags, rl_flags;
long rc = -EINVAL;
session = msm_get_session(session_id);
if (IS_ERR_OR_NULL(session))
return rc;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream(session, stream_id);
if (IS_ERR_OR_NULL(stream)) {
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return -EINVAL;
}
@@ -499,14 +504,14 @@ long msm_vb2_return_buf_by_idx(int session_id, unsigned int stream_id,
end:
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return rc;
}
EXPORT_SYMBOL(msm_vb2_return_buf_by_idx);
static int msm_vb2_flush_buf(int session_id, unsigned int stream_id)
{
- unsigned long flags;
+ unsigned long flags, rl_flags;
struct msm_vb2_buffer *msm_vb2;
struct msm_stream *stream;
struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL;
@@ -516,11 +521,11 @@ static int msm_vb2_flush_buf(int session_id, unsigned int stream_id)
if (IS_ERR_OR_NULL(session))
return -EINVAL;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream(session, stream_id);
if (IS_ERR_OR_NULL(stream)) {
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return -EINVAL;
}
@@ -532,7 +537,7 @@ static int msm_vb2_flush_buf(int session_id, unsigned int stream_id)
msm_vb2->in_freeq = 0;
}
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return 0;
}
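
Note how each converted function above restores the saved flags on every early return. A sketch of the same discipline written with a single unlock point instead, where find_stream() is a stand-in for the driver's lookup:

#include <linux/errno.h>
#include <linux/spinlock.h>

static DEFINE_RWLOCK(stream_rwlock);

static void *find_stream(void) { return NULL; }	/* stand-in */

static int lookup_and_use(void)
{
	unsigned long rl_flags;
	int rc = 0;

	read_lock_irqsave(&stream_rwlock, rl_flags);
	if (!find_stream()) {
		rc = -EINVAL;
		goto unlock;	/* every exit path must restore flags */
	}
	/* ... use the stream under the read lock ... */
unlock:
	read_unlock_irqrestore(&stream_rwlock, rl_flags);
	return rc;
}
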
diff --git a/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c b/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c
index f6d7f5fb8d32..8a49c7cf9f4a 100644
--- a/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c
+++ b/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c
@@ -424,7 +424,7 @@ int msm_camera_config_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
curr_vreg = &cam_vreg[j];
reg_ptr[j] = regulator_get(dev,
curr_vreg->reg_name);
- if (IS_ERR(reg_ptr[j])) {
+ if (IS_ERR_OR_NULL(reg_ptr[j])) {
pr_err("%s: %s get failed\n",
__func__,
curr_vreg->reg_name);
@@ -531,7 +531,7 @@ int msm_camera_enable_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
continue;
} else
j = i;
- if (IS_ERR(reg_ptr[j])) {
+ if (IS_ERR_OR_NULL(reg_ptr[j])) {
pr_err("%s: %s null regulator\n",
__func__, cam_vreg[j].reg_name);
goto disable_vreg;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index a29a9b4334c3..b5b160bc48c8 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -35,12 +35,13 @@
#define VFE40_STATS_BURST_LEN_8916_VERSION 2
#define VFE40_FETCH_BURST_LEN 3
#define VFE40_UB_SIZE 1536 /* 1536 * 128 bits = 24KB */
+#define VFE40_STATS_SIZE 392
#define VFE40_UB_SIZE_8952 2048 /* 2048 * 128 bits = 32KB */
#define VFE40_UB_SIZE_8916 3072 /* 3072 * 128 bits = 48KB */
#define VFE40_EQUAL_SLICE_UB 190 /* (UB_SIZE - STATS SIZE)/6 */
#define VFE40_EQUAL_SLICE_UB_8916 236
#define VFE40_TOTAL_WM_UB 1144 /* UB_SIZE - STATS SIZE */
-#define VFE40_TOTAL_WM_UB_8916 1656
+#define VFE40_TOTAL_WM_UB_8916 2680
#define VFE40_WM_BASE(idx) (0x6C + 0x24 * idx)
#define VFE40_RDI_BASE(idx) (0x2E8 + 0x4 * idx)
#define VFE40_XBAR_BASE(idx) (0x58 + 0x4 * (idx / 2))
@@ -104,7 +105,11 @@ static uint32_t msm_vfe40_ub_reg_offset(struct vfe_device *vfe_dev, int idx)
static uint32_t msm_vfe40_get_ub_size(struct vfe_device *vfe_dev)
{
- if (vfe_dev->vfe_hw_version == VFE40_8916_VERSION) {
+ if (vfe_dev->vfe_hw_version == VFE40_8916_VERSION ||
+ vfe_dev->vfe_hw_version == VFE40_8939_VERSION ||
+ vfe_dev->vfe_hw_version == VFE40_8937_VERSION ||
+ vfe_dev->vfe_hw_version == VFE40_8953_VERSION ||
+ vfe_dev->vfe_hw_version == VFE40_8917_VERSION) {
vfe_dev->ub_info->wm_ub = VFE40_TOTAL_WM_UB_8916;
return VFE40_TOTAL_WM_UB_8916;
}
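
The raised write-master UB budget appears to follow directly from the constants in this header: the old 1656 corresponds to the 8952 UB (2048 - 392), while the new 2680 matches the 8916 UB of 3072 minus the newly named VFE40_STATS_SIZE of 392. Restated as a sketch:

#define VFE40_UB_SIZE_8916	3072	/* 3072 * 128 bits = 48KB */
#define VFE40_STATS_SIZE	 392
/* write-master UB = total UB - stats UB: 3072 - 392 = 2680 */
#define VFE40_TOTAL_WM_UB_8916	(VFE40_UB_SIZE_8916 - VFE40_STATS_SIZE)
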
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 63f5497e63b8..5bcb3034b82a 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -674,6 +674,7 @@ void msm_isp_process_reg_upd_epoch_irq(struct vfe_device *vfe_dev,
void msm_isp_reset_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
+ uint32_t framedrop_period = 0;
stream_info->runtime_num_burst_capture = stream_info->num_burst_capture;
/**
@@ -682,9 +683,15 @@ void msm_isp_reset_framedrop(struct vfe_device *vfe_dev,
* by the request frame api
*/
if (!stream_info->controllable_output) {
- stream_info->current_framedrop_period =
+ framedrop_period =
msm_isp_get_framedrop_period(
stream_info->frame_skip_pattern);
+ if (stream_info->frame_skip_pattern == SKIP_ALL)
+ stream_info->current_framedrop_period =
+ MSM_VFE_STREAM_STOP_PERIOD;
+ else
+ stream_info->current_framedrop_period =
+ framedrop_period;
}
msm_isp_cfg_framedrop_reg(stream_info);
@@ -2909,6 +2916,8 @@ static void __msm_isp_stop_axi_streams(struct vfe_device *vfe_dev,
* those state transitions instead of directly forcing stream to
* be INACTIVE
*/
+ memset(&stream_info->sw_skip, 0,
+ sizeof(struct msm_isp_sw_framskip));
intf = SRC_TO_INTF(stream_info->stream_src);
if (stream_info->lpm_mode == 0 &&
stream_info->state != PAUSED) {
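
The first hunk above separates the SKIP_ALL pattern, which parks the stream, from ordinary skip patterns that map to a finite framedrop period. A sketch of the resulting mapping; the helper and enum names are taken from the hunk, the wrapper function is illustrative:

static uint32_t framedrop_period_for(enum msm_vfe_frame_skip_pattern pattern)
{
	/* SKIP_ALL means no frames at all: use the stop period */
	if (pattern == SKIP_ALL)
		return MSM_VFE_STREAM_STOP_PERIOD;
	/* otherwise a finite period (1, 2, 4, ... frames) */
	return msm_isp_get_framedrop_period(pattern);
}
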
diff --git a/drivers/media/platform/msm/camera_v2/sensor/Makefile b/drivers/media/platform/msm/camera_v2/sensor/Makefile
index 872dc59d218e..b04560fe42bc 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/Makefile
+++ b/drivers/media/platform/msm/camera_v2/sensor/Makefile
@@ -5,4 +5,5 @@ ccflags-y += -Idrivers/media/platform/msm/camera_v2/camera
ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/cci
obj-$(CONFIG_MSMB_CAMERA) += cci/ io/ csiphy/ csid/ actuator/ eeprom/ ois/ flash/ ir_led/ ir_cut/
+obj-$(CONFIG_MSMB_CAMERA) += laser_led/
obj-$(CONFIG_MSM_CAMERA_SENSOR) += msm_sensor_init.o msm_sensor_driver.o msm_sensor.o
diff --git a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
index 3cb6b55ccc8c..f2c765a4649f 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
@@ -331,6 +331,9 @@ static int32_t msm_cci_addr_to_num_bytes(
case MSM_CAMERA_I2C_3B_ADDR:
retVal = 3;
break;
+ case MSM_CAMERA_I2C_DWORD_ADDR:
+ retVal = 4;
+ break;
default:
pr_err("%s: %d failed: %d\n", __func__, __LINE__, addr_type);
retVal = 1;
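
With the new case, the CCI address-width mapping now covers one- to four-byte register addresses. Restated compactly; the enum constants and return values come from the hunk, the function shape is illustrative:

static int32_t addr_type_to_num_bytes(enum msm_camera_i2c_reg_addr_type t)
{
	switch (t) {
	case MSM_CAMERA_I2C_BYTE_ADDR:	return 1;
	case MSM_CAMERA_I2C_WORD_ADDR:	return 2;
	case MSM_CAMERA_I2C_3B_ADDR:	return 3;
	case MSM_CAMERA_I2C_DWORD_ADDR:	return 4;	/* added above */
	default:			return 1;	/* driver's fallback */
	}
}
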
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c
index 6d9b0e987d0d..fc6ceb1b590f 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c
@@ -67,7 +67,8 @@ int32_t msm_camera_cci_i2c_read_seq(struct msm_camera_i2c_client *client,
if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
&& client->addr_type != MSM_CAMERA_I2C_WORD_ADDR
- && client->addr_type != MSM_CAMERA_I2C_3B_ADDR)
+ && client->addr_type != MSM_CAMERA_I2C_3B_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_DWORD_ADDR)
|| num_byte == 0)
return rc;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/laser_led/Makefile b/drivers/media/platform/msm/camera_v2/sensor/laser_led/Makefile
new file mode 100644
index 000000000000..e981fc2e1f9c
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/laser_led/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/cci
+obj-$(CONFIG_MSMB_CAMERA) += msm_laser_led.o
diff --git a/drivers/media/platform/msm/camera_v2/sensor/laser_led/msm_laser_led.c b/drivers/media/platform/msm/camera_v2/sensor/laser_led/msm_laser_led.c
new file mode 100644
index 000000000000..c368f081f97b
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/laser_led/msm_laser_led.c
@@ -0,0 +1,573 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include "msm_laser_led.h"
+#include "msm_camera_dt_util.h"
+#include "msm_sd.h"
+#include "msm_cci.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+DEFINE_MSM_MUTEX(msm_laser_led_mutex);
+
+static struct v4l2_file_operations msm_laser_led_v4l2_subdev_fops;
+
+static const struct of_device_id msm_laser_led_dt_match[] = {
+ {.compatible = "qcom,laser-led", .data = NULL},
+ {}
+};
+
+static long msm_laser_led_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg);
+
+static int32_t msm_laser_led_get_subdev_id(
+ struct msm_laser_led_ctrl_t *laser_led_ctrl, void __user *arg)
+{
+ int32_t __user *subdev_id = (int32_t __user *)arg;
+
+ CDBG("Enter\n");
+ if (!subdev_id) {
+ pr_err("subdevice ID is not valid\n");
+ return -EINVAL;
+ }
+
+ if (laser_led_ctrl->laser_led_device_type !=
+ MSM_CAMERA_PLATFORM_DEVICE) {
+ pr_err("device type is not matching\n");
+ return -EINVAL;
+ }
+
+ if (copy_to_user(arg, &laser_led_ctrl->pdev->id,
+ sizeof(int32_t))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ CDBG("Exit: subdev_id %d\n", laser_led_ctrl->pdev->id);
+ return 0;
+}
+
+static struct msm_camera_i2c_fn_t msm_sensor_cci_func_tbl = {
+ .i2c_read = msm_camera_cci_i2c_read,
+ .i2c_read_seq = msm_camera_cci_i2c_read_seq,
+ .i2c_write = msm_camera_cci_i2c_write,
+ .i2c_write_table = msm_camera_cci_i2c_write_table,
+ .i2c_write_seq_table = msm_camera_cci_i2c_write_seq_table,
+ .i2c_write_table_w_microdelay =
+ msm_camera_cci_i2c_write_table_w_microdelay,
+ .i2c_util = msm_sensor_cci_i2c_util,
+ .i2c_poll = msm_camera_cci_i2c_poll,
+};
+#ifdef CONFIG_COMPAT
+static int32_t msm_laser_led_init(
+ struct msm_laser_led_ctrl_t *laser_led_ctrl,
+ struct msm_laser_led_cfg_data_t32 __user *laser_led_data)
+#else
+static int32_t msm_laser_led_init(
+ struct msm_laser_led_ctrl_t *laser_led_ctrl,
+ struct msm_laser_led_cfg_data_t __user *laser_led_data)
+#endif
+{
+ int32_t rc = -EFAULT;
+ struct msm_camera_cci_client *cci_client = NULL;
+
+ CDBG("Enter\n");
+
+ if (laser_led_ctrl->laser_led_state == MSM_CAMERA_LASER_LED_INIT) {
+ pr_err("Invalid laser_led state = %d\n",
+ laser_led_ctrl->laser_led_state);
+ return 0;
+ }
+
+ rc = laser_led_ctrl->i2c_client.i2c_func_tbl->i2c_util(
+ &laser_led_ctrl->i2c_client, MSM_CCI_INIT);
+ if (rc < 0)
+ pr_err("cci_init failed\n");
+
+ cci_client = laser_led_ctrl->i2c_client.cci_client;
+
+ if (copy_from_user(&(cci_client->sid),
+ &(laser_led_data->i2c_addr),
+ sizeof(uint16_t))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ cci_client->sid = cci_client->sid >> 1;
+ cci_client->retries = 3;
+ cci_client->id_map = 0;
+
+ if (copy_from_user(&(cci_client->i2c_freq_mode),
+ &(laser_led_data->i2c_freq_mode),
+ sizeof(enum i2c_freq_mode_t))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ laser_led_ctrl->laser_led_state = MSM_CAMERA_LASER_LED_INIT;
+
+ CDBG("Exit\n");
+ return 0;
+}
+
+static int msm_laser_led_close(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ int rc = 0;
+ struct msm_laser_led_ctrl_t *l_ctrl = v4l2_get_subdevdata(sd);
+
+ CDBG("Enter\n");
+ if (!l_ctrl) {
+ pr_err("failed: subdev data is null\n");
+ return -EINVAL;
+ }
+ mutex_lock(l_ctrl->laser_led_mutex);
+ if (l_ctrl->laser_led_device_type == MSM_CAMERA_PLATFORM_DEVICE &&
+ l_ctrl->laser_led_state != MSM_CAMERA_LASER_LED_RELEASE) {
+ rc = l_ctrl->i2c_client.i2c_func_tbl->i2c_util(
+ &l_ctrl->i2c_client, MSM_CCI_RELEASE);
+ if (rc < 0)
+ pr_err("cci_init failed: %d\n", rc);
+ }
+ l_ctrl->laser_led_state = MSM_CAMERA_LASER_LED_RELEASE;
+ mutex_unlock(l_ctrl->laser_led_mutex);
+ CDBG("Exit\n");
+ return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long msm_laser_led_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ int32_t rc = 0;
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+
+ CDBG("Enter\n");
+ switch (cmd) {
+ case VIDIOC_MSM_LASER_LED_CFG32:
+ cmd = VIDIOC_MSM_LASER_LED_CFG;
+ /* fall through: forward the converted command */
+ default:
+ rc = msm_laser_led_subdev_ioctl(sd, cmd, arg);
+ }
+
+ CDBG("Exit\n");
+ return rc;
+}
+
+static long msm_laser_led_subdev_fops_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ return msm_laser_led_subdev_do_ioctl(file, cmd, (void *)arg);
+}
+
+static int32_t msm_laser_led_control32(
+ struct msm_laser_led_ctrl_t *laser_led_ctrl,
+ void __user *argp)
+{
+ struct msm_camera_i2c_reg_setting32 conf_array32;
+ struct msm_camera_i2c_reg_setting conf_array;
+ int32_t rc = 0;
+ struct msm_laser_led_cfg_data_t32 laser_led_data;
+ uint32_t *debug_reg;
+ int i;
+ uint16_t local_data;
+
+ if (laser_led_ctrl->laser_led_state != MSM_CAMERA_LASER_LED_INIT) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, laser_led_ctrl->laser_led_state);
+ return -EFAULT;
+ }
+
+ if (copy_from_user(&laser_led_data,
+ argp,
+ sizeof(struct msm_laser_led_cfg_data_t32))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ if (copy_from_user(&conf_array32,
+ (void __user *)compat_ptr(laser_led_data.setting),
+ sizeof(struct msm_camera_i2c_reg_setting32))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ conf_array.addr_type = conf_array32.addr_type;
+ conf_array.data_type = conf_array32.data_type;
+ conf_array.delay = conf_array32.delay;
+ conf_array.size = conf_array32.size;
+
+ if (!conf_array.size ||
+ conf_array.size > I2C_REG_DATA_MAX) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ conf_array.reg_setting = kzalloc(conf_array.size *
+ (sizeof(struct msm_camera_i2c_reg_array)), GFP_KERNEL);
+ if (!conf_array.reg_setting)
+ return -ENOMEM;
+
+ if (copy_from_user(conf_array.reg_setting,
+ (void __user *)compat_ptr(conf_array32.reg_setting),
+ conf_array.size *
+ sizeof(struct msm_camera_i2c_reg_array))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ kfree(conf_array.reg_setting);
+ return -EFAULT;
+ }
+
+ debug_reg = kzalloc(laser_led_data.debug_reg_size *
+ (sizeof(uint32_t)), GFP_KERNEL);
+ if (!debug_reg) {
+ kfree(conf_array.reg_setting);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(debug_reg,
+ (void __user *)compat_ptr(laser_led_data.debug_reg),
+ laser_led_data.debug_reg_size *
+ sizeof(uint32_t))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ kfree(conf_array.reg_setting);
+ kfree(debug_reg);
+ return -EFAULT;
+ }
+
+ laser_led_ctrl->i2c_client.addr_type = conf_array.addr_type;
+
+ rc = laser_led_ctrl->i2c_client.i2c_func_tbl->
+ i2c_write_table(&(laser_led_ctrl->i2c_client),
+ &conf_array);
+
+ for (i = 0; i < laser_led_data.debug_reg_size; i++) {
+ rc = laser_led_ctrl->i2c_client.i2c_func_tbl->i2c_read(
+ &(laser_led_ctrl->i2c_client),
+ debug_reg[i],
+ &local_data, conf_array.data_type);
+ }
+
+ kfree(conf_array.reg_setting);
+ kfree(debug_reg);
+
+ return rc;
+}
+#endif
+
+static int32_t msm_laser_led_control(
+ struct msm_laser_led_ctrl_t *laser_led_ctrl,
+ void __user *argp)
+{
+ struct msm_camera_i2c_reg_setting conf_array;
+ struct msm_laser_led_cfg_data_t laser_led_data;
+
+ uint32_t *debug_reg;
+ void __user *reg_setting_uptr;
+ int i;
+ uint16_t local_data;
+ int32_t rc = 0;
+
+ if (laser_led_ctrl->laser_led_state != MSM_CAMERA_LASER_LED_INIT) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, laser_led_ctrl->laser_led_state);
+ return -EFAULT;
+ }
+
+ if (copy_from_user(&laser_led_data,
+ argp,
+ sizeof(struct msm_laser_led_cfg_data_t))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ if (copy_from_user(&conf_array,
+ (laser_led_data.setting),
+ sizeof(struct msm_camera_i2c_reg_setting))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ if (!conf_array.size ||
+ conf_array.size > I2C_REG_DATA_MAX) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ /* save the user pointer before kzalloc overwrites the field */
+ reg_setting_uptr = (void __user *)conf_array.reg_setting;
+ conf_array.reg_setting = kzalloc(conf_array.size *
+ (sizeof(struct msm_camera_i2c_reg_array)), GFP_KERNEL);
+ if (!conf_array.reg_setting)
+ return -ENOMEM;
+
+ if (copy_from_user(conf_array.reg_setting,
+ reg_setting_uptr,
+ conf_array.size *
+ sizeof(struct msm_camera_i2c_reg_array))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ kfree(conf_array.reg_setting);
+ return -EFAULT;
+ }
+
+ debug_reg = kzalloc(laser_led_data.debug_reg_size *
+ (sizeof(uint32_t)), GFP_KERNEL);
+ if (!debug_reg) {
+ kfree(conf_array.reg_setting);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(debug_reg,
+ (laser_led_data.debug_reg),
+ laser_led_data.debug_reg_size *
+ sizeof(uint32_t))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ kfree(debug_reg);
+ kfree(conf_array.reg_setting);
+ return -EFAULT;
+ }
+
+ laser_led_ctrl->i2c_client.addr_type = conf_array.addr_type;
+
+ rc = laser_led_ctrl->i2c_client.i2c_func_tbl->
+ i2c_write_table(&(laser_led_ctrl->i2c_client),
+ &conf_array);
+
+ for (i = 0; i < laser_led_data.debug_reg_size; i++) {
+ rc = laser_led_ctrl->i2c_client.i2c_func_tbl->i2c_read(
+ &(laser_led_ctrl->i2c_client),
+ debug_reg[i],
+ &local_data, conf_array.data_type);
+ }
+
+ kfree(conf_array.reg_setting);
+ kfree(debug_reg);
+
+ return rc;
+}
+
+static int32_t msm_laser_led_config(struct msm_laser_led_ctrl_t *laser_led_ctrl,
+ void __user *argp)
+{
+ int32_t rc = -EINVAL;
+ enum msm_laser_led_cfg_type_t cfg_type;
+
+#ifdef CONFIG_COMPAT
+ struct msm_laser_led_cfg_data_t32 __user *laser_led_data =
+ (struct msm_laser_led_cfg_data_t32 __user *) argp;
+#else
+ struct msm_laser_led_cfg_data_t __user *laser_led_data =
+ (struct msm_laser_led_cfg_data_t __user *) argp;
+#endif
+
+ mutex_lock(laser_led_ctrl->laser_led_mutex);
+
+ if (copy_from_user(&(cfg_type),
+ &(laser_led_data->cfg_type),
+ sizeof(enum msm_laser_led_cfg_type_t))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ mutex_unlock(laser_led_ctrl->laser_led_mutex);
+ return -EFAULT;
+ }
+
+ CDBG("type %d\n", cfg_type);
+
+ switch (cfg_type) {
+ case CFG_LASER_LED_INIT:
+ rc = msm_laser_led_init(laser_led_ctrl, laser_led_data);
+ break;
+ case CFG_LASER_LED_CONTROL:
+#ifdef CONFIG_COMPAT
+ if (is_compat_task())
+ rc = msm_laser_led_control32(laser_led_ctrl, argp);
+ else
+#endif
+ rc = msm_laser_led_control(laser_led_ctrl, argp);
+ break;
+ default:
+ rc = -EFAULT;
+ break;
+ }
+
+ mutex_unlock(laser_led_ctrl->laser_led_mutex);
+
+ CDBG("Exit: type %d\n", cfg_type);
+
+ return rc;
+}
+
+static long msm_laser_led_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ struct msm_laser_led_ctrl_t *lctrl = NULL;
+ void __user *argp = (void __user *)arg;
+
+ CDBG("Enter\n");
+
+ if (!sd) {
+ pr_err(" v4l2 ir led subdevice is NULL\n");
+ return -EINVAL;
+ }
+ lctrl = v4l2_get_subdevdata(sd);
+ if (!lctrl) {
+ pr_err("lctrl NULL\n");
+ return -EINVAL;
+ }
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_GET_SUBDEV_ID:
+ return msm_laser_led_get_subdev_id(lctrl, argp);
+ case VIDIOC_MSM_LASER_LED_CFG:
+ return msm_laser_led_config(lctrl, argp);
+ case MSM_SD_NOTIFY_FREEZE:
+ return 0;
+ case MSM_SD_SHUTDOWN:
+ if (!lctrl->i2c_client.i2c_func_tbl) {
+ pr_err("a_ctrl->i2c_client.i2c_func_tbl NULL\n");
+ return -EINVAL;
+ }
+ return msm_laser_led_close(sd, NULL);
+
+ default:
+ pr_err("invalid cmd %d\n", cmd);
+ return -ENOIOCTLCMD;
+ }
+ CDBG("Exit\n");
+}
+
+static struct v4l2_subdev_core_ops msm_laser_led_subdev_core_ops = {
+ .ioctl = msm_laser_led_subdev_ioctl,
+};
+
+static struct v4l2_subdev_ops msm_laser_led_subdev_ops = {
+ .core = &msm_laser_led_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops msm_laser_led_internal_ops = {
+ .close = msm_laser_led_close,
+};
+
+static int32_t msm_laser_led_platform_probe(struct platform_device *pdev)
+{
+ int32_t rc = 0;
+ struct msm_laser_led_ctrl_t *laser_led_ctrl = NULL;
+ struct msm_camera_cci_client *cci_client = NULL;
+
+ CDBG("Enter\n");
+ if (!pdev->dev.of_node) {
+ pr_err("IR LED device node is not present in device tree\n");
+ return -EINVAL;
+ }
+
+ laser_led_ctrl = devm_kzalloc(&pdev->dev,
+ sizeof(struct msm_laser_led_ctrl_t), GFP_KERNEL);
+ if (!laser_led_ctrl)
+ return -ENOMEM;
+
+ laser_led_ctrl->pdev = pdev;
+
+ rc = of_property_read_u32((&pdev->dev)->of_node, "cell-index",
+ &pdev->id);
+ CDBG("cell-index %d, rc %d\n", pdev->id, rc);
+ if (rc < 0) {
+ /* laser_led_ctrl is devm-allocated; no kfree needed */
+ pr_err("reading cell index failed: rc %d\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32((&pdev->dev)->of_node, "qcom,cci-master",
+ &laser_led_ctrl->cci_master);
+ CDBG("qcom,cci-master %d, rc %d\n", laser_led_ctrl->cci_master, rc);
+ if (rc < 0 || laser_led_ctrl->cci_master >= MASTER_MAX) {
+ pr_err("invalid cci master info: rc %d\n", rc);
+ return rc < 0 ? rc : -EINVAL;
+ }
+
+ laser_led_ctrl->laser_led_state = MSM_CAMERA_LASER_LED_RELEASE;
+ laser_led_ctrl->power_info.dev = &laser_led_ctrl->pdev->dev;
+ laser_led_ctrl->laser_led_device_type = MSM_CAMERA_PLATFORM_DEVICE;
+ laser_led_ctrl->i2c_client.i2c_func_tbl = &msm_sensor_cci_func_tbl;
+ laser_led_ctrl->laser_led_mutex = &msm_laser_led_mutex;
+
+ laser_led_ctrl->i2c_client.cci_client = kzalloc(sizeof(
+ struct msm_camera_cci_client), GFP_KERNEL);
+ if (!laser_led_ctrl->i2c_client.cci_client)
+ return -ENOMEM;
+
+ cci_client = laser_led_ctrl->i2c_client.cci_client;
+ cci_client->cci_subdev = msm_cci_get_subdev();
+ cci_client->cci_i2c_master = laser_led_ctrl->cci_master;
+
+ /* Initialize sub device */
+ v4l2_subdev_init(&laser_led_ctrl->msm_sd.sd, &msm_laser_led_subdev_ops);
+ v4l2_set_subdevdata(&laser_led_ctrl->msm_sd.sd, laser_led_ctrl);
+
+ laser_led_ctrl->msm_sd.sd.internal_ops = &msm_laser_led_internal_ops;
+ laser_led_ctrl->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(laser_led_ctrl->msm_sd.sd.name,
+ ARRAY_SIZE(laser_led_ctrl->msm_sd.sd.name),
+ "msm_camera_laser_led");
+ media_entity_init(&laser_led_ctrl->msm_sd.sd.entity, 0, NULL, 0);
+ laser_led_ctrl->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ laser_led_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_LASER_LED;
+ laser_led_ctrl->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x1;
+ msm_sd_register(&laser_led_ctrl->msm_sd);
+
+ laser_led_ctrl->laser_led_state = MSM_CAMERA_LASER_LED_RELEASE;
+
+ CDBG("laser_led sd name = %s\n",
+ laser_led_ctrl->msm_sd.sd.entity.name);
+ msm_laser_led_v4l2_subdev_fops = v4l2_subdev_fops;
+#ifdef CONFIG_COMPAT
+ msm_laser_led_v4l2_subdev_fops.compat_ioctl32 =
+ msm_laser_led_subdev_fops_ioctl;
+#endif
+ laser_led_ctrl->msm_sd.sd.devnode->fops =
+ &msm_laser_led_v4l2_subdev_fops;
+
+ CDBG("probe success\n");
+ return rc;
+}
+
+MODULE_DEVICE_TABLE(of, msm_laser_led_dt_match);
+
+static struct platform_driver msm_laser_led_platform_driver = {
+ .probe = msm_laser_led_platform_probe,
+ .driver = {
+ .name = "qcom,laser-led",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_laser_led_dt_match,
+ },
+};
+
+static int __init msm_laser_led_init_module(void)
+{
+ int32_t rc;
+
+ CDBG("Enter\n");
+ rc = platform_driver_register(&msm_laser_led_platform_driver);
+ if (!rc) {
+ CDBG("Exit\n");
+ return rc;
+ }
+ pr_err("laser-led driver register failed: %d\n", rc);
+
+ return rc;
+}
+
+static void __exit msm_laser_led_exit_module(void)
+{
+ platform_driver_unregister(&msm_laser_led_platform_driver);
+}
+
+module_init(msm_laser_led_init_module);
+module_exit(msm_laser_led_exit_module);
+MODULE_DESCRIPTION("MSM IR LED");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/sensor/laser_led/msm_laser_led.h b/drivers/media/platform/msm/camera_v2/sensor/laser_led/msm_laser_led.h
new file mode 100644
index 000000000000..d5cb8b435d12
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/laser_led/msm_laser_led.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MSM_LASER_LED_H
+#define MSM_LASER_LED_H
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <soc/qcom/camera2.h>
+#include <media/v4l2-subdev.h>
+#include <media/msmb_camera.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/msm_cam_sensor.h>
+#include "msm_camera_i2c.h"
+#include "msm_camera_dt_util.h"
+#include "msm_camera_io_util.h"
+#include "msm_sd.h"
+
+
+#define DEFINE_MSM_MUTEX(mutexname) \
+ static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+enum msm_camera_laser_led_state_t {
+ MSM_CAMERA_LASER_LED_INIT,
+ MSM_CAMERA_LASER_LED_RELEASE,
+};
+
+
+struct msm_laser_led_ctrl_t {
+ struct msm_sd_subdev msm_sd;
+ struct platform_device *pdev;
+ struct msm_laser_led_func_t *func_tbl;
+ struct msm_camera_power_ctrl_t power_info;
+ struct i2c_driver *i2c_driver;
+ struct platform_driver *pdriver;
+ struct msm_camera_i2c_client i2c_client;
+ enum msm_camera_device_type_t laser_led_device_type;
+ struct v4l2_subdev sdev;
+ struct v4l2_subdev_ops *laser_led_v4l2_subdev_ops;
+ struct mutex *laser_led_mutex;
+ enum msm_camera_laser_led_state_t laser_led_state;
+ enum cci_i2c_master_t cci_master;
+};
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
index 57bc392f54fd..167ed5492088 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
@@ -297,6 +297,45 @@ static int32_t msm_sensor_fill_actuator_subdevid_by_name(
return rc;
}
+static int32_t msm_sensor_fill_laser_led_subdevid_by_name(
+ struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int32_t rc = 0;
+ struct device_node *src_node = NULL;
+ uint32_t val = 0;
+ int32_t *laser_led_subdev_id;
+ struct msm_sensor_info_t *sensor_info;
+ struct device_node *of_node = s_ctrl->of_node;
+
+ if (!of_node)
+ return -EINVAL;
+
+ sensor_info = s_ctrl->sensordata->sensor_info;
+ laser_led_subdev_id = &sensor_info->subdev_id[SUB_MODULE_LASER_LED];
+ /* set subdev id to -1 and try to find a new id */
+ *laser_led_subdev_id = -1;
+
+
+ src_node = of_parse_phandle(of_node, "qcom,laserled-src", 0);
+ if (!src_node) {
+ CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+ } else {
+ rc = of_property_read_u32(src_node, "cell-index", &val);
+ CDBG("%s qcom,laser led cell index %d, rc %d\n", __func__,
+ val, rc);
+ of_node_put(src_node);
+ src_node = NULL;
+ if (rc < 0) {
+ pr_err("%s cell index not found %d\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ *laser_led_subdev_id = val;
+ }
+
+ return rc;
+}
+
static int32_t msm_sensor_fill_flash_subdevid_by_name(
struct msm_sensor_ctrl_t *s_ctrl)
{
@@ -981,6 +1020,11 @@ CSID_TG:
pr_err("%s failed %d\n", __func__, __LINE__);
goto free_camera_info;
}
+ rc = msm_sensor_fill_laser_led_subdevid_by_name(s_ctrl);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto free_camera_info;
+ }
rc = msm_sensor_fill_ois_subdevid_by_name(s_ctrl);
if (rc < 0) {
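
The added lookup follows the same shape as the neighbouring fill_*_subdevid_by_name() helpers: resolve an optional phandle, read its cell-index, and leave the id at -1 when the sub-module is absent. A condensed sketch; the property names come from the hunk, the rest is illustrative:

#include <linux/of.h>

static int read_laser_led_subdev_id(struct device_node *of_node, int32_t *id)
{
	struct device_node *src;
	u32 val;
	int rc;

	*id = -1;	/* default: no laser-led module wired up */
	src = of_parse_phandle(of_node, "qcom,laserled-src", 0);
	if (!src)
		return 0;	/* optional property, not an error */
	rc = of_property_read_u32(src, "cell-index", &val);
	of_node_put(src);	/* drop the reference taken by the parse */
	if (rc < 0)
		return -EINVAL;
	*id = val;
	return 0;
}
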
diff --git a/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c b/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c
index 302a7b16bc26..d3d48b0bbe4c 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c
@@ -33,6 +33,30 @@ static int32_t msm_ois_power_down(struct msm_ois_ctrl_t *o_ctrl);
static struct i2c_driver msm_ois_i2c_driver;
+static int32_t data_type_to_num_bytes(
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t ret_val;
+
+ switch (data_type) {
+ case MSM_CAMERA_I2C_BYTE_DATA:
+ ret_val = 1;
+ break;
+ case MSM_CAMERA_I2C_WORD_DATA:
+ ret_val = 2;
+ break;
+ case MSM_CAMERA_I2C_DWORD_DATA:
+ ret_val = 4;
+ break;
+ default:
+ pr_err("unsupported data type: %d\n",
+ data_type);
+ ret_val = 1;
+ break;
+ }
+ return ret_val;
+}
+
static int32_t msm_ois_download(struct msm_ois_ctrl_t *o_ctrl)
{
uint16_t bytes_in_tx = 0;
@@ -155,7 +179,9 @@ static int32_t msm_ois_write_settings(struct msm_ois_ctrl_t *o_ctrl,
uint16_t size, struct reg_settings_ois_t *settings)
{
int32_t rc = -EFAULT;
- int32_t i = 0;
+ int32_t i = 0, num_byte_seq = 0;
+ uint8_t *reg_data_seq;
+
struct msm_camera_i2c_seq_reg_array *reg_setting;
CDBG("Enter\n");
@@ -233,13 +259,51 @@ static int32_t msm_ois_write_settings(struct msm_ois_ctrl_t *o_ctrl,
settings[i].data_type);
break;
}
+ break;
}
+ case MSM_OIS_READ: {
+ switch (settings[i].data_type) {
+ case MSM_CAMERA_I2C_BYTE_DATA:
+ case MSM_CAMERA_I2C_WORD_DATA:
+ case MSM_CAMERA_I2C_DWORD_DATA:
+
+ num_byte_seq =
+ data_type_to_num_bytes
+ (settings[i].data_type);
+ reg_data_seq = kzalloc(sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!reg_data_seq)
+ return -ENOMEM;
+
+ rc = msm_camera_cci_i2c_read_seq
+ (&o_ctrl->i2c_client,
+ settings[i].reg_addr,
+ reg_data_seq,
+ num_byte_seq);
+
+ memcpy(&settings[i].reg_data,
+ reg_data_seq, sizeof(uint32_t));
+
+ CDBG("ois data read 0x%x from address 0x%x",
+ settings[i].reg_addr,
+ settings[i].reg_data);
+
+ kfree(reg_data_seq);
+ reg_data_seq = NULL;
+
+ break;
+ default:
+ pr_err("Unsupport data type for MSM_OIS_READ: %d\n",
+ settings[i].data_type);
+ break;
+ }
+ break;
}
if (rc < 0)
break;
+ }
}
-
CDBG("Exit\n");
return rc;
}
@@ -348,7 +412,7 @@ static int32_t msm_ois_control(struct msm_ois_ctrl_t *o_ctrl,
struct msm_ois_set_info_t *set_info)
{
struct reg_settings_ois_t *settings = NULL;
- int32_t rc = 0;
+ int32_t rc = 0, i = 0;
struct msm_camera_cci_client *cci_client = NULL;
CDBG("Enter\n");
@@ -390,6 +454,18 @@ static int32_t msm_ois_control(struct msm_ois_ctrl_t *o_ctrl,
rc = msm_ois_write_settings(o_ctrl,
set_info->ois_params.setting_size,
settings);
+
+ for (i = 0; i < set_info->ois_params.setting_size; i++) {
+ if (set_info->ois_params.settings[i].i2c_operation
+ == MSM_OIS_READ) {
+ set_info->ois_params.settings[i].reg_data =
+ settings[i].reg_data;
+ CDBG("ois_data at addr 0x%x is 0x%x",
+ set_info->ois_params.settings[i].reg_addr,
+ set_info->ois_params.settings[i].reg_data);
+ }
+ }
+
kfree(settings);
if (rc < 0) {
pr_err("Error\n");
@@ -402,7 +478,6 @@ static int32_t msm_ois_control(struct msm_ois_ctrl_t *o_ctrl,
return rc;
}
-
static int32_t msm_ois_config(struct msm_ois_ctrl_t *o_ctrl,
void __user *argp)
{
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 037c6f3b12ab..53de11d7abf1 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -2180,6 +2180,15 @@ int create_pkt_cmd_session_set_property(
pkt->size += sizeof(u32) + sizeof(struct hfi_iframe_size);
break;
}
+ case HAL_PARAM_VENC_SEND_OUTPUT_FOR_SKIPPED_FRAME:
+ {
+ create_pkt_enable(pkt->rg_property_data,
+ HFI_PROPERTY_PARAM_VENC_SEND_OUTPUT_FOR_SKIPPED_FRAMES,
+ ((struct hal_enable *)pdata)->enable);
+ pkt->size += sizeof(u32) + sizeof(struct hfi_enable);
+ break;
+ }
+
/* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
case HAL_CONFIG_BUFFER_REQUIREMENTS:
case HAL_CONFIG_PRIORITY:
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 947ade9c99ed..ec6695a670b0 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -1423,7 +1423,16 @@ static struct msm_vidc_ctrl msm_venc_ctrls[] = {
(1 << V4L2_CID_MPEG_VIDC_VIDEO_IFRAME_SIZE_UNLIMITED)),
.qmenu = iframe_sizes,
},
-
+ {
+ .id = V4L2_CID_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME,
+ .name = "Send encoder output buffer for skipped frames",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = V4L2_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME_DISABLE,
+ .maximum = V4L2_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME_ENABLE,
+ .default_value =
+ V4L2_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME_DISABLE,
+ .step = 1,
+ }
};
#define NUM_CTRLS ARRAY_SIZE(msm_venc_ctrls)
@@ -3712,6 +3721,25 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
ctrl->val);
pdata = &iframesize_type;
break;
+ case V4L2_CID_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME:
+ property_id = HAL_PARAM_VENC_SEND_OUTPUT_FOR_SKIPPED_FRAME;
+ switch (ctrl->val) {
+ case V4L2_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME_ENABLE:
+ enable.enable = 1;
+ break;
+ case V4L2_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME_DISABLE:
+ enable.enable = 0;
+ break;
+ default:
+ dprintk(VIDC_ERR,
+ "Invalid send skipped frames control value %d\n",
+ ctrl->val);
+ rc = -ENOTSUPP;
+ break;
+ }
+ pdata = &enable;
+ break;
+
default:
dprintk(VIDC_ERR, "Unsupported index: %x\n", ctrl->id);
rc = -ENOTSUPP;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 2eaae18bc2e9..733aa4769941 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -1198,6 +1198,8 @@ int msm_vidc_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize)
{
struct msm_vidc_inst *inst = instance;
struct msm_vidc_capability *capability = NULL;
+ enum hal_video_codec codec;
+ int i;
if (!inst || !fsize) {
dprintk(VIDC_ERR, "%s: invalid parameter: %pK %pK\n",
@@ -1206,15 +1208,36 @@ int msm_vidc_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize)
}
if (!inst->core)
return -EINVAL;
+ if (fsize->index != 0)
+ return -EINVAL;
+
+ codec = get_hal_codec(fsize->pixel_format);
+ if (codec == HAL_UNUSED_CODEC)
+ return -EINVAL;
- capability = &inst->capability;
- fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
- fsize->stepwise.min_width = capability->width.min;
- fsize->stepwise.max_width = capability->width.max;
- fsize->stepwise.step_width = capability->width.step_size;
- fsize->stepwise.min_height = capability->height.min;
- fsize->stepwise.max_height = capability->height.max;
- fsize->stepwise.step_height = capability->height.step_size;
+ for (i = 0; i < VIDC_MAX_SESSIONS; i++) {
+ if (inst->core->capabilities[i].codec == codec) {
+ capability = &inst->core->capabilities[i];
+ break;
+ }
+ }
+
+ if (capability) {
+ fsize->type = capability->width.step_size == 1 &&
+ capability->height.step_size == 1 ?
+ V4L2_FRMSIZE_TYPE_CONTINUOUS :
+ V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = capability->width.min;
+ fsize->stepwise.max_width = capability->width.max;
+ fsize->stepwise.step_width = capability->width.step_size;
+ fsize->stepwise.min_height = capability->height.min;
+ fsize->stepwise.max_height = capability->height.max;
+ fsize->stepwise.step_height = capability->height.step_size;
+ } else {
+ dprintk(VIDC_ERR, "%s: Invalid Pixel Fmt %#x\n",
+ __func__, fsize->pixel_format);
+ return -EINVAL;
+ }
return 0;
}
EXPORT_SYMBOL(msm_vidc_enum_framesizes);
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 6cc5f9f50ba1..d946b035b284 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -243,6 +243,7 @@ enum hal_property {
HAL_PARAM_VENC_H264_TRANSFORM_8x8,
HAL_PARAM_VENC_VIDEO_SIGNAL_INFO,
HAL_PARAM_VENC_IFRAMESIZE_TYPE,
+ HAL_PARAM_VENC_SEND_OUTPUT_FOR_SKIPPED_FRAME
};
enum hal_domain {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 31af06cd88ef..1218f0a86bc4 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -388,6 +388,8 @@ struct hfi_buffer_info {
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x033)
#define HFI_PROPERTY_PARAM_VENC_IFRAMESIZE \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x034)
+#define HFI_PROPERTY_PARAM_VENC_SEND_OUTPUT_FOR_SKIPPED_FRAMES \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x035)
#define HFI_PROPERTY_CONFIG_VENC_COMMON_START \
(HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x6000)
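
Taken together, the four vidc hunks thread one boolean through every layer: a V4L2 control, a HAL property carrying a hal_enable payload, and an HFI property packed into the set-property packet. A sketch of the packing step; the property id comes from the hunks, the struct layout is illustrative:

#include <linux/types.h>

struct hfi_enable {
	u32 enable;
};

static u32 pack_skipped_frame_prop(u32 *rg_property_data, bool on)
{
	rg_property_data[0] =
		HFI_PROPERTY_PARAM_VENC_SEND_OUTPUT_FOR_SKIPPED_FRAMES;
	((struct hfi_enable *)&rg_property_data[1])->enable = on;
	/* the packet grows by the property id plus its payload */
	return sizeof(u32) + sizeof(struct hfi_enable);
}
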
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index 57d2f89350d2..9532235b07de 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -2004,6 +2004,13 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
goto done;
}
+ /* Validate the user-provided bit-size and offset */
+ if (mapping->size > 32 ||
+ mapping->offset + mapping->size > ctrl->info.size * 8) {
+ ret = -EINVAL;
+ goto done;
+ }
+
list_for_each_entry(map, &ctrl->info.mappings, list) {
if (mapping->id == map->id) {
uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', "
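
The added check rejects user-supplied mappings whose bit-field would fall outside the control's payload: for example, a 16-bit field at bit offset 24 of a 4-byte control would need bits 24..39 of only 32. The condition restated as a small predicate:

#include <linux/types.h>

/* a mapping of `size` bits at bit `offset` must fit both in a 32-bit
 * word and in the control's `ctrl_bytes` byte payload */
static bool mapping_fits(u32 offset, u32 size, u32 ctrl_bytes)
{
	return size <= 32 && offset + size <= ctrl_bytes * 8;
}

/* mapping_fits(24, 16, 4) == false: bits 24..39 exceed 32 */
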
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 2f1c03783414..18045a7e24e0 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -788,7 +788,8 @@ static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *u
copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
put_user(kp->pending, &up->pending) ||
put_user(kp->sequence, &up->sequence) ||
- compat_put_timespec(&kp->timestamp, &up->timestamp) ||
+ put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
+ put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) ||
put_user(kp->id, &up->id) ||
copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
return -EFAULT;
@@ -879,6 +880,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
int compatible_arg = 1;
long err = 0;
+ memset(&karg, 0, sizeof(karg));
/* First, convert the command. */
switch (cmd) {
case VIDIOC_G_FMT32: cmd = VIDIOC_G_FMT; break;
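
Two independent hardening fixes here: the timestamp fields are put individually because the compat struct's layout differs from the native timespec, and karg is zeroed so converter paths that fill only some fields cannot copy uninitialized kernel stack back to userspace. The second point as a sketch, with an illustrative union standing in for the ioctl argument:

#include <linux/string.h>
#include <linux/types.h>

union karg_sketch {
	struct { u64 a; u32 b; } fmt;	/* stand-in for v4l2 structs */
	u8 raw[64];
};

static void ioctl_entry_sketch(union karg_sketch *karg)
{
	/* without this, bytes of `raw` never written by a given
	 * converter would leak to userspace in the copy-out step */
	memset(karg, 0, sizeof(*karg));
	/* ... convert user args in, call the native ioctl, copy out ... */
}
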
diff --git a/drivers/misc/hdcp.c b/drivers/misc/hdcp.c
index c6f2dbfe573d..687f55bd5afd 100644
--- a/drivers/misc/hdcp.c
+++ b/drivers/misc/hdcp.c
@@ -116,7 +116,7 @@
#define HDCP_CREATE_DEVICE_ID(x) (HDCP_DEVICE_ID | (x))
#define HDCP_TXMTR_HDMI HDCP_CREATE_DEVICE_ID(1)
-
+#define HDCP_TXMTR_DP HDCP_CREATE_DEVICE_ID(2)
#define HDCP_TXMTR_SERVICE_ID 0x0001000
#define SERVICE_CREATE_CMD(x) (HDCP_TXMTR_SERVICE_ID | x)
@@ -639,7 +639,8 @@ static int hdcp_lib_get_next_message(struct hdcp_lib_handle *handle,
case LC_SEND_L_PRIME_MESSAGE_ID:
return SKE_SEND_EKS_MESSAGE_ID;
case SKE_SEND_EKS_MESSAGE_ID:
- if (!handle->repeater_flag)
+ if (!handle->repeater_flag &&
+ handle->device_type == HDCP_TXMTR_DP)
return SKE_SEND_TYPE_ID;
case SKE_SEND_TYPE_ID:
case REPEATER_AUTH_STREAM_READY_MESSAGE_ID:
@@ -1778,6 +1779,19 @@ exit:
return rc;
}
+static void hdcp_lib_prep_type_id(struct hdcp_lib_handle *handle,
+ struct hdmi_hdcp_wakeup_data *cdata)
+{
+ memset(handle->listener_buf, 0, MAX_TX_MESSAGE_SIZE);
+ handle->listener_buf[0] = SKE_SEND_TYPE_ID;
+ handle->msglen = 2;
+ cdata->cmd = HDMI_HDCP_WKUP_CMD_SEND_MESSAGE;
+ cdata->send_msg_buf = handle->listener_buf;
+ cdata->send_msg_len = handle->msglen;
+ handle->last_msg = hdcp_lib_get_next_message(handle,
+ cdata);
+}
+
static void hdcp_lib_msg_sent(struct hdcp_lib_handle *handle)
{
struct hdmi_hdcp_wakeup_data cdata = { HDMI_HDCP_WKUP_CMD_INVALID };
@@ -1802,18 +1816,29 @@ static void hdcp_lib_msg_sent(struct hdcp_lib_handle *handle)
cdata.cmd = HDMI_HDCP_WKUP_CMD_LINK_POLL;
break;
case SKE_SEND_EKS_MESSAGE_ID:
+ /*
+ * a) if it is a repeater, irrespective of device type,
+ * start CMD_LINK_POLL to trigger repeater auth
+ * b) if it is not a repeater and the device is DP,
+ * first send SKE_SEND_TYPE_ID and, upon success,
+ * enable encryption
+ * c) if it is not a repeater and the device is HDMI,
+ * skip SKE_SEND_TYPE_ID, enable encryption and
+ * start part III of authentication
+ */
if (handle->repeater_flag) {
/* poll for link check */
cdata.cmd = HDMI_HDCP_WKUP_CMD_LINK_POLL;
- } else {
- memset(handle->listener_buf, 0, MAX_TX_MESSAGE_SIZE);
- handle->listener_buf[0] = SKE_SEND_TYPE_ID;
- handle->msglen = 2;
- cdata.cmd = HDMI_HDCP_WKUP_CMD_SEND_MESSAGE;
- cdata.send_msg_buf = handle->listener_buf;
- cdata.send_msg_len = handle->msglen;
- handle->last_msg = hdcp_lib_get_next_message(handle,
- &cdata);
+ } else if (handle->device_type == HDCP_TXMTR_DP) {
+ hdcp_lib_prep_type_id(handle, &cdata);
+ } else if (handle->device_type == HDCP_TXMTR_HDMI) {
+ if (!hdcp_lib_enable_encryption(handle)) {
+ handle->authenticated = true;
+ cdata.cmd = HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS;
+ hdcp_lib_wakeup_client(handle, &cdata);
+ }
+ /* poll for link check */
+ cdata.cmd = HDMI_HDCP_WKUP_CMD_LINK_POLL;
}
break;
case REPEATER_AUTH_SEND_ACK_MESSAGE_ID:
diff --git a/drivers/misc/qcom/qdsp6v2/audio_aac.c b/drivers/misc/qcom/qdsp6v2/audio_aac.c
index 94d563a211ec..1f02576a0848 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_aac.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_aac.c
@@ -2,7 +2,7 @@
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -46,7 +46,9 @@ static long audio_ioctl_shared(struct file *file, unsigned int cmd,
audio->ac->session);
if (audio->feedback == NON_TUNNEL_MODE) {
/* Configure PCM output block */
- rc = q6asm_enc_cfg_blk_pcm(audio->ac, 0, 0);
+ rc = q6asm_enc_cfg_blk_pcm(audio->ac,
+ audio->pcm_cfg.sample_rate,
+ audio->pcm_cfg.channel_count);
if (rc < 0) {
pr_err("pcm output block config failed\n");
break;
diff --git a/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c b/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c
index 91bbba176dfd..42b45ec7d9d9 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c
@@ -2,7 +2,7 @@
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -58,9 +58,9 @@ static long audio_ioctl_shared(struct file *file, unsigned int cmd,
audio->ac->session);
if (audio->feedback == NON_TUNNEL_MODE) {
/* Configure PCM output block */
- rc = q6asm_enc_cfg_blk_pcm_native(audio->ac,
- aac_cfg.sample_rate,
- aac_cfg.ch_cfg);
+ rc = q6asm_enc_cfg_blk_pcm(audio->ac,
+ audio->pcm_cfg.sample_rate,
+ audio->pcm_cfg.channel_count);
if (rc < 0) {
pr_err("pcm output block config failed\n");
break;
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index cf897947fff2..8c48a5c05bbe 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -3122,6 +3122,7 @@ static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
struct qseecom_send_cmd_req *req)
{
int ret = 0;
+ int ret2 = 0;
u32 reqd_len_sb_in = 0;
struct qseecom_client_send_data_ireq send_data_req = {0};
struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
@@ -3220,32 +3221,38 @@ static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
if (ret) {
pr_err("scm_call() failed with err: %d (app_id = %d)\n",
ret, data->client.app_id);
- return ret;
+ goto exit;
}
if (qseecom.qsee_reentrancy_support) {
ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
+ if (ret)
+ goto exit;
} else {
if (resp.result == QSEOS_RESULT_INCOMPLETE) {
ret = __qseecom_process_incomplete_cmd(data, &resp);
if (ret) {
pr_err("process_incomplete_cmd failed err: %d\n",
ret);
- return ret;
+ goto exit;
}
} else {
if (resp.result != QSEOS_RESULT_SUCCESS) {
pr_err("Response result %d not supported\n",
resp.result);
ret = -EINVAL;
+ goto exit;
}
}
}
- ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+exit:
+ ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
data->client.sb_virt, data->client.sb_length,
ION_IOC_INV_CACHES);
- if (ret)
- pr_err("cache operation failed %d\n", ret);
+ if (ret2) {
+ pr_err("cache operation failed %d\n", ret2);
+ return ret2;
+ }
return ret;
}
@@ -6566,6 +6573,7 @@ static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
bool found_app = false;
unsigned long flags;
int ret = 0;
+ int ret2 = 0;
uint32_t reqd_len_sb_in = 0;
void *cmd_buf = NULL;
size_t cmd_len;
@@ -6675,43 +6683,47 @@ static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
if (ret) {
pr_err("scm_call() failed with err: %d (app_id = %d)\n",
ret, data->client.app_id);
- return ret;
+ goto exit;
}
if (qseecom.qsee_reentrancy_support) {
ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
+ if (ret)
+ goto exit;
} else {
if (resp.result == QSEOS_RESULT_INCOMPLETE) {
ret = __qseecom_process_incomplete_cmd(data, &resp);
if (ret) {
pr_err("process_incomplete_cmd failed err: %d\n",
ret);
- return ret;
+ goto exit;
}
} else {
if (resp.result != QSEOS_RESULT_SUCCESS) {
pr_err("Response result %d not supported\n",
resp.result);
ret = -EINVAL;
+ goto exit;
}
}
}
- ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+exit:
+ ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
data->client.sb_virt, data->client.sb_length,
ION_IOC_INV_CACHES);
- if (ret) {
+ if (ret2) {
pr_err("cache operation failed %d\n", ret);
- return ret;
+ return ret2;
}
if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
- ret = __qseecom_update_qteec_req_buf(
+ ret2 = __qseecom_update_qteec_req_buf(
(struct qseecom_qteec_modfd_req *)req, data, true);
- if (ret)
- return ret;
+ if (ret2)
+ return ret2;
}
- return 0;
+ return ret;
}
static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
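
Both qseecom hunks converge the error paths on a single exit label so the ION cache invalidation always runs, and track its result in a separate ret2 so a cache failure is reported without silently discarding the original command error. The shape of that change, with stand-in helpers:

static int do_scm_call(void)       { return 0; }	/* stand-in */
static int process_response(void)  { return 0; }	/* stand-in */
static int invalidate_caches(void) { return 0; }	/* stand-in */

static int send_cmd_sketch(void)
{
	int ret, ret2;

	ret = do_scm_call();
	if (ret)
		goto exit;
	ret = process_response();
exit:
	ret2 = invalidate_caches();	/* runs on every path */
	if (ret2)
		return ret2;		/* cache failure takes precedence */
	return ret;			/* otherwise the original result */
}
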
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c002fa5ff602..063e00517660 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1224,16 +1224,16 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
- mmc_put_card(card);
-
- err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
-
if (mmc_card_cmdq(card)) {
if (mmc_cmdq_halt(card->host, false))
pr_err("%s: %s: cmdq unhalt failed\n",
mmc_hostname(card->host), __func__);
}
+ mmc_put_card(card);
+
+ err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
+
cmd_done:
mmc_blk_put(md);
cmd_err:
@@ -4000,7 +4000,7 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
} else {
pr_err("%s: %s: partition switch failed err = %d\n",
md->disk->disk_name, __func__, err);
- ret = 0;
+ ret = err;
goto out;
}
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 0da9c5caea13..372f1fbbde4c 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -3301,6 +3301,13 @@ static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
pm_wakeup_event(mmc_dev(host), 5000);
host->detect_change = 1;
+ /*
+ * Change in cd_gpio state, so make sure detection part is
+ * not overided because of manual resume.
+ */
+ if (cd_irq && mmc_bus_manual_resume(host))
+ host->ignore_bus_resume_flags = true;
+
mmc_schedule_delayed_work(&host->detect, delay);
}
@@ -4165,6 +4172,18 @@ int mmc_detect_card_removed(struct mmc_host *host)
}
EXPORT_SYMBOL(mmc_detect_card_removed);
+/*
+ * This should be called to make sure that the detect work (mmc_rescan)
+ * is completed. Drivers may use this function from async schedule/probe
+ * contexts to make sure that the boot-device detection has completed
+ * by the time the async_schedule callback returns.
+ */
+void mmc_flush_detect_work(struct mmc_host *host)
+{
+ flush_delayed_work(&host->detect);
+}
+EXPORT_SYMBOL(mmc_flush_detect_work);
+
void mmc_rescan(struct work_struct *work)
{
unsigned long flags;
@@ -4199,6 +4218,8 @@ void mmc_rescan(struct work_struct *work)
host->bus_ops->detect(host);
host->detect_change = 0;
+ if (host->ignore_bus_resume_flags)
+ host->ignore_bus_resume_flags = false;
/*
* Let mmc_bus_put() free the bus/bus_ops if we've found that
@@ -4456,7 +4477,8 @@ int mmc_pm_notify(struct notifier_block *notify_block,
spin_lock_irqsave(&host->lock, flags);
host->rescan_disable = 0;
- if (mmc_bus_manual_resume(host)) {
+ if (mmc_bus_manual_resume(host) &&
+ !host->ignore_bus_resume_flags) {
spin_unlock_irqrestore(&host->lock, flags);
break;
}
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 9bef77ba29fd..21836eac001e 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1237,7 +1237,10 @@ static int mmc_sd_suspend(struct mmc_host *host)
if (!err) {
pm_runtime_disable(&host->card->dev);
pm_runtime_set_suspended(&host->card->dev);
- }
+ /* if suspend fails, force mmc_detect_change during resume */
+ } else if (mmc_bus_manual_resume(host))
+ host->ignore_bus_resume_flags = true;
+
MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
return err;
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index df3fce93b6d1..45d2f69f5f1a 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -4747,6 +4747,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
mmc_hostname(host->mmc), __func__, ret);
device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
}
+ if (sdhci_msm_is_bootdevice(&pdev->dev))
+ mmc_flush_detect_work(host->mmc);
+
/* Successful initialization */
goto out;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4cd2a7d0124f..7923bfdc9b30 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -3676,7 +3676,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
u32 tempval1 = gfar_read(&regs->maccfg1);
u32 tempval = gfar_read(&regs->maccfg2);
u32 ecntrl = gfar_read(&regs->ecntrl);
- u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW);
+ u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
if (phydev->duplex != priv->oldduplex) {
if (!(phydev->duplex))
diff --git a/drivers/net/ethernet/msm/msm_rmnet_mhi.c b/drivers/net/ethernet/msm/msm_rmnet_mhi.c
index de14dcc6f4ed..a342e39b9f43 100644
--- a/drivers/net/ethernet/msm/msm_rmnet_mhi.c
+++ b/drivers/net/ethernet/msm/msm_rmnet_mhi.c
@@ -958,6 +958,7 @@ static void rmnet_mhi_cb(struct mhi_cb_info *cb_info)
{
struct rmnet_mhi_private *rmnet_mhi_ptr;
struct mhi_result *result;
+ char ifalias[IFALIASZ];
int r = 0;
if (!cb_info || !cb_info->result) {
@@ -979,9 +980,16 @@ static void rmnet_mhi_cb(struct mhi_cb_info *cb_info)
	 * as we set mhi_enabled = 0, we guarantee the rest of the
	 * driver will not touch any critical data.
*/
+ snprintf(ifalias, sizeof(ifalias), "%s", "unidentified_netdev");
write_lock_irq(&rmnet_mhi_ptr->pm_lock);
rmnet_mhi_ptr->mhi_enabled = 0;
write_unlock_irq(&rmnet_mhi_ptr->pm_lock);
+	/* Set the "unidentified_netdev" string as the ifalias
+	 * on error notification
+ */
+ rtnl_lock();
+ dev_set_alias(rmnet_mhi_ptr->dev, ifalias, strlen(ifalias));
+ rtnl_unlock();
if (cb_info->chan == rmnet_mhi_ptr->rx_channel) {
rmnet_log(rmnet_mhi_ptr, MSG_INFO,
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 829be21f97b2..be258d90de9e 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -724,7 +724,7 @@ static void ql_build_coredump_seg_header(
seg_hdr->cookie = MPI_COREDUMP_COOKIE;
seg_hdr->segNum = seg_number;
seg_hdr->segSize = seg_size;
- memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
+ strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
}
/*
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 49d9f0a789fe..7d0690433ee0 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -541,9 +541,6 @@ void phy_stop_machine(struct phy_device *phydev)
if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
phydev->state = PHY_UP;
mutex_unlock(&phydev->lock);
-
- /* Now we can run the state machine synchronously */
- phy_state_machine(&phydev->state_queue.work);
}
/**
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 4bb14d43e136..01175d94adca 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -5479,7 +5479,8 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct cfg80211_scan_request *req = &hw_req->req;
struct wmi_start_scan_arg arg;
- int ret = 0;
+ const u8 *ptr;
+ int ret = 0, ie_skip_len = 0;
int i;
mutex_lock(&ar->conf_mutex);
@@ -5511,8 +5512,16 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
arg.scan_id = ATH10K_SCAN_ID;
if (req->ie_len) {
- arg.ie_len = req->ie_len;
- memcpy(arg.ie, req->ie, arg.ie_len);
+ if (QCA_REV_WCN3990(ar)) {
+ ptr = req->ie;
+ while (ptr[0] == WLAN_EID_SUPP_RATES ||
+ ptr[0] == WLAN_EID_EXT_SUPP_RATES) {
+ ie_skip_len = ptr[1] + 2;
+ ptr += ie_skip_len;
+ }
+ }
+ arg.ie_len = req->ie_len - ie_skip_len;
+ memcpy(arg.ie, req->ie + ie_skip_len, arg.ie_len);
}
if (req->n_ssids) {
@@ -5521,6 +5530,11 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
arg.ssids[i].len = req->ssids[i].ssid_len;
arg.ssids[i].ssid = req->ssids[i].ssid;
}
+ if (QCA_REV_WCN3990(ar)) {
+ arg.scan_ctrl_flags &=
+ ~(WMI_SCAN_ADD_BCAST_PROBE_REQ |
+ WMI_SCAN_CHAN_STAT_EVENT);
+ }
} else {
arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
}
@@ -6419,7 +6433,13 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
arg.dwell_time_passive = scan_time_msec;
arg.max_scan_time = scan_time_msec;
arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
- arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+ if (QCA_REV_WCN3990(ar)) {
+ arg.scan_ctrl_flags &= ~(WMI_SCAN_FILTER_PROBE_REQ |
+ WMI_SCAN_CHAN_STAT_EVENT |
+ WMI_SCAN_ADD_BCAST_PROBE_REQ);
+ } else {
+ arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+ }
arg.burst_duration_ms = duration;
ret = ath10k_start_scan(ar, &arg);
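
The WCN3990 branch above strips leading Supported Rates and Extended Supported Rates elements from the scan IEs; each information element is a (tag, length, payload) triple, so ptr[1] + 2 advances exactly one element. The loop relies on cfg80211 having validated req->ie; a bounds-checked variant of the same walk would look like the sketch below (skip_rate_ies() is illustrative only, not part of the patch):

	/* Returns how many leading rate-IE bytes to drop; never reads
	 * past ie_len. Element layout: ie[0] = tag, ie[1] = length.
	 */
	static int skip_rate_ies(const u8 *ie, int ie_len)
	{
		int off = 0;

		while (off + 2 <= ie_len &&
		       (ie[off] == WLAN_EID_SUPP_RATES ||
			ie[off] == WLAN_EID_EXT_SUPP_RATES)) {
			int elem_len = 2 + ie[off + 1];

			if (off + elem_len > ie_len)
				break;	/* truncated element */
			off += elem_len;
		}
		return off;
	}
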
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index f5aa88a76f17..c42d7eebf465 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -27,6 +27,8 @@
#include "qmi.h"
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
#define WCN3990_MAX_IRQ 12
@@ -48,6 +50,7 @@ const char *ce_name[WCN3990_MAX_IRQ] = {
#define ATH10K_SNOC_TARGET_WAIT 3000
#define ATH10K_SNOC_NUM_WARM_RESET_ATTEMPTS 3
#define SNOC_HIF_POWER_DOWN_DELAY 30
+#define ATH10K_MAX_PROP_SIZE 32
static void ath10k_snoc_buffer_cleanup(struct ath10k *ar);
static int ath10k_snoc_request_irq(struct ath10k *ar);
@@ -1248,6 +1251,326 @@ int ath10k_snoc_pm_notifier(struct notifier_block *nb,
return NOTIFY_DONE;
}
+static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev,
+ struct ath10k_wcn3990_vreg_info *vreg_info)
+{
+ int ret = 0;
+ char prop_name[ATH10K_MAX_PROP_SIZE];
+ struct regulator *reg;
+ const __be32 *prop;
+ int len = 0;
+ int i;
+
+ reg = devm_regulator_get_optional(dev, vreg_info->name);
+ if (PTR_ERR(reg) == -EPROBE_DEFER) {
+ ath10k_err(ar, "EPROBE_DEFER for regulator: %s\n",
+ vreg_info->name);
+ ret = PTR_ERR(reg);
+ goto out;
+ }
+
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+
+ if (vreg_info->required) {
+ ath10k_err(ar, "Regulator %s doesn't exist: %d\n",
+ vreg_info->name, ret);
+ goto out;
+ } else {
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "Optional regulator %s doesn't exist: %d\n",
+ vreg_info->name, ret);
+ goto done;
+ }
+ }
+
+ vreg_info->reg = reg;
+
+ snprintf(prop_name, ATH10K_MAX_PROP_SIZE,
+ "qcom,%s-config", vreg_info->name);
+
+ prop = of_get_property(dev->of_node, prop_name, &len);
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Got regulator cfg,prop: %s, len: %d\n",
+ prop_name, len);
+
+ if (!prop || len < (2 * sizeof(__be32))) {
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Property %s %s\n", prop_name,
+ prop ? "invalid format" : "doesn't exist");
+ goto done;
+ }
+
+ for (i = 0; (i * sizeof(__be32)) < len; i++) {
+ switch (i) {
+ case 0:
+ vreg_info->min_v = be32_to_cpup(&prop[0]);
+ break;
+ case 1:
+ vreg_info->max_v = be32_to_cpup(&prop[1]);
+ break;
+ case 2:
+ vreg_info->load_ua = be32_to_cpup(&prop[2]);
+ break;
+ case 3:
+ vreg_info->settle_delay = be32_to_cpup(&prop[3]);
+ break;
+ default:
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s, ignoring val %d\n",
+ prop_name, i);
+ break;
+ }
+ }
+
+done:
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "vreg: %s, min_v: %u, max_v: %u, load: %u, delay: %lu\n",
+ vreg_info->name, vreg_info->min_v, vreg_info->max_v,
+ vreg_info->load_ua, vreg_info->settle_delay);
+
+ return 0;
+
+out:
+ return ret;
+}
+
+static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev,
+ struct ath10k_wcn3990_clk_info *clk_info)
+{
+ struct clk *handle;
+ int ret = 0;
+
+ handle = devm_clk_get(dev, clk_info->name);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ if (clk_info->required) {
+ ath10k_err(ar, "Clock %s isn't available: %d\n",
+ clk_info->name, ret);
+ goto out;
+ } else {
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Ignoring clk %s: %d\n",
+ clk_info->name,
+ ret);
+ ret = 0;
+ goto out;
+ }
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Clock: %s, freq: %u\n",
+ clk_info->name, clk_info->freq);
+
+ clk_info->handle = handle;
+out:
+ return ret;
+}
+
+static int ath10k_wcn3990_vreg_on(struct ath10k *ar)
+{
+ int ret = 0;
+ struct ath10k_wcn3990_vreg_info *vreg_info;
+ int i;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ for (i = 0; i < ATH10K_WCN3990_VREG_INFO_SIZE; i++) {
+ vreg_info = &ar_snoc->vreg[i];
+
+ if (!vreg_info->reg)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Regulator %s being enabled\n",
+ vreg_info->name);
+
+ ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
+ vreg_info->max_v);
+ if (ret) {
+ ath10k_err(ar,
+ "vreg %s, set failed:min:%u,max:%u,ret: %d\n",
+ vreg_info->name, vreg_info->min_v,
+ vreg_info->max_v, ret);
+ break;
+ }
+
+ if (vreg_info->load_ua) {
+ ret = regulator_set_load(vreg_info->reg,
+ vreg_info->load_ua);
+ if (ret < 0) {
+ ath10k_err(ar,
+ "Reg %s, can't set load:%u,ret: %d\n",
+ vreg_info->name,
+ vreg_info->load_ua, ret);
+ break;
+ }
+ }
+
+ ret = regulator_enable(vreg_info->reg);
+ if (ret) {
+ ath10k_err(ar, "Regulator %s, can't enable: %d\n",
+ vreg_info->name, ret);
+ break;
+ }
+
+ if (vreg_info->settle_delay)
+ udelay(vreg_info->settle_delay);
+ }
+
+ if (!ret)
+ return 0;
+
+ for (; i >= 0; i--) {
+ vreg_info = &ar_snoc->vreg[i];
+
+ if (!vreg_info->reg)
+ continue;
+
+ regulator_disable(vreg_info->reg);
+ regulator_set_load(vreg_info->reg, 0);
+ regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
+ }
+
+ return ret;
+}
+
+static int ath10k_wcn3990_vreg_off(struct ath10k *ar)
+{
+ int ret = 0;
+ struct ath10k_wcn3990_vreg_info *vreg_info;
+ int i;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ for (i = ATH10K_WCN3990_VREG_INFO_SIZE - 1; i >= 0; i--) {
+ vreg_info = &ar_snoc->vreg[i];
+
+ if (!vreg_info->reg)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Regulator %s being disabled\n",
+ vreg_info->name);
+
+ ret = regulator_disable(vreg_info->reg);
+ if (ret)
+ ath10k_err(ar, "Regulator %s, can't disable: %d\n",
+ vreg_info->name, ret);
+
+ ret = regulator_set_load(vreg_info->reg, 0);
+ if (ret < 0)
+ ath10k_err(ar, "Regulator %s, can't set load: %d\n",
+ vreg_info->name, ret);
+
+ ret = regulator_set_voltage(vreg_info->reg, 0,
+ vreg_info->max_v);
+ if (ret)
+ ath10k_err(ar, "Regulator %s, can't set voltage: %d\n",
+ vreg_info->name, ret);
+ }
+
+ return ret;
+}
+
+static int ath10k_wcn3990_clk_init(struct ath10k *ar)
+{
+ struct ath10k_wcn3990_clk_info *clk_info;
+ int i;
+ int ret = 0;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ for (i = 0; i < ATH10K_WCN3990_CLK_INFO_SIZE; i++) {
+ clk_info = &ar_snoc->clk[i];
+
+ if (!clk_info->handle)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Clock %s being enabled\n",
+ clk_info->name);
+
+ if (clk_info->freq) {
+ ret = clk_set_rate(clk_info->handle, clk_info->freq);
+
+ if (ret) {
+ ath10k_err(ar, "Clk %s,set err: %u,ret: %d\n",
+ clk_info->name, clk_info->freq,
+ ret);
+ break;
+ }
+ }
+
+ ret = clk_prepare_enable(clk_info->handle);
+ if (ret) {
+ ath10k_err(ar, "Clock %s, can't enable: %d\n",
+ clk_info->name, ret);
+ break;
+ }
+ }
+
+ if (ret == 0)
+ return 0;
+
+ for (; i >= 0; i--) {
+ clk_info = &ar_snoc->clk[i];
+
+ if (!clk_info->handle)
+ continue;
+
+ clk_disable_unprepare(clk_info->handle);
+ }
+
+ return ret;
+}
+
+static int ath10k_wcn3990_clk_deinit(struct ath10k *ar)
+{
+ struct ath10k_wcn3990_clk_info *clk_info;
+ int i;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ for (i = 0; i < ATH10K_WCN3990_CLK_INFO_SIZE; i++) {
+ clk_info = &ar_snoc->clk[i];
+
+ if (!clk_info->handle)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Clock %s being disabled\n",
+ clk_info->name);
+
+ clk_disable_unprepare(clk_info->handle);
+ }
+
+ return 0;
+}
+
+static int ath10k_hw_power_on(struct ath10k *ar)
+{
+ int ret = 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "HW Power on\n");
+
+ ret = ath10k_wcn3990_vreg_on(ar);
+ if (ret)
+ goto out;
+
+ ret = ath10k_wcn3990_clk_init(ar);
+ if (ret)
+ goto vreg_off;
+
+ return ret;
+
+vreg_off:
+ ath10k_wcn3990_vreg_off(ar);
+out:
+ return ret;
+}
+
+static int ath10k_hw_power_off(struct ath10k *ar)
+{
+ int ret = 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "HW Power off\n");
+
+ ath10k_wcn3990_clk_deinit(ar);
+
+ ret = ath10k_wcn3990_vreg_off(ar);
+
+ return ret;
+}
+
static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
.tx_sg = ath10k_snoc_hif_tx_sg,
.start = ath10k_snoc_hif_start,
@@ -1275,6 +1598,7 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
enum ath10k_hw_rev hw_rev;
struct device *dev;
u32 chip_id;
+ u32 i;
dev = &pdev->dev;
hw_rev = ATH10K_HW_WCN3990;
@@ -1308,22 +1632,43 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
setup_timer(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry,
(unsigned long)ar);
+ memcpy(ar_snoc->vreg, vreg_cfg, sizeof(vreg_cfg));
+ for (i = 0; i < ATH10K_WCN3990_VREG_INFO_SIZE; i++) {
+ ret = ath10k_get_vreg_info(ar, dev, &ar_snoc->vreg[i]);
+ if (ret)
+ goto err_core_destroy;
+ }
+
+ memcpy(ar_snoc->clk, clk_cfg, sizeof(clk_cfg));
+ for (i = 0; i < ATH10K_WCN3990_CLK_INFO_SIZE; i++) {
+ ret = ath10k_get_clk_info(ar, dev, &ar_snoc->clk[i]);
+ if (ret)
+ goto err_core_destroy;
+ }
+
+ ret = ath10k_hw_power_on(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to power on device: %d\n", ret);
+ goto err_stop_qmi_service;
+ }
+
ret = ath10k_snoc_claim(ar);
if (ret) {
ath10k_err(ar, "failed to claim device: %d\n", ret);
- goto err_stop_qmi_service;
+ goto err_hw_power_off;
}
+
ret = ath10k_snoc_bus_configure(ar);
if (ret) {
ath10k_err(ar, "failed to configure bus: %d\n", ret);
- goto err_stop_qmi_service;
+ goto err_hw_power_off;
}
ret = ath10k_snoc_alloc_pipes(ar);
if (ret) {
ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
ret);
- goto err_stop_qmi_service;
+ goto err_hw_power_off;
}
netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
@@ -1359,6 +1704,9 @@ err_free_irq:
err_free_pipes:
ath10k_snoc_free_pipes(ar);
+err_hw_power_off:
+ ath10k_hw_power_off(ar);
+
err_stop_qmi_service:
ath10k_snoc_stop_qmi_service(ar);
@@ -1389,6 +1737,7 @@ static int ath10k_snoc_remove(struct platform_device *pdev)
ath10k_snoc_release_resource(ar);
ath10k_snoc_free_pipes(ar);
ath10k_snoc_stop_qmi_service(ar);
+ ath10k_hw_power_off(ar);
ath10k_core_destroy(ar);
return 0;
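
ath10k_get_vreg_info() above reads an optional "qcom,<supply>-config" property of up to four u32 cells, interpreted in order as minimum voltage (uV), maximum voltage (uV), load (uA), and settle delay (passed to udelay(), so microseconds). A hypothetical devicetree fragment matching that parser, with an assumed node label and regulator phandle (the binding document is not part of this excerpt):

	&wifi {
		vdd-1.3-rfa-supply = <&pm8998_l17>;	/* assumed supply */
		/* <min_uV max_uV load_uA settle_delay_us> */
		qcom,vdd-1.3-rfa-config = <1304000 1304000 650000 3000>;
	};
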
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
index d6e05ba18cb8..a02cb2ad928e 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.h
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -17,6 +17,7 @@
#include "ce.h"
#include "pci.h"
#include "qmi.h"
+#include <linux/kernel.h>
#include <soc/qcom/service-locator.h>
#define ATH10K_SNOC_RX_POST_RETRY_MS 50
#define CE_POLL_PIPE 4
@@ -112,6 +113,38 @@ struct ath10k_snoc_ce_irq {
u32 irq_line;
};
+struct ath10k_wcn3990_vreg_info {
+ struct regulator *reg;
+ const char *name;
+ u32 min_v;
+ u32 max_v;
+ u32 load_ua;
+ unsigned long settle_delay;
+ bool required;
+};
+
+struct ath10k_wcn3990_clk_info {
+ struct clk *handle;
+ const char *name;
+ u32 freq;
+ bool required;
+};
+
+static struct ath10k_wcn3990_vreg_info vreg_cfg[] = {
+ {NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, false},
+ {NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false},
+ {NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false},
+ {NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false},
+};
+
+#define ATH10K_WCN3990_VREG_INFO_SIZE ARRAY_SIZE(vreg_cfg)
+
+static struct ath10k_wcn3990_clk_info clk_cfg[] = {
+ {NULL, "cxo_ref_clk_pin", 0, false},
+};
+
+#define ATH10K_WCN3990_CLK_INFO_SIZE ARRAY_SIZE(clk_cfg)
+
/* struct ath10k_snoc: SNOC info struct
* @dev: device structure
* @ar:ath10k base structure
@@ -157,6 +190,8 @@ struct ath10k_snoc {
atomic_t fw_crashed;
atomic_t pm_ops_inprogress;
struct ath10k_snoc_qmi_config qmi_cfg;
+ struct ath10k_wcn3990_vreg_info vreg[ATH10K_WCN3990_VREG_INFO_SIZE];
+ struct ath10k_wcn3990_clk_info clk[ATH10K_WCN3990_CLK_INFO_SIZE];
};
struct ath10k_event_pd_down_data {
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 07b15f4c1db4..f5360444a083 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -1553,11 +1553,7 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
cmd->ie_len = __cpu_to_le32(arg->ie_len);
cmd->num_probes = __cpu_to_le32(3);
- if (QCA_REV_WCN3990(ar)) {
- cmd->common.scan_ctrl_flags = ar->fw_flags->flags;
- cmd->common.scan_ctrl_flags |=
- __cpu_to_le32(WMI_SCAN_CHAN_STAT_EVENT);
- } else {
+ if (!QCA_REV_WCN3990(ar)) {
cmd->common.scan_ctrl_flags ^=
__cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index d6ec0de63582..86aedff096f6 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -6192,6 +6192,8 @@ void ath10k_wmi_start_scan_init(struct ath10k *ar,
| WMI_SCAN_EVENT_BSS_CHANNEL
| WMI_SCAN_EVENT_FOREIGN_CHANNEL
| WMI_SCAN_EVENT_DEQUEUED;
+ if (QCA_REV_WCN3990(ar))
+ arg->scan_ctrl_flags = ar->fw_flags->flags;
arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
arg->n_bssids = 1;
arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 7ae07a505c59..f59e5f86708b 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -2960,6 +2960,8 @@ struct wmi_start_scan_arg {
/* Different FW scan engines may choose to bail out on errors.
 * Allow the driver to have influence over that. */
#define WMI_SCAN_CONTINUE_ON_ERROR 0x80
+/* add DS content in probe request frames */
+#define WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ 0x800
/* WMI_SCAN_CLASS_MASK must be the same value as IEEE80211_SCAN_CLASS_MASK */
#define WMI_SCAN_CLASS_MASK 0xFF000000
diff --git a/drivers/net/wireless/ath/wil6210/ftm.c b/drivers/net/wireless/ath/wil6210/ftm.c
index 6891a38d7a59..5906b90b337d 100644
--- a/drivers/net/wireless/ath/wil6210/ftm.c
+++ b/drivers/net/wireless/ath/wil6210/ftm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -38,6 +38,9 @@
/* initial token to use on non-secure FTM measurement */
#define WIL_TOF_FTM_DEFAULT_INITIAL_TOKEN 2
+/* maximum AOA burst period, limited by FW */
+#define WIL_AOA_MAX_BURST_PERIOD 255
+
#define WIL_TOF_FTM_MAX_LCI_LENGTH (240)
#define WIL_TOF_FTM_MAX_LCR_LENGTH (240)
@@ -62,6 +65,7 @@ nla_policy wil_nl80211_ftm_peer_policy[
[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS] = { .type = NLA_U32 },
[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS] = { .type = NLA_NESTED },
[QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID] = { .type = NLA_U8 },
+ [QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD] = { .type = NLA_U16 },
[QCA_WLAN_VENDOR_ATTR_FTM_PEER_FREQ] = { .type = NLA_U32 },
};
@@ -311,8 +315,8 @@ wil_ftm_cfg80211_start_session(struct wil6210_priv *wil,
struct wmi_tof_session_start_cmd *cmd;
mutex_lock(&wil->ftm.lock);
- if (wil->ftm.session_started) {
- wil_err(wil, "FTM session already running\n");
+ if (wil->ftm.session_started || wil->ftm.aoa_started) {
+ wil_err(wil, "FTM or AOA session already running\n");
rc = -EAGAIN;
goto out;
}
@@ -356,6 +360,7 @@ wil_ftm_cfg80211_start_session(struct wil6210_priv *wil,
}
cmd->session_id = cpu_to_le32(WIL_FTM_FW_SESSION_ID);
+ cmd->aoa_type = request->aoa_type;
cmd->num_of_dest = cpu_to_le16(request->n_peers);
for (i = 0; i < request->n_peers; i++) {
ether_addr_copy(cmd->ftm_dest_info[i].dst_mac,
@@ -398,6 +403,8 @@ wil_ftm_cfg80211_start_session(struct wil6210_priv *wil,
request->peers[i].params.burst_duration;
cmd->ftm_dest_info[i].burst_period =
cpu_to_le16(request->peers[i].params.burst_period);
+ cmd->ftm_dest_info[i].num_burst_per_aoa_meas =
+ request->peers[i].aoa_burst_period;
}
rc = wmi_send(wil, WMI_TOF_SESSION_START_CMDID, cmd, cmd_len);
@@ -482,8 +489,8 @@ wil_aoa_cfg80211_start_measurement(struct wil6210_priv *wil,
mutex_lock(&wil->ftm.lock);
- if (wil->ftm.aoa_started) {
- wil_err(wil, "AOA measurement already running\n");
+ if (wil->ftm.aoa_started || wil->ftm.session_started) {
+ wil_err(wil, "AOA or FTM measurement already running\n");
rc = -EAGAIN;
goto out;
}
@@ -524,8 +531,8 @@ void wil_aoa_cfg80211_meas_result(struct wil6210_priv *wil,
mutex_lock(&wil->ftm.lock);
- if (!wil->ftm.aoa_started) {
- wil_info(wil, "AOA not started, not sending result\n");
+ if (!wil->ftm.aoa_started && !wil->ftm.session_started) {
+ wil_info(wil, "AOA/FTM not started, not sending result\n");
goto out;
}
@@ -678,6 +685,10 @@ void wil_aoa_evt_meas(struct wil6210_priv *wil,
int data_len = len - offsetof(struct wmi_aoa_meas_event, meas_data);
struct wil_aoa_meas_result *res;
+ if (data_len < 0) {
+ wil_err(wil, "AOA event too short (%d)\n", len);
+ return;
+ }
data_len = min_t(int, le16_to_cpu(evt->length), data_len);
res = kmalloc(sizeof(*res) + data_len, GFP_KERNEL);
@@ -749,6 +760,7 @@ int wil_ftm_start_session(struct wiphy *wiphy, struct wireless_dev *wdev,
struct nlattr *tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAX + 1];
struct nlattr *peer;
int rc, n_peers = 0, index = 0, tmp;
+ u32 aoa_type = 0;
if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities))
return -ENOTSUPP;
@@ -770,6 +782,14 @@ int wil_ftm_start_session(struct wiphy *wiphy, struct wireless_dev *wdev,
return -EINVAL;
}
+ if (tb[QCA_WLAN_VENDOR_ATTR_AOA_TYPE]) {
+ aoa_type = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_AOA_TYPE]);
+ if (aoa_type >= QCA_WLAN_VENDOR_ATTR_AOA_TYPE_MAX) {
+ wil_err(wil, "invalid AOA type: %d\n", aoa_type);
+ return -EINVAL;
+ }
+ }
+
nla_for_each_nested(peer, tb[QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS],
tmp)
n_peers++;
@@ -793,6 +813,7 @@ int wil_ftm_start_session(struct wiphy *wiphy, struct wireless_dev *wdev,
request->session_cookie =
nla_get_u64(tb[QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE]);
+ request->aoa_type = aoa_type;
request->n_peers = n_peers;
nla_for_each_nested(peer, tb[QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS],
tmp) {
@@ -821,6 +842,18 @@ int wil_ftm_start_session(struct wiphy *wiphy, struct wireless_dev *wdev,
if (tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID])
request->peers[index].secure_token_id = nla_get_u8(
tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID]);
+ if (tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD]) {
+ request->peers[index].aoa_burst_period = nla_get_u16(
+ tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD]);
+ if (request->peers[index].aoa_burst_period >
+ WIL_AOA_MAX_BURST_PERIOD) {
+ wil_err(wil, "Invalid AOA burst period at index: %d\n",
+ index);
+ rc = -EINVAL;
+ goto out;
+ }
+ }
+
rc = wil_ftm_parse_meas_params(
wil,
tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS],
diff --git a/drivers/net/wireless/ath/wil6210/ftm.h b/drivers/net/wireless/ath/wil6210/ftm.h
index 8efa292d5ff4..21923c27ec06 100644
--- a/drivers/net/wireless/ath/wil6210/ftm.h
+++ b/drivers/net/wireless/ath/wil6210/ftm.h
@@ -437,12 +437,14 @@ struct wil_ftm_meas_peer_info {
u32 flags; /* enum qca_wlan_vendor_attr_ftm_peer_meas_flags */
struct wil_ftm_meas_params params;
u8 secure_token_id;
+ u16 aoa_burst_period; /* 0 if no AOA, >0 every <value> bursts */
};
/* session request, passed to wil_ftm_cfg80211_start_session */
struct wil_ftm_session_request {
u64 session_cookie;
u32 n_peers;
+ u32 aoa_type; /* enum qca_wlan_vendor_attr_aoa_type */
/* keep last, variable size according to n_peers */
struct wil_ftm_meas_peer_info peers[0];
};
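
Because wil_ftm_session_request ends with a variable-length peers[] array, callers must size the allocation by the peer count, as the comment above notes. The implied allocation pattern, sketched with error handling trimmed (the actual allocation site is outside this excerpt):

	struct wil_ftm_session_request *request;

	/* one block: fixed header plus n_peers trailing entries */
	request = kzalloc(sizeof(*request) +
			  n_peers * sizeof(request->peers[0]), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	request->n_peers = n_peers;
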
diff --git a/drivers/net/wireless/cnss/Kconfig b/drivers/net/wireless/cnss/Kconfig
index 863f766bccdb..051b709f53f0 100644
--- a/drivers/net/wireless/cnss/Kconfig
+++ b/drivers/net/wireless/cnss/Kconfig
@@ -12,7 +12,7 @@ config CNSS
config CNSS_ASYNC
bool "Enable/disable cnss pci platform driver asynchronous probe"
- depends on CNSS
+ depends on CNSS || CNSS2
---help---
If enabled, CNSS PCI platform driver would do asynchronous probe.
Using asynchronous probe will allow CNSS PCI platform driver to
diff --git a/drivers/net/wireless/cnss2/Makefile b/drivers/net/wireless/cnss2/Makefile
index 9d383c8daa43..b49d0898178b 100644
--- a/drivers/net/wireless/cnss2/Makefile
+++ b/drivers/net/wireless/cnss2/Makefile
@@ -5,5 +5,4 @@ cnss2-y += debug.o
cnss2-y += pci.o
cnss2-y += power.o
cnss2-y += qmi.o
-cnss2-y += utils.o
cnss2-y += wlan_firmware_service_v01.o
diff --git a/drivers/net/wireless/cnss2/debug.c b/drivers/net/wireless/cnss2/debug.c
index 35d7fe1c318c..916820ee4f5d 100644
--- a/drivers/net/wireless/cnss2/debug.c
+++ b/drivers/net/wireless/cnss2/debug.c
@@ -158,6 +158,7 @@ static ssize_t cnss_dev_boot_debug_write(struct file *fp,
} else if (sysfs_streq(cmd, "enumerate")) {
ret = cnss_pci_init(plat_priv);
} else if (sysfs_streq(cmd, "download")) {
+ set_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state);
ret = cnss_pci_start_mhi(plat_priv->bus_priv);
} else if (sysfs_streq(cmd, "linkup")) {
ret = cnss_resume_pci_link(plat_priv->bus_priv);
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index 23a81ff071ee..d3afb516b119 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -58,6 +58,7 @@ MODULE_PARM_DESC(enable_waltest, "Enable to handle firmware waltest");
enum cnss_debug_quirks {
LINK_DOWN_SELF_RECOVERY,
SKIP_DEVICE_BOOT,
+ USE_CORE_ONLY_FW,
};
unsigned long quirks;
@@ -322,31 +323,6 @@ void cnss_remove_pm_qos(void)
}
EXPORT_SYMBOL(cnss_remove_pm_qos);
-u8 *cnss_common_get_wlan_mac_address(struct device *dev, u32 *num)
-{
- struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
- struct cnss_wlan_mac_info *wlan_mac_info;
- struct cnss_wlan_mac_addr *addr;
-
- if (!plat_priv)
- goto out;
-
- wlan_mac_info = &plat_priv->wlan_mac_info;
- if (!wlan_mac_info->is_wlan_mac_set) {
- cnss_pr_info("Platform driver doesn't have any MAC address!\n");
- goto out;
- }
-
- addr = &wlan_mac_info->wlan_mac_addr;
- *num = addr->no_of_mac_addr_set;
-
- return &addr->mac_addr[0][0];
-out:
- *num = 0;
- return NULL;
-}
-EXPORT_SYMBOL(cnss_common_get_wlan_mac_address);
-
int cnss_wlan_enable(struct device *dev,
struct cnss_wlan_enable_cfg *config,
enum cnss_driver_mode mode,
@@ -589,9 +565,15 @@ out:
static int cnss_driver_call_probe(struct cnss_plat_data *plat_priv)
{
- int ret;
+ int ret = 0;
struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+ if (test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
+ clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+ cnss_pr_dbg("Skip driver probe\n");
+ goto out;
+ }
+
if (!plat_priv->driver_ops) {
cnss_pr_err("driver_ops is NULL\n");
ret = -EINVAL;
@@ -629,6 +611,13 @@ static int cnss_driver_call_remove(struct cnss_plat_data *plat_priv)
{
struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+ if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state) ||
+ test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state) ||
+ test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
+ cnss_pr_dbg("Skip driver remove\n");
+ return 0;
+ }
+
if (!plat_priv->driver_ops) {
cnss_pr_err("driver_ops is NULL\n");
return -EINVAL;
@@ -1022,11 +1011,6 @@ static int cnss_qca6174_powerup(struct cnss_plat_data *plat_priv)
return -ENODEV;
}
- if (!plat_priv->driver_ops) {
- cnss_pr_err("driver_ops is NULL!\n");
- return -EINVAL;
- }
-
ret = cnss_power_on_device(plat_priv);
if (ret) {
cnss_pr_err("Failed to power on device, err = %d\n", ret);
@@ -1060,15 +1044,10 @@ static int cnss_qca6174_shutdown(struct cnss_plat_data *plat_priv)
if (!pci_priv)
return -ENODEV;
- if (test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state))
- goto skip_driver_remove;
-
- if (!plat_priv->driver_ops)
- return -EINVAL;
+ cnss_pm_request_resume(pci_priv);
cnss_driver_call_remove(plat_priv);
-skip_driver_remove:
cnss_request_bus_bandwidth(CNSS_BUS_WIDTH_NONE);
cnss_pci_set_monitor_wake_intr(pci_priv, false);
cnss_pci_set_auto_suspended(pci_priv, 0);
@@ -1135,6 +1114,12 @@ static int cnss_qca6290_powerup(struct cnss_plat_data *plat_priv)
return 0;
}
+ if (test_bit(USE_CORE_ONLY_FW, &quirks)) {
+ clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state);
+ clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+ return 0;
+ }
+
cnss_set_pin_connect_status(plat_priv);
if (qmi_bypass) {
@@ -1165,17 +1150,10 @@ static int cnss_qca6290_shutdown(struct cnss_plat_data *plat_priv)
if (!pci_priv)
return -ENODEV;
- if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state) ||
- test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state) ||
- test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state))
- goto skip_driver_remove;
-
- if (!plat_priv->driver_ops)
- return -EINVAL;
+ cnss_pm_request_resume(pci_priv);
cnss_driver_call_remove(plat_priv);
-skip_driver_remove:
cnss_request_bus_bandwidth(CNSS_BUS_WIDTH_NONE);
cnss_pci_set_monitor_wake_intr(pci_priv, false);
cnss_pci_set_auto_suspended(pci_priv, 0);
@@ -1613,7 +1591,7 @@ int cnss_force_fw_assert(struct device *dev)
if (plat_priv->device_id == QCA6174_DEVICE_ID) {
cnss_pr_info("Forced FW assert is not supported\n");
- return -EINVAL;
+ return -EOPNOTSUPP;
}
if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
@@ -2343,6 +2321,9 @@ static struct platform_driver cnss_platform_driver = {
.name = "cnss2",
.owner = THIS_MODULE,
.of_match_table = cnss_of_match_table,
+#ifdef CONFIG_CNSS_ASYNC
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+#endif
},
};
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index a5f9ce37b0ea..4bf1c27d99de 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -97,16 +97,6 @@ struct cnss_bus_bw_info {
int current_bw_vote;
};
-struct cnss_wlan_mac_addr {
- u8 mac_addr[MAX_NO_OF_MAC_ADDR][ETH_ALEN];
- u32 no_of_mac_addr_set;
-};
-
-struct cnss_wlan_mac_info {
- struct cnss_wlan_mac_addr wlan_mac_addr;
- bool is_wlan_mac_set;
-};
-
struct cnss_fw_mem {
size_t size;
void *va;
@@ -185,7 +175,6 @@ struct cnss_plat_data {
struct cnss_wlan_driver *driver_ops;
enum cnss_driver_status driver_status;
u32 recovery_count;
- struct cnss_wlan_mac_info wlan_mac_info;
unsigned long driver_state;
struct list_head event_list;
spinlock_t event_lock; /* spinlock for driver work event handling */
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 2c297fba5c34..e4c35c4d664a 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -60,7 +60,6 @@ MODULE_PARM_DESC(fbc_bypass,
static int cnss_set_pci_config_space(struct cnss_pci_data *pci_priv, bool save)
{
- int ret = 0;
struct pci_dev *pci_dev = pci_priv->pci_dev;
struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
bool link_down_or_recovery;
@@ -80,12 +79,8 @@ static int cnss_set_pci_config_space(struct cnss_pci_data *pci_priv, bool save)
}
} else {
if (link_down_or_recovery) {
- ret = msm_pcie_recover_config(pci_dev);
- if (ret) {
- cnss_pr_err("Failed to recover PCI config space, err = %d\n",
- ret);
- return ret;
- }
+ pci_load_saved_state(pci_dev, pci_priv->default_state);
+ pci_restore_state(pci_dev);
} else if (pci_priv->saved_state) {
pci_load_and_free_saved_state(pci_dev,
&pci_priv->saved_state);
@@ -100,27 +95,15 @@ static int cnss_set_pci_link(struct cnss_pci_data *pci_priv, bool link_up)
{
int ret = 0;
struct pci_dev *pci_dev = pci_priv->pci_dev;
- struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
- bool link_down_or_recovery;
-
- if (!plat_priv)
- return -ENODEV;
-
- link_down_or_recovery = pci_priv->pci_link_down_ind ||
- (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state));
ret = msm_pcie_pm_control(link_up ? MSM_PCIE_RESUME :
MSM_PCIE_SUSPEND,
pci_dev->bus->number,
pci_dev, NULL,
- link_down_or_recovery ?
- PM_OPTIONS_LINK_DOWN :
PM_OPTIONS_DEFAULT);
if (ret) {
- cnss_pr_err("Failed to %s PCI link with %s option, err = %d\n",
- link_up ? "resume" : "suspend",
- link_down_or_recovery ? "link down" : "default",
- ret);
+ cnss_pr_err("Failed to %s PCI link with default option, err = %d\n",
+ link_up ? "resume" : "suspend", ret);
return ret;
}
@@ -685,6 +668,20 @@ out:
}
EXPORT_SYMBOL(cnss_auto_resume);
+int cnss_pm_request_resume(struct cnss_pci_data *pci_priv)
+{
+ struct pci_dev *pci_dev;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ pci_dev = pci_priv->pci_dev;
+ if (!pci_dev)
+ return -ENODEV;
+
+ return pm_request_resume(&pci_dev->dev);
+}
+
int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
{
struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
@@ -1443,6 +1440,9 @@ static int cnss_pci_probe(struct pci_dev *pci_dev,
if (ret)
goto dereg_pci_event;
+ pci_save_state(pci_dev);
+ pci_priv->default_state = pci_store_saved_state(pci_dev);
+
switch (pci_dev->device) {
case QCA6174_DEVICE_ID:
pci_read_config_word(pci_dev, QCA6174_REV_ID_OFFSET,
@@ -1514,6 +1514,8 @@ static void cnss_pci_remove(struct pci_dev *pci_dev)
break;
}
+ pci_load_and_free_saved_state(pci_dev, &pci_priv->saved_state);
+
cnss_pci_disable_bus(pci_priv);
cnss_dereg_pci_event(pci_priv);
if (pci_priv->smmu_mapping)
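
The pci.c changes above replace msm_pcie_recover_config() with the generic PCI saved-state API: probe captures pristine config space once, and link-down recovery re-loads that snapshot instead of asking the MSM PCIe core to rebuild it. Reduced to its core calls, the pattern is (sketch; fields as declared in pci.h below):

	/* at probe: snapshot config space before the device is used */
	pci_save_state(pci_dev);
	pci_priv->default_state = pci_store_saved_state(pci_dev);

	/* on link-down recovery: restore the pristine snapshot */
	pci_load_saved_state(pci_dev, pci_priv->default_state);
	pci_restore_state(pci_dev);
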
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
index 4dc29c3c1f10..89edc6020d35 100644
--- a/drivers/net/wireless/cnss2/pci.h
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -65,6 +65,7 @@ struct cnss_pci_data {
bool pci_link_state;
bool pci_link_down_ind;
struct pci_saved_state *saved_state;
+ struct pci_saved_state *default_state;
struct msm_pcie_register_event msm_pci_event;
atomic_t auto_suspended;
bool monitor_wake_intr;
@@ -137,5 +138,6 @@ int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv);
void cnss_pci_stop_mhi(struct cnss_pci_data *pci_priv);
void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv);
void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv);
+int cnss_pm_request_resume(struct cnss_pci_data *pci_priv);
#endif /* _CNSS_PCI_H */
diff --git a/drivers/net/wireless/cnss2/utils.c b/drivers/net/wireless/cnss2/utils.c
deleted file mode 100644
index 9ffe386e3677..000000000000
--- a/drivers/net/wireless/cnss2/utils.c
+++ /dev/null
@@ -1,129 +0,0 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define CNSS_MAX_CH_NUM 45
-
-#include <linux/module.h>
-#include <linux/slab.h>
-
-static DEFINE_MUTEX(unsafe_channel_list_lock);
-static DEFINE_MUTEX(dfs_nol_info_lock);
-
-static struct cnss_unsafe_channel_list {
- u16 unsafe_ch_count;
- u16 unsafe_ch_list[CNSS_MAX_CH_NUM];
-} unsafe_channel_list;
-
-static struct cnss_dfs_nol_info {
- void *dfs_nol_info;
- u16 dfs_nol_info_len;
-} dfs_nol_info;
-
-int cnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count)
-{
- mutex_lock(&unsafe_channel_list_lock);
- if ((!unsafe_ch_list) || (ch_count > CNSS_MAX_CH_NUM)) {
- mutex_unlock(&unsafe_channel_list_lock);
- return -EINVAL;
- }
-
- unsafe_channel_list.unsafe_ch_count = ch_count;
-
- if (ch_count != 0) {
- memcpy((char *)unsafe_channel_list.unsafe_ch_list,
- (char *)unsafe_ch_list, ch_count * sizeof(u16));
- }
- mutex_unlock(&unsafe_channel_list_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(cnss_set_wlan_unsafe_channel);
-
-int cnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list,
- u16 *ch_count, u16 buf_len)
-{
- mutex_lock(&unsafe_channel_list_lock);
- if (!unsafe_ch_list || !ch_count) {
- mutex_unlock(&unsafe_channel_list_lock);
- return -EINVAL;
- }
-
- if (buf_len < (unsafe_channel_list.unsafe_ch_count * sizeof(u16))) {
- mutex_unlock(&unsafe_channel_list_lock);
- return -ENOMEM;
- }
-
- *ch_count = unsafe_channel_list.unsafe_ch_count;
- memcpy((char *)unsafe_ch_list,
- (char *)unsafe_channel_list.unsafe_ch_list,
- unsafe_channel_list.unsafe_ch_count * sizeof(u16));
- mutex_unlock(&unsafe_channel_list_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(cnss_get_wlan_unsafe_channel);
-
-int cnss_wlan_set_dfs_nol(const void *info, u16 info_len)
-{
- void *temp;
- struct cnss_dfs_nol_info *dfs_info;
-
- mutex_lock(&dfs_nol_info_lock);
- if (!info || !info_len) {
- mutex_unlock(&dfs_nol_info_lock);
- return -EINVAL;
- }
-
- temp = kmalloc(info_len, GFP_KERNEL);
- if (!temp) {
- mutex_unlock(&dfs_nol_info_lock);
- return -ENOMEM;
- }
-
- memcpy(temp, info, info_len);
- dfs_info = &dfs_nol_info;
- kfree(dfs_info->dfs_nol_info);
-
- dfs_info->dfs_nol_info = temp;
- dfs_info->dfs_nol_info_len = info_len;
- mutex_unlock(&dfs_nol_info_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(cnss_wlan_set_dfs_nol);
-
-int cnss_wlan_get_dfs_nol(void *info, u16 info_len)
-{
- int len;
- struct cnss_dfs_nol_info *dfs_info;
-
- mutex_lock(&dfs_nol_info_lock);
- if (!info || !info_len) {
- mutex_unlock(&dfs_nol_info_lock);
- return -EINVAL;
- }
-
- dfs_info = &dfs_nol_info;
-
- if (!dfs_info->dfs_nol_info || dfs_info->dfs_nol_info_len == 0) {
- mutex_unlock(&dfs_nol_info_lock);
- return -ENOENT;
- }
-
- len = min(info_len, dfs_info->dfs_nol_info_len);
-
- memcpy(info, dfs_info->dfs_nol_info, len);
- mutex_unlock(&dfs_nol_info_lock);
-
- return len;
-}
-EXPORT_SYMBOL(cnss_wlan_get_dfs_nol);
diff --git a/drivers/net/wireless/cnss_genl/cnss_nl.c b/drivers/net/wireless/cnss_genl/cnss_nl.c
index fafd9ce4b4c4..29dd4c999f2d 100644
--- a/drivers/net/wireless/cnss_genl/cnss_nl.c
+++ b/drivers/net/wireless/cnss_genl/cnss_nl.c
@@ -64,6 +64,8 @@ static const struct nla_policy cld80211_policy[CLD80211_ATTR_MAX + 1] = {
[CLD80211_ATTR_VENDOR_DATA] = { .type = NLA_NESTED },
[CLD80211_ATTR_DATA] = { .type = NLA_BINARY,
.len = CLD80211_MAX_NL_DATA },
+ [CLD80211_ATTR_META_DATA] = { .type = NLA_BINARY,
+ .len = CLD80211_MAX_NL_DATA },
};
static int cld80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index c3331d6201c3..9a8982f581c5 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -3740,7 +3740,7 @@ int mwifiex_init_channel_scan_gap(struct mwifiex_adapter *adapter)
if (adapter->config_bands & BAND_A)
n_channels_a = mwifiex_band_5ghz.n_channels;
- adapter->num_in_chan_stats = max_t(u32, n_channels_bg, n_channels_a);
+ adapter->num_in_chan_stats = n_channels_bg + n_channels_a;
adapter->chan_stats = vmalloc(sizeof(*adapter->chan_stats) *
adapter->num_in_chan_stats);
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index c20017ced566..fb98f42cb5e7 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -2170,6 +2170,12 @@ mwifiex_update_chan_statistics(struct mwifiex_private *priv,
sizeof(struct mwifiex_chan_stats);
for (i = 0 ; i < num_chan; i++) {
+ if (adapter->survey_idx >= adapter->num_in_chan_stats) {
+ mwifiex_dbg(adapter, WARN,
+ "FW reported too many channel results (max %d)\n",
+ adapter->num_in_chan_stats);
+ return;
+ }
chan_stats.chan_num = fw_chan_stats->chan_num;
chan_stats.bandcfg = fw_chan_stats->bandcfg;
chan_stats.flags = fw_chan_stats->flags;
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 257a9eadd595..4ac6764f4897 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -488,7 +488,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
entry += sizeof(__le16);
chan->pa_points_per_curve = 8;
- memset(chan->curve_data, 0, sizeof(*chan->curve_data));
+ memset(chan->curve_data, 0, sizeof(chan->curve_data));
memcpy(chan->curve_data, entry,
sizeof(struct p54_pa_curve_data_sample) *
min((u8)8, curve_data->points_per_channel));
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index a52230377e2c..c48b7e8ee0d6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -2269,7 +2269,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
/* find adapter */
if (!_rtl_pci_find_adapter(pdev, hw)) {
err = -ENODEV;
- goto fail3;
+ goto fail2;
}
/* Init IO handler */
@@ -2339,10 +2339,10 @@ fail3:
pci_set_drvdata(pdev, NULL);
rtl_deinit_core(hw);
+fail2:
if (rtlpriv->io.pci_mem_start != 0)
pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
-fail2:
pci_release_regions(pdev);
complete(&rtlpriv->firmware_loading_complete);
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index cd4777954f87..9bee3f11898a 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1567,6 +1567,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
wl->state = WL1251_STATE_OFF;
mutex_init(&wl->mutex);
+ spin_lock_init(&wl->wl_lock);
wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE;
wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE;
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 7d223e9080ef..77dddee2753a 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -1062,6 +1062,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
if (rc) {
ctrl_info(ctrl, "Can't get msi for the hotplug controller\n");
ctrl_info(ctrl, "Use INTx for the hotplug controller\n");
+ } else {
+ pci_set_master(pdev);
}
rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED,
diff --git a/drivers/platform/msm/gpio-usbdetect.c b/drivers/platform/msm/gpio-usbdetect.c
index dc05d7108135..adf47fc32548 100644
--- a/drivers/platform/msm/gpio-usbdetect.c
+++ b/drivers/platform/msm/gpio-usbdetect.c
@@ -50,6 +50,7 @@ static irqreturn_t gpio_usbdetect_vbus_irq(int irq, void *data)
if (usb->vbus_state) {
dev_dbg(&usb->pdev->dev, "setting vbus notification\n");
extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB, 1);
+ extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB_SPEED, 1);
} else {
dev_dbg(&usb->pdev->dev, "setting vbus removed notification\n");
extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB, 0);
@@ -85,6 +86,7 @@ static irqreturn_t gpio_usbdetect_id_irq_thread(int irq, void *data)
dev_dbg(&usb->pdev->dev, "starting usb HOST\n");
disable_irq(usb->vbus_det_irq);
extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB_HOST, 1);
+ extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB_SPEED, 1);
}
return IRQ_HANDLED;
}
@@ -186,6 +188,14 @@ static int gpio_usbdetect_probe(struct platform_device *pdev)
enable_irq_wake(usb->id_det_irq);
dev_set_drvdata(&pdev->dev, usb);
+ if (usb->id_det_irq) {
+ gpio_usbdetect_id_irq(usb->id_det_irq, usb);
+ if (!usb->id_state) {
+ gpio_usbdetect_id_irq_thread(usb->id_det_irq, usb);
+ return 0;
+ }
+ }
+
/* Read and report initial VBUS state */
gpio_usbdetect_vbus_irq(usb->vbus_det_irq, usb);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index df741c1c8e5f..9e19fa625daa 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -536,6 +536,7 @@ static int ipa_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_c
int retval;
struct ipa_wan_msg *wan_msg;
struct ipa_msg_meta msg_meta;
+ struct ipa_wan_msg cache_wan_msg;
wan_msg = kzalloc(sizeof(struct ipa_wan_msg), GFP_KERNEL);
if (!wan_msg) {
@@ -549,6 +550,8 @@ static int ipa_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_c
return -EFAULT;
}
+ memcpy(&cache_wan_msg, wan_msg, sizeof(cache_wan_msg));
+
memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
msg_meta.msg_type = msg_type;
msg_meta.msg_len = sizeof(struct ipa_wan_msg);
@@ -565,8 +568,8 @@ static int ipa_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_c
/* cache the cne event */
memcpy(&ipa_ctx->ipa_cne_evt_req_cache[
ipa_ctx->num_ipa_cne_evt_req].wan_msg,
- wan_msg,
- sizeof(struct ipa_wan_msg));
+ &cache_wan_msg,
+ sizeof(cache_wan_msg));
memcpy(&ipa_ctx->ipa_cne_evt_req_cache[
ipa_ctx->num_ipa_cne_evt_req].msg_meta,
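
Both this v2 hunk and the matching v3 hunk below snapshot the WAN message into cache_wan_msg before calling ipa_send_msg(), then populate the CNE event cache from the stack copy. The apparent reason, an inference since the changelog is not included here, is that ipa_send_msg() takes ownership of wan_msg and its free callback may release it, so reading wan_msg after a successful send would risk a use-after-free. In outline (free_cb and req_cache are shorthand for names elided in this excerpt):

	memcpy(&cache_wan_msg, wan_msg, sizeof(cache_wan_msg));
	retval = ipa_send_msg(&msg_meta, wan_msg, free_cb); /* may free wan_msg */
	if (retval)
		return retval;
	/* safe: read the snapshot, not the possibly-freed wan_msg */
	memcpy(&req_cache->wan_msg, &cache_wan_msg, sizeof(cache_wan_msg));
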
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index 4275e3d26157..ecbbe516266e 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -86,7 +86,9 @@ const char *ipa_event_name[] = {
__stringify(ADD_VLAN_IFACE),
__stringify(DEL_VLAN_IFACE),
__stringify(ADD_L2TP_VLAN_MAPPING),
- __stringify(DEL_L2TP_VLAN_MAPPING)
+ __stringify(DEL_L2TP_VLAN_MAPPING),
+ __stringify(IPA_PER_CLIENT_STATS_CONNECT_EVENT),
+ __stringify(IPA_PER_CLIENT_STATS_DISCONNECT_EVENT),
};
const char *ipa_hdr_l2_type_name[] = {
@@ -812,10 +814,11 @@ static ssize_t ipa_read_flt(struct file *file, char __user *ubuf, size_t count,
eq = true;
} else {
rt_tbl = ipa_id_find(entry->rule.rt_tbl_hdl);
- if (rt_tbl)
- rt_tbl_idx = rt_tbl->idx;
+ if (rt_tbl == NULL ||
+ rt_tbl->cookie != IPA_RT_TBL_COOKIE)
+ rt_tbl_idx = ~0;
else
- rt_tbl_idx = ~0;
+ rt_tbl_idx = rt_tbl->idx;
bitmap = entry->rule.attrib.attrib_mask;
eq = false;
}
@@ -842,10 +845,11 @@ static ssize_t ipa_read_flt(struct file *file, char __user *ubuf, size_t count,
eq = true;
} else {
rt_tbl = ipa_id_find(entry->rule.rt_tbl_hdl);
- if (rt_tbl)
- rt_tbl_idx = rt_tbl->idx;
- else
+ if (rt_tbl == NULL ||
+ rt_tbl->cookie != IPA_RT_TBL_COOKIE)
rt_tbl_idx = ~0;
+ else
+ rt_tbl_idx = rt_tbl->idx;
bitmap = entry->rule.attrib.attrib_mask;
eq = false;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 5dbd43b44540..23e4d2b0d6e8 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -411,12 +411,15 @@ int copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
{
int i, j;
+	/* prevent multiple threads from accessing num_q6_rule concurrently */
+ mutex_lock(&add_mux_channel_lock);
if (rule_req->filter_spec_list_valid == true) {
num_q6_rule = rule_req->filter_spec_list_len;
IPAWANDBG("Received (%d) install_flt_req\n", num_q6_rule);
} else {
num_q6_rule = 0;
IPAWANERR("got no UL rules from modem\n");
+ mutex_unlock(&add_mux_channel_lock);
return -EINVAL;
}
@@ -610,9 +613,11 @@ failure:
num_q6_rule = 0;
memset(ipa_qmi_ctx->q6_ul_filter_rule, 0,
sizeof(ipa_qmi_ctx->q6_ul_filter_rule));
+ mutex_unlock(&add_mux_channel_lock);
return -EINVAL;
success:
+ mutex_unlock(&add_mux_channel_lock);
return 0;
}
@@ -1536,6 +1541,8 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
mutex_unlock(&add_mux_channel_lock);
return -EFAULT;
}
+ extend_ioctl_data.u.rmnet_mux_val.vchannel_name
+ [IFNAMSIZ-1] = '\0';
IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
extend_ioctl_data.u.rmnet_mux_val.mux_id,
extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
@@ -1622,9 +1629,12 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* already got Q6 UL filter rules*/
if (ipa_qmi_ctx &&
ipa_qmi_ctx->modem_cfg_emb_pipe_flt
- == false)
+ == false) {
+ /* protect num_q6_rule */
+ mutex_lock(&add_mux_channel_lock);
rc = wwan_add_ul_flt_rule_to_ipa();
- else
+ mutex_unlock(&add_mux_channel_lock);
+ } else
rc = 0;
egress_set = true;
if (rc)
@@ -2687,6 +2697,9 @@ int rmnet_ipa_set_data_quota(struct wan_ioctl_set_data_quota *data)
enum ipa_upstream_type upstream_type;
int rc = 0;
+ /* prevent string buffer overflows */
+ data->interface_name[IFNAMSIZ-1] = '\0';
+
/* get IPA backhaul type */
upstream_type = find_upstream_type(data->interface_name);
@@ -2978,6 +2991,10 @@ int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
enum ipa_upstream_type upstream_type;
int rc = 0;
+ /* prevent string buffer overflows */
+ data->upstreamIface[IFNAMSIZ-1] = '\0';
+ data->tetherIface[IFNAMSIZ-1] = '\0';
+
/* get IPA backhaul type */
upstream_type = find_upstream_type(data->upstreamIface);
@@ -3012,6 +3029,10 @@ int rmnet_ipa_query_tethering_stats_all(
int rc = 0;
memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats));
+
+ /* prevent string buffer overflows */
+ data->upstreamIface[IFNAMSIZ-1] = '\0';
+
/* get IPA backhaul type */
upstream_type = find_upstream_type(data->upstreamIface);
@@ -3055,6 +3076,9 @@ int rmnet_ipa_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data)
memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats));
+ /* prevent string buffer overflows */
+ data->upstreamIface[IFNAMSIZ-1] = '\0';
+
/* get IPA backhaul type */
upstream_type = find_upstream_type(data->upstreamIface);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index fd503f48f17c..e9fd1560b1e8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -603,6 +603,7 @@ static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_
int retval;
struct ipa_wan_msg *wan_msg;
struct ipa_msg_meta msg_meta;
+ struct ipa_wan_msg cache_wan_msg;
wan_msg = kzalloc(sizeof(struct ipa_wan_msg), GFP_KERNEL);
if (!wan_msg) {
@@ -616,6 +617,8 @@ static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_
return -EFAULT;
}
+ memcpy(&cache_wan_msg, wan_msg, sizeof(cache_wan_msg));
+
memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
msg_meta.msg_type = msg_type;
msg_meta.msg_len = sizeof(struct ipa_wan_msg);
@@ -632,8 +635,8 @@ static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_
/* cache the cne event */
memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[
ipa3_ctx->num_ipa_cne_evt_req].wan_msg,
- wan_msg,
- sizeof(struct ipa_wan_msg));
+ &cache_wan_msg,
+ sizeof(cache_wan_msg));
memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[
ipa3_ctx->num_ipa_cne_evt_req].msg_meta,
@@ -992,8 +995,52 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
break;
}
break;
+
+ case IPA_IOC_ADD_RT_RULE_EXT:
+ if (copy_from_user(header,
+ (const void __user *)arg,
+ sizeof(struct ipa_ioc_add_rt_rule_ext))) {
+ retval = -EFAULT;
+ break;
+ }
+ pre_entry =
+ ((struct ipa_ioc_add_rt_rule_ext *)header)->num_rules;
+ pyld_sz =
+ sizeof(struct ipa_ioc_add_rt_rule_ext) +
+ pre_entry * sizeof(struct ipa_rt_rule_add_ext);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (const void __user *)arg,
+ pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+		/* add check in case the user-space module is compromised */
+ if (unlikely(
+ ((struct ipa_ioc_add_rt_rule_ext *)param)->num_rules
+ != pre_entry)) {
+ IPAERR(" prevent memory corruption(%d not match %d)\n",
+ ((struct ipa_ioc_add_rt_rule_ext *)param)->
+ num_rules,
+ pre_entry);
+ retval = -EINVAL;
+ break;
+ }
+ if (ipa3_add_rt_rule_ext(
+ (struct ipa_ioc_add_rt_rule_ext *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
case IPA_IOC_ADD_RT_RULE_AFTER:
- if (copy_from_user(header, (u8 *)arg,
+ if (copy_from_user(header, (const void __user *)arg,
sizeof(struct ipa_ioc_add_rt_rule_after))) {
retval = -EFAULT;
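
The IPA_IOC_ADD_RT_RULE_EXT handler added above copies the fixed header first to learn num_rules, sizes and copies the full payload, then verifies num_rules is unchanged. That second check closes a double-fetch race: user space could grow the count between the two copy_from_user() calls and overrun the allocation. The pattern in miniature (sketch; error paths trimmed):

	pre_entry = ((struct ipa_ioc_add_rt_rule_ext *)header)->num_rules;
	pyld_sz = sizeof(struct ipa_ioc_add_rt_rule_ext) +
		  pre_entry * sizeof(struct ipa_rt_rule_add_ext);
	param = kzalloc(pyld_sz, GFP_KERNEL);
	if (copy_from_user(param, (const void __user *)arg, pyld_sz))
		return -EFAULT;
	/* re-check: user memory may have changed between the two fetches */
	if (((struct ipa_ioc_add_rt_rule_ext *)param)->num_rules != pre_entry)
		return -EINVAL;
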
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index 8c6bd48cfb2c..97b9f04f51de 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -1353,6 +1353,7 @@ int ipa3_set_usb_max_packet_size(
return 0;
}
+/* This function is called as part of USB pipe resume */
int ipa3_xdci_connect(u32 clnt_hdl)
{
int result;
@@ -1392,11 +1393,14 @@ exit:
return result;
}
+
+/* This function is called as part of USB pipe connect */
int ipa3_xdci_start(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid)
{
struct ipa3_ep_context *ep;
int result = -EFAULT;
enum gsi_status gsi_res;
+ struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
IPADBG("entry\n");
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
@@ -1418,6 +1422,22 @@ int ipa3_xdci_start(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid)
goto write_chan_scratch_fail;
}
}
+
+ if (IPA_CLIENT_IS_PROD(ep->client) && ep->skip_ep_cfg) {
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_delay = true;
+ ep->ep_delay_set = true;
+
+ result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+ if (result)
+ IPAERR("client (ep: %d) failed result=%d\n",
+ clnt_hdl, result);
+ else
+ IPADBG("client (ep: %d) success\n", clnt_hdl);
+ } else {
+ ep->ep_delay_set = false;
+ }
+
gsi_res = gsi_start_channel(ep->gsi_chan_hdl);
if (gsi_res != GSI_STATUS_SUCCESS) {
IPAERR("Error starting channel: %d\n", gsi_res);
@@ -1622,13 +1642,15 @@ static int ipa3_xdci_stop_gsi_ch_brute_force(u32 clnt_hdl,
/* Clocks should be voted for before invoking this function */
static int ipa3_stop_ul_chan_with_data_drain(u32 qmi_req_id,
- u32 source_pipe_bitmask, bool should_force_clear, u32 clnt_hdl)
+ u32 source_pipe_bitmask, bool should_force_clear, u32 clnt_hdl,
+ bool remove_delay)
{
int result;
bool is_empty = false;
int i;
bool stop_in_proc;
struct ipa3_ep_context *ep;
+ struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
@@ -1649,6 +1671,22 @@ static int ipa3_stop_ul_chan_with_data_drain(u32 qmi_req_id,
if (!stop_in_proc)
goto exit;
+ if (remove_delay && ep->ep_delay_set == true) {
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_delay = false;
+ result = ipa3_cfg_ep_ctrl(clnt_hdl,
+ &ep_cfg_ctrl);
+ if (result) {
+ IPAERR
+ ("client (ep: %d) failed to remove delay result=%d\n",
+ clnt_hdl, result);
+ } else {
+ IPADBG("client (ep: %d) delay removed\n",
+ clnt_hdl);
+ ep->ep_delay_set = false;
+ }
+ }
+
/* if stop_in_proc, lets wait for emptiness */
for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
result = ipa3_is_xdci_channel_empty(ep, &is_empty);
@@ -1714,6 +1752,21 @@ disable_force_clear_and_exit:
if (should_force_clear)
ipa3_disable_force_clear(qmi_req_id);
exit:
+ if (remove_delay && ep->ep_delay_set == true) {
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_delay = false;
+ result = ipa3_cfg_ep_ctrl(clnt_hdl,
+ &ep_cfg_ctrl);
+ if (result) {
+ IPAERR
+ ("client (ep: %d) failed to remove delay result=%d\n",
+ clnt_hdl, result);
+ } else {
+ IPADBG("client (ep: %d) delay removed\n",
+ clnt_hdl);
+ ep->ep_delay_set = false;
+ }
+ }
return result;
}
@@ -1743,7 +1796,8 @@ int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id)
source_pipe_bitmask = 1 <<
ipa3_get_ep_mapping(ep->client);
result = ipa3_stop_ul_chan_with_data_drain(qmi_req_id,
- source_pipe_bitmask, should_force_clear, clnt_hdl);
+ source_pipe_bitmask, should_force_clear, clnt_hdl,
+ true);
if (result) {
IPAERR("Fail to stop UL channel with data drain\n");
WARN_ON(1);
@@ -1918,7 +1972,8 @@ int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
if (!is_dpl) {
source_pipe_bitmask = 1 << ipa3_get_ep_mapping(ul_ep->client);
result = ipa3_stop_ul_chan_with_data_drain(qmi_req_id,
- source_pipe_bitmask, should_force_clear, ul_clnt_hdl);
+ source_pipe_bitmask, should_force_clear, ul_clnt_hdl,
+ false);
if (result) {
IPAERR("Error stopping UL channel: result = %d\n",
result);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index c7ab616cb5b8..71da7d28a451 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -67,7 +67,9 @@ const char *ipa3_event_name[] = {
__stringify(ADD_VLAN_IFACE),
__stringify(DEL_VLAN_IFACE),
__stringify(ADD_L2TP_VLAN_MAPPING),
- __stringify(DEL_L2TP_VLAN_MAPPING)
+ __stringify(DEL_L2TP_VLAN_MAPPING),
+ __stringify(IPA_PER_CLIENT_STATS_CONNECT_EVENT),
+ __stringify(IPA_PER_CLIENT_STATS_DISCONNECT_EVENT),
};
const char *ipa3_hdr_l2_type_name[] = {
@@ -867,10 +869,11 @@ static ssize_t ipa3_read_flt(struct file *file, char __user *ubuf, size_t count,
eq = true;
} else {
rt_tbl = ipa3_id_find(entry->rule.rt_tbl_hdl);
- if (rt_tbl)
- rt_tbl_idx = rt_tbl->idx;
+ if (rt_tbl == NULL ||
+ rt_tbl->cookie != IPA_RT_TBL_COOKIE)
+ rt_tbl_idx = ~0;
else
- rt_tbl_idx = ~0;
+ rt_tbl_idx = rt_tbl->idx;
bitmap = entry->rule.attrib.attrib_mask;
eq = false;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index c2fb87ab757b..a03d8978c6c2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -1157,6 +1157,13 @@ int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules)
goto bail;
}
+ if (entry->cookie != IPA_FLT_COOKIE) {
+ IPAERR_RL("Invalid cookie value = %u flt hdl id = %d\n",
+ entry->cookie, rules->add_after_hdl);
+ result = -EINVAL;
+ goto bail;
+ }
+
if (entry->tbl != tbl) {
IPAERR_RL("given entry does not match the table\n");
result = -EINVAL;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 5ff926a60129..8e6db8f63fc1 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -435,6 +435,7 @@ struct ipa3_rt_entry {
int id;
u16 prio;
u16 rule_id;
+ u16 rule_id_valid;
};
/**
@@ -576,6 +577,7 @@ struct ipa3_ep_context {
bool switch_to_intr;
int inactive_cycles;
u32 eot_in_poll_err;
+ bool ep_delay_set;
/* sys MUST be the last element of this struct */
struct ipa3_sys_context *sys;
@@ -1615,6 +1617,8 @@ int ipa3_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls,
*/
int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
+int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules);
+
int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules);
int ipa3_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 571852c076ea..4897c4dccf59 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -695,6 +695,57 @@ int ipa3_qmi_filter_request_ex_send(
resp.resp.error, "ipa_install_filter");
}
+/* sending ul-filter-install-request to modem */
+int ipa3_qmi_ul_filter_request_send(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01 *req)
+{
+ struct ipa_configure_ul_firewall_rules_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int rc;
+
+ IPAWANDBG("IPACM pass %u rules to Q6\n",
+ req->firewall_rules_list_len);
+
+ mutex_lock(&ipa3_qmi_lock);
+ if (ipa3_qmi_ctx != NULL) {
+ /* cache the qmi_filter_request */
+ memcpy(
+ &(ipa3_qmi_ctx->ipa_configure_ul_firewall_rules_req_msg_cache[
+ ipa3_qmi_ctx->num_ipa_configure_ul_firewall_rules_req_msg]),
+ req,
+ sizeof(struct
+ ipa_configure_ul_firewall_rules_req_msg_v01));
+ ipa3_qmi_ctx->num_ipa_configure_ul_firewall_rules_req_msg++;
+ ipa3_qmi_ctx->num_ipa_configure_ul_firewall_rules_req_msg %=
+ MAX_NUM_QMI_RULE_CACHE;
+ }
+ mutex_unlock(&ipa3_qmi_lock);
+
+ req_desc.max_msg_len =
+ QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01;
+ req_desc.ei_array =
+ ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei;
+
+ memset(&resp, 0,
+ sizeof(struct ipa_configure_ul_firewall_rules_resp_msg_v01));
+ resp_desc.max_msg_len =
+ QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_V01;
+ resp_desc.ei_array =
+ ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei;
+
+ rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc,
+ req,
+ sizeof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01),
+ &resp_desc, &resp, sizeof(resp),
+ QMI_SEND_REQ_TIMEOUT_MS);
+ return ipa3_check_qmi_response(rc,
+ QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01, resp.resp.result,
+ resp.resp.error, "ipa_received_ul_firewall_filter");
+}
+
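
All of the QMI entry points in this file follow the same shape: fill a msg_desc for the request and one for the response, block in qmi_send_req_wait(), then translate the outcome with ipa3_check_qmi_response(). A distilled sketch of that shape, where the EXAMPLE_* constants and ei tables are placeholders standing in for a real message definition:

	/* assumes the msm msg_desc/qmi_send_req_wait API used above */
	static int example_qmi_call(struct qmi_handle *clnt,
				    void *req, size_t req_sz,
				    void *resp, size_t resp_sz)
	{
		struct msg_desc req_desc, resp_desc;

		req_desc.max_msg_len = EXAMPLE_REQ_MAX_MSG_LEN;
		req_desc.msg_id = EXAMPLE_REQ_MSG_ID;
		req_desc.ei_array = example_req_msg_ei;

		resp_desc.max_msg_len = EXAMPLE_RESP_MAX_MSG_LEN;
		resp_desc.msg_id = EXAMPLE_RESP_MSG_ID;
		resp_desc.ei_array = example_resp_msg_ei;

		/* blocks until the modem answers or the timeout expires */
		return qmi_send_req_wait(clnt, &req_desc, req, req_sz,
					 &resp_desc, resp, resp_sz,
					 QMI_SEND_REQ_TIMEOUT_MS);
	}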
int ipa3_qmi_enable_force_clear_datapath_send(
struct ipa_enable_force_clear_datapath_req_msg_v01 *req)
{
@@ -880,6 +931,7 @@ static void ipa3_q6_clnt_ind_cb(struct qmi_handle *handle, unsigned int msg_id,
void *ind_cb_priv)
{
struct ipa_data_usage_quota_reached_ind_msg_v01 qmi_ind;
+ struct ipa_configure_ul_firewall_rules_ind_msg_v01 qmi_ul_firewall_ind;
struct msg_desc qmi_ind_desc;
int rc = 0;
@@ -888,7 +940,7 @@ static void ipa3_q6_clnt_ind_cb(struct qmi_handle *handle, unsigned int msg_id,
return;
}
- if (QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01 == msg_id) {
+ if (msg_id == QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01) {
memset(&qmi_ind, 0, sizeof(
struct ipa_data_usage_quota_reached_ind_msg_v01));
qmi_ind_desc.max_msg_len =
@@ -908,6 +960,36 @@ static void ipa3_q6_clnt_ind_cb(struct qmi_handle *handle, unsigned int msg_id,
ipa3_broadcast_quota_reach_ind(qmi_ind.apn.mux_id,
IPA_UPSTEAM_MODEM);
}
+
+ if (msg_id == QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_V01) {
+ memset(&qmi_ul_firewall_ind, 0, sizeof(
+ struct ipa_configure_ul_firewall_rules_ind_msg_v01));
+ qmi_ind_desc.max_msg_len =
+ QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_MAX_MSG_LEN_V01;
+ qmi_ind_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_V01;
+ qmi_ind_desc.ei_array =
+ ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei;
+
+ rc = qmi_kernel_decode(
+ &qmi_ind_desc, &qmi_ul_firewall_ind, msg, msg_len);
+ if (rc < 0) {
+ IPAWANERR("Error decoding msg_id %d\n", msg_id);
+ return;
+ }
+
+ IPAWANDBG("UL firewall rules install indication on Q6");
+ if (qmi_ul_firewall_ind.result.is_success ==
+ QMI_IPA_UL_FIREWALL_STATUS_SUCCESS_V01) {
+ IPAWANDBG(" : Success\n");
+ IPAWANDBG
+ ("Mux ID : %d\n", qmi_ul_firewall_ind.result.mux_id);
+ } else if (qmi_ul_firewall_ind.result.is_success ==
+ QMI_IPA_UL_FIREWALL_STATUS_FAILURE_V01){
+ IPAWANERR(": Failure\n");
+ } else {
+ IPAWANERR(": Unexpected Result");
+ }
+ }
}
static void ipa3_q6_clnt_svc_arrive(struct work_struct *work)
@@ -1363,6 +1445,74 @@ int ipa3_qmi_stop_data_qouta(void)
resp.resp.error, "ipa_stop_data_usage_quota_req_msg_v01");
}
+int ipa3_qmi_enable_per_client_stats(
+ struct ipa_enable_per_client_stats_req_msg_v01 *req,
+ struct ipa_enable_per_client_stats_resp_msg_v01 *resp)
+{
+ struct msg_desc req_desc, resp_desc;
+ int rc;
+
+ req_desc.max_msg_len =
+ QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id =
+ QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01;
+ req_desc.ei_array =
+ ipa3_enable_per_client_stats_req_msg_data_v01_ei;
+
+ resp_desc.max_msg_len =
+ QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id =
+ QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_V01;
+ resp_desc.ei_array =
+ ipa3_enable_per_client_stats_resp_msg_data_v01_ei;
+
+ IPAWANDBG("Sending QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01\n");
+
+ rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
+ sizeof(struct ipa_enable_per_client_stats_req_msg_v01),
+ &resp_desc, resp,
+ sizeof(struct ipa_enable_per_client_stats_resp_msg_v01),
+ QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+ IPAWANDBG("QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_V01 received\n");
+
+ return ipa3_check_qmi_response(rc,
+ QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01, resp->resp.result,
+ resp->resp.error, "ipa3_qmi_enable_per_client_stats");
+}
+
+int ipa3_qmi_get_per_client_packet_stats(
+ struct ipa_get_stats_per_client_req_msg_v01 *req,
+ struct ipa_get_stats_per_client_resp_msg_v01 *resp)
+{
+ struct msg_desc req_desc, resp_desc;
+ int rc;
+
+ req_desc.max_msg_len = QMI_IPA_GET_STATS_PER_CLIENT_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id = QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01;
+ req_desc.ei_array = ipa3_get_stats_per_client_req_msg_data_v01_ei;
+
+ resp_desc.max_msg_len =
+ QMI_IPA_GET_STATS_PER_CLIENT_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id = QMI_IPA_GET_STATS_PER_CLIENT_RESP_V01;
+ resp_desc.ei_array = ipa3_get_stats_per_client_resp_msg_data_v01_ei;
+
+ IPAWANDBG("Sending QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01\n");
+
+ rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
+ sizeof(struct ipa_get_stats_per_client_req_msg_v01),
+ &resp_desc, resp,
+ sizeof(struct ipa_get_stats_per_client_resp_msg_v01),
+ QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+ IPAWANDBG("QMI_IPA_GET_STATS_PER_CLIENT_RESP_V01 received\n");
+
+ return ipa3_check_qmi_response(rc,
+ QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01, resp->resp.result,
+ resp->resp.error,
+ "struct ipa_get_stats_per_client_req_msg_v01");
+}
+
void ipa3_qmi_init(void)
{
mutex_init(&ipa3_qmi_lock);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
index e6f1e2ce0b75..297dca6b88cf 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -32,54 +32,62 @@
#define IPAWANDBG(fmt, args...) \
do { \
- pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+ pr_debug(DEV_NAME " %s:%d " fmt, __func__,\
+ __LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
- DEV_NAME " %s:%d " fmt, ## args); \
+ DEV_NAME " %s:%d " fmt, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
- DEV_NAME " %s:%d " fmt, ## args); \
+ DEV_NAME " %s:%d " fmt, ## args); \
} while (0)
#define IPAWANDBG_LOW(fmt, args...) \
do { \
- pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+ pr_debug(DEV_NAME " %s:%d " fmt, __func__,\
+ __LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
- DEV_NAME " %s:%d " fmt, ## args); \
+ DEV_NAME " %s:%d " fmt, ## args); \
} while (0)
#define IPAWANERR(fmt, args...) \
do { \
- pr_err(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+ pr_err(DEV_NAME " %s:%d " fmt, __func__,\
+ __LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
- DEV_NAME " %s:%d " fmt, ## args); \
+ DEV_NAME " %s:%d " fmt, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
- DEV_NAME " %s:%d " fmt, ## args); \
+ DEV_NAME " %s:%d " fmt, ## args); \
} while (0)
#define IPAWANINFO(fmt, args...) \
do { \
- pr_info(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+ pr_info(DEV_NAME " %s:%d " fmt, __func__,\
+ __LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
- DEV_NAME " %s:%d " fmt, ## args); \
+ DEV_NAME " %s:%d " fmt, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
- DEV_NAME " %s:%d " fmt, ## args); \
+ DEV_NAME " %s:%d " fmt, ## args); \
} while (0)
extern struct ipa3_qmi_context *ipa3_qmi_ctx;
struct ipa3_qmi_context {
-struct ipa_ioc_ext_intf_prop q6_ul_filter_rule[MAX_NUM_Q6_RULE];
-u32 q6_ul_filter_rule_hdl[MAX_NUM_Q6_RULE];
-int num_ipa_install_fltr_rule_req_msg;
-struct ipa_install_fltr_rule_req_msg_v01
+ struct ipa_ioc_ext_intf_prop q6_ul_filter_rule[MAX_NUM_Q6_RULE];
+ u32 q6_ul_filter_rule_hdl[MAX_NUM_Q6_RULE];
+ int num_ipa_install_fltr_rule_req_msg;
+ struct ipa_install_fltr_rule_req_msg_v01
ipa_install_fltr_rule_req_msg_cache[MAX_NUM_QMI_RULE_CACHE];
-int num_ipa_install_fltr_rule_req_ex_msg;
-struct ipa_install_fltr_rule_req_ex_msg_v01
+ int num_ipa_install_fltr_rule_req_ex_msg;
+ struct ipa_install_fltr_rule_req_ex_msg_v01
ipa_install_fltr_rule_req_ex_msg_cache[MAX_NUM_QMI_RULE_CACHE];
-int num_ipa_fltr_installed_notif_req_msg;
-struct ipa_fltr_installed_notif_req_msg_v01
+ int num_ipa_fltr_installed_notif_req_msg;
+ struct ipa_fltr_installed_notif_req_msg_v01
ipa_fltr_installed_notif_req_msg_cache[MAX_NUM_QMI_RULE_CACHE];
-bool modem_cfg_emb_pipe_flt;
+ int num_ipa_configure_ul_firewall_rules_req_msg;
+ struct ipa_configure_ul_firewall_rules_req_msg_v01
+ ipa_configure_ul_firewall_rules_req_msg_cache
+ [MAX_NUM_QMI_RULE_CACHE];
+ bool modem_cfg_emb_pipe_flt;
};
struct ipa3_rmnet_mux_val {
@@ -95,56 +103,69 @@ extern struct elem_info ipa3_init_modem_driver_req_msg_data_v01_ei[];
extern struct elem_info ipa3_init_modem_driver_resp_msg_data_v01_ei[];
extern struct elem_info ipa3_indication_reg_req_msg_data_v01_ei[];
extern struct elem_info ipa3_indication_reg_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_master_driver_init_complt_ind_msg_data_v01_ei[];
+
+extern struct elem_info
+ ipa3_master_driver_init_complt_ind_msg_data_v01_ei[];
extern struct elem_info ipa3_install_fltr_rule_req_msg_data_v01_ei[];
extern struct elem_info ipa3_install_fltr_rule_resp_msg_data_v01_ei[];
extern struct elem_info ipa3_fltr_installed_notif_req_msg_data_v01_ei[];
extern struct elem_info ipa3_fltr_installed_notif_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_enable_force_clear_datapath_req_msg_data_v01_ei[];
-extern struct elem_info ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_disable_force_clear_datapath_req_msg_data_v01_ei[];
+
+extern struct elem_info
+ ipa3_enable_force_clear_datapath_req_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_disable_force_clear_datapath_req_msg_data_v01_ei[];
extern struct elem_info
ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei[];
extern struct elem_info ipa3_config_req_msg_data_v01_ei[];
extern struct elem_info ipa3_config_resp_msg_data_v01_ei[];
extern struct elem_info ipa3_get_data_stats_req_msg_data_v01_ei[];
extern struct elem_info ipa3_get_data_stats_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_get_apn_data_stats_req_msg_data_v01_ei[];
-extern struct elem_info ipa3_get_apn_data_stats_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_set_data_usage_quota_req_msg_data_v01_ei[];
-extern struct elem_info ipa3_set_data_usage_quota_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_data_usage_quota_reached_ind_msg_data_v01_ei[];
-extern struct elem_info ipa3_stop_data_usage_quota_req_msg_data_v01_ei[];
-extern struct elem_info ipa3_stop_data_usage_quota_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei[];
-extern struct elem_info ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[];
-extern struct elem_info ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[];
-
- extern struct elem_info
- ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[];
- extern struct elem_info
- ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[];
- extern struct elem_info
- ipa3_ul_firewall_rule_type_data_v01_ei[];
- extern struct elem_info
- ipa3_ul_firewall_config_result_type_data_v01_ei[];
- extern struct elem_info
- ipa3_per_client_stats_info_type_data_v01_ei[];
- extern struct elem_info
- ipa3_enable_per_client_stats_req_msg_data_v01_ei[];
- extern struct elem_info
- ipa3_enable_per_client_stats_resp_msg_data_v01_ei[];
- extern struct elem_info
- ipa3_get_stats_per_client_req_msg_data_v01_ei[];
- extern struct elem_info
- ipa3_get_stats_per_client_resp_msg_data_v01_ei[];
- extern struct elem_info
- ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei[];
- extern struct elem_info
- ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei[];
- extern struct elem_info
- ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[];
+
+extern struct elem_info
+ ipa3_get_apn_data_stats_req_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_get_apn_data_stats_resp_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_set_data_usage_quota_req_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_set_data_usage_quota_resp_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_data_usage_quota_reached_ind_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_stop_data_usage_quota_req_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_stop_data_usage_quota_resp_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_ul_firewall_rule_type_data_v01_ei[];
+extern struct elem_info
+ ipa3_ul_firewall_config_result_type_data_v01_ei[];
+extern struct elem_info
+ ipa3_per_client_stats_info_type_data_v01_ei[];
+extern struct elem_info
+ ipa3_enable_per_client_stats_req_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_enable_per_client_stats_resp_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_get_stats_per_client_req_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_get_stats_per_client_resp_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[];
/**
* struct ipa3_rmnet_context - IPA rmnet context
@@ -173,6 +194,9 @@ int ipa3_qmi_filter_request_send(
int ipa3_qmi_filter_request_ex_send(
struct ipa_install_fltr_rule_req_ex_msg_v01 *req);
+int ipa3_qmi_ul_filter_request_send(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01 *req);
+
/* sending filter-installed-notify-request to modem*/
int ipa3_qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01
*req);
@@ -219,6 +243,16 @@ int rmnet_ipa3_query_tethering_stats_all(
struct wan_ioctl_query_tether_stats_all *data);
int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data);
+int rmnet_ipa3_set_lan_client_info(struct wan_ioctl_lan_client_info *data);
+
+int rmnet_ipa3_clear_lan_client_info(struct wan_ioctl_lan_client_info *data);
+
+int rmnet_ipa3_send_lan_client_msg(struct wan_ioctl_send_lan_client_msg *data);
+
+int rmnet_ipa3_enable_per_client_stats(bool *data);
+
+int rmnet_ipa3_query_per_client_stats(
+ struct wan_ioctl_query_per_client_stats *data);
int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
struct ipa_get_data_stats_resp_msg_v01 *resp);
@@ -232,6 +266,14 @@ int ipa3_qmi_stop_data_qouta(void);
void ipa3_q6_handshake_complete(bool ssr_bootup);
+int ipa3_qmi_enable_per_client_stats(
+ struct ipa_enable_per_client_stats_req_msg_v01 *req,
+ struct ipa_enable_per_client_stats_resp_msg_v01 *resp);
+
+int ipa3_qmi_get_per_client_packet_stats(
+ struct ipa_get_stats_per_client_req_msg_v01 *req,
+ struct ipa_get_stats_per_client_resp_msg_v01 *resp);
+
void ipa3_qmi_init(void);
void ipa3_qmi_cleanup(void);
@@ -252,6 +294,12 @@ static inline int ipa3_qmi_filter_request_send(
return -EPERM;
}
+static inline int ipa3_qmi_ul_filter_request_send(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01 *req)
+{
+ return -EPERM;
+}
+
static inline int ipa3_qmi_filter_request_ex_send(
struct ipa_install_fltr_rule_req_ex_msg_v01 *req)
{
@@ -348,12 +396,28 @@ static inline int ipa3_qmi_stop_data_qouta(void)
static inline void ipa3_q6_handshake_complete(bool ssr_bootup) { }
+static inline int ipa3_qmi_enable_per_client_stats(
+ struct ipa_enable_per_client_stats_req_msg_v01 *req,
+ struct ipa_enable_per_client_stats_resp_msg_v01 *resp)
+{
+ return -EPERM;
+}
+
+static inline int ipa3_qmi_get_per_client_packet_stats(
+ struct ipa_get_stats_per_client_req_msg_v01 *req,
+ struct ipa_get_stats_per_client_resp_msg_v01 *resp)
+{
+ return -EPERM;
+}
+
static inline void ipa3_qmi_init(void)
{
+
}
static inline void ipa3_qmi_cleanup(void)
{
+
}
#endif /* CONFIG_RMNET_IPA3 */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index ff57e3bd48f0..b9af782b4f6e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -918,7 +918,8 @@ static int __ipa_rt_validate_hndls(const struct ipa_rt_rule *rule,
static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry,
const struct ipa_rt_rule *rule,
struct ipa3_rt_tbl *tbl, struct ipa3_hdr_entry *hdr,
- struct ipa3_hdr_proc_ctx_entry *proc_ctx)
+ struct ipa3_hdr_proc_ctx_entry *proc_ctx,
+ u16 rule_id)
{
int id;
@@ -933,11 +934,16 @@ static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry,
(*(entry))->tbl = tbl;
(*(entry))->hdr = hdr;
(*(entry))->proc_ctx = proc_ctx;
- id = ipa3_alloc_rule_id(&tbl->rule_ids);
- if (id < 0) {
- IPAERR("failed to allocate rule id\n");
- WARN_ON(1);
- goto alloc_rule_id_fail;
+ if (rule_id) {
+ id = rule_id;
+ (*(entry))->rule_id_valid = 1;
+ } else {
+ id = ipa3_alloc_rule_id(&tbl->rule_ids);
+ if (id < 0) {
+ IPAERR("failed to allocate rule id\n");
+ WARN_ON(1);
+ goto alloc_rule_id_fail;
+ }
}
(*(entry))->rule_id = id;
@@ -984,7 +990,8 @@ ipa_insert_failed:
}
static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
- const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl)
+ const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl,
+ u16 rule_id)
{
struct ipa3_rt_tbl *tbl;
struct ipa3_rt_entry *entry;
@@ -1012,7 +1019,8 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
goto error;
}
- if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx))
+ if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx,
+ rule_id))
goto error;
if (at_rear)
@@ -1043,7 +1051,7 @@ static int __ipa_add_rt_rule_after(struct ipa3_rt_tbl *tbl,
if (__ipa_rt_validate_hndls(rule, &hdr, &proc_ctx))
goto error;
- if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx))
+ if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx, 0))
goto error;
list_add(&entry->link, &((*add_after_entry)->link));
@@ -1087,8 +1095,54 @@ int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
&rules->rules[i].rule,
rules->rules[i].at_rear,
- &rules->rules[i].rt_rule_hdl)) {
- IPAERR_RL("failed to add rt rule %d\n", i);
+ &rules->rules[i].rt_rule_hdl,
+ 0)) {
+ IPAERR("failed to add rt rule %d\n", i);
+ rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+ } else {
+ rules->rules[i].status = 0;
+ }
+ }
+
+ if (rules->commit)
+ if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) {
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ret = 0;
+bail:
+ mutex_unlock(&ipa3_ctx->lock);
+ return ret;
+}
+
+/**
+ * ipa3_add_rt_rule_ext() - Add the specified routing rules to SW with rule id
+ * and optionally commit to IPA HW
+ * @rules: [inout] set of routing rules to add
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules)
+{
+ int i;
+ int ret;
+
+ if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa3_ctx->lock);
+ for (i = 0; i < rules->num_rules; i++) {
+ if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
+ &rules->rules[i].rule,
+ rules->rules[i].at_rear,
+ &rules->rules[i].rt_rule_hdl,
+ rules->rules[i].rule_id)) {
+ IPAERR("failed to add rt rule %d\n", i);
rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1152,6 +1206,13 @@ int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules)
goto bail;
}
+ if (entry->cookie != IPA_RT_RULE_COOKIE) {
+ IPAERR_RL("Invalid cookie value = %u rule %d in rt tbls\n",
+ entry->cookie, rules->add_after_hdl);
+ ret = -EINVAL;
+ goto bail;
+ }
+
if (entry->tbl != tbl) {
IPAERR_RL("given rt rule does not match the table\n");
ret = -EINVAL;
@@ -1229,7 +1290,9 @@ int __ipa3_del_rt_rule(u32 rule_hdl)
IPADBG("del rt rule tbl_idx=%d rule_cnt=%d rule_id=%d\n ref_cnt=%u",
entry->tbl->idx, entry->tbl->rule_cnt,
entry->rule_id, entry->tbl->ref_cnt);
- idr_remove(&entry->tbl->rule_ids, entry->rule_id);
+ /* if rule id was allocated from idr, remove it */
+ if (!entry->rule_id_valid)
+ idr_remove(&entry->tbl->rule_ids, entry->rule_id);
if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
if (__ipa_del_rt_tbl(entry->tbl))
IPAERR_RL("fail to del RT tbl\n");
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 8fbde6675070..01ef670dba51 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -142,6 +142,10 @@ struct rmnet_ipa3_context {
u32 ipa3_to_apps_hdl;
struct mutex pipe_handle_guard;
struct mutex add_mux_channel_lock;
+ struct mutex per_client_stats_guard;
+	struct ipa_tether_device_info
+		tether_device[IPACM_MAX_CLIENT_DEVICE_TYPES];
};
static struct rmnet_ipa3_context *rmnet_ipa3_ctx;
@@ -423,6 +427,8 @@ int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
{
int i, j;
+ /* prevent multi-threads accessing rmnet_ipa3_ctx->num_q6_rules */
+ mutex_lock(&rmnet_ipa3_ctx->add_mux_channel_lock);
if (rule_req->filter_spec_ex_list_valid == true) {
rmnet_ipa3_ctx->num_q6_rules =
rule_req->filter_spec_ex_list_len;
@@ -431,6 +437,8 @@ int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
} else {
rmnet_ipa3_ctx->num_q6_rules = 0;
IPAWANERR("got no UL rules from modem\n");
+		mutex_unlock(&rmnet_ipa3_ctx->add_mux_channel_lock);
return -EINVAL;
}
@@ -633,9 +641,13 @@ failure:
rmnet_ipa3_ctx->num_q6_rules = 0;
memset(ipa3_qmi_ctx->q6_ul_filter_rule, 0,
sizeof(ipa3_qmi_ctx->q6_ul_filter_rule));
+	mutex_unlock(&rmnet_ipa3_ctx->add_mux_channel_lock);
return -EINVAL;
success:
+	mutex_unlock(&rmnet_ipa3_ctx->add_mux_channel_lock);
return 0;
}
@@ -1437,8 +1449,13 @@ static int handle3_egress_format(struct net_device *dev,
if (rmnet_ipa3_ctx->num_q6_rules != 0) {
/* already got Q6 UL filter rules*/
- if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt == false)
+ if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt == false) {
+ /* prevent multi-threads accessing num_q6_rules */
+ mutex_lock(&rmnet_ipa3_ctx->add_mux_channel_lock);
rc = ipa3_wwan_add_ul_flt_rule_to_ipa();
+			mutex_unlock(&rmnet_ipa3_ctx->add_mux_channel_lock);
+ }
if (rc)
IPAWANERR("install UL rules failed\n");
else
@@ -1660,6 +1677,8 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
add_mux_channel_lock);
return -EFAULT;
}
+ extend_ioctl_data.u.rmnet_mux_val.vchannel_name
+ [IFNAMSIZ-1] = '\0';
IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
extend_ioctl_data.u.rmnet_mux_val.mux_id,
extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
@@ -2571,7 +2590,9 @@ static void rmnet_ipa_free_msg(void *buff, u32 len, u32 type)
}
if (type != IPA_TETHERING_STATS_UPDATE_STATS &&
- type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS) {
+ type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS &&
+ type != IPA_PER_CLIENT_STATS_CONNECT_EVENT &&
+ type != IPA_PER_CLIENT_STATS_DISCONNECT_EVENT) {
IPAWANERR("Wrong type given. buff %p type %d\n",
buff, type);
}
@@ -2819,6 +2840,9 @@ int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data)
enum ipa_upstream_type upstream_type;
int rc = 0;
+ /* prevent string buffer overflows */
+ data->interface_name[IFNAMSIZ-1] = '\0';
+
/* get IPA backhaul type */
upstream_type = find_upstream_type(data->interface_name);
@@ -3111,6 +3135,10 @@ int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
enum ipa_upstream_type upstream_type;
int rc = 0;
+ /* prevent string buffer overflows */
+ data->upstreamIface[IFNAMSIZ-1] = '\0';
+ data->tetherIface[IFNAMSIZ-1] = '\0';
+
/* get IPA backhaul type */
upstream_type = find_upstream_type(data->upstreamIface);
@@ -3145,6 +3173,10 @@ int rmnet_ipa3_query_tethering_stats_all(
int rc = 0;
memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats));
+
+ /* prevent string buffer overflows */
+ data->upstreamIface[IFNAMSIZ-1] = '\0';
+
/* get IPA backhaul type */
upstream_type = find_upstream_type(data->upstreamIface);
@@ -3188,6 +3220,9 @@ int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data)
memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats));
+ /* prevent string buffer overflows */
+ data->upstreamIface[IFNAMSIZ-1] = '\0';
+
/* get IPA backhaul type */
upstream_type = find_upstream_type(data->upstreamIface);
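
The recurring `[IFNAMSIZ-1] = '\0'` stores in the hunks above all defend the same way: a fixed-size interface name copied in from userspace is forcibly NUL-terminated before any strlen()/strcmp() can run past the end of the buffer. The one-liner generalizes to:

	#include <linux/if.h>	/* IFNAMSIZ */

	/* Clamp a userspace-provided name so string ops stay in bounds. */
	static void example_sanitize_ifname(char name[IFNAMSIZ])
	{
		name[IFNAMSIZ - 1] = '\0';
	}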
@@ -3317,8 +3352,488 @@ void ipa3_q6_handshake_complete(bool ssr_bootup)
}
}
+static inline bool rmnet_ipa3_check_any_client_inited(
+	enum ipacm_per_client_device_type device_type)
+{
+ int i = 0;
+
+ for (; i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++) {
+ if (rmnet_ipa3_ctx->tether_device[device_type].
+ lan_client[i].client_idx != -1 &&
+ rmnet_ipa3_ctx->tether_device[device_type].
+ lan_client[i].inited) {
+ IPAWANERR("Found client index: %d which is inited\n",
+ i);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static inline int rmnet_ipa3_get_lan_client_info(
+	enum ipacm_per_client_device_type device_type,
+	uint8_t mac[])
+{
+ int i = 0;
+
+ IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac[0], mac[1], mac[2],
+ mac[3], mac[4], mac[5]);
+
+ for (; i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++) {
+ if (memcmp(
+ rmnet_ipa3_ctx->tether_device[device_type].
+ lan_client[i].mac,
+ mac,
+ IPA_MAC_ADDR_SIZE) == 0) {
+ IPAWANDBG("Matched client index: %d\n", i);
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static inline int rmnet_ipa3_delete_lan_client_info(
+	enum ipacm_per_client_device_type device_type,
+	int lan_clnt_idx)
+{
+ struct ipa_lan_client *lan_client = NULL;
+ int i;
+
+ /* Check if the request is to clean up all clients. */
+ if (lan_clnt_idx == 0xffffffff) {
+ /* Reset the complete device info. */
+ memset(&rmnet_ipa3_ctx->tether_device[device_type], 0,
+ sizeof(struct ipa_tether_device_info));
+ rmnet_ipa3_ctx->tether_device[device_type].ul_src_pipe = -1;
+ for (i = 0; i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++)
+ rmnet_ipa3_ctx->tether_device[device_type].
+ lan_client[i].client_idx = -1;
+ } else {
+ lan_client =
+ &rmnet_ipa3_ctx->tether_device[device_type].
+ lan_client[lan_clnt_idx];
+ /* Reset the client info before sending the message. */
+ memset(lan_client, 0, sizeof(struct ipa_lan_client));
+ lan_client->client_idx = -1;
+
+ }
+ return 0;
+}
+
+/* rmnet_ipa3_set_lan_client_info() -
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SET_LAN_CLIENT_INFO.
+ * It is used to store LAN client information which
+ * is used to fetch the packet stats for a client.
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: Invalid args provided
+ */
+int rmnet_ipa3_set_lan_client_info(
+ struct wan_ioctl_lan_client_info *data)
+{
+	struct ipa_lan_client *lan_client = NULL;
+
+ IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+ data->mac[0], data->mac[1], data->mac[2],
+ data->mac[3], data->mac[4], data->mac[5]);
+
+ /* Check if Device type is valid. */
+ if (data->device_type >= IPACM_MAX_CLIENT_DEVICE_TYPES ||
+ data->device_type < 0) {
+ IPAWANERR("Invalid Device type: %d\n", data->device_type);
+ return -EINVAL;
+ }
+
+ /* Check if Client index is valid. */
+ if (data->client_idx >= IPA_MAX_NUM_HW_PATH_CLIENTS ||
+ data->client_idx < 0) {
+ IPAWANERR("Invalid Client Index: %d\n", data->client_idx);
+ return -EINVAL;
+ }
+
+ mutex_lock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ if (data->client_init) {
+ /* check if the client is already inited. */
+ if (rmnet_ipa3_ctx->tether_device[data->device_type]
+ .lan_client[data->client_idx].inited) {
+ IPAWANERR("Client already inited: %d:%d\n",
+ data->device_type, data->client_idx);
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ return -EINVAL;
+ }
+ }
+
+ lan_client =
+ &rmnet_ipa3_ctx->tether_device[data->device_type].
+ lan_client[data->client_idx];
+
+ memcpy(lan_client->mac, data->mac, IPA_MAC_ADDR_SIZE);
+
+ lan_client->client_idx = data->client_idx;
+
+ /* Update the Source pipe. */
+ rmnet_ipa3_ctx->tether_device[data->device_type].ul_src_pipe =
+ ipa3_get_ep_mapping(data->ul_src_pipe);
+
+ /* Update the header length if not set. */
+ if (!rmnet_ipa3_ctx->tether_device[data->device_type].hdr_len)
+ rmnet_ipa3_ctx->tether_device[data->device_type].hdr_len =
+ data->hdr_len;
+
+ lan_client->inited = true;
+
+ rmnet_ipa3_ctx->tether_device[data->device_type].num_clients++;
+
+ IPAWANDBG("Set the lan client info: %d, %d, %d\n",
+ lan_client->client_idx,
+ rmnet_ipa3_ctx->tether_device[data->device_type].ul_src_pipe,
+ rmnet_ipa3_ctx->tether_device[data->device_type].num_clients);
+
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+
+ return 0;
+}
+
+/* rmnet_ipa3_clear_lan_client_info() -
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_DELETE_LAN_CLIENT_INFO.
+ * It is used to delete LAN client information which
+ * is used to fetch the packet stats for a client.
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: Invalid args provided
+ */
+int rmnet_ipa3_clear_lan_client_info(
+ struct wan_ioctl_lan_client_info *data)
+{
+	struct ipa_lan_client *lan_client = NULL;
+
+ IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+ data->mac[0], data->mac[1], data->mac[2],
+ data->mac[3], data->mac[4], data->mac[5]);
+
+ /* Check if Device type is valid. */
+ if (data->device_type >= IPACM_MAX_CLIENT_DEVICE_TYPES ||
+ data->device_type < 0) {
+ IPAWANERR("Invalid Device type: %d\n", data->device_type);
+ return -EINVAL;
+ }
+
+ /* Check if Client index is valid. */
+ if (data->client_idx >= IPA_MAX_NUM_HW_PATH_CLIENTS ||
+ data->client_idx < 0) {
+ IPAWANERR("Invalid Client Index: %d\n", data->client_idx);
+ return -EINVAL;
+ }
+
+ mutex_lock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ lan_client =
+ &rmnet_ipa3_ctx->tether_device[data->device_type].
+ lan_client[data->client_idx];
+
+ if (!data->client_init) {
+ /* check if the client is already de-inited. */
+ if (!lan_client->inited) {
+ IPAWANERR("Client already de-inited: %d:%d\n",
+ data->device_type, data->client_idx);
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ return -EINVAL;
+ }
+ }
+
+ lan_client->inited = false;
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+
+ return 0;
+}
+
+/* rmnet_ipa3_send_lan_client_msg() -
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SEND_LAN_CLIENT_MSG.
+ * It is used to send LAN client information to IPACM.
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: Invalid args provided
+ */
+int rmnet_ipa3_send_lan_client_msg(
+ struct wan_ioctl_send_lan_client_msg *data)
+{
+ struct ipa_msg_meta msg_meta;
+ int rc;
+ struct ipa_lan_client_msg *lan_client;
+
+ /* Notify IPACM to reset the client index. */
+ lan_client = kzalloc(sizeof(struct ipa_lan_client_msg),
+ GFP_KERNEL);
+ if (!lan_client) {
+ IPAWANERR("Can't allocate memory for tether_info\n");
+ return -ENOMEM;
+ }
+ memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+ memcpy(lan_client, &data->lan_client,
+ sizeof(struct ipa_lan_client_msg));
+ msg_meta.msg_type = data->client_event;
+ msg_meta.msg_len = sizeof(struct ipa_lan_client_msg);
+
+ rc = ipa_send_msg(&msg_meta, lan_client, rmnet_ipa_free_msg);
+ if (rc) {
+ IPAWANERR("ipa_send_msg failed: %d\n", rc);
+ kfree(lan_client);
+ return rc;
+ }
+ return 0;
+}
+
+/* rmnet_ipa3_enable_per_client_stats() -
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_ENABLE_PER_CLIENT_STATS.
+ * It is used to instruct Q6 to start capturing per-client stats.
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: Invalid args provided
+ */
+int rmnet_ipa3_enable_per_client_stats(
+ bool *data)
+{
+ struct ipa_enable_per_client_stats_req_msg_v01 *req;
+ struct ipa_enable_per_client_stats_resp_msg_v01 *resp;
+ int rc;
+
+ req =
+ kzalloc(sizeof(struct ipa_enable_per_client_stats_req_msg_v01),
+ GFP_KERNEL);
+ if (!req) {
+ IPAWANERR("Can't allocate memory for stats message\n");
+ return -ENOMEM;
+ }
+ resp =
+ kzalloc(sizeof(struct ipa_enable_per_client_stats_resp_msg_v01),
+ GFP_KERNEL);
+ if (!resp) {
+ IPAWANERR("Can't allocate memory for stats message\n");
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ if (*data)
+ req->enable_per_client_stats = 1;
+ else
+ req->enable_per_client_stats = 0;
+
+ rc = ipa3_qmi_enable_per_client_stats(req, resp);
+ if (rc) {
+ IPAWANERR("can't enable per client stats\n");
+ kfree(req);
+ kfree(resp);
+ return rc;
+ }
+
+ kfree(req);
+ kfree(resp);
+ return 0;
+}
+
+int rmnet_ipa3_query_per_client_stats(
+ struct wan_ioctl_query_per_client_stats *data)
+{
+ struct ipa_get_stats_per_client_req_msg_v01 *req;
+ struct ipa_get_stats_per_client_resp_msg_v01 *resp;
+ int rc, lan_clnt_idx, lan_clnt_idx1, i;
+ struct ipa_lan_client *lan_client = NULL;
+
+ IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+ data->client_info[0].mac[0],
+ data->client_info[0].mac[1],
+ data->client_info[0].mac[2],
+ data->client_info[0].mac[3],
+ data->client_info[0].mac[4],
+ data->client_info[0].mac[5]);
+
+ /* Check if Device type is valid. */
+ if (data->device_type >= IPACM_MAX_CLIENT_DEVICE_TYPES ||
+ data->device_type < 0) {
+ IPAWANERR("Invalid Device type: %d\n", data->device_type);
+ return -EINVAL;
+ }
+
+ /* Check if num_clients is valid. */
+ if (data->num_clients != IPA_MAX_NUM_HW_PATH_CLIENTS &&
+ data->num_clients != 1) {
+ IPAWANERR("Invalid number of clients: %d\n", data->num_clients);
+ return -EINVAL;
+ }
+
+ mutex_lock(&rmnet_ipa3_ctx->per_client_stats_guard);
+
+ if (data->num_clients == 1) {
+ /* Check if the client info is valid.*/
+ lan_clnt_idx1 = rmnet_ipa3_get_lan_client_info(
+ data->device_type,
+ data->client_info[0].mac);
+ if (lan_clnt_idx1 < 0) {
+ IPAWANERR("Client info not available return.\n");
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ return -EINVAL;
+ }
+ lan_client =
+ &rmnet_ipa3_ctx->tether_device[data->device_type].
+ lan_client[lan_clnt_idx1];
+ /*
+ * Check if disconnect flag is set and
+ * see if all the clients info are cleared.
+ */
+ if (data->disconnect_clnt &&
+ lan_client->inited) {
+ IPAWANERR("Client not inited. Try again.\n");
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ return -EAGAIN;
+ }
+
+ } else {
+ /* Max number of clients. */
+ /* Check if disconnect flag is set and
+ * see if all the clients info are cleared.
+ */
+ if (data->disconnect_clnt &&
+ rmnet_ipa3_check_any_client_inited(data->device_type)) {
+ IPAWANERR("CLient not inited. Try again.\n");
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ return -EAGAIN;
+ }
+ lan_clnt_idx1 = 0xffffffff;
+ }
+
+ req = kzalloc(sizeof(struct ipa_get_stats_per_client_req_msg_v01),
+ GFP_KERNEL);
+ if (!req) {
+ IPAWANERR("Can't allocate memory for stats message\n");
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ return -ENOMEM;
+ }
+ resp = kzalloc(sizeof(struct ipa_get_stats_per_client_resp_msg_v01),
+ GFP_KERNEL);
+ if (!resp) {
+ IPAWANERR("Can't allocate memory for stats message\n");
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ if (data->reset_stats) {
+ req->reset_stats_valid = true;
+ req->reset_stats = true;
+ IPAWANDBG("fetch and reset the client stats\n");
+ }
+
+ req->client_id = lan_clnt_idx1;
+ req->src_pipe_id =
+ rmnet_ipa3_ctx->tether_device[data->device_type].ul_src_pipe;
+
+ IPAWANDBG("fetch the client stats for %d, %d\n", req->client_id,
+ req->src_pipe_id);
+
+ rc = ipa3_qmi_get_per_client_packet_stats(req, resp);
+ if (rc) {
+ IPAWANERR("can't get per client stats\n");
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ kfree(req);
+ kfree(resp);
+ return rc;
+ }
+
+ if (resp->per_client_stats_list_valid) {
+ for (i = 0; i < resp->per_client_stats_list_len
+ && i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++) {
+ /* Subtract the header bytes from the DL bytes. */
+ data->client_info[i].ipv4_rx_bytes =
+ (resp->per_client_stats_list[i].num_dl_ipv4_bytes) -
+ (rmnet_ipa3_ctx->
+ tether_device[data->device_type].hdr_len *
+ resp->per_client_stats_list[i].num_dl_ipv4_pkts);
+ /* UL header bytes are subtracted by Q6. */
+ data->client_info[i].ipv4_tx_bytes =
+ resp->per_client_stats_list[i].num_ul_ipv4_bytes;
+ /* Subtract the header bytes from the DL bytes. */
+ data->client_info[i].ipv6_rx_bytes =
+ (resp->per_client_stats_list[i].num_dl_ipv6_bytes) -
+ (rmnet_ipa3_ctx->
+ tether_device[data->device_type].hdr_len *
+ resp->per_client_stats_list[i].num_dl_ipv6_pkts);
+ /* UL header bytes are subtracted by Q6. */
+ data->client_info[i].ipv6_tx_bytes =
+ resp->per_client_stats_list[i].num_ul_ipv6_bytes;
+
+ IPAWANDBG("tx_b_v4(%lu)v6(%lu)rx_b_v4(%lu) v6(%lu)\n",
+ (unsigned long int) data->client_info[i].ipv4_tx_bytes,
+ (unsigned long int) data->client_info[i].ipv6_tx_bytes,
+ (unsigned long int) data->client_info[i].ipv4_rx_bytes,
+ (unsigned long int) data->client_info[i].ipv6_rx_bytes);
+
+ /* Get the lan client index. */
+ lan_clnt_idx = resp->per_client_stats_list[i].client_id;
+ /* Check if lan_clnt_idx is valid. */
+ if (lan_clnt_idx < 0 ||
+ lan_clnt_idx >= IPA_MAX_NUM_HW_PATH_CLIENTS) {
+ IPAWANERR("Lan client index not valid.\n");
+ mutex_unlock(
+ &rmnet_ipa3_ctx->per_client_stats_guard);
+ kfree(req);
+ kfree(resp);
+ ipa_assert();
+ return -EINVAL;
+ }
+ memcpy(data->client_info[i].mac,
+ rmnet_ipa3_ctx->
+ tether_device[data->device_type].
+ lan_client[lan_clnt_idx].mac,
+ IPA_MAC_ADDR_SIZE);
+ }
+ }
+
+ if (data->disconnect_clnt) {
+ rmnet_ipa3_delete_lan_client_info(data->device_type,
+ lan_clnt_idx1);
+ }
+
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ kfree(req);
+ kfree(resp);
+ return 0;
+}
+
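
The byte-adjustment block above encodes one rule: Q6's downlink counters include the header the hardware prepends to every packet, so hdr_len is charged back once per packet, while uplink counters arrive already adjusted. Reduced to a formula:

	/* DL payload bytes = reported DL bytes - hdr_len per DL packet.
	 * Mirrors the adjustment in rmnet_ipa3_query_per_client_stats();
	 * the function name here is illustrative. */
	static u64 example_dl_payload_bytes(u64 dl_bytes, u64 dl_pkts,
					    u32 hdr_len)
	{
		return dl_bytes - (u64)hdr_len * dl_pkts;
	}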
static int __init ipa3_wwan_init(void)
{
+ int i, j;
rmnet_ipa3_ctx = kzalloc(sizeof(*rmnet_ipa3_ctx), GFP_KERNEL);
if (!rmnet_ipa3_ctx) {
IPAWANERR("no memory\n");
@@ -3330,6 +3845,14 @@ static int __init ipa3_wwan_init(void)
mutex_init(&rmnet_ipa3_ctx->pipe_handle_guard);
mutex_init(&rmnet_ipa3_ctx->add_mux_channel_lock);
+ mutex_init(&rmnet_ipa3_ctx->per_client_stats_guard);
+ /* Reset the Lan Stats. */
+ for (i = 0; i < IPACM_MAX_CLIENT_DEVICE_TYPES; i++) {
+ rmnet_ipa3_ctx->tether_device[i].ul_src_pipe = -1;
+ for (j = 0; j < IPA_MAX_NUM_HW_PATH_CLIENTS; j++)
+ rmnet_ipa3_ctx->tether_device[i].
+ lan_client[j].client_idx = -1;
+ }
rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
rmnet_ipa3_ctx->apps_to_ipa3_hdl = -1;
@@ -3352,6 +3875,7 @@ static void __exit ipa3_wwan_cleanup(void)
ipa3_qmi_cleanup();
mutex_destroy(&rmnet_ipa3_ctx->pipe_handle_guard);
mutex_destroy(&rmnet_ipa3_ctx->add_mux_channel_lock);
+ mutex_destroy(&rmnet_ipa3_ctx->per_client_stats_guard);
ret = subsys_notif_unregister_notifier(
rmnet_ipa3_ctx->subsys_notify_handle, &ipa3_ssr_notifier);
if (ret)
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
index 51bbec464e4d..dc1e5ce511a6 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
@@ -50,6 +50,15 @@
#define WAN_IOC_QUERY_TETHER_STATS_ALL32 _IOWR(WAN_IOC_MAGIC, \
WAN_IOCTL_QUERY_TETHER_STATS_ALL, \
compat_uptr_t)
+#define WAN_IOC_ENABLE_PER_CLIENT_STATS32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_ENABLE_PER_CLIENT_STATS, \
+		compat_uptr_t)
+#define WAN_IOC_QUERY_PER_CLIENT_STATS32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_QUERY_PER_CLIENT_STATS, \
+		compat_uptr_t)
+#define WAN_IOC_SET_LAN_CLIENT_INFO32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_SET_LAN_CLIENT_INFO, \
+		compat_uptr_t)
#endif
static unsigned int dev_num = 1;
@@ -125,6 +134,34 @@ static long ipa3_wan_ioctl(struct file *filp,
}
break;
+ case WAN_IOC_ADD_UL_FLT_RULE:
+ IPAWANDBG("device %s got WAN_IOC_UL_ADD_FLT_RULE :>>>\n",
+ DRIVER_NAME);
+ pyld_sz =
+ sizeof(struct ipa_configure_ul_firewall_rules_req_msg_v01);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (const void __user *)arg,
+ pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_qmi_ul_filter_request_send(
+ (struct ipa_configure_ul_firewall_rules_req_msg_v01 *)
+ param)) {
+ IPAWANDBG("IPACM->Q6 add ul filter rule failed\n");
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
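
Every new WAN_IOC_* case in this switch follows one template: size the payload from its struct, kzalloc a kernel copy, copy_from_user, dispatch, and copy_to_user only when results flow back (here `param` is freed in shared cleanup after the switch). Factored out with a hypothetical command struct and handler:

	#include <linux/slab.h>
	#include <linux/uaccess.h>

	struct wan_ioctl_example { u32 in; u32 out; };	/* hypothetical */

	static long example_handler(struct wan_ioctl_example *p)
	{
		p->out = p->in + 1;	/* stand-in for real work */
		return 0;
	}

	static long example_ioctl_case(unsigned long arg)
	{
		struct wan_ioctl_example *param;
		long retval;

		param = kzalloc(sizeof(*param), GFP_KERNEL);
		if (!param)
			return -ENOMEM;
		if (copy_from_user(param, (const void __user *)arg,
				   sizeof(*param))) {
			kfree(param);
			return -EFAULT;
		}
		retval = example_handler(param);
		if (!retval &&
		    copy_to_user((void __user *)arg, param, sizeof(*param)))
			retval = -EFAULT;
		kfree(param);
		return retval;
	}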
case WAN_IOC_ADD_FLT_RULE_INDEX:
IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE_INDEX :>>>\n",
DRIVER_NAME);
@@ -316,6 +353,122 @@ static long ipa3_wan_ioctl(struct file *filp,
}
break;
+ case WAN_IOC_ENABLE_PER_CLIENT_STATS:
+ IPAWANDBG_LOW("got WAN_IOC_ENABLE_PER_CLIENT_STATS :>>>\n");
+ pyld_sz = sizeof(bool);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (const void __user *)arg,
+ pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (rmnet_ipa3_enable_per_client_stats(
+ (bool *)param)) {
+ IPAWANERR("WAN_IOC_ENABLE_PER_CLIENT_STATS failed\n");
+ retval = -EFAULT;
+ break;
+ }
+
+ break;
+
+ case WAN_IOC_QUERY_PER_CLIENT_STATS:
+ IPAWANDBG_LOW("got WAN_IOC_QUERY_PER_CLIENT_STATS :>>>\n");
+ pyld_sz = sizeof(struct wan_ioctl_query_per_client_stats);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (const void __user *)arg,
+ pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+
+ retval = rmnet_ipa3_query_per_client_stats(
+ (struct wan_ioctl_query_per_client_stats *)param);
+ if (retval) {
+ IPAWANERR("WAN_IOC_QUERY_PER_CLIENT_STATS failed\n");
+ break;
+ }
+
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case WAN_IOC_SET_LAN_CLIENT_INFO:
+ IPAWANDBG_LOW("got WAN_IOC_SET_LAN_CLIENT_INFO :>>>\n");
+ pyld_sz = sizeof(struct wan_ioctl_lan_client_info);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (const void __user *)arg,
+ pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (rmnet_ipa3_set_lan_client_info(
+ (struct wan_ioctl_lan_client_info *)param)) {
+ IPAWANERR("WAN_IOC_SET_LAN_CLIENT_INFO failed\n");
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case WAN_IOC_CLEAR_LAN_CLIENT_INFO:
+ IPAWANDBG_LOW("got WAN_IOC_CLEAR_LAN_CLIENT_INFO :>>>\n");
+ pyld_sz = sizeof(struct wan_ioctl_lan_client_info);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (const void __user *)arg,
+ pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (rmnet_ipa3_clear_lan_client_info(
+ (struct wan_ioctl_lan_client_info *)param)) {
+ IPAWANERR("WAN_IOC_CLEAR_LAN_CLIENT_INFO failed\n");
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case WAN_IOC_SEND_LAN_CLIENT_MSG:
+ IPAWANDBG_LOW("got WAN_IOC_SEND_LAN_CLIENT_MSG :>>>\n");
+ pyld_sz = sizeof(struct wan_ioctl_send_lan_client_msg);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (const void __user *)arg,
+ pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (rmnet_ipa3_send_lan_client_msg(
+ (struct wan_ioctl_send_lan_client_msg *)
+ param)) {
+ IPAWANERR("IOC_SEND_LAN_CLIENT_MSG failed\n");
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
default:
retval = -ENOTTY;
}
diff --git a/drivers/platform/msm/mhi/mhi_states.c b/drivers/platform/msm/mhi/mhi_states.c
index ea2a91bd2d06..8391dce4b5f0 100644
--- a/drivers/platform/msm/mhi/mhi_states.c
+++ b/drivers/platform/msm/mhi/mhi_states.c
@@ -18,26 +18,25 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+static const char * const mhi_states_transition_str[STATE_TRANSITION_MAX] = {
+ [STATE_TRANSITION_RESET] = "RESET",
+ [STATE_TRANSITION_READY] = "READY",
+ [STATE_TRANSITION_M0] = "M0",
+ [STATE_TRANSITION_M1] = "M1",
+ [STATE_TRANSITION_M2] = "M2",
+ [STATE_TRANSITION_M3] = "M3",
+ [STATE_TRANSITION_BHI] = "BHI",
+ [STATE_TRANSITION_SBL] = "SBL",
+ [STATE_TRANSITION_AMSS] = "AMSS",
+ [STATE_TRANSITION_LINK_DOWN] = "LINK_DOWN",
+ [STATE_TRANSITION_WAKE] = "WAKE",
+ [STATE_TRANSITION_BHIE] = "BHIE",
+ [STATE_TRANSITION_RDDM] = "RDDM",
+ [STATE_TRANSITION_SYS_ERR] = "SYS_ERR",
+};
+
const char *state_transition_str(enum STATE_TRANSITION state)
{
- static const char * const
- mhi_states_transition_str[STATE_TRANSITION_MAX] = {
- [STATE_TRANSITION_RESET] = "RESET",
- [STATE_TRANSITION_READY] = "READY",
- [STATE_TRANSITION_M0] = "M0",
- [STATE_TRANSITION_M1] = "M1",
- [STATE_TRANSITION_M2] = "M2",
- [STATE_TRANSITION_M3] = "M3",
- [STATE_TRANSITION_BHI] = "BHI",
- [STATE_TRANSITION_SBL] = "SBL",
- [STATE_TRANSITION_AMSS] = "AMSS",
- [STATE_TRANSITION_LINK_DOWN] = "LINK_DOWN",
- [STATE_TRANSITION_WAKE] = "WAKE",
- [STATE_TRANSITION_BHIE] = "BHIE",
- [STATE_TRANSITION_RDDM] = "RDDM",
- [STATE_TRANSITION_SYS_ERR] = "SYS_ERR",
- };
-
return (state < STATE_TRANSITION_MAX) ?
mhi_states_transition_str[state] : "Invalid";
}
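
Hoisting the table to file scope does not change behavior (a function-local static const array also lives in .rodata and is initialized once); it mainly shortens the function and makes the table reusable by other lookups in the file. The pattern, reduced to its core with illustrative names:

	enum example_state { EXAMPLE_A, EXAMPLE_B, EXAMPLE_MAX };

	static const char * const example_state_str[EXAMPLE_MAX] = {
		[EXAMPLE_A] = "A",
		[EXAMPLE_B] = "B",
	};

	static const char *example_state_name(enum example_state s)
	{
		/* bounds check first: enums can carry out-of-range values */
		return (s < EXAMPLE_MAX) ? example_state_str[s] : "Invalid";
	}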
diff --git a/drivers/power/supply/qcom/Kconfig b/drivers/power/supply/qcom/Kconfig
index 47b201738672..b919c688e627 100644
--- a/drivers/power/supply/qcom/Kconfig
+++ b/drivers/power/supply/qcom/Kconfig
@@ -1,5 +1,23 @@
menu "Qualcomm Technologies Inc Charger and Fuel Gauge support"
+config QPNP_SMBCHARGER
+ tristate "QPNP SMB Charger driver"
+ depends on MFD_SPMI_PMIC
+ help
+	  Say Y here to enable the dual-path switch-mode battery charger,
+	  which supports USB detection and battery charging up to 3A.
+	  The driver also exposes relevant information to userspace via
+	  the power supply framework.
+
+config QPNP_FG
+ tristate "QPNP fuel gauge driver"
+ depends on MFD_SPMI_PMIC
+ help
+	  Say Y here to enable the fuel gauge driver. This adds support
+	  for battery fuel gauging and for reporting the state of charge
+	  of the battery connected to the fuel gauge. The state of charge
+	  is reported through a BMS power supply property, and uevents
+	  are sent when the capacity is updated.
+
config QPNP_FG_GEN3
tristate "QPNP GEN3 fuel gauge driver"
depends on MFD_SPMI_PMIC
diff --git a/drivers/power/supply/qcom/Makefile b/drivers/power/supply/qcom/Makefile
index 87ab2b24175f..92310ef5c803 100644
--- a/drivers/power/supply/qcom/Makefile
+++ b/drivers/power/supply/qcom/Makefile
@@ -1,3 +1,5 @@
+obj-$(CONFIG_QPNP_SMBCHARGER) += qpnp-smbcharger.o batterydata-lib.o pmic-voter.o
+obj-$(CONFIG_QPNP_FG) += qpnp-fg.o
obj-$(CONFIG_QPNP_FG_GEN3) += qpnp-fg-gen3.o fg-memif.o fg-util.o
obj-$(CONFIG_SMB135X_CHARGER) += smb135x-charger.o pmic-voter.o
obj-$(CONFIG_SMB1351_USB_CHARGER) += battery.o smb1351-charger.o pmic-voter.o
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 5e8cc84fbfbf..cb26658e564e 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -291,7 +291,7 @@ static struct class_attribute pl_attributes[] = {
* TAPER *
************/
#define MINIMUM_PARALLEL_FCC_UA 500000
-#define PL_TAPER_WORK_DELAY_MS 100
+#define PL_TAPER_WORK_DELAY_MS 500
#define TAPER_RESIDUAL_PCT 75
static void pl_taper_work(struct work_struct *work)
{
@@ -349,7 +349,7 @@ done:
* FCC *
**********/
#define EFFICIENCY_PCT 80
-static void split_fcc(struct pl_data *chip, int total_ua,
+static void get_fcc_split(struct pl_data *chip, int total_ua,
int *master_ua, int *slave_ua)
{
int rc, effective_total_ua, slave_limited_ua, hw_cc_delta_ua = 0,
@@ -388,7 +388,6 @@ static void split_fcc(struct pl_data *chip, int total_ua,
effective_total_ua = max(0, total_ua + hw_cc_delta_ua);
slave_limited_ua = min(effective_total_ua, bcl_ua);
*slave_ua = (slave_limited_ua * chip->slave_pct) / 100;
- *slave_ua = (*slave_ua * chip->taper_pct) / 100;
/*
* In USBIN_USBIN configuration with internal rsense parallel
* charger's current goes through main charger's BATFET, keep
@@ -398,6 +397,8 @@ static void split_fcc(struct pl_data *chip, int total_ua,
*master_ua = max(0, total_ua);
else
*master_ua = max(0, total_ua - *slave_ua);
+
+ *slave_ua = (*slave_ua * chip->taper_pct) / 100;
}
static int pl_fcc_vote_callback(struct votable *votable, void *data,
@@ -425,7 +426,8 @@ static int pl_fcc_vote_callback(struct votable *votable, void *data,
}
if (chip->pl_mode != POWER_SUPPLY_PL_NONE) {
- split_fcc(chip, total_fcc_ua, &master_fcc_ua, &slave_fcc_ua);
+ get_fcc_split(chip, total_fcc_ua,
+ &master_fcc_ua, &slave_fcc_ua);
pval.intval = slave_fcc_ua;
rc = power_supply_set_property(chip->pl_psy,
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index 232dd4ec8606..b75d7db57c3e 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -403,6 +403,7 @@ struct fg_chip {
struct mutex bus_lock;
struct mutex sram_rw_lock;
struct mutex charge_full_lock;
+ struct mutex qnovo_esr_ctrl_lock;
u32 batt_soc_base;
u32 batt_info_base;
u32 mem_if_base;
@@ -411,6 +412,7 @@ struct fg_chip {
int batt_id_ohms;
int ki_coeff_full_soc;
int charge_status;
+ int prev_charge_status;
int charge_done;
int charge_type;
int online_status;
@@ -435,6 +437,7 @@ struct fg_chip {
bool esr_flt_cold_temp_en;
bool slope_limit_en;
bool use_ima_single_mode;
+ bool qnovo_enable;
struct completion soc_update;
struct completion soc_ready;
struct delayed_work profile_load_work;
diff --git a/drivers/power/supply/qcom/fg-util.c b/drivers/power/supply/qcom/fg-util.c
index 0cb1dea7113b..23dd9131d402 100644
--- a/drivers/power/supply/qcom/fg-util.c
+++ b/drivers/power/supply/qcom/fg-util.c
@@ -264,8 +264,7 @@ static inline bool fg_sram_address_valid(u16 address, int len)
int fg_sram_write(struct fg_chip *chip, u16 address, u8 offset,
u8 *val, int len, int flags)
{
- int rc = 0;
- bool tried_again = false;
+ int rc = 0, tries = 0;
bool atomic_access = false;
if (!chip)
@@ -292,7 +291,7 @@ int fg_sram_write(struct fg_chip *chip, u16 address, u8 offset,
} else {
flags = FG_IMA_DEFAULT;
}
-wait:
+
/*
* Atomic access means waiting upon the SOC_UPDATE interrupt from
* FG_ALG and doing the transaction after that. This is to make
@@ -301,16 +300,20 @@ wait:
* FG cycle (~1.47 seconds).
*/
if (atomic_access) {
- /* Wait for SOC_UPDATE completion */
- rc = wait_for_completion_interruptible_timeout(
- &chip->soc_update,
- msecs_to_jiffies(SOC_UPDATE_WAIT_MS));
-
- /* If we were interrupted wait again one more time. */
- if (rc == -ERESTARTSYS && !tried_again) {
- tried_again = true;
- goto wait;
- } else if (rc <= 0) {
+ for (tries = 0; tries < 2; tries++) {
+ /* Wait for SOC_UPDATE completion */
+ rc = wait_for_completion_interruptible_timeout(
+ &chip->soc_update,
+ msecs_to_jiffies(SOC_UPDATE_WAIT_MS));
+ if (rc > 0) {
+ rc = 0;
+ break;
+ } else if (!rc) {
+ rc = -ETIMEDOUT;
+ }
+ }
+
+ if (rc < 0) {
pr_err("wait for soc_update timed out rc=%d\n", rc);
goto out;
}
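
The rewrite above turns the goto-based retry into a bounded loop with explicit return-code mapping: a positive return from wait_for_completion_interruptible_timeout() means the completion fired, zero means timeout, and -ERESTARTSYS earns exactly one more attempt. Isolated (with an illustrative function name), the shape is:

	#include <linux/completion.h>
	#include <linux/jiffies.h>

	static int example_wait_soc_update(struct completion *done,
					   unsigned int timeout_ms)
	{
		int rc = -ETIMEDOUT, tries;

		for (tries = 0; tries < 2; tries++) {
			rc = wait_for_completion_interruptible_timeout(done,
					msecs_to_jiffies(timeout_ms));
			if (rc > 0)
				return 0;	/* completion fired */
			if (rc == 0)
				rc = -ETIMEDOUT;
			/* rc == -ERESTARTSYS: interrupted, retry once */
		}
		return rc;
	}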
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index 626926e8b97d..256d9ed8ada5 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -562,6 +562,21 @@ static int fg_get_charge_raw(struct fg_chip *chip, int *val)
return 0;
}
+#define BATT_SOC_32BIT GENMASK(31, 0)
+static int fg_get_charge_counter_shadow(struct fg_chip *chip, int *val)
+{
+ int rc, batt_soc;
+
+ rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &batt_soc);
+ if (rc < 0) {
+ pr_err("Error in getting BATT_SOC, rc=%d\n", rc);
+ return rc;
+ }
+
+ *val = div_u64((u32)batt_soc * chip->cl.learned_cc_uah, BATT_SOC_32BIT);
+ return 0;
+}
+
static int fg_get_charge_counter(struct fg_chip *chip, int *val)
{
int rc, cc_soc;
@@ -1148,7 +1163,7 @@ static int fg_batt_miss_irq_en_cb(struct votable *votable, void *data,
enable_irq_wake(chip->irqs[BATT_MISSING_IRQ].irq);
} else {
disable_irq_wake(chip->irqs[BATT_MISSING_IRQ].irq);
- disable_irq(chip->irqs[BATT_MISSING_IRQ].irq);
+ disable_irq_nosync(chip->irqs[BATT_MISSING_IRQ].irq);
}
return 0;
@@ -1167,7 +1182,7 @@ static int fg_delta_bsoc_irq_en_cb(struct votable *votable, void *data,
enable_irq_wake(chip->irqs[BSOC_DELTA_IRQ].irq);
} else {
disable_irq_wake(chip->irqs[BSOC_DELTA_IRQ].irq);
- disable_irq(chip->irqs[BSOC_DELTA_IRQ].irq);
+ disable_irq_nosync(chip->irqs[BSOC_DELTA_IRQ].irq);
}
return 0;
@@ -1249,6 +1264,21 @@ static bool is_parallel_charger_available(struct fg_chip *chip)
return true;
}
+static int fg_prime_cc_soc_sw(struct fg_chip *chip, int cc_soc_sw)
+{
+ int rc;
+
+ rc = fg_sram_write(chip, chip->sp[FG_SRAM_CC_SOC_SW].addr_word,
+ chip->sp[FG_SRAM_CC_SOC_SW].addr_byte, (u8 *)&cc_soc_sw,
+ chip->sp[FG_SRAM_CC_SOC_SW].len, FG_IMA_ATOMIC);
+ if (rc < 0)
+ pr_err("Error in writing cc_soc_sw, rc=%d\n", rc);
+ else
+ fg_dbg(chip, FG_STATUS, "cc_soc_sw: %x\n", cc_soc_sw);
+
+ return rc;
+}
+
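
fg_prime_cc_soc_sw() centralizes the SRAM write; what varies between callers is only the value: CC_SOC_30BIT for a full battery, or batt_soc rescaled from a 32-bit to a 30-bit fraction of full capacity. That rescale, mirroring the arithmetic in fg_cap_learning_begin() below (the wrapper name is illustrative):

	/* batt_soc is a ~Q32 fraction of full, cc_soc_sw ~Q30, so scale
	 * by roughly 2^30 / 2^32 using the masks defined in this file. */
	static int example_batt_soc_to_cc_soc_sw(u32 batt_soc)
	{
		return div64_s64((int64_t)batt_soc * CC_SOC_30BIT,
				 BATT_SOC_32BIT);
	}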
static int fg_save_learned_cap_to_sram(struct fg_chip *chip)
{
int16_t cc_mah;
@@ -1434,7 +1464,6 @@ static int fg_cap_learning_process_full_data(struct fg_chip *chip)
return 0;
}
-#define BATT_SOC_32BIT GENMASK(31, 0)
static int fg_cap_learning_begin(struct fg_chip *chip, u32 batt_soc)
{
int rc, cc_soc_sw, batt_soc_msb;
@@ -1453,16 +1482,13 @@ static int fg_cap_learning_begin(struct fg_chip *chip, u32 batt_soc)
/* Prime cc_soc_sw with battery SOC when capacity learning begins */
cc_soc_sw = div64_s64((int64_t)batt_soc * CC_SOC_30BIT,
BATT_SOC_32BIT);
- rc = fg_sram_write(chip, chip->sp[FG_SRAM_CC_SOC_SW].addr_word,
- chip->sp[FG_SRAM_CC_SOC_SW].addr_byte, (u8 *)&cc_soc_sw,
- chip->sp[FG_SRAM_CC_SOC_SW].len, FG_IMA_ATOMIC);
+ rc = fg_prime_cc_soc_sw(chip, cc_soc_sw);
if (rc < 0) {
pr_err("Error in writing cc_soc_sw, rc=%d\n", rc);
goto out;
}
chip->cl.init_cc_soc_sw = cc_soc_sw;
- chip->cl.active = true;
fg_dbg(chip, FG_CAP_LEARN, "Capacity learning started @ battery SOC %d init_cc_soc_sw:%d\n",
batt_soc_msb, chip->cl.init_cc_soc_sw);
out:
@@ -1482,9 +1508,7 @@ static int fg_cap_learning_done(struct fg_chip *chip)
/* Write a FULL value to cc_soc_sw */
cc_soc_sw = CC_SOC_30BIT;
- rc = fg_sram_write(chip, chip->sp[FG_SRAM_CC_SOC_SW].addr_word,
- chip->sp[FG_SRAM_CC_SOC_SW].addr_byte, (u8 *)&cc_soc_sw,
- chip->sp[FG_SRAM_CC_SOC_SW].len, FG_IMA_ATOMIC);
+ rc = fg_prime_cc_soc_sw(chip, cc_soc_sw);
if (rc < 0) {
pr_err("Error in writing cc_soc_sw, rc=%d\n", rc);
goto out;
@@ -1497,8 +1521,9 @@ out:
static void fg_cap_learning_update(struct fg_chip *chip)
{
- int rc, batt_soc, batt_soc_msb;
+ int rc, batt_soc, batt_soc_msb, cc_soc_sw;
bool input_present = is_input_present(chip);
+ bool prime_cc = false;
mutex_lock(&chip->cl.lock);
@@ -1511,6 +1536,9 @@ static void fg_cap_learning_update(struct fg_chip *chip)
goto out;
}
+ if (chip->charge_status == chip->prev_charge_status)
+ goto out;
+
rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &batt_soc);
if (rc < 0) {
pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc);
@@ -1526,8 +1554,12 @@ static void fg_cap_learning_update(struct fg_chip *chip)
if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING) {
rc = fg_cap_learning_begin(chip, batt_soc);
chip->cl.active = (rc == 0);
+ } else {
+ if ((chip->charge_status ==
+ POWER_SUPPLY_STATUS_DISCHARGING) ||
+ chip->charge_done)
+ prime_cc = true;
}
-
} else {
if (chip->charge_done) {
rc = fg_cap_learning_done(chip);
@@ -1545,6 +1577,7 @@ static void fg_cap_learning_update(struct fg_chip *chip)
batt_soc_msb);
chip->cl.active = false;
chip->cl.init_cc_uah = 0;
+ prime_cc = true;
}
}
@@ -1561,10 +1594,29 @@ static void fg_cap_learning_update(struct fg_chip *chip)
batt_soc_msb);
chip->cl.active = false;
chip->cl.init_cc_uah = 0;
+ prime_cc = true;
}
}
}
+ /*
+ * Prime CC_SOC_SW when the device is not charging or during charge
+ * termination when the capacity learning is not active.
+ */
+
+ if (prime_cc) {
+ if (chip->charge_done)
+ cc_soc_sw = CC_SOC_30BIT;
+ else
+ cc_soc_sw = div_u64((u32)batt_soc *
+ CC_SOC_30BIT, BATT_SOC_32BIT);
+
+ rc = fg_prime_cc_soc_sw(chip, cc_soc_sw);
+ if (rc < 0)
+ pr_err("Error in writing cc_soc_sw, rc=%d\n",
+ rc);
+ }
+
out:
mutex_unlock(&chip->cl.lock);
}
@@ -1779,7 +1831,8 @@ static int fg_charge_full_update(struct fg_chip *chip)
fg_dbg(chip, FG_STATUS, "Terminated charging @ SOC%d\n",
msoc);
}
- } else if (msoc_raw <= recharge_soc && chip->charge_full) {
+ } else if ((msoc_raw <= recharge_soc || !chip->charge_done)
+ && chip->charge_full) {
if (chip->dt.linearize_soc) {
chip->delta_soc = FULL_CAPACITY - msoc;
@@ -2563,7 +2616,7 @@ static void status_change_work(struct work_struct *work)
}
fg_ttf_update(chip);
-
+ chip->prev_charge_status = chip->charge_status;
out:
fg_dbg(chip, FG_POWER_SUPPLY, "charge_status:%d charge_type:%d charge_done:%d\n",
chip->charge_status, chip->charge_type, chip->charge_done);
@@ -3298,20 +3351,21 @@ static int fg_force_esr_meas(struct fg_chip *chip)
int rc;
int esr_uohms;
+ mutex_lock(&chip->qnovo_esr_ctrl_lock);
/* force esr extraction enable */
rc = fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD,
ESR_EXTRACTION_ENABLE_OFFSET, BIT(0), BIT(0),
FG_IMA_DEFAULT);
if (rc < 0) {
pr_err("failed to enable esr extn rc=%d\n", rc);
- return rc;
+ goto out;
}
rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip),
LD_REG_CTRL_BIT, 0);
if (rc < 0) {
pr_err("Error in configuring qnovo_cfg rc=%d\n", rc);
- return rc;
+ goto out;
}
rc = fg_masked_write(chip, BATT_INFO_TM_MISC1(chip),
@@ -3319,24 +3373,36 @@ static int fg_force_esr_meas(struct fg_chip *chip)
ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT);
if (rc < 0) {
pr_err("Error in configuring force ESR rc=%d\n", rc);
- return rc;
+ goto out;
}
+ /*
+ * Release and grab the lock again after 1.5 seconds so that prepare
+ * callback can succeed if the request comes in between.
+ */
+ mutex_unlock(&chip->qnovo_esr_ctrl_lock);
+
/* wait 1.5 seconds for hw to measure ESR */
msleep(1500);
+
+ mutex_lock(&chip->qnovo_esr_ctrl_lock);
rc = fg_masked_write(chip, BATT_INFO_TM_MISC1(chip),
ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT,
0);
if (rc < 0) {
pr_err("Error in restoring force ESR rc=%d\n", rc);
- return rc;
+ goto out;
}
+ /* If qnovo is disabled, then leave ESR extraction enabled */
+ if (!chip->qnovo_enable)
+ goto done;
+
rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip),
LD_REG_CTRL_BIT, LD_REG_CTRL_BIT);
if (rc < 0) {
pr_err("Error in restoring qnovo_cfg rc=%d\n", rc);
- return rc;
+ goto out;
}
/* force esr extraction disable */
@@ -3345,36 +3411,46 @@ static int fg_force_esr_meas(struct fg_chip *chip)
FG_IMA_DEFAULT);
if (rc < 0) {
pr_err("failed to disable esr extn rc=%d\n", rc);
- return rc;
+ goto out;
}
+done:
fg_get_battery_resistance(chip, &esr_uohms);
fg_dbg(chip, FG_STATUS, "ESR uohms = %d\n", esr_uohms);
-
+out:
+ mutex_unlock(&chip->qnovo_esr_ctrl_lock);
return rc;
}
static int fg_prepare_for_qnovo(struct fg_chip *chip, int qnovo_enable)
{
- int rc;
+ int rc = 0;
+ mutex_lock(&chip->qnovo_esr_ctrl_lock);
/* force esr extraction disable when qnovo enables */
rc = fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD,
ESR_EXTRACTION_ENABLE_OFFSET,
BIT(0), qnovo_enable ? 0 : BIT(0),
FG_IMA_DEFAULT);
- if (rc < 0)
+ if (rc < 0) {
pr_err("Error in configuring esr extraction rc=%d\n", rc);
+ goto out;
+ }
rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip),
LD_REG_CTRL_BIT,
qnovo_enable ? LD_REG_CTRL_BIT : 0);
if (rc < 0) {
pr_err("Error in configuring qnovo_cfg rc=%d\n", rc);
- return rc;
+ goto out;
}
- fg_dbg(chip, FG_STATUS, "Prepared for Qnovo\n");
- return 0;
+
+ fg_dbg(chip, FG_STATUS, "%s for Qnovo\n",
+ qnovo_enable ? "Prepared" : "Unprepared");
+ chip->qnovo_enable = qnovo_enable;
+out:
+ mutex_unlock(&chip->qnovo_esr_ctrl_lock);
+ return rc;
}
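
The qnovo_esr_ctrl_lock introduced above serializes the forced ESR measurement
against the Qnovo prepare callback, and fg_force_esr_meas() deliberately drops
the lock around its 1.5 s settling sleep so a prepare request arriving
mid-measurement is not blocked. A rough sketch of the intended interleaving
(illustrative only, not part of the patch):

	/* fg_force_esr_meas()                   (other thread)            */
	mutex_lock(&chip->qnovo_esr_ctrl_lock);
	/* ... program ESR extraction regs ... */
	mutex_unlock(&chip->qnovo_esr_ctrl_lock);
	msleep(1500);		/* fg_prepare_for_qnovo() may run here and  */
				/* update chip->qnovo_enable under the lock */
	mutex_lock(&chip->qnovo_esr_ctrl_lock);
	/* ... restore regs, honouring the latest chip->qnovo_enable ...   */
	mutex_unlock(&chip->qnovo_esr_ctrl_lock);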
static void ttf_work(struct work_struct *work)
@@ -3529,6 +3605,9 @@ static int fg_psy_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CHARGE_COUNTER:
rc = fg_get_charge_counter(chip, &pval->intval);
break;
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW:
+ rc = fg_get_charge_counter_shadow(chip, &pval->intval);
+ break;
case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
rc = fg_get_time_to_full(chip, &pval->intval);
break;
@@ -3704,6 +3783,7 @@ static int fg_notifier_cb(struct notifier_block *nb,
return NOTIFY_OK;
if ((strcmp(psy->desc->name, "battery") == 0)
+ || (strcmp(psy->desc->name, "parallel") == 0)
|| (strcmp(psy->desc->name, "usb") == 0)) {
/*
* We cannot vote for awake votable here as that takes
@@ -3738,6 +3818,7 @@ static enum power_supply_property fg_psy_props[] = {
POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW,
POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
POWER_SUPPLY_PROP_SOC_REPORTING_READY,
@@ -4934,6 +5015,7 @@ static int fg_gen3_probe(struct platform_device *pdev)
chip->debug_mask = &fg_gen3_debug_mask;
chip->irqs = fg_irqs;
chip->charge_status = -EINVAL;
+ chip->prev_charge_status = -EINVAL;
chip->ki_coeff_full_soc = -EINVAL;
chip->online_status = -EINVAL;
chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
@@ -5006,6 +5088,7 @@ static int fg_gen3_probe(struct platform_device *pdev)
mutex_init(&chip->cl.lock);
mutex_init(&chip->ttf.lock);
mutex_init(&chip->charge_full_lock);
+ mutex_init(&chip->qnovo_esr_ctrl_lock);
init_completion(&chip->soc_update);
init_completion(&chip->soc_ready);
INIT_DELAYED_WORK(&chip->profile_load_work, profile_load_work);
diff --git a/drivers/power/supply/qcom/qpnp-fg.c b/drivers/power/supply/qcom/qpnp-fg.c
new file mode 100644
index 000000000000..cfd2f64a9bb8
--- /dev/null
+++ b/drivers/power/supply/qcom/qpnp-fg.c
@@ -0,0 +1,7051 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "FG: %s: " fmt, __func__
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+#include <linux/rtc.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/init.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/ktime.h>
+#include <linux/power_supply.h>
+#include <linux/of_batterydata.h>
+#include <linux/string_helpers.h>
+#include <linux/alarmtimer.h>
+#include <linux/qpnp/qpnp-revid.h>
+
+/* Register offsets */
+
+/* Interrupt offsets */
+#define INT_RT_STS(base) (base + 0x10)
+#define INT_EN_CLR(base) (base + 0x16)
+
+/* SPMI Register offsets */
+#define SOC_MONOTONIC_SOC 0x09
+#define SOC_BOOT_MOD 0x50
+#define SOC_RESTART 0x51
+
+#define REG_OFFSET_PERP_SUBTYPE 0x05
+
+/* RAM register offsets */
+#define RAM_OFFSET 0x400
+
+/* Bit/Mask definitions */
+#define FULL_PERCENT 0xFF
+#define MAX_TRIES_SOC 5
+#define MA_MV_BIT_RES 39
+#define MSB_SIGN BIT(7)
+#define IBAT_VBAT_MASK 0x7F
+#define NO_OTP_PROF_RELOAD BIT(6)
+#define REDO_FIRST_ESTIMATE BIT(3)
+#define RESTART_GO BIT(0)
+#define THERM_DELAY_MASK 0xE0
+
+/* SUBTYPE definitions */
+#define FG_SOC 0x9
+#define FG_BATT 0xA
+#define FG_ADC 0xB
+#define FG_MEMIF 0xC
+
+#define QPNP_FG_DEV_NAME "qcom,qpnp-fg"
+#define MEM_IF_TIMEOUT_MS 5000
+#define BUCKET_COUNT 8
+#define BUCKET_SOC_PCT (256 / BUCKET_COUNT)
+
+#define BCL_MA_TO_ADC(_current, _adc_val)			\
+	do {							\
+		_adc_val = (u8)((_current) * 100 / 976);	\
+	} while (0)
+
+/* Debug Flag Definitions */
+enum {
+ FG_SPMI_DEBUG_WRITES = BIT(0), /* Show SPMI writes */
+ FG_SPMI_DEBUG_READS = BIT(1), /* Show SPMI reads */
+ FG_IRQS = BIT(2), /* Show interrupts */
+ FG_MEM_DEBUG_WRITES = BIT(3), /* Show SRAM writes */
+ FG_MEM_DEBUG_READS = BIT(4), /* Show SRAM reads */
+ FG_POWER_SUPPLY = BIT(5), /* Show POWER_SUPPLY */
+ FG_STATUS = BIT(6), /* Show FG status changes */
+ FG_AGING = BIT(7), /* Show FG aging algorithm */
+};
+
+/* PMIC REVISIONS */
+#define REVID_RESERVED 0
+#define REVID_VARIANT 1
+#define REVID_ANA_MAJOR 2
+#define REVID_DIG_MAJOR 3
+
+enum dig_major {
+ DIG_REV_1 = 0x1,
+ DIG_REV_2 = 0x2,
+ DIG_REV_3 = 0x3,
+};
+
+enum pmic_subtype {
+ PMI8994 = 10,
+ PMI8950 = 17,
+ PMI8996 = 19,
+ PMI8937 = 55,
+};
+
+enum wa_flags {
+ IADC_GAIN_COMP_WA = BIT(0),
+ USE_CC_SOC_REG = BIT(1),
+ PULSE_REQUEST_WA = BIT(2),
+ BCL_HI_POWER_FOR_CHGLED_WA = BIT(3)
+};
+
+enum current_sense_type {
+ INTERNAL_CURRENT_SENSE,
+ EXTERNAL_CURRENT_SENSE,
+};
+
+struct fg_mem_setting {
+ u16 address;
+ u8 offset;
+ int value;
+};
+
+struct fg_mem_data {
+ u16 address;
+ u8 offset;
+ unsigned int len;
+ int value;
+};
+
+struct fg_learning_data {
+ int64_t cc_uah;
+ int64_t learned_cc_uah;
+ int init_cc_pc_val;
+ bool active;
+ bool feedback_on;
+ struct mutex learning_lock;
+ ktime_t time_stamp;
+ /* configuration properties */
+ int max_start_soc;
+ int max_increment;
+ int max_decrement;
+ int min_temp;
+ int max_temp;
+ int vbat_est_thr_uv;
+};
+
+struct fg_rslow_data {
+ u8 rslow_cfg;
+ u8 rslow_thr;
+ u8 rs_to_rslow[2];
+ u8 rslow_comp[4];
+ uint32_t chg_rs_to_rslow;
+ uint32_t chg_rslow_comp_c1;
+ uint32_t chg_rslow_comp_c2;
+ uint32_t chg_rslow_comp_thr;
+ bool active;
+ struct mutex lock;
+};
+
+struct fg_cyc_ctr_data {
+ bool en;
+ bool started[BUCKET_COUNT];
+ u16 count[BUCKET_COUNT];
+ u8 last_soc[BUCKET_COUNT];
+ int id;
+ struct mutex lock;
+};
+
+struct fg_iadc_comp_data {
+ u8 dfl_gain_reg[2];
+ bool gain_active;
+ int64_t dfl_gain;
+};
+
+struct fg_cc_soc_data {
+ int init_sys_soc;
+ int init_cc_soc;
+ int full_capacity;
+ int delta_soc;
+};
+
+/* FG_MEMIF setting index */
+enum fg_mem_setting_index {
+ FG_MEM_SOFT_COLD = 0,
+ FG_MEM_SOFT_HOT,
+ FG_MEM_HARD_COLD,
+ FG_MEM_HARD_HOT,
+ FG_MEM_RESUME_SOC,
+ FG_MEM_BCL_LM_THRESHOLD,
+ FG_MEM_BCL_MH_THRESHOLD,
+ FG_MEM_TERM_CURRENT,
+ FG_MEM_CHG_TERM_CURRENT,
+ FG_MEM_IRQ_VOLT_EMPTY,
+ FG_MEM_CUTOFF_VOLTAGE,
+ FG_MEM_VBAT_EST_DIFF,
+ FG_MEM_DELTA_SOC,
+ FG_MEM_BATT_LOW,
+ FG_MEM_THERM_DELAY,
+ FG_MEM_SETTING_MAX,
+};
+
+/* FG_MEMIF data index */
+enum fg_mem_data_index {
+ FG_DATA_BATT_TEMP = 0,
+ FG_DATA_OCV,
+ FG_DATA_VOLTAGE,
+ FG_DATA_CURRENT,
+ FG_DATA_BATT_ESR,
+ FG_DATA_BATT_ESR_COUNT,
+ FG_DATA_BATT_SOC,
+ FG_DATA_CC_CHARGE,
+ FG_DATA_VINT_ERR,
+ FG_DATA_CPRED_VOLTAGE,
+	/* values below this only get read once per profile reload */
+ FG_DATA_BATT_ID,
+ FG_DATA_BATT_ID_INFO,
+ FG_DATA_MAX,
+};
+
+#define SETTING(_idx, _address, _offset, _value) \
+ [FG_MEM_##_idx] = { \
+ .address = _address, \
+ .offset = _offset, \
+ .value = _value, \
+ } \
+
+static struct fg_mem_setting settings[FG_MEM_SETTING_MAX] = {
+ /* ID Address, Offset, Value*/
+ SETTING(SOFT_COLD, 0x454, 0, 100),
+ SETTING(SOFT_HOT, 0x454, 1, 400),
+ SETTING(HARD_COLD, 0x454, 2, 50),
+ SETTING(HARD_HOT, 0x454, 3, 450),
+ SETTING(RESUME_SOC, 0x45C, 1, 0),
+ SETTING(BCL_LM_THRESHOLD, 0x47C, 2, 50),
+ SETTING(BCL_MH_THRESHOLD, 0x47C, 3, 752),
+ SETTING(TERM_CURRENT, 0x40C, 2, 250),
+ SETTING(CHG_TERM_CURRENT, 0x4F8, 2, 250),
+ SETTING(IRQ_VOLT_EMPTY, 0x458, 3, 3100),
+ SETTING(CUTOFF_VOLTAGE, 0x40C, 0, 3200),
+ SETTING(VBAT_EST_DIFF, 0x000, 0, 30),
+ SETTING(DELTA_SOC, 0x450, 3, 1),
+ SETTING(BATT_LOW, 0x458, 0, 4200),
+ SETTING(THERM_DELAY, 0x4AC, 3, 0),
+};
+
+#define DATA(_idx, _address, _offset, _length, _value) \
+ [FG_DATA_##_idx] = { \
+ .address = _address, \
+ .offset = _offset, \
+ .len = _length, \
+ .value = _value, \
+ } \
+
+static struct fg_mem_data fg_data[FG_DATA_MAX] = {
+ /* ID Address, Offset, Length, Value*/
+ DATA(BATT_TEMP, 0x550, 2, 2, -EINVAL),
+ DATA(OCV, 0x588, 3, 2, -EINVAL),
+ DATA(VOLTAGE, 0x5CC, 1, 2, -EINVAL),
+ DATA(CURRENT, 0x5CC, 3, 2, -EINVAL),
+ DATA(BATT_ESR, 0x554, 2, 2, -EINVAL),
+ DATA(BATT_ESR_COUNT, 0x558, 2, 2, -EINVAL),
+ DATA(BATT_SOC, 0x56C, 1, 3, -EINVAL),
+ DATA(CC_CHARGE, 0x570, 0, 4, -EINVAL),
+ DATA(VINT_ERR, 0x560, 0, 4, -EINVAL),
+ DATA(CPRED_VOLTAGE, 0x540, 0, 2, -EINVAL),
+ DATA(BATT_ID, 0x594, 1, 1, -EINVAL),
+ DATA(BATT_ID_INFO, 0x594, 3, 1, -EINVAL),
+};
+
+static int fg_debug_mask;
+module_param_named(
+ debug_mask, fg_debug_mask, int, S_IRUSR | S_IWUSR
+);
+
+static int fg_sense_type = -EINVAL;
+static int fg_restart;
+
+static int fg_est_dump;
+module_param_named(
+ first_est_dump, fg_est_dump, int, S_IRUSR | S_IWUSR
+);
+
+static char *fg_batt_type;
+module_param_named(
+ battery_type, fg_batt_type, charp, S_IRUSR | S_IWUSR
+);
+
+static int fg_sram_update_period_ms = 30000;
+module_param_named(
+ sram_update_period_ms, fg_sram_update_period_ms, int, S_IRUSR | S_IWUSR
+);
+
+struct fg_irq {
+ int irq;
+ unsigned long disabled;
+};
+
+enum fg_soc_irq {
+ HIGH_SOC,
+ LOW_SOC,
+ FULL_SOC,
+ EMPTY_SOC,
+ DELTA_SOC,
+ FIRST_EST_DONE,
+ SW_FALLBK_OCV,
+ SW_FALLBK_NEW_BATT,
+ FG_SOC_IRQ_COUNT,
+};
+
+enum fg_batt_irq {
+ JEITA_SOFT_COLD,
+ JEITA_SOFT_HOT,
+ VBATT_LOW,
+ BATT_IDENTIFIED,
+ BATT_ID_REQ,
+ BATTERY_UNKNOWN,
+ BATT_MISSING,
+ BATT_MATCH,
+ FG_BATT_IRQ_COUNT,
+};
+
+enum fg_mem_if_irq {
+ FG_MEM_AVAIL,
+ TA_RCVRY_SUG,
+ FG_MEM_IF_IRQ_COUNT,
+};
+
+enum fg_batt_aging_mode {
+ FG_AGING_NONE,
+ FG_AGING_ESR,
+ FG_AGING_CC,
+};
+
+enum register_type {
+ MEM_INTF_CFG,
+ MEM_INTF_CTL,
+ MEM_INTF_ADDR_LSB,
+ MEM_INTF_RD_DATA0,
+ MEM_INTF_WR_DATA0,
+ MAX_ADDRESS,
+};
+
+struct register_offset {
+ u16 address[MAX_ADDRESS];
+};
+
+static struct register_offset offset[] = {
+ [0] = {
+ /* CFG CTL LSB RD0 WD0 */
+ .address = {0x40, 0x41, 0x42, 0x4C, 0x48},
+ },
+ [1] = {
+ /* CFG CTL LSB RD0 WD0 */
+ .address = {0x50, 0x51, 0x61, 0x67, 0x63},
+ },
+};
+
+#define MEM_INTF_CFG(chip) \
+ ((chip)->mem_base + (chip)->offset[MEM_INTF_CFG])
+#define MEM_INTF_CTL(chip) \
+ ((chip)->mem_base + (chip)->offset[MEM_INTF_CTL])
+#define MEM_INTF_ADDR_LSB(chip) \
+ ((chip)->mem_base + (chip)->offset[MEM_INTF_ADDR_LSB])
+#define MEM_INTF_RD_DATA0(chip) \
+ ((chip)->mem_base + (chip)->offset[MEM_INTF_RD_DATA0])
+#define MEM_INTF_WR_DATA0(chip) \
+ ((chip)->mem_base + (chip)->offset[MEM_INTF_WR_DATA0])
+
+struct fg_wakeup_source {
+ struct wakeup_source source;
+ unsigned long enabled;
+};
+
+static void fg_stay_awake(struct fg_wakeup_source *source)
+{
+ if (!__test_and_set_bit(0, &source->enabled)) {
+ __pm_stay_awake(&source->source);
+ pr_debug("enabled source %s\n", source->source.name);
+ }
+}
+
+static void fg_relax(struct fg_wakeup_source *source)
+{
+ if (__test_and_clear_bit(0, &source->enabled)) {
+ __pm_relax(&source->source);
+ pr_debug("disabled source %s\n", source->source.name);
+ }
+}
+
+#define THERMAL_COEFF_N_BYTES 6
+struct fg_chip {
+ struct device *dev;
+ struct platform_device *pdev;
+ struct regmap *regmap;
+ u8 pmic_subtype;
+ u8 pmic_revision[4];
+ u8 revision[4];
+ u16 soc_base;
+ u16 batt_base;
+ u16 mem_base;
+ u16 vbat_adc_addr;
+ u16 ibat_adc_addr;
+ u16 tp_rev_addr;
+ u32 wa_flag;
+ atomic_t memif_user_cnt;
+ struct fg_irq soc_irq[FG_SOC_IRQ_COUNT];
+ struct fg_irq batt_irq[FG_BATT_IRQ_COUNT];
+ struct fg_irq mem_irq[FG_MEM_IF_IRQ_COUNT];
+ struct completion sram_access_granted;
+ struct completion sram_access_revoked;
+ struct completion batt_id_avail;
+ struct completion first_soc_done;
+ struct power_supply *bms_psy;
+ struct power_supply_desc bms_psy_d;
+ struct mutex rw_lock;
+ struct mutex sysfs_restart_lock;
+ struct delayed_work batt_profile_init;
+ struct work_struct dump_sram;
+ struct work_struct status_change_work;
+ struct work_struct cycle_count_work;
+ struct work_struct battery_age_work;
+ struct work_struct update_esr_work;
+ struct work_struct set_resume_soc_work;
+ struct work_struct rslow_comp_work;
+ struct work_struct sysfs_restart_work;
+ struct work_struct init_work;
+ struct work_struct charge_full_work;
+ struct work_struct gain_comp_work;
+ struct work_struct bcl_hi_power_work;
+ struct power_supply *batt_psy;
+ struct power_supply *usb_psy;
+ struct power_supply *dc_psy;
+ struct fg_wakeup_source memif_wakeup_source;
+ struct fg_wakeup_source profile_wakeup_source;
+ struct fg_wakeup_source empty_check_wakeup_source;
+ struct fg_wakeup_source resume_soc_wakeup_source;
+ struct fg_wakeup_source gain_comp_wakeup_source;
+ struct fg_wakeup_source capacity_learning_wakeup_source;
+ bool first_profile_loaded;
+ struct fg_wakeup_source update_temp_wakeup_source;
+ struct fg_wakeup_source update_sram_wakeup_source;
+ bool fg_restarting;
+ bool profile_loaded;
+ bool use_otp_profile;
+ bool battery_missing;
+ bool power_supply_registered;
+ bool sw_rbias_ctrl;
+ bool use_thermal_coefficients;
+ bool esr_strict_filter;
+ bool soc_empty;
+ bool charge_done;
+ bool resume_soc_lowered;
+ bool vbat_low_irq_enabled;
+ bool charge_full;
+ bool hold_soc_while_full;
+ bool input_present;
+ bool otg_present;
+ bool safety_timer_expired;
+ bool bad_batt_detection_en;
+ bool bcl_lpm_disabled;
+ bool charging_disabled;
+ struct delayed_work update_jeita_setting;
+ struct delayed_work update_sram_data;
+ struct delayed_work update_temp_work;
+ struct delayed_work check_empty_work;
+ char *batt_profile;
+ u8 thermal_coefficients[THERMAL_COEFF_N_BYTES];
+ u32 cc_cv_threshold_mv;
+ unsigned int batt_profile_len;
+ unsigned int batt_max_voltage_uv;
+ const char *batt_type;
+ const char *batt_psy_name;
+ unsigned long last_sram_update_time;
+ unsigned long last_temp_update_time;
+ int64_t ocv_coeffs[12];
+ int64_t cutoff_voltage;
+ int evaluation_current;
+ int ocv_junction_p1p2;
+ int ocv_junction_p2p3;
+ int nom_cap_uah;
+ int actual_cap_uah;
+ int status;
+ int prev_status;
+ int health;
+ enum fg_batt_aging_mode batt_aging_mode;
+ /* capacity learning */
+ struct fg_learning_data learning_data;
+ struct alarm fg_cap_learning_alarm;
+ struct work_struct fg_cap_learning_work;
+ struct fg_cc_soc_data sw_cc_soc_data;
+ /* rslow compensation */
+ struct fg_rslow_data rslow_comp;
+ /* cycle counter */
+ struct fg_cyc_ctr_data cyc_ctr;
+ /* iadc compensation */
+ struct fg_iadc_comp_data iadc_comp_data;
+ /* interleaved memory access */
+ u16 *offset;
+ bool ima_supported;
+ bool init_done;
+ /* jeita hysteresis */
+ bool jeita_hysteresis_support;
+ bool batt_hot;
+ bool batt_cold;
+ int cold_hysteresis;
+ int hot_hysteresis;
+ /* ESR pulse tuning */
+ struct fg_wakeup_source esr_extract_wakeup_source;
+ struct work_struct esr_extract_config_work;
+ bool esr_extract_disabled;
+ bool imptr_pulse_slow_en;
+ bool esr_pulse_tune_en;
+};
+
+/* FG_MEMIF DEBUGFS structures */
+#define ADDR_LEN 4 /* 3 byte address + 1 space character */
+#define CHARS_PER_ITEM 3 /* Format is 'XX ' */
+#define ITEMS_PER_LINE 4 /* 4 data items per line */
+#define MAX_LINE_LENGTH (ADDR_LEN + (ITEMS_PER_LINE * CHARS_PER_ITEM) + 1)
+#define MAX_REG_PER_TRANSACTION (8)
+
+static const char *DFS_ROOT_NAME = "fg_memif";
+static const mode_t DFS_MODE = S_IRUSR | S_IWUSR;
+static const char *default_batt_type = "Unknown Battery";
+static const char *loading_batt_type = "Loading Battery Data";
+static const char *missing_batt_type = "Disconnected Battery";
+
+/* Log buffer */
+struct fg_log_buffer {
+ size_t rpos; /* Current 'read' position in buffer */
+ size_t wpos; /* Current 'write' position in buffer */
+ size_t len; /* Length of the buffer */
+ char data[0]; /* Log buffer */
+};
+
+/* transaction parameters */
+struct fg_trans {
+ u32 cnt; /* Number of bytes to read */
+ u16 addr; /* 12-bit address in SRAM */
+ u32 offset; /* Offset of last read data + byte offset */
+ struct fg_chip *chip;
+ struct fg_log_buffer *log; /* log buffer */
+ u8 *data; /* fg data that is read */
+ struct mutex memif_dfs_lock; /* Prevent thread concurrency */
+};
+
+struct fg_dbgfs {
+ u32 cnt;
+ u32 addr;
+ struct fg_chip *chip;
+ struct dentry *root;
+ struct mutex lock;
+ struct debugfs_blob_wrapper help_msg;
+};
+
+static struct fg_dbgfs dbgfs_data = {
+ .lock = __MUTEX_INITIALIZER(dbgfs_data.lock),
+ .help_msg = {
+ .data =
+"FG Debug-FS support\n"
+"\n"
+"Hierarchy schema:\n"
+"/sys/kernel/debug/fg_memif\n"
+" /help -- Static help text\n"
+" /address -- Starting register address for reads or writes\n"
+" /count -- Number of registers to read (only used for reads)\n"
+" /data -- Initiates the SRAM read (formatted output)\n"
+"\n",
+ },
+};
+
+static const struct of_device_id fg_match_table[] = {
+ { .compatible = QPNP_FG_DEV_NAME, },
+ {}
+};
+
+static char *fg_supplicants[] = {
+ "battery",
+ "bcl",
+ "fg_adc"
+};
+
+#define DEBUG_PRINT_BUFFER_SIZE 64
+static void fill_string(char *str, size_t str_len, u8 *buf, int buf_len)
+{
+ int pos = 0;
+ int i;
+
+ for (i = 0; i < buf_len; i++) {
+ pos += scnprintf(str + pos, str_len - pos, "%02X", buf[i]);
+ if (i < buf_len - 1)
+ pos += scnprintf(str + pos, str_len - pos, " ");
+ }
+}
+
+static int fg_write(struct fg_chip *chip, u8 *val, u16 addr, int len)
+{
+ int rc = 0;
+ struct platform_device *pdev = chip->pdev;
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+
+ if ((addr & 0xff00) == 0) {
+ pr_err("addr cannot be zero base=0x%02x sid=0x%02x rc=%d\n",
+ addr, to_spmi_device(pdev->dev.parent)->usid, rc);
+ return -EINVAL;
+ }
+
+ rc = regmap_bulk_write(chip->regmap, addr, val, len);
+ if (rc) {
+ pr_err("write failed addr=0x%02x sid=0x%02x rc=%d\n",
+ addr, to_spmi_device(pdev->dev.parent)->usid, rc);
+ return rc;
+ }
+
+ if (!rc && (fg_debug_mask & FG_SPMI_DEBUG_WRITES)) {
+ str[0] = '\0';
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, len);
+ pr_info("write(0x%04X), sid=%d, len=%d; %s\n",
+ addr, to_spmi_device(pdev->dev.parent)->usid, len,
+ str);
+ }
+
+ return rc;
+}
+
+static int fg_read(struct fg_chip *chip, u8 *val, u16 addr, int len)
+{
+ int rc = 0;
+ struct platform_device *pdev = chip->pdev;
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+
+ if ((addr & 0xff00) == 0) {
+ pr_err("base cannot be zero base=0x%02x sid=0x%02x rc=%d\n",
+ addr, to_spmi_device(pdev->dev.parent)->usid, rc);
+ return -EINVAL;
+ }
+
+ rc = regmap_bulk_read(chip->regmap, addr, val, len);
+ if (rc) {
+ pr_err("SPMI read failed base=0x%02x sid=0x%02x rc=%d\n", addr,
+ to_spmi_device(pdev->dev.parent)->usid, rc);
+ return rc;
+ }
+
+ if (!rc && (fg_debug_mask & FG_SPMI_DEBUG_READS)) {
+ str[0] = '\0';
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, len);
+ pr_info("read(0x%04x), sid=%d, len=%d; %s\n",
+ addr, to_spmi_device(pdev->dev.parent)->usid, len,
+ str);
+ }
+
+ return rc;
+}
+
+static int fg_masked_write(struct fg_chip *chip, u16 addr,
+ u8 mask, u8 val, int len)
+{
+ int rc;
+
+ rc = regmap_update_bits(chip->regmap, addr, mask, val);
+ if (rc) {
+ pr_err("spmi write failed: addr=%03X, rc=%d\n", addr, rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+#define RIF_MEM_ACCESS_REQ BIT(7)
+static int fg_check_rif_mem_access(struct fg_chip *chip, bool *status)
+{
+ int rc;
+ u8 mem_if_sts;
+
+ rc = fg_read(chip, &mem_if_sts, MEM_INTF_CFG(chip), 1);
+ if (rc) {
+ pr_err("failed to read rif_mem status rc=%d\n", rc);
+ return rc;
+ }
+
+ *status = mem_if_sts & RIF_MEM_ACCESS_REQ;
+ return 0;
+}
+
+static bool fg_check_sram_access(struct fg_chip *chip)
+{
+ int rc;
+ u8 mem_if_sts;
+ bool rif_mem_sts = false;
+
+ rc = fg_read(chip, &mem_if_sts, INT_RT_STS(chip->mem_base), 1);
+ if (rc) {
+ pr_err("failed to read mem status rc=%d\n", rc);
+ return false;
+ }
+
+ if ((mem_if_sts & BIT(FG_MEM_AVAIL)) == 0)
+ return false;
+
+ rc = fg_check_rif_mem_access(chip, &rif_mem_sts);
+ if (rc)
+ return false;
+
+ return rif_mem_sts;
+}
+
+static inline int fg_assert_sram_access(struct fg_chip *chip)
+{
+ int rc;
+ u8 mem_if_sts;
+
+ rc = fg_read(chip, &mem_if_sts, INT_RT_STS(chip->mem_base), 1);
+ if (rc) {
+ pr_err("failed to read mem status rc=%d\n", rc);
+ return rc;
+ }
+
+ if ((mem_if_sts & BIT(FG_MEM_AVAIL)) == 0) {
+ pr_err("mem_avail not high: %02x\n", mem_if_sts);
+ return -EINVAL;
+ }
+
+ rc = fg_read(chip, &mem_if_sts, MEM_INTF_CFG(chip), 1);
+ if (rc) {
+ pr_err("failed to read mem status rc=%d\n", rc);
+ return rc;
+ }
+
+ if ((mem_if_sts & RIF_MEM_ACCESS_REQ) == 0) {
+		pr_err("rif_mem_access not high: %02x\n", mem_if_sts);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define INTF_CTL_BURST BIT(7)
+#define INTF_CTL_WR_EN BIT(6)
+static int fg_config_access(struct fg_chip *chip, bool write,
+ bool burst)
+{
+ int rc;
+ u8 intf_ctl = 0;
+
+ intf_ctl = (write ? INTF_CTL_WR_EN : 0) | (burst ? INTF_CTL_BURST : 0);
+
+ rc = fg_write(chip, &intf_ctl, MEM_INTF_CTL(chip), 1);
+ if (rc) {
+ pr_err("failed to set mem access bit\n");
+ return -EIO;
+ }
+
+ return rc;
+}
+
+static int fg_req_and_wait_access(struct fg_chip *chip, int timeout)
+{
+ int rc = 0, ret = 0;
+ bool tried_again = false;
+
+ if (!fg_check_sram_access(chip)) {
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip),
+ RIF_MEM_ACCESS_REQ, RIF_MEM_ACCESS_REQ, 1);
+ if (rc) {
+ pr_err("failed to set mem access bit\n");
+ return -EIO;
+ }
+ fg_stay_awake(&chip->memif_wakeup_source);
+ }
+
+wait:
+ /* Wait for MEM_AVAIL IRQ. */
+ ret = wait_for_completion_interruptible_timeout(
+ &chip->sram_access_granted,
+ msecs_to_jiffies(timeout));
+ /* If we were interrupted wait again one more time. */
+ if (ret == -ERESTARTSYS && !tried_again) {
+ tried_again = true;
+ goto wait;
+ } else if (ret <= 0) {
+ rc = -ETIMEDOUT;
+ pr_err("transaction timed out rc=%d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int fg_release_access(struct fg_chip *chip)
+{
+ int rc;
+
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip),
+ RIF_MEM_ACCESS_REQ, 0, 1);
+ fg_relax(&chip->memif_wakeup_source);
+ reinit_completion(&chip->sram_access_granted);
+
+ return rc;
+}
+
+static void fg_release_access_if_necessary(struct fg_chip *chip)
+{
+ mutex_lock(&chip->rw_lock);
+ if (atomic_sub_return(1, &chip->memif_user_cnt) <= 0) {
+ fg_release_access(chip);
+ }
+ mutex_unlock(&chip->rw_lock);
+}
+
+/*
+ * fg_mem_lock prevents the fuel gauge driver from releasing SRAM access
+ * until the lock is dropped.
+ *
+ * An equal number of calls must be made to fg_mem_release before the
+ * driver will release the SRAM access.
+ */
+static void fg_mem_lock(struct fg_chip *chip)
+{
+ mutex_lock(&chip->rw_lock);
+ atomic_add_return(1, &chip->memif_user_cnt);
+ mutex_unlock(&chip->rw_lock);
+}
+
+static void fg_mem_release(struct fg_chip *chip)
+{
+ fg_release_access_if_necessary(chip);
+}
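
fg_mem_lock() and fg_mem_release() keep a reference count (memif_user_cnt) on
top of the RIF_MEM_ACCESS_REQ handshake, so a caller can hold SRAM access
across several transactions. A minimal usage sketch (illustrative only;
fg_mem_read is the wrapper defined further down in this file, and the
addresses are the BATT_TEMP/VOLTAGE entries of the fg_data table):

	u8 temp[2], volt[2];
	int rc;

	fg_mem_lock(chip);			/* memif_user_cnt++ */
	rc = fg_mem_read(chip, temp, 0x550, 2, 2, 0);
	if (!rc)
		rc = fg_mem_read(chip, volt, 0x5CC, 2, 1, 0);
	fg_mem_release(chip);			/* access dropped at count 0 */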
+
+static int fg_set_ram_addr(struct fg_chip *chip, u16 *address)
+{
+ int rc;
+
+ rc = fg_write(chip, (u8 *) address,
+ chip->mem_base + chip->offset[MEM_INTF_ADDR_LSB], 2);
+ if (rc) {
+ pr_err("spmi write failed: addr=%03X, rc=%d\n",
+ chip->mem_base + chip->offset[MEM_INTF_ADDR_LSB], rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+#define BUF_LEN 4
+static int fg_sub_mem_read(struct fg_chip *chip, u8 *val, u16 address, int len,
+ int offset)
+{
+ int rc, total_len;
+ u8 *rd_data = val;
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+
+ rc = fg_config_access(chip, 0, (len > 4));
+ if (rc)
+ return rc;
+
+ rc = fg_set_ram_addr(chip, &address);
+ if (rc)
+ return rc;
+
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("length %d addr=%02X\n", len, address);
+
+ total_len = len;
+ while (len > 0) {
+ if (!offset) {
+ rc = fg_read(chip, rd_data, MEM_INTF_RD_DATA0(chip),
+ min(len, BUF_LEN));
+ } else {
+ rc = fg_read(chip, rd_data,
+ MEM_INTF_RD_DATA0(chip) + offset,
+ min(len, BUF_LEN - offset));
+
+			/* manually set address to allow continuous reads */
+ address += BUF_LEN;
+
+ rc = fg_set_ram_addr(chip, &address);
+ if (rc)
+ return rc;
+ }
+ if (rc) {
+ pr_err("spmi read failed: addr=%03x, rc=%d\n",
+ MEM_INTF_RD_DATA0(chip) + offset, rc);
+ return rc;
+ }
+ rd_data += (BUF_LEN - offset);
+ len -= (BUF_LEN - offset);
+ offset = 0;
+ }
+
+ if (fg_debug_mask & FG_MEM_DEBUG_READS) {
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, total_len);
+ pr_info("data: %s\n", str);
+ }
+ return rc;
+}
+
+static int fg_conventional_mem_read(struct fg_chip *chip, u8 *val, u16 address,
+ int len, int offset, bool keep_access)
+{
+ int rc = 0, user_cnt = 0, orig_address = address;
+
+ if (offset > 3) {
+ pr_err("offset too large %d\n", offset);
+ return -EINVAL;
+ }
+
+ address = ((orig_address + offset) / 4) * 4;
+ offset = (orig_address + offset) % 4;
+
+ user_cnt = atomic_add_return(1, &chip->memif_user_cnt);
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("user_cnt %d\n", user_cnt);
+ mutex_lock(&chip->rw_lock);
+ if (!fg_check_sram_access(chip)) {
+ rc = fg_req_and_wait_access(chip, MEM_IF_TIMEOUT_MS);
+ if (rc)
+ goto out;
+ }
+
+ rc = fg_sub_mem_read(chip, val, address, len, offset);
+
+out:
+ user_cnt = atomic_sub_return(1, &chip->memif_user_cnt);
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("user_cnt %d\n", user_cnt);
+
+ fg_assert_sram_access(chip);
+
+ if (!keep_access && (user_cnt == 0) && !rc) {
+ rc = fg_release_access(chip);
+ if (rc) {
+ pr_err("failed to set mem access bit\n");
+ rc = -EIO;
+ }
+ }
+
+ mutex_unlock(&chip->rw_lock);
+ return rc;
+}
+
+static int fg_conventional_mem_write(struct fg_chip *chip, u8 *val, u16 address,
+ int len, int offset, bool keep_access)
+{
+	int rc = 0, user_cnt = 0, sublen, orig_address = address;
+ bool access_configured = false;
+ u8 *wr_data = val, word[4];
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+
+ if (address < RAM_OFFSET)
+ return -EINVAL;
+
+ if (offset > 3)
+ return -EINVAL;
+
+	address = ((orig_address + offset) / 4) * 4;
+	offset = (orig_address + offset) % 4;
+
+ user_cnt = atomic_add_return(1, &chip->memif_user_cnt);
+ if (fg_debug_mask & FG_MEM_DEBUG_WRITES)
+ pr_info("user_cnt %d\n", user_cnt);
+ mutex_lock(&chip->rw_lock);
+ if (!fg_check_sram_access(chip)) {
+ rc = fg_req_and_wait_access(chip, MEM_IF_TIMEOUT_MS);
+ if (rc)
+ goto out;
+ }
+
+ if (fg_debug_mask & FG_MEM_DEBUG_WRITES) {
+ pr_info("length %d addr=%02X offset=%d\n",
+ len, address, offset);
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, wr_data, len);
+ pr_info("writing: %s\n", str);
+ }
+
+ while (len > 0) {
+ if (offset != 0) {
+ sublen = min(4 - offset, len);
+ rc = fg_sub_mem_read(chip, word, address, 4, 0);
+ if (rc)
+ goto out;
+ memcpy(word + offset, wr_data, sublen);
+ /* configure access as burst if more to write */
+ rc = fg_config_access(chip, 1, (len - sublen) > 0);
+ if (rc)
+ goto out;
+ rc = fg_set_ram_addr(chip, &address);
+ if (rc)
+ goto out;
+ offset = 0;
+ access_configured = true;
+ } else if (len >= 4) {
+ if (!access_configured) {
+ rc = fg_config_access(chip, 1, len > 4);
+ if (rc)
+ goto out;
+ rc = fg_set_ram_addr(chip, &address);
+ if (rc)
+ goto out;
+ access_configured = true;
+ }
+ sublen = 4;
+ memcpy(word, wr_data, 4);
+ } else if (len > 0 && len < 4) {
+ sublen = len;
+ rc = fg_sub_mem_read(chip, word, address, 4, 0);
+ if (rc)
+ goto out;
+ memcpy(word, wr_data, sublen);
+ rc = fg_config_access(chip, 1, 0);
+ if (rc)
+ goto out;
+ rc = fg_set_ram_addr(chip, &address);
+ if (rc)
+ goto out;
+ access_configured = true;
+ } else {
+ pr_err("Invalid length: %d\n", len);
+ break;
+ }
+ rc = fg_write(chip, word, MEM_INTF_WR_DATA0(chip), 4);
+ if (rc) {
+ pr_err("spmi write failed: addr=%03x, rc=%d\n",
+ MEM_INTF_WR_DATA0(chip), rc);
+ goto out;
+ }
+ len -= sublen;
+ wr_data += sublen;
+ address += 4;
+ }
+
+out:
+ user_cnt = atomic_sub_return(1, &chip->memif_user_cnt);
+ if (fg_debug_mask & FG_MEM_DEBUG_WRITES)
+ pr_info("user_cnt %d\n", user_cnt);
+
+ fg_assert_sram_access(chip);
+
+ if (!keep_access && (user_cnt == 0) && !rc) {
+ rc = fg_release_access(chip);
+ if (rc) {
+ pr_err("failed to set mem access bit\n");
+ rc = -EIO;
+ }
+ }
+
+ mutex_unlock(&chip->rw_lock);
+ return rc;
+}
+
+#define MEM_INTF_IMA_CFG 0x52
+#define MEM_INTF_IMA_OPR_STS 0x54
+#define MEM_INTF_IMA_ERR_STS 0x5F
+#define MEM_INTF_IMA_EXP_STS 0x55
+#define MEM_INTF_IMA_HW_STS 0x56
+#define MEM_INTF_IMA_BYTE_EN 0x60
+#define IMA_ADDR_STBL_ERR BIT(7)
+#define IMA_WR_ACS_ERR BIT(6)
+#define IMA_RD_ACS_ERR BIT(5)
+#define IMA_IACS_CLR BIT(2)
+#define IMA_IACS_RDY BIT(1)
+static int fg_check_ima_exception(struct fg_chip *chip)
+{
+ int rc = 0, ret = 0;
+ u8 err_sts, exp_sts = 0, hw_sts = 0;
+
+ rc = fg_read(chip, &err_sts,
+ chip->mem_base + MEM_INTF_IMA_ERR_STS, 1);
+ if (rc) {
+		pr_err("failed to read IMA err_sts rc=%d\n", rc);
+ return rc;
+ }
+
+ if (err_sts & (IMA_ADDR_STBL_ERR | IMA_WR_ACS_ERR | IMA_RD_ACS_ERR)) {
+ u8 temp;
+
+ fg_read(chip, &exp_sts,
+ chip->mem_base + MEM_INTF_IMA_EXP_STS, 1);
+ fg_read(chip, &hw_sts,
+ chip->mem_base + MEM_INTF_IMA_HW_STS, 1);
+ pr_err("IMA access failed ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n",
+ err_sts, exp_sts, hw_sts);
+ rc = err_sts;
+
+ /* clear the error */
+ ret |= fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
+ IMA_IACS_CLR, IMA_IACS_CLR, 1);
+ temp = 0x4;
+ ret |= fg_write(chip, &temp, MEM_INTF_ADDR_LSB(chip) + 1, 1);
+ temp = 0x0;
+ ret |= fg_write(chip, &temp, MEM_INTF_WR_DATA0(chip) + 3, 1);
+ ret |= fg_read(chip, &temp, MEM_INTF_RD_DATA0(chip) + 3, 1);
+ ret |= fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
+ IMA_IACS_CLR, 0, 1);
+ if (!ret)
+ return -EAGAIN;
+ else
+ pr_err("Error clearing IMA exception ret=%d\n", ret);
+ }
+
+ return rc;
+}
+
+static int fg_check_iacs_ready(struct fg_chip *chip)
+{
+ int rc = 0, timeout = 250;
+ u8 ima_opr_sts = 0;
+
+ /*
+ * Additional delay to make sure IACS ready bit is set after
+ * Read/Write operation.
+ */
+
+ usleep_range(30, 35);
+ while (1) {
+ rc = fg_read(chip, &ima_opr_sts,
+ chip->mem_base + MEM_INTF_IMA_OPR_STS, 1);
+ if (!rc && (ima_opr_sts & IMA_IACS_RDY)) {
+ break;
+ } else {
+ if (!(--timeout) || rc)
+ break;
+ /* delay for iacs_ready to be asserted */
+ usleep_range(5000, 7000);
+ }
+ }
+
+ if (!timeout || rc) {
+ pr_err("IACS_RDY not set\n");
+ /* perform IACS_CLR sequence */
+ fg_check_ima_exception(chip);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+#define IACS_SLCT BIT(5)
+static int __fg_interleaved_mem_write(struct fg_chip *chip, u8 *val,
+ u16 address, int offset, int len)
+{
+ int rc = 0, i;
+ u8 *word = val, byte_enable = 0, num_bytes = 0;
+
+ if (fg_debug_mask & FG_MEM_DEBUG_WRITES)
+ pr_info("length %d addr=%02X offset=%d\n",
+ len, address, offset);
+
+ while (len > 0) {
+ num_bytes = (offset + len) > BUF_LEN ?
+ (BUF_LEN - offset) : len;
+ /* write to byte_enable */
+ for (i = offset; i < (offset + num_bytes); i++)
+ byte_enable |= BIT(i);
+
+ rc = fg_write(chip, &byte_enable,
+ chip->mem_base + MEM_INTF_IMA_BYTE_EN, 1);
+ if (rc) {
+ pr_err("Unable to write to byte_en_reg rc=%d\n",
+ rc);
+ return rc;
+ }
+ /* write data */
+ rc = fg_write(chip, word, MEM_INTF_WR_DATA0(chip) + offset,
+ num_bytes);
+ if (rc) {
+ pr_err("spmi write failed: addr=%03x, rc=%d\n",
+ MEM_INTF_WR_DATA0(chip) + offset, rc);
+ return rc;
+ }
+ /*
+ * The last-byte WR_DATA3 starts the write transaction.
+ * Write a dummy value to WR_DATA3 if it does not have
+ * valid data. This dummy data is not written to the
+ * SRAM as byte_en for WR_DATA3 is not set.
+ */
+ if (!(byte_enable & BIT(3))) {
+ u8 dummy_byte = 0x0;
+ rc = fg_write(chip, &dummy_byte,
+ MEM_INTF_WR_DATA0(chip) + 3, 1);
+ if (rc) {
+ pr_err("Unable to write dummy-data to WR_DATA3 rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = fg_check_iacs_ready(chip);
+ if (rc) {
+ pr_debug("IACS_RDY failed rc=%d\n", rc);
+ return rc;
+ }
+
+ /* check for error condition */
+ rc = fg_check_ima_exception(chip);
+ if (rc) {
+			pr_err("IMA transaction failed rc=%d\n", rc);
+ return rc;
+ }
+
+ word += num_bytes;
+ len -= num_bytes;
+ offset = byte_enable = 0;
+ }
+
+ return rc;
+}
+
+static int __fg_interleaved_mem_read(struct fg_chip *chip, u8 *val, u16 address,
+ int offset, int len)
+{
+ int rc = 0, total_len;
+ u8 *rd_data = val, num_bytes;
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("length %d addr=%02X\n", len, address);
+
+ total_len = len;
+ while (len > 0) {
+ num_bytes = (offset + len) > BUF_LEN ? (BUF_LEN - offset) : len;
+ rc = fg_read(chip, rd_data, MEM_INTF_RD_DATA0(chip) + offset,
+ num_bytes);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03x, rc=%d\n",
+ MEM_INTF_RD_DATA0(chip) + offset, rc);
+ return rc;
+ }
+
+ rd_data += num_bytes;
+ len -= num_bytes;
+ offset = 0;
+
+ rc = fg_check_iacs_ready(chip);
+ if (rc) {
+ pr_debug("IACS_RDY failed rc=%d\n", rc);
+ return rc;
+ }
+
+ /* check for error condition */
+ rc = fg_check_ima_exception(chip);
+ if (rc) {
+			pr_err("IMA transaction failed rc=%d\n", rc);
+ return rc;
+ }
+
+ if (len && (len + offset) < BUF_LEN) {
+ /* move to single mode */
+ u8 intr_ctl = 0;
+
+ rc = fg_write(chip, &intr_ctl, MEM_INTF_CTL(chip), 1);
+ if (rc) {
+ pr_err("failed to move to single mode rc=%d\n",
+ rc);
+ return -EIO;
+ }
+ }
+ }
+
+ if (fg_debug_mask & FG_MEM_DEBUG_READS) {
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, total_len);
+ pr_info("data: %s\n", str);
+ }
+
+ return rc;
+}
+
+#define IMA_REQ_ACCESS (IACS_SLCT | RIF_MEM_ACCESS_REQ)
+static int fg_interleaved_mem_config(struct fg_chip *chip, u8 *val,
+ u16 address, int len, int offset, int op)
+{
+ int rc = 0;
+ bool rif_mem_sts = true;
+ int time_count = 0;
+
+ while (1) {
+ rc = fg_check_rif_mem_access(chip, &rif_mem_sts);
+ if (rc)
+ return rc;
+
+ if (!rif_mem_sts)
+ break;
+
+ if (fg_debug_mask & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES))
+ pr_info("RIF_MEM_ACCESS_REQ is not clear yet for IMA_%s\n",
+ op ? "write" : "read");
+
+ /*
+ * Try this no more than 4 times. If RIF_MEM_ACCESS_REQ is not
+ * clear, then return an error instead of waiting for it again.
+ */
+ if (time_count > 4) {
+			pr_err("RIF_MEM_ACCESS_REQ did not clear after ~20 ms of polling\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Wait for 4ms before reading RIF_MEM_ACCESS_REQ again */
+ usleep_range(4000, 4100);
+ time_count++;
+ }
+
+ /* configure for IMA access */
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip),
+ IMA_REQ_ACCESS, IMA_REQ_ACCESS, 1);
+ if (rc) {
+ pr_err("failed to set mem access bit rc = %d\n", rc);
+ return rc;
+ }
+
+ /* configure for the read/write single/burst mode */
+ rc = fg_config_access(chip, op, (offset + len) > 4);
+ if (rc) {
+ pr_err("failed to set configure memory access rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = fg_check_iacs_ready(chip);
+ if (rc) {
+ pr_debug("IACS_RDY failed rc=%d\n", rc);
+ return rc;
+ }
+
+ /* write addresses to the register */
+ rc = fg_set_ram_addr(chip, &address);
+ if (rc) {
+ pr_err("failed to set SRAM address rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = fg_check_iacs_ready(chip);
+ if (rc)
+ pr_debug("IACS_RDY failed rc=%d\n", rc);
+
+ return rc;
+}
+
+#define MEM_INTF_FG_BEAT_COUNT 0x57
+#define BEAT_COUNT_MASK 0x0F
+#define RETRY_COUNT 3
+static int fg_interleaved_mem_read(struct fg_chip *chip, u8 *val, u16 address,
+ int len, int offset)
+{
+ int rc = 0, orig_address = address;
+ u8 start_beat_count, end_beat_count, count = 0;
+ bool retry = false;
+
+ if (offset > 3) {
+ pr_err("offset too large %d\n", offset);
+ return -EINVAL;
+ }
+
+ fg_stay_awake(&chip->memif_wakeup_source);
+ address = ((orig_address + offset) / 4) * 4;
+ offset = (orig_address + offset) % 4;
+
+ if (address < RAM_OFFSET) {
+ /*
+		 * OTP memory reads need a conventional memory access; do a
+		 * conventional read when the address is below RAM_OFFSET.
+ */
+ rc = fg_conventional_mem_read(chip, val, address, len, offset,
+ 0);
+ if (rc)
+ pr_err("Failed to read OTP memory %d\n", rc);
+ goto exit;
+ }
+
+ mutex_lock(&chip->rw_lock);
+
+retry:
+	rc = fg_interleaved_mem_config(chip, val, address, len, offset, 0);
+ if (rc) {
+ pr_err("failed to configure SRAM for IMA rc = %d\n", rc);
+ goto out;
+ }
+
+ /* read the start beat count */
+ rc = fg_read(chip, &start_beat_count,
+ chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1);
+ if (rc) {
+ pr_err("failed to read beat count rc=%d\n", rc);
+ goto out;
+ }
+
+ /* read data */
+ rc = __fg_interleaved_mem_read(chip, val, address, offset, len);
+ if (rc) {
+ if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
+ count++;
+ pr_err("IMA access failed retry_count = %d\n", count);
+ goto retry;
+ } else {
+ pr_err("failed to read SRAM address rc = %d\n", rc);
+ goto out;
+ }
+ }
+
+ /* read the end beat count */
+ rc = fg_read(chip, &end_beat_count,
+ chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1);
+ if (rc) {
+ pr_err("failed to read beat count rc=%d\n", rc);
+ goto out;
+ }
+
+ start_beat_count &= BEAT_COUNT_MASK;
+ end_beat_count &= BEAT_COUNT_MASK;
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("Start beat_count = %x End beat_count = %x\n",
+ start_beat_count, end_beat_count);
+ if (start_beat_count != end_beat_count) {
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+			pr_info("Beat counts do not match - retry transaction\n");
+ retry = true;
+ }
+out:
+ /* Release IMA access */
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
+ if (rc)
+ pr_err("failed to reset IMA access bit rc = %d\n", rc);
+
+ if (retry) {
+ retry = false;
+ goto retry;
+ }
+ mutex_unlock(&chip->rw_lock);
+
+exit:
+ fg_relax(&chip->memif_wakeup_source);
+ return rc;
+}
+
+static int fg_interleaved_mem_write(struct fg_chip *chip, u8 *val, u16 address,
+ int len, int offset)
+{
+ int rc = 0, orig_address = address;
+ u8 count = 0;
+
+ if (address < RAM_OFFSET)
+ return -EINVAL;
+
+ if (offset > 3) {
+ pr_err("offset too large %d\n", offset);
+ return -EINVAL;
+ }
+
+ fg_stay_awake(&chip->memif_wakeup_source);
+ address = ((orig_address + offset) / 4) * 4;
+ offset = (orig_address + offset) % 4;
+
+ mutex_lock(&chip->rw_lock);
+
+retry:
+	rc = fg_interleaved_mem_config(chip, val, address, len, offset, 1);
+ if (rc) {
+		pr_err("failed to configure SRAM for IMA rc = %d\n", rc);
+ goto out;
+ }
+
+ /* write data */
+ rc = __fg_interleaved_mem_write(chip, val, address, offset, len);
+ if (rc) {
+ if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
+ count++;
+ pr_err("IMA access failed retry_count = %d\n", count);
+ goto retry;
+ } else {
+ pr_err("failed to write SRAM address rc = %d\n", rc);
+ goto out;
+ }
+ }
+
+out:
+ /* Release IMA access */
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
+ if (rc)
+ pr_err("failed to reset IMA access bit rc = %d\n", rc);
+
+ mutex_unlock(&chip->rw_lock);
+ fg_relax(&chip->memif_wakeup_source);
+ return rc;
+}
+
+static int fg_mem_read(struct fg_chip *chip, u8 *val, u16 address,
+ int len, int offset, bool keep_access)
+{
+ if (chip->ima_supported)
+ return fg_interleaved_mem_read(chip, val, address,
+ len, offset);
+ else
+ return fg_conventional_mem_read(chip, val, address,
+ len, offset, keep_access);
+}
+
+static int fg_mem_write(struct fg_chip *chip, u8 *val, u16 address,
+ int len, int offset, bool keep_access)
+{
+ if (chip->ima_supported)
+ return fg_interleaved_mem_write(chip, val, address,
+ len, offset);
+ else
+ return fg_conventional_mem_write(chip, val, address,
+ len, offset, keep_access);
+}
+
+static int fg_mem_masked_write(struct fg_chip *chip, u16 addr,
+ u8 mask, u8 val, u8 offset)
+{
+ int rc = 0;
+ u8 reg[4];
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+
+ rc = fg_mem_read(chip, reg, addr, 4, 0, 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n", addr, rc);
+ return rc;
+ }
+
+ reg[offset] &= ~mask;
+ reg[offset] |= val & mask;
+
+ str[0] = '\0';
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, reg, 4);
+ pr_debug("Writing %s address %03x, offset %d\n", str, addr, offset);
+
+ rc = fg_mem_write(chip, reg, addr, 4, 0, 0);
+ if (rc) {
+ pr_err("spmi write failed: addr=%03X, rc=%d\n", addr, rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int soc_to_setpoint(int soc)
+{
+ return DIV_ROUND_CLOSEST(soc * 255, 100);
+}
+
+static void batt_to_setpoint_adc(int vbatt_mv, u8 *data)
+{
+ int val;
+	/* Battery voltage is an offset from 0 V and the LSB is 5/2^15 V. */
+ val = DIV_ROUND_CLOSEST(vbatt_mv * 32768, 5000);
+ data[0] = val & 0xFF;
+ data[1] = val >> 8;
+ return;
+}
+
+static u8 batt_to_setpoint_8b(int vbatt_mv)
+{
+ int val;
+	/* Battery voltage is an offset from 2.5 V and the LSB is 5/2^9 V. */
+ val = (vbatt_mv - 2500) * 512 / 1000;
+ return DIV_ROUND_CLOSEST(val, 5);
+}
+
+static u8 therm_delay_to_setpoint(u32 delay_us)
+{
+ u8 val;
+
+ if (delay_us < 2560)
+ val = 0;
+ else if (delay_us > 163840)
+ val = 7;
+ else
+ val = ilog2(delay_us / 10) - 7;
+ return val << 5;
+}
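
The returned code lands in the top three bits, matching THERM_DELAY_MASK
(0xE0), and each code step doubles the delay. Worked examples derived from
the formula above (assumed inputs):

	therm_delay_to_setpoint(2000);		/* < 2560 us          -> 0x00 */
	therm_delay_to_setpoint(2560);		/* ilog2(256) - 7 = 1 -> 0x20 */
	therm_delay_to_setpoint(40960);		/* ilog2(4096) - 7 = 5 -> 0xA0 */
	therm_delay_to_setpoint(200000);	/* > 163840 us, clamped to 7 -> 0xE0 */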
+
+static int get_current_time(unsigned long *now_tm_sec)
+{
+ struct rtc_time tm;
+ struct rtc_device *rtc;
+ int rc;
+
+ rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
+ if (rtc == NULL) {
+ pr_err("%s: unable to open rtc device (%s)\n",
+ __FILE__, CONFIG_RTC_HCTOSYS_DEVICE);
+ return -EINVAL;
+ }
+
+ rc = rtc_read_time(rtc, &tm);
+ if (rc) {
+ pr_err("Error reading rtc device (%s) : %d\n",
+ CONFIG_RTC_HCTOSYS_DEVICE, rc);
+ goto close_time;
+ }
+
+ rc = rtc_valid_tm(&tm);
+ if (rc) {
+ pr_err("Invalid RTC time (%s): %d\n",
+ CONFIG_RTC_HCTOSYS_DEVICE, rc);
+ goto close_time;
+ }
+ rtc_tm_to_time(&tm, now_tm_sec);
+
+close_time:
+ rtc_class_close(rtc);
+ return rc;
+}
+
+#define BATTERY_SOC_REG 0x56C
+#define BATTERY_SOC_OFFSET 1
+#define FULL_PERCENT_3B 0xFFFFFF
+static int get_battery_soc_raw(struct fg_chip *chip)
+{
+ int rc;
+ u8 buffer[3];
+
+ rc = fg_mem_read(chip, buffer, BATTERY_SOC_REG, 3, 1, 0);
+ if (rc) {
+ pr_err("Unable to read battery soc: %d\n", rc);
+ return 0;
+ }
+ return (int)(buffer[2] << 16 | buffer[1] << 8 | buffer[0]);
+}
+
+#define COUNTER_IMPTR_REG 0X558
+#define COUNTER_PULSE_REG 0X55C
+#define SOC_FULL_REG 0x564
+#define COUNTER_IMPTR_OFFSET 2
+#define COUNTER_PULSE_OFFSET 0
+#define SOC_FULL_OFFSET 3
+#define ESR_PULSE_RECONFIG_SOC 0xFFF971
+static int fg_configure_soc(struct fg_chip *chip)
+{
+ u32 batt_soc;
+ u8 cntr[2] = {0, 0};
+ int rc = 0;
+
+ mutex_lock(&chip->rw_lock);
+ atomic_add_return(1, &chip->memif_user_cnt);
+ mutex_unlock(&chip->rw_lock);
+
+ /* Read Battery SOC */
+ batt_soc = get_battery_soc_raw(chip);
+
+ if (batt_soc > ESR_PULSE_RECONFIG_SOC) {
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+ pr_info("Configuring soc registers batt_soc: %x\n",
+ batt_soc);
+ batt_soc = ESR_PULSE_RECONFIG_SOC;
+ rc = fg_mem_write(chip, (u8 *)&batt_soc, BATTERY_SOC_REG, 3,
+ BATTERY_SOC_OFFSET, 1);
+ if (rc) {
+ pr_err("failed to write BATT_SOC rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = fg_mem_write(chip, (u8 *)&batt_soc, SOC_FULL_REG, 3,
+ SOC_FULL_OFFSET, 1);
+ if (rc) {
+ pr_err("failed to write SOC_FULL rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = fg_mem_write(chip, cntr, COUNTER_IMPTR_REG, 2,
+ COUNTER_IMPTR_OFFSET, 1);
+ if (rc) {
+ pr_err("failed to write COUNTER_IMPTR rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = fg_mem_write(chip, cntr, COUNTER_PULSE_REG, 2,
+ COUNTER_PULSE_OFFSET, 0);
+ if (rc)
+			pr_err("failed to write COUNTER_PULSE rc=%d\n", rc);
+ }
+out:
+ fg_release_access_if_necessary(chip);
+ return rc;
+}
+
+#define SOC_EMPTY BIT(3)
+static bool fg_is_batt_empty(struct fg_chip *chip)
+{
+ u8 fg_soc_sts;
+ int rc;
+
+ rc = fg_read(chip, &fg_soc_sts,
+ INT_RT_STS(chip->soc_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->soc_base), rc);
+ return false;
+ }
+
+ return (fg_soc_sts & SOC_EMPTY) != 0;
+}
+
+static int get_monotonic_soc_raw(struct fg_chip *chip)
+{
+ u8 cap[2];
+ int rc, tries = 0;
+
+ while (tries < MAX_TRIES_SOC) {
+ rc = fg_read(chip, cap,
+ chip->soc_base + SOC_MONOTONIC_SOC, 2);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03x, rc=%d\n",
+ chip->soc_base + SOC_MONOTONIC_SOC, rc);
+ return rc;
+ }
+
+ if (cap[0] == cap[1])
+ break;
+
+ tries++;
+ }
+
+ if (tries == MAX_TRIES_SOC) {
+ pr_err("shadow registers do not match\n");
+ return -EINVAL;
+ }
+
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+ pr_info_ratelimited("raw: 0x%02x\n", cap[0]);
+ return cap[0];
+}
+
+#define EMPTY_CAPACITY 0
+#define DEFAULT_CAPACITY 50
+#define MISSING_CAPACITY 100
+#define FULL_CAPACITY 100
+#define FULL_SOC_RAW 0xFF
+static int get_prop_capacity(struct fg_chip *chip)
+{
+ int msoc;
+
+ if (chip->battery_missing)
+ return MISSING_CAPACITY;
+ if (!chip->profile_loaded && !chip->use_otp_profile)
+ return DEFAULT_CAPACITY;
+ if (chip->charge_full)
+ return FULL_CAPACITY;
+ if (chip->soc_empty) {
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+ pr_info_ratelimited("capacity: %d, EMPTY\n",
+ EMPTY_CAPACITY);
+ return EMPTY_CAPACITY;
+ }
+ msoc = get_monotonic_soc_raw(chip);
+ if (msoc == 0)
+ return EMPTY_CAPACITY;
+ else if (msoc == FULL_SOC_RAW)
+ return FULL_CAPACITY;
+ return DIV_ROUND_CLOSEST((msoc - 1) * (FULL_CAPACITY - 2),
+ FULL_SOC_RAW - 2) + 1;
+}
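
Outside the special cases, the closing expression maps the raw monotonic SOC
range 1..254 linearly onto 1..99 percent, reserving 0 and 100 for the exact
empty and full codes. Worked examples (not in the original source):

	/*
	 * msoc =   1: DIV_ROUND_CLOSEST(  0 * 98, 253) + 1 =  1
	 * msoc = 128: DIV_ROUND_CLOSEST(127 * 98, 253) + 1 = 50
	 * msoc = 254: DIV_ROUND_CLOSEST(253 * 98, 253) + 1 = 99
	 */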
+
+#define HIGH_BIAS 3
+#define MED_BIAS BIT(1)
+#define LOW_BIAS BIT(0)
+static u8 bias_ua[] = {
+ [HIGH_BIAS] = 150,
+ [MED_BIAS] = 15,
+ [LOW_BIAS] = 5,
+};
+
+static int64_t get_batt_id(unsigned int battery_id_uv, u8 bid_info)
+{
+ u64 battery_id_ohm;
+
+ if ((bid_info & 0x3) == 0) {
+ pr_err("can't determine battery id 0x%02x\n", bid_info);
+ return -EINVAL;
+ }
+
+ battery_id_ohm = div_u64(battery_id_uv, bias_ua[bid_info & 0x3]);
+
+ return battery_id_ohm;
+}
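
Because the ID pin voltage is sampled in microvolts and the bias currents are
in microamps, the quotient comes out directly in ohms. A worked example with
an assumed reading:

	/* bid_info & 0x3 == LOW_BIAS selects the 5 uA source, so a */
	/* 100000 uV sample decodes to 100000 / 5 = 20000 ohm       */
	get_batt_id(100000, LOW_BIAS);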
+
+#define DEFAULT_TEMP_DEGC 250
+static int get_sram_prop_now(struct fg_chip *chip, unsigned int type)
+{
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+ pr_info("addr 0x%02X, offset %d value %d\n",
+ fg_data[type].address, fg_data[type].offset,
+ fg_data[type].value);
+
+ if (type == FG_DATA_BATT_ID)
+ return get_batt_id(fg_data[type].value,
+ fg_data[FG_DATA_BATT_ID_INFO].value);
+
+ return fg_data[type].value;
+}
+
+#define MIN_TEMP_DEGC -300
+#define MAX_TEMP_DEGC 970
+static int get_prop_jeita_temp(struct fg_chip *chip, unsigned int type)
+{
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+ pr_info("addr 0x%02X, offset %d\n", settings[type].address,
+ settings[type].offset);
+
+ return settings[type].value;
+}
+
+static int set_prop_jeita_temp(struct fg_chip *chip,
+ unsigned int type, int decidegc)
+{
+ int rc = 0;
+
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+		pr_info("addr 0x%02X, offset %d temp %d\n",
+ settings[type].address,
+ settings[type].offset, decidegc);
+
+ settings[type].value = decidegc;
+
+ cancel_delayed_work_sync(
+ &chip->update_jeita_setting);
+ schedule_delayed_work(
+ &chip->update_jeita_setting, 0);
+
+ return rc;
+}
+
+#define EXTERNAL_SENSE_SELECT 0x4AC
+#define EXTERNAL_SENSE_OFFSET 0x2
+#define EXTERNAL_SENSE_BIT BIT(2)
+static int set_prop_sense_type(struct fg_chip *chip, int ext_sense_type)
+{
+ int rc;
+
+ rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
+ EXTERNAL_SENSE_BIT,
+ ext_sense_type ? EXTERNAL_SENSE_BIT : 0,
+ EXTERNAL_SENSE_OFFSET);
+ if (rc) {
+ pr_err("failed to write profile rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+#define EXPONENT_MASK 0xF800
+#define MANTISSA_MASK 0x3FF
+#define SIGN BIT(10)
+#define EXPONENT_SHIFT 11
+#define MICRO_UNIT 1000000ULL
+static int64_t float_decode(u16 reg)
+{
+ int64_t final_val, exponent_val, mantissa_val;
+ int exponent, mantissa, n;
+ bool sign;
+
+ exponent = (reg & EXPONENT_MASK) >> EXPONENT_SHIFT;
+ mantissa = (reg & MANTISSA_MASK);
+ sign = !!(reg & SIGN);
+
+ pr_debug("exponent=%d mantissa=%d sign=%d\n", exponent, mantissa, sign);
+
+ mantissa_val = mantissa * MICRO_UNIT;
+
+ n = exponent - 15;
+ if (n < 0)
+ exponent_val = MICRO_UNIT >> -n;
+ else
+ exponent_val = MICRO_UNIT << n;
+
+ n = n - 10;
+ if (n < 0)
+ mantissa_val >>= -n;
+ else
+ mantissa_val <<= n;
+
+ final_val = exponent_val + mantissa_val;
+
+ if (sign)
+ final_val *= -1;
+
+ return final_val;
+}
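
In other words, float_decode() expands the half-precision-style register
(exponent in bits 15:11, sign in bit 10, mantissa in bits 9:0) to
(1 + mantissa/1024) * 2^(exponent - 15) in micro-units. A worked example with
an assumed register value:

	/*
	 * reg = 0x7A00:
	 *   exponent = (0x7A00 & 0xF800) >> 11 = 15 -> exponent_val = 1000000
	 *   sign     =  0x7A00 & 0x0400 = 0
	 *   mantissa =  0x7A00 & 0x03FF = 512 -> 512 * 1000000 >> 10 = 500000
	 * float_decode(0x7A00) = 1500000, i.e. 1.5 in micro-units
	 */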
+
+#define MIN_HALFFLOAT_EXP_N -15
+#define MAX_HALFFLOAT_EXP_N 16
+static int log2_floor(int64_t uval)
+{
+ int n = 0;
+ int64_t i = MICRO_UNIT;
+
+ if (uval > i) {
+ while (uval > i && n > MIN_HALFFLOAT_EXP_N) {
+ i <<= 1;
+ n += 1;
+ }
+ if (uval < i)
+ n -= 1;
+ } else if (uval < i) {
+ while (uval < i && n < MAX_HALFFLOAT_EXP_N) {
+ i >>= 1;
+ n -= 1;
+ }
+ }
+
+ return n;
+}
+
+static int64_t exp2_int(int64_t n)
+{
+ int p = n - 1;
+
+ if (p > 0)
+ return (2 * MICRO_UNIT) << p;
+ else
+ return (2 * MICRO_UNIT) >> abs(p);
+}
+
+static u16 float_encode(int64_t uval)
+{
+ int sign = 0, n, exp, mantissa;
+ u16 half = 0;
+
+ if (uval < 0) {
+ sign = 1;
+ uval = abs(uval);
+ }
+ n = log2_floor(uval);
+ exp = n + 15;
+ mantissa = div_s64(div_s64((uval - exp2_int(n)) * exp2_int(10 - n),
+ MICRO_UNIT) + MICRO_UNIT / 2, MICRO_UNIT);
+
+ half = (mantissa & MANTISSA_MASK) | ((sign << 10) & SIGN)
+ | ((exp << 11) & EXPONENT_MASK);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("uval = %lld, m = 0x%02x, sign = 0x%02x, exp = 0x%02x, half = 0x%04x\n",
+ uval, mantissa, sign, exp, half);
+ return half;
+}
+
+#define BATT_IDED BIT(3)
+static int fg_is_batt_id_valid(struct fg_chip *chip)
+{
+ u8 fg_batt_sts;
+ int rc;
+
+ rc = fg_read(chip, &fg_batt_sts,
+ INT_RT_STS(chip->batt_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->batt_base), rc);
+ return rc;
+ }
+
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("fg batt sts 0x%x\n", fg_batt_sts);
+
+ return (fg_batt_sts & BATT_IDED) ? 1 : 0;
+}
+
+static int64_t twos_compliment_extend(int64_t val, int nbytes)
+{
+ int i;
+ int64_t mask;
+
+ mask = 0x80LL << ((nbytes - 1) * 8);
+ if (val & mask) {
+ for (i = 8; i > nbytes; i--) {
+ mask = 0xFFLL << ((i - 1) * 8);
+ val |= mask;
+ }
+ }
+
+ return val;
+}
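
The helper sign-extends an nbytes-wide two's-complement value to the full
64-bit type by replicating the sign bit into the upper bytes. Examples for
nbytes = 3 (assumed inputs):

	twos_compliment_extend(0x7FFFFF, 3);	/* sign clear ->       8388607 */
	twos_compliment_extend(0xFFFFFF, 3);	/* -> 0xFF...FFFFFF =       -1 */
	twos_compliment_extend(0x800000, 3);	/* -> 0xFF...800000 = -8388608 */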
+
+#define LSB_24B_NUMRTR 596046
+#define LSB_24B_DENMTR 1000000
+#define LSB_16B_NUMRTR 152587
+#define LSB_16B_DENMTR 1000
+#define LSB_8B 9800
+#define TEMP_LSB_16B 625
+#define DECIKELVIN 2730
+#define SRAM_PERIOD_NO_ID_UPDATE_MS 100
+#define FULL_PERCENT_28BIT 0xFFFFFFF
+static void update_sram_data(struct fg_chip *chip, int *resched_ms)
+{
+ int i, j, rc = 0;
+ u8 reg[4];
+ int64_t temp;
+ int battid_valid = fg_is_batt_id_valid(chip);
+
+ fg_stay_awake(&chip->update_sram_wakeup_source);
+ if (chip->fg_restarting)
+ goto resched;
+
+ fg_mem_lock(chip);
+ for (i = 1; i < FG_DATA_MAX; i++) {
+ if (chip->profile_loaded && i >= FG_DATA_BATT_ID)
+ continue;
+ rc = fg_mem_read(chip, reg, fg_data[i].address,
+ fg_data[i].len, fg_data[i].offset, 0);
+ if (rc) {
+ pr_err("Failed to update sram data\n");
+ break;
+ }
+
+ temp = 0;
+ for (j = 0; j < fg_data[i].len; j++)
+ temp |= reg[j] << (8 * j);
+
+ switch (i) {
+ case FG_DATA_OCV:
+ case FG_DATA_VOLTAGE:
+ case FG_DATA_CPRED_VOLTAGE:
+ fg_data[i].value = div_u64(
+ (u64)(u16)temp * LSB_16B_NUMRTR,
+ LSB_16B_DENMTR);
+ break;
+ case FG_DATA_CURRENT:
+ temp = twos_compliment_extend(temp, fg_data[i].len);
+ fg_data[i].value = div_s64(
+ (s64)temp * LSB_16B_NUMRTR,
+ LSB_16B_DENMTR);
+ break;
+ case FG_DATA_BATT_ESR:
+ fg_data[i].value = float_decode((u16) temp);
+ break;
+ case FG_DATA_BATT_ESR_COUNT:
+ fg_data[i].value = (u16)temp;
+ break;
+ case FG_DATA_BATT_ID:
+ if (battid_valid)
+ fg_data[i].value = reg[0] * LSB_8B;
+ break;
+ case FG_DATA_BATT_ID_INFO:
+ if (battid_valid)
+ fg_data[i].value = reg[0];
+ break;
+ case FG_DATA_BATT_SOC:
+ fg_data[i].value = div64_s64((temp * 10000),
+ FULL_PERCENT_3B);
+ break;
+ case FG_DATA_CC_CHARGE:
+ temp = twos_compliment_extend(temp, fg_data[i].len);
+ fg_data[i].value = div64_s64(
+ temp * (int64_t)chip->nom_cap_uah,
+ FULL_PERCENT_28BIT);
+ break;
+ case FG_DATA_VINT_ERR:
+ temp = twos_compliment_extend(temp, fg_data[i].len);
+ fg_data[i].value = div64_s64(temp * chip->nom_cap_uah,
+ FULL_PERCENT_3B);
+ break;
+		}
+
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("%d %lld %d\n", i, temp, fg_data[i].value);
+ }
+ fg_mem_release(chip);
+
+ if (!rc)
+ get_current_time(&chip->last_sram_update_time);
+
+resched:
+ if (battid_valid) {
+ complete_all(&chip->batt_id_avail);
+ *resched_ms = fg_sram_update_period_ms;
+ } else {
+ *resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS;
+ }
+ fg_relax(&chip->update_sram_wakeup_source);
+}
+
+#define SRAM_TIMEOUT_MS 3000
+static void update_sram_data_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ update_sram_data.work);
+ int resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS, ret;
+ bool tried_again = false;
+
+wait:
+ /* Wait for MEMIF access revoked */
+ ret = wait_for_completion_interruptible_timeout(
+ &chip->sram_access_revoked,
+ msecs_to_jiffies(SRAM_TIMEOUT_MS));
+
+ /* If we were interrupted wait again one more time. */
+ if (ret == -ERESTARTSYS && !tried_again) {
+ tried_again = true;
+ goto wait;
+ } else if (ret <= 0) {
+ pr_err("transaction timed out ret=%d\n", ret);
+ goto out;
+ }
+ update_sram_data(chip, &resched_ms);
+
+out:
+ schedule_delayed_work(
+ &chip->update_sram_data,
+ msecs_to_jiffies(resched_ms));
+}
+
+#define BATT_TEMP_OFFSET 3
+#define BATT_TEMP_CNTRL_MASK 0x17
+#define DISABLE_THERM_BIT BIT(0)
+#define TEMP_SENSE_ALWAYS_BIT BIT(1)
+#define TEMP_SENSE_CHARGE_BIT BIT(2)
+#define FORCE_RBIAS_ON_BIT BIT(4)
+#define BATT_TEMP_OFF DISABLE_THERM_BIT
+#define BATT_TEMP_ON (FORCE_RBIAS_ON_BIT | TEMP_SENSE_ALWAYS_BIT | \
+ TEMP_SENSE_CHARGE_BIT)
+#define TEMP_PERIOD_UPDATE_MS 10000
+#define TEMP_PERIOD_TIMEOUT_MS 3000
+static void update_temp_data(struct work_struct *work)
+{
+ s16 temp;
+ u8 reg[2];
+ bool tried_again = false;
+ int rc, ret, timeout = TEMP_PERIOD_TIMEOUT_MS;
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ update_temp_work.work);
+
+ if (chip->fg_restarting)
+ goto resched;
+
+ fg_stay_awake(&chip->update_temp_wakeup_source);
+ if (chip->sw_rbias_ctrl) {
+ rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
+ BATT_TEMP_CNTRL_MASK,
+ BATT_TEMP_ON,
+ BATT_TEMP_OFFSET);
+ if (rc) {
+ pr_err("failed to write BATT_TEMP_ON rc=%d\n", rc);
+ goto out;
+ }
+
+wait:
+ /* Wait for MEMIF access revoked */
+ ret = wait_for_completion_interruptible_timeout(
+ &chip->sram_access_revoked,
+ msecs_to_jiffies(timeout));
+
+ /* If we were interrupted, wait once more. */
+ if (ret == -ERESTARTSYS && !tried_again) {
+ tried_again = true;
+ goto wait;
+ } else if (ret <= 0) {
+ rc = -ETIMEDOUT;
+ pr_err("transaction timed out ret=%d\n", ret);
+ goto out;
+ }
+ }
+
+ /* Read FG_DATA_BATT_TEMP now */
+ rc = fg_mem_read(chip, reg, fg_data[0].address,
+ fg_data[0].len, fg_data[0].offset,
+ chip->sw_rbias_ctrl ? 1 : 0);
+ if (rc) {
+ pr_err("Failed to update temp data\n");
+ goto out;
+ }
+
+ temp = reg[0] | (reg[1] << 8);
+ fg_data[0].value = (temp * TEMP_LSB_16B / 1000)
+ - DECIKELVIN;
+
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("BATT_TEMP %d %d\n", temp, fg_data[0].value);
+
+ get_current_time(&chip->last_temp_update_time);
+
+out:
+ if (chip->sw_rbias_ctrl) {
+ rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
+ BATT_TEMP_CNTRL_MASK,
+ BATT_TEMP_OFF,
+ BATT_TEMP_OFFSET);
+ if (rc)
+ pr_err("failed to write BATT_TEMP_OFF rc=%d\n", rc);
+ }
+ fg_relax(&chip->update_temp_wakeup_source);
+
+resched:
+ schedule_delayed_work(
+ &chip->update_temp_work,
+ msecs_to_jiffies(TEMP_PERIOD_UPDATE_MS));
+}
+
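+/*
+ * Program the four soft JEITA thresholds.  The settings[] values are
+ * in deci-degC; the SRAM encoding appears to be one byte per
+ * threshold holding (degC + 30), hence the /10 and +30 below.
+ */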
+static void update_jeita_setting(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ update_jeita_setting.work);
+ u8 reg[4];
+ int i, rc;
+
+ for (i = 0; i < 4; i++)
+ reg[i] = (settings[FG_MEM_SOFT_COLD + i].value / 10) + 30;
+
+ rc = fg_mem_write(chip, reg, settings[FG_MEM_SOFT_COLD].address,
+ 4, settings[FG_MEM_SOFT_COLD].offset, 0);
+ if (rc)
+ pr_err("failed to update JEITA setting rc=%d\n", rc);
+}
+
+static int fg_set_resume_soc(struct fg_chip *chip, u8 threshold)
+{
+ u16 address;
+ int offset, rc;
+
+ address = settings[FG_MEM_RESUME_SOC].address;
+ offset = settings[FG_MEM_RESUME_SOC].offset;
+
+ rc = fg_mem_masked_write(chip, address, 0xFF, threshold, offset);
+
+ if (rc)
+ pr_err("write failed rc=%d\n", rc);
+ else
+ pr_debug("setting resume-soc to %x\n", threshold);
+
+ return rc;
+}
+
+#define VBATT_LOW_STS_BIT BIT(2)
+static int fg_get_vbatt_status(struct fg_chip *chip, bool *vbatt_low_sts)
+{
+ int rc = 0;
+ u8 fg_batt_sts;
+
+ rc = fg_read(chip, &fg_batt_sts, INT_RT_STS(chip->batt_base), 1);
+ if (!rc)
+ *vbatt_low_sts = !!(fg_batt_sts & VBATT_LOW_STS_BIT);
+ return rc;
+}
+
+#define BATT_CYCLE_NUMBER_REG 0x5E8
+#define BATT_CYCLE_OFFSET 0
+static void restore_cycle_counter(struct fg_chip *chip)
+{
+ int rc = 0, i, address;
+ u8 data[2];
+
+ fg_mem_lock(chip);
+ for (i = 0; i < BUCKET_COUNT; i++) {
+ address = BATT_CYCLE_NUMBER_REG + i * 2;
+ rc = fg_mem_read(chip, (u8 *)&data, address, 2,
+ BATT_CYCLE_OFFSET, 0);
+ if (rc)
+ pr_err("Failed to read BATT_CYCLE_NUMBER[%d] rc: %d\n",
+ i, rc);
+ else
+ chip->cyc_ctr.count[i] = data[0] | data[1] << 8;
+ }
+ fg_mem_release(chip);
+}
+
+static void clear_cycle_counter(struct fg_chip *chip)
+{
+ int rc = 0, len, i;
+
+ if (!chip->cyc_ctr.en)
+ return;
+
+ len = sizeof(chip->cyc_ctr.count);
+ memset(chip->cyc_ctr.count, 0, len);
+ for (i = 0; i < BUCKET_COUNT; i++) {
+ chip->cyc_ctr.started[i] = false;
+ chip->cyc_ctr.last_soc[i] = 0;
+ }
+ rc = fg_mem_write(chip, (u8 *)&chip->cyc_ctr.count,
+ BATT_CYCLE_NUMBER_REG, len,
+ BATT_CYCLE_OFFSET, 0);
+ if (rc)
+ pr_err("failed to write BATT_CYCLE_NUMBER rc=%d\n", rc);
+}
+
+static int fg_inc_store_cycle_ctr(struct fg_chip *chip, int bucket)
+{
+ int rc = 0, address;
+ u16 cyc_count;
+ u8 data[2];
+
+ if (bucket < 0 || (bucket > BUCKET_COUNT - 1))
+ return 0;
+
+ cyc_count = chip->cyc_ctr.count[bucket];
+ cyc_count++;
+ data[0] = cyc_count & 0xFF;
+ data[1] = cyc_count >> 8;
+
+ address = BATT_CYCLE_NUMBER_REG + bucket * 2;
+
+ rc = fg_mem_write(chip, data, address, 2, BATT_CYCLE_OFFSET, 0);
+ if (rc)
+ pr_err("failed to write BATT_CYCLE_NUMBER[%d] rc=%d\n",
+ bucket, rc);
+ else
+ chip->cyc_ctr.count[bucket] = cyc_count;
+ return rc;
+}
+
+static void update_cycle_count(struct work_struct *work)
+{
+ int rc = 0, bucket, i;
+ u8 reg[3], batt_soc;
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ cycle_count_work);
+
+ mutex_lock(&chip->cyc_ctr.lock);
+ rc = fg_mem_read(chip, reg, BATTERY_SOC_REG, 3,
+ BATTERY_SOC_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read battery soc rc: %d\n", rc);
+ goto out;
+ }
+ batt_soc = reg[2];
+
+ if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ /* Find out which bucket the SOC falls in */
+ bucket = batt_soc / BUCKET_SOC_PCT;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("batt_soc: %x bucket: %d\n", reg[2], bucket);
+
+ /*
+ * If counting was in progress for the previous bucket and
+ * the current bucket is only now starting, store the
+ * previous bucket's counter.
+ */
+ if (bucket > 0 && chip->cyc_ctr.started[bucket - 1] &&
+ !chip->cyc_ctr.started[bucket]) {
+ rc = fg_inc_store_cycle_ctr(chip, bucket - 1);
+ if (rc) {
+ pr_err("Error in storing cycle_ctr rc: %d\n",
+ rc);
+ goto out;
+ } else {
+ chip->cyc_ctr.started[bucket - 1] = false;
+ chip->cyc_ctr.last_soc[bucket - 1] = 0;
+ }
+ }
+ if (!chip->cyc_ctr.started[bucket]) {
+ chip->cyc_ctr.started[bucket] = true;
+ chip->cyc_ctr.last_soc[bucket] = batt_soc;
+ }
+ } else {
+ for (i = 0; i < BUCKET_COUNT; i++) {
+ if (chip->cyc_ctr.started[i] &&
+ batt_soc > chip->cyc_ctr.last_soc[i]) {
+ rc = fg_inc_store_cycle_ctr(chip, i);
+ if (rc)
+ pr_err("Error in storing cycle_ctr rc: %d\n",
+ rc);
+ chip->cyc_ctr.last_soc[i] = 0;
+ }
+ chip->cyc_ctr.started[i] = false;
+ }
+ }
+out:
+ mutex_unlock(&chip->cyc_ctr.lock);
+}
+
+static int fg_get_cycle_count(struct fg_chip *chip)
+{
+ int count;
+
+ if (!chip->cyc_ctr.en)
+ return 0;
+
+ if ((chip->cyc_ctr.id <= 0) || (chip->cyc_ctr.id > BUCKET_COUNT))
+ return -EINVAL;
+
+ mutex_lock(&chip->cyc_ctr.lock);
+ count = chip->cyc_ctr.count[chip->cyc_ctr.id - 1];
+ mutex_unlock(&chip->cyc_ctr.lock);
+ return count;
+}
+
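+/*
+ * Helpers for the 16-bit floating point ("half float") encoding used
+ * by several fuel gauge SRAM registers: half_float_to_buffer() packs
+ * an encoded value little-endian into two bytes, and half_float()
+ * decodes two bytes back into a plain 64-bit integer.
+ */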
+static void half_float_to_buffer(int64_t uval, u8 *buffer)
+{
+ u16 raw;
+
+ raw = float_encode(uval);
+ buffer[0] = (u8)(raw & 0xFF);
+ buffer[1] = (u8)((raw >> 8) & 0xFF);
+}
+
+static int64_t half_float(u8 *buffer)
+{
+ u16 val;
+
+ val = buffer[1] << 8 | buffer[0];
+ return float_decode(val);
+}
+
+static int voltage_2b(u8 *buffer)
+{
+ u16 val;
+
+ val = buffer[1] << 8 | buffer[0];
+ /* the range of voltage 2b is [-5V, 5V], so it will fit in an int */
+ return (int)div_u64(((u64)val) * LSB_16B_NUMRTR, LSB_16B_DENMTR);
+}
+
+static int bcap_uah_2b(u8 *buffer)
+{
+ u16 val;
+
+ val = buffer[1] << 8 | buffer[0];
+ return ((int)val) * 1000;
+}
+
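+/*
+ * Evaluate the profile's piecewise-cubic OCV curve.  soc is in
+ * tenths of a percent (0-1000); the segment is selected by the two
+ * junction percentages, and each segment computes
+ *   ocv = c0 + c1*soc/10^3 + c2*soc^2/10^6 + c3*soc^3/10^9
+ */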
+static int lookup_ocv_for_soc(struct fg_chip *chip, int soc)
+{
+ int64_t *coeffs;
+
+ if (soc > chip->ocv_junction_p1p2 * 10)
+ coeffs = chip->ocv_coeffs;
+ else if (soc > chip->ocv_junction_p2p3 * 10)
+ coeffs = chip->ocv_coeffs + 4;
+ else
+ coeffs = chip->ocv_coeffs + 8;
+ /* the range of ocv will fit in a 32 bit int */
+ return (int)(coeffs[0]
+ + div_s64(coeffs[1] * soc, 1000LL)
+ + div_s64(coeffs[2] * soc * soc, 1000000LL)
+ + div_s64(coeffs[3] * soc * soc * soc, 1000000000LL));
+}
+
+static int lookup_soc_for_ocv(struct fg_chip *chip, int ocv)
+{
+ int64_t val;
+ int soc = -EINVAL;
+ /*
+ * binary search variables representing the valid start and end
+ * percentages to search
+ */
+ int start = 0, end = 1000, mid;
+
+ if (fg_debug_mask & FG_AGING)
+ pr_info("target_ocv = %d\n", ocv);
+ /* do a binary search for the closest soc to match the ocv */
+ while (end - start > 1) {
+ mid = (start + end) / 2;
+ val = lookup_ocv_for_soc(chip, mid);
+ if (fg_debug_mask & FG_AGING)
+ pr_info("start = %d, mid = %d, end = %d, ocv = %lld\n",
+ start, mid, end, val);
+ if (ocv < val) {
+ end = mid;
+ } else if (ocv > val) {
+ start = mid;
+ } else {
+ soc = mid;
+ break;
+ }
+ }
+ /*
+ * if the exact soc was not found and there are two or less values
+ * remaining, just compare them and see which one is closest to the ocv
+ */
+ if (soc == -EINVAL) {
+ if (abs(ocv - lookup_ocv_for_soc(chip, start))
+ > abs(ocv - lookup_ocv_for_soc(chip, end)))
+ soc = end;
+ else
+ soc = start;
+ }
+ if (fg_debug_mask & FG_AGING)
+ pr_info("closest = %d, target_ocv = %d, ocv_found = %d\n",
+ soc, ocv, lookup_ocv_for_soc(chip, soc));
+ return soc;
+}
+
+#define ESR_ACTUAL_REG 0x554
+#define BATTERY_ESR_REG 0x4F4
+#define TEMP_RS_TO_RSLOW_REG 0x514
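+/*
+ * ESR-based aging estimate, in outline: compute the cutoff OCV once
+ * with the profile's ESR and once with the measured (aged) ESR, as
+ *   ocv = cutoff_voltage + I_eval * esr * (1 + rs_to_rslow)
+ * (rs_to_rslow is stored in ppm, with matching unit scaling in the
+ * code).  Both OCVs are mapped back to SOC through the OCV curve and
+ * the SOC difference is treated as newly unusable capacity:
+ *   actual = nom_cap_uah * (1000 - unusable_soc) / 1000
+ * The estimate is skipped outside a trustworthy window of battery
+ * temperature (15.0 to 40.0 degC) and SOC (25 to 75 percent).
+ */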
+static int estimate_battery_age(struct fg_chip *chip, int *actual_capacity)
+{
+ int64_t ocv_cutoff_new, ocv_cutoff_aged, temp_rs_to_rslow;
+ int64_t esr_actual, battery_esr, val;
+ int soc_cutoff_aged, soc_cutoff_new, rc;
+ int battery_soc, unusable_soc, batt_temp;
+ u8 buffer[3];
+
+ if (chip->batt_aging_mode != FG_AGING_ESR)
+ return 0;
+
+ if (chip->nom_cap_uah == 0) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("ocv coefficients not loaded, aborting\n");
+ return 0;
+ }
+ fg_mem_lock(chip);
+
+ batt_temp = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
+ if (batt_temp < 150 || batt_temp > 400) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("Battery temp (%d) out of range, aborting\n",
+ (int)batt_temp);
+ rc = 0;
+ goto done;
+ }
+
+ battery_soc = get_battery_soc_raw(chip) * 100 / FULL_PERCENT_3B;
+ if (battery_soc < 25 || battery_soc > 75) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("Battery SoC (%d) out of range, aborting\n",
+ (int)battery_soc);
+ rc = 0;
+ goto done;
+ }
+
+ rc = fg_mem_read(chip, buffer, ESR_ACTUAL_REG, 2, 2, 0);
+ esr_actual = half_float(buffer);
+ rc |= fg_mem_read(chip, buffer, BATTERY_ESR_REG, 2, 2, 0);
+ battery_esr = half_float(buffer);
+
+ if (rc) {
+ goto error_done;
+ } else if (esr_actual < battery_esr) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("Batt ESR lower than ESR actual, aborting\n");
+ rc = 0;
+ goto done;
+ }
+ rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2, 0, 0);
+ temp_rs_to_rslow = half_float(buffer);
+
+ if (rc)
+ goto error_done;
+
+ fg_mem_release(chip);
+
+ if (fg_debug_mask & FG_AGING) {
+ pr_info("batt_soc = %d, cutoff_voltage = %lld, eval current = %d\n",
+ battery_soc, chip->cutoff_voltage,
+ chip->evaluation_current);
+ pr_info("temp_rs_to_rslow = %lld, batt_esr = %lld, esr_actual = %lld\n",
+ temp_rs_to_rslow, battery_esr, esr_actual);
+ }
+
+ /* calculate soc_cutoff_new */
+ val = (1000000LL + temp_rs_to_rslow) * battery_esr;
+ do_div(val, 1000000);
+ ocv_cutoff_new = div64_s64(chip->evaluation_current * val, 1000)
+ + chip->cutoff_voltage;
+
+ /* calculate soc_cutoff_aged */
+ val = (1000000LL + temp_rs_to_rslow) * esr_actual;
+ do_div(val, 1000000);
+ ocv_cutoff_aged = div64_s64(chip->evaluation_current * val, 1000)
+ + chip->cutoff_voltage;
+
+ if (fg_debug_mask & FG_AGING)
+ pr_info("ocv_cutoff_new = %lld, ocv_cutoff_aged = %lld\n",
+ ocv_cutoff_new, ocv_cutoff_aged);
+
+ soc_cutoff_new = lookup_soc_for_ocv(chip, ocv_cutoff_new);
+ soc_cutoff_aged = lookup_soc_for_ocv(chip, ocv_cutoff_aged);
+
+ if (fg_debug_mask & FG_AGING)
+ pr_info("aged soc = %d, new soc = %d\n",
+ soc_cutoff_aged, soc_cutoff_new);
+ unusable_soc = soc_cutoff_aged - soc_cutoff_new;
+
+ *actual_capacity = div64_s64(((int64_t)chip->nom_cap_uah)
+ * (1000 - unusable_soc), 1000);
+ if (fg_debug_mask & FG_AGING)
+ pr_info("nom cap = %d, actual cap = %d\n",
+ chip->nom_cap_uah, *actual_capacity);
+
+ return rc;
+
+error_done:
+ pr_err("some register reads failed: %d\n", rc);
+done:
+ fg_mem_release(chip);
+ return rc;
+}
+
+static void battery_age_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ battery_age_work);
+
+ estimate_battery_age(chip, &chip->actual_cap_uah);
+}
+
+static enum power_supply_property fg_power_props[] = {
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_RAW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_OCV,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
+ POWER_SUPPLY_PROP_CHARGE_NOW_ERROR,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_COOL_TEMP,
+ POWER_SUPPLY_PROP_WARM_TEMP,
+ POWER_SUPPLY_PROP_RESISTANCE,
+ POWER_SUPPLY_PROP_RESISTANCE_ID,
+ POWER_SUPPLY_PROP_BATTERY_TYPE,
+ POWER_SUPPLY_PROP_UPDATE_NOW,
+ POWER_SUPPLY_PROP_ESR_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
+ POWER_SUPPLY_PROP_HI_POWER,
+};
+
+static int fg_power_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct fg_chip *chip = power_supply_get_drvdata(psy);
+ bool vbatt_low_sts;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_BATTERY_TYPE:
+ if (chip->battery_missing)
+ val->strval = missing_batt_type;
+ else if (chip->fg_restarting)
+ val->strval = loading_batt_type;
+ else
+ val->strval = chip->batt_type;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = get_prop_capacity(chip);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY_RAW:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_SOC);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW_ERROR:
+ val->intval = get_sram_prop_now(chip, FG_DATA_VINT_ERR);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = get_sram_prop_now(chip, FG_DATA_CURRENT);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = get_sram_prop_now(chip, FG_DATA_VOLTAGE);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+ val->intval = get_sram_prop_now(chip, FG_DATA_OCV);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = chip->batt_max_voltage_uv;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
+ break;
+ case POWER_SUPPLY_PROP_COOL_TEMP:
+ val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_COLD);
+ break;
+ case POWER_SUPPLY_PROP_WARM_TEMP:
+ val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_HOT);
+ break;
+ case POWER_SUPPLY_PROP_RESISTANCE:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR);
+ break;
+ case POWER_SUPPLY_PROP_ESR_COUNT:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR_COUNT);
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT:
+ val->intval = fg_get_cycle_count(chip);
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+ val->intval = chip->cyc_ctr.id;
+ break;
+ case POWER_SUPPLY_PROP_RESISTANCE_ID:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ID);
+ break;
+ case POWER_SUPPLY_PROP_UPDATE_NOW:
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ if (!fg_get_vbatt_status(chip, &vbatt_low_sts))
+ val->intval = (int)vbatt_low_sts;
+ else
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = chip->nom_cap_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = chip->learning_data.learned_cc_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ val->intval = chip->learning_data.cc_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
+ val->intval = get_sram_prop_now(chip, FG_DATA_CC_CHARGE);
+ break;
+ case POWER_SUPPLY_PROP_HI_POWER:
+ val->intval = !!chip->bcl_lpm_disabled;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
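+/*
+ * The fuel gauge accumulates the filtered battery current once per
+ * ~1.47 s cycle.  correction_times[] lists the elapsed-time
+ * boundaries (in ms) for 1 to 32 cycles, and correction_factors[]
+ * holds the matching multipliers (1000000 == 1.0) applied in
+ * iavg_3b_to_uah() below.
+ */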
+static int correction_times[] = {
+ 1470,
+ 2940,
+ 4410,
+ 5880,
+ 7350,
+ 8820,
+ 10290,
+ 11760,
+ 13230,
+ 14700,
+ 16170,
+ 17640,
+ 19110,
+ 20580,
+ 22050,
+ 23520,
+ 24990,
+ 26460,
+ 27930,
+ 29400,
+ 30870,
+ 32340,
+ 33810,
+ 35280,
+ 36750,
+ 38220,
+ 39690,
+ 41160,
+ 42630,
+ 44100,
+ 45570,
+ 47040,
+};
+
+static int correction_factors[] = {
+ 1000000,
+ 1007874,
+ 1015789,
+ 1023745,
+ 1031742,
+ 1039780,
+ 1047859,
+ 1055979,
+ 1064140,
+ 1072342,
+ 1080584,
+ 1088868,
+ 1097193,
+ 1105558,
+ 1113964,
+ 1122411,
+ 1130899,
+ 1139427,
+ 1147996,
+ 1156606,
+ 1165256,
+ 1173947,
+ 1182678,
+ 1191450,
+ 1200263,
+ 1209115,
+ 1218008,
+ 1226942,
+ 1235915,
+ 1244929,
+ 1253983,
+ 1263076,
+};
+
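+/*
+ * Convert the signed 24-bit I_FILTERED accumulator into uAh: pick
+ * the correction factor for the number of elapsed fuel gauge cycles,
+ * then compute
+ *   cc_uah = i_filtered * correction_factor / FG_CONVERSION_FACTOR
+ * where adding FG_CONVERSION_FACTOR / 2 before the divide rounds to
+ * nearest.
+ */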
+#define FG_CONVERSION_FACTOR (64198531LL)
+static int iavg_3b_to_uah(u8 *buffer, int delta_ms)
+{
+ int64_t val, i_filtered;
+ int i, correction_factor;
+
+ for (i = 0; i < ARRAY_SIZE(correction_times); i++) {
+ if (correction_times[i] > delta_ms)
+ break;
+ }
+ if (i >= ARRAY_SIZE(correction_times)) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("fuel gauge took more than 32 cycles\n");
+ i = ARRAY_SIZE(correction_times) - 1;
+ }
+ correction_factor = correction_factors[i];
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("delta_ms = %d, cycles = %d, correction = %d\n",
+ delta_ms, i, correction_factor);
+ val = buffer[2] << 16 | buffer[1] << 8 | buffer[0];
+ /* convert val from signed 24b to signed 64b */
+ i_filtered = (val << 40) >> 40;
+ val = i_filtered * correction_factor;
+ val = div64_s64(val + FG_CONVERSION_FACTOR / 2, FG_CONVERSION_FACTOR);
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("i_filtered = 0x%llx/%lld, cc_uah = %lld\n",
+ i_filtered, i_filtered, val);
+
+ return val;
+}
+
+static bool fg_is_temperature_ok_for_learning(struct fg_chip *chip)
+{
+ int batt_temp = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
+
+ if (batt_temp > chip->learning_data.max_temp
+ || batt_temp < chip->learning_data.min_temp) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("temp (%d) out of range [%d, %d], aborting\n",
+ batt_temp,
+ chip->learning_data.min_temp,
+ chip->learning_data.max_temp);
+ return false;
+ }
+ return true;
+}
+
+static void fg_cap_learning_stop(struct fg_chip *chip)
+{
+ chip->learning_data.cc_uah = 0;
+ chip->learning_data.active = false;
+}
+
+#define I_FILTERED_REG 0x584
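+/*
+ * Periodic (alarm-driven) capacity learning step for the non-CC_SOC
+ * path: read and clear the I_FILTERED accumulator, convert the
+ * interval's charge to uAh and subtract it from the running cc_uah
+ * estimate.  When USE_CC_SOC_REG is in effect this work only drops
+ * its wakeup source, since learning is driven from CC_SOC instead.
+ */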
+static void fg_cap_learning_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ fg_cap_learning_work);
+ u8 i_filtered[3], data[3];
+ int rc, cc_uah, delta_ms;
+ ktime_t now_kt, delta_kt;
+
+ mutex_lock(&chip->learning_data.learning_lock);
+ if (!chip->learning_data.active)
+ goto fail;
+ if (!fg_is_temperature_ok_for_learning(chip)) {
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+
+ if (chip->wa_flag & USE_CC_SOC_REG) {
+ mutex_unlock(&chip->learning_data.learning_lock);
+ fg_relax(&chip->capacity_learning_wakeup_source);
+ return;
+ }
+
+ fg_mem_lock(chip);
+
+ rc = fg_mem_read(chip, i_filtered, I_FILTERED_REG, 3, 0, 0);
+ if (rc) {
+ pr_err("Failed to read i_filtered: %d\n", rc);
+ fg_mem_release(chip);
+ goto fail;
+ }
+ memset(data, 0, 3);
+ rc = fg_mem_write(chip, data, I_FILTERED_REG, 3, 0, 0);
+ if (rc) {
+ pr_err("Failed to clear i_filtered: %d\n", rc);
+ fg_mem_release(chip);
+ goto fail;
+ }
+ fg_mem_release(chip);
+
+ now_kt = ktime_get_boottime();
+ delta_kt = ktime_sub(now_kt, chip->learning_data.time_stamp);
+ chip->learning_data.time_stamp = now_kt;
+
+ delta_ms = (int)div64_s64(ktime_to_ns(delta_kt), 1000000);
+
+ cc_uah = iavg_3b_to_uah(i_filtered, delta_ms);
+ chip->learning_data.cc_uah -= cc_uah;
+ if (fg_debug_mask & FG_AGING)
+ pr_info("total_cc_uah = %lld\n", chip->learning_data.cc_uah);
+
+fail:
+ mutex_unlock(&chip->learning_data.learning_lock);
+}
+
+#define CC_SOC_BASE_REG 0x5BC
+#define CC_SOC_OFFSET 3
+#define CC_SOC_MAGNITUDE_MASK 0x1FFFFFFF
+#define CC_SOC_NEGATIVE_BIT BIT(29)
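+/*
+ * CC_SOC is read as a 30-bit quantity: BIT(29) appears to flag a
+ * negative value and the low 29 bits hold the magnitude, with full
+ * scale (100 percent of charge) at FULL_PERCENT_28BIT.
+ */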
+static int fg_get_cc_soc(struct fg_chip *chip, int *cc_soc)
+{
+ int rc;
+ u8 reg[4];
+ unsigned int temp, magnitude;
+
+ rc = fg_mem_read(chip, reg, CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read CC_SOC_REG rc=%d\n", rc);
+ return rc;
+ }
+
+ temp = reg[3] << 24 | reg[2] << 16 | reg[1] << 8 | reg[0];
+ magnitude = temp & CC_SOC_MAGNITUDE_MASK;
+ if (temp & CC_SOC_NEGATIVE_BIT)
+ *cc_soc = -1 * (~magnitude + 1);
+ else
+ *cc_soc = magnitude;
+
+ return 0;
+}
+
+static int fg_cap_learning_process_full_data(struct fg_chip *chip)
+{
+ int cc_pc_val, rc = -EINVAL;
+ unsigned int cc_soc_delta_pc;
+ int64_t delta_cc_uah;
+
+ if (!chip->learning_data.active)
+ goto fail;
+
+ if (!fg_is_temperature_ok_for_learning(chip)) {
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+
+ rc = fg_get_cc_soc(chip, &cc_pc_val);
+ if (rc) {
+ pr_err("failed to get CC_SOC, stopping capacity learning\n");
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+
+ cc_soc_delta_pc = DIV_ROUND_CLOSEST(
+ abs(cc_pc_val - chip->learning_data.init_cc_pc_val)
+ * 100, FULL_PERCENT_28BIT);
+
+ delta_cc_uah = div64_s64(
+ chip->learning_data.learned_cc_uah * cc_soc_delta_pc,
+ 100);
+ chip->learning_data.cc_uah = delta_cc_uah + chip->learning_data.cc_uah;
+
+ if (fg_debug_mask & FG_AGING)
+ pr_info("current cc_soc=%d cc_soc_pc=%d total_cc_uah = %lld\n",
+ cc_pc_val, cc_soc_delta_pc,
+ chip->learning_data.cc_uah);
+
+ return 0;
+
+fail:
+ return rc;
+}
+
+#define FG_CAP_LEARNING_INTERVAL_NS 30000000000
+static enum alarmtimer_restart fg_cap_learning_alarm_cb(struct alarm *alarm,
+ ktime_t now)
+{
+ struct fg_chip *chip = container_of(alarm, struct fg_chip,
+ fg_cap_learning_alarm);
+
+ if (chip->learning_data.active) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("alarm fired\n");
+ schedule_work(&chip->fg_cap_learning_work);
+ alarm_forward_now(alarm,
+ ns_to_ktime(FG_CAP_LEARNING_INTERVAL_NS));
+ return ALARMTIMER_RESTART;
+ }
+ if (fg_debug_mask & FG_AGING)
+ pr_info("alarm misfired\n");
+ return ALARMTIMER_NORESTART;
+}
+
+#define FG_AGING_STORAGE_REG 0x5E4
+#define ACTUAL_CAPACITY_REG 0x578
+#define MAH_TO_SOC_CONV_REG 0x4A0
+#define CC_SOC_COEFF_OFFSET 0
+#define ACTUAL_CAPACITY_OFFSET 2
+#define MAH_TO_SOC_CONV_CS_OFFSET 0
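+/*
+ * Store the learned capacity and refresh the dependent CC-SOC
+ * conversion coefficient,
+ *   cc_to_soc_coeff = mah_to_soc * MICRO_UNIT / cc_mah
+ * which is written back to SRAM as a 16-bit half float.
+ */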
+static int fg_calc_and_store_cc_soc_coeff(struct fg_chip *chip, int16_t cc_mah)
+{
+ int rc;
+ int64_t cc_to_soc_coeff, mah_to_soc;
+ u8 data[2];
+
+ rc = fg_mem_write(chip, (u8 *)&cc_mah, ACTUAL_CAPACITY_REG, 2,
+ ACTUAL_CAPACITY_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to store actual capacity: %d\n", rc);
+ return rc;
+ }
+
+ rc = fg_mem_read(chip, (u8 *)&data, MAH_TO_SOC_CONV_REG, 2,
+ MAH_TO_SOC_CONV_CS_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read mah_to_soc_conv_cs: %d\n", rc);
+ } else {
+ mah_to_soc = data[1] << 8 | data[0];
+ mah_to_soc *= MICRO_UNIT;
+ cc_to_soc_coeff = div64_s64(mah_to_soc, cc_mah);
+ half_float_to_buffer(cc_to_soc_coeff, data);
+ rc = fg_mem_write(chip, (u8 *)data,
+ ACTUAL_CAPACITY_REG, 2,
+ CC_SOC_COEFF_OFFSET, 0);
+ if (rc)
+ pr_err("Failed to write cc_soc_coeff_offset: %d\n",
+ rc);
+ else if (fg_debug_mask & FG_AGING)
+ pr_info("new cc_soc_coeff %lld [%x %x] saved to sram\n",
+ cc_to_soc_coeff, data[0], data[1]);
+ }
+ return rc;
+}
+
+static void fg_cap_learning_load_data(struct fg_chip *chip)
+{
+ int16_t cc_mah;
+ int64_t old_cap = chip->learning_data.learned_cc_uah;
+ int rc;
+
+ rc = fg_mem_read(chip, (u8 *)&cc_mah, FG_AGING_STORAGE_REG, 2, 0, 0);
+ if (rc) {
+ pr_err("Failed to load aged capacity: %d\n", rc);
+ } else {
+ chip->learning_data.learned_cc_uah = cc_mah * 1000;
+ if (fg_debug_mask & FG_AGING)
+ pr_info("learned capacity %lld-> %lld/%x uah\n",
+ old_cap,
+ chip->learning_data.learned_cc_uah,
+ cc_mah);
+ }
+}
+
+static void fg_cap_learning_save_data(struct fg_chip *chip)
+{
+ int16_t cc_mah;
+ int rc;
+
+ cc_mah = div64_s64(chip->learning_data.learned_cc_uah, 1000);
+
+ rc = fg_mem_write(chip, (u8 *)&cc_mah, FG_AGING_STORAGE_REG, 2, 0, 0);
+ if (rc)
+ pr_err("Failed to store aged capacity: %d\n", rc);
+ else if (fg_debug_mask & FG_AGING)
+ pr_info("learned capacity %lld uah (%d/0x%x uah) saved to sram\n",
+ chip->learning_data.learned_cc_uah,
+ cc_mah, cc_mah);
+
+ if (chip->learning_data.feedback_on) {
+ rc = fg_calc_and_store_cc_soc_coeff(chip, cc_mah);
+ if (rc)
+ pr_err("Error in storing cc_soc_coeff, rc:%d\n", rc);
+ }
+}
+
+static void fg_cap_learning_post_process(struct fg_chip *chip)
+{
+ int64_t max_inc_val, min_dec_val, old_cap;
+
+ max_inc_val = chip->learning_data.learned_cc_uah
+ * (1000 + chip->learning_data.max_increment);
+ do_div(max_inc_val, 1000);
+
+ min_dec_val = chip->learning_data.learned_cc_uah
+ * (1000 - chip->learning_data.max_decrement);
+ do_div(min_dec_val, 1000);
+
+ old_cap = chip->learning_data.learned_cc_uah;
+ if (chip->learning_data.cc_uah > max_inc_val)
+ chip->learning_data.learned_cc_uah = max_inc_val;
+ else if (chip->learning_data.cc_uah < min_dec_val)
+ chip->learning_data.learned_cc_uah = min_dec_val;
+ else
+ chip->learning_data.learned_cc_uah =
+ chip->learning_data.cc_uah;
+
+ fg_cap_learning_save_data(chip);
+ if (fg_debug_mask & FG_AGING)
+ pr_info("final cc_uah = %lld, learned capacity %lld -> %lld uah\n",
+ chip->learning_data.cc_uah,
+ old_cap, chip->learning_data.learned_cc_uah);
+}
+
+static int get_vbat_est_diff(struct fg_chip *chip)
+{
+ return abs(fg_data[FG_DATA_VOLTAGE].value
+ - fg_data[FG_DATA_CPRED_VOLTAGE].value);
+}
+
+#define CBITS_INPUT_FILTER_REG 0x4B4
+#define IBATTF_TAU_MASK 0x38
+#define IBATTF_TAU_99_S 0x30
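+/*
+ * Capacity learning state machine, in outline: learning starts when
+ * charging begins with the SOC at or below max_start_soc and the
+ * temperature in range, seeding cc_uah from learned_cc_uah scaled by
+ * the current SOC.  If charging ends at POWER_SUPPLY_STATUS_FULL the
+ * result is post-processed (clamped to max_increment/max_decrement)
+ * and saved; any other exit simply cancels the learning cycle.
+ */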
+static int fg_cap_learning_check(struct fg_chip *chip)
+{
+ u8 data[4];
+ int rc = 0, battery_soc, cc_pc_val;
+ int vbat_est_diff, vbat_est_thr_uv;
+ unsigned int cc_pc_100 = FULL_PERCENT_28BIT;
+
+ mutex_lock(&chip->learning_data.learning_lock);
+ if (chip->status == POWER_SUPPLY_STATUS_CHARGING
+ && !chip->learning_data.active
+ && chip->batt_aging_mode == FG_AGING_CC) {
+ if (chip->learning_data.learned_cc_uah == 0) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("no capacity, aborting\n");
+ goto fail;
+ }
+
+ if (!fg_is_temperature_ok_for_learning(chip))
+ goto fail;
+
+ fg_mem_lock(chip);
+ if (!chip->learning_data.feedback_on) {
+ vbat_est_diff = get_vbat_est_diff(chip);
+ vbat_est_thr_uv = chip->learning_data.vbat_est_thr_uv;
+ if (vbat_est_diff >= vbat_est_thr_uv &&
+ vbat_est_thr_uv > 0) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("vbat_est_diff (%d) < threshold (%d)\n",
+ vbat_est_diff, vbat_est_thr_uv);
+ fg_mem_release(chip);
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+ }
+ battery_soc = get_battery_soc_raw(chip);
+ if (fg_debug_mask & FG_AGING)
+ pr_info("checking battery soc (%d vs %d)\n",
+ battery_soc * 100 / FULL_PERCENT_3B,
+ chip->learning_data.max_start_soc);
+ /* check if the battery is low enough to start soc learning */
+ if (battery_soc * 100 / FULL_PERCENT_3B
+ > chip->learning_data.max_start_soc) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("battery soc too low (%d < %d), aborting\n",
+ battery_soc * 100 / FULL_PERCENT_3B,
+ chip->learning_data.max_start_soc);
+ fg_mem_release(chip);
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+
+ /* set the coulomb counter to a percentage of the capacity */
+ chip->learning_data.cc_uah = div64_s64(
+ (chip->learning_data.learned_cc_uah * battery_soc),
+ FULL_PERCENT_3B);
+
+ /* Use CC_SOC_REG based capacity learning */
+ if (chip->wa_flag & USE_CC_SOC_REG) {
+ fg_mem_release(chip);
+ /* SW_CC_SOC based capacity learning */
+ if (fg_get_cc_soc(chip, &cc_pc_val)) {
+ pr_err("failed to get CC_SOC, stop capacity learning\n");
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+
+ chip->learning_data.init_cc_pc_val = cc_pc_val;
+ chip->learning_data.active = true;
+ if (fg_debug_mask & FG_AGING)
+ pr_info("SW_CC_SOC based learning init_CC_SOC=%d\n",
+ chip->learning_data.init_cc_pc_val);
+ } else {
+ rc = fg_mem_masked_write(chip, CBITS_INPUT_FILTER_REG,
+ IBATTF_TAU_MASK, IBATTF_TAU_99_S, 0);
+ if (rc) {
+ pr_err("Failed to write IF IBAT Tau: %d\n",
+ rc);
+ fg_mem_release(chip);
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+
+ /* clear the i_filtered register */
+ memset(data, 0, 4);
+ rc = fg_mem_write(chip, data, I_FILTERED_REG, 3, 0, 0);
+ if (rc) {
+ pr_err("Failed to clear i_filtered: %d\n", rc);
+ fg_mem_release(chip);
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+ fg_mem_release(chip);
+ chip->learning_data.time_stamp = ktime_get_boottime();
+ chip->learning_data.active = true;
+
+ if (fg_debug_mask & FG_AGING)
+ pr_info("cap learning started, soc = %d cc_uah = %lld\n",
+ battery_soc * 100 / FULL_PERCENT_3B,
+ chip->learning_data.cc_uah);
+ alarm_start_relative(&chip->fg_cap_learning_alarm,
+ ns_to_ktime(FG_CAP_LEARNING_INTERVAL_NS));
+ }
+ } else if ((chip->status != POWER_SUPPLY_STATUS_CHARGING)
+ && chip->learning_data.active) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("capacity learning stopped\n");
+ if (!(chip->wa_flag & USE_CC_SOC_REG))
+ alarm_try_to_cancel(&chip->fg_cap_learning_alarm);
+
+ if (chip->status == POWER_SUPPLY_STATUS_FULL) {
+ if (chip->wa_flag & USE_CC_SOC_REG) {
+ rc = fg_cap_learning_process_full_data(chip);
+ if (rc) {
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+ /* reset SW_CC_SOC register to 100% */
+ rc = fg_mem_write(chip, (u8 *)&cc_pc_100,
+ CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0);
+ if (rc)
+ pr_err("Failed to reset CC_SOC_REG rc=%d\n",
+ rc);
+ }
+ fg_cap_learning_post_process(chip);
+ }
+
+ fg_cap_learning_stop(chip);
+ }
+
+fail:
+ mutex_unlock(&chip->learning_data.learning_lock);
+ return rc;
+}
+
+static bool is_usb_present(struct fg_chip *chip)
+{
+ union power_supply_propval prop = {0,};
+
+ if (!chip->usb_psy)
+ chip->usb_psy = power_supply_get_by_name("usb");
+
+ if (chip->usb_psy)
+ power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_PRESENT, &prop);
+ return prop.intval != 0;
+}
+
+static bool is_dc_present(struct fg_chip *chip)
+{
+ union power_supply_propval prop = {0,};
+
+ if (!chip->dc_psy)
+ chip->dc_psy = power_supply_get_by_name("dc");
+
+ if (chip->dc_psy)
+ power_supply_get_property(chip->dc_psy,
+ POWER_SUPPLY_PROP_PRESENT, &prop);
+ return prop.intval != 0;
+}
+
+static bool is_input_present(struct fg_chip *chip)
+{
+ return is_usb_present(chip) || is_dc_present(chip);
+}
+
+static bool is_otg_present(struct fg_chip *chip)
+{
+ union power_supply_propval prop = {0,};
+
+ if (!chip->usb_psy)
+ chip->usb_psy = power_supply_get_by_name("usb");
+
+ if (chip->usb_psy)
+ power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_USB_OTG, &prop);
+ return prop.intval != 0;
+}
+
+static bool is_charger_available(struct fg_chip *chip)
+{
+ if (!chip->batt_psy_name)
+ return false;
+
+ if (!chip->batt_psy)
+ chip->batt_psy = power_supply_get_by_name(chip->batt_psy_name);
+
+ if (!chip->batt_psy)
+ return false;
+
+ return true;
+}
+
+static int set_prop_enable_charging(struct fg_chip *chip, bool enable)
+{
+ int rc = 0;
+ union power_supply_propval ret = {enable, };
+
+ if (!is_charger_available(chip)) {
+ pr_err("Charger not available yet!\n");
+ return -EINVAL;
+ }
+
+ rc = power_supply_set_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED,
+ &ret);
+ if (rc) {
+ pr_err("couldn't configure batt chg %d\n", rc);
+ return rc;
+ }
+
+ chip->charging_disabled = !enable;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("%sabling charging\n", enable ? "en" : "dis");
+
+ return rc;
+}
+
+#define MAX_BATTERY_CC_SOC_CAPACITY 150
+static void status_change_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ status_change_work);
+ unsigned long current_time = 0;
+ int cc_soc, rc, capacity = get_prop_capacity(chip);
+
+ if (chip->esr_pulse_tune_en) {
+ fg_stay_awake(&chip->esr_extract_wakeup_source);
+ schedule_work(&chip->esr_extract_config_work);
+ }
+
+ if (chip->status == POWER_SUPPLY_STATUS_FULL) {
+ if (capacity >= 99 && chip->hold_soc_while_full
+ && chip->health == POWER_SUPPLY_HEALTH_GOOD) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("holding soc at 100\n");
+ chip->charge_full = true;
+ } else if (fg_debug_mask & FG_STATUS) {
+ pr_info("terminated charging at %d/0x%02x\n",
+ capacity, get_monotonic_soc_raw(chip));
+ }
+ }
+ if (chip->status == POWER_SUPPLY_STATUS_FULL ||
+ chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ if (!chip->vbat_low_irq_enabled) {
+ enable_irq(chip->batt_irq[VBATT_LOW].irq);
+ enable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = true;
+ }
+ if (!!(chip->wa_flag & PULSE_REQUEST_WA) && capacity == 100)
+ fg_configure_soc(chip);
+ } else if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
+ if (chip->vbat_low_irq_enabled) {
+ disable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+ disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = false;
+ }
+ }
+ fg_cap_learning_check(chip);
+ schedule_work(&chip->update_esr_work);
+
+ if (chip->wa_flag & USE_CC_SOC_REG) {
+ if (fg_get_cc_soc(chip, &cc_soc)) {
+ pr_err("failed to get CC_SOC\n");
+ return;
+ }
+ }
+
+ if (chip->prev_status != chip->status && chip->last_sram_update_time) {
+ get_current_time(&current_time);
+ /*
+ * When the charging status changes, refresh the SRAM
+ * parameters unless they were already updated within the
+ * last 5 seconds.
+ */
+ if (chip->last_sram_update_time + 5 < current_time) {
+ cancel_delayed_work(&chip->update_sram_data);
+ schedule_delayed_work(&chip->update_sram_data,
+ msecs_to_jiffies(0));
+ }
+ if (chip->cyc_ctr.en)
+ schedule_work(&chip->cycle_count_work);
+ if ((chip->wa_flag & USE_CC_SOC_REG) &&
+ chip->bad_batt_detection_en &&
+ chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ chip->sw_cc_soc_data.init_sys_soc = capacity;
+ chip->sw_cc_soc_data.init_cc_soc = cc_soc;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info(" Init_sys_soc %d init_cc_soc %d\n",
+ chip->sw_cc_soc_data.init_sys_soc,
+ chip->sw_cc_soc_data.init_cc_soc);
+ }
+ }
+ if ((chip->wa_flag & USE_CC_SOC_REG) && chip->bad_batt_detection_en
+ && chip->safety_timer_expired) {
+ chip->sw_cc_soc_data.delta_soc =
+ DIV_ROUND_CLOSEST(abs(cc_soc -
+ chip->sw_cc_soc_data.init_cc_soc)
+ * 100, FULL_PERCENT_28BIT);
+ chip->sw_cc_soc_data.full_capacity =
+ chip->sw_cc_soc_data.delta_soc +
+ chip->sw_cc_soc_data.init_sys_soc;
+ pr_info("Init_sys_soc %d init_cc_soc %d cc_soc %d delta_soc %d full_capacity %d\n",
+ chip->sw_cc_soc_data.init_sys_soc,
+ chip->sw_cc_soc_data.init_cc_soc, cc_soc,
+ chip->sw_cc_soc_data.delta_soc,
+ chip->sw_cc_soc_data.full_capacity);
+ /*
+ * If the estimated full capacity exceeds 150, treat the
+ * battery as bad; otherwise, reset the safety timer and
+ * restart charging.
+ */
+ if (chip->sw_cc_soc_data.full_capacity >
+ MAX_BATTERY_CC_SOC_CAPACITY) {
+ pr_info("Battery possibly damaged, do not restart charging\n");
+ } else {
+ pr_info("Reset safety-timer and restart charging\n");
+ rc = set_prop_enable_charging(chip, false);
+ if (rc) {
+ pr_err("failed to disable charging %d\n", rc);
+ return;
+ }
+
+ chip->safety_timer_expired = false;
+ msleep(200);
+
+ rc = set_prop_enable_charging(chip, true);
+ if (rc) {
+ pr_err("failed to enable charging %d\n", rc);
+ return;
+ }
+ }
+ }
+}
+
+/*
+ * Check for change in the status of input or OTG and schedule
+ * IADC gain compensation work.
+ */
+static void check_gain_compensation(struct fg_chip *chip)
+{
+ bool input_present = is_input_present(chip);
+ bool otg_present = is_otg_present(chip);
+
+ if ((chip->wa_flag & IADC_GAIN_COMP_WA)
+ && ((chip->input_present ^ input_present)
+ || (chip->otg_present ^ otg_present))) {
+ fg_stay_awake(&chip->gain_comp_wakeup_source);
+ chip->input_present = input_present;
+ chip->otg_present = otg_present;
+ cancel_work_sync(&chip->gain_comp_work);
+ schedule_work(&chip->gain_comp_work);
+ }
+}
+
+static void fg_hysteresis_config(struct fg_chip *chip)
+{
+ int hard_hot = 0, hard_cold = 0;
+
+ hard_hot = get_prop_jeita_temp(chip, FG_MEM_HARD_HOT);
+ hard_cold = get_prop_jeita_temp(chip, FG_MEM_HARD_COLD);
+ if (chip->health == POWER_SUPPLY_HEALTH_OVERHEAT && !chip->batt_hot) {
+ /* turn down the hard hot threshold */
+ chip->batt_hot = true;
+ set_prop_jeita_temp(chip, FG_MEM_HARD_HOT,
+ hard_hot - chip->hot_hysteresis);
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("hard hot hysteresis: old hot=%d, new hot=%d\n",
+ hard_hot, hard_hot - chip->hot_hysteresis);
+ } else if (chip->health == POWER_SUPPLY_HEALTH_COLD &&
+ !chip->batt_cold) {
+ /* turn up the hard cold threshold */
+ chip->batt_cold = true;
+ set_prop_jeita_temp(chip, FG_MEM_HARD_COLD,
+ hard_cold + chip->cold_hysteresis);
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("hard cold hysteresis: old cold=%d, new cold=%d\n",
+ hard_cold, hard_cold + chip->cold_hysteresis);
+ } else if (chip->health != POWER_SUPPLY_HEALTH_OVERHEAT &&
+ chip->batt_hot) {
+ /* restore the hard hot threshold */
+ set_prop_jeita_temp(chip, FG_MEM_HARD_HOT,
+ hard_hot + chip->hot_hysteresis);
+ chip->batt_hot = !chip->batt_hot;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("restore hard hot threshold: old hot=%d, new hot=%d\n",
+ hard_hot,
+ hard_hot + chip->hot_hysteresis);
+ } else if (chip->health != POWER_SUPPLY_HEALTH_COLD &&
+ chip->batt_cold) {
+ /* restore the hard cold threshold */
+ set_prop_jeita_temp(chip, FG_MEM_HARD_COLD,
+ hard_cold - chip->cold_hysteresis);
+ chip->batt_cold = !chip->batt_cold;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("restore hard cold threshold: old cold=%d, new cold=%d\n",
+ hard_cold,
+ hard_cold - chip->cold_hysteresis);
+ }
+}
+
+#define BATT_INFO_STS(base) (base + 0x09)
+#define JEITA_HARD_HOT_RT_STS BIT(6)
+#define JEITA_HARD_COLD_RT_STS BIT(5)
+static int fg_init_batt_temp_state(struct fg_chip *chip)
+{
+ int rc = 0;
+ u8 batt_info_sts;
+ int hard_hot = 0, hard_cold = 0;
+
+ /*
+ * read the batt_info_sts register to parse battery's
+ * initial status and do hysteresis config accordingly.
+ */
+ rc = fg_read(chip, &batt_info_sts,
+ BATT_INFO_STS(chip->batt_base), 1);
+ if (rc) {
+ pr_err("failed to read batt info sts, rc=%d\n", rc);
+ return rc;
+ }
+
+ hard_hot = get_prop_jeita_temp(chip, FG_MEM_HARD_HOT);
+ hard_cold = get_prop_jeita_temp(chip, FG_MEM_HARD_COLD);
+ chip->batt_hot =
+ (batt_info_sts & JEITA_HARD_HOT_RT_STS) ? true : false;
+ chip->batt_cold =
+ (batt_info_sts & JEITA_HARD_COLD_RT_STS) ? true : false;
+ if (chip->batt_hot || chip->batt_cold) {
+ if (chip->batt_hot) {
+ chip->health = POWER_SUPPLY_HEALTH_OVERHEAT;
+ set_prop_jeita_temp(chip, FG_MEM_HARD_HOT,
+ hard_hot - chip->hot_hysteresis);
+ } else {
+ chip->health = POWER_SUPPLY_HEALTH_COLD;
+ set_prop_jeita_temp(chip, FG_MEM_HARD_COLD,
+ hard_cold + chip->cold_hysteresis);
+ }
+ }
+
+ return rc;
+}
+
+static int fg_power_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct fg_chip *chip = power_supply_get_drvdata(psy);
+ int rc = 0, unused;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_COOL_TEMP:
+ rc = set_prop_jeita_temp(chip, FG_MEM_SOFT_COLD, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_WARM_TEMP:
+ rc = set_prop_jeita_temp(chip, FG_MEM_SOFT_HOT, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_UPDATE_NOW:
+ if (val->intval)
+ update_sram_data(chip, &unused);
+ break;
+ case POWER_SUPPLY_PROP_STATUS:
+ chip->prev_status = chip->status;
+ chip->status = val->intval;
+ schedule_work(&chip->status_change_work);
+ check_gain_compensation(chip);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ chip->health = val->intval;
+ if (chip->health == POWER_SUPPLY_HEALTH_GOOD) {
+ fg_stay_awake(&chip->resume_soc_wakeup_source);
+ schedule_work(&chip->set_resume_soc_work);
+ }
+
+ if (chip->jeita_hysteresis_support)
+ fg_hysteresis_config(chip);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_DONE:
+ chip->charge_done = val->intval;
+ if (!chip->resume_soc_lowered) {
+ fg_stay_awake(&chip->resume_soc_wakeup_source);
+ schedule_work(&chip->set_resume_soc_work);
+ }
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+ if ((val->intval > 0) && (val->intval <= BUCKET_COUNT)) {
+ chip->cyc_ctr.id = val->intval;
+ } else {
+ pr_err("rejecting invalid cycle_count_id = %d\n",
+ val->intval);
+ rc = -EINVAL;
+ }
+ break;
+ case POWER_SUPPLY_PROP_SAFETY_TIMER_EXPIRED:
+ chip->safety_timer_expired = val->intval;
+ schedule_work(&chip->status_change_work);
+ break;
+ case POWER_SUPPLY_PROP_HI_POWER:
+ if (chip->wa_flag & BCL_HI_POWER_FOR_CHGLED_WA) {
+ chip->bcl_lpm_disabled = !!val->intval;
+ schedule_work(&chip->bcl_hi_power_work);
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int fg_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_COOL_TEMP:
+ case POWER_SUPPLY_PROP_WARM_TEMP:
+ case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+ return 1;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define SRAM_DUMP_START 0x400
+#define SRAM_DUMP_LEN 0x200
+static void dump_sram(struct work_struct *work)
+{
+ int i, rc;
+ u8 *buffer, rt_sts;
+ char str[16];
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ dump_sram);
+
+ buffer = devm_kzalloc(chip->dev, SRAM_DUMP_LEN, GFP_KERNEL);
+ if (buffer == NULL) {
+ pr_err("Can't allocate buffer\n");
+ return;
+ }
+
+ rc = fg_read(chip, &rt_sts, INT_RT_STS(chip->soc_base), 1);
+ if (rc)
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->soc_base), rc);
+ else
+ pr_info("soc rt_sts: 0x%x\n", rt_sts);
+
+ rc = fg_read(chip, &rt_sts, INT_RT_STS(chip->batt_base), 1);
+ if (rc)
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->batt_base), rc);
+ else
+ pr_info("batt rt_sts: 0x%x\n", rt_sts);
+
+ rc = fg_read(chip, &rt_sts, INT_RT_STS(chip->mem_base), 1);
+ if (rc)
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->mem_base), rc);
+ else
+ pr_info("memif rt_sts: 0x%x\n", rt_sts);
+
+ rc = fg_mem_read(chip, buffer, SRAM_DUMP_START, SRAM_DUMP_LEN, 0, 0);
+ if (rc) {
+ pr_err("dump failed: rc = %d\n", rc);
+ devm_kfree(chip->dev, buffer);
+ return;
+ }
+
+ for (i = 0; i < SRAM_DUMP_LEN; i += 4) {
+ str[0] = '\0';
+ fill_string(str, sizeof(str), buffer + i, 4);
+ pr_info("%03X %s\n", SRAM_DUMP_START + i, str);
+ }
+ devm_kfree(chip->dev, buffer);
+}
+
+#define MAXRSCHANGE_REG 0x434
+#define ESR_VALUE_OFFSET 1
+#define ESR_STRICT_VALUE 0x4120391F391F3019
+#define ESR_DEFAULT_VALUE 0x58CD4A6761C34A67
+static void update_esr_value(struct work_struct *work)
+{
+ union power_supply_propval prop = {0, };
+ u64 esr_value;
+ int rc = 0;
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ update_esr_work);
+
+ if (!is_charger_available(chip))
+ return;
+
+ power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CHARGE_TYPE, &prop);
+
+ if (!chip->esr_strict_filter) {
+ if ((prop.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER &&
+ chip->status == POWER_SUPPLY_STATUS_CHARGING) ||
+ (chip->status == POWER_SUPPLY_STATUS_FULL)) {
+ esr_value = ESR_STRICT_VALUE;
+ rc = fg_mem_write(chip, (u8 *)&esr_value,
+ MAXRSCHANGE_REG, 8,
+ ESR_VALUE_OFFSET, 0);
+ if (rc)
+ pr_err("failed to write strict ESR value rc=%d\n",
+ rc);
+ else
+ chip->esr_strict_filter = true;
+ }
+ } else if ((prop.intval != POWER_SUPPLY_CHARGE_TYPE_TAPER &&
+ chip->status == POWER_SUPPLY_STATUS_CHARGING) ||
+ (chip->status == POWER_SUPPLY_STATUS_DISCHARGING)) {
+ esr_value = ESR_DEFAULT_VALUE;
+ rc = fg_mem_write(chip, (u8 *)&esr_value, MAXRSCHANGE_REG, 8,
+ ESR_VALUE_OFFSET, 0);
+ if (rc)
+ pr_err("failed to write default ESR value rc=%d\n", rc);
+ else
+ chip->esr_strict_filter = false;
+ }
+}
+
+#define TEMP_COUNTER_REG 0x580
+#define VBAT_FILTERED_OFFSET 1
+#define GAIN_REG 0x424
+#define GAIN_OFFSET 1
+#define K_VCOR_REG 0x484
+#define DEF_GAIN_OFFSET 2
+#define PICO_UNIT 0xE8D4A51000LL
+#define ATTO_UNIT 0xDE0B6B3A7640000LL
+#define VBAT_REF 3800000
+
+/*
+ * IADC Gain compensation steps:
+ * If Input/OTG absent:
+ * - read VBAT_FILTERED, KVCOR, GAIN
+ * - calculate the gain compensation using following formula:
+ * gain = (1 + gain) * (1 + kvcor * (vbat_filtered - 3800000)) - 1;
+ * else
+ * - reset to the default gain compensation
+ */
+static void iadc_gain_comp_work(struct work_struct *work)
+{
+ u8 reg[4];
+ int rc;
+ uint64_t vbat_filtered;
+ int64_t gain, kvcor, temp, numerator;
+ struct fg_chip *chip = container_of(work, struct fg_chip,
+ gain_comp_work);
+ bool input_present = is_input_present(chip);
+ bool otg_present = is_otg_present(chip);
+
+ if (!chip->init_done)
+ goto done;
+
+ if (!input_present && !otg_present) {
+ /* read VBAT_FILTERED */
+ rc = fg_mem_read(chip, reg, TEMP_COUNTER_REG, 3,
+ VBAT_FILTERED_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read VBAT: rc=%d\n", rc);
+ goto done;
+ }
+ temp = (reg[2] << 16) | (reg[1] << 8) | reg[0];
+ vbat_filtered = div_u64((u64)temp * LSB_24B_NUMRTR,
+ LSB_24B_DENMTR);
+
+ /* read K_VCOR */
+ rc = fg_mem_read(chip, reg, K_VCOR_REG, 2, 0, 0);
+ if (rc) {
+ pr_err("Failed to KVCOR rc=%d\n", rc);
+ goto done;
+ }
+ kvcor = half_float(reg);
+
+ /* calculate gain */
+ numerator = (MICRO_UNIT + chip->iadc_comp_data.dfl_gain)
+ * (PICO_UNIT + kvcor * (vbat_filtered - VBAT_REF))
+ - ATTO_UNIT;
+ gain = div64_s64(numerator, PICO_UNIT);
+
+ /* write back gain */
+ half_float_to_buffer(gain, reg);
+ rc = fg_mem_write(chip, reg, GAIN_REG, 2, GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to write gain reg rc=%d\n", rc);
+ goto done;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("IADC gain update [%x %x]\n", reg[1], reg[0]);
+ chip->iadc_comp_data.gain_active = true;
+ } else {
+ /* reset gain register */
+ rc = fg_mem_write(chip, chip->iadc_comp_data.dfl_gain_reg,
+ GAIN_REG, 2, GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write gain comp: %d\n", rc);
+ goto done;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("IADC gain reset [%x %x]\n",
+ chip->iadc_comp_data.dfl_gain_reg[1],
+ chip->iadc_comp_data.dfl_gain_reg[0]);
+ chip->iadc_comp_data.gain_active = false;
+ }
+
+done:
+ fg_relax(&chip->gain_comp_wakeup_source);
+}
+
+#define BATT_MISSING_STS BIT(6)
+static bool is_battery_missing(struct fg_chip *chip)
+{
+ int rc;
+ u8 fg_batt_sts;
+
+ rc = fg_read(chip, &fg_batt_sts,
+ INT_RT_STS(chip->batt_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->batt_base), rc);
+ return false;
+ }
+
+ return (fg_batt_sts & BATT_MISSING_STS) ? true : false;
+}
+
+#define SOC_FIRST_EST_DONE BIT(5)
+static bool is_first_est_done(struct fg_chip *chip)
+{
+ int rc;
+ u8 fg_soc_sts;
+
+ rc = fg_read(chip, &fg_soc_sts,
+ INT_RT_STS(chip->soc_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->soc_base), rc);
+ return false;
+ }
+
+ return (fg_soc_sts & SOC_FIRST_EST_DONE) ? true : false;
+}
+
+static irqreturn_t fg_vbatt_low_handler(int irq, void *_chip)
+{
+ struct fg_chip *chip = _chip;
+ int rc;
+ bool vbatt_low_sts;
+
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("vbatt-low triggered\n");
+
+ if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ rc = fg_get_vbatt_status(chip, &vbatt_low_sts);
+ if (rc) {
+ pr_err("error in reading vbatt_status, rc:%d\n", rc);
+ goto out;
+ }
+ if (!vbatt_low_sts && chip->vbat_low_irq_enabled) {
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("disabling vbatt_low irq\n");
+ disable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+ disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = false;
+ }
+ }
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+out:
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_batt_missing_irq_handler(int irq, void *_chip)
+{
+ struct fg_chip *chip = _chip;
+ bool batt_missing = is_battery_missing(chip);
+
+ if (batt_missing) {
+ chip->battery_missing = true;
+ chip->profile_loaded = false;
+ chip->batt_type = default_batt_type;
+ mutex_lock(&chip->cyc_ctr.lock);
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("battery missing, clearing cycle counters\n");
+ clear_cycle_counter(chip);
+ mutex_unlock(&chip->cyc_ctr.lock);
+ } else {
+ if (!chip->use_otp_profile) {
+ reinit_completion(&chip->batt_id_avail);
+ reinit_completion(&chip->first_soc_done);
+ schedule_delayed_work(&chip->batt_profile_init, 0);
+ cancel_delayed_work(&chip->update_sram_data);
+ schedule_delayed_work(
+ &chip->update_sram_data,
+ msecs_to_jiffies(0));
+ } else {
+ chip->battery_missing = false;
+ }
+ }
+
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("batt-missing triggered: %s\n",
+ batt_missing ? "missing" : "present");
+
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_mem_avail_irq_handler(int irq, void *_chip)
+{
+ struct fg_chip *chip = _chip;
+ u8 mem_if_sts;
+ int rc;
+
+ rc = fg_read(chip, &mem_if_sts, INT_RT_STS(chip->mem_base), 1);
+ if (rc) {
+ pr_err("failed to read mem status rc=%d\n", rc);
+ return IRQ_HANDLED;
+ }
+
+ if (fg_check_sram_access(chip)) {
+ if (fg_debug_mask &
+ (FG_IRQS | FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES))
+ pr_info("sram access granted\n");
+ reinit_completion(&chip->sram_access_revoked);
+ complete_all(&chip->sram_access_granted);
+ } else {
+ if (fg_debug_mask &
+ (FG_IRQS | FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES))
+ pr_info("sram access revoked\n");
+ complete_all(&chip->sram_access_revoked);
+ }
+
+ if (!rc && (fg_debug_mask &
+ (FG_IRQS | FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES)))
+ pr_info("mem_if sts 0x%02x\n", mem_if_sts);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_soc_irq_handler(int irq, void *_chip)
+{
+ struct fg_chip *chip = _chip;
+ u8 soc_rt_sts;
+ int rc;
+
+ rc = fg_read(chip, &soc_rt_sts, INT_RT_STS(chip->soc_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->soc_base), rc);
+ }
+
+ if (!rc && (fg_debug_mask & FG_IRQS))
+ pr_info("triggered 0x%x\n", soc_rt_sts);
+
+ schedule_work(&chip->battery_age_work);
+
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+
+ if (chip->rslow_comp.chg_rs_to_rslow > 0 &&
+ chip->rslow_comp.chg_rslow_comp_c1 > 0 &&
+ chip->rslow_comp.chg_rslow_comp_c2 > 0)
+ schedule_work(&chip->rslow_comp_work);
+ if (chip->cyc_ctr.en)
+ schedule_work(&chip->cycle_count_work);
+ schedule_work(&chip->update_esr_work);
+ if (chip->charge_full)
+ schedule_work(&chip->charge_full_work);
+ if (chip->wa_flag & IADC_GAIN_COMP_WA
+ && chip->iadc_comp_data.gain_active) {
+ fg_stay_awake(&chip->gain_comp_wakeup_source);
+ schedule_work(&chip->gain_comp_work);
+ }
+
+ if (chip->wa_flag & USE_CC_SOC_REG
+ && chip->learning_data.active) {
+ fg_stay_awake(&chip->capacity_learning_wakeup_source);
+ schedule_work(&chip->fg_cap_learning_work);
+ }
+
+ if (chip->esr_pulse_tune_en) {
+ fg_stay_awake(&chip->esr_extract_wakeup_source);
+ schedule_work(&chip->esr_extract_config_work);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#define FG_EMPTY_DEBOUNCE_MS 1500
+static irqreturn_t fg_empty_soc_irq_handler(int irq, void *_chip)
+{
+ struct fg_chip *chip = _chip;
+ u8 soc_rt_sts;
+ int rc;
+
+ rc = fg_read(chip, &soc_rt_sts, INT_RT_STS(chip->soc_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->soc_base), rc);
+ goto done;
+ }
+
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("triggered 0x%x\n", soc_rt_sts);
+ if (fg_is_batt_empty(chip)) {
+ fg_stay_awake(&chip->empty_check_wakeup_source);
+ schedule_delayed_work(&chip->check_empty_work,
+ msecs_to_jiffies(FG_EMPTY_DEBOUNCE_MS));
+ } else {
+ chip->soc_empty = false;
+ }
+
+done:
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_first_soc_irq_handler(int irq, void *_chip)
+{
+ struct fg_chip *chip = _chip;
+
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("triggered\n");
+
+ if (fg_est_dump)
+ schedule_work(&chip->dump_sram);
+
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+
+ complete_all(&chip->first_soc_done);
+
+ return IRQ_HANDLED;
+}
+
+static void fg_external_power_changed(struct power_supply *psy)
+{
+ struct fg_chip *chip = power_supply_get_drvdata(psy);
+
+ if (is_input_present(chip) && chip->rslow_comp.active &&
+ chip->rslow_comp.chg_rs_to_rslow > 0 &&
+ chip->rslow_comp.chg_rslow_comp_c1 > 0 &&
+ chip->rslow_comp.chg_rslow_comp_c2 > 0)
+ schedule_work(&chip->rslow_comp_work);
+ if (!is_input_present(chip) && chip->resume_soc_lowered) {
+ fg_stay_awake(&chip->resume_soc_wakeup_source);
+ schedule_work(&chip->set_resume_soc_work);
+ }
+ if (!is_input_present(chip) && chip->charge_full)
+ schedule_work(&chip->charge_full_work);
+}
+
+static void set_resume_soc_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ set_resume_soc_work);
+ int rc, resume_soc_raw;
+
+ if (is_input_present(chip) && !chip->resume_soc_lowered) {
+ if (!chip->charge_done)
+ goto done;
+ resume_soc_raw = get_monotonic_soc_raw(chip)
+ - (0xFF - settings[FG_MEM_RESUME_SOC].value);
+ if (resume_soc_raw > 0 && resume_soc_raw < FULL_SOC_RAW) {
+ rc = fg_set_resume_soc(chip, resume_soc_raw);
+ if (rc) {
+ pr_err("Couldn't set resume SOC for FG\n");
+ goto done;
+ }
+ if (fg_debug_mask & FG_STATUS) {
+ pr_info("resume soc lowered to 0x%02x\n",
+ resume_soc_raw);
+ }
+ } else if (settings[FG_MEM_RESUME_SOC].value > 0) {
+ pr_err("bad resume soc 0x%02x\n", resume_soc_raw);
+ }
+ chip->charge_done = false;
+ chip->resume_soc_lowered = true;
+ } else if (chip->resume_soc_lowered && (!is_input_present(chip)
+ || chip->health == POWER_SUPPLY_HEALTH_GOOD)) {
+ resume_soc_raw = settings[FG_MEM_RESUME_SOC].value;
+ if (resume_soc_raw > 0 && resume_soc_raw < FULL_SOC_RAW) {
+ rc = fg_set_resume_soc(chip, resume_soc_raw);
+ if (rc) {
+ pr_err("Couldn't set resume SOC for FG\n");
+ goto done;
+ }
+ if (fg_debug_mask & FG_STATUS) {
+ pr_info("resume soc set to 0x%02x\n",
+ resume_soc_raw);
+ }
+ } else if (settings[FG_MEM_RESUME_SOC].value > 0) {
+ pr_err("bad resume soc 0x%02x\n", resume_soc_raw);
+ }
+ chip->resume_soc_lowered = false;
+ }
+done:
+ fg_relax(&chip->resume_soc_wakeup_source);
+}
+
+
+#define OCV_COEFFS_START_REG 0x4C0
+#define OCV_JUNCTION_REG 0x4D8
+#define NOM_CAP_REG 0x4F4
+#define CUTOFF_VOLTAGE_REG 0x40C
+#define RSLOW_CFG_REG 0x538
+#define RSLOW_CFG_OFFSET 2
+#define RSLOW_THRESH_REG 0x52C
+#define RSLOW_THRESH_OFFSET 0
+#define TEMP_RS_TO_RSLOW_OFFSET 2
+#define RSLOW_COMP_REG 0x528
+#define RSLOW_COMP_C1_OFFSET 0
+#define RSLOW_COMP_C2_OFFSET 2
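+/*
+ * Cache the profile-derived parameters from SRAM: twelve half-float
+ * OCV coefficients (four per segment, three segments), the two OCV
+ * junction points (stored as 0-255 and scaled here to percent),
+ * nominal capacity, cutoff voltage, and the default rslow
+ * compensation values that fg_rslow_charge_comp_clear() restores.
+ */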
+static int populate_system_data(struct fg_chip *chip)
+{
+ u8 buffer[24];
+ int rc, i;
+ int16_t cc_mah;
+
+ fg_mem_lock(chip);
+ rc = fg_mem_read(chip, buffer, OCV_COEFFS_START_REG, 24, 0, 0);
+ if (rc) {
+ pr_err("Failed to read ocv coefficients: %d\n", rc);
+ goto done;
+ }
+ for (i = 0; i < 12; i += 1)
+ chip->ocv_coeffs[i] = half_float(buffer + (i * 2));
+ if (fg_debug_mask & FG_AGING) {
+ pr_info("coeffs1 = %lld %lld %lld %lld\n",
+ chip->ocv_coeffs[0], chip->ocv_coeffs[1],
+ chip->ocv_coeffs[2], chip->ocv_coeffs[3]);
+ pr_info("coeffs2 = %lld %lld %lld %lld\n",
+ chip->ocv_coeffs[4], chip->ocv_coeffs[5],
+ chip->ocv_coeffs[6], chip->ocv_coeffs[7]);
+ pr_info("coeffs3 = %lld %lld %lld %lld\n",
+ chip->ocv_coeffs[8], chip->ocv_coeffs[9],
+ chip->ocv_coeffs[10], chip->ocv_coeffs[11]);
+ }
+ rc = fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 1, 0, 0);
+ chip->ocv_junction_p1p2 = buffer[0] * 100 / 255;
+ rc |= fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 1, 1, 0);
+ chip->ocv_junction_p2p3 = buffer[0] * 100 / 255;
+ if (rc) {
+ pr_err("Failed to read ocv junctions: %d\n", rc);
+ goto done;
+ }
+ rc = fg_mem_read(chip, buffer, NOM_CAP_REG, 2, 0, 0);
+ if (rc) {
+ pr_err("Failed to read nominal capacitance: %d\n", rc);
+ goto done;
+ }
+ chip->nom_cap_uah = bcap_uah_2b(buffer);
+ chip->actual_cap_uah = chip->nom_cap_uah;
+ if (chip->learning_data.learned_cc_uah == 0) {
+ chip->learning_data.learned_cc_uah = chip->nom_cap_uah;
+ fg_cap_learning_save_data(chip);
+ } else if (chip->learning_data.feedback_on) {
+ cc_mah = div64_s64(chip->learning_data.learned_cc_uah, 1000);
+ rc = fg_calc_and_store_cc_soc_coeff(chip, cc_mah);
+ if (rc)
+ pr_err("Error in restoring cc_soc_coeff, rc:%d\n", rc);
+ }
+ rc = fg_mem_read(chip, buffer, CUTOFF_VOLTAGE_REG, 2, 0, 0);
+ if (rc) {
+ pr_err("Failed to read cutoff voltage: %d\n", rc);
+ goto done;
+ }
+ chip->cutoff_voltage = voltage_2b(buffer);
+ if (fg_debug_mask & FG_AGING)
+ pr_info("cutoff_voltage = %lld, nom_cap_uah = %d p1p2 = %d, p2p3 = %d\n",
+ chip->cutoff_voltage, chip->nom_cap_uah,
+ chip->ocv_junction_p1p2,
+ chip->ocv_junction_p2p3);
+
+ rc = fg_mem_read(chip, buffer, RSLOW_CFG_REG, 1, RSLOW_CFG_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to read rslow cfg: %d\n", rc);
+ goto done;
+ }
+ chip->rslow_comp.rslow_cfg = buffer[0];
+ rc = fg_mem_read(chip, buffer, RSLOW_THRESH_REG, 1,
+ RSLOW_THRESH_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to read rslow thresh: %d\n", rc);
+ goto done;
+ }
+ chip->rslow_comp.rslow_thr = buffer[0];
+ rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
+ TEMP_RS_TO_RSLOW_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to read rs to rslow: %d\n", rc);
+ goto done;
+ }
+ memcpy(chip->rslow_comp.rs_to_rslow, buffer, 2);
+ rc = fg_mem_read(chip, buffer, RSLOW_COMP_REG, 4,
+ RSLOW_COMP_C1_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to read rslow comp: %d\n", rc);
+ goto done;
+ }
+ memcpy(chip->rslow_comp.rslow_comp, buffer, 4);
+
+done:
+ fg_mem_release(chip);
+ return rc;
+}
+
+#define RSLOW_CFG_MASK (BIT(2) | BIT(3) | BIT(4) | BIT(5))
+#define RSLOW_CFG_ON_VAL (BIT(2) | BIT(3))
+#define RSLOW_THRESH_FULL_VAL 0xFF
+static int fg_rslow_charge_comp_set(struct fg_chip *chip)
+{
+ int rc;
+ u8 buffer[2];
+
+ mutex_lock(&chip->rslow_comp.lock);
+ fg_mem_lock(chip);
+
+ rc = fg_mem_masked_write(chip, RSLOW_CFG_REG,
+ RSLOW_CFG_MASK, RSLOW_CFG_ON_VAL, RSLOW_CFG_OFFSET);
+ if (rc) {
+ pr_err("unable to write rslow cfg: %d\n", rc);
+ goto done;
+ }
+ rc = fg_mem_masked_write(chip, RSLOW_THRESH_REG,
+ 0xFF, RSLOW_THRESH_FULL_VAL, RSLOW_THRESH_OFFSET);
+ if (rc) {
+ pr_err("unable to write rslow thresh: %d\n", rc);
+ goto done;
+ }
+
+ half_float_to_buffer(chip->rslow_comp.chg_rs_to_rslow, buffer);
+ rc = fg_mem_write(chip, buffer,
+ TEMP_RS_TO_RSLOW_REG, 2, TEMP_RS_TO_RSLOW_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write rs to rslow: %d\n", rc);
+ goto done;
+ }
+ half_float_to_buffer(chip->rslow_comp.chg_rslow_comp_c1, buffer);
+ rc = fg_mem_write(chip, buffer,
+ RSLOW_COMP_REG, 2, RSLOW_COMP_C1_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write rslow comp: %d\n", rc);
+ goto done;
+ }
+ half_float_to_buffer(chip->rslow_comp.chg_rslow_comp_c2, buffer);
+ rc = fg_mem_write(chip, buffer,
+ RSLOW_COMP_REG, 2, RSLOW_COMP_C2_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write rslow comp: %d\n", rc);
+ goto done;
+ }
+ chip->rslow_comp.active = true;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Activated rslow charge comp values\n");
+
+done:
+ fg_mem_release(chip);
+ mutex_unlock(&chip->rslow_comp.lock);
+ return rc;
+}
+
+#define RSLOW_CFG_ORIG_MASK (BIT(4) | BIT(5))
+static int fg_rslow_charge_comp_clear(struct fg_chip *chip)
+{
+ u8 reg;
+ int rc;
+
+ mutex_lock(&chip->rslow_comp.lock);
+ fg_mem_lock(chip);
+
+ reg = chip->rslow_comp.rslow_cfg & RSLOW_CFG_ORIG_MASK;
+ rc = fg_mem_masked_write(chip, RSLOW_CFG_REG,
+ RSLOW_CFG_MASK, reg, RSLOW_CFG_OFFSET);
+ if (rc) {
+ pr_err("unable to write rslow cfg: %d\n", rc);
+ goto done;
+ }
+ rc = fg_mem_masked_write(chip, RSLOW_THRESH_REG,
+ 0xFF, chip->rslow_comp.rslow_thr, RSLOW_THRESH_OFFSET);
+ if (rc) {
+ pr_err("unable to write rslow thresh: %d\n", rc);
+ goto done;
+ }
+
+ rc = fg_mem_write(chip, chip->rslow_comp.rs_to_rslow,
+ TEMP_RS_TO_RSLOW_REG, 2, TEMP_RS_TO_RSLOW_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write rs to rslow: %d\n", rc);
+ goto done;
+ }
+ rc = fg_mem_write(chip, chip->rslow_comp.rslow_comp,
+ RSLOW_COMP_REG, 4, RSLOW_COMP_C1_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write rslow comp: %d\n", rc);
+ goto done;
+ }
+ chip->rslow_comp.active = false;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Cleared rslow charge comp values\n");
+
+done:
+ fg_mem_release(chip);
+ mutex_unlock(&chip->rslow_comp.lock);
+ return rc;
+}
+
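+/*
+ * Apply the charge rslow compensation only while charging with the
+ * battery SOC above the profile-defined threshold; otherwise restore
+ * the default rslow values saved in populate_system_data().
+ */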
+static void rslow_comp_work(struct work_struct *work)
+{
+ int battery_soc_1b;
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ rslow_comp_work);
+
+ battery_soc_1b = get_battery_soc_raw(chip) >> 16;
+ if (battery_soc_1b > chip->rslow_comp.chg_rslow_comp_thr
+ && chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ if (!chip->rslow_comp.active)
+ fg_rslow_charge_comp_set(chip);
+ } else {
+ if (chip->rslow_comp.active)
+ fg_rslow_charge_comp_clear(chip);
+ }
+}
+
+#define MICROUNITS_TO_ADC_RAW(units) \
+ div64_s64((units) * LSB_16B_DENMTR, LSB_16B_NUMRTR)
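+/*
+ * The termination current is negated before conversion because the FG
+ * reports charge current as negative; the converted value is stored as
+ * two little-endian bytes.
+ */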
+static int update_chg_iterm(struct fg_chip *chip)
+{
+ u8 data[2];
+ u16 converted_current_raw;
+ s64 current_ma = -settings[FG_MEM_CHG_TERM_CURRENT].value;
+
+ converted_current_raw = (s16)MICROUNITS_TO_ADC_RAW(current_ma * 1000);
+ data[0] = cpu_to_le16(converted_current_raw) & 0xFF;
+ data[1] = cpu_to_le16(converted_current_raw) >> 8;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("current = %lld, converted_raw = %04x, data = %02x %02x\n",
+ current_ma, converted_current_raw, data[0], data[1]);
+ return fg_mem_write(chip, data,
+ settings[FG_MEM_CHG_TERM_CURRENT].address,
+ 2, settings[FG_MEM_CHG_TERM_CURRENT].offset, 0);
+}
+
+#define CC_CV_SETPOINT_REG 0x4F8
+#define CC_CV_SETPOINT_OFFSET 0
+static void update_cc_cv_setpoint(struct fg_chip *chip)
+{
+ int rc;
+ u8 tmp[2];
+
+ if (!chip->cc_cv_threshold_mv)
+ return;
+ batt_to_setpoint_adc(chip->cc_cv_threshold_mv, tmp);
+ rc = fg_mem_write(chip, tmp, CC_CV_SETPOINT_REG, 2,
+ CC_CV_SETPOINT_OFFSET, 0);
+ if (rc) {
+ pr_err("failed to write CC_CV_VOLT rc=%d\n", rc);
+ return;
+ }
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Wrote %x %x to address %x for CC_CV setpoint\n",
+ tmp[0], tmp[1], CC_CV_SETPOINT_REG);
+}
+
+#define CBITS_INPUT_FILTER_REG 0x4B4
+#define CBITS_RMEAS1_OFFSET 1
+#define CBITS_RMEAS2_OFFSET 2
+#define CBITS_RMEAS1_DEFAULT_VAL 0x65
+#define CBITS_RMEAS2_DEFAULT_VAL 0x65
+#define IMPTR_FAST_TIME_SHIFT 1
+#define IMPTR_LONG_TIME_SHIFT (1 << 4)
+#define IMPTR_PULSE_CTR_CHG 1
+#define IMPTR_PULSE_CTR_DISCHG (1 << 4)
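+/*
+ * Switch the impedance-tracking pulse timing between the default and a
+ * slower rate; when slowing down, the impedance and pulse counters are
+ * cleared as well so that stale counts are not carried over.
+ */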
+static int fg_config_imptr_pulse(struct fg_chip *chip, bool slow)
+{
+ int rc;
+ u8 cntr[2] = {0, 0};
+ u8 val;
+
+ if (slow == chip->imptr_pulse_slow_en) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("imptr_pulse_slow is %sabled already\n",
+ slow ? "en" : "dis");
+ return 0;
+ }
+
+ fg_mem_lock(chip);
+
+ val = slow ? (IMPTR_FAST_TIME_SHIFT | IMPTR_LONG_TIME_SHIFT) :
+ CBITS_RMEAS1_DEFAULT_VAL;
+ rc = fg_mem_write(chip, &val, CBITS_INPUT_FILTER_REG, 1,
+ CBITS_RMEAS1_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write cbits_rmeas1_offset rc=%d\n", rc);
+ goto done;
+ }
+
+ val = slow ? (IMPTR_PULSE_CTR_CHG | IMPTR_PULSE_CTR_DISCHG) :
+ CBITS_RMEAS2_DEFAULT_VAL;
+ rc = fg_mem_write(chip, &val, CBITS_INPUT_FILTER_REG, 1,
+ CBITS_RMEAS2_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write cbits_rmeas2_offset rc=%d\n", rc);
+ goto done;
+ }
+
+ if (slow) {
+ rc = fg_mem_write(chip, cntr, COUNTER_IMPTR_REG, 4,
+ COUNTER_IMPTR_OFFSET, 0);
+ if (rc) {
+ pr_err("failed to write COUNTER_IMPTR rc=%d\n", rc);
+ goto done;
+ }
+
+ rc = fg_mem_write(chip, cntr, COUNTER_PULSE_REG, 2,
+ COUNTER_PULSE_OFFSET, 0);
+ if (rc) {
+ pr_err("failed to write COUNTER_IMPTR rc=%d\n", rc);
+ goto done;
+ }
+ }
+
+ chip->imptr_pulse_slow_en = slow;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("imptr_pulse_slow is %sabled\n", slow ? "en" : "dis");
+done:
+ fg_mem_release(chip);
+ return rc;
+}
+
+#define CURRENT_DELTA_MIN_REG 0x42C
+#define CURRENT_DELTA_MIN_OFFSET 1
+#define SYS_CFG_1_REG 0x4AC
+#define SYS_CFG_1_OFFSET 0
+#define CURRENT_DELTA_MIN_DEFAULT 0x16
+#define CURRENT_DELTA_MIN_500MA 0xCD
+#define RSLOW_CFG_USE_FIX_RSER_VAL BIT(7)
+#define ENABLE_ESR_PULSE_VAL BIT(3)
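+/*
+ * Disabling ESR extraction raises the minimum current delta so that
+ * extraction never triggers, forces the fixed series resistance and
+ * gates off the ESR pulse in SYS_CFG_1; enabling reverses all three.
+ */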
+static int fg_config_esr_extract(struct fg_chip *chip, bool disable)
+{
+ int rc;
+ u8 val;
+
+ if (disable == chip->esr_extract_disabled) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("ESR extract already %sabled\n",
+ disable ? "dis" : "en");
+ return 0;
+ }
+
+ fg_mem_lock(chip);
+
+ val = disable ? CURRENT_DELTA_MIN_500MA :
+ CURRENT_DELTA_MIN_DEFAULT;
+ rc = fg_mem_write(chip, &val, CURRENT_DELTA_MIN_REG, 1,
+ CURRENT_DELTA_MIN_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write curr_delta_min rc=%d\n", rc);
+ goto done;
+ }
+
+ val = disable ? RSLOW_CFG_USE_FIX_RSER_VAL : 0;
+ rc = fg_mem_masked_write(chip, RSLOW_CFG_REG,
+ RSLOW_CFG_USE_FIX_RSER_VAL, val, RSLOW_CFG_OFFSET);
+ if (rc) {
+ pr_err("unable to write rslow cfg rc= %d\n", rc);
+ goto done;
+ }
+
+ val = disable ? 0 : ENABLE_ESR_PULSE_VAL;
+ rc = fg_mem_masked_write(chip, SYS_CFG_1_REG,
+ ENABLE_ESR_PULSE_VAL, val, SYS_CFG_1_OFFSET);
+ if (rc) {
+ pr_err("unable to write sys_cfg_1 rc= %d\n", rc);
+ goto done;
+ }
+
+ chip->esr_extract_disabled = disable;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("ESR extract is %sabled\n", disable ? "dis" : "en");
+done:
+ fg_mem_release(chip);
+ return rc;
+}
+
+#define ESR_EXTRACT_STOP_SOC 2
+#define IMPTR_PULSE_CONFIG_SOC 5
+static void esr_extract_config_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work, struct fg_chip,
+ esr_extract_config_work);
+ bool input_present = is_input_present(chip);
+ int capacity = get_prop_capacity(chip);
+
+ if (input_present && capacity <= ESR_EXTRACT_STOP_SOC) {
+ fg_config_esr_extract(chip, true);
+ } else if (capacity > ESR_EXTRACT_STOP_SOC) {
+ fg_config_esr_extract(chip, false);
+
+ if (capacity <= IMPTR_PULSE_CONFIG_SOC)
+ fg_config_imptr_pulse(chip, true);
+ else
+ fg_config_imptr_pulse(chip, false);
+ }
+
+ fg_relax(&chip->esr_extract_wakeup_source);
+}
+
+#define LOW_LATENCY BIT(6)
+#define BATT_PROFILE_OFFSET 0x4C0
+#define PROFILE_INTEGRITY_REG 0x53C
+#define PROFILE_INTEGRITY_BIT BIT(0)
+#define FIRST_EST_DONE_BIT BIT(5)
+#define MAX_TRIES_FIRST_EST 3
+#define FIRST_EST_WAIT_MS 2000
+#define PROFILE_LOAD_TIMEOUT_MS 5000
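+/*
+ * Restart the fuel gauge, optionally rewriting the battery profile:
+ * save the battery temperature when SW rbias control is active, clear
+ * NO_OTP_PROF_RELOAD, toggle low-latency SRAM access, write the profile
+ * and its integrity bit, then trigger REDO_FIRST_ESTIMATE and wait for
+ * the first SOC estimate before charging is re-enabled.
+ */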
+static int fg_do_restart(struct fg_chip *chip, bool write_profile)
+{
+ int rc, ibat_ua;
+ u8 reg = 0;
+ u8 buf[2];
+ bool tried_once = false;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("restarting fuel gauge...\n");
+
+try_again:
+ if (write_profile) {
+ if (!chip->charging_disabled) {
+ pr_err("Charging not yet disabled!\n");
+ return -EINVAL;
+ }
+
+ ibat_ua = get_sram_prop_now(chip, FG_DATA_CURRENT);
+ if (ibat_ua == -EINVAL) {
+ pr_err("SRAM not updated yet!\n");
+ return ibat_ua;
+ }
+
+ if (ibat_ua < 0) {
+ pr_warn("Charging enabled?, ibat_ua: %d\n", ibat_ua);
+
+ if (!tried_once) {
+ cancel_delayed_work(&chip->update_sram_data);
+ schedule_delayed_work(&chip->update_sram_data,
+ msecs_to_jiffies(0));
+ msleep(1000);
+ tried_once = true;
+ goto try_again;
+ }
+ }
+ }
+
+ chip->fg_restarting = true;
+ /*
+ * save the temperature if the sw rbias control is active so that there
+ * is no gap of time when there is no valid temperature read after the
+ * restart
+ */
+ if (chip->sw_rbias_ctrl) {
+ rc = fg_mem_read(chip, buf,
+ fg_data[FG_DATA_BATT_TEMP].address,
+ fg_data[FG_DATA_BATT_TEMP].len,
+ fg_data[FG_DATA_BATT_TEMP].offset, 0);
+ if (rc) {
+ pr_err("failed to read batt temp rc=%d\n", rc);
+ goto sub_and_fail;
+ }
+ }
+ /*
+ * release the sram access and configure the correct settings
+ * before re-requesting access.
+ */
+ mutex_lock(&chip->rw_lock);
+ fg_release_access(chip);
+
+ rc = fg_masked_write(chip, chip->soc_base + SOC_BOOT_MOD,
+ NO_OTP_PROF_RELOAD, 0, 1);
+ if (rc) {
+ pr_err("failed to set no otp reload bit\n");
+ goto unlock_and_fail;
+ }
+
+ /* unset the restart bits so the fg doesn't continuously restart */
+ reg = REDO_FIRST_ESTIMATE | RESTART_GO;
+ rc = fg_masked_write(chip, chip->soc_base + SOC_RESTART,
+ reg, 0, 1);
+ if (rc) {
+ pr_err("failed to unset fg restart: %d\n", rc);
+ goto unlock_and_fail;
+ }
+
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip),
+ LOW_LATENCY, LOW_LATENCY, 1);
+ if (rc) {
+ pr_err("failed to set low latency access bit\n");
+ goto unlock_and_fail;
+ }
+ mutex_unlock(&chip->rw_lock);
+
+ /* read once to get a fg cycle in */
+ rc = fg_mem_read(chip, &reg, PROFILE_INTEGRITY_REG, 1, 0, 0);
+ if (rc) {
+ pr_err("failed to read profile integrity rc=%d\n", rc);
+ goto fail;
+ }
+
+ /*
+ * If this is not the first time a profile has been loaded, sleep for
+ * 3 seconds to make sure the NO_OTP_PROF_RELOAD bit is cleared in memory
+ */
+ if (chip->first_profile_loaded)
+ msleep(3000);
+
+ mutex_lock(&chip->rw_lock);
+ fg_release_access(chip);
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip), LOW_LATENCY, 0, 1);
+ if (rc) {
+ pr_err("failed to set low latency access bit\n");
+ goto unlock_and_fail;
+ }
+
+ atomic_add_return(1, &chip->memif_user_cnt);
+ mutex_unlock(&chip->rw_lock);
+
+ if (write_profile) {
+ /* write the battery profile */
+ rc = fg_mem_write(chip, chip->batt_profile, BATT_PROFILE_OFFSET,
+ chip->batt_profile_len, 0, 1);
+ if (rc) {
+ pr_err("failed to write profile rc=%d\n", rc);
+ goto sub_and_fail;
+ }
+ /* write the integrity bits and release access */
+ rc = fg_mem_masked_write(chip, PROFILE_INTEGRITY_REG,
+ PROFILE_INTEGRITY_BIT,
+ PROFILE_INTEGRITY_BIT, 0);
+ if (rc) {
+ pr_err("failed to write profile rc=%d\n", rc);
+ goto sub_and_fail;
+ }
+ }
+
+ /* decrement the user count so that memory access can be released */
+ fg_release_access_if_necessary(chip);
+
+ /*
+ * make sure that the first estimate has completed
+ * in case of a hotswap
+ */
+ rc = wait_for_completion_interruptible_timeout(&chip->first_soc_done,
+ msecs_to_jiffies(PROFILE_LOAD_TIMEOUT_MS));
+ if (rc <= 0) {
+ pr_err("transaction timed out rc=%d\n", rc);
+ rc = -ETIMEDOUT;
+ goto fail;
+ }
+
+ /*
+ * reinitialize the completion so that the driver knows when the restart
+ * finishes
+ */
+ reinit_completion(&chip->first_soc_done);
+
+ if (chip->esr_pulse_tune_en) {
+ fg_stay_awake(&chip->esr_extract_wakeup_source);
+ schedule_work(&chip->esr_extract_config_work);
+ }
+
+ /*
+ * set the restart bits so that the next fg cycle will not reload
+ * the profile
+ */
+ rc = fg_masked_write(chip, chip->soc_base + SOC_BOOT_MOD,
+ NO_OTP_PROF_RELOAD, NO_OTP_PROF_RELOAD, 1);
+ if (rc) {
+ pr_err("failed to set no otp reload bit\n");
+ goto fail;
+ }
+
+ reg = REDO_FIRST_ESTIMATE | RESTART_GO;
+ rc = fg_masked_write(chip, chip->soc_base + SOC_RESTART,
+ reg, reg, 1);
+ if (rc) {
+ pr_err("failed to set fg restart: %d\n", rc);
+ goto fail;
+ }
+
+ /* wait for the first estimate to complete */
+ rc = wait_for_completion_interruptible_timeout(&chip->first_soc_done,
+ msecs_to_jiffies(PROFILE_LOAD_TIMEOUT_MS));
+ if (rc <= 0) {
+ pr_err("transaction timed out rc=%d\n", rc);
+ rc = -ETIMEDOUT;
+ goto fail;
+ }
+ rc = fg_read(chip, &reg, INT_RT_STS(chip->soc_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->soc_base), rc);
+ goto fail;
+ }
+ if ((reg & FIRST_EST_DONE_BIT) == 0)
+ pr_err("Battery profile reloading failed, no first estimate\n");
+
+ rc = fg_masked_write(chip, chip->soc_base + SOC_BOOT_MOD,
+ NO_OTP_PROF_RELOAD, 0, 1);
+ if (rc) {
+ pr_err("failed to set no otp reload bit\n");
+ goto fail;
+ }
+ /* unset the restart bits so the fg doesn't continuously restart */
+ reg = REDO_FIRST_ESTIMATE | RESTART_GO;
+ rc = fg_masked_write(chip, chip->soc_base + SOC_RESTART,
+ reg, 0, 1);
+ if (rc) {
+ pr_err("failed to unset fg restart: %d\n", rc);
+ goto fail;
+ }
+
+ /* restore the battery temperature reading here */
+ if (chip->sw_rbias_ctrl) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("reloaded 0x%02x%02x into batt temp",
+ buf[0], buf[1]);
+ rc = fg_mem_write(chip, buf,
+ fg_data[FG_DATA_BATT_TEMP].address,
+ fg_data[FG_DATA_BATT_TEMP].len,
+ fg_data[FG_DATA_BATT_TEMP].offset, 0);
+ if (rc) {
+ pr_err("failed to write batt temp rc=%d\n", rc);
+ goto fail;
+ }
+ }
+
+ /* Enable charging now as the first estimate is done now */
+ if (chip->charging_disabled) {
+ rc = set_prop_enable_charging(chip, true);
+ if (rc)
+ pr_err("Failed to enable charging, rc=%d\n", rc);
+ else
+ chip->charging_disabled = false;
+ }
+
+ chip->fg_restarting = false;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("done!\n");
+ return 0;
+
+unlock_and_fail:
+ mutex_unlock(&chip->rw_lock);
+ goto fail;
+sub_and_fail:
+ fg_release_access_if_necessary(chip);
+ goto fail;
+fail:
+ chip->fg_restarting = false;
+ return -EINVAL;
+}
+
+#define FG_PROFILE_LEN 128
+#define PROFILE_COMPARE_LEN 32
+#define THERMAL_COEFF_ADDR 0x444
+#define THERMAL_COEFF_OFFSET 0x2
+#define BATTERY_PSY_WAIT_MS 2000
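+/*
+ * Load the best-matching battery profile from DT once the battery ID is
+ * available, compare it against the profile already in SRAM, and only
+ * rewrite it via fg_do_restart() when the integrity bit is clear, the
+ * vbatt estimate is out of range, or the profiles differ.
+ */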
+static int fg_batt_profile_init(struct fg_chip *chip)
+{
+ int rc = 0, ret, len, batt_id;
+ struct device_node *node = chip->pdev->dev.of_node;
+ struct device_node *batt_node, *profile_node;
+ const char *data, *batt_type_str;
+ bool tried_again = false, vbat_in_range, profiles_same;
+ u8 reg = 0;
+
+wait:
+ fg_stay_awake(&chip->profile_wakeup_source);
+ ret = wait_for_completion_interruptible_timeout(&chip->batt_id_avail,
+ msecs_to_jiffies(PROFILE_LOAD_TIMEOUT_MS));
+ /* If we were interrupted wait again one more time. */
+ if (ret == -ERESTARTSYS && !tried_again) {
+ tried_again = true;
+ pr_debug("interrupted, waiting again\n");
+ goto wait;
+ } else if (ret <= 0) {
+ rc = -ETIMEDOUT;
+ pr_err("profile loading timed out rc=%d\n", rc);
+ goto no_profile;
+ }
+
+ batt_node = of_find_node_by_name(node, "qcom,battery-data");
+ if (!batt_node) {
+ pr_warn("No available batterydata, using OTP defaults\n");
+ rc = 0;
+ goto no_profile;
+ }
+
+ batt_id = get_sram_prop_now(chip, FG_DATA_BATT_ID);
+ batt_id /= 1000;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("battery id = %dKOhms\n", batt_id);
+
+ profile_node = of_batterydata_get_best_profile(batt_node, batt_id,
+ fg_batt_type);
+ if (IS_ERR_OR_NULL(profile_node)) {
+ rc = PTR_ERR(profile_node);
+ pr_err("couldn't find profile handle %d\n", rc);
+ goto no_profile;
+ }
+
+ /* read rslow compensation values if they're available */
+ rc = of_property_read_u32(profile_node, "qcom,chg-rs-to-rslow",
+ &chip->rslow_comp.chg_rs_to_rslow);
+ if (rc) {
+ chip->rslow_comp.chg_rs_to_rslow = -EINVAL;
+ if (rc != -EINVAL)
+ pr_err("Could not read rs to rslow: %d\n", rc);
+ }
+ rc = of_property_read_u32(profile_node, "qcom,chg-rslow-comp-c1",
+ &chip->rslow_comp.chg_rslow_comp_c1);
+ if (rc) {
+ chip->rslow_comp.chg_rslow_comp_c1 = -EINVAL;
+ if (rc != -EINVAL)
+ pr_err("Could not read rslow comp c1: %d\n", rc);
+ }
+ rc = of_property_read_u32(profile_node, "qcom,chg-rslow-comp-c2",
+ &chip->rslow_comp.chg_rslow_comp_c2);
+ if (rc) {
+ chip->rslow_comp.chg_rslow_comp_c2 = -EINVAL;
+ if (rc != -EINVAL)
+ pr_err("Could not read rslow comp c2: %d\n", rc);
+ }
+ rc = of_property_read_u32(profile_node, "qcom,chg-rslow-comp-thr",
+ &chip->rslow_comp.chg_rslow_comp_thr);
+ if (rc) {
+ chip->rslow_comp.chg_rslow_comp_thr = -EINVAL;
+ if (rc != -EINVAL)
+ pr_err("Could not read rslow comp thr: %d\n", rc);
+ }
+
+ rc = of_property_read_u32(profile_node, "qcom,max-voltage-uv",
+ &chip->batt_max_voltage_uv);
+
+ if (rc)
+ pr_warn("couldn't find battery max voltage\n");
+
+ /*
+ * Only configure from profile if fg-cc-cv-threshold-mv is not
+ * defined in the charger device node.
+ */
+ if (!of_find_property(chip->pdev->dev.of_node,
+ "qcom,fg-cc-cv-threshold-mv", NULL)) {
+ of_property_read_u32(profile_node,
+ "qcom,fg-cc-cv-threshold-mv",
+ &chip->cc_cv_threshold_mv);
+ }
+
+ data = of_get_property(profile_node, "qcom,fg-profile-data", &len);
+ if (!data) {
+ pr_err("no battery profile loaded\n");
+ rc = 0;
+ goto no_profile;
+ }
+
+ if (len != FG_PROFILE_LEN) {
+ pr_err("battery profile incorrect size: %d\n", len);
+ rc = -EINVAL;
+ goto no_profile;
+ }
+
+ rc = of_property_read_string(profile_node, "qcom,battery-type",
+ &batt_type_str);
+ if (rc) {
+ pr_err("Could not find battery data type: %d\n", rc);
+ rc = 0;
+ goto no_profile;
+ }
+
+ if (!chip->batt_profile)
+ chip->batt_profile = devm_kzalloc(chip->dev,
+ sizeof(char) * len, GFP_KERNEL);
+
+ if (!chip->batt_profile) {
+ pr_err("out of memory\n");
+ rc = -ENOMEM;
+ goto no_profile;
+ }
+
+ rc = fg_mem_read(chip, &reg, PROFILE_INTEGRITY_REG, 1, 0, 1);
+ if (rc) {
+ pr_err("failed to read profile integrity rc=%d\n", rc);
+ goto no_profile;
+ }
+
+ rc = fg_mem_read(chip, chip->batt_profile, BATT_PROFILE_OFFSET,
+ len, 0, 1);
+ if (rc) {
+ pr_err("failed to read profile rc=%d\n", rc);
+ goto no_profile;
+ }
+
+ /* Check whether the charger is ready */
+ if (!is_charger_available(chip))
+ goto reschedule;
+
+ /* Disable charging for a FG cycle before calculating vbat_in_range */
+ if (!chip->charging_disabled) {
+ rc = set_prop_enable_charging(chip, false);
+ if (rc)
+ pr_err("Failed to disable charging, rc=%d\n", rc);
+
+ goto reschedule;
+ }
+
+ vbat_in_range = get_vbat_est_diff(chip)
+ < settings[FG_MEM_VBAT_EST_DIFF].value * 1000;
+ profiles_same = memcmp(chip->batt_profile, data,
+ PROFILE_COMPARE_LEN) == 0;
+ if (reg & PROFILE_INTEGRITY_BIT) {
+ fg_cap_learning_load_data(chip);
+ if (vbat_in_range && !fg_is_batt_empty(chip) && profiles_same) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Battery profiles same, using default\n");
+ if (fg_est_dump)
+ schedule_work(&chip->dump_sram);
+ goto done;
+ }
+ } else {
+ pr_info("Battery profile not same, clearing data\n");
+ clear_cycle_counter(chip);
+ chip->learning_data.learned_cc_uah = 0;
+ }
+
+ if (fg_est_dump)
+ dump_sram(&chip->dump_sram);
+
+ if ((fg_debug_mask & FG_STATUS) && !vbat_in_range)
+ pr_info("Vbat out of range: v_current_pred: %d, v:%d\n",
+ fg_data[FG_DATA_CPRED_VOLTAGE].value,
+ fg_data[FG_DATA_VOLTAGE].value);
+
+ if ((fg_debug_mask & FG_STATUS) && fg_is_batt_empty(chip))
+ pr_info("battery empty\n");
+
+ if ((fg_debug_mask & FG_STATUS) && !profiles_same)
+ pr_info("profiles differ\n");
+
+ if (fg_debug_mask & FG_STATUS) {
+ pr_info("Using new profile\n");
+ print_hex_dump(KERN_INFO, "FG: loaded profile: ",
+ DUMP_PREFIX_NONE, 16, 1,
+ chip->batt_profile, len, false);
+ }
+
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+
+ memcpy(chip->batt_profile, data, len);
+
+ chip->batt_profile_len = len;
+
+ if (fg_debug_mask & FG_STATUS)
+ print_hex_dump(KERN_INFO, "FG: new profile: ",
+ DUMP_PREFIX_NONE, 16, 1, chip->batt_profile,
+ chip->batt_profile_len, false);
+
+ rc = fg_do_restart(chip, true);
+ if (rc) {
+ pr_err("restart failed: %d\n", rc);
+ goto no_profile;
+ }
+
+ /*
+ * Only configure from profile if thermal-coefficients is not
+ * defined in the FG device node.
+ */
+ if (!of_find_property(chip->pdev->dev.of_node,
+ "qcom,thermal-coefficients", NULL)) {
+ data = of_get_property(profile_node,
+ "qcom,thermal-coefficients", &len);
+ if (data && len == THERMAL_COEFF_N_BYTES) {
+ memcpy(chip->thermal_coefficients, data, len);
+ rc = fg_mem_write(chip, chip->thermal_coefficients,
+ THERMAL_COEFF_ADDR, THERMAL_COEFF_N_BYTES,
+ THERMAL_COEFF_OFFSET, 0);
+ if (rc)
+ pr_err("spmi write failed addr:%03x, ret:%d\n",
+ THERMAL_COEFF_ADDR, rc);
+ else if (fg_debug_mask & FG_STATUS)
+ pr_info("Battery thermal coefficients changed\n");
+ }
+ }
+
+done:
+ if (chip->charging_disabled) {
+ rc = set_prop_enable_charging(chip, true);
+ if (rc)
+ pr_err("Failed to enable charging, rc=%d\n", rc);
+ else
+ chip->charging_disabled = false;
+ }
+
+ if (fg_batt_type)
+ chip->batt_type = fg_batt_type;
+ else
+ chip->batt_type = batt_type_str;
+ chip->first_profile_loaded = true;
+ chip->profile_loaded = true;
+ chip->battery_missing = is_battery_missing(chip);
+ update_chg_iterm(chip);
+ update_cc_cv_setpoint(chip);
+ rc = populate_system_data(chip);
+ if (rc) {
+ pr_err("failed to read ocv properties=%d\n", rc);
+ fg_relax(&chip->profile_wakeup_source);
+ return rc;
+ }
+ estimate_battery_age(chip, &chip->actual_cap_uah);
+ schedule_work(&chip->status_change_work);
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+ fg_relax(&chip->profile_wakeup_source);
+ pr_info("Battery SOC: %d, V: %duV\n", get_prop_capacity(chip),
+ fg_data[FG_DATA_VOLTAGE].value);
+ return rc;
+no_profile:
+ if (chip->charging_disabled) {
+ rc = set_prop_enable_charging(chip, true);
+ if (rc)
+ pr_err("Failed to enable charging, rc=%d\n", rc);
+ else
+ chip->charging_disabled = false;
+ }
+
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+ fg_relax(&chip->profile_wakeup_source);
+ return rc;
+reschedule:
+ schedule_delayed_work(
+ &chip->batt_profile_init,
+ msecs_to_jiffies(BATTERY_PSY_WAIT_MS));
+ cancel_delayed_work(&chip->update_sram_data);
+ schedule_delayed_work(
+ &chip->update_sram_data,
+ msecs_to_jiffies(0));
+ fg_relax(&chip->profile_wakeup_source);
+ return 0;
+}
+
+static void check_empty_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ check_empty_work.work);
+
+ if (fg_is_batt_empty(chip)) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("EMPTY SOC high\n");
+ chip->soc_empty = true;
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+ }
+ fg_relax(&chip->empty_check_wakeup_source);
+}
+
+static void batt_profile_init(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ batt_profile_init.work);
+
+ if (fg_batt_profile_init(chip))
+ pr_err("failed to initialize profile\n");
+}
+
+static void sysfs_restart_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ sysfs_restart_work);
+ int rc;
+
+ rc = fg_do_restart(chip, false);
+ if (rc)
+ pr_err("fg restart failed: %d\n", rc);
+ mutex_lock(&chip->sysfs_restart_lock);
+ fg_restart = 0;
+ mutex_unlock(&chip->sysfs_restart_lock);
+}
+
+#define SRAM_MONOTONIC_SOC_REG 0x574
+#define SRAM_MONOTONIC_SOC_OFFSET 2
+#define SRAM_RELEASE_TIMEOUT_MS 500
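+/*
+ * When the charger leaves the full state or the battery SOC drops to
+ * the resume threshold, latch the live battery SOC into SOC_FULL and
+ * force the monotonic SOC to 0xFFFF so that 100% is still reported,
+ * then wait one FG cycle before clearing the charge_full flag.
+ */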
+static void charge_full_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ charge_full_work);
+ int rc;
+ u8 buffer[3];
+ int bsoc;
+ int resume_soc_raw = FULL_SOC_RAW - settings[FG_MEM_RESUME_SOC].value;
+ bool disable = false;
+ u8 reg;
+
+ if (chip->status != POWER_SUPPLY_STATUS_FULL) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("battery not full: %d\n", chip->status);
+ disable = true;
+ }
+
+ fg_mem_lock(chip);
+ rc = fg_mem_read(chip, buffer, BATTERY_SOC_REG, 3, 1, 0);
+ if (rc) {
+ pr_err("Unable to read battery soc: %d\n", rc);
+ goto out;
+ }
+ if (buffer[2] <= resume_soc_raw) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("bsoc = 0x%02x <= resume = 0x%02x\n",
+ buffer[2], resume_soc_raw);
+ disable = true;
+ }
+ if (!disable)
+ goto out;
+
+ rc = fg_mem_write(chip, buffer, SOC_FULL_REG, 3,
+ SOC_FULL_OFFSET, 0);
+ if (rc) {
+ pr_err("failed to write SOC_FULL rc=%d\n", rc);
+ goto out;
+ }
+ /* force a full soc value into the monotonic in order to display 100 */
+ buffer[0] = 0xFF;
+ buffer[1] = 0xFF;
+ rc = fg_mem_write(chip, buffer, SRAM_MONOTONIC_SOC_REG, 2,
+ SRAM_MONOTONIC_SOC_OFFSET, 0);
+ if (rc) {
+ pr_err("failed to write SOC_FULL rc=%d\n", rc);
+ goto out;
+ }
+ if (fg_debug_mask & FG_STATUS) {
+ bsoc = buffer[0] | buffer[1] << 8 | buffer[2] << 16;
+ pr_info("wrote %06x into soc full\n", bsoc);
+ }
+ fg_mem_release(chip);
+ /*
+ * wait one cycle to make sure the soc is updated before clearing
+ * the soc mask bit
+ */
+ fg_mem_lock(chip);
+ fg_mem_read(chip, &reg, PROFILE_INTEGRITY_REG, 1, 0, 0);
+out:
+ fg_mem_release(chip);
+ if (disable)
+ chip->charge_full = false;
+}
+
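+/*
+ * Program the BCL (battery current limit) low/medium and medium/high
+ * thresholds from their DT-supplied mA values, converting each to an
+ * ADC code with BCL_MA_TO_ADC().
+ */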
+static void update_bcl_thresholds(struct fg_chip *chip)
+{
+ u8 data[4];
+ u8 mh_offset = 0, lm_offset = 0;
+ u16 address = 0;
+ int ret = 0;
+
+ address = settings[FG_MEM_BCL_MH_THRESHOLD].address;
+ mh_offset = settings[FG_MEM_BCL_MH_THRESHOLD].offset;
+ lm_offset = settings[FG_MEM_BCL_LM_THRESHOLD].offset;
+ ret = fg_mem_read(chip, data, address, 4, 0, 1);
+ if (ret)
+ pr_err("Error reading BCL LM & MH threshold rc:%d\n", ret);
+ else
+ pr_debug("Old BCL LM threshold:%x MH threshold:%x\n",
+ data[lm_offset], data[mh_offset]);
+ BCL_MA_TO_ADC(settings[FG_MEM_BCL_MH_THRESHOLD].value, data[mh_offset]);
+ BCL_MA_TO_ADC(settings[FG_MEM_BCL_LM_THRESHOLD].value, data[lm_offset]);
+
+ ret = fg_mem_write(chip, data, address, 4, 0, 0);
+ if (ret)
+ pr_err("spmi write failed. addr:%03x, ret:%d\n",
+ address, ret);
+ else
+ pr_debug("New BCL LM threshold:%x MH threshold:%x\n",
+ data[lm_offset], data[mh_offset]);
+}
+
+static int disable_bcl_lpm(struct fg_chip *chip)
+{
+ u8 data[4];
+ u8 lm_offset = 0;
+ u16 address = 0;
+ int rc = 0;
+
+ address = settings[FG_MEM_BCL_LM_THRESHOLD].address;
+ lm_offset = settings[FG_MEM_BCL_LM_THRESHOLD].offset;
+ rc = fg_mem_read(chip, data, address, 4, 0, 1);
+ if (rc) {
+ pr_err("Error reading BCL LM & MH threshold rc:%d\n", rc);
+ return rc;
+ }
+ pr_debug("Old BCL LM threshold:%x\n", data[lm_offset]);
+
+ /* Put BCL always above LPM */
+ BCL_MA_TO_ADC(0, data[lm_offset]);
+
+ rc = fg_mem_write(chip, data, address, 4, 0, 0);
+ if (rc)
+ pr_err("spmi write failed. addr:%03x, rc:%d\n",
+ address, rc);
+ else
+ pr_debug("New BCL LM threshold:%x\n", data[lm_offset]);
+
+ return rc;
+}
+
+static void bcl_hi_power_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ bcl_hi_power_work);
+ int rc;
+
+ if (chip->bcl_lpm_disabled) {
+ rc = disable_bcl_lpm(chip);
+ if (rc)
+ pr_err("failed to disable bcl low mode %d\n",
+ rc);
+ } else {
+ update_bcl_thresholds(chip);
+ }
+}
+
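+/* The 8-bit voltage comparator starts at 2.5 V with ~9.766 mV per LSB */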
+#define VOLT_UV_TO_VOLTCMP8(volt_uv) \
+ (((volt_uv) - 2500000) / 9766)
+static int update_irq_volt_empty(struct fg_chip *chip)
+{
+ u8 data;
+ int volt_mv = settings[FG_MEM_IRQ_VOLT_EMPTY].value;
+
+ data = (u8)VOLT_UV_TO_VOLTCMP8(volt_mv * 1000);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("voltage = %d, converted_raw = %04x\n", volt_mv, data);
+ return fg_mem_write(chip, &data,
+ settings[FG_MEM_IRQ_VOLT_EMPTY].address, 1,
+ settings[FG_MEM_IRQ_VOLT_EMPTY].offset, 0);
+}
+
+static int update_cutoff_voltage(struct fg_chip *chip)
+{
+ u8 data[2];
+ u16 converted_voltage_raw;
+ s64 voltage_mv = settings[FG_MEM_CUTOFF_VOLTAGE].value;
+
+ converted_voltage_raw = (s16)MICROUNITS_TO_ADC_RAW(voltage_mv * 1000);
+ data[0] = cpu_to_le16(converted_voltage_raw) & 0xFF;
+ data[1] = cpu_to_le16(converted_voltage_raw) >> 8;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("voltage = %lld, converted_raw = %04x, data = %02x %02x\n",
+ voltage_mv, converted_voltage_raw, data[0], data[1]);
+ return fg_mem_write(chip, data, settings[FG_MEM_CUTOFF_VOLTAGE].address,
+ 2, settings[FG_MEM_CUTOFF_VOLTAGE].offset, 0);
+}
+
+static int update_iterm(struct fg_chip *chip)
+{
+ u8 data[2];
+ u16 converted_current_raw;
+ s64 current_ma = -settings[FG_MEM_TERM_CURRENT].value;
+
+ converted_current_raw = (s16)MICROUNITS_TO_ADC_RAW(current_ma * 1000);
+ data[0] = cpu_to_le16(converted_current_raw) & 0xFF;
+ data[1] = cpu_to_le16(converted_current_raw) >> 8;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("current = %lld, converted_raw = %04x, data = %02x %02x\n",
+ current_ma, converted_current_raw, data[0], data[1]);
+ return fg_mem_write(chip, data, settings[FG_MEM_TERM_CURRENT].address,
+ 2, settings[FG_MEM_TERM_CURRENT].offset, 0);
+}
+
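+/*
+ * DT parsing helpers: OF_READ_SETTING fills the settings[] table from a
+ * "qcom,<name>" property (optional entries may be absent), while
+ * OF_READ_PROPERTY stores into an arbitrary variable and falls back to
+ * a default value. Both skip the read once an earlier one has failed.
+ */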
+#define OF_READ_SETTING(type, qpnp_dt_property, retval, optional) \
+do { \
+ if (retval) \
+ break; \
+ \
+ retval = of_property_read_u32(chip->pdev->dev.of_node, \
+ "qcom," qpnp_dt_property, \
+ &settings[type].value); \
+ \
+ if ((retval == -EINVAL) && optional) \
+ retval = 0; \
+ else if (retval) \
+ pr_err("Error reading " #qpnp_dt_property \
+ " property rc = %d\n", rc); \
+} while (0)
+
+#define OF_READ_PROPERTY(store, qpnp_dt_property, retval, default_val) \
+do { \
+ if (retval) \
+ break; \
+ \
+ retval = of_property_read_u32(chip->pdev->dev.of_node, \
+ "qcom," qpnp_dt_property, \
+ &store); \
+ \
+ if (retval == -EINVAL) { \
+ retval = 0; \
+ store = default_val; \
+ } else if (retval) { \
+ pr_err("Error reading " #qpnp_dt_property \
+ " property rc = %d\n", rc); \
+ } \
+} while (0)
+
+#define DEFAULT_EVALUATION_CURRENT_MA 1000
+static int fg_of_init(struct fg_chip *chip)
+{
+ int rc = 0, sense_type, len = 0;
+ const char *data;
+ struct device_node *node = chip->pdev->dev.of_node;
+ u32 temp[2] = {0};
+
+ OF_READ_SETTING(FG_MEM_SOFT_HOT, "warm-bat-decidegc", rc, 1);
+ OF_READ_SETTING(FG_MEM_SOFT_COLD, "cool-bat-decidegc", rc, 1);
+ OF_READ_SETTING(FG_MEM_HARD_HOT, "hot-bat-decidegc", rc, 1);
+ OF_READ_SETTING(FG_MEM_HARD_COLD, "cold-bat-decidegc", rc, 1);
+
+ if (of_find_property(node, "qcom,cold-hot-jeita-hysteresis", NULL)) {
+ int hard_hot = 0, soft_hot = 0, hard_cold = 0, soft_cold = 0;
+
+ rc = of_property_read_u32_array(node,
+ "qcom,cold-hot-jeita-hysteresis", temp, 2);
+ if (rc) {
+ pr_err("Error reading cold-hot-jeita-hysteresis rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ chip->jeita_hysteresis_support = true;
+ chip->cold_hysteresis = temp[0];
+ chip->hot_hysteresis = temp[1];
+ hard_hot = settings[FG_MEM_HARD_HOT].value;
+ soft_hot = settings[FG_MEM_SOFT_HOT].value;
+ hard_cold = settings[FG_MEM_HARD_COLD].value;
+ soft_cold = settings[FG_MEM_SOFT_COLD].value;
+ if (((hard_hot - chip->hot_hysteresis) < soft_hot) ||
+ ((hard_cold + chip->cold_hysteresis) > soft_cold)) {
+ chip->jeita_hysteresis_support = false;
+ pr_err("invalid hysteresis: hot_hysterresis = %d cold_hysteresis = %d\n",
+ chip->hot_hysteresis, chip->cold_hysteresis);
+ } else {
+ pr_debug("cold_hysteresis = %d, hot_hysteresis = %d\n",
+ chip->cold_hysteresis, chip->hot_hysteresis);
+ }
+ }
+
+ OF_READ_SETTING(FG_MEM_BCL_LM_THRESHOLD, "bcl-lm-threshold-ma",
+ rc, 1);
+ OF_READ_SETTING(FG_MEM_BCL_MH_THRESHOLD, "bcl-mh-threshold-ma",
+ rc, 1);
+ OF_READ_SETTING(FG_MEM_TERM_CURRENT, "fg-iterm-ma", rc, 1);
+ OF_READ_SETTING(FG_MEM_CHG_TERM_CURRENT, "fg-chg-iterm-ma", rc, 1);
+ OF_READ_SETTING(FG_MEM_CUTOFF_VOLTAGE, "fg-cutoff-voltage-mv", rc, 1);
+ data = of_get_property(chip->pdev->dev.of_node,
+ "qcom,thermal-coefficients", &len);
+ if (data && len == THERMAL_COEFF_N_BYTES) {
+ memcpy(chip->thermal_coefficients, data, len);
+ chip->use_thermal_coefficients = true;
+ }
+ OF_READ_SETTING(FG_MEM_RESUME_SOC, "resume-soc", rc, 1);
+ settings[FG_MEM_RESUME_SOC].value =
+ DIV_ROUND_CLOSEST(settings[FG_MEM_RESUME_SOC].value
+ * FULL_SOC_RAW, FULL_CAPACITY);
+ OF_READ_SETTING(FG_MEM_RESUME_SOC, "resume-soc-raw", rc, 1);
+ OF_READ_SETTING(FG_MEM_IRQ_VOLT_EMPTY, "irq-volt-empty-mv", rc, 1);
+ OF_READ_SETTING(FG_MEM_VBAT_EST_DIFF, "vbat-estimate-diff-mv", rc, 1);
+ OF_READ_SETTING(FG_MEM_DELTA_SOC, "fg-delta-soc", rc, 1);
+ OF_READ_SETTING(FG_MEM_BATT_LOW, "fg-vbatt-low-threshold", rc, 1);
+ OF_READ_SETTING(FG_MEM_THERM_DELAY, "fg-therm-delay-us", rc, 1);
+ OF_READ_PROPERTY(chip->learning_data.max_increment,
+ "cl-max-increment-deciperc", rc, 5);
+ OF_READ_PROPERTY(chip->learning_data.max_decrement,
+ "cl-max-decrement-deciperc", rc, 100);
+ OF_READ_PROPERTY(chip->learning_data.max_temp,
+ "cl-max-temp-decidegc", rc, 450);
+ OF_READ_PROPERTY(chip->learning_data.min_temp,
+ "cl-min-temp-decidegc", rc, 150);
+ OF_READ_PROPERTY(chip->learning_data.max_start_soc,
+ "cl-max-start-capacity", rc, 15);
+ OF_READ_PROPERTY(chip->learning_data.vbat_est_thr_uv,
+ "cl-vbat-est-thr-uv", rc, 40000);
+ OF_READ_PROPERTY(chip->evaluation_current,
+ "aging-eval-current-ma", rc,
+ DEFAULT_EVALUATION_CURRENT_MA);
+ OF_READ_PROPERTY(chip->cc_cv_threshold_mv,
+ "fg-cc-cv-threshold-mv", rc, 0);
+ if (of_property_read_bool(chip->pdev->dev.of_node,
+ "qcom,capacity-learning-on"))
+ chip->batt_aging_mode = FG_AGING_CC;
+ else if (of_property_read_bool(chip->pdev->dev.of_node,
+ "qcom,capacity-estimation-on"))
+ chip->batt_aging_mode = FG_AGING_ESR;
+ else
+ chip->batt_aging_mode = FG_AGING_NONE;
+ if (chip->batt_aging_mode == FG_AGING_CC) {
+ chip->learning_data.feedback_on
+ = of_property_read_bool(chip->pdev->dev.of_node,
+ "qcom,capacity-learning-feedback");
+ }
+ if (fg_debug_mask & FG_AGING)
+ pr_info("battery aging mode: %d\n", chip->batt_aging_mode);
+
+ /* Get the use-otp-profile property */
+ chip->use_otp_profile = of_property_read_bool(chip->pdev->dev.of_node,
+ "qcom,use-otp-profile");
+ chip->hold_soc_while_full
+ = of_property_read_bool(chip->pdev->dev.of_node,
+ "qcom,hold-soc-while-full");
+
+ sense_type = of_property_read_bool(chip->pdev->dev.of_node,
+ "qcom,ext-sense-type");
+ if (rc == 0) {
+ if (fg_sense_type < 0)
+ fg_sense_type = sense_type;
+
+ if (fg_debug_mask & FG_STATUS) {
+ if (fg_sense_type == INTERNAL_CURRENT_SENSE)
+ pr_info("Using internal sense\n");
+ else if (fg_sense_type == EXTERNAL_CURRENT_SENSE)
+ pr_info("Using external sense\n");
+ else
+ pr_info("Using default sense\n");
+ }
+ } else {
+ rc = 0;
+ }
+
+ chip->bad_batt_detection_en = of_property_read_bool(node,
+ "qcom,bad-battery-detection-enable");
+
+ chip->sw_rbias_ctrl = of_property_read_bool(node,
+ "qcom,sw-rbias-control");
+
+ chip->cyc_ctr.en = of_property_read_bool(node,
+ "qcom,cycle-counter-en");
+ if (chip->cyc_ctr.en)
+ chip->cyc_ctr.id = 1;
+
+ chip->esr_pulse_tune_en = of_property_read_bool(node,
+ "qcom,esr-pulse-tuning-en");
+
+ return rc;
+}
+
+static int fg_init_irqs(struct fg_chip *chip)
+{
+ int rc = 0;
+ unsigned int base;
+ struct device_node *child;
+ u8 subtype;
+ struct platform_device *pdev = chip->pdev;
+
+ if (of_get_available_child_count(pdev->dev.of_node) == 0) {
+ pr_err("no child nodes\n");
+ return -ENXIO;
+ }
+
+ for_each_available_child_of_node(pdev->dev.of_node, child) {
+ rc = of_property_read_u32(child, "reg", &base);
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "Couldn't find reg in node = %s rc = %d\n",
+ child->full_name, rc);
+ return rc;
+ }
+
+ if ((base == chip->vbat_adc_addr) ||
+ (base == chip->ibat_adc_addr) ||
+ (base == chip->tp_rev_addr))
+ continue;
+
+ rc = fg_read(chip, &subtype,
+ base + REG_OFFSET_PERP_SUBTYPE, 1);
+ if (rc) {
+ pr_err("Peripheral subtype read failed rc=%d\n", rc);
+ return rc;
+ }
+
+ switch (subtype) {
+ case FG_SOC:
+ chip->soc_irq[FULL_SOC].irq = of_irq_get_byname(child,
+ "full-soc");
+ if (chip->soc_irq[FULL_SOC].irq < 0) {
+ pr_err("Unable to get full-soc irq\n");
+ return chip->soc_irq[FULL_SOC].irq;
+ }
+ chip->soc_irq[EMPTY_SOC].irq = of_irq_get_byname(child,
+ "empty-soc");
+ if (chip->soc_irq[EMPTY_SOC].irq < 0) {
+ pr_err("Unable to get empty-soc irq\n");
+ return chip->soc_irq[EMPTY_SOC].irq;
+ }
+ chip->soc_irq[DELTA_SOC].irq = of_irq_get_byname(child,
+ "delta-soc");
+ if (chip->soc_irq[DELTA_SOC].irq < 0) {
+ pr_err("Unable to get delta-soc irq\n");
+ return chip->soc_irq[DELTA_SOC].irq;
+ }
+ chip->soc_irq[FIRST_EST_DONE].irq
+ = of_irq_get_byname(child, "first-est-done");
+ if (chip->soc_irq[FIRST_EST_DONE].irq < 0) {
+ pr_err("Unable to get first-est-done irq\n");
+ return chip->soc_irq[FIRST_EST_DONE].irq;
+ }
+
+ rc = devm_request_irq(chip->dev,
+ chip->soc_irq[FULL_SOC].irq,
+ fg_soc_irq_handler, IRQF_TRIGGER_RISING,
+ "full-soc", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d full-soc: %d\n",
+ chip->soc_irq[FULL_SOC].irq, rc);
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev,
+ chip->soc_irq[EMPTY_SOC].irq,
+ fg_empty_soc_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "empty-soc", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d empty-soc: %d\n",
+ chip->soc_irq[EMPTY_SOC].irq, rc);
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev,
+ chip->soc_irq[DELTA_SOC].irq,
+ fg_soc_irq_handler, IRQF_TRIGGER_RISING,
+ "delta-soc", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d delta-soc: %d\n",
+ chip->soc_irq[DELTA_SOC].irq, rc);
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev,
+ chip->soc_irq[FIRST_EST_DONE].irq,
+ fg_first_soc_irq_handler, IRQF_TRIGGER_RISING,
+ "first-est-done", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d delta-soc: %d\n",
+ chip->soc_irq[FIRST_EST_DONE].irq, rc);
+ return rc;
+ }
+
+ enable_irq_wake(chip->soc_irq[DELTA_SOC].irq);
+ enable_irq_wake(chip->soc_irq[FULL_SOC].irq);
+ enable_irq_wake(chip->soc_irq[EMPTY_SOC].irq);
+ break;
+ case FG_MEMIF:
+ chip->mem_irq[FG_MEM_AVAIL].irq
+ = of_irq_get_byname(child, "mem-avail");
+ if (chip->mem_irq[FG_MEM_AVAIL].irq < 0) {
+ pr_err("Unable to get mem-avail irq\n");
+ return chip->mem_irq[FG_MEM_AVAIL].irq;
+ }
+ rc = devm_request_irq(chip->dev,
+ chip->mem_irq[FG_MEM_AVAIL].irq,
+ fg_mem_avail_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ "mem-avail", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d mem-avail: %d\n",
+ chip->mem_irq[FG_MEM_AVAIL].irq, rc);
+ return rc;
+ }
+ break;
+ case FG_BATT:
+ chip->batt_irq[BATT_MISSING].irq
+ = of_irq_get_byname(child, "batt-missing");
+ if (chip->batt_irq[BATT_MISSING].irq < 0) {
+ pr_err("Unable to get batt-missing irq\n");
+ rc = -EINVAL;
+ return rc;
+ }
+ rc = devm_request_threaded_irq(chip->dev,
+ chip->batt_irq[BATT_MISSING].irq,
+ NULL,
+ fg_batt_missing_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "batt-missing", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d batt-missing: %d\n",
+ chip->batt_irq[BATT_MISSING].irq, rc);
+ return rc;
+ }
+ chip->batt_irq[VBATT_LOW].irq
+ = of_irq_get_byname(child, "vbatt-low");
+ if (chip->batt_irq[VBATT_LOW].irq < 0) {
+ pr_err("Unable to get vbatt-low irq\n");
+ rc = -EINVAL;
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev,
+ chip->batt_irq[VBATT_LOW].irq,
+ fg_vbatt_low_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ "vbatt-low", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d vbatt-low: %d\n",
+ chip->batt_irq[VBATT_LOW].irq, rc);
+ return rc;
+ }
+ disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = false;
+ break;
+ case FG_ADC:
+ break;
+ default:
+ pr_err("subtype %d\n", subtype);
+ return -EINVAL;
+ }
+ }
+
+ return rc;
+}
+
+static void fg_cleanup(struct fg_chip *chip)
+{
+ cancel_delayed_work_sync(&chip->update_sram_data);
+ cancel_delayed_work_sync(&chip->update_temp_work);
+ cancel_delayed_work_sync(&chip->update_jeita_setting);
+ cancel_delayed_work_sync(&chip->check_empty_work);
+ cancel_delayed_work_sync(&chip->batt_profile_init);
+ alarm_try_to_cancel(&chip->fg_cap_learning_alarm);
+ cancel_work_sync(&chip->rslow_comp_work);
+ cancel_work_sync(&chip->set_resume_soc_work);
+ cancel_work_sync(&chip->fg_cap_learning_work);
+ cancel_work_sync(&chip->dump_sram);
+ cancel_work_sync(&chip->status_change_work);
+ cancel_work_sync(&chip->cycle_count_work);
+ cancel_work_sync(&chip->update_esr_work);
+ cancel_work_sync(&chip->sysfs_restart_work);
+ cancel_work_sync(&chip->gain_comp_work);
+ cancel_work_sync(&chip->init_work);
+ cancel_work_sync(&chip->charge_full_work);
+ cancel_work_sync(&chip->esr_extract_config_work);
+ mutex_destroy(&chip->rslow_comp.lock);
+ mutex_destroy(&chip->rw_lock);
+ mutex_destroy(&chip->cyc_ctr.lock);
+ mutex_destroy(&chip->learning_data.learning_lock);
+ mutex_destroy(&chip->sysfs_restart_lock);
+ wakeup_source_trash(&chip->resume_soc_wakeup_source.source);
+ wakeup_source_trash(&chip->empty_check_wakeup_source.source);
+ wakeup_source_trash(&chip->memif_wakeup_source.source);
+ wakeup_source_trash(&chip->profile_wakeup_source.source);
+ wakeup_source_trash(&chip->update_temp_wakeup_source.source);
+ wakeup_source_trash(&chip->update_sram_wakeup_source.source);
+ wakeup_source_trash(&chip->gain_comp_wakeup_source.source);
+ wakeup_source_trash(&chip->capacity_learning_wakeup_source.source);
+ wakeup_source_trash(&chip->esr_extract_wakeup_source.source);
+}
+
+static int fg_remove(struct platform_device *pdev)
+{
+ struct fg_chip *chip = dev_get_drvdata(&pdev->dev);
+
+ fg_cleanup(chip);
+ dev_set_drvdata(&pdev->dev, NULL);
+ return 0;
+}
+
+static int fg_memif_data_open(struct inode *inode, struct file *file)
+{
+ struct fg_log_buffer *log;
+ struct fg_trans *trans;
+ u8 *data_buf;
+
+ size_t logbufsize = SZ_4K;
+ size_t databufsize = SZ_4K;
+
+ if (!dbgfs_data.chip) {
+ pr_err("Not initialized data\n");
+ return -EINVAL;
+ }
+
+ /* Per file "transaction" data */
+ trans = kzalloc(sizeof(*trans), GFP_KERNEL);
+ if (!trans) {
+ pr_err("Unable to allocate memory for transaction data\n");
+ return -ENOMEM;
+ }
+
+ /* Allocate log buffer */
+ log = kzalloc(logbufsize, GFP_KERNEL);
+
+ if (!log) {
+ kfree(trans);
+ pr_err("Unable to allocate memory for log buffer\n");
+ return -ENOMEM;
+ }
+
+ log->rpos = 0;
+ log->wpos = 0;
+ log->len = logbufsize - sizeof(*log);
+
+ /* Allocate data buffer */
+ data_buf = kzalloc(databufsize, GFP_KERNEL);
+
+ if (!data_buf) {
+ kfree(trans);
+ kfree(log);
+ pr_err("Unable to allocate memory for data buffer\n");
+ return -ENOMEM;
+ }
+
+ trans->log = log;
+ trans->data = data_buf;
+ trans->cnt = dbgfs_data.cnt;
+ trans->addr = dbgfs_data.addr;
+ trans->chip = dbgfs_data.chip;
+ trans->offset = trans->addr;
+ mutex_init(&trans->memif_dfs_lock);
+
+ file->private_data = trans;
+ return 0;
+}
+
+static int fg_memif_dfs_close(struct inode *inode, struct file *file)
+{
+ struct fg_trans *trans = file->private_data;
+
+ if (trans && trans->log && trans->data) {
+ file->private_data = NULL;
+ mutex_destroy(&trans->memif_dfs_lock);
+ kfree(trans->log);
+ kfree(trans->data);
+ kfree(trans);
+ }
+
+ return 0;
+}
+
+/**
+ * print_to_log: format a string and place into the log buffer
+ * @log: The log buffer to place the result into.
+ * @fmt: The format string to use.
+ * @...: The arguments for the format string.
+ *
+ * The return value is the number of characters written to @log buffer
+ * not including the trailing '\0'.
+ */
+static int print_to_log(struct fg_log_buffer *log, const char *fmt, ...)
+{
+ va_list args;
+ int cnt;
+ char *buf = &log->data[log->wpos];
+ size_t size = log->len - log->wpos;
+
+ va_start(args, fmt);
+ cnt = vscnprintf(buf, size, fmt, args);
+ va_end(args);
+
+ log->wpos += cnt;
+ return cnt;
+}
+
+/**
+ * write_next_line_to_log: Writes a single "line" of data into the log buffer
+ * @trans: Pointer to SRAM transaction data.
+ * @offset: SRAM address offset to start reading from.
+ * @pcnt: Pointer to 'cnt' variable. Indicates the number of bytes to read.
+ *
+ * The 'offset' is a 12-bit SRAM address.
+ *
+ * On a successful read, the pcnt is decremented by the number of data
+ * bytes read from the SRAM. When the cnt reaches 0, all requested bytes have
+ * been read.
+ */
+static int
+write_next_line_to_log(struct fg_trans *trans, int offset, size_t *pcnt)
+{
+ int i, j;
+ u8 data[ITEMS_PER_LINE];
+ struct fg_log_buffer *log = trans->log;
+
+ int cnt = 0;
+ int padding = offset % ITEMS_PER_LINE;
+ int items_to_read = min(ARRAY_SIZE(data) - padding, *pcnt);
+ int items_to_log = min(ITEMS_PER_LINE, padding + items_to_read);
+
+ /* Buffer needs enough space for an entire line */
+ if ((log->len - log->wpos) < MAX_LINE_LENGTH)
+ goto done;
+
+ memcpy(data, trans->data + (offset - trans->addr), items_to_read);
+
+ *pcnt -= items_to_read;
+
+ /* Each line starts with the aligned offset (12-bit address) */
+ cnt = print_to_log(log, "%3.3X ", offset & 0xfff);
+ if (cnt == 0)
+ goto done;
+
+ /* If the offset is unaligned, add padding to right justify items */
+ for (i = 0; i < padding; ++i) {
+ cnt = print_to_log(log, "-- ");
+ if (cnt == 0)
+ goto done;
+ }
+
+ /* Log the data items */
+ for (j = 0; i < items_to_log; ++i, ++j) {
+ cnt = print_to_log(log, "%2.2X ", data[j]);
+ if (cnt == 0)
+ goto done;
+ }
+
+ /* If the last character was a space, then replace it with a newline */
+ if (log->wpos > 0 && log->data[log->wpos - 1] == ' ')
+ log->data[log->wpos - 1] = '\n';
+
+done:
+ return cnt;
+}
+
+/**
+ * get_log_data - reads data from SRAM and saves to the log buffer
+ * @trans: Pointer to SRAM transaction data.
+ *
+ * Returns the number of "items" read or SPMI error code for read failures.
+ */
+static int get_log_data(struct fg_trans *trans)
+{
+ int cnt, rc;
+ int last_cnt;
+ int items_read;
+ int total_items_read = 0;
+ u32 offset = trans->offset;
+ size_t item_cnt = trans->cnt;
+ struct fg_log_buffer *log = trans->log;
+
+ if (item_cnt == 0)
+ return 0;
+
+ if (item_cnt > SZ_4K) {
+ pr_err("Reading too many bytes\n");
+ return -EINVAL;
+ }
+
+ rc = fg_mem_read(trans->chip, trans->data,
+ trans->addr, trans->cnt, 0, 0);
+ if (rc) {
+ pr_err("dump failed: rc = %d\n", rc);
+ return rc;
+ }
+ /* Reset the log buffer 'pointers' */
+ log->wpos = log->rpos = 0;
+
+ /* Keep reading data until the log is full */
+ do {
+ last_cnt = item_cnt;
+ cnt = write_next_line_to_log(trans, offset, &item_cnt);
+ items_read = last_cnt - item_cnt;
+ offset += items_read;
+ total_items_read += items_read;
+ } while (cnt && item_cnt > 0);
+
+ /* Adjust the transaction offset and count */
+ trans->cnt = item_cnt;
+ trans->offset += total_items_read;
+
+ return total_items_read;
+}
+
+/**
+ * fg_memif_dfs_reg_read: reads value(s) from SRAM and fills the user's
+ * buffer with a byte array (coded as string)
+ * @file: file pointer
+ * @buf: where to put the result
+ * @count: maximum space available in @buf
+ * @ppos: starting position
+ * @return number of user bytes read, or negative error value
+ */
+static ssize_t fg_memif_dfs_reg_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct fg_trans *trans = file->private_data;
+ struct fg_log_buffer *log = trans->log;
+ size_t ret;
+ size_t len;
+
+ mutex_lock(&trans->memif_dfs_lock);
+ /* Is the log buffer empty? */
+ if (log->rpos >= log->wpos) {
+ if (get_log_data(trans) <= 0) {
+ len = 0;
+ goto unlock_mutex;
+ }
+ }
+
+ len = min(count, log->wpos - log->rpos);
+
+ ret = copy_to_user(buf, &log->data[log->rpos], len);
+ if (ret == len) {
+ pr_err("error copy sram register values to user\n");
+ len = -EFAULT;
+ goto unlock_mutex;
+ }
+
+ /* 'ret' is the number of bytes not copied */
+ len -= ret;
+
+ *ppos += len;
+ log->rpos += len;
+
+unlock_mutex:
+ mutex_unlock(&trans->memif_dfs_lock);
+ return len;
+}
+
+/**
+ * fg_memif_dfs_reg_write: write user's byte array (coded as string) to SRAM.
+ * @file: file pointer
+ * @buf: user data to be written.
+ * @count: maximum space available in @buf
+ * @ppos: starting position
+ * @return number of user byte written, or negative error value
+ */
+static ssize_t fg_memif_dfs_reg_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int bytes_read;
+ int data;
+ int pos = 0;
+ int cnt = 0;
+ u8 *values;
+ size_t ret = 0;
+ char *kbuf;
+ u32 offset;
+
+ struct fg_trans *trans = file->private_data;
+
+ mutex_lock(&trans->memif_dfs_lock);
+ offset = trans->offset;
+
+ /* Make a copy of the user data */
+ kbuf = kmalloc(count + 1, GFP_KERNEL);
+ if (!kbuf) {
+ ret = -ENOMEM;
+ goto unlock_mutex;
+ }
+
+ ret = copy_from_user(kbuf, buf, count);
+ if (ret == count) {
+ pr_err("failed to copy data from user\n");
+ ret = -EFAULT;
+ goto free_buf;
+ }
+
+ count -= ret;
+ *ppos += count;
+ kbuf[count] = '\0';
+
+ /* Override the text buffer with the raw data */
+ values = kbuf;
+
+ /* Parse the data in the buffer. It should be a string of numbers */
+ while ((pos < count) &&
+ sscanf(kbuf + pos, "%i%n", &data, &bytes_read) == 1) {
+ /*
+ * We shouldn't be receiving a string of characters that
+ * exceeds a size of 5 to keep this functionally correct.
+ * Also, we should make sure that pos never gets overflowed
+ * beyond the limit.
+ */
+ if (bytes_read > 5 || bytes_read > INT_MAX - pos) {
+ cnt = 0;
+ ret = -EINVAL;
+ break;
+ }
+ pos += bytes_read;
+ values[cnt++] = data & 0xff;
+ }
+
+ if (!cnt)
+ goto free_buf;
+
+ pr_info("address %x, count %d\n", offset, cnt);
+ /* Perform the write(s) */
+
+ ret = fg_mem_write(trans->chip, values, offset,
+ cnt, 0, 0);
+ if (ret) {
+ pr_err("SPMI write failed, err = %zu\n", ret);
+ } else {
+ ret = count;
+ trans->offset += cnt > 4 ? 4 : cnt;
+ }
+
+free_buf:
+ kfree(kbuf);
+unlock_mutex:
+ mutex_unlock(&trans->memif_dfs_lock);
+ return ret;
+}
+
+static const struct file_operations fg_memif_dfs_reg_fops = {
+ .open = fg_memif_data_open,
+ .release = fg_memif_dfs_close,
+ .read = fg_memif_dfs_reg_read,
+ .write = fg_memif_dfs_reg_write,
+};
+
+/**
+ * fg_dfs_create_fs: create debugfs file system.
+ * @return pointer to root directory or NULL if failed to create fs
+ */
+static struct dentry *fg_dfs_create_fs(void)
+{
+ struct dentry *root, *file;
+
+ pr_debug("Creating FG_MEM debugfs file-system\n");
+ root = debugfs_create_dir(DFS_ROOT_NAME, NULL);
+ if (IS_ERR_OR_NULL(root)) {
+ pr_err("Error creating top level directory err:%ld",
+ (long)root);
+ if (PTR_ERR(root) == -ENODEV)
+ pr_err("debugfs is not enabled in the kernel");
+ return NULL;
+ }
+
+ dbgfs_data.help_msg.size = strlen(dbgfs_data.help_msg.data);
+
+ file = debugfs_create_blob("help", S_IRUGO, root, &dbgfs_data.help_msg);
+ if (!file) {
+ pr_err("error creating help entry\n");
+ goto err_remove_fs;
+ }
+ return root;
+
+err_remove_fs:
+ debugfs_remove_recursive(root);
+ return NULL;
+}
+
+/**
+ * fg_dfs_get_root: return a pointer to FG debugfs root directory.
+ * @return a pointer to the existing root directory, creating it first
+ * if it does not exist. The files that configure the SRAM transaction
+ * (address and count) live under this directory. Returns NULL on
+ * failure.
+ */
+struct dentry *fg_dfs_get_root(void)
+{
+ if (dbgfs_data.root)
+ return dbgfs_data.root;
+
+ if (mutex_lock_interruptible(&dbgfs_data.lock) < 0)
+ return NULL;
+ /* critical section */
+ if (!dbgfs_data.root) { /* double checking idiom */
+ dbgfs_data.root = fg_dfs_create_fs();
+ }
+ mutex_unlock(&dbgfs_data.lock);
+ return dbgfs_data.root;
+}
+
+/*
+ * fg_dfs_create: adds the fg_mem debugfs entries (count, address, data)
+ * @return zero on success
+ */
+int fg_dfs_create(struct fg_chip *chip)
+{
+ struct dentry *root;
+ struct dentry *file;
+
+ root = fg_dfs_get_root();
+ if (!root)
+ return -ENOENT;
+
+ dbgfs_data.chip = chip;
+
+ file = debugfs_create_u32("count", DFS_MODE, root, &(dbgfs_data.cnt));
+ if (!file) {
+ pr_err("error creating 'count' entry\n");
+ goto err_remove_fs;
+ }
+
+ file = debugfs_create_x32("address", DFS_MODE,
+ root, &(dbgfs_data.addr));
+ if (!file) {
+ pr_err("error creating 'address' entry\n");
+ goto err_remove_fs;
+ }
+
+ file = debugfs_create_file("data", DFS_MODE, root, &dbgfs_data,
+ &fg_memif_dfs_reg_fops);
+ if (!file) {
+ pr_err("error creating 'data' entry\n");
+ goto err_remove_fs;
+ }
+
+ return 0;
+
+err_remove_fs:
+ debugfs_remove_recursive(root);
+ return -ENOMEM;
+}
+
+#define EXTERNAL_SENSE_OFFSET_REG 0x41C
+#define EXT_OFFSET_TRIM_REG 0xF8
+#define SEC_ACCESS_REG 0xD0
+#define SEC_ACCESS_UNLOCK 0xA5
+#define BCL_TRIM_REV_FIXED 12
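+/*
+ * On parts whose trim revision predates BCL_TRIM_REV_FIXED, copy the
+ * external-sense offset trim from SRAM into the secure EXT_OFFSET_TRIM
+ * register (unlocked through SEC_ACCESS).
+ */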
+static int bcl_trim_workaround(struct fg_chip *chip)
+{
+ u8 reg;
+ int rc;
+
+ if (chip->tp_rev_addr == 0)
+ return 0;
+
+ rc = fg_read(chip, &reg, chip->tp_rev_addr, 1);
+ if (rc) {
+ pr_err("Failed to read tp reg, rc = %d\n", rc);
+ return rc;
+ }
+ if (reg >= BCL_TRIM_REV_FIXED) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("workaround not applied, tp_rev = %d\n", reg);
+ return 0;
+ }
+
+ rc = fg_mem_read(chip, &reg, EXTERNAL_SENSE_OFFSET_REG, 1, 2, 0);
+ if (rc) {
+ pr_err("Failed to read ext sense offset trim, rc = %d\n", rc);
+ return rc;
+ }
+ rc = fg_masked_write(chip, chip->soc_base + SEC_ACCESS_REG,
+ SEC_ACCESS_UNLOCK, SEC_ACCESS_UNLOCK, 1);
+
+ rc |= fg_masked_write(chip, chip->soc_base + EXT_OFFSET_TRIM_REG,
+ 0xFF, reg, 1);
+ if (rc) {
+ pr_err("Failed to write ext sense offset trim, rc = %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+#define FG_ALG_SYSCTL_1 0x4B0
+#define SOC_CNFG 0x450
+#define SOC_DELTA_OFFSET 3
+#define DELTA_SOC_PERCENT 1
+#define I_TERM_QUAL_BIT BIT(1)
+#define PATCH_NEG_CURRENT_BIT BIT(3)
+#define KI_COEFF_PRED_FULL_ADDR 0x408
+#define KI_COEFF_PRED_FULL_4_0_MSB 0x88
+#define KI_COEFF_PRED_FULL_4_0_LSB 0x00
+#define TEMP_FRAC_SHIFT_REG 0x4A4
+#define FG_ADC_CONFIG_REG 0x4B8
+#define FG_BCL_CONFIG_OFFSET 0x3
+#define BCL_FORCED_HPM_IN_CHARGE BIT(2)
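+/*
+ * SRAM configuration common to all supported PMICs: termination and
+ * cutoff thresholds, resume SOC, sense type, delta-SOC and vbatt-low
+ * setpoints, thermal coefficients, plus a read-back of the current ESR
+ * tuning state when ESR pulse tuning is enabled.
+ */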
+static int fg_common_hw_init(struct fg_chip *chip)
+{
+ int rc;
+ int resume_soc_raw;
+ u8 val;
+
+ update_iterm(chip);
+ update_cutoff_voltage(chip);
+ update_irq_volt_empty(chip);
+ update_bcl_thresholds(chip);
+
+ resume_soc_raw = settings[FG_MEM_RESUME_SOC].value;
+ if (resume_soc_raw > 0) {
+ rc = fg_set_resume_soc(chip, resume_soc_raw);
+ if (rc) {
+ pr_err("Couldn't set resume SOC for FG\n");
+ return rc;
+ }
+ } else {
+ pr_info("FG auto recharge threshold not specified in DT\n");
+ }
+
+ if (fg_sense_type >= 0) {
+ rc = set_prop_sense_type(chip, fg_sense_type);
+ if (rc) {
+ pr_err("failed to config sense type %d rc=%d\n",
+ fg_sense_type, rc);
+ return rc;
+ }
+ }
+
+ rc = fg_mem_masked_write(chip, settings[FG_MEM_DELTA_SOC].address, 0xFF,
+ soc_to_setpoint(settings[FG_MEM_DELTA_SOC].value),
+ settings[FG_MEM_DELTA_SOC].offset);
+ if (rc) {
+ pr_err("failed to write delta soc rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_mem_masked_write(chip, settings[FG_MEM_BATT_LOW].address, 0xFF,
+ batt_to_setpoint_8b(settings[FG_MEM_BATT_LOW].value),
+ settings[FG_MEM_BATT_LOW].offset);
+ if (rc) {
+ pr_err("failed to write Vbatt_low rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_mem_masked_write(chip, settings[FG_MEM_THERM_DELAY].address,
+ THERM_DELAY_MASK,
+ therm_delay_to_setpoint(settings[FG_MEM_THERM_DELAY].value),
+ settings[FG_MEM_THERM_DELAY].offset);
+ if (rc) {
+ pr_err("failed to write therm_delay rc=%d\n", rc);
+ return rc;
+ }
+
+ if (chip->use_thermal_coefficients) {
+ fg_mem_write(chip, chip->thermal_coefficients,
+ THERMAL_COEFF_ADDR, THERMAL_COEFF_N_BYTES,
+ THERMAL_COEFF_OFFSET, 0);
+ }
+
+ if (!chip->sw_rbias_ctrl) {
+ rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
+ BATT_TEMP_CNTRL_MASK,
+ TEMP_SENSE_ALWAYS_BIT,
+ BATT_TEMP_OFFSET);
+ if (rc) {
+ pr_err("failed to write BATT_TEMP_OFFSET rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ /* Read the cycle counter back from FG SRAM */
+ if (chip->cyc_ctr.en)
+ restore_cycle_counter(chip);
+
+ if (chip->esr_pulse_tune_en) {
+ rc = fg_mem_read(chip, &val, SYS_CFG_1_REG, 1, SYS_CFG_1_OFFSET,
+ 0);
+ if (rc) {
+ pr_err("unable to read sys_cfg_1: %d\n", rc);
+ return rc;
+ }
+
+ if (!(val & ENABLE_ESR_PULSE_VAL))
+ chip->esr_extract_disabled = true;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("ESR extract is %sabled\n",
+ chip->esr_extract_disabled ? "dis" : "en");
+
+ rc = fg_mem_read(chip, &val, CBITS_INPUT_FILTER_REG, 1,
+ CBITS_RMEAS1_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to read cbits_input_filter_reg: %d\n",
+ rc);
+ return rc;
+ }
+
+ if (val & (IMPTR_FAST_TIME_SHIFT | IMPTR_LONG_TIME_SHIFT))
+ chip->imptr_pulse_slow_en = true;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("imptr_pulse_slow is %sabled\n",
+ chip->imptr_pulse_slow_en ? "en" : "dis");
+
+ rc = fg_mem_read(chip, &val, RSLOW_CFG_REG, 1, RSLOW_CFG_OFFSET,
+ 0);
+ if (rc) {
+ pr_err("unable to read rslow cfg: %d\n", rc);
+ return rc;
+ }
+
+ if (val & RSLOW_CFG_ON_VAL)
+ chip->rslow_comp.active = true;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("rslow_comp active is %sabled\n",
+ chip->rslow_comp.active ? "en" : "dis");
+ }
+
+ return 0;
+}
+
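+/*
+ * PMI8994-specific init: set the negative-current patch bit, redo the BCL
+ * trim, force HPM while charging, and program the temperature/OCV
+ * constants, Ki coefficient and default ESR filter value.
+ */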
+static int fg_8994_hw_init(struct fg_chip *chip)
+{
+ int rc = 0;
+ u8 data[4];
+ u64 esr_value;
+
+ rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
+ PATCH_NEG_CURRENT_BIT,
+ PATCH_NEG_CURRENT_BIT,
+ EXTERNAL_SENSE_OFFSET);
+ if (rc) {
+ pr_err("failed to write patch current bit rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = bcl_trim_workaround(chip);
+ if (rc) {
+ pr_err("failed to redo bcl trim rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_mem_masked_write(chip, FG_ADC_CONFIG_REG,
+ BCL_FORCED_HPM_IN_CHARGE,
+ BCL_FORCED_HPM_IN_CHARGE,
+ FG_BCL_CONFIG_OFFSET);
+ if (rc) {
+ pr_err("failed to force hpm in charge rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_mem_masked_write(chip, FG_ALG_SYSCTL_1, I_TERM_QUAL_BIT, 0, 0);
+
+ data[0] = 0xA2;
+ data[1] = 0x12;
+
+ rc = fg_mem_write(chip, data, TEMP_FRAC_SHIFT_REG, 2, 2, 0);
+ if (rc) {
+ pr_err("failed to write temp ocv constants rc=%d\n", rc);
+ return rc;
+ }
+
+ data[0] = KI_COEFF_PRED_FULL_4_0_LSB;
+ data[1] = KI_COEFF_PRED_FULL_4_0_MSB;
+ fg_mem_write(chip, data, KI_COEFF_PRED_FULL_ADDR, 2, 2, 0);
+
+ esr_value = ESR_DEFAULT_VALUE;
+ rc = fg_mem_write(chip, (u8 *)&esr_value, MAXRSCHANGE_REG, 8,
+ ESR_VALUE_OFFSET, 0);
+ if (rc)
+ pr_err("failed to write default ESR value rc=%d\n", rc);
+ else
+ pr_debug("set default value to esr filter\n");
+
+ return 0;
+}
+
+#define FG_USBID_CONFIG_OFFSET 0x2
+#define DISABLE_USBID_DETECT_BIT BIT(0)
+static int fg_8996_hw_init(struct fg_chip *chip)
+{
+ int rc;
+
+ rc = fg_mem_masked_write(chip, FG_ADC_CONFIG_REG,
+ BCL_FORCED_HPM_IN_CHARGE,
+ BCL_FORCED_HPM_IN_CHARGE,
+ FG_BCL_CONFIG_OFFSET);
+ if (rc) {
+ pr_err("failed to force hpm in charge rc=%d\n", rc);
+ return rc;
+ }
+
+ /* enable usbid conversions for PMi8996 V1.0 */
+ if (chip->pmic_revision[REVID_DIG_MAJOR] == 1
+ && chip->pmic_revision[REVID_ANA_MAJOR] == 0) {
+ rc = fg_mem_masked_write(chip, FG_ADC_CONFIG_REG,
+ DISABLE_USBID_DETECT_BIT,
+ 0, FG_USBID_CONFIG_OFFSET);
+ if (rc) {
+ pr_err("failed to enable usbid conversions: %d\n", rc);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+static int fg_8950_hw_init(struct fg_chip *chip)
+{
+ int rc;
+
+ rc = fg_mem_masked_write(chip, FG_ADC_CONFIG_REG,
+ BCL_FORCED_HPM_IN_CHARGE,
+ BCL_FORCED_HPM_IN_CHARGE,
+ FG_BCL_CONFIG_OFFSET);
+ if (rc)
+ pr_err("failed to force hpm in charge rc=%d\n", rc);
+
+ return rc;
+}
+
+static int fg_hw_init(struct fg_chip *chip)
+{
+ int rc = 0;
+
+ rc = fg_common_hw_init(chip);
+ if (rc) {
+ pr_err("Unable to initialize FG HW rc=%d\n", rc);
+ return rc;
+ }
+
+ /* add PMIC specific hw init */
+ switch (chip->pmic_subtype) {
+ case PMI8994:
+ rc = fg_8994_hw_init(chip);
+ chip->wa_flag |= PULSE_REQUEST_WA;
+ break;
+ case PMI8996:
+ rc = fg_8996_hw_init(chip);
+ /* Setup workaround flag based on PMIC type */
+ if (fg_sense_type == INTERNAL_CURRENT_SENSE)
+ chip->wa_flag |= IADC_GAIN_COMP_WA;
+ if (chip->pmic_revision[REVID_DIG_MAJOR] > 1)
+ chip->wa_flag |= USE_CC_SOC_REG;
+
+ break;
+ case PMI8950:
+ case PMI8937:
+ rc = fg_8950_hw_init(chip);
+ /* Setup workaround flag based on PMIC type */
+ chip->wa_flag |= BCL_HI_POWER_FOR_CHGLED_WA;
+ if (fg_sense_type == INTERNAL_CURRENT_SENSE)
+ chip->wa_flag |= IADC_GAIN_COMP_WA;
+ if (chip->pmic_revision[REVID_DIG_MAJOR] > 1)
+ chip->wa_flag |= USE_CC_SOC_REG;
+
+ break;
+ }
+ if (rc)
+ pr_err("Unable to initialize PMIC specific FG HW rc=%d\n", rc);
+
+ pr_debug("wa_flag=0x%x\n", chip->wa_flag);
+
+ return rc;
+}
+
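+/*
+ * Select the SRAM access offsets based on the FG digital major revision;
+ * DIG_REV_3 parts additionally support IMA, whose interrupt is retargeted
+ * to fire on IACS_READY instead of end-of-transaction.
+ */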
+#define DIG_MINOR 0x0
+#define DIG_MAJOR 0x1
+#define ANA_MINOR 0x2
+#define ANA_MAJOR 0x3
+#define IACS_INTR_SRC_SLCT BIT(3)
+static int fg_setup_memif_offset(struct fg_chip *chip)
+{
+ int rc;
+
+ rc = fg_read(chip, chip->revision, chip->mem_base + DIG_MINOR, 4);
+ if (rc) {
+ pr_err("Unable to read FG revision rc=%d\n", rc);
+ return rc;
+ }
+
+ switch (chip->revision[DIG_MAJOR]) {
+ case DIG_REV_1:
+ case DIG_REV_2:
+ chip->offset = offset[0].address;
+ break;
+ case DIG_REV_3:
+ chip->offset = offset[1].address;
+ chip->ima_supported = true;
+ break;
+ default:
+ pr_err("Digital Major rev=%d not supported\n",
+ chip->revision[DIG_MAJOR]);
+ return -EINVAL;
+ }
+
+ if (chip->ima_supported) {
+ /*
+ * Change the FG_MEM_INT interrupt to track IACS_READY
+ * condition instead of end-of-transaction. This makes sure
+ * that the next transaction starts only after the hw is ready.
+ */
+ rc = fg_masked_write(chip,
+ chip->mem_base + MEM_INTF_IMA_CFG, IACS_INTR_SRC_SLCT,
+ IACS_INTR_SRC_SLCT, 1);
+ if (rc) {
+ pr_err("failed to configure interrupt source %d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int fg_detect_pmic_type(struct fg_chip *chip)
+{
+ struct pmic_revid_data *pmic_rev_id;
+ struct device_node *revid_dev_node;
+
+ revid_dev_node = of_parse_phandle(chip->pdev->dev.of_node,
+ "qcom,pmic-revid", 0);
+ if (!revid_dev_node) {
+ pr_err("Missing qcom,pmic-revid property - driver failed\n");
+ return -EINVAL;
+ }
+
+ pmic_rev_id = get_revid_data(revid_dev_node);
+ if (IS_ERR_OR_NULL(pmic_rev_id)) {
+ pr_err("Unable to get pmic_revid rc=%ld\n",
+ PTR_ERR(pmic_rev_id));
+ /*
+ * the revid peripheral must be registered, any failure
+ * here only indicates that the rev-id module has not
+ * probed yet.
+ */
+ return -EPROBE_DEFER;
+ }
+
+ switch (pmic_rev_id->pmic_subtype) {
+ case PMI8994:
+ case PMI8950:
+ case PMI8937:
+ case PMI8996:
+ chip->pmic_subtype = pmic_rev_id->pmic_subtype;
+ chip->pmic_revision[REVID_RESERVED] = pmic_rev_id->rev1;
+ chip->pmic_revision[REVID_VARIANT] = pmic_rev_id->rev2;
+ chip->pmic_revision[REVID_ANA_MAJOR] = pmic_rev_id->rev3;
+ chip->pmic_revision[REVID_DIG_MAJOR] = pmic_rev_id->rev4;
+ break;
+ default:
+ pr_err("PMIC subtype %d not supported\n",
+ pmic_rev_id->pmic_subtype);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define INIT_JEITA_DELAY_MS 1000
+
+static void delayed_init_work(struct work_struct *work)
+{
+ u8 reg[2];
+ int rc;
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ init_work);
+
+ /* hold memory access until initialization finishes */
+ fg_mem_lock(chip);
+
+ rc = fg_hw_init(chip);
+ if (rc) {
+ pr_err("failed to hw init rc = %d\n", rc);
+ fg_mem_release(chip);
+ fg_cleanup(chip);
+ return;
+ }
+ /* release memory access before update_sram_data is called */
+ fg_mem_release(chip);
+
+ schedule_delayed_work(
+ &chip->update_jeita_setting,
+ msecs_to_jiffies(INIT_JEITA_DELAY_MS));
+
+ if (chip->last_sram_update_time == 0)
+ update_sram_data_work(&chip->update_sram_data.work);
+
+ if (chip->last_temp_update_time == 0)
+ update_temp_data(&chip->update_temp_work.work);
+
+ if (!chip->use_otp_profile)
+ schedule_delayed_work(&chip->batt_profile_init, 0);
+
+ if (chip->wa_flag & IADC_GAIN_COMP_WA) {
+ /* read default gain config */
+ rc = fg_mem_read(chip, reg, K_VCOR_REG, 2, DEF_GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read default gain rc=%d\n", rc);
+ goto done;
+ }
+
+ if (reg[1] || reg[0]) {
+ /*
+ * Default gain register has valid value:
+ * - write to gain register.
+ */
+ rc = fg_mem_write(chip, reg, GAIN_REG, 2,
+ GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to write gain rc=%d\n", rc);
+ goto done;
+ }
+ } else {
+ /*
+ * Default gain register is invalid:
+ * - read gain register for default gain value
+ * - write to default gain register.
+ */
+ rc = fg_mem_read(chip, reg, GAIN_REG, 2,
+ GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read gain rc=%d\n", rc);
+ goto done;
+ }
+ rc = fg_mem_write(chip, reg, K_VCOR_REG, 2,
+ DEF_GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to write default gain rc=%d\n",
+ rc);
+ goto done;
+ }
+ }
+
+ chip->iadc_comp_data.dfl_gain_reg[0] = reg[0];
+ chip->iadc_comp_data.dfl_gain_reg[1] = reg[1];
+ chip->iadc_comp_data.dfl_gain = half_float(reg);
+ chip->input_present = is_input_present(chip);
+ chip->otg_present = is_otg_present(chip);
+ chip->init_done = true;
+
+ pr_debug("IADC gain initial config reg_val 0x%x%x gain %lld\n",
+ reg[1], reg[0], chip->iadc_comp_data.dfl_gain);
+ }
+
+ pr_debug("FG: HW_init success\n");
+
+ return;
+done:
+ fg_cleanup(chip);
+}
+
+static int fg_probe(struct platform_device *pdev)
+{
+ struct device *dev = &(pdev->dev);
+ struct fg_chip *chip;
+ struct device_node *child;
+ unsigned int base;
+ u8 subtype, reg;
+ int rc = 0;
+ struct power_supply_config bms_psy_cfg;
+
+ if (!pdev) {
+ pr_err("no valid spmi pointer\n");
+ return -ENODEV;
+ }
+
+ if (!pdev->dev.of_node) {
+ pr_err("device node missing\n");
+ return -ENODEV;
+ }
+
+ chip = devm_kzalloc(dev, sizeof(struct fg_chip), GFP_KERNEL);
+ if (chip == NULL) {
+ pr_err("Can't allocate fg_chip\n");
+ return -ENOMEM;
+ }
+ chip->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!chip->regmap) {
+ dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+ return -EINVAL;
+ }
+
+ chip->pdev = pdev;
+ chip->dev = &(pdev->dev);
+
+ wakeup_source_init(&chip->empty_check_wakeup_source.source,
+ "qpnp_fg_empty_check");
+ wakeup_source_init(&chip->memif_wakeup_source.source,
+ "qpnp_fg_memaccess");
+ wakeup_source_init(&chip->profile_wakeup_source.source,
+ "qpnp_fg_profile");
+ wakeup_source_init(&chip->update_temp_wakeup_source.source,
+ "qpnp_fg_update_temp");
+ wakeup_source_init(&chip->update_sram_wakeup_source.source,
+ "qpnp_fg_update_sram");
+ wakeup_source_init(&chip->resume_soc_wakeup_source.source,
+ "qpnp_fg_set_resume_soc");
+ wakeup_source_init(&chip->gain_comp_wakeup_source.source,
+ "qpnp_fg_gain_comp");
+ wakeup_source_init(&chip->capacity_learning_wakeup_source.source,
+ "qpnp_fg_cap_learning");
+ wakeup_source_init(&chip->esr_extract_wakeup_source.source,
+ "qpnp_fg_esr_extract");
+ mutex_init(&chip->rw_lock);
+ mutex_init(&chip->cyc_ctr.lock);
+ mutex_init(&chip->learning_data.learning_lock);
+ mutex_init(&chip->rslow_comp.lock);
+ mutex_init(&chip->sysfs_restart_lock);
+ INIT_DELAYED_WORK(&chip->update_jeita_setting, update_jeita_setting);
+ INIT_DELAYED_WORK(&chip->update_sram_data, update_sram_data_work);
+ INIT_DELAYED_WORK(&chip->update_temp_work, update_temp_data);
+ INIT_DELAYED_WORK(&chip->check_empty_work, check_empty_work);
+ INIT_DELAYED_WORK(&chip->batt_profile_init, batt_profile_init);
+ INIT_WORK(&chip->rslow_comp_work, rslow_comp_work);
+ INIT_WORK(&chip->fg_cap_learning_work, fg_cap_learning_work);
+ INIT_WORK(&chip->dump_sram, dump_sram);
+ INIT_WORK(&chip->status_change_work, status_change_work);
+ INIT_WORK(&chip->cycle_count_work, update_cycle_count);
+ INIT_WORK(&chip->battery_age_work, battery_age_work);
+ INIT_WORK(&chip->update_esr_work, update_esr_value);
+ INIT_WORK(&chip->set_resume_soc_work, set_resume_soc_work);
+ INIT_WORK(&chip->sysfs_restart_work, sysfs_restart_work);
+ INIT_WORK(&chip->init_work, delayed_init_work);
+ INIT_WORK(&chip->charge_full_work, charge_full_work);
+ INIT_WORK(&chip->gain_comp_work, iadc_gain_comp_work);
+ INIT_WORK(&chip->bcl_hi_power_work, bcl_hi_power_work);
+ INIT_WORK(&chip->esr_extract_config_work, esr_extract_config_work);
+ alarm_init(&chip->fg_cap_learning_alarm, ALARM_BOOTTIME,
+ fg_cap_learning_alarm_cb);
+ init_completion(&chip->sram_access_granted);
+ init_completion(&chip->sram_access_revoked);
+ complete_all(&chip->sram_access_revoked);
+ init_completion(&chip->batt_id_avail);
+ init_completion(&chip->first_soc_done);
+ dev_set_drvdata(&pdev->dev, chip);
+
+ if (of_get_available_child_count(pdev->dev.of_node) == 0) {
+ pr_err("no child nodes\n");
+ rc = -ENXIO;
+ goto of_init_fail;
+ }
+
+ for_each_available_child_of_node(pdev->dev.of_node, child) {
+ rc = of_property_read_u32(child, "reg", &base);
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "Couldn't find reg in node = %s rc = %d\n",
+ child->full_name, rc);
+ goto of_init_fail;
+ }
+
+ if (strcmp("qcom,fg-adc-vbat", child->name) == 0) {
+ chip->vbat_adc_addr = base;
+ continue;
+ } else if (strcmp("qcom,fg-adc-ibat", child->name) == 0) {
+ chip->ibat_adc_addr = base;
+ continue;
+ } else if (strcmp("qcom,revid-tp-rev", child->name) == 0) {
+ chip->tp_rev_addr = base;
+ continue;
+ }
+
+ rc = fg_read(chip, &subtype,
+ base + REG_OFFSET_PERP_SUBTYPE, 1);
+ if (rc) {
+ pr_err("Peripheral subtype read failed rc=%d\n", rc);
+ goto of_init_fail;
+ }
+
+ switch (subtype) {
+ case FG_SOC:
+ chip->soc_base = base;
+ break;
+ case FG_MEMIF:
+ chip->mem_base = base;
+ break;
+ case FG_BATT:
+ chip->batt_base = base;
+ break;
+ default:
+ pr_err("Invalid peripheral subtype=0x%x\n", subtype);
+ rc = -EINVAL;
+ }
+ }
+
+ rc = fg_detect_pmic_type(chip);
+ if (rc) {
+ pr_err("Unable to detect PMIC type rc=%d\n", rc);
+ goto of_init_fail;
+ }
+
+ rc = fg_setup_memif_offset(chip);
+ if (rc) {
+ pr_err("Unable to setup mem_if offsets rc=%d\n", rc);
+ goto of_init_fail;
+ }
+
+ rc = fg_of_init(chip);
+ if (rc) {
+ pr_err("failed to parse devicetree rc%d\n", rc);
+ goto of_init_fail;
+ }
+
+ if (chip->jeita_hysteresis_support) {
+ rc = fg_init_batt_temp_state(chip);
+ if (rc) {
+ pr_err("failed to get battery status rc%d\n", rc);
+ goto of_init_fail;
+ }
+ }
+
+ /* check if the first estimate is already finished at this time */
+ if (is_first_est_done(chip))
+ complete_all(&chip->first_soc_done);
+
+ reg = 0xFF;
+ rc = fg_write(chip, &reg, INT_EN_CLR(chip->mem_base), 1);
+ if (rc) {
+ pr_err("failed to clear interrupts %d\n", rc);
+ goto of_init_fail;
+ }
+
+ rc = fg_init_irqs(chip);
+ if (rc) {
+ pr_err("failed to request interrupts %d\n", rc);
+ goto cancel_work;
+ }
+
+ chip->batt_type = default_batt_type;
+
+ chip->bms_psy_d.name = "bms";
+ chip->bms_psy_d.type = POWER_SUPPLY_TYPE_BMS;
+ chip->bms_psy_d.properties = fg_power_props;
+ chip->bms_psy_d.num_properties = ARRAY_SIZE(fg_power_props);
+ chip->bms_psy_d.get_property = fg_power_get_property;
+ chip->bms_psy_d.set_property = fg_power_set_property;
+ chip->bms_psy_d.external_power_changed = fg_external_power_changed;
+ chip->bms_psy_d.property_is_writeable = fg_property_is_writeable;
+
+ bms_psy_cfg.drv_data = chip;
+ bms_psy_cfg.supplied_to = fg_supplicants;
+ bms_psy_cfg.num_supplicants = ARRAY_SIZE(fg_supplicants);
+ bms_psy_cfg.of_node = NULL;
+ chip->bms_psy = devm_power_supply_register(chip->dev,
+ &chip->bms_psy_d,
+ &bms_psy_cfg);
+ if (IS_ERR(chip->bms_psy)) {
+ pr_err("batt failed to register rc = %ld\n",
+ PTR_ERR(chip->bms_psy));
+ goto of_init_fail;
+ }
+ chip->power_supply_registered = true;
+ /*
+ * Just initialize the batt_psy_name here. Power supply
+ * will be obtained later.
+ */
+ chip->batt_psy_name = "battery";
+
+ if (chip->mem_base) {
+ rc = fg_dfs_create(chip);
+ if (rc < 0) {
+ pr_err("failed to create debugfs rc = %d\n", rc);
+ goto cancel_work;
+ }
+ }
+
+ schedule_work(&chip->init_work);
+
+ pr_info("FG Probe success - FG Revision DIG:%d.%d ANA:%d.%d PMIC subtype=%d\n",
+ chip->revision[DIG_MAJOR], chip->revision[DIG_MINOR],
+ chip->revision[ANA_MAJOR], chip->revision[ANA_MINOR],
+ chip->pmic_subtype);
+
+ return rc;
+
+cancel_work:
+ cancel_delayed_work_sync(&chip->update_jeita_setting);
+ cancel_delayed_work_sync(&chip->update_sram_data);
+ cancel_delayed_work_sync(&chip->update_temp_work);
+ cancel_delayed_work_sync(&chip->check_empty_work);
+ cancel_delayed_work_sync(&chip->batt_profile_init);
+ alarm_try_to_cancel(&chip->fg_cap_learning_alarm);
+ cancel_work_sync(&chip->set_resume_soc_work);
+ cancel_work_sync(&chip->fg_cap_learning_work);
+ cancel_work_sync(&chip->dump_sram);
+ cancel_work_sync(&chip->status_change_work);
+ cancel_work_sync(&chip->cycle_count_work);
+ cancel_work_sync(&chip->update_esr_work);
+ cancel_work_sync(&chip->rslow_comp_work);
+ cancel_work_sync(&chip->sysfs_restart_work);
+ cancel_work_sync(&chip->gain_comp_work);
+ cancel_work_sync(&chip->init_work);
+ cancel_work_sync(&chip->charge_full_work);
+ cancel_work_sync(&chip->bcl_hi_power_work);
+ cancel_work_sync(&chip->esr_extract_config_work);
+of_init_fail:
+ mutex_destroy(&chip->rslow_comp.lock);
+ mutex_destroy(&chip->rw_lock);
+ mutex_destroy(&chip->cyc_ctr.lock);
+ mutex_destroy(&chip->learning_data.learning_lock);
+ mutex_destroy(&chip->sysfs_restart_lock);
+ wakeup_source_trash(&chip->resume_soc_wakeup_source.source);
+ wakeup_source_trash(&chip->empty_check_wakeup_source.source);
+ wakeup_source_trash(&chip->memif_wakeup_source.source);
+ wakeup_source_trash(&chip->profile_wakeup_source.source);
+ wakeup_source_trash(&chip->update_temp_wakeup_source.source);
+ wakeup_source_trash(&chip->update_sram_wakeup_source.source);
+ wakeup_source_trash(&chip->gain_comp_wakeup_source.source);
+ wakeup_source_trash(&chip->capacity_learning_wakeup_source.source);
+ wakeup_source_trash(&chip->esr_extract_wakeup_source.source);
+ return rc;
+}
+
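+/*
+ * Reschedule the temperature and SRAM update work for whatever remains of
+ * their periods; the timestamps are kept in seconds, hence the /1000 on
+ * the ms periods and the *1000 when converting back to jiffies.
+ */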
+static void check_and_update_sram_data(struct fg_chip *chip)
+{
+ unsigned long current_time = 0, next_update_time, time_left;
+
+ get_current_time(&current_time);
+
+ next_update_time = chip->last_temp_update_time
+ + (TEMP_PERIOD_UPDATE_MS / 1000);
+
+ if (next_update_time > current_time)
+ time_left = next_update_time - current_time;
+ else
+ time_left = 0;
+
+ schedule_delayed_work(
+ &chip->update_temp_work, msecs_to_jiffies(time_left * 1000));
+
+ next_update_time = chip->last_sram_update_time
+ + (fg_sram_update_period_ms / 1000);
+
+ if (next_update_time > current_time)
+ time_left = next_update_time - current_time;
+ else
+ time_left = 0;
+
+ schedule_delayed_work(
+ &chip->update_sram_data, msecs_to_jiffies(time_left * 1000));
+}
+
+static int fg_suspend(struct device *dev)
+{
+ struct fg_chip *chip = dev_get_drvdata(dev);
+
+ if (!chip->sw_rbias_ctrl)
+ return 0;
+
+ cancel_delayed_work(&chip->update_temp_work);
+ cancel_delayed_work(&chip->update_sram_data);
+
+ return 0;
+}
+
+static int fg_resume(struct device *dev)
+{
+ struct fg_chip *chip = dev_get_drvdata(dev);
+
+ if (!chip->sw_rbias_ctrl)
+ return 0;
+
+ check_and_update_sram_data(chip);
+ return 0;
+}
+
+static const struct dev_pm_ops qpnp_fg_pm_ops = {
+ .suspend = fg_suspend,
+ .resume = fg_resume,
+};
+
+static int fg_sense_type_set(const char *val, const struct kernel_param *kp)
+{
+ int rc;
+ struct power_supply *bms_psy;
+ struct fg_chip *chip;
+ int old_fg_sense_type = fg_sense_type;
+
+ rc = param_set_int(val, kp);
+ if (rc) {
+ pr_err("Unable to set fg_sense_type: %d\n", rc);
+ return rc;
+ }
+
+ if (fg_sense_type != 0 && fg_sense_type != 1) {
+ pr_err("Bad value %d\n", fg_sense_type);
+ fg_sense_type = old_fg_sense_type;
+ return -EINVAL;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("fg_sense_type set to %d\n", fg_sense_type);
+
+ bms_psy = power_supply_get_by_name("bms");
+ if (!bms_psy) {
+ pr_err("bms psy not found\n");
+ return 0;
+ }
+
+ chip = power_supply_get_drvdata(bms_psy);
+ rc = set_prop_sense_type(chip, fg_sense_type);
+ return rc;
+}
+
+static struct kernel_param_ops fg_sense_type_ops = {
+ .set = fg_sense_type_set,
+ .get = param_get_int,
+};
+
+module_param_cb(sense_type, &fg_sense_type_ops, &fg_sense_type, 0644);
+
+static int fg_restart_set(const char *val, const struct kernel_param *kp)
+{
+ struct power_supply *bms_psy;
+ struct fg_chip *chip;
+
+ bms_psy = power_supply_get_by_name("bms");
+ if (!bms_psy) {
+ pr_err("bms psy not found\n");
+ return 0;
+ }
+ chip = power_supply_get_drvdata(bms_psy);
+
+ mutex_lock(&chip->sysfs_restart_lock);
+ if (fg_restart != 0) {
+ mutex_unlock(&chip->sysfs_restart_lock);
+ return 0;
+ }
+ fg_restart = 1;
+ mutex_unlock(&chip->sysfs_restart_lock);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("fuel gauge restart initiated from sysfs...\n");
+
+ schedule_work(&chip->sysfs_restart_work);
+ return 0;
+}
+
+static struct kernel_param_ops fg_restart_ops = {
+ .set = fg_restart_set,
+ .get = param_get_int,
+};
+
+module_param_cb(restart, &fg_restart_ops, &fg_restart, 0644);
+
+static struct platform_driver fg_driver = {
+ .driver = {
+ .name = QPNP_FG_DEV_NAME,
+ .of_match_table = fg_match_table,
+ .pm = &qpnp_fg_pm_ops,
+ },
+ .probe = fg_probe,
+ .remove = fg_remove,
+};
+
+static int __init fg_init(void)
+{
+ return platform_driver_register(&fg_driver);
+}
+
+static void __exit fg_exit(void)
+{
+ platform_driver_unregister(&fg_driver);
+}
+
+module_init(fg_init);
+module_exit(fg_exit);
+
+MODULE_DESCRIPTION("QPNP Fuel Gauge Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" QPNP_FG_DEV_NAME);
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index ea205100644d..4beaddff47b3 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -188,6 +188,11 @@ static int __weak_chg_icl_ua = 500000;
module_param_named(
weak_chg_icl_ua, __weak_chg_icl_ua, int, S_IRUSR | S_IWUSR);
+static int __try_sink_enabled = 1;
+module_param_named(
+ try_sink_enabled, __try_sink_enabled, int, 0600
+);
+
#define MICRO_1P5A 1500000
#define MICRO_P1A 100000
#define OTG_DEFAULT_DEGLITCH_TIME_MS 50
@@ -1658,6 +1663,18 @@ static int smb2_init_hw(struct smb2 *chip)
return rc;
}
+ /*
+ * allow the DRP.DFP time to exceed its nominal period by tPDdebounce.
+ */
+ rc = smblib_masked_write(chg, TAPER_TIMER_SEL_CFG_REG,
+ TYPEC_DRP_DFP_TIME_CFG_BIT,
+ TYPEC_DRP_DFP_TIME_CFG_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure DRP.DFP time rc=%d\n",
+ rc);
+ return rc;
+ }
+
/* configure float charger options */
switch (chip->dt.float_option) {
case 1:
@@ -2236,6 +2253,7 @@ static int smb2_probe(struct platform_device *pdev)
chg->dev = &pdev->dev;
chg->param = v1_params;
chg->debug_mask = &__debug_mask;
+ chg->try_sink_enabled = &__try_sink_enabled;
chg->weak_chg_icl_ua = &__weak_chg_icl_ua;
chg->mode = PARALLEL_MASTER;
chg->irq_info = smb2_irqs;
diff --git a/drivers/power/supply/qcom/qpnp-smbcharger.c b/drivers/power/supply/qcom/qpnp-smbcharger.c
new file mode 100644
index 000000000000..a2863dcf7389
--- /dev/null
+++ b/drivers/power/supply/qcom/qpnp-smbcharger.c
@@ -0,0 +1,8472 @@
+/* Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "SMBCHG: %s: " fmt, __func__
+
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+#include <linux/gpio.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/power_supply.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/bitops.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/machine.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/debugfs.h>
+#include <linux/leds.h>
+#include <linux/rtc.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/batterydata-lib.h>
+#include <linux/of_batterydata.h>
+#include <linux/msm_bcl.h>
+#include <linux/ktime.h>
+#include <linux/extcon.h>
+#include <linux/pmic-voter.h>
+
+/* Mask/Bit helpers */
+#define _SMB_MASK(BITS, POS) \
+ ((unsigned char)(((1 << (BITS)) - 1) << (POS)))
+#define SMB_MASK(LEFT_BIT_POS, RIGHT_BIT_POS) \
+ _SMB_MASK((LEFT_BIT_POS) - (RIGHT_BIT_POS) + 1, \
+ (RIGHT_BIT_POS))
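+/* e.g. SMB_MASK(2, 1) expands to 0x06, i.e. bits 2..1 set */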
+/* Config registers */
+struct smbchg_regulator {
+ struct regulator_desc rdesc;
+ struct regulator_dev *rdev;
+};
+
+struct parallel_usb_cfg {
+ struct power_supply *psy;
+ int min_current_thr_ma;
+ int min_9v_current_thr_ma;
+ int allowed_lowering_ma;
+ int current_max_ma;
+ bool avail;
+ struct mutex lock;
+ int initial_aicl_ma;
+ ktime_t last_disabled;
+ bool enabled_once;
+};
+
+struct ilim_entry {
+ int vmin_uv;
+ int vmax_uv;
+ int icl_pt_ma;
+ int icl_lv_ma;
+ int icl_hv_ma;
+};
+
+struct ilim_map {
+ int num;
+ struct ilim_entry *entries;
+};
+
+struct smbchg_version_tables {
+ const int *dc_ilim_ma_table;
+ int dc_ilim_ma_len;
+ const int *usb_ilim_ma_table;
+ int usb_ilim_ma_len;
+ const int *iterm_ma_table;
+ int iterm_ma_len;
+ const int *fcc_comp_table;
+ int fcc_comp_len;
+ const int *aicl_rerun_period_table;
+ int aicl_rerun_period_len;
+ int rchg_thr_mv;
+};
+
+struct smbchg_chip {
+ struct device *dev;
+ struct platform_device *pdev;
+ struct regmap *regmap;
+ int schg_version;
+
+ /* peripheral register address bases */
+ u16 chgr_base;
+ u16 bat_if_base;
+ u16 usb_chgpth_base;
+ u16 dc_chgpth_base;
+ u16 otg_base;
+ u16 misc_base;
+
+ int fake_battery_soc;
+ u8 revision[4];
+
+ /* configuration parameters */
+ int iterm_ma;
+ int usb_max_current_ma;
+ int typec_current_ma;
+ int dc_max_current_ma;
+ int dc_target_current_ma;
+ int cfg_fastchg_current_ma;
+ int fastchg_current_ma;
+ int vfloat_mv;
+ int fastchg_current_comp;
+ int float_voltage_comp;
+ int resume_delta_mv;
+ int safety_time;
+ int prechg_safety_time;
+ int bmd_pin_src;
+ int jeita_temp_hard_limit;
+ int aicl_rerun_period_s;
+ bool use_vfloat_adjustments;
+ bool iterm_disabled;
+ bool bmd_algo_disabled;
+ bool soft_vfloat_comp_disabled;
+ bool chg_enabled;
+ bool charge_unknown_battery;
+ bool chg_inhibit_en;
+ bool chg_inhibit_source_fg;
+ bool low_volt_dcin;
+ bool cfg_chg_led_support;
+ bool cfg_chg_led_sw_ctrl;
+ bool vbat_above_headroom;
+ bool force_aicl_rerun;
+ bool hvdcp3_supported;
+ bool restricted_charging;
+ bool skip_usb_suspend_for_fake_battery;
+ bool hvdcp_not_supported;
+ bool otg_pinctrl;
+ u8 original_usbin_allowance;
+ struct parallel_usb_cfg parallel;
+ struct delayed_work parallel_en_work;
+ struct dentry *debug_root;
+ struct smbchg_version_tables tables;
+
+ /* wipower params */
+ struct ilim_map wipower_default;
+ struct ilim_map wipower_pt;
+ struct ilim_map wipower_div2;
+ struct qpnp_vadc_chip *vadc_dev;
+ bool wipower_dyn_icl_avail;
+ struct ilim_entry current_ilim;
+ struct mutex wipower_config;
+ bool wipower_configured;
+ struct qpnp_adc_tm_btm_param param;
+
+ /* flash current prediction */
+ int rpara_uohm;
+ int rslow_uohm;
+ int vled_max_uv;
+
+ /* vfloat adjustment */
+ int max_vbat_sample;
+ int n_vbat_samples;
+
+ /* status variables */
+ int wake_reasons;
+ int previous_soc;
+ int usb_online;
+ bool dc_present;
+ bool usb_present;
+ bool batt_present;
+ int otg_retries;
+ ktime_t otg_enable_time;
+ bool aicl_deglitch_short;
+ bool safety_timer_en;
+ bool aicl_complete;
+ bool usb_ov_det;
+ bool otg_pulse_skip_dis;
+ const char *battery_type;
+ enum power_supply_type usb_supply_type;
+ bool very_weak_charger;
+ bool parallel_charger_detected;
+ bool chg_otg_enabled;
+ bool flash_triggered;
+ bool flash_active;
+ bool icl_disabled;
+ u32 wa_flags;
+ int usb_icl_delta;
+ bool typec_dfp;
+ unsigned int usb_current_max;
+ unsigned int usb_health;
+
+ /* jeita and temperature */
+ bool batt_hot;
+ bool batt_cold;
+ bool batt_warm;
+ bool batt_cool;
+ unsigned int thermal_levels;
+ unsigned int therm_lvl_sel;
+ unsigned int *thermal_mitigation;
+
+ /* irqs */
+ int batt_hot_irq;
+ int batt_warm_irq;
+ int batt_cool_irq;
+ int batt_cold_irq;
+ int batt_missing_irq;
+ int vbat_low_irq;
+ int chg_hot_irq;
+ int chg_term_irq;
+ int taper_irq;
+ bool taper_irq_enabled;
+ struct mutex taper_irq_lock;
+ int recharge_irq;
+ int fastchg_irq;
+ int wdog_timeout_irq;
+ int power_ok_irq;
+ int dcin_uv_irq;
+ int usbin_uv_irq;
+ int usbin_ov_irq;
+ int src_detect_irq;
+ int otg_fail_irq;
+ int otg_oc_irq;
+ int aicl_done_irq;
+ int usbid_change_irq;
+ int chg_error_irq;
+ bool enable_aicl_wake;
+
+ /* psy */
+ struct power_supply_desc usb_psy_d;
+ struct power_supply *usb_psy;
+ struct power_supply_desc batt_psy_d;
+ struct power_supply *batt_psy;
+ struct power_supply_desc dc_psy_d;
+ struct power_supply *dc_psy;
+ struct power_supply *bms_psy;
+ struct power_supply *typec_psy;
+ int dc_psy_type;
+ const char *bms_psy_name;
+ const char *battery_psy_name;
+
+ struct regulator *dpdm_reg;
+ struct smbchg_regulator otg_vreg;
+ struct smbchg_regulator ext_otg_vreg;
+ struct work_struct usb_set_online_work;
+ struct delayed_work vfloat_adjust_work;
+ struct delayed_work hvdcp_det_work;
+ spinlock_t sec_access_lock;
+ struct mutex therm_lvl_lock;
+ struct mutex usb_set_online_lock;
+ struct mutex pm_lock;
+ /* aicl deglitch workaround */
+ unsigned long first_aicl_seconds;
+ int aicl_irq_count;
+ struct mutex usb_status_lock;
+ bool hvdcp_3_det_ignore_uv;
+ struct completion src_det_lowered;
+ struct completion src_det_raised;
+ struct completion usbin_uv_lowered;
+ struct completion usbin_uv_raised;
+ int pulse_cnt;
+ struct led_classdev led_cdev;
+ bool skip_usb_notification;
+ u32 vchg_adc_channel;
+ struct qpnp_vadc_chip *vchg_vadc_dev;
+
+ /* voters */
+ struct votable *fcc_votable;
+ struct votable *usb_icl_votable;
+ struct votable *dc_icl_votable;
+ struct votable *usb_suspend_votable;
+ struct votable *dc_suspend_votable;
+ struct votable *battchg_suspend_votable;
+ struct votable *hw_aicl_rerun_disable_votable;
+ struct votable *hw_aicl_rerun_enable_indirect_votable;
+ struct votable *aicl_deglitch_short_votable;
+
+ /* extcon for VBUS / ID notification to USB */
+ struct extcon_dev *extcon;
+};
+
+enum qpnp_schg {
+ QPNP_SCHG,
+ QPNP_SCHG_LITE,
+};
+
+static char *version_str[] = {
+ [QPNP_SCHG] = "SCHG",
+ [QPNP_SCHG_LITE] = "SCHG_LITE",
+};
+
+enum pmic_subtype {
+ PMI8994 = 10,
+ PMI8950 = 17,
+ PMI8996 = 19,
+ PMI8937 = 55,
+};
+
+enum smbchg_wa {
+ SMBCHG_AICL_DEGLITCH_WA = BIT(0),
+ SMBCHG_HVDCP_9V_EN_WA = BIT(1),
+ SMBCHG_USB100_WA = BIT(2),
+ SMBCHG_BATT_OV_WA = BIT(3),
+ SMBCHG_CC_ESR_WA = BIT(4),
+ SMBCHG_FLASH_ICL_DISABLE_WA = BIT(5),
+ SMBCHG_RESTART_WA = BIT(6),
+ SMBCHG_FLASH_BUCK_SWITCH_FREQ_WA = BIT(7),
+};
+
+enum print_reason {
+ PR_REGISTER = BIT(0),
+ PR_INTERRUPT = BIT(1),
+ PR_STATUS = BIT(2),
+ PR_DUMP = BIT(3),
+ PR_PM = BIT(4),
+ PR_MISC = BIT(5),
+ PR_WIPOWER = BIT(6),
+ PR_TYPEC = BIT(7),
+};
+
+enum wake_reason {
+ PM_PARALLEL_CHECK = BIT(0),
+ PM_REASON_VFLOAT_ADJUST = BIT(1),
+ PM_ESR_PULSE = BIT(2),
+ PM_PARALLEL_TAPER = BIT(3),
+ PM_DETECT_HVDCP = BIT(4),
+};
+
+/* fcc_voters */
+#define ESR_PULSE_FCC_VOTER "ESR_PULSE_FCC_VOTER"
+#define BATT_TYPE_FCC_VOTER "BATT_TYPE_FCC_VOTER"
+#define RESTRICTED_CHG_FCC_VOTER "RESTRICTED_CHG_FCC_VOTER"
+
+/* ICL VOTERS */
+#define PSY_ICL_VOTER "PSY_ICL_VOTER"
+#define THERMAL_ICL_VOTER "THERMAL_ICL_VOTER"
+#define HVDCP_ICL_VOTER "HVDCP_ICL_VOTER"
+#define USER_ICL_VOTER "USER_ICL_VOTER"
+#define WEAK_CHARGER_ICL_VOTER "WEAK_CHARGER_ICL_VOTER"
+#define SW_AICL_ICL_VOTER "SW_AICL_ICL_VOTER"
+#define CHG_SUSPEND_WORKAROUND_ICL_VOTER "CHG_SUSPEND_WORKAROUND_ICL_VOTER"
+
+/* USB SUSPEND VOTERS */
+/* userspace has suspended charging altogether */
+#define USER_EN_VOTER "USER_EN_VOTER"
+/*
+ * this specific path has been suspended through the power supply
+ * framework
+ */
+#define POWER_SUPPLY_EN_VOTER "POWER_SUPPLY_EN_VOTER"
+/*
+ * the usb driver has suspended this path by setting a current limit
+ * of less than 2 mA
+ */
+#define USB_EN_VOTER "USB_EN_VOTER"
+/*
+ * the thermal daemon can suspend a charge path when the system
+ * temperature levels rise
+ */
+#define THERMAL_EN_VOTER "THERMAL_EN_VOTER"
+/*
+ * an external OTG supply is being used, suspend charge path so the
+ * charger does not accidentally try to charge from the external supply.
+ */
+#define OTG_EN_VOTER "OTG_EN_VOTER"
+/*
+ * the charger is very weak, do not draw any current from it
+ */
+#define WEAK_CHARGER_EN_VOTER "WEAK_CHARGER_EN_VOTER"
+/*
+ * fake battery voter, used when the battery ID resistance is around 7.5 kohm
+ */
+#define FAKE_BATTERY_EN_VOTER "FAKE_BATTERY_EN_VOTER"
+
+/* battchg_enable_voters */
+ /* userspace has disabled battery charging */
+#define BATTCHG_USER_EN_VOTER "BATTCHG_USER_EN_VOTER"
+ /* battery charging disabled while loading battery profiles */
+#define BATTCHG_UNKNOWN_BATTERY_EN_VOTER "BATTCHG_UNKNOWN_BATTERY_EN_VOTER"
+
+/* hw_aicl_rerun_enable_indirect_voters */
+/* enabled via device tree */
+#define DEFAULT_CONFIG_HW_AICL_VOTER "DEFAULT_CONFIG_HW_AICL_VOTER"
+/* Varb workaround voter */
+#define VARB_WORKAROUND_VOTER "VARB_WORKAROUND_VOTER"
+/* SHUTDOWN workaround voter */
+#define SHUTDOWN_WORKAROUND_VOTER "SHUTDOWN_WORKAROUND_VOTER"
+
+/* hw_aicl_rerun_disable_voters */
+/* the results from enabling clients */
+#define HW_AICL_RERUN_ENABLE_INDIRECT_VOTER \
+ "HW_AICL_RERUN_ENABLE_INDIRECT_VOTER"
+/* Weak charger voter */
+#define WEAK_CHARGER_HW_AICL_VOTER "WEAK_CHARGER_HW_AICL_VOTER"
+
+/* aicl_short_deglitch_voters */
+/* Varb workaround voter */
+#define VARB_WORKAROUND_SHORT_DEGLITCH_VOTER \
+ "VARB_WRKARND_SHORT_DEGLITCH_VOTER"
+/* QC 2.0 */
+#define HVDCP_SHORT_DEGLITCH_VOTER "HVDCP_SHORT_DEGLITCH_VOTER"
+
+static const unsigned int smbchg_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_NONE,
+};
+
+static int smbchg_debug_mask;
+module_param_named(
+ debug_mask, smbchg_debug_mask, int, S_IRUSR | S_IWUSR
+);
+
+static int smbchg_parallel_en = 1;
+module_param_named(
+ parallel_en, smbchg_parallel_en, int, S_IRUSR | S_IWUSR
+);
+
+static int smbchg_main_chg_fcc_percent = 50;
+module_param_named(
+ main_chg_fcc_percent, smbchg_main_chg_fcc_percent,
+ int, S_IRUSR | S_IWUSR
+);
+
+static int smbchg_main_chg_icl_percent = 60;
+module_param_named(
+ main_chg_icl_percent, smbchg_main_chg_icl_percent,
+ int, S_IRUSR | S_IWUSR
+);
+
+static int smbchg_default_hvdcp_icl_ma = 1800;
+module_param_named(
+ default_hvdcp_icl_ma, smbchg_default_hvdcp_icl_ma,
+ int, S_IRUSR | S_IWUSR
+);
+
+static int smbchg_default_hvdcp3_icl_ma = 3000;
+module_param_named(
+ default_hvdcp3_icl_ma, smbchg_default_hvdcp3_icl_ma,
+ int, S_IRUSR | S_IWUSR
+);
+
+static int smbchg_default_dcp_icl_ma = 1800;
+module_param_named(
+ default_dcp_icl_ma, smbchg_default_dcp_icl_ma,
+ int, S_IRUSR | S_IWUSR
+);
+
+static int wipower_dyn_icl_en;
+module_param_named(
+ dynamic_icl_wipower_en, wipower_dyn_icl_en,
+ int, S_IRUSR | S_IWUSR
+);
+
+static int wipower_dcin_interval = ADC_MEAS1_INTERVAL_2P0MS;
+module_param_named(
+ wipower_dcin_interval, wipower_dcin_interval,
+ int, S_IRUSR | S_IWUSR
+);
+
+#define WIPOWER_DEFAULT_HYSTERISIS_UV 250000
+static int wipower_dcin_hyst_uv = WIPOWER_DEFAULT_HYSTERISIS_UV;
+module_param_named(
+ wipower_dcin_hyst_uv, wipower_dcin_hyst_uv,
+ int, S_IRUSR | S_IWUSR
+);
+
+#define pr_smb(reason, fmt, ...) \
+ do { \
+ if (smbchg_debug_mask & (reason)) \
+ pr_info(fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define pr_smb_rt(reason, fmt, ...) \
+ do { \
+ if (smbchg_debug_mask & (reason)) \
+ pr_info_ratelimited(fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+static int smbchg_read(struct smbchg_chip *chip, u8 *val,
+ u16 addr, int count)
+{
+ int rc = 0;
+ struct platform_device *pdev = chip->pdev;
+
+ if (addr == 0) {
+ dev_err(chip->dev, "addr cannot be zero addr=0x%02x sid=0x%02x\n",
+ addr, to_spmi_device(pdev->dev.parent)->usid);
+ return -EINVAL;
+ }
+
+ rc = regmap_bulk_read(chip->regmap, addr, val, count);
+ if (rc) {
+ dev_err(chip->dev, "spmi read failed addr=0x%02x sid=0x%02x rc=%d\n",
+ addr, to_spmi_device(pdev->dev.parent)->usid,
+ rc);
+ return rc;
+ }
+ return 0;
+}
+
+/*
+ * Writes to the register specified by the base address, limited by the
+ * bit mask.
+ *
+ * Avoid using this function directly if possible; use smbchg_masked_write()
+ * instead.
+ *
+ * The sec_access_lock must be held across all register writes, and this
+ * function does not take it. If this function is used, hold the spinlock
+ * yourself, or in-flight secure-access writes may fail.
+ */
+static int smbchg_masked_write_raw(struct smbchg_chip *chip, u16 base, u8 mask,
+ u8 val)
+{
+ int rc;
+
+ rc = regmap_update_bits(chip->regmap, base, mask, val);
+ if (rc) {
+ dev_err(chip->dev, "spmi write failed: addr=%03X, rc=%d\n",
+ base, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/*
+ * Writes to the register specified by the base address, limited by the
+ * bit mask.
+ *
+ * This function holds a spin lock to ensure secure-access register writes
+ * go through. If the secure-access unlock register is armed, any unrelated
+ * register write can disarm it, causing the next secure write to fail.
+ *
+ * Note: do not use this for sec_access registers; use
+ * smbchg_sec_masked_write() below instead.
+ */
+static int smbchg_masked_write(struct smbchg_chip *chip, u16 base, u8 mask,
+ u8 val)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&chip->sec_access_lock, flags);
+ rc = smbchg_masked_write_raw(chip, base, mask, val);
+ spin_unlock_irqrestore(&chip->sec_access_lock, flags);
+
+ return rc;
+}
+
+/*
+ * Unlocks sec access and writes to the register specified.
+ *
+ * This function holds a spin lock to exclude other register writes while
+ * the two writes are taking place.
+ */
+#define SEC_ACCESS_OFFSET 0xD0
+#define SEC_ACCESS_VALUE 0xA5
+#define PERIPHERAL_MASK 0xFF
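+/* e.g. a register at 0x13F2 is unlocked via SEC_ACCESS at 0x1300 + 0xD0 */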
+static int smbchg_sec_masked_write(struct smbchg_chip *chip, u16 base, u8 mask,
+ u8 val)
+{
+ unsigned long flags;
+ int rc;
+ u16 peripheral_base = base & (~PERIPHERAL_MASK);
+
+ spin_lock_irqsave(&chip->sec_access_lock, flags);
+
+ rc = smbchg_masked_write_raw(chip, peripheral_base + SEC_ACCESS_OFFSET,
+ SEC_ACCESS_VALUE, SEC_ACCESS_VALUE);
+ if (rc) {
+ dev_err(chip->dev, "Unable to unlock sec_access: %d", rc);
+ goto out;
+ }
+
+ rc = smbchg_masked_write_raw(chip, base, mask, val);
+
+out:
+ spin_unlock_irqrestore(&chip->sec_access_lock, flags);
+ return rc;
+}
+
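+/*
+ * Reference-count the device's wakeup state with a bitmask of reasons:
+ * pm_stay_awake() when the first reason is set, pm_relax() when the last
+ * one is cleared.
+ */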
+static void smbchg_stay_awake(struct smbchg_chip *chip, int reason)
+{
+ int reasons;
+
+ mutex_lock(&chip->pm_lock);
+ reasons = chip->wake_reasons | reason;
+ if (reasons != 0 && chip->wake_reasons == 0) {
+ pr_smb(PR_PM, "staying awake: 0x%02x (bit %d)\n",
+ reasons, reason);
+ pm_stay_awake(chip->dev);
+ }
+ chip->wake_reasons = reasons;
+ mutex_unlock(&chip->pm_lock);
+}
+
+static void smbchg_relax(struct smbchg_chip *chip, int reason)
+{
+ int reasons;
+
+ mutex_lock(&chip->pm_lock);
+ reasons = chip->wake_reasons & (~reason);
+ if (reasons == 0 && chip->wake_reasons != 0) {
+ pr_smb(PR_PM, "relaxing: 0x%02x (bit %d)\n",
+ reasons, reason);
+ pm_relax(chip->dev);
+ }
+ chip->wake_reasons = reasons;
+ mutex_unlock(&chip->pm_lock);
+}
+
+enum pwr_path_type {
+ UNKNOWN = 0,
+ PWR_PATH_BATTERY = 1,
+ PWR_PATH_USB = 2,
+ PWR_PATH_DC = 3,
+};
+
+#define PWR_PATH 0x08
+#define PWR_PATH_MASK 0x03
+static enum pwr_path_type smbchg_get_pwr_path(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + PWR_PATH, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read PWR_PATH rc = %d\n", rc);
+ return PWR_PATH_BATTERY;
+ }
+
+ return reg & PWR_PATH_MASK;
+}
+
+#define RID_STS 0xB
+#define RID_MASK 0xF
+#define IDEV_STS 0x8
+#define RT_STS 0x10
+#define USBID_MSB 0xE
+#define USBIN_UV_BIT BIT(0)
+#define USBIN_OV_BIT BIT(1)
+#define USBIN_SRC_DET_BIT BIT(2)
+#define FMB_STS_MASK SMB_MASK(3, 0)
+#define USBID_GND_THRESHOLD 0x495
+static bool is_otg_present_schg(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+ u8 usbid_reg[2];
+ u16 usbid_val;
+ /*
+ * After the falling edge of the usbid change interrupt occurs,
+ * there may still be some time before the ADC conversion for USB RID
+ * finishes in the fuel gauge. In the worst case, this could be up to
+ * 15 ms.
+ *
+ * Sleep for 20 ms (minimum msleep time) to wait for the conversion to
+ * finish and the USB RID status register to be updated before trying
+ * to detect OTG insertions.
+ */
+
+ msleep(20);
+
+ /*
+ * There is a problem with USBID conversions on PMI8994 revisions
+ * 2.0.0. As a workaround, check that the cable is not
+ * detected as factory test before enabling OTG.
+ */
+ rc = smbchg_read(chip, &reg, chip->misc_base + IDEV_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read IDEV_STS rc = %d\n", rc);
+ return false;
+ }
+
+ if ((reg & FMB_STS_MASK) != 0) {
+ pr_smb(PR_STATUS, "IDEV_STS = %02x, not ground\n", reg);
+ return false;
+ }
+
+ rc = smbchg_read(chip, usbid_reg, chip->usb_chgpth_base + USBID_MSB, 2);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read USBID rc = %d\n", rc);
+ return false;
+ }
+ usbid_val = (usbid_reg[0] << 8) | usbid_reg[1];
+
+ if (usbid_val > USBID_GND_THRESHOLD) {
+ pr_smb(PR_STATUS, "USBID = 0x%04x, too high to be ground\n",
+ usbid_val);
+ return false;
+ }
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RID_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't read usb rid status rc = %d\n", rc);
+ return false;
+ }
+
+ pr_smb(PR_STATUS, "RID_STS = %02x\n", reg);
+
+ return (reg & RID_MASK) == 0;
+}
+
+#define RID_GND_DET_STS BIT(2)
+static bool is_otg_present_schg_lite(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->otg_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't read otg RT status rc = %d\n", rc);
+ return false;
+ }
+
+ return !!(reg & RID_GND_DET_STS);
+}
+
+static bool is_otg_present(struct smbchg_chip *chip)
+{
+ if (chip->schg_version == QPNP_SCHG_LITE)
+ return is_otg_present_schg_lite(chip);
+
+ return is_otg_present_schg(chip);
+}
+
+#define USBIN_9V BIT(5)
+#define USBIN_UNREG BIT(4)
+#define USBIN_LV BIT(3)
+#define DCIN_9V BIT(2)
+#define DCIN_UNREG BIT(1)
+#define DCIN_LV BIT(0)
+#define INPUT_STS 0x0D
+#define DCIN_UV_BIT BIT(0)
+#define DCIN_OV_BIT BIT(1)
+static bool is_dc_present(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->dc_chgpth_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read dc status rc = %d\n", rc);
+ return false;
+ }
+
+ if ((reg & DCIN_UV_BIT) || (reg & DCIN_OV_BIT))
+ return false;
+
+ return true;
+}
+
+static bool is_usb_present(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb rt status rc = %d\n", rc);
+ return false;
+ }
+ if (!(reg & USBIN_SRC_DET_BIT) || (reg & USBIN_OV_BIT))
+ return false;
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + INPUT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb status rc = %d\n", rc);
+ return false;
+ }
+
+ return !!(reg & (USBIN_9V | USBIN_UNREG | USBIN_LV));
+}
+
+static char *usb_type_str[] = {
+ "SDP", /* bit 0 */
+ "OTHER", /* bit 1 */
+ "DCP", /* bit 2 */
+ "CDP", /* bit 3 */
+ "NONE", /* bit 4 error case */
+};
+
+#define N_TYPE_BITS 4
+#define TYPE_BITS_OFFSET 4
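+/*
+ * IDEV_STS[7:4] reports the detected USB type as a one-hot field;
+ * find_first_bit() returns N_TYPE_BITS (the "NONE" error entry) when no
+ * type bit is set.
+ */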
+
+static int get_type(u8 type_reg)
+{
+ unsigned long type = type_reg;
+ type >>= TYPE_BITS_OFFSET;
+ return find_first_bit(&type, N_TYPE_BITS);
+}
+
+/* helper to return the string of USB type */
+static inline char *get_usb_type_name(int type)
+{
+ return usb_type_str[type];
+}
+
+static enum power_supply_type usb_type_enum[] = {
+ POWER_SUPPLY_TYPE_USB, /* bit 0 */
+ POWER_SUPPLY_TYPE_USB_DCP, /* bit 1 */
+ POWER_SUPPLY_TYPE_USB_DCP, /* bit 2 */
+ POWER_SUPPLY_TYPE_USB_CDP, /* bit 3 */
+ POWER_SUPPLY_TYPE_USB_DCP, /* bit 4 error case, report DCP */
+};
+
+/* helper to return enum power_supply_type of USB type */
+static inline enum power_supply_type get_usb_supply_type(int type)
+{
+ return usb_type_enum[type];
+}
+
+static bool is_src_detect_high(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb rt status rc = %d\n", rc);
+ return false;
+ }
+ return reg &= USBIN_SRC_DET_BIT;
+}
+
+static void read_usb_type(struct smbchg_chip *chip, char **usb_type_name,
+ enum power_supply_type *usb_supply_type)
+{
+ int rc, type;
+ u8 reg;
+
+ if (!is_src_detect_high(chip)) {
+ pr_smb(PR_MISC, "src det low\n");
+ *usb_type_name = "Absent";
+ *usb_supply_type = POWER_SUPPLY_TYPE_UNKNOWN;
+ return;
+ }
+
+ rc = smbchg_read(chip, &reg, chip->misc_base + IDEV_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read status 5 rc = %d\n", rc);
+ *usb_type_name = "Other";
+ *usb_supply_type = POWER_SUPPLY_TYPE_UNKNOWN;
+ return;
+ }
+ type = get_type(reg);
+ *usb_type_name = get_usb_type_name(type);
+ *usb_supply_type = get_usb_supply_type(type);
+}
+
+#define CHGR_STS 0x0E
+#define BATT_LESS_THAN_2V BIT(4)
+#define CHG_HOLD_OFF_BIT BIT(3)
+#define CHG_TYPE_MASK SMB_MASK(2, 1)
+#define CHG_TYPE_SHIFT 1
+#define BATT_NOT_CHG_VAL 0x0
+#define BATT_PRE_CHG_VAL 0x1
+#define BATT_FAST_CHG_VAL 0x2
+#define BATT_TAPER_CHG_VAL 0x3
+#define CHG_INHIBIT_BIT BIT(1)
+#define BAT_TCC_REACHED_BIT BIT(7)
+static int get_prop_batt_status(struct smbchg_chip *chip)
+{
+ int rc, status = POWER_SUPPLY_STATUS_DISCHARGING;
+ u8 reg = 0, chg_type;
+ bool charger_present, chg_inhibit;
+
+ charger_present = is_usb_present(chip) || is_dc_present(chip) ||
+ chip->hvdcp_3_det_ignore_uv;
+ if (!charger_present)
+ return POWER_SUPPLY_STATUS_DISCHARGING;
+
+ rc = smbchg_read(chip, &reg, chip->chgr_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Unable to read RT_STS rc = %d\n", rc);
+ return POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+
+ if (reg & BAT_TCC_REACHED_BIT)
+ return POWER_SUPPLY_STATUS_FULL;
+
+ chg_inhibit = reg & CHG_INHIBIT_BIT;
+ if (chg_inhibit)
+ return POWER_SUPPLY_STATUS_FULL;
+
+ rc = smbchg_read(chip, &reg, chip->chgr_base + CHGR_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Unable to read CHGR_STS rc = %d\n", rc);
+ return POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+
+ if (reg & CHG_HOLD_OFF_BIT) {
+ /*
+ * when chg hold off happens the battery is
+ * not charging
+ */
+ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ goto out;
+ }
+
+ chg_type = (reg & CHG_TYPE_MASK) >> CHG_TYPE_SHIFT;
+
+ if (chg_type == BATT_NOT_CHG_VAL && !chip->hvdcp_3_det_ignore_uv)
+ status = POWER_SUPPLY_STATUS_DISCHARGING;
+ else
+ status = POWER_SUPPLY_STATUS_CHARGING;
+out:
+ pr_smb_rt(PR_MISC, "CHGR_STS = 0x%02x\n", reg);
+ return status;
+}
+
+#define BAT_PRES_STATUS 0x08
+#define BAT_PRES_BIT BIT(7)
+static int get_prop_batt_present(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->bat_if_base + BAT_PRES_STATUS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Unable to read CHGR_STS rc = %d\n", rc);
+ return 0;
+ }
+
+ return !!(reg & BAT_PRES_BIT);
+}
+
+static int get_prop_charge_type(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg, chg_type;
+
+ rc = smbchg_read(chip, &reg, chip->chgr_base + CHGR_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Unable to read CHGR_STS rc = %d\n", rc);
+ return 0;
+ }
+
+ chg_type = (reg & CHG_TYPE_MASK) >> CHG_TYPE_SHIFT;
+ if (chg_type == BATT_NOT_CHG_VAL)
+ return POWER_SUPPLY_CHARGE_TYPE_NONE;
+ else if (chg_type == BATT_TAPER_CHG_VAL)
+ return POWER_SUPPLY_CHARGE_TYPE_TAPER;
+ else if (chg_type == BATT_FAST_CHG_VAL)
+ return POWER_SUPPLY_CHARGE_TYPE_FAST;
+ else if (chg_type == BATT_PRE_CHG_VAL)
+ return POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+
+ return POWER_SUPPLY_CHARGE_TYPE_NONE;
+}
+
+static int set_property_on_fg(struct smbchg_chip *chip,
+ enum power_supply_property prop, int val)
+{
+ int rc;
+ union power_supply_propval ret = {0, };
+
+ if (!chip->bms_psy && chip->bms_psy_name)
+ chip->bms_psy =
+ power_supply_get_by_name((char *)chip->bms_psy_name);
+ if (!chip->bms_psy) {
+ pr_smb(PR_STATUS, "no bms psy found\n");
+ return -EINVAL;
+ }
+
+ ret.intval = val;
+ rc = power_supply_set_property(chip->bms_psy, prop, &ret);
+ if (rc)
+ pr_smb(PR_STATUS,
+ "bms psy does not allow updating prop %d rc = %d\n",
+ prop, rc);
+
+ return rc;
+}
+
+static int get_property_from_fg(struct smbchg_chip *chip,
+ enum power_supply_property prop, int *val)
+{
+ int rc;
+ union power_supply_propval ret = {0, };
+
+ if (!chip->bms_psy && chip->bms_psy_name)
+ chip->bms_psy =
+ power_supply_get_by_name((char *)chip->bms_psy_name);
+ if (!chip->bms_psy) {
+ pr_smb(PR_STATUS, "no bms psy found\n");
+ return -EINVAL;
+ }
+
+ rc = power_supply_get_property(chip->bms_psy, prop, &ret);
+ if (rc) {
+ pr_smb(PR_STATUS,
+ "bms psy doesn't support reading prop %d rc = %d\n",
+ prop, rc);
+ return rc;
+ }
+
+ *val = ret.intval;
+ return rc;
+}
+
+#define DEFAULT_BATT_CAPACITY 50
+static int get_prop_batt_capacity(struct smbchg_chip *chip)
+{
+ int capacity, rc;
+
+ if (chip->fake_battery_soc >= 0)
+ return chip->fake_battery_soc;
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_CAPACITY, &capacity);
+ if (rc) {
+ pr_smb(PR_STATUS, "Couldn't get capacity rc = %d\n", rc);
+ capacity = DEFAULT_BATT_CAPACITY;
+ }
+ return capacity;
+}
+
+#define DEFAULT_BATT_TEMP 200
+static int get_prop_batt_temp(struct smbchg_chip *chip)
+{
+ int temp, rc;
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_TEMP, &temp);
+ if (rc) {
+ pr_smb(PR_STATUS, "Couldn't get temperature rc = %d\n", rc);
+ temp = DEFAULT_BATT_TEMP;
+ }
+ return temp;
+}
+
+#define DEFAULT_BATT_CURRENT_NOW 0
+static int get_prop_batt_current_now(struct smbchg_chip *chip)
+{
+ int ua, rc;
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_CURRENT_NOW, &ua);
+ if (rc) {
+ pr_smb(PR_STATUS, "Couldn't get current rc = %d\n", rc);
+ ua = DEFAULT_BATT_CURRENT_NOW;
+ }
+ return ua;
+}
+
+#define DEFAULT_BATT_VOLTAGE_NOW 0
+static int get_prop_batt_voltage_now(struct smbchg_chip *chip)
+{
+ int uv, rc;
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_VOLTAGE_NOW, &uv);
+ if (rc) {
+ pr_smb(PR_STATUS, "Couldn't get voltage rc = %d\n", rc);
+ uv = DEFAULT_BATT_VOLTAGE_NOW;
+ }
+ return uv;
+}
+
+#define DEFAULT_BATT_VOLTAGE_MAX_DESIGN 4200000
+static int get_prop_batt_voltage_max_design(struct smbchg_chip *chip)
+{
+ int uv, rc;
+
+ rc = get_property_from_fg(chip,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, &uv);
+ if (rc) {
+ pr_smb(PR_STATUS, "Couldn't get voltage rc = %d\n", rc);
+ uv = DEFAULT_BATT_VOLTAGE_MAX_DESIGN;
+ }
+ return uv;
+}
+
+static int get_prop_batt_health(struct smbchg_chip *chip)
+{
+ if (chip->batt_hot)
+ return POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (chip->batt_cold)
+ return POWER_SUPPLY_HEALTH_COLD;
+ else if (chip->batt_warm)
+ return POWER_SUPPLY_HEALTH_WARM;
+ else if (chip->batt_cool)
+ return POWER_SUPPLY_HEALTH_COOL;
+ else
+ return POWER_SUPPLY_HEALTH_GOOD;
+}
+
+static void get_property_from_typec(struct smbchg_chip *chip,
+ enum power_supply_property property,
+ union power_supply_propval *prop)
+{
+ int rc;
+
+ rc = power_supply_get_property(chip->typec_psy,
+ property, prop);
+ if (rc)
+ pr_smb(PR_TYPEC,
+ "typec psy doesn't support reading prop %d rc = %d\n",
+ property, rc);
+}
+
+static void update_typec_status(struct smbchg_chip *chip)
+{
+ union power_supply_propval type = {0, };
+ union power_supply_propval capability = {0, };
+
+ get_property_from_typec(chip, POWER_SUPPLY_PROP_TYPE, &type);
+ if (type.intval != POWER_SUPPLY_TYPE_UNKNOWN) {
+ get_property_from_typec(chip,
+ POWER_SUPPLY_PROP_CURRENT_CAPABILITY,
+ &capability);
+ chip->typec_current_ma = capability.intval;
+ pr_smb(PR_TYPEC, "SMB Type-C mode = %d, current=%d\n",
+ type.intval, capability.intval);
+ } else {
+ pr_smb(PR_TYPEC,
+ "typec detection not completed continuing with USB update\n");
+ }
+}
+
+/*
+ * finds the index of the closest value in the array. If there are two that
+ * are equally close, the lower index will be returned
+ */
+static int find_closest_in_array(const int *arr, int len, int val)
+{
+ int i, closest = 0;
+
+ if (len == 0)
+ return closest;
+ for (i = 0; i < len; i++)
+ if (abs(val - arr[i]) < abs(val - arr[closest]))
+ closest = i;
+
+ return closest;
+}
+
+/* finds the index of the closest smaller value in the array. */
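+/* Assumes an ascending table; returns -1 if val is below every entry. */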
+static int find_smaller_in_array(const int *table, int val, int len)
+{
+ int i;
+
+ for (i = len - 1; i >= 0; i--) {
+ if (val >= table[i])
+ break;
+ }
+
+ return i;
+}
+
+static const int iterm_ma_table_8994[] = {
+ 300,
+ 50,
+ 100,
+ 150,
+ 200,
+ 250,
+ 500,
+ 600
+};
+
+static const int iterm_ma_table_8996[] = {
+ 300,
+ 50,
+ 100,
+ 150,
+ 200,
+ 250,
+ 400,
+ 500
+};
+
+static const int usb_ilim_ma_table_8994[] = {
+ 300,
+ 400,
+ 450,
+ 475,
+ 500,
+ 550,
+ 600,
+ 650,
+ 700,
+ 900,
+ 950,
+ 1000,
+ 1100,
+ 1200,
+ 1400,
+ 1450,
+ 1500,
+ 1600,
+ 1800,
+ 1850,
+ 1880,
+ 1910,
+ 1930,
+ 1950,
+ 1970,
+ 2000,
+ 2050,
+ 2100,
+ 2300,
+ 2400,
+ 2500,
+ 3000
+};
+
+static const int usb_ilim_ma_table_8996[] = {
+ 300,
+ 400,
+ 500,
+ 600,
+ 700,
+ 800,
+ 900,
+ 1000,
+ 1100,
+ 1200,
+ 1300,
+ 1400,
+ 1450,
+ 1500,
+ 1550,
+ 1600,
+ 1700,
+ 1800,
+ 1900,
+ 1950,
+ 2000,
+ 2050,
+ 2100,
+ 2200,
+ 2300,
+ 2400,
+ 2500,
+ 2600,
+ 2700,
+ 2800,
+ 2900,
+ 3000
+};
+
+static int dc_ilim_ma_table_8994[] = {
+ 300,
+ 400,
+ 450,
+ 475,
+ 500,
+ 550,
+ 600,
+ 650,
+ 700,
+ 900,
+ 950,
+ 1000,
+ 1100,
+ 1200,
+ 1400,
+ 1450,
+ 1500,
+ 1600,
+ 1800,
+ 1850,
+ 1880,
+ 1910,
+ 1930,
+ 1950,
+ 1970,
+ 2000,
+};
+
+static int dc_ilim_ma_table_8996[] = {
+ 300,
+ 400,
+ 500,
+ 600,
+ 700,
+ 800,
+ 900,
+ 1000,
+ 1100,
+ 1200,
+ 1300,
+ 1400,
+ 1450,
+ 1500,
+ 1550,
+ 1600,
+ 1700,
+ 1800,
+ 1900,
+ 1950,
+ 2000,
+ 2050,
+ 2100,
+ 2200,
+ 2300,
+ 2400,
+};
+
+static const int fcc_comp_table_8994[] = {
+ 250,
+ 700,
+ 900,
+ 1200,
+};
+
+static const int fcc_comp_table_8996[] = {
+ 250,
+ 1100,
+ 1200,
+ 1500,
+};
+
+static const int aicl_rerun_period[] = {
+ 45,
+ 90,
+ 180,
+ 360,
+};
+
+static const int aicl_rerun_period_schg_lite[] = {
+ 3, /* 2.8s */
+ 6, /* 5.6s */
+ 11, /* 11.3s */
+ 23, /* 22.5s */
+ 45,
+ 90,
+ 180,
+ 360,
+};
+
+static void use_pmi8994_tables(struct smbchg_chip *chip)
+{
+ chip->tables.usb_ilim_ma_table = usb_ilim_ma_table_8994;
+ chip->tables.usb_ilim_ma_len = ARRAY_SIZE(usb_ilim_ma_table_8994);
+ chip->tables.dc_ilim_ma_table = dc_ilim_ma_table_8994;
+ chip->tables.dc_ilim_ma_len = ARRAY_SIZE(dc_ilim_ma_table_8994);
+ chip->tables.iterm_ma_table = iterm_ma_table_8994;
+ chip->tables.iterm_ma_len = ARRAY_SIZE(iterm_ma_table_8994);
+ chip->tables.fcc_comp_table = fcc_comp_table_8994;
+ chip->tables.fcc_comp_len = ARRAY_SIZE(fcc_comp_table_8994);
+ chip->tables.rchg_thr_mv = 200;
+ chip->tables.aicl_rerun_period_table = aicl_rerun_period;
+ chip->tables.aicl_rerun_period_len = ARRAY_SIZE(aicl_rerun_period);
+}
+
+static void use_pmi8996_tables(struct smbchg_chip *chip)
+{
+ chip->tables.usb_ilim_ma_table = usb_ilim_ma_table_8996;
+ chip->tables.usb_ilim_ma_len = ARRAY_SIZE(usb_ilim_ma_table_8996);
+ chip->tables.dc_ilim_ma_table = dc_ilim_ma_table_8996;
+ chip->tables.dc_ilim_ma_len = ARRAY_SIZE(dc_ilim_ma_table_8996);
+ chip->tables.iterm_ma_table = iterm_ma_table_8996;
+ chip->tables.iterm_ma_len = ARRAY_SIZE(iterm_ma_table_8996);
+ chip->tables.fcc_comp_table = fcc_comp_table_8996;
+ chip->tables.fcc_comp_len = ARRAY_SIZE(fcc_comp_table_8996);
+ chip->tables.rchg_thr_mv = 150;
+ chip->tables.aicl_rerun_period_table = aicl_rerun_period;
+ chip->tables.aicl_rerun_period_len = ARRAY_SIZE(aicl_rerun_period);
+}
+
+#define CMD_CHG_REG 0x42
+#define EN_BAT_CHG_BIT BIT(1)
+static int smbchg_charging_en(struct smbchg_chip *chip, bool en)
+{
+ /* The en bit is configured active low */
+ return smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG,
+ EN_BAT_CHG_BIT, en ? 0 : EN_BAT_CHG_BIT);
+}
+
+#define CMD_IL 0x40
+#define USBIN_SUSPEND_BIT BIT(4)
+#define CURRENT_100_MA 100
+#define CURRENT_150_MA 150
+#define CURRENT_500_MA 500
+#define CURRENT_900_MA 900
+#define CURRENT_1500_MA 1500
+#define SUSPEND_CURRENT_MA 2
+#define ICL_OVERRIDE_BIT BIT(2)
+static int smbchg_usb_suspend(struct smbchg_chip *chip, bool suspend)
+{
+ int rc;
+
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ USBIN_SUSPEND_BIT, suspend ? USBIN_SUSPEND_BIT : 0);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set usb suspend rc = %d\n", rc);
+ return rc;
+}
+
+#define DCIN_SUSPEND_BIT BIT(3)
+static int smbchg_dc_suspend(struct smbchg_chip *chip, bool suspend)
+{
+ int rc = 0;
+
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ DCIN_SUSPEND_BIT, suspend ? DCIN_SUSPEND_BIT : 0);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set dc suspend rc = %d\n", rc);
+ return rc;
+}
+
+#define IL_CFG 0xF2
+#define DCIN_INPUT_MASK SMB_MASK(4, 0)
+static int smbchg_set_dc_current_max(struct smbchg_chip *chip, int current_ma)
+{
+ int i;
+ u8 dc_cur_val;
+
+ i = find_smaller_in_array(chip->tables.dc_ilim_ma_table,
+ current_ma, chip->tables.dc_ilim_ma_len);
+
+ if (i < 0) {
+ dev_err(chip->dev, "Cannot find %dma current_table\n",
+ current_ma);
+ return -EINVAL;
+ }
+
+ chip->dc_max_current_ma = chip->tables.dc_ilim_ma_table[i];
+ dc_cur_val = i & DCIN_INPUT_MASK;
+
+ pr_smb(PR_STATUS, "dc current set to %d mA\n",
+ chip->dc_max_current_ma);
+ return smbchg_sec_masked_write(chip, chip->dc_chgpth_base + IL_CFG,
+ DCIN_INPUT_MASK, dc_cur_val);
+}
+
+#define AICL_WL_SEL_CFG 0xF5
+#define AICL_WL_SEL_MASK SMB_MASK(1, 0)
+#define AICL_WL_SEL_SCHG_LITE_MASK SMB_MASK(2, 0)
+static int smbchg_set_aicl_rerun_period_s(struct smbchg_chip *chip,
+ int period_s)
+{
+ int i;
+ u8 reg, mask;
+
+ i = find_smaller_in_array(chip->tables.aicl_rerun_period_table,
+ period_s, chip->tables.aicl_rerun_period_len);
+
+ if (i < 0) {
+ dev_err(chip->dev, "Cannot find %ds in aicl rerun period\n",
+ period_s);
+ return -EINVAL;
+ }
+
+ if (chip->schg_version == QPNP_SCHG_LITE)
+ mask = AICL_WL_SEL_SCHG_LITE_MASK;
+ else
+ mask = AICL_WL_SEL_MASK;
+
+ reg = i & mask;
+
+ pr_smb(PR_STATUS, "aicl rerun period set to %ds\n",
+ chip->tables.aicl_rerun_period_table[i]);
+ return smbchg_sec_masked_write(chip,
+ chip->dc_chgpth_base + AICL_WL_SEL_CFG,
+ mask, reg);
+}
+
+static struct power_supply *get_parallel_psy(struct smbchg_chip *chip)
+{
+ if (!chip->parallel.avail)
+ return NULL;
+ if (chip->parallel.psy)
+ return chip->parallel.psy;
+ chip->parallel.psy = power_supply_get_by_name("usb-parallel");
+ if (!chip->parallel.psy)
+ pr_smb(PR_STATUS, "parallel charger not found\n");
+ return chip->parallel.psy;
+}
+
+static void smbchg_usb_update_online_work(struct work_struct *work)
+{
+ struct smbchg_chip *chip = container_of(work,
+ struct smbchg_chip,
+ usb_set_online_work);
+ bool user_enabled = !get_client_vote(chip->usb_suspend_votable,
+ USER_EN_VOTER);
+ int online;
+
+ online = user_enabled && chip->usb_present && !chip->very_weak_charger;
+
+ mutex_lock(&chip->usb_set_online_lock);
+ if (chip->usb_online != online) {
+ pr_smb(PR_MISC, "setting usb psy online = %d\n", online);
+ chip->usb_online = online;
+ power_supply_changed(chip->usb_psy);
+ }
+ mutex_unlock(&chip->usb_set_online_lock);
+}
+
+#define CHGPTH_CFG 0xF4
+#define CFG_USB_2_3_SEL_BIT BIT(7)
+#define CFG_USB_2 0
+#define CFG_USB_3 BIT(7)
+#define USBIN_INPUT_MASK SMB_MASK(4, 0)
+#define USBIN_MODE_CHG_BIT BIT(0)
+#define USBIN_LIMITED_MODE 0
+#define USBIN_HC_MODE BIT(0)
+#define USB51_MODE_BIT BIT(1)
+#define USB51_100MA 0
+#define USB51_500MA BIT(1)
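+/*
+ * A note on the CMD_IL mode bits used below: in limited mode the input
+ * current follows the USB unit load selected by USB51_MODE_BIT together
+ * with the USB 2.0/3.0 selection in CHGPTH_CFG (100/150mA or 500/900mA),
+ * while in HC mode it follows the value programmed into IL_CFG.
+ */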
+static int smbchg_set_high_usb_chg_current(struct smbchg_chip *chip,
+ int current_ma)
+{
+ int i, rc;
+ u8 usb_cur_val;
+
+ if (current_ma == CURRENT_100_MA) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ CFG_USB_2_3_SEL_BIT, CFG_USB_2);
+ if (rc < 0) {
+ pr_err("Couldn't set CFG_USB_2 rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT | USB51_MODE_BIT | ICL_OVERRIDE_BIT,
+ USBIN_LIMITED_MODE | USB51_100MA | ICL_OVERRIDE_BIT);
+ if (rc < 0) {
+ pr_err("Couldn't set ICL_OVERRIDE rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_STATUS,
+ "Forcing 100mA current limit\n");
+ chip->usb_max_current_ma = CURRENT_100_MA;
+ return rc;
+ }
+
+ i = find_smaller_in_array(chip->tables.usb_ilim_ma_table,
+ current_ma, chip->tables.usb_ilim_ma_len);
+ if (i < 0) {
+ dev_err(chip->dev,
+ "Cannot find %dma current_table using %d\n",
+ current_ma, CURRENT_150_MA);
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ CFG_USB_2_3_SEL_BIT, CFG_USB_3);
+ rc |= smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT | USB51_MODE_BIT,
+ USBIN_LIMITED_MODE | USB51_100MA);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set %dmA rc=%d\n",
+ CURRENT_150_MA, rc);
+ else
+ chip->usb_max_current_ma = 150;
+ return rc;
+ }
+
+ usb_cur_val = i & USBIN_INPUT_MASK;
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + IL_CFG,
+ USBIN_INPUT_MASK, usb_cur_val);
+ if (rc < 0) {
+ dev_err(chip->dev, "cannot write to config c rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT, USBIN_HC_MODE);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't write cfg 5 rc = %d\n", rc);
+ chip->usb_max_current_ma = chip->tables.usb_ilim_ma_table[i];
+ return rc;
+}
+
+/* if APSD results are used:
+ * if SDP is detected, the hardware looks at the 500mA setting:
+ * if set, it will draw 500mA,
+ * if unset, it will draw 100mA.
+ * If CDP/DCP is detected, it looks at the 0x0C setting,
+ * i.e. the values in 0x41[1, 0] do not matter.
+ */
+static int smbchg_set_usb_current_max(struct smbchg_chip *chip,
+ int current_ma)
+{
+ int rc = 0;
+
+ /*
+ * if the battery is not present, do not allow the usb ICL to lower in
+ * order to avoid browning out the device during a hotswap.
+ */
+ if (!chip->batt_present && current_ma < chip->usb_max_current_ma) {
+ pr_info_ratelimited("Ignoring usb current->%d, battery is absent\n",
+ current_ma);
+ return 0;
+ }
+ pr_smb(PR_STATUS, "USB current_ma = %d\n", current_ma);
+
+ if (current_ma <= SUSPEND_CURRENT_MA) {
+ /* suspend the usb if current <= 2mA */
+ rc = vote(chip->usb_suspend_votable, USB_EN_VOTER, true, 0);
+ chip->usb_max_current_ma = 0;
+ goto out;
+ } else {
+ rc = vote(chip->usb_suspend_votable, USB_EN_VOTER, false, 0);
+ }
+
+ switch (chip->usb_supply_type) {
+ case POWER_SUPPLY_TYPE_USB:
+ if ((current_ma < CURRENT_150_MA) &&
+ (chip->wa_flags & SMBCHG_USB100_WA))
+ current_ma = CURRENT_150_MA;
+
+ if (current_ma < CURRENT_150_MA) {
+ /* force 100mA */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ CFG_USB_2_3_SEL_BIT, CFG_USB_2);
+ if (rc < 0) {
+ pr_err("Couldn't set CHGPTH_CFG rc = %d\n", rc);
+ goto out;
+ }
+ rc = smbchg_masked_write(chip,
+ chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT | USB51_MODE_BIT,
+ USBIN_LIMITED_MODE | USB51_100MA);
+ if (rc < 0) {
+ pr_err("Couldn't set CMD_IL rc = %d\n", rc);
+ goto out;
+ }
+ chip->usb_max_current_ma = 100;
+ }
+ /* specific current values */
+ if (current_ma == CURRENT_150_MA) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ CFG_USB_2_3_SEL_BIT, CFG_USB_3);
+ if (rc < 0) {
+ pr_err("Couldn't set CHGPTH_CFG rc = %d\n", rc);
+ goto out;
+ }
+ rc = smbchg_masked_write(chip,
+ chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT | USB51_MODE_BIT,
+ USBIN_LIMITED_MODE | USB51_100MA);
+ if (rc < 0) {
+ pr_err("Couldn't set CMD_IL rc = %d\n", rc);
+ goto out;
+ }
+ chip->usb_max_current_ma = 150;
+ }
+ if (current_ma == CURRENT_500_MA) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ CFG_USB_2_3_SEL_BIT, CFG_USB_2);
+ if (rc < 0) {
+ pr_err("Couldn't set CHGPTH_CFG rc = %d\n", rc);
+ goto out;
+ }
+ rc = smbchg_masked_write(chip,
+ chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT | USB51_MODE_BIT,
+ USBIN_LIMITED_MODE | USB51_500MA);
+ if (rc < 0) {
+ pr_err("Couldn't set CMD_IL rc = %d\n", rc);
+ goto out;
+ }
+ chip->usb_max_current_ma = 500;
+ }
+ if (current_ma == CURRENT_900_MA) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ CFG_USB_2_3_SEL_BIT, CFG_USB_3);
+ if (rc < 0) {
+ pr_err("Couldn't set CHGPTH_CFG rc = %d\n", rc);
+ goto out;
+ }
+ rc = smbchg_masked_write(chip,
+ chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT | USB51_MODE_BIT,
+ USBIN_LIMITED_MODE | USB51_500MA);
+ if (rc < 0) {
+ pr_err("Couldn't set CMD_IL rc = %d\n", rc);
+ goto out;
+ }
+ chip->usb_max_current_ma = 900;
+ }
+ break;
+ case POWER_SUPPLY_TYPE_USB_CDP:
+ if (current_ma < CURRENT_1500_MA) {
+ /* use override for CDP */
+ rc = smbchg_masked_write(chip,
+ chip->usb_chgpth_base + CMD_IL,
+ ICL_OVERRIDE_BIT, ICL_OVERRIDE_BIT);
+ if (rc < 0)
+ pr_err("Couldn't set override rc = %d\n", rc);
+ }
+ /* fall through */
+ default:
+ rc = smbchg_set_high_usb_chg_current(chip, current_ma);
+ if (rc < 0)
+ pr_err("Couldn't set %dmA rc = %d\n", current_ma, rc);
+ break;
+ }
+
+out:
+ pr_smb(PR_STATUS, "usb type = %d current set to %d mA\n",
+ chip->usb_supply_type, chip->usb_max_current_ma);
+ return rc;
+}
+
+#define USBIN_HVDCP_STS 0x0C
+#define USBIN_HVDCP_SEL_BIT BIT(4)
+#define USBIN_HVDCP_SEL_9V_BIT BIT(1)
+#define SCHG_LITE_USBIN_HVDCP_SEL_9V_BIT BIT(2)
+#define SCHG_LITE_USBIN_HVDCP_SEL_BIT BIT(0)
+static int smbchg_get_min_parallel_current_ma(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg, hvdcp_sel, hvdcp_sel_9v;
+
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + USBIN_HVDCP_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb status rc = %d\n", rc);
+ return 0;
+ }
+ if (chip->schg_version == QPNP_SCHG_LITE) {
+ hvdcp_sel = SCHG_LITE_USBIN_HVDCP_SEL_BIT;
+ hvdcp_sel_9v = SCHG_LITE_USBIN_HVDCP_SEL_9V_BIT;
+ } else {
+ hvdcp_sel = USBIN_HVDCP_SEL_BIT;
+ hvdcp_sel_9v = USBIN_HVDCP_SEL_9V_BIT;
+ }
+
+ if ((reg & hvdcp_sel) && (reg & hvdcp_sel_9v))
+ return chip->parallel.min_9v_current_thr_ma;
+ return chip->parallel.min_current_thr_ma;
+}
+
+static bool is_hvdcp_present(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg, hvdcp_sel;
+
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + USBIN_HVDCP_STS, 1);
+ if (rc < 0) {
+ pr_err("Couldn't read hvdcp status rc = %d\n", rc);
+ return false;
+ }
+
+ pr_smb(PR_STATUS, "HVDCP_STS = 0x%02x\n", reg);
+ /*
+ * Report HVDCP as present only if a valid HVDCP is detected
+ * and USB is still present.
+ */
+ if (chip->schg_version == QPNP_SCHG_LITE)
+ hvdcp_sel = SCHG_LITE_USBIN_HVDCP_SEL_BIT;
+ else
+ hvdcp_sel = USBIN_HVDCP_SEL_BIT;
+
+ if ((reg & hvdcp_sel) && is_usb_present(chip))
+ return true;
+
+ return false;
+}
+
+#define FCC_CFG 0xF2
+#define FCC_500MA_VAL 0x4
+#define FCC_MASK SMB_MASK(4, 0)
+static int smbchg_set_fastchg_current_raw(struct smbchg_chip *chip,
+ int current_ma)
+{
+ int i, rc;
+ u8 cur_val;
+
+ /* the fcc enumerations are the same as the usb currents */
+ i = find_smaller_in_array(chip->tables.usb_ilim_ma_table,
+ current_ma, chip->tables.usb_ilim_ma_len);
+ if (i < 0) {
+ dev_err(chip->dev,
+ "Cannot find %dma current_table using %d\n",
+ current_ma, CURRENT_500_MA);
+
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + FCC_CFG,
+ FCC_MASK,
+ FCC_500MA_VAL);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set %dmA rc=%d\n",
+ CURRENT_500_MA, rc);
+ else
+ chip->fastchg_current_ma = 500;
+ return rc;
+ }
+
+ if (chip->tables.usb_ilim_ma_table[i] == chip->fastchg_current_ma) {
+ pr_smb(PR_STATUS, "skipping fastchg current request: %d\n",
+ chip->fastchg_current_ma);
+ return 0;
+ }
+
+ cur_val = i & FCC_MASK;
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + FCC_CFG,
+ FCC_MASK, cur_val);
+ if (rc < 0) {
+ dev_err(chip->dev, "cannot write to fcc cfg rc = %d\n", rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "fastcharge current requested %d, set to %d\n",
+ current_ma, chip->tables.usb_ilim_ma_table[cur_val]);
+
+ chip->fastchg_current_ma = chip->tables.usb_ilim_ma_table[cur_val];
+ return rc;
+}
+
+#define ICL_STS_1_REG 0x7
+#define ICL_STS_2_REG 0x9
+#define ICL_STS_MASK 0x1F
+#define AICL_SUSP_BIT BIT(6)
+#define AICL_STS_BIT BIT(5)
+#define USBIN_SUSPEND_STS_BIT BIT(3)
+#define USBIN_ACTIVE_PWR_SRC_BIT BIT(1)
+#define DCIN_ACTIVE_PWR_SRC_BIT BIT(0)
+#define PARALLEL_REENABLE_TIMER_MS 1000
+#define PARALLEL_CHG_THRESHOLD_CURRENT 1800
+static bool smbchg_is_usbin_active_pwr_src(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + ICL_STS_2_REG, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Could not read usb icl sts 2: %d\n", rc);
+ return false;
+ }
+
+ return !(reg & USBIN_SUSPEND_STS_BIT)
+ && (reg & USBIN_ACTIVE_PWR_SRC_BIT);
+}
+
+static int smbchg_parallel_usb_charging_en(struct smbchg_chip *chip, bool en)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval pval = {0, };
+
+ if (!parallel_psy || !chip->parallel_charger_detected)
+ return 0;
+
+ pval.intval = en;
+ return power_supply_set_property(parallel_psy,
+ POWER_SUPPLY_PROP_CHARGING_ENABLED, &pval);
+}
+
+#define ESR_PULSE_CURRENT_DELTA_MA 200
+static int smbchg_sw_esr_pulse_en(struct smbchg_chip *chip, bool en)
+{
+ int rc, fg_current_now, icl_ma;
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_CURRENT_NOW,
+ &fg_current_now);
+ if (rc) {
+ pr_smb(PR_STATUS, "bms psy does not support OCV\n");
+ return 0;
+ }
+
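+ /*
+ * Keep the battery slightly discharging during the pulse. Purely
+ * illustrative numbers: with iterm_ma = 100 and fg_current_now =
+ * 1000, this picks max(300, 800) = 800 mA for the FCC vote.
+ */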
+ icl_ma = max(chip->iterm_ma + ESR_PULSE_CURRENT_DELTA_MA,
+ fg_current_now - ESR_PULSE_CURRENT_DELTA_MA);
+ rc = vote(chip->fcc_votable, ESR_PULSE_FCC_VOTER, en, icl_ma);
+ if (rc < 0) {
+ pr_err("Couldn't Vote FCC en = %d rc = %d\n", en, rc);
+ return rc;
+ }
+ rc = smbchg_parallel_usb_charging_en(chip, !en);
+ return rc;
+}
+
+#define USB_AICL_CFG 0xF3
+#define AICL_EN_BIT BIT(2)
+static void smbchg_rerun_aicl(struct smbchg_chip *chip)
+{
+ pr_smb(PR_STATUS, "Rerunning AICL...\n");
+ smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, 0);
+ /* Add a delay so that AICL successfully clears */
+ msleep(50);
+ smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, AICL_EN_BIT);
+}
+
+static void taper_irq_en(struct smbchg_chip *chip, bool en)
+{
+ mutex_lock(&chip->taper_irq_lock);
+ if (en != chip->taper_irq_enabled) {
+ if (en) {
+ enable_irq(chip->taper_irq);
+ enable_irq_wake(chip->taper_irq);
+ } else {
+ disable_irq_wake(chip->taper_irq);
+ disable_irq_nosync(chip->taper_irq);
+ }
+ chip->taper_irq_enabled = en;
+ }
+ mutex_unlock(&chip->taper_irq_lock);
+}
+
+static int smbchg_get_aicl_level_ma(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + ICL_STS_1_REG, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Could not read usb icl sts 1: %d\n", rc);
+ return 0;
+ }
+ if (reg & AICL_SUSP_BIT) {
+ pr_warn("AICL suspended: %02x\n", reg);
+ return 0;
+ }
+ reg &= ICL_STS_MASK;
+ if (reg >= chip->tables.usb_ilim_ma_len) {
+ pr_warn("invalid AICL value: %02x\n", reg);
+ return 0;
+ }
+ return chip->tables.usb_ilim_ma_table[reg];
+}
+
+static void smbchg_parallel_usb_disable(struct smbchg_chip *chip)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval pval = {0, };
+ int fcc_ma, usb_icl_ma;
+
+ if (!parallel_psy || !chip->parallel_charger_detected)
+ return;
+ pr_smb(PR_STATUS, "disabling parallel charger\n");
+ chip->parallel.last_disabled = ktime_get_boottime();
+ taper_irq_en(chip, false);
+ chip->parallel.initial_aicl_ma = 0;
+ chip->parallel.current_max_ma = 0;
+ pval.intval = SUSPEND_CURRENT_MA * 1000;
+ power_supply_set_property(parallel_psy, POWER_SUPPLY_PROP_CURRENT_MAX,
+ &pval);
+
+ pval.intval = false;
+ power_supply_set_property(parallel_psy, POWER_SUPPLY_PROP_PRESENT,
+ &pval);
+
+ fcc_ma = get_effective_result_locked(chip->fcc_votable);
+ usb_icl_ma = get_effective_result_locked(chip->usb_icl_votable);
+ if (fcc_ma < 0)
+ pr_err("no voters for fcc, skip it\n");
+ else
+ smbchg_set_fastchg_current_raw(chip, fcc_ma);
+
+ if (usb_icl_ma < 0)
+ pr_err("no voters for usb_icl, skip it\n");
+ else
+ smbchg_set_usb_current_max(chip, usb_icl_ma);
+
+ smbchg_rerun_aicl(chip);
+}
+
+#define PARALLEL_TAPER_MAX_TRIES 3
+#define PARALLEL_FCC_PERCENT_REDUCTION 75
+#define MINIMUM_PARALLEL_FCC_MA 500
+#define CHG_ERROR_BIT BIT(0)
+#define BAT_TAPER_MODE_BIT BIT(6)
+static void smbchg_parallel_usb_taper(struct smbchg_chip *chip)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval pval = {0, };
+ int parallel_fcc_ma, tries = 0;
+ u8 reg = 0;
+
+ if (!parallel_psy || !chip->parallel_charger_detected)
+ return;
+
+ smbchg_stay_awake(chip, PM_PARALLEL_TAPER);
+try_again:
+ mutex_lock(&chip->parallel.lock);
+ if (chip->parallel.current_max_ma == 0) {
+ pr_smb(PR_STATUS, "Not parallel charging, skipping\n");
+ goto done;
+ }
+ power_supply_get_property(parallel_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+ tries += 1;
+ parallel_fcc_ma = pval.intval / 1000;
+ pr_smb(PR_STATUS, "try #%d parallel charger fcc = %d\n",
+ tries, parallel_fcc_ma);
+ if (parallel_fcc_ma < MINIMUM_PARALLEL_FCC_MA
+ || tries > PARALLEL_TAPER_MAX_TRIES) {
+ smbchg_parallel_usb_disable(chip);
+ goto done;
+ }
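+ /*
+ * Illustrative step: a parallel FCC of 1000 mA is reduced to
+ * 1000 * 75 / 100 = 750 mA on each pass, until it drops below
+ * MINIMUM_PARALLEL_FCC_MA or the taper stops retriggering.
+ */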
+ pval.intval = ((parallel_fcc_ma
+ * PARALLEL_FCC_PERCENT_REDUCTION) / 100);
+ pr_smb(PR_STATUS, "reducing FCC of parallel charger to %d\n",
+ pval.intval);
+ /* Change it to uA */
+ pval.intval *= 1000;
+ power_supply_set_property(parallel_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+ /*
+ * sleep here for 100 ms in order to make sure the charger has a chance
+ * to go back into constant current charging
+ */
+ mutex_unlock(&chip->parallel.lock);
+ msleep(100);
+
+ mutex_lock(&chip->parallel.lock);
+ if (chip->parallel.current_max_ma == 0) {
+ pr_smb(PR_STATUS, "Not parallel charging, skipping\n");
+ goto done;
+ }
+ smbchg_read(chip, &reg, chip->chgr_base + RT_STS, 1);
+ if (reg & BAT_TAPER_MODE_BIT) {
+ mutex_unlock(&chip->parallel.lock);
+ goto try_again;
+ }
+ taper_irq_en(chip, true);
+done:
+ mutex_unlock(&chip->parallel.lock);
+ smbchg_relax(chip, PM_PARALLEL_TAPER);
+}
+
+static void smbchg_parallel_usb_enable(struct smbchg_chip *chip,
+ int total_current_ma)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval pval = {0, };
+ int new_parallel_cl_ma, set_parallel_cl_ma, new_pmi_cl_ma, rc;
+ int current_table_index, target_icl_ma;
+ int fcc_ma, main_fastchg_current_ma;
+ int target_parallel_fcc_ma, supplied_parallel_fcc_ma;
+ int parallel_chg_fcc_percent;
+
+ if (!parallel_psy || !chip->parallel_charger_detected)
+ return;
+
+ pr_smb(PR_STATUS, "Attempting to enable parallel charger\n");
+ pval.intval = chip->vfloat_mv + 50;
+ rc = power_supply_set_property(parallel_psy,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX, &pval);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set Vflt on parallel psy rc: %d\n", rc);
+ return;
+ }
+ /* Set USB ICL */
+ target_icl_ma = get_effective_result_locked(chip->usb_icl_votable);
+ if (target_icl_ma < 0) {
+ pr_err("no voters for usb_icl, skip it\n");
+ return;
+ }
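+ /*
+ * Split the input current between the two chargers. Illustrative
+ * numbers: with total_current_ma = 2000 and
+ * smbchg_main_chg_icl_percent = 50, the parallel charger is asked
+ * for 1000 mA and the main charger keeps whatever remains of the
+ * target ICL after the read-back below.
+ */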
+ new_parallel_cl_ma = total_current_ma
+ * (100 - smbchg_main_chg_icl_percent) / 100;
+ taper_irq_en(chip, true);
+
+ pval.intval = true;
+ power_supply_set_property(parallel_psy, POWER_SUPPLY_PROP_PRESENT,
+ &pval);
+
+ pval.intval = new_parallel_cl_ma * 1000;
+ power_supply_set_property(parallel_psy, POWER_SUPPLY_PROP_CURRENT_MAX,
+ &pval);
+
+ /* read back the real amount of current we are getting */
+ power_supply_get_property(parallel_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
+ set_parallel_cl_ma = pval.intval / 1000;
+ chip->parallel.current_max_ma = new_parallel_cl_ma;
+ pr_smb(PR_MISC, "Requested ICL = %d from parallel, got %d\n",
+ new_parallel_cl_ma, set_parallel_cl_ma);
+ new_pmi_cl_ma = max(0, target_icl_ma - set_parallel_cl_ma);
+ pr_smb(PR_STATUS, "New Total USB current = %d[%d, %d]\n",
+ total_current_ma, new_pmi_cl_ma,
+ set_parallel_cl_ma);
+ smbchg_set_usb_current_max(chip, new_pmi_cl_ma);
+
+ /* begin splitting the fast charge current */
+ fcc_ma = get_effective_result_locked(chip->fcc_votable);
+ if (fcc_ma < 0) {
+ pr_err("no voters for fcc, skip it\n");
+ return;
+ }
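+ /*
+ * Same idea for the fast charge current. Illustrative numbers:
+ * with fcc_ma = 3000 and smbchg_main_chg_fcc_percent = 50, the
+ * parallel charger is asked for 1500 mA and the main charger is
+ * left with fcc_ma minus whatever the parallel charger reports.
+ */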
+ parallel_chg_fcc_percent = 100 - smbchg_main_chg_fcc_percent;
+ target_parallel_fcc_ma = (fcc_ma * parallel_chg_fcc_percent) / 100;
+ pval.intval = target_parallel_fcc_ma * 1000;
+ power_supply_set_property(parallel_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+ /* check how much actual current is supplied by the parallel charger */
+ power_supply_get_property(parallel_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+ supplied_parallel_fcc_ma = pval.intval / 1000;
+ pr_smb(PR_MISC, "Requested FCC = %d from parallel, got %d\n",
+ target_parallel_fcc_ma, supplied_parallel_fcc_ma);
+
+ /* then for the main charger, use the left over FCC */
+ current_table_index = find_smaller_in_array(
+ chip->tables.usb_ilim_ma_table,
+ fcc_ma - supplied_parallel_fcc_ma,
+ chip->tables.usb_ilim_ma_len);
+ main_fastchg_current_ma =
+ chip->tables.usb_ilim_ma_table[current_table_index];
+ smbchg_set_fastchg_current_raw(chip, main_fastchg_current_ma);
+ pr_smb(PR_STATUS, "FCC = %d[%d, %d]\n", fcc_ma, main_fastchg_current_ma,
+ supplied_parallel_fcc_ma);
+
+ chip->parallel.enabled_once = true;
+
+ return;
+}
+
+static bool smbchg_is_parallel_usb_ok(struct smbchg_chip *chip,
+ int *ret_total_current_ma)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval pval = {0, };
+ int min_current_thr_ma, rc, type;
+ int total_current_ma, current_limit_ma, parallel_cl_ma;
+ ktime_t kt_since_last_disable;
+ u8 reg;
+ int fcc_ma = get_effective_result_locked(chip->fcc_votable);
+ const char *fcc_voter
+ = get_effective_client_locked(chip->fcc_votable);
+ int usb_icl_ma = get_effective_result_locked(chip->usb_icl_votable);
+
+ if (!parallel_psy || !smbchg_parallel_en
+ || !chip->parallel_charger_detected) {
+ pr_smb(PR_STATUS, "Parallel charging not enabled\n");
+ return false;
+ }
+
+ if (fcc_ma < 0) {
+ pr_err("no voters for fcc! Can't enable parallel\n");
+ return false;
+ }
+ if (usb_icl_ma < 0) {
+ pr_err("no voters for usb_icl, Can't enable parallel\n");
+ return false;
+ }
+
+ kt_since_last_disable = ktime_sub(ktime_get_boottime(),
+ chip->parallel.last_disabled);
+ if (chip->parallel.current_max_ma == 0
+ && chip->parallel.enabled_once
+ && ktime_to_ms(kt_since_last_disable)
+ < PARALLEL_REENABLE_TIMER_MS) {
+ pr_smb(PR_STATUS, "Only been %lld since disable, skipping\n",
+ ktime_to_ms(kt_since_last_disable));
+ return false;
+ }
+
+ /*
+ * If the battery is not present, try not to change parallel charging
+ * from OFF to ON or from ON to OFF, as it could cause the device to
+ * brown out in the instant that the USB settings are changed.
+ *
+ * Only allow the parallel charging check to report false (thereby turning
+ * off parallel charging) if the battery is still there, or if parallel
+ * charging is disabled in the first place.
+ */
+ if (get_prop_charge_type(chip) != POWER_SUPPLY_CHARGE_TYPE_FAST
+ && (get_prop_batt_present(chip)
+ || chip->parallel.current_max_ma == 0)) {
+ pr_smb(PR_STATUS, "Not in fast charge, skipping\n");
+ return false;
+ }
+
+ if (get_prop_batt_health(chip) != POWER_SUPPLY_HEALTH_GOOD) {
+ pr_smb(PR_STATUS, "JEITA active, skipping\n");
+ return false;
+ }
+
+ rc = smbchg_read(chip, &reg, chip->misc_base + IDEV_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read status 5 rc = %d\n", rc);
+ return false;
+ }
+
+ type = get_type(reg);
+ if (get_usb_supply_type(type) == POWER_SUPPLY_TYPE_USB_CDP) {
+ pr_smb(PR_STATUS, "CDP adapter, skipping\n");
+ return false;
+ }
+
+ if (get_usb_supply_type(type) == POWER_SUPPLY_TYPE_USB) {
+ pr_smb(PR_STATUS, "SDP adapter, skipping\n");
+ return false;
+ }
+
+ /*
+ * If USBIN is suspended or not the active power source, do not enable
+ * parallel charging. The device may be charging off of DCIN.
+ */
+ if (!smbchg_is_usbin_active_pwr_src(chip)) {
+ pr_smb(PR_STATUS, "USB not active power source: %02x\n", reg);
+ return false;
+ }
+
+ min_current_thr_ma = smbchg_get_min_parallel_current_ma(chip);
+ if (min_current_thr_ma <= 0) {
+ pr_smb(PR_STATUS, "parallel charger unavailable for thr: %d\n",
+ min_current_thr_ma);
+ return false;
+ }
+
+ if (usb_icl_ma < min_current_thr_ma) {
+ pr_smb(PR_STATUS, "Weak USB chg skip enable: %d < %d\n",
+ usb_icl_ma, min_current_thr_ma);
+ return false;
+ }
+
+ if (!fcc_voter)
+ return false;
+ /*
+ * Suspend the parallel charger if the charging current is < 1800 mA
+ * and the low limit is due to an ESR pulse.
+ */
+ if ((strcmp(fcc_voter, ESR_PULSE_FCC_VOTER) == 0)
+ && fcc_ma < PARALLEL_CHG_THRESHOLD_CURRENT) {
+ pr_smb(PR_STATUS, "FCC %d lower than %d\n",
+ fcc_ma,
+ PARALLEL_CHG_THRESHOLD_CURRENT);
+ return false;
+ }
+
+ current_limit_ma = smbchg_get_aicl_level_ma(chip);
+ if (current_limit_ma <= 0)
+ return false;
+
+ if (chip->parallel.initial_aicl_ma == 0) {
+ if (current_limit_ma < min_current_thr_ma) {
+ pr_smb(PR_STATUS, "Initial AICL very low: %d < %d\n",
+ current_limit_ma, min_current_thr_ma);
+ return false;
+ }
+ chip->parallel.initial_aicl_ma = current_limit_ma;
+ }
+
+ power_supply_get_property(parallel_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
+ parallel_cl_ma = pval.intval / 1000;
+ /*
+ * Read back the real amount of current we are getting.
+ * Treat 2mA as 0 because that is the suspend current setting.
+ */
+ if (parallel_cl_ma <= SUSPEND_CURRENT_MA)
+ parallel_cl_ma = 0;
+
+ /*
+ * Compute the total available input current: the main charger's
+ * AICL result plus the parallel path's current limit, capped at
+ * the effective USB ICL vote.
+ */
+ total_current_ma = min(current_limit_ma + parallel_cl_ma, usb_icl_ma);
+
+ if (total_current_ma < chip->parallel.initial_aicl_ma
+ - chip->parallel.allowed_lowering_ma) {
+ pr_smb(PR_STATUS,
+ "Total current reduced a lot: %d (%d + %d) < %d - %d\n",
+ total_current_ma,
+ current_limit_ma, parallel_cl_ma,
+ chip->parallel.initial_aicl_ma,
+ chip->parallel.allowed_lowering_ma);
+ return false;
+ }
+
+ *ret_total_current_ma = total_current_ma;
+ return true;
+}
+
+#define PARALLEL_CHARGER_EN_DELAY_MS 500
+static void smbchg_parallel_usb_en_work(struct work_struct *work)
+{
+ struct smbchg_chip *chip = container_of(work,
+ struct smbchg_chip,
+ parallel_en_work.work);
+ int previous_aicl_ma, total_current_ma, aicl_ma;
+ bool in_progress;
+
+ /* do a check to see if the aicl is stable */
+ previous_aicl_ma = smbchg_get_aicl_level_ma(chip);
+ msleep(PARALLEL_CHARGER_EN_DELAY_MS);
+ aicl_ma = smbchg_get_aicl_level_ma(chip);
+ if (previous_aicl_ma == aicl_ma) {
+ pr_smb(PR_STATUS, "AICL at %d\n", aicl_ma);
+ } else {
+ pr_smb(PR_STATUS,
+ "AICL changed [%d -> %d], recheck %d ms\n",
+ previous_aicl_ma, aicl_ma,
+ PARALLEL_CHARGER_EN_DELAY_MS);
+ goto recheck;
+ }
+
+ mutex_lock(&chip->parallel.lock);
+ in_progress = (chip->parallel.current_max_ma != 0);
+ if (smbchg_is_parallel_usb_ok(chip, &total_current_ma)) {
+ smbchg_parallel_usb_enable(chip, total_current_ma);
+ } else {
+ if (in_progress) {
+ pr_smb(PR_STATUS, "parallel charging unavailable\n");
+ smbchg_parallel_usb_disable(chip);
+ }
+ }
+ mutex_unlock(&chip->parallel.lock);
+ smbchg_relax(chip, PM_PARALLEL_CHECK);
+ return;
+
+recheck:
+ schedule_delayed_work(&chip->parallel_en_work, 0);
+}
+
+static void smbchg_parallel_usb_check_ok(struct smbchg_chip *chip)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+
+ if (!parallel_psy || !chip->parallel_charger_detected)
+ return;
+
+ smbchg_stay_awake(chip, PM_PARALLEL_CHECK);
+ schedule_delayed_work(&chip->parallel_en_work, 0);
+}
+
+static int charging_suspend_vote_cb(struct votable *votable, void *data,
+ int suspend,
+ const char *client)
+{
+ int rc;
+ struct smbchg_chip *chip = data;
+
+ if (suspend < 0) {
+ pr_err("No voters\n");
+ suspend = false;
+ }
+
+ rc = smbchg_charging_en(chip, !suspend);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't configure batt chg: 0x%x rc = %d\n",
+ !suspend, rc);
+ }
+
+ return rc;
+}
+
+static int usb_suspend_vote_cb(struct votable *votable,
+ void *data,
+ int suspend,
+ const char *client)
+{
+ int rc;
+ struct smbchg_chip *chip = data;
+
+ if (suspend < 0) {
+ pr_err("No voters\n");
+ suspend = false;
+ }
+
+ rc = smbchg_usb_suspend(chip, suspend);
+ if (rc < 0)
+ return rc;
+
+ if ((strcmp(client, THERMAL_EN_VOTER) == 0)
+ || (strcmp(client, POWER_SUPPLY_EN_VOTER) == 0)
+ || (strcmp(client, USER_EN_VOTER) == 0)
+ || (strcmp(client, FAKE_BATTERY_EN_VOTER) == 0))
+ smbchg_parallel_usb_check_ok(chip);
+
+ return rc;
+}
+
+static int dc_suspend_vote_cb(struct votable *votable,
+ void *data,
+ int suspend,
+ const char *client)
+{
+ int rc;
+ struct smbchg_chip *chip = data;
+
+ if (suspend < 0) {
+ pr_err("No voters\n");
+ suspend = false;
+ }
+
+ rc = smbchg_dc_suspend(chip, suspend);
+ if (rc < 0)
+ return rc;
+
+ if (chip->dc_psy_type != -EINVAL && chip->dc_psy)
+ power_supply_changed(chip->dc_psy);
+
+ return rc;
+}
+
+static int set_fastchg_current_vote_cb(struct votable *votable,
+ void *data,
+ int fcc_ma,
+ const char *client)
+{
+ struct smbchg_chip *chip = data;
+ int rc;
+
+ if (fcc_ma < 0) {
+ pr_err("No voters\n");
+ return 0;
+ }
+
+ if (chip->parallel.current_max_ma == 0) {
+ rc = smbchg_set_fastchg_current_raw(chip, fcc_ma);
+ if (rc < 0) {
+ pr_err("Can't set FCC fcc_ma=%d rc=%d\n", fcc_ma, rc);
+ return rc;
+ }
+ }
+ /*
+ * check if parallel charging can be enabled, and if enabled,
+ * distribute the fcc
+ */
+ smbchg_parallel_usb_check_ok(chip);
+ return 0;
+}
+
+static int smbchg_set_fastchg_current_user(struct smbchg_chip *chip,
+ int current_ma)
+{
+ int rc = 0;
+
+ pr_smb(PR_STATUS, "User setting FCC to %d\n", current_ma);
+
+ rc = vote(chip->fcc_votable, BATT_TYPE_FCC_VOTER, true, current_ma);
+ if (rc < 0)
+ pr_err("Couldn't vote en rc %d\n", rc);
+ return rc;
+}
+
+static struct ilim_entry *smbchg_wipower_find_entry(struct smbchg_chip *chip,
+ struct ilim_map *map, int uv)
+{
+ int i;
+ struct ilim_entry *ret = &(chip->wipower_default.entries[0]);
+
+ for (i = 0; i < map->num; i++) {
+ if (is_between(map->entries[i].vmin_uv, map->entries[i].vmax_uv,
+ uv))
+ ret = &map->entries[i];
+ }
+ return ret;
+}
+
+#define ZIN_ICL_PT 0xFC
+#define ZIN_ICL_LV 0xFD
+#define ZIN_ICL_HV 0xFE
+#define ZIN_ICL_MASK SMB_MASK(4, 0)
+static int smbchg_dcin_ilim_config(struct smbchg_chip *chip, int offset, int ma)
+{
+ int i, rc;
+
+ i = find_smaller_in_array(chip->tables.dc_ilim_ma_table,
+ ma, chip->tables.dc_ilim_ma_len);
+
+ if (i < 0)
+ i = 0;
+
+ rc = smbchg_sec_masked_write(chip, chip->bat_if_base + offset,
+ ZIN_ICL_MASK, i);
+ if (rc)
+ dev_err(chip->dev, "Couldn't write bat if offset %d value = %d rc = %d\n",
+ offset, i, rc);
+ return rc;
+}
+
+static int smbchg_wipower_ilim_config(struct smbchg_chip *chip,
+ struct ilim_entry *ilim)
+{
+ int rc = 0;
+
+ if (chip->current_ilim.icl_pt_ma != ilim->icl_pt_ma) {
+ rc = smbchg_dcin_ilim_config(chip, ZIN_ICL_PT, ilim->icl_pt_ma);
+ if (rc)
+ dev_err(chip->dev, "failed to write batif offset %d %dma rc = %d\n",
+ ZIN_ICL_PT, ilim->icl_pt_ma, rc);
+ else
+ chip->current_ilim.icl_pt_ma = ilim->icl_pt_ma;
+ }
+
+ if (chip->current_ilim.icl_lv_ma != ilim->icl_lv_ma) {
+ rc = smbchg_dcin_ilim_config(chip, ZIN_ICL_LV, ilim->icl_lv_ma);
+ if (rc)
+ dev_err(chip->dev, "failed to write batif offset %d %dma rc = %d\n",
+ ZIN_ICL_LV, ilim->icl_lv_ma, rc);
+ else
+ chip->current_ilim.icl_lv_ma = ilim->icl_lv_ma;
+ }
+
+ if (chip->current_ilim.icl_hv_ma != ilim->icl_hv_ma) {
+ rc = smbchg_dcin_ilim_config(chip, ZIN_ICL_HV, ilim->icl_hv_ma);
+ if (rc)
+ dev_err(chip->dev, "failed to write batif offset %d %dma rc = %d\n",
+ ZIN_ICL_HV, ilim->icl_hv_ma, rc);
+ else
+ chip->current_ilim.icl_hv_ma = ilim->icl_hv_ma;
+ }
+ return rc;
+}
+
+static void btm_notify_dcin(enum qpnp_tm_state state, void *ctx);
+static int smbchg_wipower_dcin_btm_configure(struct smbchg_chip *chip,
+ struct ilim_entry *ilim)
+{
+ int rc;
+
+ if (ilim->vmin_uv == chip->current_ilim.vmin_uv
+ && ilim->vmax_uv == chip->current_ilim.vmax_uv)
+ return 0;
+
+ chip->param.channel = DCIN;
+ chip->param.btm_ctx = chip;
+ if (wipower_dcin_interval < ADC_MEAS1_INTERVAL_0MS)
+ wipower_dcin_interval = ADC_MEAS1_INTERVAL_0MS;
+
+ if (wipower_dcin_interval > ADC_MEAS1_INTERVAL_16S)
+ wipower_dcin_interval = ADC_MEAS1_INTERVAL_16S;
+
+ chip->param.timer_interval = wipower_dcin_interval;
+ chip->param.threshold_notification = &btm_notify_dcin;
+ chip->param.high_thr = ilim->vmax_uv + wipower_dcin_hyst_uv;
+ chip->param.low_thr = ilim->vmin_uv - wipower_dcin_hyst_uv;
+ chip->param.state_request = ADC_TM_HIGH_LOW_THR_ENABLE;
+ rc = qpnp_vadc_channel_monitor(chip->vadc_dev, &chip->param);
+ if (rc) {
+ dev_err(chip->dev, "Couldn't configure btm for dcin rc = %d\n",
+ rc);
+ } else {
+ chip->current_ilim.vmin_uv = ilim->vmin_uv;
+ chip->current_ilim.vmax_uv = ilim->vmax_uv;
+ pr_smb(PR_STATUS, "btm ilim = (%duV %duV %dmA %dmA %dmA)\n",
+ ilim->vmin_uv, ilim->vmax_uv,
+ ilim->icl_pt_ma, ilim->icl_lv_ma, ilim->icl_hv_ma);
+ }
+ return rc;
+}
+
+static int smbchg_wipower_icl_configure(struct smbchg_chip *chip,
+ int dcin_uv, bool div2)
+{
+ int rc = 0;
+ struct ilim_map *map = div2 ? &chip->wipower_div2 : &chip->wipower_pt;
+ struct ilim_entry *ilim = smbchg_wipower_find_entry(chip, map, dcin_uv);
+
+ rc = smbchg_wipower_ilim_config(chip, ilim);
+ if (rc) {
+ dev_err(chip->dev, "failed to config ilim rc = %d, dcin_uv = %d , div2 = %d, ilim = (%duV %duV %dmA %dmA %dmA)\n",
+ rc, dcin_uv, div2,
+ ilim->vmin_uv, ilim->vmax_uv,
+ ilim->icl_pt_ma, ilim->icl_lv_ma, ilim->icl_hv_ma);
+ return rc;
+ }
+
+ rc = smbchg_wipower_dcin_btm_configure(chip, ilim);
+ if (rc) {
+ dev_err(chip->dev, "failed to config btm rc = %d, dcin_uv = %d , div2 = %d, ilim = (%duV %duV %dmA %dmA %dmA)\n",
+ rc, dcin_uv, div2,
+ ilim->vmin_uv, ilim->vmax_uv,
+ ilim->icl_pt_ma, ilim->icl_lv_ma, ilim->icl_hv_ma);
+ return rc;
+ }
+ chip->wipower_configured = true;
+ return 0;
+}
+
+static void smbchg_wipower_icl_deconfigure(struct smbchg_chip *chip)
+{
+ int rc;
+ struct ilim_entry *ilim = &(chip->wipower_default.entries[0]);
+
+ if (!chip->wipower_configured)
+ return;
+
+ rc = smbchg_wipower_ilim_config(chip, ilim);
+ if (rc)
+ dev_err(chip->dev, "Couldn't config default ilim rc = %d\n",
+ rc);
+
+ rc = qpnp_vadc_end_channel_monitor(chip->vadc_dev);
+ if (rc)
+ dev_err(chip->dev, "Couldn't de configure btm for dcin rc = %d\n",
+ rc);
+
+ chip->wipower_configured = false;
+ chip->current_ilim.vmin_uv = 0;
+ chip->current_ilim.vmax_uv = 0;
+ chip->current_ilim.icl_pt_ma = ilim->icl_pt_ma;
+ chip->current_ilim.icl_lv_ma = ilim->icl_lv_ma;
+ chip->current_ilim.icl_hv_ma = ilim->icl_hv_ma;
+ pr_smb(PR_WIPOWER, "De config btm\n");
+}
+
+#define FV_STS 0x0C
+#define DIV2_ACTIVE BIT(7)
+static void __smbchg_wipower_check(struct smbchg_chip *chip)
+{
+ int chg_type;
+ bool usb_present, dc_present;
+ int rc;
+ int dcin_uv;
+ bool div2;
+ struct qpnp_vadc_result adc_result;
+ u8 reg;
+
+ if (!wipower_dyn_icl_en) {
+ smbchg_wipower_icl_deconfigure(chip);
+ return;
+ }
+
+ chg_type = get_prop_charge_type(chip);
+ usb_present = is_usb_present(chip);
+ dc_present = is_dc_present(chip);
+ if (chg_type != POWER_SUPPLY_CHARGE_TYPE_NONE
+ && !usb_present
+ && dc_present
+ && chip->dc_psy_type == POWER_SUPPLY_TYPE_WIPOWER) {
+ rc = qpnp_vadc_read(chip->vadc_dev, DCIN, &adc_result);
+ if (rc) {
+ pr_smb(PR_STATUS, "error DCIN read rc = %d\n", rc);
+ return;
+ }
+ dcin_uv = adc_result.physical;
+
+ /* check div_by_2 */
+ rc = smbchg_read(chip, &reg, chip->chgr_base + FV_STS, 1);
+ if (rc) {
+ pr_smb(PR_STATUS, "error DCIN read rc = %d\n", rc);
+ return;
+ }
+ div2 = !!(reg & DIV2_ACTIVE);
+
+ pr_smb(PR_WIPOWER,
+ "config ICL chg_type = %d usb = %d dc = %d dcin_uv(adc_code) = %d (0x%x) div2 = %d\n",
+ chg_type, usb_present, dc_present, dcin_uv,
+ adc_result.adc_code, div2);
+ smbchg_wipower_icl_configure(chip, dcin_uv, div2);
+ } else {
+ pr_smb(PR_WIPOWER,
+ "deconfig ICL chg_type = %d usb = %d dc = %d\n",
+ chg_type, usb_present, dc_present);
+ smbchg_wipower_icl_deconfigure(chip);
+ }
+}
+
+static void smbchg_wipower_check(struct smbchg_chip *chip)
+{
+ if (!chip->wipower_dyn_icl_avail)
+ return;
+
+ mutex_lock(&chip->wipower_config);
+ __smbchg_wipower_check(chip);
+ mutex_unlock(&chip->wipower_config);
+}
+
+static void btm_notify_dcin(enum qpnp_tm_state state, void *ctx)
+{
+ struct smbchg_chip *chip = ctx;
+
+ mutex_lock(&chip->wipower_config);
+ pr_smb(PR_WIPOWER, "%s state\n",
+ state == ADC_TM_LOW_STATE ? "low" : "high");
+ chip->current_ilim.vmin_uv = 0;
+ chip->current_ilim.vmax_uv = 0;
+ __smbchg_wipower_check(chip);
+ mutex_unlock(&chip->wipower_config);
+}
+
+static int force_dcin_icl_write(void *data, u64 val)
+{
+ struct smbchg_chip *chip = data;
+
+ smbchg_wipower_check(chip);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(force_dcin_icl_ops, NULL,
+ force_dcin_icl_write, "0x%02llx\n");
+
+/*
+ * set the dc charge path's maximum allowed current draw
+ * that may be limited by the system's thermal level
+ */
+static int set_dc_current_limit_vote_cb(struct votable *votable,
+ void *data,
+ int icl_ma,
+ const char *client)
+{
+ struct smbchg_chip *chip = data;
+
+ if (icl_ma < 0) {
+ pr_err("No voters\n");
+ return 0;
+ }
+
+ return smbchg_set_dc_current_max(chip, icl_ma);
+}
+
+/*
+ * set the usb charge path's maximum allowed current draw
+ * that may be limited by the system's thermal level
+ */
+static int set_usb_current_limit_vote_cb(struct votable *votable,
+ void *data,
+ int icl_ma,
+ const char *client)
+{
+ struct smbchg_chip *chip = data;
+ int rc, aicl_ma;
+ const char *effective_id;
+
+ if (icl_ma < 0) {
+ pr_err("No voters\n");
+ return 0;
+ }
+ effective_id = get_effective_client_locked(chip->usb_icl_votable);
+
+ if (!effective_id)
+ return 0;
+
+ /* disable parallel charging if HVDCP is voting for 300mA */
+ if (strcmp(effective_id, HVDCP_ICL_VOTER) == 0)
+ smbchg_parallel_usb_disable(chip);
+
+ if (chip->parallel.current_max_ma == 0) {
+ rc = smbchg_set_usb_current_max(chip, icl_ma);
+ if (rc) {
+ pr_err("Failed to set usb current max: %d\n", rc);
+ return rc;
+ }
+ }
+
+ /* skip the aicl rerun if hvdcp icl voter is active */
+ if (strcmp(effective_id, HVDCP_ICL_VOTER) == 0)
+ return 0;
+
+ aicl_ma = smbchg_get_aicl_level_ma(chip);
+ if (icl_ma > aicl_ma)
+ smbchg_rerun_aicl(chip);
+ smbchg_parallel_usb_check_ok(chip);
+ return 0;
+}
+
+static int smbchg_system_temp_level_set(struct smbchg_chip *chip,
+ int lvl_sel)
+{
+ int rc = 0;
+ int prev_therm_lvl;
+ int thermal_icl_ma;
+
+ if (!chip->thermal_mitigation) {
+ dev_err(chip->dev, "Thermal mitigation not supported\n");
+ return -EINVAL;
+ }
+
+ if (lvl_sel < 0) {
+ dev_err(chip->dev, "Unsupported level selected %d\n", lvl_sel);
+ return -EINVAL;
+ }
+
+ if (lvl_sel >= chip->thermal_levels) {
+ dev_err(chip->dev, "Unsupported level selected %d forcing %d\n",
+ lvl_sel, chip->thermal_levels - 1);
+ lvl_sel = chip->thermal_levels - 1;
+ }
+
+ if (lvl_sel == chip->therm_lvl_sel)
+ return 0;
+
+ mutex_lock(&chip->therm_lvl_lock);
+ prev_therm_lvl = chip->therm_lvl_sel;
+ chip->therm_lvl_sel = lvl_sel;
+ if (chip->therm_lvl_sel == (chip->thermal_levels - 1)) {
+ /*
+ * Disable charging when the highest level is selected by
+ * putting the DC and USB paths in suspend
+ */
+ rc = vote(chip->dc_suspend_votable, THERMAL_EN_VOTER, true, 0);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set dc suspend rc %d\n", rc);
+ goto out;
+ }
+ rc = vote(chip->usb_suspend_votable, THERMAL_EN_VOTER, true, 0);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set usb suspend rc %d\n", rc);
+ goto out;
+ }
+ goto out;
+ }
+
+ if (chip->therm_lvl_sel == 0) {
+ rc = vote(chip->usb_icl_votable, THERMAL_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't disable USB thermal ICL vote rc=%d\n",
+ rc);
+
+ rc = vote(chip->dc_icl_votable, THERMAL_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't disable DC thermal ICL vote rc=%d\n",
+ rc);
+ } else {
+ thermal_icl_ma =
+ (int)chip->thermal_mitigation[chip->therm_lvl_sel];
+ rc = vote(chip->usb_icl_votable, THERMAL_ICL_VOTER, true,
+ thermal_icl_ma);
+ if (rc < 0)
+ pr_err("Couldn't vote for USB thermal ICL rc=%d\n", rc);
+
+ rc = vote(chip->dc_icl_votable, THERMAL_ICL_VOTER, true,
+ thermal_icl_ma);
+ if (rc < 0)
+ pr_err("Couldn't vote for DC thermal ICL rc=%d\n", rc);
+ }
+
+ if (prev_therm_lvl == chip->thermal_levels - 1) {
+ /*
+ * If the highest value was previously selected, charging must
+ * have been disabled. Enable charging by taking the DC and USB
+ * paths out of suspend.
+ */
+ rc = vote(chip->dc_suspend_votable, THERMAL_EN_VOTER, false, 0);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set dc suspend rc %d\n", rc);
+ goto out;
+ }
+ rc = vote(chip->usb_suspend_votable, THERMAL_EN_VOTER,
+ false, 0);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set usb suspend rc %d\n", rc);
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&chip->therm_lvl_lock);
+ return rc;
+}
+
+static int smbchg_ibat_ocp_threshold_ua = 4500000;
+module_param(smbchg_ibat_ocp_threshold_ua, int, 0644);
+
+#define UCONV 1000000LL
+#define MCONV 1000LL
+#define FLASH_V_THRESHOLD 3000000
+#define FLASH_VDIP_MARGIN 100000
+#define VPH_FLASH_VDIP (FLASH_V_THRESHOLD + FLASH_VDIP_MARGIN)
+#define BUCK_EFFICIENCY 800LL
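+/*
+ * BUCK_EFFICIENCY is expressed in per-mille (800 = 80%); the extra
+ * MCONV divisor in the available-current calculation below cancels
+ * the factor of 1000.
+ */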
+static int smbchg_calc_max_flash_current(struct smbchg_chip *chip)
+{
+ int ocv_uv, esr_uohm, rbatt_uohm, ibat_now, rc;
+ int64_t ibat_flash_ua, avail_flash_ua, avail_flash_power_fw;
+ int64_t ibat_safe_ua, vin_flash_uv, vph_flash_uv;
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_VOLTAGE_OCV, &ocv_uv);
+ if (rc) {
+ pr_smb(PR_STATUS, "bms psy does not support OCV\n");
+ return 0;
+ }
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_RESISTANCE,
+ &esr_uohm);
+ if (rc) {
+ pr_smb(PR_STATUS, "bms psy does not support resistance\n");
+ return 0;
+ }
+
+ rc = msm_bcl_read(BCL_PARAM_CURRENT, &ibat_now);
+ if (rc) {
+ pr_smb(PR_STATUS, "BCL current read failed: %d\n", rc);
+ return 0;
+ }
+
+ rbatt_uohm = esr_uohm + chip->rpara_uohm + chip->rslow_uohm;
+ /*
+ * Calculate the maximum current that can be pulled out of the battery
+ * before the battery voltage dips below a safe threshold.
+ */
+ ibat_safe_ua = div_s64((ocv_uv - VPH_FLASH_VDIP) * UCONV,
+ rbatt_uohm);
+
+ if (ibat_safe_ua <= smbchg_ibat_ocp_threshold_ua) {
+ /*
+ * If the calculated current is below the OCP threshold, then
+ * use it as the possible flash current.
+ */
+ ibat_flash_ua = ibat_safe_ua - ibat_now;
+ vph_flash_uv = VPH_FLASH_VDIP;
+ } else {
+ /*
+ * If the calculated current is above the OCP threshold, then
+ * use the ocp threshold instead.
+ *
+ * Any higher current will be tripping the battery OCP.
+ */
+ ibat_flash_ua = smbchg_ibat_ocp_threshold_ua - ibat_now;
+ vph_flash_uv = ocv_uv - div64_s64((int64_t)rbatt_uohm
+ * smbchg_ibat_ocp_threshold_ua, UCONV);
+ }
+ /* Calculate the input voltage of the flash module. */
+ vin_flash_uv = max((chip->vled_max_uv + 500000LL),
+ div64_s64((vph_flash_uv * 1200), 1000));
+ /* Calculate the available power for the flash module. */
+ avail_flash_power_fw = BUCK_EFFICIENCY * vph_flash_uv * ibat_flash_ua;
+ /*
+ * Calculate the available amount of current the flash module can draw
+ * before collapsing the battery (available power / flash input voltage).
+ */
+ avail_flash_ua = div64_s64(avail_flash_power_fw, vin_flash_uv * MCONV);
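+ /*
+ * Illustrative numbers: with ocv = 3.8 V, rbatt = 150 mohm and
+ * ibat_now = 1 A, ibat_safe works out to roughly 4.67 A, above the
+ * 4.5 A OCP threshold, so ibat_flash = 3.5 A and vph_flash =
+ * 3.8 V - 0.15 ohm * 4.5 A = 3.125 V. With vin_flash = 3.75 V this
+ * leaves about 0.8 * 3.125 * 3.5 / 3.75 = 2.33 A for the flash.
+ */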
+ pr_smb(PR_MISC,
+ "avail_iflash=%lld, ocv=%d, ibat=%d, rbatt=%d\n",
+ avail_flash_ua, ocv_uv, ibat_now, rbatt_uohm);
+ return (int)avail_flash_ua;
+}
+
+#define FCC_CMP_CFG 0xF3
+#define FCC_COMP_MASK SMB_MASK(1, 0)
+static int smbchg_fastchg_current_comp_set(struct smbchg_chip *chip,
+ int comp_current)
+{
+ int rc;
+ u8 i;
+
+ for (i = 0; i < chip->tables.fcc_comp_len; i++)
+ if (comp_current == chip->tables.fcc_comp_table[i])
+ break;
+
+ if (i >= chip->tables.fcc_comp_len)
+ return -EINVAL;
+
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + FCC_CMP_CFG,
+ FCC_COMP_MASK, i);
+
+ if (rc)
+ dev_err(chip->dev, "Couldn't set fastchg current comp rc = %d\n",
+ rc);
+
+ return rc;
+}
+
+#define CFG_TCC_REG 0xF9
+#define CHG_ITERM_MASK SMB_MASK(2, 0)
+static int smbchg_iterm_set(struct smbchg_chip *chip, int iterm_ma)
+{
+ int rc;
+ u8 reg;
+
+ reg = find_closest_in_array(
+ chip->tables.iterm_ma_table,
+ chip->tables.iterm_ma_len,
+ iterm_ma);
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->chgr_base + CFG_TCC_REG,
+ CHG_ITERM_MASK, reg);
+ if (rc) {
+ dev_err(chip->dev,
+ "Couldn't set iterm rc = %d\n", rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "set tcc (%d) to 0x%02x\n",
+ iterm_ma, reg);
+ chip->iterm_ma = iterm_ma;
+
+ return 0;
+}
+
+#define FV_CMP_CFG 0xF5
+#define FV_COMP_MASK SMB_MASK(5, 0)
+static int smbchg_float_voltage_comp_set(struct smbchg_chip *chip, int code)
+{
+ int rc;
+ u8 val;
+
+ val = code & FV_COMP_MASK;
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + FV_CMP_CFG,
+ FV_COMP_MASK, val);
+
+ if (rc)
+ dev_err(chip->dev, "Couldn't set float voltage comp rc = %d\n",
+ rc);
+
+ return rc;
+}
+
+#define VFLOAT_CFG_REG 0xF4
+#define MIN_FLOAT_MV 3600
+#define MAX_FLOAT_MV 4500
+#define VFLOAT_MASK SMB_MASK(5, 0)
+
+#define MID_RANGE_FLOAT_MV_MIN 3600
+#define MID_RANGE_FLOAT_MIN_VAL 0x05
+#define MID_RANGE_FLOAT_STEP_MV 20
+
+#define HIGH_RANGE_FLOAT_MIN_MV 4340
+#define HIGH_RANGE_FLOAT_MIN_VAL 0x2A
+#define HIGH_RANGE_FLOAT_STEP_MV 10
+
+#define VHIGH_RANGE_FLOAT_MIN_MV 4360
+#define VHIGH_RANGE_FLOAT_MIN_VAL 0x2C
+#define VHIGH_RANGE_FLOAT_STEP_MV 20
+static int smbchg_float_voltage_set(struct smbchg_chip *chip, int vfloat_mv)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval prop;
+ int rc, delta;
+ u8 temp;
+
+ if ((vfloat_mv < MIN_FLOAT_MV) || (vfloat_mv > MAX_FLOAT_MV)) {
+ dev_err(chip->dev, "bad float voltage mv =%d asked to set\n",
+ vfloat_mv);
+ return -EINVAL;
+ }
+
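+ /*
+ * Map vfloat_mv onto the register ranges below. Illustrative
+ * mapping: vfloat_mv = 4400 falls in the very high range, so
+ * delta = 40 and temp = 0x2C + 40 / 20 = 0x2E; 4400 is already a
+ * multiple of the 20 mV step, so no rounding is applied.
+ */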
+ if (vfloat_mv <= HIGH_RANGE_FLOAT_MIN_MV) {
+ /* mid range */
+ delta = vfloat_mv - MID_RANGE_FLOAT_MV_MIN;
+ temp = MID_RANGE_FLOAT_MIN_VAL + delta
+ / MID_RANGE_FLOAT_STEP_MV;
+ vfloat_mv -= delta % MID_RANGE_FLOAT_STEP_MV;
+ } else if (vfloat_mv <= VHIGH_RANGE_FLOAT_MIN_MV) {
+ /* high range */
+ delta = vfloat_mv - HIGH_RANGE_FLOAT_MIN_MV;
+ temp = HIGH_RANGE_FLOAT_MIN_VAL + delta
+ / HIGH_RANGE_FLOAT_STEP_MV;
+ vfloat_mv -= delta % HIGH_RANGE_FLOAT_STEP_MV;
+ } else {
+ /* very high range */
+ delta = vfloat_mv - VHIGH_RANGE_FLOAT_MIN_MV;
+ temp = VHIGH_RANGE_FLOAT_MIN_VAL + delta
+ / VHIGH_RANGE_FLOAT_STEP_MV;
+ vfloat_mv -= delta % VHIGH_RANGE_FLOAT_STEP_MV;
+ }
+
+ if (parallel_psy) {
+ prop.intval = vfloat_mv + 50;
+ rc = power_supply_set_property(parallel_psy,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX, &prop);
+ if (rc)
+ dev_err(chip->dev, "Couldn't set float voltage on parallel psy rc: %d\n",
+ rc);
+ }
+
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + VFLOAT_CFG_REG,
+ VFLOAT_MASK, temp);
+
+ if (rc)
+ dev_err(chip->dev, "Couldn't set float voltage rc = %d\n", rc);
+ else
+ chip->vfloat_mv = vfloat_mv;
+
+ return rc;
+}
+
+static int smbchg_float_voltage_get(struct smbchg_chip *chip)
+{
+ return chip->vfloat_mv;
+}
+
+#define SFT_CFG 0xFD
+#define SFT_EN_MASK SMB_MASK(5, 4)
+#define SFT_TO_MASK SMB_MASK(3, 2)
+#define PRECHG_SFT_TO_MASK SMB_MASK(1, 0)
+#define SFT_TIMER_DISABLE_BIT BIT(5)
+#define PRECHG_SFT_TIMER_DISABLE_BIT BIT(4)
+#define SAFETY_TIME_MINUTES_SHIFT 2
+static int smbchg_safety_timer_enable(struct smbchg_chip *chip, bool enable)
+{
+ int rc;
+ u8 reg;
+
+ if (enable == chip->safety_timer_en)
+ return 0;
+
+ if (enable)
+ reg = 0;
+ else
+ reg = SFT_TIMER_DISABLE_BIT | PRECHG_SFT_TIMER_DISABLE_BIT;
+
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + SFT_CFG,
+ SFT_EN_MASK, reg);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't %s safety timer rc = %d\n",
+ enable ? "enable" : "disable", rc);
+ return rc;
+ }
+ chip->safety_timer_en = enable;
+ return 0;
+}
+
+enum skip_reason {
+ REASON_OTG_ENABLED = BIT(0),
+ REASON_FLASH_ENABLED = BIT(1)
+};
+
+#define BAT_IF_TRIM7_REG 0xF7
+#define CFG_750KHZ_BIT BIT(1)
+#define MISC_CFG_NTC_VOUT_REG 0xF3
+#define CFG_NTC_VOUT_FSW_BIT BIT(0)
+static int smbchg_switch_buck_frequency(struct smbchg_chip *chip,
+ bool flash_active)
+{
+ int rc;
+
+ if (!(chip->wa_flags & SMBCHG_FLASH_BUCK_SWITCH_FREQ_WA))
+ return 0;
+
+ if (chip->flash_active == flash_active) {
+ pr_smb(PR_STATUS, "Fsw not changed, flash_active: %d\n",
+ flash_active);
+ return 0;
+ }
+
+ /*
+ * As per the systems team recommendation, before the flash fires,
+ * the buck switching frequency (Fsw) needs to be increased to 1MHz.
+ * Once the flash is disabled, Fsw needs to be set back to 750KHz.
+ */
+ rc = smbchg_sec_masked_write(chip, chip->misc_base +
+ MISC_CFG_NTC_VOUT_REG, CFG_NTC_VOUT_FSW_BIT,
+ flash_active ? CFG_NTC_VOUT_FSW_BIT : 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set switching frequency multiplier rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smbchg_sec_masked_write(chip, chip->bat_if_base + BAT_IF_TRIM7_REG,
+ CFG_750KHZ_BIT, flash_active ? 0 : CFG_750KHZ_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Cannot set switching freq: %d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_STATUS, "Fsw @ %sHz\n", flash_active ? "1M" : "750K");
+ chip->flash_active = flash_active;
+ return 0;
+}
+
+#define OTG_TRIM6 0xF6
+#define TR_ENB_SKIP_BIT BIT(2)
+#define OTG_EN_BIT BIT(0)
+static int smbchg_otg_pulse_skip_disable(struct smbchg_chip *chip,
+ enum skip_reason reason, bool disable)
+{
+ int rc;
+ bool disabled;
+
+ disabled = !!chip->otg_pulse_skip_dis;
+ pr_smb(PR_STATUS, "%s pulse skip, reason %d\n",
+ disable ? "disabling" : "enabling", reason);
+ if (disable)
+ chip->otg_pulse_skip_dis |= reason;
+ else
+ chip->otg_pulse_skip_dis &= ~reason;
+ if (disabled == !!chip->otg_pulse_skip_dis)
+ return 0;
+ disabled = !!chip->otg_pulse_skip_dis;
+
+ rc = smbchg_sec_masked_write(chip, chip->otg_base + OTG_TRIM6,
+ TR_ENB_SKIP_BIT, disabled ? TR_ENB_SKIP_BIT : 0);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't %s otg pulse skip rc = %d\n",
+ disabled ? "disable" : "enable", rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "%s pulse skip\n", disabled ? "disabled" : "enabled");
+ return 0;
+}
+
+#define LOW_PWR_OPTIONS_REG 0xFF
+#define FORCE_TLIM_BIT BIT(4)
+static int smbchg_force_tlim_en(struct smbchg_chip *chip, bool enable)
+{
+ int rc;
+
+ rc = smbchg_sec_masked_write(chip, chip->otg_base + LOW_PWR_OPTIONS_REG,
+ FORCE_TLIM_BIT, enable ? FORCE_TLIM_BIT : 0);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't %s otg force tlim rc = %d\n",
+ enable ? "enable" : "disable", rc);
+ return rc;
+ }
+ return rc;
+}
+
+static void smbchg_vfloat_adjust_check(struct smbchg_chip *chip)
+{
+ if (!chip->use_vfloat_adjustments)
+ return;
+
+ smbchg_stay_awake(chip, PM_REASON_VFLOAT_ADJUST);
+ pr_smb(PR_STATUS, "Starting vfloat adjustments\n");
+ schedule_delayed_work(&chip->vfloat_adjust_work, 0);
+}
+
+#define FV_STS_REG 0xC
+#define AICL_INPUT_STS_BIT BIT(6)
+static bool smbchg_is_input_current_limited(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->chgr_base + FV_STS_REG, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read FV_STS rc=%d\n", rc);
+ return false;
+ }
+
+ return !!(reg & AICL_INPUT_STS_BIT);
+}
+
+#define SW_ESR_PULSE_MS 1500
+static void smbchg_cc_esr_wa_check(struct smbchg_chip *chip)
+{
+ int rc, esr_count;
+
+ if (!(chip->wa_flags & SMBCHG_CC_ESR_WA))
+ return;
+
+ if (!is_usb_present(chip) && !is_dc_present(chip)) {
+ pr_smb(PR_STATUS, "No inputs present, skipping\n");
+ return;
+ }
+
+ if (get_prop_charge_type(chip) != POWER_SUPPLY_CHARGE_TYPE_FAST) {
+ pr_smb(PR_STATUS, "Not in fast charge, skipping\n");
+ return;
+ }
+
+ if (!smbchg_is_input_current_limited(chip)) {
+ pr_smb(PR_STATUS, "Not input current limited, skipping\n");
+ return;
+ }
+
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_UPDATE_NOW, 1);
+ rc = get_property_from_fg(chip,
+ POWER_SUPPLY_PROP_ESR_COUNT, &esr_count);
+ if (rc) {
+ pr_smb(PR_STATUS,
+ "could not read ESR counter rc = %d\n", rc);
+ return;
+ }
+
+ /*
+ * The esr_count is counting down the number of fuel gauge cycles
+ * before an ESR pulse is needed.
+ *
+ * After a successful ESR pulse, this count is reset to some
+ * high number like 28. If this reaches 0, then the fuel gauge
+ * hardware should force an ESR pulse.
+ *
+ * However, if the device is in constant current charge mode while
+ * being input current limited, the ESR pulse will not affect the
+ * battery current, so the measurement will fail.
+ *
+ * As a failsafe, force a manual ESR pulse if this value is read as
+ * 0.
+ */
+ if (esr_count != 0) {
+ pr_smb(PR_STATUS, "ESR count is not zero, skipping\n");
+ return;
+ }
+
+ pr_smb(PR_STATUS, "Lowering charge current for ESR pulse\n");
+ smbchg_stay_awake(chip, PM_ESR_PULSE);
+ smbchg_sw_esr_pulse_en(chip, true);
+ msleep(SW_ESR_PULSE_MS);
+ pr_smb(PR_STATUS, "Raising charge current for ESR pulse\n");
+ smbchg_relax(chip, PM_ESR_PULSE);
+ smbchg_sw_esr_pulse_en(chip, false);
+}
+
+static void smbchg_soc_changed(struct smbchg_chip *chip)
+{
+ smbchg_cc_esr_wa_check(chip);
+}
+
+#define DC_AICL_CFG 0xF3
+#define MISC_TRIM_OPT_15_8 0xF5
+#define USB_AICL_DEGLITCH_MASK (BIT(5) | BIT(4) | BIT(3))
+#define USB_AICL_DEGLITCH_SHORT (BIT(5) | BIT(4) | BIT(3))
+#define USB_AICL_DEGLITCH_LONG 0
+#define DC_AICL_DEGLITCH_MASK (BIT(5) | BIT(4) | BIT(3))
+#define DC_AICL_DEGLITCH_SHORT (BIT(5) | BIT(4) | BIT(3))
+#define DC_AICL_DEGLITCH_LONG 0
+#define AICL_RERUN_MASK (BIT(5) | BIT(4))
+#define AICL_RERUN_ON (BIT(5) | BIT(4))
+#define AICL_RERUN_OFF 0
+
+static int smbchg_hw_aicl_rerun_enable_indirect_cb(struct votable *votable,
+ void *data,
+ int enable,
+ const char *client)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = data;
+
+ if (enable < 0) {
+ pr_err("No voters\n");
+ enable = 0;
+ }
+ /*
+ * If the indirect voting result of all the clients is to enable hw aicl
+ * rerun, then remove our vote to disable hw aicl rerun
+ */
+ rc = vote(chip->hw_aicl_rerun_disable_votable,
+ HW_AICL_RERUN_ENABLE_INDIRECT_VOTER, !enable, 0);
+ if (rc < 0) {
+ pr_err("Couldn't vote for hw rerun rc= %d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int smbchg_hw_aicl_rerun_disable_cb(struct votable *votable, void *data,
+ int disable,
+ const char *client)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = data;
+
+ if (disable < 0) {
+ pr_err("No voters\n");
+ disable = 0;
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->misc_base + MISC_TRIM_OPT_15_8,
+ AICL_RERUN_MASK, disable ? AICL_RERUN_OFF : AICL_RERUN_ON);
+ if (rc < 0)
+ pr_err("Couldn't write to MISC_TRIM_OPTIONS_15_8 rc=%d\n", rc);
+
+ return rc;
+}
+
+static int smbchg_aicl_deglitch_config_cb(struct votable *votable, void *data,
+ int shorter,
+ const char *client)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = data;
+
+ if (shorter < 0) {
+ pr_err("No voters\n");
+ shorter = 0;
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + USB_AICL_CFG,
+ USB_AICL_DEGLITCH_MASK,
+ shorter ? USB_AICL_DEGLITCH_SHORT : USB_AICL_DEGLITCH_LONG);
+ if (rc < 0) {
+ pr_err("Couldn't write to USB_AICL_CFG rc=%d\n", rc);
+ return rc;
+ }
+ rc = smbchg_sec_masked_write(chip,
+ chip->dc_chgpth_base + DC_AICL_CFG,
+ DC_AICL_DEGLITCH_MASK,
+ shorter ? DC_AICL_DEGLITCH_SHORT : DC_AICL_DEGLITCH_LONG);
+ if (rc < 0) {
+ pr_err("Couldn't write to DC_AICL_CFG rc=%d\n", rc);
+ return rc;
+ }
+ return rc;
+}
+
+static void smbchg_aicl_deglitch_wa_en(struct smbchg_chip *chip, bool en)
+{
+ int rc;
+
+ rc = vote(chip->aicl_deglitch_short_votable,
+ VARB_WORKAROUND_VOTER, en, 0);
+ if (rc < 0) {
+ pr_err("Couldn't vote %s deglitch rc=%d\n",
+ en ? "short" : "long", rc);
+ return;
+ }
+ pr_smb(PR_STATUS, "AICL deglitch set to %s\n", en ? "short" : "long");
+
+ rc = vote(chip->hw_aicl_rerun_enable_indirect_votable,
+ VARB_WORKAROUND_VOTER, en, 0);
+ if (rc < 0) {
+ pr_err("Couldn't vote hw aicl rerun rc= %d\n", rc);
+ return;
+ }
+ chip->aicl_deglitch_short = en;
+}
+
+static void smbchg_aicl_deglitch_wa_check(struct smbchg_chip *chip)
+{
+ union power_supply_propval prop = {0,};
+ int rc;
+ bool low_volt_chgr = true;
+
+ if (!(chip->wa_flags & SMBCHG_AICL_DEGLITCH_WA))
+ return;
+
+ if (!is_usb_present(chip) && !is_dc_present(chip)) {
+ pr_smb(PR_STATUS, "Charger removed\n");
+ smbchg_aicl_deglitch_wa_en(chip, false);
+ return;
+ }
+
+ if (!chip->bms_psy)
+ return;
+
+ if (is_usb_present(chip)) {
+ if (is_hvdcp_present(chip))
+ low_volt_chgr = false;
+ } else if (is_dc_present(chip)) {
+ if (chip->dc_psy_type == POWER_SUPPLY_TYPE_WIPOWER)
+ low_volt_chgr = false;
+ else
+ low_volt_chgr = chip->low_volt_dcin;
+ }
+
+ if (!low_volt_chgr) {
+ pr_smb(PR_STATUS, "High volt charger! Don't set deglitch\n");
+ smbchg_aicl_deglitch_wa_en(chip, false);
+ return;
+ }
+
+ /*
+ * It is possible that the battery voltage went above the threshold
+ * when the charger was inserted and later drops below it because of
+ * system load. We shouldn't reconfigure the AICL deglitch when this
+ * happens, as that would reintroduce the oscillation being fixed
+ * here. Set it once when the battery voltage crosses the threshold
+ * (e.g. 4.2 V) and clear it only when the charger is removed.
+ */
+ if (!chip->vbat_above_headroom) {
+ rc = power_supply_get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN, &prop);
+ if (rc < 0) {
+ pr_err("could not read voltage_min, rc=%d\n", rc);
+ return;
+ }
+ chip->vbat_above_headroom = !prop.intval;
+ }
+ smbchg_aicl_deglitch_wa_en(chip, chip->vbat_above_headroom);
+}
+
+#define MISC_TEST_REG 0xE2
+#define BB_LOOP_DISABLE_ICL BIT(2)
+static int smbchg_icl_loop_disable_check(struct smbchg_chip *chip)
+{
+ bool icl_disabled = !chip->chg_otg_enabled && chip->flash_triggered;
+ int rc = 0;
+
+ if ((chip->wa_flags & SMBCHG_FLASH_ICL_DISABLE_WA)
+ && icl_disabled != chip->icl_disabled) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->misc_base + MISC_TEST_REG,
+ BB_LOOP_DISABLE_ICL,
+ icl_disabled ? BB_LOOP_DISABLE_ICL : 0);
+ chip->icl_disabled = icl_disabled;
+ }
+
+ return rc;
+}
+
+#define UNKNOWN_BATT_TYPE "Unknown Battery"
+#define LOADING_BATT_TYPE "Loading Battery Data"
+static int smbchg_config_chg_battery_type(struct smbchg_chip *chip)
+{
+ int rc = 0, max_voltage_uv = 0, fastchg_ma = 0, ret = 0, iterm_ua = 0;
+ struct device_node *batt_node, *profile_node;
+ struct device_node *node = chip->pdev->dev.of_node;
+ union power_supply_propval prop = {0,};
+
+ rc = power_supply_get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_BATTERY_TYPE, &prop);
+ if (rc) {
+ pr_smb(PR_STATUS, "Unable to read battery-type rc=%d\n", rc);
+ return 0;
+ }
+ if (!strcmp(prop.strval, UNKNOWN_BATT_TYPE) ||
+ !strcmp(prop.strval, LOADING_BATT_TYPE)) {
+ pr_smb(PR_MISC, "Battery-type not identified\n");
+ return 0;
+ }
+ /* quit if there is no change in the battery-type from previous */
+ if (chip->battery_type && !strcmp(prop.strval, chip->battery_type))
+ return 0;
+
+ chip->battery_type = prop.strval;
+ batt_node = of_parse_phandle(node, "qcom,battery-data", 0);
+ if (!batt_node) {
+ pr_smb(PR_MISC, "No batterydata available\n");
+ return 0;
+ }
+
+ rc = power_supply_get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_RESISTANCE_ID, &prop);
+ if (rc < 0) {
+ pr_smb(PR_STATUS, "Unable to read battery-id rc=%d\n", rc);
+ return 0;
+ }
+
+ profile_node = of_batterydata_get_best_profile(batt_node,
+ prop.intval / 1000, NULL);
+ if (IS_ERR_OR_NULL(profile_node)) {
+ rc = PTR_ERR(profile_node);
+ pr_err("couldn't find profile handle %d\n", rc);
+ return rc;
+ }
+
+ /* change vfloat */
+ rc = of_property_read_u32(profile_node, "qcom,max-voltage-uv",
+ &max_voltage_uv);
+ if (rc) {
+ pr_warn("couldn't find battery max voltage rc=%d\n", rc);
+ ret = rc;
+ } else {
+ if (chip->vfloat_mv != (max_voltage_uv / 1000)) {
+ pr_info("Vfloat changed from %dmV to %dmV for battery-type %s\n",
+ chip->vfloat_mv, (max_voltage_uv / 1000),
+ chip->battery_type);
+ rc = smbchg_float_voltage_set(chip,
+ (max_voltage_uv / 1000));
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set float voltage rc = %d\n", rc);
+ return rc;
+ }
+ }
+ }
+
+ /* change chg term */
+ rc = of_property_read_u32(profile_node, "qcom,chg-term-ua",
+ &iterm_ua);
+ if (rc && rc != -EINVAL) {
+ pr_warn("couldn't read battery term current=%d\n", rc);
+ ret = rc;
+ } else if (!rc) {
+ if (chip->iterm_ma != (iterm_ua / 1000)
+ && !chip->iterm_disabled) {
+ pr_info("Term current changed from %dmA to %dmA for battery-type %s\n",
+ chip->iterm_ma, (iterm_ua / 1000),
+ chip->battery_type);
+ rc = smbchg_iterm_set(chip,
+ (iterm_ua / 1000));
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set iterm rc = %d\n", rc);
+ return rc;
+ }
+ }
+ chip->iterm_ma = iterm_ua / 1000;
+ }
+
+ /*
+ * Only configure from profile if fastchg-ma is not defined in the
+ * charger device node.
+ */
+ if (!of_find_property(chip->pdev->dev.of_node,
+ "qcom,fastchg-current-ma", NULL)) {
+ rc = of_property_read_u32(profile_node,
+ "qcom,fastchg-current-ma", &fastchg_ma);
+ if (rc) {
+ ret = rc;
+ } else {
+ pr_smb(PR_MISC,
+ "fastchg-ma changed from to %dma for battery-type %s\n",
+ fastchg_ma, chip->battery_type);
+ rc = vote(chip->fcc_votable, BATT_TYPE_FCC_VOTER, true,
+ fastchg_ma);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't vote for fastchg current rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+ }
+
+ return ret;
+}
+
+#define MAX_INV_BATT_ID 7700
+#define MIN_INV_BATT_ID 7300
+static void check_battery_type(struct smbchg_chip *chip)
+{
+ union power_supply_propval prop = {0,};
+ bool en;
+
+ if (!chip->bms_psy && chip->bms_psy_name)
+ chip->bms_psy =
+ power_supply_get_by_name((char *)chip->bms_psy_name);
+ if (chip->bms_psy) {
+ power_supply_get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_BATTERY_TYPE, &prop);
+ en = (strcmp(prop.strval, UNKNOWN_BATT_TYPE) != 0
+ || chip->charge_unknown_battery)
+ && (strcmp(prop.strval, LOADING_BATT_TYPE) != 0);
+ vote(chip->battchg_suspend_votable,
+ BATTCHG_UNKNOWN_BATTERY_EN_VOTER, !en, 0);
+
+ if (!chip->skip_usb_suspend_for_fake_battery) {
+ power_supply_get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_RESISTANCE_ID, &prop);
+ /* suspend USB path for invalid battery-id */
+ en = (prop.intval <= MAX_INV_BATT_ID &&
+ prop.intval >= MIN_INV_BATT_ID) ? 1 : 0;
+ vote(chip->usb_suspend_votable, FAKE_BATTERY_EN_VOTER,
+ en, 0);
+ }
+ }
+}
+
+static void smbchg_external_power_changed(struct power_supply *psy)
+{
+ struct smbchg_chip *chip = power_supply_get_drvdata(psy);
+ union power_supply_propval prop = {0,};
+ int rc, current_limit = 0, soc;
+ enum power_supply_type usb_supply_type;
+ char *usb_type_name = "null";
+
+ if (chip->bms_psy_name)
+ chip->bms_psy =
+ power_supply_get_by_name((char *)chip->bms_psy_name);
+
+ smbchg_aicl_deglitch_wa_check(chip);
+ if (chip->bms_psy) {
+ check_battery_type(chip);
+ soc = get_prop_batt_capacity(chip);
+ if (chip->previous_soc != soc) {
+ chip->previous_soc = soc;
+ smbchg_soc_changed(chip);
+ }
+
+ rc = smbchg_config_chg_battery_type(chip);
+ if (rc)
+ pr_smb(PR_MISC,
+ "Couldn't update charger configuration rc=%d\n",
+ rc);
+ }
+
+ rc = power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_CHARGING_ENABLED, &prop);
+ if (rc == 0)
+ vote(chip->usb_suspend_votable, POWER_SUPPLY_EN_VOTER,
+ !prop.intval, 0);
+
+ current_limit = chip->usb_current_max / 1000;
+
+ /* Override if type-c charger used */
+ if (chip->typec_current_ma > 500 &&
+ current_limit < chip->typec_current_ma)
+ current_limit = chip->typec_current_ma;
+
+ read_usb_type(chip, &usb_type_name, &usb_supply_type);
+
+ if (usb_supply_type != POWER_SUPPLY_TYPE_USB)
+ goto skip_current_for_non_sdp;
+
+ pr_smb(PR_MISC, "usb type = %s current_limit = %d\n",
+ usb_type_name, current_limit);
+
+ rc = vote(chip->usb_icl_votable, PSY_ICL_VOTER, true,
+ current_limit);
+ if (rc < 0)
+ pr_err("Couldn't update USB PSY ICL vote rc=%d\n", rc);
+
+skip_current_for_non_sdp:
+ smbchg_vfloat_adjust_check(chip);
+
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+}
+
+static int smbchg_otg_regulator_enable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = rdev_get_drvdata(rdev);
+
+ chip->otg_retries = 0;
+ chip->chg_otg_enabled = true;
+ smbchg_icl_loop_disable_check(chip);
+ smbchg_otg_pulse_skip_disable(chip, REASON_OTG_ENABLED, true);
+
+ /* If pin control mode then return from here */
+ if (chip->otg_pinctrl)
+ return rc;
+
+ /* sleep to make sure the pulse skip is actually disabled */
+ msleep(20);
+ rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG,
+ OTG_EN_BIT, OTG_EN_BIT);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't enable OTG mode rc=%d\n", rc);
+ else
+ chip->otg_enable_time = ktime_get();
+ pr_smb(PR_STATUS, "Enabling OTG Boost\n");
+ return rc;
+}
+
+static int smbchg_otg_regulator_disable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = rdev_get_drvdata(rdev);
+
+ if (!chip->otg_pinctrl) {
+ rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG,
+ OTG_EN_BIT, 0);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't disable OTG mode rc=%d\n",
+ rc);
+ }
+
+ chip->chg_otg_enabled = false;
+ smbchg_otg_pulse_skip_disable(chip, REASON_OTG_ENABLED, false);
+ smbchg_icl_loop_disable_check(chip);
+ pr_smb(PR_STATUS, "Disabling OTG Boost\n");
+ return rc;
+}
+
+static int smbchg_otg_regulator_is_enable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ u8 reg = 0;
+ struct smbchg_chip *chip = rdev_get_drvdata(rdev);
+
+ rc = smbchg_read(chip, &reg, chip->bat_if_base + CMD_CHG_REG, 1);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't read OTG enable bit rc=%d\n", rc);
+ return rc;
+ }
+
+ return (reg & OTG_EN_BIT) ? 1 : 0;
+}
+
+struct regulator_ops smbchg_otg_reg_ops = {
+ .enable = smbchg_otg_regulator_enable,
+ .disable = smbchg_otg_regulator_disable,
+ .is_enabled = smbchg_otg_regulator_is_enable,
+};
+
+#define USBIN_CHGR_CFG 0xF1
+#define ADAPTER_ALLOWANCE_MASK 0x7
+#define USBIN_ADAPTER_9V 0x3
+#define USBIN_ADAPTER_5V_9V_CONT 0x2
+#define USBIN_ADAPTER_5V_UNREGULATED_9V 0x5
+#define HVDCP_EN_BIT BIT(3)
+static int smbchg_external_otg_regulator_enable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = rdev_get_drvdata(rdev);
+
+ rc = vote(chip->usb_suspend_votable, OTG_EN_VOTER, true, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't suspend charger rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_read(chip, &chip->original_usbin_allowance,
+ chip->usb_chgpth_base + USBIN_CHGR_CFG, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb allowance rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * To disallow source detect and usbin_uv interrupts, set the adapter
+ * allowance to 9V, so that the audio boost operating in reverse never
+ * gets detected as a valid input
+ */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_EN_BIT, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't disable HVDCP rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + USBIN_CHGR_CFG,
+ 0xFF, USBIN_ADAPTER_9V);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't write usb allowance rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_STATUS, "Enabling OTG Boost\n");
+ return rc;
+}
+
+static int smbchg_external_otg_regulator_disable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = rdev_get_drvdata(rdev);
+
+ rc = vote(chip->usb_suspend_votable, OTG_EN_VOTER, false, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't unsuspend charger rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * Reenable HVDCP and set the adapter allowance back to the original
+ * value in order to allow normal USBs to be recognized as a valid
+ * input.
+ */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_EN_BIT, HVDCP_EN_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't enable HVDCP rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + USBIN_CHGR_CFG,
+ 0xFF, chip->original_usbin_allowance);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't write usb allowance rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_STATUS, "Disabling OTG Boost\n");
+ return rc;
+}
+
+static int smbchg_external_otg_regulator_is_enable(struct regulator_dev *rdev)
+{
+ struct smbchg_chip *chip = rdev_get_drvdata(rdev);
+
+ return get_client_vote(chip->usb_suspend_votable, OTG_EN_VOTER);
+}
+
+struct regulator_ops smbchg_external_otg_reg_ops = {
+ .enable = smbchg_external_otg_regulator_enable,
+ .disable = smbchg_external_otg_regulator_disable,
+ .is_enabled = smbchg_external_otg_regulator_is_enable,
+};
+
+static int smbchg_regulator_init(struct smbchg_chip *chip)
+{
+ int rc = 0;
+ struct regulator_config cfg = {};
+ struct device_node *regulator_node;
+
+ cfg.dev = chip->dev;
+ cfg.driver_data = chip;
+
+ chip->otg_vreg.rdesc.owner = THIS_MODULE;
+ chip->otg_vreg.rdesc.type = REGULATOR_VOLTAGE;
+ chip->otg_vreg.rdesc.ops = &smbchg_otg_reg_ops;
+ chip->otg_vreg.rdesc.of_match = "qcom,smbcharger-boost-otg";
+ chip->otg_vreg.rdesc.name = "qcom,smbcharger-boost-otg";
+
+ chip->otg_vreg.rdev = devm_regulator_register(chip->dev,
+ &chip->otg_vreg.rdesc, &cfg);
+ if (IS_ERR(chip->otg_vreg.rdev)) {
+ rc = PTR_ERR(chip->otg_vreg.rdev);
+ chip->otg_vreg.rdev = NULL;
+ if (rc != -EPROBE_DEFER)
+ dev_err(chip->dev,
+ "OTG reg failed, rc=%d\n", rc);
+ }
+ if (rc)
+ return rc;
+
+ regulator_node = of_get_child_by_name(chip->dev->of_node,
+ "qcom,smbcharger-external-otg");
+ if (!regulator_node) {
+ dev_dbg(chip->dev, "external-otg node absent\n");
+ return 0;
+ }
+
+ chip->ext_otg_vreg.rdesc.owner = THIS_MODULE;
+ chip->ext_otg_vreg.rdesc.type = REGULATOR_VOLTAGE;
+ chip->ext_otg_vreg.rdesc.ops = &smbchg_external_otg_reg_ops;
+ chip->ext_otg_vreg.rdesc.of_match = "qcom,smbcharger-external-otg";
+ chip->ext_otg_vreg.rdesc.name = "qcom,smbcharger-external-otg";
+ if (of_get_property(chip->dev->of_node, "otg-parent-supply", NULL))
+ chip->ext_otg_vreg.rdesc.supply_name = "otg-parent";
+ cfg.dev = chip->dev;
+ cfg.driver_data = chip;
+
+ chip->ext_otg_vreg.rdev = devm_regulator_register(chip->dev,
+ &chip->ext_otg_vreg.rdesc,
+ &cfg);
+ if (IS_ERR(chip->ext_otg_vreg.rdev)) {
+ rc = PTR_ERR(chip->ext_otg_vreg.rdev);
+ chip->ext_otg_vreg.rdev = NULL;
+ if (rc != -EPROBE_DEFER)
+ dev_err(chip->dev,
+ "external OTG reg failed, rc=%d\n", rc);
+ }
+
+ return rc;
+}
+
+#define CMD_CHG_LED_REG 0x43
+#define CHG_LED_CTRL_BIT BIT(0)
+#define LED_SW_CTRL_BIT 0x1
+#define LED_CHG_CTRL_BIT 0x0
+#define CHG_LED_ON 0x03
+#define CHG_LED_OFF 0x00
+#define LED_BLINKING_PATTERN1 0x01
+#define LED_BLINKING_PATTERN2 0x02
+#define LED_BLINKING_CFG_MASK SMB_MASK(2, 1)
+#define CHG_LED_SHIFT 1
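+/*
+ * Illustrative register layout, inferred from the masks above: the LED
+ * control value occupies bits 2:1 of CMD_CHG_LED_REG, so e.g. CHG_LED_ON
+ * (0x03) shifted by CHG_LED_SHIFT gives 0x06, while bit 0 selects
+ * software (1) vs. charger (0) control.
+ */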
+static int smbchg_chg_led_controls(struct smbchg_chip *chip)
+{
+ u8 reg, mask;
+ int rc;
+
+ if (chip->cfg_chg_led_sw_ctrl) {
+ /* turn-off LED by default for software control */
+ mask = CHG_LED_CTRL_BIT | LED_BLINKING_CFG_MASK;
+ reg = LED_SW_CTRL_BIT;
+ } else {
+ mask = CHG_LED_CTRL_BIT;
+ reg = LED_CHG_CTRL_BIT;
+ }
+
+ rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_LED_REG,
+ mask, reg);
+ if (rc < 0)
+ dev_err(chip->dev,
+ "Couldn't write LED_CTRL_BIT rc=%d\n", rc);
+ return rc;
+}
+
+static void smbchg_chg_led_brightness_set(struct led_classdev *cdev,
+ enum led_brightness value)
+{
+ struct smbchg_chip *chip = container_of(cdev,
+ struct smbchg_chip, led_cdev);
+ union power_supply_propval pval = {0, };
+ u8 reg;
+ int rc;
+
+ reg = (value > LED_OFF) ? CHG_LED_ON << CHG_LED_SHIFT :
+ CHG_LED_OFF << CHG_LED_SHIFT;
+ pval.intval = value > LED_OFF ? 1 : 0;
+ power_supply_set_property(chip->bms_psy, POWER_SUPPLY_PROP_HI_POWER,
+ &pval);
+ pr_smb(PR_STATUS,
+ "set the charger led brightness to value=%d\n",
+ value);
+ rc = smbchg_sec_masked_write(chip,
+ chip->bat_if_base + CMD_CHG_LED_REG,
+ LED_BLINKING_CFG_MASK, reg);
+ if (rc)
+ dev_err(chip->dev, "Couldn't write CHG_LED rc=%d\n",
+ rc);
+}
+
+static enum led_brightness
+smbchg_chg_led_brightness_get(struct led_classdev *cdev)
+{
+ struct smbchg_chip *chip = container_of(cdev,
+ struct smbchg_chip, led_cdev);
+ u8 reg_val, chg_led_sts;
+ int rc;
+
+ rc = smbchg_read(chip, &reg_val, chip->bat_if_base + CMD_CHG_LED_REG,
+ 1);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't read CHG_LED_REG sts rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ chg_led_sts = (reg_val & LED_BLINKING_CFG_MASK) >> CHG_LED_SHIFT;
+
+ pr_smb(PR_STATUS, "chg_led_sts = %02x\n", chg_led_sts);
+
+ return (chg_led_sts == CHG_LED_OFF) ? LED_OFF : LED_FULL;
+}
+
+static void smbchg_chg_led_blink_set(struct smbchg_chip *chip,
+ unsigned long blinking)
+{
+ union power_supply_propval pval = {0, };
+ u8 reg;
+ int rc;
+
+ pval.intval = (blinking == 0) ? 0 : 1;
+ power_supply_set_property(chip->bms_psy, POWER_SUPPLY_PROP_HI_POWER,
+ &pval);
+
+ if (blinking == 0) {
+ reg = CHG_LED_OFF << CHG_LED_SHIFT;
+ } else {
+ if (blinking == 1)
+ reg = LED_BLINKING_PATTERN1 << CHG_LED_SHIFT;
+ else if (blinking == 2)
+ reg = LED_BLINKING_PATTERN2 << CHG_LED_SHIFT;
+ else
+ reg = LED_BLINKING_PATTERN1 << CHG_LED_SHIFT;
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->bat_if_base + CMD_CHG_LED_REG,
+ LED_BLINKING_CFG_MASK, reg);
+ if (rc)
+ dev_err(chip->dev, "Couldn't write CHG_LED rc=%d\n",
+ rc);
+}
+
+static ssize_t smbchg_chg_led_blink_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct led_classdev *cdev = dev_get_drvdata(dev);
+ struct smbchg_chip *chip = container_of(cdev, struct smbchg_chip,
+ led_cdev);
+ unsigned long blinking;
+ ssize_t rc = -EINVAL;
+
+ rc = kstrtoul(buf, 10, &blinking);
+ if (rc)
+ return rc;
+
+ smbchg_chg_led_blink_set(chip, blinking);
+
+ return len;
+}
+
+static DEVICE_ATTR(blink, 0664, NULL, smbchg_chg_led_blink_store);
+
+static struct attribute *led_blink_attributes[] = {
+ &dev_attr_blink.attr,
+ NULL,
+};
+
+static struct attribute_group smbchg_led_attr_group = {
+ .attrs = led_blink_attributes
+};
+
+static int smbchg_register_chg_led(struct smbchg_chip *chip)
+{
+ int rc;
+
+ chip->led_cdev.name = "red";
+ chip->led_cdev.brightness_set = smbchg_chg_led_brightness_set;
+ chip->led_cdev.brightness_get = smbchg_chg_led_brightness_get;
+
+ rc = led_classdev_register(chip->dev, &chip->led_cdev);
+ if (rc) {
+ dev_err(chip->dev, "unable to register charger led, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = sysfs_create_group(&chip->led_cdev.dev->kobj,
+ &smbchg_led_attr_group);
+ if (rc) {
+ dev_err(chip->dev, "led sysfs rc: %d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int vf_adjust_low_threshold = 5;
+module_param(vf_adjust_low_threshold, int, 0644);
+
+static int vf_adjust_high_threshold = 7;
+module_param(vf_adjust_high_threshold, int, 0644);
+
+static int vf_adjust_n_samples = 10;
+module_param(vf_adjust_n_samples, int, 0644);
+
+static int vf_adjust_max_delta_mv = 40;
+module_param(vf_adjust_max_delta_mv, int, 0644);
+
+static int vf_adjust_trim_steps_per_adjust = 1;
+module_param(vf_adjust_trim_steps_per_adjust, int, 0644);
+
+#define CENTER_TRIM_CODE 7
+#define MAX_LIN_CODE 14
+#define MAX_TRIM_CODE 15
+#define SCALE_SHIFT 4
+#define VF_TRIM_OFFSET_MASK SMB_MASK(3, 0)
+#define VF_STEP_SIZE_MV 10
+#define SCALE_LSB_MV 17
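+/*
+ * The 8-bit trim register packs a 4-bit offset (bits 3:0, ~10 mV/step)
+ * and a 4-bit scale (bits 7:4, ~17 mV/step). Both fields use folded
+ * encodings (slightly different for each field) that
+ * smbchg_trim_add_steps() first linearizes: for the offset, codes 0..7
+ * map to linear 7..14 and codes 8..15 map to linear 7..0. Worked
+ * example: offset_code 3 -> linear 10; one positive step -> linear 11
+ * -> offset_code 11 - 7 = 4.
+ */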
+static int smbchg_trim_add_steps(int prev_trim, int delta_steps)
+{
+ int scale_steps;
+ int linear_offset, linear_scale;
+ int offset_code = prev_trim & VF_TRIM_OFFSET_MASK;
+ int scale_code = (prev_trim & ~VF_TRIM_OFFSET_MASK) >> SCALE_SHIFT;
+
+ if (abs(delta_steps) > 1) {
+ pr_smb(PR_STATUS,
+ "Cant trim multiple steps delta_steps = %d\n",
+ delta_steps);
+ return prev_trim;
+ }
+ if (offset_code <= CENTER_TRIM_CODE)
+ linear_offset = offset_code + CENTER_TRIM_CODE;
+ else if (offset_code > CENTER_TRIM_CODE)
+ linear_offset = MAX_TRIM_CODE - offset_code;
+
+ if (scale_code <= CENTER_TRIM_CODE)
+ linear_scale = scale_code + CENTER_TRIM_CODE;
+ else if (scale_code > CENTER_TRIM_CODE)
+ linear_scale = scale_code - (CENTER_TRIM_CODE + 1);
+
+ /* check if we can accommodate delta steps with just the offset */
+ if (linear_offset + delta_steps >= 0
+ && linear_offset + delta_steps <= MAX_LIN_CODE) {
+ linear_offset += delta_steps;
+
+ if (linear_offset > CENTER_TRIM_CODE)
+ offset_code = linear_offset - CENTER_TRIM_CODE;
+ else
+ offset_code = MAX_TRIM_CODE - linear_offset;
+
+ return (prev_trim & ~VF_TRIM_OFFSET_MASK) | offset_code;
+ }
+
+ /* changing offset cannot satisfy delta steps, change the scale bits */
+ scale_steps = delta_steps > 0 ? 1 : -1;
+
+ if (linear_scale + scale_steps < 0
+ || linear_scale + scale_steps > MAX_LIN_CODE) {
+ pr_smb(PR_STATUS,
+ "Cant trim scale_steps = %d delta_steps = %d\n",
+ scale_steps, delta_steps);
+ return prev_trim;
+ }
+
+ linear_scale += scale_steps;
+
+ if (linear_scale > CENTER_TRIM_CODE)
+ scale_code = linear_scale - CENTER_TRIM_CODE;
+ else
+ scale_code = linear_scale + (CENTER_TRIM_CODE + 1);
+ prev_trim = (prev_trim & VF_TRIM_OFFSET_MASK)
+ | scale_code << SCALE_SHIFT;
+
+ /*
+ * now that we have changed scale which is a 17mV jump, change the
+ * offset bits (10mV) too so the effective change is just 7mV
+ */
+ delta_steps = -1 * delta_steps;
+
+ linear_offset = clamp(linear_offset + delta_steps, 0, MAX_LIN_CODE);
+ if (linear_offset > CENTER_TRIM_CODE)
+ offset_code = linear_offset - CENTER_TRIM_CODE;
+ else
+ offset_code = MAX_TRIM_CODE - linear_offset;
+
+ return (prev_trim & ~VF_TRIM_OFFSET_MASK) | offset_code;
+}
+
+#define TRIM_14 0xFE
+#define VF_TRIM_MASK 0xFF
+static int smbchg_adjust_vfloat_mv_trim(struct smbchg_chip *chip,
+ int delta_mv)
+{
+ int sign, delta_steps, rc = 0;
+ u8 prev_trim, new_trim;
+ int i;
+
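+ /*
+ * Convert the requested mV delta into 10 mV trim steps, rounding
+ * half away from zero: e.g. delta_mv = 17 gives (17 + 5) / 10 = 2
+ * steps, and delta_mv = -17 gives (-17 - 5) / 10 = -2 steps.
+ */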
+ sign = delta_mv > 0 ? 1 : -1;
+ delta_steps = (delta_mv + sign * VF_STEP_SIZE_MV / 2)
+ / VF_STEP_SIZE_MV;
+
+ rc = smbchg_read(chip, &prev_trim, chip->misc_base + TRIM_14, 1);
+ if (rc) {
+ dev_err(chip->dev, "Unable to read trim 14: %d\n", rc);
+ return rc;
+ }
+
+ for (i = 1; i <= abs(delta_steps)
+ && i <= vf_adjust_trim_steps_per_adjust; i++) {
+ new_trim = (u8)smbchg_trim_add_steps(prev_trim,
+ delta_steps > 0 ? 1 : -1);
+ if (new_trim == prev_trim) {
+ pr_smb(PR_STATUS,
+ "VFloat trim unchanged from %02x\n", prev_trim);
+ /* treat no trim change as an error */
+ return -EINVAL;
+ }
+
+ rc = smbchg_sec_masked_write(chip, chip->misc_base + TRIM_14,
+ VF_TRIM_MASK, new_trim);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't change vfloat trim rc=%d\n", rc);
+ }
+ pr_smb(PR_STATUS,
+ "VFlt trim %02x to %02x, delta steps: %d\n",
+ prev_trim, new_trim, delta_steps);
+ prev_trim = new_trim;
+ }
+
+ return rc;
+}
+
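+/*
+ * Float-voltage adjustment loop (tunables are the vf_adjust_* module
+ * parameters above): while taper charging with parallel charging idle,
+ * sample VBAT every VFLOAT_RESAMPLE_DELAY_MS, track the running maximum,
+ * and once vf_adjust_n_samples have been collected trim vfloat by up to
+ * vf_adjust_trim_steps_per_adjust steps per pass if the maximum deviates
+ * from the target by more than the high/low thresholds.
+ */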
+#define VFLOAT_RESAMPLE_DELAY_MS 10000
+static void smbchg_vfloat_adjust_work(struct work_struct *work)
+{
+ struct smbchg_chip *chip = container_of(work,
+ struct smbchg_chip,
+ vfloat_adjust_work.work);
+ int vbat_uv, vbat_mv, ibat_ua, rc, delta_vfloat_mv;
+ bool taper, enable;
+
+ smbchg_stay_awake(chip, PM_REASON_VFLOAT_ADJUST);
+ taper = (get_prop_charge_type(chip)
+ == POWER_SUPPLY_CHARGE_TYPE_TAPER);
+ enable = taper && (chip->parallel.current_max_ma == 0);
+
+ if (!enable) {
+ pr_smb(PR_MISC,
+ "Stopping vfloat adj taper=%d parallel_ma = %d\n",
+ taper, chip->parallel.current_max_ma);
+ goto stop;
+ }
+
+ if (get_prop_batt_health(chip) != POWER_SUPPLY_HEALTH_GOOD) {
+ pr_smb(PR_STATUS, "JEITA active, skipping\n");
+ goto stop;
+ }
+
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_UPDATE_NOW, 1);
+ rc = get_property_from_fg(chip,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW, &vbat_uv);
+ if (rc) {
+ pr_smb(PR_STATUS,
+ "bms psy does not support voltage rc = %d\n", rc);
+ goto stop;
+ }
+ vbat_mv = vbat_uv / 1000;
+
+ if ((vbat_mv - chip->vfloat_mv) < -1 * vf_adjust_max_delta_mv) {
+ pr_smb(PR_STATUS, "Skip vbat out of range: %d\n", vbat_mv);
+ goto reschedule;
+ }
+
+ rc = get_property_from_fg(chip,
+ POWER_SUPPLY_PROP_CURRENT_NOW, &ibat_ua);
+ if (rc) {
+ pr_smb(PR_STATUS,
+ "bms psy does not support current_now rc = %d\n", rc);
+ goto stop;
+ }
+
+ if (ibat_ua / 1000 > -chip->iterm_ma) {
+ pr_smb(PR_STATUS, "Skip ibat too high: %d\n", ibat_ua);
+ goto reschedule;
+ }
+
+ pr_smb(PR_STATUS, "sample number = %d vbat_mv = %d ibat_ua = %d\n",
+ chip->n_vbat_samples,
+ vbat_mv,
+ ibat_ua);
+
+ chip->max_vbat_sample = max(chip->max_vbat_sample, vbat_mv);
+ chip->n_vbat_samples += 1;
+ if (chip->n_vbat_samples < vf_adjust_n_samples) {
+ pr_smb(PR_STATUS, "Skip %d samples; max = %d\n",
+ chip->n_vbat_samples, chip->max_vbat_sample);
+ goto reschedule;
+ }
+ /* if max vbat > target vfloat, delta_vfloat_mv could be negative */
+ delta_vfloat_mv = chip->vfloat_mv - chip->max_vbat_sample;
+ pr_smb(PR_STATUS, "delta_vfloat_mv = %d, samples = %d, mvbat = %d\n",
+ delta_vfloat_mv, chip->n_vbat_samples, chip->max_vbat_sample);
+ /*
+ * Enough valid samples have been collected; adjust the trim codes
+ * based on the maximum of the collected vbat samples if necessary.
+ */
+ if (delta_vfloat_mv > vf_adjust_high_threshold
+ || delta_vfloat_mv < -1 * vf_adjust_low_threshold) {
+ rc = smbchg_adjust_vfloat_mv_trim(chip, delta_vfloat_mv);
+ if (rc) {
+ pr_smb(PR_STATUS,
+ "Stopping vfloat adj after trim adj rc = %d\n",
+ rc);
+ goto stop;
+ }
+ chip->max_vbat_sample = 0;
+ chip->n_vbat_samples = 0;
+ goto reschedule;
+ }
+
+stop:
+ chip->max_vbat_sample = 0;
+ chip->n_vbat_samples = 0;
+ smbchg_relax(chip, PM_REASON_VFLOAT_ADJUST);
+ return;
+
+reschedule:
+ schedule_delayed_work(&chip->vfloat_adjust_work,
+ msecs_to_jiffies(VFLOAT_RESAMPLE_DELAY_MS));
+ return;
+}
+
+static int smbchg_charging_status_change(struct smbchg_chip *chip)
+{
+ smbchg_vfloat_adjust_check(chip);
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_STATUS,
+ get_prop_batt_status(chip));
+ return 0;
+}
+
+#define BB_CLMP_SEL 0xF8
+#define BB_CLMP_MASK SMB_MASK(1, 0)
+#define BB_CLMP_VFIX_3338MV 0x1
+#define BB_CLMP_VFIX_3512MV 0x2
+static int smbchg_set_optimal_charging_mode(struct smbchg_chip *chip, int type)
+{
+ int rc;
+ bool hvdcp2 = (type == POWER_SUPPLY_TYPE_USB_HVDCP
+ && smbchg_is_usbin_active_pwr_src(chip));
+
+ /*
+ * Set the charger switching frequency to 1 MHz for HVDCP 2.0,
+ * or 750 kHz otherwise.
+ */
+ rc = smbchg_sec_masked_write(chip,
+ chip->bat_if_base + BAT_IF_TRIM7_REG,
+ CFG_750KHZ_BIT, hvdcp2 ? 0 : CFG_750KHZ_BIT);
+ if (rc) {
+ dev_err(chip->dev, "Cannot set switching freq: %d\n", rc);
+ return rc;
+ }
+
+ /*
+ * Set the charger switch frequency clamp voltage threshold to 3.338V
+ * if HVDCP 2.0, or 3.512V otherwise.
+ */
+ rc = smbchg_sec_masked_write(chip, chip->bat_if_base + BB_CLMP_SEL,
+ BB_CLMP_MASK,
+ hvdcp2 ? BB_CLMP_VFIX_3338MV : BB_CLMP_VFIX_3512MV);
+ if (rc) {
+ dev_err(chip->dev, "Cannot set switching freq: %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+#define DEFAULT_SDP_MA 100
+#define DEFAULT_CDP_MA 1500
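+/*
+ * Default input current limits per detected type, as voted below: SDP
+ * 100 mA, CDP 1500 mA, HVDCP/HVDCP3/DCP from their respective module
+ * parameters, and the Type-C advertised current when an external Type-C
+ * detection chip is present and the type is not SDP.
+ */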
+static int smbchg_change_usb_supply_type(struct smbchg_chip *chip,
+ enum power_supply_type type)
+{
+ int rc, current_limit_ma;
+
+ /*
+ * if the type is not unknown, set the type before changing ICL vote
+ * in order to ensure that the correct current limit registers are
+ * used
+ */
+ if (type != POWER_SUPPLY_TYPE_UNKNOWN)
+ chip->usb_supply_type = type;
+
+ /*
+ * Type-C only supports STD (900 mA), MEDIUM (1500 mA) and HIGH
+ * (3000 mA) current modes, so skip the BC 1.2 current limits when an
+ * external Type-C detector is present. Note: SDP current is still
+ * based on USB enumeration notifications.
+ */
+ if (chip->typec_psy && (type != POWER_SUPPLY_TYPE_USB))
+ current_limit_ma = chip->typec_current_ma;
+ else if (type == POWER_SUPPLY_TYPE_USB)
+ current_limit_ma = DEFAULT_SDP_MA;
+ else if (type == POWER_SUPPLY_TYPE_USB_CDP)
+ current_limit_ma = DEFAULT_CDP_MA;
+ else if (type == POWER_SUPPLY_TYPE_USB_HVDCP)
+ current_limit_ma = smbchg_default_hvdcp_icl_ma;
+ else if (type == POWER_SUPPLY_TYPE_USB_HVDCP_3)
+ current_limit_ma = smbchg_default_hvdcp3_icl_ma;
+ else
+ current_limit_ma = smbchg_default_dcp_icl_ma;
+
+ pr_smb(PR_STATUS, "Type %d: setting mA = %d\n",
+ type, current_limit_ma);
+ rc = vote(chip->usb_icl_votable, PSY_ICL_VOTER, true,
+ current_limit_ma);
+ if (rc < 0) {
+ pr_err("Couldn't vote for new USB ICL rc=%d\n", rc);
+ goto out;
+ }
+
+ /* otherwise if it is unknown, set type after the vote */
+ if (type == POWER_SUPPLY_TYPE_UNKNOWN)
+ chip->usb_supply_type = type;
+
+ if (!chip->skip_usb_notification)
+ power_supply_changed(chip->usb_psy);
+
+ /* set the correct buck switching frequency */
+ rc = smbchg_set_optimal_charging_mode(chip, type);
+ if (rc < 0)
+ pr_err("Couldn't set charger optimal mode rc=%d\n", rc);
+
+out:
+ return rc;
+}
+
+#define HVDCP_ADAPTER_SEL_MASK SMB_MASK(5, 4)
+#define HVDCP_5V 0x00
+#define HVDCP_9V 0x10
+#define USB_CMD_HVDCP_1 0x42
+#define FORCE_HVDCP_2p0 BIT(3)
+
+static int force_9v_hvdcp(struct smbchg_chip *chip)
+{
+ int rc;
+
+ /* Force 5V HVDCP */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_5V);
+ if (rc) {
+ pr_err("Couldn't set hvdcp config in chgpath_chg rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Force QC2.0 */
+ rc = smbchg_masked_write(chip,
+ chip->usb_chgpth_base + USB_CMD_HVDCP_1,
+ FORCE_HVDCP_2p0, FORCE_HVDCP_2p0);
+ rc |= smbchg_masked_write(chip,
+ chip->usb_chgpth_base + USB_CMD_HVDCP_1,
+ FORCE_HVDCP_2p0, 0);
+ if (rc < 0) {
+ pr_err("Couldn't force QC2.0 rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Delay to switch into HVDCP 2.0 and avoid UV */
+ msleep(500);
+
+ /* Force 9V HVDCP */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_9V);
+ if (rc)
+ pr_err("Couldn't set hvdcp config in chgpath_chg rc=%d\n", rc);
+
+ return rc;
+}
+
+static void smbchg_hvdcp_det_work(struct work_struct *work)
+{
+ struct smbchg_chip *chip = container_of(work,
+ struct smbchg_chip,
+ hvdcp_det_work.work);
+ int rc;
+
+ if (is_hvdcp_present(chip)) {
+ if (!chip->hvdcp3_supported &&
+ (chip->wa_flags & SMBCHG_HVDCP_9V_EN_WA)) {
+ /* force HVDCP 2.0 */
+ rc = force_9v_hvdcp(chip);
+ if (rc)
+ pr_err("could not force 9V HVDCP continuing rc=%d\n",
+ rc);
+ }
+ smbchg_change_usb_supply_type(chip,
+ POWER_SUPPLY_TYPE_USB_HVDCP);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_aicl_deglitch_wa_check(chip);
+ }
+ smbchg_relax(chip, PM_DETECT_HVDCP);
+}
+
+static int set_usb_psy_dp_dm(struct smbchg_chip *chip, int state)
+{
+ int rc;
+ u8 reg;
+ union power_supply_propval pval = {0, };
+
+ /*
+ * Ensure that we are not in the middle of an insertion where usbin_uv
+ * is low and src_detect hasn't gone high. If so, force dp=F dm=F,
+ * which guarantees proper type detection.
+ */
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RT_STS, 1);
+ if (!rc && !(reg & USBIN_UV_BIT) && !(reg & USBIN_SRC_DET_BIT)) {
+ pr_smb(PR_MISC, "overwriting state = %d with %d\n",
+ state, POWER_SUPPLY_DP_DM_DPF_DMF);
+ if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg))
+ return regulator_enable(chip->dpdm_reg);
+ }
+ pr_smb(PR_MISC, "setting usb psy dp dm = %d\n", state);
+ pval.intval = state;
+ return power_supply_set_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_DP_DM, &pval);
+}
+
+#define APSD_CFG 0xF5
+#define AUTO_SRC_DETECT_EN_BIT BIT(0)
+#define APSD_TIMEOUT_MS 1500
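+/*
+ * APSD here is the charger's automatic power source detection (BC 1.2
+ * style type detection). restore_from_hvdcp_detection() undoes everything
+ * the HVDCP pulsing preparation changed: the ICL vote, the 9V adapter
+ * selection, the HVDCP and APSD enables, the adapter allowance, and AICL.
+ */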
+static void restore_from_hvdcp_detection(struct smbchg_chip *chip)
+{
+ int rc;
+
+ pr_smb(PR_MISC, "Retracting HVDCP vote for ICL\n");
+ rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't retract HVDCP ICL vote rc=%d\n", rc);
+
+ /* switch to 9V HVDCP */
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_9V);
+ if (rc < 0)
+ pr_err("Couldn't configure HVDCP 9V rc=%d\n", rc);
+
+ /* enable HVDCP */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_EN_BIT, HVDCP_EN_BIT);
+ if (rc < 0)
+ pr_err("Couldn't enable HVDCP rc=%d\n", rc);
+
+ /* enable APSD */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + APSD_CFG,
+ AUTO_SRC_DETECT_EN_BIT, AUTO_SRC_DETECT_EN_BIT);
+ if (rc < 0)
+ pr_err("Couldn't enable APSD rc=%d\n", rc);
+
+ /* Reset back to 5V unregulated */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + USBIN_CHGR_CFG,
+ ADAPTER_ALLOWANCE_MASK, USBIN_ADAPTER_5V_UNREGULATED_9V);
+ if (rc < 0)
+ pr_err("Couldn't write usb allowance rc=%d\n", rc);
+
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, AICL_EN_BIT);
+ if (rc < 0)
+ pr_err("Couldn't enable AICL rc=%d\n", rc);
+
+ chip->hvdcp_3_det_ignore_uv = false;
+ chip->pulse_cnt = 0;
+}
+
+#define RESTRICTED_CHG_FCC_PERCENT 50
+static int smbchg_restricted_charging(struct smbchg_chip *chip, bool enable)
+{
+ int current_table_index, fastchg_current;
+ int rc = 0;
+
+ /*
+ * If enabled, set the FCC to the set point closest to 50% of the
+ * configured FCC while remaining below it.
+ */
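+ /*
+ * Worked example (table contents are board-specific): with a
+ * configured FCC of 1900 mA the target is 950 mA, and the vote
+ * below picks the largest usb_ilim_ma_table entry that does not
+ * exceed it.
+ */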
+ current_table_index = find_smaller_in_array(
+ chip->tables.usb_ilim_ma_table,
+ chip->cfg_fastchg_current_ma
+ * RESTRICTED_CHG_FCC_PERCENT / 100,
+ chip->tables.usb_ilim_ma_len);
+ fastchg_current =
+ chip->tables.usb_ilim_ma_table[current_table_index];
+ rc = vote(chip->fcc_votable, RESTRICTED_CHG_FCC_VOTER, enable,
+ fastchg_current);
+
+ pr_smb(PR_STATUS, "restricted_charging set to %d\n", enable);
+ chip->restricted_charging = enable;
+
+ return rc;
+}
+
+static void handle_usb_removal(struct smbchg_chip *chip)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval pval = {0, };
+ int rc;
+
+ pr_smb(PR_STATUS, "triggered\n");
+ smbchg_aicl_deglitch_wa_check(chip);
+ /* Clear the OV detected status set before */
+ if (chip->usb_ov_det)
+ chip->usb_ov_det = false;
+ /* Clear typec current status */
+ if (chip->typec_psy)
+ chip->typec_current_ma = 0;
+ smbchg_change_usb_supply_type(chip, POWER_SUPPLY_TYPE_UNKNOWN);
+ extcon_set_cable_state_(chip->extcon, EXTCON_USB, chip->usb_present);
+ if (chip->dpdm_reg)
+ regulator_disable(chip->dpdm_reg);
+ schedule_work(&chip->usb_set_online_work);
+
+ pr_smb(PR_MISC, "setting usb psy health UNKNOWN\n");
+ chip->usb_health = POWER_SUPPLY_HEALTH_UNKNOWN;
+ power_supply_changed(chip->usb_psy);
+
+ if (parallel_psy && chip->parallel_charger_detected) {
+ pval.intval = false;
+ power_supply_set_property(parallel_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ }
+ if (chip->parallel.avail && chip->aicl_done_irq
+ && chip->enable_aicl_wake) {
+ disable_irq_wake(chip->aicl_done_irq);
+ chip->enable_aicl_wake = false;
+ }
+ chip->parallel.enabled_once = false;
+ chip->vbat_above_headroom = false;
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ ICL_OVERRIDE_BIT, 0);
+ if (rc < 0)
+ pr_err("Couldn't set override rc = %d\n", rc);
+
+ vote(chip->usb_icl_votable, WEAK_CHARGER_ICL_VOTER, false, 0);
+ chip->usb_icl_delta = 0;
+ vote(chip->usb_icl_votable, SW_AICL_ICL_VOTER, false, 0);
+ vote(chip->aicl_deglitch_short_votable,
+ HVDCP_SHORT_DEGLITCH_VOTER, false, 0);
+ if (!chip->hvdcp_not_supported)
+ restore_from_hvdcp_detection(chip);
+}
+
+static bool is_usbin_uv_high(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb rt status rc = %d\n", rc);
+ return false;
+ }
+ return reg &= USBIN_UV_BIT;
+}
+
+#define HVDCP_NOTIFY_MS 2500
+static void handle_usb_insertion(struct smbchg_chip *chip)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval pval = {0, };
+ enum power_supply_type usb_supply_type;
+ int rc;
+ char *usb_type_name = "null";
+
+ pr_smb(PR_STATUS, "triggered\n");
+ /* usb inserted */
+ read_usb_type(chip, &usb_type_name, &usb_supply_type);
+ pr_smb(PR_STATUS,
+ "inserted type = %d (%s)", usb_supply_type, usb_type_name);
+
+ smbchg_aicl_deglitch_wa_check(chip);
+ if (chip->typec_psy)
+ update_typec_status(chip);
+ smbchg_change_usb_supply_type(chip, usb_supply_type);
+
+ /* Only notify USB if it's not a charger */
+ if (usb_supply_type == POWER_SUPPLY_TYPE_USB ||
+ usb_supply_type == POWER_SUPPLY_TYPE_USB_CDP)
+ extcon_set_cable_state_(chip->extcon, EXTCON_USB,
+ chip->usb_present);
+
+ /* Notify the USB psy if OV condition is not present */
+ if (!chip->usb_ov_det) {
+ /*
+ * Note that this could still be a very weak charger
+ * if the handle_usb_insertion was triggered from
+ * the falling edge of a USBIN_OV interrupt
+ */
+ pr_smb(PR_MISC, "setting usb psy health %s\n",
+ chip->very_weak_charger
+ ? "UNSPEC_FAILURE" : "GOOD");
+ chip->usb_health = chip->very_weak_charger
+ ? POWER_SUPPLY_HEALTH_UNSPEC_FAILURE
+ : POWER_SUPPLY_HEALTH_GOOD;
+ power_supply_changed(chip->usb_psy);
+ }
+ schedule_work(&chip->usb_set_online_work);
+
+ if (!chip->hvdcp_not_supported &&
+ (usb_supply_type == POWER_SUPPLY_TYPE_USB_DCP)) {
+ cancel_delayed_work_sync(&chip->hvdcp_det_work);
+ smbchg_stay_awake(chip, PM_DETECT_HVDCP);
+ schedule_delayed_work(&chip->hvdcp_det_work,
+ msecs_to_jiffies(HVDCP_NOTIFY_MS));
+ }
+
+ if (parallel_psy) {
+ pval.intval = true;
+ rc = power_supply_set_property(parallel_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ chip->parallel_charger_detected = rc ? false : true;
+ if (rc)
+ pr_debug("parallel-charger absent rc=%d\n", rc);
+ }
+
+ if (chip->parallel.avail && chip->aicl_done_irq
+ && !chip->enable_aicl_wake) {
+ rc = enable_irq_wake(chip->aicl_done_irq);
+ chip->enable_aicl_wake = true;
+ }
+}
+
+void update_usb_status(struct smbchg_chip *chip, bool usb_present, bool force)
+{
+ mutex_lock(&chip->usb_status_lock);
+ if (force) {
+ chip->usb_present = usb_present;
+ chip->usb_present ? handle_usb_insertion(chip)
+ : handle_usb_removal(chip);
+ goto unlock;
+ }
+ if (!chip->usb_present && usb_present) {
+ chip->usb_present = usb_present;
+ handle_usb_insertion(chip);
+ } else if (chip->usb_present && !usb_present) {
+ chip->usb_present = usb_present;
+ handle_usb_removal(chip);
+ }
+
+ /* update FG */
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_STATUS,
+ get_prop_batt_status(chip));
+unlock:
+ mutex_unlock(&chip->usb_status_lock);
+}
+
+static int otg_oc_reset(struct smbchg_chip *chip)
+{
+ int rc;
+
+ rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG,
+ OTG_EN_BIT, 0);
+ if (rc)
+ pr_err("Failed to disable OTG rc=%d\n", rc);
+
+ msleep(20);
+
+ /*
+ * There is a possibility that a USBID interrupt might have
+ * occurred, notifying the USB power supply to disable OTG. We
+ * should not enable OTG in such cases.
+ */
+ if (!is_otg_present(chip)) {
+ pr_smb(PR_STATUS,
+ "OTG is not present, not enabling OTG_EN_BIT\n");
+ goto out;
+ }
+
+ rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG,
+ OTG_EN_BIT, OTG_EN_BIT);
+ if (rc)
+ pr_err("Failed to re-enable OTG rc=%d\n", rc);
+
+out:
+ return rc;
+}
+
+static int get_current_time(unsigned long *now_tm_sec)
+{
+ struct rtc_time tm;
+ struct rtc_device *rtc;
+ int rc;
+
+ rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
+ if (rtc == NULL) {
+ pr_err("%s: unable to open rtc device (%s)\n",
+ __FILE__, CONFIG_RTC_HCTOSYS_DEVICE);
+ return -EINVAL;
+ }
+
+ rc = rtc_read_time(rtc, &tm);
+ if (rc) {
+ pr_err("Error reading rtc device (%s) : %d\n",
+ CONFIG_RTC_HCTOSYS_DEVICE, rc);
+ goto close_time;
+ }
+
+ rc = rtc_valid_tm(&tm);
+ if (rc) {
+ pr_err("Invalid RTC time (%s): %d\n",
+ CONFIG_RTC_HCTOSYS_DEVICE, rc);
+ goto close_time;
+ }
+ rtc_tm_to_time(&tm, now_tm_sec);
+
+close_time:
+ rtc_class_close(rtc);
+ return rc;
+}
+
+#define AICL_IRQ_LIMIT_SECONDS 60
+#define AICL_IRQ_LIMIT_COUNT 25
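+/*
+ * Weak-charger heuristic: more than AICL_IRQ_LIMIT_COUNT AICL-done
+ * interrupts (doubled when parallel charging is available) within an
+ * AICL_IRQ_LIMIT_SECONDS window means the input keeps collapsing, so HW
+ * AICL reruns are disabled and the input current limit drops to 100 mA.
+ */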
+static void increment_aicl_count(struct smbchg_chip *chip)
+{
+ bool bad_charger = false;
+ int max_aicl_count, rc;
+ u8 reg;
+ long elapsed_seconds;
+ unsigned long now_seconds;
+
+ pr_smb(PR_INTERRUPT, "aicl count c:%d dgltch:%d first:%ld\n",
+ chip->aicl_irq_count, chip->aicl_deglitch_short,
+ chip->first_aicl_seconds);
+
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + ICL_STS_1_REG, 1);
+ if (!rc)
+ chip->aicl_complete = reg & AICL_STS_BIT;
+ else
+ chip->aicl_complete = false;
+
+ if (chip->aicl_deglitch_short || chip->force_aicl_rerun) {
+ if (!chip->aicl_irq_count)
+ get_current_time(&chip->first_aicl_seconds);
+ get_current_time(&now_seconds);
+ elapsed_seconds = now_seconds
+ - chip->first_aicl_seconds;
+
+ if (elapsed_seconds > AICL_IRQ_LIMIT_SECONDS) {
+ pr_smb(PR_INTERRUPT,
+ "resetting: elp:%ld first:%ld now:%ld c=%d\n",
+ elapsed_seconds, chip->first_aicl_seconds,
+ now_seconds, chip->aicl_irq_count);
+ chip->aicl_irq_count = 1;
+ get_current_time(&chip->first_aicl_seconds);
+ return;
+ }
+ /*
+ * Double the amount of AICLs allowed if parallel charging is
+ * enabled.
+ */
+ max_aicl_count = AICL_IRQ_LIMIT_COUNT
+ * (chip->parallel.avail ? 2 : 1);
+ chip->aicl_irq_count++;
+
+ if (chip->aicl_irq_count > max_aicl_count) {
+ pr_smb(PR_INTERRUPT, "elp:%ld first:%ld now:%ld c=%d\n",
+ elapsed_seconds, chip->first_aicl_seconds,
+ now_seconds, chip->aicl_irq_count);
+ pr_smb(PR_INTERRUPT, "Disable AICL rerun\n");
+ chip->very_weak_charger = true;
+ bad_charger = true;
+
+ /*
+ * Disable AICL rerun since many interrupts were
+ * triggered in a short time
+ */
+ /* disable hw aicl */
+ rc = vote(chip->hw_aicl_rerun_disable_votable,
+ WEAK_CHARGER_HW_AICL_VOTER, true, 0);
+ if (rc < 0) {
+ pr_err("Couldn't disable hw aicl rerun rc=%d\n",
+ rc);
+ return;
+ }
+
+ /* Vote 100mA current limit */
+ rc = vote(chip->usb_icl_votable, WEAK_CHARGER_ICL_VOTER,
+ true, CURRENT_100_MA);
+ if (rc < 0) {
+ pr_err("Can't vote %d current limit rc=%d\n",
+ CURRENT_100_MA, rc);
+ }
+
+ chip->aicl_irq_count = 0;
+ } else if ((get_prop_charge_type(chip) ==
+ POWER_SUPPLY_CHARGE_TYPE_FAST) &&
+ (reg & AICL_SUSP_BIT)) {
+ /*
+ * If the AICL_SUSP_BIT is on, then AICL reruns have
+ * already been disabled. Set the very weak charger
+ * flag so that the driver reports a bad charger
+ * and does not reenable AICL reruns.
+ */
+ chip->very_weak_charger = true;
+ bad_charger = true;
+ }
+ if (bad_charger) {
+ pr_smb(PR_MISC,
+ "setting usb psy health UNSPEC_FAILURE\n");
+ chip->usb_health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ power_supply_changed(chip->usb_psy);
+ schedule_work(&chip->usb_set_online_work);
+ }
+ }
+}
+
+static int wait_for_usbin_uv(struct smbchg_chip *chip, bool high)
+{
+ int rc;
+ int tries = 3;
+ struct completion *completion = &chip->usbin_uv_lowered;
+ bool usbin_uv;
+
+ if (high)
+ completion = &chip->usbin_uv_raised;
+
+ while (tries--) {
+ rc = wait_for_completion_interruptible_timeout(
+ completion,
+ msecs_to_jiffies(APSD_TIMEOUT_MS));
+ if (rc >= 0)
+ break;
+ }
+
+ usbin_uv = is_usbin_uv_high(chip);
+
+ if (high == usbin_uv)
+ return 0;
+
+ pr_err("usbin uv didnt go to a %s state, still at %s, tries = %d, rc = %d\n",
+ high ? "risen" : "lowered",
+ usbin_uv ? "high" : "low",
+ tries, rc);
+ return -EINVAL;
+}
+
+static int wait_for_src_detect(struct smbchg_chip *chip, bool high)
+{
+ int rc;
+ int tries = 3;
+ struct completion *completion = &chip->src_det_lowered;
+ bool src_detect;
+
+ if (high)
+ completion = &chip->src_det_raised;
+
+ while (tries--) {
+ rc = wait_for_completion_interruptible_timeout(
+ completion,
+ msecs_to_jiffies(APSD_TIMEOUT_MS));
+ if (rc >= 0)
+ break;
+ }
+
+ src_detect = is_src_detect_high(chip);
+
+ if (high == src_detect)
+ return 0;
+
+ pr_err("src detect didnt go to a %s state, still at %s, tries = %d, rc = %d\n",
+ high ? "risen" : "lowered",
+ src_detect ? "high" : "low",
+ tries, rc);
+ return -EINVAL;
+}
+
+static int fake_insertion_removal(struct smbchg_chip *chip, bool insertion)
+{
+ int rc;
+ bool src_detect;
+ bool usbin_uv;
+
+ if (insertion) {
+ reinit_completion(&chip->src_det_raised);
+ reinit_completion(&chip->usbin_uv_lowered);
+ } else {
+ reinit_completion(&chip->src_det_lowered);
+ reinit_completion(&chip->usbin_uv_raised);
+ }
+
+ /* ensure that usbin uv real time status is in the right state */
+ usbin_uv = is_usbin_uv_high(chip);
+ if (usbin_uv != insertion) {
+ pr_err("Skip faking, usbin uv is already %d\n", usbin_uv);
+ return -EINVAL;
+ }
+
+ /* ensure that src_detect real time status is in the right state */
+ src_detect = is_src_detect_high(chip);
+ if (src_detect == insertion) {
+ pr_err("Skip faking, src detect is already %d\n", src_detect);
+ return -EINVAL;
+ }
+
+ pr_smb(PR_MISC, "Allow only %s charger\n",
+ insertion ? "5-9V" : "9V only");
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + USBIN_CHGR_CFG,
+ ADAPTER_ALLOWANCE_MASK,
+ insertion ?
+ USBIN_ADAPTER_5V_9V_CONT : USBIN_ADAPTER_9V);
+ if (rc < 0) {
+ pr_err("Couldn't write usb allowance rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Waiting on %s usbin uv\n",
+ insertion ? "falling" : "rising");
+ rc = wait_for_usbin_uv(chip, !insertion);
+ if (rc < 0) {
+ pr_err("wait for usbin uv failed rc = %d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Waiting on %s src det\n",
+ insertion ? "rising" : "falling");
+ rc = wait_for_src_detect(chip, insertion);
+ if (rc < 0) {
+ pr_err("wait for src detect failed rc = %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int smbchg_prepare_for_pulsing(struct smbchg_chip *chip)
+{
+ int rc = 0;
+ u8 reg;
+
+ /* switch to 5V HVDCP */
+ pr_smb(PR_MISC, "Switch to 5V HVDCP\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_5V);
+ if (rc < 0) {
+ pr_err("Couldn't configure HVDCP 5V rc=%d\n", rc);
+ goto out;
+ }
+
+ /* wait for HVDCP to lower to 5V */
+ msleep(500);
+ /*
+ * Check if the same hvdcp session is in progress. src_det should be
+ * high and that we are still in 5V hvdcp
+ */
+ if (!is_src_detect_high(chip)) {
+ pr_smb(PR_MISC, "src det low after 500mS sleep\n");
+ goto out;
+ }
+
+ /* disable HVDCP */
+ pr_smb(PR_MISC, "Disable HVDCP\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_EN_BIT, 0);
+ if (rc < 0) {
+ pr_err("Couldn't disable HVDCP rc=%d\n", rc);
+ goto out;
+ }
+
+ pr_smb(PR_MISC, "HVDCP voting for 300mA ICL\n");
+ rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, true, 300);
+ if (rc < 0) {
+ pr_err("Couldn't vote for 300mA HVDCP ICL rc=%d\n", rc);
+ goto out;
+ }
+
+ pr_smb(PR_MISC, "Disable AICL\n");
+ smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, 0);
+
+ chip->hvdcp_3_det_ignore_uv = true;
+ /* fake a removal */
+ pr_smb(PR_MISC, "Faking Removal\n");
+ rc = fake_insertion_removal(chip, false);
+ if (rc < 0) {
+ pr_err("Couldn't fake removal HVDCP Removed rc=%d\n", rc);
+ goto handle_removal;
+ }
+
+ /* disable APSD */
+ pr_smb(PR_MISC, "Disabling APSD\n");
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + APSD_CFG,
+ AUTO_SRC_DETECT_EN_BIT, 0);
+ if (rc < 0) {
+ pr_err("Couldn't disable APSD rc=%d\n", rc);
+ goto out;
+ }
+
+ /* fake an insertion */
+ pr_smb(PR_MISC, "Faking Insertion\n");
+ rc = fake_insertion_removal(chip, true);
+ if (rc < 0) {
+ pr_err("Couldn't fake insertion rc=%d\n", rc);
+ goto handle_removal;
+ }
+ chip->hvdcp_3_det_ignore_uv = false;
+
+ pr_smb(PR_MISC, "Enable AICL\n");
+ smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, AICL_EN_BIT);
+
+ set_usb_psy_dp_dm(chip, POWER_SUPPLY_DP_DM_DP0P6_DMF);
+ /*
+ * DCP will switch to HVDCP in this time by removing the short
+ * between DP and DM.
+ */
+ msleep(HVDCP_NOTIFY_MS);
+ /*
+ * Check if the same hvdcp session is in progress. src_det should be
+ * high and the usb type should be none since APSD was disabled
+ */
+ if (!is_src_detect_high(chip)) {
+ pr_smb(PR_MISC, "src det low after 2s sleep\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ smbchg_read(chip, &reg, chip->misc_base + IDEV_STS, 1);
+ if ((reg >> TYPE_BITS_OFFSET) != 0) {
+ pr_smb(PR_MISC, "type bits set after 2s sleep - abort\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ set_usb_psy_dp_dm(chip, POWER_SUPPLY_DP_DM_DP0P6_DM3P3);
+ /* Wait 60 ms after entering continuous mode */
+ msleep(60);
+
+ return 0;
+out:
+ chip->hvdcp_3_det_ignore_uv = false;
+ restore_from_hvdcp_detection(chip);
+ return rc;
+handle_removal:
+ chip->hvdcp_3_det_ignore_uv = false;
+ update_usb_status(chip, 0, 0);
+ return rc;
+}
+
+static int smbchg_unprepare_for_pulsing(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg))
+ rc = regulator_enable(chip->dpdm_reg);
+ if (rc < 0) {
+ pr_err("Couldn't enable DP/DM for pulsing rc=%d\n", rc);
+ return rc;
+ }
+
+ /* switch to 9V HVDCP */
+ pr_smb(PR_MISC, "Switch to 9V HVDCP\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_9V);
+ if (rc < 0) {
+ pr_err("Couldn't configure HVDCP 9V rc=%d\n", rc);
+ return rc;
+ }
+
+ /* enable HVDCP */
+ pr_smb(PR_MISC, "Enable HVDCP\n");
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_EN_BIT, HVDCP_EN_BIT);
+ if (rc < 0) {
+ pr_err("Couldn't enable HVDCP rc=%d\n", rc);
+ return rc;
+ }
+
+ /* enable APSD */
+ pr_smb(PR_MISC, "Enabling APSD\n");
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + APSD_CFG,
+ AUTO_SRC_DETECT_EN_BIT, AUTO_SRC_DETECT_EN_BIT);
+ if (rc < 0) {
+ pr_err("Couldn't enable APSD rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Disable AICL */
+ pr_smb(PR_MISC, "Disable AICL\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, 0);
+ if (rc < 0) {
+ pr_err("Couldn't disable AICL rc=%d\n", rc);
+ return rc;
+ }
+
+ /* fake a removal */
+ chip->hvdcp_3_det_ignore_uv = true;
+ pr_smb(PR_MISC, "Faking Removal\n");
+ rc = fake_insertion_removal(chip, false);
+ if (rc < 0) {
+ pr_err("Couldn't fake removal rc=%d\n", rc);
+ goto out;
+ }
+
+ /*
+ * reset the enabled once flag for parallel charging so
+ * parallel charging can immediately restart after the HVDCP pulsing
+ * is complete
+ */
+ chip->parallel.enabled_once = false;
+
+ /* fake an insertion */
+ pr_smb(PR_MISC, "Faking Insertion\n");
+ rc = fake_insertion_removal(chip, true);
+ if (rc < 0) {
+ pr_err("Couldn't fake insertion rc=%d\n", rc);
+ goto out;
+ }
+ chip->hvdcp_3_det_ignore_uv = false;
+
+ /* Enable AICL */
+ pr_smb(PR_MISC, "Enable AICL\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, AICL_EN_BIT);
+ if (rc < 0) {
+ pr_err("Couldn't enable AICL rc=%d\n", rc);
+ return rc;
+ }
+
+out:
+ /*
+ * There are many QC 2.0 chargers that collapse before the aicl deglitch
+ * timer can mitigate. Hence set the aicl deglitch time to a shorter
+ * period.
+ */
+
+ rc = vote(chip->aicl_deglitch_short_votable,
+ HVDCP_SHORT_DEGLITCH_VOTER, true, 0);
+ if (rc < 0)
+ pr_err("Couldn't reduce aicl deglitch rc=%d\n", rc);
+
+ pr_smb(PR_MISC, "Retracting HVDCP vote for ICL\n");
+ rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't retract HVDCP ICL vote rc=%d\n", rc);
+
+ chip->hvdcp_3_det_ignore_uv = false;
+ if (!is_src_detect_high(chip)) {
+ pr_smb(PR_MISC, "HVDCP removed\n");
+ update_usb_status(chip, 0, 0);
+ }
+ return rc;
+}
+
+#define USB_CMD_APSD 0x41
+#define APSD_RERUN BIT(0)
+static int rerun_apsd(struct smbchg_chip *chip)
+{
+ int rc;
+
+ reinit_completion(&chip->src_det_raised);
+ reinit_completion(&chip->usbin_uv_lowered);
+ reinit_completion(&chip->src_det_lowered);
+ reinit_completion(&chip->usbin_uv_raised);
+
+ /* re-run APSD */
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + USB_CMD_APSD,
+ APSD_RERUN, APSD_RERUN);
+ if (rc) {
+ pr_err("Couldn't re-run APSD rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Waiting on rising usbin uv\n");
+ rc = wait_for_usbin_uv(chip, true);
+ if (rc < 0) {
+ pr_err("wait for usbin uv failed rc = %d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Waiting on falling src det\n");
+ rc = wait_for_src_detect(chip, false);
+ if (rc < 0) {
+ pr_err("wait for src detect failed rc = %d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Waiting on falling usbin uv\n");
+ rc = wait_for_usbin_uv(chip, false);
+ if (rc < 0) {
+ pr_err("wait for usbin uv failed rc = %d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Waiting on rising src det\n");
+ rc = wait_for_src_detect(chip, true);
+ if (rc < 0) {
+ pr_err("wait for src detect failed rc = %d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+#define SCHG_LITE_USBIN_HVDCP_5_9V 0x8
+#define SCHG_LITE_USBIN_HVDCP_5_9V_SEL_MASK 0x38
+#define SCHG_LITE_USBIN_HVDCP_SEL_IDLE BIT(3)
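+/*
+ * Per the masks above, bits 5:3 of the input status encode the HVDCP
+ * selection; 0x8 in that field (001b) indicates the 5V-9V continuous
+ * range. The IDLE bit in the HVDCP status indicates no fixed selection
+ * is latched, in which case the input status field is consulted.
+ */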
+static bool is_hvdcp_5v_cont_mode(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg = 0;
+
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + USBIN_HVDCP_STS, 1);
+ if (rc) {
+ pr_err("Unable to read HVDCP status rc=%d\n", rc);
+ return false;
+ }
+
+ pr_smb(PR_STATUS, "HVDCP status = %x\n", reg);
+
+ if (reg & SCHG_LITE_USBIN_HVDCP_SEL_IDLE) {
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + INPUT_STS, 1);
+ if (rc) {
+ pr_err("Unable to read INPUT status rc=%d\n", rc);
+ return false;
+ }
+ pr_smb(PR_STATUS, "INPUT status = %x\n", reg);
+ if ((reg & SCHG_LITE_USBIN_HVDCP_5_9V_SEL_MASK) ==
+ SCHG_LITE_USBIN_HVDCP_5_9V)
+ return true;
+ }
+ return false;
+}
+
+static int smbchg_prepare_for_pulsing_lite(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ /* check if HVDCP is already in 5V continuous mode */
+ if (is_hvdcp_5v_cont_mode(chip)) {
+ pr_smb(PR_MISC, "HVDCP by default is in 5V continuous mode\n");
+ return 0;
+ }
+
+ /* switch to 5V HVDCP */
+ pr_smb(PR_MISC, "Switch to 5V HVDCP\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_5V);
+ if (rc < 0) {
+ pr_err("Couldn't configure HVDCP 5V rc=%d\n", rc);
+ goto out;
+ }
+
+ /* wait for HVDCP to lower to 5V */
+ msleep(500);
+ /*
+ * Check if the same hvdcp session is in progress. src_det should be
+ * high and that we are still in 5V hvdcp
+ */
+ if (!is_src_detect_high(chip)) {
+ pr_smb(PR_MISC, "src det low after 500mS sleep\n");
+ goto out;
+ }
+
+ pr_smb(PR_MISC, "HVDCP voting for 300mA ICL\n");
+ rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, true, 300);
+ if (rc < 0) {
+ pr_err("Couldn't vote for 300mA HVDCP ICL rc=%d\n", rc);
+ goto out;
+ }
+
+ pr_smb(PR_MISC, "Disable AICL\n");
+ smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, 0);
+
+ chip->hvdcp_3_det_ignore_uv = true;
+
+ /* re-run APSD */
+ rc = rerun_apsd(chip);
+ if (rc) {
+ pr_err("APSD rerun failed\n");
+ goto out;
+ }
+
+ chip->hvdcp_3_det_ignore_uv = false;
+
+ pr_smb(PR_MISC, "Enable AICL\n");
+ smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, AICL_EN_BIT);
+ /*
+ * DCP will switch to HVDCP in this time by removing the short
+ * between DP and DM.
+ */
+ msleep(HVDCP_NOTIFY_MS);
+ /*
+ * Check if the same hvdcp session is in progress. src_det should be
+ * high and the usb type should be none since APSD was disabled
+ */
+ if (!is_src_detect_high(chip)) {
+ pr_smb(PR_MISC, "src det low after 2s sleep\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* we are done if HVDCP is in 5V continuous mode */
+ if (!is_hvdcp_5v_cont_mode(chip)) {
+ pr_err("HVDCP could not be set in 5V continuous mode\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ return 0;
+out:
+ chip->hvdcp_3_det_ignore_uv = false;
+ restore_from_hvdcp_detection(chip);
+ return rc;
+}
+
+static int smbchg_unprepare_for_pulsing_lite(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ pr_smb(PR_MISC, "Forcing 9V HVDCP 2.0\n");
+ rc = force_9v_hvdcp(chip);
+ if (rc) {
+ pr_err("Failed to force 9V HVDCP=%d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Retracting HVDCP vote for ICL\n");
+ rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't retract HVDCP ICL vote rc=%d\n", rc);
+
+ return rc;
+}
+
+#define CMD_HVDCP_2 0x43
+#define SINGLE_INCREMENT BIT(0)
+#define SINGLE_DECREMENT BIT(1)
+static int smbchg_dp_pulse_lite(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ pr_smb(PR_MISC, "Increment DP\n");
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_HVDCP_2,
+ SINGLE_INCREMENT, SINGLE_INCREMENT);
+ if (rc)
+ pr_err("Single-increment failed rc=%d\n", rc);
+
+ return rc;
+}
+
+static int smbchg_dm_pulse_lite(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ pr_smb(PR_MISC, "Decrement DM\n");
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_HVDCP_2,
+ SINGLE_DECREMENT, SINGLE_DECREMENT);
+ if (rc)
+ pr_err("Single-decrement failed rc=%d\n", rc);
+
+ return rc;
+}
+
+static int smbchg_hvdcp3_confirmed(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ /*
+ * reset the enabled once flag for parallel charging because this is
+ * effectively a new insertion.
+ */
+ chip->parallel.enabled_once = false;
+
+ pr_smb(PR_MISC, "Retracting HVDCP vote for ICL\n");
+ rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't retract HVDCP ICL vote rc=%d\n", rc);
+
+ smbchg_change_usb_supply_type(chip, POWER_SUPPLY_TYPE_USB_HVDCP_3);
+
+ return rc;
+}
+
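+/*
+ * DP/DM pulse bookkeeping: each DP_PULSE/DM_PULSE request nudges the
+ * HVDCP 3.0 adapter voltage up/down by one step (typically 200 mV per
+ * pulse on QC 3.0 adapters), and pulse_cnt tracks the net number of
+ * pulses so the detection code can restore a known state later.
+ */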
+static int smbchg_dp_dm(struct smbchg_chip *chip, int val)
+{
+ int rc = 0;
+ int target_icl_vote_ma;
+
+ switch (val) {
+ case POWER_SUPPLY_DP_DM_PREPARE:
+ if (!is_hvdcp_present(chip)) {
+ pr_err("No pulsing unless HVDCP\n");
+ return -ENODEV;
+ }
+ if (chip->schg_version == QPNP_SCHG_LITE)
+ rc = smbchg_prepare_for_pulsing_lite(chip);
+ else
+ rc = smbchg_prepare_for_pulsing(chip);
+ break;
+ case POWER_SUPPLY_DP_DM_UNPREPARE:
+ if (chip->schg_version == QPNP_SCHG_LITE)
+ rc = smbchg_unprepare_for_pulsing_lite(chip);
+ else
+ rc = smbchg_unprepare_for_pulsing(chip);
+ break;
+ case POWER_SUPPLY_DP_DM_CONFIRMED_HVDCP3:
+ rc = smbchg_hvdcp3_confirmed(chip);
+ break;
+ case POWER_SUPPLY_DP_DM_DP_PULSE:
+ if (chip->schg_version == QPNP_SCHG)
+ rc = set_usb_psy_dp_dm(chip,
+ POWER_SUPPLY_DP_DM_DP_PULSE);
+ else
+ rc = smbchg_dp_pulse_lite(chip);
+ if (!rc)
+ chip->pulse_cnt++;
+ pr_smb(PR_MISC, "pulse_cnt = %d\n", chip->pulse_cnt);
+ break;
+ case POWER_SUPPLY_DP_DM_DM_PULSE:
+ if (chip->schg_version == QPNP_SCHG)
+ rc = set_usb_psy_dp_dm(chip,
+ POWER_SUPPLY_DP_DM_DM_PULSE);
+ else
+ rc = smbchg_dm_pulse_lite(chip);
+ if (!rc && chip->pulse_cnt)
+ chip->pulse_cnt--;
+ pr_smb(PR_MISC, "pulse_cnt = %d\n", chip->pulse_cnt);
+ break;
+ case POWER_SUPPLY_DP_DM_HVDCP3_SUPPORTED:
+ chip->hvdcp3_supported = true;
+ pr_smb(PR_MISC, "HVDCP3 supported\n");
+ break;
+ case POWER_SUPPLY_DP_DM_ICL_DOWN:
+ chip->usb_icl_delta -= 100;
+ target_icl_vote_ma = get_client_vote(chip->usb_icl_votable,
+ PSY_ICL_VOTER);
+ vote(chip->usb_icl_votable, SW_AICL_ICL_VOTER, true,
+ target_icl_vote_ma + chip->usb_icl_delta);
+ break;
+ case POWER_SUPPLY_DP_DM_ICL_UP:
+ chip->usb_icl_delta += 100;
+ target_icl_vote_ma = get_client_vote(chip->usb_icl_votable,
+ PSY_ICL_VOTER);
+ vote(chip->usb_icl_votable, SW_AICL_ICL_VOTER, true,
+ target_icl_vote_ma + chip->usb_icl_delta);
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+static void update_typec_capability_status(struct smbchg_chip *chip,
+ const union power_supply_propval *val)
+{
+ pr_smb(PR_TYPEC, "typec capability = %dma\n", val->intval);
+
+ pr_debug("changing ICL from %dma to %dma\n", chip->typec_current_ma,
+ val->intval);
+ chip->typec_current_ma = val->intval;
+ smbchg_change_usb_supply_type(chip, chip->usb_supply_type);
+}
+
+static void update_typec_otg_status(struct smbchg_chip *chip, int mode,
+ bool force)
+{
+	pr_smb(PR_TYPEC, "typec mode = %d\n", mode);
+
+ if (mode == POWER_SUPPLY_TYPE_DFP) {
+ chip->typec_dfp = true;
+ extcon_set_cable_state_(chip->extcon, EXTCON_USB_HOST,
+ chip->typec_dfp);
+ /* update FG */
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_STATUS,
+ get_prop_batt_status(chip));
+ } else if (force || chip->typec_dfp) {
+ chip->typec_dfp = false;
+ extcon_set_cable_state_(chip->extcon, EXTCON_USB_HOST,
+ chip->typec_dfp);
+ /* update FG */
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_STATUS,
+ get_prop_batt_status(chip));
+ }
+}
+
+static int smbchg_usb_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct smbchg_chip *chip = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ val->intval = chip->usb_current_max;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = chip->usb_present;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = chip->usb_online;
+ break;
+ case POWER_SUPPLY_PROP_TYPE:
+ val->intval = chip->usb_supply_type;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = chip->usb_health;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int smbchg_usb_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct smbchg_chip *chip = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ chip->usb_current_max = val->intval;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ chip->usb_online = val->intval;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ power_supply_changed(psy);
+ return 0;
+}
+
+static int
+smbchg_usb_is_writeable(struct power_supply *psy, enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ return 1;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static char *smbchg_usb_supplicants[] = {
+ "battery",
+ "bms",
+};
+
+static enum power_supply_property smbchg_usb_properties[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_TYPE,
+ POWER_SUPPLY_PROP_HEALTH,
+};
+
+#define CHARGE_OUTPUT_VTG_RATIO 840
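+/*
+ * The VCHG pin outputs a voltage proportional to the charge current
+ * (assumed here to be roughly 840mV per ampere, hence the ratio above),
+ * so smbchg_get_iusb() converts the ADC reading to uA by scaling it by
+ * 1000/840.
+ */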
+static int smbchg_get_iusb(struct smbchg_chip *chip)
+{
+ int rc, iusb_ua = -EINVAL;
+ struct qpnp_vadc_result adc_result;
+
+ if (!is_usb_present(chip) && !is_dc_present(chip))
+ return 0;
+
+ if (chip->vchg_vadc_dev && chip->vchg_adc_channel != -EINVAL) {
+ rc = qpnp_vadc_read(chip->vchg_vadc_dev,
+ chip->vchg_adc_channel, &adc_result);
+ if (rc) {
+ pr_smb(PR_STATUS,
+ "error in VCHG (channel-%d) read rc = %d\n",
+ chip->vchg_adc_channel, rc);
+ return 0;
+ }
+ iusb_ua = div_s64(adc_result.physical * 1000,
+ CHARGE_OUTPUT_VTG_RATIO);
+ }
+
+ return iusb_ua;
+}
+
+static enum power_supply_property smbchg_battery_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
+ POWER_SUPPLY_PROP_FLASH_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_MAX,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_NOW,
+ POWER_SUPPLY_PROP_FLASH_ACTIVE,
+ POWER_SUPPLY_PROP_FLASH_TRIGGER,
+ POWER_SUPPLY_PROP_DP_DM,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+ POWER_SUPPLY_PROP_RERUN_AICL,
+ POWER_SUPPLY_PROP_RESTRICTED_CHARGING,
+};
+
+static int smbchg_battery_set_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ const union power_supply_propval *val)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = power_supply_get_drvdata(psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED:
+ vote(chip->battchg_suspend_votable, BATTCHG_USER_EN_VOTER,
+ !val->intval, 0);
+ break;
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ rc = vote(chip->usb_suspend_votable, USER_EN_VOTER,
+ !val->intval, 0);
+ rc = vote(chip->dc_suspend_votable, USER_EN_VOTER,
+ !val->intval, 0);
+ chip->chg_enabled = val->intval;
+ schedule_work(&chip->usb_set_online_work);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ chip->fake_battery_soc = val->intval;
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ break;
+ case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+ smbchg_system_temp_level_set(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ rc = smbchg_set_fastchg_current_user(chip, val->intval / 1000);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ rc = smbchg_float_voltage_set(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE:
+ rc = smbchg_safety_timer_enable(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_FLASH_ACTIVE:
+ rc = smbchg_switch_buck_frequency(chip, val->intval);
+ if (rc) {
+ pr_err("Couldn't switch buck frequency, rc=%d\n", rc);
+ /*
+ * Trigger a panic if there is an error while switching
+ * buck frequency. This will prevent LS FET damage.
+ */
+ BUG_ON(1);
+ }
+
+ rc = smbchg_otg_pulse_skip_disable(chip,
+ REASON_FLASH_ENABLED, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_FLASH_TRIGGER:
+ chip->flash_triggered = !!val->intval;
+ smbchg_icl_loop_disable_check(chip);
+ break;
+ case POWER_SUPPLY_PROP_FORCE_TLIM:
+ rc = smbchg_force_tlim_en(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_DP_DM:
+ rc = smbchg_dp_dm(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_RERUN_AICL:
+ smbchg_rerun_aicl(chip);
+ break;
+ case POWER_SUPPLY_PROP_RESTRICTED_CHARGING:
+ rc = smbchg_restricted_charging(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_CAPABILITY:
+ if (chip->typec_psy)
+ update_typec_capability_status(chip, val);
+ break;
+ case POWER_SUPPLY_PROP_TYPEC_MODE:
+ if (chip->typec_psy)
+ update_typec_otg_status(chip, val->intval, false);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int smbchg_battery_is_writeable(struct power_supply *psy,
+ enum power_supply_property prop)
+{
+ int rc;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED:
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ case POWER_SUPPLY_PROP_CAPACITY:
+ case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ case POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE:
+ case POWER_SUPPLY_PROP_DP_DM:
+ case POWER_SUPPLY_PROP_RERUN_AICL:
+ case POWER_SUPPLY_PROP_RESTRICTED_CHARGING:
+ rc = 1;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+static int smbchg_battery_get_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct smbchg_chip *chip = power_supply_get_drvdata(psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = get_prop_batt_status(chip);
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = get_prop_batt_present(chip);
+ break;
+ case POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED:
+ val->intval
+ = get_effective_result(chip->battchg_suspend_votable);
+ if (val->intval < 0) /* no votes */
+ val->intval = 1;
+ else
+ val->intval = !val->intval;
+ break;
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ val->intval = chip->chg_enabled;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ val->intval = get_prop_charge_type(chip);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ val->intval = smbchg_float_voltage_get(chip);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = get_prop_batt_health(chip);
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_FLASH_CURRENT_MAX:
+ val->intval = smbchg_calc_max_flash_current(chip);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ val->intval = chip->fastchg_current_ma * 1000;
+ break;
+ case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+ val->intval = chip->therm_lvl_sel;
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_MAX:
+ val->intval = smbchg_get_aicl_level_ma(chip) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED:
+ val->intval = (int)chip->aicl_complete;
+ break;
+ case POWER_SUPPLY_PROP_RESTRICTED_CHARGING:
+ val->intval = (int)chip->restricted_charging;
+ break;
+ /* properties from fg */
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = get_prop_batt_capacity(chip);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = get_prop_batt_current_now(chip);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = get_prop_batt_voltage_now(chip);
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = get_prop_batt_temp(chip);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = get_prop_batt_voltage_max_design(chip);
+ break;
+ case POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE:
+ val->intval = chip->safety_timer_en;
+ break;
+ case POWER_SUPPLY_PROP_FLASH_ACTIVE:
+ val->intval = chip->otg_pulse_skip_dis;
+ break;
+ case POWER_SUPPLY_PROP_FLASH_TRIGGER:
+ val->intval = chip->flash_triggered;
+ break;
+ case POWER_SUPPLY_PROP_DP_DM:
+ val->intval = chip->pulse_cnt;
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+ val->intval = smbchg_is_input_current_limited(chip);
+ break;
+ case POWER_SUPPLY_PROP_RERUN_AICL:
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_NOW:
+ val->intval = smbchg_get_iusb(chip);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static char *smbchg_dc_supplicants[] = {
+ "bms",
+};
+
+static enum power_supply_property smbchg_dc_properties[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+};
+
+static int smbchg_dc_set_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ const union power_supply_propval *val)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = power_supply_get_drvdata(psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ rc = vote(chip->dc_suspend_votable, POWER_SUPPLY_EN_VOTER,
+ !val->intval, 0);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ rc = vote(chip->dc_icl_votable, USER_ICL_VOTER, true,
+ val->intval / 1000);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int smbchg_dc_get_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct smbchg_chip *chip = power_supply_get_drvdata(psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = is_dc_present(chip);
+ break;
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ val->intval = get_effective_result(chip->dc_suspend_votable);
+ if (val->intval < 0) /* no votes */
+ val->intval = 1;
+ else
+ val->intval = !val->intval;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+		/* report whether dc is charging the battery */
+ val->intval = (smbchg_get_pwr_path(chip) == PWR_PATH_DC)
+ && (get_prop_batt_status(chip)
+ == POWER_SUPPLY_STATUS_CHARGING);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ val->intval = chip->dc_max_current_ma * 1000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int smbchg_dc_is_writeable(struct power_supply *psy,
+ enum power_supply_property prop)
+{
+ int rc;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ rc = 1;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+#define HOT_BAT_HARD_BIT BIT(0)
+#define HOT_BAT_SOFT_BIT BIT(1)
+#define COLD_BAT_HARD_BIT BIT(2)
+#define COLD_BAT_SOFT_BIT BIT(3)
+#define BAT_OV_BIT BIT(4)
+#define BAT_LOW_BIT BIT(5)
+#define BAT_MISSING_BIT BIT(6)
+#define BAT_TERM_MISSING_BIT BIT(7)
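+/*
+ * The bits above decode the BAT_IF peripheral's RT_STS register. Each of
+ * the battery interrupt handlers below re-reads RT_STS and caches the
+ * relevant bit in the chip state before notifying the battery psy and FG.
+ */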
+static irqreturn_t batt_hot_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1);
+ chip->batt_hot = !!(reg & HOT_BAT_HARD_BIT);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ smbchg_wipower_check(chip);
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_HEALTH,
+ get_prop_batt_health(chip));
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t batt_cold_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1);
+ chip->batt_cold = !!(reg & COLD_BAT_HARD_BIT);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ smbchg_wipower_check(chip);
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_HEALTH,
+ get_prop_batt_health(chip));
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t batt_warm_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1);
+ chip->batt_warm = !!(reg & HOT_BAT_SOFT_BIT);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_HEALTH,
+ get_prop_batt_health(chip));
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t batt_cool_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1);
+ chip->batt_cool = !!(reg & COLD_BAT_SOFT_BIT);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_HEALTH,
+ get_prop_batt_health(chip));
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t batt_pres_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1);
+ chip->batt_present = !(reg & BAT_MISSING_BIT);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_HEALTH,
+ get_prop_batt_health(chip));
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t vbat_low_handler(int irq, void *_chip)
+{
+ pr_warn_ratelimited("vbat low\n");
+ return IRQ_HANDLED;
+}
+
+#define CHG_COMP_SFT_BIT BIT(3)
+static irqreturn_t chg_error_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ int rc = 0;
+ u8 reg;
+
+ pr_smb(PR_INTERRUPT, "chg-error triggered\n");
+
+ rc = smbchg_read(chip, &reg, chip->chgr_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Unable to read RT_STS rc = %d\n", rc);
+ } else {
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ if (reg & CHG_COMP_SFT_BIT)
+ set_property_on_fg(chip,
+ POWER_SUPPLY_PROP_SAFETY_TIMER_EXPIRED,
+ 1);
+ }
+
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ smbchg_wipower_check(chip);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fastchg_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+
+ pr_smb(PR_INTERRUPT, "p2f triggered\n");
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ smbchg_wipower_check(chip);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t chg_hot_handler(int irq, void *_chip)
+{
+ pr_warn_ratelimited("chg hot\n");
+ smbchg_wipower_check(_chip);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t chg_term_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+
+ pr_smb(PR_INTERRUPT, "tcc triggered\n");
+ /*
+	 * Charge termination is a pulse and not level triggered. That means
+	 * the TCC bit in RT_STS can get cleared by the time this interrupt
+	 * is handled. Instead of relying on that bit to determine whether
+	 * charge termination happened, simply notify the FG whenever this
+	 * interrupt is handled.
+ */
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_CHARGE_DONE, 1);
+
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t taper_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ taper_irq_en(chip, false);
+ smbchg_read(chip, &reg, chip->chgr_base + RT_STS, 1);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ smbchg_parallel_usb_taper(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ smbchg_wipower_check(chip);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t recharge_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->chgr_base + RT_STS, 1);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wdog_timeout_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->misc_base + RT_STS, 1);
+ pr_warn_ratelimited("wdog timeout rt_stat = 0x%02x\n", reg);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ return IRQ_HANDLED;
+}
+
+/**
+ * power_ok_handler() - called when the switcher turns on or turns off
+ * @irq: the interrupt number
+ * @_chip: pointer to smbchg_chip
+ */
+static irqreturn_t power_ok_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->misc_base + RT_STS, 1);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ return IRQ_HANDLED;
+}
+
+/**
+ * dcin_uv_handler() - called when the dc voltage crosses the uv threshold
+ * @irq: the interrupt number
+ * @_chip: pointer to smbchg_chip
+ */
+#define DCIN_UNSUSPEND_DELAY_MS 1000
+static irqreturn_t dcin_uv_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ bool dc_present = is_dc_present(chip);
+
+ pr_smb(PR_STATUS, "chip->dc_present = %d dc_present = %d\n",
+ chip->dc_present, dc_present);
+
+ if (chip->dc_present != dc_present) {
+ /* dc changed */
+ chip->dc_present = dc_present;
+ if (chip->dc_psy_type != -EINVAL && chip->batt_psy)
+ power_supply_changed(chip->dc_psy);
+ smbchg_charging_status_change(chip);
+ smbchg_aicl_deglitch_wa_check(chip);
+ chip->vbat_above_headroom = false;
+ }
+
+ smbchg_wipower_check(chip);
+ return IRQ_HANDLED;
+}
+
+/**
+ * usbin_ov_handler() - called when an overvoltage condition occurs on USBIN
+ * @irq: the interrupt number
+ * @_chip: pointer to smbchg_chip
+ */
+static irqreturn_t usbin_ov_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ int rc;
+ u8 reg;
+ bool usb_present;
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb rt status rc = %d\n", rc);
+ goto out;
+ }
+
+ /* OV condition is detected. Notify it to USB psy */
+ if (reg & USBIN_OV_BIT) {
+ chip->usb_ov_det = true;
+ pr_smb(PR_MISC, "setting usb psy health OV\n");
+ chip->usb_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ power_supply_changed(chip->usb_psy);
+ } else {
+ chip->usb_ov_det = false;
+ /* If USB is present, then handle the USB insertion */
+ usb_present = is_usb_present(chip);
+ if (usb_present)
+ update_usb_status(chip, usb_present, false);
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+/**
+ * usbin_uv_handler() - called when USBIN crosses the uv threshold, which
+ *			typically indicates USB charger removal
+ * @irq: the interrupt number
+ * @_chip: pointer to smbchg_chip
+ */
+#define ICL_MODE_MASK SMB_MASK(5, 4)
+#define ICL_MODE_HIGH_CURRENT 0
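+/*
+ * Weak charger handling in usbin_uv_handler(): a UV event while src-detect
+ * is still high means the adapter collapsed under load. If the ICL mode is
+ * not high-current (AICL not running) the input is suspended entirely; if
+ * AICL already collapsed to the lowest usb_ilim table entry, hardware AICL
+ * reruns are disabled so the handler does not keep looping.
+ */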
+static irqreturn_t usbin_uv_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ int aicl_level = smbchg_get_aicl_level_ma(chip);
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RT_STS, 1);
+ if (rc) {
+ pr_err("could not read rt sts: %d", rc);
+ goto out;
+ }
+
+ pr_smb(PR_STATUS,
+ "%s chip->usb_present = %d rt_sts = 0x%02x hvdcp_3_det_ignore_uv = %d aicl = %d\n",
+ chip->hvdcp_3_det_ignore_uv ? "Ignoring":"",
+ chip->usb_present, reg, chip->hvdcp_3_det_ignore_uv,
+ aicl_level);
+
+ /*
+ * set usb_psy's dp=f dm=f if this is a new insertion, i.e. it is
+ * not already src_detected and usbin_uv is seen falling
+ */
+ if (!(reg & USBIN_UV_BIT) && !(reg & USBIN_SRC_DET_BIT)) {
+ pr_smb(PR_MISC, "setting usb dp=f dm=f\n");
+		if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg)) {
+			rc = regulator_enable(chip->dpdm_reg);
+			if (rc < 0) {
+				pr_err("Couldn't enable DP/DM for pulsing rc=%d\n",
+					rc);
+				goto out;
+			}
+		}
+ }
+
+ if (reg & USBIN_UV_BIT)
+ complete_all(&chip->usbin_uv_raised);
+ else
+ complete_all(&chip->usbin_uv_lowered);
+
+ if (chip->hvdcp_3_det_ignore_uv)
+ goto out;
+
+ if ((reg & USBIN_UV_BIT) && (reg & USBIN_SRC_DET_BIT)) {
+ pr_smb(PR_STATUS, "Very weak charger detected\n");
+ chip->very_weak_charger = true;
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + ICL_STS_2_REG, 1);
+ if (rc) {
+ dev_err(chip->dev, "Could not read usb icl sts 2: %d\n",
+ rc);
+ goto out;
+ }
+ if ((reg & ICL_MODE_MASK) != ICL_MODE_HIGH_CURRENT) {
+ /*
+ * If AICL is not even enabled, this is either an
+ * SDP or a grossly out of spec charger. Do not
+ * draw any current from it.
+ */
+ rc = vote(chip->usb_suspend_votable,
+ WEAK_CHARGER_EN_VOTER, true, 0);
+ if (rc < 0)
+ pr_err("could not disable charger: %d", rc);
+ } else if (aicl_level == chip->tables.usb_ilim_ma_table[0]) {
+ /*
+ * we are in a situation where the adapter is not able
+ * to supply even 300mA. Disable hw aicl reruns else it
+ * is only a matter of time when we get back here again
+ */
+ rc = vote(chip->hw_aicl_rerun_disable_votable,
+ WEAK_CHARGER_HW_AICL_VOTER, true, 0);
+ if (rc < 0)
+ pr_err("Couldn't disable hw aicl rerun rc=%d\n",
+ rc);
+ }
+ pr_smb(PR_MISC, "setting usb psy health UNSPEC_FAILURE\n");
+ chip->usb_health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ power_supply_changed(chip->usb_psy);
+ schedule_work(&chip->usb_set_online_work);
+ }
+
+ smbchg_wipower_check(chip);
+out:
+ return IRQ_HANDLED;
+}
+
+/**
+ * src_detect_handler() - called on the rising edge when a USB charger type
+ *			is detected and on the falling edge when the USB
+ *			voltage falls below the coarse detect voltage (1V);
+ *			used for handling USB charger insertion and removal
+ * @irq: the interrupt number
+ * @_chip: pointer to smbchg_chip
+ */
+static irqreturn_t src_detect_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ bool usb_present = is_usb_present(chip);
+ bool src_detect = is_src_detect_high(chip);
+ int rc;
+
+ pr_smb(PR_STATUS,
+ "%s chip->usb_present = %d usb_present = %d src_detect = %d hvdcp_3_det_ignore_uv=%d\n",
+ chip->hvdcp_3_det_ignore_uv ? "Ignoring":"",
+ chip->usb_present, usb_present, src_detect,
+ chip->hvdcp_3_det_ignore_uv);
+
+ if (src_detect)
+ complete_all(&chip->src_det_raised);
+ else
+ complete_all(&chip->src_det_lowered);
+
+ if (chip->hvdcp_3_det_ignore_uv)
+ goto out;
+
+ /*
+ * When VBAT is above the AICL threshold (4.25V) - 180mV (4.07V),
+	 * an input collapse due to AICL will actually cause a USBIN_UV
+ * interrupt to fire as well.
+ *
+ * Handle USB insertions and removals in the source detect handler
+ * instead of the USBIN_UV handler since the latter is untrustworthy
+ * when the battery voltage is high.
+ */
+ chip->very_weak_charger = false;
+ /*
+ * a src detect marks a new insertion or a real removal,
+ * vote for enable aicl hw reruns
+ */
+ rc = vote(chip->hw_aicl_rerun_disable_votable,
+ WEAK_CHARGER_HW_AICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't enable hw aicl rerun rc=%d\n", rc);
+
+ rc = vote(chip->usb_suspend_votable, WEAK_CHARGER_EN_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("could not enable charger: %d\n", rc);
+
+ if (src_detect) {
+ update_usb_status(chip, usb_present, 0);
+ } else {
+ update_usb_status(chip, 0, false);
+ chip->aicl_irq_count = 0;
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+/**
+ * otg_oc_handler() - called when the usb otg goes over current
+ */
+#define NUM_OTG_RETRIES 5
+#define OTG_OC_RETRY_DELAY_US 50000
+static irqreturn_t otg_oc_handler(int irq, void *_chip)
+{
+ int rc;
+ struct smbchg_chip *chip = _chip;
+ s64 elapsed_us = ktime_us_delta(ktime_get(), chip->otg_enable_time);
+
+ pr_smb(PR_INTERRUPT, "triggered\n");
+
+ if (chip->schg_version == QPNP_SCHG_LITE) {
+ pr_warn("OTG OC triggered - OTG disabled\n");
+ return IRQ_HANDLED;
+ }
+
+ if (elapsed_us > OTG_OC_RETRY_DELAY_US)
+ chip->otg_retries = 0;
+
+ /*
+ * Due to a HW bug in the PMI8994 charger, the current inrush that
+ * occurs when connecting certain OTG devices can cause the OTG
+ * overcurrent protection to trip.
+ *
+	 * The workaround is to retry enabling the OTG when an overcurrent
+	 * interrupt arrives, up to NUM_OTG_RETRIES times in quick succession.
+ */
+ if (chip->otg_retries < NUM_OTG_RETRIES) {
+ chip->otg_retries += 1;
+ pr_smb(PR_STATUS,
+ "Retrying OTG enable. Try #%d, elapsed_us %lld\n",
+ chip->otg_retries, elapsed_us);
+ rc = otg_oc_reset(chip);
+ if (rc)
+ pr_err("Failed to reset OTG OC state rc=%d\n", rc);
+ chip->otg_enable_time = ktime_get();
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * otg_fail_handler() - called when the usb otg fails
+ * (when vbat < OTG UVLO threshold)
+ */
+static irqreturn_t otg_fail_handler(int irq, void *_chip)
+{
+ pr_smb(PR_INTERRUPT, "triggered\n");
+ return IRQ_HANDLED;
+}
+
+/**
+ * aicl_done_handler() - called when the usb AICL algorithm is finished
+ * and a current is set.
+ */
+static irqreturn_t aicl_done_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ bool usb_present = is_usb_present(chip);
+ int aicl_level = smbchg_get_aicl_level_ma(chip);
+
+ pr_smb(PR_INTERRUPT, "triggered, aicl: %d\n", aicl_level);
+
+ increment_aicl_count(chip);
+
+ if (usb_present)
+ smbchg_parallel_usb_check_ok(chip);
+
+ if (chip->aicl_complete && chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * usbid_change_handler() - called when the usb RID changes.
+ * This is used mostly for detecting OTG
+ */
+static irqreturn_t usbid_change_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ bool otg_present;
+
+ pr_smb(PR_INTERRUPT, "triggered\n");
+
+ otg_present = is_otg_present(chip);
+ pr_smb(PR_MISC, "setting usb psy OTG = %d\n",
+ otg_present ? 1 : 0);
+
+ extcon_set_cable_state_(chip->extcon, EXTCON_USB_HOST, otg_present);
+
+ if (otg_present)
+ pr_smb(PR_STATUS, "OTG detected\n");
+
+ /* update FG */
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_STATUS,
+ get_prop_batt_status(chip));
+
+ return IRQ_HANDLED;
+}
+
+static int determine_initial_status(struct smbchg_chip *chip)
+{
+ union power_supply_propval type = {0, };
+
+ /*
+	 * It is okay to read the interrupt status here since interrupts
+	 * aren't requested yet. Reading the interrupt status clears the
+	 * interrupt, so be careful to read interrupt status only in
+	 * interrupt handling code.
+ */
+
+ batt_pres_handler(0, chip);
+ batt_hot_handler(0, chip);
+ batt_warm_handler(0, chip);
+ batt_cool_handler(0, chip);
+ batt_cold_handler(0, chip);
+ if (chip->typec_psy) {
+ get_property_from_typec(chip, POWER_SUPPLY_PROP_TYPE, &type);
+ update_typec_otg_status(chip, type.intval, true);
+ } else {
+ usbid_change_handler(0, chip);
+ }
+ src_detect_handler(0, chip);
+
+ chip->usb_present = is_usb_present(chip);
+ chip->dc_present = is_dc_present(chip);
+
+ if (chip->usb_present) {
+ int rc = 0;
+ pr_smb(PR_MISC, "setting usb dp=f dm=f\n");
+		if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg)) {
+			rc = regulator_enable(chip->dpdm_reg);
+			if (rc < 0) {
+				pr_err("Couldn't enable DP/DM for pulsing rc=%d\n",
+					rc);
+				return rc;
+			}
+		}
+ handle_usb_insertion(chip);
+ } else {
+ handle_usb_removal(chip);
+ }
+
+ return 0;
+}
+
+static int prechg_time[] = {
+ 24,
+ 48,
+ 96,
+ 192,
+};
+static int chg_time[] = {
+ 192,
+ 384,
+ 768,
+ 1536,
+};
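+
+/*
+ * Safety-timer lookup tables, in minutes: the array index is the value
+ * programmed into the corresponding 2-bit field of SFT_CFG, and
+ * smbchg_hw_init() picks the smallest entry that covers the timeout
+ * configured in the device tree.
+ */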
+
+enum bpd_type {
+ BPD_TYPE_BAT_NONE,
+ BPD_TYPE_BAT_ID,
+ BPD_TYPE_BAT_THM,
+ BPD_TYPE_BAT_THM_BAT_ID,
+ BPD_TYPE_DEFAULT,
+};
+
+static const char * const bpd_label[] = {
+ [BPD_TYPE_BAT_NONE] = "bpd_none",
+ [BPD_TYPE_BAT_ID] = "bpd_id",
+ [BPD_TYPE_BAT_THM] = "bpd_thm",
+ [BPD_TYPE_BAT_THM_BAT_ID] = "bpd_thm_id",
+};
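+
+/*
+ * BPD_TYPE_DEFAULT intentionally has no label: it is selected only when
+ * the qcom,bmd-pin-src property is absent, in which case smbchg_hw_init()
+ * leaves the BM_CFG pin-source bits untouched.
+ */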
+
+static inline int get_bpd(const char *name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(bpd_label); i++) {
+ if (strcmp(bpd_label[i], name) == 0)
+ return i;
+ }
+ return -EINVAL;
+}
+
+#define REVISION1_REG 0x0
+#define DIG_MINOR 0
+#define DIG_MAJOR 1
+#define ANA_MINOR 2
+#define ANA_MAJOR 3
+#define CHGR_CFG1 0xFB
+#define RECHG_THRESHOLD_SRC_BIT BIT(1)
+#define TERM_I_SRC_BIT BIT(2)
+#define TERM_SRC_FG BIT(2)
+#define CHG_INHIB_CFG_REG 0xF7
+#define CHG_INHIBIT_50MV_VAL 0x00
+#define CHG_INHIBIT_100MV_VAL 0x01
+#define CHG_INHIBIT_200MV_VAL 0x02
+#define CHG_INHIBIT_300MV_VAL 0x03
+#define CHG_INHIBIT_MASK 0x03
+#define USE_REGISTER_FOR_CURRENT BIT(2)
+#define CHGR_CFG2 0xFC
+#define CHG_EN_SRC_BIT BIT(7)
+#define CHG_EN_POLARITY_BIT BIT(6)
+#define P2F_CHG_TRAN BIT(5)
+#define CHG_BAT_OV_ECC BIT(4)
+#define I_TERM_BIT BIT(3)
+#define AUTO_RECHG_BIT BIT(2)
+#define CHARGER_INHIBIT_BIT BIT(0)
+#define USB51_COMMAND_POL BIT(2)
+#define USB51AC_CTRL BIT(1)
+#define TR_8OR32B 0xFE
+#define BUCK_8_16_FREQ_BIT BIT(0)
+#define BM_CFG 0xF3
+#define BATT_MISSING_ALGO_BIT BIT(2)
+#define BMD_PIN_SRC_MASK SMB_MASK(1, 0)
+#define PIN_SRC_SHIFT 0
+#define CHGR_CFG 0xFF
+#define RCHG_LVL_BIT BIT(0)
+#define VCHG_EN_BIT BIT(1)
+#define VCHG_INPUT_CURRENT_BIT BIT(3)
+#define CFG_AFVC 0xF6
+#define VFLOAT_COMP_ENABLE_MASK SMB_MASK(2, 0)
+#define TR_RID_REG 0xFA
+#define FG_INPUT_FET_DELAY_BIT BIT(3)
+#define TRIM_OPTIONS_7_0 0xF6
+#define INPUT_MISSING_POLLER_EN_BIT BIT(3)
+#define CHGR_CCMP_CFG 0xFA
+#define JEITA_TEMP_HARD_LIMIT_BIT BIT(5)
+#define HVDCP_ADAPTER_SEL_MASK SMB_MASK(5, 4)
+#define HVDCP_ADAPTER_SEL_9V_BIT BIT(4)
+#define HVDCP_AUTH_ALG_EN_BIT BIT(6)
+#define CMD_APSD 0x41
+#define APSD_RERUN_BIT BIT(0)
+#define OTG_CFG 0xF1
+#define HICCUP_ENABLED_BIT BIT(6)
+#define OTG_PIN_POLARITY_BIT BIT(4)
+#define OTG_PIN_ACTIVE_LOW BIT(4)
+#define OTG_EN_CTRL_MASK SMB_MASK(3, 2)
+#define OTG_PIN_CTRL_RID_DIS 0x04
+#define OTG_CMD_CTRL_RID_EN 0x08
+#define AICL_ADC_BIT BIT(6)
+static void batt_ov_wa_check(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+	/* disable the 'battery OV disables charging' feature */
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG2,
+ CHG_BAT_OV_ECC, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set chgr_cfg2 rc=%d\n", rc);
+ return;
+ }
+
+ /*
+ * if battery OV is set:
+ * restart charging by disable/enable charging
+ */
+ rc = smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't read Battery RT status rc = %d\n", rc);
+ return;
+ }
+
+ if (reg & BAT_OV_BIT) {
+ rc = smbchg_charging_en(chip, false);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't disable charging: rc = %d\n", rc);
+ return;
+ }
+
+		/* delay for charging-disable to take effect */
+ msleep(200);
+
+ rc = smbchg_charging_en(chip, true);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't enable charging: rc = %d\n", rc);
+ return;
+ }
+ }
+}
+
+static int smbchg_hw_init(struct smbchg_chip *chip)
+{
+ int rc, i;
+ u8 reg, mask;
+
+ rc = smbchg_read(chip, chip->revision,
+ chip->misc_base + REVISION1_REG, 4);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read revision rc=%d\n",
+ rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "Charger Revision DIG: %d.%d; ANA: %d.%d\n",
+ chip->revision[DIG_MAJOR], chip->revision[DIG_MINOR],
+ chip->revision[ANA_MAJOR], chip->revision[ANA_MINOR]);
+
+ /* Setup 9V HVDCP */
+ if (!chip->hvdcp_not_supported) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_9V);
+ if (rc < 0) {
+ pr_err("Couldn't set hvdcp config in chgpath_chg rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ if (chip->aicl_rerun_period_s > 0) {
+ rc = smbchg_set_aicl_rerun_period_s(chip,
+ chip->aicl_rerun_period_s);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set AICL rerun timer rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + TR_RID_REG,
+ FG_INPUT_FET_DELAY_BIT, FG_INPUT_FET_DELAY_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't disable fg input fet delay rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smbchg_sec_masked_write(chip, chip->misc_base + TRIM_OPTIONS_7_0,
+ INPUT_MISSING_POLLER_EN_BIT,
+ INPUT_MISSING_POLLER_EN_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't enable input missing poller rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /*
+ * Do not force using current from the register i.e. use auto
+ * power source detect (APSD) mA ratings for the initial current values.
+ *
+ * If this is set, AICL will not rerun at 9V for HVDCPs
+ */
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ USE_REGISTER_FOR_CURRENT, 0);
+
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set input limit cmd rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * set chg en by cmd register, set chg en by writing bit 1,
+ * enable auto pre to fast, enable auto recharge by default.
+ * enable current termination and charge inhibition based on
+ * the device tree configuration.
+ */
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG2,
+ CHG_EN_SRC_BIT | CHG_EN_POLARITY_BIT | P2F_CHG_TRAN
+ | I_TERM_BIT | AUTO_RECHG_BIT | CHARGER_INHIBIT_BIT,
+ CHG_EN_POLARITY_BIT
+ | (chip->chg_inhibit_en ? CHARGER_INHIBIT_BIT : 0)
+ | (chip->iterm_disabled ? I_TERM_BIT : 0));
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set chgr_cfg2 rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * enable battery charging to make sure it hasn't been changed earlier
+ * by the bootloader.
+ */
+ rc = smbchg_charging_en(chip, true);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't enable battery charging=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * Based on the configuration, use the analog sensors or the fuelgauge
+ * adc for recharge threshold source.
+ */
+
+ if (chip->chg_inhibit_source_fg)
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG1,
+ TERM_I_SRC_BIT | RECHG_THRESHOLD_SRC_BIT,
+ TERM_SRC_FG | RECHG_THRESHOLD_SRC_BIT);
+ else
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG1,
+ TERM_I_SRC_BIT | RECHG_THRESHOLD_SRC_BIT, 0);
+
+ if (rc < 0) {
+		dev_err(chip->dev, "Couldn't set chgr_cfg1 rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * control USB suspend via command bits and set correct 100/500mA
+ * polarity on the usb current
+ */
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ USB51_COMMAND_POL | USB51AC_CTRL, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set usb_chgpth cfg rc=%d\n", rc);
+ return rc;
+ }
+
+ check_battery_type(chip);
+
+ /* set the float voltage */
+ if (chip->vfloat_mv != -EINVAL) {
+ rc = smbchg_float_voltage_set(chip, chip->vfloat_mv);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set float voltage rc = %d\n", rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "set vfloat to %d\n", chip->vfloat_mv);
+ }
+
+ /* set the fast charge current compensation */
+ if (chip->fastchg_current_comp != -EINVAL) {
+ rc = smbchg_fastchg_current_comp_set(chip,
+ chip->fastchg_current_comp);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set fastchg current comp rc = %d\n",
+ rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "set fastchg current comp to %d\n",
+ chip->fastchg_current_comp);
+ }
+
+ /* set the float voltage compensation */
+ if (chip->float_voltage_comp != -EINVAL) {
+ rc = smbchg_float_voltage_comp_set(chip,
+ chip->float_voltage_comp);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set float voltage comp rc = %d\n",
+ rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "set float voltage comp to %d\n",
+ chip->float_voltage_comp);
+ }
+
+ /* set iterm */
+ if (chip->iterm_ma != -EINVAL) {
+ if (chip->iterm_disabled) {
+ dev_err(chip->dev, "Error: Both iterm_disabled and iterm_ma set\n");
+ return -EINVAL;
+ } else {
+ smbchg_iterm_set(chip, chip->iterm_ma);
+ }
+ }
+
+	/* set the charge safety timers */
+ if (chip->safety_time != -EINVAL) {
+ reg = (chip->safety_time > 0 ? 0 : SFT_TIMER_DISABLE_BIT) |
+ (chip->prechg_safety_time > 0
+ ? 0 : PRECHG_SFT_TIMER_DISABLE_BIT);
+
+ for (i = 0; i < ARRAY_SIZE(chg_time); i++) {
+ if (chip->safety_time <= chg_time[i]) {
+ reg |= i << SAFETY_TIME_MINUTES_SHIFT;
+ break;
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(prechg_time); i++) {
+ if (chip->prechg_safety_time <= prechg_time[i]) {
+ reg |= i;
+ break;
+ }
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->chgr_base + SFT_CFG,
+ SFT_EN_MASK | SFT_TO_MASK |
+ (chip->prechg_safety_time > 0
+ ? PRECHG_SFT_TO_MASK : 0), reg);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set safety timer rc = %d\n",
+ rc);
+ return rc;
+ }
+ chip->safety_timer_en = true;
+ } else {
+ rc = smbchg_read(chip, &reg, chip->chgr_base + SFT_CFG, 1);
+ if (rc < 0)
+ dev_err(chip->dev, "Unable to read SFT_CFG rc = %d\n",
+ rc);
+ else if (!(reg & SFT_EN_MASK))
+ chip->safety_timer_en = true;
+ }
+
+ /* configure jeita temperature hard limit */
+ if (chip->jeita_temp_hard_limit >= 0) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->chgr_base + CHGR_CCMP_CFG,
+ JEITA_TEMP_HARD_LIMIT_BIT,
+ chip->jeita_temp_hard_limit
+ ? 0 : JEITA_TEMP_HARD_LIMIT_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set jeita temp hard limit rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ /* make the buck switch faster to prevent some vbus oscillation */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + TR_8OR32B,
+ BUCK_8_16_FREQ_BIT, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set buck frequency rc = %d\n", rc);
+ return rc;
+ }
+
+ /* battery missing detection */
+ mask = BATT_MISSING_ALGO_BIT;
+ reg = chip->bmd_algo_disabled ? 0 : BATT_MISSING_ALGO_BIT;
+ if (chip->bmd_pin_src < BPD_TYPE_DEFAULT) {
+ mask |= BMD_PIN_SRC_MASK;
+ reg |= chip->bmd_pin_src << PIN_SRC_SHIFT;
+ }
+ rc = smbchg_sec_masked_write(chip,
+ chip->bat_if_base + BM_CFG, mask, reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set batt_missing config = %d\n",
+ rc);
+ return rc;
+ }
+
+ if (chip->vchg_adc_channel != -EINVAL) {
+ /* configure and enable VCHG */
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG,
+ VCHG_INPUT_CURRENT_BIT | VCHG_EN_BIT,
+ VCHG_INPUT_CURRENT_BIT | VCHG_EN_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set recharge rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ smbchg_charging_status_change(chip);
+
+ vote(chip->usb_suspend_votable, USER_EN_VOTER, !chip->chg_enabled, 0);
+ vote(chip->dc_suspend_votable, USER_EN_VOTER, !chip->chg_enabled, 0);
+ /* resume threshold */
+ if (chip->resume_delta_mv != -EINVAL) {
+ /*
+ * Configure only if the recharge threshold source is not
+ * fuel gauge ADC.
+ */
+ if (!chip->chg_inhibit_source_fg) {
+ if (chip->resume_delta_mv < 100)
+ reg = CHG_INHIBIT_50MV_VAL;
+ else if (chip->resume_delta_mv < 200)
+ reg = CHG_INHIBIT_100MV_VAL;
+ else if (chip->resume_delta_mv < 300)
+ reg = CHG_INHIBIT_200MV_VAL;
+ else
+ reg = CHG_INHIBIT_300MV_VAL;
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->chgr_base + CHG_INHIB_CFG_REG,
+ CHG_INHIBIT_MASK, reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set inhibit val rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->chgr_base + CHGR_CFG,
+ RCHG_LVL_BIT,
+ (chip->resume_delta_mv
+ < chip->tables.rchg_thr_mv)
+ ? 0 : RCHG_LVL_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set recharge rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ /* DC path current settings */
+ if (chip->dc_psy_type != -EINVAL) {
+ rc = vote(chip->dc_icl_votable, PSY_ICL_VOTER, true,
+ chip->dc_target_current_ma);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't vote for initial DC ICL rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ /*
+	 * On some devices the battery is powered via external sources that
+	 * can raise its voltage above the float voltage. The smbcharger goes
+	 * into reverse boost in such a situation, and the workaround is to
+	 * disable float voltage compensation (note that the battery will
+	 * appear hot/cold when powered via an external source).
+ */
+ if (chip->soft_vfloat_comp_disabled) {
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + CFG_AFVC,
+ VFLOAT_COMP_ENABLE_MASK, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't disable soft vfloat rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = vote(chip->fcc_votable, BATT_TYPE_FCC_VOTER, true,
+ chip->cfg_fastchg_current_ma);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't vote fastchg ma rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_read(chip, &chip->original_usbin_allowance,
+ chip->usb_chgpth_base + USBIN_CHGR_CFG, 1);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't read usb allowance rc=%d\n", rc);
+
+ if (chip->wipower_dyn_icl_avail) {
+ rc = smbchg_wipower_ilim_config(chip,
+ &(chip->wipower_default.entries[0]));
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set default wipower ilim = %d\n",
+ rc);
+ return rc;
+ }
+ }
+ /* unsuspend dc path, it could be suspended by the bootloader */
+ rc = smbchg_dc_suspend(chip, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't unsuspend dc path= %d\n", rc);
+ return rc;
+ }
+
+ if (chip->force_aicl_rerun) {
+ /* vote to enable hw aicl */
+ rc = vote(chip->hw_aicl_rerun_enable_indirect_votable,
+ DEFAULT_CONFIG_HW_AICL_VOTER, true, 0);
+ if (rc < 0) {
+ pr_err("Couldn't vote enable hw aicl rerun rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ if (chip->schg_version == QPNP_SCHG_LITE) {
+ /* enable OTG hiccup mode */
+ rc = smbchg_sec_masked_write(chip, chip->otg_base + OTG_CFG,
+ HICCUP_ENABLED_BIT, HICCUP_ENABLED_BIT);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set OTG OC config rc = %d\n",
+ rc);
+ }
+
+ if (chip->otg_pinctrl) {
+ /* configure OTG enable to pin control active low */
+ rc = smbchg_sec_masked_write(chip, chip->otg_base + OTG_CFG,
+ OTG_PIN_POLARITY_BIT | OTG_EN_CTRL_MASK,
+ OTG_PIN_ACTIVE_LOW | OTG_PIN_CTRL_RID_DIS);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set OTG EN config rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ if (chip->wa_flags & SMBCHG_BATT_OV_WA)
+ batt_ov_wa_check(chip);
+
+ /* turn off AICL adc for improved accuracy */
+ rc = smbchg_sec_masked_write(chip,
+ chip->misc_base + MISC_TRIM_OPT_15_8, AICL_ADC_BIT, 0);
+ if (rc)
+ pr_err("Couldn't write to MISC_TRIM_OPTIONS_15_8 rc=%d\n",
+ rc);
+
+ return rc;
+}
+
+static const struct of_device_id smbchg_match_table[] = {
+ {
+ .compatible = "qcom,qpnp-smbcharger",
+ },
+ { },
+};
+
+#define DC_MA_MIN 300
+#define DC_MA_MAX 2000
+#define OF_PROP_READ(chip, prop, dt_property, retval, optional) \
+do { \
+ if (retval) \
+ break; \
+ if (optional) \
+ prop = -EINVAL; \
+ \
+ retval = of_property_read_u32(chip->pdev->dev.of_node, \
+ "qcom," dt_property , \
+ &prop); \
+ \
+ if ((retval == -EINVAL) && optional) \
+ retval = 0; \
+ else if (retval) \
+ dev_err(chip->dev, "Error reading " #dt_property \
+			" property rc = %d\n", retval);			\
+} while (0)
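+
+/*
+ * OF_PROP_READ invocations chain on a shared retval: once a read fails,
+ * all subsequent reads are skipped. An absent optional property leaves
+ * prop at -EINVAL, which callers treat as a "not configured" sentinel.
+ */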
+
+#define ILIM_ENTRIES 3
+#define VOLTAGE_RANGE_ENTRIES 2
+#define RANGE_ENTRY (ILIM_ENTRIES + VOLTAGE_RANGE_ENTRIES)
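+/*
+ * Each row of a qcom,wipower-*-ilim-map table is five u32 cells:
+ * <vmin_uv vmax_uv icl_pt_ma icl_lv_ma icl_hv_ma>.
+ */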
+static int smb_parse_wipower_map_dt(struct smbchg_chip *chip,
+ struct ilim_map *map, char *property)
+{
+ struct device_node *node = chip->dev->of_node;
+ int total_elements, size;
+ struct property *prop;
+ const __be32 *data;
+ int num, i;
+
+ prop = of_find_property(node, property, &size);
+ if (!prop) {
+ dev_err(chip->dev, "%s missing\n", property);
+ return -EINVAL;
+ }
+
+ total_elements = size / sizeof(int);
+ if (total_elements % RANGE_ENTRY) {
+ dev_err(chip->dev, "%s table not in multiple of %d, total elements = %d\n",
+ property, RANGE_ENTRY, total_elements);
+ return -EINVAL;
+ }
+
+ data = prop->value;
+ num = total_elements / RANGE_ENTRY;
+ map->entries = devm_kzalloc(chip->dev,
+ num * sizeof(struct ilim_entry), GFP_KERNEL);
+ if (!map->entries) {
+ dev_err(chip->dev, "kzalloc failed for default ilim\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < num; i++) {
+ map->entries[i].vmin_uv = be32_to_cpup(data++);
+ map->entries[i].vmax_uv = be32_to_cpup(data++);
+ map->entries[i].icl_pt_ma = be32_to_cpup(data++);
+ map->entries[i].icl_lv_ma = be32_to_cpup(data++);
+ map->entries[i].icl_hv_ma = be32_to_cpup(data++);
+ }
+ map->num = num;
+ return 0;
+}
+
+static int smb_parse_wipower_dt(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ chip->wipower_dyn_icl_avail = false;
+
+ if (!chip->vadc_dev)
+ goto err;
+
+ rc = smb_parse_wipower_map_dt(chip, &chip->wipower_default,
+ "qcom,wipower-default-ilim-map");
+ if (rc) {
+		dev_err(chip->dev, "failed to parse wipower-default-ilim-map rc = %d\n",
+ rc);
+ goto err;
+ }
+
+ rc = smb_parse_wipower_map_dt(chip, &chip->wipower_pt,
+ "qcom,wipower-pt-ilim-map");
+ if (rc) {
+ dev_err(chip->dev, "failed to parse wipower-pt-ilim-map rc = %d\n",
+ rc);
+ goto err;
+ }
+
+ rc = smb_parse_wipower_map_dt(chip, &chip->wipower_div2,
+ "qcom,wipower-div2-ilim-map");
+ if (rc) {
+ dev_err(chip->dev, "failed to parse wipower-div2-ilim-map rc = %d\n",
+ rc);
+ goto err;
+ }
+ chip->wipower_dyn_icl_avail = true;
+ return 0;
+err:
+	chip->wipower_default.num = 0;
+	chip->wipower_pt.num = 0;
+	chip->wipower_div2.num = 0;
+ if (chip->wipower_default.entries)
+ devm_kfree(chip->dev, chip->wipower_default.entries);
+ if (chip->wipower_pt.entries)
+ devm_kfree(chip->dev, chip->wipower_pt.entries);
+ if (chip->wipower_div2.entries)
+ devm_kfree(chip->dev, chip->wipower_div2.entries);
+ chip->wipower_default.entries = NULL;
+ chip->wipower_pt.entries = NULL;
+ chip->wipower_div2.entries = NULL;
+ chip->vadc_dev = NULL;
+ return rc;
+}
+
+#define DEFAULT_VLED_MAX_UV 3500000
+#define DEFAULT_FCC_MA 2000
+static int smb_parse_dt(struct smbchg_chip *chip)
+{
+ int rc = 0, ocp_thresh = -EINVAL;
+ struct device_node *node = chip->dev->of_node;
+ const char *dc_psy_type, *bpd;
+
+ if (!node) {
+		dev_err(chip->dev, "device tree info missing\n");
+ return -EINVAL;
+ }
+
+ /* read optional u32 properties */
+ OF_PROP_READ(chip, ocp_thresh,
+ "ibat-ocp-threshold-ua", rc, 1);
+ if (ocp_thresh >= 0)
+ smbchg_ibat_ocp_threshold_ua = ocp_thresh;
+ OF_PROP_READ(chip, chip->iterm_ma, "iterm-ma", rc, 1);
+ OF_PROP_READ(chip, chip->cfg_fastchg_current_ma,
+ "fastchg-current-ma", rc, 1);
+ if (chip->cfg_fastchg_current_ma == -EINVAL)
+ chip->cfg_fastchg_current_ma = DEFAULT_FCC_MA;
+ OF_PROP_READ(chip, chip->vfloat_mv, "float-voltage-mv", rc, 1);
+ OF_PROP_READ(chip, chip->safety_time, "charging-timeout-mins", rc, 1);
+ OF_PROP_READ(chip, chip->vled_max_uv, "vled-max-uv", rc, 1);
+ if (chip->vled_max_uv < 0)
+ chip->vled_max_uv = DEFAULT_VLED_MAX_UV;
+ OF_PROP_READ(chip, chip->rpara_uohm, "rparasitic-uohm", rc, 1);
+ if (chip->rpara_uohm < 0)
+ chip->rpara_uohm = 0;
+ OF_PROP_READ(chip, chip->prechg_safety_time, "precharging-timeout-mins",
+ rc, 1);
+ OF_PROP_READ(chip, chip->fastchg_current_comp, "fastchg-current-comp",
+ rc, 1);
+ OF_PROP_READ(chip, chip->float_voltage_comp, "float-voltage-comp",
+ rc, 1);
+ if (chip->safety_time != -EINVAL &&
+ (chip->safety_time > chg_time[ARRAY_SIZE(chg_time) - 1])) {
+ dev_err(chip->dev, "Bad charging-timeout-mins %d\n",
+ chip->safety_time);
+ return -EINVAL;
+ }
+ if (chip->prechg_safety_time != -EINVAL &&
+ (chip->prechg_safety_time >
+ prechg_time[ARRAY_SIZE(prechg_time) - 1])) {
+ dev_err(chip->dev, "Bad precharging-timeout-mins %d\n",
+ chip->prechg_safety_time);
+ return -EINVAL;
+ }
+ OF_PROP_READ(chip, chip->resume_delta_mv, "resume-delta-mv", rc, 1);
+ OF_PROP_READ(chip, chip->parallel.min_current_thr_ma,
+ "parallel-usb-min-current-ma", rc, 1);
+ OF_PROP_READ(chip, chip->parallel.min_9v_current_thr_ma,
+ "parallel-usb-9v-min-current-ma", rc, 1);
+ OF_PROP_READ(chip, chip->parallel.allowed_lowering_ma,
+ "parallel-allowed-lowering-ma", rc, 1);
+ if (chip->parallel.min_current_thr_ma != -EINVAL
+ && chip->parallel.min_9v_current_thr_ma != -EINVAL)
+ chip->parallel.avail = true;
+ /*
+ * use the dt values if they exist, otherwise do not touch the params
+ */
+ of_property_read_u32(node, "qcom,parallel-main-chg-fcc-percent",
+ &smbchg_main_chg_fcc_percent);
+ of_property_read_u32(node, "qcom,parallel-main-chg-icl-percent",
+ &smbchg_main_chg_icl_percent);
+ pr_smb(PR_STATUS, "parallel usb thr: %d, 9v thr: %d\n",
+ chip->parallel.min_current_thr_ma,
+ chip->parallel.min_9v_current_thr_ma);
+ OF_PROP_READ(chip, chip->jeita_temp_hard_limit,
+ "jeita-temp-hard-limit", rc, 1);
+ OF_PROP_READ(chip, chip->aicl_rerun_period_s,
+ "aicl-rerun-period-s", rc, 1);
+ OF_PROP_READ(chip, chip->vchg_adc_channel,
+ "vchg-adc-channel-id", rc, 1);
+
+ /* read boolean configuration properties */
+ chip->use_vfloat_adjustments = of_property_read_bool(node,
+ "qcom,autoadjust-vfloat");
+ chip->bmd_algo_disabled = of_property_read_bool(node,
+ "qcom,bmd-algo-disabled");
+ chip->iterm_disabled = of_property_read_bool(node,
+ "qcom,iterm-disabled");
+ chip->soft_vfloat_comp_disabled = of_property_read_bool(node,
+ "qcom,soft-vfloat-comp-disabled");
+ chip->chg_enabled = !(of_property_read_bool(node,
+ "qcom,charging-disabled"));
+ chip->charge_unknown_battery = of_property_read_bool(node,
+ "qcom,charge-unknown-battery");
+ chip->chg_inhibit_en = of_property_read_bool(node,
+ "qcom,chg-inhibit-en");
+ chip->chg_inhibit_source_fg = of_property_read_bool(node,
+ "qcom,chg-inhibit-fg");
+ chip->low_volt_dcin = of_property_read_bool(node,
+ "qcom,low-volt-dcin");
+ chip->force_aicl_rerun = of_property_read_bool(node,
+ "qcom,force-aicl-rerun");
+ chip->skip_usb_suspend_for_fake_battery = of_property_read_bool(node,
+ "qcom,skip-usb-suspend-for-fake-battery");
+
+ /* parse the battery missing detection pin source */
+ rc = of_property_read_string(chip->pdev->dev.of_node,
+ "qcom,bmd-pin-src", &bpd);
+ if (rc) {
+ /* Select BAT_THM as default BPD scheme */
+ chip->bmd_pin_src = BPD_TYPE_DEFAULT;
+ rc = 0;
+ } else {
+ chip->bmd_pin_src = get_bpd(bpd);
+ if (chip->bmd_pin_src < 0) {
+			rc = chip->bmd_pin_src;
+			dev_err(chip->dev,
+				"failed to determine bpd scheme rc=%d\n", rc);
+			return rc;
+ }
+ }
+
+ /* parse the dc power supply configuration */
+ rc = of_property_read_string(node, "qcom,dc-psy-type", &dc_psy_type);
+ if (rc) {
+ chip->dc_psy_type = -EINVAL;
+ rc = 0;
+ } else {
+ if (strcmp(dc_psy_type, "Mains") == 0)
+ chip->dc_psy_type = POWER_SUPPLY_TYPE_MAINS;
+ else if (strcmp(dc_psy_type, "Wireless") == 0)
+ chip->dc_psy_type = POWER_SUPPLY_TYPE_WIRELESS;
+ else if (strcmp(dc_psy_type, "Wipower") == 0)
+ chip->dc_psy_type = POWER_SUPPLY_TYPE_WIPOWER;
+ }
+ if (chip->dc_psy_type != -EINVAL) {
+ OF_PROP_READ(chip, chip->dc_target_current_ma,
+ "dc-psy-ma", rc, 0);
+ if (rc)
+ return rc;
+ if (chip->dc_target_current_ma < DC_MA_MIN
+ || chip->dc_target_current_ma > DC_MA_MAX) {
+ dev_err(chip->dev, "Bad dc mA %d\n",
+ chip->dc_target_current_ma);
+ return -EINVAL;
+ }
+ }
+
+ if (chip->dc_psy_type == POWER_SUPPLY_TYPE_WIPOWER)
+ smb_parse_wipower_dt(chip);
+
+ /* read the bms power supply name */
+ rc = of_property_read_string(node, "qcom,bms-psy-name",
+ &chip->bms_psy_name);
+ if (rc)
+ chip->bms_psy_name = NULL;
+
+ /* read the battery power supply name */
+ rc = of_property_read_string(node, "qcom,battery-psy-name",
+ &chip->battery_psy_name);
+ if (rc)
+ chip->battery_psy_name = "battery";
+
+ /* Get the charger led support property */
+ chip->cfg_chg_led_sw_ctrl =
+ of_property_read_bool(node, "qcom,chg-led-sw-controls");
+ chip->cfg_chg_led_support =
+ of_property_read_bool(node, "qcom,chg-led-support");
+
+ if (of_find_property(node, "qcom,thermal-mitigation",
+ &chip->thermal_levels)) {
+ chip->thermal_mitigation = devm_kzalloc(chip->dev,
+ chip->thermal_levels,
+ GFP_KERNEL);
+
+ if (chip->thermal_mitigation == NULL) {
+ dev_err(chip->dev, "thermal mitigation kzalloc() failed.\n");
+ return -ENOMEM;
+ }
+
+ chip->thermal_levels /= sizeof(int);
+ rc = of_property_read_u32_array(node,
+ "qcom,thermal-mitigation",
+ chip->thermal_mitigation, chip->thermal_levels);
+ if (rc) {
+ dev_err(chip->dev,
+				"Couldn't read thermal limits rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ chip->skip_usb_notification
+ = of_property_read_bool(node,
+ "qcom,skip-usb-notification");
+
+ chip->otg_pinctrl = of_property_read_bool(node, "qcom,otg-pinctrl");
+
+ return 0;
+}
+
+#define SUBTYPE_REG 0x5
+#define SMBCHG_CHGR_SUBTYPE 0x1
+#define SMBCHG_OTG_SUBTYPE 0x8
+#define SMBCHG_BAT_IF_SUBTYPE 0x3
+#define SMBCHG_USB_CHGPTH_SUBTYPE 0x4
+#define SMBCHG_DC_CHGPTH_SUBTYPE 0x5
+#define SMBCHG_MISC_SUBTYPE 0x7
+#define SMBCHG_LITE_CHGR_SUBTYPE 0x51
+#define SMBCHG_LITE_OTG_SUBTYPE 0x58
+#define SMBCHG_LITE_BAT_IF_SUBTYPE 0x53
+#define SMBCHG_LITE_USB_CHGPTH_SUBTYPE 0x54
+#define SMBCHG_LITE_DC_CHGPTH_SUBTYPE 0x55
+#define SMBCHG_LITE_MISC_SUBTYPE 0x57
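+/*
+ * SUBTYPE register values for each child peripheral; the 0x5x "lite"
+ * variants let smbchg_request_irqs() share one walk over the child nodes
+ * for both SCHG and SCHG_LITE parts.
+ */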
+static int smbchg_request_irq(struct smbchg_chip *chip,
+ struct device_node *child,
+ int irq_num, char *irq_name,
+ irqreturn_t (irq_handler)(int irq, void *_chip),
+ int flags)
+{
+ int rc;
+
+	irq_num = of_irq_get_byname(child, irq_name);
+	if (irq_num < 0) {
+		dev_err(chip->dev, "Unable to get %s irq\n", irq_name);
+		return -ENXIO;
+	}
+	rc = devm_request_threaded_irq(chip->dev,
+		irq_num, NULL, irq_handler, flags, irq_name,
+		chip);
+	if (rc < 0) {
+		dev_err(chip->dev, "Unable to request %s irq: %d\n",
+			irq_name, rc);
+		return -ENXIO;
+	}
+	return 0;
+}
+
+static int smbchg_request_irqs(struct smbchg_chip *chip)
+{
+ int rc = 0;
+ unsigned int base;
+ struct device_node *child;
+ u8 subtype;
+ unsigned long flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
+ | IRQF_ONESHOT;
+
+ if (of_get_available_child_count(chip->pdev->dev.of_node) == 0) {
+ pr_err("no child nodes\n");
+ return -ENXIO;
+ }
+
+ for_each_available_child_of_node(chip->pdev->dev.of_node, child) {
+ rc = of_property_read_u32(child, "reg", &base);
+ if (rc < 0) {
+ rc = 0;
+ continue;
+ }
+
+ rc = smbchg_read(chip, &subtype, base + SUBTYPE_REG, 1);
+ if (rc) {
+ dev_err(chip->dev, "Peripheral subtype read failed rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ switch (subtype) {
+ case SMBCHG_CHGR_SUBTYPE:
+ case SMBCHG_LITE_CHGR_SUBTYPE:
+ rc = smbchg_request_irq(chip, child,
+ chip->chg_error_irq, "chg-error",
+ chg_error_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child, chip->taper_irq,
+ "chg-taper-thr", taper_handler,
+ (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+ if (rc < 0)
+ return rc;
+ disable_irq_nosync(chip->taper_irq);
+ rc = smbchg_request_irq(chip, child, chip->chg_term_irq,
+ "chg-tcc-thr", chg_term_handler,
+ (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child, chip->recharge_irq,
+ "chg-rechg-thr", recharge_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child, chip->fastchg_irq,
+ "chg-p2f-thr", fastchg_handler, flags);
+ if (rc < 0)
+ return rc;
+ enable_irq_wake(chip->chg_term_irq);
+ enable_irq_wake(chip->chg_error_irq);
+ enable_irq_wake(chip->fastchg_irq);
+ break;
+ case SMBCHG_BAT_IF_SUBTYPE:
+ case SMBCHG_LITE_BAT_IF_SUBTYPE:
+ rc = smbchg_request_irq(chip, child, chip->batt_hot_irq,
+ "batt-hot", batt_hot_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->batt_warm_irq,
+ "batt-warm", batt_warm_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->batt_cool_irq,
+ "batt-cool", batt_cool_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->batt_cold_irq,
+ "batt-cold", batt_cold_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->batt_missing_irq,
+ "batt-missing", batt_pres_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->vbat_low_irq,
+ "batt-low", vbat_low_handler, flags);
+ if (rc < 0)
+ return rc;
+
+ enable_irq_wake(chip->batt_hot_irq);
+ enable_irq_wake(chip->batt_warm_irq);
+ enable_irq_wake(chip->batt_cool_irq);
+ enable_irq_wake(chip->batt_cold_irq);
+ enable_irq_wake(chip->batt_missing_irq);
+ enable_irq_wake(chip->vbat_low_irq);
+ break;
+ case SMBCHG_USB_CHGPTH_SUBTYPE:
+ case SMBCHG_LITE_USB_CHGPTH_SUBTYPE:
+ rc = smbchg_request_irq(chip, child,
+ &chip->usbin_uv_irq,
+ "usbin-uv", usbin_uv_handler,
+ flags | IRQF_EARLY_RESUME);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ &chip->usbin_ov_irq,
+ "usbin-ov", usbin_ov_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ &chip->src_detect_irq,
+ "usbin-src-det",
+ src_detect_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ &chip->aicl_done_irq,
+ "aicl-done",
+ aicl_done_handler,
+ (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+ if (rc < 0)
+ return rc;
+
+ if (chip->schg_version != QPNP_SCHG_LITE) {
+ rc = smbchg_request_irq(chip, child,
+ &chip->otg_fail_irq, "otg-fail",
+ otg_fail_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ &chip->otg_oc_irq, "otg-oc",
+ otg_oc_handler,
+ (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ &chip->usbid_change_irq, "usbid-change",
+ usbid_change_handler,
+ (IRQF_TRIGGER_FALLING | IRQF_ONESHOT));
+ if (rc < 0)
+ return rc;
+ enable_irq_wake(chip->otg_oc_irq);
+ enable_irq_wake(chip->usbid_change_irq);
+ enable_irq_wake(chip->otg_fail_irq);
+ }
+ enable_irq_wake(chip->usbin_uv_irq);
+ enable_irq_wake(chip->usbin_ov_irq);
+ enable_irq_wake(chip->src_detect_irq);
+ if (chip->parallel.avail && chip->usb_present) {
+ rc = enable_irq_wake(chip->aicl_done_irq);
+ chip->enable_aicl_wake = true;
+ }
+ break;
+ case SMBCHG_DC_CHGPTH_SUBTYPE:
+ case SMBCHG_LITE_DC_CHGPTH_SUBTYPE:
+ rc = smbchg_request_irq(chip, child, &chip->dcin_uv_irq,
+ "dcin-uv", dcin_uv_handler, flags);
+ if (rc < 0)
+ return rc;
+ enable_irq_wake(chip->dcin_uv_irq);
+ break;
+ case SMBCHG_MISC_SUBTYPE:
+ case SMBCHG_LITE_MISC_SUBTYPE:
+ rc = smbchg_request_irq(chip, child, &chip->power_ok_irq,
+ "power-ok", power_ok_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child, &chip->chg_hot_irq,
+ "temp-shutdown", chg_hot_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child, &chip->wdog_timeout_irq,
+ "wdog-timeout",
+ wdog_timeout_handler, flags);
+ if (rc < 0)
+ return rc;
+ enable_irq_wake(chip->chg_hot_irq);
+ enable_irq_wake(chip->wdog_timeout_irq);
+ break;
+ case SMBCHG_OTG_SUBTYPE:
+ break;
+ case SMBCHG_LITE_OTG_SUBTYPE:
+ rc = smbchg_request_irq(chip, child,
+ &chip->usbid_change_irq, "usbid-change",
+ usbid_change_handler,
+ (IRQF_TRIGGER_FALLING | IRQF_ONESHOT));
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ &chip->otg_oc_irq, "otg-oc",
+ otg_oc_handler,
+ (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ &chip->otg_fail_irq, "otg-fail",
+ otg_fail_handler, flags);
+ if (rc < 0)
+ return rc;
+ enable_irq_wake(chip->usbid_change_irq);
+ enable_irq_wake(chip->otg_oc_irq);
+ enable_irq_wake(chip->otg_fail_irq);
+ break;
+ }
+ }
+
+ return rc;
+}
+
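+/*
+ * Fail with -EINVAL if a mandatory peripheral base address was not
+ * found while walking the DT children, without clobbering an error
+ * already recorded in rc.
+ */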
+#define REQUIRE_BASE(chip, base, rc) \
+do { \
+ if (!rc && !chip->base) { \
+ dev_err(chip->dev, "Missing " #base "\n"); \
+ rc = -EINVAL; \
+ } \
+} while (0)
+
+static int smbchg_parse_peripherals(struct smbchg_chip *chip)
+{
+ int rc = 0;
+ unsigned int base;
+ struct device_node *child;
+ u8 subtype;
+
+ if (of_get_available_child_count(chip->pdev->dev.of_node) == 0) {
+ pr_err("no child nodes\n");
+ return -ENXIO;
+ }
+
+ for_each_available_child_of_node(chip->pdev->dev.of_node, child) {
+ rc = of_property_read_u32(child, "reg", &base);
+ if (rc < 0) {
+ rc = 0;
+ continue;
+ }
+
+ rc = smbchg_read(chip, &subtype, base + SUBTYPE_REG, 1);
+ if (rc) {
+ dev_err(chip->dev, "Peripheral subtype read failed rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ switch (subtype) {
+ case SMBCHG_CHGR_SUBTYPE:
+ case SMBCHG_LITE_CHGR_SUBTYPE:
+ chip->chgr_base = base;
+ break;
+ case SMBCHG_BAT_IF_SUBTYPE:
+ case SMBCHG_LITE_BAT_IF_SUBTYPE:
+ chip->bat_if_base = base;
+ break;
+ case SMBCHG_USB_CHGPTH_SUBTYPE:
+ case SMBCHG_LITE_USB_CHGPTH_SUBTYPE:
+ chip->usb_chgpth_base = base;
+ break;
+ case SMBCHG_DC_CHGPTH_SUBTYPE:
+ case SMBCHG_LITE_DC_CHGPTH_SUBTYPE:
+ chip->dc_chgpth_base = base;
+ break;
+ case SMBCHG_MISC_SUBTYPE:
+ case SMBCHG_LITE_MISC_SUBTYPE:
+ chip->misc_base = base;
+ break;
+ case SMBCHG_OTG_SUBTYPE:
+ case SMBCHG_LITE_OTG_SUBTYPE:
+ chip->otg_base = base;
+ break;
+ }
+ }
+
+ REQUIRE_BASE(chip, chgr_base, rc);
+ REQUIRE_BASE(chip, bat_if_base, rc);
+ REQUIRE_BASE(chip, usb_chgpth_base, rc);
+ REQUIRE_BASE(chip, dc_chgpth_base, rc);
+ REQUIRE_BASE(chip, misc_base, rc);
+
+ return rc;
+}
+
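+/* read a single register and log it at PR_DUMP verbosity */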
+static inline void dump_reg(struct smbchg_chip *chip, u16 addr,
+ const char *name)
+{
+ u8 reg;
+
+ smbchg_read(chip, &reg, addr, 1);
+ pr_smb(PR_DUMP, "%s - %04X = %02X\n", name, addr, reg);
+}
+
+/* dumps useful registers for debug */
+static void dump_regs(struct smbchg_chip *chip)
+{
+ u16 addr;
+
+ /* charger peripheral */
+ for (addr = 0xB; addr <= 0x10; addr++)
+ dump_reg(chip, chip->chgr_base + addr, "CHGR Status");
+ for (addr = 0xF0; addr <= 0xFF; addr++)
+ dump_reg(chip, chip->chgr_base + addr, "CHGR Config");
+ /* battery interface peripheral */
+ dump_reg(chip, chip->bat_if_base + RT_STS, "BAT_IF Status");
+ dump_reg(chip, chip->bat_if_base + CMD_CHG_REG, "BAT_IF Command");
+ for (addr = 0xF0; addr <= 0xFB; addr++)
+ dump_reg(chip, chip->bat_if_base + addr, "BAT_IF Config");
+ /* usb charge path peripheral */
+ for (addr = 0x7; addr <= 0x10; addr++)
+ dump_reg(chip, chip->usb_chgpth_base + addr, "USB Status");
+ dump_reg(chip, chip->usb_chgpth_base + CMD_IL, "USB Command");
+ for (addr = 0xF0; addr <= 0xF5; addr++)
+ dump_reg(chip, chip->usb_chgpth_base + addr, "USB Config");
+ /* dc charge path peripheral */
+ dump_reg(chip, chip->dc_chgpth_base + RT_STS, "DC Status");
+ for (addr = 0xF0; addr <= 0xF6; addr++)
+ dump_reg(chip, chip->dc_chgpth_base + addr, "DC Config");
+ /* misc peripheral */
+ dump_reg(chip, chip->misc_base + IDEV_STS, "MISC Status");
+ dump_reg(chip, chip->misc_base + RT_STS, "MISC Status");
+ for (addr = 0xF0; addr <= 0xF3; addr++)
+ dump_reg(chip, chip->misc_base + addr, "MISC CFG");
+}
+
+static int create_debugfs_entries(struct smbchg_chip *chip)
+{
+ struct dentry *ent;
+
+ chip->debug_root = debugfs_create_dir("qpnp-smbcharger", NULL);
+ if (!chip->debug_root) {
+ dev_err(chip->dev, "Couldn't create debug dir\n");
+ return -EINVAL;
+ }
+
+ ent = debugfs_create_file("force_dcin_icl_check",
+ S_IFREG | S_IWUSR | S_IRUGO,
+ chip->debug_root, chip,
+ &force_dcin_icl_ops);
+ if (!ent) {
+ dev_err(chip->dev,
+ "Couldn't create force dcin icl check file\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
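+/*
+ * Read the PMIC revid peripheral to identify the charger variant
+ * (SCHG vs. SCHG_LITE), select the matching current tables and set
+ * workaround flags for known silicon issues.
+ */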
+static int smbchg_check_chg_version(struct smbchg_chip *chip)
+{
+ struct pmic_revid_data *pmic_rev_id;
+ struct device_node *revid_dev_node;
+ int rc;
+
+ revid_dev_node = of_parse_phandle(chip->pdev->dev.of_node,
+ "qcom,pmic-revid", 0);
+ if (!revid_dev_node) {
+ pr_err("Missing qcom,pmic-revid property - driver failed\n");
+ return -EINVAL;
+ }
+
+ pmic_rev_id = get_revid_data(revid_dev_node);
+ if (IS_ERR(pmic_rev_id)) {
+ rc = PTR_ERR(pmic_rev_id);
+ if (rc != -EPROBE_DEFER)
+ pr_err("Unable to get pmic_revid rc=%d\n", rc);
+ return rc;
+ }
+
+ switch (pmic_rev_id->pmic_subtype) {
+ case PMI8994:
+ chip->wa_flags |= SMBCHG_AICL_DEGLITCH_WA
+ | SMBCHG_BATT_OV_WA
+ | SMBCHG_CC_ESR_WA
+ | SMBCHG_RESTART_WA;
+ use_pmi8994_tables(chip);
+ chip->schg_version = QPNP_SCHG;
+ break;
+ case PMI8950:
+ case PMI8937:
+ chip->wa_flags |= SMBCHG_BATT_OV_WA;
+ if (pmic_rev_id->rev4 < 2) { /* PMI8950 1.0 */
+ chip->wa_flags |= SMBCHG_AICL_DEGLITCH_WA;
+ } else { /* rev > PMI8950 v1.0 */
+ chip->wa_flags |= SMBCHG_HVDCP_9V_EN_WA
+ | SMBCHG_USB100_WA;
+ }
+ use_pmi8994_tables(chip);
+ chip->tables.aicl_rerun_period_table =
+ aicl_rerun_period_schg_lite;
+ chip->tables.aicl_rerun_period_len =
+ ARRAY_SIZE(aicl_rerun_period_schg_lite);
+
+ chip->schg_version = QPNP_SCHG_LITE;
+ if (pmic_rev_id->pmic_subtype == PMI8937)
+ chip->hvdcp_not_supported = true;
+ break;
+ case PMI8996:
+ chip->wa_flags |= SMBCHG_CC_ESR_WA
+ | SMBCHG_FLASH_ICL_DISABLE_WA
+ | SMBCHG_RESTART_WA
+ | SMBCHG_FLASH_BUCK_SWITCH_FREQ_WA;
+ use_pmi8996_tables(chip);
+ chip->schg_version = QPNP_SCHG;
+ break;
+ default:
+ pr_err("PMIC subtype %d not supported, WA flags not set\n",
+ pmic_rev_id->pmic_subtype);
+ }
+
+ pr_smb(PR_STATUS, "pmic=%s, wa_flags=0x%x, hvdcp_supported=%s\n",
+ pmic_rev_id->pmic_name, chip->wa_flags,
+ chip->hvdcp_not_supported ? "false" : "true");
+
+ return 0;
+}
+
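+/*
+ * On PMICs needing the restart workaround, a DCP that attached before
+ * this driver probed may have missed HVDCP detection; fake a cable
+ * removal/insertion so APSD reruns, holding a 300mA ICL vote in place
+ * for the duration of the cycle.
+ */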
+static void rerun_hvdcp_det_if_necessary(struct smbchg_chip *chip)
+{
+ enum power_supply_type usb_supply_type;
+ char *usb_type_name;
+ int rc;
+
+ if (!(chip->wa_flags & SMBCHG_RESTART_WA))
+ return;
+
+ read_usb_type(chip, &usb_type_name, &usb_supply_type);
+ if (usb_supply_type == POWER_SUPPLY_TYPE_USB_DCP
+ && !is_hvdcp_present(chip)) {
+ pr_smb(PR_STATUS, "DCP found rerunning APSD\n");
+ rc = vote(chip->usb_icl_votable,
+ CHG_SUSPEND_WORKAROUND_ICL_VOTER, true, 300);
+ if (rc < 0)
+ pr_err("Couldn't vote for 300mA for suspend wa, going ahead rc=%d\n",
+ rc);
+
+ pr_smb(PR_STATUS, "Faking Removal\n");
+ fake_insertion_removal(chip, false);
+ msleep(500);
+ pr_smb(PR_STATUS, "Faking Insertion\n");
+ fake_insertion_removal(chip, true);
+
+ read_usb_type(chip, &usb_type_name, &usb_supply_type);
+ if (usb_supply_type != POWER_SUPPLY_TYPE_USB_DCP) {
+ msleep(500);
+ pr_smb(PR_STATUS, "Fake Removal again as type!=DCP\n");
+ fake_insertion_removal(chip, false);
+ msleep(500);
+ pr_smb(PR_STATUS, "Fake Insert again as type!=DCP\n");
+ fake_insertion_removal(chip, true);
+ }
+
+ rc = vote(chip->usb_icl_votable,
+ CHG_SUSPEND_WORKAROUND_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't vote for 0 for suspend wa, going ahead rc=%d\n",
+ rc);
+ }
+}
+
+static int smbchg_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct smbchg_chip *chip;
+ struct power_supply *typec_psy = NULL;
+ struct qpnp_vadc_chip *vadc_dev, *vchg_vadc_dev;
+ const char *typec_psy_name;
+ struct power_supply_config usb_psy_cfg = {};
+ struct power_supply_config batt_psy_cfg = {};
+ struct power_supply_config dc_psy_cfg = {};
+
+ if (of_property_read_bool(pdev->dev.of_node, "qcom,external-typec")) {
+ /* read the type power supply name */
+ rc = of_property_read_string(pdev->dev.of_node,
+ "qcom,typec-psy-name", &typec_psy_name);
+ if (rc) {
+ pr_err("failed to get prop typec-psy-name rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ typec_psy = power_supply_get_by_name(typec_psy_name);
+ if (!typec_psy) {
+ pr_smb(PR_STATUS,
+ "Type-C supply not found, deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+ }
+
+ vadc_dev = NULL;
+ if (of_find_property(pdev->dev.of_node, "qcom,dcin-vadc", NULL)) {
+ vadc_dev = qpnp_get_vadc(&pdev->dev, "dcin");
+ if (IS_ERR(vadc_dev)) {
+ rc = PTR_ERR(vadc_dev);
+ if (rc != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Couldn't get vadc rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ vchg_vadc_dev = NULL;
+ if (of_find_property(pdev->dev.of_node, "qcom,vchg_sns-vadc", NULL)) {
+ vchg_vadc_dev = qpnp_get_vadc(&pdev->dev, "vchg_sns");
+ if (IS_ERR(vchg_vadc_dev)) {
+ rc = PTR_ERR(vchg_vadc_dev);
+ if (rc != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Couldn't get vadc 'vchg' rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!chip->regmap) {
+ dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+ return -EINVAL;
+ }
+
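+ /*
+ * Charger limits are arbitrated through votables: each client casts
+ * a vote and the effective value is the aggregate (minimum for the
+ * current limits, set-any for the suspend/disable controls), applied
+ * through the registered callback.
+ */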
+ chip->fcc_votable = create_votable("BATT_FCC",
+ VOTE_MIN,
+ set_fastchg_current_vote_cb, chip);
+ if (IS_ERR(chip->fcc_votable)) {
+ rc = PTR_ERR(chip->fcc_votable);
+ goto votables_cleanup;
+ }
+
+ chip->usb_icl_votable = create_votable("USB_ICL",
+ VOTE_MIN,
+ set_usb_current_limit_vote_cb, chip);
+ if (IS_ERR(chip->usb_icl_votable)) {
+ rc = PTR_ERR(chip->usb_icl_votable);
+ goto votables_cleanup;
+ }
+
+ chip->dc_icl_votable = create_votable("DCIN_ICL",
+ VOTE_MIN,
+ set_dc_current_limit_vote_cb, chip);
+ if (IS_ERR(chip->dc_icl_votable)) {
+ rc = PTR_ERR(chip->dc_icl_votable);
+ goto votables_cleanup;
+ }
+
+ chip->usb_suspend_votable = create_votable("USB_SUSPEND",
+ VOTE_SET_ANY,
+ usb_suspend_vote_cb, chip);
+ if (IS_ERR(chip->usb_suspend_votable)) {
+ rc = PTR_ERR(chip->usb_suspend_votable);
+ goto votables_cleanup;
+ }
+
+ chip->dc_suspend_votable = create_votable("DC_SUSPEND",
+ VOTE_SET_ANY,
+ dc_suspend_vote_cb, chip);
+ if (IS_ERR(chip->dc_suspend_votable)) {
+ rc = PTR_ERR(chip->dc_suspend_votable);
+ goto votables_cleanup;
+ }
+
+ chip->battchg_suspend_votable = create_votable("BATTCHG_SUSPEND",
+ VOTE_SET_ANY,
+ charging_suspend_vote_cb, chip);
+ if (IS_ERR(chip->battchg_suspend_votable)) {
+ rc = PTR_ERR(chip->battchg_suspend_votable);
+ goto votables_cleanup;
+ }
+
+ chip->hw_aicl_rerun_disable_votable = create_votable("HWAICL_DISABLE",
+ VOTE_SET_ANY,
+ smbchg_hw_aicl_rerun_disable_cb, chip);
+ if (IS_ERR(chip->hw_aicl_rerun_disable_votable)) {
+ rc = PTR_ERR(chip->hw_aicl_rerun_disable_votable);
+ goto votables_cleanup;
+ }
+
+ chip->hw_aicl_rerun_enable_indirect_votable = create_votable(
+ "HWAICL_ENABLE_INDIRECT",
+ VOTE_SET_ANY,
+ smbchg_hw_aicl_rerun_enable_indirect_cb, chip);
+ if (IS_ERR(chip->hw_aicl_rerun_enable_indirect_votable)) {
+ rc = PTR_ERR(chip->hw_aicl_rerun_enable_indirect_votable);
+ goto votables_cleanup;
+ }
+
+ chip->aicl_deglitch_short_votable = create_votable(
+ "HWAICL_SHORT_DEGLITCH",
+ VOTE_SET_ANY,
+ smbchg_aicl_deglitch_config_cb, chip);
+ if (IS_ERR(chip->aicl_deglitch_short_votable)) {
+ rc = PTR_ERR(chip->aicl_deglitch_short_votable);
+ goto votables_cleanup;
+ }
+
+ INIT_WORK(&chip->usb_set_online_work, smbchg_usb_update_online_work);
+ INIT_DELAYED_WORK(&chip->parallel_en_work,
+ smbchg_parallel_usb_en_work);
+ INIT_DELAYED_WORK(&chip->vfloat_adjust_work, smbchg_vfloat_adjust_work);
+ INIT_DELAYED_WORK(&chip->hvdcp_det_work, smbchg_hvdcp_det_work);
+ init_completion(&chip->src_det_lowered);
+ init_completion(&chip->src_det_raised);
+ init_completion(&chip->usbin_uv_lowered);
+ init_completion(&chip->usbin_uv_raised);
+ chip->vadc_dev = vadc_dev;
+ chip->vchg_vadc_dev = vchg_vadc_dev;
+ chip->pdev = pdev;
+ chip->dev = &pdev->dev;
+
+ chip->typec_psy = typec_psy;
+ chip->fake_battery_soc = -EINVAL;
+ chip->usb_online = -EINVAL;
+ dev_set_drvdata(&pdev->dev, chip);
+
+ spin_lock_init(&chip->sec_access_lock);
+ mutex_init(&chip->therm_lvl_lock);
+ mutex_init(&chip->usb_set_online_lock);
+ mutex_init(&chip->parallel.lock);
+ mutex_init(&chip->taper_irq_lock);
+ mutex_init(&chip->pm_lock);
+ mutex_init(&chip->wipower_config);
+ mutex_init(&chip->usb_status_lock);
+ device_init_wakeup(chip->dev, true);
+
+ rc = smbchg_parse_peripherals(chip);
+ if (rc) {
+ dev_err(chip->dev, "Error parsing DT peripherals: %d\n", rc);
+ goto votables_cleanup;
+ }
+
+ rc = smbchg_check_chg_version(chip);
+ if (rc) {
+ pr_err("Unable to check schg version rc=%d\n", rc);
+ goto votables_cleanup;
+ }
+
+ rc = smb_parse_dt(chip);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Unable to parse DT nodes: %d\n", rc);
+ goto votables_cleanup;
+ }
+
+ rc = smbchg_regulator_init(chip);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Couldn't initialize regulator rc=%d\n", rc);
+ goto votables_cleanup;
+ }
+
+ chip->extcon = devm_extcon_dev_allocate(chip->dev, smbchg_extcon_cable);
+ if (IS_ERR(chip->extcon)) {
+ dev_err(chip->dev, "failed to allocate extcon device\n");
+ rc = PTR_ERR(chip->extcon);
+ goto votables_cleanup;
+ }
+
+ rc = devm_extcon_dev_register(chip->dev, chip->extcon);
+ if (rc) {
+ dev_err(chip->dev, "failed to register extcon device\n");
+ goto votables_cleanup;
+ }
+
+ chip->usb_psy_d.name = "usb";
+ chip->usb_psy_d.type = POWER_SUPPLY_TYPE_USB;
+ chip->usb_psy_d.get_property = smbchg_usb_get_property;
+ chip->usb_psy_d.set_property = smbchg_usb_set_property;
+ chip->usb_psy_d.properties = smbchg_usb_properties;
+ chip->usb_psy_d.num_properties = ARRAY_SIZE(smbchg_usb_properties);
+ chip->usb_psy_d.property_is_writeable = smbchg_usb_is_writeable;
+
+ usb_psy_cfg.drv_data = chip;
+ usb_psy_cfg.supplied_to = smbchg_usb_supplicants;
+ usb_psy_cfg.num_supplicants = ARRAY_SIZE(smbchg_usb_supplicants);
+
+ chip->usb_psy = devm_power_supply_register(chip->dev,
+ &chip->usb_psy_d, &usb_psy_cfg);
+ if (IS_ERR(chip->usb_psy)) {
+ dev_err(&pdev->dev, "Unable to register usb_psy rc = %ld\n",
+ PTR_ERR(chip->usb_psy));
+ rc = PTR_ERR(chip->usb_psy);
+ goto votables_cleanup;
+ }
+
+ if (of_find_property(chip->dev->of_node, "dpdm-supply", NULL)) {
+ chip->dpdm_reg = devm_regulator_get(chip->dev, "dpdm");
+ if (IS_ERR(chip->dpdm_reg)) {
+ rc = PTR_ERR(chip->dpdm_reg);
+ goto votables_cleanup;
+ }
+ }
+
+ rc = smbchg_hw_init(chip);
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "Unable to intialize hardware rc = %d\n", rc);
+ goto out;
+ }
+
+ rc = determine_initial_status(chip);
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "Unable to determine init status rc = %d\n", rc);
+ goto out;
+ }
+
+ chip->previous_soc = -EINVAL;
+ chip->batt_psy_d.name = chip->battery_psy_name;
+ chip->batt_psy_d.type = POWER_SUPPLY_TYPE_BATTERY;
+ chip->batt_psy_d.get_property = smbchg_battery_get_property;
+ chip->batt_psy_d.set_property = smbchg_battery_set_property;
+ chip->batt_psy_d.properties = smbchg_battery_properties;
+ chip->batt_psy_d.num_properties = ARRAY_SIZE(smbchg_battery_properties);
+ chip->batt_psy_d.external_power_changed = smbchg_external_power_changed;
+ chip->batt_psy_d.property_is_writeable = smbchg_battery_is_writeable;
+
+ batt_psy_cfg.drv_data = chip;
+ batt_psy_cfg.num_supplicants = 0;
+ chip->batt_psy = devm_power_supply_register(chip->dev,
+ &chip->batt_psy_d,
+ &batt_psy_cfg);
+ if (IS_ERR(chip->batt_psy)) {
+ dev_err(&pdev->dev,
+ "Unable to register batt_psy rc = %ld\n",
+ PTR_ERR(chip->batt_psy));
+ rc = PTR_ERR(chip->batt_psy);
+ goto out;
+ }
+
+ if (chip->dc_psy_type != -EINVAL) {
+ chip->dc_psy_d.name = "dc";
+ chip->dc_psy_d.type = chip->dc_psy_type;
+ chip->dc_psy_d.get_property = smbchg_dc_get_property;
+ chip->dc_psy_d.set_property = smbchg_dc_set_property;
+ chip->dc_psy_d.property_is_writeable = smbchg_dc_is_writeable;
+ chip->dc_psy_d.properties = smbchg_dc_properties;
+ chip->dc_psy_d.num_properties
+ = ARRAY_SIZE(smbchg_dc_properties);
+
+ dc_psy_cfg.drv_data = chip;
+ dc_psy_cfg.num_supplicants
+ = ARRAY_SIZE(smbchg_dc_supplicants);
+ dc_psy_cfg.supplied_to = smbchg_dc_supplicants;
+
+ chip->dc_psy = devm_power_supply_register(chip->dev,
+ &chip->dc_psy_d,
+ &dc_psy_cfg);
+ if (IS_ERR(chip->dc_psy)) {
+ dev_err(&pdev->dev,
+ "Unable to register dc_psy rc = %ld\n",
+ PTR_ERR(chip->dc_psy));
+ rc = PTR_ERR(chip->dc_psy);
+ goto out;
+ }
+ }
+
+ if (chip->cfg_chg_led_support &&
+ chip->schg_version == QPNP_SCHG_LITE) {
+ rc = smbchg_register_chg_led(chip);
+ if (rc) {
+ dev_err(chip->dev,
+ "Unable to register charger led: %d\n",
+ rc);
+ goto out;
+ }
+
+ rc = smbchg_chg_led_controls(chip);
+ if (rc) {
+ dev_err(chip->dev,
+ "Failed to set charger led controld bit: %d\n",
+ rc);
+ goto unregister_led_class;
+ }
+ }
+
+ rc = smbchg_request_irqs(chip);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Unable to request irqs rc = %d\n", rc);
+ goto unregister_led_class;
+ }
+
+ rerun_hvdcp_det_if_necessary(chip);
+
+ dump_regs(chip);
+ create_debugfs_entries(chip);
+ dev_info(chip->dev,
+ "SMBCHG successfully probe Charger version=%s Revision DIG:%d.%d ANA:%d.%d batt=%d dc=%d usb=%d\n",
+ version_str[chip->schg_version],
+ chip->revision[DIG_MAJOR], chip->revision[DIG_MINOR],
+ chip->revision[ANA_MAJOR], chip->revision[ANA_MINOR],
+ get_prop_batt_present(chip),
+ chip->dc_present, chip->usb_present);
+ return 0;
+
+unregister_led_class:
+ if (chip->cfg_chg_led_support && chip->schg_version == QPNP_SCHG_LITE)
+ led_classdev_unregister(&chip->led_cdev);
+out:
+ handle_usb_removal(chip);
+votables_cleanup:
+ if (chip->aicl_deglitch_short_votable)
+ destroy_votable(chip->aicl_deglitch_short_votable);
+ if (chip->hw_aicl_rerun_enable_indirect_votable)
+ destroy_votable(chip->hw_aicl_rerun_enable_indirect_votable);
+ if (chip->hw_aicl_rerun_disable_votable)
+ destroy_votable(chip->hw_aicl_rerun_disable_votable);
+ if (chip->battchg_suspend_votable)
+ destroy_votable(chip->battchg_suspend_votable);
+ if (chip->dc_suspend_votable)
+ destroy_votable(chip->dc_suspend_votable);
+ if (chip->usb_suspend_votable)
+ destroy_votable(chip->usb_suspend_votable);
+ if (chip->dc_icl_votable)
+ destroy_votable(chip->dc_icl_votable);
+ if (chip->usb_icl_votable)
+ destroy_votable(chip->usb_icl_votable);
+ if (chip->fcc_votable)
+ destroy_votable(chip->fcc_votable);
+ return rc;
+}
+
+static int smbchg_remove(struct platform_device *pdev)
+{
+ struct smbchg_chip *chip = dev_get_drvdata(&pdev->dev);
+
+ debugfs_remove_recursive(chip->debug_root);
+
+ destroy_votable(chip->aicl_deglitch_short_votable);
+ destroy_votable(chip->hw_aicl_rerun_enable_indirect_votable);
+ destroy_votable(chip->hw_aicl_rerun_disable_votable);
+ destroy_votable(chip->battchg_suspend_votable);
+ destroy_votable(chip->dc_suspend_votable);
+ destroy_votable(chip->usb_suspend_votable);
+ destroy_votable(chip->dc_icl_votable);
+ destroy_votable(chip->usb_icl_votable);
+ destroy_votable(chip->fcc_votable);
+
+ return 0;
+}
+
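+/*
+ * If an HVDCP adapter is attached at shutdown, disable parallel
+ * charging, quiesce the IRQs, step the adapter back down to 5V and
+ * disable HVDCP, then fake a reconnect so the next boot starts
+ * detection from a known 5V state.
+ */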
+static void smbchg_shutdown(struct platform_device *pdev)
+{
+ struct smbchg_chip *chip = dev_get_drvdata(&pdev->dev);
+ int rc;
+
+ if (!(chip->wa_flags & SMBCHG_RESTART_WA))
+ return;
+
+ if (!is_hvdcp_present(chip))
+ return;
+
+ pr_smb(PR_MISC, "Disable Parallel\n");
+ mutex_lock(&chip->parallel.lock);
+ smbchg_parallel_en = 0;
+ smbchg_parallel_usb_disable(chip);
+ mutex_unlock(&chip->parallel.lock);
+
+ pr_smb(PR_MISC, "Disable all interrupts\n");
+ disable_irq(chip->aicl_done_irq);
+ disable_irq(chip->batt_cold_irq);
+ disable_irq(chip->batt_cool_irq);
+ disable_irq(chip->batt_hot_irq);
+ disable_irq(chip->batt_missing_irq);
+ disable_irq(chip->batt_warm_irq);
+ disable_irq(chip->chg_error_irq);
+ disable_irq(chip->chg_hot_irq);
+ disable_irq(chip->chg_term_irq);
+ disable_irq(chip->dcin_uv_irq);
+ disable_irq(chip->fastchg_irq);
+ disable_irq(chip->otg_fail_irq);
+ disable_irq(chip->otg_oc_irq);
+ disable_irq(chip->power_ok_irq);
+ disable_irq(chip->recharge_irq);
+ disable_irq(chip->src_detect_irq);
+ disable_irq(chip->taper_irq);
+ disable_irq(chip->usbid_change_irq);
+ disable_irq(chip->usbin_ov_irq);
+ disable_irq(chip->usbin_uv_irq);
+ disable_irq(chip->vbat_low_irq);
+ disable_irq(chip->wdog_timeout_irq);
+
+ /* remove all votes for short deglitch */
+ vote(chip->aicl_deglitch_short_votable,
+ VARB_WORKAROUND_SHORT_DEGLITCH_VOTER, false, 0);
+ vote(chip->aicl_deglitch_short_votable,
+ HVDCP_SHORT_DEGLITCH_VOTER, false, 0);
+
+ /* vote to ensure AICL rerun is enabled */
+ rc = vote(chip->hw_aicl_rerun_enable_indirect_votable,
+ SHUTDOWN_WORKAROUND_VOTER, true, 0);
+ if (rc < 0)
+ pr_err("Couldn't vote to enable indirect AICL rerun\n");
+ rc = vote(chip->hw_aicl_rerun_disable_votable,
+ WEAK_CHARGER_HW_AICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't vote to enable AICL rerun\n");
+
+ /* switch to 5V HVDCP */
+ pr_smb(PR_MISC, "Switch to 5V HVDCP\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_5V);
+ if (rc < 0) {
+ pr_err("Couldn't configure HVDCP 5V rc=%d\n", rc);
+ return;
+ }
+
+ pr_smb(PR_MISC, "Wait 500mS to lower to 5V\n");
+ /* wait for HVDCP to lower to 5V */
+ msleep(500);
+ /*
+ * Check that the same hvdcp session is still in progress: src_det
+ * should be high and we should still be in 5V hvdcp.
+ */
+ if (!is_src_detect_high(chip)) {
+ pr_smb(PR_MISC, "src det low after 500mS sleep\n");
+ return;
+ }
+
+ /* disable HVDCP */
+ pr_smb(PR_MISC, "Disable HVDCP\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_EN_BIT, 0);
+ if (rc < 0)
+ pr_err("Couldn't disable HVDCP rc=%d\n", rc);
+
+ chip->hvdcp_3_det_ignore_uv = true;
+ /* fake a removal */
+ pr_smb(PR_MISC, "Faking Removal\n");
+ rc = fake_insertion_removal(chip, false);
+ if (rc < 0)
+ pr_err("Couldn't fake removal HVDCP Removed rc=%d\n", rc);
+
+ /* fake an insertion */
+ pr_smb(PR_MISC, "Faking Insertion\n");
+ rc = fake_insertion_removal(chip, true);
+ if (rc < 0)
+ pr_err("Couldn't fake insertion rc=%d\n", rc);
+
+ pr_smb(PR_MISC, "Wait 1S to settle\n");
+ msleep(1000);
+ chip->hvdcp_3_det_ignore_uv = false;
+
+ pr_smb(PR_STATUS, "wrote power off configurations\n");
+}
+
+static const struct dev_pm_ops smbchg_pm_ops = {
+};
+
+MODULE_DEVICE_TABLE(spmi, smbchg_id);
+
+static struct platform_driver smbchg_driver = {
+ .driver = {
+ .name = "qpnp-smbcharger",
+ .owner = THIS_MODULE,
+ .of_match_table = smbchg_match_table,
+ .pm = &smbchg_pm_ops,
+ },
+ .probe = smbchg_probe,
+ .remove = smbchg_remove,
+ .shutdown = smbchg_shutdown,
+};
+
+static int __init smbchg_init(void)
+{
+ return platform_driver_register(&smbchg_driver);
+}
+
+static void __exit smbchg_exit(void)
+{
+ platform_driver_unregister(&smbchg_driver);
+}
+
+module_init(smbchg_init);
+module_exit(smbchg_exit);
+
+MODULE_DESCRIPTION("QPNP SMB Charger");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qpnp-smbcharger");
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index b3c2b67abfde..93512f155c52 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -2224,12 +2224,6 @@ int smblib_get_prop_usb_voltage_max(struct smb_charger *chg,
int smblib_get_prop_usb_voltage_now(struct smb_charger *chg,
union power_supply_propval *val)
{
- int rc = 0;
-
- rc = smblib_get_prop_usb_present(chg, val);
- if (rc < 0 || !val->intval)
- return rc;
-
if (!chg->iio.usbin_v_chan ||
PTR_ERR(chg->iio.usbin_v_chan) == -EPROBE_DEFER)
chg->iio.usbin_v_chan = iio_channel_get(chg->dev, "usbin_v");
@@ -3740,8 +3734,165 @@ irqreturn_t smblib_handle_usb_source_change(int irq, void *data)
return IRQ_HANDLED;
}
+static int typec_try_sink(struct smb_charger *chg)
+{
+ union power_supply_propval val;
+ bool debounce_done, vbus_detected, sink;
+ u8 stat;
+ int exit_mode = ATTACHED_SRC, rc;
+
+ /* ignore typec interrupts while try.snk is in progress */
+ chg->try_sink_active = true;
+
+ /* force SNK mode */
+ val.intval = POWER_SUPPLY_TYPEC_PR_SINK;
+ rc = smblib_set_prop_typec_power_role(chg, &val);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set UFP mode rc=%d\n", rc);
+ goto try_sink_exit;
+ }
+
+ /* reduce Tccdebounce time to ~20ms */
+ rc = smblib_masked_write(chg, MISC_CFG_REG,
+ TCC_DEBOUNCE_20MS_BIT, TCC_DEBOUNCE_20MS_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set MISC_CFG_REG rc=%d\n", rc);
+ goto try_sink_exit;
+ }
+
+ /*
+ * give opportunity to the other side to be a SRC,
+ * for tDRPTRY + Tccdebounce time
+ */
+ msleep(120);
+
+ rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n",
+ rc);
+ goto try_sink_exit;
+ }
+
+ debounce_done = stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT;
+
+ if (!debounce_done)
+ /*
+ * The other side didn't switch to source, either it
+ * is an adamant sink or is removed go back to showing Rp
+ */
+ goto try_wait_src;
+
+ /*
+ * We are in force sink mode and the other side has switched to
+ * showing Rp. Config DRP in case the other side removes Rp so we
+ * can quickly (20ms) switch to showing our Rp. Note that the spec
+ * needs us to show Rp for 80mS while the drp DFP residency is just
+ * 54mS. But 54mS is plenty of time for us to react and force Rp for
+ * the remaining 26mS.
+ */
+ val.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+ rc = smblib_set_prop_typec_power_role(chg, &val);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set DFP mode rc=%d\n",
+ rc);
+ goto try_sink_exit;
+ }
+
+ /*
+ * while other side is Rp, wait for VBUS from it; exit if other side
+ * removes Rp
+ */
+ do {
+ rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n",
+ rc);
+ goto try_sink_exit;
+ }
+
+ debounce_done = stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT;
+ vbus_detected = stat & TYPEC_VBUS_STATUS_BIT;
+
+ /* Successfully transitioned to ATTACHED.SNK */
+ if (vbus_detected && debounce_done) {
+ exit_mode = ATTACHED_SINK;
+ goto try_sink_exit;
+ }
+
+ /*
+ * Ensure sink since drp may put us in source if other
+ * side switches back to Rd
+ */
+ sink = !(stat & UFP_DFP_MODE_STATUS_BIT);
+
+ usleep_range(1000, 2000);
+ } while (debounce_done && sink);
+
+try_wait_src:
+ /*
+ * Transition to trywait.SRC state. Check if the other side still
+ * wants to be a SNK or has been removed.
+ */
+ val.intval = POWER_SUPPLY_TYPEC_PR_SOURCE;
+ rc = smblib_set_prop_typec_power_role(chg, &val);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set UFP mode rc=%d\n", rc);
+ goto try_sink_exit;
+ }
+
+ /* Need to be in this state for tDRPTRY time, 75ms~150ms */
+ msleep(80);
+
+ rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+ goto try_sink_exit;
+ }
+
+ debounce_done = stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT;
+
+ if (debounce_done)
+ /* the other side wants to be a sink */
+ exit_mode = ATTACHED_SRC;
+ else
+ /* the other side is detached */
+ exit_mode = UNATTACHED_SINK;
+
+try_sink_exit:
+ /* release forcing of SRC/SNK mode */
+ val.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+ rc = smblib_set_prop_typec_power_role(chg, &val);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't set DFP mode rc=%d\n", rc);
+
+ /* revert Tccdebounce time back to ~120ms */
+ rc = smblib_masked_write(chg, MISC_CFG_REG, TCC_DEBOUNCE_20MS_BIT, 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't set MISC_CFG_REG rc=%d\n", rc);
+
+ chg->try_sink_active = false;
+
+ return exit_mode;
+}
+
static void typec_sink_insertion(struct smb_charger *chg)
{
+ int exit_mode;
+
+ /*
+ * Try.SNK entry status - ATTACHWAIT.SRC state and detected Rd-open
+ * or RD-Ra for TccDebounce time.
+ */
+
+ if (*chg->try_sink_enabled) {
+ exit_mode = typec_try_sink(chg);
+
+ if (exit_mode != ATTACHED_SRC) {
+ smblib_usb_typec_change(chg);
+ return;
+ }
+ }
+
/* when a sink is inserted we should not wait on hvdcp timeout to
* enable pd
*/
@@ -3999,7 +4150,7 @@ static void smblib_handle_typec_cc_state_change(struct smb_charger *chg)
smblib_typec_mode_name[chg->typec_mode]);
}
-static void smblib_usb_typec_change(struct smb_charger *chg)
+void smblib_usb_typec_change(struct smb_charger *chg)
{
int rc;
@@ -4035,7 +4186,8 @@ irqreturn_t smblib_handle_usb_typec_change(int irq, void *data)
return IRQ_HANDLED;
}
- if (chg->cc2_detach_wa_active || chg->typec_en_dis_active) {
+ if (chg->cc2_detach_wa_active || chg->typec_en_dis_active ||
+ chg->try_sink_active) {
smblib_dbg(chg, PR_INTERRUPT, "Ignoring since %s active\n",
chg->cc2_detach_wa_active ?
"cc2_detach_wa" : "typec_en_dis");
@@ -4063,6 +4215,14 @@ irqreturn_t smblib_handle_high_duty_cycle(int irq, void *data)
struct smb_charger *chg = irq_data->parent_data;
chg->is_hdc = true;
+ /*
+ * Disable usb IRQs after the flag set and re-enable IRQs after
+ * the flag cleared in the delayed work queue, to avoid any IRQ
+ * storming during the delays
+ */
+ if (chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq)
+ disable_irq_nosync(chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq);
+
schedule_delayed_work(&chg->clear_hdc_work, msecs_to_jiffies(60));
return IRQ_HANDLED;
@@ -4240,6 +4400,8 @@ static void clear_hdc_work(struct work_struct *work)
clear_hdc_work.work);
chg->is_hdc = 0;
+ if (chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq)
+ enable_irq(chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq);
}
static void rdstd_cc2_detach_work(struct work_struct *work)
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 19c0d19106d6..f292ca09f532 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -128,6 +128,12 @@ enum smb_irq_index {
SMB_IRQ_MAX,
};
+enum try_sink_exit_mode {
+ ATTACHED_SRC = 0,
+ ATTACHED_SINK,
+ UNATTACHED_SINK,
+};
+
struct smb_irq_info {
const char *name;
const irq_handler_t handler;
@@ -232,6 +238,7 @@ struct smb_charger {
struct smb_params param;
struct smb_iio iio;
int *debug_mask;
+ int *try_sink_enabled;
enum smb_mode mode;
struct smb_chg_freq chg_freq;
int smb_version;
@@ -341,6 +348,7 @@ struct smb_charger {
u32 wa_flags;
bool cc2_detach_wa_active;
bool typec_en_dis_active;
+ bool try_sink_active;
int boost_current_ua;
int temp_speed_reading_count;
@@ -518,6 +526,7 @@ int smblib_get_prop_pr_swap_in_progress(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg,
const union power_supply_propval *val);
+void smblib_usb_typec_change(struct smb_charger *chg);
int smblib_init(struct smb_charger *chg);
int smblib_deinit(struct smb_charger *chg);
diff --git a/drivers/power/supply/qcom/smb-reg.h b/drivers/power/supply/qcom/smb-reg.h
index d8671ab1fd06..4ddb085e9300 100644
--- a/drivers/power/supply/qcom/smb-reg.h
+++ b/drivers/power/supply/qcom/smb-reg.h
@@ -624,6 +624,7 @@ enum {
#define TAPER_TIMER_SEL_CFG_REG (USBIN_BASE + 0x64)
#define TYPEC_SPARE_CFG_BIT BIT(7)
+#define TYPEC_DRP_DFP_TIME_CFG_BIT BIT(5)
#define TAPER_TIMER_SEL_MASK GENMASK(1, 0)
#define USBIN_LOAD_CFG_REG (USBIN_BASE + 0x65)
diff --git a/drivers/power/supply/qcom/step-chg-jeita.c b/drivers/power/supply/qcom/step-chg-jeita.c
index 06ecc7ea6e8a..acc0d772d44d 100644
--- a/drivers/power/supply/qcom/step-chg-jeita.c
+++ b/drivers/power/supply/qcom/step-chg-jeita.c
@@ -356,11 +356,21 @@ static void status_change_work(struct work_struct *work)
int reschedule_us;
int reschedule_jeita_work_us = 0;
int reschedule_step_work_us = 0;
+ union power_supply_propval pval = {0, };
+
+ if (!is_batt_available(chip)) {
+ __pm_relax(chip->step_chg_ws);
+ return;
+ }
- if (!is_batt_available(chip))
+ /* skip jeita and step if not charging */
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_STATUS, &pval);
+ if (pval.intval != POWER_SUPPLY_STATUS_CHARGING) {
+ __pm_relax(chip->step_chg_ws);
return;
+ }
- /* skip elapsed_us debounce for handling battery temperature */
rc = handle_jeita(chip);
if (rc > 0)
reschedule_jeita_work_us = rc;
diff --git a/drivers/pwm/pwm-qpnp.c b/drivers/pwm/pwm-qpnp.c
index 2531b74b4588..5e808150a3dd 100644
--- a/drivers/pwm/pwm-qpnp.c
+++ b/drivers/pwm/pwm-qpnp.c
@@ -1475,7 +1475,7 @@ static void qpnp_pwm_disable(struct pwm_chip *pwm_chip,
*/
int pwm_change_mode(struct pwm_device *pwm, enum pm_pwm_mode mode)
{
- int rc;
+ int rc = 0;
unsigned long flags;
struct qpnp_pwm_chip *chip;
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index d5bf36ec8a75..34367d172961 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,7 +3,7 @@
*
* Debug traces for zfcp.
*
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2017
*/
#define KMSG_COMPONENT "zfcp"
@@ -447,6 +447,7 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
struct scatterlist *resp_entry = ct_els->resp;
+ struct fc_ct_hdr *resph;
struct fc_gpn_ft_resp *acc;
int max_entries, x, last = 0;
@@ -473,6 +474,13 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
return len; /* not GPN_FT response so do not cap */
acc = sg_virt(resp_entry);
+
+ /* cap all but accept CT responses to at least the CT header */
+ resph = (struct fc_ct_hdr *)acc;
+ if ((ct_els->status) ||
+ (resph->ct_cmd != cpu_to_be16(FC_FS_ACC)))
+ return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD);
+
max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
+ 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
* to account for header as 1st pseudo "entry" */;
@@ -555,8 +563,8 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
rec->scsi_retries = sc->retries;
rec->scsi_allowed = sc->allowed;
rec->scsi_id = sc->device->id;
- /* struct zfcp_dbf_scsi needs to be updated to handle 64bit LUNs */
rec->scsi_lun = (u32)sc->device->lun;
+ rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
rec->host_scribble = (unsigned long)sc->host_scribble;
memcpy(rec->scsi_opcode, sc->cmnd,
@@ -564,19 +572,32 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
if (fsf) {
rec->fsf_req_id = fsf->req_id;
+ rec->pl_len = FCP_RESP_WITH_EXT;
fcp_rsp = (struct fcp_resp_with_ext *)
&(fsf->qtcb->bottom.io.fcp_rsp);
+ /* mandatory parts of FCP_RSP IU in this SCSI record */
memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
+ rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
}
if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
- rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE,
- (u16)ZFCP_DBF_PAY_MAX_REC);
- zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len,
- "fcp_sns", fsf->req_id);
+ rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
}
+ /* complete FCP_RSP IU in associated PAYload record
+ * but only if there are optional parts
+ */
+ if (fcp_rsp->resp.fr_flags != 0)
+ zfcp_dbf_pl_write(
+ dbf, fcp_rsp,
+ /* at least one full PAY record
+ * but not beyond hardware response field
+ */
+ min_t(u16, max_t(u16, rec->pl_len,
+ ZFCP_DBF_PAY_MAX_REC),
+ FSF_FCP_RSP_SIZE),
+ "fcp_riu", fsf->req_id);
}
debug_event(dbf->scsi, level, rec, sizeof(*rec));
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index db186d44cfaf..b60667c145fd 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -2,7 +2,7 @@
* zfcp device driver
* debug feature declarations
*
- * Copyright IBM Corp. 2008, 2016
+ * Copyright IBM Corp. 2008, 2017
*/
#ifndef ZFCP_DBF_H
@@ -204,7 +204,7 @@ enum zfcp_dbf_scsi_id {
* @id: unique number of recovery record type
* @tag: identifier string specifying the location of initiation
* @scsi_id: scsi device id
- * @scsi_lun: scsi device logical unit number
+ * @scsi_lun: scsi device logical unit number, low part of 64 bit, old 32 bit
* @scsi_result: scsi result
* @scsi_retries: current retry number of scsi request
* @scsi_allowed: allowed retries
@@ -214,6 +214,7 @@ enum zfcp_dbf_scsi_id {
* @host_scribble: LLD specific data attached to SCSI request
* @pl_len: length of paload stored as zfcp_dbf_pay
* @fsf_rsp: response for fsf request
+ * @scsi_lun_64_hi: scsi device logical unit number, high part of 64 bit
*/
struct zfcp_dbf_scsi {
u8 id;
@@ -230,6 +231,7 @@ struct zfcp_dbf_scsi {
u64 host_scribble;
u16 pl_len;
struct fcp_resp_with_ext fcp_rsp;
+ u32 scsi_lun_64_hi;
} __packed;
/**
@@ -323,7 +325,11 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
{
struct fsf_qtcb *qtcb = req->qtcb;
- if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
+ if (unlikely(req->status & (ZFCP_STATUS_FSFREQ_DISMISSED |
+ ZFCP_STATUS_FSFREQ_ERROR))) {
+ zfcp_dbf_hba_fsf_resp("fs_rerr", 3, req);
+
+ } else if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
(qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
@@ -401,7 +407,8 @@ void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd,
* @flag: indicates type of reset (Target Reset, Logical Unit Reset)
*/
static inline
-void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
+void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag,
+ struct zfcp_fsf_req *fsf_req)
{
char tmp_tag[ZFCP_DBF_TAG_LEN];
@@ -411,7 +418,7 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
memcpy(tmp_tag, "lr_", 3);
memcpy(&tmp_tag[3], tag, 4);
- _zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
+ _zfcp_dbf_scsi(tmp_tag, 1, scmnd, fsf_req);
}
/**
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index df2b541c8287..a2275825186f 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -4,7 +4,7 @@
* Fibre Channel related definitions and inline functions for the zfcp
* device driver
*
- * Copyright IBM Corp. 2009
+ * Copyright IBM Corp. 2009, 2017
*/
#ifndef ZFCP_FC_H
@@ -279,6 +279,10 @@ void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
!(rsp_flags & FCP_SNS_LEN_VAL) &&
fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
set_host_byte(scsi, DID_ERROR);
+ } else if (unlikely(rsp_flags & FCP_RESID_OVER)) {
+ /* FCP_DL was not sufficient for SCSI data length */
+ if (fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
+ set_host_byte(scsi, DID_ERROR);
}
}
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 27ff38f839fc..1964391db904 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -928,8 +928,8 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
switch (header->fsf_status) {
case FSF_GOOD:
- zfcp_dbf_san_res("fsscth2", req);
ct->status = 0;
+ zfcp_dbf_san_res("fsscth2", req);
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
zfcp_fsf_class_not_supp(req);
@@ -1109,8 +1109,8 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
switch (header->fsf_status) {
case FSF_GOOD:
- zfcp_dbf_san_res("fsselh1", req);
send_els->status = 0;
+ zfcp_dbf_san_res("fsselh1", req);
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
zfcp_fsf_class_not_supp(req);
@@ -2258,7 +2258,8 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
- if (scsi_prot_sg_count(scsi_cmnd)) {
+ if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
+ scsi_prot_sg_count(scsi_cmnd)) {
zfcp_qdio_set_data_div(qdio, &req->qdio_req,
scsi_prot_sg_count(scsi_cmnd));
retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 07ffdbb5107f..9bd9b9a29dfc 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2017
*/
#define KMSG_COMPONENT "zfcp"
@@ -273,25 +273,29 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
zfcp_erp_wait(adapter);
ret = fc_block_scsi_eh(scpnt);
- if (ret)
+ if (ret) {
+ zfcp_dbf_scsi_devreset("fiof", scpnt, tm_flags, NULL);
return ret;
+ }
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
- zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
+ zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags, NULL);
return SUCCESS;
}
}
- if (!fsf_req)
+ if (!fsf_req) {
+ zfcp_dbf_scsi_devreset("reqf", scpnt, tm_flags, NULL);
return FAILED;
+ }
wait_for_completion(&fsf_req->completion);
if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
- zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
+ zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags, fsf_req);
retval = FAILED;
} else {
- zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
+ zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags, fsf_req);
zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
}
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index 1910100638a2..00602abec0ea 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -66,6 +66,9 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
{
static const char * const strings[] = RNC_STATES;
+ if (state >= ARRAY_SIZE(strings))
+ return "UNKNOWN";
+
return strings[state];
}
#undef C
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 0e6aaef9a038..c74f74ab981c 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1054,7 +1054,10 @@ stop_rr_fcf_flogi:
lpfc_sli4_unreg_all_rpis(vport);
}
}
- lpfc_issue_reg_vfi(vport);
+
+ /* Do not register VFI if the driver aborted FLOGI */
+ if (!lpfc_error_lost_link(irsp))
+ lpfc_issue_reg_vfi(vport);
lpfc_nlp_put(ndlp);
goto out;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 17c440b9d086..6835bae33ec4 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1824,9 +1824,12 @@ static void megasas_complete_outstanding_ioctls(struct megasas_instance *instanc
if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
if (cmd_mfi->sync_cmd &&
- cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
+ (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
+ cmd_mfi->frame->hdr.cmd_status =
+ MFI_STAT_WRONG_STATE;
megasas_complete_cmd(instance,
cmd_mfi, DID_OK);
+ }
}
}
} else {
@@ -5094,6 +5097,14 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
prev_aen.word =
le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
+ if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
+ (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
+ dev_info(&instance->pdev->dev,
+ "%s %d out of range class %d send by application\n",
+ __func__, __LINE__, curr_aen.members.class);
+ return 0;
+ }
+
/*
* A class whose enum value is smaller is inclusive of all
* higher values. If a PROGRESS (= -1) was previously
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 1ed85dfc008d..ac12ee844bfc 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -404,6 +404,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
return -EINVAL;
if (start > ha->optrom_size)
return -EINVAL;
+ if (size > ha->optrom_size - start)
+ size = ha->optrom_size - start;
mutex_lock(&ha->optrom_mutex);
switch (val) {
@@ -429,8 +431,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
}
ha->optrom_region_start = start;
- ha->optrom_region_size = start + size > ha->optrom_size ?
- ha->optrom_size - start : size;
+ ha->optrom_region_size = start + size;
ha->optrom_state = QLA_SREADING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
@@ -503,8 +504,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
}
ha->optrom_region_start = start;
- ha->optrom_region_size = start + size > ha->optrom_size ?
- ha->optrom_size - start : size;
+ ha->optrom_region_size = start + size;
ha->optrom_state = QLA_SWRITING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 29d8c74e85e3..b0e2e292e3cb 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -133,7 +133,7 @@ struct sg_device; /* forward declarations */
struct sg_fd;
typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
- struct sg_request *nextrp; /* NULL -> tail request (slist) */
+ struct list_head entry; /* list entry */
struct sg_fd *parentfp; /* NULL -> not in use */
Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */
@@ -153,11 +153,11 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
struct sg_device *parentdp; /* owning device */
wait_queue_head_t read_wait; /* queue read until command done */
rwlock_t rq_list_lock; /* protect access to list in req_arr */
+ struct mutex f_mutex; /* protect against changes in this fd */
int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
Sg_scatter_hold reserve; /* buffer held for this file descriptor */
- unsigned save_scat_len; /* original length of trunc. scat. element */
- Sg_request *headrp; /* head of request slist, NULL->empty */
+ struct list_head rq_list; /* head of request list */
struct fasync_struct *async_qp; /* used by asynchronous notification */
Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
char low_dma; /* as in parent but possibly overridden to 1 */
@@ -166,6 +166,7 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
char mmap_called; /* 0 -> mmap() never called on this fd */
+ char res_in_use; /* 1 -> 'reserve' array in use */
struct kref f_ref;
struct execute_work ew;
} Sg_fd;
@@ -209,7 +210,6 @@ static void sg_remove_sfp(struct kref *);
static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
-static int sg_res_in_use(Sg_fd * sfp);
static Sg_device *sg_get_dev(int dev);
static void sg_device_destroy(struct kref *kref);
@@ -625,6 +625,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
}
buf += SZ_SG_HEADER;
__get_user(opcode, buf);
+ mutex_lock(&sfp->f_mutex);
if (sfp->next_cmd_len > 0) {
cmd_size = sfp->next_cmd_len;
sfp->next_cmd_len = 0; /* reset so only this write() effected */
@@ -633,6 +634,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
if ((opcode >= 0xc0) && old_hdr.twelve_byte)
cmd_size = 12;
}
+ mutex_unlock(&sfp->f_mutex);
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
/* Determine buffer size. */
@@ -732,7 +734,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
sg_remove_request(sfp, srp);
return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
}
- if (sg_res_in_use(sfp)) {
+ if (sfp->res_in_use) {
sg_remove_request(sfp, srp);
return -EBUSY; /* reserve buffer already being used */
}
@@ -837,6 +839,39 @@ static int max_sectors_bytes(struct request_queue *q)
return max_sectors << 9;
}
+static void
+sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
+{
+ Sg_request *srp;
+ int val;
+ unsigned int ms;
+
+ val = 0;
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
+ if (val >= SG_MAX_QUEUE)
+ break;
+ rinfo[val].req_state = srp->done + 1;
+ rinfo[val].problem =
+ srp->header.masked_status &
+ srp->header.host_status &
+ srp->header.driver_status;
+ if (srp->done)
+ rinfo[val].duration =
+ srp->header.duration;
+ else {
+ ms = jiffies_to_msecs(jiffies);
+ rinfo[val].duration =
+ (ms > srp->header.duration) ?
+ (ms - srp->header.duration) : 0;
+ }
+ rinfo[val].orphan = srp->orphan;
+ rinfo[val].sg_io_owned = srp->sg_io_owned;
+ rinfo[val].pack_id = srp->header.pack_id;
+ rinfo[val].usr_ptr = srp->header.usr_ptr;
+ val++;
+ }
+}
+
static long
sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
@@ -902,7 +937,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return result;
if (val) {
sfp->low_dma = 1;
- if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
+ if ((0 == sfp->low_dma) && !sfp->res_in_use) {
val = (int) sfp->reserve.bufflen;
sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);
@@ -948,7 +983,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
return -EFAULT;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (srp = sfp->headrp; srp; srp = srp->nextrp) {
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned)) {
read_unlock_irqrestore(&sfp->rq_list_lock,
iflags);
@@ -961,7 +996,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return 0;
case SG_GET_NUM_WAITING:
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
+ val = 0;
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned))
++val;
}
@@ -977,12 +1013,18 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return -EINVAL;
val = min_t(int, val,
max_sectors_bytes(sdp->device->request_queue));
+ mutex_lock(&sfp->f_mutex);
if (val != sfp->reserve.bufflen) {
- if (sg_res_in_use(sfp) || sfp->mmap_called)
+ if (sfp->mmap_called ||
+ sfp->res_in_use) {
+ mutex_unlock(&sfp->f_mutex);
return -EBUSY;
+ }
+
sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);
}
+ mutex_unlock(&sfp->f_mutex);
return 0;
case SG_GET_RESERVED_SIZE:
val = min_t(int, sfp->reserve.bufflen,
@@ -1023,42 +1065,15 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return -EFAULT;
else {
sg_req_info_t *rinfo;
- unsigned int ms;
- rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
- GFP_KERNEL);
+ rinfo = kzalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
+ GFP_KERNEL);
if (!rinfo)
return -ENOMEM;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
- ++val, srp = srp ? srp->nextrp : srp) {
- memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
- if (srp) {
- rinfo[val].req_state = srp->done + 1;
- rinfo[val].problem =
- srp->header.masked_status &
- srp->header.host_status &
- srp->header.driver_status;
- if (srp->done)
- rinfo[val].duration =
- srp->header.duration;
- else {
- ms = jiffies_to_msecs(jiffies);
- rinfo[val].duration =
- (ms > srp->header.duration) ?
- (ms - srp->header.duration) : 0;
- }
- rinfo[val].orphan = srp->orphan;
- rinfo[val].sg_io_owned =
- srp->sg_io_owned;
- rinfo[val].pack_id =
- srp->header.pack_id;
- rinfo[val].usr_ptr =
- srp->header.usr_ptr;
- }
- }
+ sg_fill_request_table(sfp, rinfo);
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- result = __copy_to_user(p, rinfo,
+ result = __copy_to_user(p, rinfo,
SZ_SG_REQ_INFO * SG_MAX_QUEUE);
result = result ? -EFAULT : 0;
kfree(rinfo);
@@ -1164,7 +1179,7 @@ sg_poll(struct file *filp, poll_table * wait)
return POLLERR;
poll_wait(filp, &sfp->read_wait, wait);
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (srp = sfp->headrp; srp; srp = srp->nextrp) {
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
/* if any read waiting, flag it */
if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
res = POLLIN | POLLRDNORM;
@@ -1245,6 +1260,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
unsigned long req_sz, len, sa;
Sg_scatter_hold *rsv_schp;
int k, length;
+ int ret = 0;
if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
return -ENXIO;
@@ -1255,8 +1271,11 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
if (vma->vm_pgoff)
return -EINVAL; /* want no offset */
rsv_schp = &sfp->reserve;
- if (req_sz > rsv_schp->bufflen)
- return -ENOMEM; /* cannot map more than reserved buffer */
+ mutex_lock(&sfp->f_mutex);
+ if (req_sz > rsv_schp->bufflen) {
+ ret = -ENOMEM; /* cannot map more than reserved buffer */
+ goto out;
+ }
sa = vma->vm_start;
length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
@@ -1270,7 +1289,9 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = sfp;
vma->vm_ops = &sg_mmap_vm_ops;
- return 0;
+out:
+ mutex_unlock(&sfp->f_mutex);
+ return ret;
}
static void
@@ -1734,13 +1755,25 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
md = &map_data;
if (md) {
- if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
+ mutex_lock(&sfp->f_mutex);
+ if (dxfer_len <= rsv_schp->bufflen &&
+ !sfp->res_in_use) {
+ sfp->res_in_use = 1;
sg_link_reserve(sfp, srp, dxfer_len);
- else {
+ } else if (hp->flags & SG_FLAG_MMAP_IO) {
+ res = -EBUSY; /* sfp->res_in_use == 1 */
+ if (dxfer_len > rsv_schp->bufflen)
+ res = -ENOMEM;
+ mutex_unlock(&sfp->f_mutex);
+ return res;
+ } else {
res = sg_build_indirect(req_schp, sfp, dxfer_len);
- if (res)
+ if (res) {
+ mutex_unlock(&sfp->f_mutex);
return res;
+ }
}
+ mutex_unlock(&sfp->f_mutex);
md->pages = req_schp->pages;
md->page_order = req_schp->page_order;
@@ -2029,8 +2062,9 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
req_schp->pages = NULL;
req_schp->page_order = 0;
req_schp->sglist_len = 0;
- sfp->save_scat_len = 0;
srp->res_used = 0;
+ /* Called without mutex lock to avoid deadlock */
+ sfp->res_in_use = 0;
}
static Sg_request *
@@ -2040,7 +2074,7 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id)
unsigned long iflags;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (resp = sfp->headrp; resp; resp = resp->nextrp) {
+ list_for_each_entry(resp, &sfp->rq_list, entry) {
/* look for requests that are ready + not SG_IO owned */
if ((1 == resp->done) && (!resp->sg_io_owned) &&
((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
@@ -2058,70 +2092,45 @@ sg_add_request(Sg_fd * sfp)
{
int k;
unsigned long iflags;
- Sg_request *resp;
Sg_request *rp = sfp->req_arr;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
- resp = sfp->headrp;
- if (!resp) {
- memset(rp, 0, sizeof (Sg_request));
- rp->parentfp = sfp;
- resp = rp;
- sfp->headrp = resp;
- } else {
- if (0 == sfp->cmd_q)
- resp = NULL; /* command queuing disallowed */
- else {
- for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
- if (!rp->parentfp)
- break;
- }
- if (k < SG_MAX_QUEUE) {
- memset(rp, 0, sizeof (Sg_request));
- rp->parentfp = sfp;
- while (resp->nextrp)
- resp = resp->nextrp;
- resp->nextrp = rp;
- resp = rp;
- } else
- resp = NULL;
+ if (!list_empty(&sfp->rq_list)) {
+ if (!sfp->cmd_q)
+ goto out_unlock;
+
+ for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
+ if (!rp->parentfp)
+ break;
}
+ if (k >= SG_MAX_QUEUE)
+ goto out_unlock;
}
- if (resp) {
- resp->nextrp = NULL;
- resp->header.duration = jiffies_to_msecs(jiffies);
- }
+ memset(rp, 0, sizeof (Sg_request));
+ rp->parentfp = sfp;
+ rp->header.duration = jiffies_to_msecs(jiffies);
+ list_add_tail(&rp->entry, &sfp->rq_list);
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- return resp;
+ return rp;
+out_unlock:
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return NULL;
}
/* Return of 1 for found; 0 for not found */
static int
sg_remove_request(Sg_fd * sfp, Sg_request * srp)
{
- Sg_request *prev_rp;
- Sg_request *rp;
unsigned long iflags;
int res = 0;
- if ((!sfp) || (!srp) || (!sfp->headrp))
+ if (!sfp || !srp || list_empty(&sfp->rq_list))
return res;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
- prev_rp = sfp->headrp;
- if (srp == prev_rp) {
- sfp->headrp = prev_rp->nextrp;
- prev_rp->parentfp = NULL;
+ if (!list_empty(&srp->entry)) {
+ list_del(&srp->entry);
+ srp->parentfp = NULL;
res = 1;
- } else {
- while ((rp = prev_rp->nextrp)) {
- if (srp == rp) {
- prev_rp->nextrp = rp->nextrp;
- rp->parentfp = NULL;
- res = 1;
- break;
- }
- prev_rp = rp;
- }
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return res;
@@ -2140,8 +2149,9 @@ sg_add_sfp(Sg_device * sdp)
init_waitqueue_head(&sfp->read_wait);
rwlock_init(&sfp->rq_list_lock);
-
+ INIT_LIST_HEAD(&sfp->rq_list);
kref_init(&sfp->f_ref);
+ mutex_init(&sfp->f_mutex);
sfp->timeout = SG_DEFAULT_TIMEOUT;
sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
sfp->force_packid = SG_DEF_FORCE_PACK_ID;
@@ -2180,10 +2190,13 @@ sg_remove_sfp_usercontext(struct work_struct *work)
{
struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
struct sg_device *sdp = sfp->parentdp;
+ Sg_request *srp;
/* Cleanup any responses which were never read(). */
- while (sfp->headrp)
- sg_finish_rem_req(sfp->headrp);
+ while (!list_empty(&sfp->rq_list)) {
+ srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
+ sg_finish_rem_req(srp);
+ }
if (sfp->reserve.bufflen > 0) {
SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
@@ -2217,20 +2230,6 @@ sg_remove_sfp(struct kref *kref)
schedule_work(&sfp->ew.work);
}
-static int
-sg_res_in_use(Sg_fd * sfp)
-{
- const Sg_request *srp;
- unsigned long iflags;
-
- read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (srp = sfp->headrp; srp; srp = srp->nextrp)
- if (srp->res_used)
- break;
- read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- return srp ? 1 : 0;
-}
-
#ifdef CONFIG_SCSI_PROC_FS
static int
sg_idr_max_id(int id, void *p, void *data)
@@ -2600,7 +2599,7 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
/* must be called while holding sg_index_lock */
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
- int k, m, new_interface, blen, usg;
+ int k, new_interface, blen, usg;
Sg_request *srp;
Sg_fd *fp;
const sg_io_hdr_t *hp;
@@ -2620,13 +2619,11 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
(int) fp->cmd_q, (int) fp->force_packid,
(int) fp->keep_orphan);
- for (m = 0, srp = fp->headrp;
- srp != NULL;
- ++m, srp = srp->nextrp) {
+ list_for_each_entry(srp, &fp->rq_list, entry) {
hp = &srp->header;
new_interface = (hp->interface_id == '\0') ? 0 : 1;
if (srp->res_used) {
- if (new_interface &&
+ if (new_interface &&
(SG_FLAG_MMAP_IO & hp->flags))
cp = " mmap>> ";
else
@@ -2657,7 +2654,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
(int) srp->data.cmd_opcode);
}
- if (0 == m)
+ if (list_empty(&fp->rq_list))
seq_puts(s, " No requests active\n");
read_unlock(&fp->rq_list_lock);
}
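
Editor's note: the sg.c hunks above convert the driver's hand-rolled headrp/nextrp singly linked request list to the kernel's struct list_head API (INIT_LIST_HEAD, list_add_tail, list_del, list_for_each_entry), removing the manual head/next bookkeeping that sg_add_request() and sg_remove_request() used to do. A minimal sketch of the same pattern, with hypothetical my_fd/my_req types standing in for Sg_fd/Sg_request:

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_req {
	struct list_head entry;		/* replaces the old ->nextrp pointer */
	int done;
};

struct my_fd {
	struct list_head rq_list;	/* replaces the old ->headrp head */
	rwlock_t rq_list_lock;
};

/* At init: INIT_LIST_HEAD(&fd->rq_list); rwlock_init(&fd->rq_list_lock); */

/* Walk the list under the read lock, as the sg_poll() hunk does above. */
static int my_any_done(struct my_fd *fd)
{
	struct my_req *rp;
	unsigned long flags;
	int found = 0;

	read_lock_irqsave(&fd->rq_list_lock, flags);
	list_for_each_entry(rp, &fd->rq_list, entry) {
		if (rp->done) {
			found = 1;
			break;
		}
	}
	read_unlock_irqrestore(&fd->rq_list_lock, flags);
	return found;
}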
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index cd5c1c060481..6df2841cb7f9 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1511,6 +1511,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
ret = storvsc_do_io(dev, cmd_request);
if (ret == -EAGAIN) {
+ if (payload_sz > sizeof(cmd_request->mpb))
+ kfree(payload);
/* no more space */
return SCSI_MLQUEUE_DEVICE_BUSY;
}
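
Editor's note: the storvsc hunk above plugs a leak on the -EAGAIN path: the payload is heap-allocated only when it does not fit in the embedded cmd_request->mpb field, so only that case may kfree() it. A hedged sketch of the pattern, with hypothetical my_request/my_do_io names:

#include <linux/slab.h>

struct my_request {
	char scratch[64];		/* embedded buffer for small payloads */
};

static int my_do_io(struct my_request *req, void *payload);	/* stand-in */

static int my_submit(struct my_request *req, size_t payload_sz)
{
	void *payload = req->scratch;
	int ret;

	if (payload_sz > sizeof(req->scratch)) {
		payload = kzalloc(payload_sz, GFP_ATOMIC);
		if (!payload)
			return -ENOMEM;
	}

	ret = my_do_io(req, payload);
	if (ret == -EAGAIN) {
		/* the fix: free only what was actually heap-allocated */
		if (payload_sz > sizeof(req->scratch))
			kfree(payload);
		return -EBUSY;
	}
	return ret;
}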
diff --git a/drivers/scsi/ufs/ufs-qcom-debugfs.c b/drivers/scsi/ufs/ufs-qcom-debugfs.c
index 4547a6dbdb23..da670c0e31fa 100644
--- a/drivers/scsi/ufs/ufs-qcom-debugfs.c
+++ b/drivers/scsi/ufs/ufs-qcom-debugfs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -117,6 +117,9 @@ static ssize_t ufs_qcom_dbg_testbus_cfg_write(struct file *file,
int ret = 0;
int major;
int minor;
+ unsigned long flags;
+ struct ufs_hba *hba = host->hba;
+
ret = simple_write_to_buffer(configuration, TESTBUS_CFG_BUFF_LINE_SIZE,
&buff_pos, ubuf, cnt);
@@ -142,8 +145,15 @@ static ssize_t ufs_qcom_dbg_testbus_cfg_write(struct file *file,
goto out;
}
+ if (!ufs_qcom_testbus_cfg_is_ok(host, major, minor)) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
host->testbus.select_major = (u8)major;
host->testbus.select_minor = (u8)minor;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
/*
* Sanity check of the {major, minor} tuple is done in the
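
Editor's note: the debugfs hunk above moves input validation ahead of publication and wraps the two testbus fields in the host lock so readers never observe a half-updated {major, minor} pair. A minimal sketch of that validate-then-publish pattern, with hypothetical my_host/cfg_is_valid names:

#include <linux/spinlock.h>

struct my_host {
	spinlock_t lock;
	u8 select_major;
	u8 select_minor;
};

/* Stands in for ufs_qcom_testbus_cfg_is_ok(). */
static bool cfg_is_valid(u8 major, u8 minor);

static int my_cfg_store(struct my_host *host, u8 major, u8 minor)
{
	unsigned long flags;

	if (!cfg_is_valid(major, minor))	/* reject before taking the lock */
		return -EPERM;

	spin_lock_irqsave(&host->lock, flags);
	host->select_major = major;		/* publish both fields atomically */
	host->select_minor = minor;
	spin_unlock_irqrestore(&host->lock, flags);
	return 0;
}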
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 47106f937371..f429547aef7b 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -2563,12 +2563,13 @@ static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
host->testbus.select_minor = 37;
}
-static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
+bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host,
+ u8 select_major, u8 select_minor)
{
- if (host->testbus.select_major >= TSTBUS_MAX) {
+ if (select_major >= TSTBUS_MAX) {
dev_err(host->hba->dev,
"%s: UFS_CFG1[TEST_BUS_SEL} may not equal 0x%05X\n",
- __func__, host->testbus.select_major);
+ __func__, select_major);
return false;
}
@@ -2577,10 +2578,10 @@ static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
* mappings of select_minor, since there is no harm in
* configuring a non-existent select_minor
*/
- if (host->testbus.select_minor > 0xFF) {
+ if (select_minor > 0xFF) {
dev_err(host->hba->dev,
"%s: 0x%05X is not a legal testbus option\n",
- __func__, host->testbus.select_minor);
+ __func__, select_minor);
return false;
}
@@ -2594,16 +2595,16 @@ static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
*/
int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
{
- int reg;
- int offset;
+ int reg = 0;
+ int offset, ret = 0, testbus_sel_offset = 19;
u32 mask = TEST_BUS_SUB_SEL_MASK;
+ unsigned long flags;
+ struct ufs_hba *hba;
if (!host)
return -EINVAL;
-
- if (!ufs_qcom_testbus_cfg_is_ok(host))
- return -EPERM;
-
+ hba = host->hba;
+ spin_lock_irqsave(hba->host->host_lock, flags);
switch (host->testbus.select_major) {
case TSTBUS_UAWM:
reg = UFS_TEST_BUS_CTRL_0;
@@ -2661,21 +2662,27 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
*/
}
mask <<= offset;
-
- ufshcd_rmwl(host->hba, TEST_BUS_SEL,
- (u32)host->testbus.select_major << 19,
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (reg) {
+ ufshcd_rmwl(host->hba, TEST_BUS_SEL,
+ (u32)host->testbus.select_major << testbus_sel_offset,
REG_UFS_CFG1);
- ufshcd_rmwl(host->hba, mask,
+ ufshcd_rmwl(host->hba, mask,
(u32)host->testbus.select_minor << offset,
reg);
+ } else {
+ dev_err(hba->dev, "%s: Problem setting minor\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
ufs_qcom_enable_test_bus(host);
/*
* Make sure the test bus configuration is
* committed before returning.
*/
mb();
-
- return 0;
+out:
+ return ret;
}
static void ufs_qcom_testbus_read(struct ufs_hba *hba)
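
Editor's note: ufs_qcom_testbus_config() above now takes the host lock only while deriving reg/mask from the shared testbus fields and releases it before the ufshcd_rmwl() register writes. A sketch of that lock-scoping choice, reusing the hypothetical my_host type from the previous note:

static void my_mmio_write(struct my_host *host, u8 major, u8 minor);	/* stand-in */

static int my_apply_cfg(struct my_host *host)
{
	unsigned long flags;
	u8 major, minor;

	spin_lock_irqsave(&host->lock, flags);
	major = host->select_major;	/* snapshot shared state under the lock */
	minor = host->select_minor;
	spin_unlock_irqrestore(&host->lock, flags);

	if (!major)
		return -EINVAL;		/* nothing configured yet */

	my_mmio_write(host, major, minor);	/* slow register I/O, lock dropped */
	return 0;
}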
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 9d532691f001..fd98a3381d61 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -100,7 +100,7 @@ enum {
/* bit definitions for REG_UFS_CFG1 register */
#define QUNIPRO_SEL UFS_BIT(0)
#define TEST_BUS_EN BIT(18)
-#define TEST_BUS_SEL GENMASK(22, 19)
+#define TEST_BUS_SEL 0x780000
#define UFS_REG_TEST_BUS_EN BIT(30)
/* bit definitions for REG_UFS_CFG2 register */
@@ -391,6 +391,8 @@ ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg)
#define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba)
#define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba)
+bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host, u8 select_major,
+ u8 select_minor);
int ufs_qcom_testbus_config(struct ufs_qcom_host *host);
void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, void *priv,
void (*print_fn)(struct ufs_hba *hba, int offset, int num_regs,
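
Editor's note: the TEST_BUS_SEL change above is value-preserving: GENMASK(22, 19) sets exactly bits 19 through 22, which is 0x00780000, the literal now used. A compile-time check of that equivalence (assuming the usual <linux/bitops.h> and <linux/bug.h> definitions):

#include <linux/bitops.h>
#include <linux/bug.h>

static inline void check_test_bus_sel(void)
{
	BUILD_BUG_ON(GENMASK(22, 19) != 0x780000);	/* bits 19..22 */
}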
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 523a2cff44a3..35575c071760 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -173,6 +173,9 @@ void ufshcd_update_query_stats(struct ufs_hba *hba,
}
#endif
+#define PWR_INFO_MASK 0xF
+#define PWR_RX_OFFSET 4
+
#define UFSHCD_REQ_SENSE_SIZE 18
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
@@ -4653,8 +4656,9 @@ int ufshcd_change_power_mode(struct ufs_hba *hba,
int ret = 0;
/* if already configured to the requested pwr_mode */
- if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
- pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
+ if (!hba->restore_needed &&
+ pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
+ pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
@@ -6275,6 +6279,52 @@ static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
}
+static void ufshcd_rls_handler(struct work_struct *work)
+{
+ struct ufs_hba *hba;
+ int ret = 0;
+ u32 mode;
+
+ hba = container_of(work, struct ufs_hba, rls_work);
+ ufshcd_scsi_block_requests(hba);
+ pm_runtime_get_sync(hba->dev);
+ ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
+ if (ret) {
+ dev_err(hba->dev,
+ "Timed out (%d) waiting for DB to clear\n",
+ ret);
+ goto out;
+ }
+
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
+ if (hba->pwr_info.pwr_rx != ((mode >> PWR_RX_OFFSET) & PWR_INFO_MASK))
+ hba->restore_needed = true;
+
+ if (hba->pwr_info.pwr_tx != (mode & PWR_INFO_MASK))
+ hba->restore_needed = true;
+
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_RXGEAR), &mode);
+ if (hba->pwr_info.gear_rx != mode)
+ hba->restore_needed = true;
+
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TXGEAR), &mode);
+ if (hba->pwr_info.gear_tx != mode)
+ hba->restore_needed = true;
+
+ if (hba->restore_needed)
+ ret = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
+
+ if (ret)
+ dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
+ __func__, ret);
+ else
+ hba->restore_needed = false;
+
+out:
+ ufshcd_scsi_unblock_requests(hba);
+ pm_runtime_put_sync(hba->dev);
+}
+
/**
* ufshcd_update_uic_error - check and set fatal UIC error flags.
* @hba: per-adapter instance
@@ -6314,6 +6364,8 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
hba->full_init_linereset = true;
}
}
+ if (!hba->full_init_linereset)
+ schedule_work(&hba->rls_work);
}
retval |= IRQ_HANDLED;
}
@@ -8708,6 +8760,7 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
goto enable_gating;
}
+ flush_work(&hba->eeh_work);
ret = ufshcd_link_state_transition(hba, req_link_state, 1);
if (ret)
goto set_dev_active;
@@ -9921,6 +9974,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Initialize work queues */
INIT_WORK(&hba->eh_work, ufshcd_err_handler);
INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
+ INIT_WORK(&hba->rls_work, ufshcd_rls_handler);
/* Initialize UIC command mutex */
mutex_init(&hba->uic_cmd_mutex);
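
Editor's note: ufshcd_rls_handler() above follows the standard deferred-work pattern: the interrupt path only calls schedule_work(), and the handler, which may sleep (DME gets, power-mode reconfiguration), recovers its device context via container_of(). A minimal sketch with hypothetical my_dev names:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct my_dev {
	struct work_struct recovery_work;	/* INIT_WORK() this at probe time */
};

static void my_recovery_handler(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, recovery_work);

	/* Safe to sleep here: query link state, renegotiate power mode, etc. */
	(void)dev;
}

static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *dev = data;

	schedule_work(&dev->recovery_work);	/* never sleep in IRQ context */
	return IRQ_HANDLED;
}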
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index da3ad78d3405..dbc80848ed8b 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -854,6 +854,7 @@ struct ufs_hba {
/* Work Queues */
struct work_struct eh_work;
struct work_struct eeh_work;
+ struct work_struct rls_work;
/* HBA Errors */
u32 errors;
@@ -950,9 +951,10 @@ struct ufs_hba {
bool full_init_linereset;
struct pinctrl *pctrl;
-
+
int latency_hist_enabled;
struct io_latency_state io_lat_s;
+ bool restore_needed;
};
static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba)
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 829876226689..a2136c6863d3 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -616,6 +616,15 @@ config MSM_QDSP6_APRV2
used by audio driver to configure QDSP6's
ASM, ADM and AFE.
+config MSM_QDSP6_APRV2_VM
+ bool "Audio QDSP6 APRv2 virtualization support"
+ depends on MSM_HAB
+ help
+ Enable APRv2 IPC protocol support over
+ HAB between application processor and
+ QDSP6. APR is used by audio driver to
+ configure QDSP6's ASM, ADM and AFE.
+
config MSM_QDSP6_APRV3
bool "Audio QDSP6 APRv3 support"
depends on MSM_SMD
@@ -800,7 +809,8 @@ config MSM_EVENT_TIMER
config MSM_AVTIMER
tristate "Avtimer Driver"
- depends on MSM_QDSP6_APRV2 || MSM_QDSP6_APRV3 || MSM_QDSP6_APRV2_GLINK
+ depends on MSM_QDSP6_APRV2 || MSM_QDSP6_APRV3 || MSM_QDSP6_APRV2_GLINK || \
+ MSM_QDSP6_APRV2_VM
help
This driver gets the Q6 out of power collapsed state and
exposes ioctl control to read avtimer tick.
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index f3debd14c27b..ad9bf3a2232d 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -1667,6 +1667,8 @@ void ch_purge_intent_lists(struct channel_ctx *ctx)
&ctx->local_rx_intent_list, list) {
ctx->notify_rx_abort(ctx, ctx->user_priv,
ptr_intent->pkt_priv);
+ ctx->transport_ptr->ops->deallocate_rx_intent(
+ ctx->transport_ptr->ops, ptr_intent);
list_del(&ptr_intent->list);
kfree(ptr_intent);
}
@@ -3765,6 +3767,8 @@ static void glink_dummy_xprt_ctx_release(struct rwref_lock *xprt_st_lock)
GLINK_INFO("%s: freeing transport [%s->%s]context\n", __func__,
xprt_ctx->name,
xprt_ctx->edge);
+ kfree(xprt_ctx->ops);
+ xprt_ctx->ops = NULL;
kfree(xprt_ctx);
}
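
Editor's note: both glink.c hunks above fix leaks of owned sub-allocations: an rx intent's backing store must be returned through the transport's deallocate_rx_intent op before the intent descriptor is freed, and a transport context must free its ops table before freeing itself. A hedged sketch of the release-before-free pattern, with illustrative names:

#include <linux/list.h>
#include <linux/slab.h>

struct my_intent { struct list_head list; };
struct my_ops    { void (*dealloc_intent)(struct my_ops *ops, struct my_intent *it); };
struct my_ctx    { struct list_head intents; struct my_ops *ops; };

static void my_ctx_release(struct my_ctx *ctx)
{
	struct my_intent *it, *tmp;

	list_for_each_entry_safe(it, tmp, &ctx->intents, list) {
		ctx->ops->dealloc_intent(ctx->ops, it);	/* backing store first */
		list_del(&it->list);
		kfree(it);				/* then the descriptor */
	}
	kfree(ctx->ops);	/* owned ops table before the container itself */
	kfree(ctx);
}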
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 2326487302fd..e9a097151141 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -73,6 +73,8 @@ module_param(qmi_timeout, ulong, 0600);
#define ICNSS_THRESHOLD_LOW 3450000
#define ICNSS_THRESHOLD_GUARD 20000
+#define ICNSS_MAX_PROBE_CNT 2
+
#define icnss_ipc_log_string(_x...) do { \
if (icnss_ipc_log_context) \
ipc_log_string(icnss_ipc_log_context, _x); \
@@ -2105,7 +2107,8 @@ static int icnss_driver_event_server_exit(void *data)
static int icnss_call_driver_probe(struct icnss_priv *priv)
{
- int ret;
+ int ret = 0;
+ int probe_cnt = 0;
if (!priv->ops || !priv->ops->probe)
return 0;
@@ -2117,10 +2120,15 @@ static int icnss_call_driver_probe(struct icnss_priv *priv)
icnss_hw_power_on(priv);
- ret = priv->ops->probe(&priv->pdev->dev);
+ while (probe_cnt < ICNSS_MAX_PROBE_CNT) {
+ ret = priv->ops->probe(&priv->pdev->dev);
+ probe_cnt++;
+ if (ret != -EPROBE_DEFER)
+ break;
+ }
if (ret < 0) {
- icnss_pr_err("Driver probe failed: %d, state: 0x%lx\n",
- ret, priv->state);
+ icnss_pr_err("Driver probe failed: %d, state: 0x%lx, probe_cnt: %d\n",
+ ret, priv->state, probe_cnt);
goto out;
}
@@ -2228,6 +2236,7 @@ out:
static int icnss_driver_event_register_driver(void *data)
{
int ret = 0;
+ int probe_cnt = 0;
if (penv->ops)
return -EEXIST;
@@ -2247,11 +2256,15 @@ static int icnss_driver_event_register_driver(void *data)
if (ret)
goto out;
- ret = penv->ops->probe(&penv->pdev->dev);
-
+ while (probe_cnt < ICNSS_MAX_PROBE_CNT) {
+ ret = penv->ops->probe(&penv->pdev->dev);
+ probe_cnt++;
+ if (ret != -EPROBE_DEFER)
+ break;
+ }
if (ret) {
- icnss_pr_err("Driver probe failed: %d, state: 0x%lx\n",
- ret, penv->state);
+ icnss_pr_err("Driver probe failed: %d, state: 0x%lx, probe_cnt: %d\n",
+ ret, penv->state, probe_cnt);
goto power_off;
}
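
Editor's note: both icnss.c hunks above wrap the client probe in the same bounded retry loop, retrying only on -EPROBE_DEFER and reporting the attempt count on failure. A minimal sketch with hypothetical my_ops names:

#include <linux/device.h>
#include <linux/errno.h>

struct my_ops {
	int (*probe)(struct device *dev);
};

#define MY_MAX_PROBE_CNT 2

static int my_call_probe(struct device *dev, const struct my_ops *ops)
{
	int ret = 0, probe_cnt = 0;

	while (probe_cnt < MY_MAX_PROBE_CNT) {
		ret = ops->probe(dev);
		probe_cnt++;
		if (ret != -EPROBE_DEFER)
			break;	/* success, or a hard error worth surfacing */
	}
	return ret;		/* caller logs ret and probe_cnt on failure */
}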
diff --git a/drivers/soc/qcom/ipc_router_glink_xprt.c b/drivers/soc/qcom/ipc_router_glink_xprt.c
index 0c588c586306..bfcb452f9a22 100644
--- a/drivers/soc/qcom/ipc_router_glink_xprt.c
+++ b/drivers/soc/qcom/ipc_router_glink_xprt.c
@@ -31,6 +31,7 @@ static int ipc_router_glink_xprt_debug_mask;
module_param_named(debug_mask, ipc_router_glink_xprt_debug_mask,
int, S_IRUGO | S_IWUSR | S_IWGRP);
+#define IPCRTR_INTENT_REQ_TIMEOUT_MS 5000
#if defined(DEBUG)
#define D(x...) do { \
if (ipc_router_glink_xprt_debug_mask) \
@@ -604,6 +605,7 @@ static void glink_xprt_ch_open(struct ipc_router_glink_xprt *glink_xprtp)
open_cfg.notify_state = glink_xprt_notify_state;
open_cfg.notify_rx_intent_req = glink_xprt_notify_rx_intent_req;
open_cfg.priv = glink_xprtp;
+ open_cfg.rx_intent_req_timeout_ms = IPCRTR_INTENT_REQ_TIMEOUT_MS;
glink_xprtp->pil = msm_ipc_load_subsystem(glink_xprtp);
glink_xprtp->ch_hndl = glink_open(&open_cfg);
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index d77a12626330..d82c36480159 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -164,12 +164,17 @@ static int pil_do_minidump(struct pil_desc *desc, void *ramdump_dev)
int ss_mdump_seg_cnt;
int ret, i;
+ if (!ramdump_dev)
+ return -ENODEV;
+
memcpy(&offset, &priv->minidump, sizeof(priv->minidump));
offset = offset + sizeof(priv->minidump->md_ss_smem_regions_baseptr);
/* There are 3 encryption keys which also need to be dumped */
ss_mdump_seg_cnt = readb_relaxed(offset) +
NUM_OF_ENCRYPTED_KEY;
+ pr_debug("SMEM base to read minidump segments is 0x%x\n",
+ __raw_readl(priv->minidump));
subsys_smem_base = ioremap(__raw_readl(priv->minidump),
ss_mdump_seg_cnt * sizeof(*region_info));
region_info =
@@ -191,6 +196,9 @@ static int pil_do_minidump(struct pil_desc *desc, void *ramdump_dev)
s->address = __raw_readl(offset);
offset = offset + sizeof(region_info->region_base_address);
s->size = __raw_readl(offset);
+ pr_debug("Dumping segment %s with address %pK and size 0x%x\n",
+ s->name, (void *)s->address,
+ (unsigned int)s->size);
s++;
region_info++;
}
@@ -199,7 +207,6 @@ static int pil_do_minidump(struct pil_desc *desc, void *ramdump_dev)
if (ret)
pil_err(desc, "%s: Ramdump collection failed for subsys %s rc:%d\n",
__func__, desc->name, ret);
- writel_relaxed(0, &priv->minidump->md_ss_smem_regions_baseptr);
writeb_relaxed(1, &priv->minidump->md_ss_ssr_cause);
if (desc->subsys_vmid > 0)
@@ -216,16 +223,28 @@ static int pil_do_minidump(struct pil_desc *desc, void *ramdump_dev)
* Calls the ramdump API with a list of segments generated from the addresses
* that the descriptor corresponds to.
*/
-int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev)
+int pil_do_ramdump(struct pil_desc *desc,
+ void *ramdump_dev, void *minidump_dev)
{
struct pil_priv *priv = desc->priv;
struct pil_seg *seg;
int count = 0, ret;
struct ramdump_segment *ramdump_segs, *s;
+ void __iomem *offset;
- if (priv->minidump && (__raw_readl(priv->minidump) > 0))
- return pil_do_minidump(desc, ramdump_dev);
+ memcpy(&offset, &priv->minidump, sizeof(priv->minidump));
+ /*
+ * Collect the minidump if the SMEM base is initialized and the
+ * SSR cause is 0. No need to check the encryption status.
+ */
+ if (priv->minidump
+ && (__raw_readl(priv->minidump) != 0)
+ && (readb_relaxed(offset + sizeof(u32) + 2 * sizeof(u8)) == 0)) {
+ pr_debug("Dumping Minidump for %s\n", desc->name);
+ return pil_do_minidump(desc, minidump_dev);
+ }
+ pr_debug("Continuing with full SSR dump for %s\n", desc->name);
list_for_each_entry(seg, &priv->segs, list)
count++;
diff --git a/drivers/soc/qcom/peripheral-loader.h b/drivers/soc/qcom/peripheral-loader.h
index 908ab78124f7..6e74743c8c21 100644
--- a/drivers/soc/qcom/peripheral-loader.h
+++ b/drivers/soc/qcom/peripheral-loader.h
@@ -134,7 +134,8 @@ extern void pil_shutdown(struct pil_desc *desc);
extern void pil_free_memory(struct pil_desc *desc);
extern void pil_desc_release(struct pil_desc *desc);
extern phys_addr_t pil_get_entry_addr(struct pil_desc *desc);
-extern int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev);
+extern int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev,
+ void *minidump_dev);
extern int pil_assign_mem_to_subsys(struct pil_desc *desc, phys_addr_t addr,
size_t size);
extern int pil_assign_mem_to_linux(struct pil_desc *desc, phys_addr_t addr,
@@ -154,7 +155,8 @@ static inline phys_addr_t pil_get_entry_addr(struct pil_desc *desc)
{
return 0;
}
-static inline int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev)
+static inline int pil_do_ramdump(struct pil_desc *desc,
+ void *ramdump_dev, void *minidump_dev)
{
return 0;
}
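
Editor's note: the offset arithmetic in the peripheral-loader hunks above (base + sizeof(u32) + 2 * sizeof(u8) to reach the SSR-cause byte) implies a packed SMEM table of contents roughly like the following. The field names are inferred from the code and should be treated as illustrative, not authoritative:

struct md_ss_toc {
	u32 md_ss_smem_regions_baseptr;	/* +0: SMEM base of the region array */
	u8  md_ss_num_of_regions;	/* +4 */
	u8  md_ss_encryption_status;	/* +5: deliberately not checked */
	u8  md_ss_ssr_cause;		/* +6: must be 0 to collect a minidump */
} __packed;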
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index 60d0f2a37026..7ede3e29dcf9 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -559,7 +559,7 @@ int pil_mss_reset_load_mba(struct pil_desc *pil)
char *fw_name_p;
void *mba_dp_virt;
dma_addr_t mba_dp_phys, mba_dp_phys_end;
- int ret, count;
+ int ret;
const u8 *data;
struct device *dma_dev = md->mba_mem_dev_fixed ?: &md->mba_mem_dev;
@@ -624,10 +624,9 @@ int pil_mss_reset_load_mba(struct pil_desc *pil)
&mba_dp_phys, &mba_dp_phys_end, drv->mba_dp_size);
/* Load the MBA image into memory */
- count = fw->size;
- if (count <= SZ_1M) {
+ if (fw->size <= SZ_1M) {
/* Ensures memcpy is done for max 1MB fw size */
- memcpy(mba_dp_virt, data, count);
+ memcpy(mba_dp_virt, data, fw->size);
} else {
dev_err(pil->dev, "%s fw image loading into memory is failed due to fw size overflow\n",
__func__);
diff --git a/drivers/soc/qcom/pil-msa.h b/drivers/soc/qcom/pil-msa.h
index 896f0c7c232b..b1a5311859ff 100644
--- a/drivers/soc/qcom/pil-msa.h
+++ b/drivers/soc/qcom/pil-msa.h
@@ -24,6 +24,7 @@ struct modem_data {
struct subsys_device *subsys;
struct subsys_desc subsys_desc;
void *ramdump_dev;
+ void *minidump_dev;
bool crash_shutdown;
u32 pas_id;
bool ignore_errors;
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index bbcff5923c53..45712457de73 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -171,7 +171,8 @@ static int modem_ramdump(int enable, const struct subsys_desc *subsys)
if (ret)
return ret;
- ret = pil_do_ramdump(&drv->q6->desc, drv->ramdump_dev);
+ ret = pil_do_ramdump(&drv->q6->desc,
+ drv->ramdump_dev, drv->minidump_dev);
if (ret < 0)
pr_err("Unable to dump modem fw memory (rc = %d).\n", ret);
@@ -230,9 +231,18 @@ static int pil_subsys_init(struct modem_data *drv,
ret = -ENOMEM;
goto err_ramdump;
}
+ drv->minidump_dev = create_ramdump_device("md_modem", &pdev->dev);
+ if (!drv->minidump_dev) {
+ pr_err("%s: Unable to create a modem minidump device.\n",
+ __func__);
+ ret = -ENOMEM;
+ goto err_minidump;
+ }
return 0;
+err_minidump:
+ destroy_ramdump_device(drv->ramdump_dev);
err_ramdump:
subsys_unregister(drv->subsys);
err_subsys:
@@ -414,6 +424,7 @@ static int pil_mss_driver_exit(struct platform_device *pdev)
subsys_unregister(drv->subsys);
destroy_ramdump_device(drv->ramdump_dev);
+ destroy_ramdump_device(drv->minidump_dev);
pil_desc_release(&drv->q6->desc);
return 0;
}
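
Editor's note: pil_subsys_init() above extends the usual goto unwind ladder: each newly acquired resource gets its own error label, and a failure at step N releases steps N-1 down to 1 in reverse order. The shape of the pattern, with illustrative stand-in step names:

static int step_a(void), step_b(void), step_c(void);	/* stand-ins */
static void undo_a(void), undo_b(void);

static int my_init(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;

	ret = step_b();
	if (ret)
		goto err_a;

	ret = step_c();		/* the newly added resource */
	if (ret)
		goto err_b;

	return 0;

err_b:
	undo_b();		/* unwind in reverse acquisition order */
err_a:
	undo_a();
	return ret;
}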
diff --git a/drivers/soc/qcom/qdsp6v2/Makefile b/drivers/soc/qcom/qdsp6v2/Makefile
index 8c5b0d0e81c8..90feb8b659d1 100644
--- a/drivers/soc/qcom/qdsp6v2/Makefile
+++ b/drivers/soc/qcom/qdsp6v2/Makefile
@@ -2,7 +2,9 @@ obj-$(CONFIG_MSM_QDSP6_APRV2) += apr.o apr_v2.o apr_tal.o voice_svc.o
obj-$(CONFIG_MSM_QDSP6_APRV3) += apr.o apr_v3.o apr_tal.o voice_svc.o
obj-$(CONFIG_MSM_QDSP6_APRV2_GLINK) += apr.o apr_v2.o apr_tal_glink.o voice_svc.o
obj-$(CONFIG_MSM_QDSP6_APRV3_GLINK) += apr.o apr_v3.o apr_tal_glink.o voice_svc.o
+obj-$(CONFIG_MSM_QDSP6_APRV2_VM) += apr_vm.o apr_v2.o voice_svc.o
obj-$(CONFIG_SND_SOC_MSM_QDSP6V2_INTF) += msm_audio_ion.o
+obj-$(CONFIG_SND_SOC_QDSP6V2_VM) += msm_audio_ion_vm.o
obj-$(CONFIG_MSM_ADSP_LOADER) += adsp-loader.o
obj-$(CONFIG_MSM_QDSP6_SSR) += audio_ssr.o
obj-$(CONFIG_MSM_QDSP6_PDR) += audio_pdr.o
diff --git a/drivers/soc/qcom/qdsp6v2/apr_vm.c b/drivers/soc/qcom/qdsp6v2/apr_vm.c
new file mode 100644
index 000000000000..d0ea7b22717a
--- /dev/null
+++ b/drivers/soc/qcom/qdsp6v2/apr_vm.c
@@ -0,0 +1,1270 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/scm.h>
+#include <sound/apr_audio-v2.h>
+#include <linux/qdsp6v2/apr.h>
+#include <linux/qdsp6v2/apr_tal.h>
+#include <linux/qdsp6v2/aprv2_vm.h>
+#include <linux/qdsp6v2/dsp_debug.h>
+#include <linux/qdsp6v2/audio_notifier.h>
+#include <linux/ipc_logging.h>
+#include <linux/habmm.h>
+
+#define APR_PKT_IPC_LOG_PAGE_CNT 2
+#define APR_VM_CB_THREAD_NAME "apr_vm_cb_thread"
+#define APR_TX_BUF_SIZE 4096
+#define APR_RX_BUF_SIZE 4096
+
+static struct apr_q6 q6;
+static struct apr_client client[APR_DEST_MAX][APR_CLIENT_MAX];
+static void *apr_pkt_ctx;
+static wait_queue_head_t dsp_wait;
+static wait_queue_head_t modem_wait;
+static bool is_modem_up;
+static bool is_initial_boot;
+/* Subsystem restart: QDSP6 data, functions */
+static struct workqueue_struct *apr_reset_workqueue;
+static void apr_reset_deregister(struct work_struct *work);
+static void dispatch_event(unsigned long code, uint16_t proc);
+struct apr_reset_work {
+ void *handle;
+ struct work_struct work;
+};
+
+static bool apr_cf_debug;
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *debugfs_apr_debug;
+static ssize_t apr_debug_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char cmd;
+
+ if (copy_from_user(&cmd, ubuf, 1))
+ return -EFAULT;
+
+ apr_cf_debug = (cmd == '1') ? true : false;
+
+ return cnt;
+}
+
+static const struct file_operations apr_debug_ops = {
+ .write = apr_debug_write,
+};
+#endif
+
+#define APR_PKT_INFO(x...) \
+do { \
+ if (apr_pkt_ctx) \
+ ipc_log_string(apr_pkt_ctx, "<APR>: "x); \
+} while (0)
+
+/* hab handle */
+static uint32_t hab_handle_tx;
+static uint32_t hab_handle_rx;
+static char apr_tx_buf[APR_TX_BUF_SIZE];
+static char apr_rx_buf[APR_RX_BUF_SIZE];
+
+/* apr callback thread task */
+static struct task_struct *apr_vm_cb_thread_task;
+static int pid;
+
+
+struct apr_svc_table {
+ char name[64];
+ int idx;
+ int id;
+ int dest_svc;
+ int client_id;
+ int handle;
+};
+
+/*
+ * The source service ID is assigned dynamically through APR registration:
+ * 1. Build a proper string name for registration,
+ *    e.g. "qcom.apps.lnx." + name.
+ * 2. Register with the APR backend, retrieve the dynamic source service
+ *    address and APR handle, and store them in the service table.
+ */
+
+static struct mutex m_lock_tbl_qdsp6;
+
+static struct apr_svc_table svc_tbl_qdsp6[] = {
+ {
+ .name = "AFE",
+ .idx = 0,
+ .id = 0,
+ .dest_svc = APR_SVC_AFE,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "ASM",
+ .idx = 1,
+ .id = 0,
+ .dest_svc = APR_SVC_ASM,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "ADM",
+ .idx = 2,
+ .id = 0,
+ .dest_svc = APR_SVC_ADM,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "CORE",
+ .idx = 3,
+ .id = 0,
+ .dest_svc = APR_SVC_ADSP_CORE,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "TEST",
+ .idx = 4,
+ .id = 0,
+ .dest_svc = APR_SVC_TEST_CLIENT,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "MVM",
+ .idx = 5,
+ .id = 0,
+ .dest_svc = APR_SVC_ADSP_MVM,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "CVS",
+ .idx = 6,
+ .id = 0,
+ .dest_svc = APR_SVC_ADSP_CVS,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "CVP",
+ .idx = 7,
+ .id = 0,
+ .dest_svc = APR_SVC_ADSP_CVP,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "USM",
+ .idx = 8,
+ .id = 0,
+ .dest_svc = APR_SVC_USM,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "VIDC",
+ .idx = 9,
+ .id = 0,
+ .dest_svc = APR_SVC_VIDC,
+ .handle = 0,
+ },
+ {
+ .name = "LSM",
+ .idx = 10,
+ .id = 0,
+ .dest_svc = APR_SVC_LSM,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+};
+
+static struct mutex m_lock_tbl_voice;
+
+static struct apr_svc_table svc_tbl_voice[] = {
+ {
+ .name = "VSM",
+ .idx = 0,
+ .id = 0,
+ .dest_svc = APR_SVC_VSM,
+ .client_id = APR_CLIENT_VOICE,
+ .handle = 0,
+ },
+ {
+ .name = "VPM",
+ .idx = 1,
+ .id = 0,
+ .dest_svc = APR_SVC_VPM,
+ .client_id = APR_CLIENT_VOICE,
+ .handle = 0,
+ },
+ {
+ .name = "MVS",
+ .idx = 2,
+ .id = 0,
+ .dest_svc = APR_SVC_MVS,
+ .client_id = APR_CLIENT_VOICE,
+ .handle = 0,
+ },
+ {
+ .name = "MVM",
+ .idx = 3,
+ .id = 0,
+ .dest_svc = APR_SVC_MVM,
+ .client_id = APR_CLIENT_VOICE,
+ .handle = 0,
+ },
+ {
+ .name = "CVS",
+ .idx = 4,
+ .id = 0,
+ .dest_svc = APR_SVC_CVS,
+ .client_id = APR_CLIENT_VOICE,
+ .handle = 0,
+ },
+ {
+ .name = "CVP",
+ .idx = 5,
+ .id = 0,
+ .dest_svc = APR_SVC_CVP,
+ .client_id = APR_CLIENT_VOICE,
+ .handle = 0,
+ },
+ {
+ .name = "SRD",
+ .idx = 6,
+ .id = 0,
+ .dest_svc = APR_SVC_SRD,
+ .client_id = APR_CLIENT_VOICE,
+ .handle = 0,
+ },
+ {
+ .name = "TEST",
+ .idx = 7,
+ .id = 0,
+ .dest_svc = APR_SVC_TEST_CLIENT,
+ .client_id = APR_CLIENT_VOICE,
+ .handle = 0,
+ },
+};
+
+enum apr_subsys_state apr_get_modem_state(void)
+{
+ return atomic_read(&q6.modem_state);
+}
+
+void apr_set_modem_state(enum apr_subsys_state state)
+{
+ atomic_set(&q6.modem_state, state);
+}
+
+enum apr_subsys_state apr_cmpxchg_modem_state(enum apr_subsys_state prev,
+ enum apr_subsys_state new)
+{
+ return atomic_cmpxchg(&q6.modem_state, prev, new);
+}
+
+static void apr_modem_down(unsigned long opcode)
+{
+ apr_set_modem_state(APR_SUBSYS_DOWN);
+ dispatch_event(opcode, APR_DEST_MODEM);
+}
+
+static void apr_modem_up(void)
+{
+ if (apr_cmpxchg_modem_state(APR_SUBSYS_DOWN, APR_SUBSYS_UP) ==
+ APR_SUBSYS_DOWN)
+ wake_up(&modem_wait);
+ is_modem_up = 1;
+}
+
+enum apr_subsys_state apr_get_q6_state(void)
+{
+ return atomic_read(&q6.q6_state);
+}
+EXPORT_SYMBOL(apr_get_q6_state);
+
+int apr_set_q6_state(enum apr_subsys_state state)
+{
+ pr_debug("%s: setting adsp state %d\n", __func__, state);
+ if (state < APR_SUBSYS_DOWN || state > APR_SUBSYS_LOADED)
+ return -EINVAL;
+ atomic_set(&q6.q6_state, state);
+ return 0;
+}
+EXPORT_SYMBOL(apr_set_q6_state);
+
+enum apr_subsys_state apr_cmpxchg_q6_state(enum apr_subsys_state prev,
+ enum apr_subsys_state new)
+{
+ return atomic_cmpxchg(&q6.q6_state, prev, new);
+}
+
+static void apr_adsp_down(unsigned long opcode)
+{
+ apr_set_q6_state(APR_SUBSYS_DOWN);
+ dispatch_event(opcode, APR_DEST_QDSP6);
+}
+
+static void apr_adsp_up(void)
+{
+ if (apr_cmpxchg_q6_state(APR_SUBSYS_DOWN, APR_SUBSYS_LOADED) ==
+ APR_SUBSYS_DOWN)
+ wake_up(&dsp_wait);
+}
+
+int apr_wait_for_device_up(int dest_id)
+{
+ int rc = -1;
+
+ if (dest_id == APR_DEST_MODEM)
+ rc = wait_event_interruptible_timeout(modem_wait,
+ (apr_get_modem_state() == APR_SUBSYS_UP),
+ (1 * HZ));
+ else if (dest_id == APR_DEST_QDSP6)
+ rc = wait_event_interruptible_timeout(dsp_wait,
+ (apr_get_q6_state() == APR_SUBSYS_UP),
+ (1 * HZ));
+ else
+ pr_err("%s: unknown dest_id %d\n", __func__, dest_id);
+ /* returns the remaining wait time */
+ return rc;
+}
+
+static int apr_vm_nb_receive(int32_t handle, void *dest_buff,
+ uint32_t *size_bytes, uint32_t timeout)
+{
+ int rc;
+ uint32_t dest_buff_bytes = *size_bytes;
+ unsigned long delay = jiffies + (HZ / 2);
+
+ do {
+ *size_bytes = dest_buff_bytes;
+ rc = habmm_socket_recv(handle,
+ dest_buff,
+ size_bytes,
+ timeout,
+ HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING);
+ } while (time_before(jiffies, delay) && (rc == -EAGAIN) &&
+ (*size_bytes == 0));
+
+ return rc;
+}
+
+static int apr_vm_cb_process_evt(char *buf, int len)
+{
+ struct apr_client_data data;
+ struct apr_client *apr_client;
+ struct apr_svc *c_svc;
+ struct apr_hdr *hdr;
+ uint16_t hdr_size;
+ uint16_t msg_type;
+ uint16_t ver;
+ uint16_t src;
+ uint16_t svc;
+ uint16_t clnt;
+ int i;
+ int temp_port = 0;
+ uint32_t *ptr;
+ uint32_t evt_id;
+
+ pr_debug("APR: len = %d\n", len);
+ ptr = (uint32_t *)buf;
+ pr_debug("\n*****************\n");
+ for (i = 0; i < len/4; i++)
+ pr_debug("%x ", ptr[i]);
+ pr_debug("\n");
+ pr_debug("\n*****************\n");
+
+ if (!buf || len <= APR_HDR_SIZE + sizeof(uint32_t)) {
+ pr_err("APR: Improper apr pkt received: %p %d\n", buf, len);
+ return -EINVAL;
+ }
+
+ evt_id = *((int32_t *)buf);
+ if (evt_id != APRV2_VM_EVT_RX_PKT_AVAILABLE) {
+ pr_err("APR: Wrong evt id: %d\n", evt_id);
+ return -EINVAL;
+ }
+ hdr = (struct apr_hdr *)(buf + sizeof(uint32_t));
+
+ ver = hdr->hdr_field;
+ ver = (ver & 0x000F);
+ if (ver > APR_PKT_VER + 1) {
+ pr_err("APR: Wrong version: %d\n", ver);
+ return -EINVAL;
+ }
+
+ hdr_size = hdr->hdr_field;
+ hdr_size = ((hdr_size & 0x00F0) >> 0x4) * 4;
+ if (hdr_size < APR_HDR_SIZE) {
+ pr_err("APR: Wrong hdr size:%d\n", hdr_size);
+ return -EINVAL;
+ }
+
+ if (hdr->pkt_size < APR_HDR_SIZE) {
+ pr_err("APR: Wrong paket size\n");
+ return -EINVAL;
+ }
+
+ msg_type = hdr->hdr_field;
+ msg_type = (msg_type >> 0x08) & 0x0003;
+ if (msg_type >= APR_MSG_TYPE_MAX && msg_type != APR_BASIC_RSP_RESULT) {
+ pr_err("APR: Wrong message type: %d\n", msg_type);
+ return -EINVAL;
+ }
+
+ /*
+ * dest_svc is created dynamically by the APR service,
+ * so there is no need to range-check it here.
+ */
+ if (hdr->src_domain >= APR_DOMAIN_MAX ||
+ hdr->dest_domain >= APR_DOMAIN_MAX ||
+ hdr->src_svc >= APR_SVC_MAX) {
+ pr_err("APR: Wrong APR header\n");
+ return -EINVAL;
+ }
+
+ svc = hdr->dest_svc;
+ if (hdr->src_domain == APR_DOMAIN_MODEM)
+ clnt = APR_CLIENT_VOICE;
+ else if (hdr->src_domain == APR_DOMAIN_ADSP)
+ clnt = APR_CLIENT_AUDIO;
+ else {
+ pr_err("APR: Pkt from wrong source: %d\n", hdr->src_domain);
+ return -EINVAL;
+ }
+
+ src = apr_get_data_src(hdr);
+ if (src == APR_DEST_MAX)
+ return -EINVAL;
+
+ pr_debug("src =%d clnt = %d\n", src, clnt);
+ apr_client = &client[src][clnt];
+ for (i = 0; i < APR_SVC_MAX; i++)
+ if (apr_client->svc[i].id == svc) {
+ pr_debug("svc_id = %d\n", apr_client->svc[i].id);
+ c_svc = &apr_client->svc[i];
+ break;
+ }
+
+ if (i == APR_SVC_MAX) {
+ pr_err("APR: service is not registered\n");
+ return -ENXIO;
+ }
+
+ pr_debug("svc_idx = %d\n", i);
+ pr_debug("%x %x %x %p %p\n", c_svc->id, c_svc->dest_id,
+ c_svc->client_id, c_svc->fn, c_svc->priv);
+
+ data.payload_size = hdr->pkt_size - hdr_size;
+ data.opcode = hdr->opcode;
+ data.src = src;
+ data.src_port = hdr->src_port;
+ data.dest_port = hdr->dest_port;
+ data.token = hdr->token;
+ data.msg_type = msg_type;
+ if (data.payload_size > 0)
+ data.payload = (char *)hdr + hdr_size;
+
+ if (unlikely(apr_cf_debug)) {
+ if (hdr->opcode == APR_BASIC_RSP_RESULT && data.payload) {
+ uint32_t *ptr = data.payload;
+
+ APR_PKT_INFO(
+ "Rx: src_addr[0x%X] dest_addr[0x%X] opcode[0x%X] token[0x%X] rc[0x%X]",
+ (hdr->src_domain << 8) | hdr->src_svc,
+ (hdr->dest_domain << 8) | hdr->dest_svc,
+ hdr->opcode, hdr->token, ptr[1]);
+ } else {
+ APR_PKT_INFO(
+ "Rx: src_addr[0x%X] dest_addr[0x%X] opcode[0x%X] token[0x%X]",
+ (hdr->src_domain << 8) | hdr->src_svc,
+ (hdr->dest_domain << 8) | hdr->dest_svc, hdr->opcode,
+ hdr->token);
+ }
+ }
+
+ temp_port = ((data.dest_port >> 8) * 8) + (data.dest_port & 0xFF);
+ pr_debug("port = %d t_port = %d\n", data.src_port, temp_port);
+ if (c_svc->port_cnt && c_svc->port_fn[temp_port])
+ c_svc->port_fn[temp_port](&data, c_svc->port_priv[temp_port]);
+ else if (c_svc->fn)
+ c_svc->fn(&data, c_svc->priv);
+ else
+ pr_err("APR: Rxed a packet for NULL callback\n");
+
+ return 0;
+}
+
+static int apr_vm_cb_thread(void *data)
+{
+ uint32_t apr_rx_buf_len;
+ struct aprv2_vm_ack_rx_pkt_available_t apr_ack;
+ int status = 0;
+ int ret = 0;
+
+ while (1) {
+ apr_rx_buf_len = sizeof(apr_rx_buf);
+ ret = habmm_socket_recv(hab_handle_rx,
+ (void *)&apr_rx_buf,
+ &apr_rx_buf_len,
+ 0xFFFFFFFF,
+ 0);
+ if (ret) {
+ pr_err("%s: habmm_socket_recv failed %d\n",
+ __func__, ret);
+ /*
+ * TODO: depends on the HAB error code,
+ * may need to implement
+ * a retry mechanism.
+ * break if recv failed ?
+ */
+ break;
+ }
+
+ status = apr_vm_cb_process_evt(apr_rx_buf, apr_rx_buf_len);
+
+ apr_ack.status = status;
+ ret = habmm_socket_send(hab_handle_rx,
+ (void *)&apr_ack,
+ sizeof(apr_ack),
+ 0);
+ if (ret) {
+ pr_err("%s: habmm_socket_send failed %d\n",
+ __func__, ret);
+ /* TODO: break if send failed ? */
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int apr_vm_get_svc(const char *svc_name, int domain_id, int *client_id,
+ int *svc_idx, int *svc_id, int *dest_svc, int *handle)
+{
+ int i;
+ int size;
+ struct apr_svc_table *tbl;
+ struct mutex *lock;
+ struct aprv2_vm_cmd_register_rsp_t apr_rsp;
+ uint32_t apr_len;
+ int ret = 0;
+ struct {
+ uint32_t cmd_id;
+ struct aprv2_vm_cmd_register_t reg_cmd;
+ } tx_data;
+
+ if (domain_id == APR_DOMAIN_ADSP) {
+ tbl = svc_tbl_qdsp6;
+ size = ARRAY_SIZE(svc_tbl_qdsp6);
+ lock = &m_lock_tbl_qdsp6;
+ } else {
+ tbl = svc_tbl_voice;
+ size = ARRAY_SIZE(svc_tbl_voice);
+ lock = &m_lock_tbl_voice;
+ }
+
+ mutex_lock(lock);
+ for (i = 0; i < size; i++) {
+ if (!strcmp(svc_name, tbl[i].name)) {
+ *client_id = tbl[i].client_id;
+ *svc_idx = tbl[i].idx;
+ if (!tbl[i].id && !tbl[i].handle) {
+ /* need to register a new service */
+ memset((void *) &tx_data, 0, sizeof(tx_data));
+
+ apr_len = sizeof(tx_data);
+ tx_data.cmd_id = APRV2_VM_CMDID_REGISTER;
+ tx_data.reg_cmd.name_size = snprintf(
+ tx_data.reg_cmd.name,
+ APRV2_VM_MAX_DNS_SIZE,
+ "qcom.apps.lnx.%s",
+ svc_name);
+ tx_data.reg_cmd.addr = 0;
+ ret = habmm_socket_send(hab_handle_tx,
+ (void *) &tx_data,
+ apr_len,
+ 0);
+ if (ret) {
+ pr_err("%s: habmm_socket_send failed %d\n",
+ __func__, ret);
+ mutex_unlock(lock);
+ return ret;
+ }
+ /* wait for response */
+ apr_len = sizeof(apr_rsp);
+ ret = apr_vm_nb_receive(hab_handle_tx,
+ (void *)&apr_rsp,
+ &apr_len,
+ 0xFFFFFFFF);
+ if (ret) {
+ pr_err("%s: apr_vm_nb_receive failed %d\n",
+ __func__, ret);
+ mutex_unlock(lock);
+ return ret;
+ }
+ if (apr_rsp.status) {
+ pr_err("%s: apr_vm_nb_receive status %d\n",
+ __func__, apr_rsp.status);
+ ret = apr_rsp.status;
+ mutex_unlock(lock);
+ return ret;
+ }
+ /* update svc table */
+ tbl[i].handle = apr_rsp.handle;
+ tbl[i].id = apr_rsp.addr &
+ APRV2_VM_PKT_SERVICE_ID_MASK;
+ }
+ *svc_id = tbl[i].id;
+ *dest_svc = tbl[i].dest_svc;
+ *handle = tbl[i].handle;
+ break;
+ }
+ }
+ mutex_unlock(lock);
+
+ pr_debug("%s: svc_name = %s client_id = %d domain_id = %d\n",
+ __func__, svc_name, *client_id, domain_id);
+ pr_debug("%s: src_svc = %d dest_svc = %d handle = %d\n",
+ __func__, *svc_id, *dest_svc, *handle);
+
+ if (i == size) {
+ pr_err("%s: APR: Wrong svc name %s\n", __func__, svc_name);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int apr_vm_rel_svc(int domain_id, int svc_id, int handle)
+{
+ int i;
+ int size;
+ struct apr_svc_table *tbl;
+ struct mutex *lock;
+ struct aprv2_vm_cmd_deregister_rsp_t apr_rsp;
+ uint32_t apr_len;
+ int ret = 0;
+ struct {
+ uint32_t cmd_id;
+ struct aprv2_vm_cmd_deregister_t dereg_cmd;
+ } tx_data;
+
+ if (domain_id == APR_DOMAIN_ADSP) {
+ tbl = svc_tbl_qdsp6;
+ size = ARRAY_SIZE(svc_tbl_qdsp6);
+ lock = &m_lock_tbl_qdsp6;
+ } else {
+ tbl = svc_tbl_voice;
+ size = ARRAY_SIZE(svc_tbl_voice);
+ lock = &m_lock_tbl_voice;
+ }
+
+ mutex_lock(lock);
+ for (i = 0; i < size; i++) {
+ if (tbl[i].id == svc_id && tbl[i].handle == handle) {
+ /* need to deregister a service */
+ memset((void *) &tx_data, 0, sizeof(tx_data));
+
+ apr_len = sizeof(tx_data);
+ tx_data.cmd_id = APRV2_VM_CMDID_DEREGISTER;
+ tx_data.dereg_cmd.handle = handle;
+ ret = habmm_socket_send(hab_handle_tx,
+ (void *) &tx_data,
+ apr_len,
+ 0);
+ if (ret)
+ pr_err("%s: habmm_socket_send failed %d\n",
+ __func__, ret);
+ /*
+ * TODO: if send failed, should not wait for recv.
+ * should clear regardless?
+ */
+ /* wait for response */
+ apr_len = sizeof(apr_rsp);
+ ret = apr_vm_nb_receive(hab_handle_tx,
+ (void *)&apr_rsp,
+ &apr_len,
+ 0xFFFFFFFF);
+ if (ret)
+ pr_err("%s: apr_vm_nb_receive failed %d\n",
+ __func__, ret);
+ if (apr_rsp.status) {
+ pr_err("%s: apr_vm_nb_receive status %d\n",
+ __func__, apr_rsp.status);
+ ret = apr_rsp.status;
+ }
+ /* clear svc table */
+ tbl[i].handle = 0;
+ tbl[i].id = 0;
+ break;
+ }
+ }
+ mutex_unlock(lock);
+
+ if (i == size) {
+ pr_err("%s: APR: Wrong svc id %d handle %d\n",
+ __func__, svc_id, handle);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int apr_send_pkt(void *handle, uint32_t *buf)
+{
+ struct apr_svc *svc = handle;
+ struct apr_hdr *hdr;
+ unsigned long flags;
+ uint32_t *cmd_id = (uint32_t *)apr_tx_buf;
+ struct aprv2_vm_cmd_async_send_t *apr_send =
+ (struct aprv2_vm_cmd_async_send_t *)(apr_tx_buf +
+ sizeof(uint32_t));
+ uint32_t apr_send_len;
+ struct aprv2_vm_cmd_async_send_rsp_t apr_rsp;
+ uint32_t apr_rsp_len;
+ int ret = 0;
+
+ if (!handle || !buf) {
+ pr_err("APR: Wrong parameters\n");
+ return -EINVAL;
+ }
+ if (svc->need_reset) {
+ pr_err("APR: send_pkt service need reset\n");
+ return -ENETRESET;
+ }
+
+ if ((svc->dest_id == APR_DEST_QDSP6) &&
+ (apr_get_q6_state() != APR_SUBSYS_LOADED)) {
+ pr_err("%s: Still dsp is not Up\n", __func__);
+ return -ENETRESET;
+ } else if ((svc->dest_id == APR_DEST_MODEM) &&
+ (apr_get_modem_state() == APR_SUBSYS_DOWN)) {
+ pr_err("%s: Still Modem is not Up\n", __func__);
+ return -ENETRESET;
+ }
+
+ spin_lock_irqsave(&svc->w_lock, flags);
+ if (!svc->id || !svc->vm_handle) {
+ pr_err("APR: Still service is not yet opened\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ hdr = (struct apr_hdr *)buf;
+
+ hdr->src_domain = APR_DOMAIN_APPS;
+ hdr->src_svc = svc->id;
+ hdr->dest_domain = svc->dest_domain;
+ hdr->dest_svc = svc->vm_dest_svc;
+
+ if (unlikely(apr_cf_debug)) {
+ APR_PKT_INFO(
+ "Tx: src_addr[0x%X] dest_addr[0x%X] opcode[0x%X] token[0x%X]",
+ (hdr->src_domain << 8) | hdr->src_svc,
+ (hdr->dest_domain << 8) | hdr->dest_svc, hdr->opcode,
+ hdr->token);
+ }
+
+ memset((void *)&apr_tx_buf, 0, sizeof(apr_tx_buf));
+ /* pkt_size + cmd_id + handle */
+ apr_send_len = hdr->pkt_size + sizeof(uint32_t) * 2;
+ *cmd_id = APRV2_VM_CMDID_ASYNC_SEND;
+ apr_send->handle = svc->vm_handle;
+
+ /* safe check */
+ if (hdr->pkt_size > APR_TX_BUF_SIZE - (sizeof(uint32_t) * 2)) {
+ pr_err("APR: Wrong pkt size %d\n", hdr->pkt_size);
+ ret = -ENOMEM;
+ goto done;
+ }
+ memcpy(&apr_send->pkt_header, buf, hdr->pkt_size);
+
+ ret = habmm_socket_send(hab_handle_tx,
+ (void *)&apr_tx_buf,
+ apr_send_len,
+ 0);
+ if (ret) {
+ pr_err("%s: habmm_socket_send failed %d\n",
+ __func__, ret);
+ goto done;
+ }
+ /* wait for response */
+ apr_rsp_len = sizeof(apr_rsp);
+ ret = apr_vm_nb_receive(hab_handle_tx,
+ (void *)&apr_rsp,
+ &apr_rsp_len,
+ 0xFFFFFFFF);
+ if (ret) {
+ pr_err("%s: apr_vm_nb_receive failed %d\n",
+ __func__, ret);
+ goto done;
+ }
+ if (apr_rsp.status) {
+ pr_err("%s: apr_vm_nb_receive status %d\n",
+ __func__, apr_rsp.status);
+ /* should translate status properly */
+ ret = -ECOMM;
+ goto done;
+ }
+
+ /* upon successful send, return packet size */
+ ret = hdr->pkt_size;
+
+done:
+ spin_unlock_irqrestore(&svc->w_lock, flags);
+ return ret;
+}
+
+struct apr_svc *apr_register(char *dest, char *svc_name, apr_fn svc_fn,
+ uint32_t src_port, void *priv)
+{
+ struct apr_client *clnt;
+ int client_id = 0;
+ int svc_idx = 0;
+ int svc_id = 0;
+ int dest_id = 0;
+ int domain_id = 0;
+ int temp_port = 0;
+ struct apr_svc *svc = NULL;
+ int rc = 0;
+ bool can_open_channel = true;
+ int dest_svc = 0;
+ int handle = 0;
+
+ if (!dest || !svc_name || !svc_fn)
+ return NULL;
+
+ if (!strcmp(dest, "ADSP"))
+ domain_id = APR_DOMAIN_ADSP;
+ else if (!strcmp(dest, "MODEM")) {
+ /* Don't request SMD channels if the destination is MODEM,
+ * as these channels are no longer used and these clients
+ * only listen for MODEM SSR events.
+ */
+ can_open_channel = false;
+ domain_id = APR_DOMAIN_MODEM;
+ } else {
+ pr_err("APR: wrong destination\n");
+ goto done;
+ }
+
+ dest_id = apr_get_dest_id(dest);
+
+ if (dest_id == APR_DEST_QDSP6) {
+ if (apr_get_q6_state() != APR_SUBSYS_LOADED) {
+ pr_err("%s: adsp not up\n", __func__);
+ return NULL;
+ }
+ pr_debug("%s: adsp Up\n", __func__);
+ } else if (dest_id == APR_DEST_MODEM) {
+ if (apr_get_modem_state() == APR_SUBSYS_DOWN) {
+ if (is_modem_up) {
+ pr_err("%s: modem shutdown due to SSR, ret",
+ __func__);
+ return NULL;
+ }
+ pr_debug("%s: Wait for modem to bootup\n", __func__);
+ rc = apr_wait_for_device_up(APR_DEST_MODEM);
+ if (rc == 0) {
+ pr_err("%s: Modem is not Up\n", __func__);
+ return NULL;
+ }
+ }
+ pr_debug("%s: modem Up\n", __func__);
+ }
+
+ if (apr_vm_get_svc(svc_name, domain_id, &client_id, &svc_idx, &svc_id,
+ &dest_svc, &handle)) {
+ pr_err("%s: apr_vm_get_svc failed\n", __func__);
+ goto done;
+ }
+
+ clnt = &client[dest_id][client_id];
+ svc = &clnt->svc[svc_idx];
+ mutex_lock(&svc->m_lock);
+ clnt->id = client_id;
+ if (svc->need_reset) {
+ mutex_unlock(&svc->m_lock);
+ pr_err("APR: Service needs reset\n");
+ goto done;
+ }
+ svc->id = svc_id;
+ svc->vm_dest_svc = dest_svc;
+ svc->dest_id = dest_id;
+ svc->client_id = client_id;
+ svc->dest_domain = domain_id;
+ svc->pkt_owner = APR_PKT_OWNER_DRIVER;
+ svc->vm_handle = handle;
+
+ if (src_port != 0xFFFFFFFF) {
+ temp_port = ((src_port >> 8) * 8) + (src_port & 0xFF);
+ pr_debug("port = %d t_port = %d\n", src_port, temp_port);
+ if (temp_port >= APR_MAX_PORTS || temp_port < 0) {
+ pr_err("APR: temp_port out of bounds\n");
+ mutex_unlock(&svc->m_lock);
+ return NULL;
+ }
+ if (!svc->port_cnt && !svc->svc_cnt)
+ clnt->svc_cnt++;
+ svc->port_cnt++;
+ svc->port_fn[temp_port] = svc_fn;
+ svc->port_priv[temp_port] = priv;
+ } else {
+ if (!svc->fn) {
+ if (!svc->port_cnt && !svc->svc_cnt)
+ clnt->svc_cnt++;
+ svc->fn = svc_fn;
+ if (svc->port_cnt)
+ svc->svc_cnt++;
+ svc->priv = priv;
+ }
+ }
+
+ mutex_unlock(&svc->m_lock);
+done:
+ return svc;
+}
+
+static void apr_reset_deregister(struct work_struct *work)
+{
+ struct apr_svc *handle = NULL;
+ struct apr_reset_work *apr_reset =
+ container_of(work, struct apr_reset_work, work);
+
+ handle = apr_reset->handle;
+ pr_debug("%s:handle[%pK]\n", __func__, handle);
+ apr_deregister(handle);
+ kfree(apr_reset);
+}
+
+int apr_deregister(void *handle)
+{
+ struct apr_svc *svc = handle;
+ struct apr_client *clnt;
+ uint16_t dest_id;
+ uint16_t client_id;
+
+ if (!handle)
+ return -EINVAL;
+
+ mutex_lock(&svc->m_lock);
+ dest_id = svc->dest_id;
+ client_id = svc->client_id;
+ clnt = &client[dest_id][client_id];
+
+ if (svc->port_cnt > 0 || svc->svc_cnt > 0) {
+ if (svc->port_cnt)
+ svc->port_cnt--;
+ else if (svc->svc_cnt)
+ svc->svc_cnt--;
+ if (!svc->port_cnt && !svc->svc_cnt) {
+ client[dest_id][client_id].svc_cnt--;
+ svc->need_reset = 0x0;
+ }
+ } else if (client[dest_id][client_id].svc_cnt > 0) {
+ client[dest_id][client_id].svc_cnt--;
+ if (!client[dest_id][client_id].svc_cnt) {
+ svc->need_reset = 0x0;
+ pr_debug("%s: service is reset %p\n", __func__, svc);
+ }
+ }
+
+ if (!svc->port_cnt && !svc->svc_cnt) {
+ if (apr_vm_rel_svc(svc->dest_domain, svc->id, svc->vm_handle))
+ pr_err("%s: apr_vm_rel_svc failed\n", __func__);
+ svc->priv = NULL;
+ svc->id = 0;
+ svc->vm_dest_svc = 0;
+ svc->fn = NULL;
+ svc->dest_id = 0;
+ svc->client_id = 0;
+ svc->need_reset = 0x0;
+ svc->vm_handle = 0;
+ }
+ mutex_unlock(&svc->m_lock);
+
+ return 0;
+}
+
+void apr_reset(void *handle)
+{
+ struct apr_reset_work *apr_reset_worker = NULL;
+
+ if (!handle)
+ return;
+ pr_debug("%s: handle[%pK]\n", __func__, handle);
+
+ if (apr_reset_workqueue == NULL) {
+ pr_err("%s: apr_reset_workqueue is NULL\n", __func__);
+ return;
+ }
+
+ apr_reset_worker = kzalloc(sizeof(struct apr_reset_work),
+ GFP_ATOMIC);
+
+ if (apr_reset_worker == NULL) {
+ pr_err("%s: mem failure\n", __func__);
+ return;
+ }
+
+ apr_reset_worker->handle = handle;
+ INIT_WORK(&apr_reset_worker->work, apr_reset_deregister);
+ queue_work(apr_reset_workqueue, &apr_reset_worker->work);
+}
+
+/* Dispatch the Reset events to Modem and audio clients */
+static void dispatch_event(unsigned long code, uint16_t proc)
+{
+ struct apr_client *apr_client;
+ struct apr_client_data data;
+ struct apr_svc *svc;
+ uint16_t clnt;
+ int i, j;
+
+ memset(&data, 0, sizeof(data));
+ data.opcode = RESET_EVENTS;
+ data.reset_event = code;
+
+ /* Service domain can be different from the processor */
+ data.reset_proc = apr_get_reset_domain(proc);
+
+ clnt = APR_CLIENT_AUDIO;
+ apr_client = &client[proc][clnt];
+ for (i = 0; i < APR_SVC_MAX; i++) {
+ mutex_lock(&apr_client->svc[i].m_lock);
+ if (apr_client->svc[i].fn) {
+ apr_client->svc[i].need_reset = 0x1;
+ apr_client->svc[i].fn(&data, apr_client->svc[i].priv);
+ }
+ if (apr_client->svc[i].port_cnt) {
+ svc = &(apr_client->svc[i]);
+ svc->need_reset = 0x1;
+ for (j = 0; j < APR_MAX_PORTS; j++)
+ if (svc->port_fn[j])
+ svc->port_fn[j](&data,
+ svc->port_priv[j]);
+ }
+ mutex_unlock(&apr_client->svc[i].m_lock);
+ }
+
+ clnt = APR_CLIENT_VOICE;
+ apr_client = &client[proc][clnt];
+ for (i = 0; i < APR_SVC_MAX; i++) {
+ mutex_lock(&apr_client->svc[i].m_lock);
+ if (apr_client->svc[i].fn) {
+ apr_client->svc[i].need_reset = 0x1;
+ apr_client->svc[i].fn(&data, apr_client->svc[i].priv);
+ }
+ if (apr_client->svc[i].port_cnt) {
+ svc = &(apr_client->svc[i]);
+ svc->need_reset = 0x1;
+ for (j = 0; j < APR_MAX_PORTS; j++)
+ if (svc->port_fn[j])
+ svc->port_fn[j](&data,
+ svc->port_priv[j]);
+ }
+ mutex_unlock(&apr_client->svc[i].m_lock);
+ }
+}
+
+static int apr_notifier_service_cb(struct notifier_block *this,
+ unsigned long opcode, void *data)
+{
+ struct audio_notifier_cb_data *cb_data = data;
+
+ if (cb_data == NULL) {
+ pr_err("%s: Callback data is NULL!\n", __func__);
+ goto done;
+ }
+
+ pr_debug("%s: Service opcode 0x%lx, domain %d\n",
+ __func__, opcode, cb_data->domain);
+
+ switch (opcode) {
+ case AUDIO_NOTIFIER_SERVICE_DOWN:
+ /*
+ * Use flag to ignore down notifications during
+ * initial boot. There is no benefit from error
+ * recovery notifications during initial boot
+ * up since everything is expected to be down.
+ */
+ if (is_initial_boot) {
+ is_initial_boot = false;
+ break;
+ }
+ if (cb_data->domain == AUDIO_NOTIFIER_MODEM_DOMAIN)
+ apr_modem_down(opcode);
+ else
+ apr_adsp_down(opcode);
+ break;
+ case AUDIO_NOTIFIER_SERVICE_UP:
+ is_initial_boot = false;
+ if (cb_data->domain == AUDIO_NOTIFIER_MODEM_DOMAIN)
+ apr_modem_up();
+ else
+ apr_adsp_up();
+ break;
+ default:
+ break;
+ }
+done:
+ return NOTIFY_OK;
+}
+
+static struct notifier_block adsp_service_nb = {
+ .notifier_call = apr_notifier_service_cb,
+ .priority = 0,
+};
+
+static struct notifier_block modem_service_nb = {
+ .notifier_call = apr_notifier_service_cb,
+ .priority = 0,
+};
+
+static void apr_vm_set_subsys_state(void)
+{
+ /* Set the default subsystem state in the VM environment.
+ * Both Q6 and modem should be in the LOADED state, since
+ * the VM boots up at a late stage after PM.
+ */
+ apr_set_q6_state(APR_SUBSYS_LOADED);
+ apr_set_modem_state(APR_SUBSYS_LOADED);
+}
+
+static int __init apr_init(void)
+{
+ int i, j, k;
+ int ret;
+
+ /* open apr channel tx and rx, store as global */
+ ret = habmm_socket_open(&hab_handle_tx,
+ MM_AUD_1,
+ 0xFFFFFFFF,
+ HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_FE);
+ if (ret) {
+ pr_err("%s: habmm_socket_open tx failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = habmm_socket_open(&hab_handle_rx,
+ MM_AUD_2,
+ 0xFFFFFFFF,
+ HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_FE);
+ if (ret) {
+ pr_err("%s: habmm_socket_open rx failed %d\n", __func__, ret);
+ habmm_socket_close(hab_handle_tx);
+ return ret;
+ }
+ pr_info("%s: hab_handle_tx %x hab_handle_rx %x\n",
+ __func__, hab_handle_tx, hab_handle_rx);
+
+ /* create apr ch rx cb thread */
+ apr_vm_cb_thread_task = kthread_run(apr_vm_cb_thread,
+ NULL,
+ APR_VM_CB_THREAD_NAME);
+ if (IS_ERR(apr_vm_cb_thread_task)) {
+ ret = PTR_ERR(apr_vm_cb_thread_task);
+ pr_err("%s: kthread_run failed %d\n", __func__, ret);
+ habmm_socket_close(hab_handle_tx);
+ habmm_socket_close(hab_handle_rx);
+ return ret;
+ }
+ pid = apr_vm_cb_thread_task->pid;
+ pr_info("%s: apr_vm_cb_thread started pid %d\n",
+ __func__, pid);
+
+ mutex_init(&m_lock_tbl_qdsp6);
+ mutex_init(&m_lock_tbl_voice);
+
+ for (i = 0; i < APR_DEST_MAX; i++)
+ for (j = 0; j < APR_CLIENT_MAX; j++) {
+ mutex_init(&client[i][j].m_lock);
+ for (k = 0; k < APR_SVC_MAX; k++) {
+ mutex_init(&client[i][j].svc[k].m_lock);
+ spin_lock_init(&client[i][j].svc[k].w_lock);
+ }
+ }
+
+ apr_vm_set_subsys_state();
+ mutex_init(&q6.lock);
+ apr_reset_workqueue = create_singlethread_workqueue("apr_driver");
+ if (!apr_reset_workqueue) {
+ habmm_socket_close(hab_handle_tx);
+ habmm_socket_close(hab_handle_rx);
+ kthread_stop(apr_vm_cb_thread_task);
+ return -ENOMEM;
+ }
+
+ apr_pkt_ctx = ipc_log_context_create(APR_PKT_IPC_LOG_PAGE_CNT,
+ "apr", 0);
+ if (!apr_pkt_ctx)
+ pr_err("%s: Unable to create ipc log context\n", __func__);
+
+ is_initial_boot = true;
+ subsys_notif_register("apr_adsp", AUDIO_NOTIFIER_ADSP_DOMAIN,
+ &adsp_service_nb);
+ subsys_notif_register("apr_modem", AUDIO_NOTIFIER_MODEM_DOMAIN,
+ &modem_service_nb);
+
+ return 0;
+}
+device_initcall(apr_init);
+
+static int __init apr_late_init(void)
+{
+ int ret = 0;
+
+ init_waitqueue_head(&dsp_wait);
+ init_waitqueue_head(&modem_wait);
+
+ return ret;
+}
+late_initcall(apr_late_init);
+
+static void __exit apr_exit(void)
+{
+ habmm_socket_close(hab_handle_tx);
+ habmm_socket_close(hab_handle_rx);
+ kthread_stop(apr_vm_cb_thread_task);
+}
+__exitcall(apr_exit);
+
+#ifdef CONFIG_DEBUG_FS
+static int __init apr_debug_init(void)
+{
+ debugfs_apr_debug = debugfs_create_file("msm_apr_debug",
+ S_IFREG | S_IRUGO, NULL, NULL,
+ &apr_debug_ops);
+ return 0;
+}
+device_initcall(apr_debug_init);
+#endif
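
Editor's note: apr_vm.c above drives every control operation (register, deregister, async send) through the same HAB request/response round trip: habmm_socket_send() of a command struct, followed by a receive of a status response on the same handle. A minimal, assumption-laden sketch of that round trip; the wire format here is illustrative only:

#include <linux/errno.h>
#include <linux/habmm.h>

struct my_vm_cmd     { u32 cmd_id; u32 arg; };	/* illustrative wire format */
struct my_vm_cmd_rsp { s32 status; };

static int my_vm_roundtrip(u32 handle, u32 cmd_id, u32 arg)
{
	struct my_vm_cmd cmd = { .cmd_id = cmd_id, .arg = arg };
	struct my_vm_cmd_rsp rsp;
	u32 rsp_len = sizeof(rsp);
	int ret;

	ret = habmm_socket_send(handle, &cmd, sizeof(cmd), 0);
	if (ret)
		return ret;

	/* Block until the backend answers. */
	ret = habmm_socket_recv(handle, &rsp, &rsp_len, 0xFFFFFFFF, 0);
	if (ret)
		return ret;
	if (rsp_len != sizeof(rsp))
		return -EIO;

	return rsp.status ? -ECOMM : 0;	/* translate backend status */
}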
diff --git a/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c b/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c
new file mode 100644
index 000000000000..a3aa8823d8ce
--- /dev/null
+++ b/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c
@@ -0,0 +1,838 @@
+/*
+ * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/msm_audio_ion.h>
+#include <linux/habmm.h>
+#include "../../../staging/android/ion/ion_priv.h"
+#include "../../../staging/android/ion/ion_hvenv_driver.h"
+
+#define MSM_AUDIO_ION_PROBED (1 << 0)
+
+#define MSM_AUDIO_SMMU_VM_CMD_MAP 0x00000001
+#define MSM_AUDIO_SMMU_VM_CMD_UNMAP 0x00000002
+#define MSM_AUDIO_SMMU_VM_HAB_MINOR_ID 1
+
+struct msm_audio_ion_private {
+ bool smmu_enabled;
+ bool audioheap_enabled;
+ u8 device_status;
+ struct list_head smmu_map_list;
+ struct mutex smmu_map_mutex;
+};
+
+struct msm_audio_smmu_map_data {
+ struct ion_client *client;
+ struct ion_handle *handle;
+ u32 export_id;
+ struct list_head list;
+};
+
+struct msm_audio_smmu_vm_map_cmd {
+ int cmd_id;
+ u32 export_id;
+ u32 buf_size;
+};
+
+struct msm_audio_smmu_vm_map_cmd_rsp {
+ int status;
+ u64 addr;
+};
+
+struct msm_audio_smmu_vm_unmap_cmd {
+ int cmd_id;
+ u32 export_id;
+};
+
+struct msm_audio_smmu_vm_unmap_cmd_rsp {
+ int status;
+};
+
+static struct msm_audio_ion_private msm_audio_ion_data = {0,};
+static u32 msm_audio_ion_hab_handle;
+
+static int msm_audio_ion_get_phys(struct ion_client *client,
+ struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len,
+ void *vaddr);
+
+static int msm_audio_ion_smmu_map(struct ion_client *client,
+ struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len, void *vaddr)
+{
+ int rc;
+ u32 export_id;
+ u32 cmd_rsp_size;
+ bool exported = false;
+ struct msm_audio_smmu_vm_map_cmd_rsp cmd_rsp;
+ struct msm_audio_smmu_map_data *map_data = NULL;
+ struct msm_audio_smmu_vm_map_cmd smmu_map_cmd;
+
+ rc = ion_handle_get_size(client, handle, len);
+ if (rc) {
+ pr_err("%s: ion_handle_get_size failed, client = %pK, handle = %pK, rc = %d\n",
+ __func__, client, handle, rc);
+ goto err;
+ }
+
+ /* Data required to track per buffer mapping */
+ map_data = kzalloc(sizeof(*map_data), GFP_KERNEL);
+ if (!map_data) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ /* Export the buffer to physical VM */
+ rc = habmm_export(msm_audio_ion_hab_handle, vaddr, *len,
+ &export_id, 0);
+ if (rc) {
+ pr_err("%s: habmm_export failed vaddr = %pK, len = %zd, rc = %d\n",
+ __func__, vaddr, *len, rc);
+ goto err;
+ }
+
+ exported = true;
+ smmu_map_cmd.cmd_id = MSM_AUDIO_SMMU_VM_CMD_MAP;
+ smmu_map_cmd.export_id = export_id;
+ smmu_map_cmd.buf_size = *len;
+
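+ /*
+ * Hold smmu_map_mutex across the send/recv pair so a response from
+ * the backend cannot be claimed by a different, concurrent mapper.
+ */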
+ mutex_lock(&(msm_audio_ion_data.smmu_map_mutex));
+ rc = habmm_socket_send(msm_audio_ion_hab_handle,
+ (void *)&smmu_map_cmd, sizeof(smmu_map_cmd), 0);
+ if (rc) {
+ pr_err("%s: habmm_socket_send failed %d\n",
+ __func__, rc);
+ mutex_unlock(&(msm_audio_ion_data.smmu_map_mutex));
+ goto err;
+ }
+
+ cmd_rsp_size = sizeof(cmd_rsp);
+ rc = habmm_socket_recv(msm_audio_ion_hab_handle,
+ (void *)&cmd_rsp,
+ &cmd_rsp_size,
+ 0xFFFFFFFF,
+ 0);
+ if (rc) {
+ pr_err("%s: habmm_socket_recv failed %d\n",
+ __func__, rc);
+ mutex_unlock(&(msm_audio_ion_data.smmu_map_mutex));
+ goto err;
+ }
+ mutex_unlock(&(msm_audio_ion_data.smmu_map_mutex));
+
+ if (cmd_rsp_size != sizeof(cmd_rsp)) {
+ pr_err("%s: invalid size for cmd rsp %lu, expected %lu\n",
+ __func__, cmd_rsp_size, sizeof(cmd_rsp));
+ rc = -EIO;
+ goto err;
+ }
+
+ if (cmd_rsp.status) {
+ pr_err("%s: SMMU map command failed %d\n",
+ __func__, cmd_rsp.status);
+ rc = cmd_rsp.status;
+ goto err;
+ }
+
+ *addr = (ion_phys_addr_t)cmd_rsp.addr;
+
+ map_data->client = client;
+ map_data->handle = handle;
+ map_data->export_id = export_id;
+
+ mutex_lock(&(msm_audio_ion_data.smmu_map_mutex));
+ list_add_tail(&(map_data->list),
+ &(msm_audio_ion_data.smmu_map_list));
+ mutex_unlock(&(msm_audio_ion_data.smmu_map_mutex));
+
+ return 0;
+
+err:
+ if (exported)
+ (void)habmm_unexport(msm_audio_ion_hab_handle, export_id, 0);
+
+ kfree(map_data);
+
+ return rc;
+}
+
+static int msm_audio_ion_smmu_unmap(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ int rc;
+ bool found = false;
+ u32 cmd_rsp_size;
+ struct msm_audio_smmu_vm_unmap_cmd_rsp cmd_rsp;
+ struct msm_audio_smmu_map_data *map_data, *next;
+ struct msm_audio_smmu_vm_unmap_cmd smmu_unmap_cmd;
+
+ /*
+ * list_for_each_entry_safe() only protects against deletion by
+ * this iterator; the lock must still be held explicitly to guard
+ * against concurrent additions to the list.
+ */
+ mutex_lock(&(msm_audio_ion_data.smmu_map_mutex));
+ list_for_each_entry_safe(map_data, next,
+ &(msm_audio_ion_data.smmu_map_list), list) {
+
+ if (map_data->handle == handle && map_data->client == client) {
+ found = true;
+ smmu_unmap_cmd.cmd_id = MSM_AUDIO_SMMU_VM_CMD_UNMAP;
+ smmu_unmap_cmd.export_id = map_data->export_id;
+
+ rc = habmm_socket_send(msm_audio_ion_hab_handle,
+ (void *)&smmu_unmap_cmd,
+ sizeof(smmu_unmap_cmd), 0);
+ if (rc) {
+ pr_err("%s: habmm_socket_send failed %d\n",
+ __func__, rc);
+ goto err;
+ }
+
+ cmd_rsp_size = sizeof(cmd_rsp);
+ rc = habmm_socket_recv(msm_audio_ion_hab_handle,
+ (void *)&cmd_rsp,
+ &cmd_rsp_size,
+ 0xFFFFFFFF,
+ 0);
+ if (rc) {
+ pr_err("%s: habmm_socket_recv failed %d\n",
+ __func__, rc);
+ goto err;
+ }
+
+ if (cmd_rsp_size != sizeof(cmd_rsp)) {
+ pr_err("%s: invalid size for cmd rsp %lu\n",
+ __func__, cmd_rsp_size);
+ rc = -EIO;
+ goto err;
+ }
+
+ if (cmd_rsp.status) {
+ pr_err("%s: SMMU unmap command failed %d\n",
+ __func__, cmd_rsp.status);
+ rc = cmd_rsp.status;
+ goto err;
+ }
+
+ rc = habmm_unexport(msm_audio_ion_hab_handle,
+ map_data->export_id, 0xFFFFFFFF);
+ if (rc) {
+ pr_err("%s: habmm_unexport failed export_id = %d, rc = %d\n",
+ __func__, map_data->export_id, rc);
+ }
+
+ list_del(&(map_data->list));
+ kfree(map_data);
+ break;
+ }
+ }
+ mutex_unlock(&(msm_audio_ion_data.smmu_map_mutex));
+
+ if (!found) {
+ pr_err("%s: cannot find map_data ion_handle %pK, ion_client %pK\n",
+ __func__, handle, client);
+ rc = -EINVAL;
+ }
+
+ return rc;
+
+err:
+ if (found) {
+ (void)habmm_unexport(msm_audio_ion_hab_handle,
+ map_data->export_id, 0xFFFFFFFF);
+ list_del(&(map_data->list));
+ kfree(map_data);
+ }
+
+ mutex_unlock(&(msm_audio_ion_data.smmu_map_mutex));
+ return rc;
+}
+
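+/*
+ * Typical caller flow (illustrative sketch; the caller-side variable
+ * names are hypothetical):
+ *
+ *   struct ion_client *client;
+ *   struct ion_handle *handle;
+ *   ion_phys_addr_t paddr;
+ *   size_t pa_len;
+ *   void *vaddr;
+ *
+ *   rc = msm_audio_ion_alloc("audio_client", &client, &handle,
+ *                            bufsz, &paddr, &pa_len, &vaddr);
+ *   if (!rc)
+ *       ... use vaddr / paddr ...
+ *   msm_audio_ion_free(client, handle);
+ */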
+int msm_audio_ion_alloc(const char *name, struct ion_client **client,
+ struct ion_handle **handle, size_t bufsz,
+ ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr)
+{
+ int rc = -EINVAL;
+ unsigned long err_ion_ptr = 0;
+
+ if ((msm_audio_ion_data.smmu_enabled == true) &&
+ !(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
+ pr_debug("%s:probe is not done, deferred\n", __func__);
+ return -EPROBE_DEFER;
+ }
+ if (!name || !client || !handle || !paddr || !vaddr
+ || !bufsz || !pa_len) {
+ pr_err("%s: Invalid params\n", __func__);
+ return -EINVAL;
+ }
+ *client = msm_audio_ion_client_create(name);
+ if (IS_ERR_OR_NULL((void *)(*client))) {
+ pr_err("%s: ION create client for AUDIO failed\n", __func__);
+ goto err;
+ }
+
+ *handle = ion_alloc(*client, bufsz, SZ_4K,
+ ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ if (IS_ERR_OR_NULL((void *) (*handle))) {
+ if (msm_audio_ion_data.smmu_enabled) {
+ pr_debug("%s: system heap is used\n", __func__);
+ msm_audio_ion_data.audioheap_enabled = false;
+ *handle = ion_alloc(*client, bufsz, SZ_4K,
+ ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
+ }
+ if (IS_ERR_OR_NULL((void *) (*handle))) {
+ if (IS_ERR((void *)(*handle)))
+ err_ion_ptr = PTR_ERR(*handle);
+ pr_err("%s:ION alloc fail err ptr=%ld, smmu_enabled=%d\n",
+ __func__, err_ion_ptr, msm_audio_ion_data.smmu_enabled);
+ rc = -ENOMEM;
+ goto err_ion_client;
+ }
+ } else {
+ pr_debug("audio heap is used");
+ msm_audio_ion_data.audioheap_enabled = 1;
+ }
+
+ *vaddr = ion_map_kernel(*client, *handle);
+ if (IS_ERR_OR_NULL((void *)*vaddr)) {
+ pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
+ goto err_ion_handle;
+ }
+ pr_debug("%s: mapped address = %pK, size=%zd\n", __func__,
+ *vaddr, bufsz);
+
+ if (bufsz != 0) {
+ pr_debug("%s: memset to 0 %pK %zd\n", __func__, *vaddr, bufsz);
+ memset((void *)*vaddr, 0, bufsz);
+ }
+
+ rc = msm_audio_ion_get_phys(*client, *handle, paddr, pa_len, *vaddr);
+ if (rc) {
+ pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
+ __func__, rc);
+ goto err_get_phys;
+ }
+
+ return rc;
+
+err_get_phys:
+ ion_unmap_kernel(*client, *handle);
+err_ion_handle:
+ ion_free(*client, *handle);
+err_ion_client:
+ msm_audio_ion_client_destroy(*client);
+ *handle = NULL;
+ *client = NULL;
+err:
+ return rc;
+}
+EXPORT_SYMBOL(msm_audio_ion_alloc);
+
+int msm_audio_ion_phys_free(struct ion_client *client,
+ struct ion_handle *handle,
+ ion_phys_addr_t *paddr,
+ size_t *pa_len, u8 assign_type)
+{
+ if (!(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
+ pr_debug("%s:probe is not done, deferred\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
+ if (!client || !handle || !paddr || !pa_len) {
+ pr_err("%s: Invalid params\n", __func__);
+ return -EINVAL;
+ }
+
+ /* hyp assign is not supported in VM */
+
+ ion_free(client, handle);
+ ion_client_destroy(client);
+
+ return 0;
+}
+
+int msm_audio_ion_phys_assign(const char *name, struct ion_client **client,
+ struct ion_handle **handle, int fd,
+ ion_phys_addr_t *paddr,
+ size_t *pa_len, u8 assign_type)
+{
+ int ret;
+
+ if (!(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
+ pr_debug("%s:probe is not done, deferred\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
+ if (!name || !client || !handle || !paddr || !pa_len) {
+ pr_err("%s: Invalid params\n", __func__);
+ return -EINVAL;
+ }
+
+ *client = msm_audio_ion_client_create(name);
+ if (IS_ERR_OR_NULL((void *)(*client))) {
+ pr_err("%s: ION create client failed\n", __func__);
+ return -EINVAL;
+ }
+
+ *handle = ion_import_dma_buf(*client, fd);
+ if (IS_ERR_OR_NULL((void *) (*handle))) {
+ pr_err("%s: ion import dma buffer failed\n",
+ __func__);
+ ret = -EINVAL;
+ goto err_destroy_client;
+ }
+
+ ret = ion_phys(*client, *handle, paddr, pa_len);
+ if (ret) {
+ pr_err("%s: could not get physical address for handle, ret = %d\n",
+ __func__, ret);
+ goto err_ion_handle;
+ }
+
+ /* hyp assign is not supported in VM */
+
+ return ret;
+
+err_ion_handle:
+ ion_free(*client, *handle);
+
+err_destroy_client:
+ ion_client_destroy(*client);
+ *client = NULL;
+ *handle = NULL;
+
+ return ret;
+}
+
+int msm_audio_ion_import(const char *name, struct ion_client **client,
+ struct ion_handle **handle, int fd,
+ unsigned long *ionflag, size_t bufsz,
+ ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr)
+{
+ int rc = 0;
+
+ if ((msm_audio_ion_data.smmu_enabled == true) &&
+ !(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
+ pr_debug("%s:probe is not done, deferred\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
+ if (!name || !client || !handle || !paddr || !vaddr || !pa_len) {
+ pr_err("%s: Invalid params\n", __func__);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ *client = msm_audio_ion_client_create(name);
+ if (IS_ERR_OR_NULL((void *)(*client))) {
+ pr_err("%s: ION create client for AUDIO failed\n", __func__);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ /*
+ * As of now the name is expected to be audio_acdb_client or
+ * Audio_Dec_Client, bufsz should be 0, and fd must be non-zero.
+ */
+ *handle = ion_import_dma_buf(*client, fd);
+ pr_debug("%s: DMA Buf name=%s, fd=%d handle=%pK\n", __func__,
+ name, fd, *handle);
+ if (IS_ERR_OR_NULL((void *) (*handle))) {
+ pr_err("%s: ion import dma buffer failed\n",
+ __func__);
+ rc = -EINVAL;
+ goto err_destroy_client;
+ }
+
+ if (ionflag != NULL) {
+ rc = ion_handle_get_flags(*client, *handle, ionflag);
+ if (rc) {
+ pr_err("%s: could not get flags for the handle\n",
+ __func__);
+ goto err_ion_handle;
+ }
+ }
+
+ *vaddr = ion_map_kernel(*client, *handle);
+ if (IS_ERR_OR_NULL((void *)*vaddr)) {
+ pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
+ rc = -ENOMEM;
+ goto err_ion_handle;
+ }
+ pr_debug("%s: mapped address = %pK, size=%zd\n", __func__,
+ *vaddr, bufsz);
+
+ rc = msm_audio_ion_get_phys(*client, *handle, paddr, pa_len, *vaddr);
+ if (rc) {
+ pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
+ __func__, rc);
+ goto err_get_phys;
+ }
+
+ return 0;
+
+err_get_phys:
+ ion_unmap_kernel(*client, *handle);
+err_ion_handle:
+ ion_free(*client, *handle);
+err_destroy_client:
+ msm_audio_ion_client_destroy(*client);
+ *client = NULL;
+ *handle = NULL;
+err:
+ return rc;
+}
+
+int msm_audio_ion_free(struct ion_client *client, struct ion_handle *handle)
+{
+ int ret = 0;
+
+ if (!client || !handle) {
+ pr_err("%s Invalid params\n", __func__);
+ return -EINVAL;
+ }
+
+ if (msm_audio_ion_data.smmu_enabled) {
+ ret = msm_audio_ion_smmu_unmap(client, handle);
+ if (ret)
+ pr_err("%s: smmu unmap failed with ret %d\n",
+ __func__, ret);
+ }
+
+ ion_unmap_kernel(client, handle);
+
+ ion_free(client, handle);
+ msm_audio_ion_client_destroy(client);
+ return ret;
+}
+EXPORT_SYMBOL(msm_audio_ion_free);
+
+int msm_audio_ion_mmap(struct audio_buffer *ab,
+ struct vm_area_struct *vma)
+{
+ struct sg_table *table;
+ unsigned long addr = vma->vm_start;
+ unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
+ struct scatterlist *sg;
+ unsigned int i;
+ struct page *page;
+ int ret;
+
+ pr_debug("%s\n", __func__);
+
+ table = ion_sg_table(ab->client, ab->handle);
+
+ if (IS_ERR(table)) {
+ pr_err("%s: Unable to get sg_table from ion: %ld\n",
+ __func__, PTR_ERR(table));
+ return PTR_ERR(table);
+ } else if (!table) {
+ pr_err("%s: sg_list is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ /* uncached */
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ /*
+ * Check whether a page is associated with this sg list: allocations
+ * from a carveout currently have no backing struct pages. If that
+ * ever changes, this check and the else branch can be removed.
+ */
+ page = sg_page(table->sgl);
+ if (page) {
+ pr_debug("%s: page is NOT null\n", __func__);
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ unsigned long remainder = vma->vm_end - addr;
+ unsigned long len = sg->length;
+
+ page = sg_page(sg);
+
+ if (offset >= len) {
+ offset -= len;
+ continue;
+ } else if (offset) {
+ page += offset / PAGE_SIZE;
+ len -= offset;
+ offset = 0;
+ }
+ len = min(len, remainder);
+ pr_debug("vma=%pK, addr=%x len=%ld vm_start=%x vm_end=%x vm_page_prot=%ld\n",
+ vma, (unsigned int)addr, len,
+ (unsigned int)vma->vm_start,
+ (unsigned int)vma->vm_end,
+ (unsigned long int)vma->vm_page_prot);
+ ret = remap_pfn_range(vma, addr, page_to_pfn(page),
+ len, vma->vm_page_prot);
+ if (ret)
+ return ret;
+ addr += len;
+ if (addr >= vma->vm_end)
+ return 0;
+ }
+ } else {
+ ion_phys_addr_t phys_addr;
+ size_t phys_len;
+ size_t va_len = 0;
+
+ pr_debug("%s: page is NULL\n", __func__);
+
+ ret = ion_phys(ab->client, ab->handle, &phys_addr, &phys_len);
+ if (ret) {
+ pr_err("%s: Unable to get phys address from ION buffer: %d\n",
+ __func__, ret);
+ return ret;
+ }
+ pr_debug("phys=%pK len=%zd\n", &phys_addr, phys_len);
+ pr_debug("vma=%pK, vm_start=%x vm_end=%x vm_pgoff=%ld vm_page_prot=%ld\n",
+ vma, (unsigned int)vma->vm_start,
+ (unsigned int)vma->vm_end, vma->vm_pgoff,
+ (unsigned long int)vma->vm_page_prot);
+ va_len = vma->vm_end - vma->vm_start;
+ if ((offset > phys_len) || (va_len > phys_len - offset)) {
+ pr_err("wrong offset %lu, phys_len=%zd, va_len=%zd\n",
+ offset, phys_len, va_len);
+ return -EINVAL;
+ }
+ ret = remap_pfn_range(vma, vma->vm_start,
+ __phys_to_pfn(phys_addr) + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ if (ret) {
+ pr_err("%s: remap_pfn_range failed, ret = %d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+
+bool msm_audio_ion_is_smmu_available(void)
+{
+ return msm_audio_ion_data.smmu_enabled;
+}
+
+/* TODO: make this static again once external callers are removed */
+struct ion_client *msm_audio_ion_client_create(const char *name)
+{
+ struct ion_client *pclient = NULL;
+
+ pclient = hvenv_ion_client_create(name);
+ return pclient;
+}
+
+
+void msm_audio_ion_client_destroy(struct ion_client *client)
+{
+ pr_debug("%s: client = %pK smmu_enabled = %d\n", __func__,
+ client, msm_audio_ion_data.smmu_enabled);
+
+ ion_client_destroy(client);
+}
+
+int msm_audio_ion_import_legacy(const char *name, struct ion_client *client,
+ struct ion_handle **handle, int fd,
+ unsigned long *ionflag, size_t bufsz,
+ ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr)
+{
+ int rc = 0;
+
+ if (!name || !client || !handle || !paddr || !vaddr || !pa_len) {
+ pr_err("%s: Invalid params\n", __func__);
+ rc = -EINVAL;
+ goto err;
+ }
+ /* For the legacy path the client is already created and passed in. */
+ /*
+ * As of now the name is expected to be audio_acdb_client or
+ * Audio_Dec_Client, bufsz should be 0, and fd must be non-zero.
+ */
+ *handle = ion_import_dma_buf(client, fd);
+ pr_debug("%s: DMA Buf name=%s, fd=%d handle=%pK\n", __func__,
+ name, fd, *handle);
+ if (IS_ERR_OR_NULL((void *)(*handle))) {
+ pr_err("%s: ion import dma buffer failed\n",
+ __func__);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ if (ionflag != NULL) {
+ rc = ion_handle_get_flags(client, *handle, ionflag);
+ if (rc) {
+ pr_err("%s: could not get flags for the handle\n",
+ __func__);
+ rc = -EINVAL;
+ goto err_ion_handle;
+ }
+ }
+
+ /* TODO: handle the SMMU-enabled and disabled cases separately here */
+ *vaddr = ion_map_kernel(client, *handle);
+ if (IS_ERR_OR_NULL((void *)*vaddr)) {
+ pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
+ rc = -EINVAL;
+ goto err_ion_handle;
+ }
+
+ if (bufsz != 0)
+ memset((void *)*vaddr, 0, bufsz);
+
+ rc = msm_audio_ion_get_phys(client, *handle, paddr, pa_len, *vaddr);
+ if (rc) {
+ pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
+ __func__, rc);
+ goto err_get_phys;
+ }
+
+ return 0;
+
+err_get_phys:
+ ion_unmap_kernel(client, *handle);
+err_ion_handle:
+ ion_free(client, *handle);
+err:
+ return rc;
+}
+
+int msm_audio_ion_free_legacy(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ ion_unmap_kernel(client, handle);
+
+ ion_free(client, handle);
+ /* no client_destroy in legacy */
+ return 0;
+}
+
+static int msm_audio_ion_get_phys(struct ion_client *client,
+ struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len, void *vaddr)
+{
+ int rc = 0;
+
+ pr_debug("%s: smmu_enabled = %d\n", __func__,
+ msm_audio_ion_data.smmu_enabled);
+
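+ /*
+ * With SMMU enabled the device address comes from the backend VM
+ * via the HAB map command; otherwise use the physical address
+ * reported by ION directly.
+ */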
+ if (msm_audio_ion_data.smmu_enabled) {
+ rc = msm_audio_ion_smmu_map(client, handle, addr, len, vaddr);
+ if (rc) {
+ pr_err("%s: failed to do smmu map, err = %d\n",
+ __func__, rc);
+ goto err;
+ }
+ } else {
+ rc = ion_phys(client, handle, addr, len);
+ }
+
+ pr_debug("%s: phys=%pK, len=%zd, rc=%d\n",
+ __func__, &(*addr), *len, rc);
+err:
+ return rc;
+}
+
+static const struct of_device_id msm_audio_ion_dt_match[] = {
+ { .compatible = "qcom,msm-audio-ion-vm" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, msm_audio_ion_dt_match);
+
+u32 msm_audio_populate_upper_32_bits(ion_phys_addr_t pa)
+{
+ return upper_32_bits(pa);
+}
+
+static int msm_audio_ion_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ const char *msm_audio_ion_dt = "qcom,smmu-enabled";
+ bool smmu_enabled;
+ struct device *dev = &pdev->dev;
+
+ if (dev->of_node == NULL) {
+ pr_err("%s: device tree node not found\n",
+ __func__);
+ msm_audio_ion_data.smmu_enabled = false;
+ return 0;
+ }
+
+ smmu_enabled = of_property_read_bool(dev->of_node,
+ msm_audio_ion_dt);
+ msm_audio_ion_data.smmu_enabled = smmu_enabled;
+
+ pr_info("%s: SMMU is %s\n", __func__,
+ (smmu_enabled) ? "Enabled" : "Disabled");
+
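+ /*
+ * In SMMU mode every map/unmap is proxied to the backend VM, so
+ * open the HAB channel once at probe and keep it for the driver's
+ * lifetime (it is closed again in msm_audio_ion_remove()).
+ */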
+ if (smmu_enabled) {
+ rc = habmm_socket_open(&msm_audio_ion_hab_handle,
+ HAB_MMID_CREATE(MM_AUD_3,
+ MSM_AUDIO_SMMU_VM_HAB_MINOR_ID),
+ 0xFFFFFFFF,
+ HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_FE);
+ if (rc) {
+ pr_err("%s: habmm_socket_open failed %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ pr_info("%s: msm_audio_ion_hab_handle %x\n",
+ __func__, msm_audio_ion_hab_handle);
+
+ INIT_LIST_HEAD(&msm_audio_ion_data.smmu_map_list);
+ mutex_init(&(msm_audio_ion_data.smmu_map_mutex));
+ }
+
+ if (!rc)
+ msm_audio_ion_data.device_status |= MSM_AUDIO_ION_PROBED;
+
+ return rc;
+}
+
+static int msm_audio_ion_remove(struct platform_device *pdev)
+{
+ if (msm_audio_ion_data.smmu_enabled) {
+ if (msm_audio_ion_hab_handle)
+ habmm_socket_close(msm_audio_ion_hab_handle);
+
+ mutex_destroy(&(msm_audio_ion_data.smmu_map_mutex));
+ }
+ msm_audio_ion_data.smmu_enabled = false;
+ msm_audio_ion_data.device_status = 0;
+
+ return 0;
+}
+
+static struct platform_driver msm_audio_ion_driver = {
+ .driver = {
+ .name = "msm-audio-ion-vm",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_audio_ion_dt_match,
+ },
+ .probe = msm_audio_ion_probe,
+ .remove = msm_audio_ion_remove,
+};
+
+static int __init msm_audio_ion_init(void)
+{
+ return platform_driver_register(&msm_audio_ion_driver);
+}
+module_init(msm_audio_ion_init);
+
+static void __exit msm_audio_ion_exit(void)
+{
+ platform_driver_unregister(&msm_audio_ion_driver);
+}
+module_exit(msm_audio_ion_exit);
+
+MODULE_DESCRIPTION("MSM Audio ION VM module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index a49848808078..68199d9adb02 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -493,13 +493,10 @@ static void spcom_notify_state(void *handle, const void *priv, unsigned event)
ch->glink_state = event;
- /*
- * if spcom_notify_state() is called within glink_open()
- * then ch->glink_handle is not updated yet.
- */
- if (!ch->glink_handle) {
- pr_debug("update glink_handle, ch [%s].\n", ch->name);
- ch->glink_handle = handle;
+ if (!handle) {
+ pr_err("inavlid glink_handle, ch [%s].\n", ch->name);
+ mutex_unlock(&ch->lock);
+ return;
}
/* signal before unlock mutex & before calling glink */
@@ -512,8 +509,7 @@ static void spcom_notify_state(void *handle, const void *priv, unsigned event)
*/
pr_debug("call glink_queue_rx_intent() ch [%s].\n", ch->name);
- ret = glink_queue_rx_intent(ch->glink_handle,
- ch, ch->rx_buf_size);
+ ret = glink_queue_rx_intent(handle, ch, ch->rx_buf_size);
if (ret) {
pr_err("glink_queue_rx_intent() err [%d]\n", ret);
} else {
@@ -1028,10 +1024,12 @@ static int spcom_get_next_request_size(struct spcom_channel *ch)
ch->name, ch->actual_rx_size);
goto exit_ready;
}
+ mutex_unlock(&ch->lock); /* unlock while waiting */
pr_debug("Wait for Rx Done, ch [%s].\n", ch->name);
wait_for_completion(&ch->rx_done);
+ mutex_lock(&ch->lock); /* re-lock after waiting */
/* Check Rx Abort on SP reset */
if (ch->rx_abort) {
pr_err("rx aborted.\n");
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index f85c4ba06b47..8bf5f8eb64ad 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -850,7 +850,7 @@ static int subsys_ramdump(int enable, const struct subsys_desc *subsys)
if (!enable)
return 0;
- return pil_do_ramdump(&d->desc, d->ramdump_dev);
+ return pil_do_ramdump(&d->desc, d->ramdump_dev, NULL);
}
static void subsys_free_memory(const struct subsys_desc *subsys)
diff --git a/drivers/soc/qcom/wcd-dsp-glink.c b/drivers/soc/qcom/wcd-dsp-glink.c
index 85c2b92f5474..ee88a8aaf850 100644
--- a/drivers/soc/qcom/wcd-dsp-glink.c
+++ b/drivers/soc/qcom/wcd-dsp-glink.c
@@ -634,6 +634,21 @@ static int wdsp_glink_ch_info_init(struct wdsp_glink_priv *wpriv,
memcpy(&ch[i]->ch_cfg, payload, ch_cfg_size);
payload += ch_cfg_size;
+ /* check that the channel name is a valid NUL-terminated string */
+ for (j = 0; j < WDSP_CH_NAME_MAX_LEN; j++) {
+ if (ch[i]->ch_cfg.name[j] == '\0')
+ break;
+ }
+
+ if (j == WDSP_CH_NAME_MAX_LEN) {
+ dev_err_ratelimited(wpriv->dev, "%s: Wrong channel name\n",
+ __func__);
+ kfree(ch[i]);
+ ch[i] = NULL;
+ ret = -EINVAL;
+ goto err_ch_mem;
+ }
+
mutex_init(&ch[i]->mutex);
ch[i]->wpriv = wpriv;
INIT_WORK(&ch[i]->lcl_ch_open_wrk, wdsp_glink_lcl_ch_open_wrk);
@@ -906,8 +921,6 @@ static ssize_t wdsp_glink_write(struct file *file, const char __user *buf,
ret = -EINVAL;
goto free_buf;
}
- dev_dbg(wpriv->dev, "%s: requested ch_name: %s, pkt_size: %zd\n",
- __func__, cpkt->ch_name, pkt_max_size);
for (i = 0; i < wpriv->no_of_channels; i++) {
if (wpriv->ch && wpriv->ch[i] &&
(!strcmp(cpkt->ch_name,
@@ -922,6 +935,8 @@ static ssize_t wdsp_glink_write(struct file *file, const char __user *buf,
ret = -EINVAL;
goto free_buf;
}
+ dev_dbg(wpriv->dev, "%s: requested ch_name: %s, pkt_size: %zd\n",
+ __func__, cpkt->ch_name, pkt_max_size);
ret = wait_event_timeout(tx_buf->ch->ch_connect_wait,
(tx_buf->ch->channel_state ==
diff --git a/drivers/soundwire/swr-wcd-ctrl.c b/drivers/soundwire/swr-wcd-ctrl.c
index cdaf009c5b1f..1dcaba2e79f6 100644
--- a/drivers/soundwire/swr-wcd-ctrl.c
+++ b/drivers/soundwire/swr-wcd-ctrl.c
@@ -397,11 +397,17 @@ static int swrm_clk_request(struct swr_mstr_ctrl *swrm, bool enable)
return -EINVAL;
if (enable) {
- swrm->clk(swrm->handle, true);
- swrm->state = SWR_MSTR_UP;
- } else {
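+ /*
+ * Reference-count clock requests so paired enable/disable calls
+ * (e.g. from the interrupt handler) keep the clock state balanced.
+ */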
+ swrm->clk_ref_count++;
+ if (swrm->clk_ref_count == 1) {
+ swrm->clk(swrm->handle, true);
+ swrm->state = SWR_MSTR_UP;
+ }
+ } else if (--swrm->clk_ref_count == 0) {
swrm->clk(swrm->handle, false);
swrm->state = SWR_MSTR_DOWN;
+ } else if (swrm->clk_ref_count < 0) {
+ pr_err("%s: swrm clk count mismatch\n", __func__);
+ swrm->clk_ref_count = 0;
}
return 0;
}
@@ -1169,7 +1175,10 @@ static irqreturn_t swr_mstr_interrupt(int irq, void *dev)
u8 devnum = 0;
int ret = IRQ_HANDLED;
- pm_runtime_get_sync(&swrm->pdev->dev);
+ mutex_lock(&swrm->reslock);
+ swrm_clk_request(swrm, true);
+ mutex_unlock(&swrm->reslock);
+
intr_sts = swrm->read(swrm->handle, SWRM_INTERRUPT_STATUS);
intr_sts &= SWRM_INTERRUPT_STATUS_RMSK;
for (i = 0; i < SWRM_INTERRUPT_MAX; i++) {
@@ -1257,8 +1266,10 @@ static irqreturn_t swr_mstr_interrupt(int irq, void *dev)
break;
}
}
- pm_runtime_mark_last_busy(&swrm->pdev->dev);
- pm_runtime_put_autosuspend(&swrm->pdev->dev);
+
+ mutex_lock(&swrm->reslock);
+ swrm_clk_request(swrm, false);
+ mutex_unlock(&swrm->reslock);
return ret;
}
@@ -1448,6 +1459,7 @@ static int swrm_probe(struct platform_device *pdev)
swrm->wcmd_id = 0;
swrm->slave_status = 0;
swrm->num_rx_chs = 0;
+ swrm->clk_ref_count = 0;
swrm->state = SWR_MSTR_RESUME;
init_completion(&swrm->reset);
init_completion(&swrm->broadcast);
diff --git a/drivers/soundwire/swr-wcd-ctrl.h b/drivers/soundwire/swr-wcd-ctrl.h
index b7a3edac3e00..104ac8f6f510 100755
--- a/drivers/soundwire/swr-wcd-ctrl.h
+++ b/drivers/soundwire/swr-wcd-ctrl.h
@@ -78,6 +78,7 @@ struct swr_mstr_ctrl {
struct device *dev;
struct resource *supplies;
struct clk *mclk;
+ int clk_ref_count;
struct completion reset;
struct completion broadcast;
struct mutex mlock;
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger.c b/drivers/staging/android/fiq_debugger/fiq_debugger.c
index 15ff4e149d75..08113a342eed 100644
--- a/drivers/staging/android/fiq_debugger/fiq_debugger.c
+++ b/drivers/staging/android/fiq_debugger/fiq_debugger.c
@@ -402,7 +402,7 @@ static void fiq_debugger_work(struct work_struct *work)
cmd += 6;
while (*cmd == ' ')
cmd++;
- if ((cmd != '\0') && sysrq_on())
+ if ((*cmd != '\0') && sysrq_on())
kernel_restart(cmd);
else
kernel_restart(NULL);
diff --git a/drivers/staging/android/ion/ion_cma_secure_heap.c b/drivers/staging/android/ion/ion_cma_secure_heap.c
index 90ae7eb65b65..6102b1765182 100644
--- a/drivers/staging/android/ion/ion_cma_secure_heap.c
+++ b/drivers/staging/android/ion/ion_cma_secure_heap.c
@@ -3,7 +3,7 @@
*
* Copyright (C) Linaro 2012
* Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -344,7 +344,8 @@ static void ion_secure_cma_free_chunk(struct ion_cma_secure_heap *sheap,
}
-void __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap, int max_nr)
+static unsigned long
+__ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap, int max_nr)
{
struct list_head *entry, *_n;
unsigned long drained_size = 0, skipped_size = 0;
@@ -368,6 +369,7 @@ void __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap, int max_nr)
}
trace_ion_secure_cma_shrink_pool_end(drained_size, skipped_size);
+ return drained_size;
}
int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused)
@@ -385,6 +387,7 @@ int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused)
static unsigned long ion_secure_cma_shrinker(struct shrinker *shrinker,
struct shrink_control *sc)
{
+ unsigned long freed;
struct ion_cma_secure_heap *sheap = container_of(shrinker,
struct ion_cma_secure_heap, shrinker);
int nr_to_scan = sc->nr_to_scan;
@@ -397,11 +400,11 @@ static unsigned long ion_secure_cma_shrinker(struct shrinker *shrinker,
if (!mutex_trylock(&sheap->chunk_lock))
return -1;
- __ion_secure_cma_shrink_pool(sheap, nr_to_scan);
+ freed = __ion_secure_cma_shrink_pool(sheap, nr_to_scan);
mutex_unlock(&sheap->chunk_lock);
- return atomic_read(&sheap->total_pool_size);
+ return freed;
}
static unsigned long ion_secure_cma_shrinker_count(struct shrinker *shrinker,
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index 513d015a5ace..c19b87d10df0 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -183,7 +183,7 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
freed += (1 << pool->order);
}
- return ion_page_pool_total(pool, high);
+ return freed;
}
struct ion_page_pool *ion_page_pool_create(struct device *dev, gfp_t gfp_mask,
diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
index 60871f3022b1..12a3893b98fd 100644
--- a/drivers/staging/rts5208/rtsx_scsi.c
+++ b/drivers/staging/rts5208/rtsx_scsi.c
@@ -414,7 +414,7 @@ void set_sense_data(struct rtsx_chip *chip, unsigned int lun, u8 err_code,
sense->ascq = ascq;
if (sns_key_info0 != 0) {
sense->sns_key_info[0] = SKSV | sns_key_info0;
- sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 8;
+ sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 4;
sense->sns_key_info[2] = sns_key_info1 & 0x0f;
}
}
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index b80e75fc3521..416006a3384c 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -218,6 +218,7 @@ struct msm_hs_wakeup {
};
struct msm_hs_port {
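+ /* Gates Tx start and resource_on while startup/loopback setup runs */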
+ bool startup_locked;
struct uart_port uport;
unsigned long imr_reg; /* shadow value of UARTDM_IMR */
struct clk *clk;
@@ -292,6 +293,8 @@ static struct msm_hs_port *msm_hs_get_hs_port(int port_index);
static void msm_hs_queue_rx_desc(struct msm_hs_port *msm_uport);
static int disconnect_rx_endpoint(struct msm_hs_port *msm_uport);
static int msm_hs_pm_resume(struct device *dev);
+static void msm_hs_pm_suspend(struct device *dev);
+
#define UARTDM_TO_MSM(uart_port) \
container_of((uart_port), struct msm_hs_port, uport)
@@ -392,6 +395,8 @@ static void msm_hs_resource_unvote(struct msm_hs_port *msm_uport)
{
struct uart_port *uport = &(msm_uport->uport);
int rc = atomic_read(&msm_uport->resource_count);
+ struct msm_hs_tx *tx = &msm_uport->tx;
+ struct msm_hs_rx *rx = &msm_uport->rx;
MSM_HS_DBG("%s(): power usage count %d", __func__, rc);
if (rc <= 0) {
@@ -400,8 +405,15 @@ static void msm_hs_resource_unvote(struct msm_hs_port *msm_uport)
return;
}
atomic_dec(&msm_uport->resource_count);
- pm_runtime_mark_last_busy(uport->dev);
- pm_runtime_put_autosuspend(uport->dev);
+
+ if (pm_runtime_enabled(uport->dev)) {
+ pm_runtime_mark_last_busy(uport->dev);
+ pm_runtime_put_autosuspend(uport->dev);
+ } else {
+ MSM_HS_DBG("%s():tx.flush:%d,in_flight:%d,rx.flush:%d\n",
+ __func__, tx->flush, tx->dma_in_flight, rx->flush);
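+ /* Runtime PM is disabled (e.g. during shutdown); suspend directly */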
+ msm_hs_pm_suspend(uport->dev);
+ }
}
/* Vote for resources before accessing them */
@@ -585,6 +597,8 @@ static void hex_dump_ipc(struct msm_hs_port *msm_uport, void *ipc_ctx,
char buf[(BUF_DUMP_SIZE * 3) + 2];
int len = 0;
+ if (msm_uport->ipc_debug_mask == FATAL_LEV)
+ return;
len = min(size, BUF_DUMP_SIZE);
/*
* Print upto 32 data bytes, 32 bytes per line, 1 byte at a time and
@@ -635,6 +649,7 @@ static int msm_serial_loopback_enable_set(void *data, u64 val)
unsigned long flags;
int ret = 0;
+ msm_uport->startup_locked = true;
msm_hs_resource_vote(msm_uport);
if (val) {
@@ -654,7 +669,7 @@ static int msm_serial_loopback_enable_set(void *data, u64 val)
}
/* Calling CLOCK API. Hence mb() requires here. */
mb();
-
+ msm_uport->startup_locked = false;
msm_hs_resource_unvote(msm_uport);
return 0;
}
@@ -666,11 +681,13 @@ static int msm_serial_loopback_enable_get(void *data, u64 *val)
unsigned long flags;
int ret = 0;
+ msm_uport->startup_locked = true;
msm_hs_resource_vote(msm_uport);
spin_lock_irqsave(&uport->lock, flags);
ret = msm_hs_read(&msm_uport->uport, UART_DM_MR2);
spin_unlock_irqrestore(&uport->lock, flags);
+ msm_uport->startup_locked = false;
msm_hs_resource_unvote(msm_uport);
@@ -828,6 +845,11 @@ static int msm_hs_spsconnect_rx(struct uart_port *uport)
struct sps_register_event *sps_event = &rx->prod.event;
unsigned long flags;
+ if (msm_uport->rx.pending_flag) {
+ MSM_HS_WARN("%s(): Buffers may be pending 0x%lx",
+ __func__, msm_uport->rx.pending_flag);
+ }
+
/* Establish connection between peripheral and memory endpoint */
ret = sps_connect(sps_pipe_handle, sps_config);
if (ret) {
@@ -843,9 +865,6 @@ static int msm_hs_spsconnect_rx(struct uart_port *uport)
goto reg_event_err;
}
spin_lock_irqsave(&uport->lock, flags);
- if (msm_uport->rx.pending_flag)
- MSM_HS_WARN("%s(): Buffers may be pending 0x%lx",
- __func__, msm_uport->rx.pending_flag);
msm_uport->rx.queued_flag = 0;
msm_uport->rx.pending_flag = 0;
msm_uport->rx.rx_inx = 0;
@@ -1284,6 +1303,8 @@ static int disconnect_rx_endpoint(struct msm_hs_port *msm_uport)
int ret = 0;
ret = sps_rx_disconnect(sps_pipe_handle);
+ if (ret)
+ MSM_HS_ERR("%s(): sps_disconnect failed\n", __func__);
if (msm_uport->rx.pending_flag)
MSM_HS_WARN("%s(): Buffers may be pending 0x%lx",
@@ -1293,8 +1314,6 @@ static int disconnect_rx_endpoint(struct msm_hs_port *msm_uport)
msm_uport->rx.pending_flag = 0;
msm_uport->rx.rx_inx = 0;
- if (ret)
- MSM_HS_ERR("%s(): sps_disconnect failed\n", __func__);
msm_uport->rx.flush = FLUSH_SHUTDOWN;
MSM_HS_DBG("%s: Calling Completion\n", __func__);
wake_up(&msm_uport->bam_disconnect_wait);
@@ -1352,9 +1371,14 @@ static void msm_hs_stop_rx_locked(struct uart_port *uport)
{
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
- if (msm_uport->pm_state != MSM_HS_PM_ACTIVE)
+ if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
MSM_HS_WARN("%s(): Clocks are off\n", __func__);
- else
+ /* Make sure resource_on doesn't get called */
+ if (msm_hs_clk_bus_vote(msm_uport))
+ MSM_HS_ERR("%s:Failed clock vote\n", __func__);
+ msm_hs_disable_rx(uport);
+ msm_hs_clk_bus_unvote(msm_uport);
+ } else
msm_hs_disable_rx(uport);
if (msm_uport->rx.flush == FLUSH_NONE)
@@ -1364,11 +1388,19 @@ static void msm_hs_stop_rx_locked(struct uart_port *uport)
static void msm_hs_disconnect_rx(struct uart_port *uport)
{
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
+ struct msm_hs_rx *rx = &msm_uport->rx;
+ struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
+ u32 prod_empty = 0;
msm_hs_disable_rx(uport);
/* Disconnect the BAM RX pipe */
if (msm_uport->rx.flush == FLUSH_NONE)
msm_uport->rx.flush = FLUSH_STOP;
+
+ if (sps_is_pipe_empty(sps_pipe_handle, &prod_empty)) {
+ MSM_HS_WARN("%s():Pipe Not Empty, ret=%d, flush=%d\n",
+ __func__, prod_empty, msm_uport->rx.flush);
+ }
disconnect_rx_endpoint(msm_uport);
MSM_HS_DBG("%s(): rx->flush %d", __func__, msm_uport->rx.flush);
}
@@ -1389,6 +1421,8 @@ void tx_timeout_handler(unsigned long arg)
if (UARTDM_ISR_CURRENT_CTS_BMSK & isr)
MSM_HS_WARN("%s(): CTS Disabled, ISR 0x%x", __func__, isr);
dump_uart_hs_registers(msm_uport);
+ /* Stop further logging */
+ MSM_HS_ERR("%s(): Stop IPC logging\n", __func__);
}
/* Transmit the next chunk of data */
@@ -1832,11 +1866,27 @@ static void msm_hs_start_tx_locked(struct uart_port *uport)
{
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
struct msm_hs_tx *tx = &msm_uport->tx;
+ unsigned int isr;
+
+ if (msm_uport->startup_locked) {
+ MSM_HS_DBG("%s(): No Tx Request, startup_locked=%d\n",
+ __func__, msm_uport->startup_locked);
+ return;
+ }
/* Bail if transfer in progress */
if (tx->flush < FLUSH_STOP || tx->dma_in_flight) {
MSM_HS_INFO("%s(): retry, flush %d, dma_in_flight %d\n",
__func__, tx->flush, tx->dma_in_flight);
+
+ if (msm_uport->pm_state == MSM_HS_PM_ACTIVE) {
+ isr = msm_hs_read(uport, UART_DM_ISR);
+ if (UARTDM_ISR_CURRENT_CTS_BMSK & isr)
+ MSM_HS_DBG("%s():CTS 1: Peer is Busy, ISR 0x%x",
+ __func__, isr);
+ } else
+ MSM_HS_WARN("%s(): Clocks are off\n", __func__);
+
return;
}
@@ -2269,16 +2319,34 @@ void msm_hs_resource_off(struct msm_hs_port *msm_uport)
{
struct uart_port *uport = &(msm_uport->uport);
unsigned int data;
+ int ret = 0;
MSM_HS_DBG("%s(): begin", __func__);
msm_hs_disable_flow_control(uport, false);
if (msm_uport->rx.flush == FLUSH_NONE)
msm_hs_disconnect_rx(uport);
+ else if (msm_uport->rx.flush != FLUSH_SHUTDOWN) {
+ MSM_HS_WARN("%s():Rx Flush=%d Not Expected\n",
+ __func__, msm_uport->rx.flush);
+ /* disable and disconnect rx */
+ ret = wait_event_timeout(msm_uport->rx.wait,
+ !msm_uport->rx.pending_flag, 500);
+ if (!ret)
+ MSM_HS_WARN("%s(): rx disconnect not complete",
+ __func__);
+ msm_hs_disconnect_rx(uport);
+ } else
+ MSM_HS_DBG("%s():Rx Flush=%d In Proper State\n",
+ __func__, msm_uport->rx.flush);
/* disable dlink */
- if (msm_uport->tx.flush == FLUSH_NONE)
- wait_event_timeout(msm_uport->tx.wait,
+ if (msm_uport->tx.flush == FLUSH_NONE) {
+ ret = wait_event_timeout(msm_uport->tx.wait,
msm_uport->tx.flush == FLUSH_STOP, 500);
+ if (!ret)
+ MSM_HS_WARN("%s(): tx disconnect not complete",
+ __func__);
+ }
if (msm_uport->tx.flush != FLUSH_SHUTDOWN) {
data = msm_hs_read(uport, UART_DM_DMEN);
@@ -2296,21 +2364,29 @@ void msm_hs_resource_on(struct msm_hs_port *msm_uport)
unsigned int data;
unsigned long flags;
+ if (msm_uport->startup_locked) {
+ MSM_HS_WARN("%s(): startup_locked=%d\n",
+ __func__, msm_uport->startup_locked);
+ return;
+ }
+
if (msm_uport->rx.flush == FLUSH_SHUTDOWN ||
msm_uport->rx.flush == FLUSH_STOP) {
msm_hs_write(uport, UART_DM_CR, RESET_RX);
data = msm_hs_read(uport, UART_DM_DMEN);
data |= UARTDM_RX_BAM_ENABLE_BMSK;
msm_hs_write(uport, UART_DM_DMEN, data);
- }
+ } else
+ MSM_HS_DBG("%s():rx.flush=%d, Rx is not enabled\n",
+ __func__, msm_uport->rx.flush);
- msm_hs_spsconnect_tx(msm_uport);
if (msm_uport->rx.flush == FLUSH_SHUTDOWN) {
msm_hs_spsconnect_rx(uport);
spin_lock_irqsave(&uport->lock, flags);
msm_hs_start_rx_locked(uport);
spin_unlock_irqrestore(&uport->lock, flags);
}
+ msm_hs_spsconnect_tx(msm_uport);
}
/* Request to turn off uart clock once pending TX is flushed */
@@ -2603,6 +2679,7 @@ static int msm_hs_startup(struct uart_port *uport)
struct sps_pipe *sps_pipe_handle_tx = tx->cons.pipe_handle;
struct sps_pipe *sps_pipe_handle_rx = rx->prod.pipe_handle;
+ msm_uport->startup_locked = true;
rfr_level = uport->fifosize;
if (rfr_level > 16)
rfr_level -= 16;
@@ -2654,6 +2731,9 @@ static int msm_hs_startup(struct uart_port *uport)
flush_kthread_worker(&msm_uport->rx.kworker);
if (rx->flush != FLUSH_SHUTDOWN)
disconnect_rx_endpoint(msm_uport);
+ else
+ MSM_HS_DBG("%s(): Rx Flush=%d In Proper state\n",
+ __func__, rx->flush);
ret = msm_hs_spsconnect_rx(uport);
if (ret) {
MSM_HS_ERR("msm_serial_hs: SPS connect failed for RX");
@@ -2729,6 +2809,7 @@ static int msm_hs_startup(struct uart_port *uport)
atomic_set(&msm_uport->client_req_state, 0);
LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
"%s: Client_Count 0\n", __func__);
+ msm_uport->startup_locked = false;
msm_hs_start_rx_locked(uport);
spin_unlock_irqrestore(&uport->lock, flags);
@@ -3157,6 +3238,8 @@ static void msm_hs_pm_suspend(struct device *dev)
msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
msm_hs_resource_off(msm_uport);
obs_manage_irq(msm_uport, false);
+ if (!atomic_read(&msm_uport->client_req_state))
+ enable_wakeup_interrupt(msm_uport);
msm_hs_clk_bus_unvote(msm_uport);
/* For OBS, don't use wakeup interrupt, set gpio to suspended state */
@@ -3168,8 +3251,6 @@ static void msm_hs_pm_suspend(struct device *dev)
__func__);
}
- if (!atomic_read(&msm_uport->client_req_state))
- enable_wakeup_interrupt(msm_uport);
LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
"%s: PM State Suspended client_count %d\n", __func__,
client_count);
@@ -3691,9 +3772,14 @@ static void msm_hs_shutdown(struct uart_port *uport)
MSM_HS_WARN("%s(): rx disconnect not complete",
__func__);
msm_hs_disconnect_rx(uport);
+ } else {
+ MSM_HS_DBG("%s(): Rx Flush is in Proper state=%d\n",
+ __func__, msm_uport->rx.flush);
}
- cancel_delayed_work_sync(&msm_uport->rx.flip_insert_work);
+ if (cancel_delayed_work_sync(&msm_uport->rx.flip_insert_work))
+ MSM_HS_DBG("%s(): Work was pending, canceled it\n",
+ __func__);
flush_workqueue(msm_uport->hsuart_wq);
/* BAM Disconnect for TX */
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index fb31eecb708d..8f3566cde3eb 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -362,6 +362,32 @@ int tty_insert_flip_string_flags(struct tty_port *port,
EXPORT_SYMBOL(tty_insert_flip_string_flags);
/**
+ * __tty_insert_flip_char - Add one character to the tty buffer
+ * @port: tty port
+ * @ch: character
+ * @flag: flag byte
+ *
+ * Queue a single byte to the tty buffering, with an optional flag.
+ * This is the slow path of tty_insert_flip_char.
+ */
+int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag)
+{
+ struct tty_buffer *tb;
+ int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0;
+
+ if (!__tty_buffer_request_room(port, 1, flags))
+ return 0;
+
+ tb = port->buf.tail;
+ if (~tb->flags & TTYB_NORMAL)
+ *flag_buf_ptr(tb, tb->used) = flag;
+ *char_buf_ptr(tb, tb->used++) = ch;
+
+ return 1;
+}
+EXPORT_SYMBOL(__tty_insert_flip_char);
+
+/**
* tty_schedule_flip - push characters to ldisc
* @port: tty port to push from
*
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 54d2d6b604c0..873ba02d59e6 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -519,6 +519,8 @@ static void async_completed(struct urb *urb)
if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
as->status != -ENOENT)
cancel_bulk_urbs(ps, as->bulk_addr);
+
+ wake_up(&ps->wait);
spin_unlock(&ps->lock);
if (signr) {
@@ -526,8 +528,6 @@ static void async_completed(struct urb *urb)
put_pid(pid);
put_cred(cred);
}
-
- wake_up(&ps->wait);
}
static void destroy_async(struct usb_dev_state *ps, struct list_head *list)
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 574da2b4529c..82806e311202 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -57,8 +57,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Microsoft LifeCam-VX700 v2.0 */
{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
- /* Logitech HD Pro Webcams C920 and C930e */
+ /* Logitech HD Pro Webcams C920, C920-C and C930e */
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
/* Logitech ConferenceCam CC3000e */
@@ -217,6 +218,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x1a0a, 0x0200), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ /* Corsair Strafe RGB */
+ { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Acer C120 LED Projector */
{ USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 3c0f68deee34..d92a33097461 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -58,7 +58,7 @@
/* time out to wait for USB cable status notification (in ms)*/
#define SM_INIT_TIMEOUT 30000
-
+#define DWC3_WAKEUP_SRC_TIMEOUT 5000
/* AHB2PHY register offsets */
#define PERIPH_SS_AHB2PHY_TOP_CFG 0x10
@@ -216,6 +216,7 @@ struct dwc3_msm {
struct notifier_block usbdev_nb;
bool hc_died;
bool xhci_ss_compliance_enable;
+ bool no_wakeup_src_in_hostmode;
struct extcon_dev *extcon_vbus;
struct extcon_dev *extcon_id;
@@ -2350,6 +2351,7 @@ static void dwc3_ext_event_notify(struct dwc3_msm *mdwc)
clear_bit(B_SUSPEND, &mdwc->inputs);
}
+ pm_stay_awake(mdwc->dev);
schedule_delayed_work(&mdwc->sm_work, 0);
}
@@ -2638,6 +2640,7 @@ static int dwc3_msm_id_notifier(struct notifier_block *nb,
if (mdwc->id_state != id) {
mdwc->id_state = id;
dbg_event(0xFF, "id_state", mdwc->id_state);
+ pm_stay_awake(mdwc->dev);
queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
}
@@ -2700,6 +2703,7 @@ static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
mdwc->vbus_active = event;
if (dwc->is_drd && !mdwc->in_restart) {
dbg_event(0xFF, "Q RW (vbus)", mdwc->vbus_active);
+ pm_stay_awake(mdwc->dev);
queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
}
done:
@@ -3099,6 +3103,11 @@ static int dwc3_msm_probe(struct platform_device *pdev)
mdwc->disable_host_mode_pm = of_property_read_bool(node,
"qcom,disable-host-mode-pm");
+ mdwc->no_wakeup_src_in_hostmode = of_property_read_bool(node,
+ "qcom,no-wakeup-src-in-hostmode");
+ if (mdwc->no_wakeup_src_in_hostmode)
+ dev_dbg(&pdev->dev, "dwc3 host not using wakeup source\n");
+
dwc3_set_notifier(&dwc3_msm_notify_event);
/* Assumes dwc3 is the first DT child of dwc3-msm */
@@ -3705,20 +3714,20 @@ static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned mA)
union power_supply_propval pval = {0};
int ret, psy_type;
- if (mdwc->max_power == mA)
- return 0;
-
psy_type = get_psy_type(mdwc);
- if (psy_type == POWER_SUPPLY_TYPE_USB) {
- dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
- /* Set max current limit in uA */
- pval.intval = 1000 * mA;
- } else if (psy_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
+ if (psy_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
pval.intval = -ETIMEDOUT;
- } else {
- return 0;
+ goto set_prop;
}
+ if (mdwc->max_power == mA || psy_type != POWER_SUPPLY_TYPE_USB)
+ return 0;
+
+ dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
+ /* Set max current limit in uA */
+ pval.intval = 1000 * mA;
+
+set_prop:
ret = power_supply_set_property(mdwc->usb_psy,
POWER_SUPPLY_PROP_SDP_CURRENT_MAX, &pval);
if (ret) {
@@ -3892,12 +3901,14 @@ static void dwc3_otg_sm_work(struct work_struct *w)
mdwc->otg_state = OTG_STATE_A_IDLE;
goto ret;
}
+ pm_wakeup_event(mdwc->dev, DWC3_WAKEUP_SRC_TIMEOUT);
}
break;
case OTG_STATE_A_HOST:
if (test_bit(ID, &mdwc->inputs) || mdwc->hc_died) {
- dev_dbg(mdwc->dev, "id || hc_died\n");
+ dbg_event(0xFF, "id || hc_died", 0);
+ dev_dbg(mdwc->dev, "%s state id || hc_died\n", state);
dwc3_otg_start_host(mdwc, 0);
mdwc->otg_state = OTG_STATE_B_IDLE;
mdwc->vbus_retry_count = 0;
@@ -3908,6 +3919,7 @@ static void dwc3_otg_sm_work(struct work_struct *w)
dbg_event(0xFF, "XHCIResume", 0);
if (dwc)
pm_runtime_resume(&dwc->xhci->dev);
+ pm_wakeup_event(mdwc->dev, DWC3_WAKEUP_SRC_TIMEOUT);
}
break;
@@ -3923,6 +3935,34 @@ ret:
return;
}
+static int dwc3_msm_pm_prepare(struct device *dev)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+ struct usb_hcd *hcd;
+ struct xhci_hcd *xhci;
+
+ dev_dbg(dev, "dwc3-msm PM prepare,lpm:%u\n", atomic_read(&dwc->in_lpm));
+ dbg_event(0xFF, "PM Prep", 0);
+ if (!mdwc->in_host_mode || !mdwc->no_wakeup_src_in_hostmode)
+ return 0;
+
+ hcd = dev_get_drvdata(&dwc->xhci->dev);
+ xhci = hcd_to_xhci(hcd);
+ flush_delayed_work(&mdwc->sm_work);
+
+ /* If in LPM, prevent the USB core from runtime-resuming during pm_suspend */
+ if (atomic_read(&dwc->in_lpm)) {
+ hcd_to_bus(hcd)->skip_resume = true;
+ hcd_to_bus(xhci->shared_hcd)->skip_resume = true;
+ } else {
+ hcd_to_bus(hcd)->skip_resume = false;
+ hcd_to_bus(xhci->shared_hcd)->skip_resume = false;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_PM_SLEEP
static int dwc3_msm_pm_suspend(struct device *dev)
{
@@ -3934,7 +3974,7 @@ static int dwc3_msm_pm_suspend(struct device *dev)
dbg_event(0xFF, "PM Sus", 0);
flush_workqueue(mdwc->dwc3_wq);
- if (!atomic_read(&dwc->in_lpm)) {
+ if (!atomic_read(&dwc->in_lpm) && !mdwc->no_wakeup_src_in_hostmode) {
dev_err(mdwc->dev, "Abort PM suspend!! (USB is outside LPM)\n");
return -EBUSY;
}
@@ -3958,8 +3998,13 @@ static int dwc3_msm_pm_resume(struct device *dev)
flush_workqueue(mdwc->dwc3_wq);
atomic_set(&mdwc->pm_suspended, 0);
+ /* Resume h/w in host mode as it may not be runtime suspended */
+ if (mdwc->no_wakeup_src_in_hostmode && !test_bit(ID, &mdwc->inputs))
+ dwc3_msm_resume(mdwc);
+
/* kick in otg state machine */
- queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
+ if (mdwc->vbus_active || !mdwc->id_state)
+ queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
return 0;
}
@@ -3996,6 +4041,7 @@ static int dwc3_msm_runtime_resume(struct device *dev)
#endif
static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
+ .prepare = dwc3_msm_pm_prepare,
SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
dwc3_msm_runtime_idle)
diff --git a/drivers/usb/gadget/function/f_ccid.c b/drivers/usb/gadget/function/f_ccid.c
index 1a281833eadd..9523d67dfb15 100644
--- a/drivers/usb/gadget/function/f_ccid.c
+++ b/drivers/usb/gadget/function/f_ccid.c
@@ -206,6 +206,71 @@ static struct usb_descriptor_header *ccid_hs_descs[] = {
NULL,
};
+/* Super speed support: */
+static struct usb_endpoint_descriptor ccid_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(CCID_NOTIFY_MAXPACKET),
+ .bInterval = CCID_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor ccid_ss_notify_comp_desc = {
+ .bLength = sizeof(ccid_ss_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor ccid_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ccid_ss_in_comp_desc = {
+ .bLength = sizeof(ccid_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor ccid_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ccid_ss_out_comp_desc = {
+ .bLength = sizeof(ccid_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *ccid_ss_descs[] = {
+ (struct usb_descriptor_header *) &ccid_interface_desc,
+ (struct usb_descriptor_header *) &ccid_class_desc,
+ (struct usb_descriptor_header *) &ccid_ss_notify_desc,
+ (struct usb_descriptor_header *) &ccid_ss_notify_comp_desc,
+ (struct usb_descriptor_header *) &ccid_ss_in_desc,
+ (struct usb_descriptor_header *) &ccid_ss_in_comp_desc,
+ (struct usb_descriptor_header *) &ccid_ss_out_desc,
+ (struct usb_descriptor_header *) &ccid_ss_out_comp_desc,
+ NULL,
+};
+
static inline struct f_ccid *func_to_ccid(struct usb_function *f)
{
return container_of(f, struct f_ccid, function);
@@ -503,10 +568,7 @@ free_notify:
static void ccid_function_unbind(struct usb_configuration *c,
struct usb_function *f)
{
- if (gadget_is_dualspeed(c->cdev->gadget))
- usb_free_descriptors(f->hs_descriptors);
- usb_free_descriptors(f->fs_descriptors);
-
+ usb_free_all_descriptors(f);
}
static int ccid_function_bind(struct usb_configuration *c,
@@ -551,23 +613,26 @@ static int ccid_function_bind(struct usb_configuration *c,
ccid_dev->out = ep;
ep->driver_data = cdev;
- f->fs_descriptors = usb_copy_descriptors(ccid_fs_descs);
- if (!f->fs_descriptors)
- goto ep_auto_out_fail;
+ /*
+ * support all relevant hardware speeds... we expect that when
+ * hardware is dual speed, all bulk-capable endpoints work at
+ * both speeds
+ */
+ ccid_hs_in_desc.bEndpointAddress = ccid_fs_in_desc.bEndpointAddress;
+ ccid_hs_out_desc.bEndpointAddress = ccid_fs_out_desc.bEndpointAddress;
+ ccid_hs_notify_desc.bEndpointAddress =
+ ccid_fs_notify_desc.bEndpointAddress;
- if (gadget_is_dualspeed(cdev->gadget)) {
- ccid_hs_in_desc.bEndpointAddress =
- ccid_fs_in_desc.bEndpointAddress;
- ccid_hs_out_desc.bEndpointAddress =
- ccid_fs_out_desc.bEndpointAddress;
- ccid_hs_notify_desc.bEndpointAddress =
- ccid_fs_notify_desc.bEndpointAddress;
-
- /* copy descriptors, and track endpoint copies */
- f->hs_descriptors = usb_copy_descriptors(ccid_hs_descs);
- if (!f->hs_descriptors)
- goto ep_auto_out_fail;
- }
+
+ ccid_ss_in_desc.bEndpointAddress = ccid_fs_in_desc.bEndpointAddress;
+ ccid_ss_out_desc.bEndpointAddress = ccid_fs_out_desc.bEndpointAddress;
+ ccid_ss_notify_desc.bEndpointAddress =
+ ccid_fs_notify_desc.bEndpointAddress;
+
+ ret = usb_assign_descriptors(f, ccid_fs_descs, ccid_hs_descs,
+ ccid_ss_descs);
+ if (ret)
+ goto ep_auto_out_fail;
pr_debug("%s: CCID %s Speed, IN:%s OUT:%s\n", __func__,
gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
@@ -972,6 +1037,7 @@ static int ccid_bind_config(struct f_ccid *ccid_dev)
ccid_dev->function.name = FUNCTION_NAME;
ccid_dev->function.fs_descriptors = ccid_fs_descs;
ccid_dev->function.hs_descriptors = ccid_hs_descs;
+ ccid_dev->function.ss_descriptors = ccid_ss_descs;
ccid_dev->function.bind = ccid_function_bind;
ccid_dev->function.unbind = ccid_function_unbind;
ccid_dev->function.set_alt = ccid_function_set_alt;
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 1900870eee39..18e3c5cdcc24 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -40,6 +40,15 @@ MODULE_PARM_DESC(qti_packet_debug, "Print QTI Packet's Raw Data");
static struct workqueue_struct *ipa_usb_wq;
+static struct gsi_inst_status {
+ struct mutex gsi_lock;
+ bool inst_exist;
+ struct gsi_opts *opts;
+} inst_status[IPA_USB_MAX_TETH_PROT_SIZE];
+
+/* Deregister misc device and free instance structures */
+static void gsi_inst_clean(struct gsi_opts *opts);
+
static void ipa_disconnect_handler(struct gsi_data_port *d_port);
static int gsi_ctrl_send_notification(struct f_gsi *gsi);
static int gsi_alloc_trb_buffer(struct f_gsi *gsi);
@@ -885,8 +894,9 @@ static void gsi_ctrl_clear_cpkt_queues(struct f_gsi *gsi, bool skip_req_q)
{
struct gsi_ctrl_pkt *cpkt = NULL;
struct list_head *act, *tmp;
+ unsigned long flags;
- spin_lock(&gsi->c_port.lock);
+ spin_lock_irqsave(&gsi->c_port.lock, flags);
if (skip_req_q)
goto clean_resp_q;
@@ -901,7 +911,7 @@ clean_resp_q:
list_del(&cpkt->list);
gsi_ctrl_pkt_free(cpkt);
}
- spin_unlock(&gsi->c_port.lock);
+ spin_unlock_irqrestore(&gsi->c_port.lock, flags);
}
static int gsi_ctrl_send_cpkt_tomodem(struct f_gsi *gsi, void *buf, size_t len)
@@ -945,38 +955,71 @@ static int gsi_ctrl_dev_open(struct inode *ip, struct file *fp)
struct gsi_ctrl_port *c_port = container_of(fp->private_data,
struct gsi_ctrl_port,
ctrl_device);
+ struct f_gsi *gsi;
+ struct gsi_inst_status *inst_cur;
if (!c_port) {
- log_event_err("%s: gsi ctrl port %pK", __func__, c_port);
+ pr_err_ratelimited("%s: gsi ctrl port %pK", __func__, c_port);
return -ENODEV;
}
- log_event_dbg("%s: open ctrl dev %s", __func__, c_port->name);
+ pr_devel_ratelimited("%s: open ctrl dev %s", __func__, c_port->name);
+
+ gsi = container_of(c_port, struct f_gsi, c_port);
+ inst_cur = &inst_status[gsi->prot_id];
+
+ mutex_lock(&inst_cur->gsi_lock);
+
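+ /*
+ * Store the protocol id (rather than the port pointer) so later
+ * fops can re-validate the instance under gsi_lock even after
+ * free_inst has run.
+ */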
+ fp->private_data = &gsi->prot_id;
+
+ if (!inst_cur->inst_exist) {
+ mutex_unlock(&inst_cur->gsi_lock);
+ pr_err_ratelimited(
+ "%s: [prot_id = %d], GSI instance freed already\n",
+ __func__, gsi->prot_id);
+ return -ENODEV;
+ }
if (c_port->is_open) {
- log_event_err("%s: Already opened", __func__);
+ mutex_unlock(&inst_cur->gsi_lock);
+ log_event_err("%s: Already opened\n", __func__);
return -EBUSY;
}
c_port->is_open = true;
+ mutex_unlock(&inst_cur->gsi_lock);
+
return 0;
}
static int gsi_ctrl_dev_release(struct inode *ip, struct file *fp)
{
- struct gsi_ctrl_port *c_port = container_of(fp->private_data,
- struct gsi_ctrl_port,
- ctrl_device);
+ enum ipa_usb_teth_prot prot_id =
+ *(enum ipa_usb_teth_prot *)(fp->private_data);
+ struct gsi_inst_status *inst_cur = &inst_status[prot_id];
- if (!c_port) {
- log_event_err("%s: gsi ctrl port %pK", __func__, c_port);
+ mutex_lock(&inst_cur->gsi_lock);
+
+ if (unlikely(inst_cur->inst_exist == false)) {
+ if (inst_cur->opts) {
+ /* GSI instance clean up */
+ gsi_inst_clean(inst_cur->opts);
+ inst_cur->opts = NULL;
+ }
+ mutex_unlock(&inst_cur->gsi_lock);
+ pr_err_ratelimited(
+ "%s: [prot_id = %d], Delayed free instance memory\n",
+ __func__, prot_id);
return -ENODEV;
}
- log_event_dbg("close ctrl dev %s", c_port->name);
+ inst_cur->opts->gsi->c_port.is_open = false;
- c_port->is_open = false;
+ mutex_unlock(&inst_cur->gsi_lock);
+
+ log_event_dbg("close ctrl dev %s\n",
+ inst_cur->opts->gsi->c_port.name);
return 0;
}
@@ -984,16 +1027,27 @@ static int gsi_ctrl_dev_release(struct inode *ip, struct file *fp)
static ssize_t
gsi_ctrl_dev_read(struct file *fp, char __user *buf, size_t count, loff_t *pos)
{
- struct gsi_ctrl_port *c_port = container_of(fp->private_data,
- struct gsi_ctrl_port,
- ctrl_device);
-
+ struct gsi_ctrl_port *c_port;
struct gsi_ctrl_pkt *cpkt = NULL;
+ enum ipa_usb_teth_prot prot_id =
+ *(enum ipa_usb_teth_prot *)(fp->private_data);
+ struct gsi_inst_status *inst_cur = &inst_status[prot_id];
unsigned long flags;
int ret = 0;
log_event_dbg("%s: Enter %zu", __func__, count);
+ mutex_lock(&inst_cur->gsi_lock);
+ if (unlikely(inst_cur->inst_exist == false)) {
+ mutex_unlock(&inst_cur->gsi_lock);
+ pr_err_ratelimited(
+ "%s: free_inst is called, free memory until dev is closed\n",
+ __func__);
+ return -ENODEV;
+ }
+ mutex_unlock(&inst_cur->gsi_lock);
+
+ c_port = &inst_cur->opts->gsi->c_port;
if (!c_port) {
log_event_err("%s: gsi ctrl port %pK", __func__, c_port);
return -ENODEV;
@@ -1061,14 +1115,29 @@ static ssize_t gsi_ctrl_dev_write(struct file *fp, const char __user *buf,
int ret = 0;
unsigned long flags;
struct gsi_ctrl_pkt *cpkt;
- struct gsi_ctrl_port *c_port = container_of(fp->private_data,
- struct gsi_ctrl_port,
- ctrl_device);
- struct f_gsi *gsi = c_port_to_gsi(c_port);
- struct usb_request *req = c_port->notify_req;
+ struct gsi_ctrl_port *c_port;
+ struct usb_request *req;
+ enum ipa_usb_teth_prot prot_id =
+ *(enum ipa_usb_teth_prot *)(fp->private_data);
+ struct gsi_inst_status *inst_cur = &inst_status[prot_id];
+ struct f_gsi *gsi;
log_event_dbg("Enter %zu", count);
+ mutex_lock(&inst_cur->gsi_lock);
+ if (unlikely(inst_cur->inst_exist == false)) {
+ mutex_unlock(&inst_cur->gsi_lock);
+ pr_err_ratelimited(
+ "%s: free_inst is called, free memory until dev is closed\n",
+ __func__);
+ return -ENODEV;
+ }
+ mutex_unlock(&inst_cur->gsi_lock);
+
+ gsi = inst_cur->opts->gsi;
+ c_port = &gsi->c_port;
+ req = c_port->notify_req;
+
if (!c_port || !req || !req->buf) {
log_event_err("%s: c_port %pK req %pK req->buf %pK",
__func__, c_port, req, req ? req->buf : req);
@@ -1125,15 +1194,29 @@ static ssize_t gsi_ctrl_dev_write(struct file *fp, const char __user *buf,
static long gsi_ctrl_dev_ioctl(struct file *fp, unsigned cmd,
unsigned long arg)
{
- struct gsi_ctrl_port *c_port = container_of(fp->private_data,
- struct gsi_ctrl_port,
- ctrl_device);
- struct f_gsi *gsi = c_port_to_gsi(c_port);
+ struct gsi_ctrl_port *c_port;
+ struct f_gsi *gsi;
struct gsi_ctrl_pkt *cpkt;
struct ep_info info;
+ enum ipa_usb_teth_prot prot_id =
+ *(enum ipa_usb_teth_prot *)(fp->private_data);
+ struct gsi_inst_status *inst_cur = &inst_status[prot_id];
int val, ret = 0;
unsigned long flags;
+ mutex_lock(&inst_cur->gsi_lock);
+ if (unlikely(inst_cur->inst_exist == false)) {
+ mutex_unlock(&inst_cur->gsi_lock);
+ pr_err_ratelimited(
+ "%s: free_inst is called, free memory until dev is closed\n",
+ __func__);
+ return -ENODEV;
+ }
+ mutex_unlock(&inst_cur->gsi_lock);
+
+ gsi = inst_cur->opts->gsi;
+ c_port = &gsi->c_port;
+
if (!c_port) {
log_event_err("%s: gsi ctrl port %pK", __func__, c_port);
return -ENODEV;
@@ -1254,12 +1337,24 @@ exit_ioctl:
static unsigned int gsi_ctrl_dev_poll(struct file *fp, poll_table *wait)
{
- struct gsi_ctrl_port *c_port = container_of(fp->private_data,
- struct gsi_ctrl_port,
- ctrl_device);
+ struct gsi_ctrl_port *c_port;
+ enum ipa_usb_teth_prot prot_id =
+ *(enum ipa_usb_teth_prot *)(fp->private_data);
+ struct gsi_inst_status *inst_cur = &inst_status[prot_id];
unsigned long flags;
unsigned int mask = 0;
+ mutex_lock(&inst_cur->gsi_lock);
+ if (unlikely(inst_cur->inst_exist == false)) {
+ mutex_unlock(&inst_cur->gsi_lock);
+ pr_err_ratelimited(
+ "%s: free_inst is called, free memory until dev is closed\n",
+ __func__);
+ return -ENODEV;
+ }
+ mutex_unlock(&inst_cur->gsi_lock);
+
+ c_port = &inst_cur->opts->gsi->c_port;
if (!c_port) {
log_event_err("%s: gsi ctrl port %pK", __func__, c_port);
return -ENODEV;
@@ -1577,6 +1672,7 @@ static void gsi_rndis_command_complete(struct usb_ep *ep,
struct usb_request *req)
{
struct f_gsi *rndis = req->context;
+ rndis_init_msg_type *buf;
int status;
if (req->status != 0) {
@@ -1589,6 +1685,16 @@ static void gsi_rndis_command_complete(struct usb_ep *ep,
if (status < 0)
log_event_err("RNDIS command error %d, %d/%d",
status, req->actual, req->length);
+
+ buf = (rndis_init_msg_type *)req->buf;
+ if (buf->MessageType == RNDIS_MSG_INIT) {
+ rndis->d_port.in_aggr_size = min_t(u32,
+ rndis->d_port.in_aggr_size,
+ rndis->params->dl_max_xfer_size);
+ log_event_dbg("RNDIS host dl_aggr_size:%d in_aggr_size:%d\n",
+ rndis->params->dl_max_xfer_size,
+ rndis->d_port.in_aggr_size);
+ }
}
static void
@@ -2748,7 +2854,7 @@ static void gsi_unbind(struct usb_configuration *c, struct usb_function *f)
static void gsi_free_func(struct usb_function *f)
{
- pr_debug("%s\n", __func__);
+ log_event_dbg("%s\n", __func__);
}
int gsi_bind_config(struct f_gsi *gsi)
@@ -2844,6 +2950,7 @@ static void gsi_opts_release(struct config_item *item)
{
struct gsi_opts *opts = to_gsi_opts(item);
+ log_event_dbg("Release GSI: %s\n", __func__);
usb_put_function_instance(&opts->func_inst);
}
@@ -3043,48 +3150,89 @@ static struct config_item_type gsi_func_type = {
.ct_owner = THIS_MODULE,
};
+static void gsi_inst_clean(struct gsi_opts *opts)
+{
+ if (opts->gsi->c_port.ctrl_device.fops)
+ misc_deregister(&opts->gsi->c_port.ctrl_device);
+
+ kfree(opts->gsi);
+ kfree(opts);
+}
+
static int gsi_set_inst_name(struct usb_function_instance *fi,
const char *name)
{
- int ret, name_len;
+ int prot_id, name_len;
struct f_gsi *gsi;
- struct gsi_opts *opts = container_of(fi, struct gsi_opts, func_inst);
+ struct gsi_opts *opts, *opts_prev;
+
+ opts = container_of(fi, struct gsi_opts, func_inst);
name_len = strlen(name) + 1;
if (name_len > MAX_INST_NAME_LEN)
return -ENAMETOOLONG;
- ret = name_to_prot_id(name);
- if (ret < 0) {
- pr_err("%s: failed to find prot id for %s instance\n",
+ prot_id = name_to_prot_id(name);
+ if (prot_id < 0) {
+ log_event_err("%s: failed to find prot id for %s instance\n",
__func__, name);
return -EINVAL;
}
- if (ret == IPA_USB_RNDIS)
+ mutex_lock(&inst_status[prot_id].gsi_lock);
+ opts_prev = inst_status[prot_id].opts;
+ if (opts_prev) {
+ mutex_unlock(&inst_status[prot_id].gsi_lock);
+ log_event_err("%s: prot_id = %d, prev inst do not freed yet\n",
+ __func__, prot_id);
+ return -EBUSY;
+ }
+ mutex_unlock(&inst_status[prot_id].gsi_lock);
+
+ if (prot_id == IPA_USB_RNDIS)
config_group_init_type_name(&opts->func_inst.group, "",
&gsi_func_rndis_type);
- gsi = gsi_function_init(ret);
+ gsi = gsi_function_init(prot_id);
if (IS_ERR(gsi))
return PTR_ERR(gsi);
opts->gsi = gsi;
+ /* Set instance status */
+ mutex_lock(&inst_status[prot_id].gsi_lock);
+ inst_status[prot_id].inst_exist = true;
+ inst_status[prot_id].opts = opts;
+ mutex_unlock(&inst_status[prot_id].gsi_lock);
+
return 0;
}
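The bookkeeping added here is a claim-then-publish idiom around the per-protocol slot; reduced to its skeleton (identifiers from this patch):

	mutex_lock(&inst_status[prot_id].gsi_lock);
	if (inst_status[prot_id].opts) {	/* slot still owned */
		mutex_unlock(&inst_status[prot_id].gsi_lock);
		return -EBUSY;
	}
	mutex_unlock(&inst_status[prot_id].gsi_lock);

	/* ... allocate the instance, then publish it under the same lock */
	mutex_lock(&inst_status[prot_id].gsi_lock);
	inst_status[prot_id].inst_exist = true;
	inst_status[prot_id].opts = opts;
	mutex_unlock(&inst_status[prot_id].gsi_lock);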
static void gsi_free_inst(struct usb_function_instance *f)
{
struct gsi_opts *opts = container_of(f, struct gsi_opts, func_inst);
+ enum ipa_usb_teth_prot prot_id;
if (!opts->gsi)
return;
- if (opts->gsi->c_port.ctrl_device.fops)
- misc_deregister(&opts->gsi->c_port.ctrl_device);
+ prot_id = opts->gsi->prot_id;
- kfree(opts->gsi);
- kfree(opts);
+ mutex_lock(&inst_status[prot_id].gsi_lock);
+ if (opts->gsi->c_port.is_open) {
+ /* Mark instance exist as false */
+ inst_status[prot_id].inst_exist = false;
+ mutex_unlock(&inst_status[prot_id].gsi_lock);
+ log_event_err(
+ "%s: [prot_id = %d] Dev is open, free mem when dev close\n",
+ __func__, prot_id);
+ return;
+ }
+
+ /* Clear instance status */
+ gsi_inst_clean(opts);
+ inst_status[prot_id].inst_exist = false;
+ inst_status[prot_id].opts = NULL;
+ mutex_unlock(&inst_status[prot_id].gsi_lock);
}
static struct usb_function_instance *gsi_alloc_inst(void)
@@ -3098,7 +3246,7 @@ static struct usb_function_instance *gsi_alloc_inst(void)
opts->func_inst.set_inst_name = gsi_set_inst_name;
opts->func_inst.free_func_inst = gsi_free_inst;
config_group_init_type_name(&opts->func_inst.group, "",
- &gsi_func_type);
+ &gsi_func_type);
return &opts->func_inst;
}
@@ -3123,6 +3271,8 @@ MODULE_DESCRIPTION("GSI function driver");
static int fgsi_init(void)
{
+ int i;
+
ipa_usb_wq = alloc_workqueue("k_ipa_usb",
WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_FREEZABLE, 1);
if (!ipa_usb_wq) {
@@ -3130,6 +3280,9 @@ static int fgsi_init(void)
return -ENOMEM;
}
+ for (i = 0; i < IPA_USB_MAX_TETH_PROT_SIZE; i++)
+ mutex_init(&inst_status[i].gsi_lock);
+
return usb_function_register(&gsiusb_func);
}
module_init(fgsi_init);
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
index 8536f10a2e35..5189f6d0f09d 100644
--- a/drivers/usb/gadget/function/f_qdss.c
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -374,7 +374,7 @@ static int qdss_bind(struct usb_configuration *c, struct usb_function *f)
struct usb_gadget *gadget = c->cdev->gadget;
struct f_qdss *qdss = func_to_qdss(f);
struct usb_ep *ep;
- int iface;
+ int iface, id;
pr_debug("qdss_bind\n");
@@ -392,6 +392,12 @@ static int qdss_bind(struct usb_configuration *c, struct usb_function *f)
qdss_data_intf_desc.bInterfaceNumber = iface;
qdss->data_iface_id = iface;
+ id = usb_string_id(c->cdev);
+ if (id < 0)
+ return id;
+ qdss_string_defs[QDSS_DATA_IDX].id = id;
+ qdss_data_intf_desc.iInterface = id;
+
if (qdss->debug_inface_enabled) {
/* Allocate ctrl I/F */
iface = usb_interface_id(c, f);
@@ -401,6 +407,11 @@ static int qdss_bind(struct usb_configuration *c, struct usb_function *f)
}
qdss_ctrl_intf_desc.bInterfaceNumber = iface;
qdss->ctrl_iface_id = iface;
+ id = usb_string_id(c->cdev);
+ if (id < 0)
+ return id;
+ qdss_string_defs[QDSS_CTRL_IDX].id = id;
+ qdss_ctrl_intf_desc.iInterface = id;
}
ep = usb_ep_autoconfig_ss(gadget, &qdss_ss_data_desc,
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 03b9a372636f..1fc6f478a02c 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -133,29 +133,30 @@ static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
pinfo->sb_type.gen = AMD_CHIPSET_SB700;
else if (rev >= 0x40 && rev <= 0x4f)
pinfo->sb_type.gen = AMD_CHIPSET_SB800;
- }
- pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
- 0x145c, NULL);
- if (pinfo->smbus_dev) {
- pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
} else {
pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
- if (!pinfo->smbus_dev) {
- pinfo->sb_type.gen = NOT_AMD_CHIPSET;
- return 0;
+ if (pinfo->smbus_dev) {
+ rev = pinfo->smbus_dev->revision;
+ if (rev >= 0x11 && rev <= 0x14)
+ pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
+ else if (rev >= 0x15 && rev <= 0x18)
+ pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
+ else if (rev >= 0x39 && rev <= 0x3a)
+ pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
+ } else {
+ pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+ 0x145c, NULL);
+ if (pinfo->smbus_dev) {
+ rev = pinfo->smbus_dev->revision;
+ pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
+ } else {
+ pinfo->sb_type.gen = NOT_AMD_CHIPSET;
+ return 0;
+ }
}
-
- rev = pinfo->smbus_dev->revision;
- if (rev >= 0x11 && rev <= 0x14)
- pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
- else if (rev >= 0x15 && rev <= 0x18)
- pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
- else if (rev >= 0x39 && rev <= 0x3a)
- pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
}
-
pinfo->sb_type.rev = rev;
return 1;
}
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 56a9cd62f2c4..c6998f086e12 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -325,6 +325,34 @@ static int xhci_plat_remove(struct platform_device *dev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int xhci_plat_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ if (!xhci)
+ return 0;
+
+ dev_dbg(dev, "xhci-plat PM suspend\n");
+
+ return xhci_suspend(xhci, true);
+}
+
+static int xhci_plat_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ if (!xhci)
+ return 0;
+
+ dev_dbg(dev, "xhci-plat PM resume\n");
+
+ return xhci_resume(xhci, false);
+}
+#endif
+
#ifdef CONFIG_PM
static int xhci_plat_runtime_idle(struct device *dev)
{
@@ -373,7 +401,7 @@ static int xhci_plat_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops xhci_plat_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(NULL, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(xhci_plat_suspend, xhci_plat_resume)
SET_RUNTIME_PM_OPS(xhci_plat_runtime_suspend, xhci_plat_runtime_resume,
xhci_plat_runtime_idle)
};
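The resulting table follows the standard pattern of carrying both system-sleep and runtime callbacks in one dev_pm_ops; a minimal sketch with illustrative callback names:

	static const struct dev_pm_ops demo_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(demo_suspend, demo_resume)
		SET_RUNTIME_PM_OPS(demo_rt_suspend, demo_rt_resume,
				   demo_rt_idle)
	};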
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index aab1c7903288..641e0280ad5a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -918,7 +918,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
struct usb_hcd *hcd = xhci_to_hcd(xhci);
u32 command;
- if (!hcd->state)
+ if (!hcd->state || xhci->suspended)
return 0;
if (hcd->state != HC_STATE_SUSPENDED ||
@@ -988,6 +988,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
/* step 5: remove core well power */
/* synchronize irq when using MSI-X */
xhci_msix_sync_irqs(xhci);
+ xhci->suspended = true;
return rc;
}
@@ -1007,7 +1008,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
int retval = 0;
bool comp_timer_running = false;
- if (!hcd->state)
+ if (!hcd->state || !xhci->suspended)
return 0;
/* Wait a bit if either of the roothubs need to settle from the
@@ -1141,6 +1142,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* Re-enable port polling. */
xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+ xhci->suspended = false;
set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
usb_hcd_poll_rh_status(xhci->shared_hcd);
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 8fcec1be6b1a..7fc97d930657 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1667,6 +1667,7 @@ struct xhci_hcd {
/* Compliance Mode Recovery Data */
struct timer_list comp_mode_recovery_timer;
u32 port_status_u0;
+ bool suspended;
/* Compliance Mode Timer Triggered every 2 seconds */
#define COMP_MODE_RCVRY_MSECS 2000
};
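Together with the xhci.c hunks above, the new flag makes the suspend/resume pair idempotent; the skeleton of the guard, illustrative rather than the full functions:

	struct demo_hc {
		bool suspended;
	};

	static int demo_suspend(struct demo_hc *hc)
	{
		if (hc->suspended)
			return 0;	/* second suspend is a no-op */
		/* ... quiesce the controller ... */
		hc->suspended = true;
		return 0;
	}

	static int demo_resume(struct demo_hc *hc)
	{
		if (!hc->suspended)
			return 0;	/* never suspended: nothing to undo */
		/* ... reinitialize the controller ... */
		hc->suspended = false;
		return 0;
	}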
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index f9f47da8a88b..3c0386ee5875 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -125,6 +125,12 @@ enum usbpd_control_msg_type {
MSG_VCONN_SWAP,
MSG_WAIT,
MSG_SOFT_RESET,
+ MSG_NOT_SUPPORTED = 0x10,
+ MSG_GET_SOURCE_CAP_EXTENDED,
+ MSG_GET_STATUS,
+ MSG_FR_SWAP,
+ MSG_GET_PPS_STATUS,
+ MSG_GET_COUNTRY_CODES,
};
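These new control types are why the header macros below widen the type mask: values from MSG_NOT_SUPPORTED (0x10) upward no longer fit in the 4-bit field that PD 2.0 decoding assumed. A worked value:

	/* With the old mask:     0x10 & 0xF  == 0x0  (aliases onto type 0)
	 * With the widened mask: 0x10 & 0x1F == 0x10 (decoded correctly)  */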
enum usbpd_data_msg_type {
@@ -132,9 +138,29 @@ enum usbpd_data_msg_type {
MSG_REQUEST,
MSG_BIST,
MSG_SINK_CAPABILITIES,
+ MSG_BATTERY_STATUS,
+ MSG_ALERT,
+ MSG_GET_COUNTRY_INFO,
MSG_VDM = 0xF,
};
+enum usbpd_ext_msg_type {
+ MSG_SOURCE_CAPABILITIES_EXTENDED = 1,
+ MSG_STATUS,
+ MSG_GET_BATTERY_CAP,
+ MSG_GET_BATTERY_STATUS,
+ MSG_BATTERY_CAPABILITIES,
+ MSG_GET_MANUFACTURER_INFO,
+ MSG_MANUFACTURER_INFO,
+ MSG_SECURITY_REQUEST,
+ MSG_SECURITY_RESPONSE,
+ MSG_FIRMWARE_UPDATE_REQUEST,
+ MSG_FIRMWARE_UPDATE_RESPONSE,
+ MSG_PPS_STATUS,
+ MSG_COUNTRY_INFO,
+ MSG_COUNTRY_CODES,
+};
+
enum vdm_state {
VDM_NONE,
DISCOVERED_ID,
@@ -198,13 +224,30 @@ static void *usbpd_ipc_log;
#define PD_MAX_DATA_OBJ 7
+#define PD_SRC_CAP_EXT_DB_LEN 24
+#define PD_STATUS_DB_LEN 5
+#define PD_BATTERY_CAP_DB_LEN 9
+
+#define PD_MAX_EXT_MSG_LEN 260
+#define PD_MAX_EXT_MSG_LEGACY_LEN 26
+
#define PD_MSG_HDR(type, dr, pr, id, cnt, rev) \
- (((type) & 0xF) | ((dr) << 5) | (rev << 6) | \
+ (((type) & 0x1F) | ((dr) << 5) | (rev << 6) | \
((pr) << 8) | ((id) << 9) | ((cnt) << 12))
-#define PD_MSG_HDR_COUNT(hdr) (((hdr) >> 12) & 7)
-#define PD_MSG_HDR_TYPE(hdr) ((hdr) & 0xF)
-#define PD_MSG_HDR_ID(hdr) (((hdr) >> 9) & 7)
-#define PD_MSG_HDR_REV(hdr) (((hdr) >> 6) & 3)
+#define PD_MSG_HDR_COUNT(hdr) (((hdr) >> 12) & 7)
+#define PD_MSG_HDR_TYPE(hdr) ((hdr) & 0x1F)
+#define PD_MSG_HDR_ID(hdr) (((hdr) >> 9) & 7)
+#define PD_MSG_HDR_REV(hdr) (((hdr) >> 6) & 3)
+#define PD_MSG_HDR_EXTENDED BIT(15)
+#define PD_MSG_HDR_IS_EXTENDED(hdr) ((hdr) & PD_MSG_HDR_EXTENDED)
+
+#define PD_MSG_EXT_HDR(chunked, num, req, size) \
+ (((chunked) << 15) | (((num) & 0xF) << 11) | \
+ ((req) << 10) | ((size) & 0x1FF))
+#define PD_MSG_EXT_HDR_IS_CHUNKED(ehdr) ((ehdr) & 0x8000)
+#define PD_MSG_EXT_HDR_CHUNK_NUM(ehdr) (((ehdr) >> 11) & 0xF)
+#define PD_MSG_EXT_HDR_REQ_CHUNK(ehdr) ((ehdr) & 0x400)
+#define PD_MSG_EXT_HDR_DATA_SIZE(ehdr) ((ehdr) & 0x1FF)
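A worked example of the extended-header macros (values illustrative): a chunked header for chunk 2 of a 60-byte message packs and unpacks as

	u16 ehdr = PD_MSG_EXT_HDR(1, 2, 0, 60);
	/* = 0x8000 | (2 << 11) | (0 << 10) | 60 = 0x903C   */
	/* PD_MSG_EXT_HDR_IS_CHUNKED(ehdr) -> 0x8000 (true) */
	/* PD_MSG_EXT_HDR_CHUNK_NUM(ehdr)  -> 2             */
	/* PD_MSG_EXT_HDR_REQ_CHUNK(ehdr)  -> 0             */
	/* PD_MSG_EXT_HDR_DATA_SIZE(ehdr)  -> 60            */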
#define PD_RDO_FIXED(obj, gb, mismatch, usb_comm, no_usb_susp, curr1, curr2) \
(((obj) << 28) | ((gb) << 27) | ((mismatch) << 26) | \
@@ -291,19 +334,24 @@ static const u32 default_src_caps[] = { 0x36019096 }; /* VSafe5V @ 1.5A */
static const u32 default_snk_caps[] = { 0x2601912C }; /* VSafe5V @ 3A */
struct vdm_tx {
- u32 data[7];
+ u32 data[PD_MAX_DATA_OBJ];
int size;
};
struct rx_msg {
- u8 type;
- u8 len;
- u32 payload[7];
+ u16 hdr;
+ u16 data_len; /* size of payload in bytes */
struct list_head entry;
+ u8 payload[];
};
-#define IS_DATA(m, t) ((m) && ((m)->len) && ((m)->type == (t)))
-#define IS_CTRL(m, t) ((m) && !((m)->len) && ((m)->type == (t)))
+#define IS_DATA(m, t) ((m) && !PD_MSG_HDR_IS_EXTENDED((m)->hdr) && \
+ PD_MSG_HDR_COUNT((m)->hdr) && \
+ (PD_MSG_HDR_TYPE((m)->hdr) == (t)))
+#define IS_CTRL(m, t) ((m) && !PD_MSG_HDR_COUNT((m)->hdr) && \
+ (PD_MSG_HDR_TYPE((m)->hdr) == (t)))
+#define IS_EXT(m, t) ((m) && PD_MSG_HDR_IS_EXTENDED((m)->hdr) && \
+ (PD_MSG_HDR_TYPE((m)->hdr) == (t)))
struct usbpd {
struct device dev;
@@ -318,8 +366,10 @@ struct usbpd {
bool hard_reset_recvd;
struct list_head rx_q;
spinlock_t rx_lock;
+ struct rx_msg *rx_ext_msg;
u32 received_pdos[PD_MAX_DATA_OBJ];
+ u32 received_ado;
u16 src_cap_id;
u8 selected_pdo;
u8 requested_pdo;
@@ -351,6 +401,8 @@ struct usbpd {
bool pd_phy_opened;
bool send_request;
struct completion is_ready;
+ struct completion tx_chunk_request;
+ u8 next_tx_chunk;
struct mutex swap_lock;
struct dual_role_phy_instance *dual_role;
@@ -377,6 +429,19 @@ struct usbpd {
struct list_head svid_handlers;
struct list_head instance;
+
+ /* ext msg support */
+ bool send_get_src_cap_ext;
+ u8 src_cap_ext_db[PD_SRC_CAP_EXT_DB_LEN];
+ bool send_get_pps_status;
+ u32 pps_status_db;
+ u8 status_db[PD_STATUS_DB_LEN];
+ bool send_get_battery_cap;
+ u8 get_battery_cap_db;
+ u8 battery_cap_db[PD_BATTERY_CAP_DB_LEN];
+ u8 get_battery_status_db;
+ bool send_get_battery_status;
+ u32 battery_sts_dobj;
};
static LIST_HEAD(_usbpd); /* useful for debugging */
@@ -498,6 +563,57 @@ static int pd_send_msg(struct usbpd *pd, u8 msg_type, const u32 *data,
return 0;
}
+static int pd_send_ext_msg(struct usbpd *pd, u8 msg_type,
+ const u8 *data, size_t data_len, enum pd_sop_type sop)
+{
+ int ret;
+ size_t len_remain, chunk_len;
+ u8 chunked_payload[PD_MAX_DATA_OBJ * sizeof(u32)] = {0};
+ u16 hdr;
+ u16 ext_hdr;
+ u8 num_objs;
+
+ if (data_len > PD_MAX_EXT_MSG_LEN) {
+ usbpd_warn(&pd->dev, "Extended message length exceeds max, truncating...\n");
+ data_len = PD_MAX_EXT_MSG_LEN;
+ }
+
+ pd->next_tx_chunk = 0;
+ len_remain = data_len;
+ do {
+ ext_hdr = PD_MSG_EXT_HDR(1, pd->next_tx_chunk++, 0, data_len);
+ memcpy(chunked_payload, &ext_hdr, sizeof(ext_hdr));
+
+ chunk_len = min_t(size_t, len_remain,
+ PD_MAX_EXT_MSG_LEGACY_LEN);
+ memcpy(chunked_payload + sizeof(ext_hdr), data, chunk_len);
+
+ num_objs = DIV_ROUND_UP(chunk_len + sizeof(u16), sizeof(u32));
+ len_remain -= chunk_len;
+
+ reinit_completion(&pd->tx_chunk_request);
+ hdr = PD_MSG_HDR(msg_type, pd->current_dr, pd->current_pr,
+ pd->tx_msgid, num_objs, pd->spec_rev) |
+ PD_MSG_HDR_EXTENDED;
+ ret = pd_phy_write(hdr, chunked_payload,
+ num_objs * sizeof(u32), sop);
+ if (ret)
+ return ret;
+
+ pd->tx_msgid = (pd->tx_msgid + 1) & PD_MAX_MSG_ID;
+
+ /* Wait for request chunk */
+ if (len_remain &&
+ !wait_for_completion_timeout(&pd->tx_chunk_request,
+ msecs_to_jiffies(SENDER_RESPONSE_TIME))) {
+ usbpd_err(&pd->dev, "Timed out waiting for chunk request\n");
+ return -EPROTO;
+ }
+ } while (len_remain);
+
+ return 0;
+}
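To make the chunking arithmetic concrete: a 60-byte payload leaves this loop as three chunks carrying 26, 26 and 8 data bytes. For a full chunk, num_objs = DIV_ROUND_UP(26 + 2, 4) = 7, i.e. the 2-byte extended header plus 26 data bytes exactly fill the seven 32-bit data objects a legacy-sized message can carry.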
+
static int pd_select_pdo(struct usbpd *pd, int pdo_pos, int uv, int ua)
{
int curr;
@@ -629,6 +745,150 @@ static void phy_sig_received(struct usbpd *pd, enum pd_sig_type sig)
kick_sm(pd, 0);
}
+struct pd_request_chunk {
+ struct work_struct w;
+ struct usbpd *pd;
+ u8 msg_type;
+ u8 chunk_num;
+ enum pd_sop_type sop;
+};
+
+static void pd_request_chunk_work(struct work_struct *w)
+{
+ struct pd_request_chunk *req =
+ container_of(w, struct pd_request_chunk, w);
+ struct usbpd *pd = req->pd;
+ unsigned long flags;
+ int ret;
+ u8 payload[4] = {0}; /* ext_hdr + padding */
+ u16 hdr = PD_MSG_HDR(req->msg_type, pd->current_dr, pd->current_pr,
+ pd->tx_msgid, 1, pd->spec_rev) | PD_MSG_HDR_EXTENDED;
+
+ *(u16 *)payload = PD_MSG_EXT_HDR(1, req->chunk_num, 1, 0);
+
+ ret = pd_phy_write(hdr, payload, sizeof(payload), req->sop);
+ if (!ret) {
+ pd->tx_msgid = (pd->tx_msgid + 1) & PD_MAX_MSG_ID;
+ } else {
+ usbpd_err(&pd->dev, "could not send chunk request\n");
+
+ /* queue what we have anyway */
+ spin_lock_irqsave(&pd->rx_lock, flags);
+ list_add_tail(&pd->rx_ext_msg->entry, &pd->rx_q);
+ spin_unlock_irqrestore(&pd->rx_lock, flags);
+
+ pd->rx_ext_msg = NULL;
+ }
+
+ kfree(req);
+}
+
+static struct rx_msg *pd_ext_msg_received(struct usbpd *pd, u16 header, u8 *buf,
+ size_t len, enum pd_sop_type sop)
+{
+ struct rx_msg *rx_msg;
+ u16 bytes_to_copy;
+ u16 ext_hdr = *(u16 *)buf;
+ u8 chunk_num;
+
+ if (!PD_MSG_EXT_HDR_IS_CHUNKED(ext_hdr)) {
+ usbpd_err(&pd->dev, "unchunked extended messages unsupported\n");
+ return NULL;
+ }
+
+ /* request for next Tx chunk */
+ if (PD_MSG_EXT_HDR_REQ_CHUNK(ext_hdr)) {
+ if (PD_MSG_EXT_HDR_DATA_SIZE(ext_hdr) ||
+ PD_MSG_EXT_HDR_CHUNK_NUM(ext_hdr) !=
+ pd->next_tx_chunk) {
+ usbpd_err(&pd->dev, "invalid request chunk ext header 0x%02x\n",
+ ext_hdr);
+ return NULL;
+ }
+
+ if (!completion_done(&pd->tx_chunk_request))
+ complete(&pd->tx_chunk_request);
+
+ return NULL;
+ }
+
+ chunk_num = PD_MSG_EXT_HDR_CHUNK_NUM(ext_hdr);
+ if (!chunk_num) {
+ /* allocate new message if first chunk */
+ rx_msg = kzalloc(sizeof(*rx_msg) +
+ PD_MSG_EXT_HDR_DATA_SIZE(ext_hdr),
+ GFP_KERNEL);
+ if (!rx_msg)
+ return NULL;
+
+ rx_msg->hdr = header;
+ rx_msg->data_len = PD_MSG_EXT_HDR_DATA_SIZE(ext_hdr);
+
+ if (rx_msg->data_len > PD_MAX_EXT_MSG_LEN) {
+ usbpd_warn(&pd->dev, "Extended message length exceeds max, truncating...\n");
+ rx_msg->data_len = PD_MAX_EXT_MSG_LEN;
+ }
+ } else {
+ if (!pd->rx_ext_msg) {
+ usbpd_err(&pd->dev, "missing first rx_ext_msg chunk\n");
+ return NULL;
+ }
+
+ rx_msg = pd->rx_ext_msg;
+ }
+
+ /*
+ * The amount to copy is derived as follows:
+ *
+ * - if extended data_len < 26, then copy data_len bytes
+ * - for chunks 0..N-2, copy 26 bytes
+ * - for the last chunk (N-1), copy the remainder
+ */
+ bytes_to_copy =
+ min((rx_msg->data_len - chunk_num * PD_MAX_EXT_MSG_LEGACY_LEN),
+ PD_MAX_EXT_MSG_LEGACY_LEN);
+
+ /* check against received length to avoid overrun */
+ if (bytes_to_copy > len - sizeof(ext_hdr)) {
+ usbpd_warn(&pd->dev, "not enough bytes in chunk, expected:%u received:%zu\n",
+ bytes_to_copy, len - sizeof(ext_hdr));
+ bytes_to_copy = len - sizeof(ext_hdr);
+ }
+
+ memcpy(rx_msg->payload + chunk_num * PD_MAX_EXT_MSG_LEGACY_LEN, buf + 2,
+ bytes_to_copy);
+
+ /* request next chunk? */
+ if ((rx_msg->data_len - chunk_num * PD_MAX_EXT_MSG_LEGACY_LEN) >
+ PD_MAX_EXT_MSG_LEGACY_LEN) {
+ struct pd_request_chunk *req;
+
+ if (pd->rx_ext_msg && pd->rx_ext_msg != rx_msg) {
+ usbpd_dbg(&pd->dev, "stale previous rx_ext_msg?\n");
+ kfree(pd->rx_ext_msg);
+ }
+
+ pd->rx_ext_msg = rx_msg;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ goto queue_rx; /* return what we have anyway */
+
+ INIT_WORK(&req->w, pd_request_chunk_work);
+ req->pd = pd;
+ req->msg_type = PD_MSG_HDR_TYPE(header);
+ req->chunk_num = chunk_num + 1;
+ req->sop = sop;
+ queue_work(pd->wq, &req->w);
+
+ return NULL;
+ }
+
+queue_rx:
+ pd->rx_ext_msg = NULL;
+ return rx_msg; /* queue it for usbpd_sm */
+}
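Continuing the 60-byte example on the receive side: chunk 0 copies min(60 - 0, 26) = 26 bytes, chunk 1 copies min(60 - 26, 26) = 26, and chunk 2 copies min(60 - 52, 26) = 8; after chunk 2 the request-next-chunk test (60 - 2*26 = 8, which is not greater than 26) fails, so the reassembled message is queued for usbpd_sm().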
+
static void phy_msg_received(struct usbpd *pd, enum pd_sop_type sop,
u8 *buf, size_t len)
{
@@ -676,21 +936,31 @@ static void phy_msg_received(struct usbpd *pd, enum pd_sop_type sop,
return;
}
- rx_msg = kzalloc(sizeof(*rx_msg), GFP_KERNEL);
- if (!rx_msg)
- return;
+ /* if the peer's spec rev is older, downgrade ours to match */
+ if (PD_MSG_HDR_REV(header) < pd->spec_rev)
+ pd->spec_rev = PD_MSG_HDR_REV(header);
+
+ usbpd_dbg(&pd->dev, "received message: type(%d) num_objs(%d)\n",
+ PD_MSG_HDR_TYPE(header), PD_MSG_HDR_COUNT(header));
+
+ if (!PD_MSG_HDR_IS_EXTENDED(header)) {
+ rx_msg = kzalloc(sizeof(*rx_msg) + len, GFP_KERNEL);
+ if (!rx_msg)
+ return;
- rx_msg->type = PD_MSG_HDR_TYPE(header);
- rx_msg->len = PD_MSG_HDR_COUNT(header);
- memcpy(&rx_msg->payload, buf, min(len, sizeof(rx_msg->payload)));
+ rx_msg->hdr = header;
+ rx_msg->data_len = len;
+ memcpy(rx_msg->payload, buf, len);
+ } else {
+ rx_msg = pd_ext_msg_received(pd, header, buf, len, sop);
+ if (!rx_msg)
+ return;
+ }
spin_lock_irqsave(&pd->rx_lock, flags);
list_add_tail(&rx_msg->entry, &pd->rx_q);
spin_unlock_irqrestore(&pd->rx_lock, flags);
- usbpd_dbg(&pd->dev, "received message: type(%d) len(%d)\n",
- rx_msg->type, rx_msg->len);
-
kick_sm(pd, 0);
}
@@ -1140,11 +1410,13 @@ EXPORT_SYMBOL(usbpd_send_svdm);
static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg)
{
- u32 vdm_hdr = rx_msg->payload[0];
- u32 *vdos = &rx_msg->payload[1];
+ u32 vdm_hdr =
+ rx_msg->data_len >= sizeof(u32) ? ((u32 *)rx_msg->payload)[0] : 0;
+
+ u32 *vdos = (u32 *)&rx_msg->payload[sizeof(u32)];
u16 svid = VDM_HDR_SVID(vdm_hdr);
u16 *psvid;
- u8 i, num_vdos = rx_msg->len - 1; /* num objects minus header */
+ u8 i, num_vdos = PD_MSG_HDR_COUNT(rx_msg->hdr) - 1;
u8 cmd = SVDM_HDR_CMD(vdm_hdr);
u8 cmd_type = SVDM_HDR_CMD_TYPE(vdm_hdr);
bool has_dp = false;
@@ -1757,7 +2029,7 @@ static void usbpd_sm(struct work_struct *w)
case PE_SRC_SEND_CAPABILITIES_WAIT:
if (IS_DATA(rx_msg, MSG_REQUEST)) {
- pd->rdo = rx_msg->payload[0];
+ pd->rdo = *(u32 *)rx_msg->payload;
usbpd_set_state(pd, PE_SRC_NEGOTIATE_CAPABILITY);
} else if (rx_msg) {
usbpd_err(&pd->dev, "Unexpected message received\n");
@@ -1780,7 +2052,7 @@ static void usbpd_sm(struct work_struct *w)
usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
}
} else if (IS_DATA(rx_msg, MSG_REQUEST)) {
- pd->rdo = rx_msg->payload[0];
+ pd->rdo = *(u32 *)rx_msg->payload;
usbpd_set_state(pd, PE_SRC_NEGOTIATE_CAPABILITY);
} else if (IS_CTRL(rx_msg, MSG_DR_SWAP)) {
if (pd->vdm_state == MODE_ENTERED) {
@@ -1822,6 +2094,15 @@ static void usbpd_sm(struct work_struct *w)
vconn_swap(pd);
} else if (IS_DATA(rx_msg, MSG_VDM)) {
handle_vdm_rx(pd, rx_msg);
+ } else if (rx_msg && pd->spec_rev == USBPD_REV_30) {
+ /* unhandled messages */
+ ret = pd_send_msg(pd, MSG_NOT_SUPPORTED, NULL, 0,
+ SOP_MSG);
+ if (ret) {
+ usbpd_err(&pd->dev, "Error sending Not supported\n");
+ usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
+ }
+ break;
} else if (pd->send_pr_swap) {
pd->send_pr_swap = false;
ret = pd_send_msg(pd, MSG_PR_SWAP, NULL, 0, SOP_MSG);
@@ -2062,7 +2343,8 @@ static void usbpd_sm(struct work_struct *w)
usbpd_err(&pd->dev, "Error sending Sink Caps\n");
usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
}
- } else if (IS_CTRL(rx_msg, MSG_GET_SOURCE_CAP)) {
+ } else if (IS_CTRL(rx_msg, MSG_GET_SOURCE_CAP) &&
+ pd->spec_rev == USBPD_REV_20) {
ret = pd_send_msg(pd, MSG_SOURCE_CAPABILITIES,
default_src_caps,
ARRAY_SIZE(default_src_caps), SOP_MSG);
@@ -2085,7 +2367,8 @@ static void usbpd_sm(struct work_struct *w)
}
dr_swap(pd);
- } else if (IS_CTRL(rx_msg, MSG_PR_SWAP)) {
+ } else if (IS_CTRL(rx_msg, MSG_PR_SWAP) &&
+ pd->spec_rev == USBPD_REV_20) {
/* lock in current mode */
set_power_role(pd, pd->current_pr);
@@ -2103,7 +2386,8 @@ static void usbpd_sm(struct work_struct *w)
POWER_SUPPLY_PROP_PR_SWAP, &val);
usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
break;
- } else if (IS_CTRL(rx_msg, MSG_VCONN_SWAP)) {
+ } else if (IS_CTRL(rx_msg, MSG_VCONN_SWAP) &&
+ pd->spec_rev == USBPD_REV_20) {
/*
* if VCONN is connected to VBUS, make sure we are
* not in high voltage contract, otherwise reject.
@@ -2131,6 +2415,120 @@ static void usbpd_sm(struct work_struct *w)
vconn_swap(pd);
} else if (IS_DATA(rx_msg, MSG_VDM)) {
handle_vdm_rx(pd, rx_msg);
+ } else if (pd->send_get_src_cap_ext && is_sink_tx_ok(pd)) {
+ pd->send_get_src_cap_ext = false;
+ ret = pd_send_msg(pd, MSG_GET_SOURCE_CAP_EXTENDED, NULL,
+ 0, SOP_MSG);
+ if (ret) {
+ dev_err(&pd->dev,
+ "Error sending get_src_cap_ext\n");
+ usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+ break;
+ }
+ kick_sm(pd, SENDER_RESPONSE_TIME);
+ } else if (rx_msg &&
+ IS_EXT(rx_msg, MSG_SOURCE_CAPABILITIES_EXTENDED)) {
+ if (rx_msg->data_len != PD_SRC_CAP_EXT_DB_LEN) {
+ usbpd_err(&pd->dev, "Invalid src cap ext db\n");
+ break;
+ }
+ memcpy(&pd->src_cap_ext_db, rx_msg->payload,
+ sizeof(pd->src_cap_ext_db));
+ complete(&pd->is_ready);
+ } else if (pd->send_get_pps_status && is_sink_tx_ok(pd)) {
+ pd->send_get_pps_status = false;
+ ret = pd_send_msg(pd, MSG_GET_PPS_STATUS, NULL,
+ 0, SOP_MSG);
+ if (ret) {
+ dev_err(&pd->dev,
+ "Error sending get_pps_status\n");
+ usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+ break;
+ }
+ kick_sm(pd, SENDER_RESPONSE_TIME);
+ } else if (rx_msg &&
+ IS_EXT(rx_msg, MSG_PPS_STATUS)) {
+ if (rx_msg->data_len != sizeof(pd->pps_status_db)) {
+ usbpd_err(&pd->dev, "Invalid pps status db\n");
+ break;
+ }
+ memcpy(&pd->pps_status_db, rx_msg->payload,
+ sizeof(pd->pps_status_db));
+ complete(&pd->is_ready);
+ } else if (IS_DATA(rx_msg, MSG_ALERT)) {
+ if (rx_msg->data_len != sizeof(pd->received_ado)) {
+ usbpd_err(&pd->dev, "Invalid ado\n");
+ break;
+ }
+ memcpy(&pd->received_ado, rx_msg->payload,
+ sizeof(pd->received_ado));
+ ret = pd_send_msg(pd, MSG_GET_STATUS, NULL,
+ 0, SOP_MSG);
+ if (ret) {
+ dev_err(&pd->dev,
+ "Error sending get_status\n");
+ usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+ break;
+ }
+ kick_sm(pd, SENDER_RESPONSE_TIME);
+ } else if (rx_msg &&
+ IS_EXT(rx_msg, MSG_STATUS)) {
+ if (rx_msg->data_len != PD_STATUS_DB_LEN) {
+ usbpd_err(&pd->dev, "Invalid status db\n");
+ break;
+ }
+ memcpy(&pd->status_db, rx_msg->payload,
+ sizeof(pd->status_db));
+ kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
+ } else if (pd->send_get_battery_cap && is_sink_tx_ok(pd)) {
+ pd->send_get_battery_cap = false;
+ ret = pd_send_ext_msg(pd, MSG_GET_BATTERY_CAP,
+ &pd->get_battery_cap_db, 1, SOP_MSG);
+ if (ret) {
+ dev_err(&pd->dev,
+ "Error sending get_battery_cap\n");
+ usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+ break;
+ }
+ kick_sm(pd, SENDER_RESPONSE_TIME);
+ } else if (rx_msg &&
+ IS_EXT(rx_msg, MSG_BATTERY_CAPABILITIES)) {
+ if (rx_msg->data_len != PD_BATTERY_CAP_DB_LEN) {
+ usbpd_err(&pd->dev, "Invalid battery cap db\n");
+ break;
+ }
+ memcpy(&pd->battery_cap_db, rx_msg->payload,
+ sizeof(pd->battery_cap_db));
+ complete(&pd->is_ready);
+ } else if (pd->send_get_battery_status && is_sink_tx_ok(pd)) {
+ pd->send_get_battery_status = false;
+ ret = pd_send_ext_msg(pd, MSG_GET_BATTERY_STATUS,
+ &pd->get_battery_status_db, 1, SOP_MSG);
+ if (ret) {
+ dev_err(&pd->dev,
+ "Error sending get_battery_status\n");
+ usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+ break;
+ }
+ kick_sm(pd, SENDER_RESPONSE_TIME);
+ } else if (rx_msg &&
+ IS_EXT(rx_msg, MSG_BATTERY_STATUS)) {
+ if (rx_msg->data_len != sizeof(pd->battery_sts_dobj)) {
+ usbpd_err(&pd->dev, "Invalid bat sts dobj\n");
+ break;
+ }
+ memcpy(&pd->battery_sts_dobj, rx_msg->payload,
+ sizeof(pd->battery_sts_dobj));
+ complete(&pd->is_ready);
+ } else if (rx_msg && pd->spec_rev == USBPD_REV_30) {
+ /* unhandled messages */
+ ret = pd_send_msg(pd, MSG_NOT_SUPPORTED, NULL, 0,
+ SOP_MSG);
+ if (ret) {
+ usbpd_err(&pd->dev, "Error sending Not supported\n");
+ usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+ }
+ break;
} else if (pd->send_request) {
pd->send_request = false;
usbpd_set_state(pd, PE_SNK_SELECT_CAPABILITY);
@@ -2779,6 +3177,10 @@ static int usbpd_uevent(struct device *dev, struct kobj_uevent_env *env)
"explicit" : "implicit");
add_uevent_var(env, "ALT_MODE=%d", pd->vdm_state == MODE_ENTERED);
+ add_uevent_var(env, "ADO=%08x", pd->received_ado);
+ for (i = 0; i < PD_STATUS_DB_LEN; i++)
+ add_uevent_var(env, "SDB%d=%08x", i, pd->status_db[i]);
+
return 0;
}
@@ -3126,6 +3528,145 @@ static ssize_t hard_reset_store(struct device *dev,
}
static DEVICE_ATTR_WO(hard_reset);
+static int trigger_tx_msg(struct usbpd *pd, bool *msg_tx_flag)
+{
+ int ret = 0;
+
+ /* Only allowed if we are already in explicit sink contract */
+ if (pd->current_state != PE_SNK_READY || !is_sink_tx_ok(pd)) {
+ usbpd_err(&pd->dev, "%s: Cannot send msg\n", __func__);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ reinit_completion(&pd->is_ready);
+ *msg_tx_flag = true;
+ kick_sm(pd, 0);
+
+ /* wait for operation to complete */
+ if (!wait_for_completion_timeout(&pd->is_ready,
+ msecs_to_jiffies(1000))) {
+ usbpd_err(&pd->dev, "%s: request timed out\n", __func__);
+ ret = -ETIMEDOUT;
+ }
+
+out:
+ *msg_tx_flag = false;
+ return ret;
+}
+
+static ssize_t get_src_cap_ext_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i, ret, len = 0;
+ struct usbpd *pd = dev_get_drvdata(dev);
+
+ if (pd->spec_rev == USBPD_REV_20)
+ return -EINVAL;
+
+ ret = trigger_tx_msg(pd, &pd->send_get_src_cap_ext);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < PD_SRC_CAP_EXT_DB_LEN; i++)
+ len += snprintf(buf + len, PAGE_SIZE - len, "%d\n",
+ pd->src_cap_ext_db[i]);
+ return len;
+}
+static DEVICE_ATTR_RO(get_src_cap_ext);
+
+static ssize_t get_pps_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ struct usbpd *pd = dev_get_drvdata(dev);
+
+ if (pd->spec_rev == USBPD_REV_20)
+ return -EINVAL;
+
+ ret = trigger_tx_msg(pd, &pd->send_get_pps_status);
+ if (ret)
+ return ret;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", pd->pps_status_db);
+}
+static DEVICE_ATTR_RO(get_pps_status);
+
+static ssize_t rx_ado_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct usbpd *pd = dev_get_drvdata(dev);
+
+ /* dump the ADO as a hex string */
+ return snprintf(buf, PAGE_SIZE, "%08x\n", pd->received_ado);
+}
+static DEVICE_ATTR_RO(rx_ado);
+
+static ssize_t get_battery_cap_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct usbpd *pd = dev_get_drvdata(dev);
+ int val, ret;
+
+ if (pd->spec_rev == USBPD_REV_20 || sscanf(buf, "%d\n", &val) != 1) {
+ pd->get_battery_cap_db = -EINVAL;
+ return -EINVAL;
+ }
+
+ pd->get_battery_cap_db = val;
+
+ ret = trigger_tx_msg(pd, &pd->send_get_battery_cap);
+
+ return ret ? ret : size;
+}
+
+static ssize_t get_battery_cap_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i, len = 0;
+ struct usbpd *pd = dev_get_drvdata(dev);
+
+ if (pd->get_battery_cap_db == -EINVAL)
+ return -EINVAL;
+
+ for (i = 0; i < PD_BATTERY_CAP_DB_LEN; i++)
+ len += snprintf(buf + len, PAGE_SIZE - len, "%d\n",
+ pd->battery_cap_db[i]);
+ return len;
+}
+static DEVICE_ATTR_RW(get_battery_cap);
+
+static ssize_t get_battery_status_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct usbpd *pd = dev_get_drvdata(dev);
+ int val, ret;
+
+ if (pd->spec_rev == USBPD_REV_20 || sscanf(buf, "%d\n", &val) != 1) {
+ pd->get_battery_status_db = -EINVAL;
+ return -EINVAL;
+ }
+
+ pd->get_battery_status_db = val;
+
+ ret = trigger_tx_msg(pd, &pd->send_get_battery_status);
+
+ return ret ? ret : size;
+}
+
+static ssize_t get_battery_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usbpd *pd = dev_get_drvdata(dev);
+
+ if (pd->get_battery_status_db == -EINVAL)
+ return -EINVAL;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", pd->battery_sts_dobj);
+}
+static DEVICE_ATTR_RW(get_battery_status);
+
static struct attribute *usbpd_attrs[] = {
&dev_attr_contract.attr,
&dev_attr_initial_pr.attr,
@@ -3145,6 +3686,11 @@ static struct attribute *usbpd_attrs[] = {
&dev_attr_rdo.attr,
&dev_attr_rdo_h.attr,
&dev_attr_hard_reset.attr,
+ &dev_attr_get_src_cap_ext.attr,
+ &dev_attr_get_pps_status.attr,
+ &dev_attr_rx_ado.attr,
+ &dev_attr_get_battery_cap.attr,
+ &dev_attr_get_battery_status.attr,
NULL,
};
ATTRIBUTE_GROUPS(usbpd);
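From userspace the new attributes are driven with plain reads and writes; for example (sysfs path illustrative, it depends on how the usbpd class device is named on a given target), reading get_pps_status sends Get_PPS_Status, blocks up to one second for the PPS Status reply, and prints the 4-byte data block in decimal, while get_battery_cap takes a written battery reference first and then reports the 9-byte Battery_Capabilities response on read.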
@@ -3375,6 +3921,7 @@ struct usbpd *usbpd_create(struct device *parent)
INIT_LIST_HEAD(&pd->rx_q);
INIT_LIST_HEAD(&pd->svid_handlers);
init_completion(&pd->is_ready);
+ init_completion(&pd->tx_chunk_request);
pd->psy_nb.notifier_call = psy_changed;
ret = power_supply_reg_notifier(&pd->psy_nb);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index fe123153b1a5..2a9944326210 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -2023,6 +2023,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d0e, 0xff) }, /* D-Link DWM-157 C1 */
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
diff --git a/drivers/video/fbdev/msm/mdss_dp.c b/drivers/video/fbdev/msm/mdss_dp.c
index 4c6a5e73406b..71584dff75cb 100644
--- a/drivers/video/fbdev/msm/mdss_dp.c
+++ b/drivers/video/fbdev/msm/mdss_dp.c
@@ -68,6 +68,7 @@ static int mdss_dp_process_phy_test_pattern_request(
struct mdss_dp_drv_pdata *dp);
static int mdss_dp_send_audio_notification(
struct mdss_dp_drv_pdata *dp, int val);
+static void mdss_dp_reset_sw_state(struct mdss_dp_drv_pdata *dp);
static inline void mdss_dp_reset_sink_count(struct mdss_dp_drv_pdata *dp)
{
@@ -421,6 +422,22 @@ static int mdss_dp_clk_init(struct mdss_dp_drv_pdata *dp_drv,
__func__);
dp_drv->pixel_parent = NULL;
}
+
+ dp_drv->pixel_clk_two_div = devm_clk_get(dev,
+ "pixel_clk_two_div");
+ if (IS_ERR(dp_drv->pixel_clk_two_div)) {
+ pr_debug("%s: Unable to get DP pixel two div clk\n",
+ __func__);
+ dp_drv->pixel_clk_two_div = NULL;
+ }
+
+ dp_drv->pixel_clk_four_div = devm_clk_get(dev,
+ "pixel_clk_four_div");
+ if (IS_ERR(dp_drv->pixel_clk_four_div)) {
+ pr_debug("%s: Unable to get DP pixel four div clk\n",
+ __func__);
+ dp_drv->pixel_clk_four_div = NULL;
+ }
} else {
if (dp_drv->pixel_parent)
devm_clk_put(dev, dp_drv->pixel_parent);
@@ -1417,6 +1434,16 @@ static int mdss_dp_enable_mainlink_clocks(struct mdss_dp_drv_pdata *dp)
return ret;
}
+ if (dp->pixel_parent && dp->pixel_clk_two_div &&
+ dp->pixel_clk_four_div) {
+ if (dp->link_rate == DP_LINK_RATE_540)
+ clk_set_parent(dp->pixel_parent,
+ dp->pixel_clk_four_div);
+ else
+ clk_set_parent(dp->pixel_parent,
+ dp->pixel_clk_two_div);
+ }
+
mdss_dp_set_clock_rate(dp, "ctrl_link_clk",
(dp->link_rate * DP_LINK_RATE_MULTIPLIER) / DP_KHZ_TO_HZ);
@@ -1489,7 +1516,12 @@ static int mdss_dp_setup_main_link(struct mdss_dp_drv_pdata *dp, bool train)
pr_debug("enter\n");
mdss_dp_mainlink_ctrl(&dp->ctrl_io, true);
- mdss_dp_aux_set_sink_power_state(dp, SINK_POWER_ON);
+ ret = mdss_dp_aux_send_psm_request(dp, false);
+ if (ret) {
+ pr_err("Failed to exit low power mode, rc=%d\n", ret);
+ goto end;
+ }
+
reinit_completion(&dp->video_comp);
if (mdss_dp_is_phy_test_pattern_requested(dp))
@@ -1536,6 +1568,19 @@ static int mdss_dp_on_irq(struct mdss_dp_drv_pdata *dp_drv, bool lt_needed)
{
int ret = 0;
char ln_map[4];
+ bool connected;
+
+ mutex_lock(&dp_drv->attention_lock);
+ connected = dp_drv->cable_connected;
+ mutex_unlock(&dp_drv->attention_lock);
+
+ /*
+ * If the DP cable is disconnected, avoid link training or turning on the DP path.
+ */
+ if (!connected) {
+ pr_err("DP sink not connected\n");
+ return -EINVAL;
+ }
/* wait until link training is completed */
pr_debug("enter, lt_needed=%s\n", lt_needed ? "true" : "false");
@@ -1576,17 +1621,15 @@ static int mdss_dp_on_irq(struct mdss_dp_drv_pdata *dp_drv, bool lt_needed)
dp_drv->power_on = true;
- if (dp_drv->psm_enabled) {
- ret = mdss_dp_aux_send_psm_request(dp_drv, false);
- if (ret) {
- pr_err("Failed to exit low power mode, rc=%d\n",
- ret);
- goto exit_loop;
+ ret = mdss_dp_setup_main_link(dp_drv, lt_needed);
+ if (ret) {
+ if (ret == -ENODEV || ret == -EINVAL) {
+ pr_err("main link setup failed\n");
+ mutex_unlock(&dp_drv->train_mutex);
+ return ret;
}
}
- ret = mdss_dp_setup_main_link(dp_drv, lt_needed);
-
exit_loop:
mutex_unlock(&dp_drv->train_mutex);
} while (ret == -EAGAIN);
@@ -1653,15 +1696,6 @@ int mdss_dp_on_hpd(struct mdss_dp_drv_pdata *dp_drv)
mdss_dp_configure_source_params(dp_drv, ln_map);
- if (dp_drv->psm_enabled) {
- ret = mdss_dp_aux_send_psm_request(dp_drv, false);
- if (ret) {
- pr_err("Failed to exit low power mode, rc=%d\n", ret);
- goto exit;
- }
- }
-
-
link_training:
dp_drv->power_on = true;
@@ -2195,7 +2229,7 @@ static int mdss_dp_process_hpd_high(struct mdss_dp_drv_pdata *dp)
ret = mdss_dp_dpcd_cap_read(dp);
if (ret || !mdss_dp_aux_is_link_rate_valid(dp->dpcd.max_link_rate) ||
!mdss_dp_aux_is_lane_count_valid(dp->dpcd.max_lane_count)) {
- if (ret == EDP_AUX_ERR_TOUT) {
+ if ((ret == -ENODEV) || (ret == EDP_AUX_ERR_TOUT)) {
pr_err("DPCD read timedout, skip connect notification\n");
goto end;
}
@@ -2227,6 +2261,9 @@ static int mdss_dp_process_hpd_high(struct mdss_dp_drv_pdata *dp)
read_edid:
ret = mdss_dp_edid_read(dp);
if (ret) {
+ if (ret == -ENODEV)
+ goto end;
+
pr_err("edid read error, setting default resolution\n");
goto notify;
}
@@ -2989,6 +3026,7 @@ static int mdss_dp_sysfs_create(struct mdss_dp_drv_pdata *dp,
static void mdss_dp_mainlink_push_idle(struct mdss_panel_data *pdata)
{
+ bool cable_connected;
struct mdss_dp_drv_pdata *dp_drv = NULL;
const int idle_pattern_completion_timeout_ms = 3 * HZ / 100;
@@ -3009,6 +3047,14 @@ static void mdss_dp_mainlink_push_idle(struct mdss_panel_data *pdata)
return;
}
+ /* power down the sink if cable is still connected */
+ mutex_lock(&dp_drv->attention_lock);
+ cable_connected = dp_drv->cable_connected;
+ mutex_unlock(&dp_drv->attention_lock);
+ if (cable_connected && dp_drv->alt_mode.dp_status.hpd_high) {
+ if (mdss_dp_aux_send_psm_request(dp_drv, true))
+ pr_err("Failed to enter low power mode\n");
+ }
reinit_completion(&dp_drv->idle_comp);
mdss_dp_state_ctrl(&dp_drv->ctrl_io, ST_PUSH_IDLE);
if (!wait_for_completion_timeout(&dp_drv->idle_comp,
@@ -3129,6 +3175,10 @@ static int mdss_dp_event_handler(struct mdss_panel_data *pdata,
pr_err("DP Controller not powered on\n");
break;
}
+ if (!atomic_read(&dp->notification_pending)) {
+ pr_debug("blank when cable is connected\n");
+ kthread_park(dp->ev_thread);
+ }
if (dp_is_hdcp_enabled(dp)) {
dp->hdcp_status = HDCP_STATE_INACTIVE;
@@ -3168,8 +3218,10 @@ static int mdss_dp_event_handler(struct mdss_panel_data *pdata,
* when you connect DP sink while the
* device is in suspend state.
*/
- if ((!dp->power_on) && (dp->dp_initialized))
+ if ((!dp->power_on) && (dp->dp_initialized)) {
rc = mdss_dp_host_deinit(dp);
+ kthread_park(dp->ev_thread);
+ }
/*
* For DP suspend/resume use case, CHECK_PARAMS is
@@ -3181,8 +3233,11 @@ static int mdss_dp_event_handler(struct mdss_panel_data *pdata,
dp->suspend_vic = dp->vic;
break;
case MDSS_EVENT_RESUME:
- if (dp->suspend_vic != HDMI_VFRMT_UNKNOWN)
+ if (dp->suspend_vic != HDMI_VFRMT_UNKNOWN) {
dp_init_panel_info(dp, dp->suspend_vic);
+ mdss_dp_reset_sw_state(dp);
+ kthread_unpark(dp->ev_thread);
+ }
break;
default:
pr_debug("unhandled event=%d\n", event);
@@ -3526,9 +3581,33 @@ static void mdss_dp_reset_event_list(struct mdss_dp_drv_pdata *dp)
static void mdss_dp_reset_sw_state(struct mdss_dp_drv_pdata *dp)
{
+ int ret = 0;
+
pr_debug("enter\n");
mdss_dp_reset_event_list(dp);
+
+ /*
+ * IRQ_HPD attention event handler first turns on DP path and then
+ * notifies CONNECT_IRQ_HPD and waits for userspace to trigger UNBLANK.
+ * In such cases, before UNBLANK call, if cable is disconnected, if
+ * DISCONNECT is notified immediately, userspace might not sense any
+ * change in connection status, leaving DP controller ON.
+ *
+ * To avoid such cases, wait for the connection event to complete before
+ * sending disconnection event
+ */
+ if (atomic_read(&dp->notification_pending)) {
+ pr_debug("waiting for the pending notitfication\n");
+ ret = wait_for_completion_timeout(&dp->notification_comp, HZ);
+ if (ret <= 0) {
+ pr_err("%s timed out\n",
+ mdss_dp_notification_status_to_string(
+ dp->hpd_notification_status));
+ }
+ }
+
atomic_set(&dp->notification_pending, 0);
+ /* complete any waiting completions */
complete_all(&dp->notification_comp);
}
diff --git a/drivers/video/fbdev/msm/mdss_dp.h b/drivers/video/fbdev/msm/mdss_dp.h
index afa8e3db590f..f3f9ca277762 100644
--- a/drivers/video/fbdev/msm/mdss_dp.h
+++ b/drivers/video/fbdev/msm/mdss_dp.h
@@ -218,10 +218,6 @@ struct dp_alt_mode {
#define ST_SEND_VIDEO BIT(7)
#define ST_PUSH_IDLE BIT(8)
-/* sink power state */
-#define SINK_POWER_ON 1
-#define SINK_POWER_OFF 2
-
#define DP_LINK_RATE_162 6 /* 1.62G = 270M * 6 */
#define DP_LINK_RATE_270 10 /* 2.70G = 270M * 10 */
#define DP_LINK_RATE_540 20 /* 5.40G = 270M * 20 */
@@ -618,6 +614,8 @@ struct mdss_dp_drv_pdata {
/* DP Pixel clock RCG and PLL parent */
struct clk *pixel_clk_rcg;
struct clk *pixel_parent;
+ struct clk *pixel_clk_two_div;
+ struct clk *pixel_clk_four_div;
/* regulators */
struct dss_module_power power_data[DP_MAX_PM];
@@ -1181,11 +1179,9 @@ void dp_aux_native_handler(struct mdss_dp_drv_pdata *dp, u32 isr);
void mdss_dp_aux_init(struct mdss_dp_drv_pdata *ep);
void mdss_dp_fill_link_cfg(struct mdss_dp_drv_pdata *ep);
-void mdss_dp_sink_power_down(struct mdss_dp_drv_pdata *ep);
void mdss_dp_lane_power_ctrl(struct mdss_dp_drv_pdata *ep, int up);
void mdss_dp_config_ctrl(struct mdss_dp_drv_pdata *ep);
char mdss_dp_gen_link_clk(struct mdss_dp_drv_pdata *dp);
-int mdss_dp_aux_set_sink_power_state(struct mdss_dp_drv_pdata *ep, char state);
int mdss_dp_aux_send_psm_request(struct mdss_dp_drv_pdata *dp, bool enable);
void mdss_dp_aux_send_test_response(struct mdss_dp_drv_pdata *ep);
void *mdss_dp_get_hdcp_data(struct device *dev);
diff --git a/drivers/video/fbdev/msm/mdss_dp_aux.c b/drivers/video/fbdev/msm/mdss_dp_aux.c
index c0632e8241a0..407f230ca71e 100644
--- a/drivers/video/fbdev/msm/mdss_dp_aux.c
+++ b/drivers/video/fbdev/msm/mdss_dp_aux.c
@@ -411,7 +411,8 @@ retry:
if (!connected) {
pr_err("dp cable disconnected\n");
- break;
+ ret = -ENODEV;
+ goto end;
}
dp->aux_error_num = EDP_AUX_ERR_NONE;
@@ -877,7 +878,7 @@ void dp_extract_edid_detailed_timing_description(struct edp_edid *edid,
static int dp_aux_chan_ready(struct mdss_dp_drv_pdata *ep)
{
- int cnt, ret;
+ int cnt, ret = 0;
char data = 0;
for (cnt = 5; cnt; cnt--) {
@@ -886,6 +887,10 @@ static int dp_aux_chan_ready(struct mdss_dp_drv_pdata *ep)
ret, mdss_dp_get_aux_error(ep->aux_error_num));
if (ret >= 0)
break;
+
+ if (ret == -ENODEV)
+ return ret;
+
msleep(100);
}
@@ -973,6 +978,7 @@ int mdss_dp_edid_read(struct mdss_dp_drv_pdata *dp)
u32 checksum = 0;
bool phy_aux_update_requested = false;
bool ext_block_parsing_done = false;
+ bool connected = false;
ret = dp_aux_chan_ready(dp);
if (ret) {
@@ -992,6 +998,15 @@ int mdss_dp_edid_read(struct mdss_dp_drv_pdata *dp)
u8 segment;
u8 edid_buf[EDID_BLOCK_SIZE] = {0};
+ mutex_lock(&dp->attention_lock);
+ connected = dp->cable_connected;
+ mutex_unlock(&dp->attention_lock);
+
+ if (!connected) {
+ pr_err("DP sink not connected\n");
+ return -ENODEV;
+ }
+
/*
* Write the segment first.
* Segment = 0, for blocks 0 and 1
@@ -1243,7 +1258,7 @@ int mdss_dp_aux_link_status_read(struct mdss_dp_drv_pdata *ep, int len)
rlen = dp_aux_read_buf(ep, 0x202, len, 0);
if (rlen < len) {
pr_err("edp aux read failed\n");
- return 0;
+ return rlen;
}
rp = &ep->rxp;
bp = rp->data;
@@ -2459,21 +2474,24 @@ static int dp_start_link_train_1(struct mdss_dp_drv_pdata *ep)
usleep_time = ep->dpcd.training_read_interval;
usleep_range(usleep_time, usleep_time);
- mdss_dp_aux_link_status_read(ep, 6);
+ ret = mdss_dp_aux_link_status_read(ep, 6);
+ if (ret == -ENODEV)
+ break;
+
if (mdss_dp_aux_clock_recovery_done(ep)) {
ret = 0;
break;
}
if (ep->v_level == DPCD_LINK_VOLTAGE_MAX) {
- ret = -1;
+ ret = -EAGAIN;
break; /* quit */
}
if (old_v_level == ep->v_level) {
tries++;
if (tries >= maximum_retries) {
- ret = -1;
+ ret = -EAGAIN;
break; /* quit */
}
} else {
@@ -2511,7 +2529,9 @@ static int dp_start_link_train_2(struct mdss_dp_drv_pdata *ep)
usleep_time = ep->dpcd.training_read_interval;
usleep_range(usleep_time, usleep_time);
- mdss_dp_aux_link_status_read(ep, 6);
+ ret = mdss_dp_aux_link_status_read(ep, 6);
+ if (ret == -ENODEV)
+ break;
if (mdss_dp_aux_channel_eq_done(ep)) {
ret = 0;
@@ -2519,7 +2539,7 @@ static int dp_start_link_train_2(struct mdss_dp_drv_pdata *ep)
}
if (tries > maximum_retries) {
- ret = -1;
+ ret = -EAGAIN;
break;
}
tries++;
@@ -2556,15 +2576,6 @@ static int dp_link_rate_down_shift(struct mdss_dp_drv_pdata *ep)
return ret;
}
-int mdss_dp_aux_set_sink_power_state(struct mdss_dp_drv_pdata *ep, char state)
-{
- int ret;
-
- ret = dp_aux_write_buf(ep, 0x600, &state, 1, 0);
- pr_debug("state=%d ret=%d\n", state, ret);
- return ret;
-}
-
static void dp_clear_training_pattern(struct mdss_dp_drv_pdata *ep)
{
int usleep_time;
@@ -2593,7 +2604,7 @@ int mdss_dp_link_train(struct mdss_dp_drv_pdata *dp)
ret = dp_start_link_train_1(dp);
if (ret < 0) {
- if (!dp_link_rate_down_shift(dp)) {
+ if ((ret == -EAGAIN) && !dp_link_rate_down_shift(dp)) {
pr_debug("retry with lower rate\n");
dp_clear_training_pattern(dp);
return -EAGAIN;
@@ -2612,7 +2623,7 @@ int mdss_dp_link_train(struct mdss_dp_drv_pdata *dp)
ret = dp_start_link_train_2(dp);
if (ret < 0) {
- if (!dp_link_rate_down_shift(dp)) {
+ if ((ret == -EAGAIN) && !dp_link_rate_down_shift(dp)) {
pr_debug("retry with lower rate\n");
dp_clear_training_pattern(dp);
return -EAGAIN;
@@ -2649,7 +2660,7 @@ int mdss_dp_dpcd_status_read(struct mdss_dp_drv_pdata *ep)
ret = mdss_dp_aux_link_status_read(ep, 6);
- if (ret) {
+ if (ret > 0) {
sp = &ep->link_status;
ret = sp->port_0_in_sync; /* 1 == sync */
}
diff --git a/drivers/video/fbdev/msm/mdss_dp_util.c b/drivers/video/fbdev/msm/mdss_dp_util.c
index f7b3d4664e86..037741df4382 100644
--- a/drivers/video/fbdev/msm/mdss_dp_util.c
+++ b/drivers/video/fbdev/msm/mdss_dp_util.c
@@ -835,6 +835,8 @@ void mdss_dp_sw_config_msa(struct mdss_dp_drv_pdata *dp)
pr_debug("pixel_m=0x%x, pixel_n=0x%x\n", pixel_m, pixel_n);
mvid = (pixel_m & 0xFFFF) * 5;
nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
+ if (dp->link_rate == DP_LINK_RATE_540)
+ nvid *= 2;
}
pr_debug("mvid=0x%x, nvid=0x%x\n", mvid, nvid);
diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c
index 5f7e7c6bcde0..7b6153503af5 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.c
+++ b/drivers/video/fbdev/msm/mdss_dsi.c
@@ -968,7 +968,7 @@ static int mdss_dsi_cmd_flush(struct file *file, fl_owner_t id)
while (len >= sizeof(*dchdr)) {
dchdr = (struct dsi_ctrl_hdr *)bp;
dchdr->dlen = ntohs(dchdr->dlen);
- if (dchdr->dlen > len) {
+ if (dchdr->dlen > len || dchdr->dlen < 0) {
pr_err("%s: dtsi cmd=%x error, len=%d\n",
__func__, dchdr->dtype, dchdr->dlen);
kfree(buf);
diff --git a/drivers/video/fbdev/msm/mdss_dsi_host.c b/drivers/video/fbdev/msm/mdss_dsi_host.c
index c766ff983045..fca1d37b40bb 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_host.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_host.c
@@ -1512,6 +1512,34 @@ static int mdss_dsi_wait4video_eng_busy(struct mdss_dsi_ctrl_pdata *ctrl)
return ret;
}
+static void mdss_dsi_wait4active_region(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ int in_blanking = 0;
+ int retry_count = 0;
+
+ if (ctrl->panel_mode != DSI_VIDEO_MODE)
+ return;
+
+ while (retry_count != MAX_BTA_WAIT_RETRY) {
+ mdss_dsi_wait4video_eng_busy(ctrl);
+ in_blanking = ctrl->mdp_callback->fxn(
+ ctrl->mdp_callback->data,
+ MDP_INTF_CALLBACK_CHECK_LINE_COUNT);
+
+ if (in_blanking) {
+ pr_debug("%s: not in active region\n", __func__);
+ retry_count++;
+ } else
+ break;
+ }
+
+ if (retry_count == MAX_BTA_WAIT_RETRY)
+ MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl",
+ "dsi0_phy", "dsi1_ctrl", "dsi1_phy",
+ "vbif", "vbif_nrt", "dbg_bus",
+ "vbif_dbg_bus", "dsi_dbg_bus", "panic");
+}
+
/**
* mdss_dsi_bta_status_check() - Check dsi panel status through bta check
* @ctrl_pdata: pointer to the dsi controller structure
@@ -1527,8 +1555,6 @@ int mdss_dsi_bta_status_check(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
int ret = 0;
unsigned long flag;
int ignore_underflow = 0;
- int retry_count = 0;
- int in_blanking = 0;
if (ctrl_pdata == NULL) {
pr_err("%s: Invalid input data\n", __func__);
@@ -1554,24 +1580,8 @@ int mdss_dsi_bta_status_check(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
reinit_completion(&ctrl_pdata->bta_comp);
mdss_dsi_enable_irq(ctrl_pdata, DSI_BTA_TERM);
spin_unlock_irqrestore(&ctrl_pdata->mdp_lock, flag);
-wait:
- mdss_dsi_wait4video_eng_busy(ctrl_pdata);
- if (ctrl_pdata->panel_mode == DSI_VIDEO_MODE) {
- in_blanking = ctrl_pdata->mdp_callback->fxn(
- ctrl_pdata->mdp_callback->data,
- MDP_INTF_CALLBACK_CHECK_LINE_COUNT);
- /* Try for maximum of 5 attempts */
- if (in_blanking && (retry_count < MAX_BTA_WAIT_RETRY)) {
- pr_debug("%s: not in active region\n", __func__);
- retry_count++;
- goto wait;
- }
- }
- if (retry_count == MAX_BTA_WAIT_RETRY)
- MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl",
- "dsi0_phy", "dsi1_ctrl", "dsi1_phy",
- "vbif", "vbif_nrt", "dbg_bus",
- "vbif_dbg_bus", "dsi_dbg_bus", "panic");
+
+ mdss_dsi_wait4active_region(ctrl_pdata);
/* mask out overflow errors */
if (ignore_underflow)
@@ -1991,7 +2001,7 @@ do_send:
goto end;
}
- mdss_dsi_wait4video_eng_busy(ctrl);
+ mdss_dsi_wait4active_region(ctrl);
mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
if (use_dma_tpg)
@@ -2029,7 +2039,7 @@ skip_max_pkt_size:
wmb(); /* make sure the RDBK registers are cleared */
}
- mdss_dsi_wait4video_eng_busy(ctrl); /* video mode only */
+ mdss_dsi_wait4active_region(ctrl);
mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
/* transmit read command to client */
if (use_dma_tpg)
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
index af95a4a6dccd..a5a407708334 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
@@ -113,6 +113,7 @@ static void hdmi_tx_fps_work(struct work_struct *work);
static int hdmi_tx_pinctrl_set_state(struct hdmi_tx_ctrl *hdmi_ctrl,
enum hdmi_tx_power_module_type module, bool active);
static void hdmi_panel_set_hdr_infoframe(struct hdmi_tx_ctrl *hdmi_ctrl);
+static void hdmi_panel_clear_hdr_infoframe(struct hdmi_tx_ctrl *hdmi_ctrl);
static int hdmi_tx_audio_info_setup(struct platform_device *pdev,
struct msm_ext_disp_audio_setup_params *params);
static int hdmi_tx_get_audio_edid_blk(struct platform_device *pdev,
@@ -1276,6 +1277,7 @@ static ssize_t hdmi_tx_sysfs_wta_hdr_stream(struct device *dev,
{
int ret = 0;
struct hdmi_tx_ctrl *ctrl = NULL;
+ u8 hdr_op;
ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
if (!ctrl) {
@@ -1296,36 +1298,43 @@ static ssize_t hdmi_tx_sysfs_wta_hdr_stream(struct device *dev,
goto end;
}
- memcpy(&ctrl->hdr_data, buf, sizeof(struct mdp_hdr_stream));
+ memcpy(&ctrl->hdr_ctrl, buf, sizeof(struct mdp_hdr_stream_ctrl));
pr_debug("%s: 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
__func__,
- ctrl->hdr_data.eotf,
- ctrl->hdr_data.display_primaries_x[0],
- ctrl->hdr_data.display_primaries_y[0],
- ctrl->hdr_data.display_primaries_x[1],
- ctrl->hdr_data.display_primaries_y[1],
- ctrl->hdr_data.display_primaries_x[2],
- ctrl->hdr_data.display_primaries_y[2]);
+ ctrl->hdr_ctrl.hdr_stream.eotf,
+ ctrl->hdr_ctrl.hdr_stream.display_primaries_x[0],
+ ctrl->hdr_ctrl.hdr_stream.display_primaries_y[0],
+ ctrl->hdr_ctrl.hdr_stream.display_primaries_x[1],
+ ctrl->hdr_ctrl.hdr_stream.display_primaries_y[1],
+ ctrl->hdr_ctrl.hdr_stream.display_primaries_x[2],
+ ctrl->hdr_ctrl.hdr_stream.display_primaries_y[2]);
pr_debug("%s: 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
__func__,
- ctrl->hdr_data.white_point_x,
- ctrl->hdr_data.white_point_y,
- ctrl->hdr_data.max_luminance,
- ctrl->hdr_data.min_luminance,
- ctrl->hdr_data.max_content_light_level,
- ctrl->hdr_data.max_average_light_level);
+ ctrl->hdr_ctrl.hdr_stream.white_point_x,
+ ctrl->hdr_ctrl.hdr_stream.white_point_y,
+ ctrl->hdr_ctrl.hdr_stream.max_luminance,
+ ctrl->hdr_ctrl.hdr_stream.min_luminance,
+ ctrl->hdr_ctrl.hdr_stream.max_content_light_level,
+ ctrl->hdr_ctrl.hdr_stream.max_average_light_level);
pr_debug("%s: 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
__func__,
- ctrl->hdr_data.pixel_encoding,
- ctrl->hdr_data.colorimetry,
- ctrl->hdr_data.range,
- ctrl->hdr_data.bits_per_component,
- ctrl->hdr_data.content_type);
+ ctrl->hdr_ctrl.hdr_stream.pixel_encoding,
+ ctrl->hdr_ctrl.hdr_stream.colorimetry,
+ ctrl->hdr_ctrl.hdr_stream.range,
+ ctrl->hdr_ctrl.hdr_stream.bits_per_component,
+ ctrl->hdr_ctrl.hdr_stream.content_type);
+ hdr_op = hdmi_hdr_get_ops(ctrl->curr_hdr_state,
+ ctrl->hdr_ctrl.hdr_state);
- hdmi_panel_set_hdr_infoframe(ctrl);
+ if (hdr_op == HDR_SEND_INFO)
+ hdmi_panel_set_hdr_infoframe(ctrl);
+ else if (hdr_op == HDR_CLEAR_INFO)
+ hdmi_panel_clear_hdr_infoframe(ctrl);
+
+ ctrl->curr_hdr_state = ctrl->hdr_ctrl.hdr_state;
ret = strnlen(buf, PAGE_SIZE);
end:
@@ -2113,6 +2122,8 @@ static int hdmi_tx_init_features(struct hdmi_tx_ctrl *hdmi_ctrl,
goto err;
}
+ /* reset HDR state */
+ hdmi_ctrl->curr_hdr_state = HDR_DISABLE;
return 0;
err:
hdmi_tx_deinit_features(hdmi_ctrl, deinit_features);
@@ -2878,11 +2889,12 @@ static void hdmi_panel_set_hdr_infoframe(struct hdmi_tx_ctrl *ctrl)
packet_header = type_code | (version << 8) | (length << 16);
DSS_REG_W(io, HDMI_GENERIC0_HDR, packet_header);
- packet_payload = (ctrl->hdr_data.eotf << 8);
+ packet_payload = (ctrl->hdr_ctrl.hdr_stream.eotf << 8);
if (hdmi_tx_metadata_type_one(ctrl)) {
- packet_payload |= (descriptor_id << 16)
- | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_x[0])
- << 24);
+ packet_payload |=
+ (descriptor_id << 16)
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_x[0]) << 24);
DSS_REG_W(io, HDMI_GENERIC0_0, packet_payload);
} else {
pr_debug("%s: Metadata Type 1 not supported\n", __func__);
@@ -2891,44 +2903,56 @@ static void hdmi_panel_set_hdr_infoframe(struct hdmi_tx_ctrl *ctrl)
}
packet_payload =
- (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_x[0]))
- | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_y[0]) << 8)
- | (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_y[0]) << 16)
- | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_x[1]) << 24);
+ (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.display_primaries_x[0]))
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_y[0]) << 8)
+ | (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_y[0]) << 16)
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_x[1]) << 24);
DSS_REG_W(io, HDMI_GENERIC0_1, packet_payload);
packet_payload =
- (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_x[1]))
- | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_y[1]) << 8)
- | (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_y[1]) << 16)
- | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_x[2]) << 24);
+ (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.display_primaries_x[1]))
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_y[1]) << 8)
+ | (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_y[1]) << 16)
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_x[2]) << 24);
DSS_REG_W(io, HDMI_GENERIC0_2, packet_payload);
packet_payload =
- (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_x[2]))
- | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_y[2]) << 8)
- | (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_y[2]) << 16)
- | (HDMI_GET_LSB(ctrl->hdr_data.white_point_x) << 24);
+ (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.display_primaries_x[2]))
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_y[2]) << 8)
+ | (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_y[2]) << 16)
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.white_point_x) << 24);
DSS_REG_W(io, HDMI_GENERIC0_3, packet_payload);
packet_payload =
- (HDMI_GET_MSB(ctrl->hdr_data.white_point_x))
- | (HDMI_GET_LSB(ctrl->hdr_data.white_point_y) << 8)
- | (HDMI_GET_MSB(ctrl->hdr_data.white_point_y) << 16)
- | (HDMI_GET_LSB(ctrl->hdr_data.max_luminance) << 24);
+ (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.white_point_x))
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.white_point_y) << 8)
+ | (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.white_point_y) << 16)
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.max_luminance) << 24);
DSS_REG_W(io, HDMI_GENERIC0_4, packet_payload);
packet_payload =
- (HDMI_GET_MSB(ctrl->hdr_data.max_luminance))
- | (HDMI_GET_LSB(ctrl->hdr_data.min_luminance) << 8)
- | (HDMI_GET_MSB(ctrl->hdr_data.min_luminance) << 16)
- | (HDMI_GET_LSB(ctrl->hdr_data.max_content_light_level) << 24);
+ (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.max_luminance))
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.min_luminance) << 8)
+ | (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.min_luminance) << 16)
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.
+ max_content_light_level) << 24);
DSS_REG_W(io, HDMI_GENERIC0_5, packet_payload);
packet_payload =
- (HDMI_GET_MSB(ctrl->hdr_data.max_content_light_level))
- | (HDMI_GET_LSB(ctrl->hdr_data.max_average_light_level) << 8)
- | (HDMI_GET_MSB(ctrl->hdr_data.max_average_light_level) << 16);
+ (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.
+ max_content_light_level))
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.
+ max_average_light_level) << 8)
+ | (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.
+ max_average_light_level) << 16);
DSS_REG_W(io, HDMI_GENERIC0_6, packet_payload);
enable_packet_control:
@@ -2943,6 +2967,32 @@ enable_packet_control:
DSS_REG_W(io, HDMI_GEN_PKT_CTRL, packet_control);
}
+static void hdmi_panel_clear_hdr_infoframe(struct hdmi_tx_ctrl *ctrl)
+{
+ u32 packet_control = 0;
+ struct dss_io_data *io = NULL;
+
+ if (!ctrl) {
+ pr_err("%s: invalid input\n", __func__);
+ return;
+ }
+
+ if (!hdmi_tx_is_hdr_supported(ctrl)) {
+ pr_err("%s: Sink does not support HDR\n", __func__);
+ return;
+ }
+
+ io = &ctrl->pdata.io[HDMI_TX_CORE_IO];
+ if (!io->base) {
+ pr_err("%s: core io not inititalized\n", __func__);
+ return;
+ }
+
+ packet_control = DSS_REG_R_ND(io, HDMI_GEN_PKT_CTRL);
+ packet_control &= ~HDMI_GEN_PKT_CTRL_CLR_MASK;
+ DSS_REG_W(io, HDMI_GEN_PKT_CTRL, packet_control);
+}
+
static int hdmi_tx_audio_info_setup(struct platform_device *pdev,
struct msm_ext_disp_audio_setup_params *params)
{
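
The GENERIC0 register writes above pack 16-bit HDR mastering-display values into 32-bit payload words one byte at a time, emitting each value LSB first, so a single field can straddle two registers. A minimal standalone sketch of that layout, assuming the usual semantics for the HDMI_GET_LSB/HDMI_GET_MSB helpers (not taken from the driver headers):

#include <stdint.h>
#include <stdio.h>

/* Assumed semantics of the driver's byte-extraction helpers. */
#define HDMI_GET_LSB(x) ((uint32_t)(x) & 0xff)
#define HDMI_GET_MSB(x) (((uint32_t)(x) >> 8) & 0xff)

int main(void)
{
	uint16_t x0 = 0x8a48, y0 = 0x3908, x1 = 0x33c2;

	/*
	 * One HDMI_GENERIC0_1-style word: x0's LSB already went into the
	 * top byte of the previous word, and x1's MSB opens the next one.
	 */
	uint32_t payload = HDMI_GET_MSB(x0)
			 | (HDMI_GET_LSB(y0) << 8)
			 | (HDMI_GET_MSB(y0) << 16)
			 | (HDMI_GET_LSB(x1) << 24);

	printf("GENERIC0_1 = 0x%08x\n", payload);	/* prints 0xc239088a */
	return 0;
}
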
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.h b/drivers/video/fbdev/msm/mdss_hdmi_tx.h
index 3469b8a5819f..ad02003631f6 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.h
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.h
@@ -21,6 +21,7 @@
#include "mdss_hdmi_audio.h"
#define MAX_SWITCH_NAME_SIZE 5
+#define HDMI_GEN_PKT_CTRL_CLR_MASK 0x7
enum hdmi_tx_io_type {
HDMI_TX_CORE_IO,
@@ -90,7 +91,7 @@ struct hdmi_tx_ctrl {
struct msm_ext_disp_audio_setup_params audio_params;
struct msm_ext_disp_init_data ext_audio_data;
struct work_struct fps_work;
- struct mdp_hdr_stream hdr_data;
+ struct mdp_hdr_stream_ctrl hdr_ctrl;
spinlock_t hpd_state_lock;
@@ -116,6 +117,7 @@ struct hdmi_tx_ctrl {
u8 hdcp_status;
u8 spd_vendor_name[9];
u8 spd_product_description[17];
+ u8 curr_hdr_state;
bool hdcp_feature_on;
bool hpd_disabled;
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.c b/drivers/video/fbdev/msm/mdss_hdmi_util.c
index 827013d06412..5bc46d8c8f92 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_util.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_util.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/msm_mdp.h>
+#include <linux/msm_mdp_ext.h>
#include "mdss_hdmi_util.h"
#define RESOLUTION_NAME_STR_LEN 30
@@ -1811,3 +1812,51 @@ int hdmi_hdcp2p2_ddc_read_rxstatus(struct hdmi_tx_ddc_ctrl *ctrl)
return rc;
}
+
+u8 hdmi_hdr_get_ops(u8 curr_state, u8 new_state)
+{
+
+ /* There are four valid state transitions:
+ * 1. HDR_DISABLE -> HDR_ENABLE
+ *
+ * In this transition, we shall start sending
+ * HDR metadata with metadata from the HDR clip
+ *
+ * 2. HDR_ENABLE -> HDR_RESET
+ *
+ * In this transition, we will keep sending
+ * HDR metadata but with EOTF and metadata as 0
+ *
+ * 3. HDR_RESET -> HDR_ENABLE
+ *
+ * In this transition, we will start sending
+ * HDR metadata with metadata from the HDR clip
+ *
+ * 4. HDR_RESET -> HDR_DISABLE
+ *
+ * In this transition, we will stop sending
+ * metadata to the sink and clear PKT_CTRL register
+ * bits.
+ */
+
+ if ((curr_state == HDR_DISABLE)
+ && (new_state == HDR_ENABLE)) {
+ pr_debug("State changed HDR_DISABLE ---> HDR_ENABLE\n");
+ return HDR_SEND_INFO;
+ } else if ((curr_state == HDR_ENABLE)
+ && (new_state == HDR_RESET)) {
+ pr_debug("State changed HDR_ENABLE ---> HDR_RESET\n");
+ return HDR_SEND_INFO;
+ } else if ((curr_state == HDR_RESET)
+ && (new_state == HDR_ENABLE)) {
+ pr_debug("State changed HDR_RESET ---> HDR_ENABLE\n");
+ return HDR_SEND_INFO;
+ } else if ((curr_state == HDR_RESET)
+ && (new_state == HDR_DISABLE)) {
+ pr_debug("State changed HDR_RESET ---> HDR_DISABLE\n");
+ return HDR_CLEAR_INFO;
+ }
+
+ pr_debug("Unsupported OR no state change\n");
+ return HDR_UNSUPPORTED_OP;
+}
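
Since hdmi_hdr_get_ops() only ever reports an operation for the four transitions listed in its comment, the same mapping can be written as a lookup table. A compilable sketch, assuming the HDR_DISABLE/HDR_ENABLE/HDR_RESET values shown here (the real state definitions live in the MDP headers):

#include <stdio.h>

enum hdr_state { HDR_DISABLE, HDR_ENABLE, HDR_RESET };	/* assumed values */
enum hdr_op    { HDR_UNSUPPORTED_OP, HDR_SEND_INFO, HDR_CLEAR_INFO };

/* Rows are the current state, columns the requested state; every pair
 * not listed below stays HDR_UNSUPPORTED_OP (zero). */
static const unsigned char hdr_ops[3][3] = {
	[HDR_DISABLE][HDR_ENABLE] = HDR_SEND_INFO,
	[HDR_ENABLE][HDR_RESET]   = HDR_SEND_INFO,
	[HDR_RESET][HDR_ENABLE]   = HDR_SEND_INFO,
	[HDR_RESET][HDR_DISABLE]  = HDR_CLEAR_INFO,
};

int main(void)
{
	printf("%d\n", hdr_ops[HDR_DISABLE][HDR_ENABLE]);  /* 1 = send */
	printf("%d\n", hdr_ops[HDR_ENABLE][HDR_DISABLE]);  /* 0 = unsupported */
	return 0;
}
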
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.h b/drivers/video/fbdev/msm/mdss_hdmi_util.h
index 4fd659616bcc..fe554f8e9e67 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_util.h
+++ b/drivers/video/fbdev/msm/mdss_hdmi_util.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -425,6 +425,12 @@ enum hdmi_tx_hdcp2p2_rxstatus_intr_mask {
RXSTATUS_REAUTH_REQ = BIT(14),
};
+enum hdmi_hdr_op {
+ HDR_UNSUPPORTED_OP,
+ HDR_SEND_INFO,
+ HDR_CLEAR_INFO
+};
+
struct hdmi_tx_hdcp2p2_ddc_data {
enum hdmi_tx_hdcp2p2_rxstatus_intr_mask intr_mask;
u32 timeout_ms;
@@ -518,5 +524,5 @@ void hdmi_hdcp2p2_ddc_disable(struct hdmi_tx_ddc_ctrl *ctrl);
int hdmi_hdcp2p2_ddc_read_rxstatus(struct hdmi_tx_ddc_ctrl *ctrl);
int hdmi_utils_get_timeout_in_hysnc(struct msm_hdmi_mode_timing_info *timing,
u32 timeout_ms);
-
+u8 hdmi_hdr_get_ops(u8 curr_state, u8 new_state);
#endif /* __HDMI_UTIL_H__ */
diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h
index 9796121bbabf..54b792305eb5 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.h
+++ b/drivers/video/fbdev/msm/mdss_mdp.h
@@ -995,6 +995,8 @@ struct mdss_overlay_private {
struct task_struct *thread;
u8 secure_transition_state;
+
+ bool cache_null_commit; /* set when the preceding commit was a NULL commit */
};
struct mdss_mdp_set_ot_params {
diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c
index b07ba82fde34..9e9f37ce0b23 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_layer.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c
@@ -1853,9 +1853,15 @@ static int __validate_secure_session(struct mdss_overlay_private *mdp5_data)
pr_err("secure-camera cnt:%d secure video:%d secure display:%d\n",
secure_cam_pipes, secure_vid_pipes, sd_pipes);
return -EINVAL;
- } else {
- return 0;
+ } else if (mdp5_data->ctl->is_video_mode &&
+ ((sd_pipes && !mdp5_data->sd_enabled) ||
+ (!sd_pipes && mdp5_data->sd_enabled)) &&
+ !mdp5_data->cache_null_commit) {
+ pr_err("NULL commit missing before display secure session entry/exit\n");
+ return -EINVAL;
}
+
+ return 0;
}
/*
diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
index 305fff6b5695..11c159630747 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
@@ -2375,6 +2375,8 @@ static void __overlay_set_secure_transition_state(struct msm_fb_data_type *mfd)
/* Reset the secure transition state */
mdp5_data->secure_transition_state = SECURE_TRANSITION_NONE;
+ mdp5_data->cache_null_commit = list_empty(&mdp5_data->pipes_used);
+
/*
* Secure transition would be NONE in two conditions:
* 1. All the features are already disabled and state remains
@@ -2584,6 +2586,7 @@ int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd,
ATRACE_BEGIN("sspp_programming");
ret = __overlay_queue_pipes(mfd);
ATRACE_END("sspp_programming");
+
mutex_unlock(&mdp5_data->list_lock);
mdp5_data->kickoff_released = false;
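
Taken together, the two hunks above make secure-display entry and exit on video-mode panels conditional on a preceding NULL (layer-less) commit: __overlay_set_secure_transition_state() records list_empty(&pipes_used) into cache_null_commit, and __validate_secure_session() rejects the transition when that flag is clear. A minimal sketch of the resulting predicate, with hypothetical flattened parameters in place of the driver structs:

/* Entering (sd_pipes && !sd_enabled) or exiting (!sd_pipes && sd_enabled)
 * secure display in video mode is only legal right after a NULL commit. */
static int secure_transition_allowed(int is_video_mode, int sd_pipes,
				     int sd_enabled, int prev_commit_was_null)
{
	int entering = sd_pipes && !sd_enabled;
	int exiting = !sd_pipes && sd_enabled;

	if (is_video_mode && (entering || exiting) && !prev_commit_was_null)
		return 0;	/* caller fails validation with -EINVAL */
	return 1;
}
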
diff --git a/drivers/video/fbdev/msm/mdss_rotator.c b/drivers/video/fbdev/msm/mdss_rotator.c
index 2028222748c3..78bccdbfee3b 100644
--- a/drivers/video/fbdev/msm/mdss_rotator.c
+++ b/drivers/video/fbdev/msm/mdss_rotator.c
@@ -1124,6 +1124,7 @@ static void mdss_rotator_release_from_work_distribution(
bool free_perf = false;
u32 wb_idx = entry->queue->hw->wb_id;
+ mutex_lock(&mgr->lock);
mutex_lock(&entry->perf->work_dis_lock);
if (entry->perf->work_distribution[wb_idx])
entry->perf->work_distribution[wb_idx]--;
@@ -1147,6 +1148,7 @@ static void mdss_rotator_release_from_work_distribution(
mdss_rotator_clk_ctrl(mgr, false);
entry->perf = NULL;
}
+ mutex_unlock(&mgr->lock);
}
}
@@ -2043,7 +2045,6 @@ static int mdss_rotator_close_session(struct mdss_rot_mgr *mgr,
list_del_init(&perf->list);
mutex_unlock(&perf->work_dis_lock);
mutex_unlock(&private->perf_lock);
- mutex_unlock(&mgr->lock);
if (offload_release_work)
goto done;
@@ -2056,6 +2057,7 @@ static int mdss_rotator_close_session(struct mdss_rot_mgr *mgr,
done:
pr_debug("Closed session id:%u", id);
ATRACE_END(__func__);
+ mutex_unlock(&mgr->lock);
return 0;
}
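
The rotator change above takes mgr->lock around the work-distribution release and keeps it held to the end of close_session, serializing the two paths that can drop the last reference to a perf struct; both now follow the same mgr->lock then perf->work_dis_lock order. The rule being enforced is ordinary lock ordering; a self-contained pthread sketch (names illustrative, not the driver's):

#include <pthread.h>

static pthread_mutex_t mgr_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t perf_lock = PTHREAD_MUTEX_INITIALIZER;

/* Whenever both locks are needed, take the manager lock first and the
 * per-session lock second, releasing in reverse order; mixing the order
 * between two paths is what invites deadlock and racing frees. */
static void release_work(void)
{
	pthread_mutex_lock(&mgr_lock);
	pthread_mutex_lock(&perf_lock);
	/* ... drop work distribution, possibly free the perf struct ... */
	pthread_mutex_unlock(&perf_lock);
	pthread_mutex_unlock(&mgr_lock);
}

int main(void)
{
	release_work();
	return 0;
}
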
diff --git a/fs/Kconfig b/fs/Kconfig
index 4adb93ec85ea..89ddd182f568 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -73,6 +73,8 @@ config FILE_LOCKING
for filesystems like NFS and for the flock() system
call. Disabling this option saves about 11k.
+source "fs/crypto/Kconfig"
+
source "fs/notify/Kconfig"
source "fs/quota/Kconfig"
diff --git a/fs/Makefile b/fs/Makefile
index dee237540bc0..4644db462ba9 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_EVENTFD) += eventfd.o
obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
obj-$(CONFIG_AIO) += aio.o
obj-$(CONFIG_FS_DAX) += dax.o
+obj-$(CONFIG_FS_ENCRYPTION) += crypto/
obj-$(CONFIG_FILE_LOCKING) += locks.o
obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o
obj-$(CONFIG_BINFMT_AOUT) += binfmt_aout.o
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 26bbaaefdff4..d3c296d4eb25 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -532,6 +532,7 @@ static void init_once(void *foo)
#ifdef CONFIG_SYSFS
INIT_LIST_HEAD(&bdev->bd_holder_disks);
#endif
+ bdev->bd_bdi = &noop_backing_dev_info;
inode_init_once(&ei->vfs_inode);
/* Initialize mutex for freeze. */
mutex_init(&bdev->bd_fsfreeze_mutex);
@@ -557,6 +558,10 @@ static void bdev_evict_inode(struct inode *inode)
}
list_del_init(&bdev->bd_list);
spin_unlock(&bdev_lock);
+ if (bdev->bd_bdi != &noop_backing_dev_info) {
+ bdi_put(bdev->bd_bdi);
+ bdev->bd_bdi = &noop_backing_dev_info;
+ }
}
static const struct super_operations bdev_sops = {
@@ -623,6 +628,21 @@ static int bdev_set(struct inode *inode, void *data)
static LIST_HEAD(all_bdevs);
+/*
+ * If there is a bdev inode for this device, unhash it so that it gets evicted
+ * as soon as last inode reference is dropped.
+ */
+void bdev_unhash_inode(dev_t dev)
+{
+ struct inode *inode;
+
+ inode = ilookup5(blockdev_superblock, hash(dev), bdev_test, &dev);
+ if (inode) {
+ remove_inode_hash(inode);
+ iput(inode);
+ }
+}
+
struct block_device *bdget(dev_t dev)
{
struct block_device *bdev;
@@ -1201,6 +1221,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
bdev->bd_disk = disk;
bdev->bd_queue = disk->queue;
bdev->bd_contains = bdev;
+ if (bdev->bd_bdi == &noop_backing_dev_info)
+ bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
+
bdev->bd_inode->i_flags = disk->fops->direct_access ? S_DAX : 0;
if (!partno) {
ret = -ENXIO;
@@ -1302,6 +1325,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
bdev->bd_disk = NULL;
bdev->bd_part = NULL;
bdev->bd_queue = NULL;
+ bdi_put(bdev->bd_bdi);
+ bdev->bd_bdi = &noop_backing_dev_info;
if (bdev != bdev->bd_contains)
__blkdev_put(bdev->bd_contains, mode, 1);
bdev->bd_contains = NULL;
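
The block_dev hunks give each block-device inode a bd_bdi pointer that is never NULL: it starts at the global noop_backing_dev_info, a counted reference to the disk queue's bdi is taken on first open, and the reference is dropped (and the pointer reset) on eviction or a failed open. A toy sketch of that lifetime rule, with illustrative types in place of the real bdi API:

struct bdi { int refcnt; };
static struct bdi noop_bdi = { 1 };	/* global default, never freed */

static struct bdi *bdi_get_(struct bdi *b) { b->refcnt++; return b; }
static void bdi_put_(struct bdi *b) { b->refcnt--; }

static void first_open(struct bdi **bd_bdi, struct bdi *queue_bdi)
{
	if (*bd_bdi == &noop_bdi)	/* only the first opener pins it */
		*bd_bdi = bdi_get_(queue_bdi);
}

static void evict(struct bdi **bd_bdi)
{
	if (*bd_bdi != &noop_bdi) {	/* drop the pin, restore the default */
		bdi_put_(*bd_bdi);
		*bd_bdi = &noop_bdi;
	}
}

int main(void)
{
	struct bdi queue_bdi = { 1 };
	struct bdi *bd_bdi = &noop_bdi;

	first_open(&bd_bdi, &queue_bdi);
	evict(&bd_bdi);
	return 0;
}
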
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 5d34a062ca4f..3bd2233737ac 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1727,6 +1727,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
goto restore;
}
+ btrfs_qgroup_rescan_resume(fs_info);
+
if (!fs_info->uuid_root) {
btrfs_info(fs_info, "creating UUID tree");
ret = btrfs_create_uuid_tree(fs_info);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 9c62a6f9757a..600c67ef8a03 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -108,7 +108,7 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
},
};
-const u64 const btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
+const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
[BTRFS_RAID_RAID10] = BTRFS_BLOCK_GROUP_RAID10,
[BTRFS_RAID_RAID1] = BTRFS_BLOCK_GROUP_RAID1,
[BTRFS_RAID_DUP] = BTRFS_BLOCK_GROUP_DUP,
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index c6a1ec110c01..22bae2b434e2 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -189,7 +189,7 @@ static int ceph_releasepage(struct page *page, gfp_t g)
/*
* read a single page, without unlocking it.
*/
-static int readpage_nounlock(struct file *filp, struct page *page)
+static int ceph_do_readpage(struct file *filp, struct page *page)
{
struct inode *inode = file_inode(filp);
struct ceph_inode_info *ci = ceph_inode(inode);
@@ -219,7 +219,7 @@ static int readpage_nounlock(struct file *filp, struct page *page)
err = ceph_readpage_from_fscache(inode, page);
if (err == 0)
- goto out;
+ return -EINPROGRESS;
dout("readpage inode %p file %p page %p index %lu\n",
inode, filp, page, page->index);
@@ -249,8 +249,11 @@ out:
static int ceph_readpage(struct file *filp, struct page *page)
{
- int r = readpage_nounlock(filp, page);
- unlock_page(page);
+ int r = ceph_do_readpage(filp, page);
+ if (r != -EINPROGRESS)
+ unlock_page(page);
+ else
+ r = 0;
return r;
}
@@ -1094,7 +1097,7 @@ retry_locked:
goto retry_locked;
r = writepage_nounlock(page, NULL);
if (r < 0)
- goto fail_nosnap;
+ goto fail_unlock;
goto retry_locked;
}
@@ -1122,11 +1125,14 @@ retry_locked:
}
/* we need to read it. */
- r = readpage_nounlock(file, page);
- if (r < 0)
- goto fail_nosnap;
+ r = ceph_do_readpage(file, page);
+ if (r < 0) {
+ if (r == -EINPROGRESS)
+ return -EAGAIN;
+ goto fail_unlock;
+ }
goto retry_locked;
-fail_nosnap:
+fail_unlock:
unlock_page(page);
return r;
}
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index a4766ded1ba7..ff1cfd7b1083 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -224,13 +224,7 @@ void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
fscache_relinquish_cookie(cookie, 0);
}
-static void ceph_vfs_readpage_complete(struct page *page, void *data, int error)
-{
- if (!error)
- SetPageUptodate(page);
-}
-
-static void ceph_vfs_readpage_complete_unlock(struct page *page, void *data, int error)
+static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error)
{
if (!error)
SetPageUptodate(page);
@@ -259,7 +253,7 @@ int ceph_readpage_from_fscache(struct inode *inode, struct page *page)
return -ENOBUFS;
ret = fscache_read_or_alloc_page(ci->fscache, page,
- ceph_vfs_readpage_complete, NULL,
+ ceph_readpage_from_fscache_complete, NULL,
GFP_KERNEL);
switch (ret) {
@@ -288,7 +282,7 @@ int ceph_readpages_from_fscache(struct inode *inode,
return -ENOBUFS;
ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages,
- ceph_vfs_readpage_complete_unlock,
+ ceph_readpage_from_fscache_complete,
NULL, mapping_gfp_mask(mapping));
switch (ret) {
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index fa8df3fef6fc..297e05c9e2b0 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -194,7 +194,7 @@ check_name(struct dentry *direntry, struct cifs_tcon *tcon)
int i;
if (unlikely(direntry->d_name.len >
- tcon->fsAttrInfo.MaxPathNameComponentLength))
+ le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
return -ENAMETOOLONG;
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
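
The check_name() fix matters because MaxPathNameComponentLength is carried in SMB2's little-endian wire encoding (__le32); comparing the raw bytes against a host integer only works on little-endian machines. A portable demonstration of what le32_to_cpu() does and why the unconverted compare was wrong:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for le32_to_cpu(): decode byte-by-byte, host-independent. */
static uint32_t le32_to_host(const unsigned char b[4])
{
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
	/* 255, a typical max component length, as it appears on the wire */
	unsigned char wire[4] = { 0xff, 0x00, 0x00, 0x00 };
	uint32_t raw;

	memcpy(&raw, wire, sizeof(raw));
	printf("converted=%u raw=%u\n", le32_to_host(wire), raw);
	/* On a big-endian host raw reads as 0xff000000, so the
	 * "name too long" test len > raw would never fire. */
	return 0;
}
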
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index b8f553b32dda..aacb15bd56fe 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -82,8 +82,8 @@
#define NUMBER_OF_SMB2_COMMANDS 0x0013
-/* BB FIXME - analyze following length BB */
-#define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 2 pad */
+/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */
+#define MAX_SMB2_HDR_SIZE 0x00b0
#define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
diff --git a/fs/crypto/Kconfig b/fs/crypto/Kconfig
new file mode 100644
index 000000000000..92348faf9865
--- /dev/null
+++ b/fs/crypto/Kconfig
@@ -0,0 +1,18 @@
+config FS_ENCRYPTION
+ tristate "FS Encryption (Per-file encryption)"
+ depends on BLOCK
+ select CRYPTO
+ select CRYPTO_AES
+ select CRYPTO_CBC
+ select CRYPTO_ECB
+ select CRYPTO_XTS
+ select CRYPTO_CTS
+ select CRYPTO_CTR
+ select CRYPTO_SHA256
+ select KEYS
+ select ENCRYPTED_KEYS
+ help
+ Enable encryption of files and directories. This
+ feature is similar to ecryptfs, but it is more memory
+ efficient since it avoids caching the encrypted and
+ decrypted pages in the page cache.
diff --git a/fs/crypto/Makefile b/fs/crypto/Makefile
new file mode 100644
index 000000000000..f17684c48739
--- /dev/null
+++ b/fs/crypto/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_FS_ENCRYPTION) += fscrypto.o
+
+fscrypto-y := crypto.o fname.o policy.o keyinfo.o
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
new file mode 100644
index 000000000000..2d40ab9edc9f
--- /dev/null
+++ b/fs/crypto/crypto.c
@@ -0,0 +1,568 @@
+/*
+ * This contains encryption functions for per-file encryption.
+ *
+ * Copyright (C) 2015, Google, Inc.
+ * Copyright (C) 2015, Motorola Mobility
+ *
+ * Written by Michael Halcrow, 2014.
+ *
+ * Filename encryption additions
+ * Uday Savagaonkar, 2014
+ * Encryption policy handling additions
+ * Ildar Muslukhov, 2014
+ * Add fscrypt_pullback_bio_page()
+ * Jaegeuk Kim, 2015.
+ *
+ * This has not yet undergone a rigorous security audit.
+ *
+ * The usage of AES-XTS should conform to recommendations in NIST
+ * Special Publication 800-38E and IEEE P1619/D16.
+ */
+
+#include <linux/pagemap.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/ratelimit.h>
+#include <linux/bio.h>
+#include <linux/dcache.h>
+#include <linux/namei.h>
+#include <linux/fscrypto.h>
+
+static unsigned int num_prealloc_crypto_pages = 32;
+static unsigned int num_prealloc_crypto_ctxs = 128;
+
+module_param(num_prealloc_crypto_pages, uint, 0444);
+MODULE_PARM_DESC(num_prealloc_crypto_pages,
+ "Number of crypto pages to preallocate");
+module_param(num_prealloc_crypto_ctxs, uint, 0444);
+MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
+ "Number of crypto contexts to preallocate");
+
+static mempool_t *fscrypt_bounce_page_pool = NULL;
+
+static LIST_HEAD(fscrypt_free_ctxs);
+static DEFINE_SPINLOCK(fscrypt_ctx_lock);
+
+static struct workqueue_struct *fscrypt_read_workqueue;
+static DEFINE_MUTEX(fscrypt_init_mutex);
+
+static struct kmem_cache *fscrypt_ctx_cachep;
+struct kmem_cache *fscrypt_info_cachep;
+
+/**
+ * fscrypt_release_ctx() - Releases an encryption context
+ * @ctx: The encryption context to release.
+ *
+ * If the encryption context was allocated from the pre-allocated pool, returns
+ * it to that pool. Else, frees it.
+ *
+ * If there's a bounce page in the context, this frees that.
+ */
+void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
+{
+ unsigned long flags;
+
+ if (ctx->flags & FS_WRITE_PATH_FL && ctx->w.bounce_page) {
+ mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
+ ctx->w.bounce_page = NULL;
+ }
+ ctx->w.control_page = NULL;
+ if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
+ kmem_cache_free(fscrypt_ctx_cachep, ctx);
+ } else {
+ spin_lock_irqsave(&fscrypt_ctx_lock, flags);
+ list_add(&ctx->free_list, &fscrypt_free_ctxs);
+ spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
+ }
+}
+EXPORT_SYMBOL(fscrypt_release_ctx);
+
+/**
+ * fscrypt_get_ctx() - Gets an encryption context
+ * @inode: The inode for which we are doing the crypto
+ * @gfp_flags: The gfp flag for memory allocation
+ *
+ * Allocates and initializes an encryption context.
+ *
+ * Return: An allocated and initialized encryption context on success; error
+ * value or NULL otherwise.
+ */
+struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode, gfp_t gfp_flags)
+{
+ struct fscrypt_ctx *ctx = NULL;
+ struct fscrypt_info *ci = inode->i_crypt_info;
+ unsigned long flags;
+
+ if (ci == NULL)
+ return ERR_PTR(-ENOKEY);
+
+ /*
+ * We first try getting the ctx from a free list because in
+ * the common case the ctx will have an allocated and
+ * initialized crypto tfm, so it's probably a worthwhile
+ * optimization. For the bounce page, we first try getting it
+ * from the kernel allocator because that's just about as fast
+ * as getting it from a list and because a cache of free pages
+ * should generally be a "last resort" option for a filesystem
+ * to be able to do its job.
+ */
+ spin_lock_irqsave(&fscrypt_ctx_lock, flags);
+ ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
+ struct fscrypt_ctx, free_list);
+ if (ctx)
+ list_del(&ctx->free_list);
+ spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
+ if (!ctx) {
+ ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+ ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
+ } else {
+ ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
+ }
+ ctx->flags &= ~FS_WRITE_PATH_FL;
+ return ctx;
+}
+EXPORT_SYMBOL(fscrypt_get_ctx);
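
The free-list-first strategy described in the comment is a classic object pool: reuse a warm context (one whose crypto tfm is already set up) when possible, fall back to the slab otherwise, and tag fallback objects so release knows to free rather than recycle them. A stripped-down userspace sketch of the same shape (the real code additionally holds fscrypt_ctx_lock around the list):

#include <stdlib.h>

struct obj { struct obj *next; int must_free; };
static struct obj *free_list;

static struct obj *obj_get(void)
{
	struct obj *o = free_list;

	if (o) {			/* warm object: reuse */
		free_list = o->next;
		o->must_free = 0;
	} else {			/* pool empty: fall back to malloc */
		o = calloc(1, sizeof(*o));
		if (o)
			o->must_free = 1;
	}
	return o;
}

static void obj_put(struct obj *o)
{
	if (o->must_free) {
		free(o);
	} else {			/* return to the pool */
		o->next = free_list;
		free_list = o;
	}
}

int main(void)
{
	struct obj *o = obj_get();

	if (o)
		obj_put(o);
	return 0;
}
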
+
+/**
+ * page_crypt_complete() - completion callback for page crypto
+ * @req: The asynchronous cipher request context
+ * @res: The result of the cipher operation
+ */
+static void page_crypt_complete(struct crypto_async_request *req, int res)
+{
+ struct fscrypt_completion_result *ecr = req->data;
+
+ if (res == -EINPROGRESS)
+ return;
+ ecr->res = res;
+ complete(&ecr->completion);
+}
+
+typedef enum {
+ FS_DECRYPT = 0,
+ FS_ENCRYPT,
+} fscrypt_direction_t;
+
+static int do_page_crypto(struct inode *inode,
+ fscrypt_direction_t rw, pgoff_t index,
+ struct page *src_page, struct page *dest_page,
+ gfp_t gfp_flags)
+{
+ struct {
+ __le64 index;
+ u8 padding[FS_XTS_TWEAK_SIZE - sizeof(__le64)];
+ } xts_tweak;
+ struct skcipher_request *req = NULL;
+ DECLARE_FS_COMPLETION_RESULT(ecr);
+ struct scatterlist dst, src;
+ struct fscrypt_info *ci = inode->i_crypt_info;
+ struct crypto_skcipher *tfm = ci->ci_ctfm;
+ int res = 0;
+
+ req = skcipher_request_alloc(tfm, gfp_flags);
+ if (!req) {
+ printk_ratelimited(KERN_ERR
+ "%s: crypto_request_alloc() failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ skcipher_request_set_callback(
+ req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ page_crypt_complete, &ecr);
+
+ BUILD_BUG_ON(sizeof(xts_tweak) != FS_XTS_TWEAK_SIZE);
+ xts_tweak.index = cpu_to_le64(index);
+ memset(xts_tweak.padding, 0, sizeof(xts_tweak.padding));
+
+ sg_init_table(&dst, 1);
+ sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
+ sg_init_table(&src, 1);
+ sg_set_page(&src, src_page, PAGE_SIZE, 0);
+ skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, &xts_tweak);
+ if (rw == FS_DECRYPT)
+ res = crypto_skcipher_decrypt(req);
+ else
+ res = crypto_skcipher_encrypt(req);
+ if (res == -EINPROGRESS || res == -EBUSY) {
+ BUG_ON(req->base.data != &ecr);
+ wait_for_completion(&ecr.completion);
+ res = ecr.res;
+ }
+ skcipher_request_free(req);
+ if (res) {
+ printk_ratelimited(KERN_ERR
+ "%s: crypto_skcipher_encrypt() returned %d\n",
+ __func__, res);
+ return res;
+ }
+ return 0;
+}
+
+static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags)
+{
+ ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
+ if (ctx->w.bounce_page == NULL)
+ return ERR_PTR(-ENOMEM);
+ ctx->flags |= FS_WRITE_PATH_FL;
+ return ctx->w.bounce_page;
+}
+
+/**
+ * fscrypt_encrypt_page() - Encrypts a page
+ * @inode: The inode for which the encryption should take place
+ * @plaintext_page: The page to encrypt. Must be locked.
+ * @gfp_flags: The gfp flag for memory allocation
+ *
+ * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
+ * encryption context.
+ *
+ * Called on the page write path. The caller must call
+ * fscrypt_restore_control_page() on the returned ciphertext page to
+ * release the bounce buffer and the encryption context.
+ *
+ * Return: An allocated page with the encrypted content on success. Else, an
+ * error value or NULL.
+ */
+struct page *fscrypt_encrypt_page(struct inode *inode,
+ struct page *plaintext_page, gfp_t gfp_flags)
+{
+ struct fscrypt_ctx *ctx;
+ struct page *ciphertext_page = NULL;
+ int err;
+
+ BUG_ON(!PageLocked(plaintext_page));
+
+ ctx = fscrypt_get_ctx(inode, gfp_flags);
+ if (IS_ERR(ctx))
+ return (struct page *)ctx;
+
+ /* The encryption operation will require a bounce page. */
+ ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
+ if (IS_ERR(ciphertext_page))
+ goto errout;
+
+ ctx->w.control_page = plaintext_page;
+ err = do_page_crypto(inode, FS_ENCRYPT, plaintext_page->index,
+ plaintext_page, ciphertext_page,
+ gfp_flags);
+ if (err) {
+ ciphertext_page = ERR_PTR(err);
+ goto errout;
+ }
+ SetPagePrivate(ciphertext_page);
+ set_page_private(ciphertext_page, (unsigned long)ctx);
+ lock_page(ciphertext_page);
+ return ciphertext_page;
+
+errout:
+ fscrypt_release_ctx(ctx);
+ return ciphertext_page;
+}
+EXPORT_SYMBOL(fscrypt_encrypt_page);
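
The kernel-doc above implies a specific calling pattern on the write path: the filesystem hands its locked plaintext page in, writes the returned bounce page to disk, then releases the bounce page and context in one call. A minimal sketch of such a caller, assuming a hypothetical submit_page_for_io() helper for the actual I/O:

/* Sketch only: error handling trimmed, submit_page_for_io() is made up. */
static int write_encrypted_page(struct inode *inode, struct page *page)
{
	struct page *cipher_page;
	int err;

	cipher_page = fscrypt_encrypt_page(inode, page, GFP_NOFS);
	if (IS_ERR(cipher_page))
		return PTR_ERR(cipher_page);

	err = submit_page_for_io(cipher_page);	/* hypothetical I/O helper */

	/* unlocks the bounce page and releases the ctx taken above */
	fscrypt_restore_control_page(cipher_page);
	return err;
}
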
+
+/**
+ * fscrypt_decrypt_page() - Decrypts a page in-place
+ * @page: The page to decrypt. Must be locked.
+ *
+ * Decrypts page in-place using the ctx encryption context.
+ *
+ * Called from the read completion callback.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int fscrypt_decrypt_page(struct page *page)
+{
+ BUG_ON(!PageLocked(page));
+
+ return do_page_crypto(page->mapping->host,
+ FS_DECRYPT, page->index, page, page, GFP_NOFS);
+}
+EXPORT_SYMBOL(fscrypt_decrypt_page);
+
+int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
+ sector_t pblk, unsigned int len)
+{
+ struct fscrypt_ctx *ctx;
+ struct page *ciphertext_page = NULL;
+ struct bio *bio;
+ int ret, err = 0;
+
+ BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
+
+ ctx = fscrypt_get_ctx(inode, GFP_NOFS);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
+ if (IS_ERR(ciphertext_page)) {
+ err = PTR_ERR(ciphertext_page);
+ goto errout;
+ }
+
+ while (len--) {
+ err = do_page_crypto(inode, FS_ENCRYPT, lblk,
+ ZERO_PAGE(0), ciphertext_page,
+ GFP_NOFS);
+ if (err)
+ goto errout;
+
+ bio = bio_alloc(GFP_NOWAIT, 1);
+ if (!bio) {
+ err = -ENOMEM;
+ goto errout;
+ }
+ bio->bi_bdev = inode->i_sb->s_bdev;
+ bio->bi_iter.bi_sector =
+ pblk << (inode->i_sb->s_blocksize_bits - 9);
+ ret = bio_add_page(bio, ciphertext_page,
+ inode->i_sb->s_blocksize, 0);
+ if (ret != inode->i_sb->s_blocksize) {
+ /* should never happen! */
+ WARN_ON(1);
+ bio_put(bio);
+ err = -EIO;
+ goto errout;
+ }
+ err = submit_bio_wait(WRITE, bio);
+ if ((err == 0) && bio->bi_error)
+ err = -EIO;
+ bio_put(bio);
+ if (err)
+ goto errout;
+ lblk++;
+ pblk++;
+ }
+ err = 0;
+errout:
+ fscrypt_release_ctx(ctx);
+ return err;
+}
+EXPORT_SYMBOL(fscrypt_zeroout_range);
+
+/*
+ * Validate dentries for encrypted directories to make sure we aren't
+ * potentially caching stale data after a key has been added or
+ * removed.
+ */
+static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+ struct dentry *dir;
+ struct fscrypt_info *ci;
+ int dir_has_key, cached_with_key;
+
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+
+ dir = dget_parent(dentry);
+ if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) {
+ dput(dir);
+ return 0;
+ }
+
+ ci = d_inode(dir)->i_crypt_info;
+ if (ci && ci->ci_keyring_key &&
+ (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED) |
+ (1 << KEY_FLAG_DEAD))))
+ ci = NULL;
+
+ /* this should eventually be a flag in d_flags */
+ spin_lock(&dentry->d_lock);
+ cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
+ spin_unlock(&dentry->d_lock);
+ dir_has_key = (ci != NULL);
+ dput(dir);
+
+ /*
+ * If the dentry was cached without the key, and it is a
+ * negative dentry, it might be a valid name. We can't check
+ * if the key has since been made available due to locking
+ * reasons, so we fail the validation so ext4_lookup() can do
+ * this check.
+ *
+ * We also fail the validation if the dentry was created with
+ * the key present, but we no longer have the key, or vice versa.
+ */
+ if ((!cached_with_key && d_is_negative(dentry)) ||
+ (!cached_with_key && dir_has_key) ||
+ (cached_with_key && !dir_has_key))
+ return 0;
+ return 1;
+}
+
+const struct dentry_operations fscrypt_d_ops = {
+ .d_revalidate = fscrypt_d_revalidate,
+};
+EXPORT_SYMBOL(fscrypt_d_ops);
+
+/*
+ * Call fscrypt_decrypt_page on every single page, reusing the encryption
+ * context.
+ */
+static void completion_pages(struct work_struct *work)
+{
+ struct fscrypt_ctx *ctx =
+ container_of(work, struct fscrypt_ctx, r.work);
+ struct bio *bio = ctx->r.bio;
+ struct bio_vec *bv;
+ int i;
+
+ bio_for_each_segment_all(bv, bio, i) {
+ struct page *page = bv->bv_page;
+ int ret = fscrypt_decrypt_page(page);
+
+ if (ret) {
+ WARN_ON_ONCE(1);
+ SetPageError(page);
+ } else {
+ SetPageUptodate(page);
+ }
+ unlock_page(page);
+ }
+ fscrypt_release_ctx(ctx);
+ bio_put(bio);
+}
+
+void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
+{
+ INIT_WORK(&ctx->r.work, completion_pages);
+ ctx->r.bio = bio;
+ queue_work(fscrypt_read_workqueue, &ctx->r.work);
+}
+EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);
+
+void fscrypt_pullback_bio_page(struct page **page, bool restore)
+{
+ struct fscrypt_ctx *ctx;
+ struct page *bounce_page;
+
+ /* Mapped pages are never bounce pages; nothing to pull back. */
+ if ((*page)->mapping)
+ return;
+
+ /* This is an unmapped bounce page; its ctx is stored in page_private. */
+ bounce_page = *page;
+ ctx = (struct fscrypt_ctx *)page_private(bounce_page);
+
+ /* restore control page */
+ *page = ctx->w.control_page;
+
+ if (restore)
+ fscrypt_restore_control_page(bounce_page);
+}
+EXPORT_SYMBOL(fscrypt_pullback_bio_page);
+
+void fscrypt_restore_control_page(struct page *page)
+{
+ struct fscrypt_ctx *ctx;
+
+ ctx = (struct fscrypt_ctx *)page_private(page);
+ set_page_private(page, (unsigned long)NULL);
+ ClearPagePrivate(page);
+ unlock_page(page);
+ fscrypt_release_ctx(ctx);
+}
+EXPORT_SYMBOL(fscrypt_restore_control_page);
+
+static void fscrypt_destroy(void)
+{
+ struct fscrypt_ctx *pos, *n;
+
+ list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
+ kmem_cache_free(fscrypt_ctx_cachep, pos);
+ INIT_LIST_HEAD(&fscrypt_free_ctxs);
+ mempool_destroy(fscrypt_bounce_page_pool);
+ fscrypt_bounce_page_pool = NULL;
+}
+
+/**
+ * fscrypt_initialize() - allocate major buffers for fs encryption.
+ *
+ * We only call this when we start accessing encrypted files, since it
+ * results in memory getting allocated that wouldn't otherwise be used.
+ *
+ * Return: Zero on success, non-zero otherwise.
+ */
+int fscrypt_initialize(void)
+{
+ int i, res = -ENOMEM;
+
+ if (fscrypt_bounce_page_pool)
+ return 0;
+
+ mutex_lock(&fscrypt_init_mutex);
+ if (fscrypt_bounce_page_pool)
+ goto already_initialized;
+
+ for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
+ struct fscrypt_ctx *ctx;
+
+ ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
+ if (!ctx)
+ goto fail;
+ list_add(&ctx->free_list, &fscrypt_free_ctxs);
+ }
+
+ fscrypt_bounce_page_pool =
+ mempool_create_page_pool(num_prealloc_crypto_pages, 0);
+ if (!fscrypt_bounce_page_pool)
+ goto fail;
+
+already_initialized:
+ mutex_unlock(&fscrypt_init_mutex);
+ return 0;
+fail:
+ fscrypt_destroy();
+ mutex_unlock(&fscrypt_init_mutex);
+ return res;
+}
+EXPORT_SYMBOL(fscrypt_initialize);
+
+/**
+ * fscrypt_init() - Set up for fs encryption.
+ */
+static int __init fscrypt_init(void)
+{
+ fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
+ WQ_HIGHPRI, 0);
+ if (!fscrypt_read_workqueue)
+ goto fail;
+
+ fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
+ if (!fscrypt_ctx_cachep)
+ goto fail_free_queue;
+
+ fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
+ if (!fscrypt_info_cachep)
+ goto fail_free_ctx;
+
+ return 0;
+
+fail_free_ctx:
+ kmem_cache_destroy(fscrypt_ctx_cachep);
+fail_free_queue:
+ destroy_workqueue(fscrypt_read_workqueue);
+fail:
+ return -ENOMEM;
+}
+module_init(fscrypt_init)
+
+/**
+ * fscrypt_exit() - Shutdown the fs encryption system
+ */
+static void __exit fscrypt_exit(void)
+{
+ fscrypt_destroy();
+
+ if (fscrypt_read_workqueue)
+ destroy_workqueue(fscrypt_read_workqueue);
+ kmem_cache_destroy(fscrypt_ctx_cachep);
+ kmem_cache_destroy(fscrypt_info_cachep);
+}
+module_exit(fscrypt_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
new file mode 100644
index 000000000000..9b774f4b50c8
--- /dev/null
+++ b/fs/crypto/fname.c
@@ -0,0 +1,414 @@
+/*
+ * This contains functions for filename crypto management
+ *
+ * Copyright (C) 2015, Google, Inc.
+ * Copyright (C) 2015, Motorola Mobility
+ *
+ * Written by Uday Savagaonkar, 2014.
+ * Modified by Jaegeuk Kim, 2015.
+ *
+ * This has not yet undergone a rigorous security audit.
+ */
+
+#include <linux/scatterlist.h>
+#include <linux/ratelimit.h>
+#include <linux/fscrypto.h>
+
+/**
+ * fname_crypt_complete() - completion callback for filename crypto
+ * @req: The asynchronous cipher request context
+ * @res: The result of the cipher operation
+ */
+static void fname_crypt_complete(struct crypto_async_request *req, int res)
+{
+ struct fscrypt_completion_result *ecr = req->data;
+
+ if (res == -EINPROGRESS)
+ return;
+ ecr->res = res;
+ complete(&ecr->completion);
+}
+
+/**
+ * fname_encrypt() - encrypt a filename
+ *
+ * The caller must have allocated sufficient memory for the @oname string.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int fname_encrypt(struct inode *inode,
+ const struct qstr *iname, struct fscrypt_str *oname)
+{
+ struct skcipher_request *req = NULL;
+ DECLARE_FS_COMPLETION_RESULT(ecr);
+ struct fscrypt_info *ci = inode->i_crypt_info;
+ struct crypto_skcipher *tfm = ci->ci_ctfm;
+ int res = 0;
+ char iv[FS_CRYPTO_BLOCK_SIZE];
+ struct scatterlist sg;
+ int padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK);
+ unsigned int lim;
+ unsigned int cryptlen;
+
+ lim = inode->i_sb->s_cop->max_namelen(inode);
+ if (iname->len <= 0 || iname->len > lim)
+ return -EIO;
+
+ /*
+ * Copy the filename to the output buffer for encrypting in-place and
+ * pad it with the needed number of NUL bytes.
+ */
+ cryptlen = max_t(unsigned int, iname->len, FS_CRYPTO_BLOCK_SIZE);
+ cryptlen = round_up(cryptlen, padding);
+ cryptlen = min(cryptlen, lim);
+ memcpy(oname->name, iname->name, iname->len);
+ memset(oname->name + iname->len, 0, cryptlen - iname->len);
+
+ /* Initialize the IV */
+ memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);
+
+ /* Set up the encryption request */
+ req = skcipher_request_alloc(tfm, GFP_NOFS);
+ if (!req) {
+ printk_ratelimited(KERN_ERR
+ "%s: skcipher_request_alloc() failed\n", __func__);
+ return -ENOMEM;
+ }
+ skcipher_request_set_callback(req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ fname_crypt_complete, &ecr);
+ sg_init_one(&sg, oname->name, cryptlen);
+ skcipher_request_set_crypt(req, &sg, &sg, cryptlen, iv);
+
+ /* Do the encryption */
+ res = crypto_skcipher_encrypt(req);
+ if (res == -EINPROGRESS || res == -EBUSY) {
+ /* Request is being completed asynchronously; wait for it */
+ wait_for_completion(&ecr.completion);
+ res = ecr.res;
+ }
+ skcipher_request_free(req);
+ if (res < 0) {
+ printk_ratelimited(KERN_ERR
+ "%s: Error (error code %d)\n", __func__, res);
+ return res;
+ }
+
+ oname->len = cryptlen;
+ return 0;
+}
+
+/**
+ * fname_decrypt() - decrypt a filename
+ *
+ * The caller must have allocated sufficient memory for the @oname string.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int fname_decrypt(struct inode *inode,
+ const struct fscrypt_str *iname,
+ struct fscrypt_str *oname)
+{
+ struct skcipher_request *req = NULL;
+ DECLARE_FS_COMPLETION_RESULT(ecr);
+ struct scatterlist src_sg, dst_sg;
+ struct fscrypt_info *ci = inode->i_crypt_info;
+ struct crypto_skcipher *tfm = ci->ci_ctfm;
+ int res = 0;
+ char iv[FS_CRYPTO_BLOCK_SIZE];
+ unsigned lim;
+
+ lim = inode->i_sb->s_cop->max_namelen(inode);
+ if (iname->len <= 0 || iname->len > lim)
+ return -EIO;
+
+ /* Allocate request */
+ req = skcipher_request_alloc(tfm, GFP_NOFS);
+ if (!req) {
+ printk_ratelimited(KERN_ERR
+ "%s: crypto_request_alloc() failed\n", __func__);
+ return -ENOMEM;
+ }
+ skcipher_request_set_callback(req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ fname_crypt_complete, &ecr);
+
+ /* Initialize IV */
+ memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);
+
+ /* Create decryption request */
+ sg_init_one(&src_sg, iname->name, iname->len);
+ sg_init_one(&dst_sg, oname->name, oname->len);
+ skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
+ res = crypto_skcipher_decrypt(req);
+ if (res == -EINPROGRESS || res == -EBUSY) {
+ wait_for_completion(&ecr.completion);
+ res = ecr.res;
+ }
+ skcipher_request_free(req);
+ if (res < 0) {
+ printk_ratelimited(KERN_ERR
+ "%s: Error (error code %d)\n", __func__, res);
+ return res;
+ }
+
+ oname->len = strnlen(oname->name, iname->len);
+ return 0;
+}
+
+static const char *lookup_table =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";
+
+/**
+ * digest_encode() - encode a binary digest as printable characters
+ *
+ * Encodes the input digest using characters from the set [A-Za-z0-9+,].
+ * The encoded string is roughly 4/3 times the size of the input string.
+ */
+static int digest_encode(const char *src, int len, char *dst)
+{
+ int i = 0, bits = 0, ac = 0;
+ char *cp = dst;
+
+ while (i < len) {
+ ac += (((unsigned char) src[i]) << bits);
+ bits += 8;
+ do {
+ *cp++ = lookup_table[ac & 0x3f];
+ ac >>= 6;
+ bits -= 6;
+ } while (bits >= 6);
+ i++;
+ }
+ if (bits)
+ *cp++ = lookup_table[ac & 0x3f];
+ return cp - dst;
+}
+
+static int digest_decode(const char *src, int len, char *dst)
+{
+ int i = 0, bits = 0, ac = 0;
+ const char *p;
+ char *cp = dst;
+
+ while (i < len) {
+ p = strchr(lookup_table, src[i]);
+ if (p == NULL || src[i] == 0)
+ return -2;
+ ac += (p - lookup_table) << bits;
+ bits += 6;
+ if (bits >= 8) {
+ *cp++ = ac & 0xff;
+ ac >>= 8;
+ bits -= 8;
+ }
+ i++;
+ }
+ if (ac)
+ return -1;
+ return cp - dst;
+}
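
digest_encode()/digest_decode() form a base64 variant, filling each output character from the low bits first and using ',' where standard base64 has '/' (presumably because '/' is illegal in filenames), with no '=' padding. A self-contained roundtrip check, with the table and both loops copied from this file so the behaviour matches:

#include <stdio.h>
#include <string.h>

static const char *tbl =
	"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";

static int enc(const char *src, int len, char *dst)
{
	int i = 0, bits = 0, ac = 0;
	char *cp = dst;

	while (i < len) {
		ac += ((unsigned char)src[i]) << bits;
		bits += 8;
		do {
			*cp++ = tbl[ac & 0x3f];
			ac >>= 6;
			bits -= 6;
		} while (bits >= 6);
		i++;
	}
	if (bits)
		*cp++ = tbl[ac & 0x3f];
	return cp - dst;
}

static int dec(const char *src, int len, char *dst)
{
	int i = 0, bits = 0, ac = 0;
	const char *p;
	char *cp = dst;

	while (i < len) {
		p = strchr(tbl, src[i]);
		if (!p || !src[i])
			return -2;
		ac += (int)(p - tbl) << bits;
		bits += 6;
		if (bits >= 8) {
			*cp++ = ac & 0xff;
			ac >>= 8;
			bits -= 8;
		}
		i++;
	}
	return ac ? -1 : (int)(cp - dst);
}

int main(void)
{
	char out[16], back[16];
	int n = enc("\x12\x34\x56", 3, out);

	printf("3 bytes -> %d chars: %.*s\n", n, n, out);	  /* 4 chars */
	printf("decoded back to %d bytes\n", dec(out, n, back)); /* 3 bytes */
	return 0;
}
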
+
+u32 fscrypt_fname_encrypted_size(struct inode *inode, u32 ilen)
+{
+ int padding = 32;
+ struct fscrypt_info *ci = inode->i_crypt_info;
+
+ if (ci)
+ padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK);
+ ilen = max(ilen, (u32)FS_CRYPTO_BLOCK_SIZE);
+ return round_up(ilen, padding);
+}
+EXPORT_SYMBOL(fscrypt_fname_encrypted_size);
+
+/**
+ * fscrypt_fname_alloc_buffer() -
+ *
+ * Allocates an output buffer that is sufficient for the crypto operation
+ * specified by the context and the direction.
+ */
+int fscrypt_fname_alloc_buffer(struct inode *inode,
+ u32 ilen, struct fscrypt_str *crypto_str)
+{
+ unsigned int olen = fscrypt_fname_encrypted_size(inode, ilen);
+
+ crypto_str->len = olen;
+ if (olen < FS_FNAME_CRYPTO_DIGEST_SIZE * 2)
+ olen = FS_FNAME_CRYPTO_DIGEST_SIZE * 2;
+ /*
+ * The allocated buffer holds one extra byte so the string can be
+ * NUL-terminated
+ */
+ crypto_str->name = kmalloc(olen + 1, GFP_NOFS);
+ if (!(crypto_str->name))
+ return -ENOMEM;
+ return 0;
+}
+EXPORT_SYMBOL(fscrypt_fname_alloc_buffer);
+
+/**
+ * fscrypt_fname_free_buffer() -
+ *
+ * Frees the buffer allocated for crypto operation.
+ */
+void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
+{
+ if (!crypto_str)
+ return;
+ kfree(crypto_str->name);
+ crypto_str->name = NULL;
+}
+EXPORT_SYMBOL(fscrypt_fname_free_buffer);
+
+/**
+ * fscrypt_fname_disk_to_usr() - converts a filename from disk space to user
+ * space
+ *
+ * The caller must have allocated sufficient memory for the @oname string.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fscrypt_fname_disk_to_usr(struct inode *inode,
+ u32 hash, u32 minor_hash,
+ const struct fscrypt_str *iname,
+ struct fscrypt_str *oname)
+{
+ const struct qstr qname = FSTR_TO_QSTR(iname);
+ char buf[24];
+
+ if (fscrypt_is_dot_dotdot(&qname)) {
+ oname->name[0] = '.';
+ oname->name[iname->len - 1] = '.';
+ oname->len = iname->len;
+ return 0;
+ }
+
+ if (iname->len < FS_CRYPTO_BLOCK_SIZE)
+ return -EUCLEAN;
+
+ if (inode->i_crypt_info)
+ return fname_decrypt(inode, iname, oname);
+
+ if (iname->len <= FS_FNAME_CRYPTO_DIGEST_SIZE) {
+ oname->len = digest_encode(iname->name, iname->len,
+ oname->name);
+ return 0;
+ }
+ if (hash) {
+ memcpy(buf, &hash, 4);
+ memcpy(buf + 4, &minor_hash, 4);
+ } else {
+ memset(buf, 0, 8);
+ }
+ memcpy(buf + 8, iname->name + iname->len - 16, 16);
+ oname->name[0] = '_';
+ oname->len = 1 + digest_encode(buf, 24, oname->name + 1);
+ return 0;
+}
+EXPORT_SYMBOL(fscrypt_fname_disk_to_usr);
+
+/**
+ * fscrypt_fname_usr_to_disk() - converts a filename from user space to disk
+ * space
+ *
+ * The caller must have allocated sufficient memory for the @oname string.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int fscrypt_fname_usr_to_disk(struct inode *inode,
+ const struct qstr *iname,
+ struct fscrypt_str *oname)
+{
+ if (fscrypt_is_dot_dotdot(iname)) {
+ oname->name[0] = '.';
+ oname->name[iname->len - 1] = '.';
+ oname->len = iname->len;
+ return 0;
+ }
+ if (inode->i_crypt_info)
+ return fname_encrypt(inode, iname, oname);
+ /*
+ * Without a proper key, a user is not allowed to modify the filenames
+ * in a directory. Consequently, a user space name cannot be mapped to
+ * a disk-space name.
+ */
+ return -EACCES;
+}
+EXPORT_SYMBOL(fscrypt_fname_usr_to_disk);
+
+int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
+ int lookup, struct fscrypt_name *fname)
+{
+ int ret = 0, bigname = 0;
+
+ memset(fname, 0, sizeof(struct fscrypt_name));
+ fname->usr_fname = iname;
+
+ if (!dir->i_sb->s_cop->is_encrypted(dir) ||
+ fscrypt_is_dot_dotdot(iname)) {
+ fname->disk_name.name = (unsigned char *)iname->name;
+ fname->disk_name.len = iname->len;
+ return 0;
+ }
+ ret = get_crypt_info(dir);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
+ if (dir->i_crypt_info) {
+ ret = fscrypt_fname_alloc_buffer(dir, iname->len,
+ &fname->crypto_buf);
+ if (ret)
+ return ret;
+ ret = fname_encrypt(dir, iname, &fname->crypto_buf);
+ if (ret)
+ goto errout;
+ fname->disk_name.name = fname->crypto_buf.name;
+ fname->disk_name.len = fname->crypto_buf.len;
+ return 0;
+ }
+ if (!lookup)
+ return -EACCES;
+
+ /*
+ * We don't have the key and we are doing a lookup; decode the
+ * user-supplied name
+ */
+ if (iname->name[0] == '_')
+ bigname = 1;
+ if ((bigname && (iname->len != 33)) || (!bigname && (iname->len > 43)))
+ return -ENOENT;
+
+ fname->crypto_buf.name = kmalloc(32, GFP_KERNEL);
+ if (fname->crypto_buf.name == NULL)
+ return -ENOMEM;
+
+ ret = digest_decode(iname->name + bigname, iname->len - bigname,
+ fname->crypto_buf.name);
+ if (ret < 0) {
+ ret = -ENOENT;
+ goto errout;
+ }
+ fname->crypto_buf.len = ret;
+ if (bigname) {
+ memcpy(&fname->hash, fname->crypto_buf.name, 4);
+ memcpy(&fname->minor_hash, fname->crypto_buf.name + 4, 4);
+ } else {
+ fname->disk_name.name = fname->crypto_buf.name;
+ fname->disk_name.len = fname->crypto_buf.len;
+ }
+ return 0;
+
+errout:
+ fscrypt_fname_free_buffer(&fname->crypto_buf);
+ return ret;
+}
+EXPORT_SYMBOL(fscrypt_setup_filename);
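
The 33/43 limits in the keyless-lookup branch appear to follow directly from the codec: digest_encode() emits ceil(8n/6) characters for n bytes, so a big name is one '_' plus the 32-character encoding of the 24-byte buf, i.e. 33; and 43 characters is the longest input whose decode (floor(6n/8) bytes) still fits the 32-byte crypto_buf.name allocation. A quick check of that arithmetic:

#include <assert.h>

static int enc_len(int nbytes) { return (nbytes * 8 + 5) / 6; }
static int dec_len(int nchars) { return (nchars * 6) / 8; }

int main(void)
{
	assert(1 + enc_len(24) == 33);	/* '_' + encoded 24-byte digest */
	assert(dec_len(43) == 32);	/* still fits the 32-byte buffer */
	assert(dec_len(44) == 33);	/* one more char would overflow */
	return 0;
}
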
+
+void fscrypt_free_filename(struct fscrypt_name *fname)
+{
+ kfree(fname->crypto_buf.name);
+ fname->crypto_buf.name = NULL;
+ fname->usr_fname = NULL;
+ fname->disk_name.name = NULL;
+}
+EXPORT_SYMBOL(fscrypt_free_filename);
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
new file mode 100644
index 000000000000..67fb6d8876d0
--- /dev/null
+++ b/fs/crypto/keyinfo.c
@@ -0,0 +1,333 @@
+/*
+ * key management facility for FS encryption support.
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * This contains encryption key functions.
+ *
+ * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015.
+ */
+
+#include <keys/user-type.h>
+#include <linux/scatterlist.h>
+#include <linux/fscrypto.h>
+
+static void derive_crypt_complete(struct crypto_async_request *req, int rc)
+{
+ struct fscrypt_completion_result *ecr = req->data;
+
+ if (rc == -EINPROGRESS)
+ return;
+
+ ecr->res = rc;
+ complete(&ecr->completion);
+}
+
+/**
+ * derive_key_aes() - Derive a key using AES-128-ECB
+ * @deriving_key: Encryption key used for derivation.
+ * @source_key: Source key to which to apply derivation.
+ * @derived_key: Derived key.
+ *
+ * Return: Zero on success; non-zero otherwise.
+ */
+static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE],
+ u8 source_key[FS_AES_256_XTS_KEY_SIZE],
+ u8 derived_key[FS_AES_256_XTS_KEY_SIZE])
+{
+ int res = 0;
+ struct skcipher_request *req = NULL;
+ DECLARE_FS_COMPLETION_RESULT(ecr);
+ struct scatterlist src_sg, dst_sg;
+ struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
+
+ if (IS_ERR(tfm)) {
+ res = PTR_ERR(tfm);
+ tfm = NULL;
+ goto out;
+ }
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+ req = skcipher_request_alloc(tfm, GFP_NOFS);
+ if (!req) {
+ res = -ENOMEM;
+ goto out;
+ }
+ skcipher_request_set_callback(req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ derive_crypt_complete, &ecr);
+ res = crypto_skcipher_setkey(tfm, deriving_key,
+ FS_AES_128_ECB_KEY_SIZE);
+ if (res < 0)
+ goto out;
+
+ sg_init_one(&src_sg, source_key, FS_AES_256_XTS_KEY_SIZE);
+ sg_init_one(&dst_sg, derived_key, FS_AES_256_XTS_KEY_SIZE);
+ skcipher_request_set_crypt(req, &src_sg, &dst_sg,
+ FS_AES_256_XTS_KEY_SIZE, NULL);
+ res = crypto_skcipher_encrypt(req);
+ if (res == -EINPROGRESS || res == -EBUSY) {
+ wait_for_completion(&ecr.completion);
+ res = ecr.res;
+ }
+out:
+ skcipher_request_free(req);
+ crypto_free_skcipher(tfm);
+ return res;
+}
+
+static int validate_user_key(struct fscrypt_info *crypt_info,
+ struct fscrypt_context *ctx, u8 *raw_key,
+ u8 *prefix, int prefix_size)
+{
+ u8 *full_key_descriptor;
+ struct key *keyring_key;
+ struct fscrypt_key *master_key;
+ const struct user_key_payload *ukp;
+ int full_key_len = prefix_size + (FS_KEY_DESCRIPTOR_SIZE * 2) + 1;
+ int res;
+
+ full_key_descriptor = kmalloc(full_key_len, GFP_NOFS);
+ if (!full_key_descriptor)
+ return -ENOMEM;
+
+ memcpy(full_key_descriptor, prefix, prefix_size);
+ sprintf(full_key_descriptor + prefix_size,
+ "%*phN", FS_KEY_DESCRIPTOR_SIZE,
+ ctx->master_key_descriptor);
+ full_key_descriptor[full_key_len - 1] = '\0';
+ keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL);
+ kfree(full_key_descriptor);
+ if (IS_ERR(keyring_key))
+ return PTR_ERR(keyring_key);
+
+ if (keyring_key->type != &key_type_logon) {
+ printk_once(KERN_WARNING
+ "%s: key type must be logon\n", __func__);
+ res = -ENOKEY;
+ goto out;
+ }
+ down_read(&keyring_key->sem);
+ ukp = user_key_payload(keyring_key);
+ if (ukp->datalen != sizeof(struct fscrypt_key)) {
+ res = -EINVAL;
+ up_read(&keyring_key->sem);
+ goto out;
+ }
+ master_key = (struct fscrypt_key *)ukp->data;
+ BUILD_BUG_ON(FS_AES_128_ECB_KEY_SIZE != FS_KEY_DERIVATION_NONCE_SIZE);
+
+ if (master_key->size != FS_AES_256_XTS_KEY_SIZE) {
+ printk_once(KERN_WARNING
+ "%s: key size incorrect: %d\n",
+ __func__, master_key->size);
+ res = -ENOKEY;
+ up_read(&keyring_key->sem);
+ goto out;
+ }
+ res = derive_key_aes(ctx->nonce, master_key->raw, raw_key);
+ up_read(&keyring_key->sem);
+ if (res)
+ goto out;
+
+ crypt_info->ci_keyring_key = keyring_key;
+ return 0;
+out:
+ key_put(keyring_key);
+ return res;
+}
+
+static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode,
+ const char **cipher_str_ret, int *keysize_ret)
+{
+ if (S_ISREG(inode->i_mode)) {
+ if (ci->ci_data_mode == FS_ENCRYPTION_MODE_AES_256_XTS) {
+ *cipher_str_ret = "xts(aes)";
+ *keysize_ret = FS_AES_256_XTS_KEY_SIZE;
+ return 0;
+ }
+ pr_warn_once("fscrypto: unsupported contents encryption mode "
+ "%d for inode %lu\n",
+ ci->ci_data_mode, inode->i_ino);
+ return -ENOKEY;
+ }
+
+ if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) {
+ if (ci->ci_filename_mode == FS_ENCRYPTION_MODE_AES_256_CTS) {
+ *cipher_str_ret = "cts(cbc(aes))";
+ *keysize_ret = FS_AES_256_CTS_KEY_SIZE;
+ return 0;
+ }
+ pr_warn_once("fscrypto: unsupported filenames encryption mode "
+ "%d for inode %lu\n",
+ ci->ci_filename_mode, inode->i_ino);
+ return -ENOKEY;
+ }
+
+ pr_warn_once("fscrypto: unsupported file type %d for inode %lu\n",
+ (inode->i_mode & S_IFMT), inode->i_ino);
+ return -ENOKEY;
+}
+
+static void put_crypt_info(struct fscrypt_info *ci)
+{
+ if (!ci)
+ return;
+
+ key_put(ci->ci_keyring_key);
+ crypto_free_skcipher(ci->ci_ctfm);
+ kmem_cache_free(fscrypt_info_cachep, ci);
+}
+
+int get_crypt_info(struct inode *inode)
+{
+ struct fscrypt_info *crypt_info;
+ struct fscrypt_context ctx;
+ struct crypto_skcipher *ctfm;
+ const char *cipher_str;
+ int keysize;
+ u8 *raw_key = NULL;
+ int res;
+
+ res = fscrypt_initialize();
+ if (res)
+ return res;
+
+ if (!inode->i_sb->s_cop->get_context)
+ return -EOPNOTSUPP;
+retry:
+ crypt_info = ACCESS_ONCE(inode->i_crypt_info);
+ if (crypt_info) {
+ if (!crypt_info->ci_keyring_key ||
+ key_validate(crypt_info->ci_keyring_key) == 0)
+ return 0;
+ fscrypt_put_encryption_info(inode, crypt_info);
+ goto retry;
+ }
+
+ res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
+ if (res < 0) {
+ if (!fscrypt_dummy_context_enabled(inode))
+ return res;
+ ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
+ ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
+ ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
+ ctx.flags = 0;
+ } else if (res != sizeof(ctx)) {
+ return -EINVAL;
+ }
+
+ if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
+ return -EINVAL;
+
+ if (ctx.flags & ~FS_POLICY_FLAGS_VALID)
+ return -EINVAL;
+
+ crypt_info = kmem_cache_alloc(fscrypt_info_cachep, GFP_NOFS);
+ if (!crypt_info)
+ return -ENOMEM;
+
+ crypt_info->ci_flags = ctx.flags;
+ crypt_info->ci_data_mode = ctx.contents_encryption_mode;
+ crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
+ crypt_info->ci_ctfm = NULL;
+ crypt_info->ci_keyring_key = NULL;
+ memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
+ sizeof(crypt_info->ci_master_key));
+
+ res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize);
+ if (res)
+ goto out;
+
+ /*
+ * This cannot be a stack buffer because it is passed to the scatterlist
+ * crypto API as part of key derivation.
+ */
+ res = -ENOMEM;
+ raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS);
+ if (!raw_key)
+ goto out;
+
+ if (fscrypt_dummy_context_enabled(inode)) {
+ memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
+ goto got_key;
+ }
+
+ res = validate_user_key(crypt_info, &ctx, raw_key,
+ FS_KEY_DESC_PREFIX, FS_KEY_DESC_PREFIX_SIZE);
+ if (res && inode->i_sb->s_cop->key_prefix) {
+ u8 *prefix = NULL;
+ int prefix_size, res2;
+
+ prefix_size = inode->i_sb->s_cop->key_prefix(inode, &prefix);
+ res2 = validate_user_key(crypt_info, &ctx, raw_key,
+ prefix, prefix_size);
+ if (res2) {
+ if (res2 == -ENOKEY)
+ res = -ENOKEY;
+ goto out;
+ }
+ } else if (res) {
+ goto out;
+ }
+got_key:
+ ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
+ if (!ctfm || IS_ERR(ctfm)) {
+ res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
+ printk(KERN_DEBUG
+ "%s: error %d (inode %u) allocating crypto tfm\n",
+ __func__, res, (unsigned) inode->i_ino);
+ goto out;
+ }
+ crypt_info->ci_ctfm = ctfm;
+ crypto_skcipher_clear_flags(ctfm, ~0);
+ crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY);
+ res = crypto_skcipher_setkey(ctfm, raw_key, keysize);
+ if (res)
+ goto out;
+
+ kzfree(raw_key);
+ raw_key = NULL;
+ if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) {
+ put_crypt_info(crypt_info);
+ goto retry;
+ }
+ return 0;
+
+out:
+ if (res == -ENOKEY)
+ res = 0;
+ put_crypt_info(crypt_info);
+ kzfree(raw_key);
+ return res;
+}
+
+void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci)
+{
+ struct fscrypt_info *prev;
+
+ if (ci == NULL)
+ ci = ACCESS_ONCE(inode->i_crypt_info);
+ if (ci == NULL)
+ return;
+
+ prev = cmpxchg(&inode->i_crypt_info, ci, NULL);
+ if (prev != ci)
+ return;
+
+ put_crypt_info(ci);
+}
+EXPORT_SYMBOL(fscrypt_put_encryption_info);
+
+int fscrypt_get_encryption_info(struct inode *inode)
+{
+ struct fscrypt_info *ci = inode->i_crypt_info;
+
+ if (!ci ||
+ (ci->ci_keyring_key &&
+ (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED) |
+ (1 << KEY_FLAG_DEAD)))))
+ return get_crypt_info(inode);
+ return 0;
+}
+EXPORT_SYMBOL(fscrypt_get_encryption_info);
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
new file mode 100644
index 000000000000..6865663aac69
--- /dev/null
+++ b/fs/crypto/policy.c
@@ -0,0 +1,250 @@
+/*
+ * Encryption policy functions for per-file encryption support.
+ *
+ * Copyright (C) 2015, Google, Inc.
+ * Copyright (C) 2015, Motorola Mobility.
+ *
+ * Written by Michael Halcrow, 2015.
+ * Modified by Jaegeuk Kim, 2015.
+ */
+
+#include <linux/random.h>
+#include <linux/string.h>
+#include <linux/fscrypto.h>
+#include <linux/mount.h>
+
+static int inode_has_encryption_context(struct inode *inode)
+{
+ if (!inode->i_sb->s_cop->get_context)
+ return 0;
+ return (inode->i_sb->s_cop->get_context(inode, NULL, 0L) > 0);
+}
+
+/*
+ * check whether the policy is consistent with the encryption context
+ * for the inode
+ */
+static int is_encryption_context_consistent_with_policy(struct inode *inode,
+ const struct fscrypt_policy *policy)
+{
+ struct fscrypt_context ctx;
+ int res;
+
+ if (!inode->i_sb->s_cop->get_context)
+ return 0;
+
+ res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
+ if (res != sizeof(ctx))
+ return 0;
+
+ return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor,
+ FS_KEY_DESCRIPTOR_SIZE) == 0 &&
+ (ctx.flags == policy->flags) &&
+ (ctx.contents_encryption_mode ==
+ policy->contents_encryption_mode) &&
+ (ctx.filenames_encryption_mode ==
+ policy->filenames_encryption_mode));
+}
+
+static int create_encryption_context_from_policy(struct inode *inode,
+ const struct fscrypt_policy *policy)
+{
+ struct fscrypt_context ctx;
+ int res;
+
+ if (!inode->i_sb->s_cop->set_context)
+ return -EOPNOTSUPP;
+
+ if (inode->i_sb->s_cop->prepare_context) {
+ res = inode->i_sb->s_cop->prepare_context(inode);
+ if (res)
+ return res;
+ }
+
+ ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
+ memcpy(ctx.master_key_descriptor, policy->master_key_descriptor,
+ FS_KEY_DESCRIPTOR_SIZE);
+
+ if (!fscrypt_valid_contents_enc_mode(
+ policy->contents_encryption_mode)) {
+ printk(KERN_WARNING
+ "%s: Invalid contents encryption mode %d\n", __func__,
+ policy->contents_encryption_mode);
+ return -EINVAL;
+ }
+
+ if (!fscrypt_valid_filenames_enc_mode(
+ policy->filenames_encryption_mode)) {
+ printk(KERN_WARNING
+ "%s: Invalid filenames encryption mode %d\n", __func__,
+ policy->filenames_encryption_mode);
+ return -EINVAL;
+ }
+
+ if (policy->flags & ~FS_POLICY_FLAGS_VALID)
+ return -EINVAL;
+
+ ctx.contents_encryption_mode = policy->contents_encryption_mode;
+ ctx.filenames_encryption_mode = policy->filenames_encryption_mode;
+ ctx.flags = policy->flags;
+ BUILD_BUG_ON(sizeof(ctx.nonce) != FS_KEY_DERIVATION_NONCE_SIZE);
+ get_random_bytes(ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE);
+
+ return inode->i_sb->s_cop->set_context(inode, &ctx, sizeof(ctx), NULL);
+}
+
+int fscrypt_process_policy(struct file *filp,
+ const struct fscrypt_policy *policy)
+{
+ struct inode *inode = file_inode(filp);
+ int ret;
+
+ if (!inode_owner_or_capable(inode))
+ return -EACCES;
+
+ if (policy->version != 0)
+ return -EINVAL;
+
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ inode_lock(inode);
+
+ if (!inode_has_encryption_context(inode)) {
+ if (!S_ISDIR(inode->i_mode))
+ ret = -EINVAL;
+ else if (!inode->i_sb->s_cop->empty_dir)
+ ret = -EOPNOTSUPP;
+ else if (!inode->i_sb->s_cop->empty_dir(inode))
+ ret = -ENOTEMPTY;
+ else
+ ret = create_encryption_context_from_policy(inode,
+ policy);
+ } else if (!is_encryption_context_consistent_with_policy(inode,
+ policy)) {
+ printk(KERN_WARNING
+ "%s: Policy inconsistent with encryption context\n",
+ __func__);
+ ret = -EINVAL;
+ }
+
+ inode_unlock(inode);
+
+ mnt_drop_write_file(filp);
+ return ret;
+}
+EXPORT_SYMBOL(fscrypt_process_policy);
+
+int fscrypt_get_policy(struct inode *inode, struct fscrypt_policy *policy)
+{
+ struct fscrypt_context ctx;
+ int res;
+
+ if (!inode->i_sb->s_cop->get_context ||
+ !inode->i_sb->s_cop->is_encrypted(inode))
+ return -ENODATA;
+
+ res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
+ if (res != sizeof(ctx))
+ return -ENODATA;
+ if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
+ return -EINVAL;
+
+ policy->version = 0;
+ policy->contents_encryption_mode = ctx.contents_encryption_mode;
+ policy->filenames_encryption_mode = ctx.filenames_encryption_mode;
+ policy->flags = ctx.flags;
+ memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor,
+ FS_KEY_DESCRIPTOR_SIZE);
+ return 0;
+}
+EXPORT_SYMBOL(fscrypt_get_policy);
+
+int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
+{
+ struct fscrypt_info *parent_ci, *child_ci;
+ int res;
+
+ if ((parent == NULL) || (child == NULL)) {
+ printk(KERN_ERR "parent %p child %p\n", parent, child);
+ BUG_ON(1);
+ }
+
+ /* no restrictions if the parent directory is not encrypted */
+ if (!parent->i_sb->s_cop->is_encrypted(parent))
+ return 1;
+ /* if the child directory is not encrypted, this is always a problem */
+ if (!parent->i_sb->s_cop->is_encrypted(child))
+ return 0;
+ res = fscrypt_get_encryption_info(parent);
+ if (res)
+ return 0;
+ res = fscrypt_get_encryption_info(child);
+ if (res)
+ return 0;
+ parent_ci = parent->i_crypt_info;
+ child_ci = child->i_crypt_info;
+ if (!parent_ci && !child_ci)
+ return 1;
+ if (!parent_ci || !child_ci)
+ return 0;
+
+ return (memcmp(parent_ci->ci_master_key,
+ child_ci->ci_master_key,
+ FS_KEY_DESCRIPTOR_SIZE) == 0 &&
+ (parent_ci->ci_data_mode == child_ci->ci_data_mode) &&
+ (parent_ci->ci_filename_mode == child_ci->ci_filename_mode) &&
+ (parent_ci->ci_flags == child_ci->ci_flags));
+}
+EXPORT_SYMBOL(fscrypt_has_permitted_context);
+
+/**
+ * fscrypt_inherit_context() - Sets a child context from its parent
+ * @parent: Parent inode from which the context is inherited.
+ * @child: Child inode that inherits the context from @parent.
+ * @fs_data: private data given by FS.
+ * @preload: preload child i_crypt_info
+ *
+ * Return: Zero on success, non-zero otherwise
+ */
+int fscrypt_inherit_context(struct inode *parent, struct inode *child,
+ void *fs_data, bool preload)
+{
+ struct fscrypt_context ctx;
+ struct fscrypt_info *ci;
+ int res;
+
+ if (!parent->i_sb->s_cop->set_context)
+ return -EOPNOTSUPP;
+
+ res = fscrypt_get_encryption_info(parent);
+ if (res < 0)
+ return res;
+
+ ci = parent->i_crypt_info;
+ if (ci == NULL)
+ return -ENOKEY;
+
+ ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
+ if (fscrypt_dummy_context_enabled(parent)) {
+ ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
+ ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
+ ctx.flags = 0;
+ memset(ctx.master_key_descriptor, 0x42, FS_KEY_DESCRIPTOR_SIZE);
+ res = 0;
+ } else {
+ ctx.contents_encryption_mode = ci->ci_data_mode;
+ ctx.filenames_encryption_mode = ci->ci_filename_mode;
+ ctx.flags = ci->ci_flags;
+ memcpy(ctx.master_key_descriptor, ci->ci_master_key,
+ FS_KEY_DESCRIPTOR_SIZE);
+ }
+ get_random_bytes(ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE);
+ res = parent->i_sb->s_cop->set_context(child, &ctx,
+ sizeof(ctx), fs_data);
+ if (res)
+ return res;
+ return preload ? fscrypt_get_encryption_info(child) : 0;
+}
+EXPORT_SYMBOL(fscrypt_inherit_context);
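fscrypt_inherit_context() is the piece a filesystem calls when creating an object inside an encrypted directory. A hedged sketch of such a call site, assuming a mkdir-style path; example_mkdir() and passing NULL as fs_data are illustrative assumptions.

static int example_mkdir(struct inode *dir, struct inode *child)
{
        if (!dir->i_sb->s_cop->is_encrypted(dir))
                return 0;

        /*
         * Copies mode/flags/master key descriptor from @dir, generates
         * a fresh per-inode nonce, and preloads the child's
         * i_crypt_info so directory entries can be encrypted at once.
         */
        return fscrypt_inherit_context(dir, child, NULL, true);
}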
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 173b3873a4f4..e40c440a4555 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -355,6 +355,10 @@ static int dlm_device_register(struct dlm_ls *ls, char *name)
error = misc_register(&ls->ls_device);
if (error) {
kfree(ls->ls_device.name);
+ /*
+ * This has to be set to NULL to avoid a double-free
+ * in dlm_device_deregister().
+ */
+ ls->ls_device.name = NULL;
}
fail:
return error;
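The one-line fix above closes a double-free: the registration error path frees ls_device.name, and a later dlm_device_deregister() would kfree() the same pointer again. The idiom in miniature, purely as an illustration (kfree(NULL) is a no-op):

static void example_unregister(struct miscdevice *dev)
{
        kfree(dev->name);
        dev->name = NULL;       /* a second kfree(dev->name) is now harmless */
}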
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 3ab9c68b8bce..066df649a6b0 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -519,8 +519,13 @@ static void ep_remove_wait_queue(struct eppoll_entry *pwq)
wait_queue_head_t *whead;
rcu_read_lock();
- /* If it is cleared by POLLFREE, it should be rcu-safe */
- whead = rcu_dereference(pwq->whead);
+ /*
+ * If it is cleared by POLLFREE, it should be rcu-safe.
+ * If we read NULL we need a barrier paired with
+ * smp_store_release() in ep_poll_callback(), otherwise
+ * we rely on whead->lock.
+ */
+ whead = smp_load_acquire(&pwq->whead);
if (whead)
remove_wait_queue(whead, &pwq->wait);
rcu_read_unlock();
@@ -1004,17 +1009,6 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
struct epitem *epi = ep_item_from_wait(wait);
struct eventpoll *ep = epi->ep;
- if ((unsigned long)key & POLLFREE) {
- ep_pwq_from_wait(wait)->whead = NULL;
- /*
- * whead = NULL above can race with ep_remove_wait_queue()
- * which can do another remove_wait_queue() after us, so we
- * can't use __remove_wait_queue(). whead->lock is held by
- * the caller.
- */
- list_del_init(&wait->task_list);
- }
-
spin_lock_irqsave(&ep->lock, flags);
/*
@@ -1079,6 +1073,23 @@ out_unlock:
if (pwake)
ep_poll_safewake(&ep->poll_wait);
+
+ if ((unsigned long)key & POLLFREE) {
+ /*
+ * If we race with ep_remove_wait_queue() it can miss
+ * ->whead = NULL and do another remove_wait_queue() after
+ * us, so we can't use __remove_wait_queue().
+ */
+ list_del_init(&wait->task_list);
+ /*
+ * ->whead != NULL protects us from the race with ep_free()
+ * or ep_remove(), ep_remove_wait_queue() takes whead->lock
+ * held by the caller. Once we nullify it, nothing protects
+ * ep/epi or even wait.
+ */
+ smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL);
+ }
+
return 1;
}
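The rework above replaces a plain store plus rcu_dereference() with an smp_store_release()/smp_load_acquire() pair: the waker must finish list_del_init() before publishing whead = NULL, and a reader that observes NULL must also observe the completed list manipulation. A minimal, simplified sketch of that release/acquire pairing, not the epoll code itself:

struct pub { int payload; };

static struct pub *slot;

static void publisher(struct pub *p)
{
        p->payload = 42;                /* ordinary store ...             */
        smp_store_release(&slot, p);    /* ... ordered before the publish */
}

static void consumer(void)
{
        struct pub *p = smp_load_acquire(&slot); /* pairs with the release */

        if (p)
                BUG_ON(p->payload != 42); /* guaranteed to see the store */
}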
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index b9f838af5a72..f5099a3386ec 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -389,14 +389,12 @@ int ext4_decrypt(struct page *page)
page->index, page, page, GFP_NOFS);
}
-int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex)
+int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
+ ext4_fsblk_t pblk, ext4_lblk_t len)
{
struct ext4_crypto_ctx *ctx;
struct page *ciphertext_page = NULL;
struct bio *bio;
- ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
- ext4_fsblk_t pblk = ext4_ext_pblock(ex);
- unsigned int len = ext4_ext_get_actual_len(ex);
int ret, err = 0;
#if 0
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 6c910f127f1f..abc9e169cb44 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2271,7 +2271,8 @@ struct page *ext4_encrypt(struct inode *inode,
struct page *plaintext_page,
gfp_t gfp_flags);
int ext4_decrypt(struct page *page);
-int ext4_encrypted_zeroout(struct inode *inode, struct ext4_extent *ex);
+int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
+ ext4_fsblk_t pblk, ext4_lblk_t len);
extern const struct dentry_operations ext4_encrypted_d_ops;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
@@ -2539,6 +2540,8 @@ extern int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
extern qsize_t *ext4_get_reserved_space(struct inode *inode);
extern void ext4_da_update_reserve_space(struct inode *inode,
int used, int quota_claim);
+extern int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk,
+ ext4_fsblk_t pblk, ext4_lblk_t len);
/* indirect.c */
extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 61d5bfc7318c..fc496c646d12 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3127,19 +3127,11 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
ext4_fsblk_t ee_pblock;
unsigned int ee_len;
- int ret;
ee_len = ext4_ext_get_actual_len(ex);
ee_pblock = ext4_ext_pblock(ex);
-
- if (ext4_encrypted_inode(inode))
- return ext4_encrypted_zeroout(inode, ex);
-
- ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
- if (ret > 0)
- ret = 0;
-
- return ret;
+ return ext4_issue_zeroout(inode, le32_to_cpu(ex->ee_block), ee_pblock,
+ ee_len);
}
/*
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 46d30bc18c0d..c1d7b3ab8b43 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -390,6 +390,21 @@ static int __check_block_validity(struct inode *inode, const char *func,
return 0;
}
+int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
+ ext4_lblk_t len)
+{
+ int ret;
+
+ if (ext4_encrypted_inode(inode))
+ return ext4_encrypted_zeroout(inode, lblk, pblk, len);
+
+ ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
+ if (ret > 0)
+ ret = 0;
+
+ return ret;
+}
+
#define check_block_validity(inode, map) \
__check_block_validity((inode), __func__, __LINE__, (map))
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index bd8831bfbafe..83a72da67df7 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2204,6 +2204,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
unsigned int s_flags = sb->s_flags;
int nr_orphans = 0, nr_truncates = 0;
#ifdef CONFIG_QUOTA
+ int quota_update = 0;
int i;
#endif
if (!es->s_last_orphan) {
@@ -2242,14 +2243,32 @@ static void ext4_orphan_cleanup(struct super_block *sb,
#ifdef CONFIG_QUOTA
/* Needed for iput() to work correctly and not trash data */
sb->s_flags |= MS_ACTIVE;
- /* Turn on quotas so that they are updated correctly */
+
+ /*
+ * Turn on quotas that were not enabled for read-only mounts if the
+ * filesystem has the quota feature, so that they are updated correctly.
+ */
+ if (ext4_has_feature_quota(sb) && (s_flags & MS_RDONLY)) {
+ int ret = ext4_enable_quotas(sb);
+
+ if (!ret)
+ quota_update = 1;
+ else
+ ext4_msg(sb, KERN_ERR,
+ "Cannot turn on quotas: error %d", ret);
+ }
+
+ /* Turn on journaled quotas used for old-style quota setups */
for (i = 0; i < EXT4_MAXQUOTAS; i++) {
if (EXT4_SB(sb)->s_qf_names[i]) {
int ret = ext4_quota_on_mount(sb, i);
- if (ret < 0)
+
+ if (!ret)
+ quota_update = 1;
+ else
ext4_msg(sb, KERN_ERR,
"Cannot turn on journaled "
- "quota: error %d", ret);
+ "quota: type %d: error %d", i, ret);
}
}
#endif
@@ -2308,10 +2327,12 @@ static void ext4_orphan_cleanup(struct super_block *sb,
ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
PLURAL(nr_truncates));
#ifdef CONFIG_QUOTA
- /* Turn quotas off */
- for (i = 0; i < EXT4_MAXQUOTAS; i++) {
- if (sb_dqopt(sb)->files[i])
- dquot_quota_off(sb, i);
+ /* Turn off quotas if they were enabled for orphan cleanup */
+ if (quota_update) {
+ for (i = 0; i < EXT4_MAXQUOTAS; i++) {
+ if (sb_dqopt(sb)->files[i])
+ dquot_quota_off(sb, i);
+ }
}
#endif
sb->s_flags = s_flags; /* Restore MS_RDONLY status */
@@ -5123,6 +5144,9 @@ static int ext4_enable_quotas(struct super_block *sb)
err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
DQUOT_USAGE_ENABLED);
if (err) {
+ for (type--; type >= 0; type--)
+ dquot_quota_off(sb, type);
+
ext4_warning(sb,
"Failed to enable quota tracking "
"(type=%d, err=%d). Please run "
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index b0a9dc929f88..1852d99df97b 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -1,6 +1,9 @@
config F2FS_FS
tristate "F2FS filesystem support"
depends on BLOCK
+ select CRYPTO
+ select KEYS
+ select CRYPTO_CRC32
help
F2FS is based on Log-structured File System (LFS), which supports
versatile "flash-friendly" features. The design has been focused on
@@ -76,15 +79,7 @@ config F2FS_FS_ENCRYPTION
bool "F2FS Encryption"
depends on F2FS_FS
depends on F2FS_FS_XATTR
- select CRYPTO_AES
- select CRYPTO_CBC
- select CRYPTO_ECB
- select CRYPTO_XTS
- select CRYPTO_CTS
- select CRYPTO_CTR
- select CRYPTO_SHA256
- select KEYS
- select ENCRYPTED_KEYS
+ select FS_ENCRYPTION
help
Enable encryption of f2fs files and directories. This
feature is similar to ecryptfs, but it is more memory
@@ -100,3 +95,11 @@ config F2FS_IO_TRACE
information and block IO patterns in the filesystem level.
If unsure, say N.
+
+config F2FS_FAULT_INJECTION
+ bool "F2FS fault injection facility"
+ depends on F2FS_FS
+ help
+ Inject faults such as ENOMEM and ENOSPC into F2FS to exercise its error-handling paths.
+
+ If unsure, say N.
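CONFIG_F2FS_FAULT_INJECTION gates time_to_inject() checks like the FAULT_ORPHAN one added to acquire_orphan_inode() further down, and the same pattern backs the f2fs_kmalloc() calls that replace plain kmalloc() in the acl.c hunk below. A sketch of what such a wrapper plausibly looks like; the exact body is an assumption:

static inline void *example_f2fs_kmalloc(struct f2fs_sb_info *sbi,
                                         size_t size, gfp_t flags)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
        /* Periodically pretend the allocation failed. */
        if (time_to_inject(sbi, FAULT_KMALLOC))
                return NULL;
#endif
        return kmalloc(size, flags);
}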
diff --git a/fs/f2fs/Makefile b/fs/f2fs/Makefile
index 08e101ed914c..ca949ea7c02f 100644
--- a/fs/f2fs/Makefile
+++ b/fs/f2fs/Makefile
@@ -7,5 +7,3 @@ f2fs-$(CONFIG_F2FS_STAT_FS) += debug.o
f2fs-$(CONFIG_F2FS_FS_XATTR) += xattr.o
f2fs-$(CONFIG_F2FS_FS_POSIX_ACL) += acl.o
f2fs-$(CONFIG_F2FS_IO_TRACE) += trace.o
-f2fs-$(CONFIG_F2FS_FS_ENCRYPTION) += crypto_policy.o crypto.o \
- crypto_key.o crypto_fname.o
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index 83dcf7bfd7b8..a45d1f4b7b0f 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -109,14 +109,16 @@ fail:
return ERR_PTR(-EINVAL);
}
-static void *f2fs_acl_to_disk(const struct posix_acl *acl, size_t *size)
+static void *f2fs_acl_to_disk(struct f2fs_sb_info *sbi,
+ const struct posix_acl *acl, size_t *size)
{
struct f2fs_acl_header *f2fs_acl;
struct f2fs_acl_entry *entry;
int i;
- f2fs_acl = kmalloc(sizeof(struct f2fs_acl_header) + acl->a_count *
- sizeof(struct f2fs_acl_entry), GFP_NOFS);
+ f2fs_acl = f2fs_kmalloc(sbi, sizeof(struct f2fs_acl_header) +
+ acl->a_count * sizeof(struct f2fs_acl_entry),
+ GFP_NOFS);
if (!f2fs_acl)
return ERR_PTR(-ENOMEM);
@@ -175,7 +177,7 @@ static struct posix_acl *__f2fs_get_acl(struct inode *inode, int type,
retval = f2fs_getxattr(inode, name_index, "", NULL, 0, dpage);
if (retval > 0) {
- value = kmalloc(retval, GFP_F2FS_ZERO);
+ value = f2fs_kmalloc(F2FS_I_SB(inode), retval, GFP_F2FS_ZERO);
if (!value)
return ERR_PTR(-ENOMEM);
retval = f2fs_getxattr(inode, name_index, "", value,
@@ -204,7 +206,6 @@ struct posix_acl *f2fs_get_acl(struct inode *inode, int type)
static int __f2fs_set_acl(struct inode *inode, int type,
struct posix_acl *acl, struct page *ipage)
{
- struct f2fs_inode_info *fi = F2FS_I(inode);
int name_index;
void *value = NULL;
size_t size = 0;
@@ -213,11 +214,11 @@ static int __f2fs_set_acl(struct inode *inode, int type,
switch (type) {
case ACL_TYPE_ACCESS:
name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
- if (acl && !ipage) {
+ if (acl) {
error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
if (error)
return error;
- set_acl_inode(fi, inode->i_mode);
+ set_acl_inode(inode, inode->i_mode);
}
break;
@@ -232,9 +233,9 @@ static int __f2fs_set_acl(struct inode *inode, int type,
}
if (acl) {
- value = f2fs_acl_to_disk(acl, &size);
+ value = f2fs_acl_to_disk(F2FS_I_SB(inode), acl, &size);
if (IS_ERR(value)) {
- clear_inode_flag(fi, FI_ACL_MODE);
+ clear_inode_flag(inode, FI_ACL_MODE);
return (int)PTR_ERR(value);
}
}
@@ -245,7 +246,7 @@ static int __f2fs_set_acl(struct inode *inode, int type,
if (!error)
set_cached_acl(inode, type, acl);
- clear_inode_flag(fi, FI_ACL_MODE);
+ clear_inode_flag(inode, FI_ACL_MODE);
return error;
}
@@ -386,6 +387,8 @@ int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
if (error)
return error;
+ f2fs_mark_inode_dirty_sync(inode, true);
+
if (default_acl) {
error = __f2fs_set_acl(inode, ACL_TYPE_DEFAULT, default_acl,
ipage);
diff --git a/fs/f2fs/acl.h b/fs/f2fs/acl.h
index 997ca8edb6cb..2c685185c24d 100644
--- a/fs/f2fs/acl.h
+++ b/fs/f2fs/acl.h
@@ -37,11 +37,10 @@ struct f2fs_acl_header {
#ifdef CONFIG_F2FS_FS_POSIX_ACL
extern struct posix_acl *f2fs_get_acl(struct inode *, int);
-extern int f2fs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+extern int f2fs_set_acl(struct inode *, struct posix_acl *, int);
extern int f2fs_init_acl(struct inode *, struct inode *, struct page *,
struct page *);
#else
-#define f2fs_check_acl NULL
#define f2fs_get_acl NULL
#define f2fs_set_acl NULL
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index f661d80474be..640f28576e88 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -26,6 +26,14 @@
static struct kmem_cache *ino_entry_slab;
struct kmem_cache *inode_entry_slab;
+void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
+{
+ set_ckpt_flags(sbi, CP_ERROR_FLAG);
+ sbi->sb->s_flags |= MS_RDONLY;
+ if (!end_io)
+ f2fs_flush_merged_bios(sbi);
+}
+
/*
* We guarantee no failure on the returned page.
*/
@@ -34,13 +42,14 @@ struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
struct address_space *mapping = META_MAPPING(sbi);
struct page *page = NULL;
repeat:
- page = grab_cache_page(mapping, index);
+ page = f2fs_grab_cache_page(mapping, index, false);
if (!page) {
cond_resched();
goto repeat;
}
- f2fs_wait_on_page_writeback(page, META);
- SetPageUptodate(page);
+ f2fs_wait_on_page_writeback(page, META, true);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
return page;
}
@@ -56,14 +65,15 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
.sbi = sbi,
.type = META,
.rw = READ_SYNC | REQ_META | REQ_PRIO,
- .blk_addr = index,
+ .old_blkaddr = index,
+ .new_blkaddr = index,
.encrypted_page = NULL,
};
if (unlikely(!is_meta))
fio.rw &= ~REQ_META;
repeat:
- page = grab_cache_page(mapping, index);
+ page = f2fs_grab_cache_page(mapping, index, false);
if (!page) {
cond_resched();
goto repeat;
@@ -90,7 +100,7 @@ repeat:
* meta page.
*/
if (unlikely(!PageUptodate(page)))
- f2fs_stop_checkpoint(sbi);
+ f2fs_stop_checkpoint(sbi, false);
out:
return page;
}
@@ -143,7 +153,6 @@ bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
int type, bool sync)
{
- block_t prev_blk_addr = 0;
struct page *page;
block_t blkno = start;
struct f2fs_io_info fio = {
@@ -152,10 +161,12 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
.rw = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : READA,
.encrypted_page = NULL,
};
+ struct blk_plug plug;
if (unlikely(type == META_POR))
fio.rw &= ~REQ_META;
+ blk_start_plug(&plug);
for (; nrpages-- > 0; blkno++) {
if (!is_valid_blkaddr(sbi, blkno, type))
@@ -167,27 +178,25 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid)))
blkno = 0;
/* get nat block addr */
- fio.blk_addr = current_nat_addr(sbi,
+ fio.new_blkaddr = current_nat_addr(sbi,
blkno * NAT_ENTRY_PER_BLOCK);
break;
case META_SIT:
/* get sit block addr */
- fio.blk_addr = current_sit_addr(sbi,
+ fio.new_blkaddr = current_sit_addr(sbi,
blkno * SIT_ENTRY_PER_BLOCK);
- if (blkno != start && prev_blk_addr + 1 != fio.blk_addr)
- goto out;
- prev_blk_addr = fio.blk_addr;
break;
case META_SSA:
case META_CP:
case META_POR:
- fio.blk_addr = blkno;
+ fio.new_blkaddr = blkno;
break;
default:
BUG();
}
- page = grab_cache_page(META_MAPPING(sbi), fio.blk_addr);
+ page = f2fs_grab_cache_page(META_MAPPING(sbi),
+ fio.new_blkaddr, false);
if (!page)
continue;
if (PageUptodate(page)) {
@@ -196,11 +205,13 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
}
fio.page = page;
+ fio.old_blkaddr = fio.new_blkaddr;
f2fs_submit_page_mbio(&fio);
f2fs_put_page(page, 0);
}
out:
f2fs_submit_merged_bio(sbi, META, READ);
+ blk_finish_plug(&plug);
return blkno - start;
}
@@ -210,12 +221,12 @@ void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
bool readahead = false;
page = find_get_page(META_MAPPING(sbi), index);
- if (!page || (page && !PageUptodate(page)))
+ if (!page || !PageUptodate(page))
readahead = true;
f2fs_put_page(page, 0);
if (readahead)
- ra_meta_pages(sbi, index, MAX_BIO_BLOCKS(sbi), META_POR, true);
+ ra_meta_pages(sbi, index, BIO_MAX_PAGES, META_POR, true);
}
static int f2fs_write_meta_page(struct page *page,
@@ -232,13 +243,17 @@ static int f2fs_write_meta_page(struct page *page,
if (unlikely(f2fs_cp_error(sbi)))
goto redirty_out;
- f2fs_wait_on_page_writeback(page, META);
write_meta_page(sbi, page);
dec_page_count(sbi, F2FS_DIRTY_META);
- unlock_page(page);
if (wbc->for_reclaim)
+ f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, META, WRITE);
+
+ unlock_page(page);
+
+ if (unlikely(f2fs_cp_error(sbi)))
f2fs_submit_merged_bio(sbi, META, WRITE);
+
return 0;
redirty_out:
@@ -252,13 +267,13 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
long diff, written;
- trace_f2fs_writepages(mapping->host, wbc, META);
-
/* collect a number of dirty meta pages and write together */
if (wbc->for_kupdate ||
get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META))
goto skip_write;
+ trace_f2fs_writepages(mapping->host, wbc, META);
+
/* if mounting is failed, skip writing node pages */
mutex_lock(&sbi->cp_mutex);
diff = nr_pages_to_write(sbi, META, wbc);
@@ -269,6 +284,7 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
skip_write:
wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
+ trace_f2fs_writepages(mapping->host, wbc, META);
return 0;
}
@@ -276,15 +292,18 @@ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
long nr_to_write)
{
struct address_space *mapping = META_MAPPING(sbi);
- pgoff_t index = 0, end = LONG_MAX, prev = LONG_MAX;
+ pgoff_t index = 0, end = ULONG_MAX, prev = ULONG_MAX;
struct pagevec pvec;
long nwritten = 0;
struct writeback_control wbc = {
.for_reclaim = 0,
};
+ struct blk_plug plug;
pagevec_init(&pvec, 0);
+ blk_start_plug(&plug);
+
while (index <= end) {
int i, nr_pages;
nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
@@ -296,7 +315,7 @@ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
- if (prev == LONG_MAX)
+ if (prev == ULONG_MAX)
prev = page->index - 1;
if (nr_to_write != LONG_MAX && page->index != prev + 1) {
pagevec_release(&pvec);
@@ -315,6 +334,9 @@ continue_unlock:
goto continue_unlock;
}
+ f2fs_wait_on_page_writeback(page, META, true);
+
+ BUG_ON(PageWriteback(page));
if (!clear_page_dirty_for_io(page))
goto continue_unlock;
@@ -334,6 +356,8 @@ stop:
if (nwritten)
f2fs_submit_merged_bio(sbi, type, WRITE);
+ blk_finish_plug(&plug);
+
return nwritten;
}
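The writeback paths above gain a blk_plug around their submission loops, so the many small meta-page bios are queued up and handed to the block layer as one batch when the plug is finished. The idiom in isolation, illustrative only:

static void example_batched_submit(void)
{
        struct blk_plug plug;

        blk_start_plug(&plug);  /* hold back bios issued below */
        /* ... submit many small writes; they accumulate on the plug ... */
        blk_finish_plug(&plug); /* release them to the device as a batch */
}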
@@ -341,9 +365,10 @@ static int f2fs_set_meta_page_dirty(struct page *page)
{
trace_f2fs_set_page_dirty(page, META);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
if (!PageDirty(page)) {
- __set_page_dirty_nobuffers(page);
+ f2fs_set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
SetPagePrivate(page);
f2fs_trace_pid(page);
@@ -358,6 +383,9 @@ const struct address_space_operations f2fs_meta_aops = {
.set_page_dirty = f2fs_set_meta_page_dirty,
.invalidatepage = f2fs_invalidate_page,
.releasepage = f2fs_release_page,
+#ifdef CONFIG_MIGRATION
+ .migratepage = f2fs_migrate_page,
+#endif
};
static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
@@ -410,13 +438,13 @@ static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
spin_unlock(&im->ino_lock);
}
-void add_dirty_inode(struct f2fs_sb_info *sbi, nid_t ino, int type)
+void add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
/* add new dirty ino entry into list */
__add_ino_entry(sbi, ino, type);
}
-void remove_dirty_inode(struct f2fs_sb_info *sbi, nid_t ino, int type)
+void remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
/* remove dirty ino entry from list */
__remove_ino_entry(sbi, ino, type);
@@ -434,12 +462,12 @@ bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
return e ? true : false;
}
-void release_dirty_inode(struct f2fs_sb_info *sbi)
+void release_ino_entry(struct f2fs_sb_info *sbi, bool all)
{
struct ino_entry *e, *tmp;
int i;
- for (i = APPEND_INO; i <= UPDATE_INO; i++) {
+ for (i = all ? ORPHAN_INO : APPEND_INO; i <= UPDATE_INO; i++) {
struct inode_management *im = &sbi->im[i];
spin_lock(&im->ino_lock);
@@ -459,6 +487,13 @@ int acquire_orphan_inode(struct f2fs_sb_info *sbi)
int err = 0;
spin_lock(&im->ino_lock);
+
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_ORPHAN)) {
+ spin_unlock(&im->ino_lock);
+ return -ENOSPC;
+ }
+#endif
if (unlikely(im->ino_num >= sbi->max_orphans))
err = -ENOSPC;
else
@@ -478,10 +513,11 @@ void release_orphan_inode(struct f2fs_sb_info *sbi)
spin_unlock(&im->ino_lock);
}
-void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
+void add_orphan_inode(struct inode *inode)
{
/* add new orphan ino entry into list */
- __add_ino_entry(sbi, ino, ORPHAN_INO);
+ __add_ino_entry(F2FS_I_SB(inode), inode->i_ino, ORPHAN_INO);
+ update_inode_page(inode);
}
void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
@@ -493,8 +529,20 @@ void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
struct inode *inode;
+ struct node_info ni;
+ int err = acquire_orphan_inode(sbi);
+
+ if (err) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: orphan failed (ino=%x), run fsck to fix.",
+ __func__, ino);
+ return err;
+ }
- inode = f2fs_iget(sbi->sb, ino);
+ __add_ino_entry(sbi, ino, ORPHAN_INO);
+
+ inode = f2fs_iget_retry(sbi->sb, ino);
if (IS_ERR(inode)) {
/*
* there should be a bug that we can't find the entry
@@ -508,6 +556,18 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
/* truncate all the data during iput */
iput(inode);
+
+ get_node_info(sbi, ino, &ni);
+
+ /* ENOMEM was fully retried in f2fs_evict_inode. */
+ if (ni.blk_addr != NULL_ADDR) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: orphan failed (ino=%x), run fsck to fix.",
+ __func__, ino);
+ return -EIO;
+ }
+ __remove_ino_entry(sbi, ino, ORPHAN_INO);
return 0;
}
@@ -516,7 +576,7 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
block_t start_blk, orphan_blocks, i, j;
int err;
- if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
+ if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
return 0;
start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
@@ -540,7 +600,7 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
f2fs_put_page(page, 1);
}
/* clear Orphan Flag */
- clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
+ clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
return 0;
}
@@ -601,45 +661,55 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
}
}
-static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
- block_t cp_addr, unsigned long long *version)
+static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
+ struct f2fs_checkpoint **cp_block, struct page **cp_page,
+ unsigned long long *version)
{
- struct page *cp_page_1, *cp_page_2 = NULL;
unsigned long blk_size = sbi->blocksize;
- struct f2fs_checkpoint *cp_block;
- unsigned long long cur_version = 0, pre_version = 0;
- size_t crc_offset;
+ size_t crc_offset = 0;
__u32 crc = 0;
- /* Read the 1st cp block in this CP pack */
- cp_page_1 = get_meta_page(sbi, cp_addr);
+ *cp_page = get_meta_page(sbi, cp_addr);
+ *cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);
- /* get the version number */
- cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1);
- crc_offset = le32_to_cpu(cp_block->checksum_offset);
- if (crc_offset >= blk_size)
- goto invalid_cp1;
+ crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
+ if (crc_offset >= blk_size) {
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "invalid crc_offset: %zu", crc_offset);
+ return -EINVAL;
+ }
- crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
- if (!f2fs_crc_valid(crc, cp_block, crc_offset))
- goto invalid_cp1;
+ crc = le32_to_cpu(*((__le32 *)((unsigned char *)*cp_block
+ + crc_offset)));
+ if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
+ f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
+ return -EINVAL;
+ }
- pre_version = cur_cp_version(cp_block);
+ *version = cur_cp_version(*cp_block);
+ return 0;
+}
- /* Read the 2nd cp block in this CP pack */
- cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
- cp_page_2 = get_meta_page(sbi, cp_addr);
+static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
+ block_t cp_addr, unsigned long long *version)
+{
+ struct page *cp_page_1 = NULL, *cp_page_2 = NULL;
+ struct f2fs_checkpoint *cp_block = NULL;
+ unsigned long long cur_version = 0, pre_version = 0;
+ int err;
- cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2);
- crc_offset = le32_to_cpu(cp_block->checksum_offset);
- if (crc_offset >= blk_size)
- goto invalid_cp2;
+ err = get_checkpoint_version(sbi, cp_addr, &cp_block,
+ &cp_page_1, version);
+ if (err)
+ goto invalid_cp1;
+ pre_version = *version;
- crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
- if (!f2fs_crc_valid(crc, cp_block, crc_offset))
+ cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
+ err = get_checkpoint_version(sbi, cp_addr, &cp_block,
+ &cp_page_2, version);
+ if (err)
goto invalid_cp2;
-
- cur_version = cur_cp_version(cp_block);
+ cur_version = *version;
if (cur_version == pre_version) {
*version = cur_version;
@@ -696,6 +766,15 @@ int get_valid_checkpoint(struct f2fs_sb_info *sbi)
cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
memcpy(sbi->ckpt, cp_block, blk_size);
+ /* Sanity checking of checkpoint */
+ if (sanity_check_ckpt(sbi))
+ goto free_fail_no_cp;
+
+ if (cur_page == cp1)
+ sbi->cur_cp_pack = 1;
+ else
+ sbi->cur_cp_pack = 2;
+
if (cp_blks <= 1)
goto done;
@@ -717,123 +796,102 @@ done:
f2fs_put_page(cp2, 1);
return 0;
+free_fail_no_cp:
+ f2fs_put_page(cp1, 1);
+ f2fs_put_page(cp2, 1);
fail_no_cp:
kfree(sbi->ckpt);
return -EINVAL;
}
-static int __add_dirty_inode(struct inode *inode, struct inode_entry *new)
+static void __add_dirty_inode(struct inode *inode, enum inode_type type)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;
- if (is_inode_flag_set(F2FS_I(inode), FI_DIRTY_DIR))
- return -EEXIST;
+ if (is_inode_flag_set(inode, flag))
+ return;
- set_inode_flag(F2FS_I(inode), FI_DIRTY_DIR);
- F2FS_I(inode)->dirty_dir = new;
- list_add_tail(&new->list, &sbi->dir_inode_list);
- stat_inc_dirty_dir(sbi);
- return 0;
+ set_inode_flag(inode, flag);
+ list_add_tail(&F2FS_I(inode)->dirty_list, &sbi->inode_list[type]);
+ stat_inc_dirty_inode(sbi, type);
+}
+
+static void __remove_dirty_inode(struct inode *inode, enum inode_type type)
+{
+ int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;
+
+ if (get_dirty_pages(inode) || !is_inode_flag_set(inode, flag))
+ return;
+
+ list_del_init(&F2FS_I(inode)->dirty_list);
+ clear_inode_flag(inode, flag);
+ stat_dec_dirty_inode(F2FS_I_SB(inode), type);
}
void update_dirty_page(struct inode *inode, struct page *page)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct inode_entry *new;
- int ret = 0;
+ enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;
if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
!S_ISLNK(inode->i_mode))
return;
- if (!S_ISDIR(inode->i_mode)) {
- inode_inc_dirty_pages(inode);
- goto out;
- }
-
- new = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
- new->inode = inode;
- INIT_LIST_HEAD(&new->list);
-
- spin_lock(&sbi->dir_inode_lock);
- ret = __add_dirty_inode(inode, new);
+ spin_lock(&sbi->inode_lock[type]);
+ if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH))
+ __add_dirty_inode(inode, type);
inode_inc_dirty_pages(inode);
- spin_unlock(&sbi->dir_inode_lock);
+ spin_unlock(&sbi->inode_lock[type]);
- if (ret)
- kmem_cache_free(inode_entry_slab, new);
-out:
SetPagePrivate(page);
f2fs_trace_pid(page);
}
-void add_dirty_dir_inode(struct inode *inode)
+void remove_dirty_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct inode_entry *new =
- f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
- int ret = 0;
+ enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;
- new->inode = inode;
- INIT_LIST_HEAD(&new->list);
-
- spin_lock(&sbi->dir_inode_lock);
- ret = __add_dirty_inode(inode, new);
- spin_unlock(&sbi->dir_inode_lock);
-
- if (ret)
- kmem_cache_free(inode_entry_slab, new);
-}
-
-void remove_dirty_dir_inode(struct inode *inode)
-{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct inode_entry *entry;
-
- if (!S_ISDIR(inode->i_mode))
+ if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
+ !S_ISLNK(inode->i_mode))
return;
- spin_lock(&sbi->dir_inode_lock);
- if (get_dirty_pages(inode) ||
- !is_inode_flag_set(F2FS_I(inode), FI_DIRTY_DIR)) {
- spin_unlock(&sbi->dir_inode_lock);
+ if (type == FILE_INODE && !test_opt(sbi, DATA_FLUSH))
return;
- }
- entry = F2FS_I(inode)->dirty_dir;
- list_del(&entry->list);
- F2FS_I(inode)->dirty_dir = NULL;
- clear_inode_flag(F2FS_I(inode), FI_DIRTY_DIR);
- stat_dec_dirty_dir(sbi);
- spin_unlock(&sbi->dir_inode_lock);
- kmem_cache_free(inode_entry_slab, entry);
-
- /* Only from the recovery routine */
- if (is_inode_flag_set(F2FS_I(inode), FI_DELAY_IPUT)) {
- clear_inode_flag(F2FS_I(inode), FI_DELAY_IPUT);
- iput(inode);
- }
+ spin_lock(&sbi->inode_lock[type]);
+ __remove_dirty_inode(inode, type);
+ spin_unlock(&sbi->inode_lock[type]);
}
-void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi)
+int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
{
struct list_head *head;
- struct inode_entry *entry;
struct inode *inode;
+ struct f2fs_inode_info *fi;
+ bool is_dir = (type == DIR_INODE);
+
+ trace_f2fs_sync_dirty_inodes_enter(sbi->sb, is_dir,
+ get_pages(sbi, is_dir ?
+ F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
retry:
if (unlikely(f2fs_cp_error(sbi)))
- return;
+ return -EIO;
- spin_lock(&sbi->dir_inode_lock);
+ spin_lock(&sbi->inode_lock[type]);
- head = &sbi->dir_inode_list;
+ head = &sbi->inode_list[type];
if (list_empty(head)) {
- spin_unlock(&sbi->dir_inode_lock);
- return;
+ spin_unlock(&sbi->inode_lock[type]);
+ trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir,
+ get_pages(sbi, is_dir ?
+ F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
+ return 0;
}
- entry = list_entry(head->next, struct inode_entry, list);
- inode = igrab(entry->inode);
- spin_unlock(&sbi->dir_inode_lock);
+ fi = list_entry(head->next, struct f2fs_inode_info, dirty_list);
+ inode = igrab(&fi->vfs_inode);
+ spin_unlock(&sbi->inode_lock[type]);
if (inode) {
filemap_fdatawrite(inode->i_mapping);
iput(inode);
@@ -848,6 +906,38 @@ retry:
goto retry;
}
+int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
+{
+ struct list_head *head = &sbi->inode_list[DIRTY_META];
+ struct inode *inode;
+ struct f2fs_inode_info *fi;
+ s64 total = get_pages(sbi, F2FS_DIRTY_IMETA);
+
+ while (total--) {
+ if (unlikely(f2fs_cp_error(sbi)))
+ return -EIO;
+
+ spin_lock(&sbi->inode_lock[DIRTY_META]);
+ if (list_empty(head)) {
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+ return 0;
+ }
+ fi = list_entry(head->next, struct f2fs_inode_info,
+ gdirty_list);
+ inode = igrab(&fi->vfs_inode);
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+ if (inode) {
+ sync_inode_metadata(inode, 0);
+
+ /* it's on eviction */
+ if (is_inode_flag_set(inode, FI_DIRTY_INODE))
+ update_inode_page(inode);
+ iput(inode);
+ }
+ }
+ return 0;
+}
+
/*
* Freeze all the FS-operations for checkpoint.
*/
@@ -868,11 +958,17 @@ retry_flush_dents:
/* write all the dirty dentry pages */
if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
f2fs_unlock_all(sbi);
- sync_dirty_dir_inodes(sbi);
- if (unlikely(f2fs_cp_error(sbi))) {
- err = -EIO;
+ err = sync_dirty_inodes(sbi, DIR_INODE);
+ if (err)
+ goto out;
+ goto retry_flush_dents;
+ }
+
+ if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
+ f2fs_unlock_all(sbi);
+ err = f2fs_sync_inode_meta(sbi);
+ if (err)
goto out;
- }
goto retry_flush_dents;
}
@@ -885,10 +981,9 @@ retry_flush_nodes:
if (get_pages(sbi, F2FS_DIRTY_NODES)) {
up_write(&sbi->node_write);
- sync_node_pages(sbi, 0, &wbc);
- if (unlikely(f2fs_cp_error(sbi))) {
+ err = sync_node_pages(sbi, &wbc);
+ if (err) {
f2fs_unlock_all(sbi);
- err = -EIO;
goto out;
}
goto retry_flush_nodes;
@@ -901,6 +996,8 @@ out:
static void unblock_operations(struct f2fs_sb_info *sbi)
{
up_write(&sbi->node_write);
+
+ build_free_nids(sbi, false);
f2fs_unlock_all(sbi);
}
@@ -911,18 +1008,48 @@ static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
for (;;) {
prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);
- if (!get_pages(sbi, F2FS_WRITEBACK))
+ if (!get_pages(sbi, F2FS_WB_CP_DATA))
break;
- io_schedule();
+ io_schedule_timeout(5 * HZ);
}
finish_wait(&sbi->cp_wait, &wait);
}
-static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+{
+ unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+
+ spin_lock(&sbi->cp_lock);
+
+ if (cpc->reason == CP_UMOUNT)
+ __set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
+ else
+ __clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
+
+ if (cpc->reason == CP_FASTBOOT)
+ __set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
+ else
+ __clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
+
+ if (orphan_num)
+ __set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
+ else
+ __clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
+
+ if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
+ __set_ckpt_flags(ckpt, CP_FSCK_FLAG);
+
+ /* set this flag to activate crc|cp_ver for recovery */
+ __set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG);
+
+ spin_unlock(&sbi->cp_lock);
+}
+
+static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
- struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
struct f2fs_nm_info *nm_i = NM_I(sbi);
unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
nid_t last_nid = nm_i->next_scan_nid;
@@ -931,21 +1058,15 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
__u32 crc32 = 0;
int i;
int cp_payload_blks = __cp_payload(sbi);
- block_t discard_blk = NEXT_FREE_BLKADDR(sbi, curseg);
- bool invalidate = false;
-
- /*
- * This avoids to conduct wrong roll-forward operations and uses
- * metapages, so should be called prior to sync_meta_pages below.
- */
- if (discard_next_dnode(sbi, discard_blk))
- invalidate = true;
+ struct super_block *sb = sbi->sb;
+ struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
+ u64 kbytes_written;
/* Flush all the NAT/SIT pages */
while (get_pages(sbi, F2FS_DIRTY_META)) {
sync_meta_pages(sbi, META, LONG_MAX);
if (unlikely(f2fs_cp_error(sbi)))
- return;
+ return -EIO;
}
next_free_nid(sbi, &last_nid);
@@ -980,10 +1101,12 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* 2 cp + n data seg summary + orphan inode blocks */
data_sum_blocks = npages_for_summary_flush(sbi, false);
+ spin_lock(&sbi->cp_lock);
if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
- set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
+ __set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
else
- clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
+ __clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
+ spin_unlock(&sbi->cp_lock);
orphan_blocks = GET_ORPHAN_BLOCKS(orphan_num);
ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
@@ -998,39 +1121,24 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
cp_payload_blks + data_sum_blocks +
orphan_blocks);
- if (cpc->reason == CP_UMOUNT)
- set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
- else
- clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
-
- if (cpc->reason == CP_FASTBOOT)
- set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
- else
- clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
-
- if (orphan_num)
- set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
- else
- clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
-
- if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
- set_ckpt_flags(ckpt, CP_FSCK_FLAG);
+ /* update ckpt flag for checkpoint */
+ update_ckpt_flags(sbi, cpc);
/* update SIT/NAT bitmap */
get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));
- crc32 = f2fs_crc32(ckpt, le32_to_cpu(ckpt->checksum_offset));
+ crc32 = f2fs_crc32(sbi, ckpt, le32_to_cpu(ckpt->checksum_offset));
*((__le32 *)((unsigned char *)ckpt +
le32_to_cpu(ckpt->checksum_offset)))
= cpu_to_le32(crc32);
- start_blk = __start_cp_addr(sbi);
+ start_blk = __start_cp_next_addr(sbi);
/* need to wait for end_io results */
wait_on_all_pages_writeback(sbi);
if (unlikely(f2fs_cp_error(sbi)))
- return;
+ return -EIO;
/* write out checkpoint buffer at block 0 */
update_meta_page(sbi, ckpt, start_blk++);
@@ -1046,6 +1154,14 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
write_data_summaries(sbi, start_blk);
start_blk += data_sum_blocks;
+
+ /* Record write statistics in the hot node summary */
+ kbytes_written = sbi->kbytes_written;
+ if (sb->s_bdev->bd_part)
+ kbytes_written += BD_PART_WRITTEN(sbi);
+
+ seg_i->journal->info.kbytes_written = cpu_to_le64(kbytes_written);
+
if (__remain_node_summaries(cpc->reason)) {
write_node_summaries(sbi, start_blk);
start_blk += NR_CURSEG_NODE_TYPE;
@@ -1058,14 +1174,14 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
wait_on_all_pages_writeback(sbi);
if (unlikely(f2fs_cp_error(sbi)))
- return;
+ return -EIO;
- filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LONG_MAX);
- filemap_fdatawait_range(META_MAPPING(sbi), 0, LONG_MAX);
+ filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LLONG_MAX);
+ filemap_fdatawait_range(META_MAPPING(sbi), 0, LLONG_MAX);
/* update user_block_counts */
sbi->last_valid_block_count = sbi->total_valid_block_count;
- sbi->alloc_valid_block_count = 0;
+ percpu_counter_set(&sbi->alloc_valid_block_count, 0);
/* Here, we only have one bio having CP pack */
sync_meta_pages(sbi, META_FLUSH, LONG_MAX);
@@ -1073,30 +1189,36 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* wait for previous submitted meta pages writeback */
wait_on_all_pages_writeback(sbi);
- /*
- * invalidate meta page which is used temporarily for zeroing out
- * block at the end of warm node chain.
- */
- if (invalidate)
- invalidate_mapping_pages(META_MAPPING(sbi), discard_blk,
- discard_blk);
-
- release_dirty_inode(sbi);
+ release_ino_entry(sbi, false);
if (unlikely(f2fs_cp_error(sbi)))
- return;
+ return -EIO;
- clear_prefree_segments(sbi, cpc);
clear_sbi_flag(sbi, SBI_IS_DIRTY);
+ clear_sbi_flag(sbi, SBI_NEED_CP);
+ __set_cp_next_pack(sbi);
+
+ /*
+ * redirty superblock if metadata like node page or inode cache is
+ * updated during writing checkpoint.
+ */
+ if (get_pages(sbi, F2FS_DIRTY_NODES) ||
+ get_pages(sbi, F2FS_DIRTY_IMETA))
+ set_sbi_flag(sbi, SBI_IS_DIRTY);
+
+ f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS));
+
+ return 0;
}
/*
* We guarantee that this checkpoint procedure will not fail.
*/
-void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
+int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
unsigned long long ckpt_ver;
+ int err = 0;
mutex_lock(&sbi->cp_mutex);
@@ -1104,21 +1226,35 @@ void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
(cpc->reason == CP_FASTBOOT || cpc->reason == CP_SYNC ||
(cpc->reason == CP_DISCARD && !sbi->discard_blks)))
goto out;
- if (unlikely(f2fs_cp_error(sbi)))
+ if (unlikely(f2fs_cp_error(sbi))) {
+ err = -EIO;
goto out;
- if (f2fs_readonly(sbi->sb))
+ }
+ if (f2fs_readonly(sbi->sb)) {
+ err = -EROFS;
goto out;
+ }
trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");
- if (block_operations(sbi))
+ err = block_operations(sbi);
+ if (err)
goto out;
trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops");
- f2fs_submit_merged_bio(sbi, DATA, WRITE);
- f2fs_submit_merged_bio(sbi, NODE, WRITE);
- f2fs_submit_merged_bio(sbi, META, WRITE);
+ f2fs_flush_merged_bios(sbi);
+
+ /* this is the case of multiple fstrims without any changes */
+ if (cpc->reason == CP_DISCARD && !is_sbi_flag_set(sbi, SBI_IS_DIRTY)) {
+ f2fs_bug_on(sbi, NM_I(sbi)->dirty_nat_cnt);
+ f2fs_bug_on(sbi, SIT_I(sbi)->dirty_sentries);
+ f2fs_bug_on(sbi, prefree_segments(sbi));
+ flush_sit_entries(sbi, cpc);
+ clear_prefree_segments(sbi, cpc);
+ unblock_operations(sbi);
+ goto out;
+ }
/*
* update checkpoint pack index
@@ -1133,7 +1269,11 @@ void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
flush_sit_entries(sbi, cpc);
/* unlock all the fs_lock[] in do_checkpoint() */
- do_checkpoint(sbi, cpc);
+ err = do_checkpoint(sbi, cpc);
+ if (err)
+ release_discard_addrs(sbi);
+ else
+ clear_prefree_segments(sbi, cpc);
unblock_operations(sbi);
stat_inc_cp_count(sbi->stat_info);
@@ -1143,10 +1283,11 @@ void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
"checkpoint: version = %llx", ckpt_ver);
/* do checkpoint periodically */
- sbi->cp_expires = round_jiffies_up(jiffies + HZ * sbi->cp_interval);
+ f2fs_update_time(sbi, CP_TIME);
+ trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
out:
mutex_unlock(&sbi->cp_mutex);
- trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
+ return err;
}
void init_ino_entry_info(struct f2fs_sb_info *sbi)
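write_checkpoint() now returns an error instead of silently bailing out, so callers can propagate -EIO or -EROFS. A hedged sketch of a sync-style caller consuming the new return value; the surrounding function is an assumption, loosely modeled on a sync path:

static int example_sync_fs(struct f2fs_sb_info *sbi)
{
        struct cp_control cpc = {
                .reason = CP_SYNC,
        };

        return write_checkpoint(sbi, &cpc);
}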
diff --git a/fs/f2fs/crypto.c b/fs/f2fs/crypto.c
deleted file mode 100644
index 4a62ef14e932..000000000000
--- a/fs/f2fs/crypto.c
+++ /dev/null
@@ -1,491 +0,0 @@
-/*
- * linux/fs/f2fs/crypto.c
- *
- * Copied from linux/fs/ext4/crypto.c
- *
- * Copyright (C) 2015, Google, Inc.
- * Copyright (C) 2015, Motorola Mobility
- *
- * This contains encryption functions for f2fs
- *
- * Written by Michael Halcrow, 2014.
- *
- * Filename encryption additions
- * Uday Savagaonkar, 2014
- * Encryption policy handling additions
- * Ildar Muslukhov, 2014
- * Remove ext4_encrypted_zeroout(),
- * add f2fs_restore_and_release_control_page()
- * Jaegeuk Kim, 2015.
- *
- * This has not yet undergone a rigorous security audit.
- *
- * The usage of AES-XTS should conform to recommendations in NIST
- * Special Publication 800-38E and IEEE P1619/D16.
- */
-#include <crypto/hash.h>
-#include <crypto/sha.h>
-#include <keys/user-type.h>
-#include <keys/encrypted-type.h>
-#include <linux/crypto.h>
-#include <linux/ecryptfs.h>
-#include <linux/gfp.h>
-#include <linux/kernel.h>
-#include <linux/key.h>
-#include <linux/list.h>
-#include <linux/mempool.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/random.h>
-#include <linux/scatterlist.h>
-#include <linux/spinlock_types.h>
-#include <linux/f2fs_fs.h>
-#include <linux/ratelimit.h>
-#include <linux/bio.h>
-
-#include "f2fs.h"
-#include "xattr.h"
-
-/* Encryption added and removed here! (L: */
-
-static unsigned int num_prealloc_crypto_pages = 32;
-static unsigned int num_prealloc_crypto_ctxs = 128;
-
-module_param(num_prealloc_crypto_pages, uint, 0444);
-MODULE_PARM_DESC(num_prealloc_crypto_pages,
- "Number of crypto pages to preallocate");
-module_param(num_prealloc_crypto_ctxs, uint, 0444);
-MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
- "Number of crypto contexts to preallocate");
-
-static mempool_t *f2fs_bounce_page_pool;
-
-static LIST_HEAD(f2fs_free_crypto_ctxs);
-static DEFINE_SPINLOCK(f2fs_crypto_ctx_lock);
-
-static struct workqueue_struct *f2fs_read_workqueue;
-static DEFINE_MUTEX(crypto_init);
-
-static struct kmem_cache *f2fs_crypto_ctx_cachep;
-struct kmem_cache *f2fs_crypt_info_cachep;
-
-/**
- * f2fs_release_crypto_ctx() - Releases an encryption context
- * @ctx: The encryption context to release.
- *
- * If the encryption context was allocated from the pre-allocated pool, returns
- * it to that pool. Else, frees it.
- *
- * If there's a bounce page in the context, this frees that.
- */
-void f2fs_release_crypto_ctx(struct f2fs_crypto_ctx *ctx)
-{
- unsigned long flags;
-
- if (ctx->flags & F2FS_WRITE_PATH_FL && ctx->w.bounce_page) {
- mempool_free(ctx->w.bounce_page, f2fs_bounce_page_pool);
- ctx->w.bounce_page = NULL;
- }
- ctx->w.control_page = NULL;
- if (ctx->flags & F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
- kmem_cache_free(f2fs_crypto_ctx_cachep, ctx);
- } else {
- spin_lock_irqsave(&f2fs_crypto_ctx_lock, flags);
- list_add(&ctx->free_list, &f2fs_free_crypto_ctxs);
- spin_unlock_irqrestore(&f2fs_crypto_ctx_lock, flags);
- }
-}
-
-/**
- * f2fs_get_crypto_ctx() - Gets an encryption context
- * @inode: The inode for which we are doing the crypto
- *
- * Allocates and initializes an encryption context.
- *
- * Return: An allocated and initialized encryption context on success; error
- * value or NULL otherwise.
- */
-struct f2fs_crypto_ctx *f2fs_get_crypto_ctx(struct inode *inode)
-{
- struct f2fs_crypto_ctx *ctx = NULL;
- unsigned long flags;
- struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
-
- if (ci == NULL)
- return ERR_PTR(-ENOKEY);
-
- /*
- * We first try getting the ctx from a free list because in
- * the common case the ctx will have an allocated and
- * initialized crypto tfm, so it's probably a worthwhile
- * optimization. For the bounce page, we first try getting it
- * from the kernel allocator because that's just about as fast
- * as getting it from a list and because a cache of free pages
- * should generally be a "last resort" option for a filesystem
- * to be able to do its job.
- */
- spin_lock_irqsave(&f2fs_crypto_ctx_lock, flags);
- ctx = list_first_entry_or_null(&f2fs_free_crypto_ctxs,
- struct f2fs_crypto_ctx, free_list);
- if (ctx)
- list_del(&ctx->free_list);
- spin_unlock_irqrestore(&f2fs_crypto_ctx_lock, flags);
- if (!ctx) {
- ctx = kmem_cache_zalloc(f2fs_crypto_ctx_cachep, GFP_NOFS);
- if (!ctx)
- return ERR_PTR(-ENOMEM);
- ctx->flags |= F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
- } else {
- ctx->flags &= ~F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
- }
- ctx->flags &= ~F2FS_WRITE_PATH_FL;
- return ctx;
-}
-
-/*
- * Call f2fs_decrypt on every single page, reusing the encryption
- * context.
- */
-static void completion_pages(struct work_struct *work)
-{
- struct f2fs_crypto_ctx *ctx =
- container_of(work, struct f2fs_crypto_ctx, r.work);
- struct bio *bio = ctx->r.bio;
- struct bio_vec *bv;
- int i;
-
- bio_for_each_segment_all(bv, bio, i) {
- struct page *page = bv->bv_page;
- int ret = f2fs_decrypt(ctx, page);
-
- if (ret) {
- WARN_ON_ONCE(1);
- SetPageError(page);
- } else
- SetPageUptodate(page);
- unlock_page(page);
- }
- f2fs_release_crypto_ctx(ctx);
- bio_put(bio);
-}
-
-void f2fs_end_io_crypto_work(struct f2fs_crypto_ctx *ctx, struct bio *bio)
-{
- INIT_WORK(&ctx->r.work, completion_pages);
- ctx->r.bio = bio;
- queue_work(f2fs_read_workqueue, &ctx->r.work);
-}
-
-static void f2fs_crypto_destroy(void)
-{
- struct f2fs_crypto_ctx *pos, *n;
-
- list_for_each_entry_safe(pos, n, &f2fs_free_crypto_ctxs, free_list)
- kmem_cache_free(f2fs_crypto_ctx_cachep, pos);
- INIT_LIST_HEAD(&f2fs_free_crypto_ctxs);
- if (f2fs_bounce_page_pool)
- mempool_destroy(f2fs_bounce_page_pool);
- f2fs_bounce_page_pool = NULL;
-}
-
-/**
- * f2fs_crypto_initialize() - Set up for f2fs encryption.
- *
- * We only call this when we start accessing encrypted files, since it
- * results in memory getting allocated that wouldn't otherwise be used.
- *
- * Return: Zero on success, non-zero otherwise.
- */
-int f2fs_crypto_initialize(void)
-{
- int i, res = -ENOMEM;
-
- if (f2fs_bounce_page_pool)
- return 0;
-
- mutex_lock(&crypto_init);
- if (f2fs_bounce_page_pool)
- goto already_initialized;
-
- for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
- struct f2fs_crypto_ctx *ctx;
-
- ctx = kmem_cache_zalloc(f2fs_crypto_ctx_cachep, GFP_KERNEL);
- if (!ctx)
- goto fail;
- list_add(&ctx->free_list, &f2fs_free_crypto_ctxs);
- }
-
- /* must be allocated at the last step to avoid race condition above */
- f2fs_bounce_page_pool =
- mempool_create_page_pool(num_prealloc_crypto_pages, 0);
- if (!f2fs_bounce_page_pool)
- goto fail;
-
-already_initialized:
- mutex_unlock(&crypto_init);
- return 0;
-fail:
- f2fs_crypto_destroy();
- mutex_unlock(&crypto_init);
- return res;
-}
-
-/**
- * f2fs_exit_crypto() - Shutdown the f2fs encryption system
- */
-void f2fs_exit_crypto(void)
-{
- f2fs_crypto_destroy();
-
- if (f2fs_read_workqueue)
- destroy_workqueue(f2fs_read_workqueue);
- if (f2fs_crypto_ctx_cachep)
- kmem_cache_destroy(f2fs_crypto_ctx_cachep);
- if (f2fs_crypt_info_cachep)
- kmem_cache_destroy(f2fs_crypt_info_cachep);
-}
-
-int __init f2fs_init_crypto(void)
-{
- int res = -ENOMEM;
-
- f2fs_read_workqueue = alloc_workqueue("f2fs_crypto", WQ_HIGHPRI, 0);
- if (!f2fs_read_workqueue)
- goto fail;
-
- f2fs_crypto_ctx_cachep = KMEM_CACHE(f2fs_crypto_ctx,
- SLAB_RECLAIM_ACCOUNT);
- if (!f2fs_crypto_ctx_cachep)
- goto fail;
-
- f2fs_crypt_info_cachep = KMEM_CACHE(f2fs_crypt_info,
- SLAB_RECLAIM_ACCOUNT);
- if (!f2fs_crypt_info_cachep)
- goto fail;
-
- return 0;
-fail:
- f2fs_exit_crypto();
- return res;
-}
-
-void f2fs_restore_and_release_control_page(struct page **page)
-{
- struct f2fs_crypto_ctx *ctx;
- struct page *bounce_page;
-
- /* The bounce data pages are unmapped. */
- if ((*page)->mapping)
- return;
-
- /* The bounce data page is unmapped. */
- bounce_page = *page;
- ctx = (struct f2fs_crypto_ctx *)page_private(bounce_page);
-
- /* restore control page */
- *page = ctx->w.control_page;
-
- f2fs_restore_control_page(bounce_page);
-}
-
-void f2fs_restore_control_page(struct page *data_page)
-{
- struct f2fs_crypto_ctx *ctx =
- (struct f2fs_crypto_ctx *)page_private(data_page);
-
- set_page_private(data_page, (unsigned long)NULL);
- ClearPagePrivate(data_page);
- unlock_page(data_page);
- f2fs_release_crypto_ctx(ctx);
-}
-
-/**
- * f2fs_crypt_complete() - The completion callback for page encryption
- * @req: The asynchronous encryption request context
- * @res: The result of the encryption operation
- */
-static void f2fs_crypt_complete(struct crypto_async_request *req, int res)
-{
- struct f2fs_completion_result *ecr = req->data;
-
- if (res == -EINPROGRESS)
- return;
- ecr->res = res;
- complete(&ecr->completion);
-}
-
-typedef enum {
- F2FS_DECRYPT = 0,
- F2FS_ENCRYPT,
-} f2fs_direction_t;
-
-static int f2fs_page_crypto(struct f2fs_crypto_ctx *ctx,
- struct inode *inode,
- f2fs_direction_t rw,
- pgoff_t index,
- struct page *src_page,
- struct page *dest_page)
-{
- u8 xts_tweak[F2FS_XTS_TWEAK_SIZE];
- struct ablkcipher_request *req = NULL;
- DECLARE_F2FS_COMPLETION_RESULT(ecr);
- struct scatterlist dst, src;
- struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
- struct crypto_ablkcipher *tfm = ci->ci_ctfm;
- int res = 0;
-
- req = ablkcipher_request_alloc(tfm, GFP_NOFS);
- if (!req) {
- printk_ratelimited(KERN_ERR
- "%s: crypto_request_alloc() failed\n",
- __func__);
- return -ENOMEM;
- }
- ablkcipher_request_set_callback(
- req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- f2fs_crypt_complete, &ecr);
-
- BUILD_BUG_ON(F2FS_XTS_TWEAK_SIZE < sizeof(index));
- memcpy(xts_tweak, &index, sizeof(index));
- memset(&xts_tweak[sizeof(index)], 0,
- F2FS_XTS_TWEAK_SIZE - sizeof(index));
-
- sg_init_table(&dst, 1);
- sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
- sg_init_table(&src, 1);
- sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
- ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
- xts_tweak);
- if (rw == F2FS_DECRYPT)
- res = crypto_ablkcipher_decrypt(req);
- else
- res = crypto_ablkcipher_encrypt(req);
- if (res == -EINPROGRESS || res == -EBUSY) {
- BUG_ON(req->base.data != &ecr);
- wait_for_completion(&ecr.completion);
- res = ecr.res;
- }
- ablkcipher_request_free(req);
- if (res) {
- printk_ratelimited(KERN_ERR
- "%s: crypto_ablkcipher_encrypt() returned %d\n",
- __func__, res);
- return res;
- }
- return 0;
-}
-
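/*
 * Sketch (not part of the patch): the XTS tweak assembled in
 * f2fs_page_crypto() above is just the page index stored in native
 * endianness in the low bytes of a 16-byte buffer, tail zero-filled.
 * A stand-alone illustration, assuming a 64-bit index:
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TWEAK_SIZE 16	/* mirrors F2FS_XTS_TWEAK_SIZE */

static void build_tweak(uint8_t tweak[TWEAK_SIZE], uint64_t index)
{
	memcpy(tweak, &index, sizeof(index));	/* low bytes: page index */
	memset(tweak + sizeof(index), 0, TWEAK_SIZE - sizeof(index));
}

int main(void)
{
	uint8_t tweak[TWEAK_SIZE];
	int i;

	build_tweak(tweak, 0x1234);
	for (i = 0; i < TWEAK_SIZE; i++)
		printf("%02x", tweak[i]);
	printf("\n");	/* 34120000... on a little-endian machine */
	return 0;
}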
-static struct page *alloc_bounce_page(struct f2fs_crypto_ctx *ctx)
-{
- ctx->w.bounce_page = mempool_alloc(f2fs_bounce_page_pool, GFP_NOWAIT);
- if (ctx->w.bounce_page == NULL)
- return ERR_PTR(-ENOMEM);
- ctx->flags |= F2FS_WRITE_PATH_FL;
- return ctx->w.bounce_page;
-}
-
-/**
- * f2fs_encrypt() - Encrypts a page
- * @inode: The inode for which the encryption should take place
- * @plaintext_page: The page to encrypt. Must be locked.
- *
- * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
- * encryption context.
- *
- * Called on the page write path. The caller must call
- * f2fs_restore_control_page() on the returned ciphertext page to
- * release the bounce buffer and the encryption context.
- *
- * Return: An allocated page with the encrypted content on success. Else, an
- * error value or NULL.
- */
-struct page *f2fs_encrypt(struct inode *inode,
- struct page *plaintext_page)
-{
- struct f2fs_crypto_ctx *ctx;
- struct page *ciphertext_page = NULL;
- int err;
-
- BUG_ON(!PageLocked(plaintext_page));
-
- ctx = f2fs_get_crypto_ctx(inode);
- if (IS_ERR(ctx))
- return (struct page *)ctx;
-
- /* The encryption operation will require a bounce page. */
- ciphertext_page = alloc_bounce_page(ctx);
- if (IS_ERR(ciphertext_page))
- goto err_out;
-
- ctx->w.control_page = plaintext_page;
- err = f2fs_page_crypto(ctx, inode, F2FS_ENCRYPT, plaintext_page->index,
- plaintext_page, ciphertext_page);
- if (err) {
- ciphertext_page = ERR_PTR(err);
- goto err_out;
- }
-
- SetPagePrivate(ciphertext_page);
- set_page_private(ciphertext_page, (unsigned long)ctx);
- lock_page(ciphertext_page);
- return ciphertext_page;
-
-err_out:
- f2fs_release_crypto_ctx(ctx);
- return ciphertext_page;
-}
-
-/**
- * f2fs_decrypt() - Decrypts a page in-place
- * @ctx: The encryption context.
- * @page: The page to decrypt. Must be locked.
- *
- * Decrypts page in-place using the ctx encryption context.
- *
- * Called from the read completion callback.
- *
- * Return: Zero on success, non-zero otherwise.
- */
-int f2fs_decrypt(struct f2fs_crypto_ctx *ctx, struct page *page)
-{
- BUG_ON(!PageLocked(page));
-
- return f2fs_page_crypto(ctx, page->mapping->host,
- F2FS_DECRYPT, page->index, page, page);
-}
-
-/*
- * Convenience function which takes care of allocating and
- * deallocating the encryption context
- */
-int f2fs_decrypt_one(struct inode *inode, struct page *page)
-{
- struct f2fs_crypto_ctx *ctx = f2fs_get_crypto_ctx(inode);
- int ret;
-
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
- ret = f2fs_decrypt(ctx, page);
- f2fs_release_crypto_ctx(ctx);
- return ret;
-}
-
-bool f2fs_valid_contents_enc_mode(uint32_t mode)
-{
- return (mode == F2FS_ENCRYPTION_MODE_AES_256_XTS);
-}
-
-/**
- * f2fs_validate_encryption_key_size() - Validate the encryption key size
- * @mode: The key mode.
- * @size: The key size to validate.
- *
- * Return: The validated key size for @mode. Zero if invalid.
- */
-uint32_t f2fs_validate_encryption_key_size(uint32_t mode, uint32_t size)
-{
- if (size == f2fs_encryption_key_size(mode))
- return size;
- return 0;
-}
diff --git a/fs/f2fs/crypto_fname.c b/fs/f2fs/crypto_fname.c
deleted file mode 100644
index 38349ed5ea51..000000000000
--- a/fs/f2fs/crypto_fname.c
+++ /dev/null
@@ -1,440 +0,0 @@
-/*
- * linux/fs/f2fs/crypto_fname.c
- *
- * Copied from linux/fs/ext4/crypto.c
- *
- * Copyright (C) 2015, Google, Inc.
- * Copyright (C) 2015, Motorola Mobility
- *
- * This contains functions for filename crypto management in f2fs
- *
- * Written by Uday Savagaonkar, 2014.
- *
- * Adjust f2fs dentry structure
- * Jaegeuk Kim, 2015.
- *
- * This has not yet undergone a rigorous security audit.
- */
-#include <crypto/hash.h>
-#include <crypto/sha.h>
-#include <keys/encrypted-type.h>
-#include <keys/user-type.h>
-#include <linux/crypto.h>
-#include <linux/gfp.h>
-#include <linux/kernel.h>
-#include <linux/key.h>
-#include <linux/list.h>
-#include <linux/mempool.h>
-#include <linux/random.h>
-#include <linux/scatterlist.h>
-#include <linux/spinlock_types.h>
-#include <linux/f2fs_fs.h>
-#include <linux/ratelimit.h>
-
-#include "f2fs.h"
-#include "f2fs_crypto.h"
-#include "xattr.h"
-
-/**
- * f2fs_dir_crypt_complete() -
- */
-static void f2fs_dir_crypt_complete(struct crypto_async_request *req, int res)
-{
- struct f2fs_completion_result *ecr = req->data;
-
- if (res == -EINPROGRESS)
- return;
- ecr->res = res;
- complete(&ecr->completion);
-}
-
-bool f2fs_valid_filenames_enc_mode(uint32_t mode)
-{
- return (mode == F2FS_ENCRYPTION_MODE_AES_256_CTS);
-}
-
-static unsigned max_name_len(struct inode *inode)
-{
- return S_ISLNK(inode->i_mode) ? inode->i_sb->s_blocksize :
- F2FS_NAME_LEN;
-}
-
-/**
- * f2fs_fname_encrypt() -
- *
- * This function encrypts the input filename, and returns the length of the
- * ciphertext. Errors are returned as negative numbers. We trust the caller to
- * allocate sufficient memory to oname string.
- */
-static int f2fs_fname_encrypt(struct inode *inode,
- const struct qstr *iname, struct f2fs_str *oname)
-{
- u32 ciphertext_len;
- struct ablkcipher_request *req = NULL;
- DECLARE_F2FS_COMPLETION_RESULT(ecr);
- struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
- struct crypto_ablkcipher *tfm = ci->ci_ctfm;
- int res = 0;
- char iv[F2FS_CRYPTO_BLOCK_SIZE];
- struct scatterlist src_sg, dst_sg;
- int padding = 4 << (ci->ci_flags & F2FS_POLICY_FLAGS_PAD_MASK);
- char *workbuf, buf[32], *alloc_buf = NULL;
- unsigned lim = max_name_len(inode);
-
- if (iname->len <= 0 || iname->len > lim)
- return -EIO;
-
- ciphertext_len = (iname->len < F2FS_CRYPTO_BLOCK_SIZE) ?
- F2FS_CRYPTO_BLOCK_SIZE : iname->len;
- ciphertext_len = f2fs_fname_crypto_round_up(ciphertext_len, padding);
- ciphertext_len = (ciphertext_len > lim) ? lim : ciphertext_len;
-
- if (ciphertext_len <= sizeof(buf)) {
- workbuf = buf;
- } else {
- alloc_buf = kmalloc(ciphertext_len, GFP_NOFS);
- if (!alloc_buf)
- return -ENOMEM;
- workbuf = alloc_buf;
- }
-
- /* Allocate request */
- req = ablkcipher_request_alloc(tfm, GFP_NOFS);
- if (!req) {
- printk_ratelimited(KERN_ERR
- "%s: crypto_request_alloc() failed\n", __func__);
- kfree(alloc_buf);
- return -ENOMEM;
- }
- ablkcipher_request_set_callback(req,
- CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- f2fs_dir_crypt_complete, &ecr);
-
- /* Copy the input */
- memcpy(workbuf, iname->name, iname->len);
- if (iname->len < ciphertext_len)
- memset(workbuf + iname->len, 0, ciphertext_len - iname->len);
-
- /* Initialize IV */
- memset(iv, 0, F2FS_CRYPTO_BLOCK_SIZE);
-
- /* Create encryption request */
- sg_init_one(&src_sg, workbuf, ciphertext_len);
- sg_init_one(&dst_sg, oname->name, ciphertext_len);
- ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv);
- res = crypto_ablkcipher_encrypt(req);
- if (res == -EINPROGRESS || res == -EBUSY) {
- BUG_ON(req->base.data != &ecr);
- wait_for_completion(&ecr.completion);
- res = ecr.res;
- }
- kfree(alloc_buf);
- ablkcipher_request_free(req);
- if (res < 0) {
- printk_ratelimited(KERN_ERR
- "%s: Error (error code %d)\n", __func__, res);
- }
- oname->len = ciphertext_len;
- return res;
-}
-
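/*
 * Worked example (not part of the patch) for the ciphertext_len
 * computation in f2fs_fname_encrypt() above, assuming
 * F2FS_CRYPTO_BLOCK_SIZE == 16: a 5-byte name with pad flags giving
 * padding = 4 << 1 = 8 becomes
 *
 *	ciphertext_len = max(5, 16)      = 16	(raise to one cipher block)
 *	ciphertext_len = round_up(16, 8) = 16	(pad to the policy multiple)
 *	ciphertext_len = min(16, lim)    = 16	(clamp to the name limit)
 */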
-/*
- * f2fs_fname_decrypt()
- * This function decrypts the input filename, and returns
- * the length of the plaintext.
- * Errors are returned as negative numbers.
- * We trust the caller to allocate sufficient memory to oname string.
- */
-static int f2fs_fname_decrypt(struct inode *inode,
- const struct f2fs_str *iname, struct f2fs_str *oname)
-{
- struct ablkcipher_request *req = NULL;
- DECLARE_F2FS_COMPLETION_RESULT(ecr);
- struct scatterlist src_sg, dst_sg;
- struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
- struct crypto_ablkcipher *tfm = ci->ci_ctfm;
- int res = 0;
- char iv[F2FS_CRYPTO_BLOCK_SIZE];
- unsigned lim = max_name_len(inode);
-
- if (iname->len <= 0 || iname->len > lim)
- return -EIO;
-
- /* Allocate request */
- req = ablkcipher_request_alloc(tfm, GFP_NOFS);
- if (!req) {
- printk_ratelimited(KERN_ERR
- "%s: crypto_request_alloc() failed\n", __func__);
- return -ENOMEM;
- }
- ablkcipher_request_set_callback(req,
- CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- f2fs_dir_crypt_complete, &ecr);
-
- /* Initialize IV */
- memset(iv, 0, F2FS_CRYPTO_BLOCK_SIZE);
-
- /* Create decryption request */
- sg_init_one(&src_sg, iname->name, iname->len);
- sg_init_one(&dst_sg, oname->name, oname->len);
- ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
- res = crypto_ablkcipher_decrypt(req);
- if (res == -EINPROGRESS || res == -EBUSY) {
- BUG_ON(req->base.data != &ecr);
- wait_for_completion(&ecr.completion);
- res = ecr.res;
- }
- ablkcipher_request_free(req);
- if (res < 0) {
- printk_ratelimited(KERN_ERR
- "%s: Error in f2fs_fname_decrypt (error code %d)\n",
- __func__, res);
- return res;
- }
-
- oname->len = strnlen(oname->name, iname->len);
- return oname->len;
-}
-
-static const char *lookup_table =
- "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";
-
-/**
- * f2fs_fname_encode_digest() -
- *
- * Encodes the input digest using characters from the set [a-zA-Z0-9_+].
- * The encoded string is roughly 4/3 times the size of the input string.
- */
-static int digest_encode(const char *src, int len, char *dst)
-{
- int i = 0, bits = 0, ac = 0;
- char *cp = dst;
-
- while (i < len) {
- ac += (((unsigned char) src[i]) << bits);
- bits += 8;
- do {
- *cp++ = lookup_table[ac & 0x3f];
- ac >>= 6;
- bits -= 6;
- } while (bits >= 6);
- i++;
- }
- if (bits)
- *cp++ = lookup_table[ac & 0x3f];
- return cp - dst;
-}
-
-static int digest_decode(const char *src, int len, char *dst)
-{
- int i = 0, bits = 0, ac = 0;
- const char *p;
- char *cp = dst;
-
- while (i < len) {
- p = strchr(lookup_table, src[i]);
- if (p == NULL || src[i] == 0)
- return -2;
- ac += (p - lookup_table) << bits;
- bits += 6;
- if (bits >= 8) {
- *cp++ = ac & 0xff;
- ac >>= 8;
- bits -= 8;
- }
- i++;
- }
- if (ac)
- return -1;
- return cp - dst;
-}
-
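/*
 * Sketch (not part of the patch): digest_encode()/digest_decode() above
 * form a base64 variant -- 6 bits per output character, alphabet ending
 * in "+," rather than "+/", and no '=' padding. A minimal user-space
 * round-trip harness, assuming the two helpers and lookup_table are
 * compiled into the same file as written:
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *msg = "f2fs";
	char enc[16], dec[16];
	int elen, dlen;

	elen = digest_encode(msg, strlen(msg), enc);	/* 4 bytes -> 6 chars */
	dlen = digest_decode(enc, elen, dec);		/* 6 chars -> 4 bytes */
	printf("%.*s -> %.*s\n", elen, enc, dlen, dec);
	return (dlen == 4 && !memcmp(dec, msg, 4)) ? 0 : 1;
}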
-/**
- * f2fs_fname_crypto_round_up() -
- *
- * Return: The next multiple of block size
- */
-u32 f2fs_fname_crypto_round_up(u32 size, u32 blksize)
-{
- return ((size + blksize - 1) / blksize) * blksize;
-}
-
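/*
 * Note (not part of the patch): every padding value passed in here is
 * 4 << (ci_flags & F2FS_POLICY_FLAGS_PAD_MASK), i.e. a power of two, so
 * the divide/multiply above is equivalent to the usual mask form:
 */
#include <assert.h>
#include <stdint.h>

static uint32_t round_up_mask(uint32_t size, uint32_t blksize)
{
	return (size + blksize - 1) & ~(blksize - 1);	/* blksize = 2^n only */
}

int main(void)
{
	assert(round_up_mask(17, 16) == ((17 + 16 - 1) / 16) * 16);	/* 32 */
	assert(round_up_mask(32, 16) == 32);
	return 0;
}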
-/**
- * f2fs_fname_crypto_alloc_obuff() -
- *
- * Allocates an output buffer that is sufficient for the crypto operation
- * specified by the context and the direction.
- */
-int f2fs_fname_crypto_alloc_buffer(struct inode *inode,
- u32 ilen, struct f2fs_str *crypto_str)
-{
- unsigned int olen;
- int padding = 16;
- struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
-
- if (ci)
- padding = 4 << (ci->ci_flags & F2FS_POLICY_FLAGS_PAD_MASK);
- if (padding < F2FS_CRYPTO_BLOCK_SIZE)
- padding = F2FS_CRYPTO_BLOCK_SIZE;
- olen = f2fs_fname_crypto_round_up(ilen, padding);
- crypto_str->len = olen;
- if (olen < F2FS_FNAME_CRYPTO_DIGEST_SIZE * 2)
- olen = F2FS_FNAME_CRYPTO_DIGEST_SIZE * 2;
- /* Allocated buffer can hold one more character to null-terminate the
- * string */
- crypto_str->name = kmalloc(olen + 1, GFP_NOFS);
- if (!(crypto_str->name))
- return -ENOMEM;
- return 0;
-}
-
-/**
- * f2fs_fname_crypto_free_buffer() -
- *
- * Frees the buffer allocated for crypto operation.
- */
-void f2fs_fname_crypto_free_buffer(struct f2fs_str *crypto_str)
-{
- if (!crypto_str)
- return;
- kfree(crypto_str->name);
- crypto_str->name = NULL;
-}
-
-/**
- * f2fs_fname_disk_to_usr() - converts a filename from disk space to user space
- */
-int f2fs_fname_disk_to_usr(struct inode *inode,
- f2fs_hash_t *hash,
- const struct f2fs_str *iname,
- struct f2fs_str *oname)
-{
- const struct qstr qname = FSTR_TO_QSTR(iname);
- char buf[24];
- int ret;
-
- if (is_dot_dotdot(&qname)) {
- oname->name[0] = '.';
- oname->name[iname->len - 1] = '.';
- oname->len = iname->len;
- return oname->len;
- }
-
- if (F2FS_I(inode)->i_crypt_info)
- return f2fs_fname_decrypt(inode, iname, oname);
-
- if (iname->len <= F2FS_FNAME_CRYPTO_DIGEST_SIZE) {
- ret = digest_encode(iname->name, iname->len, oname->name);
- oname->len = ret;
- return ret;
- }
- if (hash) {
- memcpy(buf, hash, 4);
- memset(buf + 4, 0, 4);
- } else
- memset(buf, 0, 8);
- memcpy(buf + 8, iname->name + ((iname->len - 17) & ~15), 16);
- oname->name[0] = '_';
- ret = digest_encode(buf, 24, oname->name + 1);
- oname->len = ret + 1;
- return ret + 1;
-}
-
-/**
- * f2fs_fname_usr_to_disk() - converts a filename from user space to disk space
- */
-int f2fs_fname_usr_to_disk(struct inode *inode,
- const struct qstr *iname,
- struct f2fs_str *oname)
-{
- int res;
- struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
-
- if (is_dot_dotdot(iname)) {
- oname->name[0] = '.';
- oname->name[iname->len - 1] = '.';
- oname->len = iname->len;
- return oname->len;
- }
-
- if (ci) {
- res = f2fs_fname_encrypt(inode, iname, oname);
- return res;
- }
- /* Without a proper key, a user is not allowed to modify the filenames
- * in a directory. Consequently, a user space name cannot be mapped to
- * a disk-space name */
- return -EACCES;
-}
-
-int f2fs_fname_setup_filename(struct inode *dir, const struct qstr *iname,
- int lookup, struct f2fs_filename *fname)
-{
- struct f2fs_crypt_info *ci;
- int ret = 0, bigname = 0;
-
- memset(fname, 0, sizeof(struct f2fs_filename));
- fname->usr_fname = iname;
-
- if (!f2fs_encrypted_inode(dir) || is_dot_dotdot(iname)) {
- fname->disk_name.name = (unsigned char *)iname->name;
- fname->disk_name.len = iname->len;
- return 0;
- }
- ret = f2fs_get_encryption_info(dir);
- if (ret)
- return ret;
- ci = F2FS_I(dir)->i_crypt_info;
- if (ci) {
- ret = f2fs_fname_crypto_alloc_buffer(dir, iname->len,
- &fname->crypto_buf);
- if (ret < 0)
- return ret;
- ret = f2fs_fname_encrypt(dir, iname, &fname->crypto_buf);
- if (ret < 0)
- goto errout;
- fname->disk_name.name = fname->crypto_buf.name;
- fname->disk_name.len = fname->crypto_buf.len;
- return 0;
- }
- if (!lookup)
- return -EACCES;
-
- /* We don't have the key and we are doing a lookup; decode the
- * user-supplied name
- */
- if (iname->name[0] == '_')
- bigname = 1;
- if ((bigname && (iname->len != 33)) ||
- (!bigname && (iname->len > 43)))
- return -ENOENT;
-
- fname->crypto_buf.name = kmalloc(32, GFP_KERNEL);
- if (fname->crypto_buf.name == NULL)
- return -ENOMEM;
- ret = digest_decode(iname->name + bigname, iname->len - bigname,
- fname->crypto_buf.name);
- if (ret < 0) {
- ret = -ENOENT;
- goto errout;
- }
- fname->crypto_buf.len = ret;
- if (bigname) {
- memcpy(&fname->hash, fname->crypto_buf.name, 4);
- } else {
- fname->disk_name.name = fname->crypto_buf.name;
- fname->disk_name.len = fname->crypto_buf.len;
- }
- return 0;
-errout:
- f2fs_fname_crypto_free_buffer(&fname->crypto_buf);
- return ret;
-}
-
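/*
 * Where the magic lengths in f2fs_fname_setup_filename() come from (not
 * part of the patch): the encoder emits ceil(8n/6) characters for n input
 * bytes, so
 *
 *	big name:   '_' + ceil(24 * 8 / 6) = 1 + 32 = 33 characters (exact)
 *	otherwise:        ceil(32 * 8 / 6) =          43 characters (max)
 *
 * and 43 characters decode to at most 32 bytes (258 bits, the trailing
 * two bits must be zero), which is why the 32-byte crypto_buf suffices.
 */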
-void f2fs_fname_free_filename(struct f2fs_filename *fname)
-{
- kfree(fname->crypto_buf.name);
- fname->crypto_buf.name = NULL;
- fname->usr_fname = NULL;
- fname->disk_name.name = NULL;
-}
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 4fb5709256fd..283fc9de4762 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -19,6 +19,8 @@
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
+#include <linux/mm.h>
+#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include "f2fs.h"
@@ -28,16 +30,41 @@
#include <trace/events/f2fs.h>
#include <trace/events/android_fs.h>
+static bool __is_cp_guaranteed(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+ struct inode *inode;
+ struct f2fs_sb_info *sbi;
+
+ if (!mapping)
+ return false;
+
+ inode = mapping->host;
+ sbi = F2FS_I_SB(inode);
+
+ if (inode->i_ino == F2FS_META_INO(sbi) ||
+ inode->i_ino == F2FS_NODE_INO(sbi) ||
+ S_ISDIR(inode->i_mode) ||
+ is_cold_data(page))
+ return true;
+ return false;
+}
+
static void f2fs_read_end_io(struct bio *bio)
{
struct bio_vec *bvec;
int i;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO))
+ bio->bi_error = -EIO;
+#endif
+
if (f2fs_bio_encrypted(bio)) {
if (bio->bi_error) {
- f2fs_release_crypto_ctx(bio->bi_private);
+ fscrypt_release_ctx(bio->bi_private);
} else {
- f2fs_end_io_crypto_work(bio->bi_private, bio);
+ fscrypt_decrypt_bio_pages(bio->bi_private, bio);
return;
}
}
@@ -46,7 +73,8 @@ static void f2fs_read_end_io(struct bio *bio)
struct page *page = bvec->bv_page;
if (!bio->bi_error) {
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
} else {
ClearPageUptodate(page);
SetPageError(page);
@@ -64,26 +92,66 @@ static void f2fs_write_end_io(struct bio *bio)
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
+ enum count_type type = WB_DATA_TYPE(page);
- f2fs_restore_and_release_control_page(&page);
+ fscrypt_pullback_bio_page(&page, true);
if (unlikely(bio->bi_error)) {
- set_page_dirty(page);
set_bit(AS_EIO, &page->mapping->flags);
- f2fs_stop_checkpoint(sbi);
+ f2fs_stop_checkpoint(sbi, true);
}
+ dec_page_count(sbi, type);
+ clear_cold_data(page);
end_page_writeback(page);
- dec_page_count(sbi, F2FS_WRITEBACK);
}
-
- if (!get_pages(sbi, F2FS_WRITEBACK) &&
- !list_empty(&sbi->cp_wait.task_list))
+ if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
+ wq_has_sleeper(&sbi->cp_wait))
wake_up(&sbi->cp_wait);
bio_put(bio);
}
/*
+ * Map a block address to its target device; __same_bdev() below uses
+ * this to tell whether pre_bio's bdev is same as its target device.
+ */
+struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
+ block_t blk_addr, struct bio *bio)
+{
+ struct block_device *bdev = sbi->sb->s_bdev;
+ int i;
+
+ for (i = 0; i < sbi->s_ndevs; i++) {
+ if (FDEV(i).start_blk <= blk_addr &&
+ FDEV(i).end_blk >= blk_addr) {
+ blk_addr -= FDEV(i).start_blk;
+ bdev = FDEV(i).bdev;
+ break;
+ }
+ }
+ if (bio) {
+ bio->bi_bdev = bdev;
+ bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
+ }
+ return bdev;
+}
+
+int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
+{
+ int i;
+
+ for (i = 0; i < sbi->s_ndevs; i++)
+ if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
+ return i;
+ return 0;
+}
+
+static bool __same_bdev(struct f2fs_sb_info *sbi,
+ block_t blk_addr, struct bio *bio)
+{
+ return f2fs_target_device(sbi, blk_addr, NULL) == bio->bi_bdev;
+}
+
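/*
 * Sketch (not part of the patch): with multiple devices, a filesystem-wide
 * block address maps to (device index, device-local block) by scanning the
 * per-device [start_blk, end_blk] ranges, as f2fs_target_device() does
 * above. A self-contained model of that lookup:
 */
#include <stdio.h>

struct dev_range { unsigned long start_blk, end_blk; };

static const struct dev_range devs[] = {
	{ 0,      99999 },	/* device 0 */
	{ 100000, 299999 },	/* device 1 */
};

static int target_device(unsigned long blkaddr, unsigned long *local)
{
	unsigned int i;

	for (i = 0; i < sizeof(devs) / sizeof(devs[0]); i++) {
		if (devs[i].start_blk <= blkaddr &&
		    blkaddr <= devs[i].end_blk) {
			*local = blkaddr - devs[i].start_blk;
			return i;
		}
	}
	*local = blkaddr;	/* like the kernel code, fall back to dev 0 */
	return 0;
}

int main(void)
{
	unsigned long local;
	int dev = target_device(123456, &local);

	printf("blk 123456 -> dev %d, local %lu\n", dev, local);	/* 1, 23456 */
	return 0;
}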
+/*
* Low-level block read/write IO operations.
*/
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
@@ -93,14 +161,24 @@ static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
bio = f2fs_bio_alloc(npages);
- bio->bi_bdev = sbi->sb->s_bdev;
- bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
+ f2fs_target_device(sbi, blk_addr, bio);
bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
bio->bi_private = is_read ? NULL : sbi;
return bio;
}
+static inline void __submit_bio(struct f2fs_sb_info *sbi, int rw,
+ struct bio *bio, enum page_type type)
+{
+ if (!is_read_io(rw)) {
+ if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
+ current->plug && (type == DATA || type == NODE))
+ blk_finish_plug(current->plug);
+ }
+ submit_bio(rw, bio);
+}
+
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
struct f2fs_io_info *fio = &io->fio;
@@ -113,12 +191,58 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
else
trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);
- submit_bio(fio->rw, io->bio);
+ __submit_bio(io->sbi, fio->rw, io->bio, fio->type);
io->bio = NULL;
}
-void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
- enum page_type type, int rw)
+static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
+ struct page *page, nid_t ino)
+{
+ struct bio_vec *bvec;
+ struct page *target;
+ int i;
+
+ if (!io->bio)
+ return false;
+
+ if (!inode && !page && !ino)
+ return true;
+
+ bio_for_each_segment_all(bvec, io->bio, i) {
+
+ if (bvec->bv_page->mapping)
+ target = bvec->bv_page;
+ else
+ target = fscrypt_control_page(bvec->bv_page);
+
+ if (inode && inode == target->mapping->host)
+ return true;
+ if (page && page == target)
+ return true;
+ if (ino && ino == ino_of_node(target))
+ return true;
+ }
+
+ return false;
+}
+
+static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
+ struct page *page, nid_t ino,
+ enum page_type type)
+{
+ enum page_type btype = PAGE_TYPE_OF_BIO(type);
+ struct f2fs_bio_info *io = &sbi->write_io[btype];
+ bool ret;
+
+ down_read(&io->io_rwsem);
+ ret = __has_merged_page(io, inode, page, ino);
+ up_read(&io->io_rwsem);
+ return ret;
+}
+
+static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
+ struct inode *inode, struct page *page,
+ nid_t ino, enum page_type type, int rw)
{
enum page_type btype = PAGE_TYPE_OF_BIO(type);
struct f2fs_bio_info *io;
@@ -127,6 +251,9 @@ void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
down_write(&io->io_rwsem);
+ if (!__has_merged_page(io, inode, page, ino))
+ goto out;
+
/* change META to META_FLUSH in the checkpoint procedure */
if (type >= META_FLUSH) {
io->fio.type = META_FLUSH;
@@ -136,9 +263,31 @@ void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
}
__submit_merged_bio(io);
+out:
up_write(&io->io_rwsem);
}
+void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
+ int rw)
+{
+ __f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
+}
+
+void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
+ struct inode *inode, struct page *page,
+ nid_t ino, enum page_type type, int rw)
+{
+ if (has_merged_page(sbi, inode, page, ino, type))
+ __f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
+}
+
+void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
+{
+ f2fs_submit_merged_bio(sbi, DATA, WRITE);
+ f2fs_submit_merged_bio(sbi, NODE, WRITE);
+ f2fs_submit_merged_bio(sbi, META, WRITE);
+}
+
/*
* Fill the locked page with data located in the block address.
* Return unlocked page.
@@ -146,20 +295,21 @@ void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
struct bio *bio;
- struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page;
+ struct page *page = fio->encrypted_page ?
+ fio->encrypted_page : fio->page;
trace_f2fs_submit_page_bio(page, fio);
f2fs_trace_ios(fio, 0);
/* Allocate a new bio */
- bio = __bio_alloc(fio->sbi, fio->blk_addr, 1, is_read_io(fio->rw));
+ bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw));
- if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
+ if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
return -EFAULT;
}
- submit_bio(fio->rw, bio);
+ __submit_bio(fio->sbi, fio->rw, bio, fio->type);
return 0;
}
@@ -173,39 +323,51 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
io = is_read ? &sbi->read_io : &sbi->write_io[btype];
- verify_block_addr(sbi, fio->blk_addr);
+ if (fio->old_blkaddr != NEW_ADDR)
+ verify_block_addr(sbi, fio->old_blkaddr);
+ verify_block_addr(sbi, fio->new_blkaddr);
- down_write(&io->io_rwsem);
+ bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
if (!is_read)
- inc_page_count(sbi, F2FS_WRITEBACK);
+ inc_page_count(sbi, WB_DATA_TYPE(bio_page));
- if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
- io->fio.rw != fio->rw))
+ down_write(&io->io_rwsem);
+
+ if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
+ (io->fio.rw != fio->rw) ||
+ !__same_bdev(sbi, fio->new_blkaddr, io->bio)))
__submit_merged_bio(io);
alloc_new:
if (io->bio == NULL) {
- int bio_blocks = MAX_BIO_BLOCKS(sbi);
-
- io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
+ io->bio = __bio_alloc(sbi, fio->new_blkaddr,
+ BIO_MAX_PAGES, is_read);
io->fio = *fio;
}
- bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
-
- if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) <
- PAGE_CACHE_SIZE) {
+ if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
+ PAGE_SIZE) {
__submit_merged_bio(io);
goto alloc_new;
}
- io->last_block_in_bio = fio->blk_addr;
+ io->last_block_in_bio = fio->new_blkaddr;
f2fs_trace_ios(fio, 0);
up_write(&io->io_rwsem);
trace_f2fs_submit_page_mbio(fio->page, fio);
}
+static void __set_data_blkaddr(struct dnode_of_data *dn)
+{
+ struct f2fs_node *rn = F2FS_NODE(dn->node_page);
+ __le32 *addr_array;
+
+ /* Get physical address of data block */
+ addr_array = blkaddr_in_node(rn);
+ addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
+}
+
/*
* Lock ordering for the change of data block address:
* ->data_page
@@ -214,39 +376,63 @@ alloc_new:
*/
void set_data_blkaddr(struct dnode_of_data *dn)
{
- struct f2fs_node *rn;
- __le32 *addr_array;
- struct page *node_page = dn->node_page;
- unsigned int ofs_in_node = dn->ofs_in_node;
-
- f2fs_wait_on_page_writeback(node_page, NODE);
-
- rn = F2FS_NODE(node_page);
+ f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
+ __set_data_blkaddr(dn);
+ if (set_page_dirty(dn->node_page))
+ dn->node_changed = true;
+}
- /* Get physical address of data block */
- addr_array = blkaddr_in_node(rn);
- addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
- set_page_dirty(node_page);
+void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
+{
+ dn->data_blkaddr = blkaddr;
+ set_data_blkaddr(dn);
+ f2fs_update_extent_cache(dn);
}
-int reserve_new_block(struct dnode_of_data *dn)
+/* dn->ofs_in_node will be returned with up-to-date last block pointer */
+int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
- if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
+ if (!count)
+ return 0;
+
+ if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return -EPERM;
- if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
+ if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
return -ENOSPC;
- trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);
+ trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
+ dn->ofs_in_node, count);
- dn->data_blkaddr = NEW_ADDR;
- set_data_blkaddr(dn);
- mark_inode_dirty(dn->inode);
- sync_inode_page(dn);
+ f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
+
+ for (; count > 0; dn->ofs_in_node++) {
+ block_t blkaddr =
+ datablock_addr(dn->node_page, dn->ofs_in_node);
+ if (blkaddr == NULL_ADDR) {
+ dn->data_blkaddr = NEW_ADDR;
+ __set_data_blkaddr(dn);
+ count--;
+ }
+ }
+
+ if (set_page_dirty(dn->node_page))
+ dn->node_changed = true;
return 0;
}
+/* Should keep dn->ofs_in_node unchanged */
+int reserve_new_block(struct dnode_of_data *dn)
+{
+ unsigned int ofs_in_node = dn->ofs_in_node;
+ int ret;
+
+ ret = reserve_new_blocks(dn, 1);
+ dn->ofs_in_node = ofs_in_node;
+ return ret;
+}
+
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
bool need_put = dn->inode_page ? false : true;
@@ -326,13 +512,14 @@ got_it:
* see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
*/
if (dn.data_blkaddr == NEW_ADDR) {
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
- SetPageUptodate(page);
+ zero_user_segment(page, 0, PAGE_SIZE);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
unlock_page(page);
return page;
}
- fio.blk_addr = dn.data_blkaddr;
+ fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
fio.page = page;
err = f2fs_submit_page_bio(&fio);
if (err)
@@ -386,14 +573,14 @@ repeat:
/* wait for read completion */
lock_page(page);
- if (unlikely(!PageUptodate(page))) {
- f2fs_put_page(page, 1);
- return ERR_PTR(-EIO);
- }
if (unlikely(page->mapping != mapping)) {
f2fs_put_page(page, 1);
goto repeat;
}
+ if (unlikely(!PageUptodate(page))) {
+ f2fs_put_page(page, 1);
+ return ERR_PTR(-EIO);
+ }
return page;
}
@@ -413,7 +600,7 @@ struct page *get_new_data_page(struct inode *inode,
struct page *page;
struct dnode_of_data dn;
int err;
-repeat:
+
page = f2fs_grab_cache_page(mapping, index, true);
if (!page) {
/*
@@ -437,125 +624,99 @@ repeat:
goto got_it;
if (dn.data_blkaddr == NEW_ADDR) {
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
- SetPageUptodate(page);
+ zero_user_segment(page, 0, PAGE_SIZE);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
} else {
f2fs_put_page(page, 1);
- page = get_read_data_page(inode, index, READ_SYNC, true);
+ /* if ipage exists, blkaddr should be NEW_ADDR */
+ f2fs_bug_on(F2FS_I_SB(inode), ipage);
+ page = get_lock_data_page(inode, index, true);
if (IS_ERR(page))
- goto repeat;
-
- /* wait for read completion */
- lock_page(page);
+ return page;
}
got_it:
if (new_i_size && i_size_read(inode) <
- ((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) {
- i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT));
- /* Only the directory inode sets new_i_size */
- set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
- }
+ ((loff_t)(index + 1) << PAGE_SHIFT))
+ f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
return page;
}
static int __allocate_data_block(struct dnode_of_data *dn)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
- struct f2fs_inode_info *fi = F2FS_I(dn->inode);
struct f2fs_summary sum;
struct node_info ni;
- int seg = CURSEG_WARM_DATA;
pgoff_t fofs;
+ blkcnt_t count = 1;
- if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
+ if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return -EPERM;
dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
if (dn->data_blkaddr == NEW_ADDR)
goto alloc;
- if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
+ if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
return -ENOSPC;
alloc:
get_node_info(sbi, dn->nid, &ni);
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
- if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
- seg = CURSEG_DIRECT_IO;
-
allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
- &sum, seg);
+ &sum, CURSEG_WARM_DATA);
set_data_blkaddr(dn);
/* update i_size */
- fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
+ fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
dn->ofs_in_node;
- if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
- i_size_write(dn->inode,
- ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT));
-
- /* direct IO doesn't use extent cache to maximize the performance */
- f2fs_drop_largest_extent(dn->inode, fofs);
-
+ if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
+ f2fs_i_size_write(dn->inode,
+ ((loff_t)(fofs + 1) << PAGE_SHIFT));
return 0;
}
-static void __allocate_data_blocks(struct inode *inode, loff_t offset,
- size_t count)
+static inline bool __force_buffered_io(struct inode *inode, int rw)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct dnode_of_data dn;
- u64 start = F2FS_BYTES_TO_BLK(offset);
- u64 len = F2FS_BYTES_TO_BLK(count);
- bool allocated;
- u64 end_offset;
-
- while (len) {
- f2fs_balance_fs(sbi);
- f2fs_lock_op(sbi);
-
- /* When reading holes, we need its node page */
- set_new_dnode(&dn, inode, NULL, NULL, 0);
- if (get_dnode_of_data(&dn, start, ALLOC_NODE))
- goto out;
-
- allocated = false;
- end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
-
- while (dn.ofs_in_node < end_offset && len) {
- block_t blkaddr;
+ return ((f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) ||
+ (rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
+ F2FS_I_SB(inode)->s_ndevs);
+}
- if (unlikely(f2fs_cp_error(sbi)))
- goto sync_out;
+int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct f2fs_map_blocks map;
+ int err = 0;
- blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
- if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
- if (__allocate_data_block(&dn))
- goto sync_out;
- allocated = true;
- }
- len--;
- start++;
- dn.ofs_in_node++;
- }
+ map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
+ map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
+ if (map.m_len > map.m_lblk)
+ map.m_len -= map.m_lblk;
+ else
+ map.m_len = 0;
- if (allocated)
- sync_inode_page(&dn);
+ map.m_next_pgofs = NULL;
- f2fs_put_dnode(&dn);
- f2fs_unlock_op(sbi);
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ err = f2fs_convert_inline_inode(inode);
+ if (err)
+ return err;
+ return f2fs_map_blocks(inode, &map, 1,
+ __force_buffered_io(inode, WRITE) ?
+ F2FS_GET_BLOCK_PRE_AIO :
+ F2FS_GET_BLOCK_PRE_DIO);
}
- return;
-
-sync_out:
- if (allocated)
- sync_inode_page(&dn);
- f2fs_put_dnode(&dn);
-out:
- f2fs_unlock_op(sbi);
- return;
+ if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
+ err = f2fs_convert_inline_inode(inode);
+ if (err)
+ return err;
+ }
+ if (!f2fs_has_inline_data(inode))
+ return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
+ return err;
}
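/*
 * Worked example (not part of the patch) for the range set up in
 * f2fs_preallocate_blocks() above, assuming 4 KiB blocks, with
 * F2FS_BLK_ALIGN() rounding a byte offset up to a block number and
 * F2FS_BYTES_TO_BLK() rounding down: a 10000-byte write at offset 5000
 * preallocates only the blocks it covers entirely:
 *
 *	m_lblk = ceil(5000 / 4096)            = 2
 *	m_len  = floor((5000 + 10000) / 4096) = 3;  3 - 2 = 1 block
 */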
/*
@@ -567,188 +728,210 @@ out:
* b. do not use extent cache for better performance
* c. give the block addresses to blockdev
*/
-static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
+int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
int create, int flag)
{
unsigned int maxblocks = map->m_len;
struct dnode_of_data dn;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
- pgoff_t pgofs, end_offset;
+ int mode = create ? ALLOC_NODE : LOOKUP_NODE;
+ pgoff_t pgofs, end_offset, end;
int err = 0, ofs = 1;
+ unsigned int ofs_in_node, last_ofs_in_node;
+ blkcnt_t prealloc;
struct extent_info ei;
- bool allocated = false;
+ block_t blkaddr;
+
+ if (!maxblocks)
+ return 0;
map->m_len = 0;
map->m_flags = 0;
/* it only supports block size == page size */
pgofs = (pgoff_t)map->m_lblk;
+ end = pgofs + maxblocks;
- if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
+ if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
map->m_pblk = ei.blk + pgofs - ei.fofs;
map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
map->m_flags = F2FS_MAP_MAPPED;
goto out;
}
+next_dnode:
if (create)
- f2fs_lock_op(F2FS_I_SB(inode));
+ f2fs_lock_op(sbi);
/* When reading holes, we need its node page */
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, pgofs, mode);
if (err) {
- if (err == -ENOENT)
+ if (flag == F2FS_GET_BLOCK_BMAP)
+ map->m_pblk = 0;
+ if (err == -ENOENT) {
err = 0;
+ if (map->m_next_pgofs)
+ *map->m_next_pgofs =
+ get_next_page_offset(&dn, pgofs);
+ }
goto unlock_out;
}
- if (dn.data_blkaddr == NEW_ADDR || dn.data_blkaddr == NULL_ADDR) {
+ prealloc = 0;
+ last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
+ end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+
+next_block:
+ blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
+
+ if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
if (create) {
if (unlikely(f2fs_cp_error(sbi))) {
err = -EIO;
- goto put_out;
+ goto sync_out;
+ }
+ if (flag == F2FS_GET_BLOCK_PRE_AIO) {
+ if (blkaddr == NULL_ADDR) {
+ prealloc++;
+ last_ofs_in_node = dn.ofs_in_node;
+ }
+ } else {
+ err = __allocate_data_block(&dn);
+ if (!err)
+ set_inode_flag(inode, FI_APPEND_WRITE);
}
- err = __allocate_data_block(&dn);
if (err)
- goto put_out;
- allocated = true;
+ goto sync_out;
map->m_flags = F2FS_MAP_NEW;
+ blkaddr = dn.data_blkaddr;
} else {
- if (flag != F2FS_GET_BLOCK_FIEMAP ||
- dn.data_blkaddr != NEW_ADDR) {
- if (flag == F2FS_GET_BLOCK_BMAP)
- err = -ENOENT;
- goto put_out;
+ if (flag == F2FS_GET_BLOCK_BMAP) {
+ map->m_pblk = 0;
+ goto sync_out;
}
-
- /*
- * preallocated unwritten block should be mapped
- * for fiemap.
- */
- if (dn.data_blkaddr == NEW_ADDR)
- map->m_flags = F2FS_MAP_UNWRITTEN;
+ if (flag == F2FS_GET_BLOCK_FIEMAP &&
+ blkaddr == NULL_ADDR) {
+ if (map->m_next_pgofs)
+ *map->m_next_pgofs = pgofs + 1;
+ }
+ if (flag != F2FS_GET_BLOCK_FIEMAP ||
+ blkaddr != NEW_ADDR)
+ goto sync_out;
}
}
- map->m_flags |= F2FS_MAP_MAPPED;
- map->m_pblk = dn.data_blkaddr;
- map->m_len = 1;
+ if (flag == F2FS_GET_BLOCK_PRE_AIO)
+ goto skip;
+
+ if (map->m_len == 0) {
+ /* preallocated unwritten block should be mapped for fiemap. */
+ if (blkaddr == NEW_ADDR)
+ map->m_flags |= F2FS_MAP_UNWRITTEN;
+ map->m_flags |= F2FS_MAP_MAPPED;
+
+ map->m_pblk = blkaddr;
+ map->m_len = 1;
+ } else if ((map->m_pblk != NEW_ADDR &&
+ blkaddr == (map->m_pblk + ofs)) ||
+ (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
+ flag == F2FS_GET_BLOCK_PRE_DIO) {
+ ofs++;
+ map->m_len++;
+ } else {
+ goto sync_out;
+ }
- end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+skip:
dn.ofs_in_node++;
pgofs++;
-get_next:
- if (dn.ofs_in_node >= end_offset) {
- if (allocated)
- sync_inode_page(&dn);
- allocated = false;
- f2fs_put_dnode(&dn);
+ /* preallocate blocks in batch for one dnode page */
+ if (flag == F2FS_GET_BLOCK_PRE_AIO &&
+ (pgofs == end || dn.ofs_in_node == end_offset)) {
- set_new_dnode(&dn, inode, NULL, NULL, 0);
- err = get_dnode_of_data(&dn, pgofs, mode);
- if (err) {
- if (err == -ENOENT)
- err = 0;
- goto unlock_out;
- }
+ dn.ofs_in_node = ofs_in_node;
+ err = reserve_new_blocks(&dn, prealloc);
+ if (err)
+ goto sync_out;
- end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+ map->m_len += dn.ofs_in_node - ofs_in_node;
+ if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
+ err = -ENOSPC;
+ goto sync_out;
+ }
+ dn.ofs_in_node = end_offset;
}
- if (maxblocks > map->m_len) {
- block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
+ if (pgofs >= end)
+ goto sync_out;
+ else if (dn.ofs_in_node < end_offset)
+ goto next_block;
- if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
- if (create) {
- if (unlikely(f2fs_cp_error(sbi))) {
- err = -EIO;
- goto sync_out;
- }
- err = __allocate_data_block(&dn);
- if (err)
- goto sync_out;
- allocated = true;
- map->m_flags |= F2FS_MAP_NEW;
- blkaddr = dn.data_blkaddr;
- } else {
- /*
- * we only merge preallocated unwritten blocks
- * for fiemap.
- */
- if (flag != F2FS_GET_BLOCK_FIEMAP ||
- blkaddr != NEW_ADDR)
- goto sync_out;
- }
- }
+ f2fs_put_dnode(&dn);
- /* Give more consecutive addresses for the readahead */
- if ((map->m_pblk != NEW_ADDR &&
- blkaddr == (map->m_pblk + ofs)) ||
- (map->m_pblk == NEW_ADDR &&
- blkaddr == NEW_ADDR)) {
- ofs++;
- dn.ofs_in_node++;
- pgofs++;
- map->m_len++;
- goto get_next;
- }
+ if (create) {
+ f2fs_unlock_op(sbi);
+ f2fs_balance_fs(sbi, dn.node_changed);
}
+ goto next_dnode;
+
sync_out:
- if (allocated)
- sync_inode_page(&dn);
-put_out:
f2fs_put_dnode(&dn);
unlock_out:
- if (create)
- f2fs_unlock_op(F2FS_I_SB(inode));
+ if (create) {
+ f2fs_unlock_op(sbi);
+ f2fs_balance_fs(sbi, dn.node_changed);
+ }
out:
trace_f2fs_map_blocks(inode, map, err);
return err;
}
static int __get_data_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh, int create, int flag)
+ struct buffer_head *bh, int create, int flag,
+ pgoff_t *next_pgofs)
{
struct f2fs_map_blocks map;
- int ret;
+ int err;
map.m_lblk = iblock;
map.m_len = bh->b_size >> inode->i_blkbits;
+ map.m_next_pgofs = next_pgofs;
- ret = f2fs_map_blocks(inode, &map, create, flag);
- if (!ret) {
+ err = f2fs_map_blocks(inode, &map, create, flag);
+ if (!err) {
map_bh(bh, inode->i_sb, map.m_pblk);
bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
bh->b_size = map.m_len << inode->i_blkbits;
}
- return ret;
+ return err;
}
static int get_data_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create, int flag)
+ struct buffer_head *bh_result, int create, int flag,
+ pgoff_t *next_pgofs)
{
- return __get_data_block(inode, iblock, bh_result, create, flag);
+ return __get_data_block(inode, iblock, bh_result, create,
+ flag, next_pgofs);
}
static int get_data_block_dio(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
return __get_data_block(inode, iblock, bh_result, create,
- F2FS_GET_BLOCK_DIO);
+ F2FS_GET_BLOCK_DIO, NULL);
}
static int get_data_block_bmap(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
/* Block number less than F2FS MAX BLOCKS */
- if (unlikely(iblock >= max_file_size(0)))
+ if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
return -EFBIG;
return __get_data_block(inode, iblock, bh_result, create,
- F2FS_GET_BLOCK_BMAP);
+ F2FS_GET_BLOCK_BMAP, NULL);
}
static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
@@ -766,10 +949,9 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
{
struct buffer_head map_bh;
sector_t start_blk, last_blk;
- loff_t isize = i_size_read(inode);
+ pgoff_t next_pgofs;
u64 logical = 0, phys = 0, size = 0;
u32 flags = 0;
- bool past_eof = false, whole_file = false;
int ret = 0;
ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
@@ -782,82 +964,55 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
return ret;
}
- mutex_lock(&inode->i_mutex);
-
- if (len >= isize) {
- whole_file = true;
- len = isize;
- }
+ inode_lock(inode);
if (logical_to_blk(inode, len) == 0)
len = blk_to_logical(inode, 1);
start_blk = logical_to_blk(inode, start);
last_blk = logical_to_blk(inode, start + len - 1);
+
next:
memset(&map_bh, 0, sizeof(struct buffer_head));
map_bh.b_size = len;
ret = get_data_block(inode, start_blk, &map_bh, 0,
- F2FS_GET_BLOCK_FIEMAP);
+ F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
if (ret)
goto out;
/* HOLE */
if (!buffer_mapped(&map_bh)) {
- start_blk++;
-
- if (!past_eof && blk_to_logical(inode, start_blk) >= isize)
- past_eof = 1;
-
- if (past_eof && size) {
- flags |= FIEMAP_EXTENT_LAST;
- ret = fiemap_fill_next_extent(fieinfo, logical,
- phys, size, flags);
- } else if (size) {
- ret = fiemap_fill_next_extent(fieinfo, logical,
- phys, size, flags);
- size = 0;
- }
+ start_blk = next_pgofs;
- /* if we have holes up to/past EOF then we're done */
- if (start_blk > last_blk || past_eof || ret)
- goto out;
- } else {
- if (start_blk > last_blk && !whole_file) {
- ret = fiemap_fill_next_extent(fieinfo, logical,
- phys, size, flags);
- goto out;
- }
-
- /*
- * if size != 0 then we know we already have an extent
- * to add, so add it.
- */
- if (size) {
- ret = fiemap_fill_next_extent(fieinfo, logical,
- phys, size, flags);
- if (ret)
- goto out;
- }
+ if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
+ F2FS_I_SB(inode)->max_file_blocks))
+ goto prep_next;
- logical = blk_to_logical(inode, start_blk);
- phys = blk_to_logical(inode, map_bh.b_blocknr);
- size = map_bh.b_size;
- flags = 0;
- if (buffer_unwritten(&map_bh))
- flags = FIEMAP_EXTENT_UNWRITTEN;
+ flags |= FIEMAP_EXTENT_LAST;
+ }
- start_blk += logical_to_blk(inode, size);
+ if (size) {
+ if (f2fs_encrypted_inode(inode))
+ flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
- /*
- * If we are past the EOF, then we need to make sure as
- * soon as we find a hole that the last extent we found
- * is marked with FIEMAP_EXTENT_LAST
- */
- if (!past_eof && logical + size >= isize)
- past_eof = true;
+ ret = fiemap_fill_next_extent(fieinfo, logical,
+ phys, size, flags);
}
+
+ if (start_blk > last_blk || ret)
+ goto out;
+
+ logical = blk_to_logical(inode, start_blk);
+ phys = blk_to_logical(inode, map_bh.b_blocknr);
+ size = map_bh.b_size;
+ flags = 0;
+ if (buffer_unwritten(&map_bh))
+ flags = FIEMAP_EXTENT_UNWRITTEN;
+
+ start_blk += logical_to_blk(inode, size);
+
+prep_next:
cond_resched();
if (fatal_signal_pending(current))
ret = -EINTR;
@@ -867,10 +1022,39 @@ out:
if (ret == 1)
ret = 0;
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
return ret;
}
+static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
+ unsigned nr_pages)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct fscrypt_ctx *ctx = NULL;
+ struct bio *bio;
+
+ if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+ ctx = fscrypt_get_ctx(inode, GFP_NOFS);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ /* wait for the page to be moved by cleaning */
+ f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
+ }
+
+ bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
+ if (!bio) {
+ if (ctx)
+ fscrypt_release_ctx(ctx);
+ return ERR_PTR(-ENOMEM);
+ }
+ f2fs_target_device(sbi, blkaddr, bio);
+ bio->bi_end_io = f2fs_read_end_io;
+ bio->bi_private = ctx;
+
+ return bio;
+}
+
/*
* This function was originally taken from fs/mpage.c, and customized for f2fs.
* Major change was from block_size == page_size in f2fs by default.
@@ -889,13 +1073,13 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
sector_t last_block;
sector_t last_block_in_file;
sector_t block_nr;
- struct block_device *bdev = inode->i_sb->s_bdev;
struct f2fs_map_blocks map;
map.m_pblk = 0;
map.m_lblk = 0;
map.m_len = 0;
map.m_flags = 0;
+ map.m_next_pgofs = NULL;
for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
@@ -934,7 +1118,7 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
map.m_len = last_block - block_in_file;
if (f2fs_map_blocks(inode, &map, 0,
- F2FS_GET_BLOCK_READ))
+ F2FS_GET_BLOCK_READ))
goto set_error_page;
}
got_it:
@@ -947,8 +1131,9 @@ got_it:
goto confused;
}
} else {
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
- SetPageUptodate(page);
+ zero_user_segment(page, 0, PAGE_SIZE);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
unlock_page(page);
goto next_page;
}
@@ -957,37 +1142,18 @@ got_it:
* This page will go to BIO. Do we need to send this
* BIO off first?
*/
- if (bio && (last_block_in_bio != block_nr - 1)) {
+ if (bio && (last_block_in_bio != block_nr - 1 ||
+ !__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
submit_and_realloc:
- submit_bio(READ, bio);
+ __submit_bio(F2FS_I_SB(inode), READ, bio, DATA);
bio = NULL;
}
if (bio == NULL) {
- struct f2fs_crypto_ctx *ctx = NULL;
-
- if (f2fs_encrypted_inode(inode) &&
- S_ISREG(inode->i_mode)) {
-
- ctx = f2fs_get_crypto_ctx(inode);
- if (IS_ERR(ctx))
- goto set_error_page;
-
- /* wait the page to be moved by cleaning */
- f2fs_wait_on_encrypted_page_writeback(
- F2FS_I_SB(inode), block_nr);
- }
-
- bio = bio_alloc(GFP_KERNEL,
- min_t(int, nr_pages, BIO_MAX_PAGES));
- if (!bio) {
- if (ctx)
- f2fs_release_crypto_ctx(ctx);
+ bio = f2fs_grab_bio(inode, block_nr, nr_pages);
+ if (IS_ERR(bio)) {
+ bio = NULL;
goto set_error_page;
}
- bio->bi_bdev = bdev;
- bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
- bio->bi_end_io = f2fs_read_end_io;
- bio->bi_private = ctx;
}
if (bio_add_page(bio, page, blocksize, 0) < blocksize)
@@ -997,22 +1163,22 @@ submit_and_realloc:
goto next_page;
set_error_page:
SetPageError(page);
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ zero_user_segment(page, 0, PAGE_SIZE);
unlock_page(page);
goto next_page;
confused:
if (bio) {
- submit_bio(READ, bio);
+ __submit_bio(F2FS_I_SB(inode), READ, bio, DATA);
bio = NULL;
}
unlock_page(page);
next_page:
if (pages)
- page_cache_release(page);
+ put_page(page);
}
BUG_ON(pages && !list_empty(pages));
if (bio)
- submit_bio(READ, bio);
+ __submit_bio(F2FS_I_SB(inode), READ, bio, DATA);
return 0;
}
@@ -1059,23 +1225,33 @@ int do_write_data_page(struct f2fs_io_info *fio)
if (err)
return err;
- fio->blk_addr = dn.data_blkaddr;
+ fio->old_blkaddr = dn.data_blkaddr;
/* This page is already truncated */
- if (fio->blk_addr == NULL_ADDR) {
+ if (fio->old_blkaddr == NULL_ADDR) {
ClearPageUptodate(page);
goto out_writepage;
}
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+ gfp_t gfp_flags = GFP_NOFS;
/* wait for GCed encrypted page writeback */
f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
- fio->blk_addr);
-
- fio->encrypted_page = f2fs_encrypt(inode, fio->page);
+ fio->old_blkaddr);
+retry_encrypt:
+ fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
+ gfp_flags);
if (IS_ERR(fio->encrypted_page)) {
err = PTR_ERR(fio->encrypted_page);
+ if (err == -ENOMEM) {
+ /* flush pending ios and wait for a while */
+ f2fs_flush_merged_bios(F2FS_I_SB(inode));
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ gfp_flags |= __GFP_NOFAIL;
+ err = 0;
+ goto retry_encrypt;
+ }
goto out_writepage;
}
}
@@ -1086,20 +1262,19 @@ int do_write_data_page(struct f2fs_io_info *fio)
* If current allocation needs SSR,
* it had better in-place writes for updated data.
*/
- if (unlikely(fio->blk_addr != NEW_ADDR &&
+ if (unlikely(fio->old_blkaddr != NEW_ADDR &&
!is_cold_data(page) &&
+ !IS_ATOMIC_WRITTEN_PAGE(page) &&
need_inplace_update(inode))) {
rewrite_data_page(fio);
- set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
+ set_inode_flag(inode, FI_UPDATE_WRITE);
trace_f2fs_do_write_data_page(page, IPU);
} else {
write_data_page(&dn, fio);
- set_data_blkaddr(&dn);
- f2fs_update_extent_cache(&dn);
trace_f2fs_do_write_data_page(page, OPU);
- set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
+ set_inode_flag(inode, FI_APPEND_WRITE);
if (page->index == 0)
- set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
+ set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
}
out_writepage:
f2fs_put_dnode(&dn);
@@ -1113,7 +1288,8 @@ static int f2fs_write_data_page(struct page *page,
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
loff_t i_size = i_size_read(inode);
const pgoff_t end_index = ((unsigned long long) i_size)
- >> PAGE_CACHE_SHIFT;
+ >> PAGE_SHIFT;
+ loff_t psize = (page->index + 1) << PAGE_SHIFT;
unsigned offset = 0;
bool need_balance_fs = false;
int err = 0;
@@ -1134,37 +1310,37 @@ static int f2fs_write_data_page(struct page *page,
* If the offset is out-of-range of file size,
* this page does not have to be written to disk.
*/
- offset = i_size & (PAGE_CACHE_SIZE - 1);
+ offset = i_size & (PAGE_SIZE - 1);
if ((page->index >= end_index + 1) || !offset)
goto out;
- zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+ zero_user_segment(page, offset, PAGE_SIZE);
write:
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
if (f2fs_is_drop_cache(inode))
goto out;
- if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
- available_free_memory(sbi, BASE_CHECK))
+ /* we should not write the 0'th page, which holds the journal header */
+ if (f2fs_is_volatile_file(inode) && (!page->index ||
+ (!wbc->for_reclaim &&
+ available_free_memory(sbi, BASE_CHECK))))
goto redirty_out;
+ /* we should bypass data pages to proceed the kworker jobs */
+ if (unlikely(f2fs_cp_error(sbi))) {
+ mapping_set_error(page->mapping, -EIO);
+ goto out;
+ }
+
/* Dentry blocks are controlled by checkpoint */
if (S_ISDIR(inode->i_mode)) {
- if (unlikely(f2fs_cp_error(sbi)))
- goto redirty_out;
err = do_write_data_page(&fio);
goto done;
}
- /* we should bypass data pages to proceed the kworkder jobs */
- if (unlikely(f2fs_cp_error(sbi))) {
- SetPageError(page);
- goto out;
- }
-
if (!wbc->for_reclaim)
need_balance_fs = true;
- else if (has_not_enough_free_secs(sbi, 0))
+ else if (has_not_enough_free_secs(sbi, 0, 0))
goto redirty_out;
err = -EAGAIN;
@@ -1173,35 +1349,37 @@ write:
err = f2fs_write_inline_data(inode, page);
if (err == -EAGAIN)
err = do_write_data_page(&fio);
+ if (F2FS_I(inode)->last_disk_size < psize)
+ F2FS_I(inode)->last_disk_size = psize;
f2fs_unlock_op(sbi);
done:
if (err && err != -ENOENT)
goto redirty_out;
- clear_cold_data(page);
out:
inode_dec_dirty_pages(inode);
if (err)
ClearPageUptodate(page);
+
+ if (wbc->for_reclaim) {
+ f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
+ remove_dirty_inode(inode);
+ }
+
unlock_page(page);
- if (need_balance_fs)
- f2fs_balance_fs(sbi);
- if (wbc->for_reclaim)
+ f2fs_balance_fs(sbi, need_balance_fs);
+
+ if (unlikely(f2fs_cp_error(sbi)))
f2fs_submit_merged_bio(sbi, DATA, WRITE);
+
return 0;
redirty_out:
redirty_page_for_writepage(wbc, page);
- return AOP_WRITEPAGE_ACTIVATE;
-}
-
-static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
- void *data)
-{
- struct address_space *mapping = data;
- int ret = mapping->a_ops->writepage(page, wbc);
- mapping_set_error(mapping, ret);
- return ret;
+ if (!err)
+ return AOP_WRITEPAGE_ACTIVATE;
+ unlock_page(page);
+ return err;
}
/*
@@ -1210,8 +1388,7 @@ static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
* warm/hot data page.
*/
static int f2fs_write_cache_pages(struct address_space *mapping,
- struct writeback_control *wbc, writepage_t writepage,
- void *data)
+ struct writeback_control *wbc)
{
int ret = 0;
int done = 0;
@@ -1224,10 +1401,10 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
int cycled;
int range_whole = 0;
int tag;
- int step = 0;
+ int nwritten = 0;
pagevec_init(&pvec, 0);
-next:
+
if (wbc->range_cyclic) {
writeback_index = mapping->writeback_index; /* prev offset */
index = writeback_index;
@@ -1237,8 +1414,8 @@ next:
cycled = 0;
end = -1;
} else {
- index = wbc->range_start >> PAGE_CACHE_SHIFT;
- end = wbc->range_end >> PAGE_CACHE_SHIFT;
+ index = wbc->range_start >> PAGE_SHIFT;
+ end = wbc->range_end >> PAGE_SHIFT;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
cycled = 1; /* ignore range_cyclic tests */
@@ -1282,12 +1459,10 @@ continue_unlock:
goto continue_unlock;
}
- if (step == is_cold_data(page))
- goto continue_unlock;
-
if (PageWriteback(page)) {
if (wbc->sync_mode != WB_SYNC_NONE)
- f2fs_wait_on_page_writeback(page, DATA);
+ f2fs_wait_on_page_writeback(page,
+ DATA, true);
else
goto continue_unlock;
}
@@ -1296,16 +1471,22 @@ continue_unlock:
if (!clear_page_dirty_for_io(page))
goto continue_unlock;
- ret = (*writepage)(page, wbc, data);
+ ret = mapping->a_ops->writepage(page, wbc);
if (unlikely(ret)) {
+ /*
+ * keep nr_to_write, since vfs uses this to
+ * get # of written pages.
+ */
if (ret == AOP_WRITEPAGE_ACTIVATE) {
unlock_page(page);
ret = 0;
- } else {
- done_index = page->index + 1;
- done = 1;
- break;
+ continue;
}
+ done_index = page->index + 1;
+ done = 1;
+ break;
+ } else {
+ nwritten++;
}
if (--wbc->nr_to_write <= 0 &&
@@ -1318,11 +1499,6 @@ continue_unlock:
cond_resched();
}
- if (step < 1) {
- step++;
- goto next;
- }
-
if (!cycled && !done) {
cycled = 1;
index = 0;
@@ -1332,6 +1508,10 @@ continue_unlock:
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = done_index;
+ if (nwritten)
+ f2fs_submit_merged_bio_cond(F2FS_M_SB(mapping), mapping->host,
+ NULL, 0, DATA, WRITE);
+
return ret;
}
@@ -1340,11 +1520,8 @@ static int f2fs_write_data_pages(struct address_space *mapping,
{
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- bool locked = false;
+ struct blk_plug plug;
int ret;
- long diff;
-
- trace_f2fs_writepages(mapping->host, wbc, DATA);
/* deal with chardevs and other special file */
if (!mapping->a_ops->writepage)
@@ -1359,39 +1536,117 @@ static int f2fs_write_data_pages(struct address_space *mapping,
available_free_memory(sbi, DIRTY_DENTS))
goto skip_write;
+ /* skip writing during file defragment */
+ if (is_inode_flag_set(inode, FI_DO_DEFRAG))
+ goto skip_write;
+
/* during POR, we don't need to trigger writepage at all. */
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto skip_write;
- diff = nr_pages_to_write(sbi, DATA, wbc);
-
- if (!S_ISDIR(inode->i_mode)) {
- mutex_lock(&sbi->writepages);
- locked = true;
- }
- ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
- f2fs_submit_merged_bio(sbi, DATA, WRITE);
- if (locked)
- mutex_unlock(&sbi->writepages);
+ trace_f2fs_writepages(mapping->host, wbc, DATA);
- remove_dirty_dir_inode(inode);
+ blk_start_plug(&plug);
+ ret = f2fs_write_cache_pages(mapping, wbc);
+ blk_finish_plug(&plug);
+ /*
+ * if some pages were truncated, we cannot rely on mapping->host
+ * to detect pending bios.
+ */
- wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
+ remove_dirty_inode(inode);
return ret;
skip_write:
wbc->pages_skipped += get_dirty_pages(inode);
+ trace_f2fs_writepages(mapping->host, wbc, DATA);
return 0;
}
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
struct inode *inode = mapping->host;
+ loff_t i_size = i_size_read(inode);
+
+ if (to > i_size) {
+ truncate_pagecache(inode, i_size);
+ truncate_blocks(inode, i_size, true);
+ }
+}
+
+static int prepare_write_begin(struct f2fs_sb_info *sbi,
+ struct page *page, loff_t pos, unsigned len,
+ block_t *blk_addr, bool *node_changed)
+{
+ struct inode *inode = page->mapping->host;
+ pgoff_t index = page->index;
+ struct dnode_of_data dn;
+ struct page *ipage;
+ bool locked = false;
+ struct extent_info ei;
+ int err = 0;
+
+ /*
+ * we already allocated all the blocks, so we don't need to get
+ * the block addresses when there is no need to fill the page.
+ */
+ if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE)
+ return 0;
+
+ if (f2fs_has_inline_data(inode) ||
+ (pos & PAGE_MASK) >= i_size_read(inode)) {
+ f2fs_lock_op(sbi);
+ locked = true;
+ }
+restart:
+ /* check inline_data */
+ ipage = get_node_page(sbi, inode->i_ino);
+ if (IS_ERR(ipage)) {
+ err = PTR_ERR(ipage);
+ goto unlock_out;
+ }
+
+ set_new_dnode(&dn, inode, ipage, ipage, 0);
- if (to > inode->i_size) {
- truncate_pagecache(inode, inode->i_size);
- truncate_blocks(inode, inode->i_size, true);
+ if (f2fs_has_inline_data(inode)) {
+ if (pos + len <= MAX_INLINE_DATA) {
+ read_inline_data(page, ipage);
+ set_inode_flag(inode, FI_DATA_EXIST);
+ if (inode->i_nlink)
+ set_inline_node(ipage);
+ } else {
+ err = f2fs_convert_inline_page(&dn, page);
+ if (err)
+ goto out;
+ if (dn.data_blkaddr == NULL_ADDR)
+ err = f2fs_get_block(&dn, index);
+ }
+ } else if (locked) {
+ err = f2fs_get_block(&dn, index);
+ } else {
+ if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+ dn.data_blkaddr = ei.blk + index - ei.fofs;
+ } else {
+ /* hole case */
+ err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
+ if (err || dn.data_blkaddr == NULL_ADDR) {
+ f2fs_put_dnode(&dn);
+ f2fs_lock_op(sbi);
+ locked = true;
+ goto restart;
+ }
+ }
}
+
+ /* f2fs_convert_inline_page may have set dn.node_changed */
+ *blk_addr = dn.data_blkaddr;
+ *node_changed = dn.node_changed;
+out:
+ f2fs_put_dnode(&dn);
+unlock_out:
+ if (locked)
+ f2fs_unlock_op(sbi);
+ return err;
}
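
The unlocked branch in prepare_write_begin() resolves the block address purely from the cached extent: when index lies inside [ei.fofs, ei.fofs + ei.len), the on-disk block is ei.blk plus the offset into the extent. A minimal standalone sketch of that mapping arithmetic (the types and the extent_lookup helper are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

typedef uint32_t block_t;

/* Illustrative model of f2fs' struct extent_info. */
struct extent_info {
    uint64_t fofs;   /* start file offset, in pages */
    block_t blk;     /* start block address on disk */
    uint32_t len;    /* length in blocks/pages */
};

/* Map a page index to a block address via a cached extent, if it hits. */
static bool extent_lookup(const struct extent_info *ei, uint64_t index,
                          block_t *blkaddr)
{
    if (index < ei->fofs || index >= ei->fofs + ei->len)
        return false;   /* miss: fall back to a dnode lookup */
    *blkaddr = ei->blk + (block_t)(index - ei->fofs);
    return true;
}

int main(void)
{
    struct extent_info ei = { .fofs = 100, .blk = 5000, .len = 8 };
    block_t b;

    if (extent_lookup(&ei, 103, &b))
        printf("index 103 -> block %u\n", (unsigned)b);  /* prints 5003 */
    return 0;
}
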
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
@@ -1401,9 +1656,9 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct page *page = NULL;
- struct page *ipage;
- pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
- struct dnode_of_data dn;
+ pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
+ bool need_balance = false;
+ block_t blkaddr = NULL_ADDR;
int err = 0;
if (trace_android_fs_datawrite_start_enabled()) {
@@ -1418,8 +1673,6 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
}
trace_f2fs_write_begin(inode, pos, len, flags);
- f2fs_balance_fs(sbi);
-
/*
* We should check this at this moment to avoid deadlock on inode page
* and #0 page. The locking rule for inline_data conversion should be:
@@ -1439,98 +1692,63 @@ repeat:
*pagep = page;
- f2fs_lock_op(sbi);
-
- /* check inline_data */
- ipage = get_node_page(sbi, inode->i_ino);
- if (IS_ERR(ipage)) {
- err = PTR_ERR(ipage);
- goto unlock_fail;
- }
-
- set_new_dnode(&dn, inode, ipage, ipage, 0);
+ err = prepare_write_begin(sbi, page, pos, len,
+ &blkaddr, &need_balance);
+ if (err)
+ goto fail;
- if (f2fs_has_inline_data(inode)) {
- if (pos + len <= MAX_INLINE_DATA) {
- read_inline_data(page, ipage);
- set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
- sync_inode_page(&dn);
- goto put_next;
+ if (need_balance && has_not_enough_free_secs(sbi, 0, 0)) {
+ unlock_page(page);
+ f2fs_balance_fs(sbi, true);
+ lock_page(page);
+ if (page->mapping != mapping) {
+ /* The page got truncated from under us */
+ f2fs_put_page(page, 1);
+ goto repeat;
}
- err = f2fs_convert_inline_page(&dn, page);
- if (err)
- goto put_fail;
}
- err = f2fs_get_block(&dn, index);
- if (err)
- goto put_fail;
-put_next:
- f2fs_put_dnode(&dn);
- f2fs_unlock_op(sbi);
-
- f2fs_wait_on_page_writeback(page, DATA);
+ f2fs_wait_on_page_writeback(page, DATA, false);
/* wait for GCed encrypted page writeback */
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
- f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);
-
- if (len == PAGE_CACHE_SIZE)
- goto out_update;
- if (PageUptodate(page))
- goto out_clear;
-
- if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
- unsigned start = pos & (PAGE_CACHE_SIZE - 1);
- unsigned end = start + len;
+ f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
- /* Reading beyond i_size is simple: memset to zero */
- zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
- goto out_update;
- }
+ if (len == PAGE_SIZE || PageUptodate(page))
+ return 0;
- if (dn.data_blkaddr == NEW_ADDR) {
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ if (blkaddr == NEW_ADDR) {
+ zero_user_segment(page, 0, PAGE_SIZE);
+ SetPageUptodate(page);
} else {
- struct f2fs_io_info fio = {
- .sbi = sbi,
- .type = DATA,
- .rw = READ_SYNC,
- .blk_addr = dn.data_blkaddr,
- .page = page,
- .encrypted_page = NULL,
- };
- err = f2fs_submit_page_bio(&fio);
- if (err)
+ struct bio *bio;
+
+ bio = f2fs_grab_bio(inode, blkaddr, 1);
+ if (IS_ERR(bio)) {
+ err = PTR_ERR(bio);
goto fail;
+ }
- lock_page(page);
- if (unlikely(!PageUptodate(page))) {
- err = -EIO;
+ if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
+ bio_put(bio);
+ err = -EFAULT;
goto fail;
}
+
+ __submit_bio(sbi, READ_SYNC, bio, DATA);
+
+ lock_page(page);
if (unlikely(page->mapping != mapping)) {
f2fs_put_page(page, 1);
goto repeat;
}
-
- /* avoid symlink page */
- if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
- err = f2fs_decrypt_one(inode, page);
- if (err)
- goto fail;
+ if (unlikely(!PageUptodate(page))) {
+ err = -EIO;
+ goto fail;
}
}
-out_update:
- SetPageUptodate(page);
-out_clear:
- clear_cold_data(page);
return 0;
-put_fail:
- f2fs_put_dnode(&dn);
-unlock_fail:
- f2fs_unlock_op(sbi);
fail:
f2fs_put_page(page, 1);
f2fs_write_failed(mapping, pos + len);
@@ -1547,15 +1765,27 @@ static int f2fs_write_end(struct file *file,
trace_android_fs_datawrite_end(inode, pos, len);
trace_f2fs_write_end(inode, pos, len, copied);
- set_page_dirty(page);
-
- if (pos + copied > i_size_read(inode)) {
- i_size_write(inode, pos + copied);
- mark_inode_dirty(inode);
- update_inode_page(inode);
+ /*
+ * This should have come from len == PAGE_SIZE, so we expect copied
+ * to be PAGE_SIZE as well. Otherwise, treat it as zero copied and
+ * let generic_perform_write() retry the copy via copied = 0.
+ */
+ if (!PageUptodate(page)) {
+ if (unlikely(copied != PAGE_SIZE))
+ copied = 0;
+ else
+ SetPageUptodate(page);
}
+ if (!copied)
+ goto unlock_out;
+ set_page_dirty(page);
+
+ if (pos + copied > i_size_read(inode))
+ f2fs_i_size_write(inode, pos + copied);
+unlock_out:
f2fs_put_page(page, 1);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return copied;
}
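
The rule encoded in f2fs_write_end() above: a page that is not yet uptodate is only acceptable when a full page was copied; any short copy is reported back as zero so generic_perform_write() retries. A hedged standalone model of that decision (MODEL_PAGE_SIZE and adjust_copied are stand-ins, not kernel symbols):

#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>

#define MODEL_PAGE_SIZE 4096    /* stand-in for PAGE_SIZE */

/*
 * Decide what to report back to the generic write path: returns the
 * adjusted 'copied', and sets *mark_uptodate when a full-page copy
 * lets the page be marked uptodate.
 */
static size_t adjust_copied(bool page_uptodate, size_t copied,
                            bool *mark_uptodate)
{
    *mark_uptodate = false;
    if (!page_uptodate) {
        if (copied != MODEL_PAGE_SIZE)
            return 0;           /* force a retry with copied = 0 */
        *mark_uptodate = true;  /* full page written: now uptodate */
    }
    return copied;
}

int main(void)
{
    bool up;
    printf("%zu\n", adjust_copied(false, 1024, &up));            /* 0: retry */
    printf("%zu\n", adjust_copied(false, MODEL_PAGE_SIZE, &up)); /* 4096 */
    return 0;
}
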
@@ -1574,29 +1804,20 @@ static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
}
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
- loff_t offset)
+ loff_t offset)
{
- struct file *file = iocb->ki_filp;
- struct address_space *mapping = file->f_mapping;
+ struct address_space *mapping = iocb->ki_filp->f_mapping;
struct inode *inode = mapping->host;
size_t count = iov_iter_count(iter);
+ int rw = iov_iter_rw(iter);
int err;
- /* we don't need to use inline_data strictly */
- if (f2fs_has_inline_data(inode)) {
- err = f2fs_convert_inline_inode(inode);
- if (err)
- return err;
- }
-
- if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
- return 0;
-
err = check_direct_IO(inode, iter, offset);
if (err)
return err;
- trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
+ if (__force_buffered_io(inode, rw))
+ return 0;
if (trace_android_fs_dataread_start_enabled() &&
(iov_iter_rw(iter) == READ)) {
@@ -1620,18 +1841,18 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
current->pid, path,
current->comm);
}
- if (iov_iter_rw(iter) == WRITE) {
- __allocate_data_blocks(inode, offset, count);
- if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
- err = -EIO;
- goto out;
- }
- }
+ trace_f2fs_direct_IO_enter(inode, offset, count, rw);
+ down_read(&F2FS_I(inode)->dio_rwsem[rw]);
err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
-out:
- if (err < 0 && iov_iter_rw(iter) == WRITE)
- f2fs_write_failed(mapping, offset + count);
+ up_read(&F2FS_I(inode)->dio_rwsem[rw]);
+
+ if (rw == WRITE) {
+ if (err > 0)
+ set_inode_flag(inode, FI_UPDATE_WRITE);
+ else if (err < 0)
+ f2fs_write_failed(mapping, offset + count);
+ }
if (trace_android_fs_dataread_start_enabled() &&
(iov_iter_rw(iter) == READ))
@@ -1640,7 +1861,7 @@ out:
(iov_iter_rw(iter) == WRITE))
trace_android_fs_datawrite_end(inode, offset, count);
- trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);
+ trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
return err;
}
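
The down_read()/up_read() pair above takes a per-direction semaphore around blockdev_direct_IO(), so direct I/Os of the same direction can run concurrently while some other path (not shown in this hunk) can presumably take the lock exclusively to drain them. A small pthread model of that locking shape, under that assumption:

#include <pthread.h>
#include <stdio.h>

/* Model: one rwlock per I/O direction, as with dio_rwsem[READ/WRITE]. */
static pthread_rwlock_t dio_rwsem[2] = {
    PTHREAD_RWLOCK_INITIALIZER, PTHREAD_RWLOCK_INITIALIZER
};

enum { MODEL_READ = 0, MODEL_WRITE = 1 };

/* Concurrent DIO submitters take the lock shared... */
static void submit_dio(int rw)
{
    pthread_rwlock_rdlock(&dio_rwsem[rw]);
    printf("direct I/O in flight (rw=%d)\n", rw);
    pthread_rwlock_unlock(&dio_rwsem[rw]);
}

/* ...while a fencing path takes it exclusive to drain in-flight DIOs. */
static void fence_dio(int rw)
{
    pthread_rwlock_wrlock(&dio_rwsem[rw]);
    /* no DIO of this direction can be in flight here */
    pthread_rwlock_unlock(&dio_rwsem[rw]);
}

int main(void)
{
    submit_dio(MODEL_WRITE);
    fence_dio(MODEL_WRITE);
    return 0;
}
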
@@ -1652,22 +1873,25 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset,
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
- (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
+ (offset % PAGE_SIZE || length != PAGE_SIZE))
return;
if (PageDirty(page)) {
- if (inode->i_ino == F2FS_META_INO(sbi))
+ if (inode->i_ino == F2FS_META_INO(sbi)) {
dec_page_count(sbi, F2FS_DIRTY_META);
- else if (inode->i_ino == F2FS_NODE_INO(sbi))
+ } else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
dec_page_count(sbi, F2FS_DIRTY_NODES);
- else
+ } else {
inode_dec_dirty_pages(inode);
+ remove_dirty_inode(inode);
+ }
}
/* This is atomic written page, keep Private */
if (IS_ATOMIC_WRITTEN_PAGE(page))
return;
+ set_page_private(page, 0);
ClearPagePrivate(page);
}
@@ -1681,10 +1905,42 @@ int f2fs_release_page(struct page *page, gfp_t wait)
if (IS_ATOMIC_WRITTEN_PAGE(page))
return 0;
+ set_page_private(page, 0);
ClearPagePrivate(page);
return 1;
}
+/*
+ * This was copied from __set_page_dirty_buffers, which gives higher
+ * performance on very high-speed storage (e.g., pmem).
+ */
+void f2fs_set_page_dirty_nobuffers(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+ struct mem_cgroup *memcg;
+ unsigned long flags;
+
+ if (unlikely(!mapping))
+ return;
+
+ spin_lock(&mapping->private_lock);
+ memcg = mem_cgroup_begin_page_stat(page);
+ SetPageDirty(page);
+ spin_unlock(&mapping->private_lock);
+
+ spin_lock_irqsave(&mapping->tree_lock, flags);
+ WARN_ON_ONCE(!PageUptodate(page));
+ account_page_dirtied(page, mapping, memcg);
+ radix_tree_tag_set(&mapping->page_tree,
+ page_index(page), PAGECACHE_TAG_DIRTY);
+ spin_unlock_irqrestore(&mapping->tree_lock, flags);
+
+ mem_cgroup_end_page_stat(memcg);
+
+ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+}
+
static int f2fs_set_data_page_dirty(struct page *page)
{
struct address_space *mapping = page->mapping;
@@ -1692,7 +1948,8 @@ static int f2fs_set_data_page_dirty(struct page *page)
trace_f2fs_set_page_dirty(page, DATA);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
if (f2fs_is_atomic_file(inode)) {
if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
@@ -1707,7 +1964,7 @@ static int f2fs_set_data_page_dirty(struct page *page)
}
if (!PageDirty(page)) {
- __set_page_dirty_nobuffers(page);
+ f2fs_set_page_dirty_nobuffers(page);
update_dirty_page(inode, page);
return 1;
}
@@ -1728,6 +1985,58 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
return generic_block_bmap(mapping, block, get_data_block_bmap);
}
+#ifdef CONFIG_MIGRATION
+#include <linux/migrate.h>
+
+int f2fs_migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page, enum migrate_mode mode)
+{
+ int rc, extra_count;
+ struct f2fs_inode_info *fi = F2FS_I(mapping->host);
+ bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);
+
+ BUG_ON(PageWriteback(page));
+
+ /* migrating an atomic written page is safe with the inmem_lock held */
+ if (atomic_written && !mutex_trylock(&fi->inmem_lock))
+ return -EAGAIN;
+
+ /*
+ * A reference is expected if PagePrivate is set when the mapping is
+ * moved; however, F2FS breaks this rule to maintain dirty page counts
+ * when truncating pages. Adjusting 'extra_count' here makes it work.
+ */
+ extra_count = (atomic_written ? 1 : 0) - page_has_private(page);
+ rc = migrate_page_move_mapping(mapping, newpage,
+ page, NULL, mode, extra_count);
+ if (rc != MIGRATEPAGE_SUCCESS) {
+ if (atomic_written)
+ mutex_unlock(&fi->inmem_lock);
+ return rc;
+ }
+
+ if (atomic_written) {
+ struct inmem_pages *cur;
+ list_for_each_entry(cur, &fi->inmem_pages, list)
+ if (cur->page == page) {
+ cur->page = newpage;
+ break;
+ }
+ mutex_unlock(&fi->inmem_lock);
+ put_page(page);
+ get_page(newpage);
+ }
+
+ if (PagePrivate(page))
+ SetPagePrivate(newpage);
+ set_page_private(newpage, page_private(page));
+
+ migrate_page_copy(newpage, page);
+
+ return MIGRATEPAGE_SUCCESS;
+}
+#endif
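
The extra_count computation above compensates for two ways f2fs deviates from the refcount rule that migrate_page_move_mapping() checks: page->private is set without holding the extra reference the core expects, while an atomic-written page does pin one more reference through the inmem list. A standalone sketch of just that arithmetic (names are illustrative):

#include <stdio.h>
#include <stdbool.h>

/*
 * Model of the reference-count fix-up passed to
 * migrate_page_move_mapping(): +1 for the inmem-list pin of an
 * atomic-written page, -1 because PagePrivate was set without the
 * reference the core expects.
 */
static int migrate_extra_count(bool atomic_written, bool has_private)
{
    return (atomic_written ? 1 : 0) - (has_private ? 1 : 0);
}

int main(void)
{
    /* plain dirty data page with private set: compensate by -1 */
    printf("%d\n", migrate_extra_count(false, true));   /* -1 */
    /* atomic-written page without private: compensate by +1 */
    printf("%d\n", migrate_extra_count(true, false));   /* 1 */
    return 0;
}
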
+
const struct address_space_operations f2fs_dblock_aops = {
.readpage = f2fs_read_data_page,
.readpages = f2fs_read_data_pages,
@@ -1740,4 +2049,7 @@ const struct address_space_operations f2fs_dblock_aops = {
.releasepage = f2fs_release_page,
.direct_IO = f2fs_direct_IO,
.bmap = f2fs_bmap,
+#ifdef CONFIG_MIGRATION
+ .migratepage = f2fs_migrate_page,
+#endif
};
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 24d6a51b48d1..fbd5184140d0 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -38,23 +38,31 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->hit_rbtree = atomic64_read(&sbi->read_hit_rbtree);
si->hit_total = si->hit_largest + si->hit_cached + si->hit_rbtree;
si->total_ext = atomic64_read(&sbi->total_hit_ext);
- si->ext_tree = sbi->total_ext_tree;
+ si->ext_tree = atomic_read(&sbi->total_ext_tree);
+ si->zombie_tree = atomic_read(&sbi->total_zombie_tree);
si->ext_node = atomic_read(&sbi->total_ext_node);
si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES);
si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS);
- si->ndirty_dirs = sbi->n_dirty_dirs;
si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META);
+ si->ndirty_data = get_pages(sbi, F2FS_DIRTY_DATA);
+ si->ndirty_imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
+ si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE];
+ si->ndirty_files = sbi->ndirty_inode[FILE_INODE];
+ si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
- si->wb_pages = get_pages(sbi, F2FS_WRITEBACK);
+ si->nr_wb_cp_data = get_pages(sbi, F2FS_WB_CP_DATA);
+ si->nr_wb_data = get_pages(sbi, F2FS_WB_DATA);
si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg;
si->rsvd_segs = reserved_segments(sbi);
si->overp_segs = overprovision_segments(sbi);
si->valid_count = valid_user_blocks(sbi);
+ si->discard_blks = discard_blocks(sbi);
si->valid_node_count = valid_node_count(sbi);
si->valid_inode_count = valid_inode_count(sbi);
si->inline_xattr = atomic_read(&sbi->inline_xattr);
si->inline_inode = atomic_read(&sbi->inline_inode);
si->inline_dir = atomic_read(&sbi->inline_dir);
+ si->orphans = sbi->im[ORPHAN_INO].ino_num;
si->utilization = utilization(sbi);
si->free_segs = free_segments(sbi);
@@ -67,7 +75,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->dirty_nats = NM_I(sbi)->dirty_nat_cnt;
si->sits = MAIN_SEGS(sbi);
si->dirty_sits = SIT_I(sbi)->dirty_sentries;
- si->fnids = NM_I(sbi)->fcnt;
+ si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID_LIST];
+ si->alloc_nids = NM_I(sbi)->nid_cnt[ALLOC_NID_LIST];
si->bg_gc = sbi->bg_gc;
si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg)
* 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
@@ -105,7 +114,7 @@ static void update_sit_info(struct f2fs_sb_info *sbi)
bimodal = 0;
total_vblocks = 0;
- blks_per_sec = sbi->segs_per_sec * (1 << sbi->log_blocks_per_seg);
+ blks_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
hblks_per_sec = blks_per_sec / 2;
for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);
@@ -140,6 +149,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
si->base_mem = sizeof(struct f2fs_sb_info) + sbi->sb->s_blocksize;
si->base_mem += 2 * sizeof(struct f2fs_inode_info);
si->base_mem += sizeof(*sbi->ckpt);
+ si->base_mem += sizeof(struct percpu_counter) * NR_COUNT_TYPE;
/* build sm */
si->base_mem += sizeof(struct f2fs_sm_info);
@@ -148,7 +158,9 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
si->base_mem += sizeof(struct sit_info);
si->base_mem += MAIN_SEGS(sbi) * sizeof(struct seg_entry);
si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi));
- si->base_mem += 3 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
+ si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
+ if (f2fs_discard_en(sbi))
+ si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
si->base_mem += SIT_VBLOCK_MAP_SIZE;
if (sbi->segs_per_sec > 1)
si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry);
@@ -161,7 +173,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
/* build curseg */
si->base_mem += sizeof(struct curseg_info) * NR_CURSEG_TYPE;
- si->base_mem += PAGE_CACHE_SIZE * NR_CURSEG_TYPE;
+ si->base_mem += PAGE_SIZE * NR_CURSEG_TYPE;
/* build dirty segmap */
si->base_mem += sizeof(struct dirty_seglist_info);
@@ -184,23 +196,25 @@ get_cache:
si->cache_mem += sizeof(struct flush_cmd_control);
/* free nids */
- si->cache_mem += NM_I(sbi)->fcnt * sizeof(struct free_nid);
+ si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID_LIST] +
+ NM_I(sbi)->nid_cnt[ALLOC_NID_LIST]) *
+ sizeof(struct free_nid);
si->cache_mem += NM_I(sbi)->nat_cnt * sizeof(struct nat_entry);
si->cache_mem += NM_I(sbi)->dirty_nat_cnt *
sizeof(struct nat_entry_set);
si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages);
- si->cache_mem += sbi->n_dirty_dirs * sizeof(struct inode_entry);
- for (i = 0; i <= UPDATE_INO; i++)
+ for (i = 0; i <= ORPHAN_INO; i++)
si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
- si->cache_mem += sbi->total_ext_tree * sizeof(struct extent_tree);
+ si->cache_mem += atomic_read(&sbi->total_ext_tree) *
+ sizeof(struct extent_tree);
si->cache_mem += atomic_read(&sbi->total_ext_node) *
sizeof(struct extent_node);
si->page_mem = 0;
npages = NODE_MAPPING(sbi)->nrpages;
- si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT;
+ si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
npages = META_MAPPING(sbi)->nrpages;
- si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT;
+ si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
}
static int stat_show(struct seq_file *s, void *v)
@@ -211,20 +225,24 @@ static int stat_show(struct seq_file *s, void *v)
mutex_lock(&f2fs_stat_mutex);
list_for_each_entry(si, &f2fs_stat_list, stat_list) {
- char devname[BDEVNAME_SIZE];
-
update_general_status(si->sbi);
- seq_printf(s, "\n=====[ partition info(%s). #%d ]=====\n",
- bdevname(si->sbi->sb->s_bdev, devname), i++);
+ seq_printf(s, "\n=====[ partition info(%pg). #%d, %s]=====\n",
+ si->sbi->sb->s_bdev, i++,
+ f2fs_readonly(si->sbi->sb) ? "RO" : "RW");
seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ",
si->sit_area_segs, si->nat_area_segs);
seq_printf(s, "[SSA: %d] [MAIN: %d",
si->ssa_area_segs, si->main_area_segs);
seq_printf(s, "(OverProv:%d Resv:%d)]\n\n",
si->overp_segs, si->rsvd_segs);
- seq_printf(s, "Utilization: %d%% (%d valid blocks)\n",
- si->utilization, si->valid_count);
+ if (test_opt(si->sbi, DISCARD))
+ seq_printf(s, "Utilization: %u%% (%u valid blocks, %u discard blocks)\n",
+ si->utilization, si->valid_count, si->discard_blks);
+ else
+ seq_printf(s, "Utilization: %u%% (%u valid blocks)\n",
+ si->utilization, si->valid_count);
+
seq_printf(s, " - Node: %u (Inode: %u, ",
si->valid_node_count, si->valid_inode_count);
seq_printf(s, "Other: %u)\n - Data: %u\n",
@@ -236,6 +254,8 @@ static int stat_show(struct seq_file *s, void *v)
si->inline_inode);
seq_printf(s, " - Inline_dentry Inode: %u\n",
si->inline_dir);
+ seq_printf(s, " - Orphan Inode: %u\n",
+ si->orphans);
seq_printf(s, "\nMain area: %d segs, %d secs %d zones\n",
si->main_area_segs, si->main_area_sections,
si->main_area_zones);
@@ -269,7 +289,8 @@ static int stat_show(struct seq_file *s, void *v)
si->dirty_count);
seq_printf(s, " - Prefree: %d\n - Free: %d (%d)\n\n",
si->prefree_count, si->free_segs, si->free_secs);
- seq_printf(s, "CP calls: %d\n", si->cp_count);
+ seq_printf(s, "CP calls: %d (BG: %d)\n",
+ si->cp_count, si->bg_cp_count);
seq_printf(s, "GC calls: %d (BG: %d)\n",
si->call_count, si->bg_gc);
seq_printf(s, " - data segments : %d (%d)\n",
@@ -290,21 +311,25 @@ static int stat_show(struct seq_file *s, void *v)
!si->total_ext ? 0 :
div64_u64(si->hit_total * 100, si->total_ext),
si->hit_total, si->total_ext);
- seq_printf(s, " - Inner Struct Count: tree: %d, node: %d\n",
- si->ext_tree, si->ext_node);
+ seq_printf(s, " - Inner Struct Count: tree: %d(%d), node: %d\n",
+ si->ext_tree, si->zombie_tree, si->ext_node);
seq_puts(s, "\nBalancing F2FS Async:\n");
- seq_printf(s, " - inmem: %4d, wb: %4d\n",
- si->inmem_pages, si->wb_pages);
+ seq_printf(s, " - inmem: %4d, wb_cp_data: %4d, wb_data: %4d\n",
+ si->inmem_pages, si->nr_wb_cp_data, si->nr_wb_data);
seq_printf(s, " - nodes: %4d in %4d\n",
si->ndirty_node, si->node_pages);
- seq_printf(s, " - dents: %4d in dirs:%4d\n",
- si->ndirty_dent, si->ndirty_dirs);
+ seq_printf(s, " - dents: %4d in dirs:%4d (%4d)\n",
+ si->ndirty_dent, si->ndirty_dirs, si->ndirty_all);
+ seq_printf(s, " - datas: %4d in files:%4d\n",
+ si->ndirty_data, si->ndirty_files);
seq_printf(s, " - meta: %4d in %4d\n",
si->ndirty_meta, si->meta_pages);
+ seq_printf(s, " - imeta: %4d\n",
+ si->ndirty_imeta);
seq_printf(s, " - NATs: %9d/%9d\n - SITs: %9d/%9d\n",
si->dirty_nats, si->nats, si->dirty_sits, si->sits);
- seq_printf(s, " - free_nids: %9d\n",
- si->fnids);
+ seq_printf(s, " - free_nids: %9d, alloc_nids: %9d\n",
+ si->free_nids, si->alloc_nids);
seq_puts(s, "\nDistribution of User Blocks:");
seq_puts(s, " [ valid | invalid | free ]\n");
seq_puts(s, " [");
@@ -407,20 +432,23 @@ void f2fs_destroy_stats(struct f2fs_sb_info *sbi)
kfree(si);
}
-void __init f2fs_create_root_stats(void)
+int __init f2fs_create_root_stats(void)
{
struct dentry *file;
f2fs_debugfs_root = debugfs_create_dir("f2fs", NULL);
if (!f2fs_debugfs_root)
- return;
+ return -ENOMEM;
file = debugfs_create_file("status", S_IRUGO, f2fs_debugfs_root,
NULL, &stat_fops);
if (!file) {
debugfs_remove(f2fs_debugfs_root);
f2fs_debugfs_root = NULL;
+ return -ENOMEM;
}
+
+ return 0;
}
void f2fs_destroy_root_stats(void)
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 60972a559685..4436079dbf0c 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -17,8 +17,8 @@
static unsigned long dir_blocks(struct inode *inode)
{
- return ((unsigned long long) (i_size_read(inode) + PAGE_CACHE_SIZE - 1))
- >> PAGE_CACHE_SHIFT;
+ return ((unsigned long long) (i_size_read(inode) + PAGE_SIZE - 1))
+ >> PAGE_SHIFT;
}
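
dir_blocks() above is a plain round-up of i_size to whole pages. A standalone sketch with an assumed 4KB page (MODEL_PAGE_SHIFT is a stand-in for PAGE_SHIFT):

#include <stdio.h>
#include <stdint.h>

#define MODEL_PAGE_SHIFT 12
#define MODEL_PAGE_SIZE (1UL << MODEL_PAGE_SHIFT)

/* Round a byte size up to whole directory blocks (pages). */
static unsigned long dir_blocks_model(uint64_t i_size)
{
    return (unsigned long)((i_size + MODEL_PAGE_SIZE - 1) >> MODEL_PAGE_SHIFT);
}

int main(void)
{
    printf("%lu\n", dir_blocks_model(0));       /* 0 */
    printf("%lu\n", dir_blocks_model(1));       /* 1 */
    printf("%lu\n", dir_blocks_model(4096));    /* 1 */
    printf("%lu\n", dir_blocks_model(4097));    /* 2 */
    return 0;
}
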
static unsigned int dir_buckets(unsigned int level, int dir_level)
@@ -37,7 +37,7 @@ static unsigned int bucket_blocks(unsigned int level)
return 4;
}
-unsigned char f2fs_filetype_table[F2FS_FT_MAX] = {
+static unsigned char f2fs_filetype_table[F2FS_FT_MAX] = {
[F2FS_FT_UNKNOWN] = DT_UNKNOWN,
[F2FS_FT_REG_FILE] = DT_REG,
[F2FS_FT_DIR] = DT_DIR,
@@ -48,7 +48,6 @@ unsigned char f2fs_filetype_table[F2FS_FT_MAX] = {
[F2FS_FT_SYMLINK] = DT_LNK,
};
-#define S_SHIFT 12
static unsigned char f2fs_type_by_mode[S_IFMT >> S_SHIFT] = {
[S_IFREG >> S_SHIFT] = F2FS_FT_REG_FILE,
[S_IFDIR >> S_SHIFT] = F2FS_FT_DIR,
@@ -64,6 +63,13 @@ void set_de_type(struct f2fs_dir_entry *de, umode_t mode)
de->file_type = f2fs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
}
+unsigned char get_de_type(struct f2fs_dir_entry *de)
+{
+ if (de->file_type < F2FS_FT_MAX)
+ return f2fs_filetype_table[de->file_type];
+ return DT_UNKNOWN;
+}
+
static unsigned long dir_block_index(unsigned int level,
int dir_level, unsigned int idx)
{
@@ -77,7 +83,7 @@ static unsigned long dir_block_index(unsigned int level,
}
static struct f2fs_dir_entry *find_in_block(struct page *dentry_page,
- struct f2fs_filename *fname,
+ struct fscrypt_name *fname,
f2fs_hash_t namehash,
int *max_slots,
struct page **res_page)
@@ -95,23 +101,18 @@ static struct f2fs_dir_entry *find_in_block(struct page *dentry_page,
else
kunmap(dentry_page);
- /*
- * For the most part, it should be a bug when name_len is zero.
- * We stop here for figuring out where the bugs has occurred.
- */
- f2fs_bug_on(F2FS_P_SB(dentry_page), d.max < 0);
return de;
}
-struct f2fs_dir_entry *find_target_dentry(struct f2fs_filename *fname,
+struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *fname,
f2fs_hash_t namehash, int *max_slots,
struct f2fs_dentry_ptr *d)
{
struct f2fs_dir_entry *de;
unsigned long bit_pos = 0;
int max_len = 0;
- struct f2fs_str de_name = FSTR_INIT(NULL, 0);
- struct f2fs_str *name = &fname->disk_name;
+ struct fscrypt_str de_name = FSTR_INIT(NULL, 0);
+ struct fscrypt_str *name = &fname->disk_name;
if (max_slots)
*max_slots = 0;
@@ -124,37 +125,28 @@ struct f2fs_dir_entry *find_target_dentry(struct f2fs_filename *fname,
de = &d->dentry[bit_pos];
- if (de->hash_code != namehash)
- goto not_match;
+ if (unlikely(!de->name_len)) {
+ bit_pos++;
+ continue;
+ }
+ /* encrypted case */
de_name.name = d->filename[bit_pos];
de_name.len = le16_to_cpu(de->name_len);
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- if (unlikely(!name->name)) {
- if (fname->usr_fname->name[0] == '_') {
- if (de_name.len > 32 &&
- !memcmp(de_name.name + ((de_name.len - 17) & ~15),
- fname->crypto_buf.name + 8, 16))
- goto found;
- goto not_match;
- }
- name->name = fname->crypto_buf.name;
- name->len = fname->crypto_buf.len;
- }
-#endif
- if (de_name.len == name->len &&
- !memcmp(de_name.name, name->name, name->len))
+ /* show encrypted name */
+ if (fname->hash) {
+ if (de->hash_code == cpu_to_le32(fname->hash))
+ goto found;
+ } else if (de_name.len == name->len &&
+ de->hash_code == namehash &&
+ !memcmp(de_name.name, name->name, name->len))
goto found;
-not_match:
+
if (max_slots && max_len > *max_slots)
*max_slots = max_len;
max_len = 0;
- /* remain bug on condition */
- if (unlikely(!de->name_len))
- d->max = -1;
-
bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
}
@@ -167,7 +159,7 @@ found:
static struct f2fs_dir_entry *find_in_level(struct inode *dir,
unsigned int level,
- struct f2fs_filename *fname,
+ struct fscrypt_name *fname,
struct page **res_page)
{
struct qstr name = FSTR_TO_QSTR(&fname->disk_name);
@@ -180,9 +172,10 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
int max_slots;
f2fs_hash_t namehash;
- namehash = f2fs_dentry_hash(&name, fname);
-
- f2fs_bug_on(F2FS_I_SB(dir), level > MAX_DIR_HASH_DEPTH);
+ if (fname->hash)
+ namehash = cpu_to_le32(fname->hash);
+ else
+ namehash = f2fs_dentry_hash(&name);
nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
nblock = bucket_blocks(level);
@@ -195,8 +188,13 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
/* no need to allocate new dentry pages to all the indices */
dentry_page = find_data_page(dir, bidx);
if (IS_ERR(dentry_page)) {
- room = true;
- continue;
+ if (PTR_ERR(dentry_page) == -ENOENT) {
+ room = true;
+ continue;
+ } else {
+ *res_page = dentry_page;
+ break;
+ }
}
de = find_in_block(dentry_page, fname, namehash, &max_slots,
@@ -217,79 +215,87 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
return de;
}
-/*
- * Find an entry in the specified directory with the wanted name.
- * It returns the page where the entry was found (as a parameter - res_page),
- * and the entry itself. Page is returned mapped and unlocked.
- * Entry is guaranteed to be valid.
- */
-struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
- struct qstr *child, struct page **res_page)
+struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
+ struct fscrypt_name *fname, struct page **res_page)
{
unsigned long npages = dir_blocks(dir);
struct f2fs_dir_entry *de = NULL;
unsigned int max_depth;
unsigned int level;
- struct f2fs_filename fname;
- int err;
-
- *res_page = NULL;
-
- err = f2fs_fname_setup_filename(dir, child, 1, &fname);
- if (err)
- return NULL;
if (f2fs_has_inline_dentry(dir)) {
- de = find_in_inline_dir(dir, &fname, res_page);
+ *res_page = NULL;
+ de = find_in_inline_dir(dir, fname, res_page);
goto out;
}
- if (npages == 0)
+ if (npages == 0) {
+ *res_page = NULL;
goto out;
+ }
max_depth = F2FS_I(dir)->i_current_depth;
+ if (unlikely(max_depth > MAX_DIR_HASH_DEPTH)) {
+ f2fs_msg(F2FS_I_SB(dir)->sb, KERN_WARNING,
+ "Corrupted max_depth of %lu: %u",
+ dir->i_ino, max_depth);
+ max_depth = MAX_DIR_HASH_DEPTH;
+ f2fs_i_depth_write(dir, max_depth);
+ }
for (level = 0; level < max_depth; level++) {
- de = find_in_level(dir, level, &fname, res_page);
- if (de)
+ *res_page = NULL;
+ de = find_in_level(dir, level, fname, res_page);
+ if (de || IS_ERR(*res_page))
break;
}
out:
- f2fs_fname_free_filename(&fname);
return de;
}
-struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p)
+/*
+ * Find an entry in the specified directory with the wanted name.
+ * It returns the page where the entry was found (as a parameter - res_page),
+ * and the entry itself. Page is returned mapped and unlocked.
+ * Entry is guaranteed to be valid.
+ */
+struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
+ const struct qstr *child, struct page **res_page)
{
- struct page *page;
- struct f2fs_dir_entry *de;
- struct f2fs_dentry_block *dentry_blk;
-
- if (f2fs_has_inline_dentry(dir))
- return f2fs_parent_inline_dir(dir, p);
+ struct f2fs_dir_entry *de = NULL;
+ struct fscrypt_name fname;
+ int err;
- page = get_lock_data_page(dir, 0, false);
- if (IS_ERR(page))
+ err = fscrypt_setup_filename(dir, child, 1, &fname);
+ if (err) {
+ *res_page = ERR_PTR(err);
return NULL;
+ }
+
+ de = __f2fs_find_entry(dir, &fname, res_page);
- dentry_blk = kmap(page);
- de = &dentry_blk->dentry[1];
- *p = page;
- unlock_page(page);
+ fscrypt_free_filename(&fname);
return de;
}
-ino_t f2fs_inode_by_name(struct inode *dir, struct qstr *qstr)
+struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p)
+{
+ struct qstr dotdot = QSTR_INIT("..", 2);
+
+ return f2fs_find_entry(dir, &dotdot, p);
+}
+
+ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
+ struct page **page)
{
ino_t res = 0;
struct f2fs_dir_entry *de;
- struct page *page;
- de = f2fs_find_entry(dir, qstr, &page);
+ de = f2fs_find_entry(dir, qstr, page);
if (de) {
res = le32_to_cpu(de->ino);
- f2fs_dentry_kunmap(dir, page);
- f2fs_put_page(page, 0);
+ f2fs_dentry_kunmap(dir, *page);
+ f2fs_put_page(*page, 0);
}
return res;
@@ -300,14 +306,14 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
{
enum page_type type = f2fs_has_inline_dentry(dir) ? NODE : DATA;
lock_page(page);
- f2fs_wait_on_page_writeback(page, type);
+ f2fs_wait_on_page_writeback(page, type, true);
de->ino = cpu_to_le32(inode->i_ino);
set_de_type(de, inode->i_mode);
f2fs_dentry_kunmap(dir, page);
set_page_dirty(page);
- dir->i_mtime = dir->i_ctime = CURRENT_TIME;
- mark_inode_dirty(dir);
+ dir->i_mtime = dir->i_ctime = current_time(dir);
+ f2fs_mark_inode_dirty_sync(dir, false);
f2fs_put_page(page, 1);
}
@@ -315,7 +321,7 @@ static void init_dent_inode(const struct qstr *name, struct page *ipage)
{
struct f2fs_inode *ri;
- f2fs_wait_on_page_writeback(ipage, NODE);
+ f2fs_wait_on_page_writeback(ipage, NODE, true);
/* copy name info. to this inode page */
ri = F2FS_INODE(ipage);
@@ -345,24 +351,14 @@ int update_dent_inode(struct inode *inode, struct inode *to,
void do_make_empty_dir(struct inode *inode, struct inode *parent,
struct f2fs_dentry_ptr *d)
{
- struct f2fs_dir_entry *de;
+ struct qstr dot = QSTR_INIT(".", 1);
+ struct qstr dotdot = QSTR_INIT("..", 2);
- de = &d->dentry[0];
- de->name_len = cpu_to_le16(1);
- de->hash_code = 0;
- de->ino = cpu_to_le32(inode->i_ino);
- memcpy(d->filename[0], ".", 1);
- set_de_type(de, inode->i_mode);
+ /* update dirent of "." */
+ f2fs_update_dentry(inode->i_ino, inode->i_mode, d, &dot, 0, 0);
- de = &d->dentry[1];
- de->hash_code = 0;
- de->name_len = cpu_to_le16(2);
- de->ino = cpu_to_le32(parent->i_ino);
- memcpy(d->filename[1], "..", 2);
- set_de_type(de, parent->i_mode);
-
- test_and_set_bit_le(0, (void *)d->bitmap);
- test_and_set_bit_le(1, (void *)d->bitmap);
+ /* update dirent of ".." */
+ f2fs_update_dentry(parent->i_ino, parent->i_mode, d, &dotdot, 0, 1);
}
static int make_empty_dir(struct inode *inode,
@@ -392,32 +388,38 @@ static int make_empty_dir(struct inode *inode,
}
struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
- const struct qstr *name, struct page *dpage)
+ const struct qstr *new_name, const struct qstr *orig_name,
+ struct page *dpage)
{
struct page *page;
int err;
- if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
+ if (is_inode_flag_set(inode, FI_NEW_INODE)) {
page = new_inode_page(inode);
if (IS_ERR(page))
return page;
if (S_ISDIR(inode->i_mode)) {
+ /* take an extra reference in order to handle the error case */
+ get_page(page);
err = make_empty_dir(inode, dir, page);
- if (err)
- goto error;
+ if (err) {
+ lock_page(page);
+ goto put_error;
+ }
+ put_page(page);
}
err = f2fs_init_acl(inode, dir, page, dpage);
if (err)
goto put_error;
- err = f2fs_init_security(inode, dir, name, page);
+ err = f2fs_init_security(inode, dir, orig_name, page);
if (err)
goto put_error;
if (f2fs_encrypted_inode(dir) && f2fs_may_encrypt(inode)) {
- err = f2fs_inherit_context(dir, inode, page);
+ err = fscrypt_inherit_context(dir, inode, page, false);
if (err)
goto put_error;
}
@@ -429,14 +431,14 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
set_cold_node(inode, page);
}
- if (name)
- init_dent_inode(name, page);
+ if (new_name)
+ init_dent_inode(new_name, page);
/*
* This file should be checkpointed during fsync.
* We lost i_pino from now on.
*/
- if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK)) {
+ if (is_inode_flag_set(inode, FI_INC_LINK)) {
file_lost_pino(inode);
/*
* If link the tmpfile to alias through linkat path,
@@ -444,41 +446,33 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
*/
if (inode->i_nlink == 0)
remove_orphan_inode(F2FS_I_SB(dir), inode->i_ino);
- inc_nlink(inode);
+ f2fs_i_links_write(inode, true);
}
return page;
put_error:
+ clear_nlink(inode);
+ update_inode(inode, page);
f2fs_put_page(page, 1);
-error:
- /* once the failed inode becomes a bad inode, i_mode is S_IFREG */
- truncate_inode_pages(&inode->i_data, 0);
- truncate_blocks(inode, 0, false);
- remove_dirty_dir_inode(inode);
- remove_inode_page(inode);
return ERR_PTR(err);
}
void update_parent_metadata(struct inode *dir, struct inode *inode,
unsigned int current_depth)
{
- if (inode && is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
- if (S_ISDIR(inode->i_mode)) {
- inc_nlink(dir);
- set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
- }
- clear_inode_flag(F2FS_I(inode), FI_NEW_INODE);
+ if (inode && is_inode_flag_set(inode, FI_NEW_INODE)) {
+ if (S_ISDIR(inode->i_mode))
+ f2fs_i_links_write(dir, true);
+ clear_inode_flag(inode, FI_NEW_INODE);
}
- dir->i_mtime = dir->i_ctime = CURRENT_TIME;
- mark_inode_dirty(dir);
+ dir->i_mtime = dir->i_ctime = current_time(dir);
+ f2fs_mark_inode_dirty_sync(dir, false);
- if (F2FS_I(dir)->i_current_depth != current_depth) {
- F2FS_I(dir)->i_current_depth = current_depth;
- set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
- }
+ if (F2FS_I(dir)->i_current_depth != current_depth)
+ f2fs_i_depth_write(dir, current_depth);
- if (inode && is_inode_flag_set(F2FS_I(inode), FI_INC_LINK))
- clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ if (inode && is_inode_flag_set(inode, FI_INC_LINK))
+ clear_inode_flag(inode, FI_INC_LINK);
}
int room_for_filename(const void *bitmap, int slots, int max_slots)
@@ -515,15 +509,16 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
memcpy(d->filename[bit_pos], name->name, name->len);
de->ino = cpu_to_le32(ino);
set_de_type(de, mode);
- for (i = 0; i < slots; i++)
- test_and_set_bit_le(bit_pos + i, (void *)d->bitmap);
+ for (i = 0; i < slots; i++) {
+ __set_bit_le(bit_pos + i, (void *)d->bitmap);
+ /* avoid exposing stale garbage data to readdir */
+ if (i)
+ (de + i)->name_len = 0;
+ }
}
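
Each dentry occupies GET_DENTRY_SLOTS(name_len) slots, i.e. name_len rounded up to 8-byte name slots (assuming F2FS_SLOT_LEN of 8 as in f2fs_fs.h); the hunk above additionally zeroes name_len in the continuation slots so readdir cannot misparse stale slot contents. A standalone sketch of the slot arithmetic:

#include <stdio.h>

#define MODEL_SLOT_LEN 8    /* bytes of name stored per dentry slot */

/* ceil(name_len / slot_len): slots consumed by one directory entry */
static int dentry_slots(int name_len)
{
    return (name_len + MODEL_SLOT_LEN - 1) / MODEL_SLOT_LEN;
}

int main(void)
{
    printf("%d\n", dentry_slots(1));    /* 1 */
    printf("%d\n", dentry_slots(8));    /* 1 */
    printf("%d\n", dentry_slots(9));    /* 2 */
    printf("%d\n", dentry_slots(255));  /* 32 */
    return 0;
}
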
-/*
- * Caller should grab and release a rwsem by calling f2fs_lock_op() and
- * f2fs_unlock_op().
- */
-int __f2fs_add_link(struct inode *dir, const struct qstr *name,
+int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
+ const struct qstr *orig_name,
struct inode *inode, nid_t ino, umode_t mode)
{
unsigned int bit_pos;
@@ -536,28 +531,11 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name,
struct f2fs_dentry_block *dentry_blk = NULL;
struct f2fs_dentry_ptr d;
struct page *page = NULL;
- struct f2fs_filename fname;
- struct qstr new_name;
- int slots, err;
-
- err = f2fs_fname_setup_filename(dir, name, 0, &fname);
- if (err)
- return err;
-
- new_name.name = fname_name(&fname);
- new_name.len = fname_len(&fname);
-
- if (f2fs_has_inline_dentry(dir)) {
- err = f2fs_add_inline_entry(dir, &new_name, inode, ino, mode);
- if (!err || err != -EAGAIN)
- goto out;
- else
- err = 0;
- }
+ int slots, err = 0;
level = 0;
- slots = GET_DENTRY_SLOTS(new_name.len);
- dentry_hash = f2fs_dentry_hash(&new_name, NULL);
+ slots = GET_DENTRY_SLOTS(new_name->len);
+ dentry_hash = f2fs_dentry_hash(new_name);
current_depth = F2FS_I(dir)->i_current_depth;
if (F2FS_I(dir)->chash == dentry_hash) {
@@ -566,10 +544,12 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name,
}
start:
- if (unlikely(current_depth == MAX_DIR_HASH_DEPTH)) {
- err = -ENOSPC;
- goto out;
- }
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(F2FS_I_SB(dir), FAULT_DIR_DEPTH))
+ return -ENOSPC;
+#endif
+ if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
+ return -ENOSPC;
/* Increase the depth, if required */
if (level == current_depth)
@@ -583,10 +563,8 @@ start:
for (block = bidx; block <= (bidx + nblock - 1); block++) {
dentry_page = get_new_data_page(dir, NULL, block, true);
- if (IS_ERR(dentry_page)) {
- err = PTR_ERR(dentry_page);
- goto out;
- }
+ if (IS_ERR(dentry_page))
+ return PTR_ERR(dentry_page);
dentry_blk = kmap(dentry_page);
bit_pos = room_for_filename(&dentry_blk->dentry_bitmap,
@@ -602,11 +580,12 @@ start:
++level;
goto start;
add_dentry:
- f2fs_wait_on_page_writeback(dentry_page, DATA);
+ f2fs_wait_on_page_writeback(dentry_page, DATA, true);
if (inode) {
down_write(&F2FS_I(inode)->i_sem);
- page = init_inode_metadata(inode, dir, &new_name, NULL);
+ page = init_inode_metadata(inode, dir, new_name,
+ orig_name, NULL);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto fail;
@@ -616,14 +595,12 @@ add_dentry:
}
make_dentry_ptr(NULL, &d, (void *)dentry_blk, 1);
- f2fs_update_dentry(ino, mode, &d, &new_name, dentry_hash, bit_pos);
+ f2fs_update_dentry(ino, mode, &d, new_name, dentry_hash, bit_pos);
set_page_dirty(dentry_page);
if (inode) {
- /* we don't need to mark_inode_dirty now */
- F2FS_I(inode)->i_pino = dir->i_ino;
- update_inode(inode, page);
+ f2fs_i_pino_write(inode, dir->i_ino);
f2fs_put_page(page, 1);
}
@@ -632,14 +609,49 @@ fail:
if (inode)
up_write(&F2FS_I(inode)->i_sem);
- if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
- update_inode_page(dir);
- clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
- }
kunmap(dentry_page);
f2fs_put_page(dentry_page, 1);
-out:
- f2fs_fname_free_filename(&fname);
+
+ return err;
+}
+
+int __f2fs_do_add_link(struct inode *dir, struct fscrypt_name *fname,
+ struct inode *inode, nid_t ino, umode_t mode)
+{
+ struct qstr new_name;
+ int err = -EAGAIN;
+
+ new_name.name = fname_name(fname);
+ new_name.len = fname_len(fname);
+
+ if (f2fs_has_inline_dentry(dir))
+ err = f2fs_add_inline_entry(dir, &new_name, fname->usr_fname,
+ inode, ino, mode);
+ if (err == -EAGAIN)
+ err = f2fs_add_regular_entry(dir, &new_name, fname->usr_fname,
+ inode, ino, mode);
+
+ f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
+ return err;
+}
+
+/*
+ * Caller should grab and release a rwsem by calling f2fs_lock_op() and
+ * f2fs_unlock_op().
+ */
+int __f2fs_add_link(struct inode *dir, const struct qstr *name,
+ struct inode *inode, nid_t ino, umode_t mode)
+{
+ struct fscrypt_name fname;
+ int err;
+
+ err = fscrypt_setup_filename(dir, name, 0, &fname);
+ if (err)
+ return err;
+
+ err = __f2fs_do_add_link(dir, &fname, inode, ino, mode);
+
+ fscrypt_free_filename(&fname);
return err;
}
@@ -649,46 +661,39 @@ int f2fs_do_tmpfile(struct inode *inode, struct inode *dir)
int err = 0;
down_write(&F2FS_I(inode)->i_sem);
- page = init_inode_metadata(inode, dir, NULL, NULL);
+ page = init_inode_metadata(inode, dir, NULL, NULL, NULL);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto fail;
}
- /* we don't need to mark_inode_dirty now */
- update_inode(inode, page);
f2fs_put_page(page, 1);
- clear_inode_flag(F2FS_I(inode), FI_NEW_INODE);
+ clear_inode_flag(inode, FI_NEW_INODE);
fail:
up_write(&F2FS_I(inode)->i_sem);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return err;
}
-void f2fs_drop_nlink(struct inode *dir, struct inode *inode, struct page *page)
+void f2fs_drop_nlink(struct inode *dir, struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
down_write(&F2FS_I(inode)->i_sem);
- if (S_ISDIR(inode->i_mode)) {
- drop_nlink(dir);
- if (page)
- update_inode(dir, page);
- else
- update_inode_page(dir);
- }
- inode->i_ctime = CURRENT_TIME;
+ if (S_ISDIR(inode->i_mode))
+ f2fs_i_links_write(dir, false);
+ inode->i_ctime = current_time(inode);
- drop_nlink(inode);
+ f2fs_i_links_write(inode, false);
if (S_ISDIR(inode->i_mode)) {
- drop_nlink(inode);
- i_size_write(inode, 0);
+ f2fs_i_links_write(inode, false);
+ f2fs_i_size_write(inode, 0);
}
up_write(&F2FS_I(inode)->i_sem);
- update_inode_page(inode);
if (inode->i_nlink == 0)
- add_orphan_inode(sbi, inode->i_ino);
+ add_orphan_inode(inode);
else
release_orphan_inode(sbi);
}
@@ -705,11 +710,13 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
int i;
+ f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
+
if (f2fs_has_inline_dentry(dir))
return f2fs_delete_inline_entry(dentry, page, dir, inode);
lock_page(page);
- f2fs_wait_on_page_writeback(page, DATA);
+ f2fs_wait_on_page_writeback(page, DATA, true);
dentry_blk = page_address(page);
bit_pos = dentry - dentry_blk->dentry;
@@ -723,10 +730,11 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
kunmap(page); /* kunmap - pair of f2fs_find_entry */
set_page_dirty(page);
- dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+ dir->i_ctime = dir->i_mtime = current_time(dir);
+ f2fs_mark_inode_dirty_sync(dir, false);
if (inode)
- f2fs_drop_nlink(dir, inode, NULL);
+ f2fs_drop_nlink(dir, inode);
if (bit_pos == NR_DENTRY_IN_BLOCK &&
!truncate_hole(dir, page->index, page->index + 1)) {
@@ -734,6 +742,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
ClearPagePrivate(page);
ClearPageUptodate(page);
inode_dec_dirty_pages(dir);
+ remove_dirty_inode(dir);
}
f2fs_put_page(page, 1);
}
@@ -776,13 +785,13 @@ bool f2fs_empty_dir(struct inode *dir)
return true;
}
-bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
- unsigned int start_pos, struct f2fs_str *fstr)
+int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
+ unsigned int start_pos, struct fscrypt_str *fstr)
{
unsigned char d_type = DT_UNKNOWN;
unsigned int bit_pos;
struct f2fs_dir_entry *de = NULL;
- struct f2fs_str de_name = FSTR_INIT(NULL, 0);
+ struct fscrypt_str de_name = FSTR_INIT(NULL, 0);
bit_pos = ((unsigned long)ctx->pos % d->max);
@@ -792,29 +801,26 @@ bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
break;
de = &d->dentry[bit_pos];
- if (de->file_type < F2FS_FT_MAX)
- d_type = f2fs_filetype_table[de->file_type];
- else
- d_type = DT_UNKNOWN;
+ if (de->name_len == 0) {
+ bit_pos++;
+ ctx->pos = start_pos + bit_pos;
+ continue;
+ }
+
+ d_type = get_de_type(de);
de_name.name = d->filename[bit_pos];
de_name.len = le16_to_cpu(de->name_len);
if (f2fs_encrypted_inode(d->inode)) {
int save_len = fstr->len;
- int ret;
+ int err;
- de_name.name = kmalloc(de_name.len, GFP_NOFS);
- if (!de_name.name)
- return false;
-
- memcpy(de_name.name, d->filename[bit_pos], de_name.len);
-
- ret = f2fs_fname_disk_to_usr(d->inode, &de->hash_code,
- &de_name, fstr);
- kfree(de_name.name);
- if (ret < 0)
- return true;
+ err = fscrypt_fname_disk_to_usr(d->inode,
+ (u32)de->hash_code, 0,
+ &de_name, fstr);
+ if (err)
+ return err;
de_name = *fstr;
fstr->len = save_len;
@@ -822,12 +828,12 @@ bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
if (!dir_emit(ctx, de_name.name, de_name.len,
le32_to_cpu(de->ino), d_type))
- return true;
+ return 1;
bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
ctx->pos = start_pos + bit_pos;
}
- return false;
+ return 0;
}
static int f2fs_readdir(struct file *file, struct dir_context *ctx)
@@ -839,16 +845,15 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
struct file_ra_state *ra = &file->f_ra;
unsigned int n = ((unsigned long)ctx->pos / NR_DENTRY_IN_BLOCK);
struct f2fs_dentry_ptr d;
- struct f2fs_str fstr = FSTR_INIT(NULL, 0);
+ struct fscrypt_str fstr = FSTR_INIT(NULL, 0);
int err = 0;
if (f2fs_encrypted_inode(inode)) {
- err = f2fs_get_encryption_info(inode);
- if (err)
+ err = fscrypt_get_encryption_info(inode);
+ if (err && err != -ENOKEY)
return err;
- err = f2fs_fname_crypto_alloc_buffer(inode, F2FS_NAME_LEN,
- &fstr);
+ err = fscrypt_fname_alloc_buffer(inode, F2FS_NAME_LEN, &fstr);
if (err < 0)
return err;
}
@@ -865,29 +870,42 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx)
for (; n < npages; n++) {
dentry_page = get_lock_data_page(inode, n, false);
- if (IS_ERR(dentry_page))
- continue;
+ if (IS_ERR(dentry_page)) {
+ err = PTR_ERR(dentry_page);
+ if (err == -ENOENT) {
+ err = 0;
+ continue;
+ } else {
+ goto out;
+ }
+ }
dentry_blk = kmap(dentry_page);
make_dentry_ptr(inode, &d, (void *)dentry_blk, 1);
- if (f2fs_fill_dentries(ctx, &d, n * NR_DENTRY_IN_BLOCK, &fstr))
- goto stop;
+ err = f2fs_fill_dentries(ctx, &d,
+ n * NR_DENTRY_IN_BLOCK, &fstr);
+ if (err) {
+ kunmap(dentry_page);
+ f2fs_put_page(dentry_page, 1);
+ break;
+ }
ctx->pos = (n + 1) * NR_DENTRY_IN_BLOCK;
kunmap(dentry_page);
f2fs_put_page(dentry_page, 1);
- dentry_page = NULL;
- }
-stop:
- if (dentry_page && !IS_ERR(dentry_page)) {
- kunmap(dentry_page);
- f2fs_put_page(dentry_page, 1);
}
out:
- f2fs_fname_crypto_free_buffer(&fstr);
- return err;
+ fscrypt_fname_free_buffer(&fstr);
+ return err < 0 ? err : 0;
+}
+
+static int f2fs_dir_open(struct inode *inode, struct file *filp)
+{
+ if (f2fs_encrypted_inode(inode))
+ return fscrypt_get_encryption_info(inode) ? -EACCES : 0;
+ return 0;
}
const struct file_operations f2fs_dir_operations = {
@@ -895,6 +913,7 @@ const struct file_operations f2fs_dir_operations = {
.read = generic_read_dir,
.iterate = f2fs_readdir,
.fsync = f2fs_sync_file,
+ .open = f2fs_dir_open,
.unlocked_ioctl = f2fs_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = f2fs_compat_ioctl,
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index 7ddba812e11b..4db44da7ef69 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -33,10 +33,11 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
en->ei = *ei;
INIT_LIST_HEAD(&en->list);
+ en->et = et;
rb_link_node(&en->rb_node, parent, p);
rb_insert_color(&en->rb_node, &et->root);
- et->count++;
+ atomic_inc(&et->node_cnt);
atomic_inc(&sbi->total_ext_node);
return en;
}
@@ -45,11 +46,29 @@ static void __detach_extent_node(struct f2fs_sb_info *sbi,
struct extent_tree *et, struct extent_node *en)
{
rb_erase(&en->rb_node, &et->root);
- et->count--;
+ atomic_dec(&et->node_cnt);
atomic_dec(&sbi->total_ext_node);
if (et->cached_en == en)
et->cached_en = NULL;
+ kmem_cache_free(extent_node_slab, en);
+}
+
+/*
+ * Flow to release an extent_node:
+ * 1. list_del_init
+ * 2. __detach_extent_node
+ * 3. kmem_cache_free.
+ */
+static void __release_extent_node(struct f2fs_sb_info *sbi,
+ struct extent_tree *et, struct extent_node *en)
+{
+ spin_lock(&sbi->extent_lock);
+ f2fs_bug_on(sbi, list_empty(&en->list));
+ list_del_init(&en->list);
+ spin_unlock(&sbi->extent_lock);
+
+ __detach_extent_node(sbi, et, en);
}
static struct extent_tree *__grab_extent_tree(struct inode *inode)
@@ -68,11 +87,13 @@ static struct extent_tree *__grab_extent_tree(struct inode *inode)
et->root = RB_ROOT;
et->cached_en = NULL;
rwlock_init(&et->lock);
- atomic_set(&et->refcount, 0);
- et->count = 0;
- sbi->total_ext_tree++;
+ INIT_LIST_HEAD(&et->list);
+ atomic_set(&et->node_cnt, 0);
+ atomic_inc(&sbi->total_ext_tree);
+ } else {
+ atomic_dec(&sbi->total_zombie_tree);
+ list_del_init(&et->list);
}
- atomic_inc(&et->refcount);
up_write(&sbi->extent_tree_lock);
/* never died until evict_inode */
@@ -127,32 +148,21 @@ static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
}
static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
- struct extent_tree *et, bool free_all)
+ struct extent_tree *et)
{
struct rb_node *node, *next;
struct extent_node *en;
- unsigned int count = et->count;
+ unsigned int count = atomic_read(&et->node_cnt);
node = rb_first(&et->root);
while (node) {
next = rb_next(node);
en = rb_entry(node, struct extent_node, rb_node);
-
- if (free_all) {
- spin_lock(&sbi->extent_lock);
- if (!list_empty(&en->list))
- list_del_init(&en->list);
- spin_unlock(&sbi->extent_lock);
- }
-
- if (free_all || list_empty(&en->list)) {
- __detach_extent_node(sbi, et, en);
- kmem_cache_free(extent_node_slab, en);
- }
+ __release_extent_node(sbi, et, en);
node = next;
}
- return count - et->count;
+ return count - atomic_read(&et->node_cnt);
}
static void __drop_largest_extent(struct inode *inode,
@@ -160,38 +170,38 @@ static void __drop_largest_extent(struct inode *inode,
{
struct extent_info *largest = &F2FS_I(inode)->extent_tree->largest;
- if (fofs < largest->fofs + largest->len && fofs + len > largest->fofs)
+ if (fofs < largest->fofs + largest->len && fofs + len > largest->fofs) {
largest->len = 0;
+ f2fs_mark_inode_dirty_sync(inode, true);
+ }
}
-void f2fs_drop_largest_extent(struct inode *inode, pgoff_t fofs)
-{
- if (!f2fs_may_extent_tree(inode))
- return;
-
- __drop_largest_extent(inode, fofs, 1);
-}
-
-void f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
+/* return true, if inode page is changed */
+bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct extent_tree *et;
struct extent_node *en;
struct extent_info ei;
- if (!f2fs_may_extent_tree(inode))
- return;
+ if (!f2fs_may_extent_tree(inode)) {
+ /* drop largest extent */
+ if (i_ext && i_ext->len) {
+ i_ext->len = 0;
+ return true;
+ }
+ return false;
+ }
et = __grab_extent_tree(inode);
- if (!i_ext || le32_to_cpu(i_ext->len) < F2FS_MIN_EXTENT_LEN)
- return;
+ if (!i_ext || !i_ext->len)
+ return false;
- set_extent_info(&ei, le32_to_cpu(i_ext->fofs),
- le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len));
+ get_extent_info(&ei, i_ext);
write_lock(&et->lock);
- if (et->count)
+ if (atomic_read(&et->node_cnt))
goto out;
en = __init_extent_tree(sbi, et, &ei);
@@ -202,6 +212,7 @@ void f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
}
out:
write_unlock(&et->lock);
+ return false;
}
static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
@@ -230,9 +241,10 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
if (en) {
*ei = en->ei;
spin_lock(&sbi->extent_lock);
- if (!list_empty(&en->list))
+ if (!list_empty(&en->list)) {
list_move_tail(&en->list, &sbi->extent_list);
- et->cached_en = en;
+ et->cached_en = en;
+ }
spin_unlock(&sbi->extent_lock);
ret = true;
}
@@ -325,12 +337,12 @@ lookup_neighbors:
return en;
}
-static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
+static struct extent_node *__try_merge_extent_node(struct inode *inode,
struct extent_tree *et, struct extent_info *ei,
- struct extent_node **den,
struct extent_node *prev_ex,
struct extent_node *next_ex)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct extent_node *en = NULL;
if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
@@ -340,28 +352,34 @@ static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
}
if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
- if (en) {
- __detach_extent_node(sbi, et, prev_ex);
- *den = prev_ex;
- }
+ if (en)
+ __release_extent_node(sbi, et, prev_ex);
next_ex->ei.fofs = ei->fofs;
next_ex->ei.blk = ei->blk;
next_ex->ei.len += ei->len;
en = next_ex;
}
- if (en) {
- __try_update_largest_extent(et, en);
+ if (!en)
+ return NULL;
+
+ __try_update_largest_extent(inode, et, en);
+
+ spin_lock(&sbi->extent_lock);
+ if (!list_empty(&en->list)) {
+ list_move_tail(&en->list, &sbi->extent_list);
et->cached_en = en;
}
+ spin_unlock(&sbi->extent_lock);
return en;
}
-static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
+static struct extent_node *__insert_extent_tree(struct inode *inode,
struct extent_tree *et, struct extent_info *ei,
struct rb_node **insert_p,
struct rb_node *insert_parent)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct rb_node **p = &et->root.rb_node;
struct rb_node *parent = NULL;
struct extent_node *en = NULL;
@@ -388,8 +406,13 @@ do_insert:
if (!en)
return NULL;
- __try_update_largest_extent(et, en);
+ __try_update_largest_extent(inode, et, en);
+
+ /* update in global extent list */
+ spin_lock(&sbi->extent_lock);
+ list_add_tail(&en->list, &sbi->extent_list);
et->cached_en = en;
+ spin_unlock(&sbi->extent_lock);
return en;
}
@@ -412,7 +435,7 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
write_lock(&et->lock);
- if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT)) {
+ if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
write_unlock(&et->lock);
return false;
}
@@ -454,7 +477,7 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
set_extent_info(&ei, end,
end - dei.fofs + dei.blk,
org_end - end);
- en1 = __insert_extent_tree(sbi, et, &ei,
+ en1 = __insert_extent_tree(inode, et, &ei,
NULL, NULL);
next_en = en1;
} else {
@@ -475,9 +498,9 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
}
if (parts)
- __try_update_largest_extent(et, en);
+ __try_update_largest_extent(inode, et, en);
else
- __detach_extent_node(sbi, et, en);
+ __release_extent_node(sbi, et, en);
/*
* if original extent is split into zero or two parts, extent
@@ -488,58 +511,28 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
insert_p = NULL;
insert_parent = NULL;
}
-
- /* update in global extent list */
- spin_lock(&sbi->extent_lock);
- if (!parts && !list_empty(&en->list))
- list_del(&en->list);
- if (en1)
- list_add_tail(&en1->list, &sbi->extent_list);
- spin_unlock(&sbi->extent_lock);
-
- /* release extent node */
- if (!parts)
- kmem_cache_free(extent_node_slab, en);
-
en = next_en;
}
/* 3. update extent in extent cache */
if (blkaddr) {
- struct extent_node *den = NULL;
set_extent_info(&ei, fofs, blkaddr, len);
- en1 = __try_merge_extent_node(sbi, et, &ei, &den,
- prev_en, next_en);
- if (!en1)
- en1 = __insert_extent_tree(sbi, et, &ei,
+ if (!__try_merge_extent_node(inode, et, &ei, prev_en, next_en))
+ __insert_extent_tree(inode, et, &ei,
insert_p, insert_parent);
/* give up extent_cache, if split and small updates happen */
if (dei.len >= 1 &&
prev.len < F2FS_MIN_EXTENT_LEN &&
et->largest.len < F2FS_MIN_EXTENT_LEN) {
- et->largest.len = 0;
- set_inode_flag(F2FS_I(inode), FI_NO_EXTENT);
- }
-
- spin_lock(&sbi->extent_lock);
- if (en1) {
- if (list_empty(&en1->list))
- list_add_tail(&en1->list, &sbi->extent_list);
- else
- list_move_tail(&en1->list, &sbi->extent_list);
+ __drop_largest_extent(inode, 0, UINT_MAX);
+ set_inode_flag(inode, FI_NO_EXTENT);
}
- if (den && !list_empty(&den->list))
- list_del(&den->list);
- spin_unlock(&sbi->extent_lock);
-
- if (den)
- kmem_cache_free(extent_node_slab, den);
}
- if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
- __free_extent_tree(sbi, et, true);
+ if (is_inode_flag_set(inode, FI_NO_EXTENT))
+ __free_extent_tree(sbi, et);
write_unlock(&et->lock);
@@ -548,46 +541,42 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
- struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
- struct extent_node *en, *tmp;
- unsigned long ino = F2FS_ROOT_INO(sbi);
- struct radix_tree_root *root = &sbi->extent_tree_root;
- unsigned int found;
+ struct extent_tree *et, *next;
+ struct extent_node *en;
unsigned int node_cnt = 0, tree_cnt = 0;
int remained;
if (!test_opt(sbi, EXTENT_CACHE))
return 0;
+ if (!atomic_read(&sbi->total_zombie_tree))
+ goto free_node;
+
if (!down_write_trylock(&sbi->extent_tree_lock))
goto out;
/* 1. remove unreferenced extent tree */
- while ((found = radix_tree_gang_lookup(root,
- (void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
- unsigned i;
-
- ino = treevec[found - 1]->ino + 1;
- for (i = 0; i < found; i++) {
- struct extent_tree *et = treevec[i];
-
- if (!atomic_read(&et->refcount)) {
- write_lock(&et->lock);
- node_cnt += __free_extent_tree(sbi, et, true);
- write_unlock(&et->lock);
-
- radix_tree_delete(root, et->ino);
- kmem_cache_free(extent_tree_slab, et);
- sbi->total_ext_tree--;
- tree_cnt++;
-
- if (node_cnt + tree_cnt >= nr_shrink)
- goto unlock_out;
- }
+ list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
+ if (atomic_read(&et->node_cnt)) {
+ write_lock(&et->lock);
+ node_cnt += __free_extent_tree(sbi, et);
+ write_unlock(&et->lock);
}
+ f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
+ list_del_init(&et->list);
+ radix_tree_delete(&sbi->extent_tree_root, et->ino);
+ kmem_cache_free(extent_tree_slab, et);
+ atomic_dec(&sbi->total_ext_tree);
+ atomic_dec(&sbi->total_zombie_tree);
+ tree_cnt++;
+
+ if (node_cnt + tree_cnt >= nr_shrink)
+ goto unlock_out;
+ cond_resched();
}
up_write(&sbi->extent_tree_lock);
+free_node:
/* 2. remove LRU extent entries */
if (!down_write_trylock(&sbi->extent_tree_lock))
goto out;
@@ -595,34 +584,29 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
remained = nr_shrink - (node_cnt + tree_cnt);
spin_lock(&sbi->extent_lock);
- list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
- if (!remained--)
+ for (; remained > 0; remained--) {
+ if (list_empty(&sbi->extent_list))
break;
- list_del_init(&en->list);
- }
- spin_unlock(&sbi->extent_lock);
-
- /*
- * reset ino for searching victims from beginning of global extent tree.
- */
- ino = F2FS_ROOT_INO(sbi);
-
- while ((found = radix_tree_gang_lookup(root,
- (void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
- unsigned i;
+ en = list_first_entry(&sbi->extent_list,
+ struct extent_node, list);
+ et = en->et;
+ if (!write_trylock(&et->lock)) {
+ /* refresh this extent node's position in extent list */
+ list_move_tail(&en->list, &sbi->extent_list);
+ continue;
+ }
- ino = treevec[found - 1]->ino + 1;
- for (i = 0; i < found; i++) {
- struct extent_tree *et = treevec[i];
+ list_del_init(&en->list);
+ spin_unlock(&sbi->extent_lock);
- write_lock(&et->lock);
- node_cnt += __free_extent_tree(sbi, et, false);
- write_unlock(&et->lock);
+ __detach_extent_node(sbi, et, en);
- if (node_cnt + tree_cnt >= nr_shrink)
- goto unlock_out;
- }
+ write_unlock(&et->lock);
+ node_cnt++;
+ spin_lock(&sbi->extent_lock);
}
+ spin_unlock(&sbi->extent_lock);
+
unlock_out:
up_write(&sbi->extent_tree_lock);
out:
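The LRU pass above never sleeps on a busy tree: when write_trylock() fails, the node is rotated to the hot end of the list and the scan moves on, and the list spinlock is dropped around the actual free. A minimal userspace sketch of that trylock-or-rotate pattern, assuming pthreads and illustrative names:

	#include <pthread.h>
	#include <stddef.h>

	struct tree { pthread_rwlock_t lock; };
	struct node { struct node *next; struct tree *owner; };

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node *head, *tail;	/* head = coldest, tail = hottest */

	static struct node *pop_head(void)
	{
		struct node *n = head;

		if (n && !(head = n->next))
			tail = NULL;
		return n;
	}

	static void push_tail(struct node *n)
	{
		n->next = NULL;
		if (tail)
			tail->next = n;
		else
			head = n;
		tail = n;
	}

	static int shrink(int budget)
	{
		int freed = 0;

		pthread_mutex_lock(&list_lock);
		while (budget-- > 0 && head) {
			struct node *n = pop_head();

			if (pthread_rwlock_trywrlock(&n->owner->lock)) {
				push_tail(n);	/* contended: refresh position */
				continue;
			}
			pthread_mutex_unlock(&list_lock);
			/* detach and free n here, like __detach_extent_node() */
			pthread_rwlock_unlock(&n->owner->lock);
			freed++;
			pthread_mutex_lock(&list_lock);
		}
		pthread_mutex_unlock(&list_lock);
		return freed;
	}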
@@ -637,16 +621,29 @@ unsigned int f2fs_destroy_extent_node(struct inode *inode)
struct extent_tree *et = F2FS_I(inode)->extent_tree;
unsigned int node_cnt = 0;
- if (!et)
+ if (!et || !atomic_read(&et->node_cnt))
return 0;
write_lock(&et->lock);
- node_cnt = __free_extent_tree(sbi, et, true);
+ node_cnt = __free_extent_tree(sbi, et);
write_unlock(&et->lock);
return node_cnt;
}
+void f2fs_drop_extent_tree(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct extent_tree *et = F2FS_I(inode)->extent_tree;
+
+ set_inode_flag(inode, FI_NO_EXTENT);
+
+ write_lock(&et->lock);
+ __free_extent_tree(sbi, et);
+ __drop_largest_extent(inode, 0, UINT_MAX);
+ write_unlock(&et->lock);
+}
+
void f2fs_destroy_extent_tree(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -656,8 +653,12 @@ void f2fs_destroy_extent_tree(struct inode *inode)
if (!et)
return;
- if (inode->i_nlink && !is_bad_inode(inode) && et->count) {
- atomic_dec(&et->refcount);
+ if (inode->i_nlink && !is_bad_inode(inode) &&
+ atomic_read(&et->node_cnt)) {
+ down_write(&sbi->extent_tree_lock);
+ list_add_tail(&et->list, &sbi->zombie_list);
+ atomic_inc(&sbi->total_zombie_tree);
+ up_write(&sbi->extent_tree_lock);
return;
}
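Queueing still-populated trees on zombie_list turns later cleanup into a cheap walk of known victims instead of a radix-tree-wide gang lookup, which is exactly what the shrinker hunk above consumes. A toy single-threaded sketch of the park/reap split, with illustrative names:

	struct ztree { struct ztree *next; int node_cnt; };
	static struct ztree *zombies;

	static void park(struct ztree *t)	/* inode teardown, tree kept */
	{
		t->next = zombies;
		zombies = t;
	}

	static int reap(int budget)		/* shrinker side */
	{
		int freed = 0;

		while (zombies && budget-- > 0) {
			struct ztree *t = zombies;

			zombies = t->next;
			freed += t->node_cnt;	/* __free_extent_tree() here */
			/* and kmem_cache_free(extent_tree_slab, t) */
		}
		return freed;
	}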
@@ -666,11 +667,10 @@ void f2fs_destroy_extent_tree(struct inode *inode)
/* delete extent tree entry in radix tree */
down_write(&sbi->extent_tree_lock);
- atomic_dec(&et->refcount);
- f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count);
+ f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
kmem_cache_free(extent_tree_slab, et);
- sbi->total_ext_tree--;
+ atomic_dec(&sbi->total_ext_tree);
up_write(&sbi->extent_tree_lock);
F2FS_I(inode)->extent_tree = NULL;
@@ -689,20 +689,20 @@ bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
void f2fs_update_extent_cache(struct dnode_of_data *dn)
{
- struct f2fs_inode_info *fi = F2FS_I(dn->inode);
pgoff_t fofs;
+ block_t blkaddr;
if (!f2fs_may_extent_tree(dn->inode))
return;
- f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);
-
- fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
- dn->ofs_in_node;
+ if (dn->data_blkaddr == NEW_ADDR)
+ blkaddr = NULL_ADDR;
+ else
+ blkaddr = dn->data_blkaddr;
- if (f2fs_update_extent_tree_range(dn->inode, fofs, dn->data_blkaddr, 1))
- sync_inode_page(dn);
+ fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
+ dn->ofs_in_node;
+ f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
}
void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
@@ -712,8 +712,7 @@ void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
if (!f2fs_may_extent_tree(dn->inode))
return;
- if (f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len))
- sync_inode_page(dn);
+ f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
}
void init_extent_cache_info(struct f2fs_sb_info *sbi)
@@ -722,7 +721,9 @@ void init_extent_cache_info(struct f2fs_sb_info *sbi)
init_rwsem(&sbi->extent_tree_lock);
INIT_LIST_HEAD(&sbi->extent_list);
spin_lock_init(&sbi->extent_lock);
- sbi->total_ext_tree = 0;
+ atomic_set(&sbi->total_ext_tree, 0);
+ INIT_LIST_HEAD(&sbi->zombie_list);
+ atomic_set(&sbi->total_zombie_tree, 0);
atomic_set(&sbi->total_ext_node, 0);
}
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 9dfbfe6dc775..eb01a0329e90 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -21,10 +21,12 @@
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/fscrypto.h>
+#include <crypto/hash.h>
#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition) BUG_ON(condition)
-#define f2fs_down_write(x, y) down_write_nest_lock(x, y)
#else
#define f2fs_bug_on(sbi, condition) \
do { \
@@ -33,7 +35,30 @@
set_sbi_flag(sbi, SBI_NEED_FSCK); \
} \
} while (0)
-#define f2fs_down_write(x, y) down_write(x)
+#endif
+
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+enum {
+ FAULT_KMALLOC,
+ FAULT_PAGE_ALLOC,
+ FAULT_ALLOC_NID,
+ FAULT_ORPHAN,
+ FAULT_BLOCK,
+ FAULT_DIR_DEPTH,
+ FAULT_EVICT_INODE,
+ FAULT_IO,
+ FAULT_CHECKPOINT,
+ FAULT_MAX,
+};
+
+struct f2fs_fault_info {
+ atomic_t inject_ops;
+ unsigned int inject_rate;
+ unsigned int inject_type;
+};
+
+extern char *fault_name[FAULT_MAX];
+#define IS_FAULT_SET(fi, type) (fi->inject_type & (1 << (type)))
#endif
/*
@@ -54,6 +79,10 @@
#define F2FS_MOUNT_FASTBOOT 0x00001000
#define F2FS_MOUNT_EXTENT_CACHE 0x00002000
#define F2FS_MOUNT_FORCE_FG_GC 0x00004000
+#define F2FS_MOUNT_DATA_FLUSH 0x00008000
+#define F2FS_MOUNT_FAULT_INJECTION 0x00010000
+#define F2FS_MOUNT_ADAPTIVE 0x00020000
+#define F2FS_MOUNT_LFS 0x00040000
#define clear_opt(sbi, option) (sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option) (sbi->mount_opt.opt |= F2FS_MOUNT_##option)
@@ -74,6 +103,7 @@ struct f2fs_mount_info {
};
#define F2FS_FEATURE_ENCRYPT 0x0001
+#define F2FS_FEATURE_BLKZONED 0x0002
#define F2FS_HAS_FEATURE(sb, mask) \
((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0)
@@ -82,25 +112,52 @@ struct f2fs_mount_info {
#define F2FS_CLEAR_FEATURE(sb, mask) \
F2FS_SB(sb)->raw_super->feature &= ~cpu_to_le32(mask)
-#define CRCPOLY_LE 0xedb88320
-
-static inline __u32 f2fs_crc32(void *buf, size_t len)
+/**
+ * wq_has_sleeper - check if there are any waiting processes
+ * @wq: wait queue head
+ *
+ * Returns true if wq has waiting processes
+ *
+ * Please refer to the comment for waitqueue_active.
+ */
+static inline bool wq_has_sleeper(wait_queue_head_t *wq)
{
- unsigned char *p = (unsigned char *)buf;
- __u32 crc = F2FS_SUPER_MAGIC;
- int i;
+ /*
+ * We need to be sure we are in sync with the
+ * add_wait_queue modifications to the wait queue.
+ *
+ * This memory barrier should be paired with one on the
+ * waiting side.
+ */
+ smp_mb();
+ return waitqueue_active(wq);
+}
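The pairing that the comment above demands can be made concrete with C11 atomics. A hedged userspace sketch in which seq_cst fences stand in for smp_mb() and busy-waiting stands in for the wait queue:

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_bool cond_ready, have_waiter;

	static void waiter(void)
	{
		atomic_store(&have_waiter, true);	/* add_wait_queue() side */
		atomic_thread_fence(memory_order_seq_cst);
		while (!atomic_load(&cond_ready))
			;				/* would sleep here */
	}

	static void waker(void)
	{
		atomic_store(&cond_ready, true);	/* publish the condition */
		atomic_thread_fence(memory_order_seq_cst);	/* the smp_mb() */
		if (atomic_load(&have_waiter))
			;				/* wake_up() only if needed */
	}

Without both fences, the waker can miss have_waiter while the waiter misses cond_ready, and the wakeup is lost.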
- while (len--) {
- crc ^= *p++;
- for (i = 0; i < 8; i++)
- crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
- }
- return crc;
+static inline void inode_nohighmem(struct inode *inode)
+{
+ mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
}
-static inline bool f2fs_crc_valid(__u32 blk_crc, void *buf, size_t buf_size)
+/**
+ * current_time - Return FS time
+ * @inode: inode.
+ *
+ * Return the current time truncated to the time granularity supported by
+ * the fs.
+ *
+ * Note that inode and inode->sb cannot be NULL.
+ * Otherwise, the function warns and returns time without truncation.
+ */
+static inline struct timespec current_time(struct inode *inode)
{
- return f2fs_crc32(buf, buf_size) == blk_crc;
+ struct timespec now = current_kernel_time();
+
+ if (unlikely(!inode->i_sb)) {
+ WARN(1, "current_time() called with uninitialized super_block in the inode");
+ return now;
+ }
+
+ return timespec_trunc(now, inode->i_sb->s_time_gran);
}
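timespec_trunc() itself just rounds tv_nsec down to the filesystem's granularity; a sketch of the common cases, assuming gran is in nanoseconds as in the kernel:

	#define NSEC_PER_SEC 1000000000L

	struct ts { long long tv_sec; long tv_nsec; };

	static struct ts ts_trunc(struct ts t, unsigned int gran)
	{
		if (gran == 1)
			;				/* 1 ns: keep full precision */
		else if (gran == NSEC_PER_SEC)
			t.tv_nsec = 0;			/* 1 s: drop nanoseconds */
		else
			t.tv_nsec -= t.tv_nsec % gran;	/* round down to gran */
		return t;
	}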
/*
@@ -119,12 +176,13 @@ enum {
CP_DISCARD,
};
-#define DEF_BATCHED_TRIM_SECTIONS 32
+#define DEF_BATCHED_TRIM_SECTIONS 2
#define BATCHED_TRIM_SEGMENTS(sbi) \
(SM_I(sbi)->trim_sections * (sbi)->segs_per_sec)
#define BATCHED_TRIM_BLOCKS(sbi) \
(BATCHED_TRIM_SEGMENTS(sbi) << (sbi)->log_blocks_per_seg)
#define DEF_CP_INTERVAL 60 /* 60 secs */
+#define DEF_IDLE_INTERVAL 5 /* 5 secs */
struct cp_control {
int reason;
@@ -158,13 +216,7 @@ struct ino_entry {
nid_t ino; /* inode number */
};
-/*
- * for the list of directory inodes or gc inodes.
- * NOTE: there are two slab users for this structure, if we add/modify/delete
- * fields in structure for one of slab users, it may affect fields or size of
- * other one, in this condition, it's better to split both of slab and related
- * data structure.
- */
+/* for the list of inodes to be GCed */
struct inode_entry {
struct list_head list; /* list head */
struct inode *inode; /* vfs inode pointer */
@@ -177,46 +229,52 @@ struct discard_entry {
int len; /* # of consecutive blocks of the discard */
};
+struct bio_entry {
+ struct list_head list;
+ struct bio *bio;
+ struct completion event;
+ int error;
+};
+
/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
struct list_head list; /* list head */
struct inode *inode; /* vfs inode pointer */
block_t blkaddr; /* block address locating the last fsync */
block_t last_dentry; /* block address locating the last dentry */
- block_t last_inode; /* block address locating the last inode */
};
-#define nats_in_cursum(sum) (le16_to_cpu(sum->n_nats))
-#define sits_in_cursum(sum) (le16_to_cpu(sum->n_sits))
+#define nats_in_cursum(jnl) (le16_to_cpu(jnl->n_nats))
+#define sits_in_cursum(jnl) (le16_to_cpu(jnl->n_sits))
-#define nat_in_journal(sum, i) (sum->nat_j.entries[i].ne)
-#define nid_in_journal(sum, i) (sum->nat_j.entries[i].nid)
-#define sit_in_journal(sum, i) (sum->sit_j.entries[i].se)
-#define segno_in_journal(sum, i) (sum->sit_j.entries[i].segno)
+#define nat_in_journal(jnl, i) (jnl->nat_j.entries[i].ne)
+#define nid_in_journal(jnl, i) (jnl->nat_j.entries[i].nid)
+#define sit_in_journal(jnl, i) (jnl->sit_j.entries[i].se)
+#define segno_in_journal(jnl, i) (jnl->sit_j.entries[i].segno)
-#define MAX_NAT_JENTRIES(sum) (NAT_JOURNAL_ENTRIES - nats_in_cursum(sum))
-#define MAX_SIT_JENTRIES(sum) (SIT_JOURNAL_ENTRIES - sits_in_cursum(sum))
+#define MAX_NAT_JENTRIES(jnl) (NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
+#define MAX_SIT_JENTRIES(jnl) (SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))
-static inline int update_nats_in_cursum(struct f2fs_summary_block *rs, int i)
+static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
- int before = nats_in_cursum(rs);
- rs->n_nats = cpu_to_le16(before + i);
+ int before = nats_in_cursum(journal);
+ journal->n_nats = cpu_to_le16(before + i);
return before;
}
-static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i)
+static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
- int before = sits_in_cursum(rs);
- rs->n_sits = cpu_to_le16(before + i);
+ int before = sits_in_cursum(journal);
+ journal->n_sits = cpu_to_le16(before + i);
return before;
}
-static inline bool __has_cursum_space(struct f2fs_summary_block *sum, int size,
- int type)
+static inline bool __has_cursum_space(struct f2fs_journal *journal,
+ int size, int type)
{
if (type == NAT_JOURNAL)
- return size <= MAX_NAT_JENTRIES(sum);
- return size <= MAX_SIT_JENTRIES(sum);
+ return size <= MAX_NAT_JENTRIES(journal);
+ return size <= MAX_SIT_JENTRIES(journal);
}
/*
@@ -234,13 +292,13 @@ static inline bool __has_cursum_space(struct f2fs_summary_block *sum, int size,
#define F2FS_IOC_ABORT_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 5)
#define F2FS_IOC_GARBAGE_COLLECT _IO(F2FS_IOCTL_MAGIC, 6)
#define F2FS_IOC_WRITE_CHECKPOINT _IO(F2FS_IOCTL_MAGIC, 7)
+#define F2FS_IOC_DEFRAGMENT _IO(F2FS_IOCTL_MAGIC, 8)
+#define F2FS_IOC_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \
+ struct f2fs_move_range)
-#define F2FS_IOC_SET_ENCRYPTION_POLICY \
- _IOR('f', 19, struct f2fs_encryption_policy)
-#define F2FS_IOC_GET_ENCRYPTION_PWSALT \
- _IOW('f', 20, __u8[16])
-#define F2FS_IOC_GET_ENCRYPTION_POLICY \
- _IOW('f', 21, struct f2fs_encryption_policy)
+#define F2FS_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY
+#define F2FS_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY
+#define F2FS_IOC_GET_ENCRYPTION_PWSALT FS_IOC_GET_ENCRYPTION_PWSALT
/*
* should be same as XFS_IOC_GOINGDOWN.
@@ -256,33 +314,27 @@ static inline bool __has_cursum_space(struct f2fs_summary_block *sum, int size,
/*
* ioctl commands in 32 bit emulation
*/
-#define F2FS_IOC32_GETFLAGS FS_IOC32_GETFLAGS
-#define F2FS_IOC32_SETFLAGS FS_IOC32_SETFLAGS
+#define F2FS_IOC32_GETFLAGS FS_IOC32_GETFLAGS
+#define F2FS_IOC32_SETFLAGS FS_IOC32_SETFLAGS
+#define F2FS_IOC32_GETVERSION FS_IOC32_GETVERSION
#endif
-/*
- * For INODE and NODE manager
- */
-/* for directory operations */
-struct f2fs_str {
- unsigned char *name;
- u32 len;
+struct f2fs_defragment {
+ u64 start;
+ u64 len;
};
-struct f2fs_filename {
- const struct qstr *usr_fname;
- struct f2fs_str disk_name;
- f2fs_hash_t hash;
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- struct f2fs_str crypto_buf;
-#endif
+struct f2fs_move_range {
+ u32 dst_fd; /* destination fd */
+ u64 pos_in; /* start position in src_fd */
+ u64 pos_out; /* start position in dst_fd */
+ u64 len; /* size to move */
};
-#define FSTR_INIT(n, l) { .name = n, .len = l }
-#define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len)
-#define fname_name(p) ((p)->disk_name.name)
-#define fname_len(p) ((p)->disk_name.len)
-
+/*
+ * For INODE and NODE manager
+ */
+/* for directory operations */
struct f2fs_dentry_ptr {
struct inode *inode;
const void *bitmap;
@@ -350,6 +402,7 @@ struct extent_node {
struct rb_node rb_node; /* rb node located in rb-tree */
struct list_head list; /* node in global extent list of sbi */
struct extent_info ei; /* extent info */
+ struct extent_tree *et; /* extent tree pointer */
};
struct extent_tree {
@@ -357,9 +410,9 @@ struct extent_tree {
struct rb_root root; /* root of extent info rb-tree */
struct extent_node *cached_en; /* recently accessed extent node */
struct extent_info largest; /* largest extent info */
+ struct list_head list; /* to be used by sbi->zombie_list */
rwlock_t lock; /* protect extent info rb-tree */
- atomic_t refcount; /* reference count of rb-tree */
- unsigned int count; /* # of extent node in rb-tree*/
+ atomic_t node_cnt; /* # of extent node in rb-tree */
};
/*
@@ -378,6 +431,7 @@ struct f2fs_map_blocks {
block_t m_lblk;
unsigned int m_len;
unsigned int m_flags;
+ pgoff_t *m_next_pgofs; /* point next possible non-hole pgofs */
};
/* for flag in get_data_block */
@@ -385,6 +439,8 @@ struct f2fs_map_blocks {
#define F2FS_GET_BLOCK_DIO 1
#define F2FS_GET_BLOCK_FIEMAP 2
#define F2FS_GET_BLOCK_BMAP 3
+#define F2FS_GET_BLOCK_PRE_DIO 4
+#define F2FS_GET_BLOCK_PRE_AIO 5
/*
* i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
@@ -393,6 +449,7 @@ struct f2fs_map_blocks {
#define FADVISE_LOST_PINO_BIT 0x02
#define FADVISE_ENCRYPT_BIT 0x04
#define FADVISE_ENC_NAME_BIT 0x08
+#define FADVISE_KEEP_SIZE_BIT 0x10
#define file_is_cold(inode) is_file(inode, FADVISE_COLD_BIT)
#define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT)
@@ -405,15 +462,8 @@ struct f2fs_map_blocks {
#define file_clear_encrypt(inode) clear_file(inode, FADVISE_ENCRYPT_BIT)
#define file_enc_name(inode) is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)
-
-/* Encryption algorithms */
-#define F2FS_ENCRYPTION_MODE_INVALID 0
-#define F2FS_ENCRYPTION_MODE_AES_256_XTS 1
-#define F2FS_ENCRYPTION_MODE_AES_256_GCM 2
-#define F2FS_ENCRYPTION_MODE_AES_256_CBC 3
-#define F2FS_ENCRYPTION_MODE_AES_256_CTS 4
-
-#include "f2fs_crypto.h"
+#define file_keep_isize(inode) is_file(inode, FADVISE_KEEP_SIZE_BIT)
+#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)
#define DEF_DIR_LEVEL 0
@@ -434,25 +484,22 @@ struct f2fs_inode_info {
unsigned int clevel; /* maximum level of given file name */
nid_t i_xattr_nid; /* node id that contains xattrs */
unsigned long long xattr_ver; /* cp version of xattr modification */
- struct inode_entry *dirty_dir; /* the pointer of dirty dir */
+ loff_t last_disk_size; /* last written file size */
+ struct list_head dirty_list; /* dirty list for dirs and files */
+ struct list_head gdirty_list; /* linked in global dirty list */
struct list_head inmem_pages; /* inmemory pages managed by f2fs */
struct mutex inmem_lock; /* lock for inmemory pages */
-
struct extent_tree *extent_tree; /* cached extent_tree entry */
-
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- /* Encryption params */
- struct f2fs_crypt_info *i_crypt_info;
-#endif
+ struct rw_semaphore dio_rwsem[2];/* avoid racing between dio and gc */
};
static inline void get_extent_info(struct extent_info *ext,
- struct f2fs_extent i_ext)
+ struct f2fs_extent *i_ext)
{
- ext->fofs = le32_to_cpu(i_ext.fofs);
- ext->blk = le32_to_cpu(i_ext.blk);
- ext->len = le32_to_cpu(i_ext.len);
+ ext->fofs = le32_to_cpu(i_ext->fofs);
+ ext->blk = le32_to_cpu(i_ext->blk);
+ ext->len = le32_to_cpu(i_ext->len);
}
static inline void set_raw_extent(struct extent_info *ext,
@@ -497,20 +544,30 @@ static inline bool __is_front_mergeable(struct extent_info *cur,
return __is_extent_mergeable(cur, front);
}
-static inline void __try_update_largest_extent(struct extent_tree *et,
- struct extent_node *en)
+extern void f2fs_mark_inode_dirty_sync(struct inode *, bool);
+static inline void __try_update_largest_extent(struct inode *inode,
+ struct extent_tree *et, struct extent_node *en)
{
- if (en->ei.len > et->largest.len)
+ if (en->ei.len > et->largest.len) {
et->largest = en->ei;
+ f2fs_mark_inode_dirty_sync(inode, true);
+ }
}
+enum nid_list {
+ FREE_NID_LIST,
+ ALLOC_NID_LIST,
+ MAX_NID_LIST,
+};
+
struct f2fs_nm_info {
block_t nat_blkaddr; /* base disk address of NAT */
nid_t max_nid; /* maximum possible node ids */
- nid_t available_nids; /* maximum available node ids */
+ nid_t available_nids; /* # of available node ids */
nid_t next_scan_nid; /* the next nid to be scanned */
unsigned int ram_thresh; /* control the memory footprint */
unsigned int ra_nid_pages; /* # of nid pages to be readaheaded */
+ unsigned int dirty_nats_ratio; /* control dirty nats ratio threshold */
/* NAT cache management */
struct radix_tree_root nat_root;/* root of the nat entry cache */
@@ -522,9 +579,9 @@ struct f2fs_nm_info {
/* free node ids management */
struct radix_tree_root free_nid_root;/* root of the free_nid cache */
- struct list_head free_nid_list; /* a list for free nids */
- spinlock_t free_nid_list_lock; /* protect free nid list */
- unsigned int fcnt; /* the number of free node id */
+ struct list_head nid_list[MAX_NID_LIST];/* lists for free nids */
+ unsigned int nid_cnt[MAX_NID_LIST]; /* the number of free node id */
+ spinlock_t nid_list_lock; /* protect nid lists ops */
struct mutex build_lock; /* lock for build free nids */
/* for checkpoint */
@@ -544,6 +601,9 @@ struct dnode_of_data {
nid_t nid; /* node id of the direct node block */
unsigned int ofs_in_node; /* data offset in the node page */
bool inode_page_locked; /* inode page is locked or not */
+ bool node_changed; /* is node block changed */
+ char cur_level; /* level of hole node page */
+ char max_level; /* level of current page located */
block_t data_blkaddr; /* block address of the node block */
};
@@ -582,7 +642,6 @@ enum {
CURSEG_WARM_NODE, /* direct node blocks of normal files */
CURSEG_COLD_NODE, /* indirect node blocks */
NO_CHECK_TYPE,
- CURSEG_DIRECT_IO, /* to use for the direct IO path */
};
struct flush_cmd {
@@ -594,6 +653,7 @@ struct flush_cmd {
struct flush_cmd_control {
struct task_struct *f2fs_issue_flush; /* flush thread */
wait_queue_head_t flush_wait_queue; /* waiting queue for wake-up */
+ atomic_t submit_flush; /* # of issued flushes */
struct llist_head issue_list; /* list for command issue */
struct llist_node *dispatch_list; /* list for command dispatch */
};
@@ -618,6 +678,7 @@ struct f2fs_sm_info {
/* for small discard management */
struct list_head discard_list; /* 4KB discard list */
+ struct list_head wait_list; /* linked with issued discard bio */
int nr_discards; /* # of discards in the list */
int max_discards; /* max. discards to be issued */
@@ -644,12 +705,16 @@ struct f2fs_sm_info {
* f2fs monitors the number of several block types such as on-writeback,
* dirty dentry blocks, dirty node blocks, and dirty meta blocks.
*/
+#define WB_DATA_TYPE(p) (__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
- F2FS_WRITEBACK,
F2FS_DIRTY_DENTS,
+ F2FS_DIRTY_DATA,
F2FS_DIRTY_NODES,
F2FS_DIRTY_META,
F2FS_INMEM_PAGES,
+ F2FS_DIRTY_IMETA,
+ F2FS_WB_CP_DATA,
+ F2FS_WB_DATA,
NR_COUNT_TYPE,
};
@@ -673,6 +738,7 @@ enum page_type {
META_FLUSH,
INMEM, /* the below types are used by tracepoints only. */
INMEM_DROP,
+ INMEM_REVOKE,
IPU,
OPU,
};
@@ -681,7 +747,8 @@ struct f2fs_io_info {
struct f2fs_sb_info *sbi; /* f2fs_sb_info pointer */
enum page_type type; /* contains DATA/NODE/META/META_FLUSH */
int rw; /* contains R/RS/W/WS with REQ_META/REQ_PRIO */
- block_t blk_addr; /* block address to be written */
+ block_t new_blkaddr; /* new block address to be written */
+ block_t old_blkaddr; /* old block address before Cow */
struct page *page; /* page to be written */
struct page *encrypted_page; /* encrypted page */
};
@@ -695,6 +762,27 @@ struct f2fs_bio_info {
struct rw_semaphore io_rwsem; /* blocking op for bio */
};
+#define FDEV(i) (sbi->devs[i])
+#define RDEV(i) (raw_super->devs[i])
+struct f2fs_dev_info {
+ struct block_device *bdev;
+ char path[MAX_PATH_LEN];
+ unsigned int total_segments;
+ block_t start_blk;
+ block_t end_blk;
+#ifdef CONFIG_BLK_DEV_ZONED
+ unsigned int nr_blkz; /* Total number of zones */
+ u8 *blkz_type; /* Array of zones type */
+#endif
+};
+
+enum inode_type {
+ DIR_INODE, /* for dirty dir inode */
+ FILE_INODE, /* for dirty regular/symlink inode */
+ DIRTY_META, /* for all dirtied inode metadata */
+ NR_INODE_TYPE,
+};
+
/* for inner inode cache management */
struct inode_management {
struct radix_tree_root ino_root; /* ino entry array */
@@ -709,14 +797,36 @@ enum {
SBI_IS_CLOSE, /* specify unmounting */
SBI_NEED_FSCK, /* need fsck.f2fs to fix */
SBI_POR_DOING, /* recovery is doing or not */
+ SBI_NEED_SB_WRITE, /* need to recover superblock */
+ SBI_NEED_CP, /* need to checkpoint */
};
+enum {
+ CP_TIME,
+ REQ_TIME,
+ MAX_TIME,
+};
+
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+#define F2FS_KEY_DESC_PREFIX "f2fs:"
+#define F2FS_KEY_DESC_PREFIX_SIZE 5
+#endif
struct f2fs_sb_info {
struct super_block *sb; /* pointer to VFS super block */
struct proc_dir_entry *s_proc; /* proc entry */
- struct buffer_head *raw_super_buf; /* buffer head of raw sb */
struct f2fs_super_block *raw_super; /* raw super block pointer */
- int s_flag; /* flags for sbi */
+ int valid_super_block; /* valid super block no */
+ unsigned long s_flag; /* flags for sbi */
+
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+ u8 key_prefix[F2FS_KEY_DESC_PREFIX_SIZE];
+ u8 key_prefix_size;
+#endif
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ unsigned int blocks_per_blkz; /* F2FS blocks per zone */
+ unsigned int log_blocks_per_blkz; /* log2 F2FS blocks per zone */
+#endif
/* for node-related operations */
struct f2fs_nm_info *nm_info; /* node manager */
@@ -728,32 +838,37 @@ struct f2fs_sb_info {
/* for bio operations */
struct f2fs_bio_info read_io; /* for read bios */
struct f2fs_bio_info write_io[NR_PAGE_TYPE]; /* for write bios */
+ struct mutex wio_mutex[NODE + 1]; /* bio ordering for NODE/DATA */
/* for checkpoint */
struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */
+ int cur_cp_pack; /* remain current cp pack */
+ spinlock_t cp_lock; /* for flag in ckpt */
struct inode *meta_inode; /* cache meta blocks */
struct mutex cp_mutex; /* checkpoint procedure lock */
struct rw_semaphore cp_rwsem; /* blocking FS operations */
struct rw_semaphore node_write; /* locking node writes */
- struct mutex writepages; /* mutex for writepages() */
wait_queue_head_t cp_wait;
- long cp_expires, cp_interval; /* next expected periodic cp */
+ unsigned long last_time[MAX_TIME]; /* to store time in jiffies */
+ long interval_time[MAX_TIME]; /* to store thresholds */
struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */
/* for orphan inode, use 0'th array */
unsigned int max_orphans; /* max orphan inodes */
- /* for directory inode management */
- struct list_head dir_inode_list; /* dir inode list */
- spinlock_t dir_inode_lock; /* for dir inode list lock */
+ /* for inode management */
+ struct list_head inode_list[NR_INODE_TYPE]; /* dirty inode list */
+ spinlock_t inode_lock[NR_INODE_TYPE]; /* for dirty inode list lock */
/* for extent tree cache */
struct radix_tree_root extent_tree_root;/* cache extent cache entries */
struct rw_semaphore extent_tree_lock; /* locking extent radix tree */
struct list_head extent_list; /* lru list for shrinker */
spinlock_t extent_lock; /* locking extent lru list */
- int total_ext_tree; /* extent tree count */
+ atomic_t total_ext_tree; /* extent tree count */
+ struct list_head zombie_list; /* extent zombie tree list */
+ atomic_t total_zombie_tree; /* extent zombie tree count */
atomic_t total_ext_node; /* extent info count */
/* basic filesystem units */
@@ -770,17 +885,23 @@ struct f2fs_sb_info {
unsigned int total_sections; /* total section count */
unsigned int total_node_count; /* total node block count */
unsigned int total_valid_node_count; /* valid node block count */
- unsigned int total_valid_inode_count; /* valid inode count */
+ loff_t max_file_blocks; /* max block index of file */
int active_logs; /* # of active logs */
int dir_level; /* directory level */
block_t user_block_count; /* # of user blocks */
block_t total_valid_block_count; /* # of valid blocks */
- block_t alloc_valid_block_count; /* # of allocated blocks */
block_t discard_blks; /* discard command candidates */
block_t last_valid_block_count; /* for recovery */
u32 s_next_generation; /* for NFS support */
- atomic_t nr_pages[NR_COUNT_TYPE]; /* # of pages, see count_type */
+
+ /* # of pages, see count_type */
+ atomic_t nr_pages[NR_COUNT_TYPE];
+ /* # of allocated blocks */
+ struct percpu_counter alloc_valid_block_count;
+
+ /* valid inode count */
+ struct percpu_counter total_valid_inode_count;
struct f2fs_mount_info mount_opt; /* mount options */
@@ -809,7 +930,7 @@ struct f2fs_sb_info {
atomic_t inline_inode; /* # of inline_data inodes */
atomic_t inline_dir; /* # of inline_dentry inodes */
int bg_gc; /* background gc calls */
- unsigned int n_dirty_dirs; /* # of dir inodes */
+ unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */
#endif
unsigned int last_victim[2]; /* last victim segment # */
spinlock_t stat_lock; /* lock for stat operations */
@@ -820,13 +941,106 @@ struct f2fs_sb_info {
/* For shrinker support */
struct list_head s_list;
+ int s_ndevs; /* number of devices */
+ struct f2fs_dev_info *devs; /* for device list */
struct mutex umount_mutex;
unsigned int shrinker_run_no;
+
+ /* For write statistics */
+ u64 sectors_written_start;
+ u64 kbytes_written;
+
+ /* Reference to checksum algorithm driver via cryptoapi */
+ struct crypto_shash *s_chksum_driver;
+
+ /* For fault injection */
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ struct f2fs_fault_info fault_info;
+#endif
};
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
+{
+ struct f2fs_fault_info *ffi = &sbi->fault_info;
+
+ if (!ffi->inject_rate)
+ return false;
+
+ if (!IS_FAULT_SET(ffi, type))
+ return false;
+
+ atomic_inc(&ffi->inject_ops);
+ if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
+ atomic_set(&ffi->inject_ops, 0);
+ printk("%sF2FS-fs : inject %s in %pF\n",
+ KERN_INFO,
+ fault_name[type],
+ __builtin_return_address(0));
+ return true;
+ }
+ return false;
+}
+#endif
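The gating is two cheap tests plus a wrapping counter: a fault of a configured type fires once every inject_rate eligible calls. A single-threaded userspace sketch (the kernel version above uses an atomic op counter):

	#include <stdbool.h>

	struct fault_info { unsigned int ops, rate, type_mask; };

	static bool should_inject(struct fault_info *fi, int type)
	{
		if (!fi->rate || !(fi->type_mask & (1u << type)))
			return false;	/* injection off or type unselected */
		if (++fi->ops >= fi->rate) {
			fi->ops = 0;	/* restart the window */
			return true;	/* fail this operation */
		}
		return false;
	}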
+
+/* For write statistics. Suppose sector size is 512 bytes,
+ * and the return value is in kbytes. s is a struct f2fs_sb_info pointer.
+ */
+#define BD_PART_WRITTEN(s) \
+(((u64)part_stat_read(s->sb->s_bdev->bd_part, sectors[1]) - \
+ s->sectors_written_start) >> 1)
+
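The shift by one in BD_PART_WRITTEN is only the sector-to-kilobyte conversion; with 512-byte sectors, two sectors make one KB:

	static unsigned long long kbytes_written(unsigned long long sectors_now,
						 unsigned long long sectors_at_mount)
	{
		return (sectors_now - sectors_at_mount) >> 1;	/* 2 sectors/KB */
	}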
+static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
+{
+ sbi->last_time[type] = jiffies;
+}
+
+static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
+{
+ struct timespec ts = {sbi->interval_time[type], 0};
+ unsigned long interval = timespec_to_jiffies(&ts);
+
+ return time_after(jiffies, sbi->last_time[type] + interval);
+}
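time_after() is the wraparound-safe jiffies comparison; its open-coded equivalent, for reference:

	/* true once now has passed last + interval, even across a wrap,
	 * because the difference is evaluated as a signed value */
	static int interval_elapsed(unsigned long now, unsigned long last,
				    unsigned long interval)
	{
		return (long)(now - (last + interval)) > 0;
	}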
+
+static inline bool is_idle(struct f2fs_sb_info *sbi)
+{
+ struct block_device *bdev = sbi->sb->s_bdev;
+ struct request_queue *q = bdev_get_queue(bdev);
+ struct request_list *rl = &q->root_rl;
+
+ if (rl->count[BLK_RW_SYNC] || rl->count[BLK_RW_ASYNC])
+ return 0;
+
+ return f2fs_time_over(sbi, REQ_TIME);
+}
+
/*
* Inline functions
*/
+static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
+ unsigned int length)
+{
+ SHASH_DESC_ON_STACK(shash, sbi->s_chksum_driver);
+ u32 *ctx = (u32 *)shash_desc_ctx(shash);
+ int err;
+
+ shash->tfm = sbi->s_chksum_driver;
+ shash->flags = 0;
+ *ctx = F2FS_SUPER_MAGIC;
+
+ err = crypto_shash_update(shash, address, length);
+ BUG_ON(err);
+
+ return *ctx;
+}
+
+static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
+ void *buf, size_t buf_size)
+{
+ return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
+}
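The value produced through the crypto API here is the plain little-endian CRC32, seeded with F2FS_SUPER_MAGIC and left uninverted, i.e. the same checksum the removed open-coded helper computed. The bitwise reference, kept from the deleted code:

	#define CRCPOLY_LE 0xedb88320u

	static unsigned int crc32_le_ref(unsigned int seed,
					 const unsigned char *p,
					 unsigned long len)
	{
		unsigned int crc = seed;	/* F2FS_SUPER_MAGIC in this usage */
		int i;

		while (len--) {
			crc ^= *p++;
			for (i = 0; i < 8; i++)
				crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
		}
		return crc;
	}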
+
static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
{
return container_of(inode, struct f2fs_inode_info, vfs_inode);
@@ -909,17 +1123,17 @@ static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
{
- return sbi->s_flag & (0x01 << type);
+ return test_bit(type, &sbi->s_flag);
}
static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
- sbi->s_flag |= (0x01 << type);
+ set_bit(type, &sbi->s_flag);
}
static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
- sbi->s_flag &= ~(0x01 << type);
+ clear_bit(type, &sbi->s_flag);
}
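Moving s_flag from an int to an unsigned long driven by test_bit/set_bit/clear_bit makes the flag updates atomic; with the old |= and &= read-modify-write, two CPUs setting different bits could silently lose one. A C11 sketch of the atomic variant:

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_ulong s_flag;

	static void set_flag(int nr)
	{
		atomic_fetch_or(&s_flag, 1ul << nr);	/* atomic RMW */
	}

	static void clear_flag(int nr)
	{
		atomic_fetch_and(&s_flag, ~(1ul << nr));
	}

	static bool flag_set(int nr)
	{
		return atomic_load(&s_flag) & (1ul << nr);
	}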
static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
@@ -927,26 +1141,50 @@ static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
return le64_to_cpu(cp->checkpoint_ver);
}
-static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
+
return ckpt_flags & f;
}
-static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
- unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
+ return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
+}
+
+static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+{
+ unsigned int ckpt_flags;
+
+ ckpt_flags = le32_to_cpu(cp->ckpt_flags);
ckpt_flags |= f;
cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}
-static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
{
- unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
+ spin_lock(&sbi->cp_lock);
+ __set_ckpt_flags(F2FS_CKPT(sbi), f);
+ spin_unlock(&sbi->cp_lock);
+}
+
+static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
+{
+ unsigned int ckpt_flags;
+
+ ckpt_flags = le32_to_cpu(cp->ckpt_flags);
ckpt_flags &= (~f);
cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}
+static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
+{
+ spin_lock(&sbi->cp_lock);
+ __clear_ckpt_flags(F2FS_CKPT(sbi), f);
+ spin_unlock(&sbi->cp_lock);
+}
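Unlike s_flag, ckpt_flags is a little-endian field inside the on-disk checkpoint, so atomic bitops are not an option; the new cp_lock serializes the whole read-modify-write instead. The shape of the pattern, sketched with a pthread mutex in place of the spinlock:

	#include <pthread.h>
	#include <stdint.h>

	static pthread_mutex_t cp_lock = PTHREAD_MUTEX_INITIALIZER;
	static uint32_t ckpt_flags;	/* stands in for the le32 disk field */

	static void set_flag_locked(uint32_t f)
	{
		pthread_mutex_lock(&cp_lock);
		ckpt_flags |= f;	/* le32 conversions omitted in sketch */
		pthread_mutex_unlock(&cp_lock);
	}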
+
static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
down_read(&sbi->cp_rwsem);
@@ -959,7 +1197,7 @@ static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
{
- f2fs_down_write(&sbi->cp_rwsem, &sbi->cp_mutex);
+ down_write(&sbi->cp_rwsem);
}
static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
@@ -985,8 +1223,8 @@ static inline bool __remain_node_summaries(int reason)
static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
{
- return (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG) ||
- is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FASTBOOT_FLAG));
+ return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
+ is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
}
/*
@@ -1019,22 +1257,37 @@ static inline bool f2fs_has_xattr_block(unsigned int ofs)
return ofs == XATTR_NODE_OFFSET;
}
+static inline void f2fs_i_blocks_write(struct inode *, blkcnt_t, bool);
static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
- struct inode *inode, blkcnt_t count)
+ struct inode *inode, blkcnt_t *count)
{
- block_t valid_block_count;
+ blkcnt_t diff;
- spin_lock(&sbi->stat_lock);
- valid_block_count =
- sbi->total_valid_block_count + (block_t)count;
- if (unlikely(valid_block_count > sbi->user_block_count)) {
- spin_unlock(&sbi->stat_lock);
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_BLOCK))
return false;
+#endif
+ /*
+ * let's increase this prior to the actual block count change in order
+ * for f2fs_sync_file to avoid data races when deciding checkpoint.
+ */
+ percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
+
+ spin_lock(&sbi->stat_lock);
+ sbi->total_valid_block_count += (block_t)(*count);
+ if (unlikely(sbi->total_valid_block_count > sbi->user_block_count)) {
+ diff = sbi->total_valid_block_count - sbi->user_block_count;
+ *count -= diff;
+ sbi->total_valid_block_count = sbi->user_block_count;
+ if (!*count) {
+ spin_unlock(&sbi->stat_lock);
+ percpu_counter_sub(&sbi->alloc_valid_block_count, diff);
+ return false;
+ }
}
- inode->i_blocks += count;
- sbi->total_valid_block_count = valid_block_count;
- sbi->alloc_valid_block_count += (block_t)count;
spin_unlock(&sbi->stat_lock);
+
+ f2fs_i_blocks_write(inode, *count, true);
return true;
}
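The reworked inc_valid_block_count() grants partially: it bumps the allocation hint first (so a concurrent fsync sees the intent), then trims the request so total_valid_block_count never exceeds user_block_count, rolling the hint back only when nothing fits. A single-threaded control-flow sketch with illustrative names:

	#include <stdbool.h>

	static unsigned long long total_valid, user_blocks, alloc_hint;

	static bool reserve_blocks(unsigned long long *count)
	{
		alloc_hint += *count;	/* percpu_counter_add() in the patch */
		total_valid += *count;
		if (total_valid > user_blocks) {
			unsigned long long diff = total_valid - user_blocks;

			*count -= diff;		/* grant what still fits */
			total_valid = user_blocks;
			if (!*count) {
				alloc_hint -= diff;	/* nothing granted */
				return false;
			}
		}
		return true;
	}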
@@ -1045,22 +1298,27 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
spin_lock(&sbi->stat_lock);
f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
f2fs_bug_on(sbi, inode->i_blocks < count);
- inode->i_blocks -= count;
sbi->total_valid_block_count -= (block_t)count;
spin_unlock(&sbi->stat_lock);
+ f2fs_i_blocks_write(inode, count, false);
}
static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
atomic_inc(&sbi->nr_pages[count_type]);
+
+ if (count_type == F2FS_DIRTY_DATA || count_type == F2FS_INMEM_PAGES ||
+ count_type == F2FS_WB_CP_DATA || count_type == F2FS_WB_DATA)
+ return;
+
set_sbi_flag(sbi, SBI_IS_DIRTY);
}
static inline void inode_inc_dirty_pages(struct inode *inode)
{
atomic_inc(&F2FS_I(inode)->dirty_pages);
- if (S_ISDIR(inode->i_mode))
- inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_DENTS);
+ inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
+ F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
}
static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
@@ -1075,12 +1333,11 @@ static inline void inode_dec_dirty_pages(struct inode *inode)
return;
atomic_dec(&F2FS_I(inode)->dirty_pages);
-
- if (S_ISDIR(inode->i_mode))
- dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_DENTS);
+ dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
+ F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
}
-static inline int get_pages(struct f2fs_sb_info *sbi, int count_type)
+static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
{
return atomic_read(&sbi->nr_pages[count_type]);
}
@@ -1092,10 +1349,11 @@ static inline int get_dirty_pages(struct inode *inode)
static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
{
- unsigned int pages_per_sec = sbi->segs_per_sec *
- (1 << sbi->log_blocks_per_seg);
- return ((get_pages(sbi, block_type) + pages_per_sec - 1)
- >> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
+ unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
+ unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
+ sbi->log_blocks_per_seg;
+
+ return segs / sbi->segs_per_sec;
}
static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
@@ -1103,6 +1361,11 @@ static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
return sbi->total_valid_block_count;
}
+static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
+{
+ return sbi->discard_blks;
+}
+
static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
@@ -1140,22 +1403,27 @@ static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
- block_t start_addr;
- struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
- unsigned long long ckpt_version = cur_cp_version(ckpt);
-
- start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
+ block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
- /*
- * odd numbered checkpoint should at cp segment 0
- * and even segment must be at cp segment 1
- */
- if (!(ckpt_version & 1))
+ if (sbi->cur_cp_pack == 2)
start_addr += sbi->blocks_per_seg;
+ return start_addr;
+}
+static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
+{
+ block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
+
+ if (sbi->cur_cp_pack == 1)
+ start_addr += sbi->blocks_per_seg;
return start_addr;
}
+static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
+{
+ sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
+}
+
static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
{
return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
@@ -1182,13 +1450,13 @@ static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi,
}
if (inode)
- inode->i_blocks++;
+ f2fs_i_blocks_write(inode, 1, true);
- sbi->alloc_valid_block_count++;
sbi->total_valid_node_count++;
sbi->total_valid_block_count++;
spin_unlock(&sbi->stat_lock);
+ percpu_counter_inc(&sbi->alloc_valid_block_count);
return true;
}
@@ -1201,7 +1469,7 @@ static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
f2fs_bug_on(sbi, !sbi->total_valid_node_count);
f2fs_bug_on(sbi, !inode->i_blocks);
- inode->i_blocks--;
+ f2fs_i_blocks_write(inode, 1, false);
sbi->total_valid_node_count--;
sbi->total_valid_block_count--;
@@ -1215,28 +1483,30 @@ static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{
- spin_lock(&sbi->stat_lock);
- f2fs_bug_on(sbi, sbi->total_valid_inode_count == sbi->total_node_count);
- sbi->total_valid_inode_count++;
- spin_unlock(&sbi->stat_lock);
+ percpu_counter_inc(&sbi->total_valid_inode_count);
}
static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
{
- spin_lock(&sbi->stat_lock);
- f2fs_bug_on(sbi, !sbi->total_valid_inode_count);
- sbi->total_valid_inode_count--;
- spin_unlock(&sbi->stat_lock);
+ percpu_counter_dec(&sbi->total_valid_inode_count);
}
-static inline unsigned int valid_inode_count(struct f2fs_sb_info *sbi)
+static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
{
- return sbi->total_valid_inode_count;
+ return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
}
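Per-cpu counters make the inc/dec paths contention-free at the cost of exact reads: valid_inode_count() has to sum every CPU's contribution and clamp at zero. A toy fixed-size sketch:

	#define NR_CPUS 4

	struct pcpu_counter { long counts[NR_CPUS]; };

	static void pc_inc(struct pcpu_counter *c, int cpu)
	{
		c->counts[cpu]++;	/* no shared cacheline bounced */
	}

	static long pc_sum_positive(const struct pcpu_counter *c)
	{
		long sum = 0;
		int i;

		for (i = 0; i < NR_CPUS; i++)
			sum += c->counts[i];
		return sum < 0 ? 0 : sum;	/* clamp, like the kernel helper */
	}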
static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
pgoff_t index, bool for_write)
{
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ struct page *page = find_lock_page(mapping, index);
+ if (page)
+ return page;
+
+ if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC))
+ return NULL;
+#endif
if (!for_write)
return grab_cache_page(mapping, index);
return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
@@ -1261,7 +1531,7 @@ static inline void f2fs_put_page(struct page *page, int unlock)
f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
unlock_page(page);
}
- page_cache_release(page);
+ put_page(page);
}
static inline void f2fs_put_dnode(struct dnode_of_data *dn)
@@ -1396,13 +1666,12 @@ static inline void f2fs_change_bit(unsigned int nr, char *addr)
enum {
FI_NEW_INODE, /* indicate newly allocated inode */
FI_DIRTY_INODE, /* indicate inode is dirty or not */
+ FI_AUTO_RECOVER, /* indicate inode is recoverable */
FI_DIRTY_DIR, /* indicate directory has dirty pages */
FI_INC_LINK, /* need to increment i_nlink */
FI_ACL_MODE, /* indicate acl mode */
FI_NO_ALLOC, /* should not allocate any blocks */
FI_FREE_NID, /* free allocated nid */
- FI_UPDATE_DIR, /* should update inode block for consistency */
- FI_DELAY_IPUT, /* used for the recovery */
FI_NO_EXTENT, /* not to use the extent cache */
FI_INLINE_XATTR, /* used for inline xattr */
FI_INLINE_DATA, /* used for inline data*/
@@ -1416,71 +1685,145 @@ enum {
FI_DROP_CACHE, /* drop dirty page cache */
FI_DATA_EXIST, /* indicate data exists */
FI_INLINE_DOTS, /* indicate inline dot dentries */
+ FI_DO_DEFRAG, /* indicate defragment is running */
+ FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */
};
-static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag)
+static inline void __mark_inode_dirty_flag(struct inode *inode,
+ int flag, bool set)
+{
+ switch (flag) {
+ case FI_INLINE_XATTR:
+ case FI_INLINE_DATA:
+ case FI_INLINE_DENTRY:
+ if (set)
+ return;
+ case FI_DATA_EXIST:
+ case FI_INLINE_DOTS:
+ f2fs_mark_inode_dirty_sync(inode, true);
+ }
+}
+
+static inline void set_inode_flag(struct inode *inode, int flag)
+{
+ if (!test_bit(flag, &F2FS_I(inode)->flags))
+ set_bit(flag, &F2FS_I(inode)->flags);
+ __mark_inode_dirty_flag(inode, flag, true);
+}
+
+static inline int is_inode_flag_set(struct inode *inode, int flag)
+{
+ return test_bit(flag, &F2FS_I(inode)->flags);
+}
+
+static inline void clear_inode_flag(struct inode *inode, int flag)
+{
+ if (test_bit(flag, &F2FS_I(inode)->flags))
+ clear_bit(flag, &F2FS_I(inode)->flags);
+ __mark_inode_dirty_flag(inode, flag, false);
+}
+
+static inline void set_acl_inode(struct inode *inode, umode_t mode)
{
- if (!test_bit(flag, &fi->flags))
- set_bit(flag, &fi->flags);
+ F2FS_I(inode)->i_acl_mode = mode;
+ set_inode_flag(inode, FI_ACL_MODE);
+ f2fs_mark_inode_dirty_sync(inode, false);
}
-static inline int is_inode_flag_set(struct f2fs_inode_info *fi, int flag)
+static inline void f2fs_i_links_write(struct inode *inode, bool inc)
{
- return test_bit(flag, &fi->flags);
+ if (inc)
+ inc_nlink(inode);
+ else
+ drop_nlink(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
}
-static inline void clear_inode_flag(struct f2fs_inode_info *fi, int flag)
+static inline void f2fs_i_blocks_write(struct inode *inode,
+ blkcnt_t diff, bool add)
{
- if (test_bit(flag, &fi->flags))
- clear_bit(flag, &fi->flags);
+ bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
+ bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
+
+ inode->i_blocks = add ? inode->i_blocks + diff :
+ inode->i_blocks - diff;
+ f2fs_mark_inode_dirty_sync(inode, true);
+ if (clean || recover)
+ set_inode_flag(inode, FI_AUTO_RECOVER);
}
-static inline void set_acl_inode(struct f2fs_inode_info *fi, umode_t mode)
+static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
{
- fi->i_acl_mode = mode;
- set_inode_flag(fi, FI_ACL_MODE);
+ bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
+ bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
+
+ if (i_size_read(inode) == i_size)
+ return;
+
+ i_size_write(inode, i_size);
+ f2fs_mark_inode_dirty_sync(inode, true);
+ if (clean || recover)
+ set_inode_flag(inode, FI_AUTO_RECOVER);
}
-static inline void get_inline_info(struct f2fs_inode_info *fi,
- struct f2fs_inode *ri)
+static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
{
+ F2FS_I(inode)->i_current_depth = depth;
+ f2fs_mark_inode_dirty_sync(inode, true);
+}
+
+static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
+{
+ F2FS_I(inode)->i_xattr_nid = xnid;
+ f2fs_mark_inode_dirty_sync(inode, true);
+}
+
+static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
+{
+ F2FS_I(inode)->i_pino = pino;
+ f2fs_mark_inode_dirty_sync(inode, true);
+}
+
+static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
+{
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+
if (ri->i_inline & F2FS_INLINE_XATTR)
- set_inode_flag(fi, FI_INLINE_XATTR);
+ set_bit(FI_INLINE_XATTR, &fi->flags);
if (ri->i_inline & F2FS_INLINE_DATA)
- set_inode_flag(fi, FI_INLINE_DATA);
+ set_bit(FI_INLINE_DATA, &fi->flags);
if (ri->i_inline & F2FS_INLINE_DENTRY)
- set_inode_flag(fi, FI_INLINE_DENTRY);
+ set_bit(FI_INLINE_DENTRY, &fi->flags);
if (ri->i_inline & F2FS_DATA_EXIST)
- set_inode_flag(fi, FI_DATA_EXIST);
+ set_bit(FI_DATA_EXIST, &fi->flags);
if (ri->i_inline & F2FS_INLINE_DOTS)
- set_inode_flag(fi, FI_INLINE_DOTS);
+ set_bit(FI_INLINE_DOTS, &fi->flags);
}
-static inline void set_raw_inline(struct f2fs_inode_info *fi,
- struct f2fs_inode *ri)
+static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
{
ri->i_inline = 0;
- if (is_inode_flag_set(fi, FI_INLINE_XATTR))
+ if (is_inode_flag_set(inode, FI_INLINE_XATTR))
ri->i_inline |= F2FS_INLINE_XATTR;
- if (is_inode_flag_set(fi, FI_INLINE_DATA))
+ if (is_inode_flag_set(inode, FI_INLINE_DATA))
ri->i_inline |= F2FS_INLINE_DATA;
- if (is_inode_flag_set(fi, FI_INLINE_DENTRY))
+ if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
ri->i_inline |= F2FS_INLINE_DENTRY;
- if (is_inode_flag_set(fi, FI_DATA_EXIST))
+ if (is_inode_flag_set(inode, FI_DATA_EXIST))
ri->i_inline |= F2FS_DATA_EXIST;
- if (is_inode_flag_set(fi, FI_INLINE_DOTS))
+ if (is_inode_flag_set(inode, FI_INLINE_DOTS))
ri->i_inline |= F2FS_INLINE_DOTS;
}
static inline int f2fs_has_inline_xattr(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_INLINE_XATTR);
+ return is_inode_flag_set(inode, FI_INLINE_XATTR);
}
-static inline unsigned int addrs_per_inode(struct f2fs_inode_info *fi)
+static inline unsigned int addrs_per_inode(struct inode *inode)
{
- if (f2fs_has_inline_xattr(&fi->vfs_inode))
+ if (f2fs_has_inline_xattr(inode))
return DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS;
return DEF_ADDRS_PER_INODE;
}
@@ -1502,43 +1845,43 @@ static inline int inline_xattr_size(struct inode *inode)
static inline int f2fs_has_inline_data(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_INLINE_DATA);
+ return is_inode_flag_set(inode, FI_INLINE_DATA);
}
static inline void f2fs_clear_inline_inode(struct inode *inode)
{
- clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
- clear_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
+ clear_inode_flag(inode, FI_INLINE_DATA);
+ clear_inode_flag(inode, FI_DATA_EXIST);
}
static inline int f2fs_exist_data(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_DATA_EXIST);
+ return is_inode_flag_set(inode, FI_DATA_EXIST);
}
static inline int f2fs_has_inline_dots(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_INLINE_DOTS);
+ return is_inode_flag_set(inode, FI_INLINE_DOTS);
}
static inline bool f2fs_is_atomic_file(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_ATOMIC_FILE);
+ return is_inode_flag_set(inode, FI_ATOMIC_FILE);
}
static inline bool f2fs_is_volatile_file(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_VOLATILE_FILE);
+ return is_inode_flag_set(inode, FI_VOLATILE_FILE);
}
static inline bool f2fs_is_first_block_written(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
+ return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
}
static inline bool f2fs_is_drop_cache(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_DROP_CACHE);
+ return is_inode_flag_set(inode, FI_DROP_CACHE);
}
static inline void *inline_data_addr(struct page *page)
@@ -1549,7 +1892,7 @@ static inline void *inline_data_addr(struct page *page)
static inline int f2fs_has_inline_dentry(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_INLINE_DENTRY);
+ return is_inode_flag_set(inode, FI_INLINE_DENTRY);
}
static inline void f2fs_dentry_kunmap(struct inode *dir, struct page *page)
@@ -1566,27 +1909,41 @@ static inline int is_file(struct inode *inode, int type)
static inline void set_file(struct inode *inode, int type)
{
F2FS_I(inode)->i_advise |= type;
+ f2fs_mark_inode_dirty_sync(inode, true);
}
static inline void clear_file(struct inode *inode, int type)
{
F2FS_I(inode)->i_advise &= ~type;
+ f2fs_mark_inode_dirty_sync(inode, true);
}
-static inline int f2fs_readonly(struct super_block *sb)
+static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
{
- return sb->s_flags & MS_RDONLY;
+ if (dsync) {
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ bool ret;
+
+ spin_lock(&sbi->inode_lock[DIRTY_META]);
+ ret = list_empty(&F2FS_I(inode)->gdirty_list);
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+ return ret;
+ }
+ if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
+ file_keep_isize(inode) ||
+ i_size_read(inode) & PAGE_MASK)
+ return false;
+ return F2FS_I(inode)->last_disk_size == i_size_read(inode);
}
-static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
+static inline int f2fs_readonly(struct super_block *sb)
{
- return is_set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
+ return sb->s_flags & MS_RDONLY;
}
-static inline void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi)
+static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
{
- set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
- sbi->sb->s_flags |= MS_RDONLY;
+ return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
}
static inline bool is_dot_dotdot(const struct qstr *str)
@@ -1602,13 +1959,21 @@ static inline bool is_dot_dotdot(const struct qstr *str)
static inline bool f2fs_may_extent_tree(struct inode *inode)
{
- mode_t mode = inode->i_mode;
-
if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE) ||
- is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
+ is_inode_flag_set(inode, FI_NO_EXTENT))
return false;
- return S_ISREG(mode);
+ return S_ISREG(inode->i_mode);
+}
+
+static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
+ size_t size, gfp_t flags)
+{
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_KMALLOC))
+ return NULL;
+#endif
+ return kmalloc(size, flags);
}
static inline void *f2fs_kvmalloc(size_t size, gfp_t flags)
@@ -1632,14 +1997,14 @@ static inline void *f2fs_kvzalloc(size_t size, gfp_t flags)
}
#define get_inode_mode(i) \
- ((is_inode_flag_set(F2FS_I(i), FI_ACL_MODE)) ? \
+ ((is_inode_flag_set(i, FI_ACL_MODE)) ? \
(F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
/* get offset of first page in next direct node */
-#define PGOFS_OF_NEXT_DNODE(pgofs, fi) \
- ((pgofs < ADDRS_PER_INODE(fi)) ? ADDRS_PER_INODE(fi) : \
- (pgofs - ADDRS_PER_INODE(fi) + ADDRS_PER_BLOCK) / \
- ADDRS_PER_BLOCK * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi))
+#define PGOFS_OF_NEXT_DNODE(pgofs, inode) \
+ ((pgofs < ADDRS_PER_INODE(inode)) ? ADDRS_PER_INODE(inode) : \
+ (pgofs - ADDRS_PER_INODE(inode) + ADDRS_PER_BLOCK) / \
+ ADDRS_PER_BLOCK * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode))
/*
* file.c
@@ -1647,7 +2012,7 @@ static inline void *f2fs_kvzalloc(size_t size, gfp_t flags)
int f2fs_sync_file(struct file *, loff_t, loff_t, int);
void truncate_data_blocks(struct dnode_of_data *);
int truncate_blocks(struct inode *, u64, bool);
-int f2fs_truncate(struct inode *, bool);
+int f2fs_truncate(struct inode *);
int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
int f2fs_setattr(struct dentry *, struct iattr *);
int truncate_hole(struct inode *, pgoff_t, pgoff_t);
@@ -1660,9 +2025,10 @@ long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);
*/
void f2fs_set_inode_flags(struct inode *);
struct inode *f2fs_iget(struct super_block *, unsigned long);
+struct inode *f2fs_iget_retry(struct super_block *, unsigned long);
int try_to_free_nats(struct f2fs_sb_info *, int);
-void update_inode(struct inode *, struct page *);
-void update_inode_page(struct inode *);
+int update_inode(struct inode *, struct page *);
+int update_inode_page(struct inode *);
int f2fs_write_inode(struct inode *, struct writeback_control *);
void f2fs_evict_inode(struct inode *);
void handle_failed_inode(struct inode *);
@@ -1675,29 +2041,34 @@ struct dentry *f2fs_get_parent(struct dentry *child);
/*
* dir.c
*/
-extern unsigned char f2fs_filetype_table[F2FS_FT_MAX];
void set_de_type(struct f2fs_dir_entry *, umode_t);
-
-struct f2fs_dir_entry *find_target_dentry(struct f2fs_filename *,
+unsigned char get_de_type(struct f2fs_dir_entry *);
+struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *,
f2fs_hash_t, int *, struct f2fs_dentry_ptr *);
-bool f2fs_fill_dentries(struct dir_context *, struct f2fs_dentry_ptr *,
- unsigned int, struct f2fs_str *);
+int f2fs_fill_dentries(struct dir_context *, struct f2fs_dentry_ptr *,
+ unsigned int, struct fscrypt_str *);
void do_make_empty_dir(struct inode *, struct inode *,
struct f2fs_dentry_ptr *);
struct page *init_inode_metadata(struct inode *, struct inode *,
- const struct qstr *, struct page *);
+ const struct qstr *, const struct qstr *, struct page *);
void update_parent_metadata(struct inode *, struct inode *, unsigned int);
int room_for_filename(const void *, int, int);
-void f2fs_drop_nlink(struct inode *, struct inode *, struct page *);
-struct f2fs_dir_entry *f2fs_find_entry(struct inode *, struct qstr *,
+void f2fs_drop_nlink(struct inode *, struct inode *);
+struct f2fs_dir_entry *__f2fs_find_entry(struct inode *, struct fscrypt_name *,
+ struct page **);
+struct f2fs_dir_entry *f2fs_find_entry(struct inode *, const struct qstr *,
struct page **);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
-ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
+ino_t f2fs_inode_by_name(struct inode *, const struct qstr *, struct page **);
void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
struct page *, struct inode *);
int update_dent_inode(struct inode *, struct inode *, const struct qstr *);
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *,
const struct qstr *, f2fs_hash_t , unsigned int);
+int f2fs_add_regular_entry(struct inode *, const struct qstr *,
+ const struct qstr *, struct inode *, nid_t, umode_t);
+int __f2fs_do_add_link(struct inode *, struct fscrypt_name*, struct inode *,
+ nid_t, umode_t);
int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *, nid_t,
umode_t);
void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *,
@@ -1714,17 +2085,19 @@ static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
/*
* super.c
*/
+int f2fs_inode_dirtied(struct inode *, bool);
+void f2fs_inode_synced(struct inode *);
int f2fs_commit_super(struct f2fs_sb_info *, bool);
loff_t max_file_size(unsigned bits);
int f2fs_sync_fs(struct super_block *, int);
extern __printf(3, 4)
void f2fs_msg(struct super_block *, const char *, const char *, ...);
+int sanity_check_ckpt(struct f2fs_sb_info *sbi);
/*
* hash.c
*/
-f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
- struct f2fs_filename *fname);
+f2fs_hash_t f2fs_dentry_hash(const struct qstr *);
/*
* node.c
@@ -1737,6 +2110,7 @@ int need_dentry_mark(struct f2fs_sb_info *, nid_t);
bool is_checkpointed_node(struct f2fs_sb_info *, nid_t);
bool need_inode_block_update(struct f2fs_sb_info *, nid_t);
void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
+pgoff_t get_next_page_offset(struct dnode_of_data *, pgoff_t);
int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
int truncate_inode_blocks(struct inode *, pgoff_t);
int truncate_xattr_node(struct inode *, struct page *);
@@ -1747,8 +2121,11 @@ struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *);
void ra_node_page(struct f2fs_sb_info *, nid_t);
struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_node_page_ra(struct page *, int);
-void sync_inode_page(struct dnode_of_data *);
-int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *);
+void move_node_page(struct page *, int);
+int fsync_node_pages(struct f2fs_sb_info *, struct inode *,
+ struct writeback_control *, bool);
+int sync_node_pages(struct f2fs_sb_info *, struct writeback_control *);
+void build_free_nids(struct f2fs_sb_info *, bool);
bool alloc_nid(struct f2fs_sb_info *, nid_t *);
void alloc_nid_done(struct f2fs_sb_info *, nid_t);
void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
@@ -1768,18 +2145,18 @@ void destroy_node_manager_caches(void);
* segment.c
*/
void register_inmem_page(struct inode *, struct page *);
-int commit_inmem_pages(struct inode *, bool);
-void f2fs_balance_fs(struct f2fs_sb_info *);
+void drop_inmem_pages(struct inode *);
+int commit_inmem_pages(struct inode *);
+void f2fs_balance_fs(struct f2fs_sb_info *, bool);
void f2fs_balance_fs_bg(struct f2fs_sb_info *);
int f2fs_issue_flush(struct f2fs_sb_info *);
int create_flush_cmd_control(struct f2fs_sb_info *);
-void destroy_flush_cmd_control(struct f2fs_sb_info *);
+void destroy_flush_cmd_control(struct f2fs_sb_info *, bool);
void invalidate_blocks(struct f2fs_sb_info *, block_t);
bool is_checkpointed_data(struct f2fs_sb_info *, block_t);
void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t);
void clear_prefree_segments(struct f2fs_sb_info *, struct cp_control *);
void release_discard_addrs(struct f2fs_sb_info *);
-bool discard_next_dnode(struct f2fs_sb_info *, block_t);
int npages_for_summary_flush(struct f2fs_sb_info *, bool);
void allocate_new_segments(struct f2fs_sb_info *);
int f2fs_trim_fs(struct f2fs_sb_info *, struct fstrim_range *);
@@ -1789,16 +2166,17 @@ void write_meta_page(struct f2fs_sb_info *, struct page *);
void write_node_page(unsigned int, struct f2fs_io_info *);
void write_data_page(struct dnode_of_data *, struct f2fs_io_info *);
void rewrite_data_page(struct f2fs_io_info *);
+void __f2fs_replace_block(struct f2fs_sb_info *, struct f2fs_summary *,
+ block_t, block_t, bool, bool);
void f2fs_replace_block(struct f2fs_sb_info *, struct dnode_of_data *,
- block_t, block_t, unsigned char, bool);
+ block_t, block_t, unsigned char, bool, bool);
void allocate_data_block(struct f2fs_sb_info *, struct page *,
block_t, block_t *, struct f2fs_summary *, int);
-void f2fs_wait_on_page_writeback(struct page *, enum page_type);
+void f2fs_wait_on_page_writeback(struct page *, enum page_type, bool);
void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *, block_t);
void write_data_summaries(struct f2fs_sb_info *, block_t);
void write_node_summaries(struct f2fs_sb_info *, block_t);
-int lookup_journal_in_cursum(struct f2fs_summary_block *,
- int, unsigned int, int);
+int lookup_journal_in_cursum(struct f2fs_journal *, int, unsigned int, int);
void flush_sit_entries(struct f2fs_sb_info *, struct cp_control *);
int build_segment_manager(struct f2fs_sb_info *);
void destroy_segment_manager(struct f2fs_sb_info *);
@@ -1808,6 +2186,7 @@ void destroy_segment_manager_caches(void);
/*
* checkpoint.c
*/
+void f2fs_stop_checkpoint(struct f2fs_sb_info *, bool);
struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_tmp_page(struct f2fs_sb_info *, pgoff_t);
@@ -1815,21 +2194,21 @@ bool is_valid_blkaddr(struct f2fs_sb_info *, block_t, int);
int ra_meta_pages(struct f2fs_sb_info *, block_t, int, int, bool);
void ra_meta_pages_cond(struct f2fs_sb_info *, pgoff_t);
long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
-void add_dirty_inode(struct f2fs_sb_info *, nid_t, int type);
-void remove_dirty_inode(struct f2fs_sb_info *, nid_t, int type);
-void release_dirty_inode(struct f2fs_sb_info *);
+void add_ino_entry(struct f2fs_sb_info *, nid_t, int type);
+void remove_ino_entry(struct f2fs_sb_info *, nid_t, int type);
+void release_ino_entry(struct f2fs_sb_info *, bool);
bool exist_written_data(struct f2fs_sb_info *, nid_t, int);
+int f2fs_sync_inode_meta(struct f2fs_sb_info *);
int acquire_orphan_inode(struct f2fs_sb_info *);
void release_orphan_inode(struct f2fs_sb_info *);
-void add_orphan_inode(struct f2fs_sb_info *, nid_t);
+void add_orphan_inode(struct inode *);
void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
int recover_orphan_inodes(struct f2fs_sb_info *);
int get_valid_checkpoint(struct f2fs_sb_info *);
void update_dirty_page(struct inode *, struct page *);
-void add_dirty_dir_inode(struct inode *);
-void remove_dirty_dir_inode(struct inode *);
-void sync_dirty_dir_inodes(struct f2fs_sb_info *);
-void write_checkpoint(struct f2fs_sb_info *, struct cp_control *);
+void remove_dirty_inode(struct inode *);
+int sync_dirty_inodes(struct f2fs_sb_info *, enum inode_type);
+int write_checkpoint(struct f2fs_sb_info *, struct cp_control *);
void init_ino_entry_info(struct f2fs_sb_info *);
int __init create_checkpoint_caches(void);
void destroy_checkpoint_caches(void);
@@ -1838,34 +2217,49 @@ void destroy_checkpoint_caches(void);
* data.c
*/
void f2fs_submit_merged_bio(struct f2fs_sb_info *, enum page_type, int);
+void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *, struct inode *,
+ struct page *, nid_t, enum page_type, int);
+void f2fs_flush_merged_bios(struct f2fs_sb_info *);
int f2fs_submit_page_bio(struct f2fs_io_info *);
void f2fs_submit_page_mbio(struct f2fs_io_info *);
+struct block_device *f2fs_target_device(struct f2fs_sb_info *,
+ block_t, struct bio *);
+int f2fs_target_device_index(struct f2fs_sb_info *, block_t);
void set_data_blkaddr(struct dnode_of_data *);
+void f2fs_update_data_blkaddr(struct dnode_of_data *, block_t);
+int reserve_new_blocks(struct dnode_of_data *, blkcnt_t);
int reserve_new_block(struct dnode_of_data *);
int f2fs_get_block(struct dnode_of_data *, pgoff_t);
+int f2fs_preallocate_blocks(struct kiocb *, struct iov_iter *);
int f2fs_reserve_block(struct dnode_of_data *, pgoff_t);
struct page *get_read_data_page(struct inode *, pgoff_t, int, bool);
struct page *find_data_page(struct inode *, pgoff_t);
struct page *get_lock_data_page(struct inode *, pgoff_t, bool);
struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool);
int do_write_data_page(struct f2fs_io_info *);
+int f2fs_map_blocks(struct inode *, struct f2fs_map_blocks *, int, int);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *, u64, u64);
+void f2fs_set_page_dirty_nobuffers(struct page *);
void f2fs_invalidate_page(struct page *, unsigned int, unsigned int);
int f2fs_release_page(struct page *, gfp_t);
+#ifdef CONFIG_MIGRATION
+int f2fs_migrate_page(struct address_space *, struct page *, struct page *,
+ enum migrate_mode);
+#endif
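
The f2fs_migrate_page() prototype above is only compiled in when page migration is configured. A hook like this is normally wired into the filesystem's address_space_operations under the same guard; a minimal kernel-context sketch, where the hook names are real but the aops instance itself is invented for illustration:

static const struct address_space_operations demo_f2fs_aops = {
	.set_page_dirty	= f2fs_set_page_dirty_nobuffers,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,	/* compiled out otherwise */
#endif
};
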
/*
* gc.c
*/
int start_gc_thread(struct f2fs_sb_info *);
void stop_gc_thread(struct f2fs_sb_info *);
-block_t start_bidx_of_node(unsigned int, struct f2fs_inode_info *);
-int f2fs_gc(struct f2fs_sb_info *, bool);
+block_t start_bidx_of_node(unsigned int, struct inode *);
+int f2fs_gc(struct f2fs_sb_info *, bool, bool);
void build_gc_manager(struct f2fs_sb_info *);
/*
* recovery.c
*/
-int recover_fsync_data(struct f2fs_sb_info *);
+int recover_fsync_data(struct f2fs_sb_info *, bool);
bool space_for_roll_forward(struct f2fs_sb_info *);
/*
@@ -1879,18 +2273,20 @@ struct f2fs_stat_info {
int main_area_segs, main_area_sections, main_area_zones;
unsigned long long hit_largest, hit_cached, hit_rbtree;
unsigned long long hit_total, total_ext;
- int ext_tree, ext_node;
- int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
- int nats, dirty_nats, sits, dirty_sits, fnids;
+ int ext_tree, zombie_tree, ext_node;
+ int ndirty_node, ndirty_dent, ndirty_meta, ndirty_data, ndirty_imeta;
+ int inmem_pages;
+ unsigned int ndirty_dirs, ndirty_files, ndirty_all;
+ int nats, dirty_nats, sits, dirty_sits, free_nids, alloc_nids;
int total_count, utilization;
- int bg_gc, inmem_pages, wb_pages;
- int inline_xattr, inline_inode, inline_dir;
- unsigned int valid_count, valid_node_count, valid_inode_count;
+ int bg_gc, nr_wb_cp_data, nr_wb_data;
+ int inline_xattr, inline_inode, inline_dir, orphans;
+ unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
unsigned int bimodal, avg_vblocks;
int util_free, util_valid, util_invalid;
int rsvd_segs, overp_segs;
int dirty_count, node_pages, meta_pages;
- int prefree_count, call_count, cp_count;
+ int prefree_count, call_count, cp_count, bg_cp_count;
int tot_segs, node_segs, data_segs, free_segs, free_secs;
int bg_node_segs, bg_data_segs;
int tot_blks, data_blks, node_blks;
@@ -1911,10 +2307,11 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
}
#define stat_inc_cp_count(si) ((si)->cp_count++)
+#define stat_inc_bg_cp_count(si) ((si)->bg_cp_count++)
#define stat_inc_call_count(si) ((si)->call_count++)
#define stat_inc_bggc_count(sbi) ((sbi)->bg_gc++)
-#define stat_inc_dirty_dir(sbi) ((sbi)->n_dirty_dirs++)
-#define stat_dec_dirty_dir(sbi) ((sbi)->n_dirty_dirs--)
+#define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++)
+#define stat_dec_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]--)
#define stat_inc_total_hit(sbi) (atomic64_inc(&(sbi)->total_hit_ext))
#define stat_inc_rbtree_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_rbtree))
#define stat_inc_largest_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_largest))
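
The single dirty-dir counter is generalized above into a per-type array indexed by inode class. A self-contained sketch of the implied layout; the enum values here are assumptions rather than f2fs's exact set:

/* per-type dirty-inode accounting implied by the new stat macros */
enum inode_type { DIR_INODE, FILE_INODE, DIRTY_META, NR_INODE_TYPE };

struct demo_sb_info {
	int ndirty_inode[NR_INODE_TYPE];	/* one counter per class */
};

#define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
#define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
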
@@ -1989,14 +2386,15 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
int f2fs_build_stats(struct f2fs_sb_info *);
void f2fs_destroy_stats(struct f2fs_sb_info *);
-void __init f2fs_create_root_stats(void);
+int __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
#else
#define stat_inc_cp_count(si)
+#define stat_inc_bg_cp_count(si)
#define stat_inc_call_count(si)
#define stat_inc_bggc_count(si)
-#define stat_inc_dirty_dir(sbi)
-#define stat_dec_dirty_dir(sbi)
+#define stat_inc_dirty_inode(sbi, type)
+#define stat_dec_dirty_inode(sbi, type)
#define stat_inc_total_hit(sb)
#define stat_inc_rbtree_node_hit(sb)
#define stat_inc_largest_node_hit(sbi)
@@ -2017,7 +2415,7 @@ void f2fs_destroy_root_stats(void);
static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
-static inline void __init f2fs_create_root_stats(void) { }
+static inline int __init f2fs_create_root_stats(void) { return 0; }
static inline void f2fs_destroy_root_stats(void) { }
#endif
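
f2fs_create_root_stats() now returns int in both the real and the stubbed build, letting a debugfs failure propagate out of module init. A kernel-context sketch of what the non-stub side plausibly looks like; the error value chosen is an assumption:

static struct dentry *f2fs_debugfs_root;

int __init f2fs_create_root_stats(void)
{
	f2fs_debugfs_root = debugfs_create_dir("f2fs", NULL);
	if (!f2fs_debugfs_root)
		return -ENOMEM;	/* init can now fail loudly */
	return 0;
}
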
@@ -2046,16 +2444,15 @@ int f2fs_convert_inline_inode(struct inode *);
int f2fs_write_inline_data(struct inode *, struct page *);
bool recover_inline_data(struct inode *, struct page *);
struct f2fs_dir_entry *find_in_inline_dir(struct inode *,
- struct f2fs_filename *, struct page **);
-struct f2fs_dir_entry *f2fs_parent_inline_dir(struct inode *, struct page **);
+ struct fscrypt_name *, struct page **);
int make_empty_inline_dir(struct inode *inode, struct inode *, struct page *);
-int f2fs_add_inline_entry(struct inode *, const struct qstr *, struct inode *,
- nid_t, umode_t);
+int f2fs_add_inline_entry(struct inode *, const struct qstr *,
+ const struct qstr *, struct inode *, nid_t, umode_t);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *, struct page *,
struct inode *, struct inode *);
bool f2fs_empty_inline_dir(struct inode *);
int f2fs_read_inline_dir(struct file *, struct dir_context *,
- struct f2fs_str *);
+ struct fscrypt_str *);
int f2fs_inline_data_fiemap(struct inode *,
struct fiemap_extent_info *, __u64, __u64);
@@ -2071,8 +2468,8 @@ void f2fs_leave_shrinker(struct f2fs_sb_info *);
* extent_cache.c
*/
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *, int);
-void f2fs_drop_largest_extent(struct inode *, pgoff_t);
-void f2fs_init_extent_tree(struct inode *, struct f2fs_extent *);
+bool f2fs_init_extent_tree(struct inode *, struct f2fs_extent *);
+void f2fs_drop_extent_tree(struct inode *);
unsigned int f2fs_destroy_extent_node(struct inode *);
void f2fs_destroy_extent_tree(struct inode *);
bool f2fs_lookup_extent_cache(struct inode *, pgoff_t, struct extent_info *);
@@ -2086,13 +2483,9 @@ void destroy_extent_cache(void);
/*
* crypto support
*/
-static inline int f2fs_encrypted_inode(struct inode *inode)
+static inline bool f2fs_encrypted_inode(struct inode *inode)
{
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
return file_is_encrypt(inode);
-#else
- return 0;
-#endif
}
static inline void f2fs_set_encrypted_inode(struct inode *inode)
@@ -2104,113 +2497,88 @@ static inline void f2fs_set_encrypted_inode(struct inode *inode)
static inline bool f2fs_bio_encrypted(struct bio *bio)
{
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- return unlikely(bio->bi_private != NULL);
-#else
- return false;
-#endif
+ return bio->bi_private != NULL;
}
static inline int f2fs_sb_has_crypto(struct super_block *sb)
{
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_ENCRYPT);
-#else
- return 0;
-#endif
}
-static inline bool f2fs_may_encrypt(struct inode *inode)
+static inline int f2fs_sb_mounted_blkzoned(struct super_block *sb)
{
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- mode_t mode = inode->i_mode;
-
- return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));
-#else
- return 0;
-#endif
+ return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_BLKZONED);
}
-/* crypto_policy.c */
-int f2fs_is_child_context_consistent_with_parent(struct inode *,
- struct inode *);
-int f2fs_inherit_context(struct inode *, struct inode *, struct page *);
-int f2fs_process_policy(const struct f2fs_encryption_policy *, struct inode *);
-int f2fs_get_policy(struct inode *, struct f2fs_encryption_policy *);
-
-/* crypt.c */
-extern struct kmem_cache *f2fs_crypt_info_cachep;
-bool f2fs_valid_contents_enc_mode(uint32_t);
-uint32_t f2fs_validate_encryption_key_size(uint32_t, uint32_t);
-struct f2fs_crypto_ctx *f2fs_get_crypto_ctx(struct inode *);
-void f2fs_release_crypto_ctx(struct f2fs_crypto_ctx *);
-struct page *f2fs_encrypt(struct inode *, struct page *);
-int f2fs_decrypt(struct f2fs_crypto_ctx *, struct page *);
-int f2fs_decrypt_one(struct inode *, struct page *);
-void f2fs_end_io_crypto_work(struct f2fs_crypto_ctx *, struct bio *);
-
-/* crypto_key.c */
-void f2fs_free_encryption_info(struct inode *, struct f2fs_crypt_info *);
-int _f2fs_get_encryption_info(struct inode *inode);
-
-/* crypto_fname.c */
-bool f2fs_valid_filenames_enc_mode(uint32_t);
-u32 f2fs_fname_crypto_round_up(u32, u32);
-int f2fs_fname_crypto_alloc_buffer(struct inode *, u32, struct f2fs_str *);
-int f2fs_fname_disk_to_usr(struct inode *, f2fs_hash_t *,
- const struct f2fs_str *, struct f2fs_str *);
-int f2fs_fname_usr_to_disk(struct inode *, const struct qstr *,
- struct f2fs_str *);
-
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
-void f2fs_restore_and_release_control_page(struct page **);
-void f2fs_restore_control_page(struct page *);
-
-int __init f2fs_init_crypto(void);
-int f2fs_crypto_initialize(void);
-void f2fs_exit_crypto(void);
+#ifdef CONFIG_BLK_DEV_ZONED
+static inline int get_blkz_type(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t blkaddr)
+{
+ unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;
+ int i;
-int f2fs_has_encryption_key(struct inode *);
+ for (i = 0; i < sbi->s_ndevs; i++)
+ if (FDEV(i).bdev == bdev)
+ return FDEV(i).blkz_type[zno];
+ return -EINVAL;
+}
+#endif
-static inline int f2fs_get_encryption_info(struct inode *inode)
+static inline bool f2fs_discard_en(struct f2fs_sb_info *sbi)
{
- struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
+ struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev);
- if (!ci ||
- (ci->ci_keyring_key &&
- (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
- (1 << KEY_FLAG_REVOKED) |
- (1 << KEY_FLAG_DEAD)))))
- return _f2fs_get_encryption_info(inode);
- return 0;
+ return blk_queue_discard(q) || f2fs_sb_mounted_blkzoned(sbi->sb);
}
-void f2fs_fname_crypto_free_buffer(struct f2fs_str *);
-int f2fs_fname_setup_filename(struct inode *, const struct qstr *,
- int lookup, struct f2fs_filename *);
-void f2fs_fname_free_filename(struct f2fs_filename *);
-#else
-static inline void f2fs_restore_and_release_control_page(struct page **p) { }
-static inline void f2fs_restore_control_page(struct page *p) { }
-
-static inline int __init f2fs_init_crypto(void) { return 0; }
-static inline void f2fs_exit_crypto(void) { }
+static inline void set_opt_mode(struct f2fs_sb_info *sbi, unsigned int mt)
+{
+ clear_opt(sbi, ADAPTIVE);
+ clear_opt(sbi, LFS);
-static inline int f2fs_has_encryption_key(struct inode *i) { return 0; }
-static inline int f2fs_get_encryption_info(struct inode *i) { return 0; }
-static inline void f2fs_fname_crypto_free_buffer(struct f2fs_str *p) { }
+ switch (mt) {
+ case F2FS_MOUNT_ADAPTIVE:
+ set_opt(sbi, ADAPTIVE);
+ break;
+ case F2FS_MOUNT_LFS:
+ set_opt(sbi, LFS);
+ break;
+ }
+}
-static inline int f2fs_fname_setup_filename(struct inode *dir,
- const struct qstr *iname,
- int lookup, struct f2fs_filename *fname)
+static inline bool f2fs_may_encrypt(struct inode *inode)
{
- memset(fname, 0, sizeof(struct f2fs_filename));
- fname->usr_fname = iname;
- fname->disk_name.name = (unsigned char *)iname->name;
- fname->disk_name.len = iname->len;
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+ umode_t mode = inode->i_mode;
+
+ return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));
+#else
return 0;
+#endif
}
-static inline void f2fs_fname_free_filename(struct f2fs_filename *fname) { }
+#ifndef CONFIG_F2FS_FS_ENCRYPTION
+#define fscrypt_set_d_op(i)
+#define fscrypt_get_ctx fscrypt_notsupp_get_ctx
+#define fscrypt_release_ctx fscrypt_notsupp_release_ctx
+#define fscrypt_encrypt_page fscrypt_notsupp_encrypt_page
+#define fscrypt_decrypt_page fscrypt_notsupp_decrypt_page
+#define fscrypt_decrypt_bio_pages fscrypt_notsupp_decrypt_bio_pages
+#define fscrypt_pullback_bio_page fscrypt_notsupp_pullback_bio_page
+#define fscrypt_restore_control_page fscrypt_notsupp_restore_control_page
+#define fscrypt_zeroout_range fscrypt_notsupp_zeroout_range
+#define fscrypt_process_policy fscrypt_notsupp_process_policy
+#define fscrypt_get_policy fscrypt_notsupp_get_policy
+#define fscrypt_has_permitted_context fscrypt_notsupp_has_permitted_context
+#define fscrypt_inherit_context fscrypt_notsupp_inherit_context
+#define fscrypt_get_encryption_info fscrypt_notsupp_get_encryption_info
+#define fscrypt_put_encryption_info fscrypt_notsupp_put_encryption_info
+#define fscrypt_setup_filename fscrypt_notsupp_setup_filename
+#define fscrypt_free_filename fscrypt_notsupp_free_filename
+#define fscrypt_fname_encrypted_size fscrypt_notsupp_fname_encrypted_size
+#define fscrypt_fname_alloc_buffer fscrypt_notsupp_fname_alloc_buffer
+#define fscrypt_fname_free_buffer fscrypt_notsupp_fname_free_buffer
+#define fscrypt_fname_disk_to_usr fscrypt_notsupp_fname_disk_to_usr
+#define fscrypt_fname_usr_to_disk fscrypt_notsupp_fname_usr_to_disk
#endif
#endif
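
The #define block above is how f2fs selects between the shared fscrypt implementation and no-op stubs at compile time: with encryption disabled, every fscrypt_* call site resolves to a _notsupp variant that fails or does nothing, so callers need no #ifdefs of their own. The same pattern in miniature, with every identifier invented for illustration:

#include <errno.h>

#ifdef CONFIG_DEMO_ENCRYPTION
int demo_real_get_key(unsigned long ino);	/* real implementation */
#define demo_get_key demo_real_get_key
#else
static inline int demo_notsupp_get_key(unsigned long ino)
{
	(void)ino;
	return -EOPNOTSUPP;	/* feature compiled out */
}
#define demo_get_key demo_notsupp_get_key
#endif
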
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 4b449d263333..5808d5c709a7 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -21,6 +21,8 @@
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/random.h>
+#include <linux/uuid.h>
+#include <linux/file.h>
#include "f2fs.h"
#include "node.h"
@@ -40,8 +42,6 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
struct dnode_of_data dn;
int err;
- f2fs_balance_fs(sbi);
-
sb_start_pagefault(inode->i_sb);
f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
@@ -57,6 +57,8 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
f2fs_put_dnode(&dn);
f2fs_unlock_op(sbi);
+ f2fs_balance_fs(sbi, dn.node_changed);
+
file_update_time(vma->vm_file);
lock_page(page);
if (unlikely(page->mapping != inode->i_mapping ||
@@ -74,28 +76,28 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
goto mapped;
/* page is wholly or partially inside EOF */
- if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) >
+ if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
i_size_read(inode)) {
unsigned offset;
- offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
- zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+ offset = i_size_read(inode) & ~PAGE_MASK;
+ zero_user_segment(page, offset, PAGE_SIZE);
}
set_page_dirty(page);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
/* fill the page */
- f2fs_wait_on_page_writeback(page, DATA);
+ f2fs_wait_on_page_writeback(page, DATA, false);
/* wait for GCed encrypted page writeback */
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);
- /* if gced page is attached, don't write to cold segment */
- clear_cold_data(page);
out:
sb_end_pagefault(inode->i_sb);
+ f2fs_update_time(sbi, REQ_TIME);
return block_page_mkwrite_return(err);
}
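
The PAGE_CACHE_* to PAGE_* rename in this hunk leaves the arithmetic intact: the page straddling EOF still has its tail zeroed before being mapped writable. A worked example, assuming 4 KiB pages and an i_size of 10000:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long i_size = 10000;			/* assumed file size */
	unsigned long offset = i_size & ~PAGE_MASK;	/* 10000 % 4096 = 1808 */

	/* zero_user_segment(page, offset, PAGE_SIZE) clears bytes 1808..4095 */
	printf("zero bytes %lu..%lu of the EOF page\n", offset, PAGE_SIZE - 1);
	return 0;
}
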
@@ -132,7 +134,7 @@ static inline bool need_do_checkpoint(struct inode *inode)
if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
need_cp = true;
- else if (file_enc_name(inode) && need_dentry_mark(sbi, inode->i_ino))
+ else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
need_cp = true;
else if (file_wrong_pino(inode))
need_cp = true;
@@ -170,21 +172,16 @@ static void try_to_fix_pino(struct inode *inode)
fi->xattr_ver = 0;
if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
get_parent_ino(inode, &pino)) {
- fi->i_pino = pino;
+ f2fs_i_pino_write(inode, pino);
file_got_pino(inode);
- up_write(&fi->i_sem);
-
- mark_inode_dirty_sync(inode);
- f2fs_write_inode(inode, NULL);
- } else {
- up_write(&fi->i_sem);
}
+ up_write(&fi->i_sem);
}
-int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
+ int datasync, bool atomic)
{
struct inode *inode = file->f_mapping->host;
- struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
nid_t ino = inode->i_ino;
int ret = 0;
@@ -201,10 +198,10 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
trace_f2fs_sync_file_enter(inode);
/* if fdatasync is triggered, let's do in-place-update */
- if (get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
- set_inode_flag(fi, FI_NEED_IPU);
+ if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
+ set_inode_flag(inode, FI_NEED_IPU);
ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
- clear_inode_flag(fi, FI_NEED_IPU);
+ clear_inode_flag(inode, FI_NEED_IPU);
if (ret) {
trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
@@ -212,7 +209,7 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
}
/* if the inode is dirty, let's recover all the time */
- if (!datasync) {
+ if (!f2fs_skip_inode_update(inode, datasync)) {
f2fs_write_inode(inode, NULL);
goto go_write;
}
@@ -220,29 +217,26 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
/*
* if there is no written data, don't waste time writing recovery info.
*/
- if (!is_inode_flag_set(fi, FI_APPEND_WRITE) &&
+ if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
!exist_written_data(sbi, ino, APPEND_INO)) {
/* it may call write_inode just prior to fsync */
if (need_inode_page_update(sbi, ino))
goto go_write;
- if (is_inode_flag_set(fi, FI_UPDATE_WRITE) ||
+ if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
exist_written_data(sbi, ino, UPDATE_INO))
goto flush_out;
goto out;
}
go_write:
- /* guarantee free sections for fsync */
- f2fs_balance_fs(sbi);
-
/*
* Data written under both fdatasync() and fsync() can be
* recovered after a sudden power-off.
*/
- down_read(&fi->i_sem);
+ down_read(&F2FS_I(inode)->i_sem);
need_cp = need_do_checkpoint(inode);
- up_read(&fi->i_sem);
+ up_read(&F2FS_I(inode)->i_sem);
if (need_cp) {
/* all the dirty node pages should be flushed for POR */
@@ -253,19 +247,23 @@ go_write:
* will be used only for fsynced inodes after checkpoint.
*/
try_to_fix_pino(inode);
- clear_inode_flag(fi, FI_APPEND_WRITE);
- clear_inode_flag(fi, FI_UPDATE_WRITE);
+ clear_inode_flag(inode, FI_APPEND_WRITE);
+ clear_inode_flag(inode, FI_UPDATE_WRITE);
goto out;
}
sync_nodes:
- sync_node_pages(sbi, ino, &wbc);
+ ret = fsync_node_pages(sbi, inode, &wbc, atomic);
+ if (ret)
+ goto out;
/* if cp_error was enabled, we should avoid infinite loop */
- if (unlikely(f2fs_cp_error(sbi)))
+ if (unlikely(f2fs_cp_error(sbi))) {
+ ret = -EIO;
goto out;
+ }
if (need_inode_block_update(sbi, ino)) {
- mark_inode_dirty_sync(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
f2fs_write_inode(inode, NULL);
goto sync_nodes;
}
@@ -275,18 +273,24 @@ sync_nodes:
goto out;
/* once recovery info is written, don't need to track this */
- remove_dirty_inode(sbi, ino, APPEND_INO);
- clear_inode_flag(fi, FI_APPEND_WRITE);
+ remove_ino_entry(sbi, ino, APPEND_INO);
+ clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
- remove_dirty_inode(sbi, ino, UPDATE_INO);
- clear_inode_flag(fi, FI_UPDATE_WRITE);
+ remove_ino_entry(sbi, ino, UPDATE_INO);
+ clear_inode_flag(inode, FI_UPDATE_WRITE);
ret = f2fs_issue_flush(sbi);
+ f2fs_update_time(sbi, REQ_TIME);
out:
trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
f2fs_trace_ios(NULL, 1);
return ret;
}
+int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
+{
+ return f2fs_do_sync_file(file, start, end, datasync, false);
+}
+
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
pgoff_t pgofs, int whence)
{
@@ -300,7 +304,7 @@ static pgoff_t __get_first_dirty_index(struct address_space *mapping,
pagevec_init(&pvec, 0);
nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
PAGECACHE_TAG_DIRTY, 1);
- pgofs = nr_pages ? pvec.pages[0]->index : LONG_MAX;
+ pgofs = nr_pages ? pvec.pages[0]->index : ULONG_MAX;
pagevec_release(&pvec);
return pgofs;
}
@@ -332,7 +336,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
loff_t isize;
int err = 0;
- mutex_lock(&inode->i_mutex);
+ inode_lock(inode);
isize = i_size_read(inode);
if (offset >= isize)
@@ -345,32 +349,31 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
goto found;
}
- pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT);
+ pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);
- for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
+ for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
set_new_dnode(&dn, inode, NULL, NULL, 0);
- err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
+ err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
if (err && err != -ENOENT) {
goto fail;
} else if (err == -ENOENT) {
/* direct node does not exist */
if (whence == SEEK_DATA) {
- pgofs = PGOFS_OF_NEXT_DNODE(pgofs,
- F2FS_I(inode));
+ pgofs = get_next_page_offset(&dn, pgofs);
continue;
} else {
goto found;
}
}
- end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+ end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
/* find data/hole in dnode block */
for (; dn.ofs_in_node < end_offset;
dn.ofs_in_node++, pgofs++,
- data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
+ data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
block_t blkaddr;
blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
@@ -387,10 +390,10 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
found:
if (whence == SEEK_HOLE && data_ofs > isize)
data_ofs = isize;
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
return vfs_setpos(file, data_ofs, maxbytes);
fail:
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
return -ENXIO;
}
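
f2fs_seek_block() now steps across missing dnodes with get_next_page_offset() instead of the per-inode macro, but the userspace-visible interface is unchanged. A small probe of the same path; the test file path is an assumption:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/f2fs/sparse", O_RDONLY);
	off_t data, hole;

	if (fd < 0)
		return 1;
	data = lseek(fd, 0, SEEK_DATA);		/* first byte backed by data */
	hole = lseek(fd, data, SEEK_HOLE);	/* end of that extent */
	printf("data at %lld, hole at %lld\n", (long long)data, (long long)hole);
	close(fd);
	return 0;
}
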
@@ -418,19 +421,20 @@ static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
struct inode *inode = file_inode(file);
+ int err;
if (f2fs_encrypted_inode(inode)) {
- int err = f2fs_get_encryption_info(inode);
+ err = fscrypt_get_encryption_info(inode);
if (err)
return 0;
+ if (!f2fs_encrypted_inode(inode))
+ return -ENOKEY;
}
/* we don't need to use inline_data strictly */
- if (f2fs_has_inline_data(inode)) {
- int err = f2fs_convert_inline_inode(inode);
- if (err)
- return err;
- }
+ err = f2fs_convert_inline_inode(inode);
+ if (err)
+ return err;
file_accessed(file);
vma->vm_ops = &f2fs_file_vm_ops;
@@ -440,12 +444,22 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
int ret = generic_file_open(inode, filp);
+ struct dentry *dir;
if (!ret && f2fs_encrypted_inode(inode)) {
- ret = f2fs_get_encryption_info(inode);
+ ret = fscrypt_get_encryption_info(inode);
if (ret)
- ret = -EACCES;
+ return -EACCES;
+ if (!fscrypt_has_encryption_key(inode))
+ return -ENOKEY;
+ }
+ dir = dget_parent(file_dentry(filp));
+ if (f2fs_encrypted_inode(d_inode(dir)) &&
+ !fscrypt_has_permitted_context(d_inode(dir), inode)) {
+ dput(dir);
+ return -EPERM;
}
+ dput(dir);
return ret;
}
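
With the open-path changes above, opening an encrypted file whose key is absent now fails up front with -ENOKEY, and an open that crosses inconsistent parent/child encryption contexts fails with -EPERM. A userspace sketch of the visible behavior; the path is an assumption:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	int fd = open("/mnt/f2fs/vault/secret", O_RDONLY);

	if (fd < 0 && errno == ENOKEY)
		puts("encryption key not loaded in the keyring");
	else if (fd < 0 && errno == EPERM)
		puts("parent/child encryption contexts differ");
	return 0;
}
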
@@ -468,8 +482,7 @@ int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
set_data_blkaddr(dn);
invalidate_blocks(sbi, blkaddr);
if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
- clear_inode_flag(F2FS_I(dn->inode),
- FI_FIRST_BLOCK_WRITTEN);
+ clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
nr_free++;
}
@@ -480,14 +493,13 @@ int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
* we will invalidate all blkaddr in the whole range.
*/
fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
- F2FS_I(dn->inode)) + ofs;
+ dn->inode) + ofs;
f2fs_update_extent_cache_range(dn, fofs, 0, len);
dec_valid_block_count(sbi, dn->inode, nr_free);
- set_page_dirty(dn->node_page);
- sync_inode_page(dn);
}
dn->ofs_in_node = ofs;
+ f2fs_update_time(sbi, REQ_TIME);
trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
dn->ofs_in_node, nr_free);
return nr_free;
@@ -501,8 +513,8 @@ void truncate_data_blocks(struct dnode_of_data *dn)
static int truncate_partial_data_page(struct inode *inode, u64 from,
bool cache_only)
{
- unsigned offset = from & (PAGE_CACHE_SIZE - 1);
- pgoff_t index = from >> PAGE_CACHE_SHIFT;
+ unsigned offset = from & (PAGE_SIZE - 1);
+ pgoff_t index = from >> PAGE_SHIFT;
struct address_space *mapping = inode->i_mapping;
struct page *page;
@@ -510,7 +522,7 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
return 0;
if (cache_only) {
- page = f2fs_grab_cache_page(mapping, index, false);
+ page = find_lock_page(mapping, index);
if (page && PageUptodate(page))
goto truncate_out;
f2fs_put_page(page, 1);
@@ -521,9 +533,10 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
if (IS_ERR(page))
return 0;
truncate_out:
- f2fs_wait_on_page_writeback(page, DATA);
- zero_user(page, offset, PAGE_CACHE_SIZE - offset);
- if (!cache_only || !f2fs_encrypted_inode(inode) || !S_ISREG(inode->i_mode))
+ f2fs_wait_on_page_writeback(page, DATA, true);
+ zero_user(page, offset, PAGE_SIZE - offset);
+ if (!cache_only || !f2fs_encrypted_inode(inode) ||
+ !S_ISREG(inode->i_mode))
set_page_dirty(page);
f2fs_put_page(page, 1);
return 0;
@@ -543,6 +556,9 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock)
free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);
+ if (free_from >= sbi->max_file_blocks)
+ goto free_partial;
+
if (lock)
f2fs_lock_op(sbi);
@@ -561,14 +577,14 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock)
}
set_new_dnode(&dn, inode, ipage, NULL, 0);
- err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
+ err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
if (err) {
if (err == -ENOENT)
goto free_next;
goto out;
}
- count = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+ count = ADDRS_PER_PAGE(dn.node_page, inode);
count -= dn.ofs_in_node;
f2fs_bug_on(sbi, count < 0);
@@ -584,7 +600,7 @@ free_next:
out:
if (lock)
f2fs_unlock_op(sbi);
-
+free_partial:
/* lastly zero out the first data page */
if (!err)
err = truncate_partial_data_page(inode, from, truncate_page);
@@ -593,7 +609,7 @@ out:
return err;
}
-int f2fs_truncate(struct inode *inode, bool lock)
+int f2fs_truncate(struct inode *inode)
{
int err;
@@ -604,18 +620,18 @@ int f2fs_truncate(struct inode *inode, bool lock)
trace_f2fs_truncate(inode);
/* we should check inline_data size */
- if (f2fs_has_inline_data(inode) && !f2fs_may_inline_data(inode)) {
+ if (!f2fs_may_inline_data(inode)) {
err = f2fs_convert_inline_inode(inode);
if (err)
return err;
}
- err = truncate_blocks(inode, i_size_read(inode), lock);
+ err = truncate_blocks(inode, i_size_read(inode), true);
if (err)
return err;
- inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- mark_inode_dirty(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
+ f2fs_mark_inode_dirty_sync(inode, false);
return 0;
}
@@ -631,7 +647,6 @@ int f2fs_getattr(struct vfsmount *mnt,
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
- struct f2fs_inode_info *fi = F2FS_I(inode);
unsigned int ia_valid = attr->ia_valid;
if (ia_valid & ATTR_UID)
@@ -652,7 +667,7 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr)
if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
mode &= ~S_ISGID;
- set_acl_inode(fi, mode);
+ set_acl_inode(inode, mode);
}
}
#else
@@ -662,8 +677,8 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr)
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
- struct f2fs_inode_info *fi = F2FS_I(inode);
int err;
+ bool size_changed = false;
err = inode_change_ok(inode, attr);
if (err)
@@ -671,36 +686,49 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
if (attr->ia_valid & ATTR_SIZE) {
if (f2fs_encrypted_inode(inode) &&
- f2fs_get_encryption_info(inode))
+ fscrypt_get_encryption_info(inode))
return -EACCES;
if (attr->ia_size <= i_size_read(inode)) {
truncate_setsize(inode, attr->ia_size);
- err = f2fs_truncate(inode, true);
+ err = f2fs_truncate(inode);
if (err)
return err;
- f2fs_balance_fs(F2FS_I_SB(inode));
} else {
/*
* do not trim all blocks after i_size if target size is
* larger than i_size.
*/
truncate_setsize(inode, attr->ia_size);
- inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+
+ /* should convert inline inode here */
+ if (!f2fs_may_inline_data(inode)) {
+ err = f2fs_convert_inline_inode(inode);
+ if (err)
+ return err;
+ }
+ inode->i_mtime = inode->i_ctime = current_time(inode);
}
+
+ size_changed = true;
}
__setattr_copy(inode, attr);
if (attr->ia_valid & ATTR_MODE) {
err = posix_acl_chmod(inode, get_inode_mode(inode));
- if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
- inode->i_mode = fi->i_acl_mode;
- clear_inode_flag(fi, FI_ACL_MODE);
+ if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
+ inode->i_mode = F2FS_I(inode)->i_acl_mode;
+ clear_inode_flag(inode, FI_ACL_MODE);
}
}
- mark_inode_dirty(inode);
+ /* file size may have changed here */
+ f2fs_mark_inode_dirty_sync(inode, size_changed);
+
+ /* inode change will produce dirty node pages flushed by checkpoint */
+ f2fs_balance_fs(F2FS_I_SB(inode), true);
+
return err;
}
@@ -727,7 +755,7 @@ static int fill_zero(struct inode *inode, pgoff_t index,
if (!len)
return 0;
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
page = get_new_data_page(inode, NULL, index, false);
@@ -736,7 +764,7 @@ static int fill_zero(struct inode *inode, pgoff_t index,
if (IS_ERR(page))
return PTR_ERR(page);
- f2fs_wait_on_page_writeback(page, DATA);
+ f2fs_wait_on_page_writeback(page, DATA, true);
zero_user(page, start, len);
set_page_dirty(page);
f2fs_put_page(page, 1);
@@ -761,7 +789,7 @@ int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
return err;
}
- end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+ end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
@@ -778,19 +806,17 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
pgoff_t pg_start, pg_end;
loff_t off_start, off_end;
- int ret = 0;
+ int ret;
- if (f2fs_has_inline_data(inode)) {
- ret = f2fs_convert_inline_inode(inode);
- if (ret)
- return ret;
- }
+ ret = f2fs_convert_inline_inode(inode);
+ if (ret)
+ return ret;
- pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
- pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+ pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+ pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
- off_start = offset & (PAGE_CACHE_SIZE - 1);
- off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+ off_start = offset & (PAGE_SIZE - 1);
+ off_end = (offset + len) & (PAGE_SIZE - 1);
if (pg_start == pg_end) {
ret = fill_zero(inode, pg_start, off_start,
@@ -800,7 +826,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
} else {
if (off_start) {
ret = fill_zero(inode, pg_start++, off_start,
- PAGE_CACHE_SIZE - off_start);
+ PAGE_SIZE - off_start);
if (ret)
return ret;
}
@@ -815,10 +841,10 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
loff_t blk_start, blk_end;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
- blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT;
- blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT;
+ blk_start = (loff_t)pg_start << PAGE_SHIFT;
+ blk_end = (loff_t)pg_end << PAGE_SHIFT;
truncate_inode_pages_range(mapping, blk_start,
blk_end - 1);
@@ -831,83 +857,199 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
return ret;
}
-static int __exchange_data_block(struct inode *inode, pgoff_t src,
- pgoff_t dst, bool full)
+static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
+ int *do_replace, pgoff_t off, pgoff_t len)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct dnode_of_data dn;
- block_t new_addr;
- bool do_replace = false;
- int ret;
+ int ret, done, i;
+next_dnode:
set_new_dnode(&dn, inode, NULL, NULL, 0);
- ret = get_dnode_of_data(&dn, src, LOOKUP_NODE_RA);
+ ret = get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
if (ret && ret != -ENOENT) {
return ret;
} else if (ret == -ENOENT) {
- new_addr = NULL_ADDR;
- } else {
- new_addr = dn.data_blkaddr;
- if (!is_checkpointed_data(sbi, new_addr)) {
- dn.data_blkaddr = NULL_ADDR;
+ if (dn.max_level == 0)
+ return -ENOENT;
+ done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
+ blkaddr += done;
+ do_replace += done;
+ goto next;
+ }
+
+ done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
+ dn.ofs_in_node, len);
+ for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
+ *blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
+ if (!is_checkpointed_data(sbi, *blkaddr)) {
+
+ if (test_opt(sbi, LFS)) {
+ f2fs_put_dnode(&dn);
+ return -ENOTSUPP;
+ }
+
/* do not invalidate this block address */
- set_data_blkaddr(&dn);
- f2fs_update_extent_cache(&dn);
- do_replace = true;
+ f2fs_update_data_blkaddr(&dn, NULL_ADDR);
+ *do_replace = 1;
}
- f2fs_put_dnode(&dn);
}
+ f2fs_put_dnode(&dn);
+next:
+ len -= done;
+ off += done;
+ if (len)
+ goto next_dnode;
+ return 0;
+}
- if (new_addr == NULL_ADDR)
- return full ? truncate_hole(inode, dst, dst + 1) : 0;
+static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
+ int *do_replace, pgoff_t off, int len)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct dnode_of_data dn;
+ int ret, i;
- if (do_replace) {
- struct page *ipage = get_node_page(sbi, inode->i_ino);
- struct node_info ni;
+ for (i = 0; i < len; i++, do_replace++, blkaddr++) {
+ if (*do_replace == 0)
+ continue;
- if (IS_ERR(ipage)) {
- ret = PTR_ERR(ipage);
- goto err_out;
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ ret = get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
+ if (ret) {
+ dec_valid_block_count(sbi, inode, 1);
+ invalidate_blocks(sbi, *blkaddr);
+ } else {
+ f2fs_update_data_blkaddr(&dn, *blkaddr);
}
+ f2fs_put_dnode(&dn);
+ }
+ return 0;
+}
- set_new_dnode(&dn, inode, ipage, NULL, 0);
- ret = f2fs_reserve_block(&dn, dst);
- if (ret)
- goto err_out;
+static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
+ block_t *blkaddr, int *do_replace,
+ pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
+ pgoff_t i = 0;
+ int ret;
+
+ while (i < len) {
+ if (blkaddr[i] == NULL_ADDR && !full) {
+ i++;
+ continue;
+ }
- truncate_data_blocks_range(&dn, 1);
+ if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
+ struct dnode_of_data dn;
+ struct node_info ni;
+ size_t new_size;
+ pgoff_t ilen;
- get_node_info(sbi, dn.nid, &ni);
- f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
- ni.version, true);
- f2fs_put_dnode(&dn);
- } else {
- struct page *psrc, *pdst;
+ set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
+ ret = get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
+ if (ret)
+ return ret;
+
+ get_node_info(sbi, dn.nid, &ni);
+ ilen = min((pgoff_t)
+ ADDRS_PER_PAGE(dn.node_page, dst_inode) -
+ dn.ofs_in_node, len - i);
+ do {
+ dn.data_blkaddr = datablock_addr(dn.node_page,
+ dn.ofs_in_node);
+ truncate_data_blocks_range(&dn, 1);
+
+ if (do_replace[i]) {
+ f2fs_i_blocks_write(src_inode,
+ 1, false);
+ f2fs_i_blocks_write(dst_inode,
+ 1, true);
+ f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
+ blkaddr[i], ni.version, true, false);
+
+ do_replace[i] = 0;
+ }
+ dn.ofs_in_node++;
+ i++;
+ new_size = (dst + i) << PAGE_SHIFT;
+ if (dst_inode->i_size < new_size)
+ f2fs_i_size_write(dst_inode, new_size);
+ } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
- psrc = get_lock_data_page(inode, src, true);
- if (IS_ERR(psrc))
- return PTR_ERR(psrc);
- pdst = get_new_data_page(inode, NULL, dst, false);
- if (IS_ERR(pdst)) {
+ f2fs_put_dnode(&dn);
+ } else {
+ struct page *psrc, *pdst;
+
+ psrc = get_lock_data_page(src_inode, src + i, true);
+ if (IS_ERR(psrc))
+ return PTR_ERR(psrc);
+ pdst = get_new_data_page(dst_inode, NULL, dst + i,
+ true);
+ if (IS_ERR(pdst)) {
+ f2fs_put_page(psrc, 1);
+ return PTR_ERR(pdst);
+ }
+ f2fs_copy_page(psrc, pdst);
+ set_page_dirty(pdst);
+ f2fs_put_page(pdst, 1);
f2fs_put_page(psrc, 1);
- return PTR_ERR(pdst);
- }
- f2fs_copy_page(psrc, pdst);
- set_page_dirty(pdst);
- f2fs_put_page(pdst, 1);
- f2fs_put_page(psrc, 1);
- return truncate_hole(inode, src, src + 1);
+ ret = truncate_hole(src_inode, src + i, src + i + 1);
+ if (ret)
+ return ret;
+ i++;
+ }
}
return 0;
+}
-err_out:
- if (!get_dnode_of_data(&dn, src, LOOKUP_NODE)) {
- dn.data_blkaddr = new_addr;
- set_data_blkaddr(&dn);
- f2fs_update_extent_cache(&dn);
- f2fs_put_dnode(&dn);
+static int __exchange_data_block(struct inode *src_inode,
+ struct inode *dst_inode, pgoff_t src, pgoff_t dst,
+ pgoff_t len, bool full)
+{
+ block_t *src_blkaddr;
+ int *do_replace;
+ pgoff_t olen;
+ int ret;
+
+ while (len) {
+ olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);
+
+ src_blkaddr = f2fs_kvzalloc(sizeof(block_t) * olen, GFP_KERNEL);
+ if (!src_blkaddr)
+ return -ENOMEM;
+
+ do_replace = f2fs_kvzalloc(sizeof(int) * olen, GFP_KERNEL);
+ if (!do_replace) {
+ kvfree(src_blkaddr);
+ return -ENOMEM;
+ }
+
+ ret = __read_out_blkaddrs(src_inode, src_blkaddr,
+ do_replace, src, olen);
+ if (ret)
+ goto roll_back;
+
+ ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
+ do_replace, src, dst, olen, full);
+ if (ret)
+ goto roll_back;
+
+ src += olen;
+ dst += olen;
+ len -= olen;
+
+ kvfree(src_blkaddr);
+ kvfree(do_replace);
}
+ return 0;
+
+roll_back:
+ __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, len);
+ kvfree(src_blkaddr);
+ kvfree(do_replace);
return ret;
}
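
The rewritten __exchange_data_block() splits the move into read-out, clone, and roll-back helpers and handles at most 4 * ADDRS_PER_BLOCK entries per pass, keeping the scratch arrays bounded and making a failed pass reversible. A self-contained skeleton of that pattern; the three helpers are stand-ins, not the f2fs functions:

#include <stdlib.h>

#define CHUNK 1024	/* stands in for 4 * ADDRS_PER_BLOCK */

int read_out(unsigned int *addrs, int *replace,
	     unsigned long off, unsigned long n);
int clone_range(unsigned int *addrs, int *replace,
		unsigned long src, unsigned long dst, unsigned long n);
void roll_back(unsigned int *addrs, int *replace,
	       unsigned long off, unsigned long n);

int exchange(unsigned long src, unsigned long dst, unsigned long len)
{
	while (len) {
		unsigned long olen = len < CHUNK ? len : CHUNK;
		unsigned int *addrs = calloc(olen, sizeof(*addrs));
		int *replace = calloc(olen, sizeof(*replace));
		int ret = (addrs && replace) ? 0 : -1;

		if (!ret)
			ret = read_out(addrs, replace, src, olen);
		if (!ret)
			ret = clone_range(addrs, replace, src, dst, olen);
		if (ret && addrs && replace)
			roll_back(addrs, replace, src, olen);	/* undo partial pass */
		free(addrs);
		free(replace);
		if (ret)
			return ret;
		src += olen;
		dst += olen;
		len -= olen;
	}
	return 0;
}
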
@@ -915,16 +1057,15 @@ static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
- int ret = 0;
+ int ret;
- for (; end < nrpages; start++, end++) {
- f2fs_balance_fs(sbi);
- f2fs_lock_op(sbi);
- ret = __exchange_data_block(inode, end, start, true);
- f2fs_unlock_op(sbi);
- if (ret)
- break;
- }
+ f2fs_balance_fs(sbi, true);
+ f2fs_lock_op(sbi);
+
+ f2fs_drop_extent_tree(inode);
+
+ ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
+ f2fs_unlock_op(sbi);
return ret;
}
@@ -941,16 +1082,12 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
return -EINVAL;
- f2fs_balance_fs(F2FS_I_SB(inode));
-
- if (f2fs_has_inline_data(inode)) {
- ret = f2fs_convert_inline_inode(inode);
- if (ret)
- return ret;
- }
+ ret = f2fs_convert_inline_inode(inode);
+ if (ret)
+ return ret;
- pg_start = offset >> PAGE_CACHE_SHIFT;
- pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
+ pg_start = offset >> PAGE_SHIFT;
+ pg_end = (offset + len) >> PAGE_SHIFT;
/* write out all dirty pages from offset */
ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
@@ -972,7 +1109,50 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
ret = truncate_blocks(inode, new_size, true);
if (!ret)
- i_size_write(inode, new_size);
+ f2fs_i_size_write(inode, new_size);
+
+ return ret;
+}
+
+static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
+ pgoff_t end)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+ pgoff_t index = start;
+ unsigned int ofs_in_node = dn->ofs_in_node;
+ blkcnt_t count = 0;
+ int ret;
+
+ for (; index < end; index++, dn->ofs_in_node++) {
+ if (datablock_addr(dn->node_page, dn->ofs_in_node) == NULL_ADDR)
+ count++;
+ }
+
+ dn->ofs_in_node = ofs_in_node;
+ ret = reserve_new_blocks(dn, count);
+ if (ret)
+ return ret;
+
+ dn->ofs_in_node = ofs_in_node;
+ for (index = start; index < end; index++, dn->ofs_in_node++) {
+ dn->data_blkaddr =
+ datablock_addr(dn->node_page, dn->ofs_in_node);
+ /*
+ * reserve_new_blocks() does not guarantee that the entire
+ * range was allocated.
+ */
+ if (dn->data_blkaddr == NULL_ADDR) {
+ ret = -ENOSPC;
+ break;
+ }
+ if (dn->data_blkaddr != NEW_ADDR) {
+ invalidate_blocks(sbi, dn->data_blkaddr);
+ dn->data_blkaddr = NEW_ADDR;
+ set_data_blkaddr(dn);
+ }
+ }
+
+ f2fs_update_extent_cache_range(dn, start, 0, index - start);
return ret;
}
@@ -991,13 +1171,9 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
if (ret)
return ret;
- f2fs_balance_fs(sbi);
-
- if (f2fs_has_inline_data(inode)) {
- ret = f2fs_convert_inline_inode(inode);
- if (ret)
- return ret;
- }
+ ret = f2fs_convert_inline_inode(inode);
+ if (ret)
+ return ret;
ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
if (ret)
@@ -1005,11 +1181,11 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
truncate_pagecache_range(inode, offset, offset + len - 1);
- pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
- pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+ pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
+ pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
- off_start = offset & (PAGE_CACHE_SIZE - 1);
- off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+ off_start = offset & (PAGE_SIZE - 1);
+ off_end = (offset + len) & (PAGE_SIZE - 1);
if (pg_start == pg_end) {
ret = fill_zero(inode, pg_start, off_start,
@@ -1023,48 +1199,43 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
} else {
if (off_start) {
ret = fill_zero(inode, pg_start++, off_start,
- PAGE_CACHE_SIZE - off_start);
+ PAGE_SIZE - off_start);
if (ret)
return ret;
new_size = max_t(loff_t, new_size,
- (loff_t)pg_start << PAGE_CACHE_SHIFT);
+ (loff_t)pg_start << PAGE_SHIFT);
}
- for (index = pg_start; index < pg_end; index++) {
+ for (index = pg_start; index < pg_end;) {
struct dnode_of_data dn;
- struct page *ipage;
+ unsigned int end_offset;
+ pgoff_t end;
f2fs_lock_op(sbi);
- ipage = get_node_page(sbi, inode->i_ino);
- if (IS_ERR(ipage)) {
- ret = PTR_ERR(ipage);
- f2fs_unlock_op(sbi);
- goto out;
- }
-
- set_new_dnode(&dn, inode, ipage, NULL, 0);
- ret = f2fs_reserve_block(&dn, index);
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
if (ret) {
f2fs_unlock_op(sbi);
goto out;
}
- if (dn.data_blkaddr != NEW_ADDR) {
- invalidate_blocks(sbi, dn.data_blkaddr);
-
- dn.data_blkaddr = NEW_ADDR;
- set_data_blkaddr(&dn);
+ end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ end = min(pg_end, end_offset - dn.ofs_in_node + index);
- dn.data_blkaddr = NULL_ADDR;
- f2fs_update_extent_cache(&dn);
- }
+ ret = f2fs_do_zero_range(&dn, index, end);
f2fs_put_dnode(&dn);
f2fs_unlock_op(sbi);
+ f2fs_balance_fs(sbi, dn.node_changed);
+
+ if (ret)
+ goto out;
+
+ index = end;
new_size = max_t(loff_t, new_size,
- (loff_t)(index + 1) << PAGE_CACHE_SHIFT);
+ (loff_t)index << PAGE_SHIFT);
}
if (off_end) {
@@ -1077,11 +1248,8 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
}
out:
- if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size) {
- i_size_write(inode, new_size);
- mark_inode_dirty(inode);
- update_inode_page(inode);
- }
+ if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
+ f2fs_i_size_write(inode, new_size);
return ret;
}
@@ -1089,7 +1257,7 @@ out:
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- pgoff_t pg_start, pg_end, delta, nrpages, idx;
+ pgoff_t nr, pg_start, pg_end, delta, idx;
loff_t new_size;
int ret = 0;
@@ -1104,13 +1272,11 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
return -EINVAL;
- f2fs_balance_fs(sbi);
+ ret = f2fs_convert_inline_inode(inode);
+ if (ret)
+ return ret;
- if (f2fs_has_inline_data(inode)) {
- ret = f2fs_convert_inline_inode(inode);
- if (ret)
- return ret;
- }
+ f2fs_balance_fs(sbi, true);
ret = truncate_blocks(inode, i_size_read(inode), true);
if (ret)
@@ -1123,17 +1289,23 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
truncate_pagecache(inode, offset);
- pg_start = offset >> PAGE_CACHE_SHIFT;
- pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
+ pg_start = offset >> PAGE_SHIFT;
+ pg_end = (offset + len) >> PAGE_SHIFT;
delta = pg_end - pg_start;
- nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
+ idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
+
+ while (!ret && idx > pg_start) {
+ nr = idx - pg_start;
+ if (nr > delta)
+ nr = delta;
+ idx -= nr;
- for (idx = nrpages - 1; idx >= pg_start && idx != -1; idx--) {
f2fs_lock_op(sbi);
- ret = __exchange_data_block(inode, idx, idx + delta, false);
+ f2fs_drop_extent_tree(inode);
+
+ ret = __exchange_data_block(inode, inode, idx,
+ idx + delta, nr, false);
f2fs_unlock_op(sbi);
- if (ret)
- break;
}
/* write out all moved pages, if possible */
@@ -1141,7 +1313,7 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
truncate_pagecache(inode, offset);
if (!ret)
- i_size_write(inode, new_size);
+ f2fs_i_size_write(inode, new_size);
return ret;
}
@@ -1149,62 +1321,50 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
loff_t len, int mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- pgoff_t index, pg_start, pg_end;
+ struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
+ pgoff_t pg_end;
loff_t new_size = i_size_read(inode);
- loff_t off_start, off_end;
- int ret = 0;
+ loff_t off_end;
+ int err;
- f2fs_balance_fs(sbi);
+ err = inode_newsize_ok(inode, (len + offset));
+ if (err)
+ return err;
- ret = inode_newsize_ok(inode, (len + offset));
- if (ret)
- return ret;
+ err = f2fs_convert_inline_inode(inode);
+ if (err)
+ return err;
- if (f2fs_has_inline_data(inode)) {
- ret = f2fs_convert_inline_inode(inode);
- if (ret)
- return ret;
- }
+ f2fs_balance_fs(sbi, true);
- pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
- pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;
+ pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
+ off_end = (offset + len) & (PAGE_SIZE - 1);
- off_start = offset & (PAGE_CACHE_SIZE - 1);
- off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
+ map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
+ map.m_len = pg_end - map.m_lblk;
+ if (off_end)
+ map.m_len++;
- f2fs_lock_op(sbi);
+ err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
+ if (err) {
+ pgoff_t last_off;
- for (index = pg_start; index <= pg_end; index++) {
- struct dnode_of_data dn;
+ if (!map.m_len)
+ return err;
- if (index == pg_end && !off_end)
- goto noalloc;
+ last_off = map.m_lblk + map.m_len - 1;
- set_new_dnode(&dn, inode, NULL, NULL, 0);
- ret = f2fs_reserve_block(&dn, index);
- if (ret)
- break;
-noalloc:
- if (pg_start == pg_end)
- new_size = offset + len;
- else if (index == pg_start && off_start)
- new_size = (loff_t)(index + 1) << PAGE_CACHE_SHIFT;
- else if (index == pg_end)
- new_size = ((loff_t)index << PAGE_CACHE_SHIFT) +
- off_end;
- else
- new_size += PAGE_CACHE_SIZE;
+ /* update new size to the failed position */
+ new_size = (last_off == pg_end) ? offset + len:
+ (loff_t)(last_off + 1) << PAGE_SHIFT;
+ } else {
+ new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
}
- if (!(mode & FALLOC_FL_KEEP_SIZE) &&
- i_size_read(inode) < new_size) {
- i_size_write(inode, new_size);
- mark_inode_dirty(inode);
- update_inode_page(inode);
- }
- f2fs_unlock_op(sbi);
+ if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
+ f2fs_i_size_write(inode, new_size);
- return ret;
+ return err;
}
static long f2fs_fallocate(struct file *file, int mode,
@@ -1226,7 +1386,7 @@ static long f2fs_fallocate(struct file *file, int mode,
FALLOC_FL_INSERT_RANGE))
return -EOPNOTSUPP;
- mutex_lock(&inode->i_mutex);
+ inode_lock(inode);
if (mode & FALLOC_FL_PUNCH_HOLE) {
if (offset >= inode->i_size)
@@ -1244,12 +1404,15 @@ static long f2fs_fallocate(struct file *file, int mode,
}
if (!ret) {
- inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- mark_inode_dirty(inode);
+ inode->i_mtime = inode->i_ctime = current_time(inode);
+ f2fs_mark_inode_dirty_sync(inode, false);
+ if (mode & FALLOC_FL_KEEP_SIZE)
+ file_set_keep_isize(inode);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
}
out:
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
trace_f2fs_fallocate(inode, mode, offset, len, ret);
return ret;
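
expand_inode_data() above now reserves the whole requested range through a single f2fs_map_blocks() call, and FALLOC_FL_KEEP_SIZE additionally tags the inode via file_set_keep_isize(). From userspace the whole sequence is one fallocate(2); the path and size below are assumptions:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/f2fs/prealloc.bin", O_RDWR | O_CREAT, 0600);

	if (fd < 0)
		return 1;
	/* reserve 64 MiB past EOF without changing i_size */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 64 << 20))
		perror("fallocate");
	close(fd);
	return 0;
}
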
@@ -1257,13 +1420,22 @@ out:
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
+ /*
+ * f2fs_release_file() is called on every close. So we should not
+ * drop any in-memory pages due to a close issued by another process.
+ */
+ if (!(filp->f_mode & FMODE_WRITE) ||
+ atomic_read(&inode->i_writecount) != 1)
+ return 0;
+
/* any remaining atomic pages should be discarded */
if (f2fs_is_atomic_file(inode))
- commit_inmem_pages(inode, true);
+ drop_inmem_pages(inode);
if (f2fs_is_volatile_file(inode)) {
- set_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
+ clear_inode_flag(inode, FI_VOLATILE_FILE);
+ set_inode_flag(inode, FI_DROP_CACHE);
filemap_fdatawrite(inode->i_mapping);
- clear_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
+ clear_inode_flag(inode, FI_DROP_CACHE);
}
return 0;
}
@@ -1293,33 +1465,29 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct f2fs_inode_info *fi = F2FS_I(inode);
- unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;
+ unsigned int flags;
unsigned int oldflags;
int ret;
+ if (!inode_owner_or_capable(inode))
+ return -EACCES;
+
+ if (get_user(flags, (int __user *)arg))
+ return -EFAULT;
+
ret = mnt_want_write_file(filp);
if (ret)
return ret;
- if (!inode_owner_or_capable(inode)) {
- ret = -EACCES;
- goto out;
- }
-
- if (get_user(flags, (int __user *)arg)) {
- ret = -EFAULT;
- goto out;
- }
-
flags = f2fs_mask_flags(inode->i_mode, flags);
- mutex_lock(&inode->i_mutex);
+ inode_lock(inode);
oldflags = fi->i_flags;
if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
if (!capable(CAP_LINUX_IMMUTABLE)) {
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
ret = -EPERM;
goto out;
}
@@ -1328,11 +1496,10 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
flags = flags & FS_FL_USER_MODIFIABLE;
flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
fi->i_flags = flags;
- mutex_unlock(&inode->i_mutex);
+ inode_unlock(inode);
+ inode->i_ctime = current_time(inode);
f2fs_set_inode_flags(inode);
- inode->i_ctime = CURRENT_TIME;
- mark_inode_dirty(inode);
out:
mnt_drop_write_file(filp);
return ret;
@@ -1353,17 +1520,35 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
if (!inode_owner_or_capable(inode))
return -EACCES;
- f2fs_balance_fs(F2FS_I_SB(inode));
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ inode_lock(inode);
if (f2fs_is_atomic_file(inode))
- return 0;
+ goto out;
ret = f2fs_convert_inline_inode(inode);
if (ret)
- return ret;
+ goto out;
- set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
- return 0;
+ set_inode_flag(inode, FI_ATOMIC_FILE);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+
+ if (!get_dirty_pages(inode))
+ goto out;
+
+ f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
+ "Unexpected flush for atomic writes: ino=%lu, npages=%u",
+ inode->i_ino, get_dirty_pages(inode));
+ ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
+ if (ret)
+ clear_inode_flag(inode, FI_ATOMIC_FILE);
+out:
+ inode_unlock(inode);
+ mnt_drop_write_file(filp);
+ return ret;
}
static int f2fs_ioc_commit_atomic_write(struct file *filp)
@@ -1374,22 +1559,27 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
if (!inode_owner_or_capable(inode))
return -EACCES;
- if (f2fs_is_volatile_file(inode))
- return 0;
-
ret = mnt_want_write_file(filp);
if (ret)
return ret;
+ inode_lock(inode);
+
+ if (f2fs_is_volatile_file(inode))
+ goto err_out;
+
if (f2fs_is_atomic_file(inode)) {
- clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
- ret = commit_inmem_pages(inode, false);
- if (ret)
+ clear_inode_flag(inode, FI_ATOMIC_FILE);
+ ret = commit_inmem_pages(inode);
+ if (ret) {
+ set_inode_flag(inode, FI_ATOMIC_FILE);
goto err_out;
+ }
}
- ret = f2fs_sync_file(filp, 0, LLONG_MAX, 0);
+ ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
err_out:
+ inode_unlock(inode);
mnt_drop_write_file(filp);
return ret;
}
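The pair of handlers above implements f2fs's atomic-write protocol: every write issued between the start and commit ioctls becomes visible on disk as a single unit (this is what SQLite on Android uses in place of its rollback journal). A minimal userspace sketch of the protocol follows; the F2FS_IOC_* values are copied from fs/f2fs/f2fs.h of this tree (magic 0xf5, commands 1 and 2), since no uapi header exports them at this point — verify them against your kernel before relying on them.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>

/* copied from fs/f2fs/f2fs.h; not exported via uapi in this tree */
#define F2FS_IOCTL_MAGIC		0xf5
#define F2FS_IOC_START_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 1)
#define F2FS_IOC_COMMIT_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 2)

int main(void)
{
	const char buf[] = "journal-free update";
	int fd = open("/mnt/f2fs/db", O_RDWR);	/* hypothetical path */

	if (fd < 0)
		return 1;
	/* writes after this point are staged as in-memory pages */
	if (ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE) < 0)
		goto fail;
	if (write(fd, buf, sizeof(buf)) != sizeof(buf))
		goto fail;
	/* commit_inmem_pages() + f2fs_do_sync_file() make it durable */
	if (ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE) < 0)
		goto fail;
	close(fd);
	return 0;
fail:
	perror("atomic write");
	close(fd);
	return 1;
}

Note that after this patch both halves run under inode_lock() and mnt_want_write_file(), so a racing abort or second start can no longer observe FI_ATOMIC_FILE in a half-updated state.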
@@ -1402,31 +1592,54 @@ static int f2fs_ioc_start_volatile_write(struct file *filp)
if (!inode_owner_or_capable(inode))
return -EACCES;
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ inode_lock(inode);
+
if (f2fs_is_volatile_file(inode))
- return 0;
+ goto out;
ret = f2fs_convert_inline_inode(inode);
if (ret)
- return ret;
+ goto out;
- set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
- return 0;
+ set_inode_flag(inode, FI_VOLATILE_FILE);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+out:
+ inode_unlock(inode);
+ mnt_drop_write_file(filp);
+ return ret;
}
static int f2fs_ioc_release_volatile_write(struct file *filp)
{
struct inode *inode = file_inode(filp);
+ int ret;
if (!inode_owner_or_capable(inode))
return -EACCES;
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ inode_lock(inode);
+
if (!f2fs_is_volatile_file(inode))
- return 0;
+ goto out;
- if (!f2fs_is_first_block_written(inode))
- return truncate_partial_data_page(inode, 0, true);
+ if (!f2fs_is_first_block_written(inode)) {
+ ret = truncate_partial_data_page(inode, 0, true);
+ goto out;
+ }
- return punch_hole(inode, 0, F2FS_BLKSIZE);
+ ret = punch_hole(inode, 0, F2FS_BLKSIZE);
+out:
+ inode_unlock(inode);
+ mnt_drop_write_file(filp);
+ return ret;
}
static int f2fs_ioc_abort_volatile_write(struct file *filp)
@@ -1441,13 +1654,19 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
if (ret)
return ret;
- f2fs_balance_fs(F2FS_I_SB(inode));
+ inode_lock(inode);
+
+ if (f2fs_is_atomic_file(inode))
+ drop_inmem_pages(inode);
+ if (f2fs_is_volatile_file(inode)) {
+ clear_inode_flag(inode, FI_VOLATILE_FILE);
+ ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
+ }
- clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
- clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
- commit_inmem_pages(inode, true);
+ inode_unlock(inode);
mnt_drop_write_file(filp);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return ret;
}
@@ -1457,6 +1676,7 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct super_block *sb = sbi->sb;
__u32 in;
+ int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1464,30 +1684,38 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
if (get_user(in, (__u32 __user *)arg))
return -EFAULT;
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
switch (in) {
case F2FS_GOING_DOWN_FULLSYNC:
sb = freeze_bdev(sb->s_bdev);
if (sb && !IS_ERR(sb)) {
- f2fs_stop_checkpoint(sbi);
+ f2fs_stop_checkpoint(sbi, false);
thaw_bdev(sb->s_bdev, sb);
}
break;
case F2FS_GOING_DOWN_METASYNC:
/* do checkpoint only */
f2fs_sync_fs(sb, 1);
- f2fs_stop_checkpoint(sbi);
+ f2fs_stop_checkpoint(sbi, false);
break;
case F2FS_GOING_DOWN_NOSYNC:
- f2fs_stop_checkpoint(sbi);
+ f2fs_stop_checkpoint(sbi, false);
break;
case F2FS_GOING_DOWN_METAFLUSH:
sync_meta_pages(sbi, META, LONG_MAX);
- f2fs_stop_checkpoint(sbi);
+ f2fs_stop_checkpoint(sbi, false);
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
- return 0;
+ f2fs_update_time(sbi, REQ_TIME);
+out:
+ mnt_drop_write_file(filp);
+ return ret;
}
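f2fs_ioc_shutdown() gives a privileged caller four escalating ways to freeze the filesystem for testing, from a full freeze-and-sync down to an immediate stop. A hedged caller-side sketch follows; F2FS_IOC_SHUTDOWN reuses XFS's goingdown ioctl number, and the F2FS_GOING_DOWN_* values below are copied from the f2fs headers of this era, so treat them as assumptions to check against your tree.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>

/* assumed values, copied from the f2fs headers of this era */
#define F2FS_IOC_SHUTDOWN		_IOR('X', 125, __u32)
#define F2FS_GOING_DOWN_FULLSYNC	0x0	/* freeze_bdev + stop */
#define F2FS_GOING_DOWN_METASYNC	0x1	/* checkpoint + stop */
#define F2FS_GOING_DOWN_NOSYNC		0x2	/* stop immediately */
#define F2FS_GOING_DOWN_METAFLUSH	0x3	/* flush meta + stop */

int main(int argc, char **argv)
{
	__u32 mode = F2FS_GOING_DOWN_METASYNC;
	int fd = open(argc > 1 ? argv[1] : "/mnt/f2fs", O_RDONLY);

	/* CAP_SYS_ADMIN is required; the argument is passed by pointer */
	if (fd < 0 || ioctl(fd, F2FS_IOC_SHUTDOWN, &mode) < 0) {
		perror("shutdown");
		return 1;
	}
	close(fd);
	return 0;
}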
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
@@ -1508,15 +1736,21 @@ static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
sizeof(range)))
return -EFAULT;
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
range.minlen = max((unsigned int)range.minlen,
q->limits.discard_granularity);
ret = f2fs_trim_fs(F2FS_SB(sb), &range);
+ mnt_drop_write_file(filp);
if (ret < 0)
return ret;
if (copy_to_user((struct fstrim_range __user *)arg, &range,
sizeof(range)))
return -EFAULT;
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return 0;
}
@@ -1532,45 +1766,31 @@ static bool uuid_is_nonzero(__u8 u[16])
static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- struct f2fs_encryption_policy policy;
+ struct fscrypt_policy policy;
struct inode *inode = file_inode(filp);
- int err;
- if (copy_from_user(&policy, (struct f2fs_encryption_policy __user *)arg,
- sizeof(policy)))
+ if (copy_from_user(&policy, (struct fscrypt_policy __user *)arg,
+ sizeof(policy)))
return -EFAULT;
- mutex_lock(&inode->i_mutex);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
- err = f2fs_process_policy(&policy, inode);
-
- mutex_unlock(&inode->i_mutex);
-
- return err;
-#else
- return -EOPNOTSUPP;
-#endif
+ return fscrypt_process_policy(filp, &policy);
}
static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- struct f2fs_encryption_policy policy;
+ struct fscrypt_policy policy;
struct inode *inode = file_inode(filp);
int err;
- err = f2fs_get_policy(inode, &policy);
+ err = fscrypt_get_policy(inode, &policy);
if (err)
return err;
- if (copy_to_user((struct f2fs_encryption_policy __user *)arg, &policy,
- sizeof(policy)))
+ if (copy_to_user((struct fscrypt_policy __user *)arg, &policy, sizeof(policy)))
return -EFAULT;
return 0;
-#else
- return -EOPNOTSUPP;
-#endif
}
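With this change the two policy ioctls become thin wrappers over the shared fscrypt layer. Setting a policy from userspace looks roughly like the sketch below; the struct layout and the 'f'-class ioctl number are the v1 fscrypt ABI as best recalled for this period, so double-check them against include/uapi/linux/fs.h, and the key-descriptor bytes here are placeholders.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>

/* assumed v1 fscrypt ABI; verify against include/uapi/linux/fs.h */
struct fscrypt_policy {
	__u8 version;
	__u8 contents_encryption_mode;
	__u8 filenames_encryption_mode;
	__u8 flags;
	__u8 master_key_descriptor[8];
};
#define FS_IOC_SET_ENCRYPTION_POLICY	_IOR('f', 19, struct fscrypt_policy)
#define FS_ENCRYPTION_MODE_AES_256_XTS	1
#define FS_ENCRYPTION_MODE_AES_256_CTS	4

int main(void)
{
	struct fscrypt_policy p = {
		.version = 0,
		.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS,
		.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS,
		.flags = 0,
	};
	int fd = open("/mnt/f2fs/secret", O_RDONLY);	/* empty directory */

	memcpy(p.master_key_descriptor, "examp1e!", 8);	/* placeholder */
	if (fd < 0 || ioctl(fd, FS_IOC_SET_ENCRYPTION_POLICY, &p) < 0) {
		perror("set policy");
		return 1;
	}
	close(fd);
	return 0;
}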
static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
@@ -1593,13 +1813,13 @@ static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
err = f2fs_commit_super(sbi, false);
-
- mnt_drop_write_file(filp);
if (err) {
/* undo new data */
memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
+ mnt_drop_write_file(filp);
return err;
}
+ mnt_drop_write_file(filp);
got_it:
if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
16))
@@ -1612,6 +1832,7 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
__u32 sync;
+ int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1622,21 +1843,30 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
if (f2fs_readonly(sbi->sb))
return -EROFS;
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
if (!sync) {
- if (!mutex_trylock(&sbi->gc_mutex))
- return -EBUSY;
+ if (!mutex_trylock(&sbi->gc_mutex)) {
+ ret = -EBUSY;
+ goto out;
+ }
} else {
mutex_lock(&sbi->gc_mutex);
}
- return f2fs_gc(sbi, sync);
+ ret = f2fs_gc(sbi, sync, true);
+out:
+ mnt_drop_write_file(filp);
+ return ret;
}
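f2fs_ioc_gc() lets a privileged caller trigger one garbage-collection pass: with sync set the caller blocks on gc_mutex and gets foreground GC, otherwise the trylock path returns -EBUSY if GC is already running. A small caller sketch, again with the ioctl definition copied from the in-kernel header (command 6, __u32 argument) rather than a uapi export:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>

/* copied from fs/f2fs/f2fs.h; CAP_SYS_ADMIN is required */
#define F2FS_IOCTL_MAGIC		0xf5
#define F2FS_IOC_GARBAGE_COLLECT	_IOW(F2FS_IOCTL_MAGIC, 6, __u32)

int main(void)
{
	__u32 sync = 1;		/* 1: block and run FG_GC; 0: try-lock */
	int fd = open("/mnt/f2fs", O_RDONLY);

	if (fd < 0 || ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &sync) < 0) {
		perror("gc");	/* EBUSY: GC already in progress */
		return 1;
	}
	close(fd);
	return 0;
}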
static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct cp_control cpc;
+ int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1644,13 +1874,343 @@ static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
if (f2fs_readonly(sbi->sb))
return -EROFS;
- cpc.reason = __get_cp_reason(sbi);
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ ret = f2fs_sync_fs(sbi->sb, 1);
- mutex_lock(&sbi->gc_mutex);
- write_checkpoint(sbi, &cpc);
- mutex_unlock(&sbi->gc_mutex);
+ mnt_drop_write_file(filp);
+ return ret;
+}
- return 0;
+static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
+ struct file *filp,
+ struct f2fs_defragment *range)
+{
+ struct inode *inode = file_inode(filp);
+ struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
+ struct extent_info ei;
+ pgoff_t pg_start, pg_end;
+ unsigned int blk_per_seg = sbi->blocks_per_seg;
+ unsigned int total = 0, sec_num;
+ unsigned int pages_per_sec = sbi->segs_per_sec * blk_per_seg;
+ block_t blk_end = 0;
+ bool fragmented = false;
+ int err;
+
+ /* if in-place-update policy is enabled, don't waste time here */
+ if (need_inplace_update(inode))
+ return -EINVAL;
+
+ pg_start = range->start >> PAGE_SHIFT;
+ pg_end = (range->start + range->len) >> PAGE_SHIFT;
+
+ f2fs_balance_fs(sbi, true);
+
+ inode_lock(inode);
+
+ /* writeback all dirty pages in the range */
+ err = filemap_write_and_wait_range(inode->i_mapping, range->start,
+ range->start + range->len - 1);
+ if (err)
+ goto out;
+
+ /*
+ * look up mapping info in the extent cache; skip defragmenting if the
+ * physical block addresses are contiguous.
+ */
+ if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
+ if (ei.fofs + ei.len >= pg_end)
+ goto out;
+ }
+
+ map.m_lblk = pg_start;
+
+ /*
+ * look up mapping info in the dnode page cache; skip defragmenting if
+ * all physical block addresses are contiguous, even if there are holes
+ * in the logical blocks.
+ */
+ while (map.m_lblk < pg_end) {
+ map.m_len = pg_end - map.m_lblk;
+ err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
+ if (err)
+ goto out;
+
+ if (!(map.m_flags & F2FS_MAP_FLAGS)) {
+ map.m_lblk++;
+ continue;
+ }
+
+ if (blk_end && blk_end != map.m_pblk) {
+ fragmented = true;
+ break;
+ }
+ blk_end = map.m_pblk + map.m_len;
+
+ map.m_lblk += map.m_len;
+ }
+
+ if (!fragmented)
+ goto out;
+
+ map.m_lblk = pg_start;
+ map.m_len = pg_end - pg_start;
+
+ sec_num = (map.m_len + pages_per_sec - 1) / pages_per_sec;
+
+ /*
+ * make sure there are enough free sections for LFS allocation; this
+ * avoids running defragmentation in SSR mode while free sections are
+ * being consumed intensively
+ */
+ if (has_not_enough_free_secs(sbi, 0, sec_num)) {
+ err = -EAGAIN;
+ goto out;
+ }
+
+ while (map.m_lblk < pg_end) {
+ pgoff_t idx;
+ int cnt = 0;
+
+do_map:
+ map.m_len = pg_end - map.m_lblk;
+ err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
+ if (err)
+ goto clear_out;
+
+ if (!(map.m_flags & F2FS_MAP_FLAGS)) {
+ map.m_lblk++;
+ continue;
+ }
+
+ set_inode_flag(inode, FI_DO_DEFRAG);
+
+ idx = map.m_lblk;
+ while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
+ struct page *page;
+
+ page = get_lock_data_page(inode, idx, true);
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ goto clear_out;
+ }
+
+ set_page_dirty(page);
+ f2fs_put_page(page, 1);
+
+ idx++;
+ cnt++;
+ total++;
+ }
+
+ map.m_lblk = idx;
+
+ if (idx < pg_end && cnt < blk_per_seg)
+ goto do_map;
+
+ clear_inode_flag(inode, FI_DO_DEFRAG);
+
+ err = filemap_fdatawrite(inode->i_mapping);
+ if (err)
+ goto out;
+ }
+clear_out:
+ clear_inode_flag(inode, FI_DO_DEFRAG);
+out:
+ inode_unlock(inode);
+ if (!err)
+ range->len = (u64)total << PAGE_SHIFT;
+ return err;
+}
+
+static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_defragment range;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!S_ISREG(inode->i_mode))
+ return -EINVAL;
+
+ err = mnt_want_write_file(filp);
+ if (err)
+ return err;
+
+ if (f2fs_readonly(sbi->sb)) {
+ err = -EROFS;
+ goto out;
+ }
+
+ if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
+ sizeof(range))) {
+ err = -EFAULT;
+ goto out;
+ }
+
+ /* verify alignment of offset & size */
+ if (range.start & (F2FS_BLKSIZE - 1) ||
+ range.len & (F2FS_BLKSIZE - 1)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = f2fs_defragment_range(sbi, filp, &range);
+ f2fs_update_time(sbi, REQ_TIME);
+ if (err < 0)
+ goto out;
+
+ if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
+ sizeof(range)))
+ err = -EFAULT;
+out:
+ mnt_drop_write_file(filp);
+ return err;
+}
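The defragment ioctl passes a byte range in and gets the amount of data actually rewritten back out: on success range.len is overwritten with total << PAGE_SHIFT, as set at the end of f2fs_defragment_range() above. A caller sketch, with struct f2fs_defragment and the command number copied from the in-kernel header as assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>

/* copied from fs/f2fs/f2fs.h of this tree */
struct f2fs_defragment {
	__u64 start;
	__u64 len;
};
#define F2FS_IOCTL_MAGIC	0xf5
#define F2FS_IOC_DEFRAGMENT	_IOWR(F2FS_IOCTL_MAGIC, 8, struct f2fs_defragment)

int main(void)
{
	/* both fields must be F2FS_BLKSIZE (4 KiB) aligned */
	struct f2fs_defragment range = { .start = 0, .len = 64 << 20 };
	int fd = open("/mnt/f2fs/bigfile", O_RDWR);	/* hypothetical */

	if (fd < 0 || ioctl(fd, F2FS_IOC_DEFRAGMENT, &range) < 0) {
		perror("defragment");	/* EAGAIN: not enough free sections */
		return 1;
	}
	printf("rewrote %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}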
+
+static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out, size_t len)
+{
+ struct inode *src = file_inode(file_in);
+ struct inode *dst = file_inode(file_out);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(src);
+ size_t olen = len, dst_max_i_size = 0;
+ size_t dst_osize;
+ int ret;
+
+ if (file_in->f_path.mnt != file_out->f_path.mnt ||
+ src->i_sb != dst->i_sb)
+ return -EXDEV;
+
+ if (unlikely(f2fs_readonly(src->i_sb)))
+ return -EROFS;
+
+ if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
+ return -EINVAL;
+
+ if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
+ return -EOPNOTSUPP;
+
+ if (src == dst) {
+ if (pos_in == pos_out)
+ return 0;
+ if (pos_out > pos_in && pos_out < pos_in + len)
+ return -EINVAL;
+ }
+
+ inode_lock(src);
+ if (src != dst) {
+ if (!inode_trylock(dst)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ }
+
+ ret = -EINVAL;
+ if (pos_in + len > src->i_size || pos_in + len < pos_in)
+ goto out_unlock;
+ if (len == 0)
+ olen = len = src->i_size - pos_in;
+ if (pos_in + len == src->i_size)
+ len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
+ if (len == 0) {
+ ret = 0;
+ goto out_unlock;
+ }
+
+ dst_osize = dst->i_size;
+ if (pos_out + olen > dst->i_size)
+ dst_max_i_size = pos_out + olen;
+
+ /* verify the end result is block aligned */
+ if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
+ !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
+ !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
+ goto out_unlock;
+
+ ret = f2fs_convert_inline_inode(src);
+ if (ret)
+ goto out_unlock;
+
+ ret = f2fs_convert_inline_inode(dst);
+ if (ret)
+ goto out_unlock;
+
+ /* write out all dirty pages from offset */
+ ret = filemap_write_and_wait_range(src->i_mapping,
+ pos_in, pos_in + len);
+ if (ret)
+ goto out_unlock;
+
+ ret = filemap_write_and_wait_range(dst->i_mapping,
+ pos_out, pos_out + len);
+ if (ret)
+ goto out_unlock;
+
+ f2fs_balance_fs(sbi, true);
+ f2fs_lock_op(sbi);
+ ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
+ pos_out >> F2FS_BLKSIZE_BITS,
+ len >> F2FS_BLKSIZE_BITS, false);
+
+ if (!ret) {
+ if (dst_max_i_size)
+ f2fs_i_size_write(dst, dst_max_i_size);
+ else if (dst_osize != dst->i_size)
+ f2fs_i_size_write(dst, dst_osize);
+ }
+ f2fs_unlock_op(sbi);
+out_unlock:
+ if (src != dst)
+ inode_unlock(dst);
+out:
+ inode_unlock(src);
+ return ret;
+}
+
+static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
+{
+ struct f2fs_move_range range;
+ struct fd dst;
+ int err;
+
+ if (!(filp->f_mode & FMODE_READ) ||
+ !(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+
+ if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
+ sizeof(range)))
+ return -EFAULT;
+
+ dst = fdget(range.dst_fd);
+ if (!dst.file)
+ return -EBADF;
+
+ if (!(dst.file->f_mode & FMODE_WRITE)) {
+ err = -EBADF;
+ goto err_out;
+ }
+
+ err = mnt_want_write_file(filp);
+ if (err)
+ goto err_out;
+
+ err = f2fs_move_file_range(filp, range.pos_in, dst.file,
+ range.pos_out, range.len);
+
+ mnt_drop_write_file(filp);
+
+ if (copy_to_user((struct f2fs_move_range __user *)arg,
+ &range, sizeof(range)))
+ err = -EFAULT;
+err_out:
+ fdput(dst);
+ return err;
}
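F2FS_IOC_MOVE_RANGE exchanges a block-aligned range between two regular files on the same mount; f2fs_move_file_range() above rejects cross-mount pairs with -EXDEV and encrypted inodes with -EOPNOTSUPP. A sketch of the caller, with the structure and ioctl number copied from the in-kernel header as assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>

/* copied from fs/f2fs/f2fs.h of this tree */
struct f2fs_move_range {
	__u32 dst_fd;	/* destination fd */
	__u64 pos_in;	/* start position in the source file */
	__u64 pos_out;	/* start position in the destination file */
	__u64 len;	/* size to move */
};
#define F2FS_IOCTL_MAGIC	0xf5
#define F2FS_IOC_MOVE_RANGE	_IOWR(F2FS_IOCTL_MAGIC, 9, struct f2fs_move_range)

int main(void)
{
	int src = open("/mnt/f2fs/a", O_RDWR);	/* needs read and write */
	int dst = open("/mnt/f2fs/b", O_RDWR);	/* needs write */
	/* offsets and length must all be 4 KiB aligned */
	struct f2fs_move_range range = {
		.dst_fd = dst, .pos_in = 0, .pos_out = 0, .len = 1 << 20,
	};

	if (src < 0 || dst < 0 ||
	    ioctl(src, F2FS_IOC_MOVE_RANGE, &range) < 0) {
		perror("move_range");
		return 1;
	}
	close(src);
	close(dst);
	return 0;
}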
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
@@ -1686,6 +2246,10 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return f2fs_ioc_gc(filp, arg);
case F2FS_IOC_WRITE_CHECKPOINT:
return f2fs_ioc_write_checkpoint(filp, arg);
+ case F2FS_IOC_DEFRAGMENT:
+ return f2fs_ioc_defragment(filp, arg);
+ case F2FS_IOC_MOVE_RANGE:
+ return f2fs_ioc_move_range(filp, arg);
default:
return -ENOTTY;
}
@@ -1693,14 +2257,39 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
- struct inode *inode = file_inode(iocb->ki_filp);
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ struct blk_plug plug;
+ ssize_t ret;
if (f2fs_encrypted_inode(inode) &&
- !f2fs_has_encryption_key(inode) &&
- f2fs_get_encryption_info(inode))
+ !fscrypt_has_encryption_key(inode) &&
+ fscrypt_get_encryption_info(inode))
return -EACCES;
- return generic_file_write_iter(iocb, from);
+ inode_lock(inode);
+ ret = generic_write_checks(iocb, from);
+ if (ret > 0) {
+ int err = f2fs_preallocate_blocks(iocb, from);
+
+ if (err) {
+ inode_unlock(inode);
+ return err;
+ }
+ blk_start_plug(&plug);
+ ret = __generic_file_write_iter(iocb, from);
+ blk_finish_plug(&plug);
+ }
+ inode_unlock(inode);
+
+ if (ret > 0) {
+ ssize_t err;
+
+ err = generic_write_sync(file, iocb->ki_pos - ret, ret);
+ if (err < 0)
+ ret = err;
+ }
+ return ret;
}
#ifdef CONFIG_COMPAT
@@ -1713,6 +2302,24 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case F2FS_IOC32_SETFLAGS:
cmd = F2FS_IOC_SETFLAGS;
break;
+ case F2FS_IOC32_GETVERSION:
+ cmd = F2FS_IOC_GETVERSION;
+ break;
+ case F2FS_IOC_START_ATOMIC_WRITE:
+ case F2FS_IOC_COMMIT_ATOMIC_WRITE:
+ case F2FS_IOC_START_VOLATILE_WRITE:
+ case F2FS_IOC_RELEASE_VOLATILE_WRITE:
+ case F2FS_IOC_ABORT_VOLATILE_WRITE:
+ case F2FS_IOC_SHUTDOWN:
+ case F2FS_IOC_SET_ENCRYPTION_POLICY:
+ case F2FS_IOC_GET_ENCRYPTION_PWSALT:
+ case F2FS_IOC_GET_ENCRYPTION_POLICY:
+ case F2FS_IOC_GARBAGE_COLLECT:
+ case F2FS_IOC_WRITE_CHECKPOINT:
+ case F2FS_IOC_DEFRAGMENT:
+ break;
+ case F2FS_IOC_MOVE_RANGE:
+ break;
default:
return -ENOIOCTLCMD;
}
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index fedbf67a0842..6390d45c1b68 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -16,7 +16,6 @@
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
-#include <linux/blkdev.h>
#include "f2fs.h"
#include "node.h"
@@ -48,6 +47,11 @@ static int gc_thread_func(void *data)
continue;
}
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_CHECKPOINT))
+ f2fs_stop_checkpoint(sbi, false);
+#endif
+
/*
* [GC triggering condition]
* 0. GC is not conducted currently.
@@ -78,7 +82,7 @@ static int gc_thread_func(void *data)
stat_inc_bggc_count(sbi);
/* if return value is not zero, no victim was selected */
- if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC)))
+ if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true))
wait_ms = gc_th->no_gc_sleep_time;
trace_f2fs_background_gc(sbi->sb, wait_ms,
@@ -97,7 +101,7 @@ int start_gc_thread(struct f2fs_sb_info *sbi)
dev_t dev = sbi->sb->s_bdev->bd_dev;
int err = 0;
- gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
+ gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
if (!gc_th) {
err = -ENOMEM;
goto out;
@@ -173,9 +177,9 @@ static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
{
/* SSR allocates in a segment unit */
if (p->alloc_mode == SSR)
- return 1 << sbi->log_blocks_per_seg;
+ return sbi->blocks_per_seg;
if (p->gc_mode == GC_GREEDY)
- return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
+ return sbi->blocks_per_seg * p->ofs_unit;
else if (p->gc_mode == GC_CB)
return UINT_MAX;
else /* No other gc_mode */
@@ -246,6 +250,18 @@ static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
return get_cb_cost(sbi, segno);
}
+static unsigned int count_bits(const unsigned long *addr,
+ unsigned int offset, unsigned int len)
+{
+ unsigned int end = offset + len, sum = 0;
+
+ while (offset < end) {
+ if (test_bit(offset++, addr))
+ ++sum;
+ }
+ return sum;
+}
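count_bits() is a plain population count over a bit range; get_victim_by_default() below uses it so that a section-sized search step charges one unit of p.max_search per dirty segment it actually covers, rather than one per section. A standalone rendering of the same semantics, runnable in userspace:

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

static unsigned int count_bits(const unsigned long *addr,
			       unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	for (; offset < end; offset++)
		if (addr[offset / BITS_PER_LONG] &
		    (1UL << (offset % BITS_PER_LONG)))
			sum++;
	return sum;
}

int main(void)
{
	/* a section of 4 segments whose dirty bitmap is 1011b */
	unsigned long dirty_segmap = 0xb;

	/* prints 3: three dirty segments counted toward max_search */
	printf("%u\n", count_bits(&dirty_segmap, 0, 4));
	return 0;
}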
+
/*
* This function is called from two paths.
* One is garbage collection and the other is SSR segment selection.
@@ -259,9 +275,9 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
struct victim_sel_policy p;
- unsigned int secno, max_cost;
+ unsigned int secno, last_victim;
unsigned int last_segment = MAIN_SEGS(sbi);
- int nsearched = 0;
+ unsigned int nsearched = 0;
mutex_lock(&dirty_i->seglist_lock);
@@ -269,11 +285,12 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
select_policy(sbi, gc_type, type, &p);
p.min_segno = NULL_SEGNO;
- p.min_cost = max_cost = get_max_cost(sbi, &p);
+ p.min_cost = get_max_cost(sbi, &p);
if (p.max_search == 0)
goto out;
+ last_victim = sbi->last_victim[p.gc_mode];
if (p.alloc_mode == LFS && gc_type == FG_GC) {
p.min_segno = check_bg_victims(sbi);
if (p.min_segno != NULL_SEGNO)
@@ -296,27 +313,35 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
}
p.offset = segno + p.ofs_unit;
- if (p.ofs_unit > 1)
+ if (p.ofs_unit > 1) {
p.offset -= segno % p.ofs_unit;
+ nsearched += count_bits(p.dirty_segmap,
+ p.offset - p.ofs_unit,
+ p.ofs_unit);
+ } else {
+ nsearched++;
+ }
+
secno = GET_SECNO(sbi, segno);
if (sec_usage_check(sbi, secno))
- continue;
+ goto next;
if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
- continue;
+ goto next;
cost = get_gc_cost(sbi, segno, &p);
if (p.min_cost > cost) {
p.min_segno = segno;
p.min_cost = cost;
- } else if (unlikely(cost == max_cost)) {
- continue;
}
-
- if (nsearched++ >= p.max_search) {
- sbi->last_victim[p.gc_mode] = segno;
+next:
+ if (nsearched >= p.max_search) {
+ if (!sbi->last_victim[p.gc_mode] && segno <= last_victim)
+ sbi->last_victim[p.gc_mode] = last_victim + 1;
+ else
+ sbi->last_victim[p.gc_mode] = segno + 1;
break;
}
}
@@ -400,13 +425,13 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
* If a node is valid, copy it with cold status; otherwise (an invalid
* node) ignore it.
*/
-static int gc_node_segment(struct f2fs_sb_info *sbi,
+static void gc_node_segment(struct f2fs_sb_info *sbi,
struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
- bool initial = true;
struct f2fs_summary *entry;
block_t start_addr;
int off;
+ int phase = 0;
start_addr = START_BLOCK(sbi, segno);
@@ -419,16 +444,24 @@ next_step:
struct node_info ni;
/* stop BG_GC if there is not enough free sections. */
- if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
- return 0;
+ if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
+ return;
if (check_valid_map(sbi, segno, off) == 0)
continue;
- if (initial) {
+ if (phase == 0) {
+ ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
+ META_NAT, true);
+ continue;
+ }
+
+ if (phase == 1) {
ra_node_page(sbi, nid);
continue;
}
+
+ /* phase == 2 */
node_page = get_node_page(sbi, nid);
if (IS_ERR(node_page))
continue;
@@ -445,36 +478,12 @@ next_step:
continue;
}
- /* set page dirty and write it */
- if (gc_type == FG_GC) {
- f2fs_wait_on_page_writeback(node_page, NODE);
- set_page_dirty(node_page);
- } else {
- if (!PageWriteback(node_page))
- set_page_dirty(node_page);
- }
- f2fs_put_page(node_page, 1);
+ move_node_page(node_page, gc_type);
stat_inc_node_blk_count(sbi, 1, gc_type);
}
- if (initial) {
- initial = false;
+ if (++phase < 3)
goto next_step;
- }
-
- if (gc_type == FG_GC) {
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_ALL,
- .nr_to_write = LONG_MAX,
- .for_reclaim = 0,
- };
- sync_node_pages(sbi, 0, &wbc);
-
- /* return 1 only if FG_GC succefully reclaimed one */
- if (get_valid_blocks(sbi, segno, 1) == 0)
- return 1;
- }
- return 0;
}
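The rewritten gc_node_segment() replaces the single 'initial' flag with a three-phase sweep: phase 0 reads ahead the NAT blocks, phase 1 reads ahead the node pages themselves, and only phase 2 does the actual move, by which point everything should already sit in the page cache. The shape of that pattern, reduced to a runnable sketch with stub I/O (the names below are illustrative, not f2fs APIs):

#include <stdio.h>

/* stubs standing in for ra_meta_pages()/ra_node_page()/move_node_page() */
static void readahead_nat(int nid)  { printf("ra NAT block for nid %d\n", nid); }
static void readahead_node(int nid) { printf("ra node page %d\n", nid); }
static void move_node(int nid)      { printf("move node %d\n", nid); }

int main(void)
{
	int nids[] = { 7, 9, 12 };	/* victims in one segment */
	int n = sizeof(nids) / sizeof(nids[0]);

	/* early phases only queue readahead; the last one does the work */
	for (int phase = 0; phase < 3; phase++)
		for (int i = 0; i < n; i++)
			switch (phase) {
			case 0: readahead_nat(nids[i]); break;
			case 1: readahead_node(nids[i]); break;
			case 2: move_node(nids[i]); break;
			}
	return 0;
}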
/*
@@ -484,7 +493,7 @@ next_step:
* as indirect or double indirect node blocks, are given, it must be a caller's
* bug.
*/
-block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
+block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
unsigned int bidx;
@@ -501,7 +510,7 @@ block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
bidx = node_ofs - 5 - dec;
}
- return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
+ return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
@@ -535,7 +544,8 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
return true;
}
-static void move_encrypted_block(struct inode *inode, block_t bidx)
+static void move_encrypted_block(struct inode *inode, block_t bidx,
+ unsigned int segno, int off)
{
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(inode),
@@ -547,6 +557,7 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
struct f2fs_summary sum;
struct node_info ni;
struct page *page;
+ block_t newaddr;
int err;
/* do not read out */
@@ -554,6 +565,9 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
if (!page)
return;
+ if (!check_valid_map(F2FS_I_SB(inode), segno, off))
+ goto out;
+
set_new_dnode(&dn, inode, NULL, NULL, 0);
err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
if (err)
@@ -568,21 +582,24 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
* don't cache encrypted data in the meta inode until the previous dirty
* data has been written back, to avoid racing between GC and flush.
*/
- f2fs_wait_on_page_writeback(page, DATA);
+ f2fs_wait_on_page_writeback(page, DATA, true);
get_node_info(fio.sbi, dn.nid, &ni);
set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
/* read page */
fio.page = page;
- fio.blk_addr = dn.data_blkaddr;
+ fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
- fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi),
- fio.blk_addr,
- FGP_LOCK|FGP_CREAT,
- GFP_NOFS);
- if (!fio.encrypted_page)
- goto put_out;
+ allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
+ &sum, CURSEG_COLD_DATA);
+
+ fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi), newaddr,
+ FGP_LOCK | FGP_CREAT, GFP_NOFS);
+ if (!fio.encrypted_page) {
+ err = -ENOMEM;
+ goto recover_block;
+ }
err = f2fs_submit_page_bio(&fio);
if (err)
@@ -591,40 +608,47 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
/* write page */
lock_page(fio.encrypted_page);
- if (unlikely(!PageUptodate(fio.encrypted_page)))
+ if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
+ err = -EIO;
goto put_page_out;
- if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi)))
+ }
+ if (unlikely(!PageUptodate(fio.encrypted_page))) {
+ err = -EIO;
goto put_page_out;
+ }
set_page_dirty(fio.encrypted_page);
- f2fs_wait_on_page_writeback(fio.encrypted_page, DATA);
+ f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
if (clear_page_dirty_for_io(fio.encrypted_page))
dec_page_count(fio.sbi, F2FS_DIRTY_META);
set_page_writeback(fio.encrypted_page);
/* allocate block address */
- f2fs_wait_on_page_writeback(dn.node_page, NODE);
- allocate_data_block(fio.sbi, NULL, fio.blk_addr,
- &fio.blk_addr, &sum, CURSEG_COLD_DATA);
+ f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
+
fio.rw = WRITE_SYNC;
+ fio.new_blkaddr = newaddr;
f2fs_submit_page_mbio(&fio);
- dn.data_blkaddr = fio.blk_addr;
- set_data_blkaddr(&dn);
- f2fs_update_extent_cache(&dn);
- set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
+ f2fs_update_data_blkaddr(&dn, newaddr);
+ set_inode_flag(inode, FI_APPEND_WRITE);
if (page->index == 0)
- set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
+ set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
f2fs_put_page(fio.encrypted_page, 1);
+recover_block:
+ if (err)
+ __f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
+ true, true);
put_out:
f2fs_put_dnode(&dn);
out:
f2fs_put_page(page, 1);
}
-static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
+static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
+ unsigned int segno, int off)
{
struct page *page;
@@ -632,6 +656,9 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
if (IS_ERR(page))
return;
+ if (!check_valid_map(F2FS_I_SB(inode), segno, off))
+ goto out;
+
if (gc_type == BG_GC) {
if (PageWriteback(page))
goto out;
@@ -645,13 +672,24 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
.page = page,
.encrypted_page = NULL,
};
+ bool is_dirty = PageDirty(page);
+ int err;
+
+retry:
set_page_dirty(page);
- f2fs_wait_on_page_writeback(page, DATA);
- if (clear_page_dirty_for_io(page))
+ f2fs_wait_on_page_writeback(page, DATA, true);
+ if (clear_page_dirty_for_io(page)) {
inode_dec_dirty_pages(inode);
+ remove_dirty_inode(inode);
+ }
+
set_cold_data(page);
- do_write_data_page(&fio);
- clear_cold_data(page);
+
+ err = do_write_data_page(&fio);
+ if (err == -ENOMEM && is_dirty) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry;
+ }
}
out:
f2fs_put_page(page, 1);
@@ -664,7 +702,7 @@ out:
* If the parent node is not valid or the data block address is different,
* the victim data block is ignored.
*/
-static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
struct super_block *sb = sbi->sb;
@@ -684,16 +722,23 @@ next_step:
struct node_info dni; /* dnode info for the data */
unsigned int ofs_in_node, nofs;
block_t start_bidx;
+ nid_t nid = le32_to_cpu(entry->nid);
/* stop BG_GC if there is not enough free sections. */
- if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
- return 0;
+ if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
+ return;
if (check_valid_map(sbi, segno, off) == 0)
continue;
if (phase == 0) {
- ra_node_page(sbi, le32_to_cpu(entry->nid));
+ ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
+ META_NAT, true);
+ continue;
+ }
+
+ if (phase == 1) {
+ ra_node_page(sbi, nid);
continue;
}
@@ -701,14 +746,14 @@ next_step:
if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
continue;
- if (phase == 1) {
+ if (phase == 2) {
ra_node_page(sbi, dni.ino);
continue;
}
ofs_in_node = le16_to_cpu(entry->ofs_in_node);
- if (phase == 2) {
+ if (phase == 3) {
inode = f2fs_iget(sb, dni.ino);
if (IS_ERR(inode) || is_bad_inode(inode))
continue;
@@ -720,7 +765,7 @@ next_step:
continue;
}
- start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
+ start_bidx = start_bidx_of_node(nofs, inode);
data_page = get_read_data_page(inode,
start_bidx + ofs_in_node, READA, true);
if (IS_ERR(data_page)) {
@@ -733,30 +778,41 @@ next_step:
continue;
}
- /* phase 3 */
+ /* phase 4 */
inode = find_gc_inode(gc_list, dni.ino);
if (inode) {
- start_bidx = start_bidx_of_node(nofs, F2FS_I(inode))
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ bool locked = false;
+
+ if (S_ISREG(inode->i_mode)) {
+ if (!down_write_trylock(&fi->dio_rwsem[READ]))
+ continue;
+ if (!down_write_trylock(
+ &fi->dio_rwsem[WRITE])) {
+ up_write(&fi->dio_rwsem[READ]);
+ continue;
+ }
+ locked = true;
+ }
+
+ start_bidx = start_bidx_of_node(nofs, inode)
+ ofs_in_node;
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
- move_encrypted_block(inode, start_bidx);
+ move_encrypted_block(inode, start_bidx, segno, off);
else
- move_data_page(inode, start_bidx, gc_type);
+ move_data_page(inode, start_bidx, gc_type, segno, off);
+
+ if (locked) {
+ up_write(&fi->dio_rwsem[WRITE]);
+ up_write(&fi->dio_rwsem[READ]);
+ }
+
stat_inc_data_blk_count(sbi, 1, gc_type);
}
}
- if (++phase < 4)
+ if (++phase < 5)
goto next_step;
-
- if (gc_type == FG_GC) {
- f2fs_submit_merged_bio(sbi, DATA, WRITE);
-
- /* return 1 only if FG_GC succefully reclaimed one */
- if (get_valid_blocks(sbi, segno, 1) == 0)
- return 1;
- }
- return 0;
}
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
@@ -772,51 +828,84 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
return ret;
}
-static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
+static int do_garbage_collect(struct f2fs_sb_info *sbi,
+ unsigned int start_segno,
struct gc_inode_list *gc_list, int gc_type)
{
struct page *sum_page;
struct f2fs_summary_block *sum;
struct blk_plug plug;
- int nfree = 0;
+ unsigned int segno = start_segno;
+ unsigned int end_segno = start_segno + sbi->segs_per_sec;
+ int sec_freed = 0;
+ unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
+ SUM_TYPE_DATA : SUM_TYPE_NODE;
+
+ /* read ahead multiple SSA blocks that have contiguous addresses */
+ if (sbi->segs_per_sec > 1)
+ ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
+ sbi->segs_per_sec, META_SSA, true);
- /* read segment summary of victim */
- sum_page = get_sum_page(sbi, segno);
+ /* reference all summary pages */
+ while (segno < end_segno) {
+ sum_page = get_sum_page(sbi, segno++);
+ unlock_page(sum_page);
+ }
blk_start_plug(&plug);
- sum = page_address(sum_page);
+ for (segno = start_segno; segno < end_segno; segno++) {
- /*
- * this is to avoid deadlock:
- * - lock_page(sum_page) - f2fs_replace_block
- * - check_valid_map() - mutex_lock(sentry_lock)
- * - mutex_lock(sentry_lock) - change_curseg()
- * - lock_page(sum_page)
- */
- unlock_page(sum_page);
-
- switch (GET_SUM_TYPE((&sum->footer))) {
- case SUM_TYPE_NODE:
- nfree = gc_node_segment(sbi, sum->entries, segno, gc_type);
- break;
- case SUM_TYPE_DATA:
- nfree = gc_data_segment(sbi, sum->entries, gc_list,
- segno, gc_type);
- break;
+ /* find segment summary of victim */
+ sum_page = find_get_page(META_MAPPING(sbi),
+ GET_SUM_BLOCK(sbi, segno));
+ f2fs_put_page(sum_page, 0);
+
+ if (get_valid_blocks(sbi, segno, 1) == 0 ||
+ !PageUptodate(sum_page) ||
+ unlikely(f2fs_cp_error(sbi)))
+ goto next;
+
+ sum = page_address(sum_page);
+ f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));
+
+ /*
+ * this is to avoid deadlock:
+ * - lock_page(sum_page) - f2fs_replace_block
+ * - check_valid_map() - mutex_lock(sentry_lock)
+ * - mutex_lock(sentry_lock) - change_curseg()
+ * - lock_page(sum_page)
+ */
+
+ if (type == SUM_TYPE_NODE)
+ gc_node_segment(sbi, sum->entries, segno, gc_type);
+ else
+ gc_data_segment(sbi, sum->entries, gc_list, segno,
+ gc_type);
+
+ stat_inc_seg_count(sbi, type, gc_type);
+next:
+ f2fs_put_page(sum_page, 0);
}
+
+ if (gc_type == FG_GC)
+ f2fs_submit_merged_bio(sbi,
+ (type == SUM_TYPE_NODE) ? NODE : DATA, WRITE);
+
blk_finish_plug(&plug);
- stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)), gc_type);
+ if (gc_type == FG_GC &&
+ get_valid_blocks(sbi, start_segno, sbi->segs_per_sec) == 0)
+ sec_freed = 1;
+
stat_inc_call_count(sbi->stat_info);
- f2fs_put_page(sum_page, 0);
- return nfree;
+ return sec_freed;
}
-int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
+int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background)
{
- unsigned int segno, i;
+ unsigned int segno;
int gc_type = sync ? FG_GC : BG_GC;
int sec_freed = 0;
int ret = -EINVAL;
@@ -832,46 +921,51 @@ gc_more:
if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
goto stop;
- if (unlikely(f2fs_cp_error(sbi)))
+ if (unlikely(f2fs_cp_error(sbi))) {
+ ret = -EIO;
goto stop;
+ }
- if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed)) {
+ if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed, 0)) {
gc_type = FG_GC;
- if (__get_victim(sbi, &segno, gc_type) || prefree_segments(sbi))
- write_checkpoint(sbi, &cpc);
+ /*
+ * If there is no victim and no prefree segment but we still don't
+ * have enough free sections, flush dentry/node blocks and run
+ * garbage collection again.
+ */
+ if (__get_victim(sbi, &segno, gc_type) ||
+ prefree_segments(sbi)) {
+ ret = write_checkpoint(sbi, &cpc);
+ if (ret)
+ goto stop;
+ segno = NULL_SEGNO;
+ } else if (has_not_enough_free_secs(sbi, 0, 0)) {
+ ret = write_checkpoint(sbi, &cpc);
+ if (ret)
+ goto stop;
+ }
+ } else if (gc_type == BG_GC && !background) {
+ /* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
+ goto stop;
}
if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
goto stop;
ret = 0;
- /* readahead multi ssa blocks those have contiguous address */
- if (sbi->segs_per_sec > 1)
- ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), sbi->segs_per_sec,
- META_SSA, true);
-
- for (i = 0; i < sbi->segs_per_sec; i++) {
- /*
- * for FG_GC case, halt gcing left segments once failed one
- * of segments in selected section to avoid long latency.
- */
- if (!do_garbage_collect(sbi, segno + i, &gc_list, gc_type) &&
- gc_type == FG_GC)
- break;
- }
-
- if (i == sbi->segs_per_sec && gc_type == FG_GC)
+ if (do_garbage_collect(sbi, segno, &gc_list, gc_type) &&
+ gc_type == FG_GC)
sec_freed++;
if (gc_type == FG_GC)
sbi->cur_victim_sec = NULL_SEGNO;
if (!sync) {
- if (has_not_enough_free_secs(sbi, sec_freed))
+ if (has_not_enough_free_secs(sbi, sec_freed, 0))
goto gc_more;
if (gc_type == FG_GC)
- write_checkpoint(sbi, &cpc);
+ ret = write_checkpoint(sbi, &cpc);
}
stop:
mutex_unlock(&sbi->gc_mutex);
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
index b4a65be9f7d3..a993967dcdb9 100644
--- a/fs/f2fs/gc.h
+++ b/fs/f2fs/gc.h
@@ -100,11 +100,3 @@ static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
return true;
return false;
}
-
-static inline int is_idle(struct f2fs_sb_info *sbi)
-{
- struct block_device *bdev = sbi->sb->s_bdev;
- struct request_queue *q = bdev_get_queue(bdev);
- struct request_list *rl = &q->root_rl;
- return !(rl->count[BLK_RW_SYNC]) && !(rl->count[BLK_RW_ASYNC]);
-}
diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c
index b238d2fec3e5..71b7206c431e 100644
--- a/fs/f2fs/hash.c
+++ b/fs/f2fs/hash.c
@@ -70,8 +70,7 @@ static void str2hashbuf(const unsigned char *msg, size_t len,
*buf++ = pad;
}
-f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
- struct f2fs_filename *fname)
+f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info)
{
__u32 hash;
f2fs_hash_t f2fs_hash;
@@ -80,10 +79,6 @@ f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
const unsigned char *name = name_info->name;
size_t len = name_info->len;
- /* encrypted bigname case */
- if (fname && !fname->disk_name.name)
- return cpu_to_le32(fname->hash);
-
if (is_dot_dotdot(name_info))
return 0;
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index f35f3eb3541f..b85987703d1e 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -17,9 +17,6 @@
bool f2fs_may_inline_data(struct inode *inode)
{
- if (!test_opt(F2FS_I_SB(inode), INLINE_DATA))
- return false;
-
if (f2fs_is_atomic_file(inode))
return false;
@@ -55,7 +52,7 @@ void read_inline_data(struct page *page, struct page *ipage)
f2fs_bug_on(F2FS_P_SB(page), page->index);
- zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
+ zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE);
/* Copy the whole inline data block */
src_addr = inline_data_addr(ipage);
@@ -63,7 +60,8 @@ void read_inline_data(struct page *page, struct page *ipage)
memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
flush_dcache_page(page);
kunmap_atomic(dst_addr);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
}
bool truncate_inline_inode(struct page *ipage, u64 from)
@@ -75,9 +73,9 @@ bool truncate_inline_inode(struct page *ipage, u64 from)
addr = inline_data_addr(ipage);
- f2fs_wait_on_page_writeback(ipage, NODE);
+ f2fs_wait_on_page_writeback(ipage, NODE, true);
memset(addr + from, 0, MAX_INLINE_DATA - from);
-
+ set_page_dirty(ipage);
return true;
}
@@ -112,11 +110,12 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page)
}
if (page->index)
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ zero_user_segment(page, 0, PAGE_SIZE);
else
read_inline_data(page, ipage);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
f2fs_put_page(ipage, 1);
trace_android_fs_dataread_end(inode, page_offset(page),
PAGE_SIZE);
@@ -126,7 +125,6 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page)
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
- void *src_addr, *dst_addr;
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(dn->inode),
.type = DATA,
@@ -136,8 +134,6 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
};
int dirty, err;
- f2fs_bug_on(F2FS_I_SB(dn->inode), page->index);
-
if (!f2fs_exist_data(dn->inode))
goto clear_out;
@@ -145,21 +141,9 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
if (err)
return err;
- f2fs_wait_on_page_writeback(page, DATA);
+ f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
- if (PageUptodate(page))
- goto no_update;
-
- zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
-
- /* Copy the whole inline data block */
- src_addr = inline_data_addr(dn->inode_page);
- dst_addr = kmap_atomic(page);
- memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
- flush_dcache_page(page);
- kunmap_atomic(dst_addr);
- SetPageUptodate(page);
-no_update:
+ read_inline_data(page, dn->inode_page);
set_page_dirty(page);
/* clear dirty state */
@@ -167,23 +151,23 @@ no_update:
/* write data page to try to make data consistent */
set_page_writeback(page);
- fio.blk_addr = dn->data_blkaddr;
+ fio.old_blkaddr = dn->data_blkaddr;
write_data_page(dn, &fio);
- set_data_blkaddr(dn);
- f2fs_update_extent_cache(dn);
- f2fs_wait_on_page_writeback(page, DATA);
- if (dirty)
+ f2fs_wait_on_page_writeback(page, DATA, true);
+ if (dirty) {
inode_dec_dirty_pages(dn->inode);
+ remove_dirty_inode(dn->inode);
+ }
/* this converted inline_data should be recovered. */
- set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);
+ set_inode_flag(dn->inode, FI_APPEND_WRITE);
/* clear inline data and flag after data writeback */
truncate_inline_inode(dn->inode_page, 0);
+ clear_inline_node(dn->inode_page);
clear_out:
stat_dec_inline_inode(dn->inode);
f2fs_clear_inline_inode(dn->inode);
- sync_inode_page(dn);
f2fs_put_dnode(dn);
return 0;
}
@@ -195,7 +179,10 @@ int f2fs_convert_inline_inode(struct inode *inode)
struct page *ipage, *page;
int err = 0;
- page = grab_cache_page(inode->i_mapping, 0);
+ if (!f2fs_has_inline_data(inode))
+ return 0;
+
+ page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
if (!page)
return -ENOMEM;
@@ -217,6 +204,9 @@ out:
f2fs_unlock_op(sbi);
f2fs_put_page(page, 1);
+
+ f2fs_balance_fs(sbi, dn.node_changed);
+
return err;
}
@@ -238,16 +228,17 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page)
f2fs_bug_on(F2FS_I_SB(inode), page->index);
- f2fs_wait_on_page_writeback(dn.inode_page, NODE);
+ f2fs_wait_on_page_writeback(dn.inode_page, NODE, true);
src_addr = kmap_atomic(page);
dst_addr = inline_data_addr(dn.inode_page);
memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
kunmap_atomic(src_addr);
+ set_page_dirty(dn.inode_page);
- set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
- set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
+ set_inode_flag(inode, FI_APPEND_WRITE);
+ set_inode_flag(inode, FI_DATA_EXIST);
- sync_inode_page(&dn);
+ clear_inline_node(dn.inode_page);
f2fs_put_dnode(&dn);
return 0;
}
@@ -276,16 +267,16 @@ process_inline:
ipage = get_node_page(sbi, inode->i_ino);
f2fs_bug_on(sbi, IS_ERR(ipage));
- f2fs_wait_on_page_writeback(ipage, NODE);
+ f2fs_wait_on_page_writeback(ipage, NODE, true);
src_addr = inline_data_addr(npage);
dst_addr = inline_data_addr(ipage);
memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
- set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
- set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
+ set_inode_flag(inode, FI_INLINE_DATA);
+ set_inode_flag(inode, FI_DATA_EXIST);
- update_inode(inode, ipage);
+ set_page_dirty(ipage);
f2fs_put_page(ipage, 1);
return true;
}
@@ -296,7 +287,6 @@ process_inline:
if (!truncate_inline_inode(ipage, 0))
return false;
f2fs_clear_inline_inode(inode);
- update_inode(inode, ipage);
f2fs_put_page(ipage, 1);
} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
if (truncate_blocks(inode, 0, false))
@@ -307,7 +297,7 @@ process_inline:
}
struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
- struct f2fs_filename *fname, struct page **res_page)
+ struct fscrypt_name *fname, struct page **res_page)
{
struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
struct f2fs_inline_dentry *inline_dentry;
@@ -318,10 +308,12 @@ struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
f2fs_hash_t namehash;
ipage = get_node_page(sbi, dir->i_ino);
- if (IS_ERR(ipage))
+ if (IS_ERR(ipage)) {
+ *res_page = ipage;
return NULL;
+ }
- namehash = f2fs_dentry_hash(&name, fname);
+ namehash = f2fs_dentry_hash(&name);
inline_dentry = inline_data_addr(ipage);
@@ -333,30 +325,6 @@ struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
else
f2fs_put_page(ipage, 0);
- /*
- * For the most part, it should be a bug when name_len is zero.
- * We stop here for figuring out where the bugs has occurred.
- */
- f2fs_bug_on(sbi, d.max < 0);
- return de;
-}
-
-struct f2fs_dir_entry *f2fs_parent_inline_dir(struct inode *dir,
- struct page **p)
-{
- struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
- struct page *ipage;
- struct f2fs_dir_entry *de;
- struct f2fs_inline_dentry *dentry_blk;
-
- ipage = get_node_page(sbi, dir->i_ino);
- if (IS_ERR(ipage))
- return NULL;
-
- dentry_blk = inline_data_addr(ipage);
- de = &dentry_blk->dentry[1];
- *p = ipage;
- unlock_page(ipage);
return de;
}
@@ -374,10 +342,8 @@ int make_empty_inline_dir(struct inode *inode, struct inode *parent,
set_page_dirty(ipage);
/* update i_size to MAX_INLINE_DATA */
- if (i_size_read(inode) < MAX_INLINE_DATA) {
- i_size_write(inode, MAX_INLINE_DATA);
- set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
- }
+ if (i_size_read(inode) < MAX_INLINE_DATA)
+ f2fs_i_size_write(inode, MAX_INLINE_DATA);
return 0;
}
@@ -385,7 +351,7 @@ int make_empty_inline_dir(struct inode *inode, struct inode *parent,
* NOTE: ipage is grabbed by caller, but if any error occurs, we should
* release ipage in this function.
*/
-static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
+static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
struct f2fs_inline_dentry *inline_dentry)
{
struct page *page;
@@ -393,7 +359,7 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
struct f2fs_dentry_block *dentry_blk;
int err;
- page = grab_cache_page(dir->i_mapping, 0);
+ page = f2fs_grab_cache_page(dir->i_mapping, 0, false);
if (!page) {
f2fs_put_page(ipage, 1);
return -ENOMEM;
@@ -404,8 +370,8 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
if (err)
goto out;
- f2fs_wait_on_page_writeback(page, DATA);
- zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
+ f2fs_wait_on_page_writeback(page, DATA, true);
+ zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE);
dentry_blk = kmap_atomic(page);
@@ -426,37 +392,132 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
NR_INLINE_DENTRY * F2FS_SLOT_LEN);
kunmap_atomic(dentry_blk);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
set_page_dirty(page);
/* clear inline dir and flag after data writeback */
truncate_inline_inode(ipage, 0);
stat_dec_inline_dir(dir);
- clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);
+ clear_inode_flag(dir, FI_INLINE_DENTRY);
- if (i_size_read(dir) < PAGE_CACHE_SIZE) {
- i_size_write(dir, PAGE_CACHE_SIZE);
- set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
- }
-
- sync_inode_page(&dn);
+ f2fs_i_depth_write(dir, 1);
+ if (i_size_read(dir) < PAGE_SIZE)
+ f2fs_i_size_write(dir, PAGE_SIZE);
out:
f2fs_put_page(page, 1);
return err;
}
-int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
- struct inode *inode, nid_t ino, umode_t mode)
+static int f2fs_add_inline_entries(struct inode *dir,
+ struct f2fs_inline_dentry *inline_dentry)
+{
+ struct f2fs_dentry_ptr d;
+ unsigned long bit_pos = 0;
+ int err = 0;
+
+ make_dentry_ptr(NULL, &d, (void *)inline_dentry, 2);
+
+ while (bit_pos < d.max) {
+ struct f2fs_dir_entry *de;
+ struct qstr new_name;
+ nid_t ino;
+ umode_t fake_mode;
+
+ if (!test_bit_le(bit_pos, d.bitmap)) {
+ bit_pos++;
+ continue;
+ }
+
+ de = &d.dentry[bit_pos];
+
+ if (unlikely(!de->name_len)) {
+ bit_pos++;
+ continue;
+ }
+
+ new_name.name = d.filename[bit_pos];
+ new_name.len = le16_to_cpu(de->name_len);
+
+ ino = le32_to_cpu(de->ino);
+ fake_mode = get_de_type(de) << S_SHIFT;
+
+ err = f2fs_add_regular_entry(dir, &new_name, NULL, NULL,
+ ino, fake_mode);
+ if (err)
+ goto punch_dentry_pages;
+
+ bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
+ }
+ return 0;
+punch_dentry_pages:
+ truncate_inode_pages(&dir->i_data, 0);
+ truncate_blocks(dir, 0, false);
+ remove_dirty_inode(dir);
+ return err;
+}
+
+static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
+ struct f2fs_inline_dentry *inline_dentry)
+{
+ struct f2fs_inline_dentry *backup_dentry;
+ int err;
+
+ backup_dentry = f2fs_kmalloc(F2FS_I_SB(dir),
+ sizeof(struct f2fs_inline_dentry), GFP_F2FS_ZERO);
+ if (!backup_dentry) {
+ f2fs_put_page(ipage, 1);
+ return -ENOMEM;
+ }
+
+ memcpy(backup_dentry, inline_dentry, MAX_INLINE_DATA);
+ truncate_inline_inode(ipage, 0);
+
+ unlock_page(ipage);
+
+ err = f2fs_add_inline_entries(dir, backup_dentry);
+ if (err)
+ goto recover;
+
+ lock_page(ipage);
+
+ stat_dec_inline_dir(dir);
+ clear_inode_flag(dir, FI_INLINE_DENTRY);
+ kfree(backup_dentry);
+ return 0;
+recover:
+ lock_page(ipage);
+ memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA);
+ f2fs_i_depth_write(dir, 0);
+ f2fs_i_size_write(dir, MAX_INLINE_DATA);
+ set_page_dirty(ipage);
+ f2fs_put_page(ipage, 1);
+
+ kfree(backup_dentry);
+ return err;
+}
+
+static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
+ struct f2fs_inline_dentry *inline_dentry)
+{
+ if (!F2FS_I(dir)->i_dir_level)
+ return f2fs_move_inline_dirents(dir, ipage, inline_dentry);
+ else
+ return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry);
+}
+
+int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
+ const struct qstr *orig_name,
+ struct inode *inode, nid_t ino, umode_t mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct page *ipage;
unsigned int bit_pos;
f2fs_hash_t name_hash;
- size_t namelen = name->len;
struct f2fs_inline_dentry *dentry_blk = NULL;
struct f2fs_dentry_ptr d;
- int slots = GET_DENTRY_SLOTS(namelen);
+ int slots = GET_DENTRY_SLOTS(new_name->len);
struct page *page = NULL;
int err = 0;
@@ -477,25 +538,27 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
if (inode) {
down_write(&F2FS_I(inode)->i_sem);
- page = init_inode_metadata(inode, dir, name, ipage);
+ page = init_inode_metadata(inode, dir, new_name,
+ orig_name, ipage);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto fail;
}
+ if (f2fs_encrypted_inode(dir))
+ file_set_enc_name(inode);
}
- f2fs_wait_on_page_writeback(ipage, NODE);
+ f2fs_wait_on_page_writeback(ipage, NODE, true);
- name_hash = f2fs_dentry_hash(name, NULL);
+ name_hash = f2fs_dentry_hash(new_name);
make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
- f2fs_update_dentry(ino, mode, &d, name, name_hash, bit_pos);
+ f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos);
set_page_dirty(ipage);
/* we don't need to mark_inode_dirty now */
if (inode) {
- F2FS_I(inode)->i_pino = dir->i_ino;
- update_inode(inode, page);
+ f2fs_i_pino_write(inode, dir->i_ino);
f2fs_put_page(page, 1);
}
@@ -503,11 +566,6 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
fail:
if (inode)
up_write(&F2FS_I(inode)->i_sem);
-
- if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
- update_inode(dir, ipage);
- clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
- }
out:
f2fs_put_page(ipage, 1);
return err;
@@ -522,22 +580,22 @@ void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
int i;
lock_page(page);
- f2fs_wait_on_page_writeback(page, NODE);
+ f2fs_wait_on_page_writeback(page, NODE, true);
inline_dentry = inline_data_addr(page);
bit_pos = dentry - inline_dentry->dentry;
for (i = 0; i < slots; i++)
- test_and_clear_bit_le(bit_pos + i,
+ __clear_bit_le(bit_pos + i,
&inline_dentry->dentry_bitmap);
set_page_dirty(page);
+ f2fs_put_page(page, 1);
- dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+ dir->i_ctime = dir->i_mtime = current_time(dir);
+ f2fs_mark_inode_dirty_sync(dir, false);
if (inode)
- f2fs_drop_nlink(dir, inode, page);
-
- f2fs_put_page(page, 1);
+ f2fs_drop_nlink(dir, inode);
}
bool f2fs_empty_inline_dir(struct inode *dir)
@@ -565,12 +623,13 @@ bool f2fs_empty_inline_dir(struct inode *dir)
}
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
- struct f2fs_str *fstr)
+ struct fscrypt_str *fstr)
{
struct inode *inode = file_inode(file);
struct f2fs_inline_dentry *inline_dentry = NULL;
struct page *ipage = NULL;
struct f2fs_dentry_ptr d;
+ int err;
if (ctx->pos == NR_INLINE_DENTRY)
return 0;
@@ -583,11 +642,12 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
make_dentry_ptr(inode, &d, (void *)inline_dentry, 2);
- if (!f2fs_fill_dentries(ctx, &d, 0, fstr))
+ err = f2fs_fill_dentries(ctx, &d, 0, fstr);
+ if (!err)
ctx->pos = NR_INLINE_DENTRY;
f2fs_put_page(ipage, 1);
- return 0;
+ return err < 0 ? err : 0;
}
int f2fs_inline_data_fiemap(struct inode *inode,
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 97e20decacb4..af06bda51a54 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -11,6 +11,7 @@
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
+#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include "f2fs.h"
@@ -18,6 +19,14 @@
#include <trace/events/f2fs.h>
+void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
+{
+ if (f2fs_inode_dirtied(inode, sync))
+ return;
+
+ mark_inode_dirty_sync(inode);
+}
+
void f2fs_set_inode_flags(struct inode *inode)
{
unsigned int flags = F2FS_I(inode)->i_flags;
@@ -35,6 +44,7 @@ void f2fs_set_inode_flags(struct inode *inode)
new_fl |= S_DIRSYNC;
inode_set_flags(inode, new_fl,
S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+ f2fs_mark_inode_dirty_sync(inode, false);
}
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
@@ -83,10 +93,10 @@ static void __recover_inline_status(struct inode *inode, struct page *ipage)
while (start < end) {
if (*start++) {
- f2fs_wait_on_page_writeback(ipage, NODE);
+ f2fs_wait_on_page_writeback(ipage, NODE, true);
- set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
- set_raw_inline(F2FS_I(inode), F2FS_INODE(ipage));
+ set_inode_flag(inode, FI_DATA_EXIST);
+ set_raw_inline(inode, F2FS_INODE(ipage));
set_page_dirty(ipage);
return;
}
@@ -138,9 +148,10 @@ static int do_read_inode(struct inode *inode)
fi->i_pino = le32_to_cpu(ri->i_pino);
fi->i_dir_level = ri->i_dir_level;
- f2fs_init_extent_tree(inode, &ri->i_ext);
+ if (f2fs_init_extent_tree(inode, &ri->i_ext))
+ set_page_dirty(node_page);
- get_inline_info(fi, ri);
+ get_inline_info(inode, ri);
/* check data exist */
if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
@@ -150,7 +161,10 @@ static int do_read_inode(struct inode *inode)
__get_inode_rdev(inode, ri);
if (__written_first_block(ri))
- set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
+ set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+
+ if (!need_inode_block_update(sbi, inode->i_ino))
+ fi->last_disk_size = inode->i_size;
f2fs_put_page(node_page, 1);
@@ -202,6 +216,7 @@ make_now:
inode->i_op = &f2fs_encrypted_symlink_inode_operations;
else
inode->i_op = &f2fs_symlink_inode_operations;
+ inode_nohighmem(inode);
inode->i_mapping->a_ops = &f2fs_dblock_aops;
} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
@@ -221,11 +236,28 @@ bad_inode:
return ERR_PTR(ret);
}
-void update_inode(struct inode *inode, struct page *node_page)
+struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
+{
+ struct inode *inode;
+retry:
+ inode = f2fs_iget(sb, ino);
+ if (IS_ERR(inode)) {
+ if (PTR_ERR(inode) == -ENOMEM) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry;
+ }
+ }
+ return inode;
+}
+
+int update_inode(struct inode *inode, struct page *node_page)
{
struct f2fs_inode *ri;
+ struct extent_tree *et = F2FS_I(inode)->extent_tree;
+
+ f2fs_inode_synced(inode);
- f2fs_wait_on_page_writeback(node_page, NODE);
+ f2fs_wait_on_page_writeback(node_page, NODE, true);
ri = F2FS_INODE(node_page);
@@ -237,12 +269,14 @@ void update_inode(struct inode *inode, struct page *node_page)
ri->i_size = cpu_to_le64(i_size_read(inode));
ri->i_blocks = cpu_to_le64(inode->i_blocks);
- if (F2FS_I(inode)->extent_tree)
- set_raw_extent(&F2FS_I(inode)->extent_tree->largest,
- &ri->i_ext);
- else
+ if (et) {
+ read_lock(&et->lock);
+ set_raw_extent(&et->largest, &ri->i_ext);
+ read_unlock(&et->lock);
+ } else {
memset(&ri->i_ext, 0, sizeof(ri->i_ext));
- set_raw_inline(F2FS_I(inode), ri);
+ }
+ set_raw_inline(inode, ri);
ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
@@ -259,15 +293,19 @@ void update_inode(struct inode *inode, struct page *node_page)
__set_inode_rdev(inode, ri);
set_cold_node(inode, node_page);
- set_page_dirty(node_page);
- clear_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
+ /* deleted inode */
+ if (inode->i_nlink == 0)
+ clear_inline_node(node_page);
+
+ return set_page_dirty(node_page);
}
-void update_inode_page(struct inode *inode)
+int update_inode_page(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct page *node_page;
+ int ret = 0;
retry:
node_page = get_node_page(sbi, inode->i_ino);
if (IS_ERR(node_page)) {
@@ -276,12 +314,14 @@ retry:
cond_resched();
goto retry;
} else if (err != -ENOENT) {
- f2fs_stop_checkpoint(sbi);
+ f2fs_stop_checkpoint(sbi, false);
}
- return;
+ f2fs_inode_synced(inode);
+ return 0;
}
- update_inode(inode, node_page);
+ ret = update_inode(inode, node_page);
f2fs_put_page(node_page, 1);
+ return ret;
}
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
@@ -292,16 +332,15 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
inode->i_ino == F2FS_META_INO(sbi))
return 0;
- if (!is_inode_flag_set(F2FS_I(inode), FI_DIRTY_INODE))
+ if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
return 0;
/*
* We need to balance fs here to prevent producing dirty node pages
* during the urgent cleaning time when running out of free sections.
*/
- update_inode_page(inode);
-
- f2fs_balance_fs(sbi);
+ if (update_inode_page(inode) && wbc && wbc->nr_to_write)
+ f2fs_balance_fs(sbi, true);
return 0;
}
@@ -311,13 +350,12 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
void f2fs_evict_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct f2fs_inode_info *fi = F2FS_I(inode);
- nid_t xnid = fi->i_xattr_nid;
+ nid_t xnid = F2FS_I(inode)->i_xattr_nid;
int err = 0;
/* any remaining atomic pages should be discarded */
if (f2fs_is_atomic_file(inode))
- commit_inmem_pages(inode, true);
+ drop_inmem_pages(inode);
trace_f2fs_evict_inode(inode);
truncate_inode_pages_final(&inode->i_data);
@@ -327,26 +365,44 @@ void f2fs_evict_inode(struct inode *inode)
goto out_clear;
f2fs_bug_on(sbi, get_dirty_pages(inode));
- remove_dirty_dir_inode(inode);
+ remove_dirty_inode(inode);
f2fs_destroy_extent_tree(inode);
if (inode->i_nlink || is_bad_inode(inode))
goto no_delete;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_EVICT_INODE))
+ goto no_delete;
+#endif
+
+ remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
+ remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
+
sb_start_intwrite(inode->i_sb);
- set_inode_flag(fi, FI_NO_ALLOC);
+ set_inode_flag(inode, FI_NO_ALLOC);
i_size_write(inode, 0);
-
+retry:
if (F2FS_HAS_BLOCKS(inode))
- err = f2fs_truncate(inode, true);
+ err = f2fs_truncate(inode);
if (!err) {
f2fs_lock_op(sbi);
err = remove_inode_page(inode);
f2fs_unlock_op(sbi);
+ if (err == -ENOENT)
+ err = 0;
}
+ /* give more chances in the ENOMEM case */
+ if (err == -ENOMEM) {
+ err = 0;
+ goto retry;
+ }
+
+ if (err)
+ update_inode_page(inode);
sb_end_intwrite(inode->i_sb);
no_delete:
stat_dec_inline_xattr(inode);
@@ -356,36 +412,20 @@ no_delete:
invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
if (xnid)
invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
- if (is_inode_flag_set(fi, FI_APPEND_WRITE))
- add_dirty_inode(sbi, inode->i_ino, APPEND_INO);
- if (is_inode_flag_set(fi, FI_UPDATE_WRITE))
- add_dirty_inode(sbi, inode->i_ino, UPDATE_INO);
- if (is_inode_flag_set(fi, FI_FREE_NID)) {
- if (err && err != -ENOENT)
- alloc_nid_done(sbi, inode->i_ino);
- else
- alloc_nid_failed(sbi, inode->i_ino);
- clear_inode_flag(fi, FI_FREE_NID);
+ if (inode->i_nlink) {
+ if (is_inode_flag_set(inode, FI_APPEND_WRITE))
+ add_ino_entry(sbi, inode->i_ino, APPEND_INO);
+ if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
+ add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
}
-
- if (err && err != -ENOENT) {
- if (!exist_written_data(sbi, inode->i_ino, ORPHAN_INO)) {
- /*
- * get here because we failed to release resource
- * of inode previously, reminder our user to run fsck
- * for fixing.
- */
- set_sbi_flag(sbi, SBI_NEED_FSCK);
- f2fs_msg(sbi->sb, KERN_WARNING,
- "inode (ino:%lu) resource leak, run fsck "
- "to fix this issue!", inode->i_ino);
- }
+ if (is_inode_flag_set(inode, FI_FREE_NID)) {
+ alloc_nid_failed(sbi, inode->i_ino);
+ clear_inode_flag(inode, FI_FREE_NID);
}
+ f2fs_bug_on(sbi, err &&
+ !exist_written_data(sbi, inode->i_ino, ORPHAN_INO));
out_clear:
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- if (fi->i_crypt_info)
- f2fs_free_encryption_info(inode, fi->i_crypt_info);
-#endif
+ fscrypt_put_encryption_info(inode, NULL);
clear_inode(inode);
}
@@ -393,37 +433,44 @@ out_clear:
void handle_failed_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- int err = 0;
+ struct node_info ni;
+ /*
+ * clear nlink of the inode in order to release its resources
+ * immediately.
+ */
clear_nlink(inode);
- make_bad_inode(inode);
- unlock_new_inode(inode);
- i_size_write(inode, 0);
- if (F2FS_HAS_BLOCKS(inode))
- err = f2fs_truncate(inode, false);
+ /*
+ * we must call this to avoid the inode remaining dirty, which would
+ * result in a panic when flushing dirty inodes in gdirty_list.
+ */
+ update_inode_page(inode);
- if (!err)
- err = remove_inode_page(inode);
+ /* don't mark this a bad inode, since it becomes a regular file. */
+ unlock_new_inode(inode);
/*
- * if we skip truncate_node in remove_inode_page bacause we failed
- * before, it's better to find another way to release resource of
- * this inode (e.g. valid block count, node block or nid). Here we
- * choose to add this inode to orphan list, so that we can call iput
- * for releasing in orphan recovery flow.
- *
* Note: we should add inode to orphan list before f2fs_unlock_op()
* so we can prevent losing this orphan when encountering a checkpoint
* followed by a sudden power-off.
*/
- if (err && err != -ENOENT) {
- err = acquire_orphan_inode(sbi);
- if (!err)
- add_orphan_inode(sbi, inode->i_ino);
+ get_node_info(sbi, inode->i_ino, &ni);
+
+ if (ni.blk_addr != NULL_ADDR) {
+ int err = acquire_orphan_inode(sbi);
+ if (err) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "Too many orphan inodes, run fsck to fix.");
+ } else {
+ add_orphan_inode(inode);
+ }
+ alloc_nid_done(sbi, inode->i_ino);
+ } else {
+ set_inode_flag(inode, FI_FREE_NID);
}
- set_inode_flag(F2FS_I(inode), FI_FREE_NID);
f2fs_unlock_op(sbi);
/* iput will drop the inode object */
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 2c32110f9fc0..468b2dbe6d34 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -46,7 +46,7 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
inode->i_ino = ino;
inode->i_blocks = 0;
- inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+ inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
inode->i_generation = sbi->s_next_generation++;
err = insert_inode_locked(inode);
@@ -60,10 +60,14 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
if (f2fs_encrypted_inode(dir) && f2fs_may_encrypt(inode))
f2fs_set_encrypted_inode(inode);
- if (f2fs_may_inline_data(inode))
- set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
+ set_inode_flag(inode, FI_NEW_INODE);
+
+ if (test_opt(sbi, INLINE_XATTR))
+ set_inode_flag(inode, FI_INLINE_XATTR);
+ if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode))
+ set_inode_flag(inode, FI_INLINE_DATA);
if (f2fs_may_inline_dentry(inode))
- set_inode_flag(F2FS_I(inode), FI_INLINE_DENTRY);
+ set_inode_flag(inode, FI_INLINE_DENTRY);
f2fs_init_extent_tree(inode, NULL);
@@ -72,14 +76,13 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
stat_inc_inline_dir(inode);
trace_f2fs_new_inode(inode, 0);
- mark_inode_dirty(inode);
return inode;
fail:
trace_f2fs_new_inode(inode, err);
make_bad_inode(inode);
if (nid_free)
- set_inode_flag(F2FS_I(inode), FI_FREE_NID);
+ set_inode_flag(inode, FI_FREE_NID);
iput(inode);
return ERR_PTR(err);
}
@@ -88,18 +91,23 @@ static int is_multimedia_file(const unsigned char *s, const char *sub)
{
size_t slen = strlen(s);
size_t sublen = strlen(sub);
+ int i;
/*
* the filename format of a multimedia file should be defined as:
- * "filename + '.' + extension".
+ * "filename + '.' + extension + (optional: '.' + temp extension)".
*/
if (slen < sublen + 2)
return 0;
- if (s[slen - sublen - 1] != '.')
- return 0;
+ for (i = 1; i < slen - sublen; i++) {
+ if (s[i] != '.')
+ continue;
+ if (!strncasecmp(s + i + 1, sub, sublen))
+ return 1;
+ }
- return !strncasecmp(s + slen - sublen, sub, sublen);
+ return 0;
}
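
is_multimedia_file() now scans every '.'-separated component instead of only the final one, so a file carrying a temporary suffix (e.g. "clip.mp4.tmp") is still classified by its real extension. A self-contained sketch of the matcher with a tiny usage check:

#include <stdio.h>
#include <string.h>
#include <strings.h>

static int is_ext_match(const char *s, const char *sub)
{
	size_t slen = strlen(s), sublen = strlen(sub), i;

	if (slen < sublen + 2)
		return 0;
	for (i = 1; i + sublen < slen; i++) {
		if (s[i] != '.')
			continue;
		if (!strncasecmp(s + i + 1, sub, sublen))
			return 1;       /* case-insensitive component match */
	}
	return 0;
}

int main(void)
{
	printf("%d %d\n", is_ext_match("a.MP3", "mp3"),      /* 1 */
			  is_ext_match("a.mp3.tmp", "mp3")); /* 1 */
	return 0;
}
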
/*
@@ -128,8 +136,6 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
nid_t ino = 0;
int err;
- f2fs_balance_fs(sbi);
-
inode = f2fs_new_inode(dir, mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -142,6 +148,8 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
inode->i_mapping->a_ops = &f2fs_dblock_aops;
ino = inode->i_ino;
+ f2fs_balance_fs(sbi, true);
+
f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
if (err)
@@ -169,15 +177,15 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
int err;
if (f2fs_encrypted_inode(dir) &&
- !f2fs_is_child_context_consistent_with_parent(dir, inode))
+ !fscrypt_has_permitted_context(dir, inode))
return -EPERM;
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
- inode->i_ctime = CURRENT_TIME;
+ inode->i_ctime = current_time(inode);
ihold(inode);
- set_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ set_inode_flag(inode, FI_INC_LINK);
f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
if (err)
@@ -190,7 +198,7 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
f2fs_sync_fs(sbi->sb, 1);
return 0;
out:
- clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ clear_inode_flag(inode, FI_INC_LINK);
iput(inode);
f2fs_unlock_op(sbi);
return err;
@@ -199,10 +207,14 @@ out:
struct dentry *f2fs_get_parent(struct dentry *child)
{
struct qstr dotdot = QSTR_INIT("..", 2);
- unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot);
- if (!ino)
+ struct page *page;
+ unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot, &page);
+ if (!ino) {
+ if (IS_ERR(page))
+ return ERR_CAST(page);
return ERR_PTR(-ENOENT);
- return d_obtain_alias(f2fs_iget(d_inode(child)->i_sb, ino));
+ }
+ return d_obtain_alias(f2fs_iget(child->d_sb, ino));
}
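
f2fs_get_parent() can now see three outcomes from f2fs_inode_by_name(): a valid ino, "not found", or a page pointer that encodes an errno. The kernel's ERR_PTR()/IS_ERR()/PTR_ERR() macros pack a negative errno into the top of the pointer range so a single return value carries both cases; a userspace sketch of that convention:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095
#define ERR_PTR(e)  ((void *)(long)(e))
#define PTR_ERR(p)  ((long)(p))
#define IS_ERR(p)   ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)

static void *find_entry(int fail)
{
	return fail ? ERR_PTR(-EIO) : NULL;  /* NULL means simply not found */
}

int main(void)
{
	void *page = find_entry(1);

	if (!page)
		printf("-ENOENT: no entry\n");
	else if (IS_ERR(page))
		printf("propagate %ld\n", PTR_ERR(page)); /* -EIO */
	return 0;
}
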
static int __recover_dot_dentries(struct inode *dir, nid_t pino)
@@ -214,12 +226,24 @@ static int __recover_dot_dentries(struct inode *dir, nid_t pino)
struct page *page;
int err = 0;
+ if (f2fs_readonly(sbi->sb)) {
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "skip recovering inline_dots inode (ino:%lu, pino:%u) "
+ "in readonly mountpoint", dir->i_ino, pino);
+ return 0;
+ }
+
+ f2fs_balance_fs(sbi, true);
+
f2fs_lock_op(sbi);
de = f2fs_find_entry(dir, &dot, &page);
if (de) {
f2fs_dentry_kunmap(dir, page);
f2fs_put_page(page, 0);
+ } else if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ goto out;
} else {
err = __f2fs_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR);
if (err)
@@ -230,14 +254,14 @@ static int __recover_dot_dentries(struct inode *dir, nid_t pino)
if (de) {
f2fs_dentry_kunmap(dir, page);
f2fs_put_page(page, 0);
+ } else if (IS_ERR(page)) {
+ err = PTR_ERR(page);
} else {
err = __f2fs_add_link(dir, &dotdot, NULL, pino, S_IFDIR);
}
out:
- if (!err) {
- clear_inode_flag(F2FS_I(dir), FI_INLINE_DOTS);
- mark_inode_dirty(dir);
- }
+ if (!err)
+ clear_inode_flag(dir, FI_INLINE_DOTS);
f2fs_unlock_op(sbi);
return err;
@@ -251,13 +275,32 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
struct page *page;
nid_t ino;
int err = 0;
+ unsigned int root_ino = F2FS_ROOT_INO(F2FS_I_SB(dir));
+
+ if (f2fs_encrypted_inode(dir)) {
+ int res = fscrypt_get_encryption_info(dir);
+
+ /*
+ * DCACHE_ENCRYPTED_WITH_KEY is set if the dentry is
+ * created while the directory was encrypted and we
+ * have access to the key.
+ */
+ if (fscrypt_has_encryption_key(dir))
+ fscrypt_set_encrypted_dentry(dentry);
+ fscrypt_set_d_op(dentry);
+ if (res && res != -ENOKEY)
+ return ERR_PTR(res);
+ }
if (dentry->d_name.len > F2FS_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
de = f2fs_find_entry(dir, &dentry->d_name, &page);
- if (!de)
+ if (!de) {
+ if (IS_ERR(page))
+ return (struct dentry *)page;
return d_splice_alias(inode, dentry);
+ }
ino = le32_to_cpu(de->ino);
f2fs_dentry_kunmap(dir, page);
@@ -267,15 +310,29 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
if (IS_ERR(inode))
return ERR_CAST(inode);
+ if ((dir->i_ino == root_ino) && f2fs_has_inline_dots(dir)) {
+ err = __recover_dot_dentries(dir, root_ino);
+ if (err)
+ goto err_out;
+ }
+
if (f2fs_has_inline_dots(inode)) {
err = __recover_dot_dentries(inode, dir->i_ino);
if (err)
goto err_out;
}
+ if (!IS_ERR(inode) && f2fs_encrypted_inode(dir) &&
+ (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
+ !fscrypt_has_permitted_context(dir, inode)) {
+ bool nokey = f2fs_encrypted_inode(inode) &&
+ !fscrypt_has_encryption_key(inode);
+ err = nokey ? -ENOKEY : -EPERM;
+ goto err_out;
+ }
return d_splice_alias(inode, dentry);
err_out:
- iget_failed(inode);
+ iput(inode);
return ERR_PTR(err);
}
@@ -288,11 +345,15 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
int err = -ENOENT;
trace_f2fs_unlink_enter(dir, dentry);
- f2fs_balance_fs(sbi);
de = f2fs_find_entry(dir, &dentry->d_name, &page);
- if (!de)
+ if (!de) {
+ if (IS_ERR(page))
+ err = PTR_ERR(page);
goto fail;
+ }
+
+ f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
err = acquire_orphan_inode(sbi);
@@ -305,9 +366,6 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
f2fs_delete_entry(de, page, dir, inode);
f2fs_unlock_op(sbi);
- /* In order to evict this inode, we set it dirty */
- mark_inode_dirty(inode);
-
if (IS_DIRSYNC(dir))
f2fs_sync_fs(sbi->sb, 1);
fail:
@@ -332,16 +390,24 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct inode *inode;
size_t len = strlen(symname);
- size_t p_len;
- char *p_str;
- struct f2fs_str disk_link = FSTR_INIT(NULL, 0);
- struct f2fs_encrypted_symlink_data *sd = NULL;
+ struct fscrypt_str disk_link = FSTR_INIT((char *)symname, len + 1);
+ struct fscrypt_symlink_data *sd = NULL;
int err;
- if (len > dir->i_sb->s_blocksize)
- return -ENAMETOOLONG;
+ if (f2fs_encrypted_inode(dir)) {
+ err = fscrypt_get_encryption_info(dir);
+ if (err)
+ return err;
+
+ if (!fscrypt_has_encryption_key(dir))
+ return -EPERM;
- f2fs_balance_fs(sbi);
+ disk_link.len = (fscrypt_fname_encrypted_size(dir, len) +
+ sizeof(struct fscrypt_symlink_data));
+ }
+
+ if (disk_link.len > dir->i_sb->s_blocksize)
+ return -ENAMETOOLONG;
inode = f2fs_new_inode(dir, S_IFLNK | S_IRWXUGO);
if (IS_ERR(inode))
@@ -351,8 +417,11 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
inode->i_op = &f2fs_encrypted_symlink_inode_operations;
else
inode->i_op = &f2fs_symlink_inode_operations;
+ inode_nohighmem(inode);
inode->i_mapping->a_ops = &f2fs_dblock_aops;
+ f2fs_balance_fs(sbi, true);
+
f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
if (err)
@@ -360,42 +429,36 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
f2fs_unlock_op(sbi);
alloc_nid_done(sbi, inode->i_ino);
- if (f2fs_encrypted_inode(dir)) {
+ if (f2fs_encrypted_inode(inode)) {
struct qstr istr = QSTR_INIT(symname, len);
+ struct fscrypt_str ostr;
- err = f2fs_get_encryption_info(inode);
- if (err)
+ sd = kzalloc(disk_link.len, GFP_NOFS);
+ if (!sd) {
+ err = -ENOMEM;
goto err_out;
+ }
- err = f2fs_fname_crypto_alloc_buffer(inode, len, &disk_link);
+ err = fscrypt_get_encryption_info(inode);
if (err)
goto err_out;
- err = f2fs_fname_usr_to_disk(inode, &istr, &disk_link);
- if (err < 0)
- goto err_out;
-
- p_len = encrypted_symlink_data_len(disk_link.len) + 1;
-
- if (p_len > dir->i_sb->s_blocksize) {
- err = -ENAMETOOLONG;
+ if (!fscrypt_has_encryption_key(inode)) {
+ err = -EPERM;
goto err_out;
}
- sd = kzalloc(p_len, GFP_NOFS);
- if (!sd) {
- err = -ENOMEM;
+ ostr.name = sd->encrypted_path;
+ ostr.len = disk_link.len;
+ err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr);
+ if (err)
goto err_out;
- }
- memcpy(sd->encrypted_path, disk_link.name, disk_link.len);
- sd->len = cpu_to_le16(disk_link.len);
- p_str = (char *)sd;
- } else {
- p_len = len + 1;
- p_str = (char *)symname;
+
+ sd->len = cpu_to_le16(ostr.len);
+ disk_link.name = (char *)sd;
}
- err = page_symlink(inode, p_str, p_len);
+ err = page_symlink(inode, disk_link.name, disk_link.len);
err_out:
d_instantiate(dentry, inode);
@@ -411,7 +474,8 @@ err_out:
* performance regression.
*/
if (!err) {
- filemap_write_and_wait_range(inode->i_mapping, 0, p_len - 1);
+ filemap_write_and_wait_range(inode->i_mapping, 0,
+ disk_link.len - 1);
if (IS_DIRSYNC(dir))
f2fs_sync_fs(sbi->sb, 1);
@@ -420,7 +484,6 @@ err_out:
}
kfree(sd);
- f2fs_fname_crypto_free_buffer(&disk_link);
return err;
out:
handle_failed_inode(inode);
@@ -433,8 +496,6 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
struct inode *inode;
int err;
- f2fs_balance_fs(sbi);
-
inode = f2fs_new_inode(dir, S_IFDIR | mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -444,7 +505,9 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
inode->i_mapping->a_ops = &f2fs_dblock_aops;
mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);
- set_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ f2fs_balance_fs(sbi, true);
+
+ set_inode_flag(inode, FI_INC_LINK);
f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
if (err)
@@ -461,7 +524,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
return 0;
out_fail:
- clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ clear_inode_flag(inode, FI_INC_LINK);
handle_failed_inode(inode);
return err;
}
@@ -481,8 +544,6 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
struct inode *inode;
int err = 0;
- f2fs_balance_fs(sbi);
-
inode = f2fs_new_inode(dir, mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -490,6 +551,8 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
init_special_inode(inode, inode->i_mode, rdev);
inode->i_op = &f2fs_special_inode_operations;
+ f2fs_balance_fs(sbi, true);
+
f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
if (err)
@@ -516,9 +579,6 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry,
struct inode *inode;
int err;
- if (!whiteout)
- f2fs_balance_fs(sbi);
-
inode = f2fs_new_inode(dir, mode);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -532,6 +592,8 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry,
inode->i_mapping->a_ops = &f2fs_dblock_aops;
}
+ f2fs_balance_fs(sbi, true);
+
f2fs_lock_op(sbi);
err = acquire_orphan_inode(sbi);
if (err)
@@ -545,17 +607,17 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry,
* add this unlinked tmpfile to the orphan list, so that we can
* remove all of its unused data after an abnormal power-off.
*/
- add_orphan_inode(sbi, inode->i_ino);
- f2fs_unlock_op(sbi);
-
+ add_orphan_inode(inode);
alloc_nid_done(sbi, inode->i_ino);
if (whiteout) {
- inode_dec_link_count(inode);
+ f2fs_i_links_write(inode, false);
*whiteout = inode;
} else {
d_tmpfile(dentry, inode);
}
+ /* link_count was changed by d_tmpfile as well. */
+ f2fs_unlock_op(sbi);
unlock_new_inode(inode);
return 0;
@@ -569,7 +631,7 @@ out:
static int f2fs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
if (f2fs_encrypted_inode(dir)) {
- int err = f2fs_get_encryption_info(dir);
+ int err = fscrypt_get_encryption_info(dir);
if (err)
return err;
}
@@ -595,26 +657,29 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct f2fs_dir_entry *old_dir_entry = NULL;
struct f2fs_dir_entry *old_entry;
struct f2fs_dir_entry *new_entry;
+ bool is_old_inline = f2fs_has_inline_dentry(old_dir);
int err = -ENOENT;
if ((old_dir != new_dir) && f2fs_encrypted_inode(new_dir) &&
- !f2fs_is_child_context_consistent_with_parent(new_dir,
- old_inode)) {
+ !fscrypt_has_permitted_context(new_dir, old_inode)) {
err = -EPERM;
goto out;
}
- f2fs_balance_fs(sbi);
-
old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
- if (!old_entry)
+ if (!old_entry) {
+ if (IS_ERR(old_page))
+ err = PTR_ERR(old_page);
goto out;
+ }
if (S_ISDIR(old_inode->i_mode)) {
- err = -EIO;
old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page);
- if (!old_dir_entry)
+ if (!old_dir_entry) {
+ if (IS_ERR(old_dir_page))
+ err = PTR_ERR(old_dir_page);
goto out_old;
+ }
}
if (flags & RENAME_WHITEOUT) {
@@ -632,8 +697,13 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
err = -ENOENT;
new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
&new_page);
- if (!new_entry)
+ if (!new_entry) {
+ if (IS_ERR(new_page))
+ err = PTR_ERR(new_page);
goto out_whiteout;
+ }
+
+ f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
@@ -641,31 +711,29 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (err)
goto put_out_dir;
- if (update_dent_inode(old_inode, new_inode,
- &new_dentry->d_name)) {
+ err = update_dent_inode(old_inode, new_inode,
+ &new_dentry->d_name);
+ if (err) {
release_orphan_inode(sbi);
goto put_out_dir;
}
f2fs_set_link(new_dir, new_entry, new_page, old_inode);
- new_inode->i_ctime = CURRENT_TIME;
+ new_inode->i_ctime = current_time(new_inode);
down_write(&F2FS_I(new_inode)->i_sem);
if (old_dir_entry)
- drop_nlink(new_inode);
- drop_nlink(new_inode);
+ f2fs_i_links_write(new_inode, false);
+ f2fs_i_links_write(new_inode, false);
up_write(&F2FS_I(new_inode)->i_sem);
- mark_inode_dirty(new_inode);
-
if (!new_inode->i_nlink)
- add_orphan_inode(sbi, new_inode->i_ino);
+ add_orphan_inode(new_inode);
else
release_orphan_inode(sbi);
-
- update_inode_page(old_inode);
- update_inode_page(new_inode);
} else {
+ f2fs_balance_fs(sbi, true);
+
f2fs_lock_op(sbi);
err = f2fs_add_link(new_dentry, old_inode);
@@ -674,9 +742,29 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
goto out_whiteout;
}
- if (old_dir_entry) {
- inc_nlink(new_dir);
- update_inode_page(new_dir);
+ if (old_dir_entry)
+ f2fs_i_links_write(new_dir, true);
+
+ /*
+ * the old entry and the new entry can live in the same inline
+ * dentry block; attaching the new entry may force inline dentry
+ * conversion, after which old_entry and old_page would point to
+ * stale addresses. To avoid this, re-check and update them here.
+ */
+ if (is_old_inline && !f2fs_has_inline_dentry(old_dir)) {
+ f2fs_put_page(old_page, 0);
+ old_page = NULL;
+
+ old_entry = f2fs_find_entry(old_dir,
+ &old_dentry->d_name, &old_page);
+ if (!old_entry) {
+ err = -ENOENT;
+ if (IS_ERR(old_page))
+ err = PTR_ERR(old_page);
+ f2fs_unlock_op(sbi);
+ goto out_whiteout;
+ }
}
}
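
The re-lookup above guards against inline dentry conversion invalidating old_entry/old_page mid-rename. The hazard is the classic one of holding a pointer into a container across an insert that may reallocate it; a small illustrative model (all names made up):

#include <stddef.h>
#include <stdlib.h>

struct dir_model { char *buf; size_t used, cap; };

static char *find_entry(struct dir_model *d, char name0)
{
	size_t i;

	for (i = 0; i < d->used; i++)
		if (d->buf[i] == name0)
			return &d->buf[i];
	return NULL;
}

static int add_entry(struct dir_model *d, char name0)
{
	if (d->used == d->cap) {
		size_t ncap = d->cap * 2 + 8;
		char *nb = realloc(d->buf, ncap);  /* may move the buffer */
		if (!nb)
			return -1;
		d->buf = nb;
		d->cap = ncap;
	}
	d->buf[d->used++] = name0;
	return 0;
}

/* usage: old = find_entry(d, 'a'); add_entry(d, 'b');
 *        old = find_entry(d, 'a');  -- must re-derive after the insert */
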
@@ -686,14 +774,14 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
file_set_enc_name(old_inode);
up_write(&F2FS_I(old_inode)->i_sem);
- old_inode->i_ctime = CURRENT_TIME;
- mark_inode_dirty(old_inode);
+ old_inode->i_ctime = current_time(old_inode);
+ f2fs_mark_inode_dirty_sync(old_inode, false);
f2fs_delete_entry(old_entry, old_page, old_dir, NULL);
if (whiteout) {
whiteout->i_state |= I_LINKABLE;
- set_inode_flag(F2FS_I(whiteout), FI_INC_LINK);
+ set_inode_flag(whiteout, FI_INC_LINK);
err = f2fs_add_link(old_dentry, whiteout);
if (err)
goto put_out_dir;
@@ -705,14 +793,11 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (old_dir != new_dir && !whiteout) {
f2fs_set_link(old_inode, old_dir_entry,
old_dir_page, new_dir);
- update_inode_page(old_inode);
} else {
f2fs_dentry_kunmap(old_inode, old_dir_page);
f2fs_put_page(old_dir_page, 0);
}
- drop_nlink(old_dir);
- mark_inode_dirty(old_dir);
- update_inode_page(old_dir);
+ f2fs_i_links_write(old_dir, false);
}
f2fs_unlock_op(sbi);
@@ -756,39 +841,45 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
int err = -ENOENT;
if ((f2fs_encrypted_inode(old_dir) || f2fs_encrypted_inode(new_dir)) &&
- (old_dir != new_dir) &&
- (!f2fs_is_child_context_consistent_with_parent(new_dir,
- old_inode) ||
- !f2fs_is_child_context_consistent_with_parent(old_dir,
- new_inode)))
+ (old_dir != new_dir) &&
+ (!fscrypt_has_permitted_context(new_dir, old_inode) ||
+ !fscrypt_has_permitted_context(old_dir, new_inode)))
return -EPERM;
- f2fs_balance_fs(sbi);
-
old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
- if (!old_entry)
+ if (!old_entry) {
+ if (IS_ERR(old_page))
+ err = PTR_ERR(old_page);
goto out;
+ }
new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page);
- if (!new_entry)
+ if (!new_entry) {
+ if (IS_ERR(new_page))
+ err = PTR_ERR(new_page);
goto out_old;
+ }
/* prepare for updating ".." directory entry info later */
if (old_dir != new_dir) {
if (S_ISDIR(old_inode->i_mode)) {
- err = -EIO;
old_dir_entry = f2fs_parent_dir(old_inode,
&old_dir_page);
- if (!old_dir_entry)
+ if (!old_dir_entry) {
+ if (IS_ERR(old_dir_page))
+ err = PTR_ERR(old_dir_page);
goto out_new;
+ }
}
if (S_ISDIR(new_inode->i_mode)) {
- err = -EIO;
new_dir_entry = f2fs_parent_dir(new_inode,
&new_dir_page);
- if (!new_dir_entry)
+ if (!new_dir_entry) {
+ if (IS_ERR(new_dir_page))
+ err = PTR_ERR(new_dir_page);
goto out_old_dir;
+ }
}
}
@@ -807,6 +898,8 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
goto out_new_dir;
}
+ f2fs_balance_fs(sbi, true);
+
f2fs_lock_op(sbi);
err = update_dent_inode(old_inode, new_inode, &new_dentry->d_name);
@@ -836,19 +929,13 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
file_lost_pino(old_inode);
up_write(&F2FS_I(old_inode)->i_sem);
- update_inode_page(old_inode);
-
old_dir->i_ctime = CURRENT_TIME;
if (old_nlink) {
down_write(&F2FS_I(old_dir)->i_sem);
- if (old_nlink < 0)
- drop_nlink(old_dir);
- else
- inc_nlink(old_dir);
+ f2fs_i_links_write(old_dir, old_nlink > 0);
up_write(&F2FS_I(old_dir)->i_sem);
}
- mark_inode_dirty(old_dir);
- update_inode_page(old_dir);
+ f2fs_mark_inode_dirty_sync(old_dir, false);
/* update directory entry info of new dir inode */
f2fs_set_link(new_dir, new_entry, new_page, old_inode);
@@ -857,19 +944,13 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
file_lost_pino(new_inode);
up_write(&F2FS_I(new_inode)->i_sem);
- update_inode_page(new_inode);
-
new_dir->i_ctime = CURRENT_TIME;
if (new_nlink) {
down_write(&F2FS_I(new_dir)->i_sem);
- if (new_nlink < 0)
- drop_nlink(new_dir);
- else
- inc_nlink(new_dir);
+ f2fs_i_links_write(new_dir, new_nlink > 0);
up_write(&F2FS_I(new_dir)->i_sem);
}
- mark_inode_dirty(new_dir);
- update_inode_page(new_dir);
+ f2fs_mark_inode_dirty_sync(new_dir, false);
f2fs_unlock_op(sbi);
@@ -922,89 +1003,85 @@ static int f2fs_rename2(struct inode *old_dir, struct dentry *old_dentry,
return f2fs_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
}
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
static const char *f2fs_encrypted_follow_link(struct dentry *dentry, void **cookie)
{
struct page *cpage = NULL;
char *caddr, *paddr = NULL;
- struct f2fs_str cstr;
- struct f2fs_str pstr = FSTR_INIT(NULL, 0);
+ struct fscrypt_str cstr = FSTR_INIT(NULL, 0);
+ struct fscrypt_str pstr = FSTR_INIT(NULL, 0);
+ struct fscrypt_symlink_data *sd;
struct inode *inode = d_inode(dentry);
- struct f2fs_encrypted_symlink_data *sd;
- loff_t size = min_t(loff_t, i_size_read(inode), PAGE_SIZE - 1);
u32 max_size = inode->i_sb->s_blocksize;
int res;
- res = f2fs_get_encryption_info(inode);
+ if (!dentry)
+ return ERR_PTR(-ECHILD);
+
+ res = fscrypt_get_encryption_info(inode);
if (res)
return ERR_PTR(res);
cpage = read_mapping_page(inode->i_mapping, 0, NULL);
if (IS_ERR(cpage))
return ERR_CAST(cpage);
- caddr = kmap(cpage);
- caddr[size] = 0;
+ caddr = page_address(cpage);
/* Symlink is encrypted */
- sd = (struct f2fs_encrypted_symlink_data *)caddr;
+ sd = (struct fscrypt_symlink_data *)caddr;
+ cstr.name = sd->encrypted_path;
cstr.len = le16_to_cpu(sd->len);
- cstr.name = kmalloc(cstr.len, GFP_NOFS);
- if (!cstr.name) {
- res = -ENOMEM;
- goto errout;
- }
- memcpy(cstr.name, sd->encrypted_path, cstr.len);
/* this is the broken symlink case */
- if (cstr.name[0] == 0 && cstr.len == 0) {
+ if (unlikely(cstr.len == 0)) {
res = -ENOENT;
goto errout;
}
- if ((cstr.len + sizeof(struct f2fs_encrypted_symlink_data) - 1) >
- max_size) {
+ if ((cstr.len + sizeof(struct fscrypt_symlink_data) - 1) > max_size) {
/* Symlink data on the disk is corrupted */
res = -EIO;
goto errout;
}
- res = f2fs_fname_crypto_alloc_buffer(inode, cstr.len, &pstr);
+ res = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr);
if (res)
goto errout;
- res = f2fs_fname_disk_to_usr(inode, NULL, &cstr, &pstr);
- if (res < 0)
+ res = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr);
+ if (res)
goto errout;
- kfree(cstr.name);
+ /* this is the broken symlink case */
+ if (unlikely(pstr.name[0] == 0)) {
+ res = -ENOENT;
+ goto errout;
+ }
paddr = pstr.name;
/* Null-terminate the name */
- paddr[res] = '\0';
+ paddr[pstr.len] = '\0';
- kunmap(cpage);
- page_cache_release(cpage);
+ put_page(cpage);
return *cookie = paddr;
errout:
- kfree(cstr.name);
- f2fs_fname_crypto_free_buffer(&pstr);
- kunmap(cpage);
- page_cache_release(cpage);
+ fscrypt_fname_free_buffer(&pstr);
+ put_page(cpage);
return ERR_PTR(res);
}
const struct inode_operations f2fs_encrypted_symlink_inode_operations = {
.readlink = generic_readlink,
- .follow_link = f2fs_encrypted_follow_link,
- .put_link = kfree_put_link,
+ .follow_link = f2fs_encrypted_follow_link,
+ .put_link = kfree_put_link,
.getattr = f2fs_getattr,
.setattr = f2fs_setattr,
+#ifdef CONFIG_F2FS_FS_XATTR
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.listxattr = f2fs_listxattr,
.removexattr = generic_removexattr,
-};
#endif
+};
const struct inode_operations f2fs_dir_inode_operations = {
.create = f2fs_create,
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 7bcbc6e9c40d..edacbabb92cf 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -45,13 +45,15 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
* give 25%, 25%, 50%, 50%, 50% of memory to each component respectively
*/
if (type == FREE_NIDS) {
- mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
- PAGE_CACHE_SHIFT;
+ mem_size = (nm_i->nid_cnt[FREE_NID_LIST] *
+ sizeof(struct free_nid)) >> PAGE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
} else if (type == NAT_ENTRIES) {
mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
- PAGE_CACHE_SHIFT;
+ PAGE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
+ if (excess_cached_nats(sbi))
+ res = false;
} else if (type == DIRTY_DENTS) {
if (sbi->sb->s_bdi->wb.dirty_exceeded)
return false;
@@ -62,16 +64,17 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
for (i = 0; i <= UPDATE_INO; i++)
mem_size += (sbi->im[i].ino_num *
- sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT;
+ sizeof(struct ino_entry)) >> PAGE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
} else if (type == EXTENT_CACHE) {
- mem_size = (sbi->total_ext_tree * sizeof(struct extent_tree) +
+ mem_size = (atomic_read(&sbi->total_ext_tree) *
+ sizeof(struct extent_tree) +
atomic_read(&sbi->total_ext_node) *
- sizeof(struct extent_node)) >> PAGE_CACHE_SHIFT;
+ sizeof(struct extent_node)) >> PAGE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
} else {
- if (sbi->sb->s_bdi->wb.dirty_exceeded)
- return false;
+ if (!sbi->sb->s_bdi->wb.dirty_exceeded)
+ return true;
}
return res;
}
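
The thresholds above size each in-memory cache against a slice of available RAM: the object count times the per-object size, converted to pages, must stay below avail_ram * ram_thresh / 100 shifted right by 2 (a 25% slice) or by 1 (a 50% slice). A worked example with made-up numbers; the entry size and threshold percentage are assumptions, not f2fs's real constants:

#include <stdio.h>

int main(void)
{
	unsigned long avail_ram = 262144;   /* pages, ~1GiB at 4KiB pages */
	unsigned long ram_thresh = 10;      /* percent, illustrative */
	unsigned long nid_cnt = 50000;      /* cached free nids */
	unsigned long entry_sz = 32;        /* assumed sizeof(struct free_nid) */
	unsigned long page_shift = 12;

	unsigned long mem_size = (nid_cnt * entry_sz) >> page_shift;
	unsigned long limit = (avail_ram * ram_thresh / 100) >> 2; /* 25% slice */

	printf("cache=%lu pages, limit=%lu pages, ok=%d\n",
	       mem_size, limit, mem_size < limit);
	return 0;
}
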
@@ -120,7 +123,7 @@ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
src_addr = page_address(src_page);
dst_addr = page_address(dst_page);
- memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
+ memcpy(dst_addr, src_addr, PAGE_SIZE);
set_page_dirty(dst_page);
f2fs_put_page(src_page, 1);
@@ -256,18 +259,22 @@ static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
return new;
}
-static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
+static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
struct f2fs_nat_entry *ne)
{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
struct nat_entry *e;
- down_write(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, nid);
if (!e) {
e = grab_nat_entry(nm_i, nid);
node_info_from_raw_nat(&e->ni, ne);
+ } else {
+ f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
+ nat_get_blkaddr(e) !=
+ le32_to_cpu(ne->block_addr) ||
+ nat_get_version(e) != ne->version);
}
- up_write(&nm_i->nat_tree_lock);
}
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
@@ -355,7 +362,7 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
nid_t start_nid = START_NID(nid);
struct f2fs_nat_block *nat_blk;
struct page *page = NULL;
@@ -372,21 +379,20 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
ni->ino = nat_get_ino(e);
ni->blk_addr = nat_get_blkaddr(e);
ni->version = nat_get_version(e);
- }
- up_read(&nm_i->nat_tree_lock);
- if (e)
+ up_read(&nm_i->nat_tree_lock);
return;
+ }
memset(&ne, 0, sizeof(struct f2fs_nat_entry));
/* Check current segment summary */
- mutex_lock(&curseg->curseg_mutex);
- i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
+ down_read(&curseg->journal_rwsem);
+ i = lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
if (i >= 0) {
- ne = nat_in_journal(sum, i);
+ ne = nat_in_journal(journal, i);
node_info_from_raw_nat(ni, &ne);
}
- mutex_unlock(&curseg->curseg_mutex);
+ up_read(&curseg->journal_rwsem);
if (i >= 0)
goto cache;
@@ -397,18 +403,75 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
node_info_from_raw_nat(ni, &ne);
f2fs_put_page(page, 1);
cache:
+ up_read(&nm_i->nat_tree_lock);
/* cache nat entry */
- cache_nat_entry(NM_I(sbi), nid, &ne);
+ down_write(&nm_i->nat_tree_lock);
+ cache_nat_entry(sbi, nid, &ne);
+ up_write(&nm_i->nat_tree_lock);
+}
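
get_node_info() now drops nat_tree_lock for reading before re-taking it for writing, because rwsems cannot be upgraded in place; cache_nat_entry() therefore re-checks the cache under the write lock before inserting. A pthread sketch of that release-recheck-insert handoff, with hypothetical cache helpers:

#include <pthread.h>

static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;

void *cache_lookup(int key);            /* hypothetical helpers */
void cache_insert(int key, void *val);

static void *lookup_or_insert(int key, void *fresh)
{
	void *v;

	pthread_rwlock_rdlock(&tree_lock);
	v = cache_lookup(key);
	pthread_rwlock_unlock(&tree_lock);
	if (v)
		return v;

	pthread_rwlock_wrlock(&tree_lock);
	v = cache_lookup(key);          /* recheck: a racing inserter may have won */
	if (!v) {
		cache_insert(key, fresh);
		v = fresh;
	}
	pthread_rwlock_unlock(&tree_lock);
	return v;
}
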
+
+/*
+ * read ahead up to MAX_RA_NODE node pages.
+ */
+static void ra_node_pages(struct page *parent, int start, int n)
+{
+ struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
+ struct blk_plug plug;
+ int i, end;
+ nid_t nid;
+
+ blk_start_plug(&plug);
+
+ /* try readahead for siblings of the desired node */
+ end = start + n;
+ end = min(end, NIDS_PER_BLOCK);
+ for (i = start; i < end; i++) {
+ nid = get_nid(parent, i, false);
+ ra_node_page(sbi, nid);
+ }
+
+ blk_finish_plug(&plug);
+}
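
ra_node_pages() brackets the sibling readahead loop with blk_start_plug()/blk_finish_plug(), letting the block layer merge the per-nid reads into larger requests before submission. A loose userspace analogue, using posix_fadvise(POSIX_FADV_WILLNEED) as the asynchronous-read stand-in (there is no explicit plug in userspace; this only mirrors the batching loop):

#include <fcntl.h>
#include <sys/types.h>

/* queue asynchronous readahead for n block-sized regions of fd;
 * the offsets and block size are illustrative. */
static void ra_batch(int fd, const off_t *offs, int n, off_t blk)
{
	int i;

	for (i = 0; i < n; i++)
		posix_fadvise(fd, offs[i], blk, POSIX_FADV_WILLNEED);
}
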
+
+pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
+{
+ const long direct_index = ADDRS_PER_INODE(dn->inode);
+ const long direct_blks = ADDRS_PER_BLOCK;
+ const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
+ unsigned int skipped_unit = ADDRS_PER_BLOCK;
+ int cur_level = dn->cur_level;
+ int max_level = dn->max_level;
+ pgoff_t base = 0;
+
+ if (!dn->max_level)
+ return pgofs + 1;
+
+ while (max_level-- > cur_level)
+ skipped_unit *= NIDS_PER_BLOCK;
+
+ switch (dn->max_level) {
+ case 3:
+ base += 2 * indirect_blks;
+ case 2:
+ base += 2 * direct_blks;
+ case 1:
+ base += direct_index;
+ break;
+ default:
+ f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
+ }
+
+ return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}
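
get_next_page_offset() skips an entire missing subtree in one step: the subtree size is ADDRS_PER_BLOCK scaled by NIDS_PER_BLOCK once per level between the failure point and the tree top, and `base` is where that addressing area starts. A worked example using what I believe are f2fs's default geometry constants (923 direct pointers in the inode, 1018 slots per block; treat the exact numbers as assumptions):

#include <stdio.h>

int main(void)
{
	const long direct_blks = 1018, nids_per_blk = 1018, direct_index = 923;
	long pgofs = 2000000, cur_level = 1, max_level = 2;
	long skipped_unit = 1018, base, lvl;

	for (lvl = max_level; lvl > cur_level; lvl--)
		skipped_unit *= nids_per_blk;   /* one subtree per missing nid */

	/* max_level == 2: area starts after the direct pointers
	 * plus the two direct-node blocks */
	base = 2 * direct_blks + direct_index;

	printf("next offset = %ld\n",           /* 2075607 with these inputs */
	       ((pgofs - base) / skipped_unit + 1) * skipped_unit + base);
	return 0;
}
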
/*
* The maximum depth is four.
* Offset[0] will have raw inode offset.
*/
-static int get_node_path(struct f2fs_inode_info *fi, long block,
+static int get_node_path(struct inode *inode, long block,
int offset[4], unsigned int noffset[4])
{
- const long direct_index = ADDRS_PER_INODE(fi);
+ const long direct_index = ADDRS_PER_INODE(inode);
const long direct_blks = ADDRS_PER_BLOCK;
const long dptrs_per_blk = NIDS_PER_BLOCK;
const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
@@ -493,10 +556,10 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
int offset[4];
unsigned int noffset[4];
nid_t nids[4];
- int level, i;
+ int level, i = 0;
int err = 0;
- level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);
+ level = get_node_path(dn->inode, index, offset, noffset);
nids[0] = dn->inode->i_ino;
npage[0] = dn->inode_page;
@@ -583,6 +646,11 @@ release_pages:
release_out:
dn->inode_page = NULL;
dn->node_page = NULL;
+ if (err == -ENOENT) {
+ dn->cur_level = i;
+ dn->max_level = level;
+ dn->ofs_in_node = offset[level];
+ }
return err;
}
@@ -606,8 +674,7 @@ static void truncate_node(struct dnode_of_data *dn)
if (dn->nid == dn->inode->i_ino) {
remove_orphan_inode(sbi, dn->nid);
dec_valid_inode_count(sbi);
- } else {
- sync_inode_page(dn);
+ f2fs_inode_synced(dn->inode);
}
invalidate:
clear_node_page_dirty(dn->node_page);
@@ -666,6 +733,8 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
return PTR_ERR(page);
}
+ ra_node_pages(page, ofs, NIDS_PER_BLOCK);
+
rn = F2FS_NODE(page);
if (depth < 3) {
for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
@@ -676,7 +745,8 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
ret = truncate_dnode(&rdn);
if (ret < 0)
goto out_err;
- set_nid(page, i, 0, false);
+ if (set_nid(page, i, 0, false))
+ dn->node_changed = true;
}
} else {
child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
@@ -689,7 +759,8 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
rdn.nid = child_nid;
ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
if (ret == (NIDS_PER_BLOCK + 1)) {
- set_nid(page, i, 0, false);
+ if (set_nid(page, i, 0, false))
+ dn->node_changed = true;
child_nofs += ret;
} else if (ret < 0 && ret != -ENOENT) {
goto out_err;
@@ -741,6 +812,8 @@ static int truncate_partial_nodes(struct dnode_of_data *dn,
nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
}
+ ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);
+
/* free direct nodes linked to a partial indirect node */
for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
child_nid = get_nid(pages[idx], i, false);
@@ -750,7 +823,8 @@ static int truncate_partial_nodes(struct dnode_of_data *dn,
err = truncate_dnode(dn);
if (err < 0)
goto fail;
- set_nid(pages[idx], i, 0, false);
+ if (set_nid(pages[idx], i, 0, false))
+ dn->node_changed = true;
}
if (offset[idx + 1] == 0) {
@@ -787,8 +861,8 @@ int truncate_inode_blocks(struct inode *inode, pgoff_t from)
trace_f2fs_truncate_inode_blocks_enter(inode, from);
- level = get_node_path(F2FS_I(inode), from, offset, noffset);
-restart:
+ level = get_node_path(inode, from, offset, noffset);
+
page = get_node_page(sbi, inode->i_ino);
if (IS_ERR(page)) {
trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
@@ -852,11 +926,8 @@ skip_partial:
if (offset[1] == 0 &&
ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
lock_page(page);
- if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
- f2fs_put_page(page, 1);
- goto restart;
- }
- f2fs_wait_on_page_writeback(page, NODE);
+ BUG_ON(page->mapping != NODE_MAPPING(sbi));
+ f2fs_wait_on_page_writeback(page, NODE, true);
ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
set_page_dirty(page);
unlock_page(page);
@@ -885,7 +956,7 @@ int truncate_xattr_node(struct inode *inode, struct page *page)
if (IS_ERR(npage))
return PTR_ERR(npage);
- F2FS_I(inode)->i_xattr_nid = 0;
+ f2fs_i_xnid_write(inode, 0);
/* need to do checkpoint during fsync */
F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));
@@ -951,10 +1022,10 @@ struct page *new_node_page(struct dnode_of_data *dn,
struct page *page;
int err;
- if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
+ if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return ERR_PTR(-EPERM);
- page = grab_cache_page(NODE_MAPPING(sbi), dn->nid);
+ page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
if (!page)
return ERR_PTR(-ENOMEM);
@@ -971,23 +1042,19 @@ struct page *new_node_page(struct dnode_of_data *dn,
new_ni.ino = dn->inode->i_ino;
set_node_addr(sbi, &new_ni, NEW_ADDR, false);
- f2fs_wait_on_page_writeback(page, NODE);
+ f2fs_wait_on_page_writeback(page, NODE, true);
fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
set_cold_node(dn->inode, page);
- SetPageUptodate(page);
- set_page_dirty(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
+ if (set_page_dirty(page))
+ dn->node_changed = true;
if (f2fs_has_xattr_block(ofs))
- F2FS_I(dn->inode)->i_xattr_nid = dn->nid;
+ f2fs_i_xnid_write(dn->inode, dn->nid);
- dn->node_page = page;
- if (ipage)
- update_inode(dn->inode, ipage);
- else
- sync_inode_page(dn);
if (ofs == 0)
inc_valid_inode_count(sbi);
-
return page;
fail:
@@ -1013,6 +1080,9 @@ static int read_node_page(struct page *page, int rw)
.encrypted_page = NULL,
};
+ if (PageUptodate(page))
+ return LOCKED_PAGE;
+
get_node_info(sbi, page->index, &ni);
if (unlikely(ni.blk_addr == NULL_ADDR)) {
@@ -1020,10 +1090,7 @@ static int read_node_page(struct page *page, int rw)
return -ENOENT;
}
- if (PageUptodate(page))
- return LOCKED_PAGE;
-
- fio.blk_addr = ni.blk_addr;
+ fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
return f2fs_submit_page_bio(&fio);
}
@@ -1035,14 +1102,17 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
struct page *apage;
int err;
- apage = find_get_page(NODE_MAPPING(sbi), nid);
- if (apage && PageUptodate(apage)) {
- f2fs_put_page(apage, 0);
+ if (!nid)
+ return;
+ f2fs_bug_on(sbi, check_nid_range(sbi, nid));
+
+ rcu_read_lock();
+ apage = radix_tree_lookup(&NODE_MAPPING(sbi)->page_tree, nid);
+ rcu_read_unlock();
+ if (apage)
return;
- }
- f2fs_put_page(apage, 0);
- apage = grab_cache_page(NODE_MAPPING(sbi), nid);
+ apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
if (!apage)
return;
@@ -1050,12 +1120,17 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
f2fs_put_page(apage, err ? 1 : 0);
}
-struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
+static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
+ struct page *parent, int start)
{
struct page *page;
int err;
+
+ if (!nid)
+ return ERR_PTR(-ENOENT);
+ f2fs_bug_on(sbi, check_nid_range(sbi, nid));
repeat:
- page = grab_cache_page(NODE_MAPPING(sbi), nid);
+ page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
if (!page)
return ERR_PTR(-ENOMEM);
@@ -1063,106 +1138,301 @@ repeat:
if (err < 0) {
f2fs_put_page(page, 1);
return ERR_PTR(err);
- } else if (err != LOCKED_PAGE) {
- lock_page(page);
+ } else if (err == LOCKED_PAGE) {
+ goto page_hit;
}
- if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) {
- ClearPageUptodate(page);
- f2fs_put_page(page, 1);
- return ERR_PTR(-EIO);
- }
+ if (parent)
+ ra_node_pages(parent, start + 1, MAX_RA_NODE);
+
+ lock_page(page);
+
if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
f2fs_put_page(page, 1);
goto repeat;
}
+
+ if (unlikely(!PageUptodate(page)))
+ goto out_err;
+page_hit:
+ if (unlikely(nid != nid_of_node(page))) {
+ f2fs_bug_on(sbi, 1);
+ ClearPageUptodate(page);
+out_err:
+ f2fs_put_page(page, 1);
+ return ERR_PTR(-EIO);
+ }
return page;
}
-/*
- * Return a locked page for the desired node page.
- * And, readahead MAX_RA_NODE number of node pages.
- */
+struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
+{
+ return __get_node_page(sbi, nid, NULL, 0);
+}
+
struct page *get_node_page_ra(struct page *parent, int start)
{
struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
- struct blk_plug plug;
+ nid_t nid = get_nid(parent, start, false);
+
+ return __get_node_page(sbi, nid, parent, start);
+}
+
+static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
+{
+ struct inode *inode;
struct page *page;
- int err, i, end;
- nid_t nid;
+ int ret;
- /* First, try getting the desired direct node. */
- nid = get_nid(parent, start, false);
- if (!nid)
- return ERR_PTR(-ENOENT);
-repeat:
- page = grab_cache_page(NODE_MAPPING(sbi), nid);
+ /* should flush inline_data before evict_inode */
+ inode = ilookup(sbi->sb, ino);
+ if (!inode)
+ return;
+
+ page = pagecache_get_page(inode->i_mapping, 0, FGP_LOCK|FGP_NOWAIT, 0);
if (!page)
- return ERR_PTR(-ENOMEM);
+ goto iput_out;
- err = read_node_page(page, READ_SYNC);
- if (err < 0) {
- f2fs_put_page(page, 1);
- return ERR_PTR(err);
- } else if (err == LOCKED_PAGE) {
- goto page_hit;
- }
+ if (!PageUptodate(page))
+ goto page_out;
- blk_start_plug(&plug);
+ if (!PageDirty(page))
+ goto page_out;
- /* Then, try readahead for siblings of the desired node */
- end = start + MAX_RA_NODE;
- end = min(end, NIDS_PER_BLOCK);
- for (i = start + 1; i < end; i++) {
- nid = get_nid(parent, i, false);
- if (!nid)
- continue;
- ra_node_page(sbi, nid);
- }
+ if (!clear_page_dirty_for_io(page))
+ goto page_out;
- blk_finish_plug(&plug);
+ ret = f2fs_write_inline_data(inode, page);
+ inode_dec_dirty_pages(inode);
+ remove_dirty_inode(inode);
+ if (ret)
+ set_page_dirty(page);
+page_out:
+ f2fs_put_page(page, 1);
+iput_out:
+ iput(inode);
+}
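
flush_inline_data() takes the inode's first page with FGP_LOCK|FGP_NOWAIT: since it is called from the node-writeback path, blocking on a contended page lock could deadlock, so contended pages are simply skipped. A distilled trylock sketch of that opportunistic pattern, with a pthread mutex standing in for the page lock:

#include <pthread.h>

static int flush_if_uncontended(pthread_mutex_t *lock, void (*flush)(void))
{
	if (pthread_mutex_trylock(lock))
		return 0;       /* contended: let someone else flush */
	flush();
	pthread_mutex_unlock(lock);
	return 1;
}
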
- lock_page(page);
- if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
- f2fs_put_page(page, 1);
- goto repeat;
- }
-page_hit:
- if (unlikely(!PageUptodate(page))) {
- f2fs_put_page(page, 1);
- return ERR_PTR(-EIO);
+void move_node_page(struct page *node_page, int gc_type)
+{
+ if (gc_type == FG_GC) {
+ struct f2fs_sb_info *sbi = F2FS_P_SB(node_page);
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = 1,
+ .for_reclaim = 0,
+ };
+
+ set_page_dirty(node_page);
+ f2fs_wait_on_page_writeback(node_page, NODE, true);
+
+ f2fs_bug_on(sbi, PageWriteback(node_page));
+ if (!clear_page_dirty_for_io(node_page))
+ goto out_page;
+
+ if (NODE_MAPPING(sbi)->a_ops->writepage(node_page, &wbc))
+ unlock_page(node_page);
+ goto release_page;
+ } else {
+ /* set the page dirty; regular writeback will flush it */
+ if (!PageWriteback(node_page))
+ set_page_dirty(node_page);
+ }
+out_page:
+ unlock_page(node_page);
+release_page:
+ f2fs_put_page(node_page, 0);
+}
+
+static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
+{
+ pgoff_t index, end;
+ struct pagevec pvec;
+ struct page *last_page = NULL;
+
+ pagevec_init(&pvec, 0);
+ index = 0;
+ end = ULONG_MAX;
+
+ while (index <= end) {
+ int i, nr_pages;
+ nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
+ PAGECACHE_TAG_DIRTY,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+ if (nr_pages == 0)
+ break;
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pvec.pages[i];
+
+ if (unlikely(f2fs_cp_error(sbi))) {
+ f2fs_put_page(last_page, 0);
+ pagevec_release(&pvec);
+ return ERR_PTR(-EIO);
+ }
+
+ if (!IS_DNODE(page) || !is_cold_node(page))
+ continue;
+ if (ino_of_node(page) != ino)
+ continue;
+
+ lock_page(page);
+
+ if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
+continue_unlock:
+ unlock_page(page);
+ continue;
+ }
+ if (ino_of_node(page) != ino)
+ goto continue_unlock;
+
+ if (!PageDirty(page)) {
+ /* someone wrote it for us */
+ goto continue_unlock;
+ }
+
+ if (last_page)
+ f2fs_put_page(last_page, 0);
+
+ get_page(page);
+ last_page = page;
+ unlock_page(page);
+ }
+ pagevec_release(&pvec);
+ cond_resched();
}
- return page;
+ return last_page;
}
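
last_fsync_dnode() walks the dirty-node pagevecs and pins only the most recent matching page, releasing the previously pinned candidate each time a newer one is found, so exactly one reference survives the scan. A distilled model of that bookkeeping; the reference helpers and predicate are hypothetical:

struct page_model { long index; };

void page_get(struct page_model *p);
void page_put(struct page_model *p);
int  page_matches(const struct page_model *p);

static struct page_model *last_match(struct page_model **pages, int n)
{
	struct page_model *last = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (!page_matches(pages[i]))
			continue;
		if (last)
			page_put(last);     /* drop the superseded candidate */
		page_get(pages[i]);
		last = pages[i];
	}
	return last;                        /* caller must page_put() it */
}
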
-void sync_inode_page(struct dnode_of_data *dn)
+int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
+ struct writeback_control *wbc, bool atomic)
{
- if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
- update_inode(dn->inode, dn->node_page);
- } else if (dn->inode_page) {
- if (!dn->inode_page_locked)
- lock_page(dn->inode_page);
- update_inode(dn->inode, dn->inode_page);
- if (!dn->inode_page_locked)
- unlock_page(dn->inode_page);
- } else {
- update_inode_page(dn->inode);
+ pgoff_t index, end;
+ struct pagevec pvec;
+ int ret = 0;
+ struct page *last_page = NULL;
+ bool marked = false;
+ nid_t ino = inode->i_ino;
+ int nwritten = 0;
+
+ if (atomic) {
+ last_page = last_fsync_dnode(sbi, ino);
+ if (IS_ERR_OR_NULL(last_page))
+ return PTR_ERR_OR_ZERO(last_page);
+ }
+retry:
+ pagevec_init(&pvec, 0);
+ index = 0;
+ end = ULONG_MAX;
+
+ while (index <= end) {
+ int i, nr_pages;
+ nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
+ PAGECACHE_TAG_DIRTY,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+ if (nr_pages == 0)
+ break;
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pvec.pages[i];
+
+ if (unlikely(f2fs_cp_error(sbi))) {
+ f2fs_put_page(last_page, 0);
+ pagevec_release(&pvec);
+ ret = -EIO;
+ goto out;
+ }
+
+ if (!IS_DNODE(page) || !is_cold_node(page))
+ continue;
+ if (ino_of_node(page) != ino)
+ continue;
+
+ lock_page(page);
+
+ if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
+continue_unlock:
+ unlock_page(page);
+ continue;
+ }
+ if (ino_of_node(page) != ino)
+ goto continue_unlock;
+
+ if (!PageDirty(page) && page != last_page) {
+ /* someone wrote it for us */
+ goto continue_unlock;
+ }
+
+ f2fs_wait_on_page_writeback(page, NODE, true);
+ BUG_ON(PageWriteback(page));
+
+ if (!atomic || page == last_page) {
+ set_fsync_mark(page, 1);
+ if (IS_INODE(page)) {
+ if (is_inode_flag_set(inode,
+ FI_DIRTY_INODE))
+ update_inode(inode, page);
+ set_dentry_mark(page,
+ need_dentry_mark(sbi, ino));
+ }
+ /* may have been written by another thread */
+ if (!PageDirty(page))
+ set_page_dirty(page);
+ }
+
+ if (!clear_page_dirty_for_io(page))
+ goto continue_unlock;
+
+ ret = NODE_MAPPING(sbi)->a_ops->writepage(page, wbc);
+ if (ret) {
+ unlock_page(page);
+ f2fs_put_page(last_page, 0);
+ break;
+ } else {
+ nwritten++;
+ }
+
+ if (page == last_page) {
+ f2fs_put_page(page, 0);
+ marked = true;
+ break;
+ }
+ }
+ pagevec_release(&pvec);
+ cond_resched();
+
+ if (ret || marked)
+ break;
}
+ if (!ret && atomic && !marked) {
+ f2fs_msg(sbi->sb, KERN_DEBUG,
+ "Retry to write fsync mark: ino=%u, idx=%lx",
+ ino, last_page->index);
+ lock_page(last_page);
+ f2fs_wait_on_page_writeback(last_page, NODE, true);
+ set_page_dirty(last_page);
+ unlock_page(last_page);
+ goto retry;
+ }
+out:
+ if (nwritten)
+ f2fs_submit_merged_bio_cond(sbi, NULL, NULL, ino, NODE, WRITE);
+ return ret ? -EIO : 0;
}
-int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
- struct writeback_control *wbc)
+int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc)
{
pgoff_t index, end;
struct pagevec pvec;
- int step = ino ? 2 : 0;
- int nwritten = 0, wrote = 0;
+ int step = 0;
+ int nwritten = 0;
+ int ret = 0;
pagevec_init(&pvec, 0);
next_step:
index = 0;
- end = LONG_MAX;
+ end = ULONG_MAX;
while (index <= end) {
int i, nr_pages;
@@ -1175,6 +1445,12 @@ next_step:
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
+ if (unlikely(f2fs_cp_error(sbi))) {
+ pagevec_release(&pvec);
+ ret = -EIO;
+ goto out;
+ }
+
/*
* flushing sequence with step:
* 0. indirect nodes
@@ -1189,14 +1465,8 @@ next_step:
if (step == 2 && (!IS_DNODE(page) ||
!is_cold_node(page)))
continue;
-
- /*
- * If an fsync mode,
- * we should not skip writing node pages.
- */
- if (ino && ino_of_node(page) == ino)
- lock_page(page);
- else if (!trylock_page(page))
+lock_node:
+ if (!trylock_page(page))
continue;
if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
@@ -1204,33 +1474,33 @@ continue_unlock:
unlock_page(page);
continue;
}
- if (ino && ino_of_node(page) != ino)
- goto continue_unlock;
if (!PageDirty(page)) {
/* someone wrote it for us */
goto continue_unlock;
}
+ /* flush inline_data */
+ if (is_inline_node(page)) {
+ clear_inline_node(page);
+ unlock_page(page);
+ flush_inline_data(sbi, ino_of_node(page));
+ goto lock_node;
+ }
+
+ f2fs_wait_on_page_writeback(page, NODE, true);
+
+ BUG_ON(PageWriteback(page));
if (!clear_page_dirty_for_io(page))
goto continue_unlock;
- /* called by fsync() */
- if (ino && IS_DNODE(page)) {
- set_fsync_mark(page, 1);
- if (IS_INODE(page))
- set_dentry_mark(page,
- need_dentry_mark(sbi, ino));
- nwritten++;
- } else {
- set_fsync_mark(page, 0);
- set_dentry_mark(page, 0);
- }
+ set_fsync_mark(page, 0);
+ set_dentry_mark(page, 0);
if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
unlock_page(page);
else
- wrote++;
+ nwritten++;
if (--wbc->nr_to_write == 0)
break;
@@ -1248,15 +1518,15 @@ continue_unlock:
step++;
goto next_step;
}
-
- if (wrote)
+out:
+ if (nwritten)
f2fs_submit_merged_bio(sbi, NODE, WRITE);
- return nwritten;
+ return ret;
}
int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
- pgoff_t index = 0, end = LONG_MAX;
+ pgoff_t index = 0, end = ULONG_MAX;
struct pagevec pvec;
int ret2 = 0, ret = 0;
@@ -1278,7 +1548,7 @@ int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
continue;
if (ino && ino_of_node(page) == ino) {
- f2fs_wait_on_page_writeback(page, NODE);
+ f2fs_wait_on_page_writeback(page, NODE, true);
if (TestClearPageError(page))
ret = -EIO;
}
@@ -1317,8 +1587,6 @@ static int f2fs_write_node_page(struct page *page,
if (unlikely(f2fs_cp_error(sbi)))
goto redirty_out;
- f2fs_wait_on_page_writeback(page, NODE);
-
/* get old block addr of this node page */
nid = nid_of_node(page);
f2fs_bug_on(sbi, page->index != nid);
@@ -1342,14 +1610,18 @@ static int f2fs_write_node_page(struct page *page,
}
set_page_writeback(page);
- fio.blk_addr = ni.blk_addr;
+ fio.old_blkaddr = ni.blk_addr;
write_node_page(nid, &fio);
- set_node_addr(sbi, &ni, fio.blk_addr, is_fsync_dnode(page));
+ set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
dec_page_count(sbi, F2FS_DIRTY_NODES);
up_read(&sbi->node_write);
- unlock_page(page);
if (wbc->for_reclaim)
+ f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, NODE, WRITE);
+
+ unlock_page(page);
+
+ if (unlikely(f2fs_cp_error(sbi)))
f2fs_submit_merged_bio(sbi, NODE, WRITE);
return 0;
@@ -1363,10 +1635,9 @@ static int f2fs_write_node_pages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
+ struct blk_plug plug;
long diff;
- trace_f2fs_writepages(mapping->host, wbc, NODE);
-
/* balancing f2fs's metadata in background */
f2fs_balance_fs_bg(sbi);
@@ -1374,14 +1645,19 @@ static int f2fs_write_node_pages(struct address_space *mapping,
if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
goto skip_write;
+ trace_f2fs_writepages(mapping->host, wbc, NODE);
+
diff = nr_pages_to_write(sbi, NODE, wbc);
wbc->sync_mode = WB_SYNC_NONE;
- sync_node_pages(sbi, 0, wbc);
+ blk_start_plug(&plug);
+ sync_node_pages(sbi, wbc);
+ blk_finish_plug(&plug);
wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
return 0;
skip_write:
wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
+ trace_f2fs_writepages(mapping->host, wbc, NODE);
return 0;
}
@@ -1389,9 +1665,10 @@ static int f2fs_set_node_page_dirty(struct page *page)
{
trace_f2fs_set_page_dirty(page, NODE);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
if (!PageDirty(page)) {
- __set_page_dirty_nobuffers(page);
+ f2fs_set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
SetPagePrivate(page);
f2fs_trace_pid(page);
@@ -1409,6 +1686,9 @@ const struct address_space_operations f2fs_node_aops = {
.set_page_dirty = f2fs_set_node_page_dirty,
.invalidatepage = f2fs_invalidate_page,
.releasepage = f2fs_release_page,
+#ifdef CONFIG_MIGRATION
+ .migratepage = f2fs_migrate_page,
+#endif
};
static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
@@ -1417,11 +1697,35 @@ static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
return radix_tree_lookup(&nm_i->free_nid_root, n);
}
-static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
- struct free_nid *i)
+static int __insert_nid_to_list(struct f2fs_sb_info *sbi,
+ struct free_nid *i, enum nid_list list, bool new)
+{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+
+ if (new) {
+ int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
+ if (err)
+ return err;
+ }
+
+ f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW :
+ i->state != NID_ALLOC);
+ nm_i->nid_cnt[list]++;
+ list_add_tail(&i->list, &nm_i->nid_list[list]);
+ return 0;
+}
+
+static void __remove_nid_from_list(struct f2fs_sb_info *sbi,
+ struct free_nid *i, enum nid_list list, bool reuse)
{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
+
+ f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW :
+ i->state != NID_ALLOC);
+ nm_i->nid_cnt[list]--;
list_del(&i->list);
- radix_tree_delete(&nm_i->free_nid_root, i->nid);
+ if (!reuse)
+ radix_tree_delete(&nm_i->free_nid_root, i->nid);
}
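
The new helpers keep three structures in lockstep: the radix-tree index, the per-state nid list, and its element count; removal may keep the index entry alive when the nid is about to be reused (the `reuse` flag). A compact model of the same invariant with a singly linked list and hypothetical index helpers:

#include <stddef.h>

struct nid_item { struct nid_item *next; unsigned int nid; };

struct nid_list_model {
	struct nid_item *head;          /* the list */
	size_t cnt;                     /* its counter */
};

int  index_insert(unsigned int nid, struct nid_item *i);  /* hypothetical */
void index_remove(unsigned int nid);                      /* radix-tree stand-ins */

static int insert_item(struct nid_list_model *l, struct nid_item *i, int new)
{
	if (new) {
		int err = index_insert(i->nid, i);
		if (err)
			return err;     /* index and list must not diverge */
	}
	i->next = l->head;
	l->head = i;
	l->cnt++;
	return 0;
}

static void remove_item(struct nid_list_model *l, struct nid_item *i, int reuse)
{
	struct nid_item **pp;

	for (pp = &l->head; *pp; pp = &(*pp)->next) {
		if (*pp != i)
			continue;
		*pp = i->next;
		l->cnt--;
		if (!reuse)
			index_remove(i->nid);   /* reuse keeps the index entry */
		return;
	}
}
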
static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
@@ -1429,10 +1733,7 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i;
struct nat_entry *ne;
- bool allocated = false;
-
- if (!available_free_memory(sbi, FREE_NIDS))
- return -1;
+ int err;
/* 0 nid should not be used */
if (unlikely(nid == 0))
@@ -1440,14 +1741,9 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
if (build) {
/* do not add allocated nids */
- down_read(&nm_i->nat_tree_lock);
ne = __lookup_nat_cache(nm_i, nid);
- if (ne &&
- (!get_nat_flag(ne, IS_CHECKPOINTED) ||
+ if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
nat_get_blkaddr(ne) != NULL_ADDR))
- allocated = true;
- up_read(&nm_i->nat_tree_lock);
- if (allocated)
return 0;
}
@@ -1460,33 +1756,30 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
return 0;
}
- spin_lock(&nm_i->free_nid_list_lock);
- if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
- spin_unlock(&nm_i->free_nid_list_lock);
- radix_tree_preload_end();
+ spin_lock(&nm_i->nid_list_lock);
+ err = __insert_nid_to_list(sbi, i, FREE_NID_LIST, true);
+ spin_unlock(&nm_i->nid_list_lock);
+ radix_tree_preload_end();
+ if (err) {
kmem_cache_free(free_nid_slab, i);
return 0;
}
- list_add_tail(&i->list, &nm_i->free_nid_list);
- nm_i->fcnt++;
- spin_unlock(&nm_i->free_nid_list_lock);
- radix_tree_preload_end();
return 1;
}
-static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
+static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
{
+ struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i;
bool need_free = false;
- spin_lock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
i = __lookup_free_nid_list(nm_i, nid);
if (i && i->state == NID_NEW) {
- __del_from_free_nid_list(nm_i, i);
- nm_i->fcnt--;
+ __remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
need_free = true;
}
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_unlock(&nm_i->nid_list_lock);
if (need_free)
kmem_cache_free(free_nid_slab, i);
@@ -1509,29 +1802,32 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
- if (blk_addr == NULL_ADDR) {
- if (add_free_nid(sbi, start_nid, true) < 0)
- break;
- }
+ if (blk_addr == NULL_ADDR)
+ add_free_nid(sbi, start_nid, true);
}
}
-static void build_free_nids(struct f2fs_sb_info *sbi)
+static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
int i = 0;
nid_t nid = nm_i->next_scan_nid;
/* Enough entries */
- if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
+ if (nm_i->nid_cnt[FREE_NID_LIST] >= NAT_ENTRY_PER_BLOCK)
+ return;
+
+ if (!sync && !available_free_memory(sbi, FREE_NIDS))
return;
/* readahead nat pages to be scanned */
ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
META_NAT, true);
+ down_read(&nm_i->nat_tree_lock);
+
while (1) {
struct page *page = get_current_nat_page(sbi, nid);
@@ -1550,21 +1846,31 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
nm_i->next_scan_nid = nid;
/* find free nids from current sum_pages */
- mutex_lock(&curseg->curseg_mutex);
- for (i = 0; i < nats_in_cursum(sum); i++) {
- block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
- nid = le32_to_cpu(nid_in_journal(sum, i));
+ down_read(&curseg->journal_rwsem);
+ for (i = 0; i < nats_in_cursum(journal); i++) {
+ block_t addr;
+
+ addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
+ nid = le32_to_cpu(nid_in_journal(journal, i));
if (addr == NULL_ADDR)
add_free_nid(sbi, nid, true);
else
- remove_free_nid(nm_i, nid);
+ remove_free_nid(sbi, nid);
}
- mutex_unlock(&curseg->curseg_mutex);
+ up_read(&curseg->journal_rwsem);
+ up_read(&nm_i->nat_tree_lock);
ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
nm_i->ra_nid_pages, META_NAT, false);
}
+void build_free_nids(struct f2fs_sb_info *sbi, bool sync)
+{
+ mutex_lock(&NM_I(sbi)->build_lock);
+ __build_free_nids(sbi, sync);
+ mutex_unlock(&NM_I(sbi)->build_lock);
+}
+
/*
* If this function returns success, caller can obtain a new nid
* from second parameter of this function.
@@ -1575,40 +1881,35 @@ bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i = NULL;
retry:
- if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_ALLOC_NID))
return false;
+#endif
+ spin_lock(&nm_i->nid_list_lock);
- spin_lock(&nm_i->free_nid_list_lock);
+ if (unlikely(nm_i->available_nids == 0)) {
+ spin_unlock(&nm_i->nid_list_lock);
+ return false;
+ }
/* We should not use stale free nids created by build_free_nids */
- if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
- struct node_info ni;
-
- f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
- list_for_each_entry(i, &nm_i->free_nid_list, list)
- if (i->state == NID_NEW)
- break;
-
- f2fs_bug_on(sbi, i->state != NID_NEW);
+ if (nm_i->nid_cnt[FREE_NID_LIST] && !on_build_free_nids(nm_i)) {
+ f2fs_bug_on(sbi, list_empty(&nm_i->nid_list[FREE_NID_LIST]));
+ i = list_first_entry(&nm_i->nid_list[FREE_NID_LIST],
+ struct free_nid, list);
*nid = i->nid;
+
+ __remove_nid_from_list(sbi, i, FREE_NID_LIST, true);
i->state = NID_ALLOC;
- nm_i->fcnt--;
- spin_unlock(&nm_i->free_nid_list_lock);
-
- /* check nid is allocated already */
- get_node_info(sbi, *nid, &ni);
- if (ni.blk_addr != NULL_ADDR) {
- alloc_nid_done(sbi, *nid);
- goto retry;
- }
+ __insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false);
+ nm_i->available_nids--;
+ spin_unlock(&nm_i->nid_list_lock);
return true;
}
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_unlock(&nm_i->nid_list_lock);
/* Let's scan nat pages and its caches to get free nids */
- mutex_lock(&nm_i->build_lock);
- build_free_nids(sbi);
- mutex_unlock(&nm_i->build_lock);
+ build_free_nids(sbi, true);
goto retry;
}
@@ -1620,11 +1921,11 @@ void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i;
- spin_lock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
i = __lookup_free_nid_list(nm_i, nid);
- f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
- __del_from_free_nid_list(nm_i, i);
- spin_unlock(&nm_i->free_nid_list_lock);
+ f2fs_bug_on(sbi, !i);
+ __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false);
+ spin_unlock(&nm_i->nid_list_lock);
kmem_cache_free(free_nid_slab, i);
}
@@ -1641,17 +1942,22 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
if (!nid)
return;
- spin_lock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
i = __lookup_free_nid_list(nm_i, nid);
- f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
+ f2fs_bug_on(sbi, !i);
+
if (!available_free_memory(sbi, FREE_NIDS)) {
- __del_from_free_nid_list(nm_i, i);
+ __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false);
need_free = true;
} else {
+ __remove_nid_from_list(sbi, i, ALLOC_NID_LIST, true);
i->state = NID_NEW;
- nm_i->fcnt++;
+ __insert_nid_to_list(sbi, i, FREE_NID_LIST, false);
}
- spin_unlock(&nm_i->free_nid_list_lock);
+
+ nm_i->available_nids++;
+
+ spin_unlock(&nm_i->nid_list_lock);
if (need_free)
kmem_cache_free(free_nid_slab, i);
@@ -1663,21 +1969,24 @@ int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
struct free_nid *i, *next;
int nr = nr_shrink;
+ if (nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS)
+ return 0;
+
if (!mutex_trylock(&nm_i->build_lock))
return 0;
- spin_lock(&nm_i->free_nid_list_lock);
- list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
- if (nr_shrink <= 0 || nm_i->fcnt <= NAT_ENTRY_PER_BLOCK)
+ spin_lock(&nm_i->nid_list_lock);
+ list_for_each_entry_safe(i, next, &nm_i->nid_list[FREE_NID_LIST],
+ list) {
+ if (nr_shrink <= 0 ||
+ nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS)
break;
- if (i->state == NID_ALLOC)
- continue;
- __del_from_free_nid_list(nm_i, i);
+
+ __remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
kmem_cache_free(free_nid_slab, i);
- nm_i->fcnt--;
nr_shrink--;
}
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_unlock(&nm_i->nid_list_lock);
mutex_unlock(&nm_i->build_lock);
return nr - nr_shrink;
@@ -1695,7 +2004,7 @@ void recover_inline_xattr(struct inode *inode, struct page *page)
ri = F2FS_INODE(page);
if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
- clear_inode_flag(F2FS_I(inode), FI_INLINE_XATTR);
+ clear_inode_flag(inode, FI_INLINE_XATTR);
goto update_inode;
}
@@ -1703,7 +2012,7 @@ void recover_inline_xattr(struct inode *inode, struct page *page)
src_addr = inline_xattr_addr(page);
inline_size = inline_xattr_size(inode);
- f2fs_wait_on_page_writeback(ipage, NODE);
+ f2fs_wait_on_page_writeback(ipage, NODE, true);
memcpy(dst_addr, src_addr, inline_size);
update_inode:
update_inode(inode, ipage);
@@ -1733,17 +2042,15 @@ recover_xnid:
if (unlikely(!inc_valid_node_count(sbi, inode)))
f2fs_bug_on(sbi, 1);
- remove_free_nid(NM_I(sbi), new_xnid);
+ remove_free_nid(sbi, new_xnid);
get_node_info(sbi, new_xnid, &ni);
ni.ino = inode->i_ino;
set_node_addr(sbi, &ni, NEW_ADDR, false);
- F2FS_I(inode)->i_xattr_nid = new_xnid;
+ f2fs_i_xnid_write(inode, new_xnid);
/* 3: update xattr blkaddr */
refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
set_node_addr(sbi, &ni, blkaddr, false);
-
- update_inode_page(inode);
}
int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
@@ -1757,15 +2064,18 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
if (unlikely(old_ni.blk_addr != NULL_ADDR))
return -EINVAL;
-
- ipage = grab_cache_page(NODE_MAPPING(sbi), ino);
- if (!ipage)
- return -ENOMEM;
+retry:
+ ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
+ if (!ipage) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry;
+ }
/* Should not use this inode from free nid list */
- remove_free_nid(NM_I(sbi), ino);
+ remove_free_nid(sbi, ino);
- SetPageUptodate(ipage);
+ if (!PageUptodate(ipage))
+ SetPageUptodate(ipage);
fill_node_footer(ipage, ino, ino, 0, true);
src = F2FS_INODE(page);
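
The retry loop above, like the retry_dn/retry_prev loops later in recovery.c, uses the same back-off-and-retry idiom. A self-contained sketch, with try_alloc() as a hypothetical stand-in for f2fs_grab_cache_page():

#include <stdio.h>
#include <stdlib.h>

static void *try_alloc(void)
{
	return malloc(4096);
}

/* retry on failure instead of aborting recovery; the kernel inserts
 * congestion_wait(BLK_RW_ASYNC, HZ/50) between attempts */
static void *grab_page_retry(void)
{
	void *page;

	while (!(page = try_alloc()))
		;	/* back off here */
	return page;
}

int main(void)
{
	void *p = grab_page_retry();

	printf("got page at %p\n", p);
	free(p);
	return 0;
}
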
@@ -1796,7 +2106,6 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
struct f2fs_node *rn;
struct f2fs_summary *sum_entry;
block_t addr;
- int bio_blocks = MAX_BIO_BLOCKS(sbi);
int i, idx, last_offset, nrpages;
/* scan the node segment */
@@ -1805,7 +2114,7 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
sum_entry = &sum->entries[0];
for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
- nrpages = min(last_offset - i, bio_blocks);
+ nrpages = min(last_offset - i, BIO_MAX_PAGES);
/* readahead node pages */
ra_meta_pages(sbi, addr, nrpages, META_POR, true);
@@ -1831,28 +2140,39 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
int i;
- mutex_lock(&curseg->curseg_mutex);
- for (i = 0; i < nats_in_cursum(sum); i++) {
+ down_write(&curseg->journal_rwsem);
+ for (i = 0; i < nats_in_cursum(journal); i++) {
struct nat_entry *ne;
struct f2fs_nat_entry raw_ne;
- nid_t nid = le32_to_cpu(nid_in_journal(sum, i));
+ nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
- raw_ne = nat_in_journal(sum, i);
+ raw_ne = nat_in_journal(journal, i);
- down_write(&nm_i->nat_tree_lock);
ne = __lookup_nat_cache(nm_i, nid);
if (!ne) {
ne = grab_nat_entry(nm_i, nid);
node_info_from_raw_nat(&ne->ni, &raw_ne);
}
+
+ /*
+ * if a free nat in the journal has not been used since the last
+ * checkpoint, remove it from the available nids, since we will
+ * add it again later.
+ */
+ if (!get_nat_flag(ne, IS_DIRTY) &&
+ le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
+ spin_lock(&nm_i->nid_list_lock);
+ nm_i->available_nids--;
+ spin_unlock(&nm_i->nid_list_lock);
+ }
+
__set_nat_cache_dirty(nm_i, ne);
- up_write(&nm_i->nat_tree_lock);
}
- update_nats_in_cursum(sum, -i);
- mutex_unlock(&curseg->curseg_mutex);
+ update_nats_in_cursum(journal, -i);
+ up_write(&curseg->journal_rwsem);
}
static void __adjust_nat_entry_set(struct nat_entry_set *nes,
@@ -1877,24 +2197,23 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
struct nat_entry_set *set)
{
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
bool to_journal = true;
struct f2fs_nat_block *nat_blk;
struct nat_entry *ne, *cur;
struct page *page = NULL;
- struct f2fs_nm_info *nm_i = NM_I(sbi);
/*
* there are two steps to flush nat entries:
* #1, flush nat entries to journal in current hot data summary block.
* #2, flush nat entries to nat page.
*/
- if (!__has_cursum_space(sum, set->entry_cnt, NAT_JOURNAL))
+ if (!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
to_journal = false;
if (to_journal) {
- mutex_lock(&curseg->curseg_mutex);
+ down_write(&curseg->journal_rwsem);
} else {
page = get_next_nat_page(sbi, start_nid);
nat_blk = page_address(page);
@@ -1911,35 +2230,33 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
continue;
if (to_journal) {
- offset = lookup_journal_in_cursum(sum,
+ offset = lookup_journal_in_cursum(journal,
NAT_JOURNAL, nid, 1);
f2fs_bug_on(sbi, offset < 0);
- raw_ne = &nat_in_journal(sum, offset);
- nid_in_journal(sum, offset) = cpu_to_le32(nid);
+ raw_ne = &nat_in_journal(journal, offset);
+ nid_in_journal(journal, offset) = cpu_to_le32(nid);
} else {
raw_ne = &nat_blk->entries[nid - start_nid];
}
raw_nat_from_node_info(raw_ne, &ne->ni);
-
- down_write(&NM_I(sbi)->nat_tree_lock);
nat_reset_flag(ne);
__clear_nat_cache_dirty(NM_I(sbi), ne);
- up_write(&NM_I(sbi)->nat_tree_lock);
-
- if (nat_get_blkaddr(ne) == NULL_ADDR)
+ if (nat_get_blkaddr(ne) == NULL_ADDR) {
add_free_nid(sbi, nid, false);
+ spin_lock(&NM_I(sbi)->nid_list_lock);
+ NM_I(sbi)->available_nids++;
+ spin_unlock(&NM_I(sbi)->nid_list_lock);
+ }
}
if (to_journal)
- mutex_unlock(&curseg->curseg_mutex);
+ up_write(&curseg->journal_rwsem);
else
f2fs_put_page(page, 1);
f2fs_bug_on(sbi, set->entry_cnt);
- down_write(&nm_i->nat_tree_lock);
radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
- up_write(&nm_i->nat_tree_lock);
kmem_cache_free(nat_entry_set_slab, set);
}
@@ -1950,7 +2267,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
struct nat_entry_set *setvec[SETVEC_SIZE];
struct nat_entry_set *set, *tmp;
unsigned int found;
@@ -1959,29 +2276,32 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
if (!nm_i->dirty_nat_cnt)
return;
+
+ down_write(&nm_i->nat_tree_lock);
+
/*
* if there is not enough space in the journal to store dirty nat
* entries, remove all entries from the journal and merge them
* into the nat entry set.
*/
- if (!__has_cursum_space(sum, nm_i->dirty_nat_cnt, NAT_JOURNAL))
+ if (!__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL))
remove_nats_in_journal(sbi);
- down_write(&nm_i->nat_tree_lock);
while ((found = __gang_lookup_nat_set(nm_i,
set_idx, SETVEC_SIZE, setvec))) {
unsigned idx;
set_idx = setvec[found - 1]->set + 1;
for (idx = 0; idx < found; idx++)
__adjust_nat_entry_set(setvec[idx], &sets,
- MAX_NAT_JENTRIES(sum));
+ MAX_NAT_JENTRIES(journal));
}
- up_write(&nm_i->nat_tree_lock);
/* flush dirty nats in nat entry set */
list_for_each_entry_safe(set, tmp, &sets, set_list)
__flush_nat_entry_set(sbi, set);
+ up_write(&nm_i->nat_tree_lock);
+
f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
}
@@ -2001,20 +2321,24 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
/* not used nids: 0, node, meta, (and root counted as valid node) */
- nm_i->available_nids = nm_i->max_nid - F2FS_RESERVED_NODE_NUM;
- nm_i->fcnt = 0;
+ nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
+ F2FS_RESERVED_NODE_NUM;
+ nm_i->nid_cnt[FREE_NID_LIST] = 0;
+ nm_i->nid_cnt[ALLOC_NID_LIST] = 0;
nm_i->nat_cnt = 0;
nm_i->ram_thresh = DEF_RAM_THRESHOLD;
nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
+ nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
- INIT_LIST_HEAD(&nm_i->free_nid_list);
+ INIT_LIST_HEAD(&nm_i->nid_list[FREE_NID_LIST]);
+ INIT_LIST_HEAD(&nm_i->nid_list[ALLOC_NID_LIST]);
INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
INIT_LIST_HEAD(&nm_i->nat_entries);
mutex_init(&nm_i->build_lock);
- spin_lock_init(&nm_i->free_nid_list_lock);
+ spin_lock_init(&nm_i->nid_list_lock);
init_rwsem(&nm_i->nat_tree_lock);
nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
@@ -2042,7 +2366,7 @@ int build_node_manager(struct f2fs_sb_info *sbi)
if (err)
return err;
- build_free_nids(sbi);
+ build_free_nids(sbi, true);
return 0;
}
@@ -2059,17 +2383,18 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
return;
/* destroy free nid list */
- spin_lock(&nm_i->free_nid_list_lock);
- list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
- f2fs_bug_on(sbi, i->state == NID_ALLOC);
- __del_from_free_nid_list(nm_i, i);
- nm_i->fcnt--;
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
+ list_for_each_entry_safe(i, next_i, &nm_i->nid_list[FREE_NID_LIST],
+ list) {
+ __remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
+ spin_unlock(&nm_i->nid_list_lock);
kmem_cache_free(free_nid_slab, i);
- spin_lock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
}
- f2fs_bug_on(sbi, nm_i->fcnt);
- spin_unlock(&nm_i->free_nid_list_lock);
+ f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID_LIST]);
+ f2fs_bug_on(sbi, nm_i->nid_cnt[ALLOC_NID_LIST]);
+ f2fs_bug_on(sbi, !list_empty(&nm_i->nid_list[ALLOC_NID_LIST]));
+ spin_unlock(&nm_i->nid_list_lock);
/* destroy nat cache */
down_write(&nm_i->nat_tree_lock);
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index e4fffd2d98c4..e7997e240366 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -15,15 +15,21 @@
#define NAT_BLOCK_OFFSET(start_nid) (start_nid / NAT_ENTRY_PER_BLOCK)
/* # of pages to perform synchronous readahead before building free nids */
-#define FREE_NID_PAGES 4
+#define FREE_NID_PAGES 8
+#define MAX_FREE_NIDS (NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)
-#define DEF_RA_NID_PAGES 4 /* # of nid pages to be readaheaded */
+#define DEF_RA_NID_PAGES 0 /* # of nid pages to read ahead */
/* maximum readahead size for node during getting data blocks */
#define MAX_RA_NODE 128
/* control the memory footprint threshold (10MB per 1GB ram) */
-#define DEF_RAM_THRESHOLD 10
+#define DEF_RAM_THRESHOLD 1
+
+/* control dirty nats ratio threshold (default: 10% over max nid count) */
+#define DEF_DIRTY_NAT_RATIO_THRESHOLD 10
+/* control total # of nats */
+#define DEF_NAT_CACHE_THRESHOLD 100000
/* vector size for gang look-up from nat cache that consists of radix tree */
#define NATVEC_SIZE 64
@@ -117,6 +123,17 @@ static inline void raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne,
raw_ne->version = ni->version;
}
+static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi)
+{
+ return NM_I(sbi)->dirty_nat_cnt >= NM_I(sbi)->max_nid *
+ NM_I(sbi)->dirty_nats_ratio / 100;
+}
+
+static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
+{
+ return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD;
+}
+
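
As a worked example of excess_dirty_nats() above, under a hypothetical geometry (the numbers are not taken from the patch):

#include <stdio.h>

int main(void)
{
	/* hypothetical geometry: 4M nids, default 10% dirty-NAT ratio */
	unsigned int max_nid = 1U << 22;
	unsigned int dirty_nats_ratio = 10;

	/* mirrors excess_dirty_nats(): flushing kicks in once
	 * dirty_nat_cnt reaches max_nid * dirty_nats_ratio / 100 */
	printf("flush threshold: %u dirty NAT entries\n",
	       max_nid * dirty_nats_ratio / 100);
	return 0;
}
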
enum mem_type {
FREE_NIDS, /* indicates the free nid list */
NAT_ENTRIES, /* indicates the cached nat entry */
@@ -152,14 +169,15 @@ static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid)
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *fnid;
- spin_lock(&nm_i->free_nid_list_lock);
- if (nm_i->fcnt <= 0) {
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_lock(&nm_i->nid_list_lock);
+ if (nm_i->nid_cnt[FREE_NID_LIST] <= 0) {
+ spin_unlock(&nm_i->nid_list_lock);
return;
}
- fnid = list_entry(nm_i->free_nid_list.next, struct free_nid, list);
+ fnid = list_entry(nm_i->nid_list[FREE_NID_LIST].next,
+ struct free_nid, list);
*nid = fnid->nid;
- spin_unlock(&nm_i->free_nid_list_lock);
+ spin_unlock(&nm_i->nid_list_lock);
}
/*
@@ -183,7 +201,7 @@ static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
block_addr = (pgoff_t)(nm_i->nat_blkaddr +
(seg_off << sbi->log_blocks_per_seg << 1) +
- (block_off & ((1 << sbi->log_blocks_per_seg) - 1)));
+ (block_off & (sbi->blocks_per_seg - 1)));
if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
block_addr += sbi->blocks_per_seg;
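
The mask rewrite above is behavior-preserving, since blocks_per_seg == 1 << log_blocks_per_seg. A sketch of the address computation under an assumed geometry (values are illustrative only):

#include <stdio.h>

int main(void)
{
	/* hypothetical geometry: 512 blocks per segment */
	unsigned int log_blocks_per_seg = 9;
	unsigned int blocks_per_seg = 1U << log_blocks_per_seg;
	unsigned int nat_blkaddr = 1024, block_off = 1500;
	unsigned int seg_off = block_off >> log_blocks_per_seg;

	/* NAT segments come in pairs (hence "<< 1"); the new mask
	 * equals the old (1 << log_blocks_per_seg) - 1 expression */
	unsigned int block_addr = nat_blkaddr +
		(seg_off << log_blocks_per_seg << 1) +
		(block_off & (blocks_per_seg - 1));

	printf("NAT block at %u\n", block_addr);
	return 0;
}
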
@@ -212,6 +230,37 @@ static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
f2fs_change_bit(block_off, nm_i->nat_bitmap);
}
+static inline nid_t ino_of_node(struct page *node_page)
+{
+ struct f2fs_node *rn = F2FS_NODE(node_page);
+ return le32_to_cpu(rn->footer.ino);
+}
+
+static inline nid_t nid_of_node(struct page *node_page)
+{
+ struct f2fs_node *rn = F2FS_NODE(node_page);
+ return le32_to_cpu(rn->footer.nid);
+}
+
+static inline unsigned int ofs_of_node(struct page *node_page)
+{
+ struct f2fs_node *rn = F2FS_NODE(node_page);
+ unsigned flag = le32_to_cpu(rn->footer.flag);
+ return flag >> OFFSET_BIT_SHIFT;
+}
+
+static inline __u64 cpver_of_node(struct page *node_page)
+{
+ struct f2fs_node *rn = F2FS_NODE(node_page);
+ return le64_to_cpu(rn->footer.cp_ver);
+}
+
+static inline block_t next_blkaddr_of_node(struct page *node_page)
+{
+ struct f2fs_node *rn = F2FS_NODE(node_page);
+ return le32_to_cpu(rn->footer.next_blkaddr);
+}
+
static inline void fill_node_footer(struct page *page, nid_t nid,
nid_t ino, unsigned int ofs, bool reset)
{
@@ -242,40 +291,30 @@ static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
struct f2fs_node *rn = F2FS_NODE(page);
+ size_t crc_offset = le32_to_cpu(ckpt->checksum_offset);
+ __u64 cp_ver = le64_to_cpu(ckpt->checkpoint_ver);
- rn->footer.cp_ver = ckpt->checkpoint_ver;
+ if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) {
+ __u64 crc = le32_to_cpu(*((__le32 *)
+ ((unsigned char *)ckpt + crc_offset)));
+ cp_ver |= (crc << 32);
+ }
+ rn->footer.cp_ver = cpu_to_le64(cp_ver);
rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
}
-static inline nid_t ino_of_node(struct page *node_page)
-{
- struct f2fs_node *rn = F2FS_NODE(node_page);
- return le32_to_cpu(rn->footer.ino);
-}
-
-static inline nid_t nid_of_node(struct page *node_page)
-{
- struct f2fs_node *rn = F2FS_NODE(node_page);
- return le32_to_cpu(rn->footer.nid);
-}
-
-static inline unsigned int ofs_of_node(struct page *node_page)
-{
- struct f2fs_node *rn = F2FS_NODE(node_page);
- unsigned flag = le32_to_cpu(rn->footer.flag);
- return flag >> OFFSET_BIT_SHIFT;
-}
-
-static inline unsigned long long cpver_of_node(struct page *node_page)
+static inline bool is_recoverable_dnode(struct page *page)
{
- struct f2fs_node *rn = F2FS_NODE(node_page);
- return le64_to_cpu(rn->footer.cp_ver);
-}
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
+ size_t crc_offset = le32_to_cpu(ckpt->checksum_offset);
+ __u64 cp_ver = cur_cp_version(ckpt);
-static inline block_t next_blkaddr_of_node(struct page *node_page)
-{
- struct f2fs_node *rn = F2FS_NODE(node_page);
- return le32_to_cpu(rn->footer.next_blkaddr);
+ if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) {
+ __u64 crc = le32_to_cpu(*((__le32 *)
+ ((unsigned char *)ckpt + crc_offset)));
+ cp_ver |= (crc << 32);
+ }
+ return cp_ver == cpver_of_node(page);
}
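
A minimal sketch of the version packing shared by fill_node_footer_blkaddr() and is_recoverable_dnode() above: the checkpoint CRC rides in the high 32 bits of the on-disk version, so a dnode is recoverable only when both halves match.

#include <stdint.h>
#include <stdio.h>

static uint64_t pack_cp_ver(uint64_t cp_ver, uint32_t crc)
{
	/* mirrors cp_ver |= (crc << 32) in the patch */
	return cp_ver | ((uint64_t)crc << 32);
}

int main(void)
{
	uint64_t node_ver = pack_cp_ver(7, 0xdeadbeef);
	uint64_t ckpt_ver = pack_cp_ver(7, 0xdeadbeef);

	printf("recoverable: %d\n", node_ver == ckpt_ver);
	return 0;
}
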
/*
@@ -317,17 +356,17 @@ static inline bool IS_DNODE(struct page *node_page)
return true;
}
-static inline void set_nid(struct page *p, int off, nid_t nid, bool i)
+static inline int set_nid(struct page *p, int off, nid_t nid, bool i)
{
struct f2fs_node *rn = F2FS_NODE(p);
- f2fs_wait_on_page_writeback(p, NODE);
+ f2fs_wait_on_page_writeback(p, NODE, true);
if (i)
rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
else
rn->in.nid[off] = cpu_to_le32(nid);
- set_page_dirty(p);
+ return set_page_dirty(p);
}
static inline nid_t get_nid(struct page *p, int off, bool i)
@@ -370,6 +409,21 @@ static inline int is_node(struct page *page, int type)
#define is_fsync_dnode(page) is_node(page, FSYNC_BIT_SHIFT)
#define is_dent_dnode(page) is_node(page, DENT_BIT_SHIFT)
+static inline int is_inline_node(struct page *page)
+{
+ return PageChecked(page);
+}
+
+static inline void set_inline_node(struct page *page)
+{
+ SetPageChecked(page);
+}
+
+static inline void clear_inline_node(struct page *page)
+{
+ ClearPageChecked(page);
+}
+
static inline void set_cold_node(struct inode *inode, struct page *page)
{
struct f2fs_node *rn = F2FS_NODE(page);
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index cbf74f47cce8..983c35da6bce 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -49,8 +49,9 @@ static struct kmem_cache *fsync_entry_slab;
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
- if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
- > sbi->user_block_count)
+ s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);
+
+ if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
return false;
return true;
}
@@ -67,42 +68,71 @@ static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
return NULL;
}
-static int recover_dentry(struct inode *inode, struct page *ipage)
+static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
+ struct list_head *head, nid_t ino)
+{
+ struct inode *inode;
+ struct fsync_inode_entry *entry;
+
+ inode = f2fs_iget_retry(sbi->sb, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+
+ entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
+ entry->inode = inode;
+ list_add_tail(&entry->list, head);
+
+ return entry;
+}
+
+static void del_fsync_inode(struct fsync_inode_entry *entry)
+{
+ iput(entry->inode);
+ list_del(&entry->list);
+ kmem_cache_free(fsync_entry_slab, entry);
+}
+
+static int recover_dentry(struct inode *inode, struct page *ipage,
+ struct list_head *dir_list)
{
struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
nid_t pino = le32_to_cpu(raw_inode->i_pino);
struct f2fs_dir_entry *de;
- struct qstr name;
+ struct fscrypt_name fname;
struct page *page;
struct inode *dir, *einode;
+ struct fsync_inode_entry *entry;
int err = 0;
+ char *name;
- dir = f2fs_iget(inode->i_sb, pino);
- if (IS_ERR(dir)) {
- err = PTR_ERR(dir);
- goto out;
+ entry = get_fsync_inode(dir_list, pino);
+ if (!entry) {
+ entry = add_fsync_inode(F2FS_I_SB(inode), dir_list, pino);
+ if (IS_ERR(entry)) {
+ dir = ERR_CAST(entry);
+ err = PTR_ERR(entry);
+ goto out;
+ }
}
- if (file_enc_name(inode)) {
- iput(dir);
- return 0;
- }
+ dir = entry->inode;
- name.len = le32_to_cpu(raw_inode->i_namelen);
- name.name = raw_inode->i_name;
+ memset(&fname, 0, sizeof(struct fscrypt_name));
+ fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
+ fname.disk_name.name = raw_inode->i_name;
- if (unlikely(name.len > F2FS_NAME_LEN)) {
+ if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
WARN_ON(1);
err = -ENAMETOOLONG;
- goto out_err;
+ goto out;
}
retry:
- de = f2fs_find_entry(dir, &name, &page);
+ de = __f2fs_find_entry(dir, &fname, &page);
if (de && inode->i_ino == le32_to_cpu(de->ino))
goto out_unmap_put;
if (de) {
- einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
+ einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
if (IS_ERR(einode)) {
WARN_ON(1);
err = PTR_ERR(einode);
@@ -118,29 +148,27 @@ retry:
f2fs_delete_entry(de, page, dir, einode);
iput(einode);
goto retry;
- }
- err = __f2fs_add_link(dir, &name, inode, inode->i_ino, inode->i_mode);
- if (err)
- goto out_err;
-
- if (is_inode_flag_set(F2FS_I(dir), FI_DELAY_IPUT)) {
- iput(dir);
+ } else if (IS_ERR(page)) {
+ err = PTR_ERR(page);
} else {
- add_dirty_dir_inode(dir);
- set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
+ err = __f2fs_do_add_link(dir, &fname, inode,
+ inode->i_ino, inode->i_mode);
}
-
+ if (err == -ENOMEM)
+ goto retry;
goto out;
out_unmap_put:
f2fs_dentry_kunmap(dir, page);
f2fs_put_page(page, 0);
-out_err:
- iput(dir);
out:
+ if (file_enc_name(inode))
+ name = "<encrypted>";
+ else
+ name = raw_inode->i_name;
f2fs_msg(inode->i_sb, KERN_NOTICE,
"%s: ino = %x, name = %s, dir = %lx, err = %d",
- __func__, ino_of_node(ipage), raw_inode->i_name,
+ __func__, ino_of_node(ipage), name,
IS_ERR(dir) ? 0 : dir->i_ino, err);
return err;
}
@@ -151,14 +179,16 @@ static void recover_inode(struct inode *inode, struct page *page)
char *name;
inode->i_mode = le16_to_cpu(raw->i_mode);
- i_size_write(inode, le64_to_cpu(raw->i_size));
- inode->i_atime.tv_sec = le64_to_cpu(raw->i_mtime);
+ f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
+ inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
- inode->i_atime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
+ inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
+ F2FS_I(inode)->i_advise = raw->i_advise;
+
if (file_enc_name(inode))
name = "<encrypted>";
else
@@ -170,7 +200,6 @@ static void recover_inode(struct inode *inode, struct page *page)
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
- unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
struct curseg_info *curseg;
struct page *page = NULL;
block_t blkaddr;
@@ -180,8 +209,6 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
- ra_meta_pages(sbi, blkaddr, 1, META_POR, true);
-
while (1) {
struct fsync_inode_entry *entry;
@@ -190,7 +217,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
page = get_tmp_page(sbi, blkaddr);
- if (cp_ver != cpver_of_node(page))
+ if (!is_recoverable_dnode(page))
break;
if (!is_fsync_dnode(page))
@@ -204,35 +231,24 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
break;
}
- /* add this fsync inode to the list */
- entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
- if (!entry) {
- err = -ENOMEM;
- break;
- }
/*
* CP | dnode(F) | inode(DF)
* For this case, we should not give up now.
*/
- entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
- if (IS_ERR(entry->inode)) {
- err = PTR_ERR(entry->inode);
- kmem_cache_free(fsync_entry_slab, entry);
+ entry = add_fsync_inode(sbi, head, ino_of_node(page));
+ if (IS_ERR(entry)) {
+ err = PTR_ERR(entry);
if (err == -ENOENT) {
err = 0;
goto next;
}
break;
}
- list_add_tail(&entry->list, head);
}
entry->blkaddr = blkaddr;
- if (IS_INODE(page)) {
- entry->last_inode = blkaddr;
- if (is_dent_dnode(page))
- entry->last_dentry = blkaddr;
- }
+ if (IS_INODE(page) && is_dent_dnode(page))
+ entry->last_dentry = blkaddr;
next:
/* check next segment */
blkaddr = next_blkaddr_of_node(page);
@@ -248,11 +264,8 @@ static void destroy_fsync_dnodes(struct list_head *head)
{
struct fsync_inode_entry *entry, *tmp;
- list_for_each_entry_safe(entry, tmp, head, list) {
- iput(entry->inode);
- list_del(&entry->list);
- kmem_cache_free(fsync_entry_slab, entry);
- }
+ list_for_each_entry_safe(entry, tmp, head, list)
+ del_fsync_inode(entry);
}
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
@@ -276,7 +289,7 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
return 0;
/* Get the previous summary */
- for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
+ for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
struct curseg_info *curseg = CURSEG_I(sbi, i);
if (curseg->segno == segno) {
sum = curseg->sum_blk->entries[blkoff];
@@ -314,15 +327,14 @@ got_it:
if (ino != dn->inode->i_ino) {
/* Deallocate previous index in the node page */
- inode = f2fs_iget(sbi->sb, ino);
+ inode = f2fs_iget_retry(sbi->sb, ino);
if (IS_ERR(inode))
return PTR_ERR(inode);
} else {
inode = dn->inode;
}
- bidx = start_bidx_of_node(offset, F2FS_I(inode)) +
- le16_to_cpu(sum.ofs_in_node);
+ bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);
/*
* if inode page is locked, unlock temporarily, but its reference
@@ -357,10 +369,9 @@ truncate_out:
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
struct page *page, block_t blkaddr)
{
- struct f2fs_inode_info *fi = F2FS_I(inode);
- unsigned int start, end;
struct dnode_of_data dn;
struct node_info ni;
+ unsigned int start, end;
int err = 0, recovered = 0;
/* step 1: recover xattr */
@@ -380,16 +391,21 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
goto out;
/* step 3: recover data indices */
- start = start_bidx_of_node(ofs_of_node(page), fi);
- end = start + ADDRS_PER_PAGE(page, fi);
+ start = start_bidx_of_node(ofs_of_node(page), inode);
+ end = start + ADDRS_PER_PAGE(page, inode);
set_new_dnode(&dn, inode, NULL, NULL, 0);
-
+retry_dn:
err = get_dnode_of_data(&dn, start, ALLOC_NODE);
- if (err)
+ if (err) {
+ if (err == -ENOMEM) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry_dn;
+ }
goto out;
+ }
- f2fs_wait_on_page_writeback(dn.node_page, NODE);
+ f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
get_node_info(sbi, dn.nid, &ni);
f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
@@ -411,14 +427,17 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
continue;
}
+ if (!file_keep_isize(inode) &&
+ (i_size_read(inode) <= (start << PAGE_SHIFT)))
+ f2fs_i_size_write(inode, (start + 1) << PAGE_SHIFT);
+
/*
* dest is reserved block, invalidate src block
* and then reserve one new block in dnode page.
*/
if (dest == NEW_ADDR) {
truncate_data_blocks_range(&dn, 1);
- err = reserve_new_block(&dn);
- f2fs_bug_on(sbi, err);
+ reserve_new_block(&dn);
continue;
}
@@ -427,25 +446,33 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
if (src == NULL_ADDR) {
err = reserve_new_block(&dn);
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ while (err)
+ err = reserve_new_block(&dn);
+#endif
/* We should not get -ENOSPC */
f2fs_bug_on(sbi, err);
+ if (err)
+ goto err;
}
-
+retry_prev:
/* Check the previous node page having this index */
err = check_index_in_prev_nodes(sbi, dest, &dn);
- if (err)
+ if (err) {
+ if (err == -ENOMEM) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry_prev;
+ }
goto err;
+ }
/* write dummy data page */
f2fs_replace_block(sbi, &dn, src, dest,
- ni.version, false);
+ ni.version, false, false);
recovered++;
}
}
- if (IS_INODE(dn.node_page))
- sync_inode_page(&dn);
-
copy_node_footer(dn.node_page, page);
fill_node_footer(dn.node_page, dn.nid, ni.ino,
ofs_of_node(page), false);
@@ -454,22 +481,23 @@ err:
f2fs_put_dnode(&dn);
out:
f2fs_msg(sbi->sb, KERN_NOTICE,
- "recover_data: ino = %lx, recovered = %d blocks, err = %d",
- inode->i_ino, recovered, err);
+ "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
+ inode->i_ino,
+ file_keep_isize(inode) ? "keep" : "recover",
+ recovered, err);
return err;
}
-static int recover_data(struct f2fs_sb_info *sbi,
- struct list_head *head, int type)
+static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
+ struct list_head *dir_list)
{
- unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
struct curseg_info *curseg;
struct page *page = NULL;
int err = 0;
block_t blkaddr;
/* get node pages in the current segment */
- curseg = CURSEG_I(sbi, type);
+ curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
while (1) {
@@ -482,12 +510,12 @@ static int recover_data(struct f2fs_sb_info *sbi,
page = get_tmp_page(sbi, blkaddr);
- if (cp_ver != cpver_of_node(page)) {
+ if (!is_recoverable_dnode(page)) {
f2fs_put_page(page, 1);
break;
}
- entry = get_fsync_inode(head, ino_of_node(page));
+ entry = get_fsync_inode(inode_list, ino_of_node(page));
if (!entry)
goto next;
/*
@@ -495,10 +523,10 @@ static int recover_data(struct f2fs_sb_info *sbi,
* In this case, we can lose the latest inode(x).
* So, call recover_inode for the inode update.
*/
- if (entry->last_inode == blkaddr)
+ if (IS_INODE(page))
recover_inode(entry->inode, page);
if (entry->last_dentry == blkaddr) {
- err = recover_dentry(entry->inode, page);
+ err = recover_dentry(entry->inode, page, dir_list);
if (err) {
f2fs_put_page(page, 1);
break;
@@ -510,11 +538,8 @@ static int recover_data(struct f2fs_sb_info *sbi,
break;
}
- if (entry->blkaddr == blkaddr) {
- iput(entry->inode);
- list_del(&entry->list);
- kmem_cache_free(fsync_entry_slab, entry);
- }
+ if (entry->blkaddr == blkaddr)
+ del_fsync_inode(entry);
next:
/* check next segment */
blkaddr = next_blkaddr_of_node(page);
@@ -525,12 +550,14 @@ next:
return err;
}
-int recover_fsync_data(struct f2fs_sb_info *sbi)
+int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
struct list_head inode_list;
+ struct list_head dir_list;
block_t blkaddr;
int err;
+ int ret = 0;
bool need_writecp = false;
fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
@@ -539,6 +566,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
return -ENOMEM;
INIT_LIST_HEAD(&inode_list);
+ INIT_LIST_HEAD(&dir_list);
/* prevent checkpoint */
mutex_lock(&sbi->cp_mutex);
@@ -547,25 +575,26 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
/* step #1: find fsynced inode numbers */
err = find_fsync_dnodes(sbi, &inode_list);
- if (err)
+ if (err || list_empty(&inode_list))
goto out;
- if (list_empty(&inode_list))
+ if (check_only) {
+ ret = 1;
goto out;
+ }
need_writecp = true;
/* step #2: recover data */
- err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
+ err = recover_data(sbi, &inode_list, &dir_list);
if (!err)
f2fs_bug_on(sbi, !list_empty(&inode_list));
out:
destroy_fsync_dnodes(&inode_list);
- kmem_cache_destroy(fsync_entry_slab);
/* truncate meta pages to be used by the recovery */
truncate_inode_pages_range(META_MAPPING(sbi),
- (loff_t)MAIN_BLKADDR(sbi) << PAGE_CACHE_SHIFT, -1);
+ (loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);
if (err) {
truncate_inode_pages_final(NODE_MAPPING(sbi));
@@ -573,31 +602,20 @@ out:
}
clear_sbi_flag(sbi, SBI_POR_DOING);
- if (err) {
- bool invalidate = false;
-
- if (discard_next_dnode(sbi, blkaddr))
- invalidate = true;
-
- /* Flush all the NAT/SIT pages */
- while (get_pages(sbi, F2FS_DIRTY_META))
- sync_meta_pages(sbi, META, LONG_MAX);
+ if (err)
+ set_ckpt_flags(sbi, CP_ERROR_FLAG);
+ mutex_unlock(&sbi->cp_mutex);
- /* invalidate temporary meta page */
- if (invalidate)
- invalidate_mapping_pages(META_MAPPING(sbi),
- blkaddr, blkaddr);
+ /* let's drop all the directory inodes for a clean checkpoint */
+ destroy_fsync_dnodes(&dir_list);
- set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
- mutex_unlock(&sbi->cp_mutex);
- } else if (need_writecp) {
+ if (!err && need_writecp) {
struct cp_control cpc = {
.reason = CP_RECOVERY,
};
- mutex_unlock(&sbi->cp_mutex);
- write_checkpoint(sbi, &cpc);
- } else {
- mutex_unlock(&sbi->cp_mutex);
+ err = write_checkpoint(sbi, &cpc);
}
- return err;
+
+ kmem_cache_destroy(fsync_entry_slab);
+ return ret ? ret : err;
}
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 7965957dd0e6..a288de069164 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -86,6 +86,7 @@ static inline unsigned long __reverse_ffs(unsigned long word)
/*
* __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
* f2fs_set_bit makes MSB and LSB reversed in a byte.
+ * @size must be an integral multiple of BITS_PER_LONG.
* Example:
* MSB <--> LSB
* f2fs_set_bit(0, bitmap) => 1000 0000
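
To make the reversed bit order concrete, a user-space sketch (not from the patch): reverse_byte() plays the role of __reverse_ulong() for a single byte, mapping f2fs bit 0 (stored as the MSB) to the LSB that a generic ffs-style scan expects.

#include <stdio.h>

static unsigned char reverse_byte(unsigned char b)
{
	b = (b & 0xF0) >> 4 | (b & 0x0F) << 4;
	b = (b & 0xCC) >> 2 | (b & 0x33) << 2;
	b = (b & 0xAA) >> 1 | (b & 0x55) << 1;
	return b;
}

int main(void)
{
	unsigned char bitmap = 0x80;	/* f2fs_set_bit(0, bitmap) */

	/* prints "80 -> 01": bit 0 in f2fs order is bit 0 after reversal */
	printf("%02x -> %02x\n", bitmap, reverse_byte(bitmap));
	return 0;
}
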
@@ -95,94 +96,73 @@ static unsigned long __find_rev_next_bit(const unsigned long *addr,
unsigned long size, unsigned long offset)
{
const unsigned long *p = addr + BIT_WORD(offset);
- unsigned long result = offset & ~(BITS_PER_LONG - 1);
+ unsigned long result = size;
unsigned long tmp;
if (offset >= size)
return size;
- size -= result;
+ size -= (offset & ~(BITS_PER_LONG - 1));
offset %= BITS_PER_LONG;
- if (!offset)
- goto aligned;
-
- tmp = __reverse_ulong((unsigned char *)p);
- tmp &= ~0UL >> offset;
-
- if (size < BITS_PER_LONG)
- goto found_first;
- if (tmp)
- goto found_middle;
-
- size -= BITS_PER_LONG;
- result += BITS_PER_LONG;
- p++;
-aligned:
- while (size & ~(BITS_PER_LONG-1)) {
+
+ while (1) {
+ if (*p == 0)
+ goto pass;
+
tmp = __reverse_ulong((unsigned char *)p);
+
+ tmp &= ~0UL >> offset;
+ if (size < BITS_PER_LONG)
+ tmp &= (~0UL << (BITS_PER_LONG - size));
if (tmp)
- goto found_middle;
- result += BITS_PER_LONG;
+ goto found;
+pass:
+ if (size <= BITS_PER_LONG)
+ break;
size -= BITS_PER_LONG;
+ offset = 0;
p++;
}
- if (!size)
- return result;
-
- tmp = __reverse_ulong((unsigned char *)p);
-found_first:
- tmp &= (~0UL << (BITS_PER_LONG - size));
- if (!tmp) /* Are any bits set? */
- return result + size; /* Nope. */
-found_middle:
- return result + __reverse_ffs(tmp);
+ return result;
+found:
+ return result - size + __reverse_ffs(tmp);
}
static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
unsigned long size, unsigned long offset)
{
const unsigned long *p = addr + BIT_WORD(offset);
- unsigned long result = offset & ~(BITS_PER_LONG - 1);
+ unsigned long result = size;
unsigned long tmp;
if (offset >= size)
return size;
- size -= result;
+ size -= (offset & ~(BITS_PER_LONG - 1));
offset %= BITS_PER_LONG;
- if (!offset)
- goto aligned;
-
- tmp = __reverse_ulong((unsigned char *)p);
- tmp |= ~((~0UL << offset) >> offset);
-
- if (size < BITS_PER_LONG)
- goto found_first;
- if (tmp != ~0UL)
- goto found_middle;
-
- size -= BITS_PER_LONG;
- result += BITS_PER_LONG;
- p++;
-aligned:
- while (size & ~(BITS_PER_LONG - 1)) {
+
+ while (1) {
+ if (*p == ~0UL)
+ goto pass;
+
tmp = __reverse_ulong((unsigned char *)p);
+
+ if (offset)
+ tmp |= ~0UL << (BITS_PER_LONG - offset);
+ if (size < BITS_PER_LONG)
+ tmp |= ~0UL >> size;
if (tmp != ~0UL)
- goto found_middle;
- result += BITS_PER_LONG;
+ goto found;
+pass:
+ if (size <= BITS_PER_LONG)
+ break;
size -= BITS_PER_LONG;
+ offset = 0;
p++;
}
- if (!size)
- return result;
-
- tmp = __reverse_ulong((unsigned char *)p);
-found_first:
- tmp |= ~(~0UL << (BITS_PER_LONG - size));
- if (tmp == ~0UL) /* Are any bits zero? */
- return result + size; /* Nope. */
-found_middle:
- return result + __reverse_ffz(tmp);
+ return result;
+found:
+ return result - size + __reverse_ffz(tmp);
}
void register_inmem_page(struct inode *inode, struct page *page)
@@ -211,69 +191,150 @@ void register_inmem_page(struct inode *inode, struct page *page)
trace_f2fs_register_inmem_page(page, INMEM);
}
-int commit_inmem_pages(struct inode *inode, bool abort)
+static int __revoke_inmem_pages(struct inode *inode,
+ struct list_head *head, bool drop, bool recover)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct inmem_pages *cur, *tmp;
+ int err = 0;
+
+ list_for_each_entry_safe(cur, tmp, head, list) {
+ struct page *page = cur->page;
+
+ if (drop)
+ trace_f2fs_commit_inmem_page(page, INMEM_DROP);
+
+ lock_page(page);
+
+ if (recover) {
+ struct dnode_of_data dn;
+ struct node_info ni;
+
+ trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
+
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ if (get_dnode_of_data(&dn, page->index, LOOKUP_NODE)) {
+ err = -EAGAIN;
+ goto next;
+ }
+ get_node_info(sbi, dn.nid, &ni);
+ f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
+ cur->old_addr, ni.version, true, true);
+ f2fs_put_dnode(&dn);
+ }
+next:
+ /* we don't need to invalidate this on success */
+ if (drop || recover)
+ ClearPageUptodate(page);
+ set_page_private(page, 0);
+ ClearPagePrivate(page);
+ f2fs_put_page(page, 1);
+
+ list_del(&cur->list);
+ kmem_cache_free(inmem_entry_slab, cur);
+ dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
+ }
+ return err;
+}
+
+void drop_inmem_pages(struct inode *inode)
+{
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+
+ clear_inode_flag(inode, FI_ATOMIC_FILE);
+
+ mutex_lock(&fi->inmem_lock);
+ __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
+ mutex_unlock(&fi->inmem_lock);
+}
+
+static int __commit_inmem_pages(struct inode *inode,
+ struct list_head *revoke_list)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct inmem_pages *cur, *tmp;
- bool submit_bio = false;
struct f2fs_io_info fio = {
.sbi = sbi,
.type = DATA,
.rw = WRITE_SYNC | REQ_PRIO,
.encrypted_page = NULL,
};
+ bool submit_bio = false;
int err = 0;
- /*
- * The abort is true only when f2fs_evict_inode is called.
- * Basically, the f2fs_evict_inode doesn't produce any data writes, so
- * that we don't need to call f2fs_balance_fs.
- * Otherwise, f2fs_gc in f2fs_balance_fs can wait forever until this
- * inode becomes free by iget_locked in f2fs_iget.
- */
- if (!abort) {
- f2fs_balance_fs(sbi);
- f2fs_lock_op(sbi);
- }
-
- mutex_lock(&fi->inmem_lock);
list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
- lock_page(cur->page);
- if (!abort) {
- if (cur->page->mapping == inode->i_mapping) {
- set_page_dirty(cur->page);
- f2fs_wait_on_page_writeback(cur->page, DATA);
- if (clear_page_dirty_for_io(cur->page))
- inode_dec_dirty_pages(inode);
- trace_f2fs_commit_inmem_page(cur->page, INMEM);
- fio.page = cur->page;
- err = do_write_data_page(&fio);
- if (err) {
- unlock_page(cur->page);
- break;
- }
- clear_cold_data(cur->page);
- submit_bio = true;
+ struct page *page = cur->page;
+
+ lock_page(page);
+ if (page->mapping == inode->i_mapping) {
+ trace_f2fs_commit_inmem_page(page, INMEM);
+
+ set_page_dirty(page);
+ f2fs_wait_on_page_writeback(page, DATA, true);
+ if (clear_page_dirty_for_io(page)) {
+ inode_dec_dirty_pages(inode);
+ remove_dirty_inode(inode);
}
- } else {
- trace_f2fs_commit_inmem_page(cur->page, INMEM_DROP);
+
+ fio.page = page;
+ err = do_write_data_page(&fio);
+ if (err) {
+ unlock_page(page);
+ break;
+ }
+
+ /* record old blkaddr for revoking */
+ cur->old_addr = fio.old_blkaddr;
+
+ submit_bio = true;
}
- set_page_private(cur->page, 0);
- ClearPagePrivate(cur->page);
- f2fs_put_page(cur->page, 1);
+ unlock_page(page);
+ list_move_tail(&cur->list, revoke_list);
+ }
- list_del(&cur->list);
- kmem_cache_free(inmem_entry_slab, cur);
- dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
+ if (submit_bio)
+ f2fs_submit_merged_bio_cond(sbi, inode, NULL, 0, DATA, WRITE);
+
+ if (!err)
+ __revoke_inmem_pages(inode, revoke_list, false, false);
+
+ return err;
+}
+
+int commit_inmem_pages(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct list_head revoke_list;
+ int err;
+
+ INIT_LIST_HEAD(&revoke_list);
+ f2fs_balance_fs(sbi, true);
+ f2fs_lock_op(sbi);
+
+ mutex_lock(&fi->inmem_lock);
+ err = __commit_inmem_pages(inode, &revoke_list);
+ if (err) {
+ int ret;
+ /*
+ * try to revoke all committed pages, but we could still fail
+ * due to lack of memory or some other reason; if that happens,
+ * -EAGAIN is returned, meaning the transaction has lost its
+ * integrity, and the caller should use a journal to recover or
+ * rewrite and commit the last transaction. Any other error
+ * number means the revoke was done by the filesystem itself.
+ */
+ ret = __revoke_inmem_pages(inode, &revoke_list, false, true);
+ if (ret)
+ err = ret;
+
+ /* drop all uncommitted pages */
+ __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
}
mutex_unlock(&fi->inmem_lock);
- if (!abort) {
- f2fs_unlock_op(sbi);
- if (submit_bio)
- f2fs_submit_merged_bio(sbi, DATA, WRITE);
- }
+ f2fs_unlock_op(sbi);
return err;
}
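
A toy sketch (not from the patch) of the commit/revoke error flow above, with commit_pages() and revoke_pages() as hypothetical stand-ins for do_write_data_page() and __revoke_inmem_pages():

#include <errno.h>
#include <stdio.h>

static int commit_pages(int fail) { return fail ? -EIO : 0; }
static int revoke_pages(int fail) { return fail ? -EAGAIN : 0; }

int main(void)
{
	int err = commit_pages(1);		/* commit fails midway */

	if (err) {
		int ret = revoke_pages(0);	/* restore old blkaddrs */

		if (ret)
			err = ret;	/* revoke failed: integrity lost */
		/* uncommitted pages are dropped either way */
	}
	printf("err=%d\n", err);
	return 0;
}
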
@@ -281,15 +342,27 @@ int commit_inmem_pages(struct inode *inode, bool abort)
* This function balances dirty node and dentry pages.
* In addition, it controls garbage collection.
*/
-void f2fs_balance_fs(struct f2fs_sb_info *sbi)
+void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(sbi, FAULT_CHECKPOINT))
+ f2fs_stop_checkpoint(sbi, false);
+#endif
+
+ if (!need)
+ return;
+
+ /* balance_fs_bg may already have work pending */
+ if (excess_cached_nats(sbi))
+ f2fs_balance_fs_bg(sbi);
+
/*
* We should do GC or end up with a checkpoint if there are too many
* dirty dir/node pages without enough free segments.
*/
- if (has_not_enough_free_secs(sbi, 0)) {
+ if (has_not_enough_free_secs(sbi, 0, 0)) {
mutex_lock(&sbi->gc_mutex);
- f2fs_gc(sbi, false);
+ f2fs_gc(sbi, false, false);
}
}
@@ -304,14 +377,55 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
if (!available_free_memory(sbi, FREE_NIDS))
- try_to_free_nids(sbi, NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES);
+ try_to_free_nids(sbi, MAX_FREE_NIDS);
+ else
+ build_free_nids(sbi, false);
+
+ if (!is_idle(sbi))
+ return;
/* checkpoint is the only way to shrink partial cached entries */
if (!available_free_memory(sbi, NAT_ENTRIES) ||
- excess_prefree_segs(sbi) ||
!available_free_memory(sbi, INO_ENTRIES) ||
- jiffies > sbi->cp_expires)
+ excess_prefree_segs(sbi) ||
+ excess_dirty_nats(sbi) ||
+ f2fs_time_over(sbi, CP_TIME)) {
+ if (test_opt(sbi, DATA_FLUSH)) {
+ struct blk_plug plug;
+
+ blk_start_plug(&plug);
+ sync_dirty_inodes(sbi, FILE_INODE);
+ blk_finish_plug(&plug);
+ }
f2fs_sync_fs(sbi->sb, true);
+ stat_inc_bg_cp_count(sbi->stat_info);
+ }
+}
+
+static int __submit_flush_wait(struct block_device *bdev)
+{
+ struct bio *bio = f2fs_bio_alloc(0);
+ int ret;
+
+ bio->bi_bdev = bdev;
+ ret = submit_bio_wait(WRITE_FLUSH, bio);
+ bio_put(bio);
+ return ret;
+}
+
+static int submit_flush_wait(struct f2fs_sb_info *sbi)
+{
+ int ret = __submit_flush_wait(sbi->sb->s_bdev);
+ int i;
+
+ if (sbi->s_ndevs && !ret) {
+ for (i = 1; i < sbi->s_ndevs; i++) {
+ ret = __submit_flush_wait(FDEV(i).bdev);
+ if (ret)
+ break;
+ }
+ }
+ return ret;
}
static int issue_flush_thread(void *data)
@@ -324,24 +438,18 @@ repeat:
return 0;
if (!llist_empty(&fcc->issue_list)) {
- struct bio *bio;
struct flush_cmd *cmd, *next;
int ret;
- bio = f2fs_bio_alloc(0);
-
fcc->dispatch_list = llist_del_all(&fcc->issue_list);
fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
- bio->bi_bdev = sbi->sb->s_bdev;
- ret = submit_bio_wait(WRITE_FLUSH, bio);
-
+ ret = submit_flush_wait(sbi);
llist_for_each_entry_safe(cmd, next,
fcc->dispatch_list, llnode) {
cmd->ret = ret;
complete(&cmd->wait);
}
- bio_put(bio);
fcc->dispatch_list = NULL;
}
@@ -361,24 +469,30 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi)
if (test_opt(sbi, NOBARRIER))
return 0;
- if (!test_opt(sbi, FLUSH_MERGE)) {
- struct bio *bio = f2fs_bio_alloc(0);
+ if (!test_opt(sbi, FLUSH_MERGE) || !atomic_read(&fcc->submit_flush)) {
int ret;
- bio->bi_bdev = sbi->sb->s_bdev;
- ret = submit_bio_wait(WRITE_FLUSH, bio);
- bio_put(bio);
+ atomic_inc(&fcc->submit_flush);
+ ret = submit_flush_wait(sbi);
+ atomic_dec(&fcc->submit_flush);
return ret;
}
init_completion(&cmd.wait);
+ atomic_inc(&fcc->submit_flush);
llist_add(&cmd.llnode, &fcc->issue_list);
if (!fcc->dispatch_list)
wake_up(&fcc->flush_wait_queue);
- wait_for_completion(&cmd.wait);
+ if (fcc->f2fs_issue_flush) {
+ wait_for_completion(&cmd.wait);
+ atomic_dec(&fcc->submit_flush);
+ } else {
+ llist_del_all(&fcc->issue_list);
+ atomic_set(&fcc->submit_flush, 0);
+ }
return cmd.ret;
}
@@ -389,12 +503,19 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
struct flush_cmd_control *fcc;
int err = 0;
+ if (SM_I(sbi)->cmd_control_info) {
+ fcc = SM_I(sbi)->cmd_control_info;
+ goto init_thread;
+ }
+
fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
if (!fcc)
return -ENOMEM;
+ atomic_set(&fcc->submit_flush, 0);
init_waitqueue_head(&fcc->flush_wait_queue);
init_llist_head(&fcc->issue_list);
SM_I(sbi)->cmd_control_info = fcc;
+init_thread:
fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
if (IS_ERR(fcc->f2fs_issue_flush)) {
@@ -407,14 +528,20 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
return err;
}
-void destroy_flush_cmd_control(struct f2fs_sb_info *sbi)
+void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
- if (fcc && fcc->f2fs_issue_flush)
- kthread_stop(fcc->f2fs_issue_flush);
- kfree(fcc);
- SM_I(sbi)->cmd_control_info = NULL;
+ if (fcc && fcc->f2fs_issue_flush) {
+ struct task_struct *flush_thread = fcc->f2fs_issue_flush;
+
+ fcc->f2fs_issue_flush = NULL;
+ kthread_stop(flush_thread);
+ }
+ if (free) {
+ kfree(fcc);
+ SM_I(sbi)->cmd_control_info = NULL;
+ }
}
static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
@@ -493,46 +620,107 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
mutex_unlock(&dirty_i->seglist_lock);
}
-static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
- block_t blkstart, block_t blklen)
+#ifdef CONFIG_BLK_DEV_ZONED
+static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t blkstart, block_t blklen)
+{
+ sector_t nr_sects = SECTOR_FROM_BLOCK(blklen);
+ sector_t sector;
+ int devi = 0;
+
+ if (sbi->s_ndevs) {
+ devi = f2fs_target_device_index(sbi, blkstart);
+ blkstart -= FDEV(devi).start_blk;
+ }
+ sector = SECTOR_FROM_BLOCK(blkstart);
+
+ if (sector & (bdev_zone_size(bdev) - 1) ||
+ nr_sects != bdev_zone_size(bdev)) {
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "(%d) %s: Unaligned discard attempted (block %x + %x)",
+ devi, sbi->s_ndevs ? FDEV(devi).path : "",
+ blkstart, blklen);
+ return -EIO;
+ }
+
+ /*
+ * We need to know the type of the zone: for conventional zones,
+ * use regular discard if the drive supports it. For sequential
+ * zones, reset the zone write pointer.
+ */
+ switch (get_blkz_type(sbi, bdev, blkstart)) {
+
+ case BLK_ZONE_TYPE_CONVENTIONAL:
+ if (!blk_queue_discard(bdev_get_queue(bdev)))
+ return 0;
+ return blkdev_issue_discard(bdev, sector, nr_sects,
+ GFP_NOFS, 0);
+ case BLK_ZONE_TYPE_SEQWRITE_REQ:
+ case BLK_ZONE_TYPE_SEQWRITE_PREF:
+ trace_f2fs_issue_reset_zone(sbi->sb, blkstart);
+ return blkdev_reset_zones(bdev, sector,
+ nr_sects, GFP_NOFS);
+ default:
+ /* Unknown zone type: broken device? */
+ return -EIO;
+ }
+}
+#endif
+
+static int __issue_discard_async(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t blkstart, block_t blklen)
{
sector_t start = SECTOR_FROM_BLOCK(blkstart);
sector_t len = SECTOR_FROM_BLOCK(blklen);
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
+ bdev_zoned_model(bdev) != BLK_ZONED_NONE)
+ return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
+#endif
+ return blkdev_issue_discard(bdev, start, len, GFP_NOFS, 0);
+}
+
+static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
+ block_t blkstart, block_t blklen)
+{
+ sector_t start = blkstart, len = 0;
+ struct block_device *bdev;
struct seg_entry *se;
unsigned int offset;
block_t i;
+ int err = 0;
+
+ bdev = f2fs_target_device(sbi, blkstart, NULL);
+
+ for (i = blkstart; i < blkstart + blklen; i++, len++) {
+ if (i != start) {
+ struct block_device *bdev2 =
+ f2fs_target_device(sbi, i, NULL);
+
+ if (bdev2 != bdev) {
+ err = __issue_discard_async(sbi, bdev,
+ start, len);
+ if (err)
+ return err;
+ bdev = bdev2;
+ start = i;
+ len = 0;
+ }
+ }
- for (i = blkstart; i < blkstart + blklen; i++) {
se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
offset = GET_BLKOFF_FROM_SEG0(sbi, i);
if (!f2fs_test_and_set_bit(offset, se->discard_map))
sbi->discard_blks--;
}
- trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
- return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
-}
-
-bool discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
-{
- int err = -ENOTSUPP;
-
- if (test_opt(sbi, DISCARD)) {
- struct seg_entry *se = get_seg_entry(sbi,
- GET_SEGNO(sbi, blkaddr));
- unsigned int offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
- if (f2fs_test_bit(offset, se->discard_map))
- return false;
-
- err = f2fs_issue_discard(sbi, blkaddr, 1);
- }
+ if (len)
+ err = __issue_discard_async(sbi, bdev, start, len);
- if (err) {
- update_meta_page(sbi, NULL, blkaddr);
- return true;
- }
- return false;
+ trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
+ return err;
}
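
A user-space sketch of the range-splitting loop above: walk the block range and emit one discard per contiguous run that stays on a single device. dev_of() is a hypothetical stand-in for f2fs_target_device().

#include <stdio.h>

static int dev_of(unsigned int blk)
{
	return blk < 100 ? 0 : 1;	/* toy two-device layout */
}

int main(void)
{
	unsigned int blkstart = 95, blklen = 10;
	unsigned int start = blkstart, len = 0, i;
	int dev = dev_of(blkstart);

	for (i = blkstart; i < blkstart + blklen; i++, len++) {
		if (dev_of(i) != dev) {
			/* prints "discard dev0: 95 +5" */
			printf("discard dev%d: %u +%u\n", dev, start, len);
			dev = dev_of(i);
			start = i;
			len = 0;
		}
	}
	if (len)	/* prints "discard dev1: 100 +5" */
		printf("discard dev%d: %u +%u\n", dev, start, len);
	return 0;
}
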
static void __add_discard_entry(struct f2fs_sb_info *sbi,
@@ -573,7 +761,7 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
bool force = (cpc->reason == CP_DISCARD);
int i;
- if (se->valid_blocks == max_blocks)
+ if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
return;
if (!force) {
@@ -593,6 +781,10 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
break;
end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
+ if (force && start && end != max_blocks
+ && (end - start) < cpc->trim_minlen)
+ continue;
+
__add_discard_entry(sbi, cpc, se, start, end);
}
}
@@ -630,6 +822,8 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
unsigned int start = 0, end = -1;
+ unsigned int secno, start_segno;
+ bool force = (cpc->reason == CP_DISCARD);
mutex_lock(&dirty_i->seglist_lock);
@@ -646,17 +840,31 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
dirty_i->nr_dirty[PRE] -= end - start;
- if (!test_opt(sbi, DISCARD))
+ if (force || !test_opt(sbi, DISCARD))
continue;
- f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
+ if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
+ f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
(end - start) << sbi->log_blocks_per_seg);
+ continue;
+ }
+next:
+ secno = GET_SECNO(sbi, start);
+ start_segno = secno * sbi->segs_per_sec;
+ if (!IS_CURSEC(sbi, secno) &&
+ !get_valid_blocks(sbi, start, sbi->segs_per_sec))
+ f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
+ sbi->segs_per_sec << sbi->log_blocks_per_seg);
+
+ start = start_segno + sbi->segs_per_sec;
+ if (start < end)
+ goto next;
}
mutex_unlock(&dirty_i->seglist_lock);
/* send small discards */
list_for_each_entry_safe(entry, this, head, list) {
- if (cpc->reason == CP_DISCARD && entry->len < cpc->trim_minlen)
+ if (force && entry->len < cpc->trim_minlen)
goto skip;
f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
cpc->trimmed += entry->len;
@@ -711,12 +919,14 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
if (del > 0) {
if (f2fs_test_and_set_bit(offset, se->cur_valid_map))
f2fs_bug_on(sbi, 1);
- if (!f2fs_test_and_set_bit(offset, se->discard_map))
+ if (f2fs_discard_en(sbi) &&
+ !f2fs_test_and_set_bit(offset, se->discard_map))
sbi->discard_blks--;
} else {
if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map))
f2fs_bug_on(sbi, 1);
- if (f2fs_test_and_clear_bit(offset, se->discard_map))
+ if (f2fs_discard_en(sbi) &&
+ f2fs_test_and_clear_bit(offset, se->discard_map))
sbi->discard_blks++;
}
if (!f2fs_test_bit(offset, se->ckpt_valid_map))
@@ -817,12 +1027,12 @@ int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
}
}
- sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
+ sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
SUM_FOOTER_SIZE) / SUMMARY_SIZE;
if (valid_sum_count <= sum_in_page)
return 1;
else if ((valid_sum_count - sum_in_page) <=
- (PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
+ (PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
return 2;
return 3;
}
@@ -841,9 +1051,9 @@ void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
void *dst = page_address(page);
if (src)
- memcpy(dst, src, PAGE_CACHE_SIZE);
+ memcpy(dst, src, PAGE_SIZE);
else
- memset(dst, 0, PAGE_CACHE_SIZE);
+ memset(dst, 0, PAGE_SIZE);
set_page_dirty(page);
f2fs_put_page(page, 1);
}
@@ -854,6 +1064,31 @@ static void write_sum_page(struct f2fs_sb_info *sbi,
update_meta_page(sbi, (void *)sum_blk, blk_addr);
}
+static void write_current_sum_page(struct f2fs_sb_info *sbi,
+ int type, block_t blk_addr)
+{
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
+ struct page *page = grab_meta_page(sbi, blk_addr);
+ struct f2fs_summary_block *src = curseg->sum_blk;
+ struct f2fs_summary_block *dst;
+
+ dst = (struct f2fs_summary_block *)page_address(page);
+
+ mutex_lock(&curseg->curseg_mutex);
+
+ down_read(&curseg->journal_rwsem);
+ memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
+ up_read(&curseg->journal_rwsem);
+
+ memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
+ memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
+
+ mutex_unlock(&curseg->curseg_mutex);
+
+ set_page_dirty(page);
+ f2fs_put_page(page, 1);
+}
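
write_current_sum_page() above only snapshots the in-memory journal, so it takes journal_rwsem on the read side; the write side is reserved for updaters. A userspace sketch of the same copy-under-read-lock pattern, using POSIX rwlocks as a stand-in for the kernel rwsem:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define JOURNAL_SZ 64

static pthread_rwlock_t journal_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static char journal[JOURNAL_SZ];

/* Updaters take the write side... */
static void journal_update(const char *src)
{
        pthread_rwlock_wrlock(&journal_rwsem);
        strncpy(journal, src, JOURNAL_SZ - 1);
        pthread_rwlock_unlock(&journal_rwsem);
}

/* ...while checkpointing only needs a consistent snapshot, so the
 * read side suffices and concurrent snapshots do not serialize. */
static void journal_snapshot(char *dst)
{
        pthread_rwlock_rdlock(&journal_rwsem);
        memcpy(dst, journal, JOURNAL_SZ);
        pthread_rwlock_unlock(&journal_rwsem);
}

int main(void)
{
        char snap[JOURNAL_SZ];

        journal_update("nat/sit entries");
        journal_snapshot(snap);
        printf("%s\n", snap);
        return 0;
}
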
+
static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
@@ -886,9 +1121,8 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
segno = find_next_zero_bit(free_i->free_segmap,
- MAIN_SEGS(sbi), *newseg + 1);
- if (segno - *newseg < sbi->segs_per_sec -
- (*newseg % sbi->segs_per_sec))
+ (hint + 1) * sbi->segs_per_sec, *newseg + 1);
+ if (segno < (hint + 1) * sbi->segs_per_sec)
goto got_it;
}
find_other_zone:
@@ -1071,7 +1305,7 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
struct curseg_info *curseg = CURSEG_I(sbi, type);
const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
- if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
+ if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0, 0))
return v_ops->get_victim(sbi,
&(curseg)->next_segno, BG_GC, type, SSR);
@@ -1106,22 +1340,21 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
stat_inc_seg_type(sbi, curseg);
}
-static void __allocate_new_segments(struct f2fs_sb_info *sbi, int type)
-{
- struct curseg_info *curseg = CURSEG_I(sbi, type);
- unsigned int old_segno;
-
- old_segno = curseg->segno;
- SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
- locate_dirty_segment(sbi, old_segno);
-}
-
void allocate_new_segments(struct f2fs_sb_info *sbi)
{
+ struct curseg_info *curseg;
+ unsigned int old_segno;
int i;
- for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
- __allocate_new_segments(sbi, i);
+ if (test_opt(sbi, LFS))
+ return;
+
+ for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
+ curseg = CURSEG_I(sbi, i);
+ old_segno = curseg->segno;
+ SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
+ locate_dirty_segment(sbi, old_segno);
+ }
}
static const struct segment_allocation default_salloc_ops = {
@@ -1134,6 +1367,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
unsigned int start_segno, end_segno;
struct cp_control cpc;
+ int err = 0;
if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
return -EINVAL;
@@ -1142,6 +1376,12 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
if (end <= MAIN_BLKADDR(sbi))
goto out;
+ if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "Found FS corruption, run fsck to fix.");
+ goto out;
+ }
+
/* start/end segment number in main_area */
start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
@@ -1164,12 +1404,16 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
sbi->segs_per_sec) - 1, end_segno);
mutex_lock(&sbi->gc_mutex);
- write_checkpoint(sbi, &cpc);
+ err = write_checkpoint(sbi, &cpc);
mutex_unlock(&sbi->gc_mutex);
+ if (err)
+ break;
+
+ schedule();
}
out:
range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
- return 0;
+ return err;
}
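
f2fs_trim_fs() above now trims in section-sized batches, writes a checkpoint per batch, bails out on the first checkpoint error, and yields between batches via schedule() so a huge FITRIM cannot monopolize the CPU. A rough standalone model of that control flow (BATCH_SEGS and trim_batch() are invented placeholders):

#include <sched.h>
#include <stdio.h>

#define BATCH_SEGS 64   /* hypothetical; f2fs derives batches from segs_per_sec */

/* Stand-in for one checkpoint-backed trim pass; returns 0 or -errno. */
static int trim_batch(unsigned start, unsigned end, unsigned long long *trimmed)
{
        *trimmed += end - start + 1;
        return 0;
}

static int trim_range(unsigned start_segno, unsigned end_segno,
                      unsigned long long *trimmed)
{
        unsigned cur = start_segno;
        int err = 0;

        *trimmed = 0;
        while (cur <= end_segno) {
                unsigned batch_end = cur + BATCH_SEGS - 1;

                if (batch_end > end_segno)
                        batch_end = end_segno;
                err = trim_batch(cur, batch_end, trimmed);
                if (err)
                        break;          /* propagate checkpoint failure */
                sched_yield();          /* analogous to schedule() between batches */
                cur = batch_end + 1;
        }
        return err;
}

int main(void)
{
        unsigned long long trimmed;
        int err = trim_range(0, 200, &trimmed);

        printf("err=%d trimmed=%llu segments\n", err, trimmed);
        return 0;
}
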
static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
@@ -1244,21 +1488,11 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
struct f2fs_summary *sum, int type)
{
struct sit_info *sit_i = SIT_I(sbi);
- struct curseg_info *curseg;
- bool direct_io = (type == CURSEG_DIRECT_IO);
-
- type = direct_io ? CURSEG_WARM_DATA : type;
-
- curseg = CURSEG_I(sbi, type);
+ struct curseg_info *curseg = CURSEG_I(sbi, type);
mutex_lock(&curseg->curseg_mutex);
mutex_lock(&sit_i->sentry_lock);
- /* direct_io'ed data is aligned to the segment for better performance */
- if (direct_io && curseg->next_blkoff &&
- !has_not_enough_free_secs(sbi, 0))
- __allocate_new_segments(sbi, type);
-
*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
/*
@@ -1292,11 +1526,17 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
{
int type = __get_segment_type(fio->page, fio->type);
- allocate_data_block(fio->sbi, fio->page, fio->blk_addr,
- &fio->blk_addr, sum, type);
+ if (fio->type == NODE || fio->type == DATA)
+ mutex_lock(&fio->sbi->wio_mutex[fio->type]);
+
+ allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
+ &fio->new_blkaddr, sum, type);
/* writeout dirty page into bdev */
f2fs_submit_page_mbio(fio);
+
+ if (fio->type == NODE || fio->type == DATA)
+ mutex_unlock(&fio->sbi->wio_mutex[fio->type]);
}
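
The wio_mutex taken in do_write_page() above keeps block allocation and bio submission atomic per page type, so pages of the same type reach the log in allocation order. A toy analog with one POSIX mutex per type (the counter stands in for NEXT_FREE_BLKADDR):

#include <pthread.h>
#include <stdio.h>

enum page_type { DATA, NODE, NR_TYPES };

/* Hypothetical per-type locks mirroring sbi->wio_mutex[] */
static pthread_mutex_t wio_mutex[NR_TYPES] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};
static unsigned long next_blkaddr[NR_TYPES];

/* Allocate and submit under one lock so concurrent writers of the
 * same type cannot interleave out of allocation order. */
static unsigned long write_one_page(enum page_type type)
{
        unsigned long blk;

        pthread_mutex_lock(&wio_mutex[type]);
        blk = next_blkaddr[type]++;                     /* allocate */
        printf("submit type=%d blk=%lu\n", type, blk);  /* submit */
        pthread_mutex_unlock(&wio_mutex[type]);
        return blk;
}

int main(void)
{
        write_one_page(DATA);
        write_one_page(NODE);
        write_one_page(DATA);
        return 0;
}
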
void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
@@ -1305,7 +1545,8 @@ void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
.sbi = sbi,
.type = META,
.rw = WRITE_SYNC | REQ_META | REQ_PRIO,
- .blk_addr = page->index,
+ .old_blkaddr = page->index,
+ .new_blkaddr = page->index,
.page = page,
.encrypted_page = NULL,
};
@@ -1335,19 +1576,19 @@ void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
get_node_info(sbi, dn->nid, &ni);
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
do_write_page(&sum, fio);
- dn->data_blkaddr = fio->blk_addr;
+ f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
}
void rewrite_data_page(struct f2fs_io_info *fio)
{
+ fio->new_blkaddr = fio->old_blkaddr;
stat_inc_inplace_blocks(fio->sbi);
f2fs_submit_page_mbio(fio);
}
-static void __f2fs_replace_block(struct f2fs_sb_info *sbi,
- struct f2fs_summary *sum,
+void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
block_t old_blkaddr, block_t new_blkaddr,
- bool recover_curseg)
+ bool recover_curseg, bool recover_newaddr)
{
struct sit_info *sit_i = SIT_I(sbi);
struct curseg_info *curseg;
@@ -1390,7 +1631,7 @@ static void __f2fs_replace_block(struct f2fs_sb_info *sbi,
curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
__add_sum_entry(sbi, type, sum);
- if (!recover_curseg)
+ if (!recover_curseg || recover_newaddr)
update_sit_entry(sbi, new_blkaddr, 1);
if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
update_sit_entry(sbi, old_blkaddr, -1);
@@ -1414,66 +1655,30 @@ static void __f2fs_replace_block(struct f2fs_sb_info *sbi,
void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
block_t old_addr, block_t new_addr,
- unsigned char version, bool recover_curseg)
+ unsigned char version, bool recover_curseg,
+ bool recover_newaddr)
{
struct f2fs_summary sum;
set_summary(&sum, dn->nid, dn->ofs_in_node, version);
- __f2fs_replace_block(sbi, &sum, old_addr, new_addr, recover_curseg);
-
- dn->data_blkaddr = new_addr;
- set_data_blkaddr(dn);
- f2fs_update_extent_cache(dn);
-}
-
-static inline bool is_merged_page(struct f2fs_sb_info *sbi,
- struct page *page, enum page_type type)
-{
- enum page_type btype = PAGE_TYPE_OF_BIO(type);
- struct f2fs_bio_info *io = &sbi->write_io[btype];
- struct bio_vec *bvec;
- struct page *target;
- int i;
-
- down_read(&io->io_rwsem);
- if (!io->bio) {
- up_read(&io->io_rwsem);
- return false;
- }
-
- bio_for_each_segment_all(bvec, io->bio, i) {
-
- if (bvec->bv_page->mapping) {
- target = bvec->bv_page;
- } else {
- struct f2fs_crypto_ctx *ctx;
-
- /* encrypted page */
- ctx = (struct f2fs_crypto_ctx *)page_private(
- bvec->bv_page);
- target = ctx->w.control_page;
- }
-
- if (page == target) {
- up_read(&io->io_rwsem);
- return true;
- }
- }
+ __f2fs_replace_block(sbi, &sum, old_addr, new_addr,
+ recover_curseg, recover_newaddr);
- up_read(&io->io_rwsem);
- return false;
+ f2fs_update_data_blkaddr(dn, new_addr);
}
void f2fs_wait_on_page_writeback(struct page *page,
- enum page_type type)
+ enum page_type type, bool ordered)
{
if (PageWriteback(page)) {
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
- if (is_merged_page(sbi, page, type))
- f2fs_submit_merged_bio(sbi, type, WRITE);
- wait_on_page_writeback(page);
+ f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, type, WRITE);
+ if (ordered)
+ wait_on_page_writeback(page);
+ else
+ wait_for_stable_page(page);
}
}
@@ -1482,14 +1687,12 @@ void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
{
struct page *cpage;
- if (blkaddr == NEW_ADDR)
+ if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
return;
- f2fs_bug_on(sbi, blkaddr == NULL_ADDR);
-
cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
if (cpage) {
- f2fs_wait_on_page_writeback(cpage, DATA);
+ f2fs_wait_on_page_writeback(cpage, DATA, true);
f2fs_put_page(cpage, 1);
}
}
@@ -1510,12 +1713,11 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
/* Step 1: restore nat cache */
seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
- memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);
+ memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
/* Step 2: restore sit cache */
seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
- memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
- SUM_JOURNAL_SIZE);
+ memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
offset = 2 * SUM_JOURNAL_SIZE;
/* Step 3: restore summary entries */
@@ -1539,7 +1741,7 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
s = (struct f2fs_summary *)(kaddr + offset);
seg_i->sum_blk->entries[j] = *s;
offset += SUMMARY_SIZE;
- if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
+ if (offset + SUMMARY_SIZE <= PAGE_SIZE -
SUM_FOOTER_SIZE)
continue;
@@ -1611,7 +1813,14 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
/* set uncompleted segment to curseg */
curseg = CURSEG_I(sbi, type);
mutex_lock(&curseg->curseg_mutex);
- memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
+
+ /* update journal info */
+ down_write(&curseg->journal_rwsem);
+ memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
+ up_write(&curseg->journal_rwsem);
+
+ memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
+ memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
curseg->next_segno = segno;
reset_curseg(sbi, type, 0);
curseg->alloc_type = ckpt->alloc_type[type];
@@ -1623,14 +1832,10 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
- struct f2fs_summary_block *s_sits =
- CURSEG_I(sbi, CURSEG_COLD_DATA)->sum_blk;
- struct f2fs_summary_block *s_nats =
- CURSEG_I(sbi, CURSEG_HOT_DATA)->sum_blk;
int type = CURSEG_HOT_DATA;
int err;
- if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
+ if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
int npages = npages_for_summary_flush(sbi, true);
if (npages >= 2)
@@ -1653,11 +1858,6 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
return err;
}
- /* sanity check for summary blocks */
- if (nats_in_cursum(s_nats) > NAT_JOURNAL_ENTRIES ||
- sits_in_cursum(s_sits) > SIT_JOURNAL_ENTRIES)
- return -EINVAL;
-
return 0;
}
@@ -1675,13 +1875,12 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
/* Step 1: write nat cache */
seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
- memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
+ memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
written_size += SUM_JOURNAL_SIZE;
/* Step 2: write sit cache */
seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
- memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
- SUM_JOURNAL_SIZE);
+ memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
written_size += SUM_JOURNAL_SIZE;
/* Step 3: write summary entries */
@@ -1703,7 +1902,7 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
*summary = seg_i->sum_blk->entries[j];
written_size += SUMMARY_SIZE;
- if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
+ if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
SUM_FOOTER_SIZE)
continue;
@@ -1727,17 +1926,13 @@ static void write_normal_summaries(struct f2fs_sb_info *sbi,
else
end = type + NR_CURSEG_NODE_TYPE;
- for (i = type; i < end; i++) {
- struct curseg_info *sum = CURSEG_I(sbi, i);
- mutex_lock(&sum->curseg_mutex);
- write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
- mutex_unlock(&sum->curseg_mutex);
- }
+ for (i = type; i < end; i++)
+ write_current_sum_page(sbi, i, blkaddr + (i - type));
}
void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
- if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
+ if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
write_compacted_summaries(sbi, start_blk);
else
write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
@@ -1748,24 +1943,24 @@ void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
}
-int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
+int lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
unsigned int val, int alloc)
{
int i;
if (type == NAT_JOURNAL) {
- for (i = 0; i < nats_in_cursum(sum); i++) {
- if (le32_to_cpu(nid_in_journal(sum, i)) == val)
+ for (i = 0; i < nats_in_cursum(journal); i++) {
+ if (le32_to_cpu(nid_in_journal(journal, i)) == val)
return i;
}
- if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
- return update_nats_in_cursum(sum, 1);
+ if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
+ return update_nats_in_cursum(journal, 1);
} else if (type == SIT_JOURNAL) {
- for (i = 0; i < sits_in_cursum(sum); i++)
- if (le32_to_cpu(segno_in_journal(sum, i)) == val)
+ for (i = 0; i < sits_in_cursum(journal); i++)
+ if (le32_to_cpu(segno_in_journal(journal, i)) == val)
return i;
- if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
- return update_sits_in_cursum(sum, 1);
+ if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
+ return update_sits_in_cursum(journal, 1);
}
return -1;
}
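
lookup_journal_in_cursum() above follows a lookup-or-append pattern: linear-scan the in-core journal for the key, and only when the caller asks for allocation and __has_cursum_space() still holds is a fresh slot appended; -1 sends the caller to the on-disk NAT/SIT area instead. A generic sketch of the same pattern (MAX_ENTRIES is a made-up capacity):

#include <stdio.h>

#define MAX_ENTRIES 8   /* stand-in for the journal capacity checks */

struct journal {
        unsigned keys[MAX_ENTRIES];
        int n;
};

/* Return the index of key, or a freshly appended slot when alloc is
 * set and space remains; -1 otherwise. */
static int lookup_or_alloc(struct journal *j, unsigned key, int alloc)
{
        int i;

        for (i = 0; i < j->n; i++)
                if (j->keys[i] == key)
                        return i;
        if (alloc && j->n < MAX_ENTRIES) {
                j->keys[j->n] = key;
                return j->n++;
        }
        return -1;
}

int main(void)
{
        struct journal j = { .n = 0 };

        printf("%d\n", lookup_or_alloc(&j, 42, 1));     /* 0: appended */
        printf("%d\n", lookup_or_alloc(&j, 42, 0));     /* 0: found */
        printf("%d\n", lookup_or_alloc(&j, 7, 0));      /* -1: absent */
        return 0;
}
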
@@ -1794,7 +1989,7 @@ static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
src_addr = page_address(src_page);
dst_addr = page_address(dst_page);
- memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
+ memcpy(dst_addr, src_addr, PAGE_SIZE);
set_page_dirty(dst_page);
f2fs_put_page(src_page, 1);
@@ -1869,20 +2064,22 @@ static void add_sits_in_set(struct f2fs_sb_info *sbi)
static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
{
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
int i;
- for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
+ down_write(&curseg->journal_rwsem);
+ for (i = 0; i < sits_in_cursum(journal); i++) {
unsigned int segno;
bool dirtied;
- segno = le32_to_cpu(segno_in_journal(sum, i));
+ segno = le32_to_cpu(segno_in_journal(journal, i));
dirtied = __mark_sit_entry_dirty(sbi, segno);
if (!dirtied)
add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
}
- update_sits_in_cursum(sum, -sits_in_cursum(sum));
+ update_sits_in_cursum(journal, -i);
+ up_write(&curseg->journal_rwsem);
}
/*
@@ -1894,13 +2091,12 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
struct sit_info *sit_i = SIT_I(sbi);
unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
struct sit_entry_set *ses, *tmp;
struct list_head *head = &SM_I(sbi)->sit_entry_set;
bool to_journal = true;
struct seg_entry *se;
- mutex_lock(&curseg->curseg_mutex);
mutex_lock(&sit_i->sentry_lock);
if (!sit_i->dirty_sentries)
@@ -1917,7 +2113,7 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
* entries, remove all entries from journal and add and account
* them in sit entry set.
*/
- if (!__has_cursum_space(sum, sit_i->dirty_sentries, SIT_JOURNAL))
+ if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL))
remove_sits_in_journal(sbi);
/*
@@ -1934,10 +2130,12 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
unsigned int segno = start_segno;
if (to_journal &&
- !__has_cursum_space(sum, ses->entry_cnt, SIT_JOURNAL))
+ !__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
to_journal = false;
- if (!to_journal) {
+ if (to_journal) {
+ down_write(&curseg->journal_rwsem);
+ } else {
page = get_next_sit_page(sbi, start_segno);
raw_sit = page_address(page);
}
@@ -1955,13 +2153,13 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
}
if (to_journal) {
- offset = lookup_journal_in_cursum(sum,
+ offset = lookup_journal_in_cursum(journal,
SIT_JOURNAL, segno, 1);
f2fs_bug_on(sbi, offset < 0);
- segno_in_journal(sum, offset) =
+ segno_in_journal(journal, offset) =
cpu_to_le32(segno);
seg_info_to_raw_sit(se,
- &sit_in_journal(sum, offset));
+ &sit_in_journal(journal, offset));
} else {
sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
seg_info_to_raw_sit(se,
@@ -1973,7 +2171,9 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
ses->entry_cnt--;
}
- if (!to_journal)
+ if (to_journal)
+ up_write(&curseg->journal_rwsem);
+ else
f2fs_put_page(page, 1);
f2fs_bug_on(sbi, ses->entry_cnt);
@@ -1988,7 +2188,6 @@ out:
add_discard_addrs(sbi, cpc);
}
mutex_unlock(&sit_i->sentry_lock);
- mutex_unlock(&curseg->curseg_mutex);
set_prefree_as_free_segments(sbi);
}
@@ -1996,7 +2195,6 @@ out:
static int build_sit_info(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
- struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
struct sit_info *sit_i;
unsigned int sit_segs, start;
char *src_bitmap, *dst_bitmap;
@@ -2024,12 +2222,16 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
sit_i->sentries[start].ckpt_valid_map
= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
- sit_i->sentries[start].discard_map
- = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
if (!sit_i->sentries[start].cur_valid_map ||
- !sit_i->sentries[start].ckpt_valid_map ||
- !sit_i->sentries[start].discard_map)
+ !sit_i->sentries[start].ckpt_valid_map)
return -ENOMEM;
+
+ if (f2fs_discard_en(sbi)) {
+ sit_i->sentries[start].discard_map
+ = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
+ if (!sit_i->sentries[start].discard_map)
+ return -ENOMEM;
+ }
}
sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
@@ -2059,7 +2261,7 @@ static int build_sit_info(struct f2fs_sb_info *sbi)
sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
- sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
+ sit_i->written_valid_blocks = 0;
sit_i->sit_bitmap = dst_bitmap;
sit_i->bitmap_size = bitmap_size;
sit_i->dirty_sentries = 0;
@@ -2117,9 +2319,14 @@ static int build_curseg(struct f2fs_sb_info *sbi)
for (i = 0; i < NR_CURSEG_TYPE; i++) {
mutex_init(&array[i].curseg_mutex);
- array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
+ array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!array[i].sum_blk)
return -ENOMEM;
+ init_rwsem(&array[i].journal_rwsem);
+ array[i].journal = kzalloc(sizeof(struct f2fs_journal),
+ GFP_KERNEL);
+ if (!array[i].journal)
+ return -ENOMEM;
array[i].segno = NULL_SEGNO;
array[i].next_blkoff = 0;
}
@@ -2130,54 +2337,73 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
{
struct sit_info *sit_i = SIT_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
- struct f2fs_summary_block *sum = curseg->sum_blk;
+ struct f2fs_journal *journal = curseg->journal;
+ struct seg_entry *se;
+ struct f2fs_sit_entry sit;
int sit_blk_cnt = SIT_BLK_CNT(sbi);
unsigned int i, start, end;
unsigned int readed, start_blk = 0;
- int nrpages = MAX_BIO_BLOCKS(sbi);
do {
- readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT, true);
+ readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
+ META_SIT, true);
start = start_blk * sit_i->sents_per_block;
end = (start_blk + readed) * sit_i->sents_per_block;
for (; start < end && start < MAIN_SEGS(sbi); start++) {
- struct seg_entry *se = &sit_i->sentries[start];
struct f2fs_sit_block *sit_blk;
- struct f2fs_sit_entry sit;
struct page *page;
- mutex_lock(&curseg->curseg_mutex);
- for (i = 0; i < sits_in_cursum(sum); i++) {
- if (le32_to_cpu(segno_in_journal(sum, i))
- == start) {
- sit = sit_in_journal(sum, i);
- mutex_unlock(&curseg->curseg_mutex);
- goto got_it;
- }
- }
- mutex_unlock(&curseg->curseg_mutex);
-
+ se = &sit_i->sentries[start];
page = get_current_sit_page(sbi, start);
sit_blk = (struct f2fs_sit_block *)page_address(page);
sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
f2fs_put_page(page, 1);
-got_it:
+
check_block_count(sbi, start, &sit);
seg_info_from_raw_sit(se, &sit);
/* build discard map only one time */
- memcpy(se->discard_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
- sbi->discard_blks += sbi->blocks_per_seg - se->valid_blocks;
-
- if (sbi->segs_per_sec > 1) {
- struct sec_entry *e = get_sec_entry(sbi, start);
- e->valid_blocks += se->valid_blocks;
+ if (f2fs_discard_en(sbi)) {
+ memcpy(se->discard_map, se->cur_valid_map,
+ SIT_VBLOCK_MAP_SIZE);
+ sbi->discard_blks += sbi->blocks_per_seg -
+ se->valid_blocks;
}
+
+ if (sbi->segs_per_sec > 1)
+ get_sec_entry(sbi, start)->valid_blocks +=
+ se->valid_blocks;
}
start_blk += readed;
} while (start_blk < sit_blk_cnt);
+
+ down_read(&curseg->journal_rwsem);
+ for (i = 0; i < sits_in_cursum(journal); i++) {
+ unsigned int old_valid_blocks;
+
+ start = le32_to_cpu(segno_in_journal(journal, i));
+ se = &sit_i->sentries[start];
+ sit = sit_in_journal(journal, i);
+
+ old_valid_blocks = se->valid_blocks;
+
+ check_block_count(sbi, start, &sit);
+ seg_info_from_raw_sit(se, &sit);
+
+ if (f2fs_discard_en(sbi)) {
+ memcpy(se->discard_map, se->cur_valid_map,
+ SIT_VBLOCK_MAP_SIZE);
+ sbi->discard_blks += old_valid_blocks -
+ se->valid_blocks;
+ }
+
+ if (sbi->segs_per_sec > 1)
+ get_sec_entry(sbi, start)->valid_blocks +=
+ se->valid_blocks - old_valid_blocks;
+ }
+ up_read(&curseg->journal_rwsem);
}
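
build_sit_entries() above now loads every entry from the on-disk SIT blocks first and applies the journalled entries afterwards under journal_rwsem, patching the per-section aggregates with deltas rather than recounting. A small model of that two-phase load (segment counts are arbitrary):

#include <stdio.h>

#define NR_SEGS 4

static unsigned valid_blocks[NR_SEGS]; /* per-segment state */
static unsigned sec_valid;             /* per-section aggregate */

/* Phase 1: bulk-load base values (stand-in for on-disk SIT blocks). */
static void load_base(const unsigned *base)
{
        for (int i = 0; i < NR_SEGS; i++) {
                valid_blocks[i] = base[i];
                sec_valid += base[i];
        }
}

/* Phase 2: journal entries are newer and override the base copy;
 * aggregates are fixed up with the delta, not recomputed. */
static void apply_journal(int segno, unsigned newer)
{
        unsigned old = valid_blocks[segno];

        valid_blocks[segno] = newer;
        sec_valid += newer - old;       /* unsigned wrap-around yields the delta */
}

int main(void)
{
        unsigned base[NR_SEGS] = { 10, 20, 30, 40 };

        load_base(base);
        apply_journal(2, 25);           /* journal says segment 2 shrank */
        printf("sec_valid=%u\n", sec_valid);    /* 95 */
        return 0;
}
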
static void init_free_segmap(struct f2fs_sb_info *sbi)
@@ -2189,6 +2415,9 @@ static void init_free_segmap(struct f2fs_sb_info *sbi)
struct seg_entry *sentry = get_seg_entry(sbi, start);
if (!sentry->valid_blocks)
__set_free(sbi, start);
+ else
+ SIT_I(sbi)->written_valid_blocks +=
+ sentry->valid_blocks;
}
/* set use the current segments */
@@ -2310,7 +2539,11 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
sm_info->rec_prefree_segments = sm_info->main_segments *
DEF_RECLAIM_PREFREE_SEGMENTS / 100;
- sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
+ if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
+ sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
+
+ if (!test_opt(sbi, LFS))
+ sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
@@ -2392,8 +2625,10 @@ static void destroy_curseg(struct f2fs_sb_info *sbi)
if (!array)
return;
SM_I(sbi)->curseg_array = NULL;
- for (i = 0; i < NR_CURSEG_TYPE; i++)
+ for (i = 0; i < NR_CURSEG_TYPE; i++) {
kfree(array[i].sum_blk);
+ kfree(array[i].journal);
+ }
kfree(array);
}
@@ -2440,7 +2675,7 @@ void destroy_segment_manager(struct f2fs_sb_info *sbi)
if (!sm_info)
return;
- destroy_flush_cmd_control(sbi);
+ destroy_flush_cmd_control(sbi, true);
destroy_dirty_segmap(sbi);
destroy_curseg(sbi);
destroy_free_segmap(sbi);
@@ -2459,7 +2694,7 @@ int __init create_segment_manager_caches(void)
sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
sizeof(struct sit_entry_set));
if (!sit_entry_set_slab)
- goto destory_discard_entry;
+ goto destroy_discard_entry;
inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
sizeof(struct inmem_pages));
@@ -2469,7 +2704,7 @@ int __init create_segment_manager_caches(void)
destroy_sit_entry_set:
kmem_cache_destroy(sit_entry_set_slab);
-destory_discard_entry:
+destroy_discard_entry:
kmem_cache_destroy(discard_entry_slab);
fail:
return -ENOMEM;
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index ee44d346ea44..9d44ce83acb2 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -16,6 +16,9 @@
#define NULL_SECNO ((unsigned int)(~0))
#define DEF_RECLAIM_PREFREE_SEGMENTS 5 /* 5% over total segments */
+#define DEF_MAX_RECLAIM_PREFREE_SEGMENTS 4096 /* 8GB in maximum */
+
+#define F2FS_MIN_SEGMENTS 9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno) (segno - free_i->start_segno)
@@ -101,8 +104,6 @@
(((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
#define SECTOR_TO_BLOCK(sectors) \
(sectors >> F2FS_LOG_SECTORS_PER_BLOCK)
-#define MAX_BIO_BLOCKS(sbi) \
- ((int)min((int)max_hw_blocks(sbi), BIO_MAX_PAGES))
/*
* indicate a block allocation direction: RIGHT and LEFT.
@@ -158,16 +159,17 @@ struct victim_sel_policy {
};
struct seg_entry {
- unsigned short valid_blocks; /* # of valid blocks */
+ unsigned int type:6; /* segment type like CURSEG_XXX_TYPE */
+ unsigned int valid_blocks:10; /* # of valid blocks */
+ unsigned int ckpt_valid_blocks:10; /* # of valid blocks last cp */
+ unsigned int padding:6; /* padding */
unsigned char *cur_valid_map; /* validity bitmap of blocks */
/*
* # of valid blocks and the validity bitmap stored in the last
* checkpoint pack. This information is used by the SSR mode.
*/
- unsigned short ckpt_valid_blocks;
- unsigned char *ckpt_valid_map;
+ unsigned char *ckpt_valid_map; /* validity bitmap of blocks last cp */
unsigned char *discard_map;
- unsigned char type; /* segment type like CURSEG_XXX_TYPE */
unsigned long long mtime; /* modification time of the segment */
};
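
The seg_entry rework above packs type, valid_blocks and ckpt_valid_blocks into one 32-bit word of bitfields: 10 bits comfortably hold the 512 blocks per segment and 6 bits the CURSEG types. A sketch comparing the footprint of the two layouts (field widths copied from the hunk; exact sizes depend on the ABI):

#include <stdio.h>

/* Old layout: separate shorts and a char, plus alignment padding. */
struct seg_entry_old {
        unsigned short valid_blocks;
        unsigned short ckpt_valid_blocks;
        unsigned char type;
};

/* New layout: one 32-bit word of bitfields. */
struct seg_entry_new {
        unsigned int type:6;
        unsigned int valid_blocks:10;
        unsigned int ckpt_valid_blocks:10;
        unsigned int padding:6;
};

int main(void)
{
        printf("old=%zu new=%zu bytes\n",
               sizeof(struct seg_entry_old), sizeof(struct seg_entry_new));
        return 0;       /* typically 6 vs 4 on common ABIs */
}
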
@@ -183,7 +185,7 @@ struct segment_allocation {
* this value is set in page as a private data which indicate that
* the page is atomically written, and it is in inmem_pages list.
*/
-#define ATOMIC_WRITTEN_PAGE 0x0000ffff
+#define ATOMIC_WRITTEN_PAGE ((unsigned long)-1)
#define IS_ATOMIC_WRITTEN_PAGE(page) \
(page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)
@@ -191,6 +193,7 @@ struct segment_allocation {
struct inmem_pages {
struct list_head list;
struct page *page;
+ block_t old_addr; /* for revoking when fail to commit */
};
struct sit_info {
@@ -257,6 +260,8 @@ struct victim_selection {
struct curseg_info {
struct mutex curseg_mutex; /* lock for consistency */
struct f2fs_summary_block *sum_blk; /* cached summary block */
+ struct rw_semaphore journal_rwsem; /* protect journal area */
+ struct f2fs_journal *journal; /* cached journal info */
unsigned char alloc_type; /* current allocation type */
unsigned int segno; /* current segment number */
unsigned short next_blkoff; /* next block offset to write */
@@ -466,20 +471,28 @@ static inline bool need_SSR(struct f2fs_sb_info *sbi)
{
int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
- return free_sections(sbi) <= (node_secs + 2 * dent_secs +
+ int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
+
+ if (test_opt(sbi, LFS))
+ return false;
+
+ return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
reserved_sections(sbi) + 1);
}
-static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
+static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
+ int freed, int needed)
{
int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
+ int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
return false;
- return (free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
- reserved_sections(sbi));
+ return (free_sections(sbi) + freed) <=
+ (node_secs + 2 * dent_secs + imeta_secs +
+ reserved_sections(sbi) + needed);
}
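
has_not_enough_free_secs() above gains an imeta term for dirty inode metadata and a caller-supplied "needed" reserve on top of reserved_sections(). A worked standalone example of the inequality (all counter values invented):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical snapshot of section counters. */
struct counters {
        int free_secs, node_secs, dent_secs, imeta_secs, reserved_secs;
};

static bool has_not_enough_free_secs(const struct counters *c,
                                     int freed, int needed)
{
        return (c->free_secs + freed) <=
               (c->node_secs + 2 * c->dent_secs + c->imeta_secs +
                c->reserved_secs + needed);
}

int main(void)
{
        struct counters c = {
                .free_secs = 10, .node_secs = 2, .dent_secs = 1,
                .imeta_secs = 3, .reserved_secs = 2,
        };

        /* 10 <= 2 + 2 + 3 + 2 + 0  ->  0: still room */
        printf("%d\n", has_not_enough_free_secs(&c, 0, 0));
        /* 10 <= 9 + 2 (needed)     ->  1: caller must reclaim first */
        printf("%d\n", has_not_enough_free_secs(&c, 0, 2));
        return 0;
}
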
static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
@@ -527,6 +540,9 @@ static inline bool need_inplace_update(struct inode *inode)
if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
return false;
+ if (test_opt(sbi, LFS))
+ return false;
+
if (policy & (0x1 << F2FS_IPU_FORCE))
return true;
if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
@@ -540,7 +556,7 @@ static inline bool need_inplace_update(struct inode *inode)
/* this is only set during fdatasync */
if (policy & (0x1 << F2FS_IPU_FSYNC) &&
- is_inode_flag_set(F2FS_I(inode), FI_NEED_IPU))
+ is_inode_flag_set(inode, FI_NEED_IPU))
return true;
return false;
@@ -573,8 +589,8 @@ static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
{
- f2fs_bug_on(sbi, blk_addr < SEG0_BLKADDR(sbi)
- || blk_addr >= MAX_BLKADDR(sbi));
+ BUG_ON(blk_addr < SEG0_BLKADDR(sbi)
+ || blk_addr >= MAX_BLKADDR(sbi));
}
/*
@@ -680,13 +696,6 @@ static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
return false;
}
-static inline unsigned int max_hw_blocks(struct f2fs_sb_info *sbi)
-{
- struct block_device *bdev = sbi->sb->s_bdev;
- struct request_queue *q = bdev_get_queue(bdev);
- return SECTOR_TO_BLOCK(queue_max_sectors(q));
-}
-
/*
* It is very important to gather dirty pages and write at once, so that we can
* submit a big bio without interfering other data writes.
@@ -702,9 +711,9 @@ static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
if (type == DATA)
return sbi->blocks_per_seg;
else if (type == NODE)
- return 3 * sbi->blocks_per_seg;
+ return 8 * sbi->blocks_per_seg;
else if (type == META)
- return MAX_BIO_BLOCKS(sbi);
+ return 8 * BIO_MAX_PAGES;
else
return 0;
}
@@ -721,13 +730,9 @@ static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
return 0;
nr_to_write = wbc->nr_to_write;
-
- if (type == DATA)
- desired = 4096;
- else if (type == NODE)
- desired = 3 * max_hw_blocks(sbi);
- else
- desired = MAX_BIO_BLOCKS(sbi);
+ desired = BIO_MAX_PAGES;
+ if (type == NODE)
+ desired <<= 1;
wbc->nr_to_write = desired;
return desired - nr_to_write;
diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
index da0d8e0b55a5..5c60fc28ec75 100644
--- a/fs/f2fs/shrinker.c
+++ b/fs/f2fs/shrinker.c
@@ -13,6 +13,7 @@
#include <linux/f2fs_fs.h>
#include "f2fs.h"
+#include "node.h"
static LIST_HEAD(f2fs_list);
static DEFINE_SPINLOCK(f2fs_list_lock);
@@ -20,19 +21,22 @@ static unsigned int shrinker_run_no;
static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
{
- return NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
+ long count = NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
+
+ return count > 0 ? count : 0;
}
static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
{
- if (NM_I(sbi)->fcnt > NAT_ENTRY_PER_BLOCK)
- return NM_I(sbi)->fcnt - NAT_ENTRY_PER_BLOCK;
- return 0;
+ long count = NM_I(sbi)->nid_cnt[FREE_NID_LIST] - MAX_FREE_NIDS;
+
+ return count > 0 ? count : 0;
}
static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
{
- return sbi->total_ext_tree + atomic_read(&sbi->total_ext_node);
+ return atomic_read(&sbi->total_zombie_tree) +
+ atomic_read(&sbi->total_ext_node);
}
unsigned long f2fs_shrink_count(struct shrinker *shrink,
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 5d3e745d33ae..e6d8d011786c 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -39,6 +39,35 @@ static struct proc_dir_entry *f2fs_proc_root;
static struct kmem_cache *f2fs_inode_cachep;
static struct kset *f2fs_kset;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+
+char *fault_name[FAULT_MAX] = {
+ [FAULT_KMALLOC] = "kmalloc",
+ [FAULT_PAGE_ALLOC] = "page alloc",
+ [FAULT_ALLOC_NID] = "alloc nid",
+ [FAULT_ORPHAN] = "orphan",
+ [FAULT_BLOCK] = "no more block",
+ [FAULT_DIR_DEPTH] = "too big dir depth",
+ [FAULT_EVICT_INODE] = "evict_inode fail",
+ [FAULT_IO] = "IO error",
+ [FAULT_CHECKPOINT] = "checkpoint error",
+};
+
+static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
+ unsigned int rate)
+{
+ struct f2fs_fault_info *ffi = &sbi->fault_info;
+
+ if (rate) {
+ atomic_set(&ffi->inject_ops, 0);
+ ffi->inject_rate = rate;
+ ffi->inject_type = (1 << FAULT_MAX) - 1;
+ } else {
+ memset(ffi, 0, sizeof(struct f2fs_fault_info));
+ }
+}
+#endif
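
f2fs_build_fault_attr() above arms fault injection with a rate (inject once per N guarded operations) and a type bitmask covering every FAULT_* site. A single-threaded sketch of how such a rate/type check can behave (the real f2fs counter is atomic; this time_to_inject() is a simplified analog):

#include <stdbool.h>
#include <stdio.h>

enum fault_type { FAULT_KMALLOC, FAULT_PAGE_ALLOC, FAULT_MAX };

struct fault_info {
        unsigned inject_rate;   /* inject once per N operations */
        unsigned inject_type;   /* bitmask of enabled fault sites */
        unsigned ops;           /* operation counter */
};

static bool time_to_inject(struct fault_info *fi, enum fault_type type)
{
        if (!fi->inject_rate || !(fi->inject_type & (1u << type)))
                return false;
        if (++fi->ops < fi->inject_rate)
                return false;
        fi->ops = 0;
        return true;
}

int main(void)
{
        /* rate=3: every third guarded call fails; all types enabled */
        struct fault_info fi = { 3, (1u << FAULT_MAX) - 1, 0 };

        for (int i = 0; i < 6; i++)
                printf("%d", time_to_inject(&fi, FAULT_KMALLOC));
        printf("\n");   /* prints 001001 */
        return 0;
}
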
+
/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
.scan_objects = f2fs_shrink_scan,
@@ -51,6 +80,7 @@ enum {
Opt_disable_roll_forward,
Opt_norecovery,
Opt_discard,
+ Opt_nodiscard,
Opt_noheap,
Opt_user_xattr,
Opt_nouser_xattr,
@@ -61,12 +91,19 @@ enum {
Opt_inline_xattr,
Opt_inline_data,
Opt_inline_dentry,
+ Opt_noinline_dentry,
Opt_flush_merge,
+ Opt_noflush_merge,
Opt_nobarrier,
Opt_fastboot,
Opt_extent_cache,
Opt_noextent_cache,
Opt_noinline_data,
+ Opt_data_flush,
+ Opt_mode,
+ Opt_fault_injection,
+ Opt_lazytime,
+ Opt_nolazytime,
Opt_err,
};
@@ -75,6 +112,7 @@ static match_table_t f2fs_tokens = {
{Opt_disable_roll_forward, "disable_roll_forward"},
{Opt_norecovery, "norecovery"},
{Opt_discard, "discard"},
+ {Opt_nodiscard, "nodiscard"},
{Opt_noheap, "no_heap"},
{Opt_user_xattr, "user_xattr"},
{Opt_nouser_xattr, "nouser_xattr"},
@@ -85,12 +123,19 @@ static match_table_t f2fs_tokens = {
{Opt_inline_xattr, "inline_xattr"},
{Opt_inline_data, "inline_data"},
{Opt_inline_dentry, "inline_dentry"},
+ {Opt_noinline_dentry, "noinline_dentry"},
{Opt_flush_merge, "flush_merge"},
+ {Opt_noflush_merge, "noflush_merge"},
{Opt_nobarrier, "nobarrier"},
{Opt_fastboot, "fastboot"},
{Opt_extent_cache, "extent_cache"},
{Opt_noextent_cache, "noextent_cache"},
{Opt_noinline_data, "noinline_data"},
+ {Opt_data_flush, "data_flush"},
+ {Opt_mode, "mode=%s"},
+ {Opt_fault_injection, "fault_injection=%u"},
+ {Opt_lazytime, "lazytime"},
+ {Opt_nolazytime, "nolazytime"},
{Opt_err, NULL},
};
@@ -100,6 +145,10 @@ enum {
SM_INFO, /* struct f2fs_sm_info */
NM_INFO, /* struct f2fs_nm_info */
F2FS_SBI, /* struct f2fs_sb_info */
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ FAULT_INFO_RATE, /* struct f2fs_fault_info */
+ FAULT_INFO_TYPE, /* struct f2fs_fault_info */
+#endif
};
struct f2fs_attr {
@@ -121,9 +170,27 @@ static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
return (unsigned char *)NM_I(sbi);
else if (struct_type == F2FS_SBI)
return (unsigned char *)sbi;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ else if (struct_type == FAULT_INFO_RATE ||
+ struct_type == FAULT_INFO_TYPE)
+ return (unsigned char *)&sbi->fault_info;
+#endif
return NULL;
}
+static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ struct super_block *sb = sbi->sb;
+
+ if (!sb->s_bdev->bd_part)
+ return snprintf(buf, PAGE_SIZE, "0\n");
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ (unsigned long long)(sbi->kbytes_written +
+ BD_PART_WRITTEN(sbi)));
+}
+
static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
@@ -157,6 +224,10 @@ static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
ret = kstrtoul(skip_spaces(buf), 0, &t);
if (ret < 0)
return ret;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (a->struct_type == FAULT_INFO_TYPE && t >= (1 << FAULT_MAX))
+ return -EINVAL;
+#endif
*ui = t;
return count;
}
@@ -202,6 +273,9 @@ static struct f2fs_attr f2fs_attr_##_name = { \
f2fs_sbi_show, f2fs_sbi_store, \
offsetof(struct struct_name, elname))
+#define F2FS_GENERAL_RO_ATTR(name) \
+static struct f2fs_attr f2fs_attr_##name = __ATTR(name, 0444, name##_show, NULL)
+
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
@@ -214,9 +288,16 @@ F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
+F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
-F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, cp_interval);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate);
+F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
+#endif
+F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
@@ -234,7 +315,14 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(dir_level),
ATTR_LIST(ram_thresh),
ATTR_LIST(ra_nid_pages),
+ ATTR_LIST(dirty_nats_ratio),
ATTR_LIST(cp_interval),
+ ATTR_LIST(idle_interval),
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ ATTR_LIST(inject_rate),
+ ATTR_LIST(inject_type),
+#endif
+ ATTR_LIST(lifetime_write_kbytes),
NULL,
};
@@ -324,12 +412,20 @@ static int parse_options(struct super_block *sb, char *options)
q = bdev_get_queue(sb->s_bdev);
if (blk_queue_discard(q)) {
set_opt(sbi, DISCARD);
- } else {
+ } else if (!f2fs_sb_mounted_blkzoned(sb)) {
f2fs_msg(sb, KERN_WARNING,
"mounting with \"discard\" option, but "
"the device does not support discard");
}
break;
+ case Opt_nodiscard:
+ if (f2fs_sb_mounted_blkzoned(sb)) {
+ f2fs_msg(sb, KERN_WARNING,
+ "discard is required for zoned block devices");
+ return -EINVAL;
+ }
+ clear_opt(sbi, DISCARD);
+ break;
case Opt_noheap:
set_opt(sbi, NOHEAP);
break;
@@ -388,9 +484,15 @@ static int parse_options(struct super_block *sb, char *options)
case Opt_inline_dentry:
set_opt(sbi, INLINE_DENTRY);
break;
+ case Opt_noinline_dentry:
+ clear_opt(sbi, INLINE_DENTRY);
+ break;
case Opt_flush_merge:
set_opt(sbi, FLUSH_MERGE);
break;
+ case Opt_noflush_merge:
+ clear_opt(sbi, FLUSH_MERGE);
+ break;
case Opt_nobarrier:
set_opt(sbi, NOBARRIER);
break;
@@ -406,6 +508,49 @@ static int parse_options(struct super_block *sb, char *options)
case Opt_noinline_data:
clear_opt(sbi, INLINE_DATA);
break;
+ case Opt_data_flush:
+ set_opt(sbi, DATA_FLUSH);
+ break;
+ case Opt_mode:
+ name = match_strdup(&args[0]);
+
+ if (!name)
+ return -ENOMEM;
+ if (strlen(name) == 8 &&
+ !strncmp(name, "adaptive", 8)) {
+ if (f2fs_sb_mounted_blkzoned(sb)) {
+ f2fs_msg(sb, KERN_WARNING,
+ "adaptive mode is not allowed with "
+ "zoned block device feature");
+ kfree(name);
+ return -EINVAL;
+ }
+ set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
+ } else if (strlen(name) == 3 &&
+ !strncmp(name, "lfs", 3)) {
+ set_opt_mode(sbi, F2FS_MOUNT_LFS);
+ } else {
+ kfree(name);
+ return -EINVAL;
+ }
+ kfree(name);
+ break;
+ case Opt_fault_injection:
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ f2fs_build_fault_attr(sbi, arg);
+#else
+ f2fs_msg(sb, KERN_INFO,
+ "FAULT_INJECTION was not selected");
+#endif
+ break;
+ case Opt_lazytime:
+ sb->s_flags |= MS_LAZYTIME;
+ break;
+ case Opt_nolazytime:
+ sb->s_flags &= ~MS_LAZYTIME;
+ break;
default:
f2fs_msg(sb, KERN_ERR,
"Unrecognized mount option \"%s\" or missing value",
@@ -432,20 +577,15 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
fi->i_current_depth = 1;
fi->i_advise = 0;
init_rwsem(&fi->i_sem);
+ INIT_LIST_HEAD(&fi->dirty_list);
+ INIT_LIST_HEAD(&fi->gdirty_list);
INIT_LIST_HEAD(&fi->inmem_pages);
mutex_init(&fi->inmem_lock);
-
- set_inode_flag(fi, FI_NEW_INODE);
-
- if (test_opt(F2FS_SB(sb), INLINE_XATTR))
- set_inode_flag(fi, FI_INLINE_XATTR);
+ init_rwsem(&fi->dio_rwsem[READ]);
+ init_rwsem(&fi->dio_rwsem[WRITE]);
/* Will be used by directory only */
fi->i_dir_level = F2FS_SB(sb)->dir_level;
-
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- fi->i_crypt_info = NULL;
-#endif
return &fi->vfs_inode;
}
@@ -458,7 +598,7 @@ static int f2fs_drop_inode(struct inode *inode)
* - f2fs_gc -> iput -> evict
* - inode_wait_for_writeback(inode)
*/
- if (!inode_unhashed(inode) && inode->i_state & I_SYNC) {
+ if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
if (!inode->i_nlink && !is_bad_inode(inode)) {
/* to avoid evict_inode call simultaneously */
atomic_inc(&inode->i_count);
@@ -466,32 +606,69 @@ static int f2fs_drop_inode(struct inode *inode)
/* some remained atomic pages should discarded */
if (f2fs_is_atomic_file(inode))
- commit_inmem_pages(inode, true);
+ drop_inmem_pages(inode);
/* should remain fi->extent_tree for writepage */
f2fs_destroy_extent_node(inode);
sb_start_intwrite(inode->i_sb);
- i_size_write(inode, 0);
+ f2fs_i_size_write(inode, 0);
if (F2FS_HAS_BLOCKS(inode))
- f2fs_truncate(inode, true);
+ f2fs_truncate(inode);
sb_end_intwrite(inode->i_sb);
-#ifdef CONFIG_F2FS_FS_ENCRYPTION
- if (F2FS_I(inode)->i_crypt_info)
- f2fs_free_encryption_info(inode,
- F2FS_I(inode)->i_crypt_info);
-#endif
+ fscrypt_put_encryption_info(inode, NULL);
spin_lock(&inode->i_lock);
atomic_dec(&inode->i_count);
}
return 0;
}
+
return generic_drop_inode(inode);
}
+int f2fs_inode_dirtied(struct inode *inode, bool sync)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ int ret = 0;
+
+ spin_lock(&sbi->inode_lock[DIRTY_META]);
+ if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
+ ret = 1;
+ } else {
+ set_inode_flag(inode, FI_DIRTY_INODE);
+ stat_inc_dirty_inode(sbi, DIRTY_META);
+ }
+ if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
+ list_add_tail(&F2FS_I(inode)->gdirty_list,
+ &sbi->inode_list[DIRTY_META]);
+ inc_page_count(sbi, F2FS_DIRTY_IMETA);
+ }
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+ return ret;
+}
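
f2fs_inode_dirtied() above sets FI_DIRTY_INODE and, for the sync case, queues the inode on the global DIRTY_META list, all under inode_lock[DIRTY_META]; the return value tells the caller whether an earlier marking already happened. A userspace approximation with a mutex and booleans in place of the flag and list primitives:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t dirty_lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_dirty_inodes;     /* stand-in for the DIRTY_META stat */

struct toy_inode {
        bool dirty;
        bool on_list;   /* stand-in for !list_empty(&gdirty_list) */
};

/* Returns 1 if the inode was already dirty, 0 if we marked it now;
 * with sync set, it is also queued for checkpoint-time writeback. */
static int inode_dirtied(struct toy_inode *in, bool sync)
{
        int ret = 0;

        pthread_mutex_lock(&dirty_lock);
        if (in->dirty)
                ret = 1;
        else
                in->dirty = true;
        if (sync && !in->on_list) {
                in->on_list = true;
                nr_dirty_inodes++;
        }
        pthread_mutex_unlock(&dirty_lock);
        return ret;
}

int main(void)
{
        struct toy_inode in = { 0 };

        printf("%d %d\n", inode_dirtied(&in, true), inode_dirtied(&in, true));
        printf("queued=%d\n", nr_dirty_inodes);  /* 0 1 / queued=1 */
        return 0;
}
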
+
+void f2fs_inode_synced(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+ spin_lock(&sbi->inode_lock[DIRTY_META]);
+ if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+ return;
+ }
+ if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
+ list_del_init(&F2FS_I(inode)->gdirty_list);
+ dec_page_count(sbi, F2FS_DIRTY_IMETA);
+ }
+ clear_inode_flag(inode, FI_DIRTY_INODE);
+ clear_inode_flag(inode, FI_AUTO_RECOVER);
+ stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+}
+
/*
* f2fs_dirty_inode() is called from __mark_inode_dirty()
*
@@ -499,7 +676,19 @@ static int f2fs_drop_inode(struct inode *inode)
*/
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
- set_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+ if (inode->i_ino == F2FS_NODE_INO(sbi) ||
+ inode->i_ino == F2FS_META_INO(sbi))
+ return;
+
+ if (flags == I_DIRTY_TIME)
+ return;
+
+ if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
+ clear_inode_flag(inode, FI_AUTO_RECOVER);
+
+ f2fs_inode_dirtied(inode, false);
}
static void f2fs_i_callback(struct rcu_head *head)
@@ -513,12 +702,32 @@ static void f2fs_destroy_inode(struct inode *inode)
call_rcu(&inode->i_rcu, f2fs_i_callback);
}
+static void destroy_percpu_info(struct f2fs_sb_info *sbi)
+{
+ percpu_counter_destroy(&sbi->alloc_valid_block_count);
+ percpu_counter_destroy(&sbi->total_valid_inode_count);
+}
+
+static void destroy_device_list(struct f2fs_sb_info *sbi)
+{
+ int i;
+
+ for (i = 0; i < sbi->s_ndevs; i++) {
+ blkdev_put(FDEV(i).bdev, FMODE_EXCL);
+#ifdef CONFIG_BLK_DEV_ZONED
+ kfree(FDEV(i).blkz_type);
+#endif
+ }
+ kfree(sbi->devs);
+}
+
static void f2fs_put_super(struct super_block *sb)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
if (sbi->s_proc) {
remove_proc_entry("segment_info", sbi->s_proc);
+ remove_proc_entry("segment_bits", sbi->s_proc);
remove_proc_entry(sb->s_id, f2fs_proc_root);
}
kobject_del(&sbi->s_kobj);
@@ -534,7 +743,7 @@ static void f2fs_put_super(struct super_block *sb)
* clean checkpoint again.
*/
if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
- !is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) {
+ !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
struct cp_control cpc = {
.reason = CP_UMOUNT,
};
@@ -548,12 +757,14 @@ static void f2fs_put_super(struct super_block *sb)
* normally superblock is clean, so we need to release this.
* In addition, EIO will skip do checkpoint, we need this as well.
*/
- release_dirty_inode(sbi);
- release_discard_addrs(sbi);
+ release_ino_entry(sbi, true);
f2fs_leave_shrinker(sbi);
mutex_unlock(&sbi->umount_mutex);
+ /* our cp_error case, we can wait for any writeback page */
+ f2fs_flush_merged_bios(sbi);
+
iput(sbi->node_inode);
iput(sbi->meta_inode);
@@ -566,13 +777,20 @@ static void f2fs_put_super(struct super_block *sb)
wait_for_completion(&sbi->s_kobj_unregister);
sb->s_fs_info = NULL;
- brelse(sbi->raw_super_buf);
+ if (sbi->s_chksum_driver)
+ crypto_free_shash(sbi->s_chksum_driver);
+ kfree(sbi->raw_super);
+
+ destroy_device_list(sbi);
+
+ destroy_percpu_info(sbi);
kfree(sbi);
}
int f2fs_sync_fs(struct super_block *sb, int sync)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ int err = 0;
trace_f2fs_sync_fs(sb, sync);
@@ -582,25 +800,27 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
cpc.reason = __get_cp_reason(sbi);
mutex_lock(&sbi->gc_mutex);
- write_checkpoint(sbi, &cpc);
+ err = write_checkpoint(sbi, &cpc);
mutex_unlock(&sbi->gc_mutex);
- } else {
- f2fs_balance_fs(sbi);
}
f2fs_trace_ios(NULL, 1);
- return 0;
+ return err;
}
static int f2fs_freeze(struct super_block *sb)
{
- int err;
-
if (f2fs_readonly(sb))
return 0;
- err = f2fs_sync_fs(sb, 1);
- return err;
+ /* IO error happened before */
+ if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
+ return -EIO;
+
+ /* must be clean, since sync_filesystem() was already called */
+ if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
+ return -EINVAL;
+ return 0;
}
static int f2fs_unfreeze(struct super_block *sb)
@@ -623,11 +843,12 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_bsize = sbi->blocksize;
buf->f_blocks = total_count - start_count;
- buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count;
+ buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count;
buf->f_bavail = user_block_count - valid_user_blocks(sbi);
buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
- buf->f_ffree = buf->f_files - valid_inode_count(sbi);
+ buf->f_ffree = min(buf->f_files - valid_node_count(sbi),
+ buf->f_bavail);
buf->f_namelen = F2FS_NAME_LEN;
buf->f_fsid.val[0] = (u32)id;
@@ -676,6 +897,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",noinline_data");
if (test_opt(sbi, INLINE_DENTRY))
seq_puts(seq, ",inline_dentry");
+ else
+ seq_puts(seq, ",noinline_dentry");
if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
seq_puts(seq, ",flush_merge");
if (test_opt(sbi, NOBARRIER))
@@ -686,6 +909,14 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",extent_cache");
else
seq_puts(seq, ",noextent_cache");
+ if (test_opt(sbi, DATA_FLUSH))
+ seq_puts(seq, ",data_flush");
+
+ seq_puts(seq, ",mode=");
+ if (test_opt(sbi, ADAPTIVE))
+ seq_puts(seq, "adaptive");
+ else if (test_opt(sbi, LFS))
+ seq_puts(seq, "lfs");
seq_printf(seq, ",active_logs=%u", sbi->active_logs);
return 0;
@@ -718,19 +949,47 @@ static int segment_info_seq_show(struct seq_file *seq, void *offset)
return 0;
}
-static int segment_info_open_fs(struct inode *inode, struct file *file)
+static int segment_bits_seq_show(struct seq_file *seq, void *offset)
{
- return single_open(file, segment_info_seq_show, PDE_DATA(inode));
+ struct super_block *sb = seq->private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ unsigned int total_segs =
+ le32_to_cpu(sbi->raw_super->segment_count_main);
+ int i, j;
+
+ seq_puts(seq, "format: segment_type|valid_blocks|bitmaps\n"
+ "segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");
+
+ for (i = 0; i < total_segs; i++) {
+ struct seg_entry *se = get_seg_entry(sbi, i);
+
+ seq_printf(seq, "%-10d", i);
+ seq_printf(seq, "%d|%-3u|", se->type,
+ get_valid_blocks(sbi, i, 1));
+ for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++)
+ seq_printf(seq, " %.2x", se->cur_valid_map[j]);
+ seq_putc(seq, '\n');
+ }
+ return 0;
}
-static const struct file_operations f2fs_seq_segment_info_fops = {
- .owner = THIS_MODULE,
- .open = segment_info_open_fs,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+#define F2FS_PROC_FILE_DEF(_name) \
+static int _name##_open_fs(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, _name##_seq_show, PDE_DATA(inode)); \
+} \
+ \
+static const struct file_operations f2fs_seq_##_name##_fops = { \
+ .owner = THIS_MODULE, \
+ .open = _name##_open_fs, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
};
+F2FS_PROC_FILE_DEF(segment_info);
+F2FS_PROC_FILE_DEF(segment_bits);
+
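The F2FS_PROC_FILE_DEF macro above uses token pasting to stamp out the single_open() boilerplate once per proc file instead of duplicating it by hand. The same trick in plain C, reduced to a printable show function:

#include <stdio.h>

/* One macro generates the per-name boilerplate via ## token pasting. */
#define DEFINE_SHOW(_name)                              \
static void _name##_show(void)                          \
{                                                       \
        printf("showing " #_name "\n");                 \
}

DEFINE_SHOW(segment_info)
DEFINE_SHOW(segment_bits)

int main(void)
{
        segment_info_show();
        segment_bits_show();
        return 0;
}
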
static void default_options(struct f2fs_sb_info *sbi)
{
/* init some FS parameters */
@@ -738,7 +997,16 @@ static void default_options(struct f2fs_sb_info *sbi)
set_opt(sbi, BG_GC);
set_opt(sbi, INLINE_DATA);
+ set_opt(sbi, INLINE_DENTRY);
set_opt(sbi, EXTENT_CACHE);
+ sbi->sb->s_flags |= MS_LAZYTIME;
+ set_opt(sbi, FLUSH_MERGE);
+ if (f2fs_sb_mounted_blkzoned(sbi->sb)) {
+ set_opt_mode(sbi, F2FS_MOUNT_LFS);
+ set_opt(sbi, DISCARD);
+ } else {
+ set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
+ }
#ifdef CONFIG_F2FS_FS_XATTR
set_opt(sbi, XATTR_USER);
@@ -746,6 +1014,10 @@ static void default_options(struct f2fs_sb_info *sbi)
#ifdef CONFIG_F2FS_FS_POSIX_ACL
set_opt(sbi, POSIX_ACL);
#endif
+
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ f2fs_build_fault_attr(sbi, 0);
+#endif
}
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
@@ -756,8 +1028,9 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
bool need_restart_gc = false;
bool need_stop_gc = false;
bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
-
- sync_filesystem(sb);
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ struct f2fs_fault_info ffi = sbi->fault_info;
+#endif
/*
* Save the old mount options in case we
@@ -766,6 +1039,15 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
org_mount_opt = sbi->mount_opt;
active_logs = sbi->active_logs;
+ /* recover superblocks we couldn't write due to previous RO mount */
+ if (!(*flags & MS_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
+ err = f2fs_commit_super(sbi, false);
+ f2fs_msg(sb, KERN_INFO,
+ "Try to recover all the superblocks, ret: %d", err);
+ if (!err)
+ clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
+ }
+
sbi->mount_opt.opt = 0;
default_options(sbi);
@@ -797,7 +1079,6 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
if (sbi->gc_thread) {
stop_gc_thread(sbi);
- f2fs_sync_fs(sb, 1);
need_restart_gc = true;
}
} else if (!sbi->gc_thread) {
@@ -807,21 +1088,33 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
need_stop_gc = true;
}
+ if (*flags & MS_RDONLY) {
+ writeback_inodes_sb(sb, WB_REASON_SYNC);
+ sync_inodes_sb(sb);
+
+ set_sbi_flag(sbi, SBI_IS_DIRTY);
+ set_sbi_flag(sbi, SBI_IS_CLOSE);
+ f2fs_sync_fs(sb, 1);
+ clear_sbi_flag(sbi, SBI_IS_CLOSE);
+ }
+
/*
* We stop issue flush thread if FS is mounted as RO
* or if flush_merge is not passed in mount option.
*/
if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
- destroy_flush_cmd_control(sbi);
- } else if (!SM_I(sbi)->cmd_control_info) {
+ clear_opt(sbi, FLUSH_MERGE);
+ destroy_flush_cmd_control(sbi, false);
+ } else {
err = create_flush_cmd_control(sbi);
if (err)
goto restore_gc;
}
skip:
/* Update the POSIXACL Flag */
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
+
return 0;
restore_gc:
if (need_restart_gc) {
@@ -834,6 +1127,9 @@ restore_gc:
restore_opts:
sbi->mount_opt = org_mount_opt;
sbi->active_logs = active_logs;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ sbi->fault_info = ffi;
+#endif
return err;
}
@@ -853,6 +1149,48 @@ static struct super_operations f2fs_sops = {
.remount_fs = f2fs_remount,
};
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
+{
+ return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
+ F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
+ ctx, len, NULL);
+}
+
+static int f2fs_key_prefix(struct inode *inode, u8 **key)
+{
+ *key = F2FS_I_SB(inode)->key_prefix;
+ return F2FS_I_SB(inode)->key_prefix_size;
+}
+
+static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
+ void *fs_data)
+{
+ return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
+ F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
+ ctx, len, fs_data, XATTR_CREATE);
+}
+
+static unsigned f2fs_max_namelen(struct inode *inode)
+{
+ return S_ISLNK(inode->i_mode) ?
+ inode->i_sb->s_blocksize : F2FS_NAME_LEN;
+}
+
+static struct fscrypt_operations f2fs_cryptops = {
+ .get_context = f2fs_get_context,
+ .key_prefix = f2fs_key_prefix,
+ .set_context = f2fs_set_context,
+ .is_encrypted = f2fs_encrypted_inode,
+ .empty_dir = f2fs_empty_dir,
+ .max_namelen = f2fs_max_namelen,
+};
+#else
+static struct fscrypt_operations f2fs_cryptops = {
+ .is_encrypted = f2fs_encrypted_inode,
+};
+#endif
+
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
u64 ino, u32 generation)
{
@@ -898,7 +1236,7 @@ static const struct export_operations f2fs_export_ops = {
.get_parent = f2fs_get_parent,
};
-loff_t max_file_size(unsigned bits)
+static loff_t max_file_blocks(void)
{
loff_t result = (DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS);
loff_t leaf_count = ADDRS_PER_BLOCK;
@@ -914,13 +1252,29 @@ loff_t max_file_size(unsigned bits)
leaf_count *= NIDS_PER_BLOCK;
result += leaf_count;
- result <<= bits;
return result;
}
-static inline bool sanity_check_area_boundary(struct super_block *sb,
- struct f2fs_super_block *raw_super)
+static int __f2fs_commit_super(struct buffer_head *bh,
+ struct f2fs_super_block *super)
{
+ lock_buffer(bh);
+ if (super)
+ memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
+ set_buffer_uptodate(bh);
+ set_buffer_dirty(bh);
+ unlock_buffer(bh);
+
+ /* it's rare case, we can do fua all the time */
+ return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
+}
+
+static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
+ struct buffer_head *bh)
+{
+ struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
+ (bh->b_data + F2FS_SUPER_OFFSET);
+ struct super_block *sb = sbi->sb;
u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
@@ -934,6 +1288,10 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
u32 segment_count = le32_to_cpu(raw_super->segment_count);
u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+ u64 main_end_blkaddr = main_blkaddr +
+ (segment_count_main << log_blocks_per_seg);
+ u64 seg_end_blkaddr = segment0_blkaddr +
+ (segment_count << log_blocks_per_seg);
if (segment0_blkaddr != cp_blkaddr) {
f2fs_msg(sb, KERN_INFO,
@@ -978,22 +1336,47 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
return true;
}
- if (main_blkaddr + (segment_count_main << log_blocks_per_seg) !=
- segment0_blkaddr + (segment_count << log_blocks_per_seg)) {
+ if (main_end_blkaddr > seg_end_blkaddr) {
f2fs_msg(sb, KERN_INFO,
- "Wrong MAIN_AREA boundary, start(%u) end(%u) blocks(%u)",
+ "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
main_blkaddr,
- segment0_blkaddr + (segment_count << log_blocks_per_seg),
+ segment0_blkaddr +
+ (segment_count << log_blocks_per_seg),
segment_count_main << log_blocks_per_seg);
return true;
- }
+ } else if (main_end_blkaddr < seg_end_blkaddr) {
+ int err = 0;
+ char *res;
+
+ /* fix in-memory information all the time */
+ raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
+ segment0_blkaddr) >> log_blocks_per_seg);
+ if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
+ set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
+ res = "internally";
+ } else {
+ err = __f2fs_commit_super(bh, NULL);
+ res = err ? "failed" : "done";
+ }
+ f2fs_msg(sb, KERN_INFO,
+ "Fix alignment : %s, start(%u) end(%u) block(%u)",
+ res, main_blkaddr,
+ segment0_blkaddr +
+ (segment_count << log_blocks_per_seg),
+ segment_count_main << log_blocks_per_seg);
+ if (err)
+ return true;
+ }
return false;
}
-static int sanity_check_raw_super(struct super_block *sb,
- struct f2fs_super_block *raw_super)
+static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
+ struct buffer_head *bh)
{
+ struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
+ (bh->b_data + F2FS_SUPER_OFFSET);
+ struct super_block *sb = sbi->sb;
unsigned int blocksize;
if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
@@ -1004,10 +1387,10 @@ static int sanity_check_raw_super(struct super_block *sb,
}
/* Currently, support only 4KB page cache size */
- if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
+ if (F2FS_BLKSIZE != PAGE_SIZE) {
f2fs_msg(sb, KERN_INFO,
"Invalid page_cache_size (%lu), supports only 4KB\n",
- PAGE_CACHE_SIZE);
+ PAGE_SIZE);
return 1;
}
@@ -1059,27 +1442,19 @@ static int sanity_check_raw_super(struct super_block *sb,
return 1;
}
- if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) {
- f2fs_msg(sb, KERN_INFO,
- "Invalid segment count (%u)",
- le32_to_cpu(raw_super->segment_count));
- return 1;
- }
-
/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
- if (sanity_check_area_boundary(sb, raw_super))
+ if (sanity_check_area_boundary(sbi, bh))
return 1;
return 0;
}
-static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
+int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
unsigned int total, fsmeta;
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
- unsigned int main_segs, blocks_per_seg;
- int i;
+ unsigned int ovp_segments, reserved_segments;
total = le32_to_cpu(raw_super->segment_count);
fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
@@ -1091,20 +1466,14 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
if (unlikely(fsmeta >= total))
return 1;
- main_segs = le32_to_cpu(sbi->raw_super->segment_count_main);
- blocks_per_seg = sbi->blocks_per_seg;
+ ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
+ reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
- for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
- if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
- le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg) {
- return 1;
- }
- }
- for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
- if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
- le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg) {
- return 1;
- }
+ if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
+ ovp_segments == 0 || reserved_segments == 0)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Wrong layout: check mkfs.f2fs version");
+ return 1;
}
if (unlikely(f2fs_cp_error(sbi))) {
@@ -1137,129 +1506,282 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
sbi->cur_victim_sec = NULL_SECNO;
sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
- for (i = 0; i < NR_COUNT_TYPE; i++)
- atomic_set(&sbi->nr_pages[i], 0);
-
sbi->dir_level = DEF_DIR_LEVEL;
- sbi->cp_interval = DEF_CP_INTERVAL;
+ sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
+ sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
clear_sbi_flag(sbi, SBI_NEED_FSCK);
+ for (i = 0; i < NR_COUNT_TYPE; i++)
+ atomic_set(&sbi->nr_pages[i], 0);
+
INIT_LIST_HEAD(&sbi->s_list);
mutex_init(&sbi->umount_mutex);
+ mutex_init(&sbi->wio_mutex[NODE]);
+ mutex_init(&sbi->wio_mutex[DATA]);
+ spin_lock_init(&sbi->cp_lock);
+
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+ memcpy(sbi->key_prefix, F2FS_KEY_DESC_PREFIX,
+ F2FS_KEY_DESC_PREFIX_SIZE);
+ sbi->key_prefix_size = F2FS_KEY_DESC_PREFIX_SIZE;
+#endif
}
+static int init_percpu_info(struct f2fs_sb_info *sbi)
+{
+ int err;
+
+ err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
+ if (err)
+ return err;
+
+ return percpu_counter_init(&sbi->total_valid_inode_count, 0,
+ GFP_KERNEL);
+}
+
+#ifdef CONFIG_BLK_DEV_ZONED
+static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
+{
+ struct block_device *bdev = FDEV(devi).bdev;
+ sector_t nr_sectors = bdev->bd_part->nr_sects;
+ sector_t sector = 0;
+ struct blk_zone *zones;
+ unsigned int i, nr_zones;
+ unsigned int n = 0;
+ int err = -EIO;
+
+ if (!f2fs_sb_mounted_blkzoned(sbi->sb))
+ return 0;
+
+ if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
+ SECTOR_TO_BLOCK(bdev_zone_size(bdev)))
+ return -EINVAL;
+ sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_size(bdev));
+ if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
+ __ilog2_u32(sbi->blocks_per_blkz))
+ return -EINVAL;
+ sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
+ FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
+ sbi->log_blocks_per_blkz;
+ if (nr_sectors & (bdev_zone_size(bdev) - 1))
+ FDEV(devi).nr_blkz++;
+
+ FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL);
+ if (!FDEV(devi).blkz_type)
+ return -ENOMEM;
+
+#define F2FS_REPORT_NR_ZONES 4096
+
+ zones = kcalloc(F2FS_REPORT_NR_ZONES, sizeof(struct blk_zone),
+ GFP_KERNEL);
+ if (!zones)
+ return -ENOMEM;
+
+ /* Get block zones type */
+ while (zones && sector < nr_sectors) {
+
+ nr_zones = F2FS_REPORT_NR_ZONES;
+ err = blkdev_report_zones(bdev, sector,
+ zones, &nr_zones,
+ GFP_KERNEL);
+ if (err)
+ break;
+ if (!nr_zones) {
+ err = -EIO;
+ break;
+ }
+
+ for (i = 0; i < nr_zones; i++) {
+ FDEV(devi).blkz_type[n] = zones[i].type;
+ sector += zones[i].len;
+ n++;
+ }
+ }
+
+ kfree(zones);
+
+ return err;
+}
+#endif
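The loop above enumerates zones in fixed-size batches. As a minimal sketch, assuming the blkdev_report_zones() signature used in this tree (the caller passes an array and gets the filled count back through nr_zones), the pattern reduces to:

/* Sketch only: walk every zone of bdev in fixed-size batches. */
static int walk_zones(struct block_device *bdev, sector_t nr_sectors)
{
	struct blk_zone zones[16];	/* tiny on-stack batch, illustration only;
					 * init_blkz_info() above kcallocs 4096 */
	sector_t sector = 0;
	unsigned int i, nr;
	int err;

	while (sector < nr_sectors) {
		nr = ARRAY_SIZE(zones);
		err = blkdev_report_zones(bdev, sector, zones, &nr, GFP_KERNEL);
		if (err)
			return err;
		if (!nr)
			return -EIO;	/* device reported nothing: bail out */
		for (i = 0; i < nr; i++)
			sector += zones[i].len;	/* advance by each zone's length */
	}
	return 0;
}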
+
/*
* Read f2fs raw super block.
- * Because we have two copies of super block, so read the first one at first,
- * if the first one is invalid, move to read the second one.
+ * Because we have two copies of the superblock, read both of them
+ * to get the first valid one. If either of them is broken, we pass
+ * the recovery flag back to the caller.
*/
-static int read_raw_super_block(struct super_block *sb,
+static int read_raw_super_block(struct f2fs_sb_info *sbi,
struct f2fs_super_block **raw_super,
- struct buffer_head **raw_super_buf,
- int *recovery)
+ int *valid_super_block, int *recovery)
{
- int block = 0;
- struct buffer_head *buffer;
+ struct super_block *sb = sbi->sb;
+ int block;
+ struct buffer_head *bh;
struct f2fs_super_block *super;
int err = 0;
-retry:
- buffer = sb_bread(sb, block);
- if (!buffer) {
- *recovery = 1;
- f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
+ super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
+ if (!super)
+ return -ENOMEM;
+
+ for (block = 0; block < 2; block++) {
+ bh = sb_bread(sb, block);
+ if (!bh) {
+ f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
block + 1);
- if (block == 0) {
- block++;
- goto retry;
- } else {
err = -EIO;
- goto out;
+ continue;
}
- }
-
- super = (struct f2fs_super_block *)
- ((char *)(buffer)->b_data + F2FS_SUPER_OFFSET);
- /* sanity checking of raw super */
- if (sanity_check_raw_super(sb, super)) {
- brelse(buffer);
- *recovery = 1;
- f2fs_msg(sb, KERN_ERR,
- "Can't find valid F2FS filesystem in %dth superblock",
- block + 1);
- if (block == 0) {
- block++;
- goto retry;
- } else {
+ /* sanity checking of raw super */
+ if (sanity_check_raw_super(sbi, bh)) {
+ f2fs_msg(sb, KERN_ERR,
+ "Can't find valid F2FS filesystem in %dth superblock",
+ block + 1);
err = -EINVAL;
- goto out;
+ brelse(bh);
+ continue;
}
- }
- if (!*raw_super) {
- *raw_super_buf = buffer;
- *raw_super = super;
- } else {
- /* already have a valid superblock */
- brelse(buffer);
+ if (!*raw_super) {
+ memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
+ sizeof(*super));
+ *valid_super_block = block;
+ *raw_super = super;
+ }
+ brelse(bh);
}
- /* check the validity of the second superblock */
- if (block == 0) {
- block++;
- goto retry;
- }
+ /* Failed to read any one of the superblocks */
+ if (err < 0)
+ *recovery = 1;
-out:
/* No valid superblock */
if (!*raw_super)
- return err;
+ kfree(super);
+ else
+ err = 0;
- return 0;
+ return err;
}
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
- struct buffer_head *sbh = sbi->raw_super_buf;
- sector_t block = sbh->b_blocknr;
+ struct buffer_head *bh;
int err;
- /* write back-up superblock first */
- sbh->b_blocknr = block ? 0 : 1;
- mark_buffer_dirty(sbh);
- err = sync_dirty_buffer(sbh);
+ if ((recover && f2fs_readonly(sbi->sb)) ||
+ bdev_read_only(sbi->sb->s_bdev)) {
+ set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
+ return -EROFS;
+ }
- sbh->b_blocknr = block;
+ /* write back-up superblock first */
+ bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0: 1);
+ if (!bh)
+ return -EIO;
+ err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
+ brelse(bh);
/* if we are in recovery path, skip writing valid superblock */
if (recover || err)
- goto out;
+ return err;
/* write current valid superblock */
- mark_buffer_dirty(sbh);
- err = sync_dirty_buffer(sbh);
-out:
- clear_buffer_write_io_error(sbh);
- set_buffer_uptodate(sbh);
+ bh = sb_getblk(sbi->sb, sbi->valid_super_block);
+ if (!bh)
+ return -EIO;
+ err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
+ brelse(bh);
return err;
}
+static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
+{
+ struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
+ int i;
+
+ for (i = 0; i < MAX_DEVICES; i++) {
+ if (!RDEV(i).path[0])
+ return 0;
+
+ if (i == 0) {
+ sbi->devs = kzalloc(sizeof(struct f2fs_dev_info) *
+ MAX_DEVICES, GFP_KERNEL);
+ if (!sbi->devs)
+ return -ENOMEM;
+ }
+
+ memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
+ FDEV(i).total_segments = le32_to_cpu(RDEV(i).total_segments);
+ if (i == 0) {
+ FDEV(i).start_blk = 0;
+ FDEV(i).end_blk = FDEV(i).start_blk +
+ (FDEV(i).total_segments <<
+ sbi->log_blocks_per_seg) - 1 +
+ le32_to_cpu(raw_super->segment0_blkaddr);
+ } else {
+ FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
+ FDEV(i).end_blk = FDEV(i).start_blk +
+ (FDEV(i).total_segments <<
+ sbi->log_blocks_per_seg) - 1;
+ }
+
+ FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
+ sbi->sb->s_mode, sbi->sb->s_type);
+ if (IS_ERR(FDEV(i).bdev))
+ return PTR_ERR(FDEV(i).bdev);
+
+ /* to release errored devices */
+ sbi->s_ndevs = i + 1;
+
+#ifdef CONFIG_BLK_DEV_ZONED
+ if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
+ !f2fs_sb_mounted_blkzoned(sbi->sb)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Zoned block device feature not enabled\n");
+ return -EINVAL;
+ }
+ if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
+ if (init_blkz_info(sbi, i)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Failed to initialize F2FS blkzone information");
+ return -EINVAL;
+ }
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
+ i, FDEV(i).path,
+ FDEV(i).total_segments,
+ FDEV(i).start_blk, FDEV(i).end_blk,
+ bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
+ "Host-aware" : "Host-managed");
+ continue;
+ }
+#endif
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Mount Device [%2d]: %20s, %8u, %8x - %8x",
+ i, FDEV(i).path,
+ FDEV(i).total_segments,
+ FDEV(i).start_blk, FDEV(i).end_blk);
+ }
+ return 0;
+}
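To see how the start_blk/end_blk chaining above works out, here is a worked example with hypothetical numbers (two devices of 512 segments each, 512 blocks per segment, metadata occupying the first 512 blocks):

/* Hypothetical numbers, not taken from any real device. */
u32 log_blocks_per_seg = 9;	/* 512 blocks per 2MB segment */
u32 segment0_blkaddr = 512;	/* metadata in front of the MAIN area */
u32 total_segments = 512;	/* per device */

u64 dev0_start = 0;
u64 dev0_end = dev0_start + (total_segments << log_blocks_per_seg) - 1
			+ segment0_blkaddr;	/* = 262655 */
u64 dev1_start = dev0_end + 1;			/* = 262656 */
u64 dev1_end = dev1_start
		+ (total_segments << log_blocks_per_seg) - 1;	/* = 524799 */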
+
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
struct f2fs_sb_info *sbi;
struct f2fs_super_block *raw_super;
- struct buffer_head *raw_super_buf;
struct inode *root;
- long err;
+ int err;
bool retry = true, need_fsck = false;
char *options = NULL;
- int recovery, i;
+ int recovery, i, valid_super_block;
+ struct curseg_info *seg_i;
try_onemore:
err = -EINVAL;
raw_super = NULL;
- raw_super_buf = NULL;
+ valid_super_block = -1;
recovery = 0;
/* allocate memory for f2fs-specific super block info */
@@ -1267,17 +1789,43 @@ try_onemore:
if (!sbi)
return -ENOMEM;
+ sbi->sb = sb;
+
+ /* Load the checksum driver */
+ sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
+ if (IS_ERR(sbi->s_chksum_driver)) {
+ f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
+ err = PTR_ERR(sbi->s_chksum_driver);
+ sbi->s_chksum_driver = NULL;
+ goto free_sbi;
+ }
+
/* set a block size */
if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
goto free_sbi;
}
- err = read_raw_super_block(sb, &raw_super, &raw_super_buf, &recovery);
+ err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
+ &recovery);
if (err)
goto free_sbi;
sb->s_fs_info = sbi;
+ sbi->raw_super = raw_super;
+
+ /*
+ * The BLKZONED feature indicates that the drive was formatted with
+ * zone alignment optimization. This is optional for host-aware
+ * devices, but mandatory for host-managed zoned block devices.
+ */
+#ifndef CONFIG_BLK_DEV_ZONED
+ if (f2fs_sb_mounted_blkzoned(sb)) {
+ f2fs_msg(sb, KERN_ERR,
+ "Zoned block device support is not enabled\n");
+ goto free_sb_buf;
+ }
+#endif
default_options(sbi);
/* parse mount options */
options = kstrdup((const char *)data, GFP_KERNEL);
@@ -1290,11 +1838,14 @@ try_onemore:
if (err)
goto free_options;
- sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
+ sbi->max_file_blocks = max_file_blocks();
+ sb->s_maxbytes = sbi->max_file_blocks <<
+ le32_to_cpu(raw_super->log_blocksize);
sb->s_max_links = F2FS_LINK_MAX;
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
sb->s_op = &f2fs_sops;
+ sb->s_cop = &f2fs_cryptops;
sb->s_xattr = f2fs_xattr_handlers;
sb->s_export_op = &f2fs_export_ops;
sb->s_magic = F2FS_SUPER_MAGIC;
@@ -1304,11 +1855,8 @@ try_onemore:
memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
/* init f2fs-specific super block info */
- sbi->sb = sb;
- sbi->raw_super = raw_super;
- sbi->raw_super_buf = raw_super_buf;
+ sbi->valid_super_block = valid_super_block;
mutex_init(&sbi->gc_mutex);
- mutex_init(&sbi->writepages);
mutex_init(&sbi->cp_mutex);
init_rwsem(&sbi->node_write);
@@ -1329,6 +1877,10 @@ try_onemore:
init_waitqueue_head(&sbi->cp_wait);
init_sb_info(sbi);
+ err = init_percpu_info(sbi);
+ if (err)
+ goto free_options;
+
/* get an inode for meta space */
sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
if (IS_ERR(sbi->meta_inode)) {
@@ -1343,24 +1895,26 @@ try_onemore:
goto free_meta_inode;
}
- /* sanity checking of checkpoint */
- err = -EINVAL;
- if (sanity_check_ckpt(sbi)) {
- f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
- goto free_cp;
+ /* Initialize device list */
+ err = f2fs_scan_devices(sbi);
+ if (err) {
+ f2fs_msg(sb, KERN_ERR, "Failed to find devices");
+ goto free_devices;
}
sbi->total_valid_node_count =
le32_to_cpu(sbi->ckpt->valid_node_count);
- sbi->total_valid_inode_count =
- le32_to_cpu(sbi->ckpt->valid_inode_count);
+ percpu_counter_set(&sbi->total_valid_inode_count,
+ le32_to_cpu(sbi->ckpt->valid_inode_count));
sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
sbi->total_valid_block_count =
le64_to_cpu(sbi->ckpt->valid_block_count);
sbi->last_valid_block_count = sbi->total_valid_block_count;
- sbi->alloc_valid_block_count = 0;
- INIT_LIST_HEAD(&sbi->dir_inode_list);
- spin_lock_init(&sbi->dir_inode_lock);
+
+ for (i = 0; i < NR_INODE_TYPE; i++) {
+ INIT_LIST_HEAD(&sbi->inode_list[i]);
+ spin_lock_init(&sbi->inode_lock[i]);
+ }
init_extent_cache_info(sbi);
@@ -1380,6 +1934,17 @@ try_onemore:
goto free_nm;
}
+ /* For write statistics */
+ if (sb->s_bdev->bd_part)
+ sbi->sectors_written_start =
+ (u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);
+
+ /* Read accumulated write IO statistics if exists */
+ seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
+ if (__exist_node_summaries(sbi))
+ sbi->kbytes_written =
+ le64_to_cpu(seg_i->journal->info.kbytes_written);
+
build_gc_manager(sbi);
/* get an inode for node space */
@@ -1423,9 +1988,12 @@ try_onemore:
if (f2fs_proc_root)
sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
- if (sbi->s_proc)
+ if (sbi->s_proc) {
proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
&f2fs_seq_segment_info_fops, sb);
+ proc_create_data("segment_bits", S_IRUGO, sbi->s_proc,
+ &f2fs_seq_segment_bits_fops, sb);
+ }
sbi->s_kobj.kset = f2fs_kset;
init_completion(&sbi->s_kobj_unregister);
@@ -1441,7 +2009,7 @@ try_onemore:
* previous checkpoint was not done by clean system shutdown.
*/
if (bdev_read_only(sb->s_bdev) &&
- !is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {
+ !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
err = -EROFS;
goto free_kobj;
}
@@ -1449,14 +2017,27 @@ try_onemore:
if (need_fsck)
set_sbi_flag(sbi, SBI_NEED_FSCK);
- err = recover_fsync_data(sbi);
- if (err) {
+ if (!retry)
+ goto skip_recovery;
+
+ err = recover_fsync_data(sbi, false);
+ if (err < 0) {
need_fsck = true;
f2fs_msg(sb, KERN_ERR,
- "Cannot recover all fsync data errno=%ld", err);
+ "Cannot recover all fsync data errno=%d", err);
+ goto free_kobj;
+ }
+ } else {
+ err = recover_fsync_data(sbi, true);
+
+ if (!f2fs_readonly(sb) && err > 0) {
+ err = -EINVAL;
+ f2fs_msg(sb, KERN_ERR,
+ "Need to recover fsync data");
goto free_kobj;
}
}
+skip_recovery:
/* recover_fsync_data() cleared this already */
clear_sbi_flag(sbi, SBI_POR_DOING);
@@ -1473,20 +2054,26 @@ try_onemore:
kfree(options);
/* recover broken superblock */
- if (recovery && !f2fs_readonly(sb) && !bdev_read_only(sb->s_bdev)) {
- f2fs_msg(sb, KERN_INFO, "Recover invalid superblock");
- f2fs_commit_super(sbi, true);
+ if (recovery) {
+ err = f2fs_commit_super(sbi, true);
+ f2fs_msg(sb, KERN_INFO,
+ "Try to recover %dth superblock, ret: %d",
+ sbi->valid_super_block ? 1 : 2, err);
}
- sbi->cp_expires = round_jiffies_up(jiffies);
-
+ f2fs_update_time(sbi, CP_TIME);
+ f2fs_update_time(sbi, REQ_TIME);
return 0;
free_kobj:
+ f2fs_sync_inode_meta(sbi);
kobject_del(&sbi->s_kobj);
+ kobject_put(&sbi->s_kobj);
+ wait_for_completion(&sbi->s_kobj_unregister);
free_proc:
if (sbi->s_proc) {
remove_proc_entry("segment_info", sbi->s_proc);
+ remove_proc_entry("segment_bits", sbi->s_proc);
remove_proc_entry(sb->s_id, f2fs_proc_root);
}
f2fs_destroy_stats(sbi);
@@ -1494,24 +2081,37 @@ free_root_inode:
dput(sb->s_root);
sb->s_root = NULL;
free_node_inode:
+ truncate_inode_pages_final(NODE_MAPPING(sbi));
mutex_lock(&sbi->umount_mutex);
+ release_ino_entry(sbi, true);
f2fs_leave_shrinker(sbi);
+ /*
+ * Some dirty meta pages can be produced when recover_orphan_inodes()
+ * fails with EIO. Then iput(node_inode) can trigger balance_fs_bg(),
+ * followed by write_checkpoint() through f2fs_write_node_pages(),
+ * which falls into an infinite loop in sync_meta_pages().
+ */
+ truncate_inode_pages_final(META_MAPPING(sbi));
iput(sbi->node_inode);
mutex_unlock(&sbi->umount_mutex);
free_nm:
destroy_node_manager(sbi);
free_sm:
destroy_segment_manager(sbi);
-free_cp:
+free_devices:
+ destroy_device_list(sbi);
kfree(sbi->ckpt);
free_meta_inode:
make_bad_inode(sbi->meta_inode);
iput(sbi->meta_inode);
free_options:
+ destroy_percpu_info(sbi);
kfree(options);
free_sb_buf:
- brelse(raw_super_buf);
+ kfree(raw_super);
free_sbi:
+ if (sbi->s_chksum_driver)
+ crypto_free_shash(sbi->s_chksum_driver);
kfree(sbi);
/* give only one another chance */
@@ -1547,8 +2147,9 @@ MODULE_ALIAS_FS("f2fs");
static int __init init_inodecache(void)
{
- f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache",
- sizeof(struct f2fs_inode_info));
+ f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
+ sizeof(struct f2fs_inode_info), 0,
+ SLAB_RECLAIM_ACCOUNT, NULL);
if (!f2fs_inode_cachep)
return -ENOMEM;
return 0;
@@ -1590,25 +2191,23 @@ static int __init init_f2fs_fs(void)
err = -ENOMEM;
goto free_extent_cache;
}
- err = f2fs_init_crypto();
- if (err)
- goto free_kset;
-
err = register_shrinker(&f2fs_shrinker_info);
if (err)
- goto free_crypto;
+ goto free_kset;
err = register_filesystem(&f2fs_fs_type);
if (err)
goto free_shrinker;
- f2fs_create_root_stats();
+ err = f2fs_create_root_stats();
+ if (err)
+ goto free_filesystem;
f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
return 0;
+free_filesystem:
+ unregister_filesystem(&f2fs_fs_type);
free_shrinker:
unregister_shrinker(&f2fs_shrinker_info);
-free_crypto:
- f2fs_exit_crypto();
free_kset:
kset_unregister(f2fs_kset);
free_extent_cache:
@@ -1629,15 +2228,14 @@ static void __exit exit_f2fs_fs(void)
{
remove_proc_entry("fs/f2fs", NULL);
f2fs_destroy_root_stats();
- unregister_shrinker(&f2fs_shrinker_info);
unregister_filesystem(&f2fs_fs_type);
- f2fs_exit_crypto();
+ unregister_shrinker(&f2fs_shrinker_info);
+ kset_unregister(f2fs_kset);
destroy_extent_cache();
destroy_checkpoint_caches();
destroy_segment_manager_caches();
destroy_node_manager_caches();
destroy_inodecache();
- kset_unregister(f2fs_kset);
f2fs_destroy_trace_ios();
}
@@ -1647,3 +2245,4 @@ module_exit(exit_f2fs_fs)
MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");
+
diff --git a/fs/f2fs/trace.c b/fs/f2fs/trace.c
index 145fb659ad44..562ce0821559 100644
--- a/fs/f2fs/trace.c
+++ b/fs/f2fs/trace.c
@@ -29,7 +29,8 @@ static inline void __print_last_io(void)
last_io.major, last_io.minor,
last_io.pid, "----------------",
last_io.type,
- last_io.fio.rw, last_io.fio.blk_addr,
+ last_io.fio.rw,
+ last_io.fio.new_blkaddr,
last_io.len);
memset(&last_io, 0, sizeof(last_io));
}
@@ -101,7 +102,8 @@ void f2fs_trace_ios(struct f2fs_io_info *fio, int flush)
last_io.pid == pid &&
last_io.type == __file_type(inode, pid) &&
last_io.fio.rw == fio->rw &&
- last_io.fio.blk_addr + last_io.len == fio->blk_addr) {
+ last_io.fio.new_blkaddr + last_io.len ==
+ fio->new_blkaddr) {
last_io.len++;
return;
}
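The condition above run-length-merges consecutive IOs from one thread into a single trace record: the new block must directly follow the previous run. A small worked trace, assuming pid 100 issues three contiguous writes of the same type:

/* blk 8  -> new record {new_blkaddr = 8, len = 1}
 * blk 9  -> 8 + 1 == 9  => merged, len = 2
 * blk 10 -> 8 + 2 == 10 => merged, len = 3
 * A gap, or a different pid/type/rw, flushes the record via
 * __print_last_io() and starts a new run.
 */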
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 862368a32e53..1c4d5e39586c 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -151,7 +151,7 @@ static int f2fs_xattr_advise_set(const struct xattr_handler *handler,
return -EINVAL;
F2FS_I(inode)->i_advise |= *(char *)value;
- mark_inode_dirty(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
return 0;
}
@@ -264,18 +264,20 @@ static struct f2fs_xattr_entry *__find_xattr(void *base_addr, int index,
return entry;
}
-static void *read_all_xattrs(struct inode *inode, struct page *ipage)
+static int read_all_xattrs(struct inode *inode, struct page *ipage,
+ void **base_addr)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_xattr_header *header;
size_t size = PAGE_SIZE, inline_size = 0;
void *txattr_addr;
+ int err;
inline_size = inline_xattr_size(inode);
txattr_addr = kzalloc(inline_size + size, GFP_F2FS_ZERO);
if (!txattr_addr)
- return NULL;
+ return -ENOMEM;
/* read from inline xattr */
if (inline_size) {
@@ -286,8 +288,10 @@ static void *read_all_xattrs(struct inode *inode, struct page *ipage)
inline_addr = inline_xattr_addr(ipage);
} else {
page = get_node_page(sbi, inode->i_ino);
- if (IS_ERR(page))
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
goto fail;
+ }
inline_addr = inline_xattr_addr(page);
}
memcpy(txattr_addr, inline_addr, inline_size);
@@ -301,8 +305,10 @@ static void *read_all_xattrs(struct inode *inode, struct page *ipage)
/* The inode already has an extended attribute block. */
xpage = get_node_page(sbi, F2FS_I(inode)->i_xattr_nid);
- if (IS_ERR(xpage))
+ if (IS_ERR(xpage)) {
+ err = PTR_ERR(xpage);
goto fail;
+ }
xattr_addr = page_address(xpage);
memcpy(txattr_addr + inline_size, xattr_addr, PAGE_SIZE);
@@ -316,10 +322,11 @@ static void *read_all_xattrs(struct inode *inode, struct page *ipage)
header->h_magic = cpu_to_le32(F2FS_XATTR_MAGIC);
header->h_refcount = cpu_to_le32(1);
}
- return txattr_addr;
+ *base_addr = txattr_addr;
+ return 0;
fail:
kzfree(txattr_addr);
- return NULL;
+ return err;
}
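The reworked helper swaps a NULL-on-failure return for the usual errno-plus-out-parameter convention, so callers can tell -ENOMEM apart from a node-page read error. The shape of the conversion, sketched with hypothetical names (load_blob, BLOB_SIZE):

/* Sketch only: load_blob() and BLOB_SIZE are hypothetical. */
static int load_blob(struct inode *inode, void **out)
{
	void *buf = kzalloc(BLOB_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;		/* was: return NULL, losing the cause */
	/* ... on a failed read, free buf and return the real errno ... */
	*out = buf;			/* buffer handed back via out-param */
	return 0;
}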
static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
@@ -345,7 +352,8 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
if (ipage) {
inline_addr = inline_xattr_addr(ipage);
- f2fs_wait_on_page_writeback(ipage, NODE);
+ f2fs_wait_on_page_writeback(ipage, NODE, true);
+ set_page_dirty(ipage);
} else {
page = get_node_page(sbi, inode->i_ino);
if (IS_ERR(page)) {
@@ -353,7 +361,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
return PTR_ERR(page);
}
inline_addr = inline_xattr_addr(page);
- f2fs_wait_on_page_writeback(page, NODE);
+ f2fs_wait_on_page_writeback(page, NODE, true);
}
memcpy(inline_addr, txattr_addr, inline_size);
f2fs_put_page(page, 1);
@@ -374,7 +382,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
return PTR_ERR(xpage);
}
f2fs_bug_on(sbi, new_nid);
- f2fs_wait_on_page_writeback(xpage, NODE);
+ f2fs_wait_on_page_writeback(xpage, NODE, true);
} else {
struct dnode_of_data dn;
set_new_dnode(&dn, inode, NULL, NULL, new_nid);
@@ -412,9 +420,9 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
if (len > F2FS_NAME_LEN)
return -ERANGE;
- base_addr = read_all_xattrs(inode, ipage);
- if (!base_addr)
- return -ENOMEM;
+ error = read_all_xattrs(inode, ipage, &base_addr);
+ if (error)
+ return error;
entry = __find_xattr(base_addr, index, len, name);
if (IS_XATTR_LAST_ENTRY(entry)) {
@@ -448,9 +456,9 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
int error = 0;
size_t rest = buffer_size;
- base_addr = read_all_xattrs(inode, NULL);
- if (!base_addr)
- return -ENOMEM;
+ error = read_all_xattrs(inode, NULL, &base_addr);
+ if (error)
+ return error;
list_for_each_xattr(entry, base_addr) {
const struct xattr_handler *handler =
@@ -481,13 +489,12 @@ static int __f2fs_setxattr(struct inode *inode, int index,
const char *name, const void *value, size_t size,
struct page *ipage, int flags)
{
- struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_xattr_entry *here, *last;
void *base_addr;
int found, newsize;
size_t len;
__u32 new_hsize;
- int error = -ENOMEM;
+ int error = 0;
if (name == NULL)
return -EINVAL;
@@ -503,9 +510,9 @@ static int __f2fs_setxattr(struct inode *inode, int index,
if (size > MAX_VALUE_LEN(inode))
return -E2BIG;
- base_addr = read_all_xattrs(inode, ipage);
- if (!base_addr)
- goto exit;
+ error = read_all_xattrs(inode, ipage, &base_addr);
+ if (error)
+ return error;
/* find entry with wanted name. */
here = __find_xattr(base_addr, index, len, name);
@@ -538,7 +545,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
free = free + ENTRY_SIZE(here);
if (unlikely(free < newsize)) {
- error = -ENOSPC;
+ error = -E2BIG;
goto exit;
}
}
@@ -566,7 +573,6 @@ static int __f2fs_setxattr(struct inode *inode, int index,
* Before we come here, old entry is removed.
* We just write new entry.
*/
- memset(last, 0, newsize);
last->e_name_index = index;
last->e_name_len = len;
memcpy(last->e_name, name, len);
@@ -580,19 +586,17 @@ static int __f2fs_setxattr(struct inode *inode, int index,
if (error)
goto exit;
- if (is_inode_flag_set(fi, FI_ACL_MODE)) {
- inode->i_mode = fi->i_acl_mode;
- inode->i_ctime = CURRENT_TIME;
- clear_inode_flag(fi, FI_ACL_MODE);
+ if (is_inode_flag_set(inode, FI_ACL_MODE)) {
+ inode->i_mode = F2FS_I(inode)->i_acl_mode;
+ inode->i_ctime = current_time(inode);
+ clear_inode_flag(inode, FI_ACL_MODE);
}
if (index == F2FS_XATTR_INDEX_ENCRYPTION &&
!strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT))
f2fs_set_encrypted_inode(inode);
-
- if (ipage)
- update_inode(inode, ipage);
- else
- update_inode_page(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
+ if (!error && S_ISDIR(inode->i_mode))
+ set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);
exit:
kzfree(base_addr);
return error;
@@ -609,7 +613,7 @@ int f2fs_setxattr(struct inode *inode, int index, const char *name,
if (ipage)
return __f2fs_setxattr(inode, index, name, value,
size, ipage, flags);
- f2fs_balance_fs(sbi);
+ f2fs_balance_fs(sbi, true);
f2fs_lock_op(sbi);
/* protect xattr_ver */
@@ -618,5 +622,6 @@ int f2fs_setxattr(struct inode *inode, int index, const char *name,
up_write(&F2FS_I(inode)->i_sem);
f2fs_unlock_op(sbi);
+ f2fs_update_time(sbi, REQ_TIME);
return err;
}
diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h
index 71a7100d5492..d2fd0387a3c7 100644
--- a/fs/f2fs/xattr.h
+++ b/fs/f2fs/xattr.h
@@ -126,7 +126,8 @@ extern ssize_t f2fs_listxattr(struct dentry *, char *, size_t);
#define f2fs_xattr_handlers NULL
static inline int f2fs_setxattr(struct inode *inode, int index,
- const char *name, const void *value, size_t size, int flags)
+ const char *name, const void *value, size_t size,
+ struct page *page, int flags)
{
return -EOPNOTSUPP;
}
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index baab99b69d8a..de3e91817228 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -1222,7 +1222,7 @@ static int set_gfs2_super(struct super_block *s, void *data)
* We set the bdi here to the queue backing, file systems can
* overwrite this in ->fill_super()
*/
- s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
+ s->s_bdi = bdev_get_queue(s->s_bdev)->backing_dev_info;
return 0;
}
diff --git a/fs/namespace.c b/fs/namespace.c
index f32450c3e72c..adbe44dda88f 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -227,6 +227,7 @@ static struct mount *alloc_vfsmnt(const char *name)
mnt->mnt_count = 1;
mnt->mnt_writers = 0;
#endif
+ mnt->mnt.data = NULL;
INIT_HLIST_NODE(&mnt->mnt_hash);
INIT_LIST_HEAD(&mnt->mnt_child);
@@ -976,7 +977,6 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
if (!mnt)
return ERR_PTR(-ENOMEM);
- mnt->mnt.data = NULL;
if (type->alloc_mnt_data) {
mnt->mnt.data = type->alloc_mnt_data();
if (!mnt->mnt.data) {
@@ -990,7 +990,6 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
root = mount_fs(type, flags, name, &mnt->mnt, data);
if (IS_ERR(root)) {
- kfree(mnt->mnt.data);
mnt_free_id(mnt);
free_vfsmnt(mnt);
return ERR_CAST(root);
@@ -1094,7 +1093,6 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
return mnt;
out_free:
- kfree(mnt->mnt.data);
mnt_free_id(mnt);
free_vfsmnt(mnt);
return ERR_PTR(err);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 9dea85f7f918..578350fd96e1 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -243,7 +243,6 @@ int nfs_iocounter_wait(struct nfs_io_counter *c);
extern const struct nfs_pageio_ops nfs_pgio_rw_ops;
struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *);
void nfs_pgio_header_free(struct nfs_pgio_header *);
-void nfs_pgio_data_destroy(struct nfs_pgio_header *);
int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops,
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 452a011ba0d8..8ebfdd00044b 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -528,16 +528,6 @@ struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);
-/*
- * nfs_pgio_header_free - Free a read or write header
- * @hdr: The header to free
- */
-void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
-{
- hdr->rw_ops->rw_free_header(hdr);
-}
-EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
-
/**
* nfs_pgio_data_destroy - make @hdr suitable for reuse
*
@@ -546,14 +536,24 @@ EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
*
* @hdr: A header that has had nfs_generic_pgio called
*/
-void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
+static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
{
if (hdr->args.context)
put_nfs_open_context(hdr->args.context);
if (hdr->page_array.pagevec != hdr->page_array.page_array)
kfree(hdr->page_array.pagevec);
}
-EXPORT_SYMBOL_GPL(nfs_pgio_data_destroy);
+
+/*
+ * nfs_pgio_header_free - Free a read or write header
+ * @hdr: The header to free
+ */
+void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
+{
+ nfs_pgio_data_destroy(hdr);
+ hdr->rw_ops->rw_free_header(hdr);
+}
+EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
/**
* nfs_pgio_rpcsetup - Set up arguments for a pageio call
@@ -671,7 +671,6 @@ static int nfs_pgio_error(struct nfs_pageio_descriptor *desc,
u32 midx;
set_bit(NFS_IOHDR_REDO, &hdr->flags);
- nfs_pgio_data_destroy(hdr);
hdr->completion_ops->completion(hdr);
/* TODO: Make sure it's right to clean up all mirrors here
* and not just hdr->pgio_mirror_idx */
@@ -689,7 +688,6 @@ static int nfs_pgio_error(struct nfs_pageio_descriptor *desc,
static void nfs_pgio_release(void *calldata)
{
struct nfs_pgio_header *hdr = calldata;
- nfs_pgio_data_destroy(hdr);
hdr->completion_ops->completion(hdr);
}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 3cae0726c1b1..7af7bedd7c02 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1943,7 +1943,6 @@ pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
nfs_pageio_reset_write_mds(desc);
mirror->pg_recoalesce = 1;
}
- nfs_pgio_data_destroy(hdr);
hdr->release(hdr);
}
@@ -2059,7 +2058,6 @@ pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
nfs_pageio_reset_read_mds(desc);
mirror->pg_recoalesce = 1;
}
- nfs_pgio_data_destroy(hdr);
hdr->release(hdr);
}
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index c7f1ce41442a..9e5a6842346e 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1145,9 +1145,7 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
- struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
-
- lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
+ lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
list_del_init(&stp->st_locks);
nfs4_unhash_stid(&stp->st_stid);
@@ -1156,12 +1154,12 @@ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
- struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
+ struct nfs4_client *clp = stp->st_stid.sc_client;
bool unhashed;
- spin_lock(&oo->oo_owner.so_client->cl_lock);
+ spin_lock(&clp->cl_lock);
unhashed = unhash_lock_stateid(stp);
- spin_unlock(&oo->oo_owner.so_client->cl_lock);
+ spin_unlock(&clp->cl_lock);
if (unhashed)
nfs4_put_stid(&stp->st_stid);
}
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 354013ea22ec..67c6c650b21e 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -1079,7 +1079,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_time_gran = 1;
sb->s_max_links = NILFS_LINK_MAX;
- sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info;
+ sb->s_bdi = bdev_get_queue(sb->s_bdev)->backing_dev_info;
err = load_nilfs(nilfs, sb);
if (err)
diff --git a/fs/sdcardfs/derived_perm.c b/fs/sdcardfs/derived_perm.c
index 85a60fb5ff39..e0b74dc77c5f 100644
--- a/fs/sdcardfs/derived_perm.c
+++ b/fs/sdcardfs/derived_perm.c
@@ -176,6 +176,9 @@ void fixup_lower_ownership(struct dentry *dentry, const char *name)
gid_t gid = sbi->options.fs_low_gid;
struct iattr newattrs;
+ if (!sbi->options.gid_derivation)
+ return;
+
info = SDCARDFS_I(d_inode(dentry));
info_d = info->data;
perm = info_d->perm;
diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c
index 103dc45a131f..85eb8ebb5372 100644
--- a/fs/sdcardfs/inode.c
+++ b/fs/sdcardfs/inode.c
@@ -34,10 +34,14 @@ const struct cred *override_fsids(struct sdcardfs_sb_info *sbi,
if (!cred)
return NULL;
- if (data->under_obb)
- uid = AID_MEDIA_OBB;
- else
- uid = multiuser_get_uid(data->userid, sbi->options.fs_low_uid);
+ if (sbi->options.gid_derivation) {
+ if (data->under_obb)
+ uid = AID_MEDIA_OBB;
+ else
+ uid = multiuser_get_uid(data->userid, sbi->options.fs_low_uid);
+ } else {
+ uid = sbi->options.fs_low_uid;
+ }
cred->fsuid = make_kuid(&init_user_ns, uid);
cred->fsgid = make_kgid(&init_user_ns, sbi->options.fs_low_gid);
diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c
index 80825b287836..0a2b5167e9a2 100644
--- a/fs/sdcardfs/main.c
+++ b/fs/sdcardfs/main.c
@@ -32,6 +32,7 @@ enum {
Opt_multiuser,
Opt_userid,
Opt_reserved_mb,
+ Opt_gid_derivation,
Opt_err,
};
@@ -43,6 +44,7 @@ static const match_table_t sdcardfs_tokens = {
{Opt_mask, "mask=%u"},
{Opt_userid, "userid=%d"},
{Opt_multiuser, "multiuser"},
+ {Opt_gid_derivation, "derive_gid"},
{Opt_reserved_mb, "reserved_mb=%u"},
{Opt_err, NULL}
};
@@ -64,6 +66,8 @@ static int parse_options(struct super_block *sb, char *options, int silent,
vfsopts->gid = 0;
/* by default, 0MB is reserved */
opts->reserved_mb = 0;
+ /* by default, gid derivation is off */
+ opts->gid_derivation = false;
*debug = 0;
@@ -115,6 +119,9 @@ static int parse_options(struct super_block *sb, char *options, int silent,
return 0;
opts->reserved_mb = option;
break;
+ case Opt_gid_derivation:
+ opts->gid_derivation = true;
+ break;
/* unknown option */
default:
if (!silent)
diff --git a/fs/sdcardfs/sdcardfs.h b/fs/sdcardfs/sdcardfs.h
index b8624fd8b817..88b92b2f1872 100644
--- a/fs/sdcardfs/sdcardfs.h
+++ b/fs/sdcardfs/sdcardfs.h
@@ -219,6 +219,7 @@ struct sdcardfs_mount_options {
gid_t fs_low_gid;
userid_t fs_user_id;
bool multiuser;
+ bool gid_derivation;
unsigned int reserved_mb;
};
diff --git a/fs/sdcardfs/super.c b/fs/sdcardfs/super.c
index 7f4539b4b249..b89947d878e3 100644
--- a/fs/sdcardfs/super.c
+++ b/fs/sdcardfs/super.c
@@ -302,6 +302,8 @@ static int sdcardfs_show_options(struct vfsmount *mnt, struct seq_file *m,
seq_printf(m, ",mask=%u", vfsopts->mask);
if (opts->fs_user_id)
seq_printf(m, ",userid=%u", opts->fs_user_id);
+ if (opts->gid_derivation)
+ seq_puts(m, ",derive_gid");
if (opts->reserved_mb != 0)
seq_printf(m, ",reserved=%uMB", opts->reserved_mb);
diff --git a/fs/super.c b/fs/super.c
index c96434ea71e2..cbd4fab271d4 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -968,7 +968,7 @@ static int set_bdev_super(struct super_block *s, void *data)
* We set the bdi here to the queue backing, file systems can
* overwrite this in ->fill_super()
*/
- s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
+ s->s_bdi = bdev_get_queue(s->s_bdev)->backing_dev_info;
return 0;
}
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index ec0e239a0fa9..201aae0b2662 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -369,7 +369,14 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
#endif /* DEBUG */
#ifdef CONFIG_XFS_RT
-#define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME)
+
+/*
+ * make sure we ignore the inode flag if the filesystem doesn't have a
+ * configured realtime device.
+ */
+#define XFS_IS_REALTIME_INODE(ip) \
+ (((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) && \
+ (ip)->i_mount->m_rtdev_targp)
#else
#define XFS_IS_REALTIME_INODE(ip) (0)
#endif
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index fc824e2828f3..5d2add1a6c96 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -48,7 +48,11 @@
#define parent_node(node) ((void)(node),0)
#endif
#ifndef cpumask_of_node
-#define cpumask_of_node(node) ((void)node, cpu_online_mask)
+ #ifdef CONFIG_NEED_MULTIPLE_NODES
+ #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask)
+ #else
+ #define cpumask_of_node(node) ((void)node, cpu_online_mask)
+ #endif
#endif
#ifndef pcibus_to_node
#define pcibus_to_node(bus) ((void)(bus), -1)
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 140c29635069..c104d4aed62a 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -10,6 +10,7 @@
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+#include <linux/kref.h>
struct page;
struct device;
@@ -141,6 +142,7 @@ struct backing_dev_info {
void *congested_data; /* Pointer to aux data for congested func */
char *name;
+ struct kref refcnt; /* Reference counter for the structure */
unsigned int min_ratio;
unsigned int max_ratio, max_prop_frac;
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 89d3de3e096b..125bc67319b4 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -18,7 +18,14 @@
#include <linux/slab.h>
int __must_check bdi_init(struct backing_dev_info *bdi);
-void bdi_exit(struct backing_dev_info *bdi);
+
+static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
+{
+ kref_get(&bdi->refcnt);
+ return bdi;
+}
+
+void bdi_put(struct backing_dev_info *bdi);
__printf(3, 4)
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
@@ -29,6 +36,7 @@ void bdi_unregister(struct backing_dev_info *bdi);
int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
void bdi_destroy(struct backing_dev_info *bdi);
+struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
bool range_cyclic, enum wb_reason reason);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ae64a897622c..8150e164385c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -332,7 +332,7 @@ struct request_queue {
*/
struct delayed_work delay_work;
- struct backing_dev_info backing_dev_info;
+ struct backing_dev_info *backing_dev_info;
/*
* The queue owner gets to use this for whatever they like.
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 5daa9e78584c..a998cf205cdc 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -119,7 +119,15 @@ struct cpufreq_policy {
bool fast_switch_possible;
bool fast_switch_enabled;
- /* Cached frequency lookup from cpufreq_driver_resolve_freq. */
+ /*
+ * Preferred average time interval between consecutive invocations of
+ * the driver to set the frequency for this policy. To be set by the
+ * scaling driver (0, which is the default, means no preference).
+ */
+ unsigned int up_transition_delay_us;
+ unsigned int down_transition_delay_us;
+
+ /* Cached frequency lookup from cpufreq_driver_resolve_freq. */
unsigned int cached_target_freq;
int cached_resolved_idx;
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 01448e01b40d..c066f6b56e58 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -229,6 +229,7 @@ struct dentry_operations {
#define DCACHE_MAY_FREE 0x00800000
#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */
#define DCACHE_OP_SELECT_INODE 0x02000000 /* Unioned entry: dcache op selects inode */
+#define DCACHE_ENCRYPTED_WITH_KEY 0x04000000 /* dir is encrypted with a valid key */
#define DCACHE_OP_REAL 0x08000000
extern seqlock_t rename_lock;
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 3d6e6ce44c5c..3e5972ef5019 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -21,7 +21,7 @@
#define F2FS_BLKSIZE 4096 /* support only 4KB block */
#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
-#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE)
+#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */
@@ -51,10 +51,18 @@
#define MAX_ACTIVE_DATA_LOGS 8
#define VERSION_LEN 256
+#define MAX_VOLUME_NAME 512
+#define MAX_PATH_LEN 64
+#define MAX_DEVICES 8
/*
* For superblock
*/
+struct f2fs_device {
+ __u8 path[MAX_PATH_LEN];
+ __le32 total_segments;
+} __packed;
+
struct f2fs_super_block {
__le32 magic; /* Magic Number */
__le16 major_ver; /* Major Version */
@@ -84,7 +92,7 @@ struct f2fs_super_block {
__le32 node_ino; /* node inode number */
__le32 meta_ino; /* meta inode number */
__u8 uuid[16]; /* 128-bit uuid for volume */
- __le16 volume_name[512]; /* volume name */
+ __le16 volume_name[MAX_VOLUME_NAME]; /* volume name */
__le32 extension_count; /* # of extensions below */
__u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */
__le32 cp_payload;
@@ -93,12 +101,14 @@ struct f2fs_super_block {
__le32 feature; /* defined features */
__u8 encryption_level; /* versioning level for encryption */
__u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */
- __u8 reserved[871]; /* valid reserved region */
+ struct f2fs_device devs[MAX_DEVICES]; /* device list */
+ __u8 reserved[327]; /* valid reserved region */
} __packed;
/*
* For checkpoint
*/
+#define CP_CRC_RECOVERY_FLAG 0x00000040
#define CP_FASTBOOT_FLAG 0x00000020
#define CP_FSCK_FLAG 0x00000010
#define CP_ERROR_FLAG 0x00000008
@@ -169,12 +179,12 @@ struct f2fs_extent {
#define F2FS_INLINE_XATTR_ADDRS 50 /* 200 bytes for inline xattrs */
#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */
-#define ADDRS_PER_INODE(fi) addrs_per_inode(fi)
+#define ADDRS_PER_INODE(inode) addrs_per_inode(inode)
#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
-#define ADDRS_PER_PAGE(page, fi) \
- (IS_INODE(page) ? ADDRS_PER_INODE(fi) : ADDRS_PER_BLOCK)
+#define ADDRS_PER_PAGE(page, inode) \
+ (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK)
#define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1)
#define NODE_DIR2_BLOCK (DEF_ADDRS_PER_INODE + 2)
@@ -261,7 +271,7 @@ struct f2fs_node {
/*
* For NAT entries
*/
-#define NAT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_nat_entry))
+#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
struct f2fs_nat_entry {
__u8 version; /* latest version of cached nat entry */
@@ -281,7 +291,7 @@ struct f2fs_nat_block {
* Not allow to change this.
*/
#define SIT_VBLOCK_MAP_SIZE 64
-#define SIT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_sit_entry))
+#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
/*
* F2FS uses 4 bytes to represent block address. As a result, supported size of
@@ -350,7 +360,7 @@ struct f2fs_summary {
struct summary_footer {
unsigned char entry_type; /* SUM_TYPE_XXX */
- __u32 check_sum; /* summary checksum */
+ __le32 check_sum; /* summary checksum */
} __packed;
#define SUM_JOURNAL_SIZE (F2FS_BLKSIZE - SUM_FOOTER_SIZE -\
@@ -363,6 +373,12 @@ struct summary_footer {
sizeof(struct sit_journal_entry))
#define SIT_JOURNAL_RESERVED ((SUM_JOURNAL_SIZE - 2) %\
sizeof(struct sit_journal_entry))
+
+/* Reserved area should make the size of f2fs_extra_info equal to
+ * that of nat_journal and sit_journal.
+ */
+#define EXTRA_INFO_RESERVED (SUM_JOURNAL_SIZE - 2 - 8)
+
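The constant works out as follows, assuming the usual f2fs summary-block layout (512 seven-byte f2fs_summary entries and a five-byte footer in a 4KB block):

	SUM_ENTRY_SIZE      = 512 * 7         = 3584
	SUM_JOURNAL_SIZE    = 4096 - 5 - 3584 =  507
	EXTRA_INFO_RESERVED = 507 - 2 - 8     =  497

so kbytes_written (8 bytes) plus the reserved area (497) fills the same 505 bytes as nat_journal and sit_journal after the 2-byte n_nats/n_sits union.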
/*
* frequently updated NAT/SIT entries can be stored in the spare area in
* summary blocks
@@ -392,18 +408,28 @@ struct sit_journal {
__u8 reserved[SIT_JOURNAL_RESERVED];
} __packed;
-/* 4KB-sized summary block structure */
-struct f2fs_summary_block {
- struct f2fs_summary entries[ENTRIES_IN_SUM];
+struct f2fs_extra_info {
+ __le64 kbytes_written;
+ __u8 reserved[EXTRA_INFO_RESERVED];
+} __packed;
+
+struct f2fs_journal {
union {
__le16 n_nats;
__le16 n_sits;
};
- /* spare area is used by NAT or SIT journals */
+ /* spare area is used by NAT or SIT journals or extra info */
union {
struct nat_journal nat_j;
struct sit_journal sit_j;
+ struct f2fs_extra_info info;
};
+} __packed;
+
+/* 4KB-sized summary block structure */
+struct f2fs_summary_block {
+ struct f2fs_summary entries[ENTRIES_IN_SUM];
+ struct f2fs_journal journal;
struct summary_footer footer;
} __packed;
@@ -497,4 +523,6 @@ enum {
F2FS_FT_MAX
};
+#define S_SHIFT 12
+
#endif /* _LINUX_F2FS_FS_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index df1171bada01..d5264dcaaa26 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -52,6 +52,8 @@ struct swap_info_struct;
struct seq_file;
struct workqueue_struct;
struct iov_iter;
+struct fscrypt_info;
+struct fscrypt_operations;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
@@ -479,6 +481,7 @@ struct block_device {
int bd_invalidated;
struct gendisk * bd_disk;
struct request_queue * bd_queue;
+ struct backing_dev_info *bd_bdi;
struct list_head bd_list;
/*
* Private data. You must have bd_claim'ed the block_device
@@ -686,6 +689,9 @@ struct inode {
struct hlist_head i_fsnotify_marks;
#endif
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ struct fscrypt_info *i_crypt_info;
+#endif
void *i_private; /* fs or device private pointer */
};
@@ -1350,6 +1356,8 @@ struct super_block {
#endif
const struct xattr_handler **s_xattr;
+ const struct fscrypt_operations *s_cop;
+
struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev;
@@ -2324,6 +2332,7 @@ extern struct kmem_cache *names_cachep;
#ifdef CONFIG_BLOCK
extern int register_blkdev(unsigned int, const char *);
extern void unregister_blkdev(unsigned int, const char *);
+extern void bdev_unhash_inode(dev_t dev);
extern struct block_device *bdget(dev_t);
extern struct block_device *bdgrab(struct block_device *bdev);
extern void bd_set_size(struct block_device *, loff_t size);
diff --git a/include/linux/fscrypto.h b/include/linux/fscrypto.h
new file mode 100644
index 000000000000..ff8b11b26f31
--- /dev/null
+++ b/include/linux/fscrypto.h
@@ -0,0 +1,411 @@
+/*
+ * General per-file encryption definition
+ *
+ * Copyright (C) 2015, Google, Inc.
+ *
+ * Written by Michael Halcrow, 2015.
+ * Modified by Jaegeuk Kim, 2015.
+ */
+
+#ifndef _LINUX_FSCRYPTO_H
+#define _LINUX_FSCRYPTO_H
+
+#include <linux/key.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/bio.h>
+#include <linux/dcache.h>
+#include <crypto/skcipher.h>
+#include <uapi/linux/fs.h>
+
+#define FS_KEY_DERIVATION_NONCE_SIZE 16
+#define FS_ENCRYPTION_CONTEXT_FORMAT_V1 1
+
+#define FS_POLICY_FLAGS_PAD_4 0x00
+#define FS_POLICY_FLAGS_PAD_8 0x01
+#define FS_POLICY_FLAGS_PAD_16 0x02
+#define FS_POLICY_FLAGS_PAD_32 0x03
+#define FS_POLICY_FLAGS_PAD_MASK 0x03
+#define FS_POLICY_FLAGS_VALID 0x03
+
+/* Encryption algorithms */
+#define FS_ENCRYPTION_MODE_INVALID 0
+#define FS_ENCRYPTION_MODE_AES_256_XTS 1
+#define FS_ENCRYPTION_MODE_AES_256_GCM 2
+#define FS_ENCRYPTION_MODE_AES_256_CBC 3
+#define FS_ENCRYPTION_MODE_AES_256_CTS 4
+
+/**
+ * Encryption context for inode
+ *
+ * Protector format:
+ * 1 byte: Protector format (1 = this version)
+ * 1 byte: File contents encryption mode
+ * 1 byte: File names encryption mode
+ * 1 byte: Flags
+ * 8 bytes: Master Key descriptor
+ * 16 bytes: Encryption Key derivation nonce
+ */
+struct fscrypt_context {
+ u8 format;
+ u8 contents_encryption_mode;
+ u8 filenames_encryption_mode;
+ u8 flags;
+ u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+ u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE];
+} __packed;
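With the 8-byte FS_KEY_DESCRIPTOR_SIZE from the fscrypt UAPI, the packed context is 28 bytes on disk. An illustrative build-time check (it would have to live inside a function, e.g. an init routine; it is not part of the header itself):

/* Illustrative only: 4 one-byte fields + 8-byte descriptor + 16-byte nonce */
BUILD_BUG_ON(sizeof(struct fscrypt_context) != 28);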
+
+/* Encryption parameters */
+#define FS_XTS_TWEAK_SIZE 16
+#define FS_AES_128_ECB_KEY_SIZE 16
+#define FS_AES_256_GCM_KEY_SIZE 32
+#define FS_AES_256_CBC_KEY_SIZE 32
+#define FS_AES_256_CTS_KEY_SIZE 32
+#define FS_AES_256_XTS_KEY_SIZE 64
+#define FS_MAX_KEY_SIZE 64
+
+#define FS_KEY_DESC_PREFIX "fscrypt:"
+#define FS_KEY_DESC_PREFIX_SIZE 8
+
+/* This is passed in from userspace into the kernel keyring */
+struct fscrypt_key {
+ u32 mode;
+ u8 raw[FS_MAX_KEY_SIZE];
+ u32 size;
+} __packed;
+
+struct fscrypt_info {
+ u8 ci_data_mode;
+ u8 ci_filename_mode;
+ u8 ci_flags;
+ struct crypto_skcipher *ci_ctfm;
+ struct key *ci_keyring_key;
+ u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE];
+};
+
+#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
+#define FS_WRITE_PATH_FL 0x00000002
+
+struct fscrypt_ctx {
+ union {
+ struct {
+ struct page *bounce_page; /* Ciphertext page */
+ struct page *control_page; /* Original page */
+ } w;
+ struct {
+ struct bio *bio;
+ struct work_struct work;
+ } r;
+ struct list_head free_list; /* Free list */
+ };
+ u8 flags; /* Flags */
+ u8 mode; /* Encryption mode for tfm */
+};
+
+struct fscrypt_completion_result {
+ struct completion completion;
+ int res;
+};
+
+#define DECLARE_FS_COMPLETION_RESULT(ecr) \
+ struct fscrypt_completion_result ecr = { \
+ COMPLETION_INITIALIZER((ecr).completion), 0 }
+
+#define FS_FNAME_NUM_SCATTER_ENTRIES 4
+#define FS_CRYPTO_BLOCK_SIZE 16
+#define FS_FNAME_CRYPTO_DIGEST_SIZE 32
+
+/**
+ * For encrypted symlinks, the ciphertext length is stored at the beginning
+ * of the string in little-endian format.
+ */
+struct fscrypt_symlink_data {
+ __le16 len;
+ char encrypted_path[1];
+} __packed;
+
+/**
+ * This function is used to calculate the disk space required to
+ * store a filename of length l in encrypted symlink format.
+ */
+static inline u32 fscrypt_symlink_data_len(u32 l)
+{
+ if (l < FS_CRYPTO_BLOCK_SIZE)
+ l = FS_CRYPTO_BLOCK_SIZE;
+ return (l + sizeof(struct fscrypt_symlink_data) - 1);
+}
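A worked example: for a 5-byte name, l pads up to one FS_CRYPTO_BLOCK_SIZE (16) block, and sizeof(struct fscrypt_symlink_data) is 3 when packed, so:

u32 need = fscrypt_symlink_data_len(5);
/* = max(5, 16) + sizeof(struct fscrypt_symlink_data) - 1
 * = 16 + 3 - 1 = 18: a __le16 length prefix plus 16 ciphertext bytes
 */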
+
+struct fscrypt_str {
+ unsigned char *name;
+ u32 len;
+};
+
+struct fscrypt_name {
+ const struct qstr *usr_fname;
+ struct fscrypt_str disk_name;
+ u32 hash;
+ u32 minor_hash;
+ struct fscrypt_str crypto_buf;
+};
+
+#define FSTR_INIT(n, l) { .name = n, .len = l }
+#define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len)
+#define fname_name(p) ((p)->disk_name.name)
+#define fname_len(p) ((p)->disk_name.len)
+
+/*
+ * crypto operations for filesystems
+ */
+struct fscrypt_operations {
+ int (*get_context)(struct inode *, void *, size_t);
+ int (*key_prefix)(struct inode *, u8 **);
+ int (*prepare_context)(struct inode *);
+ int (*set_context)(struct inode *, const void *, size_t, void *);
+ int (*dummy_context)(struct inode *);
+ bool (*is_encrypted)(struct inode *);
+ bool (*empty_dir)(struct inode *);
+ unsigned (*max_namelen)(struct inode *);
+};
+
+static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
+{
+ if (inode->i_sb->s_cop->dummy_context &&
+ inode->i_sb->s_cop->dummy_context(inode))
+ return true;
+ return false;
+}
+
+static inline bool fscrypt_valid_contents_enc_mode(u32 mode)
+{
+ return (mode == FS_ENCRYPTION_MODE_AES_256_XTS);
+}
+
+static inline bool fscrypt_valid_filenames_enc_mode(u32 mode)
+{
+ return (mode == FS_ENCRYPTION_MODE_AES_256_CTS);
+}
+
+static inline bool fscrypt_is_dot_dotdot(const struct qstr *str)
+{
+ if (str->len == 1 && str->name[0] == '.')
+ return true;
+
+ if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
+ return true;
+
+ return false;
+}
+
+static inline struct page *fscrypt_control_page(struct page *page)
+{
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
+#else
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-EINVAL);
+#endif
+}
+
+static inline int fscrypt_has_encryption_key(struct inode *inode)
+{
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ return (inode->i_crypt_info != NULL);
+#else
+ return 0;
+#endif
+}
+
+static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry)
+{
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ spin_lock(&dentry->d_lock);
+ dentry->d_flags |= DCACHE_ENCRYPTED_WITH_KEY;
+ spin_unlock(&dentry->d_lock);
+#endif
+}
+
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+extern const struct dentry_operations fscrypt_d_ops;
+#endif
+
+static inline void fscrypt_set_d_op(struct dentry *dentry)
+{
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ d_set_d_op(dentry, &fscrypt_d_ops);
+#endif
+}
+
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+/* crypto.c */
+extern struct kmem_cache *fscrypt_info_cachep;
+int fscrypt_initialize(void);
+
+extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *, gfp_t);
+extern void fscrypt_release_ctx(struct fscrypt_ctx *);
+extern struct page *fscrypt_encrypt_page(struct inode *, struct page *, gfp_t);
+extern int fscrypt_decrypt_page(struct page *);
+extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
+extern void fscrypt_pullback_bio_page(struct page **, bool);
+extern void fscrypt_restore_control_page(struct page *);
+extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t,
+ unsigned int);
+/* policy.c */
+extern int fscrypt_process_policy(struct file *, const struct fscrypt_policy *);
+extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *);
+extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
+extern int fscrypt_inherit_context(struct inode *, struct inode *,
+ void *, bool);
+/* keyinfo.c */
+extern int get_crypt_info(struct inode *);
+extern int fscrypt_get_encryption_info(struct inode *);
+extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *);
+
+/* fname.c */
+extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
+ int lookup, struct fscrypt_name *);
+extern void fscrypt_free_filename(struct fscrypt_name *);
+extern u32 fscrypt_fname_encrypted_size(struct inode *, u32);
+extern int fscrypt_fname_alloc_buffer(struct inode *, u32,
+ struct fscrypt_str *);
+extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
+extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
+ const struct fscrypt_str *, struct fscrypt_str *);
+extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
+ struct fscrypt_str *);
+#endif
+
+/* crypto.c */
+static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i,
+ gfp_t f)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void fscrypt_notsupp_release_ctx(struct fscrypt_ctx *c)
+{
+ return;
+}
+
+static inline struct page *fscrypt_notsupp_encrypt_page(struct inode *i,
+ struct page *p, gfp_t f)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int fscrypt_notsupp_decrypt_page(struct page *p)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_notsupp_decrypt_bio_pages(struct fscrypt_ctx *c,
+ struct bio *b)
+{
+ return;
+}
+
+static inline void fscrypt_notsupp_pullback_bio_page(struct page **p, bool b)
+{
+ return;
+}
+
+static inline void fscrypt_notsupp_restore_control_page(struct page *p)
+{
+ return;
+}
+
+static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p,
+ sector_t s, unsigned int f)
+{
+ return -EOPNOTSUPP;
+}
+
+/* policy.c */
+static inline int fscrypt_notsupp_process_policy(struct file *f,
+ const struct fscrypt_policy *p)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_notsupp_get_policy(struct inode *i,
+ struct fscrypt_policy *p)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_notsupp_has_permitted_context(struct inode *p,
+ struct inode *i)
+{
+ return 0;
+}
+
+static inline int fscrypt_notsupp_inherit_context(struct inode *p,
+ struct inode *i, void *v, bool b)
+{
+ return -EOPNOTSUPP;
+}
+
+/* keyinfo.c */
+static inline int fscrypt_notsupp_get_encryption_info(struct inode *i)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_notsupp_put_encryption_info(struct inode *i,
+ struct fscrypt_info *f)
+{
+ return;
+}
+
+/* fname.c */
+static inline int fscrypt_notsupp_setup_filename(struct inode *dir,
+ const struct qstr *iname,
+ int lookup, struct fscrypt_name *fname)
+{
+ if (dir->i_sb->s_cop->is_encrypted(dir))
+ return -EOPNOTSUPP;
+
+ memset(fname, 0, sizeof(struct fscrypt_name));
+ fname->usr_fname = iname;
+ fname->disk_name.name = (unsigned char *)iname->name;
+ fname->disk_name.len = iname->len;
+ return 0;
+}
+
+static inline void fscrypt_notsupp_free_filename(struct fscrypt_name *fname)
+{
+ return;
+}
+
+static inline u32 fscrypt_notsupp_fname_encrypted_size(struct inode *i, u32 s)
+{
+ /* never happens */
+ WARN_ON(1);
+ return 0;
+}
+
+static inline int fscrypt_notsupp_fname_alloc_buffer(struct inode *inode,
+ u32 ilen, struct fscrypt_str *crypto_str)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void fscrypt_notsupp_fname_free_buffer(struct fscrypt_str *c)
+{
+ return;
+}
+
+static inline int fscrypt_notsupp_fname_disk_to_usr(struct inode *inode,
+ u32 hash, u32 minor_hash,
+ const struct fscrypt_str *iname,
+ struct fscrypt_str *oname)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int fscrypt_notsupp_fname_usr_to_disk(struct inode *inode,
+ const struct qstr *iname,
+ struct fscrypt_str *oname)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* _LINUX_FSCRYPTO_H */
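The header above pairs each fscrypt entry point with a fscrypt_notsupp_* stub of the same shape, so a filesystem can pick one set at build time. A minimal sketch of that wiring, with hypothetical myfs_* aliases that are not part of this patch:

	#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
	#define myfs_fname_setup	fscrypt_setup_filename
	#define myfs_fname_free		fscrypt_free_filename
	#else
	#define myfs_fname_setup	fscrypt_notsupp_setup_filename
	#define myfs_fname_free		fscrypt_notsupp_free_filename
	#endif

The stub variants return -EOPNOTSUPP (or act as safe no-ops), so call sites compile and behave sensibly whether or not CONFIG_FS_ENCRYPTION is set.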
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 782d4e814e21..4bc4b1b13193 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -310,6 +310,7 @@ static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
{
struct ppa_addr l;
+ l.ppa = 0;
/*
* (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc.
*/
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 33316a1ae98f..fe0a5de1eda5 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -12,6 +12,7 @@
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
+#include <linux/workqueue.h>
#include <asm/page.h>
#include <asm/mmu.h>
@@ -531,6 +532,7 @@ struct mm_struct {
int app_setting;
#endif
+ struct work_struct async_put_work;
};
static inline void mm_init_cpumask(struct mm_struct *mm)
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 0065ffc9322b..08b3b8348fd7 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -170,6 +170,7 @@ extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
extern int mmc_set_auto_bkops(struct mmc_card *card, bool enable);
extern int mmc_suspend_clk_scaling(struct mmc_host *host);
+extern void mmc_flush_detect_work(struct mmc_host *);
#define MMC_ERASE_ARG 0x00000000
#define MMC_SECURE_ERASE_ARG 0x80000000
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index aea4c0f2ef5f..65a188eeeeb6 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -519,6 +519,7 @@ struct mmc_host {
unsigned int bus_resume_flags;
#define MMC_BUSRESUME_MANUAL_RESUME (1 << 0)
#define MMC_BUSRESUME_NEEDS_RESUME (1 << 1)
+ bool ignore_bus_resume_flags;
unsigned int sdio_irqs;
struct task_struct *sdio_irq_thread;
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 37f05cb1dfd6..1af616138d1d 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -573,6 +573,7 @@
#define PCI_DEVICE_ID_AMD_CS5536_EHC 0x2095
#define PCI_DEVICE_ID_AMD_CS5536_UDC 0x2096
#define PCI_DEVICE_ID_AMD_CS5536_UOC 0x2097
+#define PCI_DEVICE_ID_AMD_CS5536_DEV_IDE 0x2092
#define PCI_DEVICE_ID_AMD_CS5536_IDE 0x209A
#define PCI_DEVICE_ID_AMD_LX_VIDEO 0x2081
#define PCI_DEVICE_ID_AMD_LX_AES 0x2082
diff --git a/include/linux/qdsp6v2/apr.h b/include/linux/qdsp6v2/apr.h
index 29deb3ca5ac7..adcdbcbc5907 100644
--- a/include/linux/qdsp6v2/apr.h
+++ b/include/linux/qdsp6v2/apr.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -137,6 +137,10 @@ struct apr_svc {
struct mutex m_lock;
spinlock_t w_lock;
uint8_t pkt_owner;
+#ifdef CONFIG_MSM_QDSP6_APRV2_VM
+ uint16_t vm_dest_svc;
+ uint32_t vm_handle;
+#endif
};
struct apr_client {
diff --git a/include/linux/qdsp6v2/aprv2_vm.h b/include/linux/qdsp6v2/aprv2_vm.h
new file mode 100644
index 000000000000..d16ea12d62b5
--- /dev/null
+++ b/include/linux/qdsp6v2/aprv2_vm.h
@@ -0,0 +1,116 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __APRV2_VM_H__
+#define __APRV2_VM_H__
+
+#define APRV2_VM_MAX_DNS_SIZE (31)
+ /* Includes NULL character. */
+#define APRV2_VM_PKT_SERVICE_ID_MASK (0x00FF)
+ /* Bitmask of the service ID field. */
+
+/* Packet Structure Definition */
+struct aprv2_vm_packet_t {
+ uint32_t header;
+ uint16_t src_addr;
+ uint16_t src_port;
+ uint16_t dst_addr;
+ uint16_t dst_port;
+ uint32_t token;
+ uint32_t opcode;
+};
+
+/**
+ * In order to send command/event via MM HAB, the following buffer
+ * format shall be followed, where the buffer is provided to the
+ * HAB send API.
+ * |-----cmd_id or evt_id -----| <- 32 bit, e.g. APRV2_VM_CMDID_REGISTER
+ * |-----cmd payload ----------| e.g. aprv2_vm_cmd_register_t
+ * | ... |
+ *
+ * In order to receive a command response or event ack, the following
+ * buffer format shall be followed, where the buffer is provided to
+ * the HAB receive API.
+ * |-----cmd response ---------| e.g. aprv2_vm_cmd_register_rsp_t
+ * | ... |
+ */
+
+/* Registers a service with the backend APR driver. */
+#define APRV2_VM_CMDID_REGISTER (0x00000001)
+
+struct aprv2_vm_cmd_register_t {
+ uint32_t name_size;
+ /**< The service name string size in bytes. */
+ char name[APRV2_VM_MAX_DNS_SIZE];
+ /**<
+ * The service name string to register.
+ *
+ * A NULL name means the service does not have a name.
+ */
+ uint16_t addr;
+ /**<
+ * The address to register.
+ *
+ * A zero value means to auto-generate a free dynamic address.
+ * A non-zero value means to directly use the statically assigned address.
+ */
+};
+
+struct aprv2_vm_cmd_register_rsp_t {
+ int32_t status;
+ /**< The status of registration. */
+ uint32_t handle;
+ /**< The registered service handle. */
+ uint16_t addr;
+ /**< The actual registered address. */
+};
+
+#define APRV2_VM_CMDID_DEREGISTER (0x00000002)
+
+struct aprv2_vm_cmd_deregister_t {
+ uint32_t handle;
+ /**< The registered service handle. */
+};
+
+struct aprv2_vm_cmd_deregister_rsp_t {
+ int32_t status;
+ /**< The status of de-registration. */
+};
+
+#define APRV2_VM_CMDID_ASYNC_SEND (0x00000003)
+
+struct aprv2_vm_cmd_async_send_t {
+ uint32_t handle;
+ /**< The registered service handle. */
+ struct aprv2_vm_packet_t pkt_header;
+ /**< The packet header. */
+ /* The apr packet payload follows */
+};
+
+struct aprv2_vm_cmd_async_send_rsp_t {
+ int32_t status;
+ /**< The status of send. */
+};
+
+#define APRV2_VM_EVT_RX_PKT_AVAILABLE (0x00000004)
+
+struct aprv2_vm_evt_rx_pkt_available_t {
+ struct aprv2_vm_packet_t pkt_header;
+ /**< The packet header. */
+ /* The apr packet payload follows */
+};
+
+struct aprv2_vm_ack_rx_pkt_available_t {
+ int32_t status;
+};
+
+#endif /* __APRV2_VM_H__ */
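Following the buffer layout documented above (a 32-bit cmd_id followed by the packed payload), a sketch of building a registration command; hab_send() is a placeholder for the actual MM HAB send API, which this header does not define:

	struct {
		uint32_t cmd_id;
		struct aprv2_vm_cmd_register_t cmd;
	} __packed buf;

	buf.cmd_id = APRV2_VM_CMDID_REGISTER;
	memset(&buf.cmd, 0, sizeof(buf.cmd));
	strlcpy(buf.cmd.name, "example.svc", APRV2_VM_MAX_DNS_SIZE);
	buf.cmd.name_size = strlen(buf.cmd.name) + 1;	/* includes NULL */
	buf.cmd.addr = 0;	/* 0: auto-generate a dynamic address */
	/* hab_send(handle, &buf, sizeof(buf)); */

The response comes back as a bare struct aprv2_vm_cmd_register_rsp_t, so the receive buffer needs no cmd_id prefix.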
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 60d15a080d7c..9d3eda39bcd2 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -37,7 +37,7 @@ void rcu_cpu_stall_reset(void);
/*
* Note a virtualization-based context switch. This is simply a
* wrapper around rcu_note_context_switch(), which allows TINY_RCU
- * to save a few bytes.
+ * to save a few bytes. The caller must have disabled interrupts.
*/
static inline void rcu_virt_note_context_switch(int cpu)
{
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2716faadc618..7299540fe1ec 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2969,6 +2969,11 @@ static inline void mmdrop(struct mm_struct * mm)
/* mmput gets rid of the mappings and all user-space */
extern int mmput(struct mm_struct *);
+/* Same as above, but performs the slow path from an async context. Can
+ * be called from atomic context as well.
+ */
+extern void mmput_async(struct mm_struct *);
+
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
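Since mmput() may sleep, a hedged usage sketch of the asynchronous variant declared above, for a caller in atomic context (the function name is illustrative):

	static void drop_mm_ref_atomic(struct mm_struct *mm)
	{
		/* Safe under a spinlock: if this is the last reference,
		 * the heavyweight teardown is deferred to a workqueue
		 * instead of running here.
		 */
		mmput_async(mm);
	}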
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 0a34489a46b6..17a33f31bfa2 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -23,6 +23,8 @@ extern void print_stack_trace(struct stack_trace *trace, int spaces);
extern int snprint_stack_trace(char *buf, size_t size,
struct stack_trace *trace, int spaces);
+#define BACKPORTED_EXPORT_SAVE_STACK_TRACE_TSK_ARM
+
#ifdef CONFIG_USER_STACKTRACE_SUPPORT
extern void save_stack_trace_user(struct stack_trace *trace);
#else
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
index c28dd523f96e..d43837f2ce3a 100644
--- a/include/linux/tty_flip.h
+++ b/include/linux/tty_flip.h
@@ -12,6 +12,7 @@ extern int tty_prepare_flip_string(struct tty_port *port,
unsigned char **chars, size_t size);
extern void tty_flip_buffer_push(struct tty_port *port);
void tty_schedule_flip(struct tty_port *port);
+int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag);
static inline int tty_insert_flip_char(struct tty_port *port,
unsigned char ch, char flag)
@@ -26,7 +27,7 @@ static inline int tty_insert_flip_char(struct tty_port *port,
*char_buf_ptr(tb, tb->used++) = ch;
return 1;
}
- return tty_insert_flip_string_flags(port, &ch, &flag, 1);
+ return __tty_insert_flip_char(port, ch, flag);
}
static inline int tty_insert_flip_string(struct tty_port *port,
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 2e04fa5a5b58..4e4aee64f559 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -311,7 +311,7 @@ enum {
__WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
__WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
- __WQ_ORDERED_EXPLICIT = 1 << 18, /* internal: alloc_ordered_workqueue() */
+ __WQ_ORDERED_EXPLICIT = 1 << 19, /* internal: alloc_ordered_workqueue() */
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
diff --git a/include/media/msm_cam_sensor.h b/include/media/msm_cam_sensor.h
index 762f1c51620c..2c8b651147e0 100644
--- a/include/media/msm_cam_sensor.h
+++ b/include/media/msm_cam_sensor.h
@@ -84,6 +84,15 @@ struct msm_ir_cut_cfg_data_t32 {
enum msm_ir_cut_cfg_type_t cfg_type;
};
+struct msm_laser_led_cfg_data_t32 {
+ enum msm_laser_led_cfg_type_t cfg_type;
+ compat_uptr_t setting;
+ compat_uptr_t debug_reg;
+ uint32_t debug_reg_size;
+ uint16_t i2c_addr;
+ enum i2c_freq_mode_t i2c_freq_mode;
+};
+
struct eeprom_read_t32 {
compat_uptr_t dbuffer;
uint32_t num_bytes;
@@ -276,7 +285,10 @@ struct msm_flash_cfg_data_t32 {
#define VIDIOC_MSM_IR_CUT_CFG32 \
_IOWR('V', BASE_VIDIOC_PRIVATE + 15, struct msm_ir_cut_cfg_data_t32)
-#endif
+
+#define VIDIOC_MSM_LASER_LED_CFG32 \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 16, struct msm_laser_led_cfg_data_t32)
#endif
+#endif
diff --git a/include/net/cnss2.h b/include/net/cnss2.h
index 5409e1b15a25..f1d321299492 100644
--- a/include/net/cnss2.h
+++ b/include/net/cnss2.h
@@ -153,14 +153,8 @@ extern int cnss_get_platform_cap(struct cnss_platform_cap *cap);
extern int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info);
extern void cnss_set_driver_status(enum cnss_driver_status driver_status);
extern int cnss_request_bus_bandwidth(int bandwidth);
-extern int cnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count);
-extern int cnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 *ch_count,
- u16 buf_len);
-extern int cnss_wlan_set_dfs_nol(const void *info, u16 info_len);
-extern int cnss_wlan_get_dfs_nol(void *info, u16 info_len);
extern int cnss_power_up(struct device *dev);
extern int cnss_power_down(struct device *dev);
-extern u8 *cnss_common_get_wlan_mac_address(struct device *dev, uint32_t *num);
extern void cnss_request_pm_qos(u32 qos_val);
extern void cnss_remove_pm_qos(void);
extern void cnss_lock_pm_sem(void);
diff --git a/include/net/cnss_nl.h b/include/net/cnss_nl.h
index 86c2fccc930e..b8a7cfdb7966 100644
--- a/include/net/cnss_nl.h
+++ b/include/net/cnss_nl.h
@@ -23,12 +23,16 @@
* @CLD80211_ATTR_VENDOR_DATA: Embed all other attributes in this nested
* attribute.
* @CLD80211_ATTR_DATA: Embed complete data in this attribute
+ * @CLD80211_ATTR_META_DATA: Embed meta data for the above data. This helps
+ * the wlan driver peek into the request message packet without opening up
+ * the definition of the complete request message.
*
* Any new message in future can be added as another attribute
*/
enum cld80211_attr {
CLD80211_ATTR_VENDOR_DATA = 1,
CLD80211_ATTR_DATA,
+ CLD80211_ATTR_META_DATA,
/* add new attributes above here */
__CLD80211_ATTR_AFTER_LAST,
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index ac42bbb37b2d..c26a6e4dc306 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -1,14 +1,9 @@
#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__
-#include <linux/percpu_counter.h>
-
struct netns_frags {
- /* The percpu_counter "mem" need to be cacheline aligned.
- * mem.count must not share cacheline with other writers
- */
- struct percpu_counter mem ____cacheline_aligned_in_smp;
-
+ /* Keep atomic mem on separate cachelines in structs that include it */
+ atomic_t mem ____cacheline_aligned_in_smp;
/* sysctls */
int timeout;
int high_thresh;
@@ -108,15 +103,10 @@ struct inet_frags {
int inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);
-static inline int inet_frags_init_net(struct netns_frags *nf)
-{
- return percpu_counter_init(&nf->mem, 0, GFP_KERNEL);
-}
-static inline void inet_frags_uninit_net(struct netns_frags *nf)
+static inline void inet_frags_init_net(struct netns_frags *nf)
{
- percpu_counter_destroy(&nf->mem);
+ atomic_set(&nf->mem, 0);
}
-
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
@@ -140,37 +130,24 @@ static inline bool inet_frag_evicting(struct inet_frag_queue *q)
/* Memory Tracking Functions. */
-/* The default percpu_counter batch size is not big enough to scale to
- * fragmentation mem acct sizes.
- * The mem size of a 64K fragment is approx:
- * (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
- */
-static unsigned int frag_percpu_counter_batch = 130000;
-
static inline int frag_mem_limit(struct netns_frags *nf)
{
- return percpu_counter_read(&nf->mem);
+ return atomic_read(&nf->mem);
}
static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
{
- __percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch);
+ atomic_sub(i, &nf->mem);
}
static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
{
- __percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
+ atomic_add(i, &nf->mem);
}
-static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
+static inline int sum_frag_mem_limit(struct netns_frags *nf)
{
- unsigned int res;
-
- local_bh_disable();
- res = percpu_counter_sum_positive(&nf->mem);
- local_bh_enable();
-
- return res;
+ return atomic_read(&nf->mem);
}
/* RFC 3168 support :
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index fb961a576abe..fa5e703a14ed 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -68,6 +68,7 @@ struct fib6_node {
__u16 fn_flags;
int fn_sernum;
struct rt6_info *rr_ptr;
+ struct rcu_head rcu;
};
#ifndef CONFIG_IPV6_SUBTREES
@@ -102,7 +103,7 @@ struct rt6_info {
* the same cache line.
*/
struct fib6_table *rt6i_table;
- struct fib6_node *rt6i_node;
+ struct fib6_node __rcu *rt6i_node;
struct in6_addr rt6i_gateway;
@@ -165,13 +166,40 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
rt0->rt6i_flags |= RTF_EXPIRES;
}
+/* Function to safely get fn->sernum for the passed-in rt
+ * and store the result in the passed-in cookie.
+ * Returns true if the cookie could be fetched safely,
+ * false otherwise.
+ */
+static inline bool rt6_get_cookie_safe(const struct rt6_info *rt,
+ u32 *cookie)
+{
+ struct fib6_node *fn;
+ bool status = false;
+
+ rcu_read_lock();
+ fn = rcu_dereference(rt->rt6i_node);
+
+ if (fn) {
+ *cookie = fn->fn_sernum;
+ status = true;
+ }
+
+ rcu_read_unlock();
+ return status;
+}
+
static inline u32 rt6_get_cookie(const struct rt6_info *rt)
{
+ u32 cookie = 0;
+
if (rt->rt6i_flags & RTF_PCPU ||
(unlikely(rt->dst.flags & DST_NOCACHE) && rt->dst.from))
rt = (struct rt6_info *)(rt->dst.from);
- return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
+ rt6_get_cookie_safe(rt, &cookie);
+
+ return cookie;
}
static inline void ip6_rt_put(struct rt6_info *rt)
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 21b30bf7c74c..6078ef2e24de 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -9349,6 +9349,73 @@ struct asm_aptx_dec_fmt_blk_v2 {
*/
} __packed;
+/* Q6Core Specific */
+#define AVCS_CMD_GET_FWK_VERSION (0x0001292C)
+#define AVCS_CMDRSP_GET_FWK_VERSION (0x0001292D)
+
+#define AVCS_SERVICE_ID_ALL (0xFFFFFFFF)
+
+struct avcs_get_fwk_version {
+ /*
+ * Indicates the major version of the AVS build.
+ * This value is incremented on chipset family boundaries.
+ */
+ uint32_t build_major_version;
+
+ /*
+ * Minor version of the AVS build.
+ * This value represents the mainline to which the AVS build belongs.
+ */
+ uint32_t build_minor_version;
+
+ /* Indicates the AVS branch version to which the image belongs. */
+ uint32_t build_branch_version;
+
+ /* Indicates the AVS sub-branch or customer product line information. */
+ uint32_t build_subbranch_version;
+
+ /* Number of supported AVS services in the current build. */
+ uint32_t num_services;
+};
+
+struct avs_svc_api_info {
+ /*
+ * APRV2 service IDs for the individual static services.
+ *
+ * @values
+ * - APRV2_IDS_SERVICE_ID_ADSP_CORE_V
+ * - APRV2_IDS_SERVICE_ID_ADSP_AFE_V
+ * - APRV2_IDS_SERVICE_ID_ADSP_ASM_V
+ * - APRV2_IDS_SERVICE_ID_ADSP_ADM_V
+ * - APRV2_IDS_SERVICE_ID_ADSP_MVM_V
+ * - APRV2_IDS_SERVICE_ID_ADSP_CVS_V
+ * - APRV2_IDS_SERVICE_ID_ADSP_CVP_V
+ * - APRV2_IDS_SERVICE_ID_ADSP_LSM_V
+ */
+ uint32_t service_id;
+
+ /*
+ * Indicates the API version of the service.
+ *
+ * Each new API update that warrants a change on the HLOS side triggers
+ * an increment in the version.
+ */
+ uint32_t api_version;
+
+ /*
+ * Indicates the API increments on a sub-branch (not on the mainline).
+ *
+ * API branch version numbers can increment independently on different
+ * sub-branches.
+ */
+ uint32_t api_branch_version;
+};
+
+struct avcs_fwk_ver_info {
+ struct avcs_get_fwk_version avcs_build;
+ struct avs_svc_api_info services[0];
+};
+
/* LSM Specific */
#define VW_FEAT_DIM (39)
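struct avcs_fwk_ver_info above ends in a zero-length services[] array, so callers size the buffer from the reported service count. A sketch, assuming num_services came from a prior AVCS_CMDRSP_GET_FWK_VERSION response:

	struct avcs_fwk_ver_info *info;
	size_t sz = sizeof(*info) +
		    num_services * sizeof(struct avs_svc_api_info);

	info = kzalloc(sz, GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	/* e.g. q6core_get_avcs_fwk_ver_info(AVCS_SERVICE_ID_ALL, info); */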
diff --git a/include/sound/q6core.h b/include/sound/q6core.h
index 4f55880d410f..0ce171973cb5 100644
--- a/include/sound/q6core.h
+++ b/include/sound/q6core.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, 2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -13,6 +13,7 @@
#ifndef __Q6CORE_H__
#define __Q6CORE_H__
#include <linux/qdsp6v2/apr.h>
+#include <sound/apr_audio-v2.h>
@@ -23,6 +24,9 @@ bool q6core_is_adsp_ready(void);
int q6core_add_remove_pool_pages(phys_addr_t buf_add, uint32_t bufsz,
uint32_t mempool_id, bool add_pages);
+int q6core_get_avcs_fwk_ver_info(uint32_t service_id,
+ struct avcs_fwk_ver_info *ver_info);
+
#define ADSP_CMD_SET_DTS_EAGLE_DATA_ID 0x00012919
#define DTS_EAGLE_LICENSE_ID 0x00028346
struct adsp_dts_eagle {
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 00b4a6308249..90d6ad49a9c5 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -52,6 +52,7 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
{ META_FLUSH, "META_FLUSH" }, \
{ INMEM, "INMEM" }, \
{ INMEM_DROP, "INMEM_DROP" }, \
+ { INMEM_REVOKE, "INMEM_REVOKE" }, \
{ IPU, "IN-PLACE" }, \
{ OPU, "OUT-OF-PLACE" })
@@ -693,28 +694,32 @@ TRACE_EVENT(f2fs_direct_IO_exit,
__entry->ret)
);
-TRACE_EVENT(f2fs_reserve_new_block,
+TRACE_EVENT(f2fs_reserve_new_blocks,
- TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node),
+ TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node,
+ blkcnt_t count),
- TP_ARGS(inode, nid, ofs_in_node),
+ TP_ARGS(inode, nid, ofs_in_node, count),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(nid_t, nid)
__field(unsigned int, ofs_in_node)
+ __field(blkcnt_t, count)
),
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->nid = nid;
__entry->ofs_in_node = ofs_in_node;
+ __entry->count = count;
),
- TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u",
+ TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u, count = %llu",
show_dev(__entry),
(unsigned int)__entry->nid,
- __entry->ofs_in_node)
+ __entry->ofs_in_node,
+ (unsigned long long)__entry->count)
);
DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
@@ -727,7 +732,8 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
__field(dev_t, dev)
__field(ino_t, ino)
__field(pgoff_t, index)
- __field(block_t, blkaddr)
+ __field(block_t, old_blkaddr)
+ __field(block_t, new_blkaddr)
__field(int, rw)
__field(int, type)
),
@@ -736,16 +742,18 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
__entry->dev = page->mapping->host->i_sb->s_dev;
__entry->ino = page->mapping->host->i_ino;
__entry->index = page->index;
- __entry->blkaddr = fio->blk_addr;
+ __entry->old_blkaddr = fio->old_blkaddr;
+ __entry->new_blkaddr = fio->new_blkaddr;
__entry->rw = fio->rw;
__entry->type = fio->type;
),
TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
- "blkaddr = 0x%llx, rw = %s%s, type = %s",
+ "oldaddr = 0x%llx, newaddr = 0x%llx rw = %s%s, type = %s",
show_dev_ino(__entry),
(unsigned long)__entry->index,
- (unsigned long long)__entry->blkaddr,
+ (unsigned long long)__entry->old_blkaddr,
+ (unsigned long long)__entry->new_blkaddr,
show_bio_type(__entry->rw),
show_block_type(__entry->type))
);
@@ -1102,6 +1110,27 @@ TRACE_EVENT(f2fs_issue_discard,
(unsigned long long)__entry->blklen)
);
+TRACE_EVENT(f2fs_issue_reset_zone,
+
+ TP_PROTO(struct super_block *sb, block_t blkstart),
+
+ TP_ARGS(sb, blkstart),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(block_t, blkstart)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->blkstart = blkstart;
+ ),
+
+ TP_printk("dev = (%d,%d), reset zone at block = 0x%llx",
+ show_dev(__entry),
+ (unsigned long long)__entry->blkstart)
+);
+
TRACE_EVENT(f2fs_issue_flush,
TP_PROTO(struct super_block *sb, unsigned int nobarrier,
@@ -1265,6 +1294,44 @@ TRACE_EVENT(f2fs_destroy_extent_tree,
__entry->node_cnt)
);
+DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes,
+
+ TP_PROTO(struct super_block *sb, int type, s64 count),
+
+ TP_ARGS(sb, type, count),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, type)
+ __field(s64, count)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = sb->s_dev;
+ __entry->type = type;
+ __entry->count = count;
+ ),
+
+ TP_printk("dev = (%d,%d), %s, dirty count = %lld",
+ show_dev(__entry),
+ show_file_type(__entry->type),
+ __entry->count)
+);
+
+DEFINE_EVENT(f2fs_sync_dirty_inodes, f2fs_sync_dirty_inodes_enter,
+
+ TP_PROTO(struct super_block *sb, int type, s64 count),
+
+ TP_ARGS(sb, type, count)
+);
+
+DEFINE_EVENT(f2fs_sync_dirty_inodes, f2fs_sync_dirty_inodes_exit,
+
+ TP_PROTO(struct super_block *sb, int type, s64 count),
+
+ TP_ARGS(sb, type, count)
+);
+
#endif /* _TRACE_F2FS_H */
/* This part must be outside protection */
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index cd0c1a1a9ccf..15048097910f 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -173,6 +173,24 @@ struct inodes_stat_t {
#define FS_IOC32_SETVERSION _IOW('v', 2, int)
/*
+ * File system encryption support
+ */
+/* Policy provided via an ioctl on the topmost directory */
+#define FS_KEY_DESCRIPTOR_SIZE 8
+
+struct fscrypt_policy {
+ __u8 version;
+ __u8 contents_encryption_mode;
+ __u8 filenames_encryption_mode;
+ __u8 flags;
+ __u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+} __packed;
+
+#define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy)
+#define FS_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16])
+#define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy)
+
+/*
* Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
*/
#define FS_SECRM_FL 0x00000001 /* Secure deletion */
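A hedged userspace sketch of the new encryption-policy ioctl; the key descriptor bytes are example values, and the FS_ENCRYPTION_MODE_* constants are the ones referenced by fscrypto.h earlier in this patch:

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/fs.h>

	int set_policy(const char *dir)
	{
		struct fscrypt_policy policy;
		int fd = open(dir, O_RDONLY);
		int ret;

		if (fd < 0)
			return -1;
		memset(&policy, 0, sizeof(policy));
		policy.version = 0;
		policy.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
		policy.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
		memcpy(policy.master_key_descriptor, "exmplkey",
		       FS_KEY_DESCRIPTOR_SIZE);
		ret = ioctl(fd, FS_IOC_SET_ENCRYPTION_POLICY, &policy);
		close(fd);
		return ret;
	}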
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 322fb09b8614..0bdfc9741d19 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -128,6 +128,17 @@
#define IPA_WAN_MSG_IPv6_ADDR_GW_LEN 4
/**
+ * max number of lan clients supported per device type
+ * for LAN stats via HW.
+ */
+#define IPA_MAX_NUM_HW_PATH_CLIENTS 16
+
+/**
+ * max number of destination pipes possible for a client.
+ */
+#define QMI_IPA_MAX_CLIENT_DST_PIPES 4
+
+/**
* the attributes of the rule (routing or filtering)
*/
#define IPA_FLT_TOS (1ul << 0)
@@ -447,7 +458,14 @@ enum ipa_vlan_l2tp_event {
IPA_VLAN_L2TP_EVENT_MAX,
};
-#define IPA_EVENT_MAX_NUM (IPA_VLAN_L2TP_EVENT_MAX)
+enum ipa_per_client_stats_event {
+ IPA_PER_CLIENT_STATS_CONNECT_EVENT = IPA_VLAN_L2TP_EVENT_MAX,
+ IPA_PER_CLIENT_STATS_DISCONNECT_EVENT,
+ IPA_PER_CLIENT_STATS_EVENT_MAX,
+ IPA_EVENT_MAX_NUM = IPA_PER_CLIENT_STATS_EVENT_MAX,
+};
+
+#define IPA_EVENT_MAX_NUM ((int)IPA_PER_CLIENT_STATS_EVENT_MAX)
#define IPA_EVENT_MAX ((int)IPA_EVENT_MAX_NUM)
/**
@@ -1061,6 +1079,48 @@ struct ipa_rt_rule_del {
};
/**
+ * struct ipa_rt_rule_add_ext - routing rule descriptor, includes in
+ * and out parameters
+ * @rule: actual rule to be added
+ * @at_rear: add at the back of the routing table; it is NOT possible to add
+ * rules at the rear of the "default" routing tables
+ * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0
+ * @status: output parameter, status of the routing rule add operation,
+ * 0 for success,
+ * -1 for failure
+ * @rule_id: rule_id to be assigned to the routing rule. In case the client
+ * specifies rule_id as 0, the driver will assign a new rule_id
+ */
+struct ipa_rt_rule_add_ext {
+ struct ipa_rt_rule rule;
+ uint8_t at_rear;
+ uint32_t rt_rule_hdl;
+ int status;
+ uint16_t rule_id;
+};
+
+/**
+ * struct ipa_ioc_add_rt_rule_ext - routing rule addition parameters (supports
+ * multiple rules and commit with rule_id)
+ *
+ * all rules MUST be added to the same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @rules: all struct ipa_rt_rule_add_ext rules need to go back to back here,
+ * no pointers
+ */
+struct ipa_ioc_add_rt_rule_ext {
+ uint8_t commit;
+ enum ipa_ip_type ip;
+ char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+ uint8_t num_rules;
+ struct ipa_rt_rule_add_ext rules[0];
+};
+
+
+/**
* struct ipa_ioc_del_rt_rule - routing rule deletion parameters (supports
* multiple headers and commit)
* @commit: should rules be removed from IPA HW also?
@@ -1619,6 +1679,52 @@ enum ipacm_client_enum {
IPACM_CLIENT_WLAN,
IPACM_CLIENT_MAX
};
+
+enum ipacm_per_client_device_type {
+ IPACM_CLIENT_DEVICE_TYPE_USB = 0,
+ IPACM_CLIENT_DEVICE_TYPE_WLAN = 1,
+ IPACM_CLIENT_DEVICE_TYPE_ETH = 2
+};
+
+/**
+ * max number of device types supported.
+ */
+#define IPACM_MAX_CLIENT_DEVICE_TYPES 3
+
+/**
+ * struct ipa_lan_client_msg - lan client identification
+ * @lanIface: Name of the lan interface
+ * @mac: Mac address of the client.
+ */
+struct ipa_lan_client_msg {
+ char lanIface[IPA_RESOURCE_NAME_MAX];
+ uint8_t mac[IPA_MAC_ADDR_SIZE];
+};
+
+/**
+ * struct ipa_lan_client - lan client data
+ * @mac: MAC Address of the client.
+ * @client_idx: Client Index.
+ * @inited: Bool to indicate whether client info is set.
+ */
+struct ipa_lan_client {
+ uint8_t mac[IPA_MAC_ADDR_SIZE];
+ int8_t client_idx;
+ uint8_t inited;
+};
+
+/**
+ * struct ipa_tether_device_info - tether device info indicated from IPACM
+ * @ul_src_pipe: Source pipe of the lan client.
+ * @hdr_len: Header length of the client.
+ * @num_clients: Number of clients connected.
+ */
+struct ipa_tether_device_info {
+ int32_t ul_src_pipe;
+ uint8_t hdr_len;
+ uint32_t num_clients;
+ struct ipa_lan_client lan_client[IPA_MAX_NUM_HW_PATH_CLIENTS];
+};
+
/**
* actual IOCTLs supported by IPA driver
*/
@@ -1631,6 +1737,9 @@ enum ipacm_client_enum {
#define IPA_IOC_ADD_RT_RULE _IOWR(IPA_IOC_MAGIC, \
IPA_IOCTL_ADD_RT_RULE, \
struct ipa_ioc_add_rt_rule *)
+#define IPA_IOC_ADD_RT_RULE_EXT _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_RT_RULE_EXT, \
+ struct ipa_ioc_add_rt_rule_ext *)
#define IPA_IOC_ADD_RT_RULE_AFTER _IOWR(IPA_IOC_MAGIC, \
IPA_IOCTL_ADD_RT_RULE_AFTER, \
struct ipa_ioc_add_rt_rule_after *)
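Because struct ipa_ioc_add_rt_rule_ext carries its rules in a flexible array with no pointers, the request must be one contiguous allocation. A userspace sketch, with the table name and rule count n as placeholders:

	struct ipa_ioc_add_rt_rule_ext *req;
	size_t sz = sizeof(*req) + n * sizeof(struct ipa_rt_rule_add_ext);

	req = calloc(1, sz);
	if (!req)
		return -1;
	req->commit = 1;
	req->ip = IPA_IP_v4;
	strncpy(req->rt_tbl_name, "example_tbl", IPA_RESOURCE_NAME_MAX - 1);
	req->num_rules = n;
	/* fill req->rules[0..n-1] back to back, then:
	 * ioctl(fd, IPA_IOC_ADD_RT_RULE_EXT, req);
	 */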
diff --git a/include/uapi/linux/msm_mdp_ext.h b/include/uapi/linux/msm_mdp_ext.h
index da9ee3bcc525..61b5f8eaa7f9 100644
--- a/include/uapi/linux/msm_mdp_ext.h
+++ b/include/uapi/linux/msm_mdp_ext.h
@@ -821,4 +821,26 @@ struct mdp_hdr_stream {
uint32_t content_type;
uint32_t reserved[5];
};
+
+/* hdr hdmi state can take the values 1, 2 and 4, as defined below */
+#define HDR_ENABLE (1 << 0)
+#define HDR_DISABLE (1 << 1)
+#define HDR_RESET (1 << 2)
+
+/*
+ * HDR Control
+ * This encapsulates the HDR metadata along with a state control for it,
+ * as required by the HDMI spec, so that the relevant metadata is sent
+ * depending on the state of HDR playback.
+ * hdr_state: Controls the HDR state; takes the values HDR_ENABLE,
+ * HDR_DISABLE and HDR_RESET.
+ * hdr_stream: Metadata sent by userspace for the HDR clip.
+ */
+
+#define DRM_MSM_EXT_PANEL_HDR_CTRL
+struct mdp_hdr_stream_ctrl {
+ __u8 hdr_state; /* HDR state */
+ struct mdp_hdr_stream hdr_stream; /* HDR metadata */
+};
+
#endif
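A minimal sketch of driving the new HDR control from userspace; the metadata values are placeholders and the delivery path depends on the display driver in use:

	struct mdp_hdr_stream_ctrl ctrl;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.hdr_state = HDR_ENABLE;	/* or HDR_DISABLE / HDR_RESET */
	/* populate ctrl.hdr_stream with the clip's HDR metadata, then
	 * hand the struct to the display driver
	 */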
diff --git a/include/uapi/linux/rmnet_ipa_fd_ioctl.h b/include/uapi/linux/rmnet_ipa_fd_ioctl.h
index f04ac495a5c0..13dac9a1526d 100644
--- a/include/uapi/linux/rmnet_ipa_fd_ioctl.h
+++ b/include/uapi/linux/rmnet_ipa_fd_ioctl.h
@@ -33,6 +33,12 @@
#define WAN_IOCTL_QUERY_DL_FILTER_STATS 8
#define WAN_IOCTL_ADD_FLT_RULE_EX 9
#define WAN_IOCTL_QUERY_TETHER_STATS_ALL 10
+#define WAN_IOCTL_ADD_UL_FLT_RULE 11
+#define WAN_IOCTL_ENABLE_PER_CLIENT_STATS 12
+#define WAN_IOCTL_QUERY_PER_CLIENT_STATS 13
+#define WAN_IOCTL_SET_LAN_CLIENT_INFO 14
+#define WAN_IOCTL_CLEAR_LAN_CLIENT_INFO 15
+#define WAN_IOCTL_SEND_LAN_CLIENT_MSG 16
/* User space may not have this defined. */
#ifndef IFNAMSIZ
@@ -126,6 +132,57 @@ struct wan_ioctl_query_dl_filter_stats {
uint32_t index;
};
+struct wan_ioctl_send_lan_client_msg {
+ /* Lan client info. */
+ struct ipa_lan_client_msg lan_client;
+ /* Event to indicate whether client is
+ * connected or disconnected.
+ */
+ enum ipa_per_client_stats_event client_event;
+};
+
+struct wan_ioctl_lan_client_info {
+ /* Device type of the client. */
+ enum ipacm_per_client_device_type device_type;
+ /* MAC Address of the client. */
+ uint8_t mac[IPA_MAC_ADDR_SIZE];
+ /* Init client. */
+ uint8_t client_init;
+ /* Client Index */
+ int8_t client_idx;
+ /* Header length of the client. */
+ uint8_t hdr_len;
+ /* Source pipe of the lan client. */
+ enum ipa_client_type ul_src_pipe;
+};
+
+struct wan_ioctl_per_client_info {
+ /* MAC Address of the client. */
+ uint8_t mac[IPA_MAC_ADDR_SIZE];
+ /* Ipv4 UL traffic bytes. */
+ uint64_t ipv4_tx_bytes;
+ /* Ipv4 DL traffic bytes. */
+ uint64_t ipv4_rx_bytes;
+ /* Ipv6 UL traffic bytes. */
+ uint64_t ipv6_tx_bytes;
+ /* Ipv6 DL traffic bytes. */
+ uint64_t ipv6_rx_bytes;
+};
+
+struct wan_ioctl_query_per_client_stats {
+ /* Device type of the client. */
+ enum ipacm_per_client_device_type device_type;
+ /* Indicate whether to reset the stats (use 1) or not */
+ uint8_t reset_stats;
+ /* Indicates whether client is disconnected. */
+ uint8_t disconnect_clnt;
+ /* Number of clients. */
+ uint8_t num_clients;
+ /* Client information. */
+ struct wan_ioctl_per_client_info
+ client_info[IPA_MAX_NUM_HW_PATH_CLIENTS];
+};
+
#define WAN_IOC_ADD_FLT_RULE _IOWR(WAN_IOC_MAGIC, \
WAN_IOCTL_ADD_FLT_RULE, \
struct ipa_install_fltr_rule_req_msg_v01 *)
@@ -170,4 +227,27 @@ struct wan_ioctl_query_dl_filter_stats {
WAN_IOCTL_QUERY_TETHER_STATS_ALL, \
struct wan_ioctl_query_tether_stats_all *)
+#define WAN_IOC_ADD_UL_FLT_RULE _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_ADD_UL_FLT_RULE, \
+ struct ipa_configure_ul_firewall_rules_req_msg_v01 *)
+
+#define WAN_IOC_ENABLE_PER_CLIENT_STATS _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_ENABLE_PER_CLIENT_STATS, \
+ bool *)
+
+#define WAN_IOC_QUERY_PER_CLIENT_STATS _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_QUERY_PER_CLIENT_STATS, \
+ struct wan_ioctl_query_per_client_stats *)
+
+#define WAN_IOC_SET_LAN_CLIENT_INFO _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_SET_LAN_CLIENT_INFO, \
+ struct wan_ioctl_lan_client_info *)
+
+#define WAN_IOC_SEND_LAN_CLIENT_MSG _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_SEND_LAN_CLIENT_MSG, \
+ struct wan_ioctl_send_lan_client_msg *)
+
+#define WAN_IOC_CLEAR_LAN_CLIENT_INFO _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_CLEAR_LAN_CLIENT_INFO, \
+ struct wan_ioctl_lan_client_info *)
#endif /* _RMNET_IPA_FD_IOCTL_H */
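A hedged sketch of querying per-client stats with the new ioctl; the device node path is an assumption for whichever node exposes the WAN_IOC_* interface:

	struct wan_ioctl_query_per_client_stats stats;
	int fd = open("/dev/wwan_ioctl", O_RDWR);	/* hypothetical node */

	memset(&stats, 0, sizeof(stats));
	stats.device_type = IPACM_CLIENT_DEVICE_TYPE_WLAN;
	stats.reset_stats = 0;	/* 1 would also reset the counters */
	stats.num_clients = IPA_MAX_NUM_HW_PATH_CLIENTS;
	if (fd >= 0 && ioctl(fd, WAN_IOC_QUERY_PER_CLIENT_STATS, &stats) == 0)
		;	/* stats.client_info[i] now holds per-MAC byte counters */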
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 0d87fa1e253c..25cb17ca6bf3 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -1225,6 +1225,13 @@ enum v4l2_mpeg_vidc_video_au_delimiter {
V4L2_MPEG_VIDC_VIDEO_AU_DELIMITER_ENABLED = 1
};
+#define V4L2_CID_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 103)
+enum v4l2_mpeg_vidc_video_venc_send_skipped_frame {
+ V4L2_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME_DISABLE = 0,
+ V4L2_MPEG_VIDC_VIDEO_SEND_SKIPPED_FRAME_ENABLE = 1
+};
+
/* Camera class control IDs */
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index fa930a91b4aa..36e94588d1d9 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -2,7 +2,7 @@
* Video for Linux Two header file
*
* Copyright (C) 1999-2012 the contributors
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -590,6 +590,11 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_SGBRG10DPCM6 v4l2_fourcc('b', 'G', 'A', '6')
#define V4L2_PIX_FMT_SGRBG10DPCM6 v4l2_fourcc('B', 'D', '1', '6')
#define V4L2_PIX_FMT_SRGGB10DPCM6 v4l2_fourcc('b', 'R', 'A', '6')
+ /* 10bit raw bayer, plain16 packed */
+#define V4L2_PIX_FMT_SBGGRPLAIN16 v4l2_fourcc('B', 'G', '1', '6')
+#define V4L2_PIX_FMT_SGBRGPLAIN16 v4l2_fourcc('G', 'B', '1', '6')
+#define V4L2_PIX_FMT_SGRBGPLAIN16 v4l2_fourcc('G', 'R', '1', '6')
+#define V4L2_PIX_FMT_SRGGBPLAIN16 v4l2_fourcc('R', 'G', '1', '6')
/* compressed formats */
#define V4L2_PIX_FMT_MJPEG v4l2_fourcc('M', 'J', 'P', 'G') /* Motion-JPEG */
diff --git a/include/uapi/media/msm_cam_sensor.h b/include/uapi/media/msm_cam_sensor.h
index c6144cd8f355..0ec18d663cff 100644
--- a/include/uapi/media/msm_cam_sensor.h
+++ b/include/uapi/media/msm_cam_sensor.h
@@ -88,6 +88,7 @@ enum sensor_sub_module_t {
SUB_MODULE_EXT,
SUB_MODULE_IR_LED,
SUB_MODULE_IR_CUT,
+ SUB_MODULE_LASER_LED,
SUB_MODULE_MAX,
};
@@ -301,6 +302,15 @@ struct msm_ir_cut_cfg_data_t {
enum msm_ir_cut_cfg_type_t cfg_type;
};
+struct msm_laser_led_cfg_data_t {
+ enum msm_laser_led_cfg_type_t cfg_type;
+ void __user *setting;
+ void __user *debug_reg;
+ uint32_t debug_reg_size;
+ uint16_t i2c_addr;
+ enum i2c_freq_mode_t i2c_freq_mode;
+};
+
struct msm_eeprom_cfg_data {
enum eeprom_cfg_type_t cfgtype;
uint8_t is_supported;
@@ -381,7 +391,9 @@ enum msm_ois_cfg_download_type_t {
enum msm_ois_i2c_operation {
MSM_OIS_WRITE = 0,
MSM_OIS_POLL,
+ MSM_OIS_READ,
};
+#define MSM_OIS_READ MSM_OIS_READ
struct reg_settings_ois_t {
uint16_t reg_addr;
@@ -616,5 +628,8 @@ struct sensor_init_cfg_data {
#define VIDIOC_MSM_IR_CUT_CFG \
_IOWR('V', BASE_VIDIOC_PRIVATE + 15, struct msm_ir_cut_cfg_data_t)
+#define VIDIOC_MSM_LASER_LED_CFG \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 16, struct msm_laser_led_cfg_data_t)
+
#endif
diff --git a/include/uapi/media/msm_camera.h b/include/uapi/media/msm_camera.h
index 10ee4b7c9390..39e6927d9b7e 100644
--- a/include/uapi/media/msm_camera.h
+++ b/include/uapi/media/msm_camera.h
@@ -1541,7 +1541,9 @@ enum msm_camera_i2c_reg_addr_type {
MSM_CAMERA_I2C_BYTE_ADDR = 1,
MSM_CAMERA_I2C_WORD_ADDR,
MSM_CAMERA_I2C_3B_ADDR,
+ MSM_CAMERA_I2C_DWORD_ADDR,
};
+#define MSM_CAMERA_I2C_DWORD_ADDR MSM_CAMERA_I2C_DWORD_ADDR
struct msm_camera_i2c_reg_array {
uint16_t reg_addr;
diff --git a/include/uapi/media/msm_camsensor_sdk.h b/include/uapi/media/msm_camsensor_sdk.h
index a92c144f712e..08605aca474d 100644
--- a/include/uapi/media/msm_camsensor_sdk.h
+++ b/include/uapi/media/msm_camsensor_sdk.h
@@ -85,8 +85,10 @@ enum msm_camera_i2c_reg_addr_type {
MSM_CAMERA_I2C_BYTE_ADDR = 1,
MSM_CAMERA_I2C_WORD_ADDR,
MSM_CAMERA_I2C_3B_ADDR,
+ MSM_CAMERA_I2C_DWORD_ADDR,
MSM_CAMERA_I2C_ADDR_TYPE_MAX,
};
+#define MSM_CAMERA_I2C_DWORD_ADDR MSM_CAMERA_I2C_DWORD_ADDR
enum msm_camera_i2c_data_type {
MSM_CAMERA_I2C_BYTE_DATA = 1,
@@ -206,6 +208,13 @@ enum msm_ir_led_cfg_type_t {
#define CFG_IR_LED_OFF CFG_IR_LED_OFF
#define CFG_IR_LED_ON CFG_IR_LED_ON
+enum msm_laser_led_cfg_type_t {
+ CFG_LASER_LED_INIT,
+ CFG_LASER_LED_CONTROL,
+};
+#define CFG_LASER_LED_INIT CFG_LASER_LED_INIT
+#define CFG_LASER_LED_CONTROL CFG_LASER_LED_CONTROL
+
enum msm_ir_cut_cfg_type_t {
CFG_IR_CUT_INIT = 0,
CFG_IR_CUT_RELEASE,
diff --git a/include/uapi/media/msmb_camera.h b/include/uapi/media/msmb_camera.h
index df9807e72e47..4b23806071d4 100644
--- a/include/uapi/media/msmb_camera.h
+++ b/include/uapi/media/msmb_camera.h
@@ -52,6 +52,7 @@
#define MSM_CAMERA_SUBDEV_IR_CUT 18
#define MSM_CAMERA_SUBDEV_EXT 19
#define MSM_CAMERA_SUBDEV_TOF 20
+#define MSM_CAMERA_SUBDEV_LASER_LED 21
#define MSM_MAX_CAMERA_SENSORS 5
/* The below macro is defined to put an upper limit on maximum
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 077bb52e2d47..3fdb7545852e 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2799,6 +2799,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
int retval = 0;
mutex_lock(&cgroup_mutex);
+ percpu_down_write(&cgroup_threadgroup_rwsem);
for_each_root(root) {
struct cgroup *from_cgrp;
@@ -2813,6 +2814,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
if (retval)
break;
}
+ percpu_up_write(&cgroup_threadgroup_rwsem);
mutex_unlock(&cgroup_mutex);
return retval;
@@ -4072,6 +4074,8 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
mutex_lock(&cgroup_mutex);
+ percpu_down_write(&cgroup_threadgroup_rwsem);
+
/* all tasks in @from are being moved, all csets are source */
spin_lock_irq(&css_set_lock);
list_for_each_entry(link, &from->cset_links, cset_link)
@@ -4100,6 +4104,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
} while (task && !ret);
out_err:
cgroup_migrate_finish(&preloaded_csets);
+ percpu_up_write(&cgroup_threadgroup_rwsem);
mutex_unlock(&cgroup_mutex);
return ret;
}
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 1656a48d5bee..f92ec9a71af3 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1916,6 +1916,7 @@ static struct cftype files[] = {
{
.name = "memory_pressure",
.read_u64 = cpuset_read_u64,
+ .private = FILE_MEMORY_PRESSURE,
},
{
diff --git a/kernel/fork.c b/kernel/fork.c
index 07cd0d68ee02..1d168ba55118 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -178,13 +178,13 @@ static inline void free_thread_stack(unsigned long *stack)
# else
static struct kmem_cache *thread_stack_cache;
-static struct thread_info *alloc_thread_stack_node(struct task_struct *tsk,
+static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
int node)
{
return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
}
-static void free_stack(unsigned long *stack)
+static void free_thread_stack(unsigned long *stack)
{
kmem_cache_free(thread_stack_cache, stack);
}
@@ -697,6 +697,26 @@ void __mmdrop(struct mm_struct *mm)
}
EXPORT_SYMBOL_GPL(__mmdrop);
+static inline void __mmput(struct mm_struct *mm)
+{
+ VM_BUG_ON(atomic_read(&mm->mm_users));
+
+ uprobe_clear_state(mm);
+ exit_aio(mm);
+ ksm_exit(mm);
+ khugepaged_exit(mm); /* must run before exit_mmap */
+ exit_mmap(mm);
+ set_mm_exe_file(mm, NULL);
+ if (!list_empty(&mm->mmlist)) {
+ spin_lock(&mmlist_lock);
+ list_del(&mm->mmlist);
+ spin_unlock(&mmlist_lock);
+ }
+ if (mm->binfmt)
+ module_put(mm->binfmt->module);
+ mmdrop(mm);
+}
+
/*
* Decrement the use count and release all resources for an mm.
*/
@@ -706,26 +726,27 @@ int mmput(struct mm_struct *mm)
might_sleep();
if (atomic_dec_and_test(&mm->mm_users)) {
- uprobe_clear_state(mm);
- exit_aio(mm);
- ksm_exit(mm);
- khugepaged_exit(mm); /* must run before exit_mmap */
- exit_mmap(mm);
- set_mm_exe_file(mm, NULL);
- if (!list_empty(&mm->mmlist)) {
- spin_lock(&mmlist_lock);
- list_del(&mm->mmlist);
- spin_unlock(&mmlist_lock);
- }
- if (mm->binfmt)
- module_put(mm->binfmt->module);
- mmdrop(mm);
+ __mmput(mm);
mm_freed = 1;
}
return mm_freed;
}
EXPORT_SYMBOL_GPL(mmput);
+static void mmput_async_fn(struct work_struct *work)
+{
+ struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
+ __mmput(mm);
+}
+
+void mmput_async(struct mm_struct *mm)
+{
+ if (atomic_dec_and_test(&mm->mm_users)) {
+ INIT_WORK(&mm->async_put_work, mmput_async_fn);
+ schedule_work(&mm->async_put_work);
+ }
+}
+
/**
* set_mm_exe_file - change a reference to the mm's executable file
*
diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
index 7080ae1eb6c1..f850e906564b 100644
--- a/kernel/gcov/base.c
+++ b/kernel/gcov/base.c
@@ -98,6 +98,12 @@ void __gcov_merge_icall_topn(gcov_type *counters, unsigned int n_counters)
}
EXPORT_SYMBOL(__gcov_merge_icall_topn);
+void __gcov_exit(void)
+{
+ /* Unused. */
+}
+EXPORT_SYMBOL(__gcov_exit);
+
/**
* gcov_enable_events - enable event reporting through gcov_event()
*
diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
index e25e92fb44fa..46a18e72bce6 100644
--- a/kernel/gcov/gcc_4_7.c
+++ b/kernel/gcov/gcc_4_7.c
@@ -18,7 +18,9 @@
#include <linux/vmalloc.h>
#include "gcov.h"
-#if __GNUC__ == 5 && __GNUC_MINOR__ >= 1
+#if (__GNUC__ >= 7)
+#define GCOV_COUNTERS 9
+#elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
#define GCOV_COUNTERS 10
#elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9
#define GCOV_COUNTERS 9
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 8ef1919d63b2..d580b7d6ee6d 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -776,6 +776,8 @@ static void lock_torture_cleanup(void)
else
lock_torture_print_module_parms(cxt.cur_ops,
"End of test: SUCCESS");
+ kfree(cxt.lwsa);
+ kfree(cxt.lrsa);
torture_cleanup_end();
}
@@ -917,6 +919,8 @@ static int __init lock_torture_init(void)
GFP_KERNEL);
if (reader_tasks == NULL) {
VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
+ kfree(writer_tasks);
+ writer_tasks = NULL;
firsterr = -ENOMEM;
goto unwind;
}
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index 1e6a51cc25c4..0befa20ce96e 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -108,29 +108,16 @@ bool osq_lock(struct optimistic_spin_queue *lock)
node->prev = prev;
/*
- * We need to avoid reordering of link updation sequence of osq.
- * A case in which the status of optimistic spin queue is
- * CPU6->CPU2 in which CPU6 has acquired the lock. At this point
- * if CPU0 comes in to acquire osq_lock, it will update the tail
- * count. After tail count update if CPU2 starts to unqueue itself
- * from optimistic spin queue, it will find updated tail count with
- * CPU0 and update CPU2 node->next to NULL in osq_wait_next(). If
- * reordering of following stores happen then prev->next where prev
- * being CPU2 would be updated to point to CPU0 node:
- * node->prev = prev;
- * WRITE_ONCE(prev->next, node);
+ * osq_lock() unqueue
*
- * At this point if next instruction
- * WRITE_ONCE(next->prev, prev);
- * in CPU2 path is committed before the update of CPU0 node->prev =
- * prev then CPU0 node->prev will point to CPU6 node. At this point
- * if CPU0 path's node->prev = prev is committed resulting in change
- * of CPU0 prev back to CPU2 node. CPU2 node->next is NULL, so if
- * CPU0 gets into unqueue path of osq_lock it will keep spinning
- * in infinite loop as condition prev->next == node will never be
- * true.
+ * node->prev = prev osq_wait_next()
+ * WMB MB
+ * prev->next = node next->prev = prev // unqueue-C
+ *
+ * Here 'node->prev' and 'next->prev' are the same variable and we need
+ * to ensure these stores happen in-order to avoid corrupting the list.
*/
- smp_mb();
+ smp_wmb();
WRITE_ONCE(prev->next, node);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 2cb46d51d715..1ba183e7987c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -248,24 +248,17 @@ static int rcu_gp_in_progress(struct rcu_state *rsp)
*/
void rcu_sched_qs(void)
{
- unsigned long flags;
-
- if (__this_cpu_read(rcu_sched_data.cpu_no_qs.s)) {
- trace_rcu_grace_period(TPS("rcu_sched"),
- __this_cpu_read(rcu_sched_data.gpnum),
- TPS("cpuqs"));
- __this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
- if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
- return;
- local_irq_save(flags);
- if (__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)) {
- __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
- rcu_report_exp_rdp(&rcu_sched_state,
- this_cpu_ptr(&rcu_sched_data),
- true);
- }
- local_irq_restore(flags);
- }
+ if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
+ return;
+ trace_rcu_grace_period(TPS("rcu_sched"),
+ __this_cpu_read(rcu_sched_data.gpnum),
+ TPS("cpuqs"));
+ __this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
+ if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
+ return;
+ __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
+ rcu_report_exp_rdp(&rcu_sched_state,
+ this_cpu_ptr(&rcu_sched_data), true);
}
void rcu_bh_qs(void)
@@ -302,17 +295,16 @@ EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);
* We inform the RCU core by emulating a zero-duration dyntick-idle
* period, which we in turn do by incrementing the ->dynticks counter
* by two.
+ *
+ * The caller must have disabled interrupts.
*/
static void rcu_momentary_dyntick_idle(void)
{
- unsigned long flags;
struct rcu_data *rdp;
struct rcu_dynticks *rdtp;
int resched_mask;
struct rcu_state *rsp;
- local_irq_save(flags);
-
/*
* Yes, we can lose flag-setting operations. This is OK, because
* the flag will be set again after some delay.
@@ -342,13 +334,12 @@ static void rcu_momentary_dyntick_idle(void)
smp_mb__after_atomic(); /* Later stuff after QS. */
break;
}
- local_irq_restore(flags);
}
/*
* Note a context switch. This is a quiescent state for RCU-sched,
* and requires special handling for preemptible RCU.
- * The caller must have disabled preemption.
+ * The caller must have disabled interrupts.
*/
void rcu_note_context_switch(void)
{
@@ -378,9 +369,14 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch);
*/
void rcu_all_qs(void)
{
+ unsigned long flags;
+
barrier(); /* Avoid RCU read-side critical sections leaking down. */
- if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
+ if (unlikely(raw_cpu_read(rcu_sched_qs_mask))) {
+ local_irq_save(flags);
rcu_momentary_dyntick_idle();
+ local_irq_restore(flags);
+ }
this_cpu_inc(rcu_qs_ctr);
barrier(); /* Avoid RCU read-side critical sections leaking up. */
}
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 32cbe72bf545..c6fc11d626f8 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -147,8 +147,8 @@ static void __init rcu_bootup_announce(void)
* the corresponding expedited grace period will also be the end of the
* normal grace period.
*/
-static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp,
- unsigned long flags) __releases(rnp->lock)
+static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
+ __releases(rnp->lock) /* But leaves interrupts disabled. */
{
int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
(rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
@@ -236,7 +236,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp,
rnp->gp_tasks = &t->rcu_node_entry;
if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
rnp->exp_tasks = &t->rcu_node_entry;
- raw_spin_unlock(&rnp->lock);
+ raw_spin_unlock(&rnp->lock); /* interrupts remain disabled. */
/*
* Report the quiescent state for the expedited GP. This expedited
@@ -251,7 +251,6 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp,
} else {
WARN_ON_ONCE(t->rcu_read_unlock_special.b.exp_need_qs);
}
- local_irq_restore(flags);
}
/*
@@ -286,12 +285,11 @@ static void rcu_preempt_qs(void)
* predating the current grace period drain, in other words, until
* rnp->gp_tasks becomes NULL.
*
- * Caller must disable preemption.
+ * Caller must disable interrupts.
*/
static void rcu_preempt_note_context_switch(void)
{
struct task_struct *t = current;
- unsigned long flags;
struct rcu_data *rdp;
struct rcu_node *rnp;
@@ -301,7 +299,7 @@ static void rcu_preempt_note_context_switch(void)
/* Possibly blocking in an RCU read-side critical section. */
rdp = this_cpu_ptr(rcu_state_p->rda);
rnp = rdp->mynode;
- raw_spin_lock_irqsave(&rnp->lock, flags);
+ raw_spin_lock(&rnp->lock);
smp_mb__after_unlock_lock();
t->rcu_read_unlock_special.b.blocked = true;
t->rcu_blocked_node = rnp;
@@ -318,7 +316,7 @@ static void rcu_preempt_note_context_switch(void)
(rnp->qsmask & rdp->grpmask)
? rnp->gpnum
: rnp->gpnum + 1);
- rcu_preempt_ctxt_queue(rnp, rdp, flags);
+ rcu_preempt_ctxt_queue(rnp, rdp);
} else if (t->rcu_read_lock_nesting < 0 &&
t->rcu_read_unlock_special.s) {
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4ecca604e64b..0271e8e2c020 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -621,8 +621,7 @@ void resched_cpu(int cpu)
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
- if (!raw_spin_trylock_irqsave(&rq->lock, flags))
- return;
+ raw_spin_lock_irqsave(&rq->lock, flags);
resched_curr(rq);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
@@ -3177,20 +3176,20 @@ static void sched_freq_tick_pelt(int cpu)
#ifdef CONFIG_SCHED_WALT
static void sched_freq_tick_walt(int cpu)
{
- unsigned long cpu_utilization = cpu_util(cpu);
+ unsigned long cpu_utilization = cpu_util_freq(cpu);
unsigned long capacity_curr = capacity_curr_of(cpu);
if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
return sched_freq_tick_pelt(cpu);
/*
- * Add a margin to the WALT utilization.
+ * Add a margin to the WALT utilization to check if we will need to
+ * increase frequency.
* NOTE: WALT tracks a single CPU signal for all the scheduling
* classes, thus this margin is going to be added to the DL class as
* well, which is something we do not do in sched_freq_tick_pelt case.
*/
- cpu_utilization = add_capacity_margin(cpu_utilization);
- if (cpu_utilization <= capacity_curr)
+ if (add_capacity_margin(cpu_utilization) <= capacity_curr)
return;
/*
@@ -3512,7 +3511,6 @@ static void __sched notrace __schedule(bool preempt)
cpu = smp_processor_id();
rq = cpu_rq(cpu);
- rcu_note_context_switch();
prev = rq->curr;
/*
@@ -3531,13 +3529,16 @@ static void __sched notrace __schedule(bool preempt)
if (sched_feat(HRTICK))
hrtick_clear(rq);
+ local_irq_disable();
+ rcu_note_context_switch();
+
/*
* Make sure that signal_pending_state()->signal_pending() below
* can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
* done by the caller to avoid the race with signal_wake_up().
*/
smp_mb__before_spinlock();
- raw_spin_lock_irq(&rq->lock);
+ raw_spin_lock(&rq->lock);
lockdep_pin_lock(&rq->lock);
rq->clock_skip_update <<= 1; /* promote REQ to ACT */
diff --git a/kernel/sched/cpufreq_sched.c b/kernel/sched/cpufreq_sched.c
index f10d9f7d6d07..6ffb23adbcef 100644
--- a/kernel/sched/cpufreq_sched.c
+++ b/kernel/sched/cpufreq_sched.c
@@ -235,6 +235,18 @@ out:
cpufreq_cpu_put(policy);
}
+#ifdef CONFIG_SCHED_WALT
+static inline unsigned long
+requested_capacity(struct sched_capacity_reqs *scr)
+{
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+ return scr->cfs;
+ return scr->cfs + scr->rt;
+}
+#else
+#define requested_capacity(scr) ((scr)->cfs + (scr)->rt)
+#endif
+
void update_cpu_capacity_request(int cpu, bool request)
{
unsigned long new_capacity;
@@ -245,7 +257,7 @@ void update_cpu_capacity_request(int cpu, bool request)
scr = &per_cpu(cpu_sched_capacity_reqs, cpu);
- new_capacity = scr->cfs + scr->rt;
+ new_capacity = requested_capacity(scr);
new_capacity = new_capacity * capacity_margin
/ SCHED_CAPACITY_SCALE;
new_capacity += scr->dl;
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index e12309c1b07b..b90f7434e13b 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -64,8 +64,9 @@ struct sugov_cpu {
struct update_util_data update_util;
struct sugov_policy *sg_policy;
- unsigned long iowait_boost;
- unsigned long iowait_boost_max;
+ bool iowait_boost_pending;
+ unsigned int iowait_boost;
+ unsigned int iowait_boost_max;
u64 last_update;
/* The fields below are only needed when sharing a policy. */
@@ -224,30 +225,54 @@ static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
unsigned int flags)
{
if (flags & SCHED_CPUFREQ_IOWAIT) {
- sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
+ if (sg_cpu->iowait_boost_pending)
+ return;
+
+ sg_cpu->iowait_boost_pending = true;
+
+ if (sg_cpu->iowait_boost) {
+ sg_cpu->iowait_boost <<= 1;
+ if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
+ sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
+ } else {
+ sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
+ }
} else if (sg_cpu->iowait_boost) {
s64 delta_ns = time - sg_cpu->last_update;
/* Clear iowait_boost if the CPU appears to have been idle. */
- if (delta_ns > TICK_NSEC)
+ if (delta_ns > TICK_NSEC) {
sg_cpu->iowait_boost = 0;
+ sg_cpu->iowait_boost_pending = false;
+ }
}
}
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
unsigned long *max)
{
- unsigned long boost_util = sg_cpu->iowait_boost;
- unsigned long boost_max = sg_cpu->iowait_boost_max;
+ unsigned int boost_util, boost_max;
- if (!boost_util)
+ if (!sg_cpu->iowait_boost)
return;
+ if (sg_cpu->iowait_boost_pending) {
+ sg_cpu->iowait_boost_pending = false;
+ } else {
+ sg_cpu->iowait_boost >>= 1;
+ if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
+ sg_cpu->iowait_boost = 0;
+ return;
+ }
+ }
+
+ boost_util = sg_cpu->iowait_boost;
+ boost_max = sg_cpu->iowait_boost_max;
+
if (*util * boost_max < *max * boost_util) {
*util = boost_util;
*max = boost_max;
}
- sg_cpu->iowait_boost >>= 1;
}
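The boost life-cycle above in isolation: an IOWAIT wakeup doubles the boost (starting from policy min) up to policy max, and every evaluation that does not see a fresh IOWAIT flag halves it until it decays below min. A userspace model, with frequencies as plain integers:

#include <stdbool.h>

struct boost_state {
	unsigned int boost, min, max;	/* current / policy min / policy max */
	bool pending;			/* a fresh IOWAIT hint not yet consumed */
};

static void iowait_hint(struct boost_state *b)
{
	if (b->pending)
		return;			/* at most one doubling per update */
	b->pending = true;
	b->boost = b->boost ? b->boost << 1 : b->min;
	if (b->boost > b->max)
		b->boost = b->max;
}

static unsigned int consume_boost(struct boost_state *b)
{
	if (!b->boost)
		return 0;
	if (b->pending) {
		b->pending = false;	/* fresh hint: no decay this round */
	} else {
		b->boost >>= 1;		/* decay */
		if (b->boost < b->min)
			b->boost = 0;
	}
	return b->boost;
}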
#ifdef CONFIG_NO_HZ_COMMON
@@ -320,6 +345,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu)
delta_ns = last_freq_update_time - j_sg_cpu->last_update;
if (delta_ns > TICK_NSEC) {
j_sg_cpu->iowait_boost = 0;
+ j_sg_cpu->iowait_boost_pending = false;
continue;
}
if (j_sg_cpu->flags & SCHED_CPUFREQ_DL)
@@ -589,7 +615,6 @@ static int sugov_init(struct cpufreq_policy *policy)
{
struct sugov_policy *sg_policy;
struct sugov_tunables *tunables;
- unsigned int lat;
int ret = 0;
/* State should be equivalent to EXIT */
@@ -628,12 +653,19 @@ static int sugov_init(struct cpufreq_policy *policy)
goto stop_kthread;
}
- tunables->up_rate_limit_us = LATENCY_MULTIPLIER;
- tunables->down_rate_limit_us = LATENCY_MULTIPLIER;
- lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
- if (lat) {
- tunables->up_rate_limit_us *= lat;
- tunables->down_rate_limit_us *= lat;
+ if (policy->up_transition_delay_us && policy->down_transition_delay_us) {
+ tunables->up_rate_limit_us = policy->up_transition_delay_us;
+ tunables->down_rate_limit_us = policy->down_transition_delay_us;
+ } else {
+ unsigned int lat;
+
+ tunables->up_rate_limit_us = LATENCY_MULTIPLIER;
+ tunables->down_rate_limit_us = LATENCY_MULTIPLIER;
+ lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
+ if (lat) {
+ tunables->up_rate_limit_us *= lat;
+ tunables->down_rate_limit_us *= lat;
+ }
}
policy->governor_data = sg_policy;
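The initialization order above, reduced to a helper: platform-supplied transition delays take precedence, otherwise both limits fall back to LATENCY_MULTIPLIER scaled by the hardware transition latency. A sketch assuming LATENCY_MULTIPLIER == 1000 as in mainline cpufreq:

static void init_rate_limits(unsigned int plat_up_us, unsigned int plat_down_us,
			     unsigned int latency_ns,
			     unsigned int *up_us, unsigned int *down_us)
{
	unsigned int lat = latency_ns / 1000;	/* NSEC_PER_USEC */

	if (plat_up_us && plat_down_us) {
		*up_us = plat_up_us;		/* platform knows best */
		*down_us = plat_down_us;
		return;
	}
	*up_us = *down_us = 1000;		/* LATENCY_MULTIPLIER */
	if (lat) {
		*up_us *= lat;
		*down_us *= lat;
	}
}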
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index a105e97ab6bf..905d8cb5a6eb 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -18,6 +18,8 @@
#include <linux/slab.h>
+#include "walt.h"
+
struct dl_bandwidth def_dl_bandwidth;
static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 853064319b0d..aa016919eab8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5809,10 +5809,11 @@ static inline void hrtick_update(struct rq *rq)
#endif
#ifdef CONFIG_SMP
+static bool __cpu_overutilized(int cpu, int delta);
static bool cpu_overutilized(int cpu);
unsigned long boosted_cpu_util(int cpu);
#else
-#define boosted_cpu_util(cpu) cpu_util(cpu)
+#define boosted_cpu_util(cpu) cpu_util_freq(cpu)
#endif
#if defined(CONFIG_SMP) && defined(CONFIG_CPU_FREQ_GOV_SCHED)
@@ -6626,10 +6627,8 @@ end:
*/
static int sched_group_energy(struct energy_env *eenv)
{
- struct sched_domain *sd;
- int cpu, total_energy = 0;
struct cpumask visit_cpus;
- struct sched_group *sg;
+ u64 total_energy = 0;
WARN_ON(!eenv->sg_top->sge);
@@ -6637,8 +6636,8 @@ static int sched_group_energy(struct energy_env *eenv)
while (!cpumask_empty(&visit_cpus)) {
struct sched_group *sg_shared_cap = NULL;
-
- cpu = cpumask_first(&visit_cpus);
+ int cpu = cpumask_first(&visit_cpus);
+ struct sched_domain *sd;
/*
* Is the group utilization affected by cpus outside this
@@ -6650,7 +6649,7 @@ static int sched_group_energy(struct energy_env *eenv)
sg_shared_cap = sd->parent->groups;
for_each_domain(cpu, sd) {
- sg = sd->groups;
+ struct sched_group *sg = sd->groups;
/* Has this sched_domain already been visited? */
if (sd->child && group_first_cpu(sg) != cpu)
@@ -6686,11 +6685,9 @@ static int sched_group_energy(struct energy_env *eenv)
idle_idx = group_idle_state(eenv, sg);
group_util = group_norm_util(eenv, sg);
- sg_busy_energy = (group_util * sg->sge->cap_states[cap_idx].power)
- >> SCHED_CAPACITY_SHIFT;
+ sg_busy_energy = (group_util * sg->sge->cap_states[cap_idx].power);
sg_idle_energy = ((SCHED_LOAD_SCALE-group_util)
- * sg->sge->idle_states[idle_idx].power)
- >> SCHED_CAPACITY_SHIFT;
+ * sg->sge->idle_states[idle_idx].power);
total_energy += sg_busy_energy + sg_idle_energy;
@@ -6715,7 +6712,7 @@ next_cpu:
continue;
}
- eenv->energy = total_energy;
+ eenv->energy = total_energy >> SCHED_CAPACITY_SHIFT;
return 0;
}
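The point of moving the >> SCHED_CAPACITY_SHIFT: shifting each group's busy/idle term before summing discards up to 1023 units per term, while accumulating the raw products in a u64 and shifting the final sum truncates only once. In isolation:

#include <stdint.h>

#define SCHED_CAPACITY_SHIFT	10

static uint64_t total_energy(const unsigned long *util,
			     const unsigned long *power, int n)
{
	uint64_t sum = 0;
	int i;

	for (i = 0; i < n; i++)
		sum += (uint64_t)util[i] * power[i];	/* no per-term shift */
	return sum >> SCHED_CAPACITY_SHIFT;		/* shift once */
}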
@@ -7004,9 +7001,14 @@ static inline bool task_fits_max(struct task_struct *p, int cpu)
return __task_fits(p, cpu, 0);
}
+static bool __cpu_overutilized(int cpu, int delta)
+{
+ return (capacity_of(cpu) * 1024) < ((cpu_util(cpu) + delta) * capacity_margin);
+}
+
static bool cpu_overutilized(int cpu)
{
- return (capacity_of(cpu) * 1024) < (cpu_util(cpu) * capacity_margin);
+ return __cpu_overutilized(cpu, 0);
}
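The new delta parameter turns the predicate into "would this CPU be overutilized with an extra delta of utilization placed on it", which the wakeup path below uses with task_util(p) under WALT. A hypothetical standalone form:

#include <stdbool.h>

static bool cpu_would_be_overutilized(unsigned long capacity,
				      unsigned long util,
				      unsigned long delta,
				      unsigned long margin)	/* e.g. 1280 */
{
	/* mirrors: capacity * 1024 < (util + delta) * margin */
	return capacity * 1024 < (util + delta) * margin;
}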
#ifdef CONFIG_SCHED_TUNE
@@ -7085,7 +7087,7 @@ schedtune_task_margin(struct task_struct *task)
unsigned long
boosted_cpu_util(int cpu)
{
- unsigned long util = cpu_util(cpu);
+ unsigned long util = cpu_util_freq(cpu);
long margin = schedtune_cpu_margin(util, cpu);
trace_sched_boost_cpu(cpu, util, margin);
@@ -7729,6 +7731,7 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
}
if (target_cpu != prev_cpu) {
+ int delta = 0;
struct energy_env eenv = {
.util_delta = task_util(p),
.src_cpu = prev_cpu,
@@ -7736,8 +7739,13 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
.task = p,
};
+
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+ delta = task_util(p);
+#endif
/* Not enough spare capacity on previous cpu */
- if (cpu_overutilized(prev_cpu)) {
+ if (__cpu_overutilized(prev_cpu, delta)) {
schedstat_inc(p, se.statistics.nr_wakeups_secb_insuff_cap);
schedstat_inc(this_rq(), eas_stats.secb_insuff_cap);
goto unlock;
@@ -11348,8 +11356,8 @@ static inline int _nohz_kick_needed(struct rq *rq, int cpu, int *type)
return true;
/* Do idle load balance if there is a misfit task */
- if (energy_aware() && rq->misfit_task)
- return 1;
+ if (energy_aware())
+ return rq->misfit_task;
return (rq->nr_running >= 2);
}
@@ -11391,7 +11399,7 @@ static inline bool nohz_kick_needed(struct rq *rq, int *type)
#ifndef CONFIG_SCHED_HMP
rcu_read_lock();
sd = rcu_dereference(per_cpu(sd_busy, cpu));
- if (sd && !energy_aware()) {
+ if (sd) {
sgc = sd->groups->sgc;
nr_busy = atomic_read(&sgc->nr_busy_cpus);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 33bf0c07e757..c53970b5a8f0 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2363,6 +2363,12 @@ static inline unsigned long __cpu_util(int cpu, int delta)
unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
unsigned long capacity = capacity_orig_of(cpu);
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+ util = div64_u64(cpu_rq(cpu)->cumulative_runnable_avg,
+ walt_ravg_window >> SCHED_LOAD_SHIFT);
+#endif
+
delta += util;
if (delta < 0)
return 0;
@@ -2375,6 +2381,19 @@ static inline unsigned long cpu_util(int cpu)
return __cpu_util(cpu, 0);
}
+static inline unsigned long cpu_util_freq(int cpu)
+{
+ unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
+ unsigned long capacity = capacity_orig_of(cpu);
+
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+ util = div64_u64(cpu_rq(cpu)->prev_runnable_sum,
+ walt_ravg_window >> SCHED_LOAD_SHIFT);
+#endif
+ return (util >= capacity) ? capacity : util;
+}
+
#endif
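The WALT branch converts a per-window busy-time sum (nanoseconds) into the 0..1024 utilization range by dividing by the window length scaled down by SCHED_LOAD_SHIFT. A worked sketch with illustrative numbers:

#include <stdint.h>

#define SCHED_LOAD_SHIFT	10

static uint64_t walt_scale_util(uint64_t busy_ns, uint64_t window_ns)
{
	/*
	 * e.g. busy_ns = 10000000 (10ms), window_ns = 20000000 (20ms):
	 * 10000000 / (20000000 >> 10) = 10000000 / 19531 = 512 (~50%)
	 */
	return busy_ns / (window_ns >> SCHED_LOAD_SHIFT);
}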
#ifdef CONFIG_CPU_FREQ_GOV_SCHED
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index a71e94cecdb6..9c56841227cc 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -829,7 +829,6 @@ schedtune_boostgroup_init(struct schedtune *st)
bg = &per_cpu(cpu_boost_groups, cpu);
bg->group[st->idx].boost = 0;
bg->group[st->idx].tasks = 0;
- raw_spin_lock_init(&bg->lock);
}
return 0;
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 92c3aae8e056..28e999554463 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -111,8 +111,10 @@ walt_dec_cumulative_runnable_avg(struct rq *rq,
static void
fixup_cumulative_runnable_avg(struct rq *rq,
- struct task_struct *p, s64 task_load_delta)
+ struct task_struct *p, u64 new_task_load)
{
+ s64 task_load_delta = (s64)new_task_load - task_load(p);
+
rq->cumulative_runnable_avg += task_load_delta;
if ((s64)rq->cumulative_runnable_avg < 0)
panic("cra less than zero: tld: %lld, task_load(p) = %u\n",
@@ -802,11 +804,11 @@ void walt_set_window_start(struct rq *rq)
int cpu = cpu_of(rq);
struct rq *sync_rq = cpu_rq(sync_cpu);
- if (rq->window_start)
+ if (likely(rq->window_start))
return;
if (cpu == sync_cpu) {
- rq->window_start = walt_ktime_clock();
+ rq->window_start = 1;
} else {
raw_spin_unlock(&rq->lock);
double_rq_lock(rq, sync_rq);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index eba904bae48c..38d73a6e2857 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2667,13 +2667,14 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
if (!command || !ftrace_enabled) {
/*
- * If these are control ops, they still need their
- * per_cpu field freed. Since, function tracing is
+ * If these are dynamic or control ops, they still
+ * need their data freed. Since, function tracing is
* not currently active, we can just free them
* without synchronizing all CPUs.
*/
- if (ops->flags & FTRACE_OPS_FL_CONTROL)
- control_ops_free(ops);
+ if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL))
+ goto free_ops;
+
return 0;
}
@@ -2728,6 +2729,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
schedule_on_each_cpu(ftrace_sync);
+ free_ops:
arch_ftrace_trampoline_free(ops);
if (ops->flags & FTRACE_OPS_FL_CONTROL)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a579a874045b..35613a2a5164 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -5397,7 +5397,7 @@ static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
- if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
+ if (tr->max_buffer.buffer)
ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
tracing_reset_online_cpus(&tr->max_buffer);
#endif
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index b0f86ea77881..ca70d11b8aa7 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -272,7 +272,7 @@ static int trace_selftest_ops(struct trace_array *tr, int cnt)
goto out_free;
if (cnt > 1) {
if (trace_selftest_test_global_cnt == 0)
- goto out;
+ goto out_free;
}
if (trace_selftest_test_dyn_cnt == 0)
goto out_free;
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index a988d4ef39da..4c480a20d76c 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -237,6 +237,7 @@ static __init int bdi_class_init(void)
bdi_class->dev_groups = bdi_dev_groups;
bdi_debug_init();
+
return 0;
}
postcore_initcall(bdi_class_init);
@@ -780,6 +781,7 @@ int bdi_init(struct backing_dev_info *bdi)
bdi->dev = NULL;
+ kref_init(&bdi->refcnt);
bdi->min_ratio = 0;
bdi->max_ratio = 100;
bdi->max_prop_frac = FPROP_FRAC_BASE;
@@ -795,6 +797,22 @@ int bdi_init(struct backing_dev_info *bdi)
}
EXPORT_SYMBOL(bdi_init);
+struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)
+{
+ struct backing_dev_info *bdi;
+
+ bdi = kmalloc_node(sizeof(struct backing_dev_info),
+ gfp_mask | __GFP_ZERO, node_id);
+ if (!bdi)
+ return NULL;
+
+ if (bdi_init(bdi)) {
+ kfree(bdi);
+ return NULL;
+ }
+ return bdi;
+}
+
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...)
{
@@ -875,12 +893,26 @@ void bdi_unregister(struct backing_dev_info *bdi)
}
}
-void bdi_exit(struct backing_dev_info *bdi)
+static void bdi_exit(struct backing_dev_info *bdi)
{
WARN_ON_ONCE(bdi->dev);
wb_exit(&bdi->wb);
}
+static void release_bdi(struct kref *ref)
+{
+ struct backing_dev_info *bdi =
+ container_of(ref, struct backing_dev_info, refcnt);
+
+ bdi_exit(bdi);
+ kfree(bdi);
+}
+
+void bdi_put(struct backing_dev_info *bdi)
+{
+ kref_put(&bdi->refcnt, release_bdi);
+}
+
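The lifetime pattern being introduced: kref_init() in bdi_init() sets the count to 1, and the final bdi_put() drops through the release callback, which chains bdi_exit() and kfree(). Reduced to its shape (a plain int stands in for the atomic kref):

#include <stdlib.h>

struct obj {
	int refcnt;			/* kref stand-in; kref_put is atomic */
};

static void obj_release(struct obj *o)
{
	/* bdi_exit()-style teardown would run here */
	free(o);
}

static struct obj *obj_alloc(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (o)
		o->refcnt = 1;		/* kref_init() */
	return o;
}

static void obj_put(struct obj *o)
{
	if (o && --o->refcnt == 0)	/* kref_put() */
		obj_release(o);
}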
void bdi_destroy(struct backing_dev_info *bdi)
{
bdi_unregister(bdi);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 15cb026ef807..29597fe6bb35 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1993,11 +1993,11 @@ void laptop_mode_timer_fn(unsigned long data)
* We want to write everything out, not just down to the dirty
* threshold
*/
- if (!bdi_has_dirty_io(&q->backing_dev_info))
+ if (!bdi_has_dirty_io(q->backing_dev_info))
return;
rcu_read_lock();
- list_for_each_entry_rcu(wb, &q->backing_dev_info.wb_list, bdi_node)
+ list_for_each_entry_rcu(wb, &q->backing_dev_info->wb_list, bdi_node)
if (wb_has_dirty_io(wb))
wb_start_writeback(wb, nr_pages, true,
WB_REASON_LAPTOP_TIMER);
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 3a9a358e7c63..10b7f196b005 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -129,7 +129,7 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
.nr_entries = 0,
.entries = entries,
.max_entries = PAGE_OWNER_STACK_DEPTH,
- .skip = 0
+ .skip = 2
};
depot_stack_handle_t handle;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index bd5937dc6fe9..f703d462b7f8 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -57,7 +57,7 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
void *data);
-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
+static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
@@ -1462,7 +1462,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
chan->num_conf_req++;
}
@@ -2966,12 +2966,15 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
return len;
}
-static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
+static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
struct l2cap_conf_opt *opt = *ptr;
BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
+ if (size < L2CAP_CONF_OPT_SIZE + len)
+ return;
+
opt->type = type;
opt->len = len;
@@ -2996,7 +2999,7 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
*ptr += L2CAP_CONF_OPT_SIZE + len;
}
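Every append now carries the space remaining in the destination (endptr - ptr), and the option is silently dropped when it would not fit, which is what closes the remote stack-buffer overflow in config request/response building. The pattern detached from L2CAP (constants are illustrative):

#include <stddef.h>
#include <string.h>

#define OPT_HDR_SIZE	2	/* type + len, like L2CAP_CONF_OPT_SIZE */

static void add_opt(unsigned char **ptr, unsigned char type,
		    unsigned char len, const void *val, size_t space)
{
	if (space < (size_t)OPT_HDR_SIZE + len)
		return;			/* would overflow: skip the option */

	(*ptr)[0] = type;
	(*ptr)[1] = len;
	memcpy(*ptr + OPT_HDR_SIZE, val, len);
	*ptr += OPT_HDR_SIZE + len;
}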
-static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
+static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
struct l2cap_conf_efs efs;
@@ -3024,7 +3027,7 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
}
l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
- (unsigned long) &efs);
+ (unsigned long) &efs, size);
}
static void l2cap_ack_timeout(struct work_struct *work)
@@ -3170,11 +3173,12 @@ static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
chan->ack_win = chan->tx_win;
}
-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
struct l2cap_conf_req *req = data;
struct l2cap_conf_rfc rfc = { .mode = chan->mode };
void *ptr = req->data;
+ void *endptr = data + data_size;
u16 size;
BT_DBG("chan %pK", chan);
@@ -3199,7 +3203,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
done:
if (chan->imtu != L2CAP_DEFAULT_MTU)
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
switch (chan->mode) {
case L2CAP_MODE_BASIC:
@@ -3218,7 +3222,7 @@ done:
rfc.max_pdu_size = 0;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc, endptr - ptr);
break;
case L2CAP_MODE_ERTM:
@@ -3238,21 +3242,21 @@ done:
L2CAP_DEFAULT_TX_WINDOW);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc, endptr - ptr);
if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
- l2cap_add_opt_efs(&ptr, chan);
+ l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
- chan->tx_win);
+ chan->tx_win, endptr - ptr);
if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
if (chan->fcs == L2CAP_FCS_NONE ||
test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
chan->fcs = L2CAP_FCS_NONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
- chan->fcs);
+ chan->fcs, endptr - ptr);
}
break;
@@ -3270,17 +3274,17 @@ done:
rfc.max_pdu_size = cpu_to_le16(size);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc, endptr - ptr);
if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
- l2cap_add_opt_efs(&ptr, chan);
+ l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
if (chan->fcs == L2CAP_FCS_NONE ||
test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
chan->fcs = L2CAP_FCS_NONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
- chan->fcs);
+ chan->fcs, endptr - ptr);
}
break;
}
@@ -3291,10 +3295,11 @@ done:
return ptr - data;
}
-static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
struct l2cap_conf_rsp *rsp = data;
void *ptr = rsp->data;
+ void *endptr = data + data_size;
void *req = chan->conf_req;
int len = chan->conf_len;
int type, hint, olen;
@@ -3396,7 +3401,7 @@ done:
return -ECONNREFUSED;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc, endptr - ptr);
}
if (result == L2CAP_CONF_SUCCESS) {
@@ -3409,7 +3414,7 @@ done:
chan->omtu = mtu;
set_bit(CONF_MTU_DONE, &chan->conf_state);
}
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
if (remote_efs) {
if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
@@ -3423,7 +3428,7 @@ done:
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
sizeof(efs),
- (unsigned long) &efs);
+ (unsigned long) &efs, endptr - ptr);
} else {
/* Send PENDING Conf Rsp */
result = L2CAP_CONF_PENDING;
@@ -3456,7 +3461,7 @@ done:
set_bit(CONF_MODE_DONE, &chan->conf_state);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc);
+ sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
chan->remote_id = efs.id;
@@ -3470,7 +3475,7 @@ done:
le32_to_cpu(efs.sdu_itime);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
sizeof(efs),
- (unsigned long) &efs);
+ (unsigned long) &efs, endptr - ptr);
}
break;
@@ -3484,7 +3489,7 @@ done:
set_bit(CONF_MODE_DONE, &chan->conf_state);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc, endptr - ptr);
break;
@@ -3506,10 +3511,11 @@ done:
}
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
- void *data, u16 *result)
+ void *data, size_t size, u16 *result)
{
struct l2cap_conf_req *req = data;
void *ptr = req->data;
+ void *endptr = data + size;
int type, olen;
unsigned long val;
struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
@@ -3527,13 +3533,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
chan->imtu = L2CAP_DEFAULT_MIN_MTU;
} else
chan->imtu = val;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
break;
case L2CAP_CONF_FLUSH_TO:
chan->flush_to = val;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
- 2, chan->flush_to);
+ 2, chan->flush_to, endptr - ptr);
break;
case L2CAP_CONF_RFC:
@@ -3547,13 +3553,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
chan->fcs = 0;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc);
+ sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
break;
case L2CAP_CONF_EWS:
chan->ack_win = min_t(u16, val, chan->ack_win);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
- chan->tx_win);
+ chan->tx_win, endptr - ptr);
break;
case L2CAP_CONF_EFS:
@@ -3566,7 +3572,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
return -ECONNREFUSED;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
- (unsigned long) &efs);
+ (unsigned long) &efs, endptr - ptr);
break;
case L2CAP_CONF_FCS:
@@ -3671,7 +3677,7 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
return;
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
chan->num_conf_req++;
}
@@ -3879,7 +3885,7 @@ sendresp:
u8 buf[128];
set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
chan->num_conf_req++;
}
@@ -3957,7 +3963,7 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
break;
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, req), req);
+ l2cap_build_conf_req(chan, req, sizeof(req)), req);
chan->num_conf_req++;
break;
@@ -4069,7 +4075,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
}
/* Complete config. */
- len = l2cap_parse_conf_req(chan, rsp);
+ len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
if (len < 0) {
l2cap_send_disconn_req(chan, ECONNRESET);
goto unlock;
@@ -4103,7 +4109,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
u8 buf[64];
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
chan->num_conf_req++;
}
@@ -4163,7 +4169,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
char buf[64];
len = l2cap_parse_conf_rsp(chan, rsp->data, len,
- buf, &result);
+ buf, sizeof(buf), &result);
if (len < 0) {
l2cap_send_disconn_req(chan, ECONNRESET);
goto done;
@@ -4193,7 +4199,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
/* throw out any old stored conf requests */
result = L2CAP_CONF_SUCCESS;
len = l2cap_parse_conf_rsp(chan, rsp->data, len,
- req, &result);
+ req, sizeof(req), &result);
if (len < 0) {
l2cap_send_disconn_req(chan, ECONNRESET);
goto done;
@@ -4771,7 +4777,7 @@ static void l2cap_do_create(struct l2cap_chan *chan, int result,
set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
chan->num_conf_req++;
}
}
@@ -7443,7 +7449,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(conn, l2cap_get_ident(conn),
L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf),
+ l2cap_build_conf_req(chan, buf, sizeof(buf)),
buf);
chan->num_conf_req++;
}
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index 6b437e8760d3..12e8cf4bda9f 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -580,19 +580,14 @@ static int __net_init lowpan_frags_init_net(struct net *net)
{
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
- int res;
ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
- res = inet_frags_init_net(&ieee802154_lowpan->frags);
- if (res)
- return res;
- res = lowpan_frags_ns_sysctl_register(net);
- if (res)
- inet_frags_uninit_net(&ieee802154_lowpan->frags);
- return res;
+ inet_frags_init_net(&ieee802154_lowpan->frags);
+
+ return lowpan_frags_ns_sysctl_register(net);
}
static void __net_exit lowpan_frags_exit_net(struct net *net)
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index fe144dae7372..c5fb2f694ed0 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -234,10 +234,8 @@ evict_again:
cond_resched();
if (read_seqretry(&f->rnd_seqlock, seq) ||
- percpu_counter_sum(&nf->mem))
+ sum_frag_mem_limit(nf))
goto evict_again;
-
- percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index b8a0607dab96..e2e162432aa3 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -840,8 +840,6 @@ static void __init ip4_frags_ctl_register(void)
static int __net_init ipv4_frags_init_net(struct net *net)
{
- int res;
-
/* Fragment cache limits.
*
* The fragment memory accounting code, (tries to) account for
@@ -865,13 +863,9 @@ static int __net_init ipv4_frags_init_net(struct net *net)
*/
net->ipv4.frags.timeout = IP_FRAG_TIME;
- res = inet_frags_init_net(&net->ipv4.frags);
- if (res)
- return res;
- res = ip4_frags_ns_ctl_register(net);
- if (res)
- inet_frags_uninit_net(&net->ipv4.frags);
- return res;
+ inet_frags_init_net(&net->ipv4.frags);
+
+ return ip4_frags_ns_ctl_register(net);
}
static void __net_exit ipv4_frags_exit_net(struct net *net)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 9bdd7847ef3a..79203972bfc6 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2269,6 +2269,10 @@ int tcp_disconnect(struct sock *sk, int flags)
tcp_set_ca_state(sk, TCP_CA_Open);
tcp_clear_retrans(tp);
inet_csk_delack_init(sk);
+ /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
+ * issue in __tcp_select_window()
+ */
+ icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
tcp_init_send_head(sk);
memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
__sk_dst_reset(sk);
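Context for the icsk_ack.rcv_mss initialization: __tcp_select_window() divides by rcv_mss, so a socket disconnected and reused before any segment refreshes the estimate could divide by zero. A guard of the same shape (the constant here is an assumption standing in for TCP_MIN_MSS):

static unsigned int window_in_mss(unsigned int free_space,
				  unsigned int rcv_mss)
{
	/* with rcv_mss preset to a sane floor this divisor is never 0 */
	if (!rcv_mss)
		rcv_mss = 88;	/* assumed TCP_MIN_MSS-like floor */
	return free_space / rcv_mss;
}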
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 4b707ad4ffbd..40c29712f32a 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5230,7 +5230,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
* our DAD process, so we don't need
* to do it again
*/
- if (!(ifp->rt->rt6i_node))
+ if (!rcu_access_pointer(ifp->rt->rt6i_node))
ip6_ins_rt(ifp->rt);
if (ifp->idev->cnf.forwarding)
addrconf_join_anycast(ifp);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index aad8cdf15472..c23e02a7ccb0 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -150,11 +150,23 @@ static struct fib6_node *node_alloc(void)
return fn;
}
-static void node_free(struct fib6_node *fn)
+static void node_free_immediate(struct fib6_node *fn)
+{
+ kmem_cache_free(fib6_node_kmem, fn);
+}
+
+static void node_free_rcu(struct rcu_head *head)
{
+ struct fib6_node *fn = container_of(head, struct fib6_node, rcu);
+
kmem_cache_free(fib6_node_kmem, fn);
}
+static void node_free(struct fib6_node *fn)
+{
+ call_rcu(&fn->rcu, node_free_rcu);
+}
+
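The split above encodes a common RCU rule: a node that was published into the tree must be freed through call_rcu() so concurrent lockless readers can finish with it, while a node that failed insertion and was never visible may be freed immediately. Kernel-style sketch (node_cache is a placeholder for fib6_node_kmem):

#include <linux/rcupdate.h>
#include <linux/slab.h>

static struct kmem_cache *node_cache;	/* placeholder cache */

struct node {
	struct rcu_head rcu;
	/* ... payload ... */
};

static void node_free_rcu_cb(struct rcu_head *head)
{
	kmem_cache_free(node_cache, container_of(head, struct node, rcu));
}

static void node_free_published(struct node *n)
{
	call_rcu(&n->rcu, node_free_rcu_cb);	/* wait a grace period */
}

static void node_free_unpublished(struct node *n)
{
	kmem_cache_free(node_cache, n);		/* never visible: free now */
}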
static void rt6_rcu_free(struct rt6_info *rt)
{
call_rcu(&rt->dst.rcu_head, dst_rcu_free);
@@ -191,6 +203,12 @@ static void rt6_release(struct rt6_info *rt)
}
}
+static void fib6_free_table(struct fib6_table *table)
+{
+ inetpeer_invalidate_tree(&table->tb6_peers);
+ kfree(table);
+}
+
static void fib6_link_table(struct net *net, struct fib6_table *tb)
{
unsigned int h;
@@ -588,9 +606,9 @@ insert_above:
if (!in || !ln) {
if (in)
- node_free(in);
+ node_free_immediate(in);
if (ln)
- node_free(ln);
+ node_free_immediate(ln);
return ERR_PTR(-ENOMEM);
}
@@ -857,7 +875,7 @@ add:
rt->dst.rt6_next = iter;
*ins = rt;
- rt->rt6i_node = fn;
+ rcu_assign_pointer(rt->rt6i_node, fn);
atomic_inc(&rt->rt6i_ref);
inet6_rt_notify(RTM_NEWROUTE, rt, info, 0);
info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
@@ -882,7 +900,7 @@ add:
return err;
*ins = rt;
- rt->rt6i_node = fn;
+ rcu_assign_pointer(rt->rt6i_node, fn);
rt->dst.rt6_next = iter->dst.rt6_next;
atomic_inc(&rt->rt6i_ref);
inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE);
@@ -1015,7 +1033,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
root, and then (in failure) stale node
in main tree.
*/
- node_free(sfn);
+ node_free_immediate(sfn);
err = PTR_ERR(sn);
goto failure;
}
@@ -1442,8 +1460,9 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
int fib6_del(struct rt6_info *rt, struct nl_info *info)
{
+ struct fib6_node *fn = rcu_dereference_protected(rt->rt6i_node,
+ lockdep_is_held(&rt->rt6i_table->tb6_lock));
struct net *net = info->nl_net;
- struct fib6_node *fn = rt->rt6i_node;
struct rt6_info **rtp;
#if RT6_DEBUG >= 2
@@ -1632,7 +1651,9 @@ static int fib6_clean_node(struct fib6_walker *w)
if (res) {
#if RT6_DEBUG >= 2
pr_debug("%s: del failed: rt=%p@%p err=%d\n",
- __func__, rt, rt->rt6i_node, res);
+ __func__, rt,
+ rcu_access_pointer(rt->rt6i_node),
+ res);
#endif
continue;
}
@@ -1870,15 +1891,22 @@ out_timer:
static void fib6_net_exit(struct net *net)
{
+ unsigned int i;
+
rt6_ifdown(net, NULL);
del_timer_sync(&net->ipv6.ip6_fib_timer);
-#ifdef CONFIG_IPV6_MULTIPLE_TABLES
- inetpeer_invalidate_tree(&net->ipv6.fib6_local_tbl->tb6_peers);
- kfree(net->ipv6.fib6_local_tbl);
-#endif
- inetpeer_invalidate_tree(&net->ipv6.fib6_main_tbl->tb6_peers);
- kfree(net->ipv6.fib6_main_tbl);
+ for (i = 0; i < FIB6_TABLE_HASHSZ; i++) {
+ struct hlist_head *head = &net->ipv6.fib_table_hash[i];
+ struct hlist_node *tmp;
+ struct fib6_table *tb;
+
+ hlist_for_each_entry_safe(tb, tmp, head, tb6_hlist) {
+ hlist_del(&tb->tb6_hlist);
+ fib6_free_table(tb);
+ }
+ }
+
kfree(net->ipv6.fib_table_hash);
kfree(net->ipv6.rt6_stats);
}
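The exit path previously freed only the main (and, with CONFIG_IPV6_MULTIPLE_TABLES, local) table and leaked any policy-routing tables created at runtime; walking the whole hash frees them all. The safe-iteration shape, shown on a plain singly linked list:

#include <stdlib.h>

struct table {
	struct table *next;
	/* ... */
};

static void free_all_tables(struct table **head)
{
	struct table *t, *next;

	for (t = *head; t; t = next) {
		next = t->next;		/* "_safe": read link before freeing */
		free(t);
	}
	*head = NULL;
}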
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index bab4441ed4e4..eb2dc39f7066 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -649,18 +649,12 @@ EXPORT_SYMBOL_GPL(nf_ct_frag6_consume_orig);
static int nf_ct_net_init(struct net *net)
{
- int res;
-
net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT;
- res = inet_frags_init_net(&net->nf_frag.frags);
- if (res)
- return res;
- res = nf_ct_frag6_sysctl_register(net);
- if (res)
- inet_frags_uninit_net(&net->nf_frag.frags);
- return res;
+ inet_frags_init_net(&net->nf_frag.frags);
+
+ return nf_ct_frag6_sysctl_register(net);
}
static void nf_ct_net_exit(struct net *net)
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index f9f02581c4ca..f99a04674419 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -86,7 +86,6 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
while (offset <= packet_len) {
struct ipv6_opt_hdr *exthdr;
- unsigned int len;
switch (**nexthdr) {
@@ -112,10 +111,9 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
offset);
- len = ipv6_optlen(exthdr);
- if (len + offset >= IPV6_MAXPLEN)
+ offset += ipv6_optlen(exthdr);
+ if (offset > IPV6_MAXPLEN)
return -EINVAL;
- offset += len;
*nexthdr = &exthdr->nexthdr;
}
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index a234552a7e3d..58f2139ebb5e 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -708,19 +708,13 @@ static void ip6_frags_sysctl_unregister(void)
static int __net_init ipv6_frags_init_net(struct net *net)
{
- int res;
-
net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
- res = inet_frags_init_net(&net->ipv6.frags);
- if (res)
- return res;
- res = ip6_frags_ns_sysctl_register(net);
- if (res)
- inet_frags_uninit_net(&net->ipv6.frags);
- return res;
+ inet_frags_init_net(&net->ipv6.frags);
+
+ return ip6_frags_ns_sysctl_register(net);
}
static void __net_exit ipv6_frags_exit_net(struct net *net)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index dd37fe0b6a49..2e249d4a2bf3 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1241,7 +1241,9 @@ static void rt6_dst_from_metrics_check(struct rt6_info *rt)
static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
{
- if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
+ u32 rt_cookie;
+
+ if (!rt6_get_cookie_safe(rt, &rt_cookie) || rt_cookie != cookie)
return NULL;
if (rt6_check_expired(rt))
@@ -1309,8 +1311,14 @@ static void ip6_link_failure(struct sk_buff *skb)
if (rt->rt6i_flags & RTF_CACHE) {
dst_hold(&rt->dst);
ip6_del_rt(rt);
- } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
- rt->rt6i_node->fn_sernum = -1;
+ } else {
+ struct fib6_node *fn;
+
+ rcu_read_lock();
+ fn = rcu_dereference(rt->rt6i_node);
+ if (fn && (rt->rt6i_flags & RTF_DEFAULT))
+ fn->fn_sernum = -1;
+ rcu_read_unlock();
}
}
}
@@ -1327,7 +1335,8 @@ static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
{
return !(rt->rt6i_flags & RTF_CACHE) &&
- (rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
+ (rt->rt6i_flags & RTF_PCPU ||
+ rcu_access_pointer(rt->rt6i_node));
}
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index a8cabc876348..329ae3ccfa35 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1073,6 +1073,7 @@ static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
*/
offset = skb_transport_offset(skb);
skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ csum = skb->csum;
skb->ip_summed = CHECKSUM_NONE;
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
index b0e0555e79ad..d44469cd5754 100644
--- a/net/netfilter/xt_qtaguid.c
+++ b/net/netfilter/xt_qtaguid.c
@@ -1714,8 +1714,7 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
MT_DEBUG("qtaguid[%d]: sk=%p got_sock=%d fam=%d proto=%d\n",
par->hooknum, sk, got_sock, par->family, ipx_proto(skb, par));
-
- if (sk == NULL) {
+ if (!sk) {
/*
* Here, the qtaguid_find_sk() using connection tracking
* couldn't find the owner, so for now we just count them
@@ -1733,7 +1732,8 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
}
sock_uid = sk->sk_uid;
if (do_tag_stat)
- account_for_uid(skb, sk, from_kuid(&init_user_ns, sock_uid), par);
+ account_for_uid(skb, sk, from_kuid(&init_user_ns, sock_uid),
+ par);
/*
* The following two tests fail the match when:
@@ -1745,8 +1745,8 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
kuid_t uid_min = make_kuid(&init_user_ns, info->uid_min);
kuid_t uid_max = make_kuid(&init_user_ns, info->uid_max);
- if ((uid_gte(sk->sk_uid, uid_min) &&
- uid_lte(sk->sk_uid, uid_max)) ^
+ if ((uid_gte(sock_uid, uid_min) &&
+ uid_lte(sock_uid, uid_max)) ^
!(info->invert & XT_QTAGUID_UID)) {
MT_DEBUG("qtaguid[%d]: leaving uid not matching\n",
par->hooknum);
@@ -1760,16 +1760,18 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
set_sk_callback_lock = true;
read_lock_bh(&sk->sk_callback_lock);
MT_DEBUG("qtaguid[%d]: sk=%pK->sk_socket=%pK->file=%pK\n",
- par->hooknum, sk, sk->sk_socket,
- sk->sk_socket ? sk->sk_socket->file : (void *)-1LL);
+ par->hooknum, sk, sk->sk_socket,
+ sk->sk_socket ? sk->sk_socket->file : (void *)-1LL);
filp = sk->sk_socket ? sk->sk_socket->file : NULL;
if (!filp) {
- res = ((info->match ^ info->invert) & XT_QTAGUID_GID) == 0;
+ res = ((info->match ^ info->invert) &
+ XT_QTAGUID_GID) == 0;
atomic64_inc(&qtu_events.match_no_sk_gid);
goto put_sock_ret_res;
}
MT_DEBUG("qtaguid[%d]: filp...uid=%u\n",
- par->hooknum, filp ? from_kuid(&init_user_ns, filp->f_cred->fsuid) : -1);
+ par->hooknum, filp ?
+ from_kuid(&init_user_ns, filp->f_cred->fsuid) : -1);
if ((gid_gte(filp->f_cred->fsgid, gid_min) &&
gid_lte(filp->f_cred->fsgid, gid_max)) ^
!(info->invert & XT_QTAGUID_GID)) {
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 8a2a489b2cd3..ede54061c554 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -237,7 +237,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
transparent = xt_socket_sk_is_transparent(sk);
if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
- transparent)
+ transparent && sk_fullsock(sk))
pskb->mark = sk->sk_mark;
sock_gen_put(sk);
@@ -419,7 +419,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
transparent = xt_socket_sk_is_transparent(sk);
if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
- transparent)
+ transparent && sk_fullsock(sk))
pskb->mark = sk->sk_mark;
if (sk != skb->sk)
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index 0727a6e9f780..86005410a22f 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -224,17 +224,16 @@ country BY: DFS-ETSI
(5490 - 5710 @ 160), (30), DFS
country BZ:
- (2402 - 2482 @ 40), (36)
- (5170 - 5330 @ 160), (27)
- (5490 - 5730 @ 160), (36)
- (5735 - 5835 @ 80), (36)
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5330 @ 160), (23)
+ (5490 - 5730 @ 160), (30)
+ (5735 - 5835 @ 80), (30)
country CA: DFS-FCC
(2402 - 2472 @ 40), (30)
(5170 - 5250 @ 80), (24), AUTO-BW
(5250 - 5330 @ 80), (24), DFS, AUTO-BW
- (5490 - 5590 @ 80), (24), DFS
- (5650 - 5730 @ 80), (24), DFS
+ (5490 - 5730 @ 160), (24), DFS
(5735 - 5835 @ 80), (30)
# 60 GHz band channels 1-3
(57240 - 63720 @ 2160), (40)
@@ -683,7 +682,13 @@ country IL: DFS-ETSI
country IN:
(2402 - 2482 @ 40), (20)
(5170 - 5330 @ 160), (23)
- (5735 - 5835 @ 80), (30)
+ (5735 - 5835 @ 80), (33)
+
+country IQ: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
country IS: DFS-ETSI
(2402 - 2482 @ 40), (20)
@@ -737,7 +742,6 @@ country JO:
country JP: DFS-JP
(2402 - 2482 @ 40), (20)
- (2474 - 2494 @ 20), (20), NO-OFDM
(5170 - 5250 @ 80), (20), AUTO-BW, NO-OUTDOOR
(5250 - 5330 @ 80), (20), DFS, AUTO-BW, NO-OUTDOOR
(5490 - 5710 @ 160), (20), DFS
@@ -759,7 +763,7 @@ country KH: DFS-ETSI
country KN: DFS-FCC
(2402 - 2482 @ 40), (20)
(5170 - 5250 @ 80), (23), AUTO-BW
- (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5250 - 5330 @ 80), (30), DFS, AUTO-BW
(5490 - 5710 @ 160), (30), DFS
(5735 - 5815 @ 80), (30)
@@ -1010,7 +1014,7 @@ country MY: DFS-FCC
(5170 - 5250 @ 80), (24), AUTO-BW
(5250 - 5330 @ 80), (24), DFS, AUTO-BW
(5490 - 5650 @ 160), (24), DFS
- (5735 - 5815 @ 80), (24)
+ (5735 - 5835 @ 80), (24)
# 60 GHz band channels 1-3
(57240 - 63720 @ 2160), (40)
@@ -1090,7 +1094,7 @@ country OM: DFS-ETSI
(5490 - 5710 @ 160), (30), DFS
country PA:
- (2402 - 2472 @ 40), (30)
+ (2402 - 2472 @ 40), (36)
(5170 - 5250 @ 80), (23), AUTO-BW
(5250 - 5330 @ 80), (30), AUTO-BW
(5735 - 5835 @ 80), (36)
@@ -1375,9 +1379,9 @@ country TR: DFS-ETSI
country TT:
(2402 - 2482 @ 40), (20)
- (5170 - 5330 @ 160), (27)
- (5490 - 5730 @ 160), (36)
- (5735 - 5835 @ 80), (36)
+ (5170 - 5330 @ 160), (24)
+ (5490 - 5730 @ 160), (24)
+ (5735 - 5835 @ 80), (30)
# 60 GHz band channels 1-3, FCC
(57240 - 63720 @ 2160), (40)
@@ -1451,7 +1455,7 @@ country UY: DFS-FCC
country UZ: DFS-ETSI
(2402 - 2482 @ 40), (20)
(5170 - 5250 @ 80), (23), AUTO-BW
- (5250 - 5330 @ 80), (20), DFS, AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
country VC: DFS-ETSI
(2402 - 2482 @ 40), (20)
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index c8700399d7fd..e8994c345c53 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3272,9 +3272,15 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
struct xfrm_state *x_new[XFRM_MAX_DEPTH];
struct xfrm_migrate *mp;
+ /* Stage 0 - sanity checks */
if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
goto out;
+ if (dir >= XFRM_POLICY_MAX) {
+ err = -EINVAL;
+ goto out;
+ }
+
/* Stage 1 - find policy */
if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
err = -ENOENT;
diff --git a/sound/core/info.c b/sound/core/info.c
index 79dee33b5035..a04016c19f6d 100644
--- a/sound/core/info.c
+++ b/sound/core/info.c
@@ -754,8 +754,11 @@ snd_info_create_entry(const char *name, struct snd_info_entry *parent)
INIT_LIST_HEAD(&entry->children);
INIT_LIST_HEAD(&entry->list);
entry->parent = parent;
- if (parent)
+ if (parent) {
+ mutex_lock(&parent->access);
list_add_tail(&entry->list, &parent->children);
+ mutex_unlock(&parent->access);
+ }
return entry;
}
diff --git a/sound/isa/msnd/msnd_midi.c b/sound/isa/msnd/msnd_midi.c
index ffc67fd80c23..58e59cd3c95c 100644
--- a/sound/isa/msnd/msnd_midi.c
+++ b/sound/isa/msnd/msnd_midi.c
@@ -120,24 +120,24 @@ void snd_msndmidi_input_read(void *mpuv)
unsigned long flags;
struct snd_msndmidi *mpu = mpuv;
void *pwMIDQData = mpu->dev->mappedbase + MIDQ_DATA_BUFF;
+ u16 head, tail, size;
spin_lock_irqsave(&mpu->input_lock, flags);
- while (readw(mpu->dev->MIDQ + JQS_wTail) !=
- readw(mpu->dev->MIDQ + JQS_wHead)) {
- u16 wTmp, val;
- val = readw(pwMIDQData + 2 * readw(mpu->dev->MIDQ + JQS_wHead));
-
- if (test_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER,
- &mpu->mode))
- snd_rawmidi_receive(mpu->substream_input,
- (unsigned char *)&val, 1);
-
- wTmp = readw(mpu->dev->MIDQ + JQS_wHead) + 1;
- if (wTmp > readw(mpu->dev->MIDQ + JQS_wSize))
- writew(0, mpu->dev->MIDQ + JQS_wHead);
- else
- writew(wTmp, mpu->dev->MIDQ + JQS_wHead);
+ head = readw(mpu->dev->MIDQ + JQS_wHead);
+ tail = readw(mpu->dev->MIDQ + JQS_wTail);
+ size = readw(mpu->dev->MIDQ + JQS_wSize);
+ if (head > size || tail > size)
+ goto out;
+ while (head != tail) {
+ unsigned char val = readw(pwMIDQData + 2 * head);
+
+ if (test_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER, &mpu->mode))
+ snd_rawmidi_receive(mpu->substream_input, &val, 1);
+ if (++head > size)
+ head = 0;
+ writew(head, mpu->dev->MIDQ + JQS_wHead);
}
+ out:
spin_unlock_irqrestore(&mpu->input_lock, flags);
}
EXPORT_SYMBOL(snd_msndmidi_input_read);
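The rewritten drain loop reads head/tail/size from device memory once, rejects indices a misbehaving device could have corrupted, and advances a local head instead of re-reading the queue registers on every iteration. The same logic over an ordinary array:

#include <stdint.h>

static void drain_queue(const uint16_t *data, uint16_t head, uint16_t tail,
			uint16_t size, void (*consume)(uint16_t val))
{
	if (head > size || tail > size)
		return;			/* corrupt indices: drop everything */

	while (head != tail) {
		consume(data[head]);
		if (++head > size)	/* queue holds size + 1 slots */
			head = 0;
	}
}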
diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c
index 4c072666115d..a31ea6c22d19 100644
--- a/sound/isa/msnd/msnd_pinnacle.c
+++ b/sound/isa/msnd/msnd_pinnacle.c
@@ -170,23 +170,24 @@ static irqreturn_t snd_msnd_interrupt(int irq, void *dev_id)
{
struct snd_msnd *chip = dev_id;
void *pwDSPQData = chip->mappedbase + DSPQ_DATA_BUFF;
+ u16 head, tail, size;
/* Send ack to DSP */
/* inb(chip->io + HP_RXL); */
/* Evaluate queued DSP messages */
- while (readw(chip->DSPQ + JQS_wTail) != readw(chip->DSPQ + JQS_wHead)) {
- u16 wTmp;
-
- snd_msnd_eval_dsp_msg(chip,
- readw(pwDSPQData + 2 * readw(chip->DSPQ + JQS_wHead)));
-
- wTmp = readw(chip->DSPQ + JQS_wHead) + 1;
- if (wTmp > readw(chip->DSPQ + JQS_wSize))
- writew(0, chip->DSPQ + JQS_wHead);
- else
- writew(wTmp, chip->DSPQ + JQS_wHead);
+ head = readw(chip->DSPQ + JQS_wHead);
+ tail = readw(chip->DSPQ + JQS_wTail);
+ size = readw(chip->DSPQ + JQS_wSize);
+ if (head > size || tail > size)
+ goto out;
+ while (head != tail) {
+ snd_msnd_eval_dsp_msg(chip, readw(pwDSPQData + 2 * head));
+ if (++head > size)
+ head = 0;
+ writew(head, chip->DSPQ + JQS_wHead);
}
+ out:
/* Send ack to DSP */
inb(chip->io + HP_RXL);
return IRQ_HANDLED;
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
index 74177189063c..d3125c169684 100644
--- a/sound/pci/au88x0/au88x0_core.c
+++ b/sound/pci/au88x0/au88x0_core.c
@@ -2150,8 +2150,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
stream->resources, en,
VORTEX_RESOURCE_SRC)) < 0) {
memset(stream->resources, 0,
- sizeof(unsigned char) *
- VORTEX_RESOURCE_LAST);
+ sizeof(stream->resources));
return -EBUSY;
}
if (stream->type != VORTEX_PCM_A3D) {
@@ -2161,7 +2160,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
VORTEX_RESOURCE_MIXIN)) < 0) {
memset(stream->resources,
0,
- sizeof(unsigned char) * VORTEX_RESOURCE_LAST);
+ sizeof(stream->resources));
return -EBUSY;
}
}
@@ -2174,8 +2173,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
stream->resources, en,
VORTEX_RESOURCE_A3D)) < 0) {
memset(stream->resources, 0,
- sizeof(unsigned char) *
- VORTEX_RESOURCE_LAST);
+ sizeof(stream->resources));
dev_err(vortex->card->dev,
"out of A3D sources. Sorry\n");
return -EBUSY;
@@ -2289,8 +2287,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
VORTEX_RESOURCE_MIXOUT))
< 0) {
memset(stream->resources, 0,
- sizeof(unsigned char) *
- VORTEX_RESOURCE_LAST);
+ sizeof(stream->resources));
return -EBUSY;
}
if ((src[i] =
@@ -2298,8 +2295,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
stream->resources, en,
VORTEX_RESOURCE_SRC)) < 0) {
memset(stream->resources, 0,
- sizeof(unsigned char) *
- VORTEX_RESOURCE_LAST);
+ sizeof(stream->resources));
return -EBUSY;
}
}
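sizeof(stream->resources) is preferred over sizeof(unsigned char) * VORTEX_RESOURCE_LAST because it is derived from the declaration: if the element type or array length ever changes, the memset stays exact instead of silently clearing the wrong number of bytes. Illustration:

#include <string.h>

#define RESOURCE_LAST	8	/* stands in for VORTEX_RESOURCE_LAST */

struct stream {
	unsigned char resources[RESOURCE_LAST];
};

static void clear_resources(struct stream *s)
{
	memset(s->resources, 0, sizeof(s->resources)); /* tracks the array */
}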
diff --git a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
index a01c781acdf1..55eef61a01de 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
@@ -49,10 +49,10 @@
#define BUS_DOWN 1
/*
- * 50 Milliseconds sufficient for DSP bring up in the lpass
+ * 200 Milliseconds sufficient for DSP bring up in the lpass
* after Sub System Restart
*/
-#define ADSP_STATE_READY_TIMEOUT_MS 50
+#define ADSP_STATE_READY_TIMEOUT_MS 200
#define EAR_PMD 0
#define EAR_PMU 1
diff --git a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
index 25c318c6c4e1..5f9dc9c0c392 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
@@ -215,6 +215,7 @@ static int msm_dig_cdc_codec_config_compander(struct snd_soc_codec *codec,
{
struct msm_dig_priv *dig_cdc = snd_soc_codec_get_drvdata(codec);
int comp_ch_bits_set = 0x03;
+ int comp_ch_value;
dev_dbg(codec->dev, "%s: event %d shift %d, enabled %d\n",
__func__, event, interp_n,
@@ -234,15 +235,40 @@ static int msm_dig_cdc_codec_config_compander(struct snd_soc_codec *codec,
dig_cdc->set_compander_mode(dig_cdc->handle, 0x00);
return 0;
};
+ comp_ch_value = snd_soc_read(codec,
+ MSM89XX_CDC_CORE_COMP0_B1_CTL);
+ if (interp_n == 0) {
+ if ((comp_ch_value & 0x02) == 0x02) {
+ dev_dbg(codec->dev,
+ "%s comp ch already enabled\n",
+ __func__);
+ return 0;
+ }
+ }
+ if (interp_n == 1) {
+ if ((comp_ch_value & 0x01) == 0x01) {
+ dev_dbg(codec->dev,
+ "%s comp ch already enabled\n",
+ __func__);
+ return 0;
+ }
+ }
dig_cdc->set_compander_mode(dig_cdc->handle, 0x08);
/* Enable Compander Clock */
snd_soc_update_bits(codec,
MSM89XX_CDC_CORE_COMP0_B2_CTL, 0x0F, 0x09);
snd_soc_update_bits(codec,
MSM89XX_CDC_CORE_CLK_RX_B2_CTL, 0x01, 0x01);
- snd_soc_update_bits(codec,
- MSM89XX_CDC_CORE_COMP0_B1_CTL,
- 1 << interp_n, 1 << interp_n);
+ if (dig_cdc->comp_enabled[MSM89XX_RX1]) {
+ snd_soc_update_bits(codec,
+ MSM89XX_CDC_CORE_COMP0_B1_CTL,
+ 0x02, 0x02);
+ }
+ if (dig_cdc->comp_enabled[MSM89XX_RX2]) {
+ snd_soc_update_bits(codec,
+ MSM89XX_CDC_CORE_COMP0_B1_CTL,
+ 0x01, 0x01);
+ }
snd_soc_update_bits(codec,
MSM89XX_CDC_CORE_COMP0_B3_CTL, 0xFF, 0x01);
snd_soc_update_bits(codec,
diff --git a/sound/soc/codecs/wcd-dsp-mgr.c b/sound/soc/codecs/wcd-dsp-mgr.c
index 1613c5baa9c7..f995bf22c1c3 100644
--- a/sound/soc/codecs/wcd-dsp-mgr.c
+++ b/sound/soc/codecs/wcd-dsp-mgr.c
@@ -25,7 +25,8 @@
static char *wdsp_get_cmpnt_type_string(enum wdsp_cmpnt_type);
/* Component related macros */
-#define WDSP_GET_COMPONENT(wdsp, x) (&(wdsp->cmpnts[x]))
+#define WDSP_GET_COMPONENT(wdsp, x) ((x >= WDSP_CMPNT_TYPE_MAX || x < 0) ? \
+ NULL : (&(wdsp->cmpnts[x])))
#define WDSP_GET_CMPNT_TYPE_STR(x) wdsp_get_cmpnt_type_string(x)
/*
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index 10883b0939d6..2bc911e63e12 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -4073,6 +4073,8 @@ static int tasha_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0xC0, 0xC0);
}
set_bit(HPH_PA_DELAY, &tasha->status_mask);
+ if (!(strcmp(w->name, "HPHR PA")))
+ snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x40, 0x40);
break;
case SND_SOC_DAPM_POST_PMU:
if (!(strcmp(w->name, "ANC HPHR PA"))) {
@@ -4127,6 +4129,8 @@ static int tasha_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
tasha_codec_hph_post_pa_config(tasha, hph_mode, event);
if (!(strcmp(w->name, "ANC HPHR PA")))
snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x40, 0x00);
+ if (!(strcmp(w->name, "HPHR PA")))
+ snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x40, 0x00);
break;
case SND_SOC_DAPM_POST_PMD:
/* 5ms sleep is required after PA is disabled as per
@@ -4166,6 +4170,8 @@ static int tasha_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
(test_bit(HPH_PA_DELAY, &tasha->status_mask))) {
snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0xC0, 0xC0);
}
+ if (!(strcmp(w->name, "HPHL PA")))
+ snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x80, 0x80);
set_bit(HPH_PA_DELAY, &tasha->status_mask);
break;
case SND_SOC_DAPM_POST_PMU:
@@ -4222,6 +4228,8 @@ static int tasha_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
tasha_codec_hph_post_pa_config(tasha, hph_mode, event);
if (!(strcmp(w->name, "ANC HPHL PA")))
snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x80, 0x00);
+ if (!(strcmp(w->name, "HPHL PA")))
+ snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x80, 0x00);
break;
case SND_SOC_DAPM_POST_PMD:
/* 5ms sleep is required after PA is disabled as per
@@ -4544,6 +4552,10 @@ static int tasha_codec_hphr_dac_event(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
+ if (!(strcmp(w->name, "RX INT2 DAC"))) {
+ snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x20, 0x20);
+ snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x10, 0x10);
+ }
if (tasha->anc_func) {
ret = tasha_codec_enable_anc(w, kcontrol, event);
/* 40 msec delay is needed to avoid click and pop */
@@ -4582,6 +4594,8 @@ static int tasha_codec_hphr_dac_event(struct snd_soc_dapm_widget *w,
}
break;
case SND_SOC_DAPM_PRE_PMD:
+ if (!(strcmp(w->name, "RX INT2 DAC")))
+ snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x30, 0x00);
if ((hph_mode == CLS_H_LP) &&
(TASHA_IS_1_1(wcd9xxx))) {
snd_soc_update_bits(codec, WCD9335_HPH_L_DAC_CTL,
@@ -11094,12 +11108,12 @@ static const struct snd_soc_dapm_widget tasha_dapm_widgets[] = {
0, 0, tasha_codec_ear_dac_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_DAC_E("RX INT1 DAC", NULL, WCD9335_ANA_HPH,
- 5, 0, tasha_codec_hphl_dac_event,
+ SND_SOC_DAPM_DAC_E("RX INT1 DAC", NULL, SND_SOC_NOPM,
+ 0, 0, tasha_codec_hphl_dac_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_DAC_E("RX INT2 DAC", NULL, WCD9335_ANA_HPH,
- 4, 0, tasha_codec_hphr_dac_event,
+ SND_SOC_DAPM_DAC_E("RX INT2 DAC", NULL, SND_SOC_NOPM,
+ 0, 0, tasha_codec_hphr_dac_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_DAC_E("RX INT3 DAC", NULL, SND_SOC_NOPM,
@@ -11114,11 +11128,11 @@ static const struct snd_soc_dapm_widget tasha_dapm_widgets[] = {
SND_SOC_DAPM_DAC_E("RX INT6 DAC", NULL, SND_SOC_NOPM,
0, 0, tasha_codec_lineout_dac_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_PGA_E("HPHL PA", WCD9335_ANA_HPH, 7, 0, NULL, 0,
+ SND_SOC_DAPM_PGA_E("HPHL PA", SND_SOC_NOPM, 0, 0, NULL, 0,
tasha_codec_enable_hphl_pa,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_PGA_E("HPHR PA", WCD9335_ANA_HPH, 6, 0, NULL, 0,
+ SND_SOC_DAPM_PGA_E("HPHR PA", SND_SOC_NOPM, 0, 0, NULL, 0,
tasha_codec_enable_hphr_pa,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index 26320fd01a5a..bfe471e73503 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -2014,6 +2014,8 @@ static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
snd_soc_update_bits(codec, WCD934X_ANA_RX_SUPPLIES,
0x02, 0x02);
}
+ if (!(strcmp(w->name, "HPHR PA")))
+ snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x40, 0x40);
break;
case SND_SOC_DAPM_POST_PMU:
if ((!(strcmp(w->name, "ANC HPHR PA")))) {
@@ -2112,6 +2114,8 @@ static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
0x10, 0x10);
if (!(strcmp(w->name, "ANC HPHR PA")))
snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x40, 0x00);
+ if (!(strcmp(w->name, "HPHR PA")))
+ snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x40, 0x00);
break;
case SND_SOC_DAPM_POST_PMD:
/*
@@ -2161,6 +2165,8 @@ static int tavil_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
(test_bit(HPH_PA_DELAY, &tavil->status_mask)))
snd_soc_update_bits(codec, WCD934X_ANA_HPH,
0xC0, 0xC0);
+ if (!(strcmp(w->name, "HPHL PA")))
+ snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x80, 0x80);
set_bit(HPH_PA_DELAY, &tavil->status_mask);
if (dsd_conf &&
(snd_soc_read(codec, WCD934X_CDC_DSD0_PATH_CTL) & 0x01)) {
@@ -2266,6 +2272,8 @@ static int tavil_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
if (!(strcmp(w->name, "ANC HPHL PA")))
snd_soc_update_bits(codec, WCD934X_ANA_HPH,
0x80, 0x00);
+ if (!(strcmp(w->name, "HPHL PA")))
+ snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x80, 0x00);
break;
case SND_SOC_DAPM_POST_PMD:
/*
@@ -2418,6 +2426,10 @@ static int tavil_codec_hphr_dac_event(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
+ if (!(strcmp(w->name, "RX INT2 DAC"))) {
+ snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x20, 0x20);
+ snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x10, 0x10);
+ }
if (tavil->anc_func) {
ret = tavil_codec_enable_anc(w, kcontrol, event);
/* 40 msec delay is needed to avoid click and pop */
@@ -2458,6 +2470,10 @@ static int tavil_codec_hphr_dac_event(struct snd_soc_dapm_widget *w,
WCD934X_CDC_RX2_RX_PATH_CFG0,
0x10, 0x10);
break;
+ case SND_SOC_DAPM_PRE_PMD:
+ if (!(strcmp(w->name, "RX INT2 DAC")))
+ snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x30, 0x00);
+ break;
case SND_SOC_DAPM_POST_PMD:
/* 1000us required as per HW requirement */
usleep_range(1000, 1100);
@@ -7365,12 +7381,12 @@ static const struct snd_soc_dapm_widget tavil_dapm_widgets[] = {
0, 0, tavil_codec_ear_dac_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_DAC_E("RX INT1 DAC", NULL, WCD934X_ANA_HPH,
- 5, 0, tavil_codec_hphl_dac_event,
+ SND_SOC_DAPM_DAC_E("RX INT1 DAC", NULL, SND_SOC_NOPM,
+ 0, 0, tavil_codec_hphl_dac_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_DAC_E("RX INT2 DAC", NULL, WCD934X_ANA_HPH,
- 4, 0, tavil_codec_hphr_dac_event,
+ SND_SOC_DAPM_DAC_E("RX INT2 DAC", NULL, SND_SOC_NOPM,
+ 0, 0, tavil_codec_hphr_dac_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_DAC_E("RX INT3 DAC", NULL, SND_SOC_NOPM,
@@ -7383,11 +7399,11 @@ static const struct snd_soc_dapm_widget tavil_dapm_widgets[] = {
SND_SOC_DAPM_PGA_E("EAR PA", WCD934X_ANA_EAR, 7, 0, NULL, 0,
tavil_codec_enable_ear_pa,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_PGA_E("HPHL PA", WCD934X_ANA_HPH, 7, 0, NULL, 0,
+ SND_SOC_DAPM_PGA_E("HPHL PA", SND_SOC_NOPM, 0, 0, NULL, 0,
tavil_codec_enable_hphl_pa,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_PGA_E("HPHR PA", WCD934X_ANA_HPH, 6, 0, NULL, 0,
+ SND_SOC_DAPM_PGA_E("HPHR PA", SND_SOC_NOPM, 0, 0, NULL, 0,
tavil_codec_enable_hphr_pa,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig
index 629a9c3d91db..18a5d924e282 100644
--- a/sound/soc/msm/Kconfig
+++ b/sound/soc/msm/Kconfig
@@ -22,6 +22,17 @@ config SND_SOC_QDSP6V2
audio drivers. This includes q6asm, q6adm,
q6afe interfaces to DSP using apr.
+config SND_SOC_QDSP6V2_VM
+ tristate "SoC ALSA audio driver for QDSP6V2 virtualization"
+ depends on MSM_QDSP6_APRV2_VM
+ select SND_SOC_COMPRESS
+ help
+ To add support for MSM QDSP6V2 virtualization
+ SoC audio.
+ This will enable sound soc platform specific
+ audio drivers. This includes q6asm, q6adm,
+ q6afe interfaces to DSP using virtualized apr.
+
config SND_SOC_QDSP_DEBUG
bool "QDSP Audio Driver Debug Feature"
help
@@ -70,7 +81,7 @@ config DTS_SRS_TM
config QTI_PP
bool "Enable QTI PP"
- depends on SND_SOC_MSM_QDSP6V2_INTF
+ depends on SND_SOC_MSM_QDSP6V2_INTF || SND_SOC_QDSP6V2_VM
help
To add support for default QTI post processing.
This support is to configure the post processing
@@ -192,6 +203,22 @@ config SND_SOC_MSM8996
the machine driver and the corresponding
DAI-links
+config SND_SOC_MSM8996_VM
+ tristate "SoC Machine driver for MSM8996 virtualization"
+ select SND_SOC_QDSP6V2_VM
+ select SND_SOC_MSM_STUB
+ select SND_SOC_MSM_HOSTLESS_PCM
+ select SND_DYNAMIC_MINORS
+ select MSM_QDSP6_APRV2_VM
+ select QTI_PP
+ help
+	  Enable support for SoC audio on the MSM8996
+	  virtualization platform.
+	  This enables the sound SoC drivers that
+	  interface with the DSP over virtualized APR,
+	  as well as the machine driver and the
+	  corresponding DAI links.
+
config SND_SOC_MSM8998
tristate "SoC Machine driver for MSM8998 boards"
depends on ARCH_QCOM
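# Illustrative sketch: a defconfig fragment only needs
#   CONFIG_SND_SOC_MSM8996_VM=y
# to enable the virtualized sound card, since that entry selects
# SND_SOC_QDSP6V2_VM, MSM_QDSP6_APRV2_VM, QTI_PP and the
# stub/hostless PCM helpers listed above.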
diff --git a/sound/soc/msm/Makefile b/sound/soc/msm/Makefile
index 63c4e61430c4..4d2a360ad1aa 100644
--- a/sound/soc/msm/Makefile
+++ b/sound/soc/msm/Makefile
@@ -4,9 +4,11 @@ snd-soc-hostless-pcm-objs := msm-pcm-hostless.o
obj-$(CONFIG_SND_SOC_MSM_HOSTLESS_PCM) += snd-soc-hostless-pcm.o
obj-$(CONFIG_SND_SOC_MSM_QDSP6V2_INTF) += qdsp6v2/
+obj-$(CONFIG_SND_SOC_QDSP6V2_VM) += qdsp6v2/
snd-soc-qdsp6v2-objs := msm-dai-fe.o
obj-$(CONFIG_SND_SOC_QDSP6V2) += snd-soc-qdsp6v2.o
+obj-$(CONFIG_SND_SOC_QDSP6V2_VM) += snd-soc-qdsp6v2.o
#for CPE drivers
snd-soc-cpe-objs := msm-cpe-lsm.o
@@ -16,6 +18,10 @@ obj-$(CONFIG_SND_SOC_CPE) += snd-soc-cpe.o
snd-soc-msm8996-objs := msm8996.o apq8096-auto.o
obj-$(CONFIG_SND_SOC_MSM8996) += snd-soc-msm8996.o
+# for MSM8996 virtualization sound card driver
+snd-soc-msm8996-vm-objs := apq8096-auto.o
+obj-$(CONFIG_SND_SOC_MSM8996_VM) += snd-soc-msm8996-vm.o
+
# for MSM8998 sound card driver
snd-soc-msm8998-objs := msm8998.o
obj-$(CONFIG_SND_SOC_MSM8998) += snd-soc-msm8998.o
diff --git a/sound/soc/msm/apq8096-auto.c b/sound/soc/msm/apq8096-auto.c
index b1dff8764618..f1af708f9a50 100644
--- a/sound/soc/msm/apq8096-auto.c
+++ b/sound/soc/msm/apq8096-auto.c
@@ -61,11 +61,26 @@ static int msm_quat_mi2s_rx_bit_format = SNDRV_PCM_FORMAT_S16_LE;
static int msm_sec_mi2s_rate = SAMPLING_RATE_48KHZ;
/* TDM default channels */
+static int msm_pri_tdm_tx_0_ch = 2;
+static int msm_pri_tdm_tx_1_ch = 2;
+static int msm_pri_tdm_tx_2_ch = 2;
+static int msm_pri_tdm_tx_3_ch = 2;
+
+static int msm_pri_tdm_rx_0_ch = 2;
+static int msm_pri_tdm_rx_1_ch = 2;
+static int msm_pri_tdm_rx_2_ch = 2;
+static int msm_pri_tdm_rx_3_ch = 2;
+
static int msm_sec_tdm_tx_0_ch = 2; /* STEREO MIC */
static int msm_sec_tdm_tx_1_ch = 2;
static int msm_sec_tdm_tx_2_ch = 2;
static int msm_sec_tdm_tx_3_ch = 2;
+static int msm_sec_tdm_rx_0_ch = 6;
+static int msm_sec_tdm_rx_1_ch = 1;
+static int msm_sec_tdm_rx_2_ch = 1;
+static int msm_sec_tdm_rx_3_ch;
+
static int msm_tert_tdm_rx_0_ch = 2; /* ICC STREAM */
static int msm_tert_tdm_rx_1_ch = 2;
static int msm_tert_tdm_rx_2_ch = 2;
@@ -88,11 +103,26 @@ static int msm_quat_tdm_tx_2_ch = 2; /*ENT RECORD*/
static int msm_quat_tdm_tx_3_ch;
/* TDM default bit format */
+static int msm_pri_tdm_tx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_pri_tdm_tx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_pri_tdm_tx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_pri_tdm_tx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+
+static int msm_pri_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_pri_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_pri_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_pri_tdm_rx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+
static int msm_sec_tdm_tx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
static int msm_sec_tdm_tx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
static int msm_sec_tdm_tx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
static int msm_sec_tdm_tx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_sec_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_sec_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_sec_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_sec_tdm_rx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+
static int msm_tert_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
static int msm_tert_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
static int msm_tert_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
@@ -114,6 +144,10 @@ static int msm_quat_tdm_tx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
static int msm_quat_tdm_tx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
static int msm_quat_tdm_tx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_pri_tdm_rate = SAMPLING_RATE_48KHZ;
+static int msm_pri_tdm_slot_width = 32;
+static int msm_pri_tdm_slot_num = 8;
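+/*
+ * With these defaults the primary TDM bit clock set up in
+ * apq8096_tdm_snd_hw_params() is 48000 * 32 * 8 = 12.288 MHz.
+ */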
+
/* EC Reference default values are set in mixer_paths.xml */
static int msm_ec_ref_ch = 4;
static int msm_ec_ref_bit_format = SNDRV_PCM_FORMAT_S16_LE;
@@ -174,11 +208,26 @@ enum {
SECONDARY_TDM_TX_5,
SECONDARY_TDM_TX_6,
SECONDARY_TDM_TX_7,
+ PRIMARY_TDM_RX_0,
+ PRIMARY_TDM_RX_1,
+ PRIMARY_TDM_RX_2,
+ PRIMARY_TDM_RX_3,
+ PRIMARY_TDM_RX_4,
+ PRIMARY_TDM_RX_5,
+ PRIMARY_TDM_RX_6,
+ PRIMARY_TDM_RX_7,
+ PRIMARY_TDM_TX_0,
+ PRIMARY_TDM_TX_1,
+ PRIMARY_TDM_TX_2,
+ PRIMARY_TDM_TX_3,
+ PRIMARY_TDM_TX_4,
+ PRIMARY_TDM_TX_5,
+ PRIMARY_TDM_TX_6,
+ PRIMARY_TDM_TX_7,
TDM_MAX,
};
#define TDM_SLOT_OFFSET_MAX 8
-
/* TDM default offset */
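/*
 * Each entry is the byte offset of a channel inside the TDM frame
 * (e.g. 0, 4, 8, ... for 32-bit/4-byte slots); 0xFFFF is the
 * AFE_SLOT_MAPPING_OFFSET_INVALID end-of-list marker.
 */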
static unsigned int tdm_slot_offset[TDM_MAX][TDM_SLOT_OFFSET_MAX] = {
/* QUAT_TDM_RX */
@@ -218,15 +267,33 @@ static unsigned int tdm_slot_offset[TDM_MAX][TDM_SLOT_OFFSET_MAX] = {
{0xFFFF}, /* not used */
{0xFFFF}, /* not used */
/* SEC_TDM_RX */
+ {0, 4, 8, 12, 16, 20, 0xFFFF},
+ {24, 0xFFFF},
+ {28, 0xFFFF},
{0xFFFF}, /* not used */
{0xFFFF}, /* not used */
{0xFFFF}, /* not used */
{0xFFFF}, /* not used */
{0xFFFF}, /* not used */
+ /* SEC_TDM_TX */
+ {0, 4, 0xFFFF},
+ {8, 12, 0xFFFF},
+ {16, 20, 0xFFFF},
+ {24, 28, 0xFFFF},
{0xFFFF}, /* not used */
{0xFFFF}, /* not used */
{0xFFFF}, /* not used */
- /* SEC_TDM_TX */
+ {0xFFFF}, /* not used */
+ /* PRI_TDM_RX */
+ {0, 4, 0xFFFF},
+ {8, 12, 0xFFFF},
+ {16, 20, 0xFFFF},
+ {24, 28, 0xFFFF},
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ /* PRI_TDM_TX */
{0, 4, 0xFFFF},
{8, 12, 0xFFFF},
{16, 20, 0xFFFF},
@@ -300,6 +367,24 @@ static unsigned int tdm_slot_offset_adp_mmxf[TDM_MAX][TDM_SLOT_OFFSET_MAX] = {
{0xFFFF}, /* not used */
{0xFFFF}, /* not used */
{0xFFFF}, /* not used */
+ /* PRI_TDM_RX */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ /* PRI_TDM_TX */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
};
static unsigned int tdm_slot_offset_custom[TDM_MAX][TDM_SLOT_OFFSET_MAX] = {
@@ -357,6 +442,24 @@ static unsigned int tdm_slot_offset_custom[TDM_MAX][TDM_SLOT_OFFSET_MAX] = {
{0xFFFF}, /* not used */
{0xFFFF}, /* not used */
{0xFFFF}, /* not used */
+ /* PRI_TDM_RX */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ /* PRI_TDM_TX */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
+ {0xFFFF}, /* not used */
};
static char const *hdmi_rx_ch_text[] = {"Two", "Three", "Four", "Five",
@@ -389,6 +492,14 @@ static const char *const ec_ref_rate_text[] = {"0", "8000", "16000",
static const char *const mi2s_rate_text[] = {"32000", "44100", "48000"};
+static const char *const pri_tdm_rate_text[] = {"8000", "16000", "48000"};
+
+static const char *const pri_tdm_slot_num_text[] = {"One", "Two", "Four",
+ "Eight", "Sixteen", "Thirtytwo"};
+
+static const char *const pri_tdm_slot_width_text[] = {"16", "24", "32"};
+
static struct afe_clk_set sec_mi2s_tx_clk = {
AFE_API_VERSION_I2S_CONFIG,
Q6AFE_LPASS_CLK_ID_SEC_MI2S_EBIT,
@@ -698,6 +809,150 @@ static int msm_sec_mi2s_tx_bit_format_put(struct snd_kcontrol *kcontrol,
return 0;
}
+static int msm_pri_tdm_tx_0_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_pri_tdm_tx_0_ch = %d\n", __func__,
+ msm_pri_tdm_tx_0_ch);
+ ucontrol->value.integer.value[0] = msm_pri_tdm_tx_0_ch - 1;
+ return 0;
+}
+
+static int msm_pri_tdm_tx_0_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_pri_tdm_tx_0_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_pri_tdm_tx_0_ch = %d\n", __func__,
+ msm_pri_tdm_tx_0_ch);
+ return 0;
+}
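+
+/*
+ * The channel kcontrols report a zero-based enum index while the
+ * msm_*_ch variables hold the actual channel count, hence the
+ * +/- 1 translation in each get/put handler here.
+ */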
+
+static int msm_pri_tdm_tx_1_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: pri_tdm_tx_1_ch = %d\n", __func__,
+ msm_pri_tdm_tx_1_ch);
+ ucontrol->value.integer.value[0] = msm_pri_tdm_tx_1_ch - 1;
+ return 0;
+}
+
+static int msm_pri_tdm_tx_1_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_pri_tdm_tx_1_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_pri_tdm_tx_1_ch = %d\n", __func__,
+ msm_pri_tdm_tx_1_ch);
+ return 0;
+}
+
+static int msm_pri_tdm_tx_2_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_pri_tdm_tx_2_ch = %d\n", __func__,
+ msm_pri_tdm_tx_2_ch);
+ ucontrol->value.integer.value[0] = msm_pri_tdm_tx_2_ch - 1;
+ return 0;
+}
+
+static int msm_pri_tdm_tx_2_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_pri_tdm_tx_2_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_pri_tdm_tx_2_ch = %d\n", __func__,
+ msm_pri_tdm_tx_2_ch);
+ return 0;
+}
+
+static int msm_pri_tdm_tx_3_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_pri_tdm_tx_3_ch = %d\n", __func__,
+ msm_pri_tdm_tx_3_ch);
+ ucontrol->value.integer.value[0] = msm_pri_tdm_tx_3_ch - 1;
+ return 0;
+}
+
+static int msm_pri_tdm_tx_3_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_pri_tdm_tx_3_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_pri_tdm_tx_3_ch = %d\n", __func__,
+ msm_pri_tdm_tx_3_ch);
+ return 0;
+}
+
+static int msm_pri_tdm_rx_0_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_pri_tdm_rx_0_ch = %d\n", __func__,
+ msm_pri_tdm_rx_0_ch);
+ ucontrol->value.integer.value[0] = msm_pri_tdm_rx_0_ch - 1;
+ return 0;
+}
+
+static int msm_pri_tdm_rx_0_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_pri_tdm_rx_0_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_pri_tdm_rx_0_ch = %d\n", __func__,
+ msm_pri_tdm_rx_0_ch);
+ return 0;
+}
+
+static int msm_pri_tdm_rx_1_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_pri_tdm_rx_1_ch = %d\n", __func__,
+ msm_pri_tdm_rx_1_ch);
+ ucontrol->value.integer.value[0] = msm_pri_tdm_rx_1_ch - 1;
+ return 0;
+}
+
+static int msm_pri_tdm_rx_1_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_pri_tdm_rx_1_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_pri_tdm_rx_1_ch = %d\n", __func__,
+ msm_pri_tdm_rx_1_ch);
+ return 0;
+}
+
+static int msm_pri_tdm_rx_2_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_pri_tdm_rx_2_ch = %d\n", __func__,
+ msm_pri_tdm_rx_2_ch);
+ ucontrol->value.integer.value[0] = msm_pri_tdm_rx_2_ch - 1;
+ return 0;
+}
+
+static int msm_pri_tdm_rx_2_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_pri_tdm_rx_2_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_pri_tdm_rx_2_ch = %d\n", __func__,
+ msm_pri_tdm_rx_2_ch);
+ return 0;
+}
+
+static int msm_pri_tdm_rx_3_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_pri_tdm_rx_3_ch = %d\n", __func__,
+ msm_pri_tdm_rx_3_ch);
+ ucontrol->value.integer.value[0] = msm_pri_tdm_rx_3_ch - 1;
+ return 0;
+}
+
+static int msm_pri_tdm_rx_3_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_pri_tdm_rx_3_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_pri_tdm_rx_3_ch = %d\n", __func__,
+ msm_pri_tdm_rx_3_ch);
+ return 0;
+}
+
static int msm_sec_mi2s_rate_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -728,6 +983,246 @@ static int msm_sec_mi2s_rate_put(struct snd_kcontrol *kcontrol,
return 0;
}
+static int msm_pri_tdm_rate_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = msm_pri_tdm_rate;
+ pr_debug("%s: msm_pri_tdm_rate = %d\n", __func__, msm_pri_tdm_rate);
+ return 0;
+}
+
+static int msm_pri_tdm_rate_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 0:
+ msm_pri_tdm_rate = SAMPLING_RATE_8KHZ;
+ break;
+ case 1:
+ msm_pri_tdm_rate = SAMPLING_RATE_16KHZ;
+ break;
+ case 2:
+ msm_pri_tdm_rate = SAMPLING_RATE_48KHZ;
+ break;
+ default:
+ msm_pri_tdm_rate = SAMPLING_RATE_48KHZ;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_rate = %d\n",
+ __func__, msm_pri_tdm_rate);
+ return 0;
+}
+
+static int msm_pri_tdm_slot_width_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = msm_pri_tdm_slot_width;
+ pr_debug("%s: msm_pri_tdm_slot_width = %d\n",
+ __func__, msm_pri_tdm_slot_width);
+ return 0;
+}
+
+static int msm_pri_tdm_slot_width_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 0:
+ msm_pri_tdm_slot_width = 16;
+ break;
+ case 1:
+ msm_pri_tdm_slot_width = 24;
+ break;
+ case 2:
+ msm_pri_tdm_slot_width = 32;
+ break;
+ default:
+ msm_pri_tdm_slot_width = 32;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_slot_width= %d\n",
+ __func__, msm_pri_tdm_slot_width);
+ return 0;
+}
+
+static int msm_pri_tdm_slot_num_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_pri_tdm_slot_num) {
+ case 1:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ case 2:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case 4:
+ ucontrol->value.integer.value[0] = 2;
+ break;
+ case 8:
+ ucontrol->value.integer.value[0] = 3;
+ break;
+ case 16:
+ ucontrol->value.integer.value[0] = 4;
+ break;
+ case 32:
+ default:
+ ucontrol->value.integer.value[0] = 5;
+ break;
+ }
+
+ pr_debug("%s: msm_pri_tdm_slot_num = %d\n",
+ __func__, msm_pri_tdm_slot_num);
+ return 0;
+}
+
+static int msm_pri_tdm_slot_num_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 0:
+ msm_pri_tdm_slot_num = 1;
+ break;
+ case 1:
+ msm_pri_tdm_slot_num = 2;
+ break;
+ case 2:
+ msm_pri_tdm_slot_num = 4;
+ break;
+ case 3:
+ msm_pri_tdm_slot_num = 8;
+ break;
+ case 4:
+ msm_pri_tdm_slot_num = 16;
+ break;
+ case 5:
+ msm_pri_tdm_slot_num = 32;
+ break;
+ default:
+ msm_pri_tdm_slot_num = 8;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_slot_num = %d\n",
+ __func__, msm_pri_tdm_slot_num);
+ return 0;
+}
+
+static int msm_tdm_slot_mapping_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_multi_mixer_control *mc =
+ (struct soc_multi_mixer_control *)kcontrol->private_value;
+ unsigned int *slot_offset;
+ int i;
+
+ if (mc->shift >= TDM_MAX) {
+ pr_err("%s invalid port index %d\n", __func__, mc->shift);
+ return -EINVAL;
+ }
+
+ slot_offset = tdm_slot_offset[mc->shift];
+ for (i = 0; i < TDM_SLOT_OFFSET_MAX; i++) {
+ ucontrol->value.integer.value[i] = slot_offset[i];
+ pr_debug("%s port index %d offset %d value %d\n",
+ __func__, mc->shift, i, slot_offset[i]);
+ }
+
+ return 0;
+}
+
+static int msm_tdm_slot_mapping_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_multi_mixer_control *mc =
+ (struct soc_multi_mixer_control *)kcontrol->private_value;
+ unsigned int *slot_offset;
+ int i;
+
+ if (mc->shift >= TDM_MAX) {
+ pr_err("%s invalid port index %d\n", __func__, mc->shift);
+ return -EINVAL;
+ }
+
+ slot_offset = tdm_slot_offset[mc->shift];
+
+ for (i = 0; i < TDM_SLOT_OFFSET_MAX; i++) {
+ slot_offset[i] = ucontrol->value.integer.value[i];
+ pr_debug("%s port index %d offset %d value %d\n",
+ __func__, mc->shift, i, slot_offset[i]);
+ }
+
+ return 0;
+}
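+
+/*
+ * Each "Slot Mapping" control carries TDM_SLOT_OFFSET_MAX (8) byte
+ * offsets for the port selected by mc->shift; unused entries are
+ * written as 0xFFFF, e.g. "0 4 65535 ..." maps two 32-bit slots.
+ */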
+
+static int msm_sec_tdm_rx_0_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_sec_tdm_rx_0_ch = %d\n", __func__,
+ msm_sec_tdm_rx_0_ch);
+ ucontrol->value.integer.value[0] = msm_sec_tdm_rx_0_ch - 1;
+ return 0;
+}
+
+static int msm_sec_tdm_rx_0_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_sec_tdm_rx_0_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_sec_tdm_rx_0_ch = %d\n", __func__,
+ msm_sec_tdm_rx_0_ch);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_1_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_sec_tdm_rx_1_ch = %d\n", __func__,
+ msm_sec_tdm_rx_1_ch);
+ ucontrol->value.integer.value[0] = msm_sec_tdm_rx_1_ch - 1;
+ return 0;
+}
+
+static int msm_sec_tdm_rx_1_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_sec_tdm_rx_1_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_sec_tdm_rx_1_ch = %d\n", __func__,
+ msm_sec_tdm_rx_1_ch);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_2_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_sec_tdm_rx_2_ch = %d\n", __func__,
+ msm_sec_tdm_rx_2_ch);
+ ucontrol->value.integer.value[0] = msm_sec_tdm_rx_2_ch - 1;
+ return 0;
+}
+
+static int msm_sec_tdm_rx_2_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_sec_tdm_rx_2_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_sec_tdm_rx_2_ch = %d\n", __func__,
+ msm_sec_tdm_rx_2_ch);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_3_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ pr_debug("%s: msm_sec_tdm_rx_3_ch = %d\n", __func__,
+ msm_sec_tdm_rx_3_ch);
+ ucontrol->value.integer.value[0] = msm_sec_tdm_rx_3_ch - 1;
+ return 0;
+}
+
+static int msm_sec_tdm_rx_3_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_sec_tdm_rx_3_ch = ucontrol->value.integer.value[0] + 1;
+ pr_debug("%s: msm_sec_tdm_rx_3_ch = %d\n", __func__,
+ msm_sec_tdm_rx_3_ch);
+ return 0;
+}
static int msm_sec_tdm_tx_0_ch_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
@@ -1107,6 +1602,414 @@ static int msm_quat_tdm_tx_3_ch_put(struct snd_kcontrol *kcontrol,
return 0;
}
+static int msm_pri_tdm_tx_0_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_pri_tdm_tx_0_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_tx_0_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_pri_tdm_tx_0_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_pri_tdm_tx_0_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_pri_tdm_tx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_tx_0_bit_format = %d\n",
+ __func__, msm_pri_tdm_tx_0_bit_format);
+ return 0;
+}
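+
+/*
+ * The bit-format handlers map enum index 0 to S16_LE and 1 to
+ * S24_LE (anything else falls back to S16_LE); the same pattern
+ * repeats for every TDM port below.
+ */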
+
+static int msm_pri_tdm_tx_1_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_pri_tdm_tx_1_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_tx_1_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_pri_tdm_tx_1_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_pri_tdm_tx_1_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_pri_tdm_tx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_tx_1_bit_format = %d\n",
+ __func__, msm_pri_tdm_tx_1_bit_format);
+ return 0;
+}
+
+static int msm_pri_tdm_tx_2_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_pri_tdm_tx_2_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_tx_2_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_pri_tdm_tx_2_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_pri_tdm_tx_2_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_pri_tdm_tx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_tx_2_bit_format = %d\n",
+ __func__, msm_pri_tdm_tx_2_bit_format);
+ return 0;
+}
+
+static int msm_pri_tdm_tx_3_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_pri_tdm_tx_3_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_tx_3_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_pri_tdm_tx_3_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_pri_tdm_tx_3_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_pri_tdm_tx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_tx_3_bit_format = %d\n",
+ __func__, msm_pri_tdm_tx_3_bit_format);
+ return 0;
+}
+
+static int msm_pri_tdm_rx_0_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_pri_tdm_rx_0_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_rx_0_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_pri_tdm_rx_0_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_pri_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_pri_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_rx_0_bit_format = %d\n",
+ __func__, msm_pri_tdm_rx_0_bit_format);
+ return 0;
+}
+
+static int msm_pri_tdm_rx_1_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_pri_tdm_rx_1_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_rx_1_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_pri_tdm_rx_1_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_pri_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_pri_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_rx_1_bit_format = %d\n",
+ __func__, msm_pri_tdm_rx_1_bit_format);
+ return 0;
+}
+
+static int msm_pri_tdm_rx_2_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_pri_tdm_rx_2_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_rx_2_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_pri_tdm_rx_2_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_pri_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_pri_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_rx_2_bit_format = %d\n",
+ __func__, msm_pri_tdm_rx_2_bit_format);
+ return 0;
+}
+
+static int msm_pri_tdm_rx_3_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_pri_tdm_rx_3_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_rx_3_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_pri_tdm_rx_3_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_pri_tdm_rx_3_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_pri_tdm_rx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_pri_tdm_rx_3_bit_format = %d\n",
+ __func__, msm_pri_tdm_rx_3_bit_format);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_0_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_sec_tdm_rx_0_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_rx_0_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_0_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_sec_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_sec_tdm_rx_0_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_rx_0_bit_format = %d\n",
+ __func__, msm_sec_tdm_rx_0_bit_format);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_1_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_sec_tdm_rx_1_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_rx_1_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_1_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_sec_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_sec_tdm_rx_1_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_rx_1_bit_format = %d\n",
+ __func__, msm_sec_tdm_rx_1_bit_format);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_2_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_sec_tdm_rx_2_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_rx_2_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_2_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_sec_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_sec_tdm_rx_2_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_rx_2_bit_format = %d\n",
+ __func__, msm_sec_tdm_rx_2_bit_format);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_3_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_sec_tdm_rx_3_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_rx_3_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_sec_tdm_rx_3_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ msm_sec_tdm_rx_3_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ msm_sec_tdm_rx_3_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: msm_sec_tdm_rx_3_bit_format = %d\n",
+ __func__, msm_sec_tdm_rx_3_bit_format);
+ return 0;
+}
+
static int msm_sec_tdm_tx_0_bit_format_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -2070,7 +2973,77 @@ static int msm_tdm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_interval *channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
+ rate->min = rate->max = SAMPLING_RATE_48KHZ;
+
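+	/*
+	 * 48 kHz is the default for every TDM back end; the primary TDM
+	 * cases below override it with the mixer-selected
+	 * msm_pri_tdm_rate.
+	 */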
switch (cpu_dai->id) {
+ case AFE_PORT_ID_PRIMARY_TDM_TX:
+ channels->min = channels->max = msm_pri_tdm_tx_0_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_pri_tdm_tx_0_bit_format);
+ rate->min = rate->max = msm_pri_tdm_rate;
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_TX_1:
+ channels->min = channels->max = msm_pri_tdm_tx_1_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_pri_tdm_tx_1_bit_format);
+ rate->min = rate->max = msm_pri_tdm_rate;
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_TX_2:
+ channels->min = channels->max = msm_pri_tdm_tx_2_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_pri_tdm_tx_2_bit_format);
+ rate->min = rate->max = msm_pri_tdm_rate;
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_TX_3:
+ channels->min = channels->max = msm_pri_tdm_tx_3_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_pri_tdm_tx_3_bit_format);
+ rate->min = rate->max = msm_pri_tdm_rate;
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_RX:
+ channels->min = channels->max = msm_pri_tdm_rx_0_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_pri_tdm_rx_0_bit_format);
+ rate->min = rate->max = msm_pri_tdm_rate;
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_RX_1:
+ channels->min = channels->max = msm_pri_tdm_rx_1_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_pri_tdm_rx_1_bit_format);
+ rate->min = rate->max = msm_pri_tdm_rate;
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_RX_2:
+ channels->min = channels->max = msm_pri_tdm_rx_2_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_pri_tdm_rx_2_bit_format);
+ rate->min = rate->max = msm_pri_tdm_rate;
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_RX_3:
+ channels->min = channels->max = msm_pri_tdm_rx_3_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_pri_tdm_rx_3_bit_format);
+ rate->min = rate->max = msm_pri_tdm_rate;
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_RX:
+ channels->min = channels->max = msm_sec_tdm_rx_0_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_sec_tdm_rx_0_bit_format);
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_RX_1:
+ channels->min = channels->max = msm_sec_tdm_rx_1_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_sec_tdm_rx_1_bit_format);
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_RX_2:
+ channels->min = channels->max = msm_sec_tdm_rx_2_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_sec_tdm_rx_2_bit_format);
+ break;
+ case AFE_PORT_ID_SECONDARY_TDM_RX_3:
+ channels->min = channels->max = msm_sec_tdm_rx_3_ch;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ msm_sec_tdm_rx_3_bit_format);
+ break;
case AFE_PORT_ID_SECONDARY_TDM_TX:
channels->min = channels->max = msm_sec_tdm_tx_0_ch;
param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
@@ -2181,7 +3154,6 @@ static int msm_tdm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
__func__, cpu_dai->id);
return -EINVAL;
}
- rate->min = rate->max = SAMPLING_RATE_48KHZ;
pr_debug("%s: dai id = 0x%x channels = %d rate = %d format = 0x%x\n",
__func__, cpu_dai->id, channels->max, rate->max,
@@ -2300,99 +3272,18 @@ static struct snd_soc_ops apq8096_mi2s_be_ops = {
.shutdown = apq8096_mi2s_snd_shutdown,
};
-static unsigned int tdm_param_set_slot_mask(u16 port_id,
- int slot_width, int slots)
+static unsigned int tdm_param_set_slot_mask(int slots)
{
unsigned int slot_mask = 0;
- int upper, lower, i, j;
- unsigned int *slot_offset;
-
- switch (port_id) {
- case AFE_PORT_ID_SECONDARY_TDM_RX:
- case AFE_PORT_ID_SECONDARY_TDM_RX_1:
- case AFE_PORT_ID_SECONDARY_TDM_RX_2:
- case AFE_PORT_ID_SECONDARY_TDM_RX_3:
- case AFE_PORT_ID_SECONDARY_TDM_RX_4:
- case AFE_PORT_ID_SECONDARY_TDM_RX_5:
- case AFE_PORT_ID_SECONDARY_TDM_RX_6:
- case AFE_PORT_ID_SECONDARY_TDM_RX_7:
- lower = SECONDARY_TDM_RX_0;
- upper = SECONDARY_TDM_RX_7;
- break;
- case AFE_PORT_ID_SECONDARY_TDM_TX:
- case AFE_PORT_ID_SECONDARY_TDM_TX_1:
- case AFE_PORT_ID_SECONDARY_TDM_TX_2:
- case AFE_PORT_ID_SECONDARY_TDM_TX_3:
- case AFE_PORT_ID_SECONDARY_TDM_TX_4:
- case AFE_PORT_ID_SECONDARY_TDM_TX_5:
- case AFE_PORT_ID_SECONDARY_TDM_TX_6:
- case AFE_PORT_ID_SECONDARY_TDM_TX_7:
- lower = SECONDARY_TDM_TX_0;
- upper = SECONDARY_TDM_TX_7;
- break;
- case AFE_PORT_ID_TERTIARY_TDM_RX:
- case AFE_PORT_ID_TERTIARY_TDM_RX_1:
- case AFE_PORT_ID_TERTIARY_TDM_RX_2:
- case AFE_PORT_ID_TERTIARY_TDM_RX_3:
- case AFE_PORT_ID_TERTIARY_TDM_RX_4:
- case AFE_PORT_ID_TERTIARY_TDM_RX_5:
- case AFE_PORT_ID_TERTIARY_TDM_RX_6:
- case AFE_PORT_ID_TERTIARY_TDM_RX_7:
- lower = TERTIARY_TDM_RX_0;
- upper = TERTIARY_TDM_RX_7;
- break;
- case AFE_PORT_ID_TERTIARY_TDM_TX:
- case AFE_PORT_ID_TERTIARY_TDM_TX_1:
- case AFE_PORT_ID_TERTIARY_TDM_TX_2:
- case AFE_PORT_ID_TERTIARY_TDM_TX_3:
- case AFE_PORT_ID_TERTIARY_TDM_TX_4:
- case AFE_PORT_ID_TERTIARY_TDM_TX_5:
- case AFE_PORT_ID_TERTIARY_TDM_TX_6:
- case AFE_PORT_ID_TERTIARY_TDM_TX_7:
- lower = TERTIARY_TDM_TX_0;
- upper = TERTIARY_TDM_TX_7;
- break;
- case AFE_PORT_ID_QUATERNARY_TDM_RX:
- case AFE_PORT_ID_QUATERNARY_TDM_RX_1:
- case AFE_PORT_ID_QUATERNARY_TDM_RX_2:
- case AFE_PORT_ID_QUATERNARY_TDM_RX_3:
- case AFE_PORT_ID_QUATERNARY_TDM_RX_4:
- case AFE_PORT_ID_QUATERNARY_TDM_RX_5:
- case AFE_PORT_ID_QUATERNARY_TDM_RX_6:
- case AFE_PORT_ID_QUATERNARY_TDM_RX_7:
- lower = QUATERNARY_TDM_RX_0;
- upper = QUATERNARY_TDM_RX_7;
- break;
- case AFE_PORT_ID_QUATERNARY_TDM_TX:
- case AFE_PORT_ID_QUATERNARY_TDM_TX_1:
- case AFE_PORT_ID_QUATERNARY_TDM_TX_2:
- case AFE_PORT_ID_QUATERNARY_TDM_TX_3:
- case AFE_PORT_ID_QUATERNARY_TDM_TX_4:
- case AFE_PORT_ID_QUATERNARY_TDM_TX_5:
- case AFE_PORT_ID_QUATERNARY_TDM_TX_6:
- case AFE_PORT_ID_QUATERNARY_TDM_TX_7:
- lower = QUATERNARY_TDM_TX_0;
- upper = QUATERNARY_TDM_TX_7;
- break;
- default:
- return slot_mask;
- }
+ unsigned int i = 0;
- for (i = lower; i <= upper; i++) {
- slot_offset = tdm_slot_offset[i];
- for (j = 0; j < TDM_SLOT_OFFSET_MAX; j++) {
- if (slot_offset[j] != AFE_SLOT_MAPPING_OFFSET_INVALID)
- /*
- * set the mask of active slot according to
- * the offset table for the group of devices
- */
- slot_mask |=
- (1 << ((slot_offset[j] * 8) / slot_width));
- else
- break;
- }
+ if ((slots != 16) && (slots != 8)) {
+ pr_err("%s: invalid slot number %d\n", __func__, slots);
+		return 0; /* zero mask, so the caller's !slot_mask check fires */
}
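+	/* e.g. slots = 8 gives slot_mask = 0xFF, slots = 16 gives 0xFFFF */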
+ for (i = 0; i < slots ; i++)
+ slot_mask |= 1 << i;
return slot_mask;
}
@@ -2402,14 +3293,16 @@ static int apq8096_tdm_snd_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
int ret = 0;
- int channels, slot_width, slots;
+ int channels, slot_width, slots, rate;
unsigned int slot_mask;
unsigned int *slot_offset;
int offset_channels = 0;
int i;
+ int clk_freq;
pr_debug("%s: dai id = 0x%x\n", __func__, cpu_dai->id);
+ rate = params_rate(params);
channels = params_channels(params);
if (channels < 1 || channels > 8) {
pr_err("%s: invalid param channels %d\n",
@@ -2435,15 +3328,88 @@ static int apq8096_tdm_snd_hw_params(struct snd_pcm_substream *substream,
}
slots = msm_tdm_num_slots;
- slot_mask = tdm_param_set_slot_mask(cpu_dai->id,
- slot_width, slots);
- if (!slot_mask) {
- pr_err("%s: invalid slot_mask 0x%x\n",
- __func__, slot_mask);
- return -EINVAL;
- }
switch (cpu_dai->id) {
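+	/*
+	 * All PRIMARY TDM ports take their slot count and width from the
+	 * PRI_TDM mixer controls; the other ports keep msm_tdm_num_slots
+	 * and the slot width derived earlier in this function.
+	 */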
+ case AFE_PORT_ID_PRIMARY_TDM_RX:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_RX_0];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_RX_1:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_RX_1];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_RX_2:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_RX_2];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_RX_3:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_RX_3];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_RX_4:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_RX_4];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_RX_5:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_RX_5];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_RX_6:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_RX_6];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_RX_7:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_RX_7];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_TX:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_TX_0];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_TX_1:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_TX_1];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_TX_2:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_TX_2];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_TX_3:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_TX_3];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_TX_4:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_TX_4];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_TX_5:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_TX_5];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_TX_6:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_TX_6];
+ break;
+ case AFE_PORT_ID_PRIMARY_TDM_TX_7:
+ slots = msm_pri_tdm_slot_num;
+ slot_width = msm_pri_tdm_slot_width;
+ slot_offset = tdm_slot_offset[PRIMARY_TDM_TX_7];
+ break;
case AFE_PORT_ID_SECONDARY_TDM_RX:
slot_offset = tdm_slot_offset[SECONDARY_TDM_RX_0];
break;
@@ -2613,6 +3579,13 @@ static int apq8096_tdm_snd_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
+ slot_mask = tdm_param_set_slot_mask(slots);
+ if (!slot_mask) {
+ pr_err("%s: invalid slot_mask 0x%x\n",
+ __func__, slot_mask);
+ return -EINVAL;
+ }
+
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0, slot_mask,
slots, slot_width);
@@ -2647,6 +3620,13 @@ static int apq8096_tdm_snd_hw_params(struct snd_pcm_substream *substream,
}
}
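+	/*
+	 * TDM bit clock = sample rate * slot width * slot count, e.g.
+	 * 48000 * 32 * 8 = 12.288 MHz with the primary TDM defaults.
+	 */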
+ clk_freq = rate * slot_width * slots;
+ ret = snd_soc_dai_set_sysclk(cpu_dai, 0, clk_freq, SND_SOC_CLOCK_OUT);
+ if (ret < 0) {
+ pr_err("%s: failed to set tdm clk, err:%d\n",
+ __func__, ret);
+ }
+
end:
return ret;
}
@@ -2668,6 +3648,9 @@ static const struct soc_enum msm_snd_enum[] = {
SOC_ENUM_SINGLE_EXT(3, ec_ref_bit_format_text),
SOC_ENUM_SINGLE_EXT(9, ec_ref_rate_text),
SOC_ENUM_SINGLE_EXT(3, mi2s_rate_text),
+ SOC_ENUM_SINGLE_EXT(3, pri_tdm_rate_text),
+ SOC_ENUM_SINGLE_EXT(6, pri_tdm_slot_num_text),
+ SOC_ENUM_SINGLE_EXT(3, pri_tdm_slot_width_text),
};
static const struct snd_kcontrol_new msm_snd_controls[] = {
@@ -2681,6 +3664,30 @@ static const struct snd_kcontrol_new msm_snd_controls[] = {
msm_proxy_rx_ch_get, msm_proxy_rx_ch_put),
SOC_ENUM_EXT("HDMI_RX SampleRate", msm_snd_enum[4],
hdmi_rx_sample_rate_get, hdmi_rx_sample_rate_put),
+ SOC_ENUM_EXT("PRI_TDM_TX_0 Channels", msm_snd_enum[5],
+ msm_pri_tdm_tx_0_ch_get, msm_pri_tdm_tx_0_ch_put),
+ SOC_ENUM_EXT("PRI_TDM_TX_1 Channels", msm_snd_enum[5],
+ msm_pri_tdm_tx_1_ch_get, msm_pri_tdm_tx_1_ch_put),
+ SOC_ENUM_EXT("PRI_TDM_TX_2 Channels", msm_snd_enum[5],
+ msm_pri_tdm_tx_2_ch_get, msm_pri_tdm_tx_2_ch_put),
+ SOC_ENUM_EXT("PRI_TDM_TX_3 Channels", msm_snd_enum[5],
+ msm_pri_tdm_tx_3_ch_get, msm_pri_tdm_tx_3_ch_put),
+ SOC_ENUM_EXT("PRI_TDM_RX_0 Channels", msm_snd_enum[5],
+ msm_pri_tdm_rx_0_ch_get, msm_pri_tdm_rx_0_ch_put),
+ SOC_ENUM_EXT("PRI_TDM_RX_1 Channels", msm_snd_enum[5],
+ msm_pri_tdm_rx_1_ch_get, msm_pri_tdm_rx_1_ch_put),
+ SOC_ENUM_EXT("PRI_TDM_RX_2 Channels", msm_snd_enum[5],
+ msm_pri_tdm_rx_2_ch_get, msm_pri_tdm_rx_2_ch_put),
+ SOC_ENUM_EXT("PRI_TDM_RX_3 Channels", msm_snd_enum[5],
+ msm_pri_tdm_rx_3_ch_get, msm_pri_tdm_rx_3_ch_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_0 Channels", msm_snd_enum[5],
+ msm_sec_tdm_rx_0_ch_get, msm_sec_tdm_rx_0_ch_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_1 Channels", msm_snd_enum[5],
+ msm_sec_tdm_rx_1_ch_get, msm_sec_tdm_rx_1_ch_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_2 Channels", msm_snd_enum[5],
+ msm_sec_tdm_rx_2_ch_get, msm_sec_tdm_rx_2_ch_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_3 Channels", msm_snd_enum[5],
+ msm_sec_tdm_rx_3_ch_get, msm_sec_tdm_rx_3_ch_put),
SOC_ENUM_EXT("SEC_TDM_TX_0 Channels", msm_snd_enum[5],
msm_sec_tdm_tx_0_ch_get, msm_sec_tdm_tx_0_ch_put),
SOC_ENUM_EXT("SEC_TDM_TX_1 Channels", msm_snd_enum[5],
@@ -2723,6 +3730,42 @@ static const struct snd_kcontrol_new msm_snd_controls[] = {
msm_quat_tdm_tx_2_ch_get, msm_quat_tdm_tx_2_ch_put),
SOC_ENUM_EXT("QUAT_TDM_TX_3 Channels", msm_snd_enum[5],
msm_quat_tdm_tx_3_ch_get, msm_quat_tdm_tx_3_ch_put),
+ SOC_ENUM_EXT("PRI_TDM_TX_0 Bit Format", msm_snd_enum[6],
+ msm_pri_tdm_tx_0_bit_format_get,
+ msm_pri_tdm_tx_0_bit_format_put),
+ SOC_ENUM_EXT("PRI_TDM_TX_1 Bit Format", msm_snd_enum[6],
+ msm_pri_tdm_tx_1_bit_format_get,
+ msm_pri_tdm_tx_1_bit_format_put),
+ SOC_ENUM_EXT("PRI_TDM_TX_2 Bit Format", msm_snd_enum[6],
+ msm_pri_tdm_tx_2_bit_format_get,
+ msm_pri_tdm_tx_2_bit_format_put),
+ SOC_ENUM_EXT("PRI_TDM_TX_3 Bit Format", msm_snd_enum[6],
+ msm_pri_tdm_tx_3_bit_format_get,
+ msm_pri_tdm_tx_3_bit_format_put),
+ SOC_ENUM_EXT("PRI_TDM_RX_0 Bit Format", msm_snd_enum[6],
+ msm_pri_tdm_rx_0_bit_format_get,
+ msm_pri_tdm_rx_0_bit_format_put),
+ SOC_ENUM_EXT("PRI_TDM_RX_1 Bit Format", msm_snd_enum[6],
+ msm_pri_tdm_rx_1_bit_format_get,
+ msm_pri_tdm_rx_1_bit_format_put),
+ SOC_ENUM_EXT("PRI_TDM_RX_2 Bit Format", msm_snd_enum[6],
+ msm_pri_tdm_rx_2_bit_format_get,
+ msm_pri_tdm_rx_2_bit_format_put),
+ SOC_ENUM_EXT("PRI_TDM_RX_3 Bit Format", msm_snd_enum[6],
+ msm_pri_tdm_rx_3_bit_format_get,
+ msm_pri_tdm_rx_3_bit_format_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_0 Bit Format", msm_snd_enum[6],
+ msm_sec_tdm_rx_0_bit_format_get,
+ msm_sec_tdm_rx_0_bit_format_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_1 Bit Format", msm_snd_enum[6],
+ msm_sec_tdm_rx_1_bit_format_get,
+ msm_sec_tdm_rx_1_bit_format_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_2 Bit Format", msm_snd_enum[6],
+ msm_sec_tdm_rx_2_bit_format_get,
+ msm_sec_tdm_rx_2_bit_format_put),
+ SOC_ENUM_EXT("SEC_TDM_RX_3 Bit Format", msm_snd_enum[6],
+ msm_sec_tdm_rx_3_bit_format_get,
+ msm_sec_tdm_rx_3_bit_format_put),
SOC_ENUM_EXT("SEC_TDM_TX_0 Bit Format", msm_snd_enum[6],
msm_sec_tdm_tx_0_bit_format_get,
msm_sec_tdm_tx_0_bit_format_put),
@@ -2797,6 +3840,268 @@ static const struct snd_kcontrol_new msm_snd_controls[] = {
msm_sec_mi2s_tx_bit_format_put),
SOC_ENUM_EXT("SEC_MI2S_TX SampleRate", msm_snd_enum[11],
msm_sec_mi2s_rate_get, msm_sec_mi2s_rate_put),
+ SOC_ENUM_EXT("PRI_TDM SampleRate", msm_snd_enum[12],
+ msm_pri_tdm_rate_get, msm_pri_tdm_rate_put),
+ SOC_ENUM_EXT("PRI_TDM Slot Number", msm_snd_enum[13],
+ msm_pri_tdm_slot_num_get, msm_pri_tdm_slot_num_put),
+ SOC_ENUM_EXT("PRI_TDM Slot Width", msm_snd_enum[14],
+ msm_pri_tdm_slot_width_get, msm_pri_tdm_slot_width_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_0 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_RX_0, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_1 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_RX_1, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_2 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_RX_2, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_3 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_RX_3, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_4 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_RX_4, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_5 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_RX_5, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_6 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_RX_6, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_RX_7 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_RX_7, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_0 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_TX_0, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_1 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_TX_1, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_2 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_TX_2, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_3 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_TX_3, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_4 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_TX_4, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_5 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_TX_5, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_6 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_TX_6, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("PRI_TDM_TX_7 Slot Mapping", SND_SOC_NOPM,
+ PRIMARY_TDM_TX_7, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_0 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_RX_0, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_1 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_RX_1, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_2 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_RX_2, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_3 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_RX_3, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_4 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_RX_4, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_5 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_RX_5, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_6 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_RX_6, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_RX_7 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_RX_7, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_0 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_TX_0, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_1 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_TX_1, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_2 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_TX_2, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_3 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_TX_3, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_4 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_TX_4, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_5 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_TX_5, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_6 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_TX_6, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("SEC_TDM_TX_7 Slot Mapping", SND_SOC_NOPM,
+ SECONDARY_TDM_TX_7, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_0 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_RX_0, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_1 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_RX_1, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_2 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_RX_2, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_3 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_RX_3, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_4 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_RX_4, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_5 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_RX_5, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_6 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_RX_6, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_RX_7 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_RX_7, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_0 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_TX_0, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_1 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_TX_1, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_2 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_TX_2, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_3 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_TX_3, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_4 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_TX_4, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_5 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_TX_5, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_6 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_TX_6, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("TERT_TDM_TX_7 Slot Mapping", SND_SOC_NOPM,
+ TERTIARY_TDM_TX_7, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_0 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_RX_0, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_1 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_RX_1, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_2 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_RX_2, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_3 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_RX_3, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_4 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_RX_4, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_5 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_RX_5, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_6 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_RX_6, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_RX_7 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_RX_7, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_0 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_TX_0, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_1 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_TX_1, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_2 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_TX_2, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_3 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_TX_3, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_4 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_TX_4, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_5 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_TX_5, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_6 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_TX_6, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
+ SOC_SINGLE_MULTI_EXT("QUAT_TDM_TX_7 Slot Mapping", SND_SOC_NOPM,
+ QUATERNARY_TDM_TX_7, 0xFFFF,
+ 0, 8, msm_tdm_slot_mapping_get,
+ msm_tdm_slot_mapping_put),
SOC_ENUM_EXT("EC Reference Channels", msm_snd_enum[8],
msm_ec_ref_ch_get, msm_ec_ref_ch_put),
SOC_ENUM_EXT("EC Reference Bit Format", msm_snd_enum[9],
@@ -3806,6 +5111,126 @@ static struct snd_soc_dai_link apq8096_auto_fe_dai_links[] = {
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
},
+ {
+ .name = "Primary TDM RX 0 Hostless",
+ .stream_name = "Primary TDM RX 0 Hostless",
+ .cpu_dai_name = "PRI_TDM_RX_0_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Primary TDM RX 1 Hostless",
+ .stream_name = "Primary TDM RX 1 Hostless",
+ .cpu_dai_name = "PRI_TDM_RX_1_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Primary TDM RX 2 Hostless",
+ .stream_name = "Primary TDM RX 2 Hostless",
+ .cpu_dai_name = "PRI_TDM_RX_2_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Primary TDM RX 3 Hostless",
+ .stream_name = "Primary TDM RX 3 Hostless",
+ .cpu_dai_name = "PRI_TDM_RX_3_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Primary TDM TX 0 Hostless",
+ .stream_name = "Primary TDM TX 0 Hostless",
+ .cpu_dai_name = "PRI_TDM_TX_0_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Primary TDM TX 1 Hostless",
+ .stream_name = "Primary TDM TX 1 Hostless",
+ .cpu_dai_name = "PRI_TDM_TX_1_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Primary TDM TX 2 Hostless",
+ .stream_name = "Primary TDM TX 2 Hostless",
+ .cpu_dai_name = "PRI_TDM_TX_2_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ },
+ {
+ .name = "Primary TDM TX 3 Hostless",
+ .stream_name = "Primary TDM TX 3 Hostless",
+ .cpu_dai_name = "PRI_TDM_TX_3_HOSTLESS",
+ .platform_name = "msm-pcm-hostless",
+ .dynamic = 1,
+ .dpcm_capture = 1,
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .codec_name = "snd-soc-dummy",
+ }
};
static struct snd_soc_dai_link apq8096_custom_fe_dai_links[] = {
@@ -4159,6 +5584,62 @@ static struct snd_soc_dai_link apq8096_auto_be_dai_links[] = {
.ignore_suspend = 1,
},
{
+ .name = LPASS_BE_SEC_TDM_RX_0,
+ .stream_name = "Secondary TDM0 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36880",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_SEC_TDM_RX_0,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &apq8096_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_SEC_TDM_RX_1,
+ .stream_name = "Secondary TDM1 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36882",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_SEC_TDM_RX_1,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &apq8096_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_SEC_TDM_RX_2,
+ .stream_name = "Secondary TDM2 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36884",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_SEC_TDM_RX_2,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &apq8096_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_SEC_TDM_RX_3,
+ .stream_name = "Secondary TDM3 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36886",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_SEC_TDM_RX_3,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &apq8096_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
.name = LPASS_BE_SEC_TDM_TX_0,
.stream_name = "Secondary TDM0 Capture",
.cpu_dai_name = "msm-dai-q6-tdm.36881",
@@ -4452,6 +5933,118 @@ static struct snd_soc_dai_link apq8096_auto_be_dai_links[] = {
.ops = &apq8096_tdm_be_ops,
.ignore_suspend = 1,
},
+ {
+ .name = LPASS_BE_PRI_TDM_RX_0,
+ .stream_name = "Primary TDM0 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36864",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_PRI_TDM_RX_0,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &apq8096_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_PRI_TDM_RX_1,
+ .stream_name = "Primary TDM1 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36866",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_PRI_TDM_RX_1,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &apq8096_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_PRI_TDM_RX_2,
+ .stream_name = "Primary TDM2 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36868",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_PRI_TDM_RX_2,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &apq8096_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_PRI_TDM_RX_3,
+ .stream_name = "Primary TDM3 Playback",
+ .cpu_dai_name = "msm-dai-q6-tdm.36870",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-rx",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_PRI_TDM_RX_3,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &apq8096_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_PRI_TDM_TX_0,
+ .stream_name = "Primary TDM0 Capture",
+ .cpu_dai_name = "msm-dai-q6-tdm.36865",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-tx",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_PRI_TDM_TX_0,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &apq8096_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_PRI_TDM_TX_1,
+ .stream_name = "Primary TDM1 Capture",
+ .cpu_dai_name = "msm-dai-q6-tdm.36867",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-tx",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_PRI_TDM_TX_1,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &apq8096_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_PRI_TDM_TX_2,
+ .stream_name = "Primary TDM2 Capture",
+ .cpu_dai_name = "msm-dai-q6-tdm.36869",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-tx",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_PRI_TDM_TX_2,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &apq8096_tdm_be_ops,
+ .ignore_suspend = 1,
+ },
+ {
+ .name = LPASS_BE_PRI_TDM_TX_3,
+ .stream_name = "Primary TDM3 Capture",
+ .cpu_dai_name = "msm-dai-q6-tdm.36871",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-stub-codec.1",
+ .codec_dai_name = "msm-stub-tx",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_PRI_TDM_TX_3,
+ .be_hw_params_fixup = msm_tdm_be_hw_params_fixup,
+ .ops = &apq8096_tdm_be_ops,
+ .ignore_suspend = 1,
+ }
};
static struct snd_soc_dai_link apq8096_hdmi_dai_link[] = {
@@ -4795,7 +6388,6 @@ static int apq8096_asoc_machine_probe(struct platform_device *pdev)
goto err;
}
dev_info(&pdev->dev, "Sound card %s registered\n", card->name);
-
return 0;
err:
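
The eight hostless front-end links added to apq8096_auto_fe_dai_links differ only in name, CPU DAI, and stream direction. A condensed sketch of the shared template, assuming a hypothetical TDM_HOSTLESS_LINK helper macro (not part of this patch):

    /* Hypothetical macro collapsing the repeated hostless FE pattern:
     * dynamic DPCM link, no host PCM, kept alive across suspend.
     */
    #define TDM_HOSTLESS_LINK(link_name, dai, dir_flag)        \
    {                                                          \
        .name = link_name,                                     \
        .stream_name = link_name,                              \
        .cpu_dai_name = dai,                                   \
        .platform_name = "msm-pcm-hostless",                   \
        .dynamic = 1,                                          \
        dir_flag = 1,                                          \
        .trigger = {SND_SOC_DPCM_TRIGGER_POST,                 \
                    SND_SOC_DPCM_TRIGGER_POST},                \
        .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,              \
        .ignore_suspend = 1,                                   \
        .ignore_pmdown_time = 1,                               \
        .codec_dai_name = "snd-soc-dummy-dai",                 \
        .codec_name = "snd-soc-dummy",                         \
    }

With such a helper, the first entry above would read TDM_HOSTLESS_LINK("Primary TDM RX 0 Hostless", "PRI_TDM_RX_0_HOSTLESS", .dpcm_playback).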
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index 5facafdc7729..3326c993e129 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -2568,7 +2568,7 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
.capture = {
.stream_name = "MultiMedia17 Capture",
.aif_name = "MM_UL17",
- .rates = (SNDRV_PCM_RATE_8000_48000|
+ .rates = (SNDRV_PCM_RATE_8000_192000|
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
@@ -2576,7 +2576,7 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 192000,
},
.ops = &msm_fe_Multimedia_dai_ops,
.compress_new = snd_soc_new_compress,
@@ -2587,7 +2587,7 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
.capture = {
.stream_name = "MultiMedia18 Capture",
.aif_name = "MM_UL18",
- .rates = (SNDRV_PCM_RATE_8000_48000|
+ .rates = (SNDRV_PCM_RATE_8000_192000|
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
@@ -2606,7 +2606,7 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
.capture = {
.stream_name = "MultiMedia19 Capture",
.aif_name = "MM_UL19",
- .rates = (SNDRV_PCM_RATE_8000_48000|
+ .rates = (SNDRV_PCM_RATE_8000_192000|
SNDRV_PCM_RATE_KNOT),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE |
@@ -2614,7 +2614,7 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
.channels_min = 1,
.channels_max = 8,
.rate_min = 8000,
- .rate_max = 48000,
+ .rate_max = 192000,
},
.ops = &msm_fe_Multimedia_dai_ops,
.compress_new = snd_soc_new_compress,
@@ -2802,6 +2802,44 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
.name = "MultiMedia27",
.probe = fe_dai_probe,
},
+ {
+ .capture = {
+ .stream_name = "MultiMedia28 Capture",
+ .aif_name = "MM_UL28",
+ .rates = (SNDRV_PCM_RATE_8000_192000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .ops = &msm_fe_Multimedia_dai_ops,
+ .compress_new = snd_soc_new_compress,
+ .name = "MultiMedia28",
+ .probe = fe_dai_probe,
+ },
+ {
+ .capture = {
+ .stream_name = "MultiMedia29 Capture",
+ .aif_name = "MM_UL29",
+ .rates = (SNDRV_PCM_RATE_8000_192000|
+ SNDRV_PCM_RATE_KNOT),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE),
+ .channels_min = 1,
+ .channels_max = 8,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ },
+ .ops = &msm_fe_Multimedia_dai_ops,
+ .compress_new = snd_soc_new_compress,
+ .name = "MultiMedia29",
+ .probe = fe_dai_probe,
+ },
};
static int msm_fe_dai_dev_probe(struct platform_device *pdev)
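
Widening .rates from SNDRV_PCM_RATE_8000_48000 to SNDRV_PCM_RATE_8000_192000 is what lets these capture front ends open above 48 kHz; rate_min and rate_max must agree with the bitmask or the advertised interval and the discrete rate list diverge. A minimal consistency check, sketched under the assumption that both fields describe the same stream:

    #include <sound/pcm.h>
    #include <sound/soc.h>

    /* The min and max rates should each map to a bit present in the
     * .rates mask, otherwise hw_params will reject rates the stream
     * claims to support.
     */
    static bool rates_consistent(const struct snd_soc_pcm_stream *s)
    {
        return (s->rates & snd_pcm_rate_to_rate_bit(s->rate_min)) &&
               (s->rates & snd_pcm_rate_to_rate_bit(s->rate_max));
    }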
diff --git a/sound/soc/msm/qdsp6v2/Makefile b/sound/soc/msm/qdsp6v2/Makefile
index 98c52a4db51f..4116f79890a3 100644
--- a/sound/soc/msm/qdsp6v2/Makefile
+++ b/sound/soc/msm/qdsp6v2/Makefile
@@ -4,10 +4,13 @@ snd-soc-qdsp6v2-objs += msm-dai-q6-v2.o msm-pcm-q6-v2.o msm-pcm-routing-v2.o \
msm-pcm-voice-v2.o msm-dai-q6-hdmi-v2.o \
msm-lsm-client.o msm-pcm-host-voice-v2.o \
msm-audio-effects-q6-v2.o msm-pcm-loopback-v2.o \
- msm-dai-slim.o msm-transcode-loopback-q6-v2.o \
+ msm-transcode-loopback-q6-v2.o \
adsp_err.o
+obj-$(CONFIG_SLIMBUS) += msm-dai-slim.o audio_slimslave.o
obj-$(CONFIG_SND_SOC_QDSP6V2) += snd-soc-qdsp6v2.o msm-pcm-dtmf-v2.o \
msm-dai-stub-v2.o
+obj-$(CONFIG_SND_SOC_QDSP6V2_VM) += snd-soc-qdsp6v2.o msm-pcm-dtmf-v2.o \
+ msm-dai-stub-v2.o
obj-$(CONFIG_SND_HWDEP) += msm-pcm-routing-devdep.o
obj-$(CONFIG_DOLBY_DAP) += msm-dolby-dap-config.o
obj-$(CONFIG_DOLBY_DS2) += msm-ds2-dap-config.o
@@ -15,7 +18,7 @@ obj-$(CONFIG_DOLBY_LICENSE) += msm-ds2-dap-config.o
obj-$(CONFIG_DTS_SRS_TM) += msm-dts-srs-tm-config.o
obj-$(CONFIG_QTI_PP) += msm-qti-pp-config.o
obj-y += audio_calibration.o audio_cal_utils.o q6adm.o q6afe.o q6asm.o \
- q6audio-v2.o q6voice.o q6core.o rtac.o q6lsm.o audio_slimslave.o \
+ q6audio-v2.o q6voice.o q6core.o rtac.o q6lsm.o \
msm-pcm-q6-noirq.o
ocmem-audio-objs += audio_ocmem.o
obj-$(CONFIG_AUDIO_OCMEM) += ocmem-audio.o
diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
index c462f682e160..471be3294881 100644
--- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
@@ -397,12 +397,9 @@ static int msm_compr_set_volume(struct snd_compr_stream *cstream,
} else {
gain_list[0] = volume_l;
gain_list[1] = volume_r;
- /* force sending FR/FL/FC volume for mono */
- if (prtd->num_channels == 1) {
- gain_list[2] = volume_l;
- num_channels = 3;
- use_default = true;
- }
+ gain_list[2] = volume_l;
+ num_channels = 3;
+ use_default = true;
rc = q6asm_set_multich_gain(prtd->audio_client, num_channels,
gain_list, chmap, use_default);
}
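
The msm-compress change removes the mono-only special case: every volume write on this path now sends three gains (FL, FR, and a centre slot duplicated from the left gain) with the default channel map. Reduced to a sketch, with NULL passed for the explicit map on the assumption that use_default makes it ignored:

    /* Always ship an FL/FR/FC triple so mono and stereo streams get
     * the same downstream volume behaviour.
     */
    static int send_compr_volume(struct audio_client *ac,
                                 uint32_t volume_l, uint32_t volume_r)
    {
        uint32_t gain_list[3] = { volume_l, volume_r, volume_l };

        return q6asm_set_multich_gain(ac, 3, gain_list, NULL, true);
    }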
diff --git a/sound/soc/msm/qdsp6v2/msm-lsm-client.c b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
index 35270e3340ec..ae6767d26921 100644
--- a/sound/soc/msm/qdsp6v2/msm-lsm-client.c
+++ b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
@@ -1167,7 +1167,7 @@ static int msm_lsm_ioctl_shared(struct snd_pcm_substream *substream,
case SNDRV_LSM_SET_FWK_MODE_CONFIG: {
u32 mode;
- if (copy_from_user(&mode, arg, sizeof(mode))) {
+ if (copy_from_user(&mode, (void __user *) arg, sizeof(mode))) {
dev_err(rtd->dev, "%s: %s: copy_frm_user failed\n",
__func__, "LSM_SET_FWK_MODE_CONFIG");
return -EFAULT;
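
The LSM fix is purely an annotation: arg travels through the shared ioctl path as a plain pointer, and the (void __user *) cast restores the address-space marking sparse expects on copy_from_user. The pattern in isolation:

    #include <linux/uaccess.h>

    /* copy_from_user returns the number of bytes left uncopied, so any
     * nonzero result is treated as a fault.
     */
    static int read_user_u32(void *arg, u32 *out)
    {
        if (copy_from_user(out, (void __user *)arg, sizeof(*out)))
            return -EFAULT;
        return 0;
    }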
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
index 276270258771..ce9091b1ca16 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
@@ -424,7 +424,7 @@ static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
return ret;
}
-
+#ifdef CONFIG_SND_HWDEP
static int msm_pcm_mmap_fd(struct snd_pcm_substream *substream,
struct snd_pcm_mmap_fd *mmap_fd)
{
@@ -459,6 +459,7 @@ static int msm_pcm_mmap_fd(struct snd_pcm_substream *substream,
}
return mmap_fd->fd < 0 ? -EFAULT : 0;
}
+#endif
static int msm_pcm_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg)
@@ -1042,6 +1043,7 @@ static int msm_pcm_add_app_type_controls(struct snd_soc_pcm_runtime *rtd)
return 0;
}
+#ifdef CONFIG_SND_HWDEP
static int msm_pcm_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
unsigned int cmd, unsigned long arg)
{
@@ -1137,6 +1139,7 @@ static int msm_pcm_add_hwdep_dev(struct snd_soc_pcm_runtime *runtime)
hwdep->ops.ioctl_compat = msm_pcm_hwdep_compat_ioctl;
return 0;
}
+#endif
static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
@@ -1170,9 +1173,11 @@ static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
pr_err("%s: Could not add app type controls failed %d\n",
__func__, ret);
}
+#ifdef CONFIG_SND_HWDEP
ret = msm_pcm_add_hwdep_dev(rtd);
if (ret)
pr_err("%s: Could not add hw dep node\n", __func__);
+#endif
pcm->nonatomic = true;
exit:
return ret;
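
Guarding the hwdep pieces with CONFIG_SND_HWDEP keeps references to snd_hwdep symbols out of builds that lack them, at the cost of an #ifdef at the call site in msm_asoc_pcm_new. A common alternative shape, sketched here rather than taken from the patch, is a stub that keeps the caller unconditional:

    #ifdef CONFIG_SND_HWDEP
    static int msm_pcm_add_hwdep_dev(struct snd_soc_pcm_runtime *runtime);
    #else
    /* No-op stub: the call site stays free of preprocessor guards. */
    static inline int msm_pcm_add_hwdep_dev(struct snd_soc_pcm_runtime *runtime)
    {
        return 0;
    }
    #endif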
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
index b94eb6fbfeea..0d01803e634d 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
@@ -1448,12 +1448,13 @@ static int msm_pcm_add_compress_control(struct snd_soc_pcm_runtime *rtd)
if (pdata) {
if (!pdata->pcm) {
pdata->pcm = rtd->pcm;
- snd_soc_add_platform_controls(rtd->platform,
- pcm_compress_control,
- ARRAY_SIZE
- (pcm_compress_control));
- pr_debug("%s: add control success plt = %pK\n",
- __func__, rtd->platform);
+ ret = snd_soc_add_platform_controls(rtd->platform,
+ pcm_compress_control,
+ ARRAY_SIZE
+ (pcm_compress_control));
+ if (ret < 0)
+ pr_err("%s: failed add ctl %s. err = %d\n",
+ __func__, mixer_str, ret);
}
} else {
pr_err("%s: NULL pdata\n", __func__);
@@ -1603,24 +1604,47 @@ done:
return ret;
}
+static int msm_pcm_playback_pan_scale_ctl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+ uinfo->count = sizeof(struct asm_stream_pan_ctrl_params);
+ return 0;
+}
+
static int msm_pcm_playback_pan_scale_ctl_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
int ret = 0;
int len = 0;
int i = 0;
- struct snd_pcm_usr *usr_info = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_component *usr_info = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_platform *platform;
+ struct msm_plat_data *pdata;
struct snd_pcm_substream *substream;
struct msm_audio *prtd;
struct asm_stream_pan_ctrl_params pan_param;
-
+ char *usr_value = NULL;
+ uint32_t *gain_ptr = NULL;
if (!usr_info) {
pr_err("%s: usr_info is null\n", __func__);
ret = -EINVAL;
goto done;
}
- substream = usr_info->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+ platform = snd_soc_component_to_platform(usr_info);
+ if (!platform) {
+ pr_err("%s: platform is null\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+ pdata = dev_get_drvdata(platform->dev);
+ if (!pdata) {
+ pr_err("%s: pdata is null\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+ substream = pdata->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
if (!substream) {
pr_err("%s substream not found\n", __func__);
ret = -EINVAL;
@@ -1637,54 +1661,71 @@ static int msm_pcm_playback_pan_scale_ctl_put(struct snd_kcontrol *kcontrol,
ret = -EINVAL;
goto done;
}
- pan_param.num_output_channels =
- ucontrol->value.integer.value[len++];
+ usr_value = (char *) ucontrol->value.bytes.data;
+ if (!usr_value) {
+ pr_err("%s ucontrol data is null\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+ memcpy(&pan_param.num_output_channels, &usr_value[len],
+ sizeof(pan_param.num_output_channels));
+ len += sizeof(pan_param.num_output_channels);
if (pan_param.num_output_channels >
PCM_FORMAT_MAX_NUM_CHANNEL) {
ret = -EINVAL;
goto done;
}
- pan_param.num_input_channels =
- ucontrol->value.integer.value[len++];
+ memcpy(&pan_param.num_input_channels, &usr_value[len],
+ sizeof(pan_param.num_input_channels));
+ len += sizeof(pan_param.num_input_channels);
if (pan_param.num_input_channels >
PCM_FORMAT_MAX_NUM_CHANNEL) {
ret = -EINVAL;
goto done;
}
- if (ucontrol->value.integer.value[len++]) {
- for (i = 0; i < pan_param.num_output_channels; i++) {
- pan_param.output_channel_map[i] =
- ucontrol->value.integer.value[len++];
- }
+ if (usr_value[len++]) {
+ memcpy(pan_param.output_channel_map, &usr_value[len],
+ (pan_param.num_output_channels *
+ sizeof(pan_param.output_channel_map[0])));
+ len += (pan_param.num_output_channels *
+ sizeof(pan_param.output_channel_map[0]));
}
- if (ucontrol->value.integer.value[len++]) {
- for (i = 0; i < pan_param.num_input_channels; i++) {
- pan_param.input_channel_map[i] =
- ucontrol->value.integer.value[len++];
- }
+ if (usr_value[len++]) {
+ memcpy(pan_param.input_channel_map, &usr_value[len],
+ (pan_param.num_input_channels *
+ sizeof(pan_param.input_channel_map[0])));
+ len += (pan_param.num_input_channels *
+ sizeof(pan_param.input_channel_map[0]));
}
- if (ucontrol->value.integer.value[len++]) {
+ if (usr_value[len++]) {
+ gain_ptr = (uint32_t *) &usr_value[len];
for (i = 0; i < pan_param.num_output_channels *
pan_param.num_input_channels; i++) {
pan_param.gain[i] =
- !(ucontrol->value.integer.value[len++] > 0) ?
+ !(gain_ptr[i] > 0) ?
0 : 2 << 13;
+ len += sizeof(pan_param.gain[i]);
}
}
ret = q6asm_set_mfc_panning_params(prtd->audio_client,
&pan_param);
len -= pan_param.num_output_channels *
- pan_param.num_input_channels;
- for (i = 0; i < pan_param.num_output_channels *
- pan_param.num_input_channels; i++) {
- /*
- * The data userspace passes is already in Q14 format.
- * For volume gain is in Q28.
- */
- pan_param.gain[i] =
- ucontrol->value.integer.value[len++] << 14;
+ pan_param.num_input_channels * sizeof(pan_param.gain[0]);
+ if (gain_ptr) {
+ for (i = 0; i < pan_param.num_output_channels *
+ pan_param.num_input_channels; i++) {
+ /*
+ * The data userspace passes is already in Q14 format;
+ * the volume gain, however, is expected in Q28, hence
+ * the left shift by 14.
+ */
+ pan_param.gain[i] =
+ (gain_ptr[i]) << 14;
+ len += sizeof(pan_param.gain[i]);
+ }
}
ret = q6asm_set_vol_ctrl_gain_pair(prtd->audio_client,
&pan_param);
@@ -1701,40 +1742,60 @@ static int msm_pcm_playback_pan_scale_ctl_get(struct snd_kcontrol *kcontrol,
static int msm_add_stream_pan_scale_controls(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_pcm *pcm;
- struct snd_pcm_usr *pan_ctl_info;
- struct snd_kcontrol *kctl;
const char *playback_mixer_ctl_name = "Audio Stream";
const char *deviceNo = "NN";
const char *suffix = "Pan Scale Control";
- int ctl_len, ret = 0;
+ char *mixer_str = NULL;
+ int ctl_len;
+ int ret = 0;
+ struct msm_plat_data *pdata;
+ struct snd_kcontrol_new pan_scale_control[1] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "?",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = msm_pcm_playback_pan_scale_ctl_info,
+ .get = msm_pcm_playback_pan_scale_ctl_get,
+ .put = msm_pcm_playback_pan_scale_ctl_put,
+ .private_value = 0,
+ }
+ };
if (!rtd) {
- pr_err("%s: rtd is NULL\n", __func__);
- ret = -EINVAL;
- goto done;
+ pr_err("%s: NULL rtd\n", __func__);
+ return -EINVAL;
}
- pcm = rtd->pcm;
- ctl_len = strlen(playback_mixer_ctl_name) + 1 + strlen(deviceNo) + 1 +
- strlen(suffix) + 1;
-
- ret = snd_pcm_add_usr_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
- NULL, 1, ctl_len, rtd->dai_link->be_id,
- &pan_ctl_info);
-
- if (ret < 0) {
- pr_err("%s: failed add ctl %s. err = %d\n",
- __func__, suffix, ret);
+ ctl_len = strlen(playback_mixer_ctl_name) + 1 +
+ strlen(deviceNo) + 1 + strlen(suffix) + 1;
+ mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+ if (!mixer_str) {
+ ret = -ENOMEM;
goto done;
}
- kctl = pan_ctl_info->kctl;
- snprintf(kctl->id.name, ctl_len, "%s %d %s", playback_mixer_ctl_name,
- rtd->pcm->device, suffix);
- kctl->put = msm_pcm_playback_pan_scale_ctl_put;
- kctl->get = msm_pcm_playback_pan_scale_ctl_get;
- pr_debug("%s: Registering new mixer ctl = %s\n", __func__,
- kctl->id.name);
+
+ snprintf(mixer_str, ctl_len, "%s %d %s",
+ playback_mixer_ctl_name, rtd->pcm->device, suffix);
+ pan_scale_control[0].name = mixer_str;
+ pan_scale_control[0].private_value = rtd->dai_link->be_id;
+ pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+ pdata = dev_get_drvdata(rtd->platform->dev);
+ if (pdata) {
+ if (!pdata->pcm)
+ pdata->pcm = rtd->pcm;
+ ret = snd_soc_add_platform_controls(rtd->platform,
+ pan_scale_control,
+ ARRAY_SIZE
+ (pan_scale_control));
+ if (ret < 0)
+ pr_err("%s: failed add ctl %s. err = %d\n",
+ __func__, mixer_str, ret);
+ } else {
+ pr_err("%s: NULL pdata\n", __func__);
+ ret = -EINVAL;
+ }
+
+ kfree(mixer_str);
done:
return ret;
@@ -1746,18 +1807,28 @@ static int msm_pcm_playback_dnmix_ctl_get(struct snd_kcontrol *kcontrol,
return 0;
}
+static int msm_pcm_playback_dnmix_ctl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+ uinfo->count = sizeof(struct asm_stream_pan_ctrl_params);
+ return 0;
+}
+
static int msm_pcm_playback_dnmix_ctl_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
int ret = 0;
int len = 0;
- int i = 0;
- struct snd_pcm_usr *usr_info = snd_kcontrol_chip(kcontrol);
+
+ struct snd_soc_component *usr_info = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_platform *platform;
+ struct msm_plat_data *pdata;
struct snd_pcm_substream *substream;
struct msm_audio *prtd;
struct asm_stream_pan_ctrl_params dnmix_param;
-
- int be_id = ucontrol->value.integer.value[len++];
+ char *usr_value;
+ int be_id = 0;
int stream_id = 0;
if (!usr_info) {
@@ -1765,7 +1836,19 @@ static int msm_pcm_playback_dnmix_ctl_put(struct snd_kcontrol *kcontrol,
ret = -EINVAL;
goto done;
}
- substream = usr_info->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+ platform = snd_soc_component_to_platform(usr_info);
+ if (!platform) {
+ pr_err("%s platform is null\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+ pdata = dev_get_drvdata(platform->dev);
+ if (!pdata) {
+ pr_err("%s pdata is null\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+ substream = pdata->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
if (!substream) {
pr_err("%s substream not found\n", __func__);
ret = -EINVAL;
@@ -1781,40 +1864,51 @@ static int msm_pcm_playback_dnmix_ctl_put(struct snd_kcontrol *kcontrol,
ret = -EINVAL;
goto done;
}
+ usr_value = (char *) ucontrol->value.bytes.data;
+ if (!usr_value) {
+ pr_err("%s usrvalue is null\n", __func__);
+ goto done;
+ }
+ memcpy(&be_id, usr_value, sizeof(be_id));
+ len += sizeof(be_id);
stream_id = prtd->audio_client->session;
- dnmix_param.num_output_channels =
- ucontrol->value.integer.value[len++];
+ memcpy(&dnmix_param.num_output_channels, &usr_value[len],
+ sizeof(dnmix_param.num_output_channels));
+ len += sizeof(dnmix_param.num_output_channels);
if (dnmix_param.num_output_channels >
PCM_FORMAT_MAX_NUM_CHANNEL) {
ret = -EINVAL;
goto done;
}
- dnmix_param.num_input_channels =
- ucontrol->value.integer.value[len++];
+ memcpy(&dnmix_param.num_input_channels, &usr_value[len],
+ sizeof(dnmix_param.num_input_channels));
+ len += sizeof(dnmix_param.num_input_channels);
if (dnmix_param.num_input_channels >
PCM_FORMAT_MAX_NUM_CHANNEL) {
ret = -EINVAL;
goto done;
}
-
- if (ucontrol->value.integer.value[len++]) {
- for (i = 0; i < dnmix_param.num_output_channels; i++) {
- dnmix_param.output_channel_map[i] =
- ucontrol->value.integer.value[len++];
- }
- }
- if (ucontrol->value.integer.value[len++]) {
- for (i = 0; i < dnmix_param.num_input_channels; i++) {
- dnmix_param.input_channel_map[i] =
- ucontrol->value.integer.value[len++];
- }
- }
- if (ucontrol->value.integer.value[len++]) {
- for (i = 0; i < dnmix_param.num_output_channels *
- dnmix_param.num_input_channels; i++) {
- dnmix_param.gain[i] =
- ucontrol->value.integer.value[len++];
- }
+ if (usr_value[len++]) {
+ memcpy(dnmix_param.output_channel_map, &usr_value[len],
+ (dnmix_param.num_output_channels *
+ sizeof(dnmix_param.output_channel_map[0])));
+ len += (dnmix_param.num_output_channels *
+ sizeof(dnmix_param.output_channel_map[0]));
+ }
+ if (usr_value[len++]) {
+ memcpy(dnmix_param.input_channel_map, &usr_value[len],
+ (dnmix_param.num_input_channels *
+ sizeof(dnmix_param.input_channel_map[0])));
+ len += (dnmix_param.num_input_channels *
+ sizeof(dnmix_param.input_channel_map[0]));
+ }
+ if (usr_value[len++]) {
+ memcpy(dnmix_param.gain, (uint32_t *) &usr_value[len],
+ (dnmix_param.num_input_channels *
+ dnmix_param.num_output_channels *
+ sizeof(dnmix_param.gain[0])));
+ len += (dnmix_param.num_input_channels *
+ dnmix_param.num_output_channels * sizeof(dnmix_param.gain[0]));
}
msm_routing_set_downmix_control_data(be_id,
stream_id,
@@ -1826,39 +1920,58 @@ done:
static int msm_add_device_down_mix_controls(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_pcm *pcm;
- struct snd_pcm_usr *usr_info;
- struct snd_kcontrol *kctl;
const char *playback_mixer_ctl_name = "Audio Device";
const char *deviceNo = "NN";
const char *suffix = "Downmix Control";
- int ctl_len, ret = 0;
+ char *mixer_str = NULL;
+ int ctl_len = 0, ret = 0;
+ struct msm_plat_data *pdata;
+ struct snd_kcontrol_new device_downmix_control[1] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "?",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = msm_pcm_playback_dnmix_ctl_info,
+ .get = msm_pcm_playback_dnmix_ctl_get,
+ .put = msm_pcm_playback_dnmix_ctl_put,
+ .private_value = 0,
+ }
+ };
if (!rtd) {
- pr_err("%s: rtd is NULL\n", __func__);
+ pr_err("%s NULL rtd\n", __func__);
ret = -EINVAL;
goto done;
}
-
- pcm = rtd->pcm;
ctl_len = strlen(playback_mixer_ctl_name) + 1 +
- strlen(deviceNo) + 1 + strlen(suffix) + 1;
- ret = snd_pcm_add_usr_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
- NULL, 1, ctl_len, rtd->dai_link->be_id,
- &usr_info);
- if (ret < 0) {
- pr_err("%s: downmix control add failed: %d\n",
- __func__, ret);
+ strlen(deviceNo) + 1 + strlen(suffix) + 1;
+ mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+ if (!mixer_str) {
+ ret = -ENOMEM;
goto done;
}
- kctl = usr_info->kctl;
- snprintf(kctl->id.name, ctl_len, "%s %d %s",
- playback_mixer_ctl_name, rtd->pcm->device, suffix);
- kctl->put = msm_pcm_playback_dnmix_ctl_put;
- kctl->get = msm_pcm_playback_dnmix_ctl_get;
- pr_debug("%s: downmix control name = %s\n",
- __func__, playback_mixer_ctl_name);
+ snprintf(mixer_str, ctl_len, "%s %d %s",
+ playback_mixer_ctl_name, rtd->pcm->device, suffix);
+ device_downmix_control[0].name = mixer_str;
+ device_downmix_control[0].private_value = rtd->dai_link->be_id;
+ pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+ pdata = dev_get_drvdata(rtd->platform->dev);
+ if (pdata) {
+ if (!pdata->pcm)
+ pdata->pcm = rtd->pcm;
+ ret = snd_soc_add_platform_controls(rtd->platform,
+ device_downmix_control,
+ ARRAY_SIZE
+ (device_downmix_control));
+ if (ret < 0)
+ pr_err("%s: failed add ctl %s. err = %d\n",
+ __func__, mixer_str, ret);
+ } else {
+ pr_err("%s: NULL pdata\n", __func__);
+ ret = -EINVAL;
+ }
+ kfree(mixer_str);
done:
return ret;
}
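
Both the pan-scale and downmix controls move from per-PCM user controls to platform-level TYPE_BYTES controls: .info reports a byte count of sizeof(struct asm_stream_pan_ctrl_params), and .put walks that blob with memcpy plus range checks on the channel counts. The registration shape, condensed (msm_pan_info/get/put stand in for the real callbacks):

    static const struct snd_kcontrol_new pan_ctl = {
        .iface  = SNDRV_CTL_ELEM_IFACE_MIXER,
        .name   = "Audio Stream NN Pan Scale Control",
        .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
        .info   = msm_pan_info,  /* TYPE_BYTES, count = sizeof(params) */
        .get    = msm_pan_get,
        .put    = msm_pan_put,   /* deserializes value.bytes.data */
    };

    /* registered once against the platform component: */
    ret = snd_soc_add_platform_controls(rtd->platform, &pan_ctl, 1);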
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 2114adae72d7..70531872076b 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -626,6 +626,12 @@ static struct msm_pcm_routing_fdai_data
/* MULTIMEDIA27 */
{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* MULTIMEDIA28 */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* MULTIMEDIA29 */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
/* CS_VOICE */
{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
@@ -3695,6 +3701,16 @@ static const struct snd_kcontrol_new ext_ec_ref_mux_ul19 =
msm_route_ec_ref_rx_enum[0],
msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul28 =
+ SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL28 MUX Mux",
+ msm_route_ec_ref_rx_enum[0],
+ msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
+static const struct snd_kcontrol_new ext_ec_ref_mux_ul29 =
+ SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL29 MUX Mux",
+ msm_route_ec_ref_rx_enum[0],
+ msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put);
+
static int msm_routing_ext_ec_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -3841,6 +3857,12 @@ static const struct snd_kcontrol_new pri_i2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_PRI_I2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_PRI_I2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_PRI_I2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new sec_i2s_rx_mixer_controls[] = {
@@ -3904,6 +3926,12 @@ static const struct snd_kcontrol_new sec_i2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SEC_I2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_SEC_I2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_SEC_I2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new spdif_rx_mixer_controls[] = {
@@ -3967,6 +3995,12 @@ static const struct snd_kcontrol_new spdif_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SPDIF_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_SPDIF_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_SPDIF_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new slimbus_2_rx_mixer_controls[] = {
@@ -4084,6 +4118,12 @@ static const struct snd_kcontrol_new slimbus_5_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SLIMBUS_5_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_SLIMBUS_5_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_SLIMBUS_5_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new slimbus_rx_mixer_controls[] = {
@@ -4162,6 +4202,12 @@ static const struct snd_kcontrol_new slimbus_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SLIMBUS_0_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_SLIMBUS_0_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new mi2s_rx_mixer_controls[] = {
@@ -4225,6 +4271,12 @@ static const struct snd_kcontrol_new mi2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_MI2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new quaternary_mi2s_rx_mixer_controls[] = {
@@ -4288,6 +4340,12 @@ static const struct snd_kcontrol_new quaternary_mi2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_QUATERNARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new quinary_mi2s_rx_mixer_controls[] = {
@@ -4351,6 +4409,12 @@ static const struct snd_kcontrol_new quinary_mi2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_QUINARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new tertiary_mi2s_rx_mixer_controls[] = {
@@ -4408,6 +4472,12 @@ static const struct snd_kcontrol_new tertiary_mi2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_TERTIARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new secondary_mi2s_rx2_mixer_controls[] = {
@@ -4477,6 +4547,12 @@ static const struct snd_kcontrol_new secondary_mi2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_SECONDARY_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new primary_mi2s_rx_mixer_controls[] = {
@@ -4540,6 +4616,12 @@ static const struct snd_kcontrol_new primary_mi2s_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_PRI_MI2S_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_PRI_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_PRI_MI2S_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new int0_mi2s_rx_mixer_controls[] = {
@@ -4726,6 +4808,12 @@ static const struct snd_kcontrol_new hdmi_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_HDMI_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_HDMI_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new display_port_mixer_controls[] = {
@@ -5081,6 +5169,12 @@ static const struct snd_kcontrol_new int_bt_sco_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_INT_BT_SCO_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_INT_BT_SCO_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new int_bt_a2dp_rx_mixer_controls[] = {
@@ -5198,6 +5292,12 @@ static const struct snd_kcontrol_new int_fm_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_INT_FM_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_INT_FM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_INT_FM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new afe_pcm_rx_mixer_controls[] = {
@@ -5261,6 +5361,12 @@ static const struct snd_kcontrol_new afe_pcm_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_AFE_PCM_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_AFE_PCM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_AFE_PCM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new auxpcm_rx_mixer_controls[] = {
@@ -5327,6 +5433,12 @@ static const struct snd_kcontrol_new auxpcm_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_AUXPCM_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_AUXPCM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_AUXPCM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new sec_auxpcm_rx_mixer_controls[] = {
@@ -5393,6 +5505,12 @@ static const struct snd_kcontrol_new sec_auxpcm_rx_mixer_controls[] = {
SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia28", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("MultiMedia29", MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
};
static const struct snd_kcontrol_new tert_auxpcm_rx_mixer_controls[] = {
@@ -7541,6 +7659,12 @@ static const struct snd_kcontrol_new mmul17_mixer_controls[] = {
SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
MSM_FRONTEND_DAI_MULTIMEDIA17, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
@@ -7565,6 +7689,9 @@ static const struct snd_kcontrol_new mmul18_mixer_controls[] = {
SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
MSM_FRONTEND_DAI_MULTIMEDIA18, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
@@ -7592,6 +7719,12 @@ static const struct snd_kcontrol_new mmul19_mixer_controls[] = {
SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer,
msm_routing_put_audio_mixer),
@@ -7741,6 +7874,66 @@ static const struct snd_kcontrol_new mmul27_mixer_controls[] = {
msm_routing_put_audio_mixer),
};
+static const struct snd_kcontrol_new mmul28_mixer_controls[] = {
+ SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("VOC_REC_DL", MSM_BACKEND_DAI_INCALL_RECORD_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA28, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
+static const struct snd_kcontrol_new mmul29_mixer_controls[] = {
+ SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("VOC_REC_DL", MSM_BACKEND_DAI_INCALL_RECORD_RX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+ SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX,
+ MSM_FRONTEND_DAI_MULTIMEDIA29, 1, 0, msm_routing_get_audio_mixer,
+ msm_routing_put_audio_mixer),
+};
+
static const struct snd_kcontrol_new pri_rx_voice_mixer_controls[] = {
SOC_SINGLE_EXT("CSVoice", MSM_BACKEND_DAI_PRI_I2S_RX,
MSM_FRONTEND_DAI_CS_VOICE, 1, 0, msm_routing_get_voice_mixer,
@@ -11119,7 +11312,7 @@ static int msm_routing_put_app_type_cfg_control(struct snd_kcontrol *kcontrol,
static const struct snd_kcontrol_new app_type_cfg_controls[] = {
SOC_SINGLE_MULTI_EXT("App Type Config", SND_SOC_NOPM, 0,
- 0xFFFFFFFF, 0, 128, msm_routing_get_app_type_cfg_control,
+ 0x7FFFFFFF, 0, 128, msm_routing_get_app_type_cfg_control,
msm_routing_put_app_type_cfg_control),
};
@@ -12621,6 +12814,10 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
mmul21_mixer_controls, ARRAY_SIZE(mmul21_mixer_controls)),
SND_SOC_DAPM_MIXER("MultiMedia27 Mixer", SND_SOC_NOPM, 0, 0,
mmul27_mixer_controls, ARRAY_SIZE(mmul27_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia28 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul28_mixer_controls, ARRAY_SIZE(mmul28_mixer_controls)),
+ SND_SOC_DAPM_MIXER("MultiMedia29 Mixer", SND_SOC_NOPM, 0, 0,
+ mmul29_mixer_controls, ARRAY_SIZE(mmul29_mixer_controls)),
SND_SOC_DAPM_MIXER("AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
auxpcm_rx_mixer_controls, ARRAY_SIZE(auxpcm_rx_mixer_controls)),
SND_SOC_DAPM_MIXER("SEC_AUX_PCM_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
@@ -13216,14 +13413,27 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MultiMedia17 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia18 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia19 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+ {"MultiMedia28 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
+ {"MultiMedia29 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia8 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia2 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
{"MultiMedia4 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
{"MultiMedia17 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
{"MultiMedia18 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
{"MultiMedia19 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+ {"MultiMedia28 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+ {"MultiMedia29 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
{"MultiMedia8 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+ {"MultiMedia17 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+ {"MultiMedia18 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+ {"MultiMedia19 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+ {"MultiMedia28 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+ {"MultiMedia29 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+ {"MultiMedia17 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
{"MultiMedia18 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"MultiMedia19 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"MultiMedia28 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"MultiMedia29 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
{"MultiMedia8 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"},
{"MultiMedia3 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
{"MultiMedia5 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"},
@@ -14168,6 +14378,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MultiMedia17 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia18 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia19 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+ {"MultiMedia28 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
+ {"MultiMedia29 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia5 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia8 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
{"MultiMedia16 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"},
@@ -14177,6 +14389,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MultiMedia17 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia18 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia19 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+ {"MultiMedia28 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
+ {"MultiMedia29 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia5 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia6 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
{"MultiMedia8 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"},
@@ -14187,6 +14401,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MultiMedia17 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MultiMedia18 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MultiMedia19 Mixer", "AFE_PCM_TX", "PCM_TX"},
+ {"MultiMedia28 Mixer", "AFE_PCM_TX", "PCM_TX"},
+ {"MultiMedia29 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MultiMedia5 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MultiMedia8 Mixer", "AFE_PCM_TX", "PCM_TX"},
{"MultiMedia16 Mixer", "AFE_PCM_TX", "PCM_TX"},
@@ -14206,6 +14422,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MM_UL20", NULL, "MultiMedia20 Mixer"},
{"MM_UL21", NULL, "MultiMedia21 Mixer"},
{"MM_UL27", NULL, "MultiMedia27 Mixer"},
+ {"MM_UL28", NULL, "MultiMedia28 Mixer"},
+ {"MM_UL29", NULL, "MultiMedia29 Mixer"},
{"AUX_PCM_RX Audio Mixer", "MultiMedia1", "MM_DL1"},
{"AUX_PCM_RX Audio Mixer", "MultiMedia2", "MM_DL2"},
@@ -14610,6 +14828,16 @@ static const struct snd_soc_dapm_route intercon[] = {
{"AUDIO_REF_EC_UL19 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"},
{"AUDIO_REF_EC_UL19 MUX", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"AUDIO_REF_EC_UL28 MUX", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+ {"AUDIO_REF_EC_UL28 MUX", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+ {"AUDIO_REF_EC_UL28 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+ {"AUDIO_REF_EC_UL28 MUX", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+
+ {"AUDIO_REF_EC_UL29 MUX", "PRI_MI2S_TX", "PRI_MI2S_TX"},
+ {"AUDIO_REF_EC_UL29 MUX", "SEC_MI2S_TX", "SEC_MI2S_TX"},
+ {"AUDIO_REF_EC_UL29 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+ {"AUDIO_REF_EC_UL29 MUX", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+
{"MM_UL1", NULL, "AUDIO_REF_EC_UL1 MUX"},
{"MM_UL2", NULL, "AUDIO_REF_EC_UL2 MUX"},
{"MM_UL3", NULL, "AUDIO_REF_EC_UL3 MUX"},
@@ -14622,6 +14850,8 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MM_UL17", NULL, "AUDIO_REF_EC_UL17 MUX"},
{"MM_UL18", NULL, "AUDIO_REF_EC_UL18 MUX"},
{"MM_UL19", NULL, "AUDIO_REF_EC_UL19 MUX"},
+ {"MM_UL28", NULL, "AUDIO_REF_EC_UL28 MUX"},
+ {"MM_UL29", NULL, "AUDIO_REF_EC_UL29 MUX"},
{"Voice_Tx Mixer", "PRI_TX_Voice", "PRI_I2S_TX"},
{"Voice_Tx Mixer", "PRI_MI2S_TX_Voice", "PRI_MI2S_TX"},
@@ -16215,7 +16445,8 @@ int msm_routing_set_downmix_control_data(int be_id, int session_id,
uint16_t ii;
uint16_t *dst_gain_ptr = NULL;
- if (be_id >= MSM_BACKEND_DAI_MAX) {
+ if (be_id < MSM_BACKEND_DAI_PRI_I2S_RX ||
+ be_id >= MSM_BACKEND_DAI_MAX) {
rc = -EINVAL;
return rc;
}
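
The routing entry point now rejects be_id values below MSM_BACKEND_DAI_PRI_I2S_RX as well as at or above MSM_BACKEND_DAI_MAX, which matters because be_id is copied straight out of the userspace byte blob in msm_pcm_playback_dnmix_ctl_put. The guard in isolation:

    /* be_id originates from an untrusted TYPE_BYTES control write and
     * must be validated at both ends before it indexes routing tables.
     */
    static bool be_id_valid(int be_id)
    {
        return be_id >= MSM_BACKEND_DAI_PRI_I2S_RX &&
               be_id < MSM_BACKEND_DAI_MAX;
    }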
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
index 21dfa48308c3..99541b824864 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
@@ -200,6 +200,8 @@ enum {
MSM_FRONTEND_DAI_MULTIMEDIA25,
MSM_FRONTEND_DAI_MULTIMEDIA26,
MSM_FRONTEND_DAI_MULTIMEDIA27,
+ MSM_FRONTEND_DAI_MULTIMEDIA28,
+ MSM_FRONTEND_DAI_MULTIMEDIA29,
MSM_FRONTEND_DAI_CS_VOICE,
MSM_FRONTEND_DAI_VOIP,
MSM_FRONTEND_DAI_AFE_RX,
@@ -225,8 +227,8 @@ enum {
MSM_FRONTEND_DAI_MAX,
};
-#define MSM_FRONTEND_DAI_MM_SIZE (MSM_FRONTEND_DAI_MULTIMEDIA27 + 1)
-#define MSM_FRONTEND_DAI_MM_MAX_ID MSM_FRONTEND_DAI_MULTIMEDIA27
+#define MSM_FRONTEND_DAI_MM_SIZE (MSM_FRONTEND_DAI_MULTIMEDIA29 + 1)
+#define MSM_FRONTEND_DAI_MM_MAX_ID MSM_FRONTEND_DAI_MULTIMEDIA29
enum {
MSM_BACKEND_DAI_PRI_I2S_RX = 0,
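
Because MSM_FRONTEND_DAI_MM_SIZE and MSM_FRONTEND_DAI_MM_MAX_ID derive from the last MULTIMEDIA enumerator, new multimedia IDs must land immediately before MSM_FRONTEND_DAI_CS_VOICE or every non-MM frontend shifts. A hypothetical compile-time guard for that invariant (not part of the patch):

    #include <linux/bug.h>

    static inline void msm_fe_enum_sanity(void)
    {
        /* Multimedia IDs must stay contiguous from 0 so that tables
         * sized by MSM_FRONTEND_DAI_MM_SIZE line up with the enum.
         */
        BUILD_BUG_ON(MSM_FRONTEND_DAI_MM_MAX_ID + 1 !=
                     MSM_FRONTEND_DAI_MM_SIZE);
        BUILD_BUG_ON(MSM_FRONTEND_DAI_MULTIMEDIA29 + 1 !=
                     MSM_FRONTEND_DAI_CS_VOICE);
    }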
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 14f9411104b3..c3d86e6cced2 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -44,7 +44,7 @@
#define TRUE 0x01
#define FALSE 0x00
-#define SESSION_MAX 8
+#define SESSION_MAX 9
#define ASM_MAX_CHANNELS 8
enum {
ASM_TOPOLOGY_CAL = 0,
@@ -1338,7 +1338,7 @@ int q6asm_audio_client_buf_alloc(unsigned int dir,
pr_debug("%s: session[%d]bufsz[%d]bufcnt[%d]\n", __func__, ac->session,
bufsz, bufcnt);
- if (ac->session <= 0 || ac->session > 8) {
+ if (ac->session <= 0 || ac->session > ASM_ACTIVE_STREAMS_ALLOWED) {
pr_err("%s: Session ID is invalid, session = %d\n", __func__,
ac->session);
goto fail;
@@ -1429,7 +1429,7 @@ int q6asm_audio_client_buf_alloc_contiguous(unsigned int dir,
__func__, ac->session,
bufsz, bufcnt);
- if (ac->session <= 0 || ac->session > 8) {
+ if (ac->session <= 0 || ac->session > ASM_ACTIVE_STREAMS_ALLOWED) {
pr_err("%s: Session ID is invalid, session = %d\n", __func__,
ac->session);
goto fail;
@@ -1738,7 +1738,7 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
return -EINVAL;
}
- if (ac->session <= 0 || ac->session > 8) {
+ if (ac->session <= 0 || ac->session > ASM_ACTIVE_STREAMS_ALLOWED) {
pr_err("%s: Session ID is invalid, session = %d\n", __func__,
ac->session);
return -EINVAL;
@@ -3368,6 +3368,15 @@ int q6asm_set_shared_circ_buff(struct audio_client *ac,
int bytes_to_alloc, rc;
size_t len;
+ mutex_lock(&ac->cmd_lock);
+
+ if (ac->port[dir].buf) {
+ pr_err("%s: Buffer already allocated\n", __func__);
+ rc = -EINVAL;
+ mutex_unlock(&ac->cmd_lock);
+ goto done;
+ }
+
buf_circ = kzalloc(sizeof(struct audio_buffer), GFP_KERNEL);
if (!buf_circ) {
@@ -3375,10 +3384,6 @@ int q6asm_set_shared_circ_buff(struct audio_client *ac,
goto done;
}
- mutex_lock(&ac->cmd_lock);
-
- ac->port[dir].buf = buf_circ;
-
bytes_to_alloc = bufsz * bufcnt;
bytes_to_alloc = PAGE_ALIGN(bytes_to_alloc);
@@ -3390,11 +3395,12 @@ int q6asm_set_shared_circ_buff(struct audio_client *ac,
if (rc) {
pr_err("%s: Audio ION alloc is failed, rc = %d\n", __func__,
rc);
- mutex_unlock(&ac->cmd_lock);
kfree(buf_circ);
+ mutex_unlock(&ac->cmd_lock);
goto done;
}
+ ac->port[dir].buf = buf_circ;
buf_circ->used = dir ^ 1;
buf_circ->size = bytes_to_alloc;
buf_circ->actual_size = bytes_to_alloc;
@@ -3559,12 +3565,6 @@ int q6asm_open_shared_io(struct audio_client *ac,
goto done;
}
- if (ac->port[dir].buf) {
- pr_err("%s: Buffer already allocated\n", __func__);
- rc = -EINVAL;
- goto done;
- }
-
rc = q6asm_set_shared_circ_buff(ac, open, bufsz, bufcnt, dir);
if (rc)
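
Taken together, the q6asm_set_shared_circ_buff() hunks close a small race: the "buffer already allocated" test used to live in q6asm_open_shared_io(), outside cmd_lock, so two callers could both pass it and then double-allocate; and ac->port[dir].buf was published before the ION allocation had succeeded, leaving a window in which other code could observe a buffer with no backing memory. The reshuffled flow is the standard check-allocate-publish pattern under one lock, sketched here with placeholder names (backing_alloc() stands in for the ION allocation):

    int alloc_once(struct audio_client *ac, int dir, size_t bytes)
    {
            struct audio_buffer *buf;
            int rc = 0;

            mutex_lock(&ac->cmd_lock);
            if (ac->port[dir].buf) {          /* duplicate request */
                    rc = -EINVAL;
                    goto out;
            }
            buf = kzalloc(sizeof(*buf), GFP_KERNEL);
            if (!buf) {
                    rc = -ENOMEM;
                    goto out;
            }
            rc = backing_alloc(buf, bytes);   /* placeholder for ION alloc */
            if (rc) {
                    kfree(buf);
                    goto out;
            }
            ac->port[dir].buf = buf;          /* publish only after success */
    out:
            mutex_unlock(&ac->cmd_lock);
            return rc;
    }
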
@@ -7215,10 +7215,9 @@ int q6asm_send_rtic_event_ack(struct audio_client *ac,
goto done;
}
- q6asm_add_hdr_async(ac, &ack.hdr,
+ q6asm_stream_add_hdr_async(ac, &ack.hdr,
sizeof(struct avs_param_rtic_event_ack) +
- params_length, TRUE);
- atomic_set(&ac->cmd_state, -1);
+ params_length, TRUE, ac->stream_id);
ack.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM_V2;
ack.encdec.param_id = AVS_PARAM_ID_RTIC_EVENT_ACK;
ack.encdec.param_size = params_length;
@@ -7228,31 +7227,11 @@ int q6asm_send_rtic_event_ack(struct audio_client *ac,
memcpy(asm_params + sizeof(struct avs_param_rtic_event_ack),
param, params_length);
rc = apr_send_pkt(ac->apr, (uint32_t *) asm_params);
- if (rc < 0) {
+ if (rc < 0)
pr_err("%s: apr pkt failed for rtic event ack\n", __func__);
- rc = -EINVAL;
- goto fail_send_param;
- }
-
- rc = wait_event_timeout(ac->cmd_wait,
- (atomic_read(&ac->cmd_state) >= 0), 1 * HZ);
- if (!rc) {
- pr_err("%s: timeout for rtic event ack cmd\n", __func__);
- rc = -ETIMEDOUT;
- goto fail_send_param;
- }
-
- if (atomic_read(&ac->cmd_state) > 0) {
- pr_err("%s: DSP returned error[%s] for rtic event ack cmd\n",
- __func__, adsp_err_get_err_str(
- atomic_read(&ac->cmd_state)));
- rc = adsp_err_get_lnx_err_code(
- atomic_read(&ac->cmd_state));
- goto fail_send_param;
- }
- rc = 0;
+ else
+ rc = 0;
-fail_send_param:
kfree(asm_params);
done:
return rc;
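
The rtic event ack used to be stamped with the session-level header and then block for up to a second on cmd_state. The rewrite addresses the packet to the active stream via q6asm_stream_add_hdr_async() and treats the ack as fire-and-forget: the only failure it can report is a transport error from apr_send_pkt(). Reduced to its shape (field setup elided, pkt_size schematic):

    /* Fire-and-forget, stream-addressed command. */
    q6asm_stream_add_hdr_async(ac, &ack.hdr, pkt_size, TRUE,
                               ac->stream_id);
    ack.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM_V2;
    rc = apr_send_pkt(ac->apr, (uint32_t *)asm_params);
    if (rc < 0)             /* transport failure is the only error path */
            pr_err("apr pkt failed: %d\n", rc);
    else
            rc = 0;         /* success: do not wait for the DSP's reply */
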
diff --git a/sound/soc/msm/qdsp6v2/q6core.c b/sound/soc/msm/qdsp6v2/q6core.c
index 6fed443186e5..c38cdda74623 100644
--- a/sound/soc/msm/qdsp6v2/q6core.c
+++ b/sound/soc/msm/qdsp6v2/q6core.c
@@ -22,6 +22,7 @@
#include <soc/qcom/smd.h>
#include <sound/q6core.h>
#include <sound/audio_cal_utils.h>
+#include <sound/adsp_err.h>
#define TIMEOUT_MS 1000
/*
@@ -37,16 +38,30 @@ enum {
CORE_MAX_CAL
};
+enum ver_query_status {
+ VER_QUERY_UNATTEMPTED,
+ VER_QUERY_UNSUPPORTED,
+ VER_QUERY_SUPPORTED
+};
+
+struct q6core_avcs_ver_info {
+ enum ver_query_status status;
+ struct avcs_fwk_ver_info avcs_fwk_ver_info;
+};
+
struct q6core_str {
struct apr_svc *core_handle_q;
wait_queue_head_t bus_bw_req_wait;
wait_queue_head_t cmd_req_wait;
+ wait_queue_head_t avcs_fwk_ver_req_wait;
u32 bus_bw_resp_received;
enum cmd_flags {
FLAG_NONE,
FLAG_CMDRSP_LICENSE_RESULT
} cmd_resp_received_flag;
+ u32 avcs_fwk_ver_resp_received;
struct mutex cmd_lock;
+ struct mutex ver_lock;
union {
struct avcs_cmdrsp_get_license_validation_result
cmdrsp_license_result;
@@ -55,6 +70,7 @@ struct q6core_str {
struct cal_type_data *cal_data[CORE_MAX_CAL];
uint32_t mem_map_cal_handle;
int32_t adsp_status;
+ struct q6core_avcs_ver_info q6core_avcs_ver_info;
};
static struct q6core_str q6core_lcl;
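
q6core_avcs_ver_info turns "does this ADSP build answer the framework-version query?" into cached state: UNATTEMPTED until someone asks, then SUPPORTED or UNSUPPORTED for the life of the driver, so old firmware does not pay the query timeout on every call (a timeout itself leaves the state UNATTEMPTED, allowing a retry). The idiom in isolation, with invented names; the real code additionally serializes callers with ver_lock:

    /* Tri-state lazy capability probe with a cached verdict. */
    enum probe_state { PROBE_UNTRIED, PROBE_UNSUPPORTED, PROBE_OK };
    static enum probe_state state = PROBE_UNTRIED;

    static int query_capability(void)
    {
            if (state == PROBE_UNTRIED)
                    state = do_probe() ? PROBE_OK : PROBE_UNSUPPORTED;
            return (state == PROBE_OK) ? 0 : -EOPNOTSUPP;
    }

do_probe() is a stand-in for the APR round trip added further down.
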
@@ -131,6 +147,17 @@ static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv)
q6core_lcl.bus_bw_resp_received = 1;
wake_up(&q6core_lcl.bus_bw_req_wait);
break;
+ case AVCS_CMD_GET_FWK_VERSION:
+ pr_debug("%s: Cmd = AVCS_CMD_GET_FWK_VERSION status[%s]\n",
+ __func__, adsp_err_get_err_str(payload1[1]));
+ /* ADSP status to match Linux error standard */
+ q6core_lcl.adsp_status = -payload1[1];
+ if (payload1[1] == ADSP_EUNSUPPORTED)
+ q6core_lcl.q6core_avcs_ver_info.status =
+ VER_QUERY_UNSUPPORTED;
+ q6core_lcl.avcs_fwk_ver_resp_received = 1;
+ wake_up(&q6core_lcl.avcs_fwk_ver_req_wait);
+ break;
default:
pr_err("%s: Invalid cmd rsp[0x%x][0x%x] opcode %d\n",
__func__,
@@ -174,6 +201,13 @@ static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv)
q6core_lcl.cmd_resp_received_flag = FLAG_CMDRSP_LICENSE_RESULT;
wake_up(&q6core_lcl.cmd_req_wait);
break;
+ case AVCS_CMDRSP_GET_FWK_VERSION:
+ pr_debug("%s: Received AVCS_CMDRSP_GET_FWK_VERSION\n",
+ __func__);
+ q6core_lcl.q6core_avcs_ver_info.status = VER_QUERY_SUPPORTED;
+ q6core_lcl.avcs_fwk_ver_resp_received = 1;
+ wake_up(&q6core_lcl.avcs_fwk_ver_req_wait);
+ break;
default:
pr_err("%s: Message id from adsp core svc: 0x%x\n",
__func__, data->opcode);
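
Both new opcode cases in the APR callback follow the same waker convention: record the outcome in the shared struct, set the matching resp_received flag, and wake exactly the queue the requester is sleeping on; the command-status case additionally stores the DSP's positive status code negated, so it reads as a Linux-style errno downstream. In isolation (opcode and names schematic):

    /* Waker half: publish the result, then wake the requester. */
    case SOME_CMDRSP_OPCODE:
            dsp_status = -payload1[1];  /* positive DSP code -> -errno */
            resp_received = 1;          /* must be set before wake_up() */
            wake_up(&waitq);
            break;
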
@@ -230,6 +264,97 @@ struct cal_block_data *cal_utils_get_cal_block_by_key(
return NULL;
}
+static int q6core_send_get_avcs_fwk_ver_cmd(void)
+{
+ struct apr_hdr avcs_ver_cmd;
+ int ret;
+
+ avcs_ver_cmd.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ avcs_ver_cmd.pkt_size = sizeof(struct apr_hdr);
+ avcs_ver_cmd.src_port = 0;
+ avcs_ver_cmd.dest_port = 0;
+ avcs_ver_cmd.token = 0;
+ avcs_ver_cmd.opcode = AVCS_CMD_GET_FWK_VERSION;
+
+ q6core_lcl.adsp_status = 0;
+ q6core_lcl.avcs_fwk_ver_resp_received = 0;
+
+ ret = apr_send_pkt(q6core_lcl.core_handle_q,
+ (uint32_t *) &avcs_ver_cmd);
+ if (ret < 0) {
+ pr_err("%s: failed to send apr packet, ret=%d\n", __func__,
+ ret);
+ goto done;
+ }
+
+ ret = wait_event_timeout(q6core_lcl.avcs_fwk_ver_req_wait,
+ (q6core_lcl.avcs_fwk_ver_resp_received == 1),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout for AVCS fwk version info\n",
+ __func__);
+ ret = -ETIMEDOUT;
+ goto done;
+ }
+
+ if (q6core_lcl.adsp_status < 0) {
+ /*
+ * adsp_err_get_err_str expects a positive value but we store
+ * the DSP error as negative to match the Linux error standard.
+ * Pass in the negated value so adsp_err_get_err_str returns
+ * the correct string.
+ */
+ pr_err("%s: DSP returned error[%s]\n", __func__,
+ adsp_err_get_err_str(-q6core_lcl.adsp_status));
+ ret = adsp_err_get_lnx_err_code(q6core_lcl.adsp_status);
+ goto done;
+ }
+
+ ret = 0;
+
+done:
+ return ret;
+}
+
+int q6core_get_avcs_fwk_ver_info(uint32_t service_id,
+ struct avcs_fwk_ver_info *ver_info)
+{
+ int ret;
+
+ mutex_lock(&(q6core_lcl.ver_lock));
+ pr_debug("%s: q6core_avcs_ver_info.status(%d)\n", __func__,
+ q6core_lcl.q6core_avcs_ver_info.status);
+
+ switch (q6core_lcl.q6core_avcs_ver_info.status) {
+ case VER_QUERY_SUPPORTED:
+ ret = 0;
+ break;
+ case VER_QUERY_UNSUPPORTED:
+ ret = -EOPNOTSUPP;
+ break;
+ case VER_QUERY_UNATTEMPTED:
+ if (q6core_is_adsp_ready()) {
+ ret = q6core_send_get_avcs_fwk_ver_cmd();
+ } else {
+ pr_err("%s: ADSP is not ready to query version\n",
+ __func__);
+ ret = -ENODEV;
+ }
+ break;
+ default:
+ pr_err("%s: Invalid version query status %d\n", __func__,
+ q6core_lcl.q6core_avcs_ver_info.status);
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&(q6core_lcl.ver_lock));
+
+ return ret;
+}
+EXPORT_SYMBOL(q6core_get_avcs_fwk_ver_info);
+
int32_t core_set_license(uint32_t key, uint32_t module_id)
{
struct avcs_cmd_set_license *cmd_setl = NULL;
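
q6core_send_get_avcs_fwk_ver_cmd() above is the requester half of that handshake: arm the response flag, send the packet, sleep on the waitqueue with a bounded timeout, then translate any DSP status recorded by the callback into a Linux errno. The skeleton, stripped of the AVCS specifics (apr_send_pkt() and adsp_err_get_lnx_err_code() are the real helpers; the rest is schematic):

    static wait_queue_head_t waitq;
    static u32 resp_received;
    static int dsp_status;

    static int send_and_wait(void *handle, struct apr_hdr *cmd)
    {
            int ret;

            resp_received = 0;          /* arm the flag before sending */
            dsp_status = 0;

            ret = apr_send_pkt(handle, (uint32_t *)cmd);
            if (ret < 0)
                    return ret;

            ret = wait_event_timeout(waitq, resp_received == 1,
                                     msecs_to_jiffies(TIMEOUT_MS));
            if (!ret)                   /* 0 means the wait timed out */
                    return -ETIMEDOUT;

            if (dsp_status < 0)         /* negated ADSP error code */
                    return adsp_err_get_lnx_err_code(dsp_status);
            return 0;
    }

With the EXPORT_SYMBOL in place, other audio modules can call q6core_get_avcs_fwk_ver_info() and branch on -EOPNOTSUPP to keep working on firmware that predates AVCS_CMD_GET_FWK_VERSION, without owning any of the APR plumbing themselves.
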
@@ -887,18 +1012,16 @@ err:
static int __init core_init(void)
{
+ memset(&q6core_lcl, 0, sizeof(struct q6core_str));
init_waitqueue_head(&q6core_lcl.bus_bw_req_wait);
- q6core_lcl.bus_bw_resp_received = 0;
-
- q6core_lcl.core_handle_q = NULL;
-
init_waitqueue_head(&q6core_lcl.cmd_req_wait);
+ init_waitqueue_head(&q6core_lcl.avcs_fwk_ver_req_wait);
q6core_lcl.cmd_resp_received_flag = FLAG_NONE;
mutex_init(&q6core_lcl.cmd_lock);
- q6core_lcl.mem_map_cal_handle = 0;
- q6core_lcl.adsp_status = 0;
+ mutex_init(&q6core_lcl.ver_lock);
q6core_init_cal_data();
+
return 0;
}
module_init(core_init);
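
A static object like q6core_lcl is zero-initialized by the loader anyway, so the memset() mostly documents the invariant (and keeps it true if the object ever stops being static); the piecemeal NULL/0 assignments it replaces were redundant for the same reason. Only state with a non-trivial representation still needs per-field setup:

    /* Zero everything once, then do only the non-trivial init. */
    memset(&q6core_lcl, 0, sizeof(q6core_lcl));
    init_waitqueue_head(&q6core_lcl.avcs_fwk_ver_req_wait);
    mutex_init(&q6core_lcl.ver_lock);
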
@@ -906,6 +1029,7 @@ module_init(core_init);
static void __exit core_exit(void)
{
mutex_destroy(&q6core_lcl.cmd_lock);
+ mutex_destroy(&q6core_lcl.ver_lock);
q6core_delete_cal_data();
}
module_exit(core_exit);
diff --git a/sound/soc/msm/qdsp6v2/rtac.c b/sound/soc/msm/qdsp6v2/rtac.c
index 83628b8d62d9..77c6dfbbe8c1 100644
--- a/sound/soc/msm/qdsp6v2/rtac.c
+++ b/sound/soc/msm/qdsp6v2/rtac.c
@@ -901,6 +901,14 @@ int send_adm_apr(void *buf, u32 opcode)
bytes_returned = ((u32 *)rtac_cal[ADM_RTAC_CAL].cal_data.
kvaddr)[2] + 3 * sizeof(u32);
+ if (bytes_returned > rtac_cal[ADM_RTAC_CAL].
+ map_data.map_size) {
+ pr_err("%s: Invalid data size = %d\n",
+ __func__, bytes_returned);
+ result = -EINVAL;
+ goto err;
+ }
+
if (bytes_returned > user_buf_size) {
pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n",
__func__, user_buf_size, bytes_returned);
@@ -1123,6 +1131,14 @@ int send_rtac_asm_apr(void *buf, u32 opcode)
bytes_returned = ((u32 *)rtac_cal[ASM_RTAC_CAL].cal_data.
kvaddr)[2] + 3 * sizeof(u32);
+ if (bytes_returned > rtac_cal[ASM_RTAC_CAL].
+ map_data.map_size) {
+ pr_err("%s: Invalid data size = %d\n",
+ __func__, bytes_returned);
+ result = -EINVAL;
+ goto err;
+ }
+
if (bytes_returned > user_buf_size) {
pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n",
__func__, user_buf_size, bytes_returned);
@@ -1382,6 +1398,14 @@ static int send_rtac_afe_apr(void *buf, uint32_t opcode)
bytes_returned = get_resp->param_size +
sizeof(struct afe_port_param_data_v2);
+ if (bytes_returned > rtac_cal[AFE_RTAC_CAL].
+ map_data.map_size) {
+ pr_err("%s: Invalid data size = %d\n",
+ __func__, bytes_returned);
+ result = -EINVAL;
+ goto err;
+ }
+
if (bytes_returned > user_afe_buf.buf_size) {
pr_err("%s: user size = 0x%x, returned size = 0x%x\n",
__func__, user_afe_buf.buf_size,
@@ -1606,6 +1630,14 @@ int send_voice_apr(u32 mode, void *buf, u32 opcode)
bytes_returned = ((u32 *)rtac_cal[VOICE_RTAC_CAL].cal_data.
kvaddr)[2] + 3 * sizeof(u32);
+ if (bytes_returned > rtac_cal[VOICE_RTAC_CAL].
+ map_data.map_size) {
+ pr_err("%s: Invalid data size = %d\n",
+ __func__, bytes_returned);
+ result = -EINVAL;
+ goto err;
+ }
+
if (bytes_returned > user_buf_size) {
pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n",
__func__, user_buf_size, bytes_returned);
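
All four RTAC paths derive bytes_returned from a size field taken from the DSP's own response in shared memory, so a misbehaving or compromised DSP could claim more data than the mapping holds, and the later copy toward userspace would read past it. The new guard clamps the device-supplied length against the mapped region before the existing user-buffer check runs; generically:

    /* Validate a device-supplied length against both the source
     * mapping and the destination buffer before copying out.
     */
    if (bytes_returned > map_size)          /* larger than the mapping */
            return -EINVAL;
    if (bytes_returned > user_buf_size)     /* larger than caller's buffer */
            return -EINVAL;
    if (copy_to_user(ubuf, kvaddr, bytes_returned))
            return -EFAULT;
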
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index ad139d45f5b2..b62cc921695a 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -215,7 +215,6 @@ static int snd_usb_copy_string_desc(struct mixer_build *state,
int index, char *buf, int maxlen)
{
int len = usb_string(state->chip->dev, index, buf, maxlen - 1);
- buf[len] = 0;
return len;
}