Diffstat (limited to 'drivers')
-rw-r--r--  drivers/android/Kconfig | 10
-rw-r--r--  drivers/android/Makefile | 1
-rw-r--r--  drivers/android/binder.c | 151
-rw-r--r--  drivers/android/binder_alloc.c | 425
-rw-r--r--  drivers/android/binder_alloc.h | 33
-rw-r--r--  drivers/android/binder_alloc_selftest.c | 310
-rw-r--r--  drivers/android/binder_trace.h | 79
-rw-r--r--  drivers/ata/libata-transport.c | 9
-rw-r--r--  drivers/ata/pata_amd.c | 1
-rw-r--r--  drivers/ata/pata_cs5536.c | 1
-rw-r--r--  drivers/base/bus.c | 2
-rw-r--r--  drivers/base/platform.c | 3
-rw-r--r--  drivers/block/aoe/aoeblk.c | 4
-rw-r--r--  drivers/block/drbd/drbd_main.c | 6
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 8
-rw-r--r--  drivers/block/drbd/drbd_proc.c | 2
-rw-r--r--  drivers/block/drbd/drbd_req.c | 2
-rw-r--r--  drivers/block/pktcdvd.c | 4
-rw-r--r--  drivers/block/rbd.c | 2
-rw-r--r--  drivers/block/skd_main.c | 21
-rw-r--r--  drivers/bluetooth/btusb.c | 1
-rw-r--r--  drivers/char/diag/diag_debugfs.c | 7
-rw-r--r--  drivers/char/diag/diag_memorydevice.c | 1
-rw-r--r--  drivers/char/diag/diagchar.h | 5
-rw-r--r--  drivers/char/diag/diagchar_core.c | 35
-rw-r--r--  drivers/char/diag/diagfwd.c | 10
-rw-r--r--  drivers/clk/qcom/gpucc-sdm660.c | 1
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 2
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c | 7
-rw-r--r--  drivers/cpufreq/cpufreq_interactive.c | 8
-rw-r--r--  drivers/crypto/talitos.c | 7
-rw-r--r--  drivers/dma/edma.c | 19
-rw-r--r--  drivers/extcon/extcon-axp288.c | 2
-rw-r--r--  drivers/firmware/efi/libstub/Makefile | 2
-rw-r--r--  drivers/firmware/efi/libstub/arm64-stub.c | 10
-rw-r--r--  drivers/firmware/psci.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.c | 4
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 2
-rw-r--r--  drivers/gpu/drm/i2c/adv7511.c | 71
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 3
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c | 9
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c | 28
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_hdcp2p2.c | 36
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 10
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_core_perf.c | 112
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_crtc.c | 16
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_crtc.h | 7
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_rm.c | 76
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_rm.h | 2
-rw-r--r--  drivers/gpu/drm/msm/sde_hdcp.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c | 4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 2
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid.c | 3
-rw-r--r--  drivers/hid/usbhid/hid-core.c | 12
-rw-r--r--  drivers/hv/hv_fcopy.c | 4
-rw-r--r--  drivers/hwmon/gl520sm.c | 25
-rw-r--r--  drivers/hwtracing/intel_th/pci.c | 10
-rw-r--r--  drivers/hwtracing/stm/core.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-at91.c | 3
-rw-r--r--  drivers/i2c/busses/i2c-ismt.c | 6
-rw-r--r--  drivers/i2c/busses/i2c-jz4780.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-meson.c | 2
-rw-r--r--  drivers/iio/adc/ad7793.c | 4
-rw-r--r--  drivers/iio/adc/ad_sigma_delta.c | 28
-rw-r--r--  drivers/iio/adc/axp288_adc.c | 32
-rw-r--r--  drivers/iio/adc/mcp320x.c | 25
-rw-r--r--  drivers/iio/adc/twl4030-madc.c | 10
-rw-r--r--  drivers/iio/adc/xilinx-xadc-core.c | 6
-rw-r--r--  drivers/iio/industrialio-core.c | 4
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c | 2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 8
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 10
-rw-r--r--  drivers/input/mouse/trackpoint.c | 4
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 7
-rw-r--r--  drivers/iommu/amd_iommu.c | 1
-rw-r--r--  drivers/iommu/io-pgtable-arm.c | 6
-rw-r--r--  drivers/irqchip/irq-crossbar.c | 3
-rw-r--r--  drivers/irqchip/irq-mips-gic.c | 5
-rw-r--r--  drivers/isdn/i4l/isdn_ppp.c | 37
-rw-r--r--  drivers/leds/Kconfig | 11
-rw-r--r--  drivers/leds/Makefile | 1
-rw-r--r--  drivers/leds/leds-qpnp-flash.c | 2709
-rw-r--r--  drivers/leds/leds-qpnp-wled.c | 238
-rw-r--r--  drivers/md/bcache/bcache.h | 1
-rw-r--r--  drivers/md/bcache/request.c | 16
-rw-r--r--  drivers/md/bcache/super.c | 15
-rw-r--r--  drivers/md/bcache/sysfs.c | 4
-rw-r--r--  drivers/md/bcache/util.c | 50
-rw-r--r--  drivers/md/bcache/writeback.c | 20
-rw-r--r--  drivers/md/bcache/writeback.h | 21
-rw-r--r--  drivers/md/bitmap.c | 5
-rw-r--r--  drivers/md/dm-cache-target.c | 2
-rw-r--r--  drivers/md/dm-era-target.c | 2
-rw-r--r--  drivers/md/dm-table.c | 2
-rw-r--r--  drivers/md/dm-thin.c | 2
-rw-r--r--  drivers/md/dm.c | 6
-rw-r--r--  drivers/md/linear.c | 5
-rw-r--r--  drivers/md/md.c | 6
-rw-r--r--  drivers/md/multipath.c | 2
-rw-r--r--  drivers/md/raid0.c | 6
-rw-r--r--  drivers/md/raid1.c | 4
-rw-r--r--  drivers/md/raid10.c | 29
-rw-r--r--  drivers/md/raid5.c | 27
-rw-r--r--  drivers/media/pci/ttpci/av7110_hw.c | 8
-rw-r--r--  drivers/media/pci/ttpci/av7110_hw.h | 12
-rw-r--r--  drivers/media/platform/exynos-gsc/gsc-core.c | 2
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp40.c | 16
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp47.c | 1
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c | 2
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vidc.c | 5
-rw-r--r--  drivers/media/usb/uvc/uvc_ctrl.c | 7
-rw-r--r--  drivers/media/v4l2-core/v4l2-compat-ioctl32.c | 3
-rw-r--r--  drivers/mfd/wcd9xxx-core.c | 8
-rw-r--r--  drivers/misc/cxl/api.c | 4
-rw-r--r--  drivers/misc/cxl/file.c | 8
-rw-r--r--  drivers/misc/hdcp.c | 108
-rw-r--r--  drivers/misc/qcom/qdsp6v2/audio_aac.c | 6
-rw-r--r--  drivers/misc/qcom/qdsp6v2/audio_multi_aac.c | 8
-rw-r--r--  drivers/mmc/core/debugfs.c | 53
-rw-r--r--  drivers/mmc/core/sdio_bus.c | 12
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/emac/mal.c | 3
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_clock.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_dbg.c | 2
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 2
-rw-r--r--  drivers/net/phy/phy.c | 3
-rw-r--r--  drivers/net/team/team.c | 8
-rw-r--r--  drivers/net/tun.c | 8
-rw-r--r--  drivers/net/usb/Kconfig | 2
-rw-r--r--  drivers/net/usb/plusb.c | 15
-rw-r--r--  drivers/net/usb/usbnet.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath10k/hw.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 12
-rw-r--r--  drivers/net/wireless/ath/regd.c | 16
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c | 37
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h | 5
-rw-r--r--  drivers/net/wireless/cnss2/main.c | 11
-rw-r--r--  drivers/net/wireless/cnss2/pci.c | 101
-rw-r--r--  drivers/net/wireless/cnss2/pci.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-nvm-parse.c | 70
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 10
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 28
-rw-r--r--  drivers/net/wireless/mwifiex/cfg80211.c | 2
-rw-r--r--  drivers/net/wireless/mwifiex/scan.c | 6
-rw-r--r--  drivers/net/wireless/p54/fwio.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/pci.c | 4
-rw-r--r--  drivers/net/wireless/ti/wl1251/main.c | 1
-rw-r--r--  drivers/nvme/host/nvme.h | 2
-rw-r--r--  drivers/nvme/host/pci.c | 3
-rw-r--r--  drivers/pci/hotplug/shpchp_hpc.c | 2
-rw-r--r--  drivers/pci/pci-sysfs.c | 11
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa.c | 7
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c | 2
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa.c | 7
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_client.c | 61
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_i.h | 1
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c | 2
-rw-r--r--  drivers/power/supply/qcom/Kconfig | 18
-rw-r--r--  drivers/power/supply/qcom/Makefile | 2
-rw-r--r--  drivers/power/supply/qcom/fg-core.h | 2
-rw-r--r--  drivers/power/supply/qcom/qpnp-fg-gen3.c | 37
-rw-r--r--  drivers/power/supply/qcom/qpnp-fg.c | 7051
-rw-r--r--  drivers/power/supply/qcom/qpnp-smb2.c | 18
-rw-r--r--  drivers/power/supply/qcom/qpnp-smbcharger.c | 8472
-rw-r--r--  drivers/power/supply/qcom/smb-lib.c | 162
-rw-r--r--  drivers/power/supply/qcom/smb-lib.h | 9
-rw-r--r--  drivers/power/supply/qcom/smb-reg.h | 1
-rw-r--r--  drivers/pps/clients/pps-gpio.c | 13
-rw-r--r--  drivers/regulator/qpnp-labibb-regulator.c | 67
-rw-r--r--  drivers/rtc/interface.c | 20
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 33
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.h | 17
-rw-r--r--  drivers/s390/scsi/zfcp_fc.h | 6
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 7
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 16
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_emc.c | 2
-rw-r--r--  drivers/scsi/isci/remote_node_context.c | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 5
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 13
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 8
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 2
-rw-r--r--  drivers/scsi/sd.c | 19
-rw-r--r--  drivers/scsi/sg.c | 233
-rw-r--r--  drivers/scsi/storvsc_drv.c | 2
-rw-r--r--  drivers/scsi/ufs/ufs-qcom-debugfs.c | 15
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 41
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.h | 4
-rw-r--r--  drivers/soc/qcom/Kconfig | 12
-rw-r--r--  drivers/soc/qcom/glink.c | 6
-rw-r--r--  drivers/soc/qcom/icnss.c | 6
-rw-r--r--  drivers/soc/qcom/ipc_router_glink_xprt.c | 11
-rw-r--r--  drivers/soc/qcom/qdsp6v2/Makefile | 2
-rw-r--r--  drivers/soc/qcom/qdsp6v2/apr_vm.c | 1270
-rw-r--r--  drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c | 838
-rw-r--r--  drivers/staging/android/fiq_debugger/fiq_debugger.c | 2
-rw-r--r--  drivers/staging/iio/adc/ad7192.c | 4
-rw-r--r--  drivers/staging/rts5208/rtsx_scsi.c | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl0.c | 6
-rw-r--r--  drivers/tty/serial/msm_serial_hs.c | 49
-rw-r--r--  drivers/tty/serial/sunhv.c | 6
-rw-r--r--  drivers/tty/tty_buffer.c | 26
-rw-r--r--  drivers/usb/chipidea/otg.c | 17
-rw-r--r--  drivers/usb/core/config.c | 16
-rw-r--r--  drivers/usb/core/devio.c | 10
-rw-r--r--  drivers/usb/core/hub.c | 2
-rw-r--r--  drivers/usb/core/quirks.c | 6
-rw-r--r--  drivers/usb/gadget/composite.c | 5
-rw-r--r--  drivers/usb/gadget/function/f_mass_storage.c | 27
-rw-r--r--  drivers/usb/gadget/function/f_mass_storage.h | 14
-rw-r--r--  drivers/usb/gadget/function/f_ncm.c | 3
-rw-r--r--  drivers/usb/gadget/function/u_ether_configfs.h | 35
-rw-r--r--  drivers/usb/gadget/legacy/inode.c | 49
-rw-r--r--  drivers/usb/gadget/legacy/mass_storage.c | 20
-rw-r--r--  drivers/usb/gadget/udc/atmel_usba_udc.c | 4
-rw-r--r--  drivers/usb/gadget/udc/dummy_hcd.c | 65
-rw-r--r--  drivers/usb/host/pci-quirks.c | 43
-rw-r--r--  drivers/usb/host/xhci.h | 2
-rw-r--r--  drivers/usb/renesas_usbhs/fifo.c | 23
-rw-r--r--  drivers/usb/serial/console.c | 1
-rw-r--r--  drivers/usb/serial/cp210x.c | 1
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 2
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 7
-rw-r--r--  drivers/usb/serial/mos7720.c | 9
-rw-r--r--  drivers/usb/serial/mos7840.c | 19
-rw-r--r--  drivers/usb/serial/option.c | 3
-rw-r--r--  drivers/usb/serial/qcserial.c | 4
-rw-r--r--  drivers/usb/storage/uas-detect.h | 15
-rw-r--r--  drivers/usb/storage/uas.c | 10
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 7
-rw-r--r--  drivers/uwb/hwa-rc.c | 2
-rw-r--r--  drivers/uwb/uwbd.c | 12
-rw-r--r--  drivers/video/fbdev/aty/atyfb_base.c | 2
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp.c | 49
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp_aux.c | 42
-rw-r--r--  drivers/video/fbdev/msm/mdss_dsi_host.c | 41
-rw-r--r--  drivers/watchdog/kempld_wdt.c | 9
-rw-r--r--  drivers/xen/swiotlb-xen.c | 19
244 files changed, 23809 insertions, 1115 deletions
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 4d4cdc1a6e25..01de42c8b74b 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -44,6 +44,16 @@ config ANDROID_BINDER_IPC_32BIT
Note that enabling this will break newer Android user-space.
+config ANDROID_BINDER_IPC_SELFTEST
+ bool "Android Binder IPC Driver Selftest"
+ depends on ANDROID_BINDER_IPC
+ ---help---
+ This feature allows binder selftest to run.
+
+ Binder selftest checks the allocation and free of binder buffers
+ exhaustively with combinations of various buffer sizes and
+ alignments.
+
endif # if ANDROID
endmenu
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index 4b7c726bb560..a01254c43ee3 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -1,3 +1,4 @@
ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
+obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 34f45abe0181..342e42f6f3d1 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1154,6 +1154,10 @@ static void binder_do_set_priority(struct task_struct *task,
task->pid, desired.prio,
to_kernel_prio(policy, priority));
+ trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
+ to_kernel_prio(policy, priority),
+ desired.prio);
+
/* Set the actual priority */
if (task->policy != policy || is_rt_policy(policy)) {
struct sched_param params;
@@ -1185,7 +1189,7 @@ static void binder_transaction_priority(struct task_struct *task,
struct binder_priority node_prio,
bool inherit_rt)
{
- struct binder_priority desired_prio;
+ struct binder_priority desired_prio = t->priority;
if (t->set_priority_called)
return;
@@ -1197,9 +1201,6 @@ static void binder_transaction_priority(struct task_struct *task,
if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
desired_prio.prio = NICE_TO_PRIO(0);
desired_prio.sched_policy = SCHED_NORMAL;
- } else {
- desired_prio.prio = t->priority.prio;
- desired_prio.sched_policy = t->priority.sched_policy;
}
if (node_prio.prio < t->priority.prio ||
@@ -1302,7 +1303,7 @@ static struct binder_node *binder_init_node_ilocked(
node->cookie = cookie;
node->work.type = BINDER_WORK_NODE;
priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
- node->sched_policy = (flags & FLAT_BINDER_FLAG_PRIORITY_MASK) >>
+ node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
node->min_priority = to_kernel_prio(node->sched_policy, priority);
node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
@@ -2103,6 +2104,26 @@ static void binder_send_failed_reply(struct binder_transaction *t,
}
/**
+ * binder_cleanup_transaction() - cleans up undelivered transaction
+ * @t: transaction that needs to be cleaned up
+ * @reason: reason the transaction wasn't delivered
+ * @error_code: error to return to caller (if synchronous call)
+ */
+static void binder_cleanup_transaction(struct binder_transaction *t,
+ const char *reason,
+ uint32_t error_code)
+{
+ if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
+ binder_send_failed_reply(t, error_code);
+ } else {
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "undelivered transaction %d, %s\n",
+ t->debug_id, reason);
+ binder_free_transaction(t);
+ }
+}
+
+/**
* binder_validate_object() - checks for a valid metadata object in a buffer.
* @buffer: binder_buffer that we're parsing.
* @offset: offset in the buffer at which to validate an object.
@@ -2481,7 +2502,6 @@ static int binder_translate_handle(struct flat_binder_object *fp,
(u64)node->ptr);
binder_node_unlock(node);
} else {
- int ret;
struct binder_ref_data dest_rdata;
binder_node_unlock(node);
@@ -2745,6 +2765,48 @@ static bool binder_proc_transaction(struct binder_transaction *t,
return true;
}
+/**
+ * binder_get_node_refs_for_txn() - Get required refs on node for txn
+ * @node: struct binder_node for which to get refs
+ * @proc: returns @node->proc if valid
+ * @error: if no @proc then returns BR_DEAD_REPLY
+ *
+ * User-space normally keeps the node alive when creating a transaction
+ * since it has a reference to the target. The local strong ref keeps it
+ * alive if the sending process dies before the target process processes
+ * the transaction. If the source process is malicious or has a reference
+ * counting bug, relying on the local strong ref can fail.
+ *
+ * Since user-space can cause the local strong ref to go away, we also take
+ * a tmpref on the node to ensure it survives while we are constructing
+ * the transaction. We also need a tmpref on the proc while we are
+ * constructing the transaction, so we take that here as well.
+ *
+ * Return: The target_node with refs taken or NULL if no @node->proc is NULL.
+ * Also sets @proc if valid. If the @node->proc is NULL indicating that the
+ * target proc has died, @error is set to BR_DEAD_REPLY
+ */
+static struct binder_node *binder_get_node_refs_for_txn(
+ struct binder_node *node,
+ struct binder_proc **procp,
+ uint32_t *error)
+{
+ struct binder_node *target_node = NULL;
+
+ binder_node_inner_lock(node);
+ if (node->proc) {
+ target_node = node;
+ binder_inc_node_nilocked(node, 1, 0, NULL);
+ binder_inc_node_tmpref_ilocked(node);
+ node->proc->tmp_ref++;
+ *procp = node->proc;
+ } else
+ *error = BR_DEAD_REPLY;
+ binder_node_inner_unlock(node);
+
+ return target_node;
+}
+
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
@@ -2847,43 +2909,35 @@ static void binder_transaction(struct binder_proc *proc,
ref = binder_get_ref_olocked(proc, tr->target.handle,
true);
if (ref) {
- binder_inc_node(ref->node, 1, 0, NULL);
- target_node = ref->node;
- }
- binder_proc_unlock(proc);
- if (target_node == NULL) {
+ target_node = binder_get_node_refs_for_txn(
+ ref->node, &target_proc,
+ &return_error);
+ } else {
binder_user_error("%d:%d got transaction to invalid handle\n",
- proc->pid, thread->pid);
+ proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
- return_error_param = -EINVAL;
- return_error_line = __LINE__;
- goto err_invalid_target_handle;
}
+ binder_proc_unlock(proc);
} else {
mutex_lock(&context->context_mgr_node_lock);
target_node = context->binder_context_mgr_node;
- if (target_node == NULL) {
+ if (target_node)
+ target_node = binder_get_node_refs_for_txn(
+ target_node, &target_proc,
+ &return_error);
+ else
return_error = BR_DEAD_REPLY;
- mutex_unlock(&context->context_mgr_node_lock);
- return_error_line = __LINE__;
- goto err_no_context_mgr_node;
- }
- binder_inc_node(target_node, 1, 0, NULL);
mutex_unlock(&context->context_mgr_node_lock);
}
- e->to_node = target_node->debug_id;
- binder_node_lock(target_node);
- target_proc = target_node->proc;
- if (target_proc == NULL) {
- binder_node_unlock(target_node);
- return_error = BR_DEAD_REPLY;
+ if (!target_node) {
+ /*
+ * return_error is set above
+ */
+ return_error_param = -EINVAL;
return_error_line = __LINE__;
goto err_dead_binder;
}
- binder_inner_proc_lock(target_proc);
- target_proc->tmp_ref++;
- binder_inner_proc_unlock(target_proc);
- binder_node_unlock(target_node);
+ e->to_node = target_node->debug_id;
if (security_binder_transaction(proc->tsk,
target_proc->tsk) < 0) {
return_error = BR_FAILED_REPLY;
@@ -3242,6 +3296,8 @@ static void binder_transaction(struct binder_proc *proc,
if (target_thread)
binder_thread_dec_tmpref(target_thread);
binder_proc_dec_tmpref(target_proc);
+ if (target_node)
+ binder_dec_node_tmpref(target_node);
/*
* write barrier to synchronize with initialization
* of log entry
@@ -3261,6 +3317,8 @@ err_bad_parent:
err_copy_data_failed:
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer, offp);
+ if (target_node)
+ binder_dec_node_tmpref(target_node);
target_node = NULL;
t->buffer->transaction = NULL;
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
@@ -3275,13 +3333,14 @@ err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
-err_no_context_mgr_node:
if (target_thread)
binder_thread_dec_tmpref(target_thread);
if (target_proc)
binder_proc_dec_tmpref(target_proc);
- if (target_node)
+ if (target_node) {
binder_dec_node(target_node, 1, 0);
+ binder_dec_node_tmpref(target_node);
+ }
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
@@ -4146,12 +4205,20 @@ retry:
if (put_user(cmd, (uint32_t __user *)ptr)) {
if (t_from)
binder_thread_dec_tmpref(t_from);
+
+ binder_cleanup_transaction(t, "put_user failed",
+ BR_FAILED_REPLY);
+
return -EFAULT;
}
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr))) {
if (t_from)
binder_thread_dec_tmpref(t_from);
+
+ binder_cleanup_transaction(t, "copy_to_user failed",
+ BR_FAILED_REPLY);
+
return -EFAULT;
}
ptr += sizeof(tr);
@@ -4221,15 +4288,9 @@ static void binder_release_work(struct binder_proc *proc,
struct binder_transaction *t;
t = container_of(w, struct binder_transaction, work);
- if (t->buffer->target_node &&
- !(t->flags & TF_ONE_WAY)) {
- binder_send_failed_reply(t, BR_DEAD_REPLY);
- } else {
- binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
- "undelivered transaction %d\n",
- t->debug_id);
- binder_free_transaction(t);
- }
+
+ binder_cleanup_transaction(t, "process died.",
+ BR_DEAD_REPLY);
} break;
case BINDER_WORK_RETURN_ERROR: {
struct binder_error *e = container_of(
@@ -4581,6 +4642,8 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/*pr_info("binder_ioctl: %d:%d %x %lx\n",
proc->pid, current->pid, cmd, arg);*/
+ binder_selftest_alloc(&proc->alloc);
+
trace_binder_ioctl(cmd, arg);
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
@@ -5426,6 +5489,8 @@ static void print_binder_proc_stats(struct seq_file *m,
count = binder_alloc_get_allocated_count(&proc->alloc);
seq_printf(m, " buffers: %d\n", count);
+ binder_alloc_print_pages(m, &proc->alloc);
+
count = 0;
binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry) {
@@ -5622,6 +5687,8 @@ static int __init binder_init(void)
struct binder_device *device;
struct hlist_node *tmp;
+ binder_alloc_shrinker_init();
+
atomic_set(&binder_transaction_log.cur, ~0U);
atomic_set(&binder_transaction_log_failed.cur, ~0U);
binder_deferred_workqueue = create_singlethread_workqueue("binder");
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index aabfebac6e57..b95da16fd938 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -27,9 +27,12 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/list_lru.h>
#include "binder_alloc.h"
#include "binder_trace.h"
+struct list_lru binder_alloc_lru;
+
static DEFINE_MUTEX(binder_alloc_mmap_lock);
enum {
@@ -48,14 +51,23 @@ module_param_named(debug_mask, binder_alloc_debug_mask,
pr_info(x); \
} while (0)
+static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
+{
+ return list_entry(buffer->entry.next, struct binder_buffer, entry);
+}
+
+static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
+{
+ return list_entry(buffer->entry.prev, struct binder_buffer, entry);
+}
+
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
if (list_is_last(&buffer->entry, &alloc->buffers))
- return alloc->buffer +
- alloc->buffer_size - (void *)buffer->data;
- return (size_t)list_entry(buffer->entry.next,
- struct binder_buffer, entry) - (size_t)buffer->data;
+ return (u8 *)alloc->buffer +
+ alloc->buffer_size - (u8 *)buffer->data;
+ return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}
static void binder_insert_free_buffer(struct binder_alloc *alloc,
@@ -105,9 +117,9 @@ static void binder_insert_allocated_buffer_locked(
buffer = rb_entry(parent, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
- if (new_buffer < buffer)
+ if (new_buffer->data < buffer->data)
p = &parent->rb_left;
- else if (new_buffer > buffer)
+ else if (new_buffer->data > buffer->data)
p = &parent->rb_right;
else
BUG();
@@ -122,18 +134,17 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
{
struct rb_node *n = alloc->allocated_buffers.rb_node;
struct binder_buffer *buffer;
- struct binder_buffer *kern_ptr;
+ void *kern_ptr;
- kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
- - offsetof(struct binder_buffer, data));
+ kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
- if (kern_ptr < buffer)
+ if (kern_ptr < buffer->data)
n = n->rb_left;
- else if (kern_ptr > buffer)
+ else if (kern_ptr > buffer->data)
n = n->rb_right;
else {
/*
@@ -175,13 +186,14 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
}
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
- void *start, void *end,
- struct vm_area_struct *vma)
+ void *start, void *end)
{
void *page_addr;
unsigned long user_page_addr;
- struct page **page;
- struct mm_struct *mm;
+ struct binder_lru_page *page;
+ struct vm_area_struct *vma = NULL;
+ struct mm_struct *mm = NULL;
+ bool need_mm = false;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: %s pages %pK-%pK\n", alloc->pid,
@@ -192,25 +204,27 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
trace_binder_update_page_range(alloc, allocate, start, end);
- if (vma)
- mm = NULL;
- else
- mm = get_task_mm(alloc->tsk);
+ if (allocate == 0)
+ goto free_range;
+
+ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+ if (!page->page_ptr) {
+ need_mm = true;
+ break;
+ }
+ }
+
+ /* Same as mmget_not_zero() in later kernel versions */
+ if (need_mm && atomic_inc_not_zero(&alloc->vma_vm_mm->mm_users))
+ mm = alloc->vma_vm_mm;
if (mm) {
down_write(&mm->mmap_sem);
vma = alloc->vma;
- if (vma && mm != alloc->vma_vm_mm) {
- pr_err("%d: vma mm and task mm mismatch\n",
- alloc->pid);
- vma = NULL;
- }
}
- if (allocate == 0)
- goto free_range;
-
- if (vma == NULL) {
+ if (!vma && need_mm) {
pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
alloc->pid);
goto err_no_vma;
@@ -218,18 +232,40 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
int ret;
+ bool on_lru;
+ size_t index;
- page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+ index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ page = &alloc->pages[index];
- BUG_ON(*page);
- *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
- if (*page == NULL) {
+ if (page->page_ptr) {
+ trace_binder_alloc_lru_start(alloc, index);
+
+ on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
+ WARN_ON(!on_lru);
+
+ trace_binder_alloc_lru_end(alloc, index);
+ continue;
+ }
+
+ if (WARN_ON(!vma))
+ goto err_page_ptr_cleared;
+
+ trace_binder_alloc_page_start(alloc, index);
+ page->page_ptr = alloc_page(GFP_KERNEL |
+ __GFP_HIGHMEM |
+ __GFP_ZERO);
+ if (!page->page_ptr) {
pr_err("%d: binder_alloc_buf failed for page at %pK\n",
alloc->pid, page_addr);
goto err_alloc_page_failed;
}
+ page->alloc = alloc;
+ INIT_LIST_HEAD(&page->lru);
+
ret = map_kernel_range_noflush((unsigned long)page_addr,
- PAGE_SIZE, PAGE_KERNEL, page);
+ PAGE_SIZE, PAGE_KERNEL,
+ &page->page_ptr);
flush_cache_vmap((unsigned long)page_addr,
(unsigned long)page_addr + PAGE_SIZE);
if (ret != 1) {
@@ -239,12 +275,14 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
user_page_addr =
(uintptr_t)page_addr + alloc->user_buffer_offset;
- ret = vm_insert_page(vma, user_page_addr, page[0]);
+ ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
if (ret) {
pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
alloc->pid, user_page_addr);
goto err_vm_insert_page_failed;
}
+
+ trace_binder_alloc_page_end(alloc, index);
/* vm_insert_page does not seem to increment the refcount */
}
if (mm) {
@@ -256,16 +294,27 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
free_range:
for (page_addr = end - PAGE_SIZE; page_addr >= start;
page_addr -= PAGE_SIZE) {
- page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
- if (vma)
- zap_page_range(vma, (uintptr_t)page_addr +
- alloc->user_buffer_offset, PAGE_SIZE, NULL);
+ bool ret;
+ size_t index;
+
+ index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ page = &alloc->pages[index];
+
+ trace_binder_free_lru_start(alloc, index);
+
+ ret = list_lru_add(&binder_alloc_lru, &page->lru);
+ WARN_ON(!ret);
+
+ trace_binder_free_lru_end(alloc, index);
+ continue;
+
err_vm_insert_page_failed:
unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
- __free_page(*page);
- *page = NULL;
+ __free_page(page->page_ptr);
+ page->page_ptr = NULL;
err_alloc_page_failed:
+err_page_ptr_cleared:
;
}
err_no_vma:
@@ -321,6 +370,9 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
return ERR_PTR(-ENOSPC);
}
+ /* Pad 0-size buffers so they get assigned unique addresses */
+ size = max(size, sizeof(void *));
+
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(!buffer->free);
@@ -380,32 +432,35 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
has_page_addr =
(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
- if (n == NULL) {
- if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
- buffer_size = size; /* no room for other buffers */
- else
- buffer_size = size + sizeof(struct binder_buffer);
- }
+ WARN_ON(n && buffer_size != size);
end_page_addr =
- (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
if (end_page_addr > has_page_addr)
end_page_addr = has_page_addr;
ret = binder_update_page_range(alloc, 1,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
if (ret)
return ERR_PTR(ret);
- rb_erase(best_fit, &alloc->free_buffers);
- buffer->free = 0;
- buffer->free_in_progress = 0;
- binder_insert_allocated_buffer_locked(alloc, buffer);
if (buffer_size != size) {
- struct binder_buffer *new_buffer = (void *)buffer->data + size;
+ struct binder_buffer *new_buffer;
+ new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!new_buffer) {
+ pr_err("%s: %d failed to alloc new buffer struct\n",
+ __func__, alloc->pid);
+ goto err_alloc_buf_struct_failed;
+ }
+ new_buffer->data = (u8 *)buffer->data + size;
list_add(&new_buffer->entry, &buffer->entry);
new_buffer->free = 1;
binder_insert_free_buffer(alloc, new_buffer);
}
+
+ rb_erase(best_fit, &alloc->free_buffers);
+ buffer->free = 0;
+ buffer->free_in_progress = 0;
+ binder_insert_allocated_buffer_locked(alloc, buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_alloc_buf size %zd got %pK\n",
alloc->pid, size, buffer);
@@ -420,6 +475,12 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
alloc->pid, size, alloc->free_async_space);
}
return buffer;
+
+err_alloc_buf_struct_failed:
+ binder_update_page_range(alloc, 0,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ end_page_addr);
+ return ERR_PTR(-ENOMEM);
}
/**
@@ -454,57 +515,58 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
static void *buffer_start_page(struct binder_buffer *buffer)
{
- return (void *)((uintptr_t)buffer & PAGE_MASK);
+ return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}
-static void *buffer_end_page(struct binder_buffer *buffer)
+static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
- return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+ return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
}
static void binder_delete_free_buffer(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
struct binder_buffer *prev, *next = NULL;
- int free_page_end = 1;
- int free_page_start = 1;
-
+ bool to_free = true;
BUG_ON(alloc->buffers.next == &buffer->entry);
- prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
+ prev = binder_buffer_prev(buffer);
BUG_ON(!prev->free);
- if (buffer_end_page(prev) == buffer_start_page(buffer)) {
- free_page_start = 0;
- if (buffer_end_page(prev) == buffer_end_page(buffer))
- free_page_end = 0;
+ if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
+ to_free = false;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %pK share page with %pK\n",
- alloc->pid, buffer, prev);
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid, buffer->data, prev->data);
}
if (!list_is_last(&buffer->entry, &alloc->buffers)) {
- next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
- if (buffer_start_page(next) == buffer_end_page(buffer)) {
- free_page_end = 0;
- if (buffer_start_page(next) ==
- buffer_start_page(buffer))
- free_page_start = 0;
+ next = binder_buffer_next(buffer);
+ if (buffer_start_page(next) == buffer_start_page(buffer)) {
+ to_free = false;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %pK share page with %pK\n",
- alloc->pid, buffer, prev);
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid,
+ buffer->data,
+ next->data);
}
}
- list_del(&buffer->entry);
- if (free_page_start || free_page_end) {
+
+ if (PAGE_ALIGNED(buffer->data)) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
- alloc->pid, buffer, free_page_start ? "" : " end",
- free_page_end ? "" : " start", prev, next);
- binder_update_page_range(alloc, 0, free_page_start ?
- buffer_start_page(buffer) : buffer_end_page(buffer),
- (free_page_end ? buffer_end_page(buffer) :
- buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+ "%d: merge free, buffer start %pK is page aligned\n",
+ alloc->pid, buffer->data);
+ to_free = false;
}
+
+ if (to_free) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
+ alloc->pid, buffer->data,
+ prev->data, next->data);
+ binder_update_page_range(alloc, 0, buffer_start_page(buffer),
+ buffer_start_page(buffer) + PAGE_SIZE);
+ }
+ list_del(&buffer->entry);
+ kfree(buffer);
}
static void binder_free_buf_locked(struct binder_alloc *alloc,
@@ -525,8 +587,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->free);
BUG_ON(size > buffer_size);
BUG_ON(buffer->transaction != NULL);
- BUG_ON((void *)buffer < alloc->buffer);
- BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);
+ BUG_ON(buffer->data < alloc->buffer);
+ BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
if (buffer->async_transaction) {
alloc->free_async_space += size + sizeof(struct binder_buffer);
@@ -538,14 +600,12 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
binder_update_page_range(alloc, 0,
(void *)PAGE_ALIGN((uintptr_t)buffer->data),
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
- NULL);
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));
rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
buffer->free = 1;
if (!list_is_last(&buffer->entry, &alloc->buffers)) {
- struct binder_buffer *next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
+ struct binder_buffer *next = binder_buffer_next(buffer);
if (next->free) {
rb_erase(&next->rb_node, &alloc->free_buffers);
@@ -553,8 +613,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
}
}
if (alloc->buffers.next != &buffer->entry) {
- struct binder_buffer *prev = list_entry(buffer->entry.prev,
- struct binder_buffer, entry);
+ struct binder_buffer *prev = binder_buffer_prev(buffer);
if (prev->free) {
binder_delete_free_buffer(alloc, buffer);
@@ -640,14 +699,14 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
}
alloc->buffer_size = vma->vm_end - vma->vm_start;
- if (binder_update_page_range(alloc, 1, alloc->buffer,
- alloc->buffer + PAGE_SIZE, vma)) {
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
ret = -ENOMEM;
- failure_string = "alloc small buf";
- goto err_alloc_small_buf_failed;
+ failure_string = "alloc buffer struct";
+ goto err_alloc_buf_struct_failed;
}
- buffer = alloc->buffer;
- INIT_LIST_HEAD(&alloc->buffers);
+
+ buffer->data = alloc->buffer;
list_add(&buffer->entry, &alloc->buffers);
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
@@ -655,10 +714,12 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
barrier();
alloc->vma = vma;
alloc->vma_vm_mm = vma->vm_mm;
+ /* Same as mmgrab() in later kernel versions */
+ atomic_inc(&alloc->vma_vm_mm->mm_count);
return 0;
-err_alloc_small_buf_failed:
+err_alloc_buf_struct_failed:
kfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
@@ -678,14 +739,13 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
struct rb_node *n;
int buffers, page_count;
+ struct binder_buffer *buffer;
BUG_ON(alloc->vma);
buffers = 0;
mutex_lock(&alloc->mutex);
while ((n = rb_first(&alloc->allocated_buffers))) {
- struct binder_buffer *buffer;
-
buffer = rb_entry(n, struct binder_buffer, rb_node);
/* Transaction should already have been freed */
@@ -695,28 +755,44 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
buffers++;
}
+ while (!list_empty(&alloc->buffers)) {
+ buffer = list_first_entry(&alloc->buffers,
+ struct binder_buffer, entry);
+ WARN_ON(!buffer->free);
+
+ list_del(&buffer->entry);
+ WARN_ON_ONCE(!list_empty(&alloc->buffers));
+ kfree(buffer);
+ }
+
page_count = 0;
if (alloc->pages) {
int i;
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
void *page_addr;
+ bool on_lru;
- if (!alloc->pages[i])
+ if (!alloc->pages[i].page_ptr)
continue;
+ on_lru = list_lru_del(&binder_alloc_lru,
+ &alloc->pages[i].lru);
page_addr = alloc->buffer + i * PAGE_SIZE;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%s: %d: page %d at %pK not freed\n",
- __func__, alloc->pid, i, page_addr);
+ "%s: %d: page %d at %pK %s\n",
+ __func__, alloc->pid, i, page_addr,
+ on_lru ? "on lru" : "active");
unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
- __free_page(alloc->pages[i]);
+ __free_page(alloc->pages[i].page_ptr);
page_count++;
}
kfree(alloc->pages);
vfree(alloc->buffer);
}
mutex_unlock(&alloc->mutex);
+ if (alloc->vma_vm_mm)
+ mmdrop(alloc->vma_vm_mm);
binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d buffers %d, pages %d\n",
@@ -754,6 +830,34 @@ void binder_alloc_print_allocated(struct seq_file *m,
}
/**
+ * binder_alloc_print_pages() - print page usage
+ * @m: seq_file for output via seq_printf()
+ * @alloc: binder_alloc for this proc
+ */
+void binder_alloc_print_pages(struct seq_file *m,
+ struct binder_alloc *alloc)
+{
+ struct binder_lru_page *page;
+ int i;
+ int active = 0;
+ int lru = 0;
+ int free = 0;
+
+ mutex_lock(&alloc->mutex);
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ page = &alloc->pages[i];
+ if (!page->page_ptr)
+ free++;
+ else if (list_empty(&page->lru))
+ active++;
+ else
+ lru++;
+ }
+ mutex_unlock(&alloc->mutex);
+ seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
+}
+
+/**
* binder_alloc_get_allocated_count() - return count of buffers
* @alloc: binder_alloc for this proc
*
@@ -783,10 +887,112 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
WRITE_ONCE(alloc->vma, NULL);
- WRITE_ONCE(alloc->vma_vm_mm, NULL);
}
/**
+ * binder_alloc_free_page() - shrinker callback to free pages
+ * @item: item to free
+ * @lock: lock protecting the item
+ * @cb_arg: callback argument
+ *
+ * Called from list_lru_walk() in binder_shrink_scan() to free
+ * up pages when the system is under memory pressure.
+ */
+enum lru_status binder_alloc_free_page(struct list_head *item,
+ struct list_lru_one *lru,
+ spinlock_t *lock,
+ void *cb_arg)
+{
+ struct mm_struct *mm = NULL;
+ struct binder_lru_page *page = container_of(item,
+ struct binder_lru_page,
+ lru);
+ struct binder_alloc *alloc;
+ uintptr_t page_addr;
+ size_t index;
+ struct vm_area_struct *vma;
+
+ alloc = page->alloc;
+ if (!mutex_trylock(&alloc->mutex))
+ goto err_get_alloc_mutex_failed;
+
+ if (!page->page_ptr)
+ goto err_page_already_freed;
+
+ index = page - alloc->pages;
+ page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+ vma = alloc->vma;
+ if (vma) {
+ /* Same as mmget_not_zero() in later kernel versions */
+ if (!atomic_inc_not_zero(&alloc->vma_vm_mm->mm_users))
+ goto err_mmget;
+ mm = alloc->vma_vm_mm;
+ if (!down_write_trylock(&mm->mmap_sem))
+ goto err_down_write_mmap_sem_failed;
+ }
+
+ list_lru_isolate(lru, item);
+ spin_unlock(lock);
+
+ if (vma) {
+ trace_binder_unmap_user_start(alloc, index);
+
+ zap_page_range(vma,
+ page_addr +
+ alloc->user_buffer_offset,
+ PAGE_SIZE, NULL);
+
+ trace_binder_unmap_user_end(alloc, index);
+
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+
+ trace_binder_unmap_kernel_start(alloc, index);
+
+ unmap_kernel_range(page_addr, PAGE_SIZE);
+ __free_page(page->page_ptr);
+ page->page_ptr = NULL;
+
+ trace_binder_unmap_kernel_end(alloc, index);
+
+ spin_lock(lock);
+ mutex_unlock(&alloc->mutex);
+ return LRU_REMOVED_RETRY;
+
+err_down_write_mmap_sem_failed:
+ mmput_async(mm);
+err_mmget:
+err_page_already_freed:
+ mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
+ return LRU_SKIP;
+}
+
+static unsigned long
+binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ unsigned long ret = list_lru_count(&binder_alloc_lru);
+ return ret;
+}
+
+static unsigned long
+binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+ unsigned long ret;
+
+ ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+ NULL, sc->nr_to_scan);
+ return ret;
+}
+
+struct shrinker binder_shrinker = {
+ .count_objects = binder_shrink_count,
+ .scan_objects = binder_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
+/**
* binder_alloc_init() - called by binder_open() for per-proc initialization
* @alloc: binder_alloc for this proc
*
@@ -795,8 +1001,13 @@ void binder_alloc_vma_close(struct binder_alloc *alloc)
*/
void binder_alloc_init(struct binder_alloc *alloc)
{
- alloc->tsk = current->group_leader;
alloc->pid = current->group_leader->pid;
mutex_init(&alloc->mutex);
+ INIT_LIST_HEAD(&alloc->buffers);
}
+void binder_alloc_shrinker_init(void)
+{
+ list_lru_init(&binder_alloc_lru);
+ register_shrinker(&binder_shrinker);
+}
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 088e4ffc6230..2dd33b6df104 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -21,7 +21,9 @@
#include <linux/rtmutex.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/list_lru.h>
+extern struct list_lru binder_alloc_lru;
struct binder_transaction;
/**
@@ -57,7 +59,19 @@ struct binder_buffer {
size_t data_size;
size_t offsets_size;
size_t extra_buffers_size;
- uint8_t data[0];
+ void *data;
+};
+
+/**
+ * struct binder_lru_page - page object used for binder shrinker
+ * @page_ptr: pointer to physical page in mmap'd space
+ * @lru: entry in binder_alloc_lru
+ * @alloc: binder_alloc for a proc
+ */
+struct binder_lru_page {
+ struct list_head lru;
+ struct page *page_ptr;
+ struct binder_alloc *alloc;
};
/**
@@ -75,8 +89,7 @@ struct binder_buffer {
* @allocated_buffers: rb tree of allocated buffers sorted by address
* @free_async_space: VA space available for async buffers. This is
* initialized at mmap time to 1/2 the full VA space
- * @pages: array of physical page addresses for each
- * page of mmap'd space
+ * @pages: array of binder_lru_page
* @buffer_size: size of address space specified via mmap
* @pid: pid for associated binder_proc (invariant after init)
*
@@ -87,7 +100,6 @@ struct binder_buffer {
*/
struct binder_alloc {
struct mutex mutex;
- struct task_struct *tsk;
struct vm_area_struct *vma;
struct mm_struct *vma_vm_mm;
void *buffer;
@@ -96,18 +108,27 @@ struct binder_alloc {
struct rb_root free_buffers;
struct rb_root allocated_buffers;
size_t free_async_space;
- struct page **pages;
+ struct binder_lru_page *pages;
size_t buffer_size;
uint32_t buffer_free;
int pid;
};
+#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
+void binder_selftest_alloc(struct binder_alloc *alloc);
+#else
+static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
+#endif
+enum lru_status binder_alloc_free_page(struct list_head *item,
+ struct list_lru_one *lru,
+ spinlock_t *lock, void *cb_arg);
extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
int is_async);
extern void binder_alloc_init(struct binder_alloc *alloc);
+void binder_alloc_shrinker_init(void);
extern void binder_alloc_vma_close(struct binder_alloc *alloc);
extern struct binder_buffer *
binder_alloc_prepare_to_free(struct binder_alloc *alloc,
@@ -120,6 +141,8 @@ extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
extern void binder_alloc_print_allocated(struct seq_file *m,
struct binder_alloc *alloc);
+void binder_alloc_print_pages(struct seq_file *m,
+ struct binder_alloc *alloc);
/**
* binder_alloc_get_free_async_space() - get free space available for async
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
new file mode 100644
index 000000000000..8bd7bcef967d
--- /dev/null
+++ b/drivers/android/binder_alloc_selftest.c
@@ -0,0 +1,310 @@
+/* binder_alloc_selftest.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm_types.h>
+#include <linux/err.h>
+#include "binder_alloc.h"
+
+#define BUFFER_NUM 5
+#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)
+
+static bool binder_selftest_run = true;
+static int binder_selftest_failures;
+static DEFINE_MUTEX(binder_selftest_lock);
+
+/**
+ * enum buf_end_align_type - Page alignment of a buffer
+ * end with regard to the end of the previous buffer.
+ *
+ * In the pictures below, buf2 refers to the buffer we
+ * are aligning. buf1 refers to previous buffer by addr.
+ * Symbol [ means the start of a buffer, ] means the end
+ * of a buffer, and | means page boundaries.
+ */
+enum buf_end_align_type {
+ /**
+ * @SAME_PAGE_UNALIGNED: The end of this buffer is on
+ * the same page as the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 ][ ...
+ * buf1 ]|[ buf2 ][ ...
+ */
+ SAME_PAGE_UNALIGNED = 0,
+ /**
+ * @SAME_PAGE_ALIGNED: When the end of the previous buffer
+ * is not page aligned, the end of this buffer is on the
+ * same page as the end of the previous buffer and is page
+ * aligned. When the previous buffer is page aligned, the
+ * end of this buffer is aligned to the next page boundary.
+ * Examples:
+ * buf1 ][ buf2 ]| ...
+ * buf1 ]|[ buf2 ]| ...
+ */
+ SAME_PAGE_ALIGNED,
+ /**
+ * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 ][ ...
+ */
+ NEXT_PAGE_UNALIGNED,
+ /**
+ * @NEXT_PAGE_ALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ]| ...
+ * buf1 ]|[ buf2 | buf2 ]| ...
+ */
+ NEXT_PAGE_ALIGNED,
+ /**
+ * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
+ * the page that follows the page after the end of the
+ * previous buffer and is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
+ */
+ NEXT_NEXT_UNALIGNED,
+ LOOP_END,
+};
+
+static void pr_err_size_seq(size_t *sizes, int *seq)
+{
+ int i;
+
+ pr_err("alloc sizes: ");
+ for (i = 0; i < BUFFER_NUM; i++)
+ pr_cont("[%zu]", sizes[i]);
+ pr_cont("\n");
+ pr_err("free seq: ");
+ for (i = 0; i < BUFFER_NUM; i++)
+ pr_cont("[%d]", seq[i]);
+ pr_cont("\n");
+}
+
+static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ size_t size)
+{
+ void *page_addr, *end;
+ int page_index;
+
+ end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+ page_addr = buffer->data;
+ for (; page_addr < end; page_addr += PAGE_SIZE) {
+ page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ if (!alloc->pages[page_index].page_ptr ||
+ !list_empty(&alloc->pages[page_index].lru)) {
+ pr_err("expect alloc but is %s at page index %d\n",
+ alloc->pages[page_index].page_ptr ?
+ "lru" : "free", page_index);
+ return false;
+ }
+ }
+ return true;
+}
+
+static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++) {
+ buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
+ if (IS_ERR(buffers[i]) ||
+ !check_buffer_pages_allocated(alloc, buffers[i],
+ sizes[i])) {
+ pr_err_size_seq(sizes, seq);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq, size_t end)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++)
+ binder_alloc_free_buf(alloc, buffers[seq[i]]);
+
+ for (i = 0; i < end / PAGE_SIZE; i++) {
+ /**
+ * Error message on a free page can be false positive
+ * if binder shrinker ran during binder_alloc_free_buf
+ * calls above.
+ */
+ if (list_empty(&alloc->pages[i].lru)) {
+ pr_err_size_seq(sizes, seq);
+ pr_err("expect lru but is %s at page index %d\n",
+ alloc->pages[i].page_ptr ? "alloc" : "free", i);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_free_page(struct binder_alloc *alloc)
+{
+ int i;
+ unsigned long count;
+
+ while ((count = list_lru_count(&binder_alloc_lru))) {
+ list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+ NULL, count);
+ }
+
+ for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
+ if (alloc->pages[i].page_ptr) {
+ pr_err("expect free but is %s at page index %d\n",
+ list_empty(&alloc->pages[i].lru) ?
+ "alloc" : "lru", i);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_alloc_free(struct binder_alloc *alloc,
+ size_t *sizes, int *seq, size_t end)
+{
+ struct binder_buffer *buffers[BUFFER_NUM];
+
+ binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+ binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+
+ /* Allocate from lru. */
+ binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+ if (list_lru_count(&binder_alloc_lru))
+ pr_err("lru list should be empty but is not\n");
+
+ binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+ binder_selftest_free_page(alloc);
+}
+
+static bool is_dup(int *seq, int index, int val)
+{
+ int i;
+
+ for (i = 0; i < index; i++) {
+ if (seq[i] == val)
+ return true;
+ }
+ return false;
+}
+
+/* Generate BUFFER_NUM factorial free orders. */
+static void binder_selftest_free_seq(struct binder_alloc *alloc,
+ size_t *sizes, int *seq,
+ int index, size_t end)
+{
+ int i;
+
+ if (index == BUFFER_NUM) {
+ binder_selftest_alloc_free(alloc, sizes, seq, end);
+ return;
+ }
+ for (i = 0; i < BUFFER_NUM; i++) {
+ if (is_dup(seq, index, i))
+ continue;
+ seq[index] = i;
+ binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
+ }
+}
+
+static void binder_selftest_alloc_size(struct binder_alloc *alloc,
+ size_t *end_offset)
+{
+ int i;
+ int seq[BUFFER_NUM] = {0};
+ size_t front_sizes[BUFFER_NUM];
+ size_t back_sizes[BUFFER_NUM];
+ size_t last_offset, offset = 0;
+
+ for (i = 0; i < BUFFER_NUM; i++) {
+ last_offset = offset;
+ offset = end_offset[i];
+ front_sizes[i] = offset - last_offset;
+ back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
+ }
+ /*
+ * Buffers share the first or last few pages.
+ * Only BUFFER_NUM - 1 buffer sizes are adjustable since
+ * we need one giant buffer before getting to the last page.
+ */
+ back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
+ binder_selftest_free_seq(alloc, front_sizes, seq, 0,
+ end_offset[BUFFER_NUM - 1]);
+ binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
+}
+
+static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
+ size_t *end_offset, int index)
+{
+ int align;
+ size_t end, prev;
+
+ if (index == BUFFER_NUM) {
+ binder_selftest_alloc_size(alloc, end_offset);
+ return;
+ }
+ prev = index == 0 ? 0 : end_offset[index - 1];
+ end = prev;
+
+ BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
+
+ for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
+ if (align % 2)
+ end = ALIGN(end, PAGE_SIZE);
+ else
+ end += BUFFER_MIN_SIZE;
+ end_offset[index] = end;
+ binder_selftest_alloc_offset(alloc, end_offset, index + 1);
+ }
+}
+
+/**
+ * binder_selftest_alloc() - Test alloc and free of buffer pages.
+ * @alloc: Pointer to alloc struct.
+ *
+ * Allocate BUFFER_NUM buffers to cover all page alignment cases,
+ * then free them in all orders possible. Check that pages are
+ * correctly allocated, put onto lru when buffers are freed, and
+ * are freed when binder_alloc_free_page is called.
+ */
+void binder_selftest_alloc(struct binder_alloc *alloc)
+{
+ size_t end_offset[BUFFER_NUM];
+
+ if (!binder_selftest_run)
+ return;
+ mutex_lock(&binder_selftest_lock);
+ if (!binder_selftest_run || !alloc->vma)
+ goto done;
+ pr_info("STARTED\n");
+ binder_selftest_alloc_offset(alloc, end_offset, 0);
+ binder_selftest_run = false;
+ if (binder_selftest_failures > 0)
+ pr_info("%d tests FAILED\n", binder_selftest_failures);
+ else
+ pr_info("PASSED\n");
+
+done:
+ mutex_unlock(&binder_selftest_lock);
+}
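Editor's note, derived from the selftest code above rather than stated in the commit: the test is exhaustive in a precise sense. binder_selftest_alloc_offset() enumerates all 5^5 = 3125 combinations of the five buf_end_align_type cases across BUFFER_NUM = 5 buffers; binder_selftest_alloc_size() replays each layout with both the front-loaded and the back-loaded size split; and binder_selftest_free_seq() frees every layout in all 5! = 120 possible orders. That gives 3125 x 2 x 120 = 750,000 alloc/free sequences per run, which is why binder_selftest_run is cleared after the first ioctl that triggers it.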
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 7967db16ba5a..b11dffc521e8 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -85,6 +85,30 @@ DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_ioctl_done);
DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_write_done);
DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_read_done);
+TRACE_EVENT(binder_set_priority,
+ TP_PROTO(int proc, int thread, unsigned int old_prio,
+ unsigned int desired_prio, unsigned int new_prio),
+ TP_ARGS(proc, thread, old_prio, new_prio, desired_prio),
+
+ TP_STRUCT__entry(
+ __field(int, proc)
+ __field(int, thread)
+ __field(unsigned int, old_prio)
+ __field(unsigned int, new_prio)
+ __field(unsigned int, desired_prio)
+ ),
+ TP_fast_assign(
+ __entry->proc = proc;
+ __entry->thread = thread;
+ __entry->old_prio = old_prio;
+ __entry->new_prio = new_prio;
+ __entry->desired_prio = desired_prio;
+ ),
+ TP_printk("proc=%d thread=%d old=%d => new=%d desired=%d",
+ __entry->proc, __entry->thread, __entry->old_prio,
+ __entry->new_prio, __entry->desired_prio)
+);
+
TRACE_EVENT(binder_wait_for_work,
TP_PROTO(bool proc_work, bool transaction_stack, bool thread_todo),
TP_ARGS(proc_work, transaction_stack, thread_todo),
@@ -291,6 +315,61 @@ TRACE_EVENT(binder_update_page_range,
__entry->offset, __entry->size)
);
+DECLARE_EVENT_CLASS(binder_lru_page_class,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index),
+ TP_STRUCT__entry(
+ __field(int, proc)
+ __field(size_t, page_index)
+ ),
+ TP_fast_assign(
+ __entry->proc = alloc->pid;
+ __entry->page_index = page_index;
+ ),
+ TP_printk("proc=%d page_index=%zu",
+ __entry->proc, __entry->page_index)
+);
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
TRACE_EVENT(binder_command,
TP_PROTO(uint32_t cmd),
TP_ARGS(cmd),
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index e2d94972962d..7aa10c200ecb 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -224,7 +224,6 @@ static DECLARE_TRANSPORT_CLASS(ata_port_class,
static void ata_tport_release(struct device *dev)
{
- put_device(dev->parent);
}
/**
@@ -284,7 +283,7 @@ int ata_tport_add(struct device *parent,
device_initialize(dev);
dev->type = &ata_port_type;
- dev->parent = get_device(parent);
+ dev->parent = parent;
dev->release = ata_tport_release;
dev_set_name(dev, "ata%d", ap->print_id);
transport_setup_device(dev);
@@ -348,7 +347,6 @@ static DECLARE_TRANSPORT_CLASS(ata_link_class,
static void ata_tlink_release(struct device *dev)
{
- put_device(dev->parent);
}
/**
@@ -410,7 +408,7 @@ int ata_tlink_add(struct ata_link *link)
int error;
device_initialize(dev);
- dev->parent = get_device(&ap->tdev);
+ dev->parent = &ap->tdev;
dev->release = ata_tlink_release;
if (ata_is_host_link(link))
dev_set_name(dev, "link%d", ap->print_id);
@@ -588,7 +586,6 @@ static DECLARE_TRANSPORT_CLASS(ata_dev_class,
static void ata_tdev_release(struct device *dev)
{
- put_device(dev->parent);
}
/**
@@ -661,7 +658,7 @@ static int ata_tdev_add(struct ata_device *ata_dev)
int error;
device_initialize(dev);
- dev->parent = get_device(&link->tdev);
+ dev->parent = &link->tdev;
dev->release = ata_tdev_release;
if (ata_is_host_link(link))
dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno);
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 8d4d959a821c..8706533db57b 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -616,6 +616,7 @@ static const struct pci_device_id amd[] = {
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 8 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 8 },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 9 },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), 9 },
{ },
};
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 6c15a554efbe..dc1255294628 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -289,6 +289,7 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
static const struct pci_device_id cs5536[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), },
{ },
};
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 500592486e88..0346e46e2871 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -737,7 +737,7 @@ int bus_add_driver(struct device_driver *drv)
out_unregister:
kobject_put(&priv->kobj);
- kfree(drv->p);
+ /* drv->p is freed in driver_release() */
drv->p = NULL;
out_put_bus:
bus_put(bus);
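
The kfree(drv->p) dropped above was a double free: once kobject_put() releases the last reference, the kobject core calls the ktype's ->release() callback, and that callback is the one place the containing structure may be freed. A minimal, hedged sketch of that ownership rule follows; it is illustrative only and all names are invented.

#include <linux/kobject.h>
#include <linux/slab.h>

struct example_priv {
        struct kobject kobj;            /* embedded, refcounted object */
};

static void example_release(struct kobject *kobj)
{
        kfree(container_of(kobj, struct example_priv, kobj));
}

static struct kobj_type example_ktype = {
        .release = example_release,
};

static struct example_priv *example_create(void)
{
        struct example_priv *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (p)
                kobject_init(&p->kobj, &example_ktype);
        return p;
}

static void example_drop(struct example_priv *p)
{
        kobject_put(&p->kobj);  /* last ref: example_release() frees p */
        /* no kfree(p) here - that would be the double free */
}
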
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index ae7f3ce90bd2..0ed3d8381840 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -829,7 +829,8 @@ static ssize_t driver_override_store(struct device *dev,
struct platform_device *pdev = to_platform_device(dev);
char *driver_override, *old, *cp;
- if (count > PATH_MAX)
+ /* We need to keep extra room for a newline */
+ if (count >= (PAGE_SIZE - 1))
return -EINVAL;
driver_override = kstrndup(buf, count, GFP_KERNEL);
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index dd73e1ff1759..aadab0381e0d 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -396,8 +396,8 @@ aoeblk_gdalloc(void *vp)
WARN_ON(d->gd);
WARN_ON(d->flags & DEVFL_UP);
blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
- q->backing_dev_info.name = "aoe";
- q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
+ q->backing_dev_info->name = "aoe";
+ q->backing_dev_info->ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
d->bufpool = mp;
d->blkq = gd->queue = q;
q->queuedata = d;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 1d58854c4a9f..1f9c77609dd1 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2393,7 +2393,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
if (get_ldev(device)) {
q = bdev_get_queue(device->ldev->backing_bdev);
- r = bdi_congested(&q->backing_dev_info, bdi_bits);
+ r = bdi_congested(q->backing_dev_info, bdi_bits);
put_ldev(device);
if (r)
reason = 'b';
@@ -2765,8 +2765,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
/* we have no partitions. we contain only ourselves. */
device->this_bdev->bd_contains = device->this_bdev;
- q->backing_dev_info.congested_fn = drbd_congested;
- q->backing_dev_info.congested_data = device;
+ q->backing_dev_info->congested_fn = drbd_congested;
+ q->backing_dev_info->congested_data = device;
blk_queue_make_request(q, drbd_make_request);
blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index e80cbefbc2b5..ef03cb25f5bf 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1170,11 +1170,11 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
blk_queue_stack_limits(q, b);
- if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
+ if (q->backing_dev_info->ra_pages != b->backing_dev_info->ra_pages) {
drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
- q->backing_dev_info.ra_pages,
- b->backing_dev_info.ra_pages);
- q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
+ q->backing_dev_info->ra_pages,
+ b->backing_dev_info->ra_pages);
+ q->backing_dev_info->ra_pages = b->backing_dev_info->ra_pages;
}
}
}
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 3b10fa6cb039..7a6b9f3e1a9f 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -288,7 +288,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "%2d: cs:Unconfigured\n", i);
} else {
/* reset device->congestion_reason */
- bdi_rw_congested(&device->rq_queue->backing_dev_info);
+ bdi_rw_congested(device->rq_queue->backing_dev_info);
nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 3ae2c0086563..17ae4e1ab358 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -937,7 +937,7 @@ static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t se
switch (rbm) {
case RB_CONGESTED_REMOTE:
- bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
+ bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
return bdi_read_congested(bdi);
case RB_LEAST_PENDING:
return atomic_read(&device->local_cnt) >
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index d06c62eccdf0..f018318d4466 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1276,7 +1276,7 @@ try_next_bio:
&& pd->bio_queue_size <= pd->write_congestion_off);
spin_unlock(&pd->lock);
if (wakeup) {
- clear_bdi_congested(&pd->disk->queue->backing_dev_info,
+ clear_bdi_congested(pd->disk->queue->backing_dev_info,
BLK_RW_ASYNC);
}
@@ -2405,7 +2405,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
spin_lock(&pd->lock);
if (pd->write_congestion_on > 0
&& pd->bio_queue_size >= pd->write_congestion_on) {
- set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
+ set_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC);
do {
spin_unlock(&pd->lock);
congestion_wait(BLK_RW_ASYNC, HZ);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index fbdddd6f94b8..55a8671f1979 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3780,7 +3780,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
q->limits.discard_zeroes_data = 1;
if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
- q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
+ q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
disk->queue = q;
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 586f9168ffa4..47d1e834f3f4 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -2214,6 +2214,9 @@ static void skd_send_fitmsg(struct skd_device *skdev,
*/
qcmd |= FIT_QCMD_MSGSIZE_64;
+ /* Make sure skd_msg_buf is written before the doorbell is triggered. */
+ smp_wmb();
+
SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
@@ -2260,6 +2263,9 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
qcmd = skspcl->mb_dma_address;
qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
+ /* Make sure skd_msg_buf is written before the doorbell is triggered. */
+ smp_wmb();
+
SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
@@ -4679,15 +4685,16 @@ static void skd_free_disk(struct skd_device *skdev)
{
struct gendisk *disk = skdev->disk;
- if (disk != NULL) {
- struct request_queue *q = disk->queue;
+ if (disk && (disk->flags & GENHD_FL_UP))
+ del_gendisk(disk);
- if (disk->flags & GENHD_FL_UP)
- del_gendisk(disk);
- if (q)
- blk_cleanup_queue(q);
- put_disk(disk);
+ if (skdev->queue) {
+ blk_cleanup_queue(skdev->queue);
+ skdev->queue = NULL;
+ disk->queue = NULL;
}
+
+ put_disk(disk);
skdev->disk = NULL;
}
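
The two smp_wmb() calls added above follow the usual fill-buffer-then-ring-doorbell ordering: the message the device will fetch must be globally visible before the doorbell write that tells it to fetch. A hedged sketch of the pattern, assuming a platform where writeq() is available; all names are invented and the driver's real SKD_WRITEQ macro is not shown here.

#include <linux/io.h>
#include <asm/barrier.h>

static void example_ring_doorbell(u64 *msg_buf, u64 qcmd,
                                  void __iomem *doorbell)
{
        msg_buf[0] = qcmd;      /* message the device will fetch via DMA */

        /* make the buffer contents visible before the doorbell write */
        smp_wmb();

        writeq(qcmd, doorbell); /* device may start fetching after this */
}
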
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index cd6b141b9825..7bb8055bd10c 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -333,6 +333,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3410), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3416), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8821AE Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c
index c963e4658c07..5e455878ac3e 100644
--- a/drivers/char/diag/diag_debugfs.c
+++ b/drivers/char/diag/diag_debugfs.c
@@ -52,7 +52,7 @@ static int diag_dbgfs_bridgeinfo_index;
static int diag_dbgfs_finished;
static int diag_dbgfs_dci_data_index;
static int diag_dbgfs_dci_finished;
-
+static struct mutex diag_dci_dbgfs_mutex;
static ssize_t diag_dbgfs_read_status(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
@@ -159,6 +159,7 @@ static ssize_t diag_dbgfs_read_dcistats(struct file *file,
buf_size = ksize(buf);
bytes_remaining = buf_size;
+ mutex_lock(&diag_dci_dbgfs_mutex);
if (diag_dbgfs_dci_data_index == 0) {
bytes_written =
scnprintf(buf, buf_size,
@@ -214,8 +215,8 @@ static ssize_t diag_dbgfs_read_dcistats(struct file *file,
}
temp_data++;
}
-
diag_dbgfs_dci_data_index = (i >= DIAG_DCI_DEBUG_CNT) ? 0 : i + 1;
+ mutex_unlock(&diag_dci_dbgfs_mutex);
bytes_written = simple_read_from_buffer(ubuf, count, ppos, buf,
bytes_in_buf);
kfree(buf);
@@ -1186,6 +1187,7 @@ int diag_debugfs_init(void)
pr_warn("diag: could not allocate memory for dci debug info\n");
mutex_init(&dci_stat_mutex);
+ mutex_init(&diag_dci_dbgfs_mutex);
return 0;
err:
kfree(dci_traffic);
@@ -1202,6 +1204,7 @@ void diag_debugfs_cleanup(void)
kfree(dci_traffic);
mutex_destroy(&dci_stat_mutex);
+ mutex_destroy(&diag_dci_dbgfs_mutex);
}
#else
int diag_debugfs_init(void) { return 0; }
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index 986aeed169f5..072c55ca3c4e 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -202,6 +202,7 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx)
found = 1;
driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
pr_debug("diag: wake up logging process\n");
wake_up_interruptible(&driver->wait_q);
}
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index d81a39e2c637..80f004b8435e 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -26,6 +26,8 @@
#include <asm/atomic.h>
#include "diagfwd_bridge.h"
+#define THRESHOLD_CLIENT_LIMIT 50
+
/* Size of the USB buffers used for read and write*/
#define USB_MAX_OUT_BUF 4096
#define APPS_BUF_SIZE 4096
@@ -34,7 +36,7 @@
#define DIAG_MAX_REQ_SIZE (16 * 1024)
#define DIAG_MAX_RSP_SIZE (16 * 1024)
-#define APF_DIAG_PADDING 256
+#define APF_DIAG_PADDING 0
/*
* In the worst case, the HDLC buffer can be atmost twice the size of the
* original packet. Add 3 bytes for 16 bit CRC (2 bytes) and a delimiter
@@ -508,6 +510,7 @@ struct diagchar_dev {
wait_queue_head_t wait_q;
struct diag_client_map *client_map;
int *data_ready;
+ atomic_t data_ready_notif[THRESHOLD_CLIENT_LIMIT];
int num_clients;
int polling_reg_flag;
int use_device_tree;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 4111e599877a..ae0182ae77db 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -139,7 +139,6 @@ module_param(poolsize_qsc_usb, uint, 0);
/* This is the max number of user-space clients supported at initialization*/
static unsigned int max_clients = 15;
-static unsigned int threshold_client_limit = 50;
module_param(max_clients, uint, 0);
/* Timer variables */
@@ -328,7 +327,7 @@ static int diagchar_open(struct inode *inode, struct file *file)
if (i < driver->num_clients) {
diag_add_client(i, file);
} else {
- if (i < threshold_client_limit) {
+ if (i < THRESHOLD_CLIENT_LIMIT) {
driver->num_clients++;
temp = krealloc(driver->client_map
, (driver->num_clients) * sizeof(struct
@@ -358,11 +357,17 @@ static int diagchar_open(struct inode *inode, struct file *file)
}
}
driver->data_ready[i] = 0x0;
+ atomic_set(&driver->data_ready_notif[i], 0);
driver->data_ready[i] |= MSG_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
driver->data_ready[i] |= EVENT_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
driver->data_ready[i] |= LOG_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
driver->data_ready[i] |= DCI_LOG_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
driver->data_ready[i] |= DCI_EVENT_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
if (driver->ref_count == 0)
diag_mempool_init();
@@ -1866,6 +1871,7 @@ static int diag_ioctl_lsm_deinit(void)
}
driver->data_ready[i] |= DEINIT_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
mutex_unlock(&driver->diagchar_mutex);
wake_up_interruptible(&driver->wait_q);
@@ -3029,16 +3035,6 @@ static int diag_user_process_apps_data(const char __user *buf, int len,
return 0;
}
-static int check_data_ready(int index)
-{
- int data_type = 0;
-
- mutex_lock(&driver->diagchar_mutex);
- data_type = driver->data_ready[index];
- mutex_unlock(&driver->diagchar_mutex);
- return data_type;
-}
-
static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
@@ -3065,7 +3061,8 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
pr_err("diag: bad address from user side\n");
return -EFAULT;
}
- wait_event_interruptible(driver->wait_q, (check_data_ready(index)) > 0);
+ wait_event_interruptible(driver->wait_q,
+ atomic_read(&driver->data_ready_notif[index]) > 0);
mutex_lock(&driver->diagchar_mutex);
@@ -3076,6 +3073,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
/*Copy the type of data being passed*/
data_type = driver->data_ready[index] & USER_SPACE_DATA_TYPE;
driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
COPY_USER_SPACE_OR_EXIT(buf, data_type, sizeof(int));
/* place holder for number of data field */
ret += sizeof(int);
@@ -3089,11 +3087,13 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
/* In case, the thread wakes up and the logging mode is
not memory device any more, the condition needs to be cleared */
driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
}
if (driver->data_ready[index] & HDLC_SUPPORT_TYPE) {
data_type = driver->data_ready[index] & HDLC_SUPPORT_TYPE;
driver->data_ready[index] ^= HDLC_SUPPORT_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
COPY_USER_SPACE_OR_EXIT(buf, data_type, sizeof(int));
mutex_lock(&driver->md_session_lock);
session_info = diag_md_session_get_pid(current->tgid);
@@ -3110,6 +3110,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
data_type = driver->data_ready[index] & DEINIT_TYPE;
COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
driver->data_ready[index] ^= DEINIT_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
mutex_unlock(&driver->diagchar_mutex);
diag_remove_client_entry(file);
return ret;
@@ -3125,6 +3126,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
if (write_len > 0)
ret += write_len;
driver->data_ready[index] ^= MSG_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
goto exit;
}
@@ -3144,6 +3146,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
event_mask.mask_len);
}
driver->data_ready[index] ^= EVENT_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
goto exit;
}
@@ -3157,6 +3160,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
if (write_len > 0)
ret += write_len;
driver->data_ready[index] ^= LOG_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
goto exit;
}
@@ -3168,6 +3172,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
*(driver->apps_req_buf),
driver->apps_req_buf_len);
driver->data_ready[index] ^= PKT_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
driver->in_busy_pktdata = 0;
goto exit;
}
@@ -3179,6 +3184,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
COPY_USER_SPACE_OR_EXIT(buf+4, *(driver->dci_pkt_buf),
driver->dci_pkt_length);
driver->data_ready[index] ^= DCI_PKT_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
driver->in_busy_dcipktdata = 0;
goto exit;
}
@@ -3191,6 +3197,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
COPY_USER_SPACE_OR_EXIT(buf + 8, (dci_ops_tbl[DCI_LOCAL_PROC].
event_mask_composite), DCI_EVENT_MASK_SIZE);
driver->data_ready[index] ^= DCI_EVENT_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
goto exit;
}
@@ -3202,6 +3209,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
COPY_USER_SPACE_OR_EXIT(buf+8, (dci_ops_tbl[DCI_LOCAL_PROC].
log_mask_composite), DCI_LOG_MASK_SIZE);
driver->data_ready[index] ^= DCI_LOG_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
goto exit;
}
@@ -3233,6 +3241,7 @@ exit:
exit_stat = diag_copy_dci(buf, count, entry, &ret);
mutex_lock(&driver->diagchar_mutex);
driver->data_ready[index] ^= DCI_DATA_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
mutex_unlock(&driver->diagchar_mutex);
if (exit_stat == 1) {
mutex_unlock(&driver->dci_mutex);
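
The data_ready_notif counter threaded through this file lets diagchar_read() sleep on a plain atomic condition instead of taking diagchar_mutex inside the wait (the removed check_data_ready() helper). A self-contained sketch of the same producer/consumer pairing follows; it is illustrative only and the struct and function names are invented.

#include <linux/atomic.h>
#include <linux/wait.h>

struct example_client {
        atomic_t data_ready_notif;      /* pending-notification count */
        wait_queue_head_t wait_q;
};

/* assumes atomic_set(..., 0) and init_waitqueue_head() were done at setup */

static void example_post(struct example_client *c)
{
        atomic_inc(&c->data_ready_notif);       /* publish one pending item */
        wake_up_interruptible(&c->wait_q);
}

static int example_consume(struct example_client *c)
{
        int ret;

        /* sleep until at least one notification is pending */
        ret = wait_event_interruptible(c->wait_q,
                        atomic_read(&c->data_ready_notif) > 0);
        if (ret)
                return ret;                     /* interrupted by a signal */

        atomic_dec(&c->data_ready_notif);       /* consume one item */
        return 0;
}
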
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index ef08f939c36e..40412ba87897 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -226,6 +226,7 @@ void chk_logging_wakeup(void)
* situation.
*/
driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
pr_debug("diag: Force wakeup of logging process\n");
wake_up_interruptible(&driver->wait_q);
break;
@@ -480,8 +481,10 @@ void diag_update_userspace_clients(unsigned int type)
mutex_lock(&driver->diagchar_mutex);
for (i = 0; i < driver->num_clients; i++)
- if (driver->client_map[i].pid != 0)
+ if (driver->client_map[i].pid != 0) {
driver->data_ready[i] |= type;
+ atomic_inc(&driver->data_ready_notif[i]);
+ }
wake_up_interruptible(&driver->wait_q);
mutex_unlock(&driver->diagchar_mutex);
}
@@ -498,6 +501,8 @@ void diag_update_md_clients(unsigned int type)
driver->client_map[j].pid ==
driver->md_session_map[i]->pid) {
driver->data_ready[j] |= type;
+ atomic_inc(
+ &driver->data_ready_notif[j]);
break;
}
}
@@ -513,6 +518,7 @@ void diag_update_sleeping_process(int process_id, int data_type)
for (i = 0; i < driver->num_clients; i++)
if (driver->client_map[i].pid == process_id) {
driver->data_ready[i] |= data_type;
+ atomic_inc(&driver->data_ready_notif[i]);
break;
}
wake_up_interruptible(&driver->wait_q);
@@ -1703,6 +1709,8 @@ int diagfwd_init(void)
, GFP_KERNEL)) == NULL)
goto err;
kmemleak_not_leak(driver->data_ready);
+ for (i = 0; i < THRESHOLD_CLIENT_LIMIT; i++)
+ atomic_set(&driver->data_ready_notif[i], 0);
if (driver->apps_req_buf == NULL) {
driver->apps_req_buf = kzalloc(DIAG_MAX_REQ_SIZE, GFP_KERNEL);
if (!driver->apps_req_buf)
diff --git a/drivers/clk/qcom/gpucc-sdm660.c b/drivers/clk/qcom/gpucc-sdm660.c
index 0f7ec18e477a..8b2e6fd601c0 100644
--- a/drivers/clk/qcom/gpucc-sdm660.c
+++ b/drivers/clk/qcom/gpucc-sdm660.c
@@ -179,6 +179,7 @@ static const struct freq_tbl ftbl_gfx3d_clk_src[] = {
F_GFX(160000000, 0, 2, 0, 0, 640000000),
F_GFX(266000000, 0, 2, 0, 0, 532000000),
F_GFX(370000000, 0, 2, 0, 0, 740000000),
+ F_GFX(430000000, 0, 2, 0, 0, 860000000),
F_GFX(465000000, 0, 2, 0, 0, 930000000),
F_GFX(588000000, 0, 2, 0, 0, 1176000000),
F_GFX(647000000, 0, 2, 0, 0, 1294000000),
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index e2c02f9dd141..ff0fd0e44f07 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -241,7 +241,7 @@ config ARM_PXA2xx_CPUFREQ
config ACPI_CPPC_CPUFREQ
tristate "CPUFreq driver based on the ACPI CPPC spec"
- depends on ACPI
+ depends on ACPI_PROCESSOR
select ACPI_CPPC_LIB
default n
help
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index f951f911786e..a72ae98b4838 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -279,6 +279,13 @@ static int cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = transition_latency;
+ /*
+ * Android: set default parameters for parity between schedutil and
+ * schedfreq
+ */
+ policy->up_transition_delay_us = transition_latency / NSEC_PER_USEC;
+ policy->down_transition_delay_us = 50000; /* 50ms */
+
return 0;
out_free_cpufreq_table:
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index abbee61c99c8..ae65fbc3ceac 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -1825,6 +1825,7 @@ struct cpufreq_governor cpufreq_gov_interactive = {
static int __init cpufreq_interactive_init(void)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+ int ret = 0;
spin_lock_init(&speedchange_cpumask_lock);
mutex_init(&gov_lock);
@@ -1841,7 +1842,12 @@ static int __init cpufreq_interactive_init(void)
/* NB: wake up so the thread does not look hung to the freezer */
wake_up_process_no_notif(speedchange_task);
- return cpufreq_register_governor(&cpufreq_gov_interactive);
+ ret = cpufreq_register_governor(&cpufreq_gov_interactive);
+ if (ret) {
+ kthread_stop(speedchange_task);
+ put_task_struct(speedchange_task);
+ }
+ return ret;
}
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 6a60936b46e0..62ce93568e11 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1749,9 +1749,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
req_ctx->swinit = 0;
} else {
desc->ptr[1] = zero_entry;
- /* Indicate next op is not the first. */
- req_ctx->first = 0;
}
+ /* Indicate next op is not the first. */
+ req_ctx->first = 0;
/* HMAC key */
if (ctx->keylen)
@@ -2770,7 +2770,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
t_alg->algt.alg.hash.final = ahash_final;
t_alg->algt.alg.hash.finup = ahash_finup;
t_alg->algt.alg.hash.digest = ahash_digest;
- t_alg->algt.alg.hash.setkey = ahash_setkey;
+ if (!strncmp(alg->cra_name, "hmac", 4))
+ t_alg->algt.alg.hash.setkey = ahash_setkey;
t_alg->algt.alg.hash.import = ahash_import;
t_alg->algt.alg.hash.export = ahash_export;
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 16fe773fb846..85674a8d0436 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1126,11 +1126,24 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
struct edma_desc *edesc;
struct device *dev = chan->device->dev;
struct edma_chan *echan = to_edma_chan(chan);
- unsigned int width, pset_len;
+ unsigned int width, pset_len, array_size;
if (unlikely(!echan || !len))
return NULL;
+ /* Align the array size (acnt block) with the transfer properties */
+ switch (__ffs((src | dest | len))) {
+ case 0:
+ array_size = SZ_32K - 1;
+ break;
+ case 1:
+ array_size = SZ_32K - 2;
+ break;
+ default:
+ array_size = SZ_32K - 4;
+ break;
+ }
+
if (len < SZ_64K) {
/*
* Transfer size less than 64K can be handled with one paRAM
@@ -1152,7 +1165,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
* When the full_length is multibple of 32767 one slot can be
* used to complete the transfer.
*/
- width = SZ_32K - 1;
+ width = array_size;
pset_len = rounddown(len, width);
/* One slot is enough for lengths multiple of (SZ_32K -1) */
if (unlikely(pset_len == len))
@@ -1202,7 +1215,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
}
dest += pset_len;
src += pset_len;
- pset_len = width = len % (SZ_32K - 1);
+ pset_len = width = len % array_size;
ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
width, pset_len, DMA_MEM_TO_MEM);
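
The __ffs(src | dest | len) switch added above picks an ACNT (array size) that matches the largest power-of-two alignment shared by source, destination and length, so a slot never splits a naturally aligned word. A hedged sketch of that selection, assuming len is non-zero (as the earlier !len check guarantees); the helper name is invented.

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/sizes.h>

static unsigned int example_acnt(dma_addr_t src, dma_addr_t dest, size_t len)
{
        /* __ffs() returns the lowest set bit, i.e. the common alignment */
        switch (__ffs(src | dest | len)) {
        case 0:                         /* only byte aligned */
                return SZ_32K - 1;      /* 32767 keeps byte-wide arrays */
        case 1:                         /* 16-bit aligned */
                return SZ_32K - 2;      /* 32766 is a multiple of 2 */
        default:                        /* 32-bit aligned or better */
                return SZ_32K - 4;      /* 32764 is a multiple of 4 */
        }
}
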
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index fd55c2f2080a..6c9d7ccebb8c 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -168,7 +168,7 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
return ret;
}
- vbus_attach = (pwr_stat & PS_STAT_VBUS_PRESENT);
+ vbus_attach = (pwr_stat & PS_STAT_VBUS_VALID);
if (!vbus_attach)
goto notify_otg;
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index d775e2bfc017..9d8b2e59b755 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -10,7 +10,7 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse
-cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS))
+cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie
cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
-fno-builtin -fpic -mno-single-pic-base
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 377d935a3380..556d05547670 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -9,9 +9,17 @@
* published by the Free Software Foundation.
*
*/
+
+/*
+ * To prevent the compiler from emitting GOT-indirected (and thus absolute)
+ * references to the section markers, override their visibility as 'hidden'
+ */
+#pragma GCC visibility push(hidden)
+#include <asm/sections.h>
+#pragma GCC visibility pop
+
#include <linux/efi.h>
#include <asm/efi.h>
-#include <asm/sections.h>
#include "efistub.h"
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index d652f3b53635..c7382d46c673 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -550,7 +550,7 @@ out_put_node:
return err;
}
-static const struct of_device_id const psci_of_match[] __initconst = {
+static const struct of_device_id psci_of_match[] __initconst = {
{ .compatible = "arm,psci", .data = psci_0_1_init},
{ .compatible = "arm,psci-0.2", .data = psci_0_2_init},
{ .compatible = "arm,psci-1.0", .data = psci_0_2_init},
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index b6e28dcaea1d..1fb1daa0b366 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -739,8 +739,10 @@ int kfd_wait_on_events(struct kfd_process *p,
struct kfd_event_data event_data;
if (copy_from_user(&event_data, &events[i],
- sizeof(struct kfd_event_data)))
+ sizeof(struct kfd_event_data))) {
+ ret = -EFAULT;
goto fail;
+ }
ret = init_event_waiter(p, &event_waiters[i],
event_data.event_id, i);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 58bf94b69186..273e05a3c933 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1802,6 +1802,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
return -EINVAL;
}
req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
+ req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
} else {
port = NULL;
req_payload.num_slots = 0;
@@ -1817,6 +1818,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
if (req_payload.num_slots) {
drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
mgr->payloads[i].num_slots = req_payload.num_slots;
+ mgr->payloads[i].vcpi = req_payload.vcpi;
} else if (mgr->payloads[i].num_slots) {
mgr->payloads[i].num_slots = 0;
drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index 00416f23b5cb..dba5c0ea0827 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -36,7 +36,10 @@ struct adv7511 {
bool edid_read;
wait_queue_head_t wq;
+ struct work_struct hpd_work;
+
struct drm_encoder *encoder;
+ struct drm_connector connector;
bool embedded_sync;
enum adv7511_sync_polarity vsync_polarity;
@@ -48,6 +51,10 @@ struct adv7511 {
struct gpio_desc *gpio_pd;
};
+static const int edid_i2c_addr = 0x7e;
+static const int packet_i2c_addr = 0x70;
+static const int cec_i2c_addr = 0x78;
+
static struct adv7511 *encoder_to_adv7511(struct drm_encoder *encoder)
{
return to_encoder_slave(encoder)->slave_priv;
@@ -362,12 +369,19 @@ static void adv7511_power_on(struct adv7511 *adv7511)
{
adv7511->current_edid_segment = -1;
- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
- ADV7511_INT0_EDID_READY);
- regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
- ADV7511_INT1_DDC_ERROR);
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN, 0);
+ if (adv7511->i2c_main->irq) {
+ /*
+ * Documentation says the INT_ENABLE registers are reset in
+ * POWER_DOWN mode. My 7511w preserved the bits, however.
+ * Still, let's be safe and stick to the documentation.
+ */
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+ ADV7511_INT1_DDC_ERROR);
+ }
/*
* Per spec it is allowed to pulse the HDP signal to indicate that the
@@ -422,7 +436,27 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
return false;
}
-static int adv7511_irq_process(struct adv7511 *adv7511)
+static void adv7511_hpd_work(struct work_struct *work)
+{
+ struct adv7511 *adv7511 = container_of(work, struct adv7511, hpd_work);
+ enum drm_connector_status status;
+ unsigned int val;
+ int ret;
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val);
+ if (ret < 0)
+ status = connector_status_disconnected;
+ else if (val & ADV7511_STATUS_HPD)
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+
+ if (adv7511->connector.status != status) {
+ adv7511->connector.status = status;
+ drm_kms_helper_hotplug_event(adv7511->connector.dev);
+ }
+}
+
+static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
{
unsigned int irq0, irq1;
int ret;
@@ -438,8 +472,8 @@ static int adv7511_irq_process(struct adv7511 *adv7511)
regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
- if (irq0 & ADV7511_INT0_HDP && adv7511->encoder)
- drm_helper_hpd_irq_event(adv7511->encoder->dev);
+ if (process_hpd && irq0 & ADV7511_INT0_HDP && adv7511->encoder)
+ schedule_work(&adv7511->hpd_work);
if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
adv7511->edid_read = true;
@@ -456,7 +490,7 @@ static irqreturn_t adv7511_irq_handler(int irq, void *devid)
struct adv7511 *adv7511 = devid;
int ret;
- ret = adv7511_irq_process(adv7511);
+ ret = adv7511_irq_process(adv7511, true);
return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
}
@@ -473,7 +507,7 @@ static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
adv7511->edid_read, msecs_to_jiffies(timeout));
} else {
for (; timeout > 0; timeout -= 25) {
- ret = adv7511_irq_process(adv7511);
+ ret = adv7511_irq_process(adv7511, false);
if (ret < 0)
break;
@@ -567,13 +601,18 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
/* Reading the EDID only works if the device is powered */
if (!adv7511->powered) {
- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
- ADV7511_INT0_EDID_READY);
- regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
- ADV7511_INT1_DDC_ERROR);
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN, 0);
+ if (adv7511->i2c_main->irq) {
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+ ADV7511_INT1_DDC_ERROR);
+ }
adv7511->current_edid_segment = -1;
+ /* Reset the EDID_I2C_ADDR register as it might be cleared */
+ regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR,
+ edid_i2c_addr);
}
edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
@@ -849,10 +888,6 @@ static int adv7511_parse_dt(struct device_node *np,
return 0;
}
-static const int edid_i2c_addr = 0x7e;
-static const int packet_i2c_addr = 0x70;
-static const int cec_i2c_addr = 0x78;
-
static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
{
struct adv7511_link_config link_config;
@@ -913,6 +948,8 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
if (!adv7511->i2c_edid)
return -ENOMEM;
+ INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work);
+
if (i2c->irq) {
init_waitqueue_head(&adv7511->wq);
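
The hpd_work added above moves the sleeping work (regmap reads, DRM hotplug notification) out of the interrupt handler, which is why the handler now only schedules work. A minimal sketch of that IRQ-to-workqueue hand-off follows; it is illustrative only and all names are invented.

#include <linux/workqueue.h>
#include <linux/interrupt.h>

struct example_encoder {
        struct work_struct hpd_work;
};

static void example_hpd_work(struct work_struct *work)
{
        struct example_encoder *enc =
                container_of(work, struct example_encoder, hpd_work);

        /* sleeping work (register reads, hotplug notification) goes here */
        (void)enc;
}

static irqreturn_t example_irq(int irq, void *devid)
{
        struct example_encoder *enc = devid;

        schedule_work(&enc->hpd_work);  /* never sleep in IRQ context */
        return IRQ_HANDLED;
}

/* at probe time: INIT_WORK(&enc->hpd_work, example_hpd_work); */
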
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index d14bdc537587..0a2ac3efd04e 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -957,6 +957,13 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
+ if (port == PORT_A && is_dvi) {
+ DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
+ is_hdmi ? "/HDMI" : "");
+ is_dvi = false;
+ is_hdmi = false;
+ }
+
info->supports_dvi = is_dvi;
info->supports_hdmi = is_hdmi;
info->supports_dp = is_dp;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index cc91ae832ffb..6fd7b50c5747 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -635,7 +635,8 @@ hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
"enabling oneshot unclaimed register reporting. "
"Please use i915.mmio_debug=N for more information.\n");
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
- i915.mmio_debug = mmio_debug_once--;
+ i915.mmio_debug = mmio_debug_once;
+ mmio_debug_once = false;
}
}
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
index fa111d581529..0f77e35ef287 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
@@ -666,6 +666,15 @@ static void sde_hdmi_tx_hdcp_cb_work(struct work_struct *work)
}
break;
+ case HDCP_STATE_AUTH_FAIL_NOREAUTH:
+ if (hdmi_ctrl->hdcp1_use_sw_keys && hdmi_ctrl->hdcp14_present) {
+ if (hdmi_ctrl->auth_state && !hdmi_ctrl->hdcp22_present)
+ hdcp1_set_enc(false);
+ }
+
+ hdmi_ctrl->auth_state = false;
+
+ break;
case HDCP_STATE_AUTH_ENC_NONE:
hdmi_ctrl->enc_lvl = HDCP_STATE_AUTH_ENC_NONE;
if (sde_hdmi_tx_is_panel_on(hdmi_ctrl))
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
index 0d93edb9201f..0c143059b749 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
@@ -110,6 +110,20 @@ void _sde_hdmi_bridge_destroy(struct drm_bridge *bridge)
{
}
+static void sde_hdmi_clear_hdr_info(struct drm_bridge *bridge)
+{
+ struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+ struct drm_connector *connector = hdmi->connector;
+
+ connector->hdr_eotf = SDE_HDMI_HDR_EOTF_NONE;
+ connector->hdr_metadata_type_one = false;
+ connector->hdr_max_luminance = SDE_HDMI_HDR_LUMINANCE_NONE;
+ connector->hdr_avg_luminance = SDE_HDMI_HDR_LUMINANCE_NONE;
+ connector->hdr_min_luminance = SDE_HDMI_HDR_LUMINANCE_NONE;
+ connector->hdr_supported = false;
+}
+
static void _sde_hdmi_bridge_power_on(struct drm_bridge *bridge)
{
struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
@@ -435,6 +449,19 @@ static void _sde_hdmi_bridge_setup_deep_color(struct hdmi *hdmi)
vbi_pkt_reg = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
vbi_pkt_reg |= BIT(5) | BIT(4);
hdmi_write(hdmi, REG_HDMI_VBI_PKT_CTRL, vbi_pkt_reg);
+ } else {
+ hdmi_ctrl_reg = hdmi_read(hdmi, REG_HDMI_CTRL);
+
+ /* disable GC CD override */
+ hdmi_ctrl_reg &= ~BIT(27);
+ /* disable deep color for RGB888/YUV444/YUV420 30 bits */
+ hdmi_ctrl_reg &= ~BIT(24);
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl_reg);
+
+ /* disable the GC packet sending */
+ vbi_pkt_reg = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
+ vbi_pkt_reg &= ~(BIT(5) | BIT(4));
+ hdmi_write(hdmi, REG_HDMI_VBI_PKT_CTRL, vbi_pkt_reg);
}
}
@@ -551,6 +578,7 @@ static void _sde_hdmi_bridge_disable(struct drm_bridge *bridge)
display->sink_hdcp_ver = SDE_HDMI_HDCP_NONE;
display->sink_hdcp22_support = false;
+ sde_hdmi_clear_hdr_info(bridge);
mutex_unlock(&display->display_lock);
}
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_hdcp2p2.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_hdcp2p2.c
index 1e673440f399..51f5c8d8dde6 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_hdcp2p2.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_hdcp2p2.c
@@ -338,6 +338,41 @@ static void sde_hdmi_hdcp2p2_auth_failed(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
HDCP_STATE_AUTH_FAIL);
}
+static void sde_hdmi_hdcp2p2_fail_noreauth(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
+{
+ if (!ctrl) {
+ SDE_ERROR("invalid input\n");
+ return;
+ }
+
+ atomic_set(&ctrl->auth_state, HDCP_STATE_AUTH_FAIL);
+
+ sde_hdmi_hdcp2p2_ddc_disable(ctrl->init_data.cb_data);
+
+ /* notify hdmi tx about HDCP failure */
+ ctrl->init_data.notify_status(ctrl->init_data.cb_data,
+ HDCP_STATE_AUTH_FAIL_NOREAUTH);
+}
+
+static void sde_hdmi_hdcp2p2_srm_cb(void *client_ctx)
+{
+ struct sde_hdmi_hdcp2p2_ctrl *ctrl =
+ (struct sde_hdmi_hdcp2p2_ctrl *)client_ctx;
+ struct hdcp_lib_wakeup_data cdata = {
+ HDCP_LIB_WKUP_CMD_INVALID};
+
+ if (!ctrl) {
+ SDE_ERROR("invalid input\n");
+ return;
+ }
+
+ cdata.context = ctrl->lib_ctx;
+ cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+ sde_hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+
+ sde_hdmi_hdcp2p2_fail_noreauth(ctrl);
+}
+
static int sde_hdmi_hdcp2p2_ddc_rd_message(struct sde_hdmi_hdcp2p2_ctrl *ctrl,
u8 *buf, int size, u32 timeout)
{
@@ -888,6 +923,7 @@ void *sde_hdmi_hdcp2p2_init(struct sde_hdcp_init_data *init_data)
static struct hdcp_client_ops client_ops = {
.wakeup = sde_hdmi_hdcp2p2_wakeup,
.notify_lvl_change = sde_hdmi_hdcp2p2_min_level_change,
+ .srm_cb = sde_hdmi_hdcp2p2_srm_cb,
};
static struct hdcp_txmtr_ops txmtr_ops;
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h
index 421bdf7643ca..3cef7e6aca39 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h
@@ -109,6 +109,9 @@
#define SDE_HDMI_HDCP_14 0x14
#define SDE_HDMI_HDCP_NONE 0x0
+#define SDE_HDMI_HDR_LUMINANCE_NONE 0x0
+#define SDE_HDMI_HDR_EOTF_NONE 0x0
+
/*
* Bits 1:0 in HDMI_HW_DDC_CTRL that dictate how the HDCP 2.2 RxStatus will be
* read by the hardware
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b245a4c7c826..6f968e93d959 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -185,12 +185,16 @@ static void vblank_ctrl_worker(struct kthread_work *work)
struct msm_kms *kms = priv->kms;
struct vblank_event *vbl_ev, *tmp;
unsigned long flags;
+ LIST_HEAD(tmp_head);
spin_lock_irqsave(&vbl_ctrl->lock, flags);
list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
list_del(&vbl_ev->node);
- spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+ list_add_tail(&vbl_ev->node, &tmp_head);
+ }
+ spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+ list_for_each_entry_safe(vbl_ev, tmp, &tmp_head, node) {
if (vbl_ev->enable)
kms->funcs->enable_vblank(kms,
priv->crtcs[vbl_ev->crtc_id]);
@@ -199,11 +203,7 @@ static void vblank_ctrl_worker(struct kthread_work *work)
priv->crtcs[vbl_ev->crtc_id]);
kfree(vbl_ev);
-
- spin_lock_irqsave(&vbl_ctrl->lock, flags);
}
-
- spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
}
static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
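
The vblank_ctrl_worker() rework above stops dropping and re-taking the spinlock on every iteration; instead it detaches all queued events onto a local list while holding the lock and processes them afterwards. A hedged sketch of that drain-then-process pattern, with invented types and names:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct example_event {
        struct list_head node;
};

static void example_drain(spinlock_t *lock, struct list_head *pending)
{
        struct example_event *ev, *tmp;
        unsigned long flags;
        LIST_HEAD(local);

        /* detach everything while holding the lock ... */
        spin_lock_irqsave(lock, flags);
        list_for_each_entry_safe(ev, tmp, pending, node)
                list_move_tail(&ev->node, &local);
        spin_unlock_irqrestore(lock, flags);

        /* ... then do the (possibly slow) work without it */
        list_for_each_entry_safe(ev, tmp, &local, node) {
                list_del(&ev->node);
                kfree(ev);
        }
}
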
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.c b/drivers/gpu/drm/msm/sde/sde_core_perf.c
index 0ba644d5519d..29e746e1fdf5 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_perf.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -75,6 +75,31 @@ static bool _sde_core_video_mode_intf_connected(struct drm_crtc *crtc)
return false;
}
+static void _sde_core_perf_calc_crtc(struct drm_crtc *crtc,
+ struct drm_crtc_state *state,
+ struct sde_core_perf_params *perf)
+{
+ struct sde_crtc_state *sde_cstate;
+
+ if (!crtc || !state || !perf) {
+ SDE_ERROR("invalid parameters\n");
+ return;
+ }
+
+ sde_cstate = to_sde_crtc_state(state);
+ memset(perf, 0, sizeof(struct sde_core_perf_params));
+
+ perf->bw_ctl = sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
+ perf->max_per_pipe_ib =
+ sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_IB);
+ perf->core_clk_rate =
+ sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_CLK);
+
+ SDE_DEBUG("crtc=%d clk_rate=%u ib=%llu ab=%llu\n",
+ crtc->base.id, perf->core_clk_rate,
+ perf->max_per_pipe_ib, perf->bw_ctl);
+}
+
int sde_core_perf_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -102,7 +127,9 @@ int sde_core_perf_crtc_check(struct drm_crtc *crtc,
sde_cstate = to_sde_crtc_state(state);
- bw_sum_of_intfs = sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
+ _sde_core_perf_calc_crtc(crtc, state, &sde_cstate->new_perf);
+
+ bw_sum_of_intfs = sde_cstate->new_perf.bw_ctl;
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
@@ -110,7 +137,7 @@ int sde_core_perf_crtc_check(struct drm_crtc *crtc,
struct sde_crtc_state *tmp_cstate =
to_sde_crtc_state(tmp_crtc->state);
- bw_sum_of_intfs += tmp_cstate->cur_perf.bw_ctl;
+ bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
}
}
@@ -126,11 +153,9 @@ int sde_core_perf_crtc_check(struct drm_crtc *crtc,
SDE_DEBUG("final threshold bw limit = %d\n", threshold);
if (!threshold) {
- sde_cstate->cur_perf.bw_ctl = 0;
SDE_ERROR("no bandwidth limits specified\n");
return -E2BIG;
} else if (bw > threshold) {
- sde_cstate->cur_perf.bw_ctl = 0;
SDE_DEBUG("exceeds bandwidth: %ukb > %ukb\n", bw, threshold);
return -E2BIG;
}
@@ -138,26 +163,6 @@ int sde_core_perf_crtc_check(struct drm_crtc *crtc,
return 0;
}
-static void _sde_core_perf_calc_crtc(struct sde_kms *kms,
- struct drm_crtc *crtc,
- struct sde_core_perf_params *perf)
-{
- struct sde_crtc_state *sde_cstate;
-
- sde_cstate = to_sde_crtc_state(crtc->state);
- memset(perf, 0, sizeof(struct sde_core_perf_params));
-
- perf->bw_ctl = sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
- perf->max_per_pipe_ib =
- sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_IB);
- perf->core_clk_rate =
- sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_CLK);
-
- SDE_DEBUG("crtc=%d clk_rate=%u ib=%llu ab=%llu\n",
- crtc->base.id, perf->core_clk_rate,
- perf->max_per_pipe_ib, perf->bw_ctl);
-}
-
static u64 _sde_core_perf_crtc_calc_client_vote(struct sde_kms *kms,
struct drm_crtc *crtc, struct sde_core_perf_params *perf,
bool nrt_client, u32 core_clk)
@@ -175,13 +180,13 @@ static u64 _sde_core_perf_crtc_calc_client_vote(struct sde_kms *kms,
to_sde_crtc_state(tmp_crtc->state);
perf->max_per_pipe_ib = max(perf->max_per_pipe_ib,
- sde_cstate->cur_perf.max_per_pipe_ib);
+ sde_cstate->new_perf.max_per_pipe_ib);
- bw_sum_of_intfs += sde_cstate->cur_perf.bw_ctl;
+ bw_sum_of_intfs += sde_cstate->new_perf.bw_ctl;
SDE_DEBUG("crtc=%d bw=%llu\n",
tmp_crtc->base.id,
- sde_cstate->cur_perf.bw_ctl);
+ sde_cstate->new_perf.bw_ctl);
}
}
@@ -249,6 +254,7 @@ static void _sde_core_perf_crtc_update_bus(struct sde_kms *kms,
void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc)
{
struct drm_crtc *tmp_crtc;
+ struct sde_crtc *sde_crtc;
struct sde_crtc_state *sde_cstate;
struct sde_kms *kms;
@@ -263,6 +269,7 @@ void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc)
return;
}
+ sde_crtc = to_sde_crtc(crtc);
sde_cstate = to_sde_crtc_state(crtc->state);
/* only do this for command panel or writeback */
@@ -285,8 +292,7 @@ void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc)
/* Release the bandwidth */
if (kms->perf.enable_bw_release) {
trace_sde_cmd_release_bw(crtc->base.id);
- sde_cstate->cur_perf.bw_ctl = 0;
- sde_cstate->new_perf.bw_ctl = 0;
+ sde_crtc->cur_perf.bw_ctl = 0;
SDE_DEBUG("Release BW crtc=%d\n", crtc->base.id);
_sde_core_perf_crtc_update_bus(kms, crtc, 0);
}
@@ -298,18 +304,27 @@ static int _sde_core_select_clk_lvl(struct sde_kms *kms,
return clk_round_rate(kms->perf.core_clk, clk_rate);
}
-static u32 _sde_core_perf_get_core_clk_rate(struct sde_kms *kms)
+static u32 _sde_core_perf_get_core_clk_rate(struct sde_kms *kms,
+ struct sde_core_perf_params *crct_perf, struct drm_crtc *crtc)
{
u32 clk_rate = 0;
- struct drm_crtc *crtc;
+ struct drm_crtc *tmp_crtc;
struct sde_crtc_state *sde_cstate;
int ncrtc = 0;
+ u32 tmp_rate;
+
+ drm_for_each_crtc(tmp_crtc, kms->dev) {
+ if (_sde_core_perf_crtc_is_power_on(tmp_crtc)) {
- drm_for_each_crtc(crtc, kms->dev) {
- if (_sde_core_perf_crtc_is_power_on(crtc)) {
- sde_cstate = to_sde_crtc_state(crtc->state);
- clk_rate = max(sde_cstate->cur_perf.core_clk_rate,
- clk_rate);
+ if (crtc->base.id == tmp_crtc->base.id) {
+ /* for current CRTC, use the cached value */
+ tmp_rate = crct_perf->core_clk_rate;
+ } else {
+ sde_cstate = to_sde_crtc_state(tmp_crtc->state);
+ tmp_rate = sde_cstate->new_perf.core_clk_rate;
+ }
+
+ clk_rate = max(tmp_rate, clk_rate);
clk_rate = clk_round_rate(kms->perf.core_clk, clk_rate);
}
ncrtc++;
@@ -353,13 +368,20 @@ void sde_core_perf_crtc_update(struct drm_crtc *crtc,
SDE_ATRACE_BEGIN(__func__);
- old = &sde_cstate->cur_perf;
- new = &sde_cstate->new_perf;
+ /*
+ * cache the performance numbers in the crtc prior to the
+ * crtc kickoff, so the same numbers are used during the
+ * perf update that happens post kickoff.
+ */
+
+ if (params_changed)
+ memcpy(&sde_crtc->new_perf, &sde_cstate->new_perf,
+ sizeof(struct sde_core_perf_params));
- if (_sde_core_perf_crtc_is_power_on(crtc) && !stop_req) {
- if (params_changed)
- _sde_core_perf_calc_crtc(kms, crtc, new);
+ old = &sde_crtc->cur_perf;
+ new = &sde_crtc->new_perf;
+ if (_sde_core_perf_crtc_is_power_on(crtc) && !stop_req) {
/*
* cases for bus bandwidth update.
* 1. new bandwidth vote or writeback output vote
@@ -398,7 +420,7 @@ void sde_core_perf_crtc_update(struct drm_crtc *crtc,
* use the new clock for the rotator bw calculation.
*/
if (update_clk)
- clk_rate = _sde_core_perf_get_core_clk_rate(kms);
+ clk_rate = _sde_core_perf_get_core_clk_rate(kms, old, crtc);
if (update_bus)
_sde_core_perf_crtc_update_bus(kms, crtc, clk_rate);
@@ -409,7 +431,9 @@ void sde_core_perf_crtc_update(struct drm_crtc *crtc,
*/
if (update_clk) {
SDE_ATRACE_INT(kms->perf.clk_name, clk_rate);
- SDE_EVT32(kms->dev, stop_req, clk_rate);
+ SDE_EVT32(kms->dev, stop_req, clk_rate, params_changed,
+ old->core_clk_rate, new->core_clk_rate);
+
ret = sde_power_clk_set_rate(&priv->phandle,
kms->perf.clk_name, clk_rate);
if (ret) {
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index e99eba0dadb7..2a31bc7fedc7 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -473,6 +473,7 @@ static void sde_crtc_frame_event_work(struct kthread_work *work)
struct sde_crtc_frame_event *fevent;
struct drm_crtc *crtc;
struct sde_crtc *sde_crtc;
+ struct sde_crtc_state *cstate;
struct sde_kms *sde_kms;
unsigned long flags;
@@ -482,13 +483,14 @@ static void sde_crtc_frame_event_work(struct kthread_work *work)
}
fevent = container_of(work, struct sde_crtc_frame_event, work);
- if (!fevent->crtc) {
+ if (!fevent->crtc || !fevent->crtc->state) {
SDE_ERROR("invalid crtc\n");
return;
}
crtc = fevent->crtc;
sde_crtc = to_sde_crtc(crtc);
+ cstate = to_sde_crtc_state(crtc->state);
sde_kms = _sde_crtc_get_kms(crtc);
if (!sde_kms) {
@@ -522,6 +524,9 @@ static void sde_crtc_frame_event_work(struct kthread_work *work)
} else {
SDE_EVT32(DRMID(crtc), fevent->event, 2);
}
+
+ if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE)
+ sde_core_perf_crtc_update(crtc, 0, false);
} else {
SDE_ERROR("crtc%d ts:%lld unknown event %u\n", crtc->base.id,
ktime_to_ns(fevent->ts),
@@ -1883,15 +1888,18 @@ static const struct file_operations __prefix ## _fops = { \
static int sde_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
struct drm_crtc *crtc = (struct drm_crtc *) s->private;
+ struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
struct sde_crtc_state *cstate = to_sde_crtc_state(crtc->state);
seq_printf(s, "num_connectors: %d\n", cstate->num_connectors);
seq_printf(s, "is_rt: %d\n", cstate->is_rt);
seq_printf(s, "intf_mode: %d\n", sde_crtc_get_intf_mode(crtc));
- seq_printf(s, "bw_ctl: %llu\n", cstate->cur_perf.bw_ctl);
- seq_printf(s, "core_clk_rate: %u\n", cstate->cur_perf.core_clk_rate);
+
+ seq_printf(s, "bw_ctl: %llu\n", sde_crtc->cur_perf.bw_ctl);
+ seq_printf(s, "core_clk_rate: %u\n",
+ sde_crtc->cur_perf.core_clk_rate);
seq_printf(s, "max_per_pipe_ib: %llu\n",
- cstate->cur_perf.max_per_pipe_ib);
+ sde_crtc->cur_perf.max_per_pipe_ib);
return 0;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 0eed61580cd8..200073995d43 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -95,6 +95,8 @@ struct sde_crtc_frame_event {
* @frame_event_list : available frame event list
* @pending : Whether any page-flip events are pending signal
* @spin_lock : spin lock for frame event, transaction status, etc...
+ * @cur_perf : current performance committed to clock/bandwidth driver
+ * @new_perf : new performance committed to clock/bandwidth driver
*/
struct sde_crtc {
struct drm_crtc base;
@@ -134,6 +136,9 @@ struct sde_crtc {
struct sde_crtc_frame_event frame_events[SDE_CRTC_FRAME_EVENT_SIZE];
struct list_head frame_event_list;
spinlock_t spin_lock;
+
+ struct sde_core_perf_params cur_perf;
+ struct sde_core_perf_params new_perf;
};
#define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
@@ -148,6 +153,7 @@ struct sde_crtc {
* @property_values: Current crtc property values
* @input_fence_timeout_ns : Cached input fence timeout, in ns
* @property_blobs: Reference pointers for blob properties
+ * @new_perf: new performance state being requested
*/
struct sde_crtc_state {
struct drm_crtc_state base;
@@ -161,7 +167,6 @@ struct sde_crtc_state {
uint64_t input_fence_timeout_ns;
struct drm_property_blob *property_blobs[CRTC_PROP_COUNT];
- struct sde_core_perf_params cur_perf;
struct sde_core_perf_params new_perf;
};
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index fe4b73b4ffea..de0551b22d2e 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -156,7 +156,7 @@ void sde_rm_init_hw_iter(
iter->type = type;
}
-bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
+static bool _sde_rm_get_hw_locked(struct sde_rm *rm, struct sde_rm_hw_iter *i)
{
struct list_head *blk_list;
@@ -198,7 +198,21 @@ bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
return false;
}
-void *sde_rm_get_hw_by_id(struct sde_rm *rm, enum sde_hw_blk_type type, int id)
+bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
+{
+ bool ret;
+
+ mutex_lock(&rm->rm_lock);
+ ret = _sde_rm_get_hw_locked(rm, i);
+ mutex_unlock(&rm->rm_lock);
+
+ return ret;
+}
+
+static void *_sde_rm_get_hw_by_id_locked(
+ struct sde_rm *rm,
+ enum sde_hw_blk_type type,
+ int id)
{
struct list_head *blk_list;
struct sde_rm_hw_blk *blk;
@@ -225,6 +239,17 @@ void *sde_rm_get_hw_by_id(struct sde_rm *rm, enum sde_hw_blk_type type, int id)
return hw;
}
+void *sde_rm_get_hw_by_id(struct sde_rm *rm, enum sde_hw_blk_type type, int id)
+{
+ void *ret = NULL;
+
+ mutex_lock(&rm->rm_lock);
+ ret = _sde_rm_get_hw_by_id_locked(rm, type, id);
+ mutex_unlock(&rm->rm_lock);
+
+ return ret;
+}
+
static void _sde_rm_hw_destroy(enum sde_hw_blk_type type, void *hw)
{
switch (type) {
@@ -291,6 +316,8 @@ int sde_rm_destroy(struct sde_rm *rm)
sde_hw_mdp_destroy(rm->hw_mdp);
rm->hw_mdp = NULL;
+ mutex_destroy(&rm->rm_lock);
+
return 0;
}
@@ -387,6 +414,9 @@ int sde_rm_init(struct sde_rm *rm,
/* Clear, setup lists */
memset(rm, 0, sizeof(*rm));
+
+ mutex_init(&rm->rm_lock);
+
INIT_LIST_HEAD(&rm->rsvps);
for (type = 0; type < SDE_HW_BLK_MAX; type++)
INIT_LIST_HEAD(&rm->hw_blks[type]);
@@ -568,7 +598,7 @@ static bool _sde_rm_check_lm_and_get_connected_blks(
if (lm_cfg->dspp != DSPP_MAX) {
sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_DSPP);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
if (iter.blk->id == lm_cfg->dspp) {
*dspp = iter.blk;
break;
@@ -589,7 +619,7 @@ static bool _sde_rm_check_lm_and_get_connected_blks(
}
sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_PINGPONG);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
if (iter.blk->id == lm_cfg->pingpong) {
*pp = iter.blk;
break;
@@ -639,7 +669,8 @@ static int _sde_rm_reserve_lms(
/* Find a primary mixer */
sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_LM);
- while (lm_count != reqs->num_lm && sde_rm_get_hw(rm, &iter_i)) {
+ while (lm_count != reqs->num_lm &&
+ _sde_rm_get_hw_locked(rm, &iter_i)) {
memset(&lm, 0, sizeof(lm));
memset(&dspp, 0, sizeof(dspp));
memset(&pp, 0, sizeof(pp));
@@ -657,7 +688,8 @@ static int _sde_rm_reserve_lms(
/* Valid primary mixer found, find matching peers */
sde_rm_init_hw_iter(&iter_j, 0, SDE_HW_BLK_LM);
- while (lm_count != reqs->num_lm && sde_rm_get_hw(rm, &iter_j)) {
+ while (lm_count != reqs->num_lm &&
+ _sde_rm_get_hw_locked(rm, &iter_j)) {
if (iter_i.blk == iter_j.blk)
continue;
@@ -693,7 +725,7 @@ static int _sde_rm_reserve_lms(
/* reserve a free PINGPONG_SLAVE block */
rc = -ENAVAIL;
sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_PINGPONG);
- while (sde_rm_get_hw(rm, &iter_i)) {
+ while (_sde_rm_get_hw_locked(rm, &iter_i)) {
struct sde_pingpong_cfg *pp_cfg =
(struct sde_pingpong_cfg *)
(iter_i.blk->catalog);
@@ -724,7 +756,7 @@ static int _sde_rm_reserve_ctls(
memset(&ctls, 0, sizeof(ctls));
sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CTL);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
unsigned long caps;
bool has_split_display, has_ppsplit;
@@ -771,7 +803,7 @@ static int _sde_rm_reserve_cdm(
struct sde_cdm_cfg *cdm;
sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CDM);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
bool match = false;
if (RESERVED_BY_OTHER(iter.blk, rsvp))
@@ -816,7 +848,7 @@ static int _sde_rm_reserve_intf_or_wb(
/* Find the block entry in the rm, and note the reservation */
sde_rm_init_hw_iter(&iter, 0, type);
- while (sde_rm_get_hw(rm, &iter)) {
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
if (iter.blk->id != id)
continue;
@@ -1073,7 +1105,7 @@ static struct drm_connector *_sde_rm_get_connector(
* @rm: KMS handle
* @rsvp: RSVP pointer to release and release resources for
*/
-void _sde_rm_release_rsvp(
+static void _sde_rm_release_rsvp(
struct sde_rm *rm,
struct sde_rm_rsvp *rsvp,
struct drm_connector *conn)
@@ -1125,16 +1157,18 @@ void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc)
return;
}
+ mutex_lock(&rm->rm_lock);
+
rsvp = _sde_rm_get_rsvp(rm, enc);
if (!rsvp) {
SDE_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
- return;
+ goto end;
}
conn = _sde_rm_get_connector(enc);
if (!conn) {
SDE_ERROR("failed to get connector for enc %d\n", enc->base.id);
- return;
+ goto end;
}
top_ctrl = sde_connector_get_property(conn->state,
@@ -1154,6 +1188,9 @@ void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc)
CONNECTOR_PROP_TOPOLOGY_NAME,
SDE_RM_TOPOLOGY_UNKNOWN);
}
+
+end:
+ mutex_unlock(&rm->rm_lock);
}
static int _sde_rm_commit_rsvp(
@@ -1232,13 +1269,15 @@ int sde_rm_reserve(
crtc_state->crtc->base.id, test_only);
SDE_EVT32(enc->base.id, conn_state->connector->base.id);
+ mutex_lock(&rm->rm_lock);
+
_sde_rm_print_rsvps(rm, SDE_RM_STAGE_BEGIN);
ret = _sde_rm_populate_requirements(rm, enc, crtc_state,
conn_state, &reqs);
if (ret) {
SDE_ERROR("failed to populate hw requirements\n");
- return ret;
+ goto end;
}
/*
@@ -1253,8 +1292,10 @@ int sde_rm_reserve(
* replace the current with the next.
*/
rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
- if (!rsvp_nxt)
- return -ENOMEM;
+ if (!rsvp_nxt) {
+ ret = -ENOMEM;
+ goto end;
+ }
rsvp_cur = _sde_rm_get_rsvp(rm, enc);
@@ -1306,5 +1347,8 @@ int sde_rm_reserve(
_sde_rm_print_rsvps(rm, SDE_RM_STAGE_FINAL);
+end:
+ mutex_unlock(&rm->rm_lock);
+
return ret;
}
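
The sde_rm hunks above serialize reservation and release behind a per-manager mutex and funnel every failure path through a single unlock label. A minimal sketch of that lock/goto-end shape, using a hypothetical demo_rm structure rather than the real sde_rm, and assuming the mutex was initialized at init time as sde_rm_init() does above:

    #include <linux/errno.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct demo_rm {
    	struct mutex lock;	/* serializes reserve/release */
    	void *rsvp;		/* current reservation, if any */
    };

    static int demo_rm_reserve(struct demo_rm *rm)
    {
    	void *next;
    	int ret = 0;

    	mutex_lock(&rm->lock);

    	next = kzalloc(16, GFP_KERNEL);
    	if (!next) {
    		ret = -ENOMEM;
    		goto end;		/* error paths still drop the lock */
    	}

    	kfree(rm->rsvp);		/* replace the current reservation */
    	rm->rsvp = next;

    end:
    	mutex_unlock(&rm->lock);
    	return ret;
    }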
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.h b/drivers/gpu/drm/msm/sde/sde_rm.h
index 1cc22c5fbbf4..87e95bfebe98 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.h
+++ b/drivers/gpu/drm/msm/sde/sde_rm.h
@@ -70,6 +70,7 @@ enum sde_rm_topology_control {
* @hw_mdp: hardware object for mdp_top
* @lm_max_width: cached layer mixer maximum width
* @rsvp_next_seq: sequence number for next reservation for debugging purposes
+ * @rm_lock: resource manager mutex
*/
struct sde_rm {
struct drm_device *dev;
@@ -78,6 +79,7 @@ struct sde_rm {
struct sde_hw_mdp *hw_mdp;
uint32_t lm_max_width;
uint32_t rsvp_next_seq;
+ struct mutex rm_lock;
};
/**
diff --git a/drivers/gpu/drm/msm/sde_hdcp.h b/drivers/gpu/drm/msm/sde_hdcp.h
index 49cca9399cb0..c414f68a8e0d 100644
--- a/drivers/gpu/drm/msm/sde_hdcp.h
+++ b/drivers/gpu/drm/msm/sde_hdcp.h
@@ -43,6 +43,7 @@ enum sde_hdcp_states {
HDCP_STATE_AUTHENTICATING,
HDCP_STATE_AUTHENTICATED,
HDCP_STATE_AUTH_FAIL,
+ HDCP_STATE_AUTH_FAIL_NOREAUTH,
HDCP_STATE_AUTH_ENC_NONE,
HDCP_STATE_AUTH_ENC_1X,
HDCP_STATE_AUTH_ENC_2P2
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index d671dcfaff3c..4896474da320 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -180,6 +180,10 @@ nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
}
}
+#ifdef __BIG_ENDIAN
+ pci->msi = false;
+#endif
+
pci->msi = nvkm_boolopt(device->cfgopt, "NvMSI", pci->msi);
if (pci->msi && func->msi_rearm) {
pci->msi = pci_enable_msi(pci->pdev) == 0;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 025c429050c0..5d8dfe027b30 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -612,7 +612,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
} else {
pr_err("Failed to fill pool (%p)\n", pool);
/* If we have any pages left put them to the pool. */
- list_for_each_entry(p, &pool->list, lru) {
+ list_for_each_entry(p, &new_pages, lru) {
++cpages;
}
list_splice(&new_pages, &pool->list);
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index d4d655a10df1..312aa1e33fb2 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -540,7 +540,8 @@ static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
{
/* the worst case is computed from the set_report command with a
* reportID > 15 and the maximum report length */
- int args_len = sizeof(__u8) + /* optional ReportID byte */
+ int args_len = sizeof(__u8) + /* ReportID */
+ sizeof(__u8) + /* optional ReportID byte */
sizeof(__u16) + /* data register */
sizeof(__u16) + /* size of the report */
report_size; /* report */
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 0df32fe0e345..b0eeb5090c91 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -971,6 +971,8 @@ static int usbhid_parse(struct hid_device *hid)
unsigned int rsize = 0;
char *rdesc;
int ret, n;
+ int num_descriptors;
+ size_t offset = offsetof(struct hid_descriptor, desc);
quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
le16_to_cpu(dev->descriptor.idProduct));
@@ -993,10 +995,18 @@ static int usbhid_parse(struct hid_device *hid)
return -ENODEV;
}
+ if (hdesc->bLength < sizeof(struct hid_descriptor)) {
+ dbg_hid("hid descriptor is too short\n");
+ return -EINVAL;
+ }
+
hid->version = le16_to_cpu(hdesc->bcdHID);
hid->country = hdesc->bCountryCode;
- for (n = 0; n < hdesc->bNumDescriptors; n++)
+ num_descriptors = min_t(int, hdesc->bNumDescriptors,
+ (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor));
+
+ for (n = 0; n < num_descriptors; n++)
if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT)
rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);
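
The usbhid hunk above bounds the descriptor loop by what the advertised bLength can actually hold, so a bogus bNumDescriptors cannot read past the HID descriptor. A standalone sketch of the same clamping idea with illustrative integer sizes (not the real struct hid_descriptor layout):

    #include <linux/kernel.h>	/* min_t */

    /*
     * Return how many class-descriptor entries may safely be walked.
     * entry_len is assumed non-zero; all sizes here are illustrative.
     */
    static int demo_usable_descriptors(int bNumDescriptors, int bLength,
    				   int header_len, int entry_len)
    {
    	if (bLength < header_len)
    		return 0;	/* too short to hold even the fixed header */

    	return min_t(int, bNumDescriptors,
    		     (bLength - header_len) / entry_len);
    }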
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index 1fb02dcbc500..12dcbd8226f2 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -155,6 +155,10 @@ static void fcopy_send_data(struct work_struct *dummy)
out_src = smsg_out;
break;
+ case WRITE_TO_FILE:
+ out_src = fcopy_transaction.fcopy_msg;
+ out_len = sizeof(struct hv_do_fcopy);
+ break;
default:
out_src = fcopy_transaction.fcopy_msg;
out_len = fcopy_transaction.recv_len;
diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
index dee93ec87d02..84e0994aafdd 100644
--- a/drivers/hwmon/gl520sm.c
+++ b/drivers/hwmon/gl520sm.c
@@ -208,11 +208,13 @@ static ssize_t get_cpu_vid(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(cpu0_vid, S_IRUGO, get_cpu_vid, NULL);
-#define VDD_FROM_REG(val) (((val) * 95 + 2) / 4)
-#define VDD_TO_REG(val) clamp_val((((val) * 4 + 47) / 95), 0, 255)
+#define VDD_FROM_REG(val) DIV_ROUND_CLOSEST((val) * 95, 4)
+#define VDD_CLAMP(val) clamp_val(val, 0, 255 * 95 / 4)
+#define VDD_TO_REG(val) DIV_ROUND_CLOSEST(VDD_CLAMP(val) * 4, 95)
-#define IN_FROM_REG(val) ((val) * 19)
-#define IN_TO_REG(val) clamp_val((((val) + 9) / 19), 0, 255)
+#define IN_FROM_REG(val) ((val) * 19)
+#define IN_CLAMP(val) clamp_val(val, 0, 255 * 19)
+#define IN_TO_REG(val) DIV_ROUND_CLOSEST(IN_CLAMP(val), 19)
static ssize_t get_in_input(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -349,8 +351,13 @@ static SENSOR_DEVICE_ATTR(in4_max, S_IRUGO | S_IWUSR,
#define DIV_FROM_REG(val) (1 << (val))
#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (480000 / ((val) << (div))))
-#define FAN_TO_REG(val, div) ((val) <= 0 ? 0 : \
- clamp_val((480000 + ((val) << ((div)-1))) / ((val) << (div)), 1, 255))
+
+#define FAN_BASE(div) (480000 >> (div))
+#define FAN_CLAMP(val, div) clamp_val(val, FAN_BASE(div) / 255, \
+ FAN_BASE(div))
+#define FAN_TO_REG(val, div) ((val) == 0 ? 0 : \
+ DIV_ROUND_CLOSEST(480000, \
+ FAN_CLAMP(val, div) << (div)))
static ssize_t get_fan_input(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -513,9 +520,9 @@ static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(fan1_off, S_IRUGO | S_IWUSR,
get_fan_off, set_fan_off);
-#define TEMP_FROM_REG(val) (((val) - 130) * 1000)
-#define TEMP_TO_REG(val) clamp_val(((((val) < 0 ? \
- (val) - 500 : (val) + 500) / 1000) + 130), 0, 255)
+#define TEMP_FROM_REG(val) (((val) - 130) * 1000)
+#define TEMP_CLAMP(val) clamp_val(val, -130000, 125000)
+#define TEMP_TO_REG(val) (DIV_ROUND_CLOSEST(TEMP_CLAMP(val), 1000) + 130)
static ssize_t get_temp_input(struct device *dev, struct device_attribute *attr,
char *buf)
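
The gl520sm changes above replace hand-rolled rounding with DIV_ROUND_CLOSEST and clamp in real-world units before scaling down to the 8-bit register. A rough illustration of that order of operations, reusing the 95/4 VDD scale factor shown in the hunk (helper names are invented):

    #include <linux/kernel.h>	/* DIV_ROUND_CLOSEST, clamp_val */

    /* Register value -> millivolts, scale factor 95/4 as in the macros above. */
    static inline long demo_vdd_from_reg(long reg)
    {
    	return DIV_ROUND_CLOSEST(reg * 95, 4);
    }

    /* Millivolts -> register value; clamp in mV so the result fits 0..255. */
    static inline long demo_vdd_to_reg(long mv)
    {
    	mv = clamp_val(mv, 0, 255 * 95 / 4);
    	return DIV_ROUND_CLOSEST(mv * 4, 95);
    }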
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index d57a2f75dccf..32c6a40a408f 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -72,6 +72,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
.driver_data = (kernel_ulong_t)0,
},
+ {
+ /* Cannon Lake H */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa326),
+ .driver_data = (kernel_ulong_t)0,
+ },
+ {
+ /* Cannon Lake LP */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6),
+ .driver_data = (kernel_ulong_t)0,
+ },
{ 0 },
};
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 02095410cb33..cb07713aceda 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -1065,7 +1065,7 @@ void stm_source_unregister_device(struct stm_source_data *data)
stm_source_link_drop(src);
- device_destroy(&stm_source_class, src->dev.devt);
+ device_unregister(&src->dev);
}
EXPORT_SYMBOL_GPL(stm_source_unregister_device);
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 10835d1f559b..dee0fc421054 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -1131,6 +1131,7 @@ static int at91_twi_suspend_noirq(struct device *dev)
static int at91_twi_resume_noirq(struct device *dev)
{
+ struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
int ret;
if (!pm_runtime_status_suspended(dev)) {
@@ -1142,6 +1143,8 @@ static int at91_twi_resume_noirq(struct device *dev)
pm_runtime_mark_last_busy(dev);
pm_request_autosuspend(dev);
+ at91_init_twi_bus(twi_dev);
+
return 0;
}
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 7ba795b24e75..639d1a9c8793 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -339,8 +339,10 @@ static int ismt_process_desc(const struct ismt_desc *desc,
break;
case I2C_SMBUS_BLOCK_DATA:
case I2C_SMBUS_I2C_BLOCK_DATA:
- memcpy(&data->block[1], dma_buffer, desc->rxbytes);
- data->block[0] = desc->rxbytes;
+ if (desc->rxbytes != dma_buffer[0] + 1)
+ return -EMSGSIZE;
+
+ memcpy(data->block, dma_buffer, desc->rxbytes);
break;
}
return 0;
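
The ismt hunk rejects a DMA buffer whose leading SMBus block-length byte disagrees with the descriptor's rxbytes before copying anything out. A generic validate-then-copy sketch with hypothetical names:

    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /*
     * block[0] is expected to carry the byte count, followed by the data,
     * mirroring the SMBus block-data layout handled in the hunk above.
     */
    static int demo_copy_block(u8 *block, const u8 *dma_buffer, int rxbytes)
    {
    	if (rxbytes != dma_buffer[0] + 1)
    		return -EMSGSIZE;	/* length byte and descriptor disagree */

    	memcpy(block, dma_buffer, rxbytes);
    	return 0;
    }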
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index f325663c27c5..4b58e8aaf5c5 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -786,10 +786,6 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0);
- i2c->cmd = 0;
- memset(i2c->cmd_buf, 0, BUFSIZE);
- memset(i2c->data_buf, 0, BUFSIZE);
-
i2c->irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
dev_name(&pdev->dev), i2c);
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
index 71d3929adf54..8d65f33af5da 100644
--- a/drivers/i2c/busses/i2c-meson.c
+++ b/drivers/i2c/busses/i2c-meson.c
@@ -175,7 +175,7 @@ static void meson_i2c_put_data(struct meson_i2c *i2c, char *buf, int len)
wdata1 |= *buf++ << ((i - 4) * 8);
writel(wdata0, i2c->regs + REG_TOK_WDATA0);
- writel(wdata0, i2c->regs + REG_TOK_WDATA1);
+ writel(wdata1, i2c->regs + REG_TOK_WDATA1);
dev_dbg(i2c->dev, "%s: data %08x %08x len %d\n", __func__,
wdata0, wdata1, len);
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index 4d960d3b93c0..91d34ed756ea 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -257,7 +257,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
unsigned int vref_mv)
{
struct ad7793_state *st = iio_priv(indio_dev);
- int i, ret = -1;
+ int i, ret;
unsigned long long scale_uv;
u32 id;
@@ -266,7 +266,7 @@ static int ad7793_setup(struct iio_dev *indio_dev,
return ret;
/* reset the serial interface */
- ret = spi_write(st->sd.spi, (u8 *)&ret, sizeof(ret));
+ ret = ad_sd_reset(&st->sd, 32);
if (ret < 0)
goto out;
usleep_range(500, 2000); /* Wait for at least 500us */
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index d10bd0c97233..22c4c17cd996 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -177,6 +177,34 @@ out:
}
EXPORT_SYMBOL_GPL(ad_sd_read_reg);
+/**
+ * ad_sd_reset() - Reset the serial interface
+ *
+ * @sigma_delta: The sigma delta device
+ * @reset_length: Number of SCLKs with DIN = 1
+ *
+ * Returns 0 on success, an error code otherwise.
+ **/
+int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
+ unsigned int reset_length)
+{
+ uint8_t *buf;
+ unsigned int size;
+ int ret;
+
+ size = DIV_ROUND_UP(reset_length, 8);
+ buf = kcalloc(size, sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memset(buf, 0xff, size);
+ ret = spi_write(sigma_delta->spi, buf, size);
+ kfree(buf);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ad_sd_reset);
+
static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
unsigned int mode, unsigned int channel)
{
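
ad_sd_reset() above drives DIN high for reset_length clock cycles by writing enough 0xff bytes over SPI. A hedged usage sketch, modeled on the ad7793 hunk earlier in this patch and assuming the ad_sd_reset() prototype is exported through linux/iio/adc/ad_sigma_delta.h as part of this series:

    #include <linux/delay.h>
    #include <linux/iio/adc/ad_sigma_delta.h>

    static int demo_reset_adc(struct ad_sigma_delta *sd)
    {
    	/* 32 SCLKs with DIN = 1 resets the serial interface. */
    	int ret = ad_sd_reset(sd, 32);

    	if (ret < 0)
    		return ret;

    	usleep_range(500, 2000);	/* wait at least 500 us after reset */
    	return 0;
    }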
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 0c904edd6c00..f684fe31f832 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -28,8 +28,6 @@
#include <linux/iio/driver.h>
#define AXP288_ADC_EN_MASK 0xF1
-#define AXP288_ADC_TS_PIN_GPADC 0xF2
-#define AXP288_ADC_TS_PIN_ON 0xF3
enum axp288_adc_id {
AXP288_ADC_TS,
@@ -123,16 +121,6 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
return IIO_VAL_INT;
}
-static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
- unsigned long address)
-{
- /* channels other than GPADC do not need to switch TS pin */
- if (address != AXP288_GP_ADC_H)
- return 0;
-
- return regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
-}
-
static int axp288_adc_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
@@ -143,16 +131,7 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
- chan->address)) {
- dev_err(&indio_dev->dev, "GPADC mode\n");
- ret = -EINVAL;
- break;
- }
ret = axp288_adc_read_channel(val, chan->address, info->regmap);
- if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
- chan->address))
- dev_err(&indio_dev->dev, "TS pin restore\n");
break;
default:
ret = -EINVAL;
@@ -162,15 +141,6 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
return ret;
}
-static int axp288_adc_set_state(struct regmap *regmap)
-{
- /* ADC should be always enabled for internal FG to function */
- if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
- return -EIO;
-
- return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
-}
-
static const struct iio_info axp288_adc_iio_info = {
.read_raw = &axp288_adc_read_raw,
.driver_module = THIS_MODULE,
@@ -199,7 +169,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
* Set ADC to enabled state at all time, including system suspend.
* otherwise internal fuel gauge functionality may be affected.
*/
- ret = axp288_adc_set_state(axp20x->regmap);
+ ret = regmap_write(info->regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
if (ret) {
dev_err(&pdev->dev, "unable to enable ADC device\n");
return ret;
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index 8569c8e1f4b2..ad2681acce9a 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -17,6 +17,8 @@
* MCP3204
* MCP3208
* ------------
+ * 13 bit converter
+ * MCP3301
*
* Datasheet can be found here:
* http://ww1.microchip.com/downloads/en/DeviceDoc/21293C.pdf mcp3001
@@ -96,7 +98,7 @@ static int mcp320x_channel_to_tx_data(int device_index,
}
static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
- bool differential, int device_index)
+ bool differential, int device_index, int *val)
{
int ret;
@@ -117,19 +119,25 @@ static int mcp320x_adc_conversion(struct mcp320x *adc, u8 channel,
switch (device_index) {
case mcp3001:
- return (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3);
+ *val = (adc->rx_buf[0] << 5 | adc->rx_buf[1] >> 3);
+ return 0;
case mcp3002:
case mcp3004:
case mcp3008:
- return (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6);
+ *val = (adc->rx_buf[0] << 2 | adc->rx_buf[1] >> 6);
+ return 0;
case mcp3201:
- return (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1);
+ *val = (adc->rx_buf[0] << 7 | adc->rx_buf[1] >> 1);
+ return 0;
case mcp3202:
case mcp3204:
case mcp3208:
- return (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4);
+ *val = (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4);
+ return 0;
case mcp3301:
- return sign_extend32((adc->rx_buf[0] & 0x1f) << 8 | adc->rx_buf[1], 12);
+ *val = sign_extend32((adc->rx_buf[0] & 0x1f) << 8
+ | adc->rx_buf[1], 12);
+ return 0;
default:
return -EINVAL;
}
@@ -150,12 +158,10 @@ static int mcp320x_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
ret = mcp320x_adc_conversion(adc, channel->address,
- channel->differential, device_index);
-
+ channel->differential, device_index, val);
if (ret < 0)
goto out;
- *val = ret;
ret = IIO_VAL_INT;
break;
@@ -304,6 +310,7 @@ static int mcp320x_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &mcp320x_info;
+ spi_set_drvdata(spi, indio_dev);
chip_info = &mcp320x_chip_infos[spi_get_device_id(spi)->driver_data];
indio_dev->channels = chip_info->channels;
diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
index 0c74869a540a..7ffc5db4d7ee 100644
--- a/drivers/iio/adc/twl4030-madc.c
+++ b/drivers/iio/adc/twl4030-madc.c
@@ -866,8 +866,10 @@ static int twl4030_madc_probe(struct platform_device *pdev)
/* Enable 3v1 bias regulator for MADC[3:6] */
madc->usb3v1 = devm_regulator_get(madc->dev, "vusb3v1");
- if (IS_ERR(madc->usb3v1))
- return -ENODEV;
+ if (IS_ERR(madc->usb3v1)) {
+ ret = -ENODEV;
+ goto err_i2c;
+ }
ret = regulator_enable(madc->usb3v1);
if (ret)
@@ -876,11 +878,13 @@ static int twl4030_madc_probe(struct platform_device *pdev)
ret = iio_device_register(iio_dev);
if (ret) {
dev_err(&pdev->dev, "could not register iio device\n");
- goto err_i2c;
+ goto err_usb3v1;
}
return 0;
+err_usb3v1:
+ regulator_disable(madc->usb3v1);
err_i2c:
twl4030_madc_set_current_generator(madc, 0, 0);
err_current_generator:
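
The twl4030-madc hunk adds an err_usb3v1 label so the regulator enabled just before iio_device_register() is disabled again if registration fails, keeping cleanup strictly in reverse order of setup. A generic probe-style sketch of stacked error labels, with purely hypothetical stand-in helpers:

    static int demo_enable_supply(void) { return 0; }	/* stand-ins */
    static void demo_disable_supply(void) { }
    static int demo_register_device(void) { return 0; }

    static int demo_probe(void)
    {
    	int ret;

    	ret = demo_enable_supply();
    	if (ret)
    		return ret;		/* nothing to unwind yet */

    	ret = demo_register_device();
    	if (ret)
    		goto err_supply;	/* undo only what already succeeded */

    	return 0;

    err_supply:
    	demo_disable_supply();
    	return ret;
    }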
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 02e636a1c49a..475c5a74f2d1 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -1208,7 +1208,7 @@ static int xadc_probe(struct platform_device *pdev)
ret = xadc->ops->setup(pdev, indio_dev, irq);
if (ret)
- goto err_free_samplerate_trigger;
+ goto err_clk_disable_unprepare;
ret = request_irq(irq, xadc->ops->interrupt_handler, 0,
dev_name(&pdev->dev), indio_dev);
@@ -1268,6 +1268,8 @@ static int xadc_probe(struct platform_device *pdev)
err_free_irq:
free_irq(irq, indio_dev);
+err_clk_disable_unprepare:
+ clk_disable_unprepare(xadc->clk);
err_free_samplerate_trigger:
if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
iio_trigger_free(xadc->samplerate_trigger);
@@ -1277,8 +1279,6 @@ err_free_convst_trigger:
err_triggered_buffer_cleanup:
if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
iio_triggered_buffer_cleanup(indio_dev);
-err_clk_disable_unprepare:
- clk_disable_unprepare(xadc->clk);
err_device_free:
kfree(indio_dev->channels);
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 131b434af994..e08a3c794120 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -221,8 +221,10 @@ static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
ret = indio_dev->info->debugfs_reg_access(indio_dev,
indio_dev->cached_reg_addr,
0, &val);
- if (ret)
+ if (ret) {
dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
+ return ret;
+ }
len = snprintf(buf, sizeof(buf), "0x%X\n", val);
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 6c8ff10101c0..77cc77ba998f 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -7097,7 +7097,7 @@ static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
unsigned long flags;
while (wait) {
- unsigned long shadow;
+ unsigned long shadow = 0;
int cstart, previ = -1;
/*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 6699ecd855f0..bad76eed06b3 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1239,7 +1239,7 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
/* remove from path/mc list */
- list_del(&neigh->list);
+ list_del_init(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
} else {
np = &neigh->hnext;
@@ -1406,7 +1406,7 @@ void ipoib_neigh_free(struct ipoib_neigh *neigh)
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
/* remove from parent list */
- list_del(&neigh->list);
+ list_del_init(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
return;
} else {
@@ -1491,7 +1491,7 @@ void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
/* remove from parent list */
- list_del(&neigh->list);
+ list_del_init(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
} else {
np = &neigh->hnext;
@@ -1533,7 +1533,7 @@ static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
/* remove from path/mc list */
- list_del(&neigh->list);
+ list_del_init(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
}
}
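
The ipoib hunks switch list_del() to list_del_init() before handing the neighbour to call_rcu(), leaving the entry's list head in a well-defined empty state for any later check or removal on another teardown path. A small sketch of the difference, with a made-up struct:

    #include <linux/list.h>

    struct demo_neigh {
    	struct list_head list;	/* already linked by the caller */
    };

    static void demo_remove(struct demo_neigh *n)
    {
    	/*
    	 * list_del() would leave n->list poisoned; list_del_init()
    	 * re-initializes it, so a later list_empty(&n->list) test or a
    	 * second removal attempt stays safe.
    	 */
    	list_del_init(&n->list);
    }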
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 57a34f87dedf..9b47a437d6c9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -160,11 +160,11 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
out:
up_write(&ppriv->vlan_rwsem);
+ rtnl_unlock();
+
if (result)
free_netdev(priv->dev);
- rtnl_unlock();
-
return result;
}
@@ -185,7 +185,6 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
if (priv->pkey == pkey &&
priv->child_type == IPOIB_LEGACY_CHILD) {
- unregister_netdevice(priv->dev);
list_del(&priv->list);
dev = priv->dev;
break;
@@ -193,6 +192,11 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
}
up_write(&ppriv->vlan_rwsem);
+ if (dev) {
+ ipoib_dbg(ppriv, "delete child vlan %s\n", dev->name);
+ unregister_netdevice(dev);
+ }
+
rtnl_unlock();
if (dev) {
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index ce6ff9b301bb..7e2dc5e56632 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -381,8 +381,8 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
return 0;
if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) {
- psmouse_warn(psmouse, "failed to get extended button data\n");
- button_info = 0;
+ psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
+ button_info = 0x33;
}
psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 5be14ad29d46..dbf09836ff30 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -905,6 +905,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
},
},
{
+ /* Gigabyte P57 - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P57"),
+ },
+ },
+ {
/* Schenker XMG C504 - Elantech touchpad */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a0ef57483ebb..52c36394dba5 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3096,6 +3096,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
mutex_unlock(&domain->api_lock);
domain_flush_tlb_pde(domain);
+ domain_flush_complete(domain);
return unmap_size;
}
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 7651545e3f2e..3f1617ca2fc0 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -492,8 +492,12 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
pte |= ARM_LPAE_PTE_NSTABLE;
__arm_lpae_set_pte(ptep, pte, cfg);
- } else {
+ } else if (!iopte_leaf(pte, lvl)) {
cptep = iopte_deref(pte, data);
+ } else {
+ /* We require an unmap first */
+ WARN_ON(!selftest_running);
+ return -EEXIST;
}
/* Rinse, repeat */
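
The io-pgtable-arm hunk refuses to silently overwrite an existing leaf PTE and returns -EEXIST instead, descending only when the present entry is a table. A schematic version of that three-way decision (the enum and helper are illustrative, not the real iopte accessors):

    #include <linux/errno.h>

    enum demo_pte_kind { DEMO_PTE_NONE, DEMO_PTE_TABLE, DEMO_PTE_LEAF };

    static int demo_map_step(enum demo_pte_kind pte)
    {
    	switch (pte) {
    	case DEMO_PTE_NONE:
    		return 0;		/* install a new table entry here */
    	case DEMO_PTE_TABLE:
    		return 1;		/* descend to the next level */
    	case DEMO_PTE_LEAF:
    	default:
    		return -EEXIST;	/* caller must unmap before remapping */
    	}
    }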
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index 75573fa431ba..63faee04a008 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -198,7 +198,8 @@ static const struct irq_domain_ops crossbar_domain_ops = {
static int __init crossbar_of_init(struct device_node *node)
{
- int i, size, max = 0, reserved = 0, entry;
+ int i, size, reserved = 0;
+ u32 max = 0, entry;
const __be32 *irqsr;
int ret = -ENOMEM;
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 9e17ef27a183..6f1dbd52ec91 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -915,8 +915,11 @@ static int __init gic_of_init(struct device_node *node,
gic_len = resource_size(&res);
}
- if (mips_cm_present())
+ if (mips_cm_present()) {
write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
+ /* Ensure GIC region is enabled before trying to access it */
+ __sync();
+ }
gic_present = true;
__gic_init(gic_base, gic_len, cpu_vec, 0, node);
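
The MIPS GIC change places a barrier (__sync()) between enabling the GIC register region and the first access to it, so the enable is guaranteed to take effect first. A generic write-then-barrier sketch using portable accessors and mb() as a stand-in for the MIPS-specific __sync():

    #include <linux/io.h>

    /* Enable a register window, then order the enable before the first
     * access that depends on it. */
    static u32 demo_enable_then_read(void __iomem *enable_reg,
    				 void __iomem *window)
    {
    	__raw_writel(1, enable_reg);	/* turn the window on */
    	mb();				/* enable must land before the access */
    	return __raw_readl(window);
    }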
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index bf3fbd00a091..64b586458d3d 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -828,7 +828,6 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
isdn_net_local *lp;
struct ippp_struct *is;
int proto;
- unsigned char protobuf[4];
is = file->private_data;
@@ -842,24 +841,28 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
if (!lp)
printk(KERN_DEBUG "isdn_ppp_write: lp == NULL\n");
else {
- /*
- * Don't reset huptimer for
- * LCP packets. (Echo requests).
- */
- if (copy_from_user(protobuf, buf, 4))
- return -EFAULT;
- proto = PPP_PROTOCOL(protobuf);
- if (proto != PPP_LCP)
- lp->huptimer = 0;
+ if (lp->isdn_device < 0 || lp->isdn_channel < 0) {
+ unsigned char protobuf[4];
+ /*
+ * Don't reset huptimer for
+ * LCP packets. (Echo requests).
+ */
+ if (copy_from_user(protobuf, buf, 4))
+ return -EFAULT;
+
+ proto = PPP_PROTOCOL(protobuf);
+ if (proto != PPP_LCP)
+ lp->huptimer = 0;
- if (lp->isdn_device < 0 || lp->isdn_channel < 0)
return 0;
+ }
if ((dev->drv[lp->isdn_device]->flags & DRV_FLAG_RUNNING) &&
lp->dialstate == 0 &&
(lp->flags & ISDN_NET_CONNECTED)) {
unsigned short hl;
struct sk_buff *skb;
+ unsigned char *cpy_buf;
/*
* we need to reserve enough space in front of
* sk_buff. old call to dev_alloc_skb only reserved
@@ -872,11 +875,21 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
return count;
}
skb_reserve(skb, hl);
- if (copy_from_user(skb_put(skb, count), buf, count))
+ cpy_buf = skb_put(skb, count);
+ if (copy_from_user(cpy_buf, buf, count))
{
kfree_skb(skb);
return -EFAULT;
}
+
+ /*
+ * Don't reset huptimer for
+ * LCP packets. (Echo requests).
+ */
+ proto = PPP_PROTOCOL(cpy_buf);
+ if (proto != PPP_LCP)
+ lp->huptimer = 0;
+
if (is->debug & 0x40) {
printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len);
isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
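
The isdn_ppp rework copies the user buffer into the skb once and only then inspects the PPP protocol field, rather than fetching 4 bytes separately up front where a racing writer could change them afterwards. A compressed copy-once-then-parse sketch; the timer helper is a hypothetical stand-in, and the skb is assumed to have been allocated with at least count bytes of tailroom:

    #include <linux/skbuff.h>
    #include <linux/uaccess.h>

    #define DEMO_PROTO_LCP	0xc021

    static void demo_reset_idle_timer(void) { }	/* stand-in */

    static int demo_xmit(struct sk_buff *skb, const char __user *buf, int count)
    {
    	unsigned char *data = skb_put(skb, count);

    	/* Single copy: parse the header from the kernel copy only. */
    	if (copy_from_user(data, buf, count))
    		return -EFAULT;

    	if (count >= 4) {
    		int proto = (data[2] << 8) | data[3];	/* addr, ctrl, proto */

    		if (proto != DEMO_PROTO_LCP)
    			demo_reset_idle_timer();
    	}

    	return count;
    }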
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 620268b63b2a..966227a3df1a 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -596,9 +596,18 @@ config LEDS_QPNP
LEDs in both PWM and light pattern generator (LPG) modes. For older
PMICs, it also supports WLEDs and flash LEDs.
+config LEDS_QPNP_FLASH
+ tristate "Support for QPNP Flash LEDs"
+ depends on LEDS_CLASS && SPMI
+ help
+ This driver supports the flash LED functionality of Qualcomm
+ Technologies, Inc. QPNP PMICs. This driver supports PMICs up through
+ PM8994. It can configure the flash LED target current for several
+ independent channels.
+
config LEDS_QPNP_FLASH_V2
tristate "Support for QPNP V2 Flash LEDs"
- depends on LEDS_CLASS && MFD_SPMI_PMIC
+ depends on LEDS_CLASS && MFD_SPMI_PMIC && !LEDS_QPNP_FLASH
help
This driver supports the flash V2 LED functionality of Qualcomm
Technologies, Inc. QPNP PMICs. This driver supports PMICs starting
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index aa5ba0cf4de6..8d8ba9175810 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_LEDS_MAX8997) += leds-max8997.o
obj-$(CONFIG_LEDS_LM355x) += leds-lm355x.o
obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o
obj-$(CONFIG_LEDS_QPNP) += leds-qpnp.o
+obj-$(CONFIG_LEDS_QPNP_FLASH) += leds-qpnp-flash.o
obj-$(CONFIG_LEDS_QPNP_FLASH_V2) += leds-qpnp-flash-v2.o
obj-$(CONFIG_LEDS_QPNP_WLED) += leds-qpnp-wled.o
obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o
diff --git a/drivers/leds/leds-qpnp-flash.c b/drivers/leds/leds-qpnp-flash.c
new file mode 100644
index 000000000000..493631774936
--- /dev/null
+++ b/drivers/leds/leds-qpnp-flash.c
@@ -0,0 +1,2709 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/errno.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
+#include <linux/power_supply.h>
+#include <linux/leds-qpnp-flash.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include "leds.h"
+
+#define FLASH_LED_PERIPHERAL_SUBTYPE(base) (base + 0x05)
+#define FLASH_SAFETY_TIMER(base) (base + 0x40)
+#define FLASH_MAX_CURRENT(base) (base + 0x41)
+#define FLASH_LED0_CURRENT(base) (base + 0x42)
+#define FLASH_LED1_CURRENT(base) (base + 0x43)
+#define FLASH_CLAMP_CURRENT(base) (base + 0x44)
+#define FLASH_MODULE_ENABLE_CTRL(base) (base + 0x46)
+#define FLASH_LED_STROBE_CTRL(base) (base + 0x47)
+#define FLASH_LED_TMR_CTRL(base) (base + 0x48)
+#define FLASH_HEADROOM(base) (base + 0x4A)
+#define FLASH_STARTUP_DELAY(base) (base + 0x4B)
+#define FLASH_MASK_ENABLE(base) (base + 0x4C)
+#define FLASH_VREG_OK_FORCE(base) (base + 0x4F)
+#define FLASH_FAULT_DETECT(base) (base + 0x51)
+#define FLASH_THERMAL_DRATE(base) (base + 0x52)
+#define FLASH_CURRENT_RAMP(base) (base + 0x54)
+#define FLASH_VPH_PWR_DROOP(base) (base + 0x5A)
+#define FLASH_HDRM_SNS_ENABLE_CTRL0(base) (base + 0x5C)
+#define FLASH_HDRM_SNS_ENABLE_CTRL1(base) (base + 0x5D)
+#define FLASH_LED_UNLOCK_SECURE(base) (base + 0xD0)
+#define FLASH_PERPH_RESET_CTRL(base) (base + 0xDA)
+#define FLASH_TORCH(base) (base + 0xE4)
+
+#define FLASH_STATUS_REG_MASK 0xFF
+#define FLASH_LED_FAULT_STATUS(base) (base + 0x08)
+#define INT_LATCHED_STS(base) (base + 0x18)
+#define IN_POLARITY_HIGH(base) (base + 0x12)
+#define INT_SET_TYPE(base) (base + 0x11)
+#define INT_EN_SET(base) (base + 0x15)
+#define INT_LATCHED_CLR(base) (base + 0x14)
+
+#define FLASH_HEADROOM_MASK 0x03
+#define FLASH_STARTUP_DLY_MASK 0x03
+#define FLASH_VREG_OK_FORCE_MASK 0xC0
+#define FLASH_FAULT_DETECT_MASK 0x80
+#define FLASH_THERMAL_DERATE_MASK 0xBF
+#define FLASH_SECURE_MASK 0xFF
+#define FLASH_TORCH_MASK 0x03
+#define FLASH_CURRENT_MASK 0x7F
+#define FLASH_TMR_MASK 0x03
+#define FLASH_TMR_SAFETY 0x00
+#define FLASH_SAFETY_TIMER_MASK 0x7F
+#define FLASH_MODULE_ENABLE_MASK 0xE0
+#define FLASH_STROBE_MASK 0xC0
+#define FLASH_CURRENT_RAMP_MASK 0xBF
+#define FLASH_VPH_PWR_DROOP_MASK 0xF3
+#define FLASH_LED_HDRM_SNS_ENABLE_MASK 0x81
+#define FLASH_MASK_MODULE_CONTRL_MASK 0xE0
+#define FLASH_FOLLOW_OTST2_RB_MASK 0x08
+
+#define FLASH_LED_TRIGGER_DEFAULT "none"
+#define FLASH_LED_HEADROOM_DEFAULT_MV 500
+#define FLASH_LED_STARTUP_DELAY_DEFAULT_US 128
+#define FLASH_LED_CLAMP_CURRENT_DEFAULT_MA 200
+#define FLASH_LED_THERMAL_DERATE_THRESHOLD_DEFAULT_C 80
+#define FLASH_LED_RAMP_UP_STEP_DEFAULT_US 3
+#define FLASH_LED_RAMP_DN_STEP_DEFAULT_US 3
+#define FLASH_LED_VPH_PWR_DROOP_THRESHOLD_DEFAULT_MV 3200
+#define FLASH_LED_VPH_PWR_DROOP_DEBOUNCE_TIME_DEFAULT_US 10
+#define FLASH_LED_THERMAL_DERATE_RATE_DEFAULT_PERCENT 2
+#define FLASH_RAMP_UP_DELAY_US_MIN 1000
+#define FLASH_RAMP_UP_DELAY_US_MAX 1001
+#define FLASH_RAMP_DN_DELAY_US_MIN 2160
+#define FLASH_RAMP_DN_DELAY_US_MAX 2161
+#define FLASH_BOOST_REGULATOR_PROBE_DELAY_MS 2000
+#define FLASH_TORCH_MAX_LEVEL 0x0F
+#define FLASH_MAX_LEVEL 0x4F
+#define FLASH_LED_FLASH_HW_VREG_OK 0x40
+#define FLASH_LED_FLASH_SW_VREG_OK 0x80
+#define FLASH_LED_STROBE_TYPE_HW 0x04
+#define FLASH_DURATION_DIVIDER 10
+#define FLASH_LED_HEADROOM_DIVIDER 100
+#define FLASH_LED_HEADROOM_OFFSET 2
+#define FLASH_LED_MAX_CURRENT_MA 1000
+#define FLASH_LED_THERMAL_THRESHOLD_MIN 95
+#define FLASH_LED_THERMAL_DEVIDER 10
+#define FLASH_LED_VPH_DROOP_THRESHOLD_MIN_MV 2500
+#define FLASH_LED_VPH_DROOP_THRESHOLD_DIVIDER 100
+#define FLASH_LED_HDRM_SNS_ENABLE 0x81
+#define FLASH_LED_HDRM_SNS_DISABLE 0x01
+#define FLASH_LED_UA_PER_MA 1000
+#define FLASH_LED_MASK_MODULE_MASK2_ENABLE 0x20
+#define FLASH_LED_MASK3_ENABLE_SHIFT 7
+#define FLASH_LED_MODULE_CTRL_DEFAULT 0x60
+#define FLASH_LED_CURRENT_READING_DELAY_MIN 5000
+#define FLASH_LED_CURRENT_READING_DELAY_MAX 5001
+#define FLASH_LED_OPEN_FAULT_DETECTED 0xC
+
+#define FLASH_UNLOCK_SECURE 0xA5
+#define FLASH_LED_TORCH_ENABLE 0x00
+#define FLASH_LED_TORCH_DISABLE 0x03
+#define FLASH_MODULE_ENABLE 0x80
+#define FLASH_LED0_TRIGGER 0x80
+#define FLASH_LED1_TRIGGER 0x40
+#define FLASH_LED0_ENABLEMENT 0x40
+#define FLASH_LED1_ENABLEMENT 0x20
+#define FLASH_LED_DISABLE 0x00
+#define FLASH_LED_MIN_CURRENT_MA 13
+#define FLASH_SUBTYPE_DUAL 0x01
+#define FLASH_SUBTYPE_SINGLE 0x02
+
+/*
+ * ID represents physical LEDs for individual control purpose.
+ */
+enum flash_led_id {
+ FLASH_LED_0 = 0,
+ FLASH_LED_1,
+ FLASH_LED_SWITCH,
+};
+
+enum flash_led_type {
+ FLASH = 0,
+ TORCH,
+ SWITCH,
+};
+
+enum thermal_derate_rate {
+ RATE_1_PERCENT = 0,
+ RATE_1P25_PERCENT,
+ RATE_2_PERCENT,
+ RATE_2P5_PERCENT,
+ RATE_5_PERCENT,
+};
+
+enum current_ramp_steps {
+ RAMP_STEP_0P2_US = 0,
+ RAMP_STEP_0P4_US,
+ RAMP_STEP_0P8_US,
+ RAMP_STEP_1P6_US,
+ RAMP_STEP_3P3_US,
+ RAMP_STEP_6P7_US,
+ RAMP_STEP_13P5_US,
+ RAMP_STEP_27US,
+};
+
+struct flash_regulator_data {
+ struct regulator *regs;
+ const char *reg_name;
+ u32 max_volt_uv;
+};
+
+/*
+ * Configurations for each individual LED
+ */
+struct flash_node_data {
+ struct platform_device *pdev;
+ struct regmap *regmap;
+ struct led_classdev cdev;
+ struct work_struct work;
+ struct flash_regulator_data *reg_data;
+ u16 max_current;
+ u16 prgm_current;
+ u16 prgm_current2;
+ u16 duration;
+ u8 id;
+ u8 type;
+ u8 trigger;
+ u8 enable;
+ u8 num_regulators;
+ bool flash_on;
+};
+
+/*
+ * Flash LED configuration read from device tree
+ */
+struct flash_led_platform_data {
+ unsigned int temp_threshold_num;
+ unsigned int temp_derate_curr_num;
+ unsigned int *die_temp_derate_curr_ma;
+ unsigned int *die_temp_threshold_degc;
+ u16 ramp_up_step;
+ u16 ramp_dn_step;
+ u16 vph_pwr_droop_threshold;
+ u16 headroom;
+ u16 clamp_current;
+ u8 thermal_derate_threshold;
+ u8 vph_pwr_droop_debounce_time;
+ u8 startup_dly;
+ u8 thermal_derate_rate;
+ bool pmic_charger_support;
+ bool self_check_en;
+ bool thermal_derate_en;
+ bool current_ramp_en;
+ bool vph_pwr_droop_en;
+ bool hdrm_sns_ch0_en;
+ bool hdrm_sns_ch1_en;
+ bool power_detect_en;
+ bool mask3_en;
+ bool follow_rb_disable;
+ bool die_current_derate_en;
+};
+
+struct qpnp_flash_led_buffer {
+ struct mutex debugfs_lock; /* Prevent thread concurrency */
+ size_t rpos;
+ size_t wpos;
+ size_t len;
+ struct qpnp_flash_led *led;
+ u32 buffer_cnt;
+ char data[0];
+};
+
+/*
+ * Flash LED data structure containing flash LED attributes
+ */
+struct qpnp_flash_led {
+ struct pmic_revid_data *revid_data;
+ struct platform_device *pdev;
+ struct regmap *regmap;
+ struct flash_led_platform_data *pdata;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *gpio_state_active;
+ struct pinctrl_state *gpio_state_suspend;
+ struct flash_node_data *flash_node;
+ struct power_supply *battery_psy;
+ struct workqueue_struct *ordered_workq;
+ struct qpnp_vadc_chip *vadc_dev;
+ struct mutex flash_led_lock;
+ struct dentry *dbgfs_root;
+ int num_leds;
+ u16 base;
+ u16 current_addr;
+ u16 current2_addr;
+ u8 peripheral_type;
+ u8 fault_reg;
+ bool gpio_enabled;
+ bool charging_enabled;
+ bool strobe_debug;
+ bool dbg_feature_en;
+ bool open_fault;
+};
+
+static u8 qpnp_flash_led_ctrl_dbg_regs[] = {
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
+ 0x4A, 0x4B, 0x4C, 0x4F, 0x51, 0x52, 0x54, 0x55, 0x5A, 0x5C, 0x5D,
+};
+
+static int flash_led_dbgfs_file_open(struct qpnp_flash_led *led,
+ struct file *file)
+{
+ struct qpnp_flash_led_buffer *log;
+ size_t logbufsize = SZ_4K;
+
+ log = kzalloc(logbufsize, GFP_KERNEL);
+ if (!log)
+ return -ENOMEM;
+
+ log->rpos = 0;
+ log->wpos = 0;
+ log->len = logbufsize - sizeof(*log);
+ mutex_init(&log->debugfs_lock);
+ log->led = led;
+
+ log->buffer_cnt = 1;
+ file->private_data = log;
+
+ return 0;
+}
+
+static int flash_led_dfs_open(struct inode *inode, struct file *file)
+{
+ struct qpnp_flash_led *led = inode->i_private;
+
+ return flash_led_dbgfs_file_open(led, file);
+}
+
+static int flash_led_dfs_close(struct inode *inode, struct file *file)
+{
+ struct qpnp_flash_led_buffer *log = file->private_data;
+
+ if (log) {
+ file->private_data = NULL;
+ mutex_destroy(&log->debugfs_lock);
+ kfree(log);
+ }
+
+ return 0;
+}
+
+#define MIN_BUFFER_WRITE_LEN 20
+static int print_to_log(struct qpnp_flash_led_buffer *log,
+ const char *fmt, ...)
+{
+ va_list args;
+ int cnt;
+ char *log_buf;
+ size_t size = log->len - log->wpos;
+
+ if (size < MIN_BUFFER_WRITE_LEN)
+ return 0; /* not enough buffer left */
+
+ log_buf = &log->data[log->wpos];
+ va_start(args, fmt);
+ cnt = vscnprintf(log_buf, size, fmt, args);
+ va_end(args);
+
+ log->wpos += cnt;
+ return cnt;
+}
+
+static ssize_t flash_led_dfs_latched_reg_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *ppos) {
+ struct qpnp_flash_led_buffer *log = fp->private_data;
+ struct qpnp_flash_led *led;
+ uint val;
+ int rc = 0;
+ size_t len;
+ size_t ret;
+
+ if (!log) {
+ pr_err("error: file private data is NULL\n");
+ return -EFAULT;
+ }
+ led = log->led;
+
+ mutex_lock(&log->debugfs_lock);
+ if ((log->rpos >= log->wpos && log->buffer_cnt == 0) ||
+ ((log->len - log->wpos) < MIN_BUFFER_WRITE_LEN))
+ goto unlock_mutex;
+
+ rc = regmap_read(led->regmap, INT_LATCHED_STS(led->base), &val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Unable to read from address %x, rc(%d)\n",
+ INT_LATCHED_STS(led->base), rc);
+ goto unlock_mutex;
+ }
+ log->buffer_cnt--;
+
+ rc = print_to_log(log, "0x%05X ", INT_LATCHED_STS(led->base));
+ if (rc == 0)
+ goto unlock_mutex;
+
+ rc = print_to_log(log, "0x%02X ", val);
+ if (rc == 0)
+ goto unlock_mutex;
+
+ if (log->wpos > 0 && log->data[log->wpos - 1] == ' ')
+ log->data[log->wpos - 1] = '\n';
+
+ len = min(count, log->wpos - log->rpos);
+
+ ret = copy_to_user(buf, &log->data[log->rpos], len);
+ if (ret) {
+ pr_err("error copy register value to user\n");
+ rc = -EFAULT;
+ goto unlock_mutex;
+ }
+
+ len -= ret;
+ *ppos += len;
+ log->rpos += len;
+
+ rc = len;
+
+unlock_mutex:
+ mutex_unlock(&log->debugfs_lock);
+ return rc;
+}
+
+static ssize_t flash_led_dfs_fault_reg_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *ppos) {
+ struct qpnp_flash_led_buffer *log = fp->private_data;
+ struct qpnp_flash_led *led;
+ int rc = 0;
+ size_t len;
+ size_t ret;
+
+ if (!log) {
+ pr_err("error: file private data is NULL\n");
+ return -EFAULT;
+ }
+ led = log->led;
+
+ mutex_lock(&log->debugfs_lock);
+ if ((log->rpos >= log->wpos && log->buffer_cnt == 0) ||
+ ((log->len - log->wpos) < MIN_BUFFER_WRITE_LEN))
+ goto unlock_mutex;
+
+ log->buffer_cnt--;
+
+ rc = print_to_log(log, "0x%05X ", FLASH_LED_FAULT_STATUS(led->base));
+ if (rc == 0)
+ goto unlock_mutex;
+
+ rc = print_to_log(log, "0x%02X ", led->fault_reg);
+ if (rc == 0)
+ goto unlock_mutex;
+
+ if (log->wpos > 0 && log->data[log->wpos - 1] == ' ')
+ log->data[log->wpos - 1] = '\n';
+
+ len = min(count, log->wpos - log->rpos);
+
+ ret = copy_to_user(buf, &log->data[log->rpos], len);
+ if (ret) {
+ pr_err("error copy register value to user\n");
+ rc = -EFAULT;
+ goto unlock_mutex;
+ }
+
+ len -= ret;
+ *ppos += len;
+ log->rpos += len;
+
+ rc = len;
+
+unlock_mutex:
+ mutex_unlock(&log->debugfs_lock);
+ return rc;
+}
+
+static ssize_t flash_led_dfs_fault_reg_enable(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos) {
+
+ u8 *val;
+ int pos = 0;
+ int cnt = 0;
+ int data;
+ size_t ret = 0;
+
+ struct qpnp_flash_led_buffer *log = file->private_data;
+ struct qpnp_flash_led *led;
+ char *kbuf;
+
+ if (!log) {
+ pr_err("error: file private data is NULL\n");
+ return -EFAULT;
+ }
+ led = log->led;
+
+ mutex_lock(&log->debugfs_lock);
+ kbuf = kmalloc(count + 1, GFP_KERNEL);
+ if (!kbuf) {
+ ret = -ENOMEM;
+ goto unlock_mutex;
+ }
+
+ ret = copy_from_user(kbuf, buf, count);
+	if (ret == count) {
+ pr_err("failed to copy data from user\n");
+ ret = -EFAULT;
+ goto free_buf;
+ }
+
+ count -= ret;
+ *ppos += count;
+ kbuf[count] = '\0';
+ val = kbuf;
+ while (sscanf(kbuf + pos, "%i", &data) == 1) {
+ pos++;
+ val[cnt++] = data & 0xff;
+ }
+
+ if (!cnt)
+ goto free_buf;
+
+ ret = count;
+ if (*val == 1)
+ led->strobe_debug = true;
+ else
+ led->strobe_debug = false;
+
+free_buf:
+ kfree(kbuf);
+unlock_mutex:
+ mutex_unlock(&log->debugfs_lock);
+ return ret;
+}
+
+static ssize_t flash_led_dfs_dbg_enable(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos) {
+
+ u8 *val;
+ int pos = 0;
+ int cnt = 0;
+ int data;
+ size_t ret = 0;
+ struct qpnp_flash_led_buffer *log = file->private_data;
+ struct qpnp_flash_led *led;
+ char *kbuf;
+
+ if (!log) {
+ pr_err("error: file private data is NULL\n");
+ return -EFAULT;
+ }
+ led = log->led;
+
+ mutex_lock(&log->debugfs_lock);
+ kbuf = kmalloc(count + 1, GFP_KERNEL);
+ if (!kbuf) {
+ ret = -ENOMEM;
+ goto unlock_mutex;
+ }
+
+ ret = copy_from_user(kbuf, buf, count);
+ if (ret == count) {
+ pr_err("failed to copy data from user\n");
+ ret = -EFAULT;
+ goto free_buf;
+ }
+ count -= ret;
+ *ppos += count;
+ kbuf[count] = '\0';
+ val = kbuf;
+ while (sscanf(kbuf + pos, "%i", &data) == 1) {
+ pos++;
+ val[cnt++] = data & 0xff;
+ }
+
+ if (!cnt)
+ goto free_buf;
+
+ ret = count;
+ if (*val == 1)
+ led->dbg_feature_en = true;
+ else
+ led->dbg_feature_en = false;
+
+free_buf:
+ kfree(kbuf);
+unlock_mutex:
+ mutex_unlock(&log->debugfs_lock);
+ return ret;
+}
+
+static const struct file_operations flash_led_dfs_latched_reg_fops = {
+ .open = flash_led_dfs_open,
+ .release = flash_led_dfs_close,
+ .read = flash_led_dfs_latched_reg_read,
+};
+
+static const struct file_operations flash_led_dfs_strobe_reg_fops = {
+ .open = flash_led_dfs_open,
+ .release = flash_led_dfs_close,
+ .read = flash_led_dfs_fault_reg_read,
+ .write = flash_led_dfs_fault_reg_enable,
+};
+
+static const struct file_operations flash_led_dfs_dbg_feature_fops = {
+ .open = flash_led_dfs_open,
+ .release = flash_led_dfs_close,
+ .write = flash_led_dfs_dbg_enable,
+};
+
+static int
+qpnp_led_masked_write(struct qpnp_flash_led *led, u16 addr, u8 mask, u8 val)
+{
+ int rc;
+
+ rc = regmap_update_bits(led->regmap, addr, mask, val);
+ if (rc)
+ dev_err(&led->pdev->dev,
+ "Unable to update_bits to addr=%x, rc(%d)\n", addr, rc);
+
+ dev_dbg(&led->pdev->dev, "Write 0x%02X to addr 0x%02X\n", val, addr);
+
+ return rc;
+}
+
+static int qpnp_flash_led_get_allowed_die_temp_curr(struct qpnp_flash_led *led,
+ int64_t die_temp_degc)
+{
+ int die_temp_curr_ma;
+
+ if (die_temp_degc >= led->pdata->die_temp_threshold_degc[0])
+ die_temp_curr_ma = 0;
+ else if (die_temp_degc >= led->pdata->die_temp_threshold_degc[1])
+ die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[0];
+ else if (die_temp_degc >= led->pdata->die_temp_threshold_degc[2])
+ die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[1];
+ else if (die_temp_degc >= led->pdata->die_temp_threshold_degc[3])
+ die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[2];
+ else if (die_temp_degc >= led->pdata->die_temp_threshold_degc[4])
+ die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[3];
+ else
+ die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[4];
+
+ return die_temp_curr_ma;
+}
+
+static int64_t qpnp_flash_led_get_die_temp(struct qpnp_flash_led *led)
+{
+ struct qpnp_vadc_result die_temp_result;
+ int rc;
+
+ rc = qpnp_vadc_read(led->vadc_dev, SPARE2, &die_temp_result);
+ if (rc) {
+ pr_err("failed to read the die temp\n");
+ return -EINVAL;
+ }
+
+ return die_temp_result.physical;
+}
+
+static int qpnp_get_pmic_revid(struct qpnp_flash_led *led)
+{
+ struct device_node *revid_dev_node;
+
+ revid_dev_node = of_parse_phandle(led->pdev->dev.of_node,
+ "qcom,pmic-revid", 0);
+ if (!revid_dev_node) {
+ dev_err(&led->pdev->dev,
+ "qcom,pmic-revid property missing\n");
+ return -EINVAL;
+ }
+
+ led->revid_data = get_revid_data(revid_dev_node);
+ if (IS_ERR(led->revid_data)) {
+ pr_err("Couldn't get revid data rc = %ld\n",
+ PTR_ERR(led->revid_data));
+ return PTR_ERR(led->revid_data);
+ }
+
+ return 0;
+}
+
+static int
+qpnp_flash_led_get_max_avail_current(struct flash_node_data *flash_node,
+ struct qpnp_flash_led *led)
+{
+ union power_supply_propval prop;
+ int64_t chg_temp_milidegc, die_temp_degc;
+ int max_curr_avail_ma = 2000;
+ int allowed_die_temp_curr_ma = 2000;
+ int rc;
+
+ if (led->pdata->power_detect_en) {
+ if (!led->battery_psy) {
+ dev_err(&led->pdev->dev,
+ "Failed to query power supply\n");
+ return -EINVAL;
+ }
+
+ /*
+ * When charging is enabled, enforce this new enablement
+ * sequence to reduce fuel gauge reading resolution.
+ */
+ if (led->charging_enabled) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_MODULE_ENABLE_CTRL(led->base),
+ FLASH_MODULE_ENABLE, FLASH_MODULE_ENABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Module enable reg write failed\n");
+ return -EINVAL;
+ }
+
+ usleep_range(FLASH_LED_CURRENT_READING_DELAY_MIN,
+ FLASH_LED_CURRENT_READING_DELAY_MAX);
+ }
+
+ power_supply_get_property(led->battery_psy,
+ POWER_SUPPLY_PROP_FLASH_CURRENT_MAX, &prop);
+ if (!prop.intval) {
+ dev_err(&led->pdev->dev,
+ "battery too low for flash\n");
+ return -EINVAL;
+ }
+
+ max_curr_avail_ma = (prop.intval / FLASH_LED_UA_PER_MA);
+ }
+
+ /*
+ * When thermal mitigation is available, this logic will execute to
+ * derate current based upon the PMIC die temperature.
+ */
+ if (led->pdata->die_current_derate_en) {
+ chg_temp_milidegc = qpnp_flash_led_get_die_temp(led);
+ if (chg_temp_milidegc < 0)
+ return -EINVAL;
+
+ die_temp_degc = div_s64(chg_temp_milidegc, 1000);
+ allowed_die_temp_curr_ma =
+ qpnp_flash_led_get_allowed_die_temp_curr(led,
+ die_temp_degc);
+ if (allowed_die_temp_curr_ma < 0)
+ return -EINVAL;
+ }
+
+ max_curr_avail_ma = (max_curr_avail_ma >= allowed_die_temp_curr_ma)
+ ? allowed_die_temp_curr_ma : max_curr_avail_ma;
+
+ return max_curr_avail_ma;
+}
+
+static ssize_t qpnp_flash_led_die_temp_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct qpnp_flash_led *led;
+ struct flash_node_data *flash_node;
+ unsigned long val;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->pdev->dev);
+
+	/* '0' disables the die_temp feature; non-zero enables it */
+ if (val == 0)
+ led->pdata->die_current_derate_en = false;
+ else
+ led->pdata->die_current_derate_en = true;
+
+ return count;
+}
+
+static ssize_t qpnp_led_strobe_type_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct flash_node_data *flash_node;
+ unsigned long state;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ ssize_t ret = -EINVAL;
+
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+
+ /* '0' for sw strobe; '1' for hw strobe */
+ if (state == 1)
+ flash_node->trigger |= FLASH_LED_STROBE_TYPE_HW;
+ else
+ flash_node->trigger &= ~FLASH_LED_STROBE_TYPE_HW;
+
+ return count;
+}
+
+static ssize_t qpnp_flash_led_dump_regs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qpnp_flash_led *led;
+ struct flash_node_data *flash_node;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ int rc, i, count = 0;
+ u16 addr;
+ uint val;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->pdev->dev);
+ for (i = 0; i < ARRAY_SIZE(qpnp_flash_led_ctrl_dbg_regs); i++) {
+ addr = led->base + qpnp_flash_led_ctrl_dbg_regs[i];
+ rc = regmap_read(led->regmap, addr, &val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Unable to read from addr=%x, rc(%d)\n",
+ addr, rc);
+ return -EINVAL;
+ }
+
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "REG_0x%x = 0x%02x\n", addr, val);
+
+ if (count >= PAGE_SIZE)
+ return PAGE_SIZE - 1;
+ }
+
+ return count;
+}
+
+static ssize_t qpnp_flash_led_current_derate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct qpnp_flash_led *led;
+ struct flash_node_data *flash_node;
+ unsigned long val;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->pdev->dev);
+
+	/* '0' disables the derate feature; non-zero enables it */
+ if (val == 0)
+ led->pdata->power_detect_en = false;
+ else
+ led->pdata->power_detect_en = true;
+
+ return count;
+}
+
+static ssize_t qpnp_flash_led_max_current_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qpnp_flash_led *led;
+ struct flash_node_data *flash_node;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ int max_curr_avail_ma = 0;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->pdev->dev);
+
+ if (led->flash_node[0].flash_on)
+ max_curr_avail_ma += led->flash_node[0].max_current;
+ if (led->flash_node[1].flash_on)
+ max_curr_avail_ma += led->flash_node[1].max_current;
+
+ if (led->pdata->power_detect_en ||
+ led->pdata->die_current_derate_en) {
+ max_curr_avail_ma =
+ qpnp_flash_led_get_max_avail_current(flash_node, led);
+
+ if (max_curr_avail_ma < 0)
+ return -EINVAL;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", max_curr_avail_ma);
+}
+
+static struct device_attribute qpnp_flash_led_attrs[] = {
+ __ATTR(strobe, 0664, NULL, qpnp_led_strobe_type_store),
+ __ATTR(reg_dump, 0664, qpnp_flash_led_dump_regs_show, NULL),
+ __ATTR(enable_current_derate, 0664, NULL,
+ qpnp_flash_led_current_derate_store),
+ __ATTR(max_allowed_current, 0664, qpnp_flash_led_max_current_show,
+ NULL),
+ __ATTR(enable_die_temp_current_derate, 0664, NULL,
+ qpnp_flash_led_die_temp_store),
+};
+
+static int qpnp_flash_led_get_thermal_derate_rate(const char *rate)
+{
+ /*
+	 * return 5% derate as the default if the user specifies
+	 * an unsupported value
+ */
+ if (strcmp(rate, "1_PERCENT") == 0)
+ return RATE_1_PERCENT;
+ else if (strcmp(rate, "1P25_PERCENT") == 0)
+ return RATE_1P25_PERCENT;
+ else if (strcmp(rate, "2_PERCENT") == 0)
+ return RATE_2_PERCENT;
+ else if (strcmp(rate, "2P5_PERCENT") == 0)
+ return RATE_2P5_PERCENT;
+ else if (strcmp(rate, "5_PERCENT") == 0)
+ return RATE_5_PERCENT;
+ else
+ return RATE_5_PERCENT;
+}
+
+static int qpnp_flash_led_get_ramp_step(const char *step)
+{
+ /*
+	 * return 27 us as the default if the user specifies
+	 * an unsupported value
+ */
+ if (strcmp(step, "0P2_US") == 0)
+ return RAMP_STEP_0P2_US;
+ else if (strcmp(step, "0P4_US") == 0)
+ return RAMP_STEP_0P4_US;
+ else if (strcmp(step, "0P8_US") == 0)
+ return RAMP_STEP_0P8_US;
+ else if (strcmp(step, "1P6_US") == 0)
+ return RAMP_STEP_1P6_US;
+ else if (strcmp(step, "3P3_US") == 0)
+ return RAMP_STEP_3P3_US;
+ else if (strcmp(step, "6P7_US") == 0)
+ return RAMP_STEP_6P7_US;
+ else if (strcmp(step, "13P5_US") == 0)
+ return RAMP_STEP_13P5_US;
+ else
+ return RAMP_STEP_27US;
+}
+
+static u8 qpnp_flash_led_get_droop_debounce_time(u8 val)
+{
+ /*
+	 * return 10 us as the default if the user specifies
+	 * an unsupported value
+ */
+ switch (val) {
+ case 0:
+ return 0;
+ case 10:
+ return 1;
+ case 32:
+ return 2;
+ case 64:
+ return 3;
+ default:
+ return 1;
+ }
+}
+
+static u8 qpnp_flash_led_get_startup_dly(u8 val)
+{
+ /*
+	 * return 128 us as the default if the user specifies
+	 * an unsupported value
+ */
+ switch (val) {
+ case 10:
+ return 0;
+ case 32:
+ return 1;
+ case 64:
+ return 2;
+ case 128:
+ return 3;
+ default:
+ return 3;
+ }
+}
+
+static int
+qpnp_flash_led_get_peripheral_type(struct qpnp_flash_led *led)
+{
+ int rc;
+ uint val;
+
+ rc = regmap_read(led->regmap,
+ FLASH_LED_PERIPHERAL_SUBTYPE(led->base), &val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Unable to read peripheral subtype\n");
+ return -EINVAL;
+ }
+
+ return val;
+}
+
+static int qpnp_flash_led_module_disable(struct qpnp_flash_led *led,
+ struct flash_node_data *flash_node)
+{
+ union power_supply_propval psy_prop;
+ int rc;
+ uint val, tmp;
+
+ rc = regmap_read(led->regmap, FLASH_LED_STROBE_CTRL(led->base), &val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Unable to read strobe reg\n");
+ return -EINVAL;
+ }
+
+ tmp = (~flash_node->trigger) & val;
+ if (!tmp) {
+ if (flash_node->type == TORCH) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_UNLOCK_SECURE(led->base),
+ FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Secure reg write failed\n");
+ return -EINVAL;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_TORCH(led->base),
+ FLASH_TORCH_MASK, FLASH_LED_TORCH_DISABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Torch reg write failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (led->battery_psy &&
+ led->revid_data->pmic_subtype == PMI8996_SUBTYPE &&
+ !led->revid_data->rev3) {
+ psy_prop.intval = false;
+ rc = power_supply_set_property(led->battery_psy,
+ POWER_SUPPLY_PROP_FLASH_TRIGGER,
+ &psy_prop);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+					"Failed to enable charger i/p current limit\n");
+ return -EINVAL;
+ }
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_MODULE_ENABLE_CTRL(led->base),
+ FLASH_MODULE_ENABLE_MASK,
+ FLASH_LED_MODULE_CTRL_DEFAULT);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Module disable failed\n");
+ return -EINVAL;
+ }
+
+ if (led->pinctrl) {
+ rc = pinctrl_select_state(led->pinctrl,
+ led->gpio_state_suspend);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "failed to disable GPIO\n");
+ return -EINVAL;
+ }
+ led->gpio_enabled = false;
+ }
+
+ if (led->battery_psy) {
+ psy_prop.intval = false;
+ rc = power_supply_set_property(led->battery_psy,
+ POWER_SUPPLY_PROP_FLASH_ACTIVE,
+ &psy_prop);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Failed to setup OTG pulse skip enable\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ if (flash_node->trigger & FLASH_LED0_TRIGGER) {
+ rc = qpnp_led_masked_write(led,
+ led->current_addr,
+ FLASH_CURRENT_MASK, 0x00);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "current register write failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (flash_node->trigger & FLASH_LED1_TRIGGER) {
+ rc = qpnp_led_masked_write(led,
+ led->current2_addr,
+ FLASH_CURRENT_MASK, 0x00);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "current register write failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (flash_node->id == FLASH_LED_SWITCH)
+ flash_node->trigger &= FLASH_LED_STROBE_TYPE_HW;
+
+ return 0;
+}
+
+static enum
+led_brightness qpnp_flash_led_brightness_get(struct led_classdev *led_cdev)
+{
+ return led_cdev->brightness;
+}
+
+static int flash_regulator_parse_dt(struct qpnp_flash_led *led,
+ struct flash_node_data *flash_node) {
+
+ int i = 0, rc;
+ struct device_node *node = flash_node->cdev.dev->of_node;
+ struct device_node *temp = NULL;
+ const char *temp_string;
+ u32 val;
+
+ flash_node->reg_data = devm_kzalloc(&led->pdev->dev,
+ sizeof(struct flash_regulator_data *) *
+ flash_node->num_regulators,
+ GFP_KERNEL);
+ if (!flash_node->reg_data) {
+ dev_err(&led->pdev->dev,
+ "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ for_each_child_of_node(node, temp) {
+ rc = of_property_read_string(temp, "regulator-name",
+ &temp_string);
+ if (!rc)
+ flash_node->reg_data[i].reg_name = temp_string;
+ else {
+ dev_err(&led->pdev->dev,
+ "Unable to read regulator name\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(temp, "max-voltage", &val);
+ if (!rc) {
+ flash_node->reg_data[i].max_volt_uv = val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read max voltage\n");
+ return rc;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
+static int flash_regulator_setup(struct qpnp_flash_led *led,
+ struct flash_node_data *flash_node, bool on)
+{
+ int i, rc = 0;
+
+ if (!on) {
+ i = flash_node->num_regulators;
+ goto error_regulator_setup;
+ }
+
+ for (i = 0; i < flash_node->num_regulators; i++) {
+ flash_node->reg_data[i].regs =
+ regulator_get(flash_node->cdev.dev,
+ flash_node->reg_data[i].reg_name);
+ if (IS_ERR(flash_node->reg_data[i].regs)) {
+ rc = PTR_ERR(flash_node->reg_data[i].regs);
+ dev_err(&led->pdev->dev,
+ "Failed to get regulator\n");
+ goto error_regulator_setup;
+ }
+
+ if (regulator_count_voltages(flash_node->reg_data[i].regs)
+ > 0) {
+ rc = regulator_set_voltage(flash_node->reg_data[i].regs,
+ flash_node->reg_data[i].max_volt_uv,
+ flash_node->reg_data[i].max_volt_uv);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "regulator set voltage failed\n");
+ regulator_put(flash_node->reg_data[i].regs);
+ goto error_regulator_setup;
+ }
+ }
+ }
+
+ return rc;
+
+error_regulator_setup:
+ while (i--) {
+ if (regulator_count_voltages(flash_node->reg_data[i].regs)
+ > 0) {
+ regulator_set_voltage(flash_node->reg_data[i].regs,
+ 0, flash_node->reg_data[i].max_volt_uv);
+ }
+
+ regulator_put(flash_node->reg_data[i].regs);
+ }
+
+ return rc;
+}
+
+static int flash_regulator_enable(struct qpnp_flash_led *led,
+ struct flash_node_data *flash_node, bool on)
+{
+ int i, rc = 0;
+
+ if (!on) {
+ i = flash_node->num_regulators;
+ goto error_regulator_enable;
+ }
+
+ for (i = 0; i < flash_node->num_regulators; i++) {
+ rc = regulator_enable(flash_node->reg_data[i].regs);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "regulator enable failed\n");
+ goto error_regulator_enable;
+ }
+ }
+
+ return rc;
+
+error_regulator_enable:
+ while (i--)
+ regulator_disable(flash_node->reg_data[i].regs);
+
+ return rc;
+}
+
+int qpnp_flash_led_prepare(struct led_trigger *trig, int options,
+ int *max_current)
+{
+ struct led_classdev *led_cdev = trigger_to_lcdev(trig);
+ struct flash_node_data *flash_node;
+ struct qpnp_flash_led *led;
+ int rc;
+
+ if (!led_cdev) {
+ pr_err("Invalid led_trigger provided\n");
+ return -EINVAL;
+ }
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->pdev->dev);
+
+ if (!(options & FLASH_LED_PREPARE_OPTIONS_MASK)) {
+ dev_err(&led->pdev->dev, "Invalid options %d\n", options);
+ return -EINVAL;
+ }
+
+ if (options & ENABLE_REGULATOR) {
+ rc = flash_regulator_enable(led, flash_node, true);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev,
+ "enable regulator failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ if (options & DISABLE_REGULATOR) {
+ rc = flash_regulator_enable(led, flash_node, false);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev,
+ "disable regulator failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ if (options & QUERY_MAX_CURRENT) {
+ rc = qpnp_flash_led_get_max_avail_current(flash_node, led);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev,
+ "query max current failed, rc=%d\n", rc);
+ return rc;
+ }
+ *max_current = rc;
+ }
+
+ return 0;
+}
+
+static void qpnp_flash_led_work(struct work_struct *work)
+{
+ struct flash_node_data *flash_node = container_of(work,
+ struct flash_node_data, work);
+ struct qpnp_flash_led *led = dev_get_drvdata(&flash_node->pdev->dev);
+ union power_supply_propval psy_prop;
+ int rc, brightness = flash_node->cdev.brightness;
+ int max_curr_avail_ma = 0;
+ int total_curr_ma = 0;
+ int i;
+ u8 val;
+ uint temp;
+
+ mutex_lock(&led->flash_led_lock);
+
+ if (!brightness)
+ goto turn_off;
+
+ if (led->open_fault) {
+ dev_err(&led->pdev->dev, "Open fault detected\n");
+ mutex_unlock(&led->flash_led_lock);
+ return;
+ }
+
+ if (!flash_node->flash_on && flash_node->num_regulators > 0) {
+ rc = flash_regulator_enable(led, flash_node, true);
+ if (rc) {
+ mutex_unlock(&led->flash_led_lock);
+ return;
+ }
+ }
+
+ if (!led->gpio_enabled && led->pinctrl) {
+ rc = pinctrl_select_state(led->pinctrl,
+ led->gpio_state_active);
+ if (rc) {
+ dev_err(&led->pdev->dev, "failed to enable GPIO\n");
+ goto error_enable_gpio;
+ }
+ led->gpio_enabled = true;
+ }
+
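+ /* With the debug feature enabled, configure and unmask all flash interrupts so faults can be inspected */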
+ if (led->dbg_feature_en) {
+ rc = qpnp_led_masked_write(led,
+ INT_SET_TYPE(led->base),
+ FLASH_STATUS_REG_MASK, 0x1F);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "INT_SET_TYPE write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ IN_POLARITY_HIGH(led->base),
+ FLASH_STATUS_REG_MASK, 0x1F);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "IN_POLARITY_HIGH write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ INT_EN_SET(led->base),
+ FLASH_STATUS_REG_MASK, 0x1F);
+ if (rc) {
+ dev_err(&led->pdev->dev, "INT_EN_SET write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ INT_LATCHED_CLR(led->base),
+ FLASH_STATUS_REG_MASK, 0x1F);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "INT_LATCHED_CLR write failed\n");
+ goto exit_flash_led_work;
+ }
+ }
+
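+ /* If the last node is the switch, fold this LED's trigger bit and programmed current into it */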
+ if (led->flash_node[led->num_leds - 1].id == FLASH_LED_SWITCH &&
+ flash_node->id != FLASH_LED_SWITCH) {
+ led->flash_node[led->num_leds - 1].trigger |=
+ (0x80 >> flash_node->id);
+ if (flash_node->id == FLASH_LED_0)
+ led->flash_node[led->num_leds - 1].prgm_current =
+ flash_node->prgm_current;
+ else if (flash_node->id == FLASH_LED_1)
+ led->flash_node[led->num_leds - 1].prgm_current2 =
+ flash_node->prgm_current;
+ }
+
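+ /* Torch mode: unlock the secure register, enable torch and program the per-channel torch current */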
+ if (flash_node->type == TORCH) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_UNLOCK_SECURE(led->base),
+ FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Secure reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_TORCH(led->base),
+ FLASH_TORCH_MASK, FLASH_LED_TORCH_ENABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Torch reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ if (flash_node->id == FLASH_LED_SWITCH) {
+ val = (u8)(flash_node->prgm_current *
+ FLASH_TORCH_MAX_LEVEL
+ / flash_node->max_current);
+ rc = qpnp_led_masked_write(led,
+ led->current_addr,
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Torch reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ val = (u8)(flash_node->prgm_current2 *
+ FLASH_TORCH_MAX_LEVEL
+ / flash_node->max_current);
+ rc = qpnp_led_masked_write(led,
+ led->current2_addr,
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Torch reg write failed\n");
+ goto exit_flash_led_work;
+ }
+ } else {
+ val = (u8)(flash_node->prgm_current *
+ FLASH_TORCH_MAX_LEVEL /
+ flash_node->max_current);
+ if (flash_node->id == FLASH_LED_0) {
+ rc = qpnp_led_masked_write(led,
+ led->current_addr,
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "current reg write failed\n");
+ goto exit_flash_led_work;
+ }
+ } else {
+ rc = qpnp_led_masked_write(led,
+ led->current2_addr,
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "current reg write failed\n");
+ goto exit_flash_led_work;
+ }
+ }
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_MAX_CURRENT(led->base),
+ FLASH_CURRENT_MASK, FLASH_TORCH_MAX_LEVEL);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Max current reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_MODULE_ENABLE_CTRL(led->base),
+ FLASH_MODULE_ENABLE_MASK, FLASH_MODULE_ENABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Module enable reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ if (led->pdata->hdrm_sns_ch0_en ||
+ led->pdata->hdrm_sns_ch1_en) {
+ if (flash_node->id == FLASH_LED_SWITCH) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL0(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ flash_node->trigger &
+ FLASH_LED0_TRIGGER ?
+ FLASH_LED_HDRM_SNS_ENABLE :
+ FLASH_LED_HDRM_SNS_DISABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense enable failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL1(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ flash_node->trigger &
+ FLASH_LED1_TRIGGER ?
+ FLASH_LED_HDRM_SNS_ENABLE :
+ FLASH_LED_HDRM_SNS_DISABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense enable failed\n");
+ goto exit_flash_led_work;
+ }
+ } else if (flash_node->id == FLASH_LED_0) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL0(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ FLASH_LED_HDRM_SNS_ENABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense disable failed\n");
+ goto exit_flash_led_work;
+ }
+ } else if (flash_node->id == FLASH_LED_1) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL1(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ FLASH_LED_HDRM_SNS_ENABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense disable failed\n");
+ goto exit_flash_led_work;
+ }
+ }
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_STROBE_CTRL(led->base),
+ (flash_node->id == FLASH_LED_SWITCH ? FLASH_STROBE_MASK
+ | FLASH_LED_STROBE_TYPE_HW
+ : flash_node->trigger |
+ FLASH_LED_STROBE_TYPE_HW),
+ flash_node->trigger);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Strobe reg write failed\n");
+ goto exit_flash_led_work;
+ }
+ } else if (flash_node->type == FLASH) {
+ if (flash_node->trigger & FLASH_LED0_TRIGGER)
+ max_curr_avail_ma += flash_node->max_current;
+ if (flash_node->trigger & FLASH_LED1_TRIGGER)
+ max_curr_avail_ma += flash_node->max_current;
+
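+ /* Notify the charger that the flash is active so OTG pulse skipping can be set up */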
+ psy_prop.intval = true;
+ rc = power_supply_set_property(led->battery_psy,
+ POWER_SUPPLY_PROP_FLASH_ACTIVE,
+ &psy_prop);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Failed to setup OTG pulse skip enable\n");
+ goto exit_flash_led_work;
+ }
+
+ if (led->pdata->power_detect_en ||
+ led->pdata->die_current_derate_en) {
+ if (led->battery_psy) {
+ power_supply_get_property(led->battery_psy,
+ POWER_SUPPLY_PROP_STATUS,
+ &psy_prop);
+ if (psy_prop.intval < 0) {
+ dev_err(&led->pdev->dev,
+ "Invalid battery status\n");
+ goto exit_flash_led_work;
+ }
+
+ if (psy_prop.intval ==
+ POWER_SUPPLY_STATUS_CHARGING)
+ led->charging_enabled = true;
+ else if (psy_prop.intval ==
+ POWER_SUPPLY_STATUS_DISCHARGING
+ || psy_prop.intval ==
+ POWER_SUPPLY_STATUS_NOT_CHARGING)
+ led->charging_enabled = false;
+ }
+ max_curr_avail_ma =
+ qpnp_flash_led_get_max_avail_current
+ (flash_node, led);
+ if (max_curr_avail_ma < 0) {
+ dev_err(&led->pdev->dev,
+ "Failed to get max avail curr\n");
+ goto exit_flash_led_work;
+ }
+ }
+
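+ /* If the total requested current exceeds the available budget, scale both channels down proportionally */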
+ if (flash_node->id == FLASH_LED_SWITCH) {
+ if (flash_node->trigger & FLASH_LED0_TRIGGER)
+ total_curr_ma += flash_node->prgm_current;
+ if (flash_node->trigger & FLASH_LED1_TRIGGER)
+ total_curr_ma += flash_node->prgm_current2;
+
+ if (max_curr_avail_ma < total_curr_ma) {
+ flash_node->prgm_current =
+ (flash_node->prgm_current *
+ max_curr_avail_ma) / total_curr_ma;
+ flash_node->prgm_current2 =
+ (flash_node->prgm_current2 *
+ max_curr_avail_ma) / total_curr_ma;
+ }
+
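+ /* Convert the programmed current (mA) into a register code scaled against the channel maximum */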
+ val = (u8)(flash_node->prgm_current *
+ FLASH_MAX_LEVEL / flash_node->max_current);
+ rc = qpnp_led_masked_write(led,
+ led->current_addr, FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Current register write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ val = (u8)(flash_node->prgm_current2 *
+ FLASH_MAX_LEVEL / flash_node->max_current);
+ rc = qpnp_led_masked_write(led,
+ led->current2_addr, FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Current register write failed\n");
+ goto exit_flash_led_work;
+ }
+ } else {
+ if (max_curr_avail_ma < flash_node->prgm_current) {
+ dev_err(&led->pdev->dev,
+ "battery only supprots %d mA\n",
+ max_curr_avail_ma);
+ flash_node->prgm_current =
+ (u16)max_curr_avail_ma;
+ }
+
+ val = (u8)(flash_node->prgm_current *
+ FLASH_MAX_LEVEL
+ / flash_node->max_current);
+ if (flash_node->id == FLASH_LED_0) {
+ rc = qpnp_led_masked_write(
+ led,
+ led->current_addr,
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "current reg write failed\n");
+ goto exit_flash_led_work;
+ }
+ } else if (flash_node->id == FLASH_LED_1) {
+ rc = qpnp_led_masked_write(
+ led,
+ led->current2_addr,
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "current reg write failed\n");
+ goto exit_flash_led_work;
+ }
+ }
+ }
+
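+ /* Encode the flash duration as safety timer steps of FLASH_DURATION_DIVIDER, offset by one step */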
+ val = (u8)((flash_node->duration - FLASH_DURATION_DIVIDER)
+ / FLASH_DURATION_DIVIDER);
+ rc = qpnp_led_masked_write(led,
+ FLASH_SAFETY_TIMER(led->base),
+ FLASH_SAFETY_TIMER_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Safety timer reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_MAX_CURRENT(led->base),
+ FLASH_CURRENT_MASK, FLASH_MAX_LEVEL);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Max current reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
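+ /* When the battery is not charging, enable the module here and wait for the boost ramp-up delay */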
+ if (!led->charging_enabled) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_MODULE_ENABLE_CTRL(led->base),
+ FLASH_MODULE_ENABLE, FLASH_MODULE_ENABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Module enable reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ usleep_range(FLASH_RAMP_UP_DELAY_US_MIN,
+ FLASH_RAMP_UP_DELAY_US_MAX);
+ }
+
+ if (led->revid_data->pmic_subtype == PMI8996_SUBTYPE &&
+ !led->revid_data->rev3) {
+ rc = power_supply_set_property(led->battery_psy,
+ POWER_SUPPLY_PROP_FLASH_TRIGGER,
+ &psy_prop);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Failed to disable charger i/p curr limit\n");
+ goto exit_flash_led_work;
+ }
+ }
+
+ if (led->pdata->hdrm_sns_ch0_en ||
+ led->pdata->hdrm_sns_ch1_en) {
+ if (flash_node->id == FLASH_LED_SWITCH) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL0(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ (flash_node->trigger &
+ FLASH_LED0_TRIGGER ?
+ FLASH_LED_HDRM_SNS_ENABLE :
+ FLASH_LED_HDRM_SNS_DISABLE));
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense enable failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL1(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ (flash_node->trigger &
+ FLASH_LED1_TRIGGER ?
+ FLASH_LED_HDRM_SNS_ENABLE :
+ FLASH_LED_HDRM_SNS_DISABLE));
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense enable failed\n");
+ goto exit_flash_led_work;
+ }
+ } else if (flash_node->id == FLASH_LED_0) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL0(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ FLASH_LED_HDRM_SNS_ENABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense disable failed\n");
+ goto exit_flash_led_work;
+ }
+ } else if (flash_node->id == FLASH_LED_1) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL1(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ FLASH_LED_HDRM_SNS_ENABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense disable failed\n");
+ goto exit_flash_led_work;
+ }
+ }
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_STROBE_CTRL(led->base),
+ (flash_node->id == FLASH_LED_SWITCH ? FLASH_STROBE_MASK
+ | FLASH_LED_STROBE_TYPE_HW
+ : flash_node->trigger |
+ FLASH_LED_STROBE_TYPE_HW),
+ flash_node->trigger);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Strobe reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ if (led->strobe_debug && led->dbg_feature_en) {
+ udelay(2000);
+ rc = regmap_read(led->regmap,
+ FLASH_LED_FAULT_STATUS(led->base),
+ &temp);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Unable to read from addr= %x, rc(%d)\n",
+ FLASH_LED_FAULT_STATUS(led->base), rc);
+ goto exit_flash_led_work;
+ }
+ led->fault_reg = temp;
+ }
+ } else {
+ pr_err("Both Torch and Flash cannot be select at same time\n");
+ for (i = 0; i < led->num_leds; i++)
+ led->flash_node[i].flash_on = false;
+ goto turn_off;
+ }
+
+ flash_node->flash_on = true;
+ mutex_unlock(&led->flash_led_lock);
+
+ return;
+
+turn_off:
+ if (led->flash_node[led->num_leds - 1].id == FLASH_LED_SWITCH &&
+ flash_node->id != FLASH_LED_SWITCH)
+ led->flash_node[led->num_leds - 1].trigger &=
+ ~(0x80 >> flash_node->id);
+ if (flash_node->type == TORCH) {
+ /*
+ * Checking the LED fault status detects a hardware open fault.
+ * If a fault occurs, all subsequent LED enable requests are
+ * rejected to protect the hardware.
+ */
+ rc = regmap_read(led->regmap,
+ FLASH_LED_FAULT_STATUS(led->base), &temp);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Failed to read out fault status register\n");
+ goto exit_flash_led_work;
+ }
+
+ led->open_fault |= (temp & FLASH_LED_OPEN_FAULT_DETECTED);
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_STROBE_CTRL(led->base),
+ (flash_node->id == FLASH_LED_SWITCH ? FLASH_STROBE_MASK
+ | FLASH_LED_STROBE_TYPE_HW
+ : flash_node->trigger
+ | FLASH_LED_STROBE_TYPE_HW),
+ FLASH_LED_DISABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Strobe disable failed\n");
+ goto exit_flash_led_work;
+ }
+
+ usleep_range(FLASH_RAMP_DN_DELAY_US_MIN, FLASH_RAMP_DN_DELAY_US_MAX);
+exit_flash_hdrm_sns:
+ if (led->pdata->hdrm_sns_ch0_en) {
+ if (flash_node->id == FLASH_LED_0 ||
+ flash_node->id == FLASH_LED_SWITCH) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL0(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ FLASH_LED_HDRM_SNS_DISABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense disable failed\n");
+ goto exit_flash_hdrm_sns;
+ }
+ }
+ }
+
+ if (led->pdata->hdrm_sns_ch1_en) {
+ if (flash_node->id == FLASH_LED_1 ||
+ flash_node->id == FLASH_LED_SWITCH) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_HDRM_SNS_ENABLE_CTRL1(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ FLASH_LED_HDRM_SNS_DISABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Headroom sense disable failed\n");
+ goto exit_flash_hdrm_sns;
+ }
+ }
+ }
+exit_flash_led_work:
+ rc = qpnp_flash_led_module_disable(led, flash_node);
+ if (rc)
+ dev_err(&led->pdev->dev, "Module disable failed\n");
+error_enable_gpio:
+ if (flash_node->flash_on && flash_node->num_regulators > 0)
+ flash_regulator_enable(led, flash_node, false);
+
+ flash_node->flash_on = false;
+ mutex_unlock(&led->flash_led_lock);
+}
+
+static void qpnp_flash_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct flash_node_data *flash_node;
+ struct qpnp_flash_led *led;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->pdev->dev);
+
+ if (value < LED_OFF) {
+ pr_err("Invalid brightness value\n");
+ return;
+ }
+
+ if (value > flash_node->cdev.max_brightness)
+ value = flash_node->cdev.max_brightness;
+
+ flash_node->cdev.brightness = value;
+ if (led->flash_node[led->num_leds - 1].id ==
+ FLASH_LED_SWITCH) {
+ if (flash_node->type == TORCH)
+ led->flash_node[led->num_leds - 1].type = TORCH;
+ else if (flash_node->type == FLASH)
+ led->flash_node[led->num_leds - 1].type = FLASH;
+
+ led->flash_node[led->num_leds - 1].max_current
+ = flash_node->max_current;
+
+ if (flash_node->id == FLASH_LED_0 ||
+ flash_node->id == FLASH_LED_1) {
+ if (value < FLASH_LED_MIN_CURRENT_MA && value != 0)
+ value = FLASH_LED_MIN_CURRENT_MA;
+
+ flash_node->prgm_current = value;
+ flash_node->flash_on = value ? true : false;
+ } else if (flash_node->id == FLASH_LED_SWITCH) {
+ if (!value) {
+ flash_node->prgm_current = 0;
+ flash_node->prgm_current2 = 0;
+ }
+ }
+ } else {
+ if (value < FLASH_LED_MIN_CURRENT_MA && value != 0)
+ value = FLASH_LED_MIN_CURRENT_MA;
+ flash_node->prgm_current = value;
+ }
+
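+ /* Defer the actual hardware programming to the ordered flash workqueue */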
+ queue_work(led->ordered_workq, &flash_node->work);
+}
+
+static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led)
+{
+ int rc;
+ u8 val, temp_val;
+ uint val_int;
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_MODULE_ENABLE_CTRL(led->base),
+ FLASH_MODULE_ENABLE_MASK,
+ FLASH_LED_MODULE_CTRL_DEFAULT);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Module disable failed\n");
+ return rc;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_STROBE_CTRL(led->base),
+ FLASH_STROBE_MASK, FLASH_LED_DISABLE);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Strobe disable failed\n");
+ return rc;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_TMR_CTRL(led->base),
+ FLASH_TMR_MASK, FLASH_TMR_SAFETY);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "LED timer ctrl reg write failed(%d)\n", rc);
+ return rc;
+ }
+
+ val = (u8)(led->pdata->headroom / FLASH_LED_HEADROOM_DIVIDER -
+ FLASH_LED_HEADROOM_OFFSET);
+ rc = qpnp_led_masked_write(led,
+ FLASH_HEADROOM(led->base),
+ FLASH_HEADROOM_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Headroom reg write failed\n");
+ return rc;
+ }
+
+ val = qpnp_flash_led_get_startup_dly(led->pdata->startup_dly);
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_STARTUP_DELAY(led->base),
+ FLASH_STARTUP_DLY_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Startup delay reg write failed\n");
+ return rc;
+ }
+
+ val = (u8)(led->pdata->clamp_current * FLASH_MAX_LEVEL /
+ FLASH_LED_MAX_CURRENT_MA);
+ rc = qpnp_led_masked_write(led,
+ FLASH_CLAMP_CURRENT(led->base),
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Clamp current reg write failed\n");
+ return rc;
+ }
+
+ if (led->pdata->pmic_charger_support)
+ val = FLASH_LED_FLASH_HW_VREG_OK;
+ else
+ val = FLASH_LED_FLASH_SW_VREG_OK;
+ rc = qpnp_led_masked_write(led,
+ FLASH_VREG_OK_FORCE(led->base),
+ FLASH_VREG_OK_FORCE_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "VREG OK force reg write failed\n");
+ return rc;
+ }
+
+ if (led->pdata->self_check_en)
+ val = FLASH_MODULE_ENABLE;
+ else
+ val = FLASH_LED_DISABLE;
+ rc = qpnp_led_masked_write(led,
+ FLASH_FAULT_DETECT(led->base),
+ FLASH_FAULT_DETECT_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Fault detect reg write failed\n");
+ return rc;
+ }
+
+ val = 0x0;
+ val |= led->pdata->mask3_en << FLASH_LED_MASK3_ENABLE_SHIFT;
+ val |= FLASH_LED_MASK_MODULE_MASK2_ENABLE;
+ rc = qpnp_led_masked_write(led, FLASH_MASK_ENABLE(led->base),
+ FLASH_MASK_MODULE_CONTRL_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Mask module enable failed\n");
+ return rc;
+ }
+
+ rc = regmap_read(led->regmap, FLASH_PERPH_RESET_CTRL(led->base),
+ &val_int);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "Unable to read from address %x, rc(%d)\n",
+ FLASH_PERPH_RESET_CTRL(led->base), rc);
+ return -EINVAL;
+ }
+ val = (u8)val_int;
+
+ if (led->pdata->follow_rb_disable) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_UNLOCK_SECURE(led->base),
+ FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Secure reg write failed\n");
+ return -EINVAL;
+ }
+
+ val |= FLASH_FOLLOW_OTST2_RB_MASK;
+ rc = qpnp_led_masked_write(led,
+ FLASH_PERPH_RESET_CTRL(led->base),
+ FLASH_FOLLOW_OTST2_RB_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "failed to reset OTST2_RB bit\n");
+ return rc;
+ }
+ } else {
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_UNLOCK_SECURE(led->base),
+ FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Secure reg write failed\n");
+ return -EINVAL;
+ }
+
+ val &= ~FLASH_FOLLOW_OTST2_RB_MASK;
+ rc = qpnp_led_masked_write(led,
+ FLASH_PERPH_RESET_CTRL(led->base),
+ FLASH_FOLLOW_OTST2_RB_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "failed to reset OTST2_RB bit\n");
+ return rc;
+ }
+ }
+
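+ /* Pack the thermal derate enable, rate and threshold fields into a single register value */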
+ if (!led->pdata->thermal_derate_en)
+ val = 0x0;
+ else {
+ val = led->pdata->thermal_derate_en << 7;
+ val |= led->pdata->thermal_derate_rate << 3;
+ val |= (led->pdata->thermal_derate_threshold -
+ FLASH_LED_THERMAL_THRESHOLD_MIN) /
+ FLASH_LED_THERMAL_DEVIDER;
+ }
+ rc = qpnp_led_masked_write(led,
+ FLASH_THERMAL_DRATE(led->base),
+ FLASH_THERMAL_DERATE_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Thermal derate reg write failed\n");
+ return rc;
+ }
+
+ if (!led->pdata->current_ramp_en)
+ val = 0x0;
+ else {
+ val = led->pdata->current_ramp_en << 7;
+ val |= led->pdata->ramp_up_step << 3;
+ val |= led->pdata->ramp_dn_step;
+ }
+ rc = qpnp_led_masked_write(led,
+ FLASH_CURRENT_RAMP(led->base),
+ FLASH_CURRENT_RAMP_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "Current ramp reg write failed\n");
+ return rc;
+ }
+
+ if (!led->pdata->vph_pwr_droop_en)
+ val = 0x0;
+ else {
+ val = led->pdata->vph_pwr_droop_en << 7;
+ val |= ((led->pdata->vph_pwr_droop_threshold -
+ FLASH_LED_VPH_DROOP_THRESHOLD_MIN_MV) /
+ FLASH_LED_VPH_DROOP_THRESHOLD_DIVIDER) << 4;
+ temp_val =
+ qpnp_flash_led_get_droop_debounce_time(
+ led->pdata->vph_pwr_droop_debounce_time);
+ if (temp_val == 0xFF) {
+ dev_err(&led->pdev->dev, "Invalid debounce time\n");
+ return -EINVAL;
+ }
+
+ val |= temp_val;
+ }
+ rc = qpnp_led_masked_write(led,
+ FLASH_VPH_PWR_DROOP(led->base),
+ FLASH_VPH_PWR_DROOP_MASK, val);
+ if (rc) {
+ dev_err(&led->pdev->dev, "VPH PWR droop reg write failed\n");
+ return rc;
+ }
+
+ led->battery_psy = power_supply_get_by_name("battery");
+ if (!led->battery_psy) {
+ dev_err(&led->pdev->dev,
+ "Failed to get battery power supply\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led,
+ struct flash_node_data *flash_node)
+{
+ const char *temp_string;
+ struct device_node *node = flash_node->cdev.dev->of_node;
+ struct device_node *temp = NULL;
+ int rc = 0, num_regs = 0;
+ u32 val;
+
+ rc = of_property_read_string(node, "label", &temp_string);
+ if (!rc) {
+ if (strcmp(temp_string, "flash") == 0)
+ flash_node->type = FLASH;
+ else if (strcmp(temp_string, "torch") == 0)
+ flash_node->type = TORCH;
+ else if (strcmp(temp_string, "switch") == 0)
+ flash_node->type = SWITCH;
+ else {
+ dev_err(&led->pdev->dev, "Wrong flash LED type\n");
+ return -EINVAL;
+ }
+ } else if (rc < 0) {
+ dev_err(&led->pdev->dev, "Unable to read flash type\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(node, "qcom,current", &val);
+ if (!rc) {
+ if (val < FLASH_LED_MIN_CURRENT_MA)
+ val = FLASH_LED_MIN_CURRENT_MA;
+ flash_node->prgm_current = val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to read current\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(node, "qcom,id", &val);
+ if (!rc)
+ flash_node->id = (u8)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to read led ID\n");
+ return rc;
+ }
+
+ if (flash_node->type == SWITCH || flash_node->type == FLASH) {
+ rc = of_property_read_u32(node, "qcom,duration", &val);
+ if (!rc)
+ flash_node->duration = (u16)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to read duration\n");
+ return rc;
+ }
+ }
+
+ switch (led->peripheral_type) {
+ case FLASH_SUBTYPE_SINGLE:
+ flash_node->trigger = FLASH_LED0_TRIGGER;
+ break;
+ case FLASH_SUBTYPE_DUAL:
+ if (flash_node->id == FLASH_LED_0)
+ flash_node->trigger = FLASH_LED0_TRIGGER;
+ else if (flash_node->id == FLASH_LED_1)
+ flash_node->trigger = FLASH_LED1_TRIGGER;
+ break;
+ default:
+ dev_err(&led->pdev->dev, "Invalid peripheral type\n");
+ }
+
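+ /* Count the child nodes that declare a regulator supply for this LED */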
+ while ((temp = of_get_next_child(node, temp))) {
+ if (of_find_property(temp, "regulator-name", NULL))
+ num_regs++;
+ }
+
+ if (num_regs)
+ flash_node->num_regulators = num_regs;
+
+ return rc;
+}
+
+static int qpnp_flash_led_parse_common_dt(
+ struct qpnp_flash_led *led,
+ struct device_node *node)
+{
+ int rc;
+ u32 val;
+ int temp_val;
+ const char *temp;
+
+ led->pdata->headroom = FLASH_LED_HEADROOM_DEFAULT_MV;
+ rc = of_property_read_u32(node, "qcom,headroom", &val);
+ if (!rc)
+ led->pdata->headroom = (u16)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to read headroom\n");
+ return rc;
+ }
+
+ led->pdata->startup_dly = FLASH_LED_STARTUP_DELAY_DEFAULT_US;
+ rc = of_property_read_u32(node, "qcom,startup-dly", &val);
+ if (!rc)
+ led->pdata->startup_dly = (u8)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to read startup delay\n");
+ return rc;
+ }
+
+ led->pdata->clamp_current = FLASH_LED_CLAMP_CURRENT_DEFAULT_MA;
+ rc = of_property_read_u32(node, "qcom,clamp-current", &val);
+ if (!rc) {
+ if (val < FLASH_LED_MIN_CURRENT_MA)
+ val = FLASH_LED_MIN_CURRENT_MA;
+ led->pdata->clamp_current = (u16)val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to read clamp current\n");
+ return rc;
+ }
+
+ led->pdata->pmic_charger_support =
+ of_property_read_bool(node,
+ "qcom,pmic-charger-support");
+
+ led->pdata->self_check_en =
+ of_property_read_bool(node, "qcom,self-check-enabled");
+
+ led->pdata->thermal_derate_en =
+ of_property_read_bool(node,
+ "qcom,thermal-derate-enabled");
+
+ if (led->pdata->thermal_derate_en) {
+ led->pdata->thermal_derate_rate =
+ FLASH_LED_THERMAL_DERATE_RATE_DEFAULT_PERCENT;
+ rc = of_property_read_string(node, "qcom,thermal-derate-rate",
+ &temp);
+ if (!rc) {
+ temp_val =
+ qpnp_flash_led_get_thermal_derate_rate(temp);
+ if (temp_val < 0) {
+ dev_err(&led->pdev->dev,
+ "Invalid thermal derate rate\n");
+ return -EINVAL;
+ }
+
+ led->pdata->thermal_derate_rate = (u8)temp_val;
+ } else {
+ dev_err(&led->pdev->dev,
+ "Unable to read thermal derate rate\n");
+ return -EINVAL;
+ }
+
+ led->pdata->thermal_derate_threshold =
+ FLASH_LED_THERMAL_DERATE_THRESHOLD_DEFAULT_C;
+ rc = of_property_read_u32(node, "qcom,thermal-derate-threshold",
+ &val);
+ if (!rc)
+ led->pdata->thermal_derate_threshold = (u8)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read thermal derate threshold\n");
+ return rc;
+ }
+ }
+
+ led->pdata->current_ramp_en =
+ of_property_read_bool(node,
+ "qcom,current-ramp-enabled");
+ if (led->pdata->current_ramp_en) {
+ led->pdata->ramp_up_step = FLASH_LED_RAMP_UP_STEP_DEFAULT_US;
+ rc = of_property_read_string(node, "qcom,ramp_up_step", &temp);
+ if (!rc) {
+ temp_val = qpnp_flash_led_get_ramp_step(temp);
+ if (temp_val < 0) {
+ dev_err(&led->pdev->dev,
+ "Invalid ramp up step values\n");
+ return -EINVAL;
+ }
+ led->pdata->ramp_up_step = (u8)temp_val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read ramp up steps\n");
+ return rc;
+ }
+
+ led->pdata->ramp_dn_step = FLASH_LED_RAMP_DN_STEP_DEFAULT_US;
+ rc = of_property_read_string(node, "qcom,ramp_dn_step", &temp);
+ if (!rc) {
+ temp_val = qpnp_flash_led_get_ramp_step(temp);
+ if (temp_val < 0) {
+ dev_err(&led->pdev->dev,
+ "Invalid ramp down step values\n");
+ return rc;
+ }
+ led->pdata->ramp_dn_step = (u8)temp_val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read ramp down steps\n");
+ return rc;
+ }
+ }
+
+ led->pdata->vph_pwr_droop_en = of_property_read_bool(node,
+ "qcom,vph-pwr-droop-enabled");
+ if (led->pdata->vph_pwr_droop_en) {
+ led->pdata->vph_pwr_droop_threshold =
+ FLASH_LED_VPH_PWR_DROOP_THRESHOLD_DEFAULT_MV;
+ rc = of_property_read_u32(node,
+ "qcom,vph-pwr-droop-threshold", &val);
+ if (!rc) {
+ led->pdata->vph_pwr_droop_threshold = (u16)val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read VPH PWR droop threshold\n");
+ return rc;
+ }
+
+ led->pdata->vph_pwr_droop_debounce_time =
+ FLASH_LED_VPH_PWR_DROOP_DEBOUNCE_TIME_DEFAULT_US;
+ rc = of_property_read_u32(node,
+ "qcom,vph-pwr-droop-debounce-time", &val);
+ if (!rc)
+ led->pdata->vph_pwr_droop_debounce_time = (u8)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev,
+ "Unable to read VPH PWR droop debounce time\n");
+ return rc;
+ }
+ }
+
+ led->pdata->hdrm_sns_ch0_en = of_property_read_bool(node,
+ "qcom,headroom-sense-ch0-enabled");
+
+ led->pdata->hdrm_sns_ch1_en = of_property_read_bool(node,
+ "qcom,headroom-sense-ch1-enabled");
+
+ led->pdata->power_detect_en = of_property_read_bool(node,
+ "qcom,power-detect-enabled");
+
+ led->pdata->mask3_en = of_property_read_bool(node,
+ "qcom,otst2-module-enabled");
+
+ led->pdata->follow_rb_disable = of_property_read_bool(node,
+ "qcom,follow-otst2-rb-disabled");
+
+ led->pdata->die_current_derate_en = of_property_read_bool(node,
+ "qcom,die-current-derate-enabled");
+
+ if (led->pdata->die_current_derate_en) {
+ led->vadc_dev = qpnp_get_vadc(&led->pdev->dev, "die-temp");
+ if (IS_ERR(led->vadc_dev)) {
+ pr_err("VADC channel property Missing\n");
+ return -EINVAL;
+ }
+
+ if (of_find_property(node, "qcom,die-temp-threshold",
+ &led->pdata->temp_threshold_num)) {
+ if (led->pdata->temp_threshold_num > 0) {
+ led->pdata->die_temp_threshold_degc =
+ devm_kzalloc(&led->pdev->dev,
+ led->pdata->temp_threshold_num,
+ GFP_KERNEL);
+
+ if (led->pdata->die_temp_threshold_degc
+ == NULL) {
+ dev_err(&led->pdev->dev,
+ "failed to allocate die temp array\n");
+ return -ENOMEM;
+ }
+ led->pdata->temp_threshold_num /=
+ sizeof(unsigned int);
+
+ rc = of_property_read_u32_array(node,
+ "qcom,die-temp-threshold",
+ led->pdata->die_temp_threshold_degc,
+ led->pdata->temp_threshold_num);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "couldn't read temp threshold rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+ }
+
+ if (of_find_property(node, "qcom,die-temp-derate-current",
+ &led->pdata->temp_derate_curr_num)) {
+ if (led->pdata->temp_derate_curr_num > 0) {
+ led->pdata->die_temp_derate_curr_ma =
+ devm_kzalloc(&led->pdev->dev,
+ led->pdata->temp_derate_curr_num,
+ GFP_KERNEL);
+ if (led->pdata->die_temp_derate_curr_ma
+ == NULL) {
+ dev_err(&led->pdev->dev,
+ "failed to allocate die derate current array\n");
+ return -ENOMEM;
+ }
+ led->pdata->temp_derate_curr_num /=
+ sizeof(unsigned int);
+
+ rc = of_property_read_u32_array(node,
+ "qcom,die-temp-derate-current",
+ led->pdata->die_temp_derate_curr_ma,
+ led->pdata->temp_derate_curr_num);
+ if (rc) {
+ dev_err(&led->pdev->dev,
+ "couldn't read temp limits rc =%d\n",
+ rc);
+ return rc;
+ }
+ }
+ }
+ if (led->pdata->temp_threshold_num !=
+ led->pdata->temp_derate_curr_num) {
+ pr_err("Both array size are not same\n");
+ return -EINVAL;
+ }
+ }
+
+ led->pinctrl = devm_pinctrl_get(&led->pdev->dev);
+ if (IS_ERR_OR_NULL(led->pinctrl)) {
+ dev_err(&led->pdev->dev, "Unable to acquire pinctrl\n");
+ led->pinctrl = NULL;
+ return 0;
+ }
+
+ led->gpio_state_active = pinctrl_lookup_state(led->pinctrl,
+ "flash_led_enable");
+ if (IS_ERR_OR_NULL(led->gpio_state_active)) {
+ dev_err(&led->pdev->dev, "Cannot lookup LED active state\n");
+ devm_pinctrl_put(led->pinctrl);
+ led->pinctrl = NULL;
+ return PTR_ERR(led->gpio_state_active);
+ }
+
+ led->gpio_state_suspend = pinctrl_lookup_state(led->pinctrl,
+ "flash_led_disable");
+ if (IS_ERR_OR_NULL(led->gpio_state_suspend)) {
+ dev_err(&led->pdev->dev, "Cannot lookup LED disable state\n");
+ devm_pinctrl_put(led->pinctrl);
+ led->pinctrl = NULL;
+ return PTR_ERR(led->gpio_state_suspend);
+ }
+
+ return 0;
+}
+
+static int qpnp_flash_led_probe(struct platform_device *pdev)
+{
+ struct qpnp_flash_led *led;
+ unsigned int base;
+ struct device_node *node, *temp;
+ struct dentry *root, *file;
+ int rc, i = 0, j, num_leds = 0;
+ u32 val;
+
+ root = NULL;
+ node = pdev->dev.of_node;
+ if (node == NULL) {
+ dev_info(&pdev->dev, "No flash device defined\n");
+ return -ENODEV;
+ }
+
+ rc = of_property_read_u32(pdev->dev.of_node, "reg", &base);
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "Couldn't find reg in node = %s rc = %d\n",
+ pdev->dev.of_node->full_name, rc);
+ return rc;
+ }
+
+ led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ led->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!led->regmap) {
+ dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+ return -EINVAL;
+ }
+
+ led->base = base;
+ led->pdev = pdev;
+ led->current_addr = FLASH_LED0_CURRENT(led->base);
+ led->current2_addr = FLASH_LED1_CURRENT(led->base);
+
+ led->pdata = devm_kzalloc(&pdev->dev, sizeof(*led->pdata), GFP_KERNEL);
+ if (!led->pdata)
+ return -ENOMEM;
+
+ rc = qpnp_flash_led_get_peripheral_type(led);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Failed to get peripheral type\n");
+ return rc;
+ }
+ led->peripheral_type = (u8)rc;
+
+ rc = qpnp_flash_led_parse_common_dt(led, node);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Failed to get common config for flash LEDs\n");
+ return rc;
+ }
+
+ rc = qpnp_flash_led_init_settings(led);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to initialize flash LED\n");
+ return rc;
+ }
+
+ rc = qpnp_get_pmic_revid(led);
+ if (rc)
+ return rc;
+
+ temp = NULL;
+ while ((temp = of_get_next_child(node, temp)))
+ num_leds++;
+
+ if (!num_leds)
+ return -ECHILD;
+
+ led->flash_node = devm_kzalloc(&pdev->dev,
+ (sizeof(struct flash_node_data) * num_leds),
+ GFP_KERNEL);
+ if (!led->flash_node) {
+ dev_err(&pdev->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&led->flash_led_lock);
+
+ led->ordered_workq = alloc_ordered_workqueue("flash_led_workqueue", 0);
+ if (!led->ordered_workq) {
+ dev_err(&pdev->dev, "Failed to allocate ordered workqueue\n");
+ return -ENOMEM;
+ }
+
+ for_each_child_of_node(node, temp) {
+ led->flash_node[i].cdev.brightness_set =
+ qpnp_flash_led_brightness_set;
+ led->flash_node[i].cdev.brightness_get =
+ qpnp_flash_led_brightness_get;
+ led->flash_node[i].pdev = pdev;
+
+ INIT_WORK(&led->flash_node[i].work, qpnp_flash_led_work);
+ rc = of_property_read_string(temp, "qcom,led-name",
+ &led->flash_node[i].cdev.name);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev,
+ "Unable to read flash name\n");
+ return rc;
+ }
+
+ rc = of_property_read_string(temp, "qcom,default-led-trigger",
+ &led->flash_node[i].cdev.default_trigger);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev,
+ "Unable to read trigger name\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(temp, "qcom,max-current", &val);
+ if (!rc) {
+ if (val < FLASH_LED_MIN_CURRENT_MA)
+ val = FLASH_LED_MIN_CURRENT_MA;
+ led->flash_node[i].max_current = (u16)val;
+ led->flash_node[i].cdev.max_brightness = val;
+ } else {
+ dev_err(&led->pdev->dev,
+ "Unable to read max current\n");
+ return rc;
+ }
+ rc = led_classdev_register(&pdev->dev,
+ &led->flash_node[i].cdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to register led\n");
+ goto error_led_register;
+ }
+
+ led->flash_node[i].cdev.dev->of_node = temp;
+
+ rc = qpnp_flash_led_parse_each_led_dt(led, &led->flash_node[i]);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Failed to parse config for each LED\n");
+ goto error_led_register;
+ }
+
+ if (led->flash_node[i].num_regulators) {
+ rc = flash_regulator_parse_dt(led, &led->flash_node[i]);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Unable to parse regulator data\n");
+ goto error_led_register;
+ }
+
+ rc = flash_regulator_setup(led, &led->flash_node[i],
+ true);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Unable to set up regulator\n");
+ goto error_led_register;
+ }
+ }
+
+ for (j = 0; j < ARRAY_SIZE(qpnp_flash_led_attrs); j++) {
+ rc =
+ sysfs_create_file(&led->flash_node[i].cdev.dev->kobj,
+ &qpnp_flash_led_attrs[j].attr);
+ if (rc)
+ goto error_led_register;
+ }
+
+ i++;
+ }
+
+ led->num_leds = i;
+
+ root = debugfs_create_dir("flashLED", NULL);
+ if (IS_ERR_OR_NULL(root)) {
+ pr_err("Error creating top level directory err%ld",
+ (long)root);
+ if (PTR_ERR(root) == -ENODEV)
+ pr_err("debugfs is not enabled in kernel");
+ goto error_led_debugfs;
+ }
+
+ led->dbgfs_root = root;
+ file = debugfs_create_file("enable_debug", 0600, root, led,
+ &flash_led_dfs_dbg_feature_fops);
+ if (!file) {
+ pr_err("error creating 'enable_debug' entry\n");
+ goto error_led_debugfs;
+ }
+
+ file = debugfs_create_file("latched", 0600, root, led,
+ &flash_led_dfs_latched_reg_fops);
+ if (!file) {
+ pr_err("error creating 'latched' entry\n");
+ goto error_led_debugfs;
+ }
+
+ file = debugfs_create_file("strobe", 0600, root, led,
+ &flash_led_dfs_strobe_reg_fops);
+ if (!file) {
+ pr_err("error creating 'strobe' entry\n");
+ goto error_led_debugfs;
+ }
+
+ dev_set_drvdata(&pdev->dev, led);
+
+ return 0;
+
+error_led_debugfs:
+ i = led->num_leds - 1;
+ j = ARRAY_SIZE(qpnp_flash_led_attrs) - 1;
+error_led_register:
+ for (; i >= 0; i--) {
+ for (; j >= 0; j--)
+ sysfs_remove_file(&led->flash_node[i].cdev.dev->kobj,
+ &qpnp_flash_led_attrs[j].attr);
+ j = ARRAY_SIZE(qpnp_flash_led_attrs) - 1;
+ led_classdev_unregister(&led->flash_node[i].cdev);
+ }
+ debugfs_remove_recursive(root);
+ mutex_destroy(&led->flash_led_lock);
+ destroy_workqueue(led->ordered_workq);
+
+ return rc;
+}
+
+static int qpnp_flash_led_remove(struct platform_device *pdev)
+{
+ struct qpnp_flash_led *led = dev_get_drvdata(&pdev->dev);
+ int i, j;
+
+ for (i = led->num_leds - 1; i >= 0; i--) {
+ if (led->flash_node[i].reg_data) {
+ if (led->flash_node[i].flash_on)
+ flash_regulator_enable(led,
+ &led->flash_node[i], false);
+ flash_regulator_setup(led, &led->flash_node[i],
+ false);
+ }
+ for (j = 0; j < ARRAY_SIZE(qpnp_flash_led_attrs); j++)
+ sysfs_remove_file(&led->flash_node[i].cdev.dev->kobj,
+ &qpnp_flash_led_attrs[j].attr);
+ led_classdev_unregister(&led->flash_node[i].cdev);
+ }
+ debugfs_remove_recursive(led->dbgfs_root);
+ mutex_destroy(&led->flash_led_lock);
+ destroy_workqueue(led->ordered_workq);
+
+ return 0;
+}
+
+static const struct of_device_id spmi_match_table[] = {
+ { .compatible = "qcom,qpnp-flash-led",},
+ { },
+};
+
+static struct platform_driver qpnp_flash_led_driver = {
+ .driver = {
+ .name = "qcom,qpnp-flash-led",
+ .of_match_table = spmi_match_table,
+ },
+ .probe = qpnp_flash_led_probe,
+ .remove = qpnp_flash_led_remove,
+};
+
+static int __init qpnp_flash_led_init(void)
+{
+ return platform_driver_register(&qpnp_flash_led_driver);
+}
+late_initcall(qpnp_flash_led_init);
+
+static void __exit qpnp_flash_led_exit(void)
+{
+ platform_driver_unregister(&qpnp_flash_led_driver);
+}
+module_exit(qpnp_flash_led_exit);
+
+MODULE_DESCRIPTION("QPNP Flash LED driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("leds:leds-qpnp-flash");
diff --git a/drivers/leds/leds-qpnp-wled.c b/drivers/leds/leds-qpnp-wled.c
index c85b3e42c8c8..b0519544759e 100644
--- a/drivers/leds/leds-qpnp-wled.c
+++ b/drivers/leds/leds-qpnp-wled.c
@@ -63,7 +63,6 @@
#define QPNP_WLED_VLOOP_COMP_RES_MASK 0xF0
#define QPNP_WLED_VLOOP_COMP_RES_OVERWRITE 0x80
-#define QPNP_WLED_LOOP_COMP_RES_DFLT_AMOLED_KOHM 320
#define QPNP_WLED_LOOP_COMP_RES_STEP_KOHM 20
#define QPNP_WLED_LOOP_COMP_RES_MIN_KOHM 20
#define QPNP_WLED_LOOP_COMP_RES_MAX_KOHM 320
@@ -106,10 +105,8 @@
#define QPNP_WLED_BOOST_DUTY_MIN_NS 26
#define QPNP_WLED_BOOST_DUTY_MAX_NS 156
#define QPNP_WLED_DEF_BOOST_DUTY_NS 104
-#define QPNP_WLED_SWITCH_FREQ_MASK 0x70
-#define QPNP_WLED_SWITCH_FREQ_800_KHZ 800
-#define QPNP_WLED_SWITCH_FREQ_1600_KHZ 1600
-#define QPNP_WLED_SWITCH_FREQ_OVERWRITE 0x80
+#define QPNP_WLED_SWITCH_FREQ_MASK GENMASK(3, 0)
+#define QPNP_WLED_SWITCH_FREQ_OVERWRITE BIT(7)
#define QPNP_WLED_OVP_MASK GENMASK(1, 0)
#define QPNP_WLED_TEST4_EN_DEB_BYPASS_ILIM_BIT BIT(6)
#define QPNP_WLED_TEST4_EN_SH_FOR_SS_BIT BIT(5)
@@ -123,6 +120,11 @@
#define QPNP_WLED_ILIM_FAULT_BIT BIT(0)
#define QPNP_WLED_OVP_FAULT_BIT BIT(1)
#define QPNP_WLED_SC_FAULT_BIT BIT(2)
+#define QPNP_WLED_OVP_FLT_RT_STS_BIT BIT(1)
+
+/* QPNP_WLED_SOFTSTART_RAMP_DLY */
+#define SOFTSTART_OVERWRITE_BIT BIT(7)
+#define SOFTSTART_RAMP_DELAY_MASK GENMASK(2, 0)
/* sink registers */
#define QPNP_WLED_CURR_SINK_REG(b) (b + 0x46)
@@ -403,6 +405,7 @@ struct qpnp_wled {
bool ovp_irq_disabled;
bool auto_calib_enabled;
bool auto_calib_done;
+ bool module_dis_perm;
ktime_t start_ovp_fault_time;
};
@@ -599,6 +602,9 @@ static int qpnp_wled_module_en(struct qpnp_wled *wled,
{
int rc;
+ if (wled->module_dis_perm)
+ return 0;
+
rc = qpnp_wled_masked_write_reg(wled,
QPNP_WLED_MODULE_EN_REG(base_addr),
QPNP_WLED_MODULE_EN_MASK,
@@ -1097,20 +1103,12 @@ static int qpnp_wled_set_disp(struct qpnp_wled *wled, u16 base_addr)
return 0;
}
-#define AUTO_CALIB_BRIGHTNESS 16
+#define AUTO_CALIB_BRIGHTNESS 200
static int wled_auto_calibrate(struct qpnp_wled *wled)
{
int rc = 0, i;
u8 reg = 0, sink_config = 0, sink_test = 0, sink_valid = 0, int_sts;
- mutex_lock(&wled->lock);
-
- /* disable OVP IRQ */
- if (wled->ovp_irq > 0 && !wled->ovp_irq_disabled) {
- disable_irq_nosync(wled->ovp_irq);
- wled->ovp_irq_disabled = true;
- }
-
/* read configured sink configuration */
rc = qpnp_wled_read_reg(wled,
QPNP_WLED_CURR_SINK_REG(wled->sink_base), &sink_config);
@@ -1135,6 +1133,17 @@ static int wled_auto_calibrate(struct qpnp_wled *wled)
goto failed_calib;
}
+ if (wled->en_cabc) {
+ for (i = 0; i < wled->max_strings; i++) {
+ reg = 0;
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_CABC_REG(wled->sink_base, i),
+ QPNP_WLED_CABC_MASK, reg);
+ if (rc < 0)
+ goto failed_calib;
+ }
+ }
+
/* disable all sinks */
rc = qpnp_wled_write_reg(wled,
QPNP_WLED_CURR_SINK_REG(wled->sink_base), 0);
@@ -1143,21 +1152,6 @@ static int wled_auto_calibrate(struct qpnp_wled *wled)
goto failed_calib;
}
- rc = qpnp_wled_masked_write_reg(wled,
- QPNP_WLED_MODULE_EN_REG(wled->ctrl_base),
- QPNP_WLED_MODULE_EN_MASK,
- QPNP_WLED_MODULE_EN_MASK);
- if (rc < 0) {
- pr_err("Failed to enable WLED module rc=%d\n", rc);
- goto failed_calib;
- }
- /*
- * Delay for the WLED soft-start, check the OVP status
- * only after soft-start is complete
- */
- usleep_range(QPNP_WLED_SOFT_START_DLY_US,
- QPNP_WLED_SOFT_START_DLY_US + 1000);
-
/* iterate through the strings one by one */
for (i = 0; i < wled->max_strings; i++) {
sink_test = 1 << (QPNP_WLED_CURR_SINK_SHIFT + i);
@@ -1181,6 +1175,15 @@ static int wled_auto_calibrate(struct qpnp_wled *wled)
goto failed_calib;
}
+ /* Enable the module */
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_MODULE_EN_REG(wled->ctrl_base),
+ QPNP_WLED_MODULE_EN_MASK, QPNP_WLED_MODULE_EN_MASK);
+ if (rc < 0) {
+ pr_err("Failed to enable WLED module rc=%d\n", rc);
+ goto failed_calib;
+ }
+
/* delay for WLED soft-start */
usleep_range(QPNP_WLED_SOFT_START_DLY_US,
QPNP_WLED_SOFT_START_DLY_US + 1000);
@@ -1197,6 +1200,15 @@ static int wled_auto_calibrate(struct qpnp_wled *wled)
i + 1);
else
sink_valid |= sink_test;
+
+ /* Disable the module */
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_MODULE_EN_REG(wled->ctrl_base),
+ QPNP_WLED_MODULE_EN_MASK, 0);
+ if (rc < 0) {
+ pr_err("Failed to disable WLED module rc=%d\n", rc);
+ goto failed_calib;
+ }
}
if (sink_valid == sink_config) {
@@ -1210,14 +1222,7 @@ static int wled_auto_calibrate(struct qpnp_wled *wled)
if (!sink_config) {
pr_warn("No valid WLED sinks found\n");
- goto failed_calib;
- }
-
- rc = qpnp_wled_masked_write_reg(wled,
- QPNP_WLED_MODULE_EN_REG(wled->ctrl_base),
- QPNP_WLED_MODULE_EN_MASK, 0);
- if (rc < 0) {
- pr_err("Failed to disable WLED module rc=%d\n", rc);
+ wled->module_dis_perm = true;
goto failed_calib;
}
@@ -1231,6 +1236,15 @@ static int wled_auto_calibrate(struct qpnp_wled *wled)
/* MODULATOR_EN setting for valid sinks */
for (i = 0; i < wled->max_strings; i++) {
+ if (wled->en_cabc) {
+ reg = 1 << QPNP_WLED_CABC_SHIFT;
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_CABC_REG(wled->sink_base, i),
+ QPNP_WLED_CABC_MASK, reg);
+ if (rc < 0)
+ goto failed_calib;
+ }
+
if (sink_config & (1 << (QPNP_WLED_CURR_SINK_SHIFT + i)))
reg = (QPNP_WLED_MOD_EN << QPNP_WLED_MOD_EN_SHFT);
else
@@ -1259,7 +1273,8 @@ static int wled_auto_calibrate(struct qpnp_wled *wled)
}
/* restore brightness */
- rc = qpnp_wled_set_level(wled, wled->cdev.brightness);
+ rc = qpnp_wled_set_level(wled, !wled->cdev.brightness ?
+ AUTO_CALIB_BRIGHTNESS : wled->cdev.brightness);
if (rc < 0) {
pr_err("Failed to set brightness after calibration rc=%d\n",
rc);
@@ -1280,11 +1295,6 @@ static int wled_auto_calibrate(struct qpnp_wled *wled)
QPNP_WLED_SOFT_START_DLY_US + 1000);
failed_calib:
- if (wled->ovp_irq > 0 && wled->ovp_irq_disabled) {
- enable_irq(wled->ovp_irq);
- wled->ovp_irq_disabled = false;
- }
- mutex_unlock(&wled->lock);
return rc;
}
@@ -1320,6 +1330,38 @@ static bool qpnp_wled_auto_cal_required(struct qpnp_wled *wled)
return false;
}
+static int qpnp_wled_auto_calibrate_at_init(struct qpnp_wled *wled)
+{
+ int rc;
+ u8 fault_status = 0, rt_status = 0;
+
+ if (!wled->auto_calib_enabled)
+ return 0;
+
+ rc = qpnp_wled_read_reg(wled,
+ QPNP_WLED_INT_RT_STS(wled->ctrl_base), &rt_status);
+ if (rc < 0)
+ pr_err("Failed to read RT status rc=%d\n", rc);
+
+ rc = qpnp_wled_read_reg(wled,
+ QPNP_WLED_FAULT_STATUS(wled->ctrl_base), &fault_status);
+ if (rc < 0)
+ pr_err("Failed to read fault status rc=%d\n", rc);
+
+ if ((rt_status & QPNP_WLED_OVP_FLT_RT_STS_BIT) ||
+ (fault_status & QPNP_WLED_OVP_FAULT_BIT)) {
+ mutex_lock(&wled->lock);
+ rc = wled_auto_calibrate(wled);
+ if (rc < 0)
+ pr_err("Failed auto-calibration rc=%d\n", rc);
+ else
+ wled->auto_calib_done = true;
+ mutex_unlock(&wled->lock);
+ }
+
+ return rc;
+}
+
/* ovp irq handler */
static irqreturn_t qpnp_wled_ovp_irq_handler(int irq, void *_wled)
{
@@ -1348,13 +1390,26 @@ static irqreturn_t qpnp_wled_ovp_irq_handler(int irq, void *_wled)
if (fault_sts & QPNP_WLED_OVP_FAULT_BIT) {
if (wled->auto_calib_enabled && !wled->auto_calib_done) {
if (qpnp_wled_auto_cal_required(wled)) {
+ mutex_lock(&wled->lock);
+ if (wled->ovp_irq > 0 &&
+ !wled->ovp_irq_disabled) {
+ disable_irq_nosync(wled->ovp_irq);
+ wled->ovp_irq_disabled = true;
+ }
+
rc = wled_auto_calibrate(wled);
- if (rc < 0) {
+ if (rc < 0)
pr_err("Failed auto-calibration rc=%d\n",
- rc);
- return IRQ_HANDLED;
+ rc);
+ else
+ wled->auto_calib_done = true;
+
+ if (wled->ovp_irq > 0 &&
+ wled->ovp_irq_disabled) {
+ enable_irq(wled->ovp_irq);
+ wled->ovp_irq_disabled = false;
}
- wled->auto_calib_done = true;
+ mutex_unlock(&wled->lock);
}
}
}
@@ -1422,20 +1477,26 @@ static int qpnp_wled_gm_config(struct qpnp_wled *wled)
u8 mask = 0, reg = 0;
/* Configure the LOOP COMP GM register */
- if (wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
- wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE) {
- if (wled->loop_auto_gm_en)
- reg |= QPNP_WLED_VLOOP_COMP_AUTO_GM_EN;
-
- if (wled->loop_auto_gm_thresh >
- QPNP_WLED_LOOP_AUTO_GM_THRESH_MAX)
- wled->loop_auto_gm_thresh =
- QPNP_WLED_LOOP_AUTO_GM_THRESH_MAX;
-
- reg |= wled->loop_auto_gm_thresh <<
- QPNP_WLED_VLOOP_COMP_AUTO_GM_THRESH_SHIFT;
- mask |= QPNP_WLED_VLOOP_COMP_AUTO_GM_EN |
- QPNP_WLED_VLOOP_COMP_AUTO_GM_THRESH_MASK;
+ if ((wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
+ wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)) {
+ if (wled->disp_type_amoled) {
+ reg = 0;
+ mask |= QPNP_WLED_VLOOP_COMP_AUTO_GM_EN |
+ QPNP_WLED_VLOOP_COMP_AUTO_GM_THRESH_MASK;
+ } else {
+ if (wled->loop_auto_gm_en)
+ reg |= QPNP_WLED_VLOOP_COMP_AUTO_GM_EN;
+
+ if (wled->loop_auto_gm_thresh >
+ QPNP_WLED_LOOP_AUTO_GM_THRESH_MAX)
+ wled->loop_auto_gm_thresh =
+ QPNP_WLED_LOOP_AUTO_GM_THRESH_MAX;
+
+ reg |= wled->loop_auto_gm_thresh <<
+ QPNP_WLED_VLOOP_COMP_AUTO_GM_THRESH_SHIFT;
+ mask |= QPNP_WLED_VLOOP_COMP_AUTO_GM_EN |
+ QPNP_WLED_VLOOP_COMP_AUTO_GM_THRESH_MASK;
+ }
}
if (wled->loop_ea_gm < QPNP_WLED_LOOP_EA_GM_MIN)
@@ -1728,8 +1789,17 @@ static int qpnp_wled_config(struct qpnp_wled *wled)
/* Configure the Soft start Ramp delay: for AMOLED - 0,for LCD - 2 */
reg = (wled->disp_type_amoled) ? 0 : 2;
- rc = qpnp_wled_write_reg(wled,
- QPNP_WLED_SOFTSTART_RAMP_DLY(wled->ctrl_base), reg);
+ mask = SOFTSTART_RAMP_DELAY_MASK;
+ if ((wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
+ wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)
+ && wled->disp_type_amoled) {
+ reg |= SOFTSTART_OVERWRITE_BIT;
+ mask |= SOFTSTART_OVERWRITE_BIT;
+ }
+
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_SOFTSTART_RAMP_DLY(wled->ctrl_base),
+ mask, reg);
if (rc)
return rc;
@@ -1751,21 +1821,24 @@ static int qpnp_wled_config(struct qpnp_wled *wled)
return rc;
/* Configure the SWITCHING FREQ register */
- if (wled->switch_freq_khz == QPNP_WLED_SWITCH_FREQ_1600_KHZ)
- temp = QPNP_WLED_SWITCH_FREQ_1600_KHZ_CODE;
+ if (wled->switch_freq_khz == 1600)
+ reg = QPNP_WLED_SWITCH_FREQ_1600_KHZ_CODE;
else
- temp = QPNP_WLED_SWITCH_FREQ_800_KHZ_CODE;
+ reg = QPNP_WLED_SWITCH_FREQ_800_KHZ_CODE;
- rc = qpnp_wled_read_reg(wled,
- QPNP_WLED_SWITCH_FREQ_REG(wled->ctrl_base), &reg);
+ /*
+ * Do not set the overwrite bit when the switching frequency is
+ * selected for AMOLED. This register is in the logic reset block,
+ * so its value can be overwritten during module enable/disable.
+ */
+ mask = QPNP_WLED_SWITCH_FREQ_MASK | QPNP_WLED_SWITCH_FREQ_OVERWRITE;
+ if (!wled->disp_type_amoled)
+ reg |= QPNP_WLED_SWITCH_FREQ_OVERWRITE;
+
+ rc = qpnp_wled_masked_write_reg(wled,
+ QPNP_WLED_SWITCH_FREQ_REG(wled->ctrl_base), mask, reg);
if (rc < 0)
return rc;
- reg &= QPNP_WLED_SWITCH_FREQ_MASK;
- reg |= (temp | QPNP_WLED_SWITCH_FREQ_OVERWRITE);
- rc = qpnp_wled_write_reg(wled,
- QPNP_WLED_SWITCH_FREQ_REG(wled->ctrl_base), reg);
- if (rc)
- return rc;
rc = qpnp_wled_ovp_config(wled);
if (rc < 0) {
@@ -1946,6 +2019,10 @@ static int qpnp_wled_config(struct qpnp_wled *wled)
return rc;
}
+ rc = qpnp_wled_auto_calibrate_at_init(wled);
+ if (rc < 0)
+ pr_err("Failed to auto-calibrate at init rc=%d\n", rc);
+
/* setup ovp and sc irqs */
if (wled->ovp_irq >= 0) {
rc = devm_request_threaded_irq(&wled->pdev->dev, wled->ovp_irq,
@@ -2070,8 +2147,11 @@ static int qpnp_wled_parse_dt(struct qpnp_wled *wled)
return rc;
}
- wled->loop_comp_res_kohm =
- QPNP_WLED_LOOP_COMP_RES_DFLT_AMOLED_KOHM;
+ wled->loop_comp_res_kohm = 320;
+ if (wled->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE ||
+ wled->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)
+ wled->loop_comp_res_kohm = 300;
+
rc = of_property_read_u32(pdev->dev.of_node,
"qcom,loop-comp-res-kohm", &temp_val);
if (!rc) {
@@ -2199,7 +2279,7 @@ static int qpnp_wled_parse_dt(struct qpnp_wled *wled)
return rc;
}
- wled->switch_freq_khz = QPNP_WLED_SWITCH_FREQ_800_KHZ;
+ wled->switch_freq_khz = wled->disp_type_amoled ? 1600 : 800;
rc = of_property_read_u32(pdev->dev.of_node,
"qcom,switch-freq-khz", &temp_val);
if (!rc) {
@@ -2403,7 +2483,7 @@ static int qpnp_wled_probe(struct platform_device *pdev)
wled->pmic_rev_id->pmic_subtype, wled->pmic_rev_id->rev4);
prop = of_get_address_by_name(pdev->dev.of_node, QPNP_WLED_SINK_BASE,
- 0, 0);
+ NULL, NULL);
if (!prop) {
dev_err(&pdev->dev, "Couldnt find sink's addr rc %d\n", rc);
return rc;
@@ -2411,7 +2491,7 @@ static int qpnp_wled_probe(struct platform_device *pdev)
wled->sink_base = be32_to_cpu(*prop);
prop = of_get_address_by_name(pdev->dev.of_node, QPNP_WLED_CTRL_BASE,
- 0, 0);
+ NULL, NULL);
if (!prop) {
dev_err(&pdev->dev, "Couldnt find ctrl's addr rc = %d\n", rc);
return rc;
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index c3ea03c9a1a8..02619cabda8b 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -333,6 +333,7 @@ struct cached_dev {
/* Limit number of writeback bios in flight */
struct semaphore in_flight;
struct task_struct *writeback_thread;
+ struct workqueue_struct *writeback_write_wq;
struct keybuf writeback_keys;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 2410df1c2a05..0ee41fd9d850 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -196,12 +196,12 @@ static void bch_data_insert_start(struct closure *cl)
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
struct bio *bio = op->bio, *n;
- if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
- wake_up_gc(op->c);
-
if (op->bypass)
return bch_data_invalidate(cl);
+ if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
+ wake_up_gc(op->c);
+
/*
* Journal writes are marked REQ_FLUSH; if the original write was a
* flush, it'll wait on the journal write.
@@ -1014,7 +1014,7 @@ static int cached_dev_congested(void *data, int bits)
struct request_queue *q = bdev_get_queue(dc->bdev);
int ret = 0;
- if (bdi_congested(&q->backing_dev_info, bits))
+ if (bdi_congested(q->backing_dev_info, bits))
return 1;
if (cached_dev_get(dc)) {
@@ -1023,7 +1023,7 @@ static int cached_dev_congested(void *data, int bits)
for_each_cache(ca, d->c, i) {
q = bdev_get_queue(ca->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
}
cached_dev_put(dc);
@@ -1037,7 +1037,7 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
struct gendisk *g = dc->disk.disk;
g->queue->make_request_fn = cached_dev_make_request;
- g->queue->backing_dev_info.congested_fn = cached_dev_congested;
+ g->queue->backing_dev_info->congested_fn = cached_dev_congested;
dc->disk.cache_miss = cached_dev_cache_miss;
dc->disk.ioctl = cached_dev_ioctl;
}
@@ -1130,7 +1130,7 @@ static int flash_dev_congested(void *data, int bits)
for_each_cache(ca, d->c, i) {
q = bdev_get_queue(ca->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
}
return ret;
@@ -1141,7 +1141,7 @@ void bch_flash_dev_request_init(struct bcache_device *d)
struct gendisk *g = d->disk;
g->queue->make_request_fn = flash_dev_make_request;
- g->queue->backing_dev_info.congested_fn = flash_dev_congested;
+ g->queue->backing_dev_info->congested_fn = flash_dev_congested;
d->cache_miss = flash_dev_cache_miss;
d->ioctl = flash_dev_ioctl;
}
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 7b5880b8874c..13acf48c5210 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -802,7 +802,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
blk_queue_make_request(q, NULL);
d->disk->queue = q;
q->queuedata = d;
- q->backing_dev_info.congested_data = d;
+ q->backing_dev_info->congested_data = d;
q->limits.max_hw_sectors = UINT_MAX;
q->limits.max_sectors = UINT_MAX;
q->limits.max_segment_size = UINT_MAX;
@@ -1023,7 +1023,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
}
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
- bch_sectors_dirty_init(dc);
+ bch_sectors_dirty_init(&dc->disk);
atomic_set(&dc->has_dirty, 1);
atomic_inc(&dc->count);
bch_writeback_queue(dc);
@@ -1056,6 +1056,8 @@ static void cached_dev_free(struct closure *cl)
cancel_delayed_work_sync(&dc->writeback_rate_update);
if (!IS_ERR_OR_NULL(dc->writeback_thread))
kthread_stop(dc->writeback_thread);
+ if (dc->writeback_write_wq)
+ destroy_workqueue(dc->writeback_write_wq);
mutex_lock(&bch_register_lock);
@@ -1127,9 +1129,9 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
set_capacity(dc->disk.disk,
dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
- dc->disk.disk->queue->backing_dev_info.ra_pages =
- max(dc->disk.disk->queue->backing_dev_info.ra_pages,
- q->backing_dev_info.ra_pages);
+ dc->disk.disk->queue->backing_dev_info->ra_pages =
+ max(dc->disk.disk->queue->backing_dev_info->ra_pages,
+ q->backing_dev_info->ra_pages);
bch_cached_dev_request_init(dc);
bch_cached_dev_writeback_init(dc);
@@ -1227,6 +1229,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
goto err;
bcache_device_attach(d, c, u - c->uuids);
+ bch_sectors_dirty_init(d);
bch_flash_dev_request_init(d);
add_disk(d->disk);
@@ -1959,6 +1962,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
else
err = "device busy";
mutex_unlock(&bch_register_lock);
+ if (!IS_ERR(bdev))
+ bdput(bdev);
if (attr == &ksysfs_register_quiet)
goto out;
}
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index b3ff57d61dde..4fbb5532f24c 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -191,7 +191,7 @@ STORE(__cached_dev)
{
struct cached_dev *dc = container_of(kobj, struct cached_dev,
disk.kobj);
- unsigned v = size;
+ ssize_t v = size;
struct cache_set *c;
struct kobj_uevent_env *env;
@@ -226,7 +226,7 @@ STORE(__cached_dev)
bch_cached_dev_run(dc);
if (attr == &sysfs_cache_mode) {
- ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);
+ v = bch_read_string_list(buf, bch_cache_modes + 1);
if (v < 0)
return v;
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index db3ae4c2b223..6c18e3ec3e48 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -73,24 +73,44 @@ STRTO_H(strtouint, unsigned int)
STRTO_H(strtoll, long long)
STRTO_H(strtoull, unsigned long long)
+/**
+ * bch_hprint() - format @v as a human-readable string for sysfs.
+ *
+ * @v - signed 64 bit integer
+ * @buf - the (at least 8 byte) buffer to format the result into.
+ *
+ * Returns the number of bytes written to @buf.
+ */
ssize_t bch_hprint(char *buf, int64_t v)
{
static const char units[] = "?kMGTPEZY";
- char dec[4] = "";
- int u, t = 0;
-
- for (u = 0; v >= 1024 || v <= -1024; u++) {
- t = v & ~(~0 << 10);
- v >>= 10;
- }
-
- if (!u)
- return sprintf(buf, "%llu", v);
-
- if (v < 100 && v > -100)
- snprintf(dec, sizeof(dec), ".%i", t / 100);
-
- return sprintf(buf, "%lli%s%c", v, dec, units[u]);
+ int u = 0, t;
+
+ uint64_t q;
+
+ if (v < 0)
+ q = -v;
+ else
+ q = v;
+
+ /* For as long as the number is more than 3 digits, but at least
+ * once, shift right / divide by 1024. Keep the remainder for
+ * a digit after the decimal point.
+ */
+ do {
+ u++;
+
+ t = q & ~(~0 << 10);
+ q >>= 10;
+ } while (q >= 1000);
+
+ if (v < 0)
+ /* '-', up to 3 digits, '.', 1 digit, 1 character, null;
+ * yields 8 bytes.
+ */
+ return sprintf(buf, "-%llu.%i%c", q, t * 10 / 1024, units[u]);
+ else
+ return sprintf(buf, "%llu.%i%c", q, t * 10 / 1024, units[u]);
}
ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
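
The rewritten bch_hprint above always keeps one digit after the decimal point by saving the low 10 bits as a remainder before each shift by 1024. A minimal user-space sketch of the same idea (standalone, plain snprintf instead of the kernel's sprintf, and without the sysfs buffer contract) might look like:

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    /* Format v as e.g. "1.5M"; buf should hold at least 8 bytes. */
    static int hprint(char *buf, size_t len, int64_t v)
    {
    	static const char units[] = "?kMGTPEZY";
    	uint64_t q = (v < 0) ? 0 - (uint64_t)v : (uint64_t)v;
    	unsigned int u = 0, t = 0;

    	/* Shift by 10 until at most three digits remain, at least once. */
    	do {
    		u++;
    		t = q & ((1 << 10) - 1);	/* remainder for the fraction */
    		q >>= 10;
    	} while (q >= 1000);

    	return snprintf(buf, len, "%s%" PRIu64 ".%u%c",
    			v < 0 ? "-" : "", q, t * 10 / 1024, units[u]);
    }

    int main(void)
    {
    	char buf[16];

    	hprint(buf, sizeof(buf), 1536 * 1024);	/* prints "1.5M" */
    	puts(buf);
    	return 0;
    }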
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index b9346cd9cda1..bbb1dc9e1639 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -21,7 +21,8 @@
static void __update_writeback_rate(struct cached_dev *dc)
{
struct cache_set *c = dc->disk.c;
- uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
+ uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
+ bcache_flash_devs_sectors_dirty(c);
uint64_t cache_dirty_target =
div_u64(cache_sectors * dc->writeback_percent, 100);
@@ -190,7 +191,7 @@ static void write_dirty(struct closure *cl)
closure_bio_submit(&io->bio, cl);
- continue_at(cl, write_dirty_finish, system_wq);
+ continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}
static void read_dirty_endio(struct bio *bio)
@@ -210,7 +211,7 @@ static void read_dirty_submit(struct closure *cl)
closure_bio_submit(&io->bio, cl);
- continue_at(cl, write_dirty, system_wq);
+ continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}
static void read_dirty(struct cached_dev *dc)
@@ -488,17 +489,17 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
return MAP_CONTINUE;
}
-void bch_sectors_dirty_init(struct cached_dev *dc)
+void bch_sectors_dirty_init(struct bcache_device *d)
{
struct sectors_dirty_init op;
bch_btree_op_init(&op.op, -1);
- op.inode = dc->disk.id;
+ op.inode = d->id;
- bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
+ bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
sectors_dirty_init_fn, 0);
- dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
+ d->sectors_dirty_last = bcache_dev_sectors_dirty(d);
}
void bch_cached_dev_writeback_init(struct cached_dev *dc)
@@ -522,6 +523,11 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
+ dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
+ WQ_MEM_RECLAIM, 0);
+ if (!dc->writeback_write_wq)
+ return -ENOMEM;
+
dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
"bcache_writeback");
if (IS_ERR(dc->writeback_thread))
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index 073a042aed24..daec4fd782ea 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -14,6 +14,25 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
return ret;
}
+static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
+{
+ uint64_t i, ret = 0;
+
+ mutex_lock(&bch_register_lock);
+
+ for (i = 0; i < c->nr_uuids; i++) {
+ struct bcache_device *d = c->devices[i];
+
+ if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
+ continue;
+ ret += bcache_dev_sectors_dirty(d);
+ }
+
+ mutex_unlock(&bch_register_lock);
+
+ return ret;
+}
+
static inline unsigned offset_to_stripe(struct bcache_device *d,
uint64_t offset)
{
@@ -85,7 +104,7 @@ static inline void bch_writeback_add(struct cached_dev *dc)
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
-void bch_sectors_dirty_init(struct cached_dev *dc);
+void bch_sectors_dirty_init(struct bcache_device *);
void bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);
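
The new writeback.h helper above walks every registered device, skips anything that is not a flash-only volume, and sums the dirty sector counts so that __update_writeback_rate can subtract them from the usable cache size. A small standalone sketch of the same aggregation, with a hypothetical flash_only flag and dirty field standing in for UUID_FLASH_ONLY() and bcache_dev_sectors_dirty() and without bch_register_lock, could be:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct dev {
    	bool flash_only;	/* stands in for UUID_FLASH_ONLY() */
    	uint64_t dirty;		/* stands in for bcache_dev_sectors_dirty() */
    };

    static uint64_t flash_devs_sectors_dirty(struct dev **devs, unsigned int n)
    {
    	uint64_t ret = 0;
    	unsigned int i;

    	for (i = 0; i < n; i++) {
    		struct dev *d = devs[i];

    		if (!d || !d->flash_only)
    			continue;
    		ret += d->dirty;
    	}
    	return ret;
    }

    int main(void)
    {
    	struct dev a = { .flash_only = true,  .dirty = 128 };
    	struct dev b = { .flash_only = false, .dirty = 999 };
    	struct dev *devs[] = { &a, &b, NULL };

    	printf("%llu\n", (unsigned long long)flash_devs_sectors_dirty(devs, 3));
    	return 0;
    }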
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 4f22e919787a..7a50728b9389 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1960,6 +1960,11 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
long pages;
struct bitmap_page *new_bp;
+ if (bitmap->storage.file && !init) {
+ pr_info("md: cannot resize file-based bitmap\n");
+ return -EINVAL;
+ }
+
if (chunksize == 0) {
/* If there is enough space, leave the chunk size unchanged,
* else increase by factor of two until there is enough space.
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 0da5efaad85c..54e50fc908e9 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2288,7 +2288,7 @@ static void do_waker(struct work_struct *ws)
static int is_congested(struct dm_dev *dev, int bdi_bits)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return bdi_congested(&q->backing_dev_info, bdi_bits);
+ return bdi_congested(q->backing_dev_info, bdi_bits);
}
static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 32e76c5ee741..11c52567304f 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1379,7 +1379,7 @@ static void stop_worker(struct era *era)
static int dev_is_congested(struct dm_dev *dev, int bdi_bits)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
- return bdi_congested(&q->backing_dev_info, bdi_bits);
+ return bdi_congested(q->backing_dev_info, bdi_bits);
}
static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index b3d78bba3a79..9411deaaddf9 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1660,7 +1660,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
char b[BDEVNAME_SIZE];
if (likely(q))
- r |= bdi_congested(&q->backing_dev_info, bdi_bits);
+ r |= bdi_congested(q->backing_dev_info, bdi_bits);
else
DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
dm_device_name(t->md),
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index a1cc797fe88f..5f1a943d9e81 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2634,7 +2634,7 @@ static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
return 1;
q = bdev_get_queue(pt->data_dev->bdev);
- return bdi_congested(&q->backing_dev_info, bdi_bits);
+ return bdi_congested(q->backing_dev_info, bdi_bits);
}
static void requeue_bios(struct pool *pool)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 1a7b11d57256..47ac131099d9 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2220,7 +2220,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
* the query about congestion status of request_queue
*/
if (dm_request_based(md))
- r = md->queue->backing_dev_info.wb.state &
+ r = md->queue->backing_dev_info->wb.state &
bdi_bits;
else
r = dm_table_any_congested(map, bdi_bits);
@@ -2302,7 +2302,7 @@ static void dm_init_md_queue(struct mapped_device *md)
* - must do so here (in alloc_dev callchain) before queue is used
*/
md->queue->queuedata = md;
- md->queue->backing_dev_info.congested_data = md;
+ md->queue->backing_dev_info->congested_data = md;
}
static void dm_init_old_md_queue(struct mapped_device *md)
@@ -2313,7 +2313,7 @@ static void dm_init_old_md_queue(struct mapped_device *md)
/*
* Initialize aspects of queue that aren't relevant for blk-mq
*/
- md->queue->backing_dev_info.congested_fn = dm_any_congested;
+ md->queue->backing_dev_info->congested_fn = dm_any_congested;
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
}
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 6ba3227e29b2..b19205ea1a10 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -68,7 +68,7 @@ static int linear_congested(struct mddev *mddev, int bits)
for (i = 0; i < conf->raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
}
rcu_read_unlock();
@@ -223,7 +223,8 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
* oldconf until no one uses it anymore.
*/
mddev_suspend(mddev);
- oldconf = rcu_dereference(mddev->private);
+ oldconf = rcu_dereference_protected(mddev->private,
+ lockdep_is_held(&mddev->reconfig_mutex));
mddev->raid_disks++;
WARN_ONCE(mddev->raid_disks != newconf->raid_disks,
"copied raid_disks doesn't match mddev->raid_disks");
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0a856cb181e9..1cd819202553 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5284,8 +5284,8 @@ int md_run(struct mddev *mddev)
return err;
}
if (mddev->queue) {
- mddev->queue->backing_dev_info.congested_data = mddev;
- mddev->queue->backing_dev_info.congested_fn = md_congested;
+ mddev->queue->backing_dev_info->congested_data = mddev;
+ mddev->queue->backing_dev_info->congested_fn = md_congested;
}
if (pers->sync_request) {
if (mddev->kobj.sd &&
@@ -5642,7 +5642,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
__md_stop_writes(mddev);
__md_stop(mddev);
- mddev->queue->backing_dev_info.congested_fn = NULL;
+ mddev->queue->backing_dev_info->congested_fn = NULL;
/* tell userspace to handle 'inactive' */
sysfs_notify_dirent_safe(mddev->sysfs_state);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index dd483bb2e111..fb03ed86d57a 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -166,7 +166,7 @@ static int multipath_congested(struct mddev *mddev, int bits)
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct request_queue *q = bdev_get_queue(rdev->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
/* Just like multipath_map, we just check the
* first available device
*/
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index f8e5db0cb5aa..7a67e7dcf546 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -35,7 +35,7 @@ static int raid0_congested(struct mddev *mddev, int bits)
for (i = 0; i < raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
}
return ret;
}
@@ -415,8 +415,8 @@ static int raid0_run(struct mddev *mddev)
*/
int stripe = mddev->raid_disks *
(mddev->chunk_sectors << 9) / PAGE_SIZE;
- if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
- mddev->queue->backing_dev_info.ra_pages = 2* stripe;
+ if (mddev->queue->backing_dev_info->ra_pages < 2* stripe)
+ mddev->queue->backing_dev_info->ra_pages = 2* stripe;
}
dump_zones(mddev);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f24a9e14021d..a3ec3c5a8ee9 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -730,9 +730,9 @@ static int raid1_congested(struct mddev *mddev, int bits)
* non-congested targets, it can be removed
*/
if ((bits & (1 << WB_async_congested)) || 1)
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
else
- ret &= bdi_congested(&q->backing_dev_info, bits);
+ ret &= bdi_congested(q->backing_dev_info, bits);
}
}
rcu_read_unlock();
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e5ee4e9e0ea5..a67e1a36733f 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -838,7 +838,7 @@ static int raid10_congested(struct mddev *mddev, int bits)
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct request_queue *q = bdev_get_queue(rdev->bdev);
- ret |= bdi_congested(&q->backing_dev_info, bits);
+ ret |= bdi_congested(q->backing_dev_info, bits);
}
}
rcu_read_unlock();
@@ -1414,11 +1414,24 @@ retry_write:
mbio->bi_private = r10_bio;
atomic_inc(&r10_bio->remaining);
+
+ cb = blk_check_plugged(raid10_unplug, mddev,
+ sizeof(*plug));
+ if (cb)
+ plug = container_of(cb, struct raid10_plug_cb,
+ cb);
+ else
+ plug = NULL;
spin_lock_irqsave(&conf->device_lock, flags);
- bio_list_add(&conf->pending_bio_list, mbio);
- conf->pending_count++;
+ if (plug) {
+ bio_list_add(&plug->pending, mbio);
+ plug->pending_cnt++;
+ } else {
+ bio_list_add(&conf->pending_bio_list, mbio);
+ conf->pending_count++;
+ }
spin_unlock_irqrestore(&conf->device_lock, flags);
- if (!mddev_check_plugged(mddev))
+ if (!plug)
md_wakeup_thread(mddev->thread);
}
}
@@ -3698,8 +3711,8 @@ static int run(struct mddev *mddev)
* maybe...
*/
stripe /= conf->geo.near_copies;
- if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
- mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+ if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+ mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
}
if (md_integrity_register(mddev))
@@ -4493,8 +4506,8 @@ static void end_reshape(struct r10conf *conf)
int stripe = conf->geo.raid_disks *
((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
stripe /= conf->geo.near_copies;
- if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
- conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+ if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+ conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
}
conf->fullsync = 0;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8f60520c8392..867414210e8d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -818,6 +818,14 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
spin_unlock(&head->batch_head->batch_lock);
goto unlock_out;
}
+ /*
+ * We must assign this stripe's batch_head while holding
+ * batch_lock; otherwise clear_batch_ready of the batch-head
+ * stripe could clear this stripe's BATCH_READY bit before
+ * its batch_head is assigned, which would confuse
+ * clear_batch_ready for this stripe
+ */
+ sh->batch_head = head->batch_head;
/*
* at this point, head's BATCH_READY could be cleared, but we
@@ -825,8 +833,6 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
*/
list_add(&sh->batch_list, &head->batch_list);
spin_unlock(&head->batch_head->batch_lock);
-
- sh->batch_head = head->batch_head;
} else {
head->batch_head = head;
sh->batch_head = head->batch_head;
@@ -4258,7 +4264,8 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
(1 << STRIPE_PREREAD_ACTIVE) |
- (1 << STRIPE_DEGRADED)),
+ (1 << STRIPE_DEGRADED) |
+ (1 << STRIPE_ON_UNPLUG_LIST)),
head_sh->state & (1 << STRIPE_INSYNC));
sh->check_state = head_sh->check_state;
@@ -5822,6 +5829,8 @@ static void raid5_do_work(struct work_struct *work)
spin_unlock_irq(&conf->device_lock);
+ r5l_flush_stripe_to_raid(conf->log);
+
async_tx_issue_pending_all();
blk_finish_plug(&plug);
@@ -6121,10 +6130,10 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
mddev_suspend(mddev);
conf->skip_copy = new;
if (new)
- mddev->queue->backing_dev_info.capabilities |=
+ mddev->queue->backing_dev_info->capabilities |=
BDI_CAP_STABLE_WRITES;
else
- mddev->queue->backing_dev_info.capabilities &=
+ mddev->queue->backing_dev_info->capabilities &=
~BDI_CAP_STABLE_WRITES;
mddev_resume(mddev);
}
@@ -6968,8 +6977,8 @@ static int run(struct mddev *mddev)
int data_disks = conf->previous_raid_disks - conf->max_degraded;
int stripe = data_disks *
((mddev->chunk_sectors << 9) / PAGE_SIZE);
- if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
- mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+ if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+ mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size);
@@ -7550,8 +7559,8 @@ static void end_reshape(struct r5conf *conf)
int data_disks = conf->raid_disks - conf->max_degraded;
int stripe = data_disks * ((conf->chunk_sectors << 9)
/ PAGE_SIZE);
- if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
- conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+ if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+ conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
}
}
}
diff --git a/drivers/media/pci/ttpci/av7110_hw.c b/drivers/media/pci/ttpci/av7110_hw.c
index 300bd3c94738..0992bb0e207e 100644
--- a/drivers/media/pci/ttpci/av7110_hw.c
+++ b/drivers/media/pci/ttpci/av7110_hw.c
@@ -56,11 +56,11 @@
by Nathan Laredo <laredo@gnu.org> */
int av7110_debiwrite(struct av7110 *av7110, u32 config,
- int addr, u32 val, int count)
+ int addr, u32 val, unsigned int count)
{
struct saa7146_dev *dev = av7110->dev;
- if (count <= 0 || count > 32764) {
+ if (count > 32764) {
printk("%s: invalid count %d\n", __func__, count);
return -1;
}
@@ -78,12 +78,12 @@ int av7110_debiwrite(struct av7110 *av7110, u32 config,
return 0;
}
-u32 av7110_debiread(struct av7110 *av7110, u32 config, int addr, int count)
+u32 av7110_debiread(struct av7110 *av7110, u32 config, int addr, unsigned int count)
{
struct saa7146_dev *dev = av7110->dev;
u32 result = 0;
- if (count > 32764 || count <= 0) {
+ if (count > 32764) {
printk("%s: invalid count %d\n", __func__, count);
return 0;
}
diff --git a/drivers/media/pci/ttpci/av7110_hw.h b/drivers/media/pci/ttpci/av7110_hw.h
index 1634aba5cb84..ccb148059406 100644
--- a/drivers/media/pci/ttpci/av7110_hw.h
+++ b/drivers/media/pci/ttpci/av7110_hw.h
@@ -377,14 +377,14 @@ extern int av7110_fw_request(struct av7110 *av7110, u16 *request_buf,
/* DEBI (saa7146 data extension bus interface) access */
extern int av7110_debiwrite(struct av7110 *av7110, u32 config,
- int addr, u32 val, int count);
+ int addr, u32 val, unsigned int count);
extern u32 av7110_debiread(struct av7110 *av7110, u32 config,
- int addr, int count);
+ int addr, unsigned int count);
/* DEBI during interrupt */
/* single word writes */
-static inline void iwdebi(struct av7110 *av7110, u32 config, int addr, u32 val, int count)
+static inline void iwdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
{
av7110_debiwrite(av7110, config, addr, val, count);
}
@@ -397,7 +397,7 @@ static inline void mwdebi(struct av7110 *av7110, u32 config, int addr,
av7110_debiwrite(av7110, config, addr, 0, count);
}
-static inline u32 irdebi(struct av7110 *av7110, u32 config, int addr, u32 val, int count)
+static inline u32 irdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
{
u32 res;
@@ -408,7 +408,7 @@ static inline u32 irdebi(struct av7110 *av7110, u32 config, int addr, u32 val, i
}
/* DEBI outside interrupts, only for count <= 4! */
-static inline void wdebi(struct av7110 *av7110, u32 config, int addr, u32 val, int count)
+static inline void wdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
{
unsigned long flags;
@@ -417,7 +417,7 @@ static inline void wdebi(struct av7110 *av7110, u32 config, int addr, u32 val, i
spin_unlock_irqrestore(&av7110->debilock, flags);
}
-static inline u32 rdebi(struct av7110 *av7110, u32 config, int addr, u32 val, int count)
+static inline u32 rdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
{
unsigned long flags;
u32 res;
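
The DEBI changes above switch count to unsigned int, so a negative value passed by a careless caller wraps to a huge unsigned number and the single `count > 32764` test catches it along with genuinely oversized transfers; a zero count is no longer treated as an error. A standalone sketch of that validation, using a hypothetical debi_count_ok helper that is not part of the driver, might be:

    #include <stdbool.h>
    #include <stdio.h>

    #define DEBI_MAX_COUNT 32764u

    /* With an unsigned count, one comparison covers "too big" and
     * "was negative": -1 from a careless caller becomes UINT_MAX. */
    static bool debi_count_ok(unsigned int count)
    {
    	return count <= DEBI_MAX_COUNT;
    }

    int main(void)
    {
    	printf("%d %d %d\n",
    	       debi_count_ok(4),			/* 1 */
    	       debi_count_ok(40000),			/* 0 */
    	       debi_count_ok((unsigned int)-1));	/* 0 */
    	return 0;
    }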
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index 9b9e423e4fc4..15c543d4b366 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -849,9 +849,7 @@ int gsc_prepare_addr(struct gsc_ctx *ctx, struct vb2_buffer *vb,
if ((frame->fmt->pixelformat == V4L2_PIX_FMT_VYUY) ||
(frame->fmt->pixelformat == V4L2_PIX_FMT_YVYU) ||
- (frame->fmt->pixelformat == V4L2_PIX_FMT_NV61) ||
(frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) ||
- (frame->fmt->pixelformat == V4L2_PIX_FMT_NV21) ||
(frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420M))
swap(addr->cb, addr->cr);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index 981832b5a586..e04229fed666 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -35,12 +35,13 @@
#define VFE40_STATS_BURST_LEN_8916_VERSION 2
#define VFE40_FETCH_BURST_LEN 3
#define VFE40_UB_SIZE 1536 /* 1536 * 128 bits = 24KB */
+#define VFE40_STATS_SIZE 392
#define VFE40_UB_SIZE_8952 2048 /* 2048 * 128 bits = 32KB */
#define VFE40_UB_SIZE_8916 3072 /* 3072 * 128 bits = 48KB */
#define VFE40_EQUAL_SLICE_UB 190 /* (UB_SIZE - STATS SIZE)/6 */
#define VFE40_EQUAL_SLICE_UB_8916 236
#define VFE40_TOTAL_WM_UB 1144 /* UB_SIZE - STATS SIZE */
-#define VFE40_TOTAL_WM_UB_8916 1656
+#define VFE40_TOTAL_WM_UB_8916 2680
#define VFE40_WM_BASE(idx) (0x6C + 0x24 * idx)
#define VFE40_RDI_BASE(idx) (0x2E8 + 0x4 * idx)
#define VFE40_XBAR_BASE(idx) (0x58 + 0x4 * (idx / 2))
@@ -104,7 +105,11 @@ static uint32_t msm_vfe40_ub_reg_offset(struct vfe_device *vfe_dev, int idx)
static uint32_t msm_vfe40_get_ub_size(struct vfe_device *vfe_dev)
{
- if (vfe_dev->vfe_hw_version == VFE40_8916_VERSION) {
+ if (vfe_dev->vfe_hw_version == VFE40_8916_VERSION ||
+ vfe_dev->vfe_hw_version == VFE40_8939_VERSION ||
+ vfe_dev->vfe_hw_version == VFE40_8937_VERSION ||
+ vfe_dev->vfe_hw_version == VFE40_8953_VERSION ||
+ vfe_dev->vfe_hw_version == VFE40_8917_VERSION) {
vfe_dev->ub_info->wm_ub = VFE40_TOTAL_WM_UB_8916;
return VFE40_TOTAL_WM_UB_8916;
}
@@ -586,6 +591,11 @@ static void msm_vfe40_read_and_clear_irq_status(struct vfe_device *vfe_dev,
*irq_status0 &= vfe_dev->irq0_mask;
*irq_status1 &= vfe_dev->irq1_mask;
+ if (*irq_status0 &&
+ (*irq_status0 == msm_camera_io_r(vfe_dev->vfe_base + 0x38))) {
+ msm_camera_io_w(*irq_status0, vfe_dev->vfe_base + 0x30);
+ msm_camera_io_w_mb(1, vfe_dev->vfe_base + 0x24);
+ }
if (*irq_status1 & (1 << 0)) {
vfe_dev->error_info.camif_status =
@@ -2188,7 +2198,7 @@ static struct msm_vfe_axi_hardware_info msm_vfe40_axi_hw_info = {
.num_comp_mask = 3,
.num_rdi = 3,
.num_rdi_master = 3,
- .min_wm_ub = 64,
+ .min_wm_ub = 96,
.scratch_buf_range = SZ_32M + SZ_4M,
};
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
index 6716bb6caad6..70950a88fc66 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
@@ -2561,6 +2561,7 @@ int msm_vfe47_get_clks(struct vfe_device *vfe_dev)
if (rc)
return rc;
+ vfe_dev->num_norm_clk = vfe_dev->num_clk;
for (i = 0; i < vfe_dev->num_clk; i++) {
if (strcmp(vfe_dev->vfe_clk_info[i].clk_name,
"camss_vfe_stream_clk") == 0) {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 66c5ce11ea3d..5bcb3034b82a 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -2916,6 +2916,8 @@ static void __msm_isp_stop_axi_streams(struct vfe_device *vfe_dev,
* those state transitions instead of directly forcing stream to
* be INACTIVE
*/
+ memset(&stream_info->sw_skip, 0,
+ sizeof(struct msm_isp_sw_framskip));
intf = SRC_TO_INTF(stream_info->stream_src);
if (stream_info->lpm_mode == 0 &&
stream_info->state != PAUSED) {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 733aa4769941..a343bc2d59e4 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -1223,10 +1223,7 @@ int msm_vidc_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize)
}
if (capability) {
- fsize->type = capability->width.step_size == 1 &&
- capability->height.step_size == 1 ?
- V4L2_FRMSIZE_TYPE_CONTINUOUS :
- V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
fsize->stepwise.min_width = capability->width.min;
fsize->stepwise.max_width = capability->width.max;
fsize->stepwise.step_width = capability->width.step_size;
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index 57d2f89350d2..9532235b07de 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -2004,6 +2004,13 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
goto done;
}
+ /* Validate the user-provided bit-size and offset */
+ if (mapping->size > 32 ||
+ mapping->offset + mapping->size > ctrl->info.size * 8) {
+ ret = -EINVAL;
+ goto done;
+ }
+
list_for_each_entry(map, &ctrl->info.mappings, list) {
if (mapping->id == map->id) {
uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', "
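
The check added to uvc_ctrl_add_mapping above rejects user-supplied mappings whose bit field does not fit inside the control value: the size must not exceed 32 bits and offset plus size must stay within ctrl->info.size bytes expressed in bits. A standalone sketch of the same bounds test, with hypothetical names and sizes in the same units as the driver, could be:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* ctrl_size is the control payload size in bytes, as in ctrl->info.size. */
    static bool mapping_fits(uint32_t offset, uint32_t size, uint32_t ctrl_size)
    {
    	if (size > 32)
    		return false;
    	/* offset and size are in bits; the control spans ctrl_size * 8 bits. */
    	if (offset + size > ctrl_size * 8)
    		return false;
    	return true;
    }

    int main(void)
    {
    	printf("%d\n", mapping_fits(0, 16, 2));	/* 1: 16 bits in a 2-byte control */
    	printf("%d\n", mapping_fits(8, 16, 2));	/* 0: spills past the last bit */
    	printf("%d\n", mapping_fits(0, 64, 16));	/* 0: size capped at 32 */
    	return 0;
    }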
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 9db0dac938d3..18045a7e24e0 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -788,7 +788,8 @@ static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *u
copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
put_user(kp->pending, &up->pending) ||
put_user(kp->sequence, &up->sequence) ||
- compat_put_timespec(&kp->timestamp, &up->timestamp) ||
+ put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
+ put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) ||
put_user(kp->id, &up->id) ||
copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
return -EFAULT;
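
The compat ioctl change above copies the event timestamp field by field instead of going through compat_put_timespec, so the kernel timespec is narrowed into the 32-bit layout the userspace structure expects. Outside the kernel the same idea is just an explicit member-by-member copy; a sketch with hypothetical struct names might be:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    /* 32-bit userspace view of a timespec, as a compat layer would see it. */
    struct compat_timespec32 {
    	int32_t tv_sec;
    	int32_t tv_nsec;
    };

    static void put_compat_timestamp(const struct timespec *ts,
    				 struct compat_timespec32 *out)
    {
    	/* Copy each member explicitly rather than assigning structs with
    	 * mismatched layouts, mirroring the per-field put_user() calls. */
    	out->tv_sec = (int32_t)ts->tv_sec;
    	out->tv_nsec = (int32_t)ts->tv_nsec;
    }

    int main(void)
    {
    	struct timespec ts = { .tv_sec = 1500000000, .tv_nsec = 123456789 };
    	struct compat_timespec32 cts;

    	put_compat_timestamp(&ts, &cts);
    	printf("%" PRId32 ".%09" PRId32 "\n", cts.tv_sec, cts.tv_nsec);
    	return 0;
    }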
diff --git a/drivers/mfd/wcd9xxx-core.c b/drivers/mfd/wcd9xxx-core.c
index 5ffb21b8e1e5..65195f55d6f0 100644
--- a/drivers/mfd/wcd9xxx-core.c
+++ b/drivers/mfd/wcd9xxx-core.c
@@ -1399,19 +1399,19 @@ static int wcd9xxx_slim_probe(struct slim_device *slim)
("wcd9xxx_core", 0);
if (!IS_ERR(debugfs_wcd9xxx_dent)) {
debugfs_peek = debugfs_create_file("slimslave_peek",
- S_IFREG | S_IRUGO, debugfs_wcd9xxx_dent,
+ S_IFREG | S_IRUSR, debugfs_wcd9xxx_dent,
(void *) "slimslave_peek", &codec_debug_ops);
debugfs_poke = debugfs_create_file("slimslave_poke",
- S_IFREG | S_IRUGO, debugfs_wcd9xxx_dent,
+ S_IFREG | S_IRUSR, debugfs_wcd9xxx_dent,
(void *) "slimslave_poke", &codec_debug_ops);
debugfs_power_state = debugfs_create_file("power_state",
- S_IFREG | S_IRUGO, debugfs_wcd9xxx_dent,
+ S_IFREG | S_IRUSR, debugfs_wcd9xxx_dent,
(void *) "power_state", &codec_debug_ops);
debugfs_reg_dump = debugfs_create_file("slimslave_reg_dump",
- S_IFREG | S_IRUGO, debugfs_wcd9xxx_dent,
+ S_IFREG | S_IRUSR, debugfs_wcd9xxx_dent,
(void *) "slimslave_reg_dump", &codec_debug_ops);
}
#endif
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index ea3eeb7011e1..690eb1a18caf 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -176,6 +176,10 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
kernel = false;
}
+ /*
+ * Increment driver use count. Enables global TLBIs for hash
+ * and callbacks to handle the segment table
+ */
cxl_ctx_get();
if ((rc = cxl_attach_process(ctx, kernel, wed , 0))) {
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 10a02934bfc0..013558f4da4f 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -94,7 +94,6 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
pr_devel("afu_open pe: %i\n", ctx->pe);
file->private_data = ctx;
- cxl_ctx_get();
/* indicate success */
rc = 0;
@@ -205,11 +204,18 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
ctx->pid = get_task_pid(current, PIDTYPE_PID);
ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
+ /*
+ * Increment driver use count. Enables global TLBIs for hash
+ * and callbacks to handle the segment table
+ */
+ cxl_ctx_get();
+
trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
if ((rc = cxl_attach_process(ctx, false, work.work_element_descriptor,
amr))) {
afu_release_irqs(ctx, ctx);
+ cxl_ctx_put();
goto out;
}
diff --git a/drivers/misc/hdcp.c b/drivers/misc/hdcp.c
index 687f55bd5afd..87daee7cf1c6 100644
--- a/drivers/misc/hdcp.c
+++ b/drivers/misc/hdcp.c
@@ -38,6 +38,7 @@
#include "qseecom_kernel.h"
+#define SRMAPP_NAME "hdcpsrm"
#define TZAPP_NAME "hdcp2p2"
#define HDCP1_APP_NAME "hdcp1"
#define QSEECOM_SBUFF_SIZE 0x1000
@@ -138,6 +139,7 @@
#define HDCP_SESSION_INIT SERVICE_CREATE_CMD(16)
#define HDCP_SESSION_DEINIT SERVICE_CREATE_CMD(17)
#define HDCP_TXMTR_START_AUTHENTICATE SERVICE_CREATE_CMD(18)
+#define HDCP_TXMTR_VALIDATE_RECEIVER_ID_LIST SERVICE_CREATE_CMD(19)
#define HCDP_TXMTR_GET_MAJOR_VERSION(v) (((v) >> 16) & 0xFF)
#define HCDP_TXMTR_GET_MINOR_VERSION(v) (((v) >> 8) & 0xFF)
@@ -485,6 +487,15 @@ struct __attribute__ ((__packed__)) hdcp_start_auth_rsp {
uint8_t message[MAX_TX_MESSAGE_SIZE];
};
+struct __attribute__ ((__packed__)) hdcp_rcv_id_list_req {
+ uint32_t commandid;
+ uint32_t ctxHandle;
+};
+struct __attribute__ ((__packed__)) hdcp_rcv_id_list_rsp {
+ uint32_t status;
+ uint32_t commandid;
+};
+
/*
* struct hdcp_lib_handle - handle for hdcp client
* @qseecom_handle - for sending commands to qseecom
@@ -575,6 +586,8 @@ static int hdcp_lib_txmtr_init(struct hdcp_lib_handle *handle);
static int hdcp_lib_txmtr_init_legacy(struct hdcp_lib_handle *handle);
static struct qseecom_handle *hdcp1_handle;
+static struct qseecom_handle *hdcpsrm_handle;
+
static bool hdcp1_supported = true;
static bool hdcp1_enc_enabled;
static struct mutex hdcp1_ta_cmd_lock;
@@ -1044,6 +1057,15 @@ static int hdcp_lib_library_load(struct hdcp_lib_handle *handle)
goto exit;
}
+ if (!hdcpsrm_handle) {
+ rc = qseecom_start_app(&hdcpsrm_handle,
+ SRMAPP_NAME, QSEECOM_SBUFF_SIZE);
+ if (rc) {
+ pr_err("qseecom_start_app failed for SRM TA %d\n", rc);
+ goto exit;
+ }
+ }
+
handle->hdcp_state |= HDCP_STATE_APP_LOADED;
pr_debug("qseecom_start_app success\n");
@@ -1113,10 +1135,17 @@ static int hdcp_lib_library_unload(struct hdcp_lib_handle *handle)
goto exit;
}
- /* deallocate the resources for qseecom handle */
+ /* deallocate the resources for qseecom hdcp2p2 handle */
rc = qseecom_shutdown_app(&handle->qseecom_handle);
if (rc) {
- pr_err("qseecom_shutdown_app failed err: %d\n", rc);
+ pr_err("hdcp2p2 qseecom_shutdown_app failed err: %d\n", rc);
+ goto exit;
+ }
+
+ /* deallocate the resources for qseecom hdcpsrm handle */
+ rc = qseecom_shutdown_app(&hdcpsrm_handle);
+ if (rc) {
+ pr_err("hdcpsrm qseecom_shutdown_app failed err: %d\n", rc);
goto exit;
}
@@ -2322,6 +2351,48 @@ int hdcp1_set_keys(uint32_t *aksv_msb, uint32_t *aksv_lsb)
return 0;
}
+static int hdcp_validate_recv_id(struct hdcp_lib_handle *handle)
+{
+ int rc = 0;
+ struct hdcp_rcv_id_list_req *recv_id_req;
+ struct hdcp_rcv_id_list_rsp *recv_id_rsp;
+
+ if (!handle || !handle->qseecom_handle ||
+ !handle->qseecom_handle->sbuf) {
+ pr_err("invalid handle\n");
+ return -EINVAL;
+ }
+
+ /* validate the receiver ID list against the new SRM blob */
+ recv_id_req = (struct hdcp_rcv_id_list_req *)
+ handle->qseecom_handle->sbuf;
+ recv_id_req->commandid = HDCP_TXMTR_VALIDATE_RECEIVER_ID_LIST;
+ recv_id_req->ctxHandle = handle->tz_ctxhandle;
+
+ recv_id_rsp = (struct hdcp_rcv_id_list_rsp *)
+ (handle->qseecom_handle->sbuf +
+ QSEECOM_ALIGN(sizeof(struct hdcp_rcv_id_list_req)));
+
+ rc = qseecom_send_command(handle->qseecom_handle,
+ recv_id_req,
+ QSEECOM_ALIGN(sizeof(struct hdcp_rcv_id_list_req)),
+ recv_id_rsp,
+ QSEECOM_ALIGN(sizeof(struct hdcp_rcv_id_list_rsp)));
+
+
+ if ((rc < 0) || (recv_id_rsp->status != HDCP_SUCCESS) ||
+ (recv_id_rsp->commandid !=
+ HDCP_TXMTR_VALIDATE_RECEIVER_ID_LIST)) {
+ pr_err("qseecom cmd failed with err = %d status = %d\n",
+ rc, recv_id_rsp->status);
+ rc = -EINVAL;
+ goto exit;
+ }
+
+exit:
+ return rc;
+}
+
int hdcp1_set_enc(bool enable)
{
int rc = 0;
@@ -2601,12 +2672,44 @@ static ssize_t hdmi_hdcp2p2_sysfs_wta_min_level_change(struct device *dev,
return ret;
}
+static ssize_t hdmi_hdcp_srm_updated(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int rc;
+ int srm_updated;
+ struct hdcp_lib_handle *handle;
+ ssize_t ret = count;
+
+ handle = hdcp_drv_mgr->handle;
+
+ rc = kstrtoint(buf, 10, &srm_updated);
+ if (rc) {
+ pr_err("%s: kstrtoint failed. rc=%d\n", __func__, rc);
+ return -EINVAL;
+ }
+
+ if (srm_updated) {
+ if (hdcp_validate_recv_id(handle)) {
+ pr_debug("SRM check FAILED\n");
+ if (handle && handle->client_ops->srm_cb)
+ handle->client_ops->srm_cb(handle->client_ctx);
+ } else {
+ pr_debug("SRM check PASSED\n");
+ }
+ }
+
+ return ret;
+}
+
static DEVICE_ATTR(tp, S_IRUGO | S_IWUSR, msm_hdcp_1x_sysfs_rda_tp,
msm_hdcp_1x_sysfs_wta_tp);
static DEVICE_ATTR(min_level_change, S_IWUSR, NULL,
hdmi_hdcp2p2_sysfs_wta_min_level_change);
+static DEVICE_ATTR(srm_updated, S_IWUSR, NULL,
+hdmi_hdcp_srm_updated);
+
void hdcp1_cache_repeater_topology(void *hdcp1_cached_tp)
{
memcpy((void *)&hdcp_drv_mgr->cached_tp,
@@ -2617,6 +2720,7 @@ void hdcp1_cache_repeater_topology(void *hdcp1_cached_tp)
static struct attribute *msm_hdcp_fs_attrs[] = {
&dev_attr_tp.attr,
&dev_attr_min_level_change.attr,
+ &dev_attr_srm_updated.attr,
NULL
};
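
hdcp_validate_recv_id above places the request at the start of the QSEECOM shared buffer and the response at the next aligned offset, sends HDCP_TXMTR_VALIDATE_RECEIVER_ID_LIST, and checks the return code, the response status, and the echoed command id. A standalone sketch of that buffer layout, with a hypothetical ALIGN64 macro standing in for QSEECOM_ALIGN and plain memory instead of the secure-world buffer, could be:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ALIGN64(x) (((x) + 63u) & ~63u)	/* stand-in for QSEECOM_ALIGN() */

    struct __attribute__((__packed__)) req {
    	uint32_t commandid;
    	uint32_t ctxhandle;
    };

    struct __attribute__((__packed__)) rsp {
    	uint32_t status;
    	uint32_t commandid;
    };

    int main(void)
    {
    	uint8_t sbuf[256];
    	struct req *q = (struct req *)sbuf;
    	struct rsp *r = (struct rsp *)(sbuf + ALIGN64(sizeof(*q)));

    	memset(sbuf, 0, sizeof(sbuf));
    	q->commandid = 0;	/* would be HDCP_TXMTR_VALIDATE_RECEIVER_ID_LIST */
    	q->ctxhandle = 0x1234;

    	/* The secure app would fill *r; here we only show the offsets. */
    	printf("req at %zu, rsp at %zu\n",
    	       (size_t)((uint8_t *)q - sbuf), (size_t)((uint8_t *)r - sbuf));
    	return 0;
    }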
diff --git a/drivers/misc/qcom/qdsp6v2/audio_aac.c b/drivers/misc/qcom/qdsp6v2/audio_aac.c
index 94d563a211ec..1f02576a0848 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_aac.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_aac.c
@@ -2,7 +2,7 @@
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -46,7 +46,9 @@ static long audio_ioctl_shared(struct file *file, unsigned int cmd,
audio->ac->session);
if (audio->feedback == NON_TUNNEL_MODE) {
/* Configure PCM output block */
- rc = q6asm_enc_cfg_blk_pcm(audio->ac, 0, 0);
+ rc = q6asm_enc_cfg_blk_pcm(audio->ac,
+ audio->pcm_cfg.sample_rate,
+ audio->pcm_cfg.channel_count);
if (rc < 0) {
pr_err("pcm output block config failed\n");
break;
diff --git a/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c b/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c
index 91bbba176dfd..42b45ec7d9d9 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c
@@ -2,7 +2,7 @@
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -58,9 +58,9 @@ static long audio_ioctl_shared(struct file *file, unsigned int cmd,
audio->ac->session);
if (audio->feedback == NON_TUNNEL_MODE) {
/* Configure PCM output block */
- rc = q6asm_enc_cfg_blk_pcm_native(audio->ac,
- aac_cfg.sample_rate,
- aac_cfg.ch_cfg);
+ rc = q6asm_enc_cfg_blk_pcm(audio->ac,
+ audio->pcm_cfg.sample_rate,
+ audio->pcm_cfg.channel_count);
if (rc < 0) {
pr_err("pcm output block config failed\n");
break;
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 72bfdd835178..26fa4a4d96b0 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -591,9 +591,10 @@ static ssize_t mmc_wr_pack_stats_read(struct file *filp, char __user *ubuf,
{
struct mmc_card *card = filp->private_data;
struct mmc_wr_pack_stats *pack_stats;
- int i;
+ int i, ret = 0;
int max_num_of_packed_reqs = 0;
- char *temp_buf;
+ char *temp_buf, *temp_ubuf;
+ size_t tubuf_cnt = 0;
if (!card)
return cnt;
@@ -619,15 +620,24 @@ static ssize_t mmc_wr_pack_stats_read(struct file *filp, char __user *ubuf,
max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
- temp_buf = kmalloc(TEMP_BUF_SIZE, GFP_KERNEL);
+ if (cnt <= (strlen_user(ubuf) + 1))
+ goto exit;
+
+ temp_buf = kzalloc(TEMP_BUF_SIZE, GFP_KERNEL);
if (!temp_buf)
goto exit;
+ tubuf_cnt = cnt - strlen_user(ubuf) - 1;
+
+ temp_ubuf = kzalloc(tubuf_cnt, GFP_KERNEL);
+ if (!temp_ubuf)
+ goto cleanup;
+
spin_lock(&pack_stats->lock);
snprintf(temp_buf, TEMP_BUF_SIZE, "%s: write packing statistics:\n",
mmc_hostname(card->host));
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
for (i = 1 ; i <= max_num_of_packed_reqs ; ++i) {
if (pack_stats->packing_events[i]) {
@@ -635,63 +645,63 @@ static ssize_t mmc_wr_pack_stats_read(struct file *filp, char __user *ubuf,
"%s: Packed %d reqs - %d times\n",
mmc_hostname(card->host), i,
pack_stats->packing_events[i]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
}
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: stopped packing due to the following reasons:\n",
mmc_hostname(card->host));
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
if (pack_stats->pack_stop_reason[EXCEEDS_SEGMENTS]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: exceed max num of segments\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[EXCEEDS_SEGMENTS]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[EXCEEDS_SECTORS]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: exceed max num of sectors\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[EXCEEDS_SECTORS]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[WRONG_DATA_DIR]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: wrong data direction\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[WRONG_DATA_DIR]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[FLUSH_OR_DISCARD]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: flush or discard\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[FLUSH_OR_DISCARD]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[EMPTY_QUEUE]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: empty queue\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[EMPTY_QUEUE]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[REL_WRITE]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: rel write\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[REL_WRITE]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[THRESHOLD]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: Threshold\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[THRESHOLD]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[LARGE_SEC_ALIGN]) {
@@ -699,25 +709,36 @@ static ssize_t mmc_wr_pack_stats_read(struct file *filp, char __user *ubuf,
"%s: %d times: Large sector alignment\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[LARGE_SEC_ALIGN]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[RANDOM]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: random request\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[RANDOM]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
if (pack_stats->pack_stop_reason[FUA]) {
snprintf(temp_buf, TEMP_BUF_SIZE,
"%s: %d times: fua request\n",
mmc_hostname(card->host),
pack_stats->pack_stop_reason[FUA]);
- strlcat(ubuf, temp_buf, cnt);
+ strlcat(temp_ubuf, temp_buf, tubuf_cnt);
}
+ if (strlen_user(ubuf) < cnt - strlen(temp_ubuf))
+ ret = copy_to_user((ubuf + strlen_user(ubuf)),
+ temp_ubuf, tubuf_cnt);
+ else
+ ret = -EFAULT;
+ if (ret)
+ pr_err("%s: %s: Copy to userspace failed: %s\n",
+ mmc_hostname(card->host), __func__, ubuf);
spin_unlock(&pack_stats->lock);
+ kfree(temp_ubuf);
+
+cleanup:
kfree(temp_buf);
pr_info("%s", ubuf);
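
The reworked mmc_wr_pack_stats_read above stops strlcat-ing straight into the user pointer: each line is formatted into a kernel temp buffer, accumulated in a second bounded buffer sized from what is left of the user buffer, and handed over with a single copy_to_user at the end. A user-space sketch of the "format into a bounded scratch buffer, then hand it over once" pattern, using plain snprintf bookkeeping rather than strlen_user/copy_to_user semantics, might be:

    #include <stdio.h>
    #include <string.h>

    /* Append formatted text to buf, tracking how much room is left. */
    static size_t append(char *buf, size_t cap, size_t used, const char *line)
    {
    	int n;

    	if (used + 1 >= cap)		/* buffer already full */
    		return used;
    	n = snprintf(buf + used, cap - used, "%s", line);
    	if (n < 0)
    		return used;
    	if ((size_t)n >= cap - used)	/* truncated */
    		return cap - 1;
    	return used + (size_t)n;
    }

    int main(void)
    {
    	char scratch[128];
    	size_t used = 0;

    	used = append(scratch, sizeof(scratch), used, "write packing statistics:\n");
    	used = append(scratch, sizeof(scratch), used, "Packed 2 reqs - 5 times\n");

    	/* One hand-off at the end, instead of many partial writes. */
    	fwrite(scratch, 1, used, stdout);
    	return 0;
    }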
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index e32ed3d28b06..60984899b135 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -277,7 +277,7 @@ static void sdio_release_func(struct device *dev)
sdio_free_func_cis(func);
kfree(func->info);
-
+ kfree(func->tmpbuf);
kfree(func);
}
@@ -292,6 +292,16 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card)
if (!func)
return ERR_PTR(-ENOMEM);
+ /*
+ * allocate buffer separately to make sure it's properly aligned for
+ * DMA usage (incl. 64 bit DMA)
+ */
+ func->tmpbuf = kmalloc(4, GFP_KERNEL);
+ if (!func->tmpbuf) {
+ kfree(func);
+ return ERR_PTR(-ENOMEM);
+ }
+
func->card = card;
device_initialize(&func->dev);
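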
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4cd2a7d0124f..7923bfdc9b30 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -3676,7 +3676,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
u32 tempval1 = gfar_read(&regs->maccfg1);
u32 tempval = gfar_read(&regs->maccfg2);
u32 ecntrl = gfar_read(&regs->ecntrl);
- u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW);
+ u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
if (phydev->duplex != priv->oldduplex) {
if (!(phydev->duplex))
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index fdb5cdb3cd15..81abe46c9e0d 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -402,7 +402,7 @@ static int mal_poll(struct napi_struct *napi, int budget)
unsigned long flags;
MAL_DBG2(mal, "poll(%d)" NL, budget);
- again:
+
/* Process TX skbs */
list_for_each(l, &mal->poll_list) {
struct mal_commac *mc =
@@ -451,7 +451,6 @@ static int mal_poll(struct napi_struct *napi, int budget)
spin_lock_irqsave(&mal->lock, flags);
mal_disable_eob_irq(mal);
spin_unlock_irqrestore(&mal->lock, flags);
- goto again;
}
mc->ops->poll_tx(mc->dev);
}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index fa3b4cbea23b..a481ea64e287 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -7658,6 +7658,11 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
+ /* In case of a PCI error, the adapter loses its HW address,
+ * so we should re-assign it here.
+ */
+ hw->hw_addr = adapter->io_addr;
+
igb_reset(adapter);
wr32(E1000_WUS, ~0);
result = PCI_ERS_RESULT_RECOVERED;
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 0e67145bc418..4f34e1b79705 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -4415,13 +4415,12 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
struct mvpp2_txq_pcpu_buf *tx_buf =
txq_pcpu->buffs + txq_pcpu->txq_get_index;
- mvpp2_txq_inc_get(txq_pcpu);
-
dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
tx_buf->size, DMA_TO_DEVICE);
- if (!tx_buf->skb)
- continue;
- dev_kfree_skb_any(tx_buf->skb);
+ if (tx_buf->skb)
+ dev_kfree_skb_any(tx_buf->skb);
+
+ mvpp2_txq_inc_get(txq_pcpu);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 1494997c4f7e..4dccf7287f0f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -88,10 +88,17 @@ void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
}
}
+#define MLX4_EN_WRAP_AROUND_SEC 10UL
+/* By scheduling the overflow check every 5 seconds, we have a reasonably
+ * good chance we won't miss a wrap around.
+ * TODO: Use a timer instead of a work queue to increase the guarantee.
+ */
+#define MLX4_EN_OVERFLOW_PERIOD (MLX4_EN_WRAP_AROUND_SEC * HZ / 2)
+
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
{
bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
- mdev->overflow_period);
+ MLX4_EN_OVERFLOW_PERIOD);
unsigned long flags;
if (timeout) {
@@ -236,7 +243,6 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
.enable = mlx4_en_phc_enable,
};
-#define MLX4_EN_WRAP_AROUND_SEC 10ULL
/* This function calculates the max shift that enables the user range
* of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
@@ -258,7 +264,6 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
struct mlx4_dev *dev = mdev->dev;
unsigned long flags;
- u64 ns, zero = 0;
/* mlx4_en_init_timestamp is called for each netdev.
* mdev->ptp_clock is common for all ports, skip initialization if
@@ -282,13 +287,6 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
ktime_to_ns(ktime_get_real()));
write_unlock_irqrestore(&mdev->clock_lock, flags);
- /* Calculate period in seconds to call the overflow watchdog - to make
- * sure counter is checked at least once every wrap around.
- */
- ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask, zero, &zero);
- do_div(ns, NSEC_PER_SEC / 2 / HZ);
- mdev->overflow_period = ns;
-
/* Configure the PHC */
mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");
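
Replacing the per-device overflow_period computation with the fixed MLX4_EN_OVERFLOW_PERIOD above means the overflow worker only compares jiffies against half of the 10-second wrap-around window. A tiny standalone sketch of that arithmetic and the "is it time yet" test, with an assumed HZ value and plain counters instead of the jiffies helpers, could be:

    #include <stdbool.h>
    #include <stdio.h>

    #define HZ 250UL				/* assumed tick rate */
    #define WRAP_AROUND_SEC 10UL
    #define OVERFLOW_PERIOD (WRAP_AROUND_SEC * HZ / 2)	/* check twice per wrap */

    static bool overflow_check_due(unsigned long now, unsigned long last_check)
    {
    	/* Unsigned subtraction, like time_is_before_jiffies(), is wrap-safe. */
    	return now - last_check >= OVERFLOW_PERIOD;
    }

    int main(void)
    {
    	unsigned long last = 1000;

    	printf("%d\n", overflow_check_due(last + 2 * HZ, last));	/* 0 */
    	printf("%d\n", overflow_check_due(last + 6 * HZ, last));	/* 1 */
    	return 0;
    }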
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 31c491e02e69..99361352ed0d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -791,8 +791,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
return -ENOSYS;
}
- mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
-
dev->caps.hca_core_clock = hca_param.hca_core_clock;
memset(&dev_cap, 0, sizeof(dev_cap));
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index c41f15102ae0..10aa6544cf4d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -409,7 +409,6 @@ struct mlx4_en_dev {
struct cyclecounter cycles;
struct timecounter clock;
unsigned long last_overflow_check;
- unsigned long overflow_period;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
struct notifier_block nb;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 829be21f97b2..be258d90de9e 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -724,7 +724,7 @@ static void ql_build_coredump_seg_header(
seg_hdr->cookie = MPI_COREDUMP_COOKIE;
seg_hdr->segNum = seg_number;
seg_hdr->segSize = seg_size;
- memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
+ strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
}
/*
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 72fcfc924589..0d18be0fed8e 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -339,7 +339,7 @@ enum FELIC_MODE_BIT {
ECMR_DPAD = 0x00200000, ECMR_RZPF = 0x00100000,
ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000,
ECMR_TXF = 0x00010000, ECMR_MCT = 0x00002000, ECMR_PRCEF = 0x00001000,
- ECMR_PMDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020,
+ ECMR_MPDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020,
ECMR_RTM = 0x00000010, ECMR_ILB = 0x00000008, ECMR_ELB = 0x00000004,
ECMR_DM = 0x00000002, ECMR_PRM = 0x00000001,
};
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 49d9f0a789fe..7d0690433ee0 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -541,9 +541,6 @@ void phy_stop_machine(struct phy_device *phydev)
if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
phydev->state = PHY_UP;
mutex_unlock(&phydev->lock);
-
- /* Now we can run the state machine synchronously */
- phy_state_machine(&phydev->state_queue.work);
}
/**
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index a5f392ae30d5..61cd53838360 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2343,8 +2343,10 @@ start_again:
hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
TEAM_CMD_OPTIONS_GET);
- if (!hdr)
+ if (!hdr) {
+ nlmsg_free(skb);
return -EMSGSIZE;
+ }
if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
goto nla_put_failure;
@@ -2611,8 +2613,10 @@ start_again:
hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
TEAM_CMD_PORT_LIST_GET);
- if (!hdr)
+ if (!hdr) {
+ nlmsg_free(skb);
return -EMSGSIZE;
+ }
if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
goto nla_put_failure;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a444294fb555..89ad2b750531 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1196,11 +1196,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
switch (tun->flags & TUN_TYPE_MASK) {
case IFF_TUN:
if (tun->flags & IFF_NO_PI) {
- switch (skb->data[0] & 0xf0) {
- case 0x40:
+ u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
+
+ switch (ip_version) {
+ case 4:
pi.proto = htons(ETH_P_IP);
break;
- case 0x60:
+ case 6:
pi.proto = htons(ETH_P_IPV6);
break;
default:
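
The tun change above stops matching the raw first byte against 0x40/0x60 masks and instead extracts the IP version nibble, after first checking that the skb is not empty. The same parsing outside the kernel is a one-liner; a sketch with a plain byte buffer in place of the skb might be:

    #include <stdint.h>
    #include <stdio.h>

    /* Returns the EtherType for the packet, or 0 if it cannot be classified. */
    static uint16_t guess_proto(const uint8_t *data, size_t len)
    {
    	uint8_t ip_version = len ? data[0] >> 4 : 0;

    	switch (ip_version) {
    	case 4:
    		return 0x0800;	/* ETH_P_IP */
    	case 6:
    		return 0x86DD;	/* ETH_P_IPV6 */
    	default:
    		return 0;	/* unknown; the caller decides what to do */
    	}
    }

    int main(void)
    {
    	uint8_t v4_hdr[] = { 0x45, 0x00 };	/* IPv4, IHL 5 */
    	uint8_t v6_hdr[] = { 0x60, 0x00 };

    	printf("%#x %#x %#x\n",
    	       guess_proto(v4_hdr, sizeof(v4_hdr)),
    	       guess_proto(v6_hdr, sizeof(v6_hdr)),
    	       guess_proto(v4_hdr, 0));
    	return 0;
    }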
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 7f83504dfa69..1f6893ebce16 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -364,7 +364,7 @@ config USB_NET_NET1080
optionally with LEDs that indicate traffic
config USB_NET_PLUSB
- tristate "Prolific PL-2301/2302/25A1 based cables"
+ tristate "Prolific PL-2301/2302/25A1/27A1 based cables"
# if the handshake/init/reset problems, from original 'plusb',
# are ever resolved ... then remove "experimental"
depends on USB_USBNET
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 1bfe0fcaccf5..7c02231c1a1b 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -102,7 +102,7 @@ static int pl_reset(struct usbnet *dev)
}
static const struct driver_info prolific_info = {
- .description = "Prolific PL-2301/PL-2302/PL-25A1",
+ .description = "Prolific PL-2301/PL-2302/PL-25A1/PL-27A1",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT,
/* some PL-2302 versions seem to fail usb_set_interface() */
.reset = pl_reset,
@@ -139,6 +139,17 @@ static const struct usb_device_id products [] = {
* Host-to-Host Cable
*/
.driver_info = (unsigned long) &prolific_info,
+
+},
+
+/* super speed cables */
+{
+ USB_DEVICE(0x067b, 0x27a1), /* PL-27A1, no eeprom
+ * also: goobay Active USB 3.0
+ * Data Link,
+ * Unitek Y-3501
+ */
+ .driver_info = (unsigned long) &prolific_info,
},
{ }, // END
@@ -158,5 +169,5 @@ static struct usb_driver plusb_driver = {
module_usb_driver(plusb_driver);
MODULE_AUTHOR("David Brownell");
-MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1 USB Host to Host Link Driver");
+MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1/27A1 USB Host to Host Link Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index c2ea4e5666fb..9710cf71054a 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1990,6 +1990,10 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
elength = 1;
goto next_desc;
}
+ if ((buflen < elength) || (elength < 3)) {
+ dev_err(&intf->dev, "invalid descriptor buffer length\n");
+ break;
+ }
if (buffer[1] != USB_DT_CS_INTERFACE) {
dev_err(&intf->dev, "skipping garbage\n");
goto next_desc;
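
The check added to cdc_parse_cdc_header above refuses to walk past the end of the descriptor buffer and rejects descriptors shorter than the 3-byte minimum (length, type, subtype) before any per-type parsing runs. A self-contained sketch of the same walk-and-validate loop over a raw length-prefixed blob, with a hypothetical descriptor layout and no USB types, could be:

    #include <stdint.h>
    #include <stdio.h>

    /* Walk length-prefixed descriptors; return how many were valid. */
    static int count_descriptors(const uint8_t *buf, int buflen)
    {
    	int count = 0;

    	while (buflen > 0) {
    		int elength = buf[0];

    		if (buflen < elength || elength < 3) {
    			fprintf(stderr, "invalid descriptor buffer length\n");
    			break;
    		}
    		/* buf[1] is the descriptor type, buf[2] the subtype ... */
    		count++;
    		buflen -= elength;
    		buf += elength;
    	}
    	return count;
    }

    int main(void)
    {
    	/* two valid 5-byte descriptors, then a truncated one */
    	uint8_t blob[] = { 5, 0x24, 0x00, 0, 0,
    			   5, 0x24, 0x06, 0, 0,
    			   9, 0x24, 0x01 };

    	printf("%d\n", count_descriptors(blob, sizeof(blob)));	/* prints 2 */
    	return 0;
    }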
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 041d1d5eb718..8b8bea5d546a 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -1698,7 +1698,11 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->max_num_stations = TARGET_TLV_NUM_STATIONS;
ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS;
ar->max_num_tdls_vdevs = TARGET_TLV_NUM_TDLS_VDEVS;
- ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
+ if (QCA_REV_WCN3990(ar))
+ ar->htt.max_num_pending_tx =
+ TARGET_HL_1_0_NUM_MSDU_DESC;
+ else
+ ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS;
ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
WMI_STAT_PEER;
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 37479589b8e1..e0af0f766b02 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -628,6 +628,7 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
#define TARGET_HL_10_TLV_NUM_PEERS 14
#define TARGET_HL_10_TLV_AST_SKID_LIMIT 6
#define TARGET_HL_10_TLV_NUM_WDS_ENTRIES 2
+#define TARGET_HL_1_0_NUM_MSDU_DESC (3600)
/* Diagnostic Window */
#define CE_DIAG_PIPE 7
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 01175d94adca..5a84626dff14 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -7887,6 +7887,12 @@ static struct ieee80211_iface_combination ath10k_wcn3990_qcs_if_comb[] = {
.num_different_channels = 1,
.max_interfaces = 4,
.n_limits = ARRAY_SIZE(ath10k_wcn3990_if_limit),
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
+#endif
},
{
.limits = ath10k_wcn3990_qcs_if_limit,
@@ -7899,6 +7905,12 @@ static struct ieee80211_iface_combination ath10k_wcn3990_qcs_if_comb[] = {
.num_different_channels = 1,
.max_interfaces = 2,
.n_limits = ARRAY_SIZE(ath10k_wcn3990_if_limit_ibss),
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
+#endif
},
};
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 096818610d40..213569d384e7 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -631,6 +631,8 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
struct regulatory_request *request))
{
const struct ieee80211_regdomain *regd;
+ u32 chan_num;
+ struct ieee80211_channel *chan;
wiphy->reg_notifier = reg_notifier;
wiphy->regulatory_flags |= REGULATORY_STRICT_REG |
@@ -653,6 +655,20 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
}
wiphy_apply_custom_regulatory(wiphy, regd);
+
+ /* For regulatory rules similar to the following:
+ * REG_RULE(2412-10, 2462+10, 40, 0, 20, 0), channels 12/13 are enabled
+ * due to support of 5/10 MHz.
+ * Therefore, disable 2.4 GHz channels that don't have 20 MHz bandwidth.
+ */
+ for (chan_num = 0;
+ chan_num < wiphy->bands[IEEE80211_BAND_2GHZ]->n_channels;
+ chan_num++) {
+ chan = &wiphy->bands[IEEE80211_BAND_2GHZ]->channels[chan_num];
+ if (chan->flags & IEEE80211_CHAN_NO_20MHZ)
+ chan->flags |= IEEE80211_CHAN_DISABLED;
+ }
+
ath_reg_apply_radar_flags(wiphy);
ath_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg);
return 0;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
index da5826d788d6..5fecae0ba52e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
@@ -876,7 +876,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
eth_broadcast_addr(params_le->bssid);
params_le->bss_type = DOT11_BSSTYPE_ANY;
- params_le->scan_type = 0;
+ params_le->scan_type = BRCMF_SCANTYPE_ACTIVE;
params_le->channel_num = 0;
params_le->nprobes = cpu_to_le32(-1);
params_le->active_time = cpu_to_le32(-1);
@@ -884,12 +884,9 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
params_le->home_time = cpu_to_le32(-1);
memset(&params_le->ssid_le, 0, sizeof(params_le->ssid_le));
- /* if request is null exit so it will be all channel broadcast scan */
- if (!request)
- return;
-
n_ssids = request->n_ssids;
n_channels = request->n_channels;
+
/* Copy channel array if applicable */
brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n",
n_channels);
@@ -926,16 +923,8 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
ptr += sizeof(ssid_le);
}
} else {
- brcmf_dbg(SCAN, "Broadcast scan %p\n", request->ssids);
- if ((request->ssids) && request->ssids->ssid_len) {
- brcmf_dbg(SCAN, "SSID %s len=%d\n",
- params_le->ssid_le.SSID,
- request->ssids->ssid_len);
- params_le->ssid_le.SSID_len =
- cpu_to_le32(request->ssids->ssid_len);
- memcpy(&params_le->ssid_le.SSID, request->ssids->ssid,
- request->ssids->ssid_len);
- }
+ brcmf_dbg(SCAN, "Performing passive scan\n");
+ params_le->scan_type = BRCMF_SCANTYPE_PASSIVE;
}
/* Adding mask to channel numbers */
params_le->channel_num =
@@ -2914,6 +2903,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
s32 status;
struct brcmf_escan_result_le *escan_result_le;
+ u32 escan_buflen;
struct brcmf_bss_info_le *bss_info_le;
struct brcmf_bss_info_le *bss = NULL;
u32 bi_length;
@@ -2930,11 +2920,23 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
if (status == BRCMF_E_STATUS_PARTIAL) {
brcmf_dbg(SCAN, "ESCAN Partial result\n");
+ if (e->datalen < sizeof(*escan_result_le)) {
+ brcmf_err("invalid event data length\n");
+ goto exit;
+ }
escan_result_le = (struct brcmf_escan_result_le *) data;
if (!escan_result_le) {
brcmf_err("Invalid escan result (NULL pointer)\n");
goto exit;
}
+ escan_buflen = le32_to_cpu(escan_result_le->buflen);
+ if (escan_buflen > WL_ESCAN_BUF_SIZE ||
+ escan_buflen > e->datalen ||
+ escan_buflen < sizeof(*escan_result_le)) {
+ brcmf_err("Invalid escan buffer length: %d\n",
+ escan_buflen);
+ goto exit;
+ }
if (le16_to_cpu(escan_result_le->bss_count) != 1) {
brcmf_err("Invalid bss_count %d: ignoring\n",
escan_result_le->bss_count);
@@ -2951,9 +2953,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
}
bi_length = le32_to_cpu(bss_info_le->length);
- if (bi_length != (le32_to_cpu(escan_result_le->buflen) -
- WL_ESCAN_RESULTS_FIXED_SIZE)) {
- brcmf_err("Invalid bss_info length %d: ignoring\n",
+ if (bi_length != escan_buflen - WL_ESCAN_RESULTS_FIXED_SIZE) {
+ brcmf_err("Ignoring invalid bss_info length: %d\n",
bi_length);
goto exit;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
index daa427b46712..4320c4cae53e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -45,6 +45,11 @@
#define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff
#define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16
+/* scan type definitions */
+#define BRCMF_SCANTYPE_DEFAULT 0xFF
+#define BRCMF_SCANTYPE_ACTIVE 0
+#define BRCMF_SCANTYPE_PASSIVE 1
+
/* primary (ie tx) key */
#define BRCMF_PRIMARY_KEY (1 << 1)
#define DOT11_BSSTYPE_ANY 2
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index 80e2c8d7c86a..d3afb516b119 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -58,6 +58,7 @@ MODULE_PARM_DESC(enable_waltest, "Enable to handle firmware waltest");
enum cnss_debug_quirks {
LINK_DOWN_SELF_RECOVERY,
SKIP_DEVICE_BOOT,
+ USE_CORE_ONLY_FW,
};
unsigned long quirks;
@@ -1043,6 +1044,8 @@ static int cnss_qca6174_shutdown(struct cnss_plat_data *plat_priv)
if (!pci_priv)
return -ENODEV;
+ cnss_pm_request_resume(pci_priv);
+
cnss_driver_call_remove(plat_priv);
cnss_request_bus_bandwidth(CNSS_BUS_WIDTH_NONE);
@@ -1111,6 +1114,12 @@ static int cnss_qca6290_powerup(struct cnss_plat_data *plat_priv)
return 0;
}
+ if (test_bit(USE_CORE_ONLY_FW, &quirks)) {
+ clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state);
+ clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+ return 0;
+ }
+
cnss_set_pin_connect_status(plat_priv);
if (qmi_bypass) {
@@ -1141,6 +1150,8 @@ static int cnss_qca6290_shutdown(struct cnss_plat_data *plat_priv)
if (!pci_priv)
return -ENODEV;
+ cnss_pm_request_resume(pci_priv);
+
cnss_driver_call_remove(plat_priv);
cnss_request_bus_bandwidth(CNSS_BUS_WIDTH_NONE);
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 22fad9210945..2efc3aa63a75 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -117,6 +117,7 @@ int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv)
if (!pci_priv)
return -ENODEV;
+ cnss_pr_dbg("Suspending PCI link\n");
if (!pci_priv->pci_link_state) {
cnss_pr_info("PCI link is already suspended!\n");
goto out;
@@ -150,6 +151,7 @@ int cnss_resume_pci_link(struct cnss_pci_data *pci_priv)
if (!pci_priv)
return -ENODEV;
+ cnss_pr_dbg("Resuming PCI link\n");
if (pci_priv->pci_link_state) {
cnss_pr_info("PCI link is already resumed!\n");
goto out;
@@ -368,27 +370,37 @@ static int cnss_pci_suspend(struct device *dev)
driver_ops = plat_priv->driver_ops;
if (driver_ops && driver_ops->suspend) {
ret = driver_ops->suspend(pci_dev, state);
- if (pci_priv->pci_link_state) {
- if (cnss_pci_set_mhi_state(pci_priv,
- CNSS_MHI_SUSPEND)) {
+ if (ret) {
+ cnss_pr_err("Failed to suspend host driver, err = %d\n",
+ ret);
+ ret = -EAGAIN;
+ goto out;
+ }
+ }
+
+ if (pci_priv->pci_link_state) {
+ ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_SUSPEND);
+ if (ret) {
+ if (driver_ops && driver_ops->resume)
driver_ops->resume(pci_dev);
- ret = -EAGAIN;
- goto out;
- }
-
- cnss_set_pci_config_space(pci_priv,
- SAVE_PCI_CONFIG_SPACE);
- pci_disable_device(pci_dev);
-
- ret = pci_set_power_state(pci_dev, PCI_D3hot);
- if (ret)
- cnss_pr_err("Failed to set D3Hot, err = %d\n",
- ret);
+ ret = -EAGAIN;
+ goto out;
}
+
+ cnss_set_pci_config_space(pci_priv,
+ SAVE_PCI_CONFIG_SPACE);
+ pci_disable_device(pci_dev);
+
+ ret = pci_set_power_state(pci_dev, PCI_D3hot);
+ if (ret)
+ cnss_pr_err("Failed to set D3Hot, err = %d\n",
+ ret);
}
cnss_pci_set_monitor_wake_intr(pci_priv, false);
+ return 0;
+
out:
return ret;
}
@@ -408,23 +420,30 @@ static int cnss_pci_resume(struct device *dev)
if (!plat_priv)
goto out;
- driver_ops = plat_priv->driver_ops;
- if (driver_ops && driver_ops->resume && !pci_priv->pci_link_down_ind) {
- ret = pci_enable_device(pci_dev);
- if (ret)
- cnss_pr_err("Failed to enable PCI device, err = %d\n",
- ret);
+ if (pci_priv->pci_link_down_ind)
+ goto out;
- if (pci_priv->saved_state)
- cnss_set_pci_config_space(pci_priv,
- RESTORE_PCI_CONFIG_SPACE);
+ ret = pci_enable_device(pci_dev);
+ if (ret)
+ cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
- pci_set_master(pci_dev);
- cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
+ if (pci_priv->saved_state)
+ cnss_set_pci_config_space(pci_priv,
+ RESTORE_PCI_CONFIG_SPACE);
+
+ pci_set_master(pci_dev);
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
+ driver_ops = plat_priv->driver_ops;
+ if (driver_ops && driver_ops->resume) {
ret = driver_ops->resume(pci_dev);
+ if (ret)
+ cnss_pr_err("Failed to resume host driver, err = %d\n",
+ ret);
}
+ return 0;
+
out:
return ret;
}
@@ -600,14 +619,17 @@ int cnss_auto_suspend(void)
ret = pci_set_power_state(pci_dev, PCI_D3hot);
if (ret)
cnss_pr_err("Failed to set D3Hot, err = %d\n", ret);
+
+ cnss_pr_dbg("Suspending PCI link\n");
if (cnss_set_pci_link(pci_priv, PCI_LINK_DOWN)) {
- cnss_pr_err("Failed to shutdown PCI link!\n");
+ cnss_pr_err("Failed to suspend PCI link!\n");
ret = -EAGAIN;
goto resume_mhi;
}
+
+ pci_priv->pci_link_state = PCI_LINK_DOWN;
}
- pci_priv->pci_link_state = PCI_LINK_DOWN;
cnss_pci_set_auto_suspended(pci_priv, 1);
cnss_pci_set_monitor_wake_intr(pci_priv, true);
@@ -643,21 +665,24 @@ int cnss_auto_resume(void)
pci_dev = pci_priv->pci_dev;
if (!pci_priv->pci_link_state) {
+ cnss_pr_dbg("Resuming PCI link\n");
if (cnss_set_pci_link(pci_priv, PCI_LINK_UP)) {
cnss_pr_err("Failed to resume PCI link!\n");
ret = -EAGAIN;
goto out;
}
pci_priv->pci_link_state = PCI_LINK_UP;
+
ret = pci_enable_device(pci_dev);
if (ret)
cnss_pr_err("Failed to enable PCI device, err = %d\n",
ret);
+
+ cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
+ pci_set_master(pci_dev);
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
}
- cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
- pci_set_master(pci_dev);
- cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
cnss_pci_set_auto_suspended(pci_priv, 0);
bus_bw_info = &plat_priv->bus_bw_info;
@@ -668,6 +693,20 @@ out:
}
EXPORT_SYMBOL(cnss_auto_resume);
+int cnss_pm_request_resume(struct cnss_pci_data *pci_priv)
+{
+ struct pci_dev *pci_dev;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ pci_dev = pci_priv->pci_dev;
+ if (!pci_dev)
+ return -ENODEV;
+
+ return pm_request_resume(&pci_dev->dev);
+}
+
int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
{
struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
index e26e331e2f8a..89edc6020d35 100644
--- a/drivers/net/wireless/cnss2/pci.h
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -138,5 +138,6 @@ int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv);
void cnss_pci_stop_mhi(struct cnss_pci_data *pci_priv);
void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv);
void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv);
+int cnss_pm_request_resume(struct cnss_pci_data *pci_priv);
#endif /* _CNSS_PCI_H */
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index d82984912e04..95b82cc132e6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -73,6 +73,7 @@
/* NVM offsets (in words) definitions */
enum wkp_nvm_offsets {
/* NVM HW-Section offset (in words) definitions */
+ SUBSYSTEM_ID = 0x0A,
HW_ADDR = 0x15,
/* NVM SW-Section offset (in words) definitions */
@@ -257,13 +258,12 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const __le16 * const nvm_ch_flags,
- bool lar_supported)
+ bool lar_supported, bool no_wide_in_5ghz)
{
int ch_idx;
int n_channels = 0;
struct ieee80211_channel *channel;
u16 ch_flags;
- bool is_5ghz;
int num_of_ch, num_2ghz_channels;
const u8 *nvm_chan;
@@ -278,12 +278,20 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
}
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
+ bool is_5ghz = (ch_idx >= num_2ghz_channels);
+
ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
- if (ch_idx >= num_2ghz_channels &&
- !data->sku_cap_band_52GHz_enable)
+ if (is_5ghz && !data->sku_cap_band_52GHz_enable)
continue;
+ /* workaround to disable wide channels in 5GHz */
+ if (no_wide_in_5ghz && is_5ghz) {
+ ch_flags &= ~(NVM_CHANNEL_40MHZ |
+ NVM_CHANNEL_80MHZ |
+ NVM_CHANNEL_160MHZ);
+ }
+
if (!lar_supported && !(ch_flags & NVM_CHANNEL_VALID)) {
/*
* Channels might become valid later if lar is
@@ -303,8 +311,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
n_channels++;
channel->hw_value = nvm_chan[ch_idx];
- channel->band = (ch_idx < num_2ghz_channels) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ channel->band = is_5ghz ?
+ IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
channel->center_freq =
ieee80211_channel_to_frequency(
channel->hw_value, channel->band);
@@ -316,7 +324,6 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
* is not used in mvm, and is used for backwards compatibility
*/
channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
- is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
/* don't put limitations in case we're using LAR */
if (!lar_supported)
@@ -405,7 +412,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const __le16 *ch_section,
- u8 tx_chains, u8 rx_chains, bool lar_supported)
+ u8 tx_chains, u8 rx_chains, bool lar_supported,
+ bool no_wide_in_5ghz)
{
int n_channels;
int n_used = 0;
@@ -414,12 +422,14 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
n_channels = iwl_init_channel_map(
dev, cfg, data,
- &ch_section[NVM_CHANNELS], lar_supported);
+ &ch_section[NVM_CHANNELS], lar_supported,
+ no_wide_in_5ghz);
else
n_channels = iwl_init_channel_map(
dev, cfg, data,
&ch_section[NVM_CHANNELS_FAMILY_8000],
- lar_supported);
+ lar_supported,
+ no_wide_in_5ghz);
sband = &data->bands[IEEE80211_BAND_2GHZ];
sband->band = IEEE80211_BAND_2GHZ;
@@ -582,6 +592,39 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
#define IWL_4165_DEVICE_ID 0x5501
+static bool
+iwl_nvm_no_wide_in_5ghz(struct device *dev, const struct iwl_cfg *cfg,
+ const __le16 *nvm_hw)
+{
+ /*
+ * Work around a bug in Indonesia SKUs where the regulatory data in
+ * some 7000-family OTPs erroneously allows wide channels in
+ * 5GHz. To check for Indonesia, we take the SKU value from
+ * bits 1-4 in the subsystem ID and check if it is either 5 or
+ * 9. In those cases, we need to force-disable wide channels
+ * in 5GHz otherwise the FW will throw a sysassert when we try
+ * to use them.
+ */
+ if (cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ /*
+ * Unlike the other sections in the NVM, the hw
+ * section uses big-endian.
+ */
+ u16 subsystem_id = be16_to_cpup((const __be16 *)nvm_hw
+ + SUBSYSTEM_ID);
+ u8 sku = (subsystem_id & 0x1e) >> 1;
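+ /* e.g. a subsystem ID whose low byte is 0x0a gives sku = (0x0a & 0x1e) >> 1 = 5 */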
+
+ if (sku == 5 || sku == 9) {
+ IWL_DEBUG_EEPROM(dev,
+ "disabling wide channels in 5GHz (0x%0x %d)\n",
+ subsystem_id, sku);
+ return true;
+ }
+ }
+
+ return false;
+}
+
struct iwl_nvm_data *
iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
const __le16 *nvm_hw, const __le16 *nvm_sw,
@@ -591,6 +634,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
u32 mac_addr0, u32 mac_addr1, u32 hw_id)
{
struct iwl_nvm_data *data;
+ bool no_wide_in_5ghz = iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw);
u32 sku;
u32 radio_cfg;
u16 lar_config;
@@ -657,7 +701,8 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
iwl_set_hw_address(cfg, data, nvm_hw);
iwl_init_sbands(dev, cfg, data, nvm_sw,
- tx_chains, rx_chains, lar_fw_supported);
+ tx_chains, rx_chains, lar_fw_supported,
+ no_wide_in_5ghz);
} else {
u16 lar_offset = data->nvm_version < 0xE39 ?
NVM_LAR_OFFSET_FAMILY_8000_OLD :
@@ -673,7 +718,8 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
iwl_init_sbands(dev, cfg, data, regulatory,
tx_chains, rx_chains,
- lar_fw_supported && data->lar_enabled);
+ lar_fw_supported && data->lar_enabled,
+ no_wide_in_5ghz);
}
data->calib_version = 255;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 1a8ea775de08..984cd2f05c4a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -1906,6 +1906,11 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
struct iwl_mvm_mc_iter_data *data = _data;
struct iwl_mvm *mvm = data->mvm;
struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = MCAST_FILTER_CMD,
+ .flags = CMD_ASYNC,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
int ret, len;
/* if we don't have free ports, mcast frames will be dropped */
@@ -1920,7 +1925,10 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
- ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
+ hcmd.len[0] = len;
+ hcmd.data[0] = cmd;
+
+ ret = iwl_mvm_send_cmd(mvm, &hcmd);
if (ret)
IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 019d7165a045..2a996a68fc2b 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2884,6 +2884,7 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2,
static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
{
struct hwsim_new_radio_params param = { 0 };
+ const char *hwname = NULL;
param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
@@ -2897,8 +2898,14 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (info->attrs[HWSIM_ATTR_NO_VIF])
param.no_vif = true;
- if (info->attrs[HWSIM_ATTR_RADIO_NAME])
- param.hwname = nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]);
+ if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
+ hwname = kasprintf(GFP_KERNEL, "%.*s",
+ nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+ (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
+ if (!hwname)
+ return -ENOMEM;
+ param.hwname = hwname;
+ }
if (info->attrs[HWSIM_ATTR_USE_CHANCTX])
param.use_chanctx = true;
@@ -2926,11 +2933,15 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
s64 idx = -1;
const char *hwname = NULL;
- if (info->attrs[HWSIM_ATTR_RADIO_ID])
+ if (info->attrs[HWSIM_ATTR_RADIO_ID]) {
idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
- else if (info->attrs[HWSIM_ATTR_RADIO_NAME])
- hwname = (void *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]);
- else
+ } else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
+ hwname = kasprintf(GFP_KERNEL, "%.*s",
+ nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+ (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
+ if (!hwname)
+ return -ENOMEM;
+ } else
return -EINVAL;
spin_lock_bh(&hwsim_radio_lock);
@@ -2939,7 +2950,8 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (data->idx != idx)
continue;
} else {
- if (strcmp(hwname, wiphy_name(data->hw->wiphy)))
+ if (!hwname ||
+ strcmp(hwname, wiphy_name(data->hw->wiphy)))
continue;
}
@@ -2947,10 +2959,12 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
spin_unlock_bh(&hwsim_radio_lock);
mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
info);
+ kfree(hwname);
return 0;
}
spin_unlock_bh(&hwsim_radio_lock);
+ kfree(hwname);
return -ENODEV;
}
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index c3331d6201c3..9a8982f581c5 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -3740,7 +3740,7 @@ int mwifiex_init_channel_scan_gap(struct mwifiex_adapter *adapter)
if (adapter->config_bands & BAND_A)
n_channels_a = mwifiex_band_5ghz.n_channels;
- adapter->num_in_chan_stats = max_t(u32, n_channels_bg, n_channels_a);
+ adapter->num_in_chan_stats = n_channels_bg + n_channels_a;
adapter->chan_stats = vmalloc(sizeof(*adapter->chan_stats) *
adapter->num_in_chan_stats);
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index c20017ced566..fb98f42cb5e7 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -2170,6 +2170,12 @@ mwifiex_update_chan_statistics(struct mwifiex_private *priv,
sizeof(struct mwifiex_chan_stats);
for (i = 0 ; i < num_chan; i++) {
+ if (adapter->survey_idx >= adapter->num_in_chan_stats) {
+ mwifiex_dbg(adapter, WARN,
+ "FW reported too many channel results (max %d)\n",
+ adapter->num_in_chan_stats);
+ return;
+ }
chan_stats.chan_num = fw_chan_stats->chan_num;
chan_stats.bandcfg = fw_chan_stats->bandcfg;
chan_stats.flags = fw_chan_stats->flags;
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 257a9eadd595..4ac6764f4897 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -488,7 +488,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
entry += sizeof(__le16);
chan->pa_points_per_curve = 8;
- memset(chan->curve_data, 0, sizeof(*chan->curve_data));
+ memset(chan->curve_data, 0, sizeof(chan->curve_data));
memcpy(chan->curve_data, entry,
sizeof(struct p54_pa_curve_data_sample) *
min((u8)8, curve_data->points_per_channel));
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index a52230377e2c..c48b7e8ee0d6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -2269,7 +2269,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
/* find adapter */
if (!_rtl_pci_find_adapter(pdev, hw)) {
err = -ENODEV;
- goto fail3;
+ goto fail2;
}
/* Init IO handler */
@@ -2339,10 +2339,10 @@ fail3:
pci_set_drvdata(pdev, NULL);
rtl_deinit_core(hw);
+fail2:
if (rtlpriv->io.pci_mem_start != 0)
pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
-fail2:
pci_release_regions(pdev);
complete(&rtlpriv->firmware_loading_complete);
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index cd4777954f87..9bee3f11898a 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1567,6 +1567,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
wl->state = WL1251_STATE_OFF;
mutex_init(&wl->mutex);
+ spin_lock_init(&wl->wl_lock);
wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE;
wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index b8a5a8e8f57d..88cf4f5025b0 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -14,6 +14,7 @@
#ifndef _NVME_H
#define _NVME_H
+#include <linux/mutex.h>
#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/kref.h>
@@ -62,6 +63,7 @@ struct nvme_dev {
struct work_struct reset_work;
struct work_struct probe_work;
struct work_struct scan_work;
+ struct mutex shutdown_lock;
char name[12];
char serial[20];
char model[40];
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 4c673d45f1bd..669edbd47602 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2954,6 +2954,7 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
nvme_dev_list_remove(dev);
+ mutex_lock(&dev->shutdown_lock);
if (pci_is_enabled(to_pci_dev(dev->dev))) {
nvme_freeze_queues(dev);
csts = readl(&dev->bar->csts);
@@ -2972,6 +2973,7 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
for (i = dev->queue_count - 1; i >= 0; i--)
nvme_clear_queue(dev->queues[i]);
+ mutex_unlock(&dev->shutdown_lock);
}
static void nvme_dev_remove(struct nvme_dev *dev)
@@ -3328,6 +3330,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&dev->namespaces);
INIT_WORK(&dev->reset_work, nvme_reset_work);
+ mutex_init(&dev->shutdown_lock);
dev->dev = get_device(&pdev->dev);
pci_set_drvdata(pdev, dev);
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 7d223e9080ef..77dddee2753a 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -1062,6 +1062,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
if (rc) {
ctrl_info(ctrl, "Can't get msi for the hotplug controller\n");
ctrl_info(ctrl, "Use INTx for the hotplug controller\n");
+ } else {
+ pci_set_master(pdev);
}
rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED,
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index f8b2b5987ea9..ec91cd17bf34 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -522,7 +522,7 @@ static ssize_t driver_override_store(struct device *dev,
const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
- char *driver_override, *old = pdev->driver_override, *cp;
+ char *driver_override, *old, *cp;
/* We need to keep extra room for a newline */
if (count >= (PAGE_SIZE - 1))
@@ -536,12 +536,15 @@ static ssize_t driver_override_store(struct device *dev,
if (cp)
*cp = '\0';
+ device_lock(dev);
+ old = pdev->driver_override;
if (strlen(driver_override)) {
pdev->driver_override = driver_override;
} else {
kfree(driver_override);
pdev->driver_override = NULL;
}
+ device_unlock(dev);
kfree(old);
@@ -552,8 +555,12 @@ static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ ssize_t len;
- return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
+ device_lock(dev);
+ len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
+ device_unlock(dev);
+ return len;
}
static DEVICE_ATTR_RW(driver_override);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index df741c1c8e5f..9e19fa625daa 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -536,6 +536,7 @@ static int ipa_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_c
int retval;
struct ipa_wan_msg *wan_msg;
struct ipa_msg_meta msg_meta;
+ struct ipa_wan_msg cache_wan_msg;
wan_msg = kzalloc(sizeof(struct ipa_wan_msg), GFP_KERNEL);
if (!wan_msg) {
@@ -549,6 +550,8 @@ static int ipa_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_c
return -EFAULT;
}
+ memcpy(&cache_wan_msg, wan_msg, sizeof(cache_wan_msg));
+
memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
msg_meta.msg_type = msg_type;
msg_meta.msg_len = sizeof(struct ipa_wan_msg);
@@ -565,8 +568,8 @@ static int ipa_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_c
/* cache the cne event */
memcpy(&ipa_ctx->ipa_cne_evt_req_cache[
ipa_ctx->num_ipa_cne_evt_req].wan_msg,
- wan_msg,
- sizeof(struct ipa_wan_msg));
+ &cache_wan_msg,
+ sizeof(cache_wan_msg));
memcpy(&ipa_ctx->ipa_cne_evt_req_cache[
ipa_ctx->num_ipa_cne_evt_req].msg_meta,
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 12b43882ed5b..23e4d2b0d6e8 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -1541,6 +1541,8 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
mutex_unlock(&add_mux_channel_lock);
return -EFAULT;
}
+ extend_ioctl_data.u.rmnet_mux_val.vchannel_name
+ [IFNAMSIZ-1] = '\0';
IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
extend_ioctl_data.u.rmnet_mux_val.mux_id,
extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 73321df80ada..e9fd1560b1e8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -603,6 +603,7 @@ static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_
int retval;
struct ipa_wan_msg *wan_msg;
struct ipa_msg_meta msg_meta;
+ struct ipa_wan_msg cache_wan_msg;
wan_msg = kzalloc(sizeof(struct ipa_wan_msg), GFP_KERNEL);
if (!wan_msg) {
@@ -616,6 +617,8 @@ static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_
return -EFAULT;
}
+ memcpy(&cache_wan_msg, wan_msg, sizeof(cache_wan_msg));
+
memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
msg_meta.msg_type = msg_type;
msg_meta.msg_len = sizeof(struct ipa_wan_msg);
@@ -632,8 +635,8 @@ static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type, bool is_
/* cache the cne event */
memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[
ipa3_ctx->num_ipa_cne_evt_req].wan_msg,
- wan_msg,
- sizeof(struct ipa_wan_msg));
+ &cache_wan_msg,
+ sizeof(cache_wan_msg));
memcpy(&ipa3_ctx->ipa_cne_evt_req_cache[
ipa3_ctx->num_ipa_cne_evt_req].msg_meta,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index 8c6bd48cfb2c..97b9f04f51de 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -1353,6 +1353,7 @@ int ipa3_set_usb_max_packet_size(
return 0;
}
+/* This function is called as part of USB pipe resume */
int ipa3_xdci_connect(u32 clnt_hdl)
{
int result;
@@ -1392,11 +1393,14 @@ exit:
return result;
}
+
+/* This function is called as part of USB pipe connect */
int ipa3_xdci_start(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid)
{
struct ipa3_ep_context *ep;
int result = -EFAULT;
enum gsi_status gsi_res;
+ struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
IPADBG("entry\n");
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
@@ -1418,6 +1422,22 @@ int ipa3_xdci_start(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid)
goto write_chan_scratch_fail;
}
}
+
+ if (IPA_CLIENT_IS_PROD(ep->client) && ep->skip_ep_cfg) {
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_delay = true;
+ ep->ep_delay_set = true;
+
+ result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+ if (result)
+ IPAERR("client (ep: %d) failed result=%d\n",
+ clnt_hdl, result);
+ else
+ IPADBG("client (ep: %d) success\n", clnt_hdl);
+ } else {
+ ep->ep_delay_set = false;
+ }
+
gsi_res = gsi_start_channel(ep->gsi_chan_hdl);
if (gsi_res != GSI_STATUS_SUCCESS) {
IPAERR("Error starting channel: %d\n", gsi_res);
@@ -1622,13 +1642,15 @@ static int ipa3_xdci_stop_gsi_ch_brute_force(u32 clnt_hdl,
/* Clocks should be voted for before invoking this function */
static int ipa3_stop_ul_chan_with_data_drain(u32 qmi_req_id,
- u32 source_pipe_bitmask, bool should_force_clear, u32 clnt_hdl)
+ u32 source_pipe_bitmask, bool should_force_clear, u32 clnt_hdl,
+ bool remove_delay)
{
int result;
bool is_empty = false;
int i;
bool stop_in_proc;
struct ipa3_ep_context *ep;
+ struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
@@ -1649,6 +1671,22 @@ static int ipa3_stop_ul_chan_with_data_drain(u32 qmi_req_id,
if (!stop_in_proc)
goto exit;
+ if (remove_delay && ep->ep_delay_set == true) {
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_delay = false;
+ result = ipa3_cfg_ep_ctrl(clnt_hdl,
+ &ep_cfg_ctrl);
+ if (result) {
+ IPAERR
+ ("client (ep: %d) failed to remove delay result=%d\n",
+ clnt_hdl, result);
+ } else {
+ IPADBG("client (ep: %d) delay removed\n",
+ clnt_hdl);
+ ep->ep_delay_set = false;
+ }
+ }
+
/* if stop_in_proc, lets wait for emptiness */
for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
result = ipa3_is_xdci_channel_empty(ep, &is_empty);
@@ -1714,6 +1752,21 @@ disable_force_clear_and_exit:
if (should_force_clear)
ipa3_disable_force_clear(qmi_req_id);
exit:
+ if (remove_delay && ep->ep_delay_set == true) {
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_delay = false;
+ result = ipa3_cfg_ep_ctrl(clnt_hdl,
+ &ep_cfg_ctrl);
+ if (result) {
+ IPAERR
+ ("client (ep: %d) failed to remove delay result=%d\n",
+ clnt_hdl, result);
+ } else {
+ IPADBG("client (ep: %d) delay removed\n",
+ clnt_hdl);
+ ep->ep_delay_set = false;
+ }
+ }
return result;
}
@@ -1743,7 +1796,8 @@ int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id)
source_pipe_bitmask = 1 <<
ipa3_get_ep_mapping(ep->client);
result = ipa3_stop_ul_chan_with_data_drain(qmi_req_id,
- source_pipe_bitmask, should_force_clear, clnt_hdl);
+ source_pipe_bitmask, should_force_clear, clnt_hdl,
+ true);
if (result) {
IPAERR("Fail to stop UL channel with data drain\n");
WARN_ON(1);
@@ -1918,7 +1972,8 @@ int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
if (!is_dpl) {
source_pipe_bitmask = 1 << ipa3_get_ep_mapping(ul_ep->client);
result = ipa3_stop_ul_chan_with_data_drain(qmi_req_id,
- source_pipe_bitmask, should_force_clear, ul_clnt_hdl);
+ source_pipe_bitmask, should_force_clear, ul_clnt_hdl,
+ false);
if (result) {
IPAERR("Error stopping UL channel: result = %d\n",
result);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 89c7b66b98d6..8e6db8f63fc1 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -577,6 +577,7 @@ struct ipa3_ep_context {
bool switch_to_intr;
int inactive_cycles;
u32 eot_in_poll_err;
+ bool ep_delay_set;
/* sys MUST be the last element of this struct */
struct ipa3_sys_context *sys;
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index c810adc466b3..01ef670dba51 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -1677,6 +1677,8 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
add_mux_channel_lock);
return -EFAULT;
}
+ extend_ioctl_data.u.rmnet_mux_val.vchannel_name
+ [IFNAMSIZ-1] = '\0';
IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
extend_ioctl_data.u.rmnet_mux_val.mux_id,
extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
diff --git a/drivers/power/supply/qcom/Kconfig b/drivers/power/supply/qcom/Kconfig
index 47b201738672..b919c688e627 100644
--- a/drivers/power/supply/qcom/Kconfig
+++ b/drivers/power/supply/qcom/Kconfig
@@ -1,5 +1,23 @@
menu "Qualcomm Technologies Inc Charger and Fuel Gauge support"
+config QPNP_SMBCHARGER
+ tristate "QPNP SMB Charger driver"
+ depends on MFD_SPMI_PMIC
+ help
+ Say Y here to enable the dual-path switch-mode battery charger, which
+ supports USB detection and battery charging up to 3A.
+ The driver also offers relevant information to userspace via the
+ power supply framework.
+
+config QPNP_FG
+ tristate "QPNP fuel gauge driver"
+ depends on MFD_SPMI_PMIC
+ help
+ Say Y here to enable the Fuel Gauge driver. This adds support for
+ battery fuel gauging and reporting the state of charge of the battery
+ connected to the fuel gauge. The state of charge is reported through a
+ BMS power supply property, and uevents are sent when the capacity is
+ updated.
+
config QPNP_FG_GEN3
tristate "QPNP GEN3 fuel gauge driver"
depends on MFD_SPMI_PMIC
diff --git a/drivers/power/supply/qcom/Makefile b/drivers/power/supply/qcom/Makefile
index 87ab2b24175f..92310ef5c803 100644
--- a/drivers/power/supply/qcom/Makefile
+++ b/drivers/power/supply/qcom/Makefile
@@ -1,3 +1,5 @@
+obj-$(CONFIG_QPNP_SMBCHARGER) += qpnp-smbcharger.o batterydata-lib.o pmic-voter.o
+obj-$(CONFIG_QPNP_FG) += qpnp-fg.o
obj-$(CONFIG_QPNP_FG_GEN3) += qpnp-fg-gen3.o fg-memif.o fg-util.o
obj-$(CONFIG_SMB135X_CHARGER) += smb135x-charger.o pmic-voter.o
obj-$(CONFIG_SMB1351_USB_CHARGER) += battery.o smb1351-charger.o pmic-voter.o
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index b75d7db57c3e..1935704fcf09 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -404,6 +404,7 @@ struct fg_chip {
struct mutex sram_rw_lock;
struct mutex charge_full_lock;
struct mutex qnovo_esr_ctrl_lock;
+ spinlock_t suspend_lock;
u32 batt_soc_base;
u32 batt_info_base;
u32 mem_if_base;
@@ -438,6 +439,7 @@ struct fg_chip {
bool slope_limit_en;
bool use_ima_single_mode;
bool qnovo_enable;
+ bool suspended;
struct completion soc_update;
struct completion soc_ready;
struct delayed_work profile_load_work;
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index 256d9ed8ada5..491dda6ff7e8 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -762,7 +762,19 @@ static int fg_get_msoc(struct fg_chip *chip, int *msoc)
if (rc < 0)
return rc;
- *msoc = DIV_ROUND_CLOSEST(*msoc * FULL_CAPACITY, FULL_SOC_RAW);
+ /*
+ * To get clean endpoints at 0 and 100, handle the raw values 0 and 255
+ * separately and scale the remaining values 1-254 to 1-99.
+ * DIV_ROUND_UP is not suitable here because it would round any value
+ * above 252 up to 100.
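+ * For example, assuming FULL_SOC_RAW is 255 and FULL_CAPACITY is 100
+ * (both defined outside this hunk), a raw reading of 1 maps to 1 and a
+ * raw reading of 254 maps to 99.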
+ */
+ if (*msoc == FULL_SOC_RAW)
+ *msoc = 100;
+ else if (*msoc == 0)
+ *msoc = 0;
+ else
+ *msoc = DIV_ROUND_CLOSEST((*msoc - 1) * (FULL_CAPACITY - 2),
+ FULL_SOC_RAW - 2) + 1;
return 0;
}
@@ -3776,6 +3788,14 @@ static int fg_notifier_cb(struct notifier_block *nb,
struct power_supply *psy = data;
struct fg_chip *chip = container_of(nb, struct fg_chip, nb);
+ spin_lock(&chip->suspend_lock);
+ if (chip->suspended) {
+ /* Return if we are still suspended */
+ spin_unlock(&chip->suspend_lock);
+ return NOTIFY_OK;
+ }
+ spin_unlock(&chip->suspend_lock);
+
if (event != PSY_EVENT_PROP_CHANGED)
return NOTIFY_OK;
@@ -5089,6 +5109,7 @@ static int fg_gen3_probe(struct platform_device *pdev)
mutex_init(&chip->ttf.lock);
mutex_init(&chip->charge_full_lock);
mutex_init(&chip->qnovo_esr_ctrl_lock);
+ spin_lock_init(&chip->suspend_lock);
init_completion(&chip->soc_update);
init_completion(&chip->soc_ready);
INIT_DELAYED_WORK(&chip->profile_load_work, profile_load_work);
@@ -5186,6 +5207,10 @@ static int fg_gen3_suspend(struct device *dev)
struct fg_chip *chip = dev_get_drvdata(dev);
int rc;
+ spin_lock(&chip->suspend_lock);
+ chip->suspended = true;
+ spin_unlock(&chip->suspend_lock);
+
rc = fg_esr_timer_config(chip, true);
if (rc < 0)
pr_err("Error in configuring ESR timer, rc=%d\n", rc);
@@ -5209,6 +5234,16 @@ static int fg_gen3_resume(struct device *dev)
if (fg_sram_dump)
schedule_delayed_work(&chip->sram_dump_work,
msecs_to_jiffies(fg_sram_dump_period_ms));
+
+ if (!work_pending(&chip->status_change_work)) {
+ pm_stay_awake(chip->dev);
+ schedule_work(&chip->status_change_work);
+ }
+
+ spin_lock(&chip->suspend_lock);
+ chip->suspended = false;
+ spin_unlock(&chip->suspend_lock);
+
return 0;
}
diff --git a/drivers/power/supply/qcom/qpnp-fg.c b/drivers/power/supply/qcom/qpnp-fg.c
new file mode 100644
index 000000000000..cfd2f64a9bb8
--- /dev/null
+++ b/drivers/power/supply/qcom/qpnp-fg.c
@@ -0,0 +1,7051 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "FG: %s: " fmt, __func__
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+#include <linux/rtc.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/init.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/ktime.h>
+#include <linux/power_supply.h>
+#include <linux/of_batterydata.h>
+#include <linux/string_helpers.h>
+#include <linux/alarmtimer.h>
+#include <linux/qpnp/qpnp-revid.h>
+
+/* Register offsets */
+
+/* Interrupt offsets */
+#define INT_RT_STS(base) (base + 0x10)
+#define INT_EN_CLR(base) (base + 0x16)
+
+/* SPMI Register offsets */
+#define SOC_MONOTONIC_SOC 0x09
+#define SOC_BOOT_MOD 0x50
+#define SOC_RESTART 0x51
+
+#define REG_OFFSET_PERP_SUBTYPE 0x05
+
+/* RAM register offsets */
+#define RAM_OFFSET 0x400
+
+/* Bit/Mask definitions */
+#define FULL_PERCENT 0xFF
+#define MAX_TRIES_SOC 5
+#define MA_MV_BIT_RES 39
+#define MSB_SIGN BIT(7)
+#define IBAT_VBAT_MASK 0x7F
+#define NO_OTP_PROF_RELOAD BIT(6)
+#define REDO_FIRST_ESTIMATE BIT(3)
+#define RESTART_GO BIT(0)
+#define THERM_DELAY_MASK 0xE0
+
+/* SUBTYPE definitions */
+#define FG_SOC 0x9
+#define FG_BATT 0xA
+#define FG_ADC 0xB
+#define FG_MEMIF 0xC
+
+#define QPNP_FG_DEV_NAME "qcom,qpnp-fg"
+#define MEM_IF_TIMEOUT_MS 5000
+#define BUCKET_COUNT 8
+#define BUCKET_SOC_PCT (256 / BUCKET_COUNT)
+
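+/* Convert a BCL current threshold in mA to its ADC code (one ADC LSB corresponds to 976/100 = 9.76 mA) */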
+#define BCL_MA_TO_ADC(_current, _adc_val) { \
+ _adc_val = (u8)((_current) * 100 / 976); \
+}
+
+/* Debug Flag Definitions */
+enum {
+ FG_SPMI_DEBUG_WRITES = BIT(0), /* Show SPMI writes */
+ FG_SPMI_DEBUG_READS = BIT(1), /* Show SPMI reads */
+ FG_IRQS = BIT(2), /* Show interrupts */
+ FG_MEM_DEBUG_WRITES = BIT(3), /* Show SRAM writes */
+ FG_MEM_DEBUG_READS = BIT(4), /* Show SRAM reads */
+ FG_POWER_SUPPLY = BIT(5), /* Show POWER_SUPPLY */
+ FG_STATUS = BIT(6), /* Show FG status changes */
+ FG_AGING = BIT(7), /* Show FG aging algorithm */
+};
+
+/* PMIC REVISIONS */
+#define REVID_RESERVED 0
+#define REVID_VARIANT 1
+#define REVID_ANA_MAJOR 2
+#define REVID_DIG_MAJOR 3
+
+enum dig_major {
+ DIG_REV_1 = 0x1,
+ DIG_REV_2 = 0x2,
+ DIG_REV_3 = 0x3,
+};
+
+enum pmic_subtype {
+ PMI8994 = 10,
+ PMI8950 = 17,
+ PMI8996 = 19,
+ PMI8937 = 55,
+};
+
+enum wa_flags {
+ IADC_GAIN_COMP_WA = BIT(0),
+ USE_CC_SOC_REG = BIT(1),
+ PULSE_REQUEST_WA = BIT(2),
+ BCL_HI_POWER_FOR_CHGLED_WA = BIT(3)
+};
+
+enum current_sense_type {
+ INTERNAL_CURRENT_SENSE,
+ EXTERNAL_CURRENT_SENSE,
+};
+
+struct fg_mem_setting {
+ u16 address;
+ u8 offset;
+ int value;
+};
+
+struct fg_mem_data {
+ u16 address;
+ u8 offset;
+ unsigned int len;
+ int value;
+};
+
+struct fg_learning_data {
+ int64_t cc_uah;
+ int64_t learned_cc_uah;
+ int init_cc_pc_val;
+ bool active;
+ bool feedback_on;
+ struct mutex learning_lock;
+ ktime_t time_stamp;
+ /* configuration properties */
+ int max_start_soc;
+ int max_increment;
+ int max_decrement;
+ int min_temp;
+ int max_temp;
+ int vbat_est_thr_uv;
+};
+
+struct fg_rslow_data {
+ u8 rslow_cfg;
+ u8 rslow_thr;
+ u8 rs_to_rslow[2];
+ u8 rslow_comp[4];
+ uint32_t chg_rs_to_rslow;
+ uint32_t chg_rslow_comp_c1;
+ uint32_t chg_rslow_comp_c2;
+ uint32_t chg_rslow_comp_thr;
+ bool active;
+ struct mutex lock;
+};
+
+struct fg_cyc_ctr_data {
+ bool en;
+ bool started[BUCKET_COUNT];
+ u16 count[BUCKET_COUNT];
+ u8 last_soc[BUCKET_COUNT];
+ int id;
+ struct mutex lock;
+};
+
+struct fg_iadc_comp_data {
+ u8 dfl_gain_reg[2];
+ bool gain_active;
+ int64_t dfl_gain;
+};
+
+struct fg_cc_soc_data {
+ int init_sys_soc;
+ int init_cc_soc;
+ int full_capacity;
+ int delta_soc;
+};
+
+/* FG_MEMIF setting index */
+enum fg_mem_setting_index {
+ FG_MEM_SOFT_COLD = 0,
+ FG_MEM_SOFT_HOT,
+ FG_MEM_HARD_COLD,
+ FG_MEM_HARD_HOT,
+ FG_MEM_RESUME_SOC,
+ FG_MEM_BCL_LM_THRESHOLD,
+ FG_MEM_BCL_MH_THRESHOLD,
+ FG_MEM_TERM_CURRENT,
+ FG_MEM_CHG_TERM_CURRENT,
+ FG_MEM_IRQ_VOLT_EMPTY,
+ FG_MEM_CUTOFF_VOLTAGE,
+ FG_MEM_VBAT_EST_DIFF,
+ FG_MEM_DELTA_SOC,
+ FG_MEM_BATT_LOW,
+ FG_MEM_THERM_DELAY,
+ FG_MEM_SETTING_MAX,
+};
+
+/* FG_MEMIF data index */
+enum fg_mem_data_index {
+ FG_DATA_BATT_TEMP = 0,
+ FG_DATA_OCV,
+ FG_DATA_VOLTAGE,
+ FG_DATA_CURRENT,
+ FG_DATA_BATT_ESR,
+ FG_DATA_BATT_ESR_COUNT,
+ FG_DATA_BATT_SOC,
+ FG_DATA_CC_CHARGE,
+ FG_DATA_VINT_ERR,
+ FG_DATA_CPRED_VOLTAGE,
+ /* values below this only get read once per profile reload */
+ FG_DATA_BATT_ID,
+ FG_DATA_BATT_ID_INFO,
+ FG_DATA_MAX,
+};
+
+#define SETTING(_idx, _address, _offset, _value) \
+ [FG_MEM_##_idx] = { \
+ .address = _address, \
+ .offset = _offset, \
+ .value = _value, \
+ } \
+
+static struct fg_mem_setting settings[FG_MEM_SETTING_MAX] = {
+ /* ID Address, Offset, Value */
+ SETTING(SOFT_COLD, 0x454, 0, 100),
+ SETTING(SOFT_HOT, 0x454, 1, 400),
+ SETTING(HARD_COLD, 0x454, 2, 50),
+ SETTING(HARD_HOT, 0x454, 3, 450),
+ SETTING(RESUME_SOC, 0x45C, 1, 0),
+ SETTING(BCL_LM_THRESHOLD, 0x47C, 2, 50),
+ SETTING(BCL_MH_THRESHOLD, 0x47C, 3, 752),
+ SETTING(TERM_CURRENT, 0x40C, 2, 250),
+ SETTING(CHG_TERM_CURRENT, 0x4F8, 2, 250),
+ SETTING(IRQ_VOLT_EMPTY, 0x458, 3, 3100),
+ SETTING(CUTOFF_VOLTAGE, 0x40C, 0, 3200),
+ SETTING(VBAT_EST_DIFF, 0x000, 0, 30),
+ SETTING(DELTA_SOC, 0x450, 3, 1),
+ SETTING(BATT_LOW, 0x458, 0, 4200),
+ SETTING(THERM_DELAY, 0x4AC, 3, 0),
+};
+
+#define DATA(_idx, _address, _offset, _length, _value) \
+ [FG_DATA_##_idx] = { \
+ .address = _address, \
+ .offset = _offset, \
+ .len = _length, \
+ .value = _value, \
+ } \
+
+static struct fg_mem_data fg_data[FG_DATA_MAX] = {
+ /* ID Address, Offset, Length, Value */
+ DATA(BATT_TEMP, 0x550, 2, 2, -EINVAL),
+ DATA(OCV, 0x588, 3, 2, -EINVAL),
+ DATA(VOLTAGE, 0x5CC, 1, 2, -EINVAL),
+ DATA(CURRENT, 0x5CC, 3, 2, -EINVAL),
+ DATA(BATT_ESR, 0x554, 2, 2, -EINVAL),
+ DATA(BATT_ESR_COUNT, 0x558, 2, 2, -EINVAL),
+ DATA(BATT_SOC, 0x56C, 1, 3, -EINVAL),
+ DATA(CC_CHARGE, 0x570, 0, 4, -EINVAL),
+ DATA(VINT_ERR, 0x560, 0, 4, -EINVAL),
+ DATA(CPRED_VOLTAGE, 0x540, 0, 2, -EINVAL),
+ DATA(BATT_ID, 0x594, 1, 1, -EINVAL),
+ DATA(BATT_ID_INFO, 0x594, 3, 1, -EINVAL),
+};
+
+static int fg_debug_mask;
+module_param_named(
+ debug_mask, fg_debug_mask, int, S_IRUSR | S_IWUSR
+);
+
+static int fg_sense_type = -EINVAL;
+static int fg_restart;
+
+static int fg_est_dump;
+module_param_named(
+ first_est_dump, fg_est_dump, int, S_IRUSR | S_IWUSR
+);
+
+static char *fg_batt_type;
+module_param_named(
+ battery_type, fg_batt_type, charp, S_IRUSR | S_IWUSR
+);
+
+static int fg_sram_update_period_ms = 30000;
+module_param_named(
+ sram_update_period_ms, fg_sram_update_period_ms, int, S_IRUSR | S_IWUSR
+);
+
+struct fg_irq {
+ int irq;
+ unsigned long disabled;
+};
+
+enum fg_soc_irq {
+ HIGH_SOC,
+ LOW_SOC,
+ FULL_SOC,
+ EMPTY_SOC,
+ DELTA_SOC,
+ FIRST_EST_DONE,
+ SW_FALLBK_OCV,
+ SW_FALLBK_NEW_BATT,
+ FG_SOC_IRQ_COUNT,
+};
+
+enum fg_batt_irq {
+ JEITA_SOFT_COLD,
+ JEITA_SOFT_HOT,
+ VBATT_LOW,
+ BATT_IDENTIFIED,
+ BATT_ID_REQ,
+ BATTERY_UNKNOWN,
+ BATT_MISSING,
+ BATT_MATCH,
+ FG_BATT_IRQ_COUNT,
+};
+
+enum fg_mem_if_irq {
+ FG_MEM_AVAIL,
+ TA_RCVRY_SUG,
+ FG_MEM_IF_IRQ_COUNT,
+};
+
+enum fg_batt_aging_mode {
+ FG_AGING_NONE,
+ FG_AGING_ESR,
+ FG_AGING_CC,
+};
+
+enum register_type {
+ MEM_INTF_CFG,
+ MEM_INTF_CTL,
+ MEM_INTF_ADDR_LSB,
+ MEM_INTF_RD_DATA0,
+ MEM_INTF_WR_DATA0,
+ MAX_ADDRESS,
+};
+
+struct register_offset {
+ u16 address[MAX_ADDRESS];
+};
+
+static struct register_offset offset[] = {
+ [0] = {
+ /* CFG CTL LSB RD0 WD0 */
+ .address = {0x40, 0x41, 0x42, 0x4C, 0x48},
+ },
+ [1] = {
+ /* CFG CTL LSB RD0 WD0 */
+ .address = {0x50, 0x51, 0x61, 0x67, 0x63},
+ },
+};
+
+#define MEM_INTF_CFG(chip) \
+ ((chip)->mem_base + (chip)->offset[MEM_INTF_CFG])
+#define MEM_INTF_CTL(chip) \
+ ((chip)->mem_base + (chip)->offset[MEM_INTF_CTL])
+#define MEM_INTF_ADDR_LSB(chip) \
+ ((chip)->mem_base + (chip)->offset[MEM_INTF_ADDR_LSB])
+#define MEM_INTF_RD_DATA0(chip) \
+ ((chip)->mem_base + (chip)->offset[MEM_INTF_RD_DATA0])
+#define MEM_INTF_WR_DATA0(chip) \
+ ((chip)->mem_base + (chip)->offset[MEM_INTF_WR_DATA0])
+
+struct fg_wakeup_source {
+ struct wakeup_source source;
+ unsigned long enabled;
+};
+
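+/* Take the wakeup source only if it is not already held; the enabled bit keeps __pm_stay_awake/__pm_relax balanced */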
+static void fg_stay_awake(struct fg_wakeup_source *source)
+{
+ if (!__test_and_set_bit(0, &source->enabled)) {
+ __pm_stay_awake(&source->source);
+ pr_debug("enabled source %s\n", source->source.name);
+ }
+}
+
+static void fg_relax(struct fg_wakeup_source *source)
+{
+ if (__test_and_clear_bit(0, &source->enabled)) {
+ __pm_relax(&source->source);
+ pr_debug("disabled source %s\n", source->source.name);
+ }
+}
+
+#define THERMAL_COEFF_N_BYTES 6
+struct fg_chip {
+ struct device *dev;
+ struct platform_device *pdev;
+ struct regmap *regmap;
+ u8 pmic_subtype;
+ u8 pmic_revision[4];
+ u8 revision[4];
+ u16 soc_base;
+ u16 batt_base;
+ u16 mem_base;
+ u16 vbat_adc_addr;
+ u16 ibat_adc_addr;
+ u16 tp_rev_addr;
+ u32 wa_flag;
+ atomic_t memif_user_cnt;
+ struct fg_irq soc_irq[FG_SOC_IRQ_COUNT];
+ struct fg_irq batt_irq[FG_BATT_IRQ_COUNT];
+ struct fg_irq mem_irq[FG_MEM_IF_IRQ_COUNT];
+ struct completion sram_access_granted;
+ struct completion sram_access_revoked;
+ struct completion batt_id_avail;
+ struct completion first_soc_done;
+ struct power_supply *bms_psy;
+ struct power_supply_desc bms_psy_d;
+ struct mutex rw_lock;
+ struct mutex sysfs_restart_lock;
+ struct delayed_work batt_profile_init;
+ struct work_struct dump_sram;
+ struct work_struct status_change_work;
+ struct work_struct cycle_count_work;
+ struct work_struct battery_age_work;
+ struct work_struct update_esr_work;
+ struct work_struct set_resume_soc_work;
+ struct work_struct rslow_comp_work;
+ struct work_struct sysfs_restart_work;
+ struct work_struct init_work;
+ struct work_struct charge_full_work;
+ struct work_struct gain_comp_work;
+ struct work_struct bcl_hi_power_work;
+ struct power_supply *batt_psy;
+ struct power_supply *usb_psy;
+ struct power_supply *dc_psy;
+ struct fg_wakeup_source memif_wakeup_source;
+ struct fg_wakeup_source profile_wakeup_source;
+ struct fg_wakeup_source empty_check_wakeup_source;
+ struct fg_wakeup_source resume_soc_wakeup_source;
+ struct fg_wakeup_source gain_comp_wakeup_source;
+ struct fg_wakeup_source capacity_learning_wakeup_source;
+ bool first_profile_loaded;
+ struct fg_wakeup_source update_temp_wakeup_source;
+ struct fg_wakeup_source update_sram_wakeup_source;
+ bool fg_restarting;
+ bool profile_loaded;
+ bool use_otp_profile;
+ bool battery_missing;
+ bool power_supply_registered;
+ bool sw_rbias_ctrl;
+ bool use_thermal_coefficients;
+ bool esr_strict_filter;
+ bool soc_empty;
+ bool charge_done;
+ bool resume_soc_lowered;
+ bool vbat_low_irq_enabled;
+ bool charge_full;
+ bool hold_soc_while_full;
+ bool input_present;
+ bool otg_present;
+ bool safety_timer_expired;
+ bool bad_batt_detection_en;
+ bool bcl_lpm_disabled;
+ bool charging_disabled;
+ struct delayed_work update_jeita_setting;
+ struct delayed_work update_sram_data;
+ struct delayed_work update_temp_work;
+ struct delayed_work check_empty_work;
+ char *batt_profile;
+ u8 thermal_coefficients[THERMAL_COEFF_N_BYTES];
+ u32 cc_cv_threshold_mv;
+ unsigned int batt_profile_len;
+ unsigned int batt_max_voltage_uv;
+ const char *batt_type;
+ const char *batt_psy_name;
+ unsigned long last_sram_update_time;
+ unsigned long last_temp_update_time;
+ int64_t ocv_coeffs[12];
+ int64_t cutoff_voltage;
+ int evaluation_current;
+ int ocv_junction_p1p2;
+ int ocv_junction_p2p3;
+ int nom_cap_uah;
+ int actual_cap_uah;
+ int status;
+ int prev_status;
+ int health;
+ enum fg_batt_aging_mode batt_aging_mode;
+ /* capacity learning */
+ struct fg_learning_data learning_data;
+ struct alarm fg_cap_learning_alarm;
+ struct work_struct fg_cap_learning_work;
+ struct fg_cc_soc_data sw_cc_soc_data;
+ /* rslow compensation */
+ struct fg_rslow_data rslow_comp;
+ /* cycle counter */
+ struct fg_cyc_ctr_data cyc_ctr;
+ /* iadc compensation */
+ struct fg_iadc_comp_data iadc_comp_data;
+ /* interleaved memory access */
+ u16 *offset;
+ bool ima_supported;
+ bool init_done;
+ /* jeita hysteresis */
+ bool jeita_hysteresis_support;
+ bool batt_hot;
+ bool batt_cold;
+ int cold_hysteresis;
+ int hot_hysteresis;
+ /* ESR pulse tuning */
+ struct fg_wakeup_source esr_extract_wakeup_source;
+ struct work_struct esr_extract_config_work;
+ bool esr_extract_disabled;
+ bool imptr_pulse_slow_en;
+ bool esr_pulse_tune_en;
+};
+
+/* FG_MEMIF DEBUGFS structures */
+#define ADDR_LEN 4 /* 3 byte address + 1 space character */
+#define CHARS_PER_ITEM 3 /* Format is 'XX ' */
+#define ITEMS_PER_LINE 4 /* 4 data items per line */
+#define MAX_LINE_LENGTH (ADDR_LEN + (ITEMS_PER_LINE * CHARS_PER_ITEM) + 1)
+#define MAX_REG_PER_TRANSACTION (8)
+
+static const char *DFS_ROOT_NAME = "fg_memif";
+static const mode_t DFS_MODE = S_IRUSR | S_IWUSR;
+static const char *default_batt_type = "Unknown Battery";
+static const char *loading_batt_type = "Loading Battery Data";
+static const char *missing_batt_type = "Disconnected Battery";
+
+/* Log buffer */
+struct fg_log_buffer {
+ size_t rpos; /* Current 'read' position in buffer */
+ size_t wpos; /* Current 'write' position in buffer */
+ size_t len; /* Length of the buffer */
+ char data[0]; /* Log buffer */
+};
+
+/* transaction parameters */
+struct fg_trans {
+ u32 cnt; /* Number of bytes to read */
+ u16 addr; /* 12-bit address in SRAM */
+ u32 offset; /* Offset of last read data + byte offset */
+ struct fg_chip *chip;
+ struct fg_log_buffer *log; /* log buffer */
+ u8 *data; /* fg data that is read */
+ struct mutex memif_dfs_lock; /* Prevent thread concurrency */
+};
+
+struct fg_dbgfs {
+ u32 cnt;
+ u32 addr;
+ struct fg_chip *chip;
+ struct dentry *root;
+ struct mutex lock;
+ struct debugfs_blob_wrapper help_msg;
+};
+
+static struct fg_dbgfs dbgfs_data = {
+ .lock = __MUTEX_INITIALIZER(dbgfs_data.lock),
+ .help_msg = {
+ .data =
+"FG Debug-FS support\n"
+"\n"
+"Hierarchy schema:\n"
+"/sys/kernel/debug/fg_memif\n"
+" /help -- Static help text\n"
+" /address -- Starting register address for reads or writes\n"
+" /count -- Number of registers to read (only used for reads)\n"
+" /data -- Initiates the SRAM read (formatted output)\n"
+"\n",
+ },
+};
+
+static const struct of_device_id fg_match_table[] = {
+ { .compatible = QPNP_FG_DEV_NAME, },
+ {}
+};
+
+static char *fg_supplicants[] = {
+ "battery",
+ "bcl",
+ "fg_adc"
+};
+
+#define DEBUG_PRINT_BUFFER_SIZE 64
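+/* Format buf as space-separated hex bytes (e.g. "0A 1B 2C") into str */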
+static void fill_string(char *str, size_t str_len, u8 *buf, int buf_len)
+{
+ int pos = 0;
+ int i;
+
+ for (i = 0; i < buf_len; i++) {
+ pos += scnprintf(str + pos, str_len - pos, "%02X", buf[i]);
+ if (i < buf_len - 1)
+ pos += scnprintf(str + pos, str_len - pos, " ");
+ }
+}
+
+static int fg_write(struct fg_chip *chip, u8 *val, u16 addr, int len)
+{
+ int rc = 0;
+ struct platform_device *pdev = chip->pdev;
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+
+ if ((addr & 0xff00) == 0) {
+ pr_err("addr cannot be zero base=0x%02x sid=0x%02x rc=%d\n",
+ addr, to_spmi_device(pdev->dev.parent)->usid, rc);
+ return -EINVAL;
+ }
+
+ rc = regmap_bulk_write(chip->regmap, addr, val, len);
+ if (rc) {
+ pr_err("write failed addr=0x%02x sid=0x%02x rc=%d\n",
+ addr, to_spmi_device(pdev->dev.parent)->usid, rc);
+ return rc;
+ }
+
+ if (!rc && (fg_debug_mask & FG_SPMI_DEBUG_WRITES)) {
+ str[0] = '\0';
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, len);
+ pr_info("write(0x%04X), sid=%d, len=%d; %s\n",
+ addr, to_spmi_device(pdev->dev.parent)->usid, len,
+ str);
+ }
+
+ return rc;
+}
+
+static int fg_read(struct fg_chip *chip, u8 *val, u16 addr, int len)
+{
+ int rc = 0;
+ struct platform_device *pdev = chip->pdev;
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+
+ if ((addr & 0xff00) == 0) {
+ pr_err("base cannot be zero base=0x%02x sid=0x%02x rc=%d\n",
+ addr, to_spmi_device(pdev->dev.parent)->usid, rc);
+ return -EINVAL;
+ }
+
+ rc = regmap_bulk_read(chip->regmap, addr, val, len);
+ if (rc) {
+ pr_err("SPMI read failed base=0x%02x sid=0x%02x rc=%d\n", addr,
+ to_spmi_device(pdev->dev.parent)->usid, rc);
+ return rc;
+ }
+
+ if (!rc && (fg_debug_mask & FG_SPMI_DEBUG_READS)) {
+ str[0] = '\0';
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, len);
+ pr_info("read(0x%04x), sid=%d, len=%d; %s\n",
+ addr, to_spmi_device(pdev->dev.parent)->usid, len,
+ str);
+ }
+
+ return rc;
+}
+
+static int fg_masked_write(struct fg_chip *chip, u16 addr,
+ u8 mask, u8 val, int len)
+{
+ int rc;
+
+ rc = regmap_update_bits(chip->regmap, addr, mask, val);
+ if (rc) {
+ pr_err("spmi write failed: addr=%03X, rc=%d\n", addr, rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+#define RIF_MEM_ACCESS_REQ BIT(7)
+static int fg_check_rif_mem_access(struct fg_chip *chip, bool *status)
+{
+ int rc;
+ u8 mem_if_sts;
+
+ rc = fg_read(chip, &mem_if_sts, MEM_INTF_CFG(chip), 1);
+ if (rc) {
+ pr_err("failed to read rif_mem status rc=%d\n", rc);
+ return rc;
+ }
+
+ *status = mem_if_sts & RIF_MEM_ACCESS_REQ;
+ return 0;
+}
+
+static bool fg_check_sram_access(struct fg_chip *chip)
+{
+ int rc;
+ u8 mem_if_sts;
+ bool rif_mem_sts = false;
+
+ rc = fg_read(chip, &mem_if_sts, INT_RT_STS(chip->mem_base), 1);
+ if (rc) {
+ pr_err("failed to read mem status rc=%d\n", rc);
+ return false;
+ }
+
+ if ((mem_if_sts & BIT(FG_MEM_AVAIL)) == 0)
+ return false;
+
+ rc = fg_check_rif_mem_access(chip, &rif_mem_sts);
+ if (rc)
+ return false;
+
+ return rif_mem_sts;
+}
+
+static inline int fg_assert_sram_access(struct fg_chip *chip)
+{
+ int rc;
+ u8 mem_if_sts;
+
+ rc = fg_read(chip, &mem_if_sts, INT_RT_STS(chip->mem_base), 1);
+ if (rc) {
+ pr_err("failed to read mem status rc=%d\n", rc);
+ return rc;
+ }
+
+ if ((mem_if_sts & BIT(FG_MEM_AVAIL)) == 0) {
+ pr_err("mem_avail not high: %02x\n", mem_if_sts);
+ return -EINVAL;
+ }
+
+ rc = fg_read(chip, &mem_if_sts, MEM_INTF_CFG(chip), 1);
+ if (rc) {
+ pr_err("failed to read mem status rc=%d\n", rc);
+ return rc;
+ }
+
+ if ((mem_if_sts & RIF_MEM_ACCESS_REQ) == 0) {
+ pr_err("mem_avail not high: %02x\n", mem_if_sts);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define INTF_CTL_BURST BIT(7)
+#define INTF_CTL_WR_EN BIT(6)
+static int fg_config_access(struct fg_chip *chip, bool write,
+ bool burst)
+{
+ int rc;
+ u8 intf_ctl = 0;
+
+ intf_ctl = (write ? INTF_CTL_WR_EN : 0) | (burst ? INTF_CTL_BURST : 0);
+
+ rc = fg_write(chip, &intf_ctl, MEM_INTF_CTL(chip), 1);
+ if (rc) {
+ pr_err("failed to set mem access bit\n");
+ return -EIO;
+ }
+
+ return rc;
+}
+
+static int fg_req_and_wait_access(struct fg_chip *chip, int timeout)
+{
+ int rc = 0, ret = 0;
+ bool tried_again = false;
+
+ if (!fg_check_sram_access(chip)) {
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip),
+ RIF_MEM_ACCESS_REQ, RIF_MEM_ACCESS_REQ, 1);
+ if (rc) {
+ pr_err("failed to set mem access bit\n");
+ return -EIO;
+ }
+ fg_stay_awake(&chip->memif_wakeup_source);
+ }
+
+wait:
+ /* Wait for MEM_AVAIL IRQ. */
+ ret = wait_for_completion_interruptible_timeout(
+ &chip->sram_access_granted,
+ msecs_to_jiffies(timeout));
+ /* If we were interrupted wait again one more time. */
+ if (ret == -ERESTARTSYS && !tried_again) {
+ tried_again = true;
+ goto wait;
+ } else if (ret <= 0) {
+ rc = -ETIMEDOUT;
+ pr_err("transaction timed out rc=%d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int fg_release_access(struct fg_chip *chip)
+{
+ int rc;
+
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip),
+ RIF_MEM_ACCESS_REQ, 0, 1);
+ fg_relax(&chip->memif_wakeup_source);
+ reinit_completion(&chip->sram_access_granted);
+
+ return rc;
+}
+
+static void fg_release_access_if_necessary(struct fg_chip *chip)
+{
+ mutex_lock(&chip->rw_lock);
+ if (atomic_sub_return(1, &chip->memif_user_cnt) <= 0) {
+ fg_release_access(chip);
+ }
+ mutex_unlock(&chip->rw_lock);
+}
+
+/*
+ * fg_mem_lock prevents the fuel gauge driver from releasing SRAM access.
+ *
+ * An equal number of calls to fg_mem_release must be made before the driver
+ * will release SRAM access again.
+ */
+static void fg_mem_lock(struct fg_chip *chip)
+{
+ mutex_lock(&chip->rw_lock);
+ atomic_add_return(1, &chip->memif_user_cnt);
+ mutex_unlock(&chip->rw_lock);
+}
+
+static void fg_mem_release(struct fg_chip *chip)
+{
+ fg_release_access_if_necessary(chip);
+}
+
+static int fg_set_ram_addr(struct fg_chip *chip, u16 *address)
+{
+ int rc;
+
+ rc = fg_write(chip, (u8 *) address,
+ chip->mem_base + chip->offset[MEM_INTF_ADDR_LSB], 2);
+ if (rc) {
+ pr_err("spmi write failed: addr=%03X, rc=%d\n",
+ chip->mem_base + chip->offset[MEM_INTF_ADDR_LSB], rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+#define BUF_LEN 4
+static int fg_sub_mem_read(struct fg_chip *chip, u8 *val, u16 address, int len,
+ int offset)
+{
+ int rc, total_len;
+ u8 *rd_data = val;
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+
+ rc = fg_config_access(chip, 0, (len > 4));
+ if (rc)
+ return rc;
+
+ rc = fg_set_ram_addr(chip, &address);
+ if (rc)
+ return rc;
+
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("length %d addr=%02X\n", len, address);
+
+ total_len = len;
+ while (len > 0) {
+ if (!offset) {
+ rc = fg_read(chip, rd_data, MEM_INTF_RD_DATA0(chip),
+ min(len, BUF_LEN));
+ } else {
+ rc = fg_read(chip, rd_data,
+ MEM_INTF_RD_DATA0(chip) + offset,
+ min(len, BUF_LEN - offset));
+
+ /* manually set address to allow continuous reads */
+ address += BUF_LEN;
+
+ rc = fg_set_ram_addr(chip, &address);
+ if (rc)
+ return rc;
+ }
+ if (rc) {
+ pr_err("spmi read failed: addr=%03x, rc=%d\n",
+ MEM_INTF_RD_DATA0(chip) + offset, rc);
+ return rc;
+ }
+ rd_data += (BUF_LEN - offset);
+ len -= (BUF_LEN - offset);
+ offset = 0;
+ }
+
+ if (fg_debug_mask & FG_MEM_DEBUG_READS) {
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, total_len);
+ pr_info("data: %s\n", str);
+ }
+ return rc;
+}
+
+static int fg_conventional_mem_read(struct fg_chip *chip, u8 *val, u16 address,
+ int len, int offset, bool keep_access)
+{
+ int rc = 0, user_cnt = 0, orig_address = address;
+
+ if (offset > 3) {
+ pr_err("offset too large %d\n", offset);
+ return -EINVAL;
+ }
+
+ address = ((orig_address + offset) / 4) * 4;
+ offset = (orig_address + offset) % 4;
+
+ user_cnt = atomic_add_return(1, &chip->memif_user_cnt);
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("user_cnt %d\n", user_cnt);
+ mutex_lock(&chip->rw_lock);
+ if (!fg_check_sram_access(chip)) {
+ rc = fg_req_and_wait_access(chip, MEM_IF_TIMEOUT_MS);
+ if (rc)
+ goto out;
+ }
+
+ rc = fg_sub_mem_read(chip, val, address, len, offset);
+
+out:
+ user_cnt = atomic_sub_return(1, &chip->memif_user_cnt);
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("user_cnt %d\n", user_cnt);
+
+ fg_assert_sram_access(chip);
+
+ if (!keep_access && (user_cnt == 0) && !rc) {
+ rc = fg_release_access(chip);
+ if (rc) {
+ pr_err("failed to set mem access bit\n");
+ rc = -EIO;
+ }
+ }
+
+ mutex_unlock(&chip->rw_lock);
+ return rc;
+}
+
+static int fg_conventional_mem_write(struct fg_chip *chip, u8 *val, u16 address,
+ int len, int offset, bool keep_access)
+{
+ int rc = 0, user_cnt = 0, sublen, orig_address = address;
+ bool access_configured = false;
+ u8 *wr_data = val, word[4];
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+
+ if (address < RAM_OFFSET)
+ return -EINVAL;
+
+ if (offset > 3)
+ return -EINVAL;
+
+ address = ((orig_address + offset) / 4) * 4;
+ offset = (orig_address + offset) % 4;
+
+ user_cnt = atomic_add_return(1, &chip->memif_user_cnt);
+ if (fg_debug_mask & FG_MEM_DEBUG_WRITES)
+ pr_info("user_cnt %d\n", user_cnt);
+ mutex_lock(&chip->rw_lock);
+ if (!fg_check_sram_access(chip)) {
+ rc = fg_req_and_wait_access(chip, MEM_IF_TIMEOUT_MS);
+ if (rc)
+ goto out;
+ }
+
+ if (fg_debug_mask & FG_MEM_DEBUG_WRITES) {
+ pr_info("length %d addr=%02X offset=%d\n",
+ len, address, offset);
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, wr_data, len);
+ pr_info("writing: %s\n", str);
+ }
+
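+
+ /*
+ * SRAM is written in 4-byte words. Unaligned leading/trailing bytes
+ * are handled with a read-modify-write of the containing word, while
+ * aligned full words are written directly.
+ */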
+ while (len > 0) {
+ if (offset != 0) {
+ sublen = min(4 - offset, len);
+ rc = fg_sub_mem_read(chip, word, address, 4, 0);
+ if (rc)
+ goto out;
+ memcpy(word + offset, wr_data, sublen);
+ /* configure access as burst if more to write */
+ rc = fg_config_access(chip, 1, (len - sublen) > 0);
+ if (rc)
+ goto out;
+ rc = fg_set_ram_addr(chip, &address);
+ if (rc)
+ goto out;
+ offset = 0;
+ access_configured = true;
+ } else if (len >= 4) {
+ if (!access_configured) {
+ rc = fg_config_access(chip, 1, len > 4);
+ if (rc)
+ goto out;
+ rc = fg_set_ram_addr(chip, &address);
+ if (rc)
+ goto out;
+ access_configured = true;
+ }
+ sublen = 4;
+ memcpy(word, wr_data, 4);
+ } else if (len > 0 && len < 4) {
+ sublen = len;
+ rc = fg_sub_mem_read(chip, word, address, 4, 0);
+ if (rc)
+ goto out;
+ memcpy(word, wr_data, sublen);
+ rc = fg_config_access(chip, 1, 0);
+ if (rc)
+ goto out;
+ rc = fg_set_ram_addr(chip, &address);
+ if (rc)
+ goto out;
+ access_configured = true;
+ } else {
+ pr_err("Invalid length: %d\n", len);
+ break;
+ }
+ rc = fg_write(chip, word, MEM_INTF_WR_DATA0(chip), 4);
+ if (rc) {
+ pr_err("spmi write failed: addr=%03x, rc=%d\n",
+ MEM_INTF_WR_DATA0(chip), rc);
+ goto out;
+ }
+ len -= sublen;
+ wr_data += sublen;
+ address += 4;
+ }
+
+out:
+ user_cnt = atomic_sub_return(1, &chip->memif_user_cnt);
+ if (fg_debug_mask & FG_MEM_DEBUG_WRITES)
+ pr_info("user_cnt %d\n", user_cnt);
+
+ fg_assert_sram_access(chip);
+
+ if (!keep_access && (user_cnt == 0) && !rc) {
+ rc = fg_release_access(chip);
+ if (rc) {
+ pr_err("failed to set mem access bit\n");
+ rc = -EIO;
+ }
+ }
+
+ mutex_unlock(&chip->rw_lock);
+ return rc;
+}
+
+#define MEM_INTF_IMA_CFG 0x52
+#define MEM_INTF_IMA_OPR_STS 0x54
+#define MEM_INTF_IMA_ERR_STS 0x5F
+#define MEM_INTF_IMA_EXP_STS 0x55
+#define MEM_INTF_IMA_HW_STS 0x56
+#define MEM_INTF_IMA_BYTE_EN 0x60
+#define IMA_ADDR_STBL_ERR BIT(7)
+#define IMA_WR_ACS_ERR BIT(6)
+#define IMA_RD_ACS_ERR BIT(5)
+#define IMA_IACS_CLR BIT(2)
+#define IMA_IACS_RDY BIT(1)
+static int fg_check_ima_exception(struct fg_chip *chip)
+{
+ int rc = 0, ret = 0;
+ u8 err_sts, exp_sts = 0, hw_sts = 0;
+
+ rc = fg_read(chip, &err_sts,
+ chip->mem_base + MEM_INTF_IMA_ERR_STS, 1);
+ if (rc) {
+ pr_err("failed to read beat count rc=%d\n", rc);
+ return rc;
+ }
+
+ if (err_sts & (IMA_ADDR_STBL_ERR | IMA_WR_ACS_ERR | IMA_RD_ACS_ERR)) {
+ u8 temp;
+
+ fg_read(chip, &exp_sts,
+ chip->mem_base + MEM_INTF_IMA_EXP_STS, 1);
+ fg_read(chip, &hw_sts,
+ chip->mem_base + MEM_INTF_IMA_HW_STS, 1);
+ pr_err("IMA access failed ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n",
+ err_sts, exp_sts, hw_sts);
+ rc = err_sts;
+
+ /* clear the error */
+ ret |= fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
+ IMA_IACS_CLR, IMA_IACS_CLR, 1);
+ temp = 0x4;
+ ret |= fg_write(chip, &temp, MEM_INTF_ADDR_LSB(chip) + 1, 1);
+ temp = 0x0;
+ ret |= fg_write(chip, &temp, MEM_INTF_WR_DATA0(chip) + 3, 1);
+ ret |= fg_read(chip, &temp, MEM_INTF_RD_DATA0(chip) + 3, 1);
+ ret |= fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
+ IMA_IACS_CLR, 0, 1);
+ if (!ret)
+ return -EAGAIN;
+ else
+ pr_err("Error clearing IMA exception ret=%d\n", ret);
+ }
+
+ return rc;
+}
+
+static int fg_check_iacs_ready(struct fg_chip *chip)
+{
+ int rc = 0, timeout = 250;
+ u8 ima_opr_sts = 0;
+
+ /*
+ * Additional delay to make sure IACS ready bit is set after
+ * Read/Write operation.
+ */
+
+ usleep_range(30, 35);
+ while (1) {
+ rc = fg_read(chip, &ima_opr_sts,
+ chip->mem_base + MEM_INTF_IMA_OPR_STS, 1);
+ if (!rc && (ima_opr_sts & IMA_IACS_RDY)) {
+ break;
+ } else {
+ if (!(--timeout) || rc)
+ break;
+ /* delay for iacs_ready to be asserted */
+ usleep_range(5000, 7000);
+ }
+ }
+
+ if (!timeout || rc) {
+ pr_err("IACS_RDY not set\n");
+ /* perform IACS_CLR sequence */
+ fg_check_ima_exception(chip);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+#define IACS_SLCT BIT(5)
+static int __fg_interleaved_mem_write(struct fg_chip *chip, u8 *val,
+ u16 address, int offset, int len)
+{
+ int rc = 0, i;
+ u8 *word = val, byte_enable = 0, num_bytes = 0;
+
+ if (fg_debug_mask & FG_MEM_DEBUG_WRITES)
+ pr_info("length %d addr=%02X offset=%d\n",
+ len, address, offset);
+
+ while (len > 0) {
+ num_bytes = (offset + len) > BUF_LEN ?
+ (BUF_LEN - offset) : len;
+ /* build the byte-enable mask for the bytes being written */
+ for (i = offset; i < (offset + num_bytes); i++)
+ byte_enable |= BIT(i);
+
+ rc = fg_write(chip, &byte_enable,
+ chip->mem_base + MEM_INTF_IMA_BYTE_EN, 1);
+ if (rc) {
+ pr_err("Unable to write to byte_en_reg rc=%d\n",
+ rc);
+ return rc;
+ }
+ /* write data */
+ rc = fg_write(chip, word, MEM_INTF_WR_DATA0(chip) + offset,
+ num_bytes);
+ if (rc) {
+ pr_err("spmi write failed: addr=%03x, rc=%d\n",
+ MEM_INTF_WR_DATA0(chip) + offset, rc);
+ return rc;
+ }
+ /*
+ * The last-byte WR_DATA3 starts the write transaction.
+ * Write a dummy value to WR_DATA3 if it does not have
+ * valid data. This dummy data is not written to the
+ * SRAM as byte_en for WR_DATA3 is not set.
+ */
+ if (!(byte_enable & BIT(3))) {
+ u8 dummy_byte = 0x0;
+ rc = fg_write(chip, &dummy_byte,
+ MEM_INTF_WR_DATA0(chip) + 3, 1);
+ if (rc) {
+ pr_err("Unable to write dummy-data to WR_DATA3 rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = fg_check_iacs_ready(chip);
+ if (rc) {
+ pr_debug("IACS_RDY failed rc=%d\n", rc);
+ return rc;
+ }
+
+ /* check for error condition */
+ rc = fg_check_ima_exception(chip);
+ if (rc) {
+ pr_err("IMA transaction failed rc=%d", rc);
+ return rc;
+ }
+
+ word += num_bytes;
+ len -= num_bytes;
+ offset = byte_enable = 0;
+ }
+
+ return rc;
+}
+
+static int __fg_interleaved_mem_read(struct fg_chip *chip, u8 *val, u16 address,
+ int offset, int len)
+{
+ int rc = 0, total_len;
+ u8 *rd_data = val, num_bytes;
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("length %d addr=%02X\n", len, address);
+
+ total_len = len;
+ while (len > 0) {
+ num_bytes = (offset + len) > BUF_LEN ? (BUF_LEN - offset) : len;
+ rc = fg_read(chip, rd_data, MEM_INTF_RD_DATA0(chip) + offset,
+ num_bytes);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03x, rc=%d\n",
+ MEM_INTF_RD_DATA0(chip) + offset, rc);
+ return rc;
+ }
+
+ rd_data += num_bytes;
+ len -= num_bytes;
+ offset = 0;
+
+ rc = fg_check_iacs_ready(chip);
+ if (rc) {
+ pr_debug("IACS_RDY failed rc=%d\n", rc);
+ return rc;
+ }
+
+ /* check for error condition */
+ rc = fg_check_ima_exception(chip);
+ if (rc) {
+ pr_err("IMA transaction failed rc=%d", rc);
+ return rc;
+ }
+
+ if (len && (len + offset) < BUF_LEN) {
+ /* move to single mode */
+ u8 intr_ctl = 0;
+
+ rc = fg_write(chip, &intr_ctl, MEM_INTF_CTL(chip), 1);
+ if (rc) {
+ pr_err("failed to move to single mode rc=%d\n",
+ rc);
+ return -EIO;
+ }
+ }
+ }
+
+ if (fg_debug_mask & FG_MEM_DEBUG_READS) {
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, total_len);
+ pr_info("data: %s\n", str);
+ }
+
+ return rc;
+}
+
+#define IMA_REQ_ACCESS (IACS_SLCT | RIF_MEM_ACCESS_REQ)
+static int fg_interleaved_mem_config(struct fg_chip *chip, u8 *val,
+ u16 address, int len, int offset, int op)
+{
+ int rc = 0;
+ bool rif_mem_sts = true;
+ int time_count = 0;
+
+ while (1) {
+ rc = fg_check_rif_mem_access(chip, &rif_mem_sts);
+ if (rc)
+ return rc;
+
+ if (!rif_mem_sts)
+ break;
+
+ if (fg_debug_mask & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES))
+ pr_info("RIF_MEM_ACCESS_REQ is not clear yet for IMA_%s\n",
+ op ? "write" : "read");
+
+ /*
+ * Try this no more than 4 times. If RIF_MEM_ACCESS_REQ is not
+ * clear, then return an error instead of waiting for it again.
+ */
+ if (time_count > 4) {
+ pr_err("Waited for 1.5 seconds polling RIF_MEM_ACCESS_REQ\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Wait for 4ms before reading RIF_MEM_ACCESS_REQ again */
+ usleep_range(4000, 4100);
+ time_count++;
+ }
+
+ /* configure for IMA access */
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip),
+ IMA_REQ_ACCESS, IMA_REQ_ACCESS, 1);
+ if (rc) {
+ pr_err("failed to set mem access bit rc = %d\n", rc);
+ return rc;
+ }
+
+ /* configure for the read/write single/burst mode */
+ rc = fg_config_access(chip, op, (offset + len) > 4);
+ if (rc) {
+ pr_err("failed to set configure memory access rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = fg_check_iacs_ready(chip);
+ if (rc) {
+ pr_debug("IACS_RDY failed rc=%d\n", rc);
+ return rc;
+ }
+
+ /* write addresses to the register */
+ rc = fg_set_ram_addr(chip, &address);
+ if (rc) {
+ pr_err("failed to set SRAM address rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = fg_check_iacs_ready(chip);
+ if (rc)
+ pr_debug("IACS_RDY failed rc=%d\n", rc);
+
+ return rc;
+}
+
+#define MEM_INTF_FG_BEAT_COUNT 0x57
+#define BEAT_COUNT_MASK 0x0F
+#define RETRY_COUNT 3
+static int fg_interleaved_mem_read(struct fg_chip *chip, u8 *val, u16 address,
+ int len, int offset)
+{
+ int rc = 0, orig_address = address;
+ u8 start_beat_count, end_beat_count, count = 0;
+ bool retry = false;
+
+ if (offset > 3) {
+ pr_err("offset too large %d\n", offset);
+ return -EINVAL;
+ }
+
+ fg_stay_awake(&chip->memif_wakeup_source);
+ address = ((orig_address + offset) / 4) * 4;
+ offset = (orig_address + offset) % 4;
+
+ if (address < RAM_OFFSET) {
+ /*
+ * OTP memory reads need a conventional memory access, do a
+ * conventional read when SRAM offset < RAM_OFFSET.
+ */
+ rc = fg_conventional_mem_read(chip, val, address, len, offset,
+ 0);
+ if (rc)
+ pr_err("Failed to read OTP memory %d\n", rc);
+ goto exit;
+ }
+
+ mutex_lock(&chip->rw_lock);
+
+retry:
+ rc = fg_interleaved_mem_config(chip, val, address, len, offset, 0);
+ if (rc) {
+ pr_err("failed to configure SRAM for IMA rc = %d\n", rc);
+ goto out;
+ }
+
+ /* read the start beat count */
+ rc = fg_read(chip, &start_beat_count,
+ chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1);
+ if (rc) {
+ pr_err("failed to read beat count rc=%d\n", rc);
+ goto out;
+ }
+
+ /* read data */
+ rc = __fg_interleaved_mem_read(chip, val, address, offset, len);
+ if (rc) {
+ if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
+ count++;
+ pr_err("IMA access failed retry_count = %d\n", count);
+ goto retry;
+ } else {
+ pr_err("failed to read SRAM address rc = %d\n", rc);
+ goto out;
+ }
+ }
+
+ /* read the end beat count */
+ rc = fg_read(chip, &end_beat_count,
+ chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1);
+ if (rc) {
+ pr_err("failed to read beat count rc=%d\n", rc);
+ goto out;
+ }
+
+ start_beat_count &= BEAT_COUNT_MASK;
+ end_beat_count &= BEAT_COUNT_MASK;
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("Start beat_count = %x End beat_count = %x\n",
+ start_beat_count, end_beat_count);
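+ /*
+ * If the beat count changed, the fuel gauge may have updated SRAM
+ * while the read was in progress, so retry the transaction.
+ */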
+ if (start_beat_count != end_beat_count) {
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("Beat count do not match - retry transaction\n");
+ retry = true;
+ }
+out:
+ /* Release IMA access */
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
+ if (rc)
+ pr_err("failed to reset IMA access bit rc = %d\n", rc);
+
+ if (retry) {
+ retry = false;
+ goto retry;
+ }
+ mutex_unlock(&chip->rw_lock);
+
+exit:
+ fg_relax(&chip->memif_wakeup_source);
+ return rc;
+}
+
+static int fg_interleaved_mem_write(struct fg_chip *chip, u8 *val, u16 address,
+ int len, int offset)
+{
+ int rc = 0, orig_address = address;
+ u8 count = 0;
+
+ if (address < RAM_OFFSET)
+ return -EINVAL;
+
+ if (offset > 3) {
+ pr_err("offset too large %d\n", offset);
+ return -EINVAL;
+ }
+
+ fg_stay_awake(&chip->memif_wakeup_source);
+ address = ((orig_address + offset) / 4) * 4;
+ offset = (orig_address + offset) % 4;
+
+ mutex_lock(&chip->rw_lock);
+
+retry:
+ rc = fg_interleaved_mem_config(chip, val, address, len, offset, 1);
+ if (rc) {
+ pr_err("failed to xonfigure SRAM for IMA rc = %d\n", rc);
+ goto out;
+ }
+
+ /* write data */
+ rc = __fg_interleaved_mem_write(chip, val, address, offset, len);
+ if (rc) {
+ if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
+ count++;
+ pr_err("IMA access failed retry_count = %d\n", count);
+ goto retry;
+ } else {
+ pr_err("failed to write SRAM address rc = %d\n", rc);
+ goto out;
+ }
+ }
+
+out:
+ /* Release IMA access */
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
+ if (rc)
+ pr_err("failed to reset IMA access bit rc = %d\n", rc);
+
+ mutex_unlock(&chip->rw_lock);
+ fg_relax(&chip->memif_wakeup_source);
+ return rc;
+}
+
+static int fg_mem_read(struct fg_chip *chip, u8 *val, u16 address,
+ int len, int offset, bool keep_access)
+{
+ if (chip->ima_supported)
+ return fg_interleaved_mem_read(chip, val, address,
+ len, offset);
+ else
+ return fg_conventional_mem_read(chip, val, address,
+ len, offset, keep_access);
+}
+
+static int fg_mem_write(struct fg_chip *chip, u8 *val, u16 address,
+ int len, int offset, bool keep_access)
+{
+ if (chip->ima_supported)
+ return fg_interleaved_mem_write(chip, val, address,
+ len, offset);
+ else
+ return fg_conventional_mem_write(chip, val, address,
+ len, offset, keep_access);
+}
+
+static int fg_mem_masked_write(struct fg_chip *chip, u16 addr,
+ u8 mask, u8 val, u8 offset)
+{
+ int rc = 0;
+ u8 reg[4];
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+
+ rc = fg_mem_read(chip, reg, addr, 4, 0, 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n", addr, rc);
+ return rc;
+ }
+
+ reg[offset] &= ~mask;
+ reg[offset] |= val & mask;
+
+ str[0] = '\0';
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, reg, 4);
+ pr_debug("Writing %s address %03x, offset %d\n", str, addr, offset);
+
+ rc = fg_mem_write(chip, reg, addr, 4, 0, 0);
+ if (rc) {
+ pr_err("spmi write failed: addr=%03X, rc=%d\n", addr, rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int soc_to_setpoint(int soc)
+{
+ return DIV_ROUND_CLOSEST(soc * 255, 100);
+}
+
+static void batt_to_setpoint_adc(int vbatt_mv, u8 *data)
+{
+ int val;
+ /* Battery voltage is an offset from 0 V and the LSB is 5 V / 2^15. */
+ val = DIV_ROUND_CLOSEST(vbatt_mv * 32768, 5000);
+ data[0] = val & 0xFF;
+ data[1] = val >> 8;
+ return;
+}
+
+static u8 batt_to_setpoint_8b(int vbatt_mv)
+{
+ int val;
+ /* Battery voltage is an offset from 2.5 V and LSB is 5/2^9. */
+ val = (vbatt_mv - 2500) * 512 / 1000;
+ return DIV_ROUND_CLOSEST(val, 5);
+}
+
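+/*
+ * Encode the thermistor delay into the 3-bit setpoint stored in bits 7:5:
+ * 0 for delays below 2.56 ms, 7 above 163.84 ms, otherwise
+ * ilog2(delay_us / 10) - 7.
+ */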
+static u8 therm_delay_to_setpoint(u32 delay_us)
+{
+ u8 val;
+
+ if (delay_us < 2560)
+ val = 0;
+ else if (delay_us > 163840)
+ val = 7;
+ else
+ val = ilog2(delay_us / 10) - 7;
+ return val << 5;
+}
+
+static int get_current_time(unsigned long *now_tm_sec)
+{
+ struct rtc_time tm;
+ struct rtc_device *rtc;
+ int rc;
+
+ rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
+ if (rtc == NULL) {
+ pr_err("%s: unable to open rtc device (%s)\n",
+ __FILE__, CONFIG_RTC_HCTOSYS_DEVICE);
+ return -EINVAL;
+ }
+
+ rc = rtc_read_time(rtc, &tm);
+ if (rc) {
+ pr_err("Error reading rtc device (%s) : %d\n",
+ CONFIG_RTC_HCTOSYS_DEVICE, rc);
+ goto close_time;
+ }
+
+ rc = rtc_valid_tm(&tm);
+ if (rc) {
+ pr_err("Invalid RTC time (%s): %d\n",
+ CONFIG_RTC_HCTOSYS_DEVICE, rc);
+ goto close_time;
+ }
+ rtc_tm_to_time(&tm, now_tm_sec);
+
+close_time:
+ rtc_class_close(rtc);
+ return rc;
+}
+
+#define BATTERY_SOC_REG 0x56C
+#define BATTERY_SOC_OFFSET 1
+#define FULL_PERCENT_3B 0xFFFFFF
+static int get_battery_soc_raw(struct fg_chip *chip)
+{
+ int rc;
+ u8 buffer[3];
+
+ rc = fg_mem_read(chip, buffer, BATTERY_SOC_REG, 3, 1, 0);
+ if (rc) {
+ pr_err("Unable to read battery soc: %d\n", rc);
+ return 0;
+ }
+ return (int)(buffer[2] << 16 | buffer[1] << 8 | buffer[0]);
+}
+
+#define COUNTER_IMPTR_REG 0X558
+#define COUNTER_PULSE_REG 0X55C
+#define SOC_FULL_REG 0x564
+#define COUNTER_IMPTR_OFFSET 2
+#define COUNTER_PULSE_OFFSET 0
+#define SOC_FULL_OFFSET 3
+#define ESR_PULSE_RECONFIG_SOC 0xFFF971
+static int fg_configure_soc(struct fg_chip *chip)
+{
+ u32 batt_soc;
+ u8 cntr[2] = {0, 0};
+ int rc = 0;
+
+ mutex_lock(&chip->rw_lock);
+ atomic_add_return(1, &chip->memif_user_cnt);
+ mutex_unlock(&chip->rw_lock);
+
+ /* Read Battery SOC */
+ batt_soc = get_battery_soc_raw(chip);
+
+ if (batt_soc > ESR_PULSE_RECONFIG_SOC) {
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+ pr_info("Configuring soc registers batt_soc: %x\n",
+ batt_soc);
+ batt_soc = ESR_PULSE_RECONFIG_SOC;
+ rc = fg_mem_write(chip, (u8 *)&batt_soc, BATTERY_SOC_REG, 3,
+ BATTERY_SOC_OFFSET, 1);
+ if (rc) {
+ pr_err("failed to write BATT_SOC rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = fg_mem_write(chip, (u8 *)&batt_soc, SOC_FULL_REG, 3,
+ SOC_FULL_OFFSET, 1);
+ if (rc) {
+ pr_err("failed to write SOC_FULL rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = fg_mem_write(chip, cntr, COUNTER_IMPTR_REG, 2,
+ COUNTER_IMPTR_OFFSET, 1);
+ if (rc) {
+ pr_err("failed to write COUNTER_IMPTR rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = fg_mem_write(chip, cntr, COUNTER_PULSE_REG, 2,
+ COUNTER_PULSE_OFFSET, 0);
+ if (rc)
+ pr_err("failed to write COUNTER_IMPTR rc=%d\n", rc);
+ }
+out:
+ fg_release_access_if_necessary(chip);
+ return rc;
+}
+
+#define SOC_EMPTY BIT(3)
+static bool fg_is_batt_empty(struct fg_chip *chip)
+{
+ u8 fg_soc_sts;
+ int rc;
+
+ rc = fg_read(chip, &fg_soc_sts,
+ INT_RT_STS(chip->soc_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->soc_base), rc);
+ return false;
+ }
+
+ return (fg_soc_sts & SOC_EMPTY) != 0;
+}
+
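+/*
+ * The monotonic SOC is read from a pair of shadow registers; keep reading
+ * until both copies agree so that a value caught mid-update is not used.
+ */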
+static int get_monotonic_soc_raw(struct fg_chip *chip)
+{
+ u8 cap[2];
+ int rc, tries = 0;
+
+ while (tries < MAX_TRIES_SOC) {
+ rc = fg_read(chip, cap,
+ chip->soc_base + SOC_MONOTONIC_SOC, 2);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03x, rc=%d\n",
+ chip->soc_base + SOC_MONOTONIC_SOC, rc);
+ return rc;
+ }
+
+ if (cap[0] == cap[1])
+ break;
+
+ tries++;
+ }
+
+ if (tries == MAX_TRIES_SOC) {
+ pr_err("shadow registers do not match\n");
+ return -EINVAL;
+ }
+
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+ pr_info_ratelimited("raw: 0x%02x\n", cap[0]);
+ return cap[0];
+}
+
+#define EMPTY_CAPACITY 0
+#define DEFAULT_CAPACITY 50
+#define MISSING_CAPACITY 100
+#define FULL_CAPACITY 100
+#define FULL_SOC_RAW 0xFF
+static int get_prop_capacity(struct fg_chip *chip)
+{
+ int msoc;
+
+ if (chip->battery_missing)
+ return MISSING_CAPACITY;
+ if (!chip->profile_loaded && !chip->use_otp_profile)
+ return DEFAULT_CAPACITY;
+ if (chip->charge_full)
+ return FULL_CAPACITY;
+ if (chip->soc_empty) {
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+ pr_info_ratelimited("capacity: %d, EMPTY\n",
+ EMPTY_CAPACITY);
+ return EMPTY_CAPACITY;
+ }
+ msoc = get_monotonic_soc_raw(chip);
+ if (msoc == 0)
+ return EMPTY_CAPACITY;
+ else if (msoc == FULL_SOC_RAW)
+ return FULL_CAPACITY;
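+ /*
+ * msoc 0 and 0xFF were handled above; map the remaining raw range
+ * 1..254 linearly onto 1..99 percent.
+ */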
+ return DIV_ROUND_CLOSEST((msoc - 1) * (FULL_CAPACITY - 2),
+ FULL_SOC_RAW - 2) + 1;
+}
+
+#define HIGH_BIAS 3
+#define MED_BIAS BIT(1)
+#define LOW_BIAS BIT(0)
+static u8 bias_ua[] = {
+ [HIGH_BIAS] = 150,
+ [MED_BIAS] = 15,
+ [LOW_BIAS] = 5,
+};
+
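+/*
+ * Battery ID resistance in ohms: R_id = V_id(uV) / I_bias(uA), where the
+ * bias current is selected by the low two bits of the ID info register.
+ */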
+static int64_t get_batt_id(unsigned int battery_id_uv, u8 bid_info)
+{
+ u64 battery_id_ohm;
+
+ if ((bid_info & 0x3) == 0) {
+ pr_err("can't determine battery id 0x%02x\n", bid_info);
+ return -EINVAL;
+ }
+
+ battery_id_ohm = div_u64(battery_id_uv, bias_ua[bid_info & 0x3]);
+
+ return battery_id_ohm;
+}
+
+#define DEFAULT_TEMP_DEGC 250
+static int get_sram_prop_now(struct fg_chip *chip, unsigned int type)
+{
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+ pr_info("addr 0x%02X, offset %d value %d\n",
+ fg_data[type].address, fg_data[type].offset,
+ fg_data[type].value);
+
+ if (type == FG_DATA_BATT_ID)
+ return get_batt_id(fg_data[type].value,
+ fg_data[FG_DATA_BATT_ID_INFO].value);
+
+ return fg_data[type].value;
+}
+
+#define MIN_TEMP_DEGC -300
+#define MAX_TEMP_DEGC 970
+static int get_prop_jeita_temp(struct fg_chip *chip, unsigned int type)
+{
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+ pr_info("addr 0x%02X, offset %d\n", settings[type].address,
+ settings[type].offset);
+
+ return settings[type].value;
+}
+
+static int set_prop_jeita_temp(struct fg_chip *chip,
+ unsigned int type, int decidegc)
+{
+ int rc = 0;
+
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+ pr_info("addr 0x%02X, offset %d temp%d\n",
+ settings[type].address,
+ settings[type].offset, decidegc);
+
+ settings[type].value = decidegc;
+
+ cancel_delayed_work_sync(
+ &chip->update_jeita_setting);
+ schedule_delayed_work(
+ &chip->update_jeita_setting, 0);
+
+ return rc;
+}
+
+#define EXTERNAL_SENSE_SELECT 0x4AC
+#define EXTERNAL_SENSE_OFFSET 0x2
+#define EXTERNAL_SENSE_BIT BIT(2)
+static int set_prop_sense_type(struct fg_chip *chip, int ext_sense_type)
+{
+ int rc;
+
+ rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
+ EXTERNAL_SENSE_BIT,
+ ext_sense_type ? EXTERNAL_SENSE_BIT : 0,
+ EXTERNAL_SENSE_OFFSET);
+ if (rc) {
+ pr_err("failed to write profile rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+#define EXPONENT_MASK 0xF800
+#define MANTISSA_MASK 0x3FF
+#define SIGN BIT(10)
+#define EXPONENT_SHIFT 11
+#define MICRO_UNIT 1000000ULL
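+/*
+ * float_decode() unpacks the FG's 16-bit half-float format: bits 15:11 hold
+ * the exponent (bias 15), bit 10 the sign and bits 9:0 the mantissa. The
+ * result is scaled to micro-units, i.e.
+ * value = +/- (1 + mantissa / 2^10) * 2^(exponent - 15) * MICRO_UNIT.
+ */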
+static int64_t float_decode(u16 reg)
+{
+ int64_t final_val, exponent_val, mantissa_val;
+ int exponent, mantissa, n;
+ bool sign;
+
+ exponent = (reg & EXPONENT_MASK) >> EXPONENT_SHIFT;
+ mantissa = (reg & MANTISSA_MASK);
+ sign = !!(reg & SIGN);
+
+ pr_debug("exponent=%d mantissa=%d sign=%d\n", exponent, mantissa, sign);
+
+ mantissa_val = mantissa * MICRO_UNIT;
+
+ n = exponent - 15;
+ if (n < 0)
+ exponent_val = MICRO_UNIT >> -n;
+ else
+ exponent_val = MICRO_UNIT << n;
+
+ n = n - 10;
+ if (n < 0)
+ mantissa_val >>= -n;
+ else
+ mantissa_val <<= n;
+
+ final_val = exponent_val + mantissa_val;
+
+ if (sign)
+ final_val *= -1;
+
+ return final_val;
+}
+
+#define MIN_HALFFLOAT_EXP_N -15
+#define MAX_HALFFLOAT_EXP_N 16
+static int log2_floor(int64_t uval)
+{
+ int n = 0;
+ int64_t i = MICRO_UNIT;
+
+ if (uval > i) {
+ while (uval > i && n > MIN_HALFFLOAT_EXP_N) {
+ i <<= 1;
+ n += 1;
+ }
+ if (uval < i)
+ n -= 1;
+ } else if (uval < i) {
+ while (uval < i && n < MAX_HALFFLOAT_EXP_N) {
+ i >>= 1;
+ n -= 1;
+ }
+ }
+
+ return n;
+}
+
+static int64_t exp2_int(int64_t n)
+{
+ int p = n - 1;
+
+ if (p > 0)
+ return (2 * MICRO_UNIT) << p;
+ else
+ return (2 * MICRO_UNIT) >> abs(p);
+}
+
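+/*
+ * float_encode() is the inverse of float_decode(): the exponent is the
+ * floor of log2 of the micro-unit value (re-biased by 15) and the mantissa
+ * is the remaining fraction scaled by 2^10 and rounded.
+ */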
+static u16 float_encode(int64_t uval)
+{
+ int sign = 0, n, exp, mantissa;
+ u16 half = 0;
+
+ if (uval < 0) {
+ sign = 1;
+ uval = abs(uval);
+ }
+ n = log2_floor(uval);
+ exp = n + 15;
+ mantissa = div_s64(div_s64((uval - exp2_int(n)) * exp2_int(10 - n),
+ MICRO_UNIT) + MICRO_UNIT / 2, MICRO_UNIT);
+
+ half = (mantissa & MANTISSA_MASK) | ((sign << 10) & SIGN)
+ | ((exp << 11) & EXPONENT_MASK);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("uval = %lld, m = 0x%02x, sign = 0x%02x, exp = 0x%02x, half = 0x%04x\n",
+ uval, mantissa, sign, exp, half);
+ return half;
+}
+
+#define BATT_IDED BIT(3)
+static int fg_is_batt_id_valid(struct fg_chip *chip)
+{
+ u8 fg_batt_sts;
+ int rc;
+
+ rc = fg_read(chip, &fg_batt_sts,
+ INT_RT_STS(chip->batt_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->batt_base), rc);
+ return rc;
+ }
+
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("fg batt sts 0x%x\n", fg_batt_sts);
+
+ return (fg_batt_sts & BATT_IDED) ? 1 : 0;
+}
+
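+/*
+ * Sign-extend an nbytes-wide two's complement value to 64 bits by
+ * replicating the sign bit into the upper bytes.
+ */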
+static int64_t twos_compliment_extend(int64_t val, int nbytes)
+{
+ int i;
+ int64_t mask;
+
+ mask = 0x80LL << ((nbytes - 1) * 8);
+ if (val & mask) {
+ for (i = 8; i > nbytes; i--) {
+ mask = 0xFFLL << ((i - 1) * 8);
+ val |= mask;
+ }
+ }
+
+ return val;
+}
+
+#define LSB_24B_NUMRTR 596046
+#define LSB_24B_DENMTR 1000000
+#define LSB_16B_NUMRTR 152587
+#define LSB_16B_DENMTR 1000
+#define LSB_8B 9800
+#define TEMP_LSB_16B 625
+#define DECIKELVIN 2730
+#define SRAM_PERIOD_NO_ID_UPDATE_MS 100
+#define FULL_PERCENT_28BIT 0xFFFFFFF
+static void update_sram_data(struct fg_chip *chip, int *resched_ms)
+{
+ int i, j, rc = 0;
+ u8 reg[4];
+ int64_t temp;
+ int battid_valid = fg_is_batt_id_valid(chip);
+
+ fg_stay_awake(&chip->update_sram_wakeup_source);
+ if (chip->fg_restarting)
+ goto resched;
+
+ fg_mem_lock(chip);
+ for (i = 1; i < FG_DATA_MAX; i++) {
+ if (chip->profile_loaded && i >= FG_DATA_BATT_ID)
+ continue;
+ rc = fg_mem_read(chip, reg, fg_data[i].address,
+ fg_data[i].len, fg_data[i].offset, 0);
+ if (rc) {
+ pr_err("Failed to update sram data\n");
+ break;
+ }
+
+ temp = 0;
+ for (j = 0; j < fg_data[i].len; j++)
+ temp |= reg[j] << (8 * j);
+
+ switch (i) {
+ case FG_DATA_OCV:
+ case FG_DATA_VOLTAGE:
+ case FG_DATA_CPRED_VOLTAGE:
+ fg_data[i].value = div_u64(
+ (u64)(u16)temp * LSB_16B_NUMRTR,
+ LSB_16B_DENMTR);
+ break;
+ case FG_DATA_CURRENT:
+ temp = twos_compliment_extend(temp, fg_data[i].len);
+ fg_data[i].value = div_s64(
+ (s64)temp * LSB_16B_NUMRTR,
+ LSB_16B_DENMTR);
+ break;
+ case FG_DATA_BATT_ESR:
+ fg_data[i].value = float_decode((u16) temp);
+ break;
+ case FG_DATA_BATT_ESR_COUNT:
+ fg_data[i].value = (u16)temp;
+ break;
+ case FG_DATA_BATT_ID:
+ if (battid_valid)
+ fg_data[i].value = reg[0] * LSB_8B;
+ break;
+ case FG_DATA_BATT_ID_INFO:
+ if (battid_valid)
+ fg_data[i].value = reg[0];
+ break;
+ case FG_DATA_BATT_SOC:
+ fg_data[i].value = div64_s64((temp * 10000),
+ FULL_PERCENT_3B);
+ break;
+ case FG_DATA_CC_CHARGE:
+ temp = twos_compliment_extend(temp, fg_data[i].len);
+ fg_data[i].value = div64_s64(
+ temp * (int64_t)chip->nom_cap_uah,
+ FULL_PERCENT_28BIT);
+ break;
+ case FG_DATA_VINT_ERR:
+ temp = twos_compliment_extend(temp, fg_data[i].len);
+ fg_data[i].value = div64_s64(temp * chip->nom_cap_uah,
+ FULL_PERCENT_3B);
+ break;
+ }
+
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("%d %lld %d\n", i, temp, fg_data[i].value);
+ }
+ fg_mem_release(chip);
+
+ if (!rc)
+ get_current_time(&chip->last_sram_update_time);
+
+resched:
+ if (battid_valid) {
+ complete_all(&chip->batt_id_avail);
+ *resched_ms = fg_sram_update_period_ms;
+ } else {
+ *resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS;
+ }
+ fg_relax(&chip->update_sram_wakeup_source);
+}
+
+#define SRAM_TIMEOUT_MS 3000
+static void update_sram_data_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ update_sram_data.work);
+ int resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS, ret;
+ bool tried_again = false;
+
+wait:
+ /* Wait for MEMIF access revoked */
+ ret = wait_for_completion_interruptible_timeout(
+ &chip->sram_access_revoked,
+ msecs_to_jiffies(SRAM_TIMEOUT_MS));
+
+ /* If we were interrupted wait again one more time. */
+ if (ret == -ERESTARTSYS && !tried_again) {
+ tried_again = true;
+ goto wait;
+ } else if (ret <= 0) {
+ pr_err("transaction timed out ret=%d\n", ret);
+ goto out;
+ }
+ update_sram_data(chip, &resched_ms);
+
+out:
+ schedule_delayed_work(
+ &chip->update_sram_data,
+ msecs_to_jiffies(resched_ms));
+}
+
+#define BATT_TEMP_OFFSET 3
+#define BATT_TEMP_CNTRL_MASK 0x17
+#define DISABLE_THERM_BIT BIT(0)
+#define TEMP_SENSE_ALWAYS_BIT BIT(1)
+#define TEMP_SENSE_CHARGE_BIT BIT(2)
+#define FORCE_RBIAS_ON_BIT BIT(4)
+#define BATT_TEMP_OFF DISABLE_THERM_BIT
+#define BATT_TEMP_ON (FORCE_RBIAS_ON_BIT | TEMP_SENSE_ALWAYS_BIT | \
+ TEMP_SENSE_CHARGE_BIT)
+#define TEMP_PERIOD_UPDATE_MS 10000
+#define TEMP_PERIOD_TIMEOUT_MS 3000
+static void update_temp_data(struct work_struct *work)
+{
+ s16 temp;
+ u8 reg[2];
+ bool tried_again = false;
+ int rc, ret, timeout = TEMP_PERIOD_TIMEOUT_MS;
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ update_temp_work.work);
+
+ if (chip->fg_restarting)
+ goto resched;
+
+ fg_stay_awake(&chip->update_temp_wakeup_source);
+ if (chip->sw_rbias_ctrl) {
+ rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
+ BATT_TEMP_CNTRL_MASK,
+ BATT_TEMP_ON,
+ BATT_TEMP_OFFSET);
+ if (rc) {
+ pr_err("failed to write BATT_TEMP_ON rc=%d\n", rc);
+ goto out;
+ }
+
+wait:
+ /* Wait for MEMIF access revoked */
+ ret = wait_for_completion_interruptible_timeout(
+ &chip->sram_access_revoked,
+ msecs_to_jiffies(timeout));
+
+ /* If we were interrupted wait again one more time. */
+ if (ret == -ERESTARTSYS && !tried_again) {
+ tried_again = true;
+ goto wait;
+ } else if (ret <= 0) {
+ rc = -ETIMEDOUT;
+ pr_err("transaction timed out ret=%d\n", ret);
+ goto out;
+ }
+ }
+
+ /* Read FG_DATA_BATT_TEMP now */
+ rc = fg_mem_read(chip, reg, fg_data[0].address,
+ fg_data[0].len, fg_data[0].offset,
+ chip->sw_rbias_ctrl ? 1 : 0);
+ if (rc) {
+ pr_err("Failed to update temp data\n");
+ goto out;
+ }
+
+ temp = reg[0] | (reg[1] << 8);
+ fg_data[0].value = (temp * TEMP_LSB_16B / 1000)
+ - DECIKELVIN;
+
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("BATT_TEMP %d %d\n", temp, fg_data[0].value);
+
+ get_current_time(&chip->last_temp_update_time);
+
+out:
+ if (chip->sw_rbias_ctrl) {
+ rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
+ BATT_TEMP_CNTRL_MASK,
+ BATT_TEMP_OFF,
+ BATT_TEMP_OFFSET);
+ if (rc)
+ pr_err("failed to write BATT_TEMP_OFF rc=%d\n", rc);
+ }
+ fg_relax(&chip->update_temp_wakeup_source);
+
+resched:
+ schedule_delayed_work(
+ &chip->update_temp_work,
+ msecs_to_jiffies(TEMP_PERIOD_UPDATE_MS));
+}
+
+static void update_jeita_setting(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ update_jeita_setting.work);
+ u8 reg[4];
+ int i, rc;
+
+ for (i = 0; i < 4; i++)
+ reg[i] = (settings[FG_MEM_SOFT_COLD + i].value / 10) + 30;
+
+ rc = fg_mem_write(chip, reg, settings[FG_MEM_SOFT_COLD].address,
+ 4, settings[FG_MEM_SOFT_COLD].offset, 0);
+ if (rc)
+ pr_err("failed to update JEITA setting rc=%d\n", rc);
+}
+
+static int fg_set_resume_soc(struct fg_chip *chip, u8 threshold)
+{
+ u16 address;
+ int offset, rc;
+
+ address = settings[FG_MEM_RESUME_SOC].address;
+ offset = settings[FG_MEM_RESUME_SOC].offset;
+
+ rc = fg_mem_masked_write(chip, address, 0xFF, threshold, offset);
+
+ if (rc)
+ pr_err("write failed rc=%d\n", rc);
+ else
+ pr_debug("setting resume-soc to %x\n", threshold);
+
+ return rc;
+}
+
+#define VBATT_LOW_STS_BIT BIT(2)
+static int fg_get_vbatt_status(struct fg_chip *chip, bool *vbatt_low_sts)
+{
+ int rc = 0;
+ u8 fg_batt_sts;
+
+ rc = fg_read(chip, &fg_batt_sts, INT_RT_STS(chip->batt_base), 1);
+ if (!rc)
+ *vbatt_low_sts = !!(fg_batt_sts & VBATT_LOW_STS_BIT);
+ return rc;
+}
+
+#define BATT_CYCLE_NUMBER_REG 0x5E8
+#define BATT_CYCLE_OFFSET 0
+static void restore_cycle_counter(struct fg_chip *chip)
+{
+ int rc = 0, i, address;
+ u8 data[2];
+
+ fg_mem_lock(chip);
+ for (i = 0; i < BUCKET_COUNT; i++) {
+ address = BATT_CYCLE_NUMBER_REG + i * 2;
+ rc = fg_mem_read(chip, (u8 *)&data, address, 2,
+ BATT_CYCLE_OFFSET, 0);
+ if (rc)
+ pr_err("Failed to read BATT_CYCLE_NUMBER[%d] rc: %d\n",
+ i, rc);
+ else
+ chip->cyc_ctr.count[i] = data[0] | data[1] << 8;
+ }
+ fg_mem_release(chip);
+}
+
+static void clear_cycle_counter(struct fg_chip *chip)
+{
+ int rc = 0, len, i;
+
+ if (!chip->cyc_ctr.en)
+ return;
+
+ len = sizeof(chip->cyc_ctr.count);
+ memset(chip->cyc_ctr.count, 0, len);
+ for (i = 0; i < BUCKET_COUNT; i++) {
+ chip->cyc_ctr.started[i] = false;
+ chip->cyc_ctr.last_soc[i] = 0;
+ }
+ rc = fg_mem_write(chip, (u8 *)&chip->cyc_ctr.count,
+ BATT_CYCLE_NUMBER_REG, len,
+ BATT_CYCLE_OFFSET, 0);
+ if (rc)
+ pr_err("failed to write BATT_CYCLE_NUMBER rc=%d\n", rc);
+}
+
+static int fg_inc_store_cycle_ctr(struct fg_chip *chip, int bucket)
+{
+ int rc = 0, address;
+ u16 cyc_count;
+ u8 data[2];
+
+ if (bucket < 0 || (bucket > BUCKET_COUNT - 1))
+ return 0;
+
+ cyc_count = chip->cyc_ctr.count[bucket];
+ cyc_count++;
+ data[0] = cyc_count & 0xFF;
+ data[1] = cyc_count >> 8;
+
+ address = BATT_CYCLE_NUMBER_REG + bucket * 2;
+
+ rc = fg_mem_write(chip, data, address, 2, BATT_CYCLE_OFFSET, 0);
+ if (rc)
+ pr_err("failed to write BATT_CYCLE_NUMBER[%d] rc=%d\n",
+ bucket, rc);
+ else
+ chip->cyc_ctr.count[bucket] = cyc_count;
+ return rc;
+}
+
+static void update_cycle_count(struct work_struct *work)
+{
+ int rc = 0, bucket, i;
+ u8 reg[3], batt_soc;
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ cycle_count_work);
+
+ mutex_lock(&chip->cyc_ctr.lock);
+ rc = fg_mem_read(chip, reg, BATTERY_SOC_REG, 3,
+ BATTERY_SOC_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read battery soc rc: %d\n", rc);
+ goto out;
+ }
+ batt_soc = reg[2];
+
+ if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ /* Find out which bucket the SOC falls in */
+ bucket = batt_soc / BUCKET_SOC_PCT;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("batt_soc: %x bucket: %d\n", reg[2], bucket);
+
+ /*
+ * If counting had started for the previous bucket, store its
+ * counter now that counting for the current bucket is starting.
+ */
+ if (bucket > 0 && chip->cyc_ctr.started[bucket - 1] &&
+ !chip->cyc_ctr.started[bucket]) {
+ rc = fg_inc_store_cycle_ctr(chip, bucket - 1);
+ if (rc) {
+ pr_err("Error in storing cycle_ctr rc: %d\n",
+ rc);
+ goto out;
+ } else {
+ chip->cyc_ctr.started[bucket - 1] = false;
+ chip->cyc_ctr.last_soc[bucket - 1] = 0;
+ }
+ }
+ if (!chip->cyc_ctr.started[bucket]) {
+ chip->cyc_ctr.started[bucket] = true;
+ chip->cyc_ctr.last_soc[bucket] = batt_soc;
+ }
+ } else {
+ for (i = 0; i < BUCKET_COUNT; i++) {
+ if (chip->cyc_ctr.started[i] &&
+ batt_soc > chip->cyc_ctr.last_soc[i]) {
+ rc = fg_inc_store_cycle_ctr(chip, i);
+ if (rc)
+ pr_err("Error in storing cycle_ctr rc: %d\n",
+ rc);
+ chip->cyc_ctr.last_soc[i] = 0;
+ }
+ chip->cyc_ctr.started[i] = false;
+ }
+ }
+out:
+ mutex_unlock(&chip->cyc_ctr.lock);
+}
+
+static int fg_get_cycle_count(struct fg_chip *chip)
+{
+ int count;
+
+ if (!chip->cyc_ctr.en)
+ return 0;
+
+ if ((chip->cyc_ctr.id <= 0) || (chip->cyc_ctr.id > BUCKET_COUNT))
+ return -EINVAL;
+
+ mutex_lock(&chip->cyc_ctr.lock);
+ count = chip->cyc_ctr.count[chip->cyc_ctr.id - 1];
+ mutex_unlock(&chip->cyc_ctr.lock);
+ return count;
+}
+
+static void half_float_to_buffer(int64_t uval, u8 *buffer)
+{
+ u16 raw;
+
+ raw = float_encode(uval);
+ buffer[0] = (u8)(raw & 0xFF);
+ buffer[1] = (u8)((raw >> 8) & 0xFF);
+}
+
+static int64_t half_float(u8 *buffer)
+{
+ u16 val;
+
+ val = buffer[1] << 8 | buffer[0];
+ return float_decode(val);
+}
+
+static int voltage_2b(u8 *buffer)
+{
+ u16 val;
+
+ val = buffer[1] << 8 | buffer[0];
+ /* the range of voltage 2b is [-5V, 5V], so it will fit in an int */
+ return (int)div_u64(((u64)val) * LSB_16B_NUMRTR, LSB_16B_DENMTR);
+}
+
+static int bcap_uah_2b(u8 *buffer)
+{
+ u16 val;
+
+ val = buffer[1] << 8 | buffer[0];
+ return ((int)val) * 1000;
+}
+
+static int lookup_ocv_for_soc(struct fg_chip *chip, int soc)
+{
+ int64_t *coeffs;
+
+ if (soc > chip->ocv_junction_p1p2 * 10)
+ coeffs = chip->ocv_coeffs;
+ else if (soc > chip->ocv_junction_p2p3 * 10)
+ coeffs = chip->ocv_coeffs + 4;
+ else
+ coeffs = chip->ocv_coeffs + 8;
+ /* the range of ocv will fit in a 32 bit int */
+ return (int)(coeffs[0]
+ + div_s64(coeffs[1] * soc, 1000LL)
+ + div_s64(coeffs[2] * soc * soc, 1000000LL)
+ + div_s64(coeffs[3] * soc * soc * soc, 1000000000LL));
+}
+
+static int lookup_soc_for_ocv(struct fg_chip *chip, int ocv)
+{
+ int64_t val;
+ int soc = -EINVAL;
+ /*
+ * binary search variables representing the valid start and end
+ * percentages to search
+ */
+ int start = 0, end = 1000, mid;
+
+ if (fg_debug_mask & FG_AGING)
+ pr_info("target_ocv = %d\n", ocv);
+ /* do a binary search for the closest soc to match the ocv */
+ while (end - start > 1) {
+ mid = (start + end) / 2;
+ val = lookup_ocv_for_soc(chip, mid);
+ if (fg_debug_mask & FG_AGING)
+ pr_info("start = %d, mid = %d, end = %d, ocv = %lld\n",
+ start, mid, end, val);
+ if (ocv < val) {
+ end = mid;
+ } else if (ocv > val) {
+ start = mid;
+ } else {
+ soc = mid;
+ break;
+ }
+ }
+ /*
+ * if the exact soc was not found and there are two or less values
+ * remaining, just compare them and see which one is closest to the ocv
+ */
+ if (soc == -EINVAL) {
+ if (abs(ocv - lookup_ocv_for_soc(chip, start))
+ > abs(ocv - lookup_ocv_for_soc(chip, end)))
+ soc = end;
+ else
+ soc = start;
+ }
+ if (fg_debug_mask & FG_AGING)
+ pr_info("closest = %d, target_ocv = %d, ocv_found = %d\n",
+ soc, ocv, lookup_ocv_for_soc(chip, soc));
+ return soc;
+}
+
+#define ESR_ACTUAL_REG 0x554
+#define BATTERY_ESR_REG 0x4F4
+#define TEMP_RS_TO_RSLOW_REG 0x514
+static int estimate_battery_age(struct fg_chip *chip, int *actual_capacity)
+{
+ int64_t ocv_cutoff_new, ocv_cutoff_aged, temp_rs_to_rslow;
+ int64_t esr_actual, battery_esr, val;
+ int soc_cutoff_aged, soc_cutoff_new, rc;
+ int battery_soc, unusable_soc, batt_temp;
+ u8 buffer[3];
+
+ if (chip->batt_aging_mode != FG_AGING_ESR)
+ return 0;
+
+ if (chip->nom_cap_uah == 0) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("ocv coefficients not loaded, aborting\n");
+ return 0;
+ }
+ fg_mem_lock(chip);
+
+ batt_temp = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
+ if (batt_temp < 150 || batt_temp > 400) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("Battery temp (%d) out of range, aborting\n",
+ (int)batt_temp);
+ rc = 0;
+ goto done;
+ }
+
+ battery_soc = get_battery_soc_raw(chip) * 100 / FULL_PERCENT_3B;
+ if (battery_soc < 25 || battery_soc > 75) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("Battery SoC (%d) out of range, aborting\n",
+ (int)battery_soc);
+ rc = 0;
+ goto done;
+ }
+
+ rc = fg_mem_read(chip, buffer, ESR_ACTUAL_REG, 2, 2, 0);
+ esr_actual = half_float(buffer);
+ rc |= fg_mem_read(chip, buffer, BATTERY_ESR_REG, 2, 2, 0);
+ battery_esr = half_float(buffer);
+
+ if (rc) {
+ goto error_done;
+ } else if (esr_actual < battery_esr) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("Batt ESR lower than ESR actual, aborting\n");
+ rc = 0;
+ goto done;
+ }
+ rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2, 0, 0);
+ temp_rs_to_rslow = half_float(buffer);
+
+ if (rc)
+ goto error_done;
+
+ fg_mem_release(chip);
+
+ if (fg_debug_mask & FG_AGING) {
+ pr_info("batt_soc = %d, cutoff_voltage = %lld, eval current = %d\n",
+ battery_soc, chip->cutoff_voltage,
+ chip->evaluation_current);
+ pr_info("temp_rs_to_rslow = %lld, batt_esr = %lld, esr_actual = %lld\n",
+ temp_rs_to_rslow, battery_esr, esr_actual);
+ }
+
+ /* calculate soc_cutoff_new */
+ val = (1000000LL + temp_rs_to_rslow) * battery_esr;
+ do_div(val, 1000000);
+ ocv_cutoff_new = div64_s64(chip->evaluation_current * val, 1000)
+ + chip->cutoff_voltage;
+
+ /* calculate soc_cutoff_aged */
+ val = (1000000LL + temp_rs_to_rslow) * esr_actual;
+ do_div(val, 1000000);
+ ocv_cutoff_aged = div64_s64(chip->evaluation_current * val, 1000)
+ + chip->cutoff_voltage;
+
+ if (fg_debug_mask & FG_AGING)
+ pr_info("ocv_cutoff_new = %lld, ocv_cutoff_aged = %lld\n",
+ ocv_cutoff_new, ocv_cutoff_aged);
+
+ soc_cutoff_new = lookup_soc_for_ocv(chip, ocv_cutoff_new);
+ soc_cutoff_aged = lookup_soc_for_ocv(chip, ocv_cutoff_aged);
+
+ if (fg_debug_mask & FG_AGING)
+ pr_info("aged soc = %d, new soc = %d\n",
+ soc_cutoff_aged, soc_cutoff_new);
+ unusable_soc = soc_cutoff_aged - soc_cutoff_new;
+
+ *actual_capacity = div64_s64(((int64_t)chip->nom_cap_uah)
+ * (1000 - unusable_soc), 1000);
+ if (fg_debug_mask & FG_AGING)
+ pr_info("nom cap = %d, actual cap = %d\n",
+ chip->nom_cap_uah, *actual_capacity);
+
+ return rc;
+
+error_done:
+ pr_err("some register reads failed: %d\n", rc);
+done:
+ fg_mem_release(chip);
+ return rc;
+}
+
+static void battery_age_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ battery_age_work);
+
+ estimate_battery_age(chip, &chip->actual_cap_uah);
+}
+
+static enum power_supply_property fg_power_props[] = {
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_RAW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_OCV,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
+ POWER_SUPPLY_PROP_CHARGE_NOW_ERROR,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_COOL_TEMP,
+ POWER_SUPPLY_PROP_WARM_TEMP,
+ POWER_SUPPLY_PROP_RESISTANCE,
+ POWER_SUPPLY_PROP_RESISTANCE_ID,
+ POWER_SUPPLY_PROP_BATTERY_TYPE,
+ POWER_SUPPLY_PROP_UPDATE_NOW,
+ POWER_SUPPLY_PROP_ESR_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
+ POWER_SUPPLY_PROP_HI_POWER,
+};
+
+static int fg_power_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct fg_chip *chip = power_supply_get_drvdata(psy);
+ bool vbatt_low_sts;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_BATTERY_TYPE:
+ if (chip->battery_missing)
+ val->strval = missing_batt_type;
+ else if (chip->fg_restarting)
+ val->strval = loading_batt_type;
+ else
+ val->strval = chip->batt_type;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = get_prop_capacity(chip);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY_RAW:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_SOC);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW_ERROR:
+ val->intval = get_sram_prop_now(chip, FG_DATA_VINT_ERR);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = get_sram_prop_now(chip, FG_DATA_CURRENT);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = get_sram_prop_now(chip, FG_DATA_VOLTAGE);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+ val->intval = get_sram_prop_now(chip, FG_DATA_OCV);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = chip->batt_max_voltage_uv;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
+ break;
+ case POWER_SUPPLY_PROP_COOL_TEMP:
+ val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_COLD);
+ break;
+ case POWER_SUPPLY_PROP_WARM_TEMP:
+ val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_HOT);
+ break;
+ case POWER_SUPPLY_PROP_RESISTANCE:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR);
+ break;
+ case POWER_SUPPLY_PROP_ESR_COUNT:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR_COUNT);
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT:
+ val->intval = fg_get_cycle_count(chip);
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+ val->intval = chip->cyc_ctr.id;
+ break;
+ case POWER_SUPPLY_PROP_RESISTANCE_ID:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ID);
+ break;
+ case POWER_SUPPLY_PROP_UPDATE_NOW:
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ if (!fg_get_vbatt_status(chip, &vbatt_low_sts))
+ val->intval = (int)vbatt_low_sts;
+ else
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = chip->nom_cap_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = chip->learning_data.learned_cc_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ val->intval = chip->learning_data.cc_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
+ val->intval = get_sram_prop_now(chip, FG_DATA_CC_CHARGE);
+ break;
+ case POWER_SUPPLY_PROP_HI_POWER:
+ val->intval = !!chip->bcl_lpm_disabled;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
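+/*
+ * Lookup tables for iavg_3b_to_uah(): correction_times[] holds cumulative
+ * fuel gauge cycle durations in ms (multiples of ~1470 ms) and
+ * correction_factors[] the matching scale factor, where 1000000 == 1.0.
+ */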
+static int correction_times[] = {
+ 1470,
+ 2940,
+ 4410,
+ 5880,
+ 7350,
+ 8820,
+ 10290,
+ 11760,
+ 13230,
+ 14700,
+ 16170,
+ 17640,
+ 19110,
+ 20580,
+ 22050,
+ 23520,
+ 24990,
+ 26460,
+ 27930,
+ 29400,
+ 30870,
+ 32340,
+ 33810,
+ 35280,
+ 36750,
+ 38220,
+ 39690,
+ 41160,
+ 42630,
+ 44100,
+ 45570,
+ 47040,
+};
+
+static int correction_factors[] = {
+ 1000000,
+ 1007874,
+ 1015789,
+ 1023745,
+ 1031742,
+ 1039780,
+ 1047859,
+ 1055979,
+ 1064140,
+ 1072342,
+ 1080584,
+ 1088868,
+ 1097193,
+ 1105558,
+ 1113964,
+ 1122411,
+ 1130899,
+ 1139427,
+ 1147996,
+ 1156606,
+ 1165256,
+ 1173947,
+ 1182678,
+ 1191450,
+ 1200263,
+ 1209115,
+ 1218008,
+ 1226942,
+ 1235915,
+ 1244929,
+ 1253983,
+ 1263076,
+};
+
+#define FG_CONVERSION_FACTOR (64198531LL)
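+/*
+ * Convert the signed 24-bit filtered current sample into uAh: pick a
+ * correction factor from the number of ~1.47 s fuel gauge cycles covered
+ * by delta_ms, then apply the fixed conversion factor with rounding.
+ */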
+static int iavg_3b_to_uah(u8 *buffer, int delta_ms)
+{
+ int64_t val, i_filtered;
+ int i, correction_factor;
+
+ for (i = 0; i < ARRAY_SIZE(correction_times); i++) {
+ if (correction_times[i] > delta_ms)
+ break;
+ }
+ if (i >= ARRAY_SIZE(correction_times)) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("fuel gauge took more than 32 cycles\n");
+ i = ARRAY_SIZE(correction_times) - 1;
+ }
+ correction_factor = correction_factors[i];
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("delta_ms = %d, cycles = %d, correction = %d\n",
+ delta_ms, i, correction_factor);
+ val = buffer[2] << 16 | buffer[1] << 8 | buffer[0];
+ /* convert val from signed 24b to signed 64b */
+ i_filtered = (val << 40) >> 40;
+ val = i_filtered * correction_factor;
+ val = div64_s64(val + FG_CONVERSION_FACTOR / 2, FG_CONVERSION_FACTOR);
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("i_filtered = 0x%llx/%lld, cc_uah = %lld\n",
+ i_filtered, i_filtered, val);
+
+ return val;
+}
+
+static bool fg_is_temperature_ok_for_learning(struct fg_chip *chip)
+{
+ int batt_temp = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
+
+ if (batt_temp > chip->learning_data.max_temp
+ || batt_temp < chip->learning_data.min_temp) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("temp (%d) out of range [%d, %d], aborting\n",
+ batt_temp,
+ chip->learning_data.min_temp,
+ chip->learning_data.max_temp);
+ return false;
+ }
+ return true;
+}
+
+static void fg_cap_learning_stop(struct fg_chip *chip)
+{
+ chip->learning_data.cc_uah = 0;
+ chip->learning_data.active = false;
+}
+
+#define I_FILTERED_REG 0x584
+static void fg_cap_learning_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ fg_cap_learning_work);
+ u8 i_filtered[3], data[3];
+ int rc, cc_uah, delta_ms;
+ ktime_t now_kt, delta_kt;
+
+ mutex_lock(&chip->learning_data.learning_lock);
+ if (!chip->learning_data.active)
+ goto fail;
+ if (!fg_is_temperature_ok_for_learning(chip)) {
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+
+ if (chip->wa_flag & USE_CC_SOC_REG) {
+ mutex_unlock(&chip->learning_data.learning_lock);
+ fg_relax(&chip->capacity_learning_wakeup_source);
+ return;
+ }
+
+ fg_mem_lock(chip);
+
+ rc = fg_mem_read(chip, i_filtered, I_FILTERED_REG, 3, 0, 0);
+ if (rc) {
+ pr_err("Failed to read i_filtered: %d\n", rc);
+ fg_mem_release(chip);
+ goto fail;
+ }
+ memset(data, 0, 3);
+ rc = fg_mem_write(chip, data, I_FILTERED_REG, 3, 0, 0);
+ if (rc) {
+ pr_err("Failed to clear i_filtered: %d\n", rc);
+ fg_mem_release(chip);
+ goto fail;
+ }
+ fg_mem_release(chip);
+
+ now_kt = ktime_get_boottime();
+ delta_kt = ktime_sub(now_kt, chip->learning_data.time_stamp);
+ chip->learning_data.time_stamp = now_kt;
+
+ delta_ms = (int)div64_s64(ktime_to_ns(delta_kt), 1000000);
+
+ cc_uah = iavg_3b_to_uah(i_filtered, delta_ms);
+ chip->learning_data.cc_uah -= cc_uah;
+ if (fg_debug_mask & FG_AGING)
+ pr_info("total_cc_uah = %lld\n", chip->learning_data.cc_uah);
+
+fail:
+ mutex_unlock(&chip->learning_data.learning_lock);
+ return;
+}
+
+#define CC_SOC_BASE_REG 0x5BC
+#define CC_SOC_OFFSET 3
+#define CC_SOC_MAGNITUDE_MASK 0x1FFFFFFF
+#define CC_SOC_NEGATIVE_BIT BIT(29)
+static int fg_get_cc_soc(struct fg_chip *chip, int *cc_soc)
+{
+ int rc;
+ u8 reg[4];
+ unsigned int temp, magnitude;
+
+ rc = fg_mem_read(chip, reg, CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read CC_SOC_REG rc=%d\n", rc);
+ return rc;
+ }
+
+ temp = reg[3] << 24 | reg[2] << 16 | reg[1] << 8 | reg[0];
+ magnitude = temp & CC_SOC_MAGNITUDE_MASK;
+ if (temp & CC_SOC_NEGATIVE_BIT)
+ *cc_soc = -1 * (~magnitude + 1);
+ else
+ *cc_soc = magnitude;
+
+ return 0;
+}
+
+static int fg_cap_learning_process_full_data(struct fg_chip *chip)
+{
+ int cc_pc_val, rc = -EINVAL;
+ unsigned int cc_soc_delta_pc;
+ int64_t delta_cc_uah;
+
+ if (!chip->learning_data.active)
+ goto fail;
+
+ if (!fg_is_temperature_ok_for_learning(chip)) {
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+
+ rc = fg_get_cc_soc(chip, &cc_pc_val);
+ if (rc) {
+ pr_err("failed to get CC_SOC, stopping capacity learning\n");
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+
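+ /*
+ * Convert the CC_SOC change since learning started into a percentage
+ * of full charge, then into uAh of the currently learned capacity.
+ */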
+ cc_soc_delta_pc = DIV_ROUND_CLOSEST(
+ abs(cc_pc_val - chip->learning_data.init_cc_pc_val)
+ * 100, FULL_PERCENT_28BIT);
+
+ delta_cc_uah = div64_s64(
+ chip->learning_data.learned_cc_uah * cc_soc_delta_pc,
+ 100);
+ chip->learning_data.cc_uah = delta_cc_uah + chip->learning_data.cc_uah;
+
+ if (fg_debug_mask & FG_AGING)
+ pr_info("current cc_soc=%d cc_soc_pc=%d total_cc_uah = %lld\n",
+ cc_pc_val, cc_soc_delta_pc,
+ chip->learning_data.cc_uah);
+
+ return 0;
+
+fail:
+ return rc;
+}
+
+#define FG_CAP_LEARNING_INTERVAL_NS 30000000000
+static enum alarmtimer_restart fg_cap_learning_alarm_cb(struct alarm *alarm,
+ ktime_t now)
+{
+ struct fg_chip *chip = container_of(alarm, struct fg_chip,
+ fg_cap_learning_alarm);
+
+ if (chip->learning_data.active) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("alarm fired\n");
+ schedule_work(&chip->fg_cap_learning_work);
+ alarm_forward_now(alarm,
+ ns_to_ktime(FG_CAP_LEARNING_INTERVAL_NS));
+ return ALARMTIMER_RESTART;
+ }
+ if (fg_debug_mask & FG_AGING)
+ pr_info("alarm misfired\n");
+ return ALARMTIMER_NORESTART;
+}
+
+#define FG_AGING_STORAGE_REG 0x5E4
+#define ACTUAL_CAPACITY_REG 0x578
+#define MAH_TO_SOC_CONV_REG 0x4A0
+#define CC_SOC_COEFF_OFFSET 0
+#define ACTUAL_CAPACITY_OFFSET 2
+#define MAH_TO_SOC_CONV_CS_OFFSET 0
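+/*
+ * Recompute the CC-SOC coefficient from the newly learned capacity:
+ * coeff = (mah_to_soc * MICRO_UNIT) / capacity in mAh, stored back to
+ * SRAM in the half-float format used elsewhere in this driver.
+ */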
+static int fg_calc_and_store_cc_soc_coeff(struct fg_chip *chip, int16_t cc_mah)
+{
+ int rc;
+ int64_t cc_to_soc_coeff, mah_to_soc;
+ u8 data[2];
+
+ rc = fg_mem_write(chip, (u8 *)&cc_mah, ACTUAL_CAPACITY_REG, 2,
+ ACTUAL_CAPACITY_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to store actual capacity: %d\n", rc);
+ return rc;
+ }
+
+ rc = fg_mem_read(chip, (u8 *)&data, MAH_TO_SOC_CONV_REG, 2,
+ MAH_TO_SOC_CONV_CS_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read mah_to_soc_conv_cs: %d\n", rc);
+ } else {
+ mah_to_soc = data[1] << 8 | data[0];
+ mah_to_soc *= MICRO_UNIT;
+ cc_to_soc_coeff = div64_s64(mah_to_soc, cc_mah);
+ half_float_to_buffer(cc_to_soc_coeff, data);
+ rc = fg_mem_write(chip, (u8 *)data,
+ ACTUAL_CAPACITY_REG, 2,
+ CC_SOC_COEFF_OFFSET, 0);
+ if (rc)
+ pr_err("Failed to write cc_soc_coeff_offset: %d\n",
+ rc);
+ else if (fg_debug_mask & FG_AGING)
+ pr_info("new cc_soc_coeff %lld [%x %x] saved to sram\n",
+ cc_to_soc_coeff, data[0], data[1]);
+ }
+ return rc;
+}
+
+static void fg_cap_learning_load_data(struct fg_chip *chip)
+{
+ int16_t cc_mah;
+ int64_t old_cap = chip->learning_data.learned_cc_uah;
+ int rc;
+
+ rc = fg_mem_read(chip, (u8 *)&cc_mah, FG_AGING_STORAGE_REG, 2, 0, 0);
+ if (rc) {
+ pr_err("Failed to load aged capacity: %d\n", rc);
+ } else {
+ chip->learning_data.learned_cc_uah = cc_mah * 1000;
+ if (fg_debug_mask & FG_AGING)
+ pr_info("learned capacity %lld-> %lld/%x uah\n",
+ old_cap,
+ chip->learning_data.learned_cc_uah,
+ cc_mah);
+ }
+}
+
+static void fg_cap_learning_save_data(struct fg_chip *chip)
+{
+ int16_t cc_mah;
+ int rc;
+
+ cc_mah = div64_s64(chip->learning_data.learned_cc_uah, 1000);
+
+ rc = fg_mem_write(chip, (u8 *)&cc_mah, FG_AGING_STORAGE_REG, 2, 0, 0);
+ if (rc)
+ pr_err("Failed to store aged capacity: %d\n", rc);
+ else if (fg_debug_mask & FG_AGING)
+ pr_info("learned capacity %lld uah (%d/0x%x uah) saved to sram\n",
+ chip->learning_data.learned_cc_uah,
+ cc_mah, cc_mah);
+
+ if (chip->learning_data.feedback_on) {
+ rc = fg_calc_and_store_cc_soc_coeff(chip, cc_mah);
+ if (rc)
+ pr_err("Error in storing cc_soc_coeff, rc:%d\n", rc);
+ }
+}
+
+static void fg_cap_learning_post_process(struct fg_chip *chip)
+{
+ int64_t max_inc_val, min_dec_val, old_cap;
+
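+ /*
+ * Clamp the measured capacity to within max_increment/max_decrement
+ * (in 0.1% units) of the previously learned value so that a single
+ * noisy charge cycle cannot swing the estimate.
+ */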
+ max_inc_val = chip->learning_data.learned_cc_uah
+ * (1000 + chip->learning_data.max_increment);
+ do_div(max_inc_val, 1000);
+
+ min_dec_val = chip->learning_data.learned_cc_uah
+ * (1000 - chip->learning_data.max_decrement);
+ do_div(min_dec_val, 1000);
+
+ old_cap = chip->learning_data.learned_cc_uah;
+ if (chip->learning_data.cc_uah > max_inc_val)
+ chip->learning_data.learned_cc_uah = max_inc_val;
+ else if (chip->learning_data.cc_uah < min_dec_val)
+ chip->learning_data.learned_cc_uah = min_dec_val;
+ else
+ chip->learning_data.learned_cc_uah =
+ chip->learning_data.cc_uah;
+
+ fg_cap_learning_save_data(chip);
+ if (fg_debug_mask & FG_AGING)
+ pr_info("final cc_uah = %lld, learned capacity %lld -> %lld uah\n",
+ chip->learning_data.cc_uah,
+ old_cap, chip->learning_data.learned_cc_uah);
+}
+
+static int get_vbat_est_diff(struct fg_chip *chip)
+{
+ return abs(fg_data[FG_DATA_VOLTAGE].value
+ - fg_data[FG_DATA_CPRED_VOLTAGE].value);
+}
+
+#define CBITS_INPUT_FILTER_REG 0x4B4
+#define IBATTF_TAU_MASK 0x38
+#define IBATTF_TAU_99_S 0x30
+static int fg_cap_learning_check(struct fg_chip *chip)
+{
+ u8 data[4];
+ int rc = 0, battery_soc, cc_pc_val;
+ int vbat_est_diff, vbat_est_thr_uv;
+ unsigned int cc_pc_100 = FULL_PERCENT_28BIT;
+
+ mutex_lock(&chip->learning_data.learning_lock);
+ if (chip->status == POWER_SUPPLY_STATUS_CHARGING
+ && !chip->learning_data.active
+ && chip->batt_aging_mode == FG_AGING_CC) {
+ if (chip->learning_data.learned_cc_uah == 0) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("no capacity, aborting\n");
+ goto fail;
+ }
+
+ if (!fg_is_temperature_ok_for_learning(chip))
+ goto fail;
+
+ fg_mem_lock(chip);
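+ /*
+ * When feedback is not enabled, sanity-check the voltage model first:
+ * a large gap between the measured and predicted VBAT suggests the
+ * current SOC estimate is unreliable, so learning is skipped for this
+ * cycle.
+ */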
+ if (!chip->learning_data.feedback_on) {
+ vbat_est_diff = get_vbat_est_diff(chip);
+ vbat_est_thr_uv = chip->learning_data.vbat_est_thr_uv;
+ if (vbat_est_diff >= vbat_est_thr_uv &&
+ vbat_est_thr_uv > 0) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("vbat_est_diff (%d) < threshold (%d)\n",
+ vbat_est_diff, vbat_est_thr_uv);
+ fg_mem_release(chip);
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+ }
+ battery_soc = get_battery_soc_raw(chip);
+ if (fg_debug_mask & FG_AGING)
+ pr_info("checking battery soc (%d vs %d)\n",
+ battery_soc * 100 / FULL_PERCENT_3B,
+ chip->learning_data.max_start_soc);
+ /* check if the battery is low enough to start soc learning */
+ if (battery_soc * 100 / FULL_PERCENT_3B
+ > chip->learning_data.max_start_soc) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("battery soc too low (%d < %d), aborting\n",
+ battery_soc * 100 / FULL_PERCENT_3B,
+ chip->learning_data.max_start_soc);
+ fg_mem_release(chip);
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+
+ /* set the coulomb counter to a percentage of the capacity */
+ chip->learning_data.cc_uah = div64_s64(
+ (chip->learning_data.learned_cc_uah * battery_soc),
+ FULL_PERCENT_3B);
+
+ /* Use CC_SOC_REG based capacity learning */
+ if (chip->wa_flag & USE_CC_SOC_REG) {
+ fg_mem_release(chip);
+ /* SW_CC_SOC based capacity learning */
+ if (fg_get_cc_soc(chip, &cc_pc_val)) {
+ pr_err("failed to get CC_SOC, stop capacity learning\n");
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+
+ chip->learning_data.init_cc_pc_val = cc_pc_val;
+ chip->learning_data.active = true;
+ if (fg_debug_mask & FG_AGING)
+ pr_info("SW_CC_SOC based learning init_CC_SOC=%d\n",
+ chip->learning_data.init_cc_pc_val);
+ } else {
+ rc = fg_mem_masked_write(chip, CBITS_INPUT_FILTER_REG,
+ IBATTF_TAU_MASK, IBATTF_TAU_99_S, 0);
+ if (rc) {
+ pr_err("Failed to write IF IBAT Tau: %d\n",
+ rc);
+ fg_mem_release(chip);
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+
+ /* clear the i_filtered register */
+ memset(data, 0, 4);
+ rc = fg_mem_write(chip, data, I_FILTERED_REG, 3, 0, 0);
+ if (rc) {
+ pr_err("Failed to clear i_filtered: %d\n", rc);
+ fg_mem_release(chip);
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+ fg_mem_release(chip);
+ chip->learning_data.time_stamp = ktime_get_boottime();
+ chip->learning_data.active = true;
+
+ if (fg_debug_mask & FG_AGING)
+ pr_info("cap learning started, soc = %d cc_uah = %lld\n",
+ battery_soc * 100 / FULL_PERCENT_3B,
+ chip->learning_data.cc_uah);
+ alarm_start_relative(&chip->fg_cap_learning_alarm,
+ ns_to_ktime(FG_CAP_LEARNING_INTERVAL_NS));
+ }
+ } else if ((chip->status != POWER_SUPPLY_STATUS_CHARGING)
+ && chip->learning_data.active) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("capacity learning stopped\n");
+ if (!(chip->wa_flag & USE_CC_SOC_REG))
+ alarm_try_to_cancel(&chip->fg_cap_learning_alarm);
+
+ if (chip->status == POWER_SUPPLY_STATUS_FULL) {
+ if (chip->wa_flag & USE_CC_SOC_REG) {
+ rc = fg_cap_learning_process_full_data(chip);
+ if (rc) {
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+ /* reset SW_CC_SOC register to 100% */
+ rc = fg_mem_write(chip, (u8 *)&cc_pc_100,
+ CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0);
+ if (rc)
+ pr_err("Failed to reset CC_SOC_REG rc=%d\n",
+ rc);
+ }
+ fg_cap_learning_post_process(chip);
+ }
+
+ fg_cap_learning_stop(chip);
+ }
+
+fail:
+ mutex_unlock(&chip->learning_data.learning_lock);
+ return rc;
+}
+
+static bool is_usb_present(struct fg_chip *chip)
+{
+ union power_supply_propval prop = {0,};
+ if (!chip->usb_psy)
+ chip->usb_psy = power_supply_get_by_name("usb");
+
+ if (chip->usb_psy)
+ power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_PRESENT, &prop);
+ return prop.intval != 0;
+}
+
+static bool is_dc_present(struct fg_chip *chip)
+{
+ union power_supply_propval prop = {0,};
+ if (!chip->dc_psy)
+ chip->dc_psy = power_supply_get_by_name("dc");
+
+ if (chip->dc_psy)
+ power_supply_get_property(chip->dc_psy,
+ POWER_SUPPLY_PROP_PRESENT, &prop);
+ return prop.intval != 0;
+}
+
+static bool is_input_present(struct fg_chip *chip)
+{
+ return is_usb_present(chip) || is_dc_present(chip);
+}
+
+static bool is_otg_present(struct fg_chip *chip)
+{
+ union power_supply_propval prop = {0,};
+
+ if (!chip->usb_psy)
+ chip->usb_psy = power_supply_get_by_name("usb");
+
+ if (chip->usb_psy)
+ power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_USB_OTG, &prop);
+ return prop.intval != 0;
+}
+
+static bool is_charger_available(struct fg_chip *chip)
+{
+ if (!chip->batt_psy_name)
+ return false;
+
+ if (!chip->batt_psy)
+ chip->batt_psy = power_supply_get_by_name(chip->batt_psy_name);
+
+ if (!chip->batt_psy)
+ return false;
+
+ return true;
+}
+
+static int set_prop_enable_charging(struct fg_chip *chip, bool enable)
+{
+ int rc = 0;
+ union power_supply_propval ret = {enable, };
+
+ if (!is_charger_available(chip)) {
+ pr_err("Charger not available yet!\n");
+ return -EINVAL;
+ }
+
+ rc = power_supply_set_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED,
+ &ret);
+ if (rc) {
+ pr_err("couldn't configure batt chg %d\n", rc);
+ return rc;
+ }
+
+ chip->charging_disabled = !enable;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("%sabling charging\n", enable ? "en" : "dis");
+
+ return rc;
+}
+
+#define MAX_BATTERY_CC_SOC_CAPACITY 150
+static void status_change_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ status_change_work);
+ unsigned long current_time = 0;
+ int cc_soc, rc, capacity = get_prop_capacity(chip);
+
+ if (chip->esr_pulse_tune_en) {
+ fg_stay_awake(&chip->esr_extract_wakeup_source);
+ schedule_work(&chip->esr_extract_config_work);
+ }
+
+ if (chip->status == POWER_SUPPLY_STATUS_FULL) {
+ if (capacity >= 99 && chip->hold_soc_while_full
+ && chip->health == POWER_SUPPLY_HEALTH_GOOD) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("holding soc at 100\n");
+ chip->charge_full = true;
+ } else if (fg_debug_mask & FG_STATUS) {
+ pr_info("terminated charging at %d/0x%02x\n",
+ capacity, get_monotonic_soc_raw(chip));
+ }
+ }
+ if (chip->status == POWER_SUPPLY_STATUS_FULL ||
+ chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ if (!chip->vbat_low_irq_enabled) {
+ enable_irq(chip->batt_irq[VBATT_LOW].irq);
+ enable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = true;
+ }
+ if (!!(chip->wa_flag & PULSE_REQUEST_WA) && capacity == 100)
+ fg_configure_soc(chip);
+ } else if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
+ if (chip->vbat_low_irq_enabled) {
+ disable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+ disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = false;
+ }
+ }
+ fg_cap_learning_check(chip);
+ schedule_work(&chip->update_esr_work);
+
+ if (chip->wa_flag & USE_CC_SOC_REG) {
+ if (fg_get_cc_soc(chip, &cc_soc)) {
+ pr_err("failed to get CC_SOC\n");
+ return;
+ }
+ }
+
+ if (chip->prev_status != chip->status && chip->last_sram_update_time) {
+ get_current_time(&current_time);
+ /*
+ * When charging status changes, update SRAM parameters if it
+ * was not updated before 5 seconds from now.
+ */
+ if (chip->last_sram_update_time + 5 < current_time) {
+ cancel_delayed_work(&chip->update_sram_data);
+ schedule_delayed_work(&chip->update_sram_data,
+ msecs_to_jiffies(0));
+ }
+ if (chip->cyc_ctr.en)
+ schedule_work(&chip->cycle_count_work);
+ if ((chip->wa_flag & USE_CC_SOC_REG) &&
+ chip->bad_batt_detection_en &&
+ chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ chip->sw_cc_soc_data.init_sys_soc = capacity;
+ chip->sw_cc_soc_data.init_cc_soc = cc_soc;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info(" Init_sys_soc %d init_cc_soc %d\n",
+ chip->sw_cc_soc_data.init_sys_soc,
+ chip->sw_cc_soc_data.init_cc_soc);
+ }
+ }
+ if ((chip->wa_flag & USE_CC_SOC_REG) && chip->bad_batt_detection_en
+ && chip->safety_timer_expired) {
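+ /*
+ * Estimate how much charge went in since charging started by adding
+ * the CC_SOC delta (as a percentage) to the system SOC captured when
+ * charging began; a result well above 100% indicates a battery that
+ * is not holding its charge.
+ */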
+ chip->sw_cc_soc_data.delta_soc =
+ DIV_ROUND_CLOSEST(abs(cc_soc -
+ chip->sw_cc_soc_data.init_cc_soc)
+ * 100, FULL_PERCENT_28BIT);
+ chip->sw_cc_soc_data.full_capacity =
+ chip->sw_cc_soc_data.delta_soc +
+ chip->sw_cc_soc_data.init_sys_soc;
+ pr_info("Init_sys_soc %d init_cc_soc %d cc_soc %d delta_soc %d full_capacity %d\n",
+ chip->sw_cc_soc_data.init_sys_soc,
+ chip->sw_cc_soc_data.init_cc_soc, cc_soc,
+ chip->sw_cc_soc_data.delta_soc,
+ chip->sw_cc_soc_data.full_capacity);
+ /*
+ * If the sw_cc_soc based full capacity exceeds
+ * MAX_BATTERY_CC_SOC_CAPACITY (150%), treat the battery as bad and do
+ * not restart charging; otherwise reset the safety timer and restart
+ * charging.
+ */
+ if (chip->sw_cc_soc_data.full_capacity >
+ MAX_BATTERY_CC_SOC_CAPACITY) {
+ pr_info("Battery possibly damaged, do not restart charging\n");
+ } else {
+ pr_info("Reset safety-timer and restart charging\n");
+ rc = set_prop_enable_charging(chip, false);
+ if (rc) {
+ pr_err("failed to disable charging %d\n", rc);
+ return;
+ }
+
+ chip->safety_timer_expired = false;
+ msleep(200);
+
+ rc = set_prop_enable_charging(chip, true);
+ if (rc) {
+ pr_err("failed to enable charging %d\n", rc);
+ return;
+ }
+ }
+ }
+}
+
+/*
+ * Check for change in the status of input or OTG and schedule
+ * IADC gain compensation work.
+ */
+static void check_gain_compensation(struct fg_chip *chip)
+{
+ bool input_present = is_input_present(chip);
+ bool otg_present = is_otg_present(chip);
+
+ if ((chip->wa_flag & IADC_GAIN_COMP_WA)
+ && ((chip->input_present ^ input_present)
+ || (chip->otg_present ^ otg_present))) {
+ fg_stay_awake(&chip->gain_comp_wakeup_source);
+ chip->input_present = input_present;
+ chip->otg_present = otg_present;
+ cancel_work_sync(&chip->gain_comp_work);
+ schedule_work(&chip->gain_comp_work);
+ }
+}
+
+static void fg_hysteresis_config(struct fg_chip *chip)
+{
+ int hard_hot = 0, hard_cold = 0;
+
+ hard_hot = get_prop_jeita_temp(chip, FG_MEM_HARD_HOT);
+ hard_cold = get_prop_jeita_temp(chip, FG_MEM_HARD_COLD);
+ if (chip->health == POWER_SUPPLY_HEALTH_OVERHEAT && !chip->batt_hot) {
+ /* turn down the hard hot threshold */
+ chip->batt_hot = true;
+ set_prop_jeita_temp(chip, FG_MEM_HARD_HOT,
+ hard_hot - chip->hot_hysteresis);
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("hard hot hysteresis: old hot=%d, new hot=%d\n",
+ hard_hot, hard_hot - chip->hot_hysteresis);
+ } else if (chip->health == POWER_SUPPLY_HEALTH_COLD &&
+ !chip->batt_cold) {
+ /* turn up the hard cold threshold */
+ chip->batt_cold = true;
+ set_prop_jeita_temp(chip, FG_MEM_HARD_COLD,
+ hard_cold + chip->cold_hysteresis);
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("hard cold hysteresis: old cold=%d, new cold=%d\n",
+ hard_cold, hard_cold + chip->cold_hysteresis);
+ } else if (chip->health != POWER_SUPPLY_HEALTH_OVERHEAT &&
+ chip->batt_hot) {
+ /* restore the hard hot threshold */
+ set_prop_jeita_temp(chip, FG_MEM_HARD_HOT,
+ hard_hot + chip->hot_hysteresis);
+ chip->batt_hot = !chip->batt_hot;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("restore hard hot threshold: old hot=%d, new hot=%d\n",
+ hard_hot,
+ hard_hot + chip->hot_hysteresis);
+ } else if (chip->health != POWER_SUPPLY_HEALTH_COLD &&
+ chip->batt_cold) {
+ /* restore the hard cold threshold */
+ set_prop_jeita_temp(chip, FG_MEM_HARD_COLD,
+ hard_cold - chip->cold_hysteresis);
+ chip->batt_cold = !chip->batt_cold;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("restore hard cold threshold: old cold=%d, new cold=%d\n",
+ hard_cold,
+ hard_cold - chip->cold_hysteresis);
+ }
+}
+
+#define BATT_INFO_STS(base) (base + 0x09)
+#define JEITA_HARD_HOT_RT_STS BIT(6)
+#define JEITA_HARD_COLD_RT_STS BIT(5)
+static int fg_init_batt_temp_state(struct fg_chip *chip)
+{
+ int rc = 0;
+ u8 batt_info_sts;
+ int hard_hot = 0, hard_cold = 0;
+
+ /*
+ * read the batt_info_sts register to parse battery's
+ * initial status and do hysteresis config accordingly.
+ */
+ rc = fg_read(chip, &batt_info_sts,
+ BATT_INFO_STS(chip->batt_base), 1);
+ if (rc) {
+ pr_err("failed to read batt info sts, rc=%d\n", rc);
+ return rc;
+ }
+
+ hard_hot = get_prop_jeita_temp(chip, FG_MEM_HARD_HOT);
+ hard_cold = get_prop_jeita_temp(chip, FG_MEM_HARD_COLD);
+ chip->batt_hot =
+ (batt_info_sts & JEITA_HARD_HOT_RT_STS) ? true : false;
+ chip->batt_cold =
+ (batt_info_sts & JEITA_HARD_COLD_RT_STS) ? true : false;
+ if (chip->batt_hot || chip->batt_cold) {
+ if (chip->batt_hot) {
+ chip->health = POWER_SUPPLY_HEALTH_OVERHEAT;
+ set_prop_jeita_temp(chip, FG_MEM_HARD_HOT,
+ hard_hot - chip->hot_hysteresis);
+ } else {
+ chip->health = POWER_SUPPLY_HEALTH_COLD;
+ set_prop_jeita_temp(chip, FG_MEM_HARD_COLD,
+ hard_cold + chip->cold_hysteresis);
+ }
+ }
+
+ return rc;
+}
+
+static int fg_power_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct fg_chip *chip = power_supply_get_drvdata(psy);
+ int rc = 0, unused;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_COOL_TEMP:
+ rc = set_prop_jeita_temp(chip, FG_MEM_SOFT_COLD, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_WARM_TEMP:
+ rc = set_prop_jeita_temp(chip, FG_MEM_SOFT_HOT, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_UPDATE_NOW:
+ if (val->intval)
+ update_sram_data(chip, &unused);
+ break;
+ case POWER_SUPPLY_PROP_STATUS:
+ chip->prev_status = chip->status;
+ chip->status = val->intval;
+ schedule_work(&chip->status_change_work);
+ check_gain_compensation(chip);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ chip->health = val->intval;
+ if (chip->health == POWER_SUPPLY_HEALTH_GOOD) {
+ fg_stay_awake(&chip->resume_soc_wakeup_source);
+ schedule_work(&chip->set_resume_soc_work);
+ }
+
+ if (chip->jeita_hysteresis_support)
+ fg_hysteresis_config(chip);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_DONE:
+ chip->charge_done = val->intval;
+ if (!chip->resume_soc_lowered) {
+ fg_stay_awake(&chip->resume_soc_wakeup_source);
+ schedule_work(&chip->set_resume_soc_work);
+ }
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+ if ((val->intval > 0) && (val->intval <= BUCKET_COUNT)) {
+ chip->cyc_ctr.id = val->intval;
+ } else {
+ pr_err("rejecting invalid cycle_count_id = %d\n",
+ val->intval);
+ rc = -EINVAL;
+ }
+ break;
+ case POWER_SUPPLY_PROP_SAFETY_TIMER_EXPIRED:
+ chip->safety_timer_expired = val->intval;
+ schedule_work(&chip->status_change_work);
+ break;
+ case POWER_SUPPLY_PROP_HI_POWER:
+ if (chip->wa_flag & BCL_HI_POWER_FOR_CHGLED_WA) {
+ chip->bcl_lpm_disabled = !!val->intval;
+ schedule_work(&chip->bcl_hi_power_work);
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int fg_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_COOL_TEMP:
+ case POWER_SUPPLY_PROP_WARM_TEMP:
+ case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+ return 1;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define SRAM_DUMP_START 0x400
+#define SRAM_DUMP_LEN 0x200
+static void dump_sram(struct work_struct *work)
+{
+ int i, rc;
+ u8 *buffer, rt_sts;
+ char str[16];
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ dump_sram);
+
+ buffer = devm_kzalloc(chip->dev, SRAM_DUMP_LEN, GFP_KERNEL);
+ if (buffer == NULL) {
+ pr_err("Can't allocate buffer\n");
+ return;
+ }
+
+ rc = fg_read(chip, &rt_sts, INT_RT_STS(chip->soc_base), 1);
+ if (rc)
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->soc_base), rc);
+ else
+ pr_info("soc rt_sts: 0x%x\n", rt_sts);
+
+ rc = fg_read(chip, &rt_sts, INT_RT_STS(chip->batt_base), 1);
+ if (rc)
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->batt_base), rc);
+ else
+ pr_info("batt rt_sts: 0x%x\n", rt_sts);
+
+ rc = fg_read(chip, &rt_sts, INT_RT_STS(chip->mem_base), 1);
+ if (rc)
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->mem_base), rc);
+ else
+ pr_info("memif rt_sts: 0x%x\n", rt_sts);
+
+ rc = fg_mem_read(chip, buffer, SRAM_DUMP_START, SRAM_DUMP_LEN, 0, 0);
+ if (rc) {
+ pr_err("dump failed: rc = %d\n", rc);
+ return;
+ }
+
+ for (i = 0; i < SRAM_DUMP_LEN; i += 4) {
+ str[0] = '\0';
+ fill_string(str, sizeof(str), buffer + i, 4);
+ pr_info("%03X %s\n", SRAM_DUMP_START + i, str);
+ }
+ devm_kfree(chip->dev, buffer);
+}
+
+#define MAXRSCHANGE_REG 0x434
+#define ESR_VALUE_OFFSET 1
+#define ESR_STRICT_VALUE 0x4120391F391F3019
+#define ESR_DEFAULT_VALUE 0x58CD4A6761C34A67
+static void update_esr_value(struct work_struct *work)
+{
+ union power_supply_propval prop = {0, };
+ u64 esr_value;
+ int rc = 0;
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ update_esr_work);
+
+ if (!is_charger_available(chip))
+ return;
+
+ power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CHARGE_TYPE, &prop);
+
+ if (!chip->esr_strict_filter) {
+ if ((prop.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER &&
+ chip->status == POWER_SUPPLY_STATUS_CHARGING) ||
+ (chip->status == POWER_SUPPLY_STATUS_FULL)) {
+ esr_value = ESR_STRICT_VALUE;
+ rc = fg_mem_write(chip, (u8 *)&esr_value,
+ MAXRSCHANGE_REG, 8,
+ ESR_VALUE_OFFSET, 0);
+ if (rc)
+ pr_err("failed to write strict ESR value rc=%d\n",
+ rc);
+ else
+ chip->esr_strict_filter = true;
+ }
+ } else if ((prop.intval != POWER_SUPPLY_CHARGE_TYPE_TAPER &&
+ chip->status == POWER_SUPPLY_STATUS_CHARGING) ||
+ (chip->status == POWER_SUPPLY_STATUS_DISCHARGING)) {
+ esr_value = ESR_DEFAULT_VALUE;
+ rc = fg_mem_write(chip, (u8 *)&esr_value, MAXRSCHANGE_REG, 8,
+ ESR_VALUE_OFFSET, 0);
+ if (rc)
+ pr_err("failed to write default ESR value rc=%d\n", rc);
+ else
+ chip->esr_strict_filter = false;
+ }
+}
+
+#define TEMP_COUNTER_REG 0x580
+#define VBAT_FILTERED_OFFSET 1
+#define GAIN_REG 0x424
+#define GAIN_OFFSET 1
+#define K_VCOR_REG 0x484
+#define DEF_GAIN_OFFSET 2
+#define PICO_UNIT 0xE8D4A51000LL
+#define ATTO_UNIT 0xDE0B6B3A7640000LL
+#define VBAT_REF 3800000
+
+/*
+ * IADC Gain compensation steps:
+ * If Input/OTG absent:
+ * - read VBAT_FILTERED, KVCOR, GAIN
+ * - calculate the gain compensation using following formula:
+ * gain = (1 + gain) * (1 + kvcor * (vbat_filtered - 3800000)) - 1;
+ * else
+ * - reset to the default gain compensation
+ */
+static void iadc_gain_comp_work(struct work_struct *work)
+{
+ u8 reg[4];
+ int rc;
+ uint64_t vbat_filtered;
+ int64_t gain, kvcor, temp, numerator;
+ struct fg_chip *chip = container_of(work, struct fg_chip,
+ gain_comp_work);
+ bool input_present = is_input_present(chip);
+ bool otg_present = is_otg_present(chip);
+
+ if (!chip->init_done)
+ goto done;
+
+ if (!input_present && !otg_present) {
+ /* read VBAT_FILTERED */
+ rc = fg_mem_read(chip, reg, TEMP_COUNTER_REG, 3,
+ VBAT_FILTERED_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read VBAT: rc=%d\n", rc);
+ goto done;
+ }
+ temp = (reg[2] << 16) | (reg[1] << 8) | reg[0];
+ vbat_filtered = div_u64((u64)temp * LSB_24B_NUMRTR,
+ LSB_24B_DENMTR);
+
+ /* read K_VCOR */
+ rc = fg_mem_read(chip, reg, K_VCOR_REG, 2, 0, 0);
+ if (rc) {
+ pr_err("Failed to KVCOR rc=%d\n", rc);
+ goto done;
+ }
+ kvcor = half_float(reg);
+
+ /* calculate gain */
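+ /*
+ * Fixed-point form of gain = (1 + gain) * (1 + kvcor * dV) - 1: the
+ * "1" terms are expressed as MICRO_UNIT, PICO_UNIT and ATTO_UNIT so
+ * no intermediate precision is lost, and dividing the numerator by
+ * PICO_UNIT leaves the result on the same scale as the stored default
+ * gain.
+ */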
+ numerator = (MICRO_UNIT + chip->iadc_comp_data.dfl_gain)
+ * (PICO_UNIT + kvcor * (vbat_filtered - VBAT_REF))
+ - ATTO_UNIT;
+ gain = div64_s64(numerator, PICO_UNIT);
+
+ /* write back gain */
+ half_float_to_buffer(gain, reg);
+ rc = fg_mem_write(chip, reg, GAIN_REG, 2, GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to write gain reg rc=%d\n", rc);
+ goto done;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("IADC gain update [%x %x]\n", reg[1], reg[0]);
+ chip->iadc_comp_data.gain_active = true;
+ } else {
+ /* reset gain register */
+ rc = fg_mem_write(chip, chip->iadc_comp_data.dfl_gain_reg,
+ GAIN_REG, 2, GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write gain comp: %d\n", rc);
+ goto done;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("IADC gain reset [%x %x]\n",
+ chip->iadc_comp_data.dfl_gain_reg[1],
+ chip->iadc_comp_data.dfl_gain_reg[0]);
+ chip->iadc_comp_data.gain_active = false;
+ }
+
+done:
+ fg_relax(&chip->gain_comp_wakeup_source);
+}
+
+#define BATT_MISSING_STS BIT(6)
+static bool is_battery_missing(struct fg_chip *chip)
+{
+ int rc;
+ u8 fg_batt_sts;
+
+ rc = fg_read(chip, &fg_batt_sts,
+ INT_RT_STS(chip->batt_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->batt_base), rc);
+ return false;
+ }
+
+ return (fg_batt_sts & BATT_MISSING_STS) ? true : false;
+}
+
+#define SOC_FIRST_EST_DONE BIT(5)
+static bool is_first_est_done(struct fg_chip *chip)
+{
+ int rc;
+ u8 fg_soc_sts;
+
+ rc = fg_read(chip, &fg_soc_sts,
+ INT_RT_STS(chip->soc_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->soc_base), rc);
+ return false;
+ }
+
+ return (fg_soc_sts & SOC_FIRST_EST_DONE) ? true : false;
+}
+
+static irqreturn_t fg_vbatt_low_handler(int irq, void *_chip)
+{
+ struct fg_chip *chip = _chip;
+ int rc;
+ bool vbatt_low_sts;
+
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("vbatt-low triggered\n");
+
+ if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ rc = fg_get_vbatt_status(chip, &vbatt_low_sts);
+ if (rc) {
+ pr_err("error in reading vbatt_status, rc:%d\n", rc);
+ goto out;
+ }
+ if (!vbatt_low_sts && chip->vbat_low_irq_enabled) {
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("disabling vbatt_low irq\n");
+ disable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+ disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = false;
+ }
+ }
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+out:
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_batt_missing_irq_handler(int irq, void *_chip)
+{
+ struct fg_chip *chip = _chip;
+ bool batt_missing = is_battery_missing(chip);
+
+ if (batt_missing) {
+ chip->battery_missing = true;
+ chip->profile_loaded = false;
+ chip->batt_type = default_batt_type;
+ mutex_lock(&chip->cyc_ctr.lock);
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("battery missing, clearing cycle counters\n");
+ clear_cycle_counter(chip);
+ mutex_unlock(&chip->cyc_ctr.lock);
+ } else {
+ if (!chip->use_otp_profile) {
+ reinit_completion(&chip->batt_id_avail);
+ reinit_completion(&chip->first_soc_done);
+ schedule_delayed_work(&chip->batt_profile_init, 0);
+ cancel_delayed_work(&chip->update_sram_data);
+ schedule_delayed_work(
+ &chip->update_sram_data,
+ msecs_to_jiffies(0));
+ } else {
+ chip->battery_missing = false;
+ }
+ }
+
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("batt-missing triggered: %s\n",
+ batt_missing ? "missing" : "present");
+
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_mem_avail_irq_handler(int irq, void *_chip)
+{
+ struct fg_chip *chip = _chip;
+ u8 mem_if_sts;
+ int rc;
+
+ rc = fg_read(chip, &mem_if_sts, INT_RT_STS(chip->mem_base), 1);
+ if (rc) {
+ pr_err("failed to read mem status rc=%d\n", rc);
+ return IRQ_HANDLED;
+ }
+
+ if (fg_check_sram_access(chip)) {
+ if ((fg_debug_mask & FG_IRQS) &&
+ (fg_debug_mask & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES)))
+ pr_info("sram access granted\n");
+ reinit_completion(&chip->sram_access_revoked);
+ complete_all(&chip->sram_access_granted);
+ } else {
+ if ((fg_debug_mask & FG_IRQS) &&
+ (fg_debug_mask & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES)))
+ pr_info("sram access revoked\n");
+ complete_all(&chip->sram_access_revoked);
+ }
+
+ if (!rc && (fg_debug_mask & FG_IRQS) &&
+ (fg_debug_mask & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES)))
+ pr_info("mem_if sts 0x%02x\n", mem_if_sts);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_soc_irq_handler(int irq, void *_chip)
+{
+ struct fg_chip *chip = _chip;
+ u8 soc_rt_sts;
+ int rc;
+
+ rc = fg_read(chip, &soc_rt_sts, INT_RT_STS(chip->soc_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->soc_base), rc);
+ }
+
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("triggered 0x%x\n", soc_rt_sts);
+
+ schedule_work(&chip->battery_age_work);
+
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+
+ if (chip->rslow_comp.chg_rs_to_rslow > 0 &&
+ chip->rslow_comp.chg_rslow_comp_c1 > 0 &&
+ chip->rslow_comp.chg_rslow_comp_c2 > 0)
+ schedule_work(&chip->rslow_comp_work);
+ if (chip->cyc_ctr.en)
+ schedule_work(&chip->cycle_count_work);
+ schedule_work(&chip->update_esr_work);
+ if (chip->charge_full)
+ schedule_work(&chip->charge_full_work);
+ if (chip->wa_flag & IADC_GAIN_COMP_WA
+ && chip->iadc_comp_data.gain_active) {
+ fg_stay_awake(&chip->gain_comp_wakeup_source);
+ schedule_work(&chip->gain_comp_work);
+ }
+
+ if (chip->wa_flag & USE_CC_SOC_REG
+ && chip->learning_data.active) {
+ fg_stay_awake(&chip->capacity_learning_wakeup_source);
+ schedule_work(&chip->fg_cap_learning_work);
+ }
+
+ if (chip->esr_pulse_tune_en) {
+ fg_stay_awake(&chip->esr_extract_wakeup_source);
+ schedule_work(&chip->esr_extract_config_work);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#define FG_EMPTY_DEBOUNCE_MS 1500
+static irqreturn_t fg_empty_soc_irq_handler(int irq, void *_chip)
+{
+ struct fg_chip *chip = _chip;
+ u8 soc_rt_sts;
+ int rc;
+
+ rc = fg_read(chip, &soc_rt_sts, INT_RT_STS(chip->soc_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->soc_base), rc);
+ goto done;
+ }
+
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("triggered 0x%x\n", soc_rt_sts);
+ if (fg_is_batt_empty(chip)) {
+ fg_stay_awake(&chip->empty_check_wakeup_source);
+ schedule_delayed_work(&chip->check_empty_work,
+ msecs_to_jiffies(FG_EMPTY_DEBOUNCE_MS));
+ } else {
+ chip->soc_empty = false;
+ }
+
+done:
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_first_soc_irq_handler(int irq, void *_chip)
+{
+ struct fg_chip *chip = _chip;
+
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("triggered\n");
+
+ if (fg_est_dump)
+ schedule_work(&chip->dump_sram);
+
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+
+ complete_all(&chip->first_soc_done);
+
+ return IRQ_HANDLED;
+}
+
+static void fg_external_power_changed(struct power_supply *psy)
+{
+ struct fg_chip *chip = power_supply_get_drvdata(psy);
+
+ if (is_input_present(chip) && chip->rslow_comp.active &&
+ chip->rslow_comp.chg_rs_to_rslow > 0 &&
+ chip->rslow_comp.chg_rslow_comp_c1 > 0 &&
+ chip->rslow_comp.chg_rslow_comp_c2 > 0)
+ schedule_work(&chip->rslow_comp_work);
+ if (!is_input_present(chip) && chip->resume_soc_lowered) {
+ fg_stay_awake(&chip->resume_soc_wakeup_source);
+ schedule_work(&chip->set_resume_soc_work);
+ }
+ if (!is_input_present(chip) && chip->charge_full)
+ schedule_work(&chip->charge_full_work);
+}
+
+static void set_resume_soc_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ set_resume_soc_work);
+ int rc, resume_soc_raw;
+
+ if (is_input_present(chip) && !chip->resume_soc_lowered) {
+ if (!chip->charge_done)
+ goto done;
+ resume_soc_raw = get_monotonic_soc_raw(chip)
+ - (0xFF - settings[FG_MEM_RESUME_SOC].value);
+ if (resume_soc_raw > 0 && resume_soc_raw < FULL_SOC_RAW) {
+ rc = fg_set_resume_soc(chip, resume_soc_raw);
+ if (rc) {
+ pr_err("Couldn't set resume SOC for FG\n");
+ goto done;
+ }
+ if (fg_debug_mask & FG_STATUS) {
+ pr_info("resume soc lowered to 0x%02x\n",
+ resume_soc_raw);
+ }
+ } else if (settings[FG_MEM_RESUME_SOC].value > 0) {
+ pr_err("bad resume soc 0x%02x\n", resume_soc_raw);
+ }
+ chip->charge_done = false;
+ chip->resume_soc_lowered = true;
+ } else if (chip->resume_soc_lowered && (!is_input_present(chip)
+ || chip->health == POWER_SUPPLY_HEALTH_GOOD)) {
+ resume_soc_raw = settings[FG_MEM_RESUME_SOC].value;
+ if (resume_soc_raw > 0 && resume_soc_raw < FULL_SOC_RAW) {
+ rc = fg_set_resume_soc(chip, resume_soc_raw);
+ if (rc) {
+ pr_err("Couldn't set resume SOC for FG\n");
+ goto done;
+ }
+ if (fg_debug_mask & FG_STATUS) {
+ pr_info("resume soc set to 0x%02x\n",
+ resume_soc_raw);
+ }
+ } else if (settings[FG_MEM_RESUME_SOC].value > 0) {
+ pr_err("bad resume soc 0x%02x\n", resume_soc_raw);
+ }
+ chip->resume_soc_lowered = false;
+ }
+done:
+ fg_relax(&chip->resume_soc_wakeup_source);
+}
+
+
+#define OCV_COEFFS_START_REG 0x4C0
+#define OCV_JUNCTION_REG 0x4D8
+#define NOM_CAP_REG 0x4F4
+#define CUTOFF_VOLTAGE_REG 0x40C
+#define RSLOW_CFG_REG 0x538
+#define RSLOW_CFG_OFFSET 2
+#define RSLOW_THRESH_REG 0x52C
+#define RSLOW_THRESH_OFFSET 0
+#define TEMP_RS_TO_RSLOW_OFFSET 2
+#define RSLOW_COMP_REG 0x528
+#define RSLOW_COMP_C1_OFFSET 0
+#define RSLOW_COMP_C2_OFFSET 2
+static int populate_system_data(struct fg_chip *chip)
+{
+ u8 buffer[24];
+ int rc, i;
+ int16_t cc_mah;
+
+ fg_mem_lock(chip);
+ rc = fg_mem_read(chip, buffer, OCV_COEFFS_START_REG, 24, 0, 0);
+ if (rc) {
+ pr_err("Failed to read ocv coefficients: %d\n", rc);
+ goto done;
+ }
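+ /* 12 OCV curve coefficients, each stored as a 16-bit half float */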
+ for (i = 0; i < 12; i += 1)
+ chip->ocv_coeffs[i] = half_float(buffer + (i * 2));
+ if (fg_debug_mask & FG_AGING) {
+ pr_info("coeffs1 = %lld %lld %lld %lld\n",
+ chip->ocv_coeffs[0], chip->ocv_coeffs[1],
+ chip->ocv_coeffs[2], chip->ocv_coeffs[3]);
+ pr_info("coeffs2 = %lld %lld %lld %lld\n",
+ chip->ocv_coeffs[4], chip->ocv_coeffs[5],
+ chip->ocv_coeffs[6], chip->ocv_coeffs[7]);
+ pr_info("coeffs3 = %lld %lld %lld %lld\n",
+ chip->ocv_coeffs[8], chip->ocv_coeffs[9],
+ chip->ocv_coeffs[10], chip->ocv_coeffs[11]);
+ }
+ rc = fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 1, 0, 0);
+ chip->ocv_junction_p1p2 = buffer[0] * 100 / 255;
+ rc |= fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 1, 1, 0);
+ chip->ocv_junction_p2p3 = buffer[0] * 100 / 255;
+ if (rc) {
+ pr_err("Failed to read ocv junctions: %d\n", rc);
+ goto done;
+ }
+ rc = fg_mem_read(chip, buffer, NOM_CAP_REG, 2, 0, 0);
+ if (rc) {
+ pr_err("Failed to read nominal capacitance: %d\n", rc);
+ goto done;
+ }
+ chip->nom_cap_uah = bcap_uah_2b(buffer);
+ chip->actual_cap_uah = chip->nom_cap_uah;
+ if (chip->learning_data.learned_cc_uah == 0) {
+ chip->learning_data.learned_cc_uah = chip->nom_cap_uah;
+ fg_cap_learning_save_data(chip);
+ } else if (chip->learning_data.feedback_on) {
+ cc_mah = div64_s64(chip->learning_data.learned_cc_uah, 1000);
+ rc = fg_calc_and_store_cc_soc_coeff(chip, cc_mah);
+ if (rc)
+ pr_err("Error in restoring cc_soc_coeff, rc:%d\n", rc);
+ }
+ rc = fg_mem_read(chip, buffer, CUTOFF_VOLTAGE_REG, 2, 0, 0);
+ if (rc) {
+ pr_err("Failed to read cutoff voltage: %d\n", rc);
+ goto done;
+ }
+ chip->cutoff_voltage = voltage_2b(buffer);
+ if (fg_debug_mask & FG_AGING)
+ pr_info("cutoff_voltage = %lld, nom_cap_uah = %d p1p2 = %d, p2p3 = %d\n",
+ chip->cutoff_voltage, chip->nom_cap_uah,
+ chip->ocv_junction_p1p2,
+ chip->ocv_junction_p2p3);
+
+ rc = fg_mem_read(chip, buffer, RSLOW_CFG_REG, 1, RSLOW_CFG_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to read rslow cfg: %d\n", rc);
+ goto done;
+ }
+ chip->rslow_comp.rslow_cfg = buffer[0];
+ rc = fg_mem_read(chip, buffer, RSLOW_THRESH_REG, 1,
+ RSLOW_THRESH_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to read rslow thresh: %d\n", rc);
+ goto done;
+ }
+ chip->rslow_comp.rslow_thr = buffer[0];
+ rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
+ TEMP_RS_TO_RSLOW_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to read rs to rslow: %d\n", rc);
+ goto done;
+ }
+ memcpy(chip->rslow_comp.rs_to_rslow, buffer, 2);
+ rc = fg_mem_read(chip, buffer, RSLOW_COMP_REG, 4,
+ RSLOW_COMP_C1_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to read rslow comp: %d\n", rc);
+ goto done;
+ }
+ memcpy(chip->rslow_comp.rslow_comp, buffer, 4);
+
+done:
+ fg_mem_release(chip);
+ return rc;
+}
+
+#define RSLOW_CFG_MASK (BIT(2) | BIT(3) | BIT(4) | BIT(5))
+#define RSLOW_CFG_ON_VAL (BIT(2) | BIT(3))
+#define RSLOW_THRESH_FULL_VAL 0xFF
+static int fg_rslow_charge_comp_set(struct fg_chip *chip)
+{
+ int rc;
+ u8 buffer[2];
+
+ mutex_lock(&chip->rslow_comp.lock);
+ fg_mem_lock(chip);
+
+ rc = fg_mem_masked_write(chip, RSLOW_CFG_REG,
+ RSLOW_CFG_MASK, RSLOW_CFG_ON_VAL, RSLOW_CFG_OFFSET);
+ if (rc) {
+ pr_err("unable to write rslow cfg: %d\n", rc);
+ goto done;
+ }
+ rc = fg_mem_masked_write(chip, RSLOW_THRESH_REG,
+ 0xFF, RSLOW_THRESH_FULL_VAL, RSLOW_THRESH_OFFSET);
+ if (rc) {
+ pr_err("unable to write rslow thresh: %d\n", rc);
+ goto done;
+ }
+
+ half_float_to_buffer(chip->rslow_comp.chg_rs_to_rslow, buffer);
+ rc = fg_mem_write(chip, buffer,
+ TEMP_RS_TO_RSLOW_REG, 2, TEMP_RS_TO_RSLOW_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write rs to rslow: %d\n", rc);
+ goto done;
+ }
+ half_float_to_buffer(chip->rslow_comp.chg_rslow_comp_c1, buffer);
+ rc = fg_mem_write(chip, buffer,
+ RSLOW_COMP_REG, 2, RSLOW_COMP_C1_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write rslow comp: %d\n", rc);
+ goto done;
+ }
+ half_float_to_buffer(chip->rslow_comp.chg_rslow_comp_c2, buffer);
+ rc = fg_mem_write(chip, buffer,
+ RSLOW_COMP_REG, 2, RSLOW_COMP_C2_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write rslow comp: %d\n", rc);
+ goto done;
+ }
+ chip->rslow_comp.active = true;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Activated rslow charge comp values\n");
+
+done:
+ fg_mem_release(chip);
+ mutex_unlock(&chip->rslow_comp.lock);
+ return rc;
+}
+
+#define RSLOW_CFG_ORIG_MASK (BIT(4) | BIT(5))
+static int fg_rslow_charge_comp_clear(struct fg_chip *chip)
+{
+ u8 reg;
+ int rc;
+
+ mutex_lock(&chip->rslow_comp.lock);
+ fg_mem_lock(chip);
+
+ reg = chip->rslow_comp.rslow_cfg & RSLOW_CFG_ORIG_MASK;
+ rc = fg_mem_masked_write(chip, RSLOW_CFG_REG,
+ RSLOW_CFG_MASK, reg, RSLOW_CFG_OFFSET);
+ if (rc) {
+ pr_err("unable to write rslow cfg: %d\n", rc);
+ goto done;
+ }
+ rc = fg_mem_masked_write(chip, RSLOW_THRESH_REG,
+ 0xFF, chip->rslow_comp.rslow_thr, RSLOW_THRESH_OFFSET);
+ if (rc) {
+ pr_err("unable to write rslow thresh: %d\n", rc);
+ goto done;
+ }
+
+ rc = fg_mem_write(chip, chip->rslow_comp.rs_to_rslow,
+ TEMP_RS_TO_RSLOW_REG, 2, TEMP_RS_TO_RSLOW_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write rs to rslow: %d\n", rc);
+ goto done;
+ }
+ rc = fg_mem_write(chip, chip->rslow_comp.rslow_comp,
+ RSLOW_COMP_REG, 4, RSLOW_COMP_C1_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write rslow comp: %d\n", rc);
+ goto done;
+ }
+ chip->rslow_comp.active = false;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Cleared rslow charge comp values\n");
+
+done:
+ fg_mem_release(chip);
+ mutex_unlock(&chip->rslow_comp.lock);
+ return rc;
+}
+
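+/*
+ * Apply the charge rslow compensation values once the top byte of the
+ * battery SOC rises above the configured threshold while charging, and
+ * restore the stock values otherwise.
+ */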
+static void rslow_comp_work(struct work_struct *work)
+{
+ int battery_soc_1b;
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ rslow_comp_work);
+
+ battery_soc_1b = get_battery_soc_raw(chip) >> 16;
+ if (battery_soc_1b > chip->rslow_comp.chg_rslow_comp_thr
+ && chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ if (!chip->rslow_comp.active)
+ fg_rslow_charge_comp_set(chip);
+ } else {
+ if (chip->rslow_comp.active)
+ fg_rslow_charge_comp_clear(chip);
+ }
+}
+
+#define MICROUNITS_TO_ADC_RAW(units) \
+ div64_s64(units * LSB_16B_DENMTR, LSB_16B_NUMRTR)
+static int update_chg_iterm(struct fg_chip *chip)
+{
+ u8 data[2];
+ u16 converted_current_raw;
+ s64 current_ma = -settings[FG_MEM_CHG_TERM_CURRENT].value;
+
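+ /*
+ * Convert the (negated) termination current from mA to uA, then to raw
+ * ADC LSBs, and store it little-endian in the charge termination
+ * current SRAM setting.
+ */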
+ converted_current_raw = (s16)MICROUNITS_TO_ADC_RAW(current_ma * 1000);
+ data[0] = cpu_to_le16(converted_current_raw) & 0xFF;
+ data[1] = cpu_to_le16(converted_current_raw) >> 8;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("current = %lld, converted_raw = %04x, data = %02x %02x\n",
+ current_ma, converted_current_raw, data[0], data[1]);
+ return fg_mem_write(chip, data,
+ settings[FG_MEM_CHG_TERM_CURRENT].address,
+ 2, settings[FG_MEM_CHG_TERM_CURRENT].offset, 0);
+}
+
+#define CC_CV_SETPOINT_REG 0x4F8
+#define CC_CV_SETPOINT_OFFSET 0
+static void update_cc_cv_setpoint(struct fg_chip *chip)
+{
+ int rc;
+ u8 tmp[2];
+
+ if (!chip->cc_cv_threshold_mv)
+ return;
+ batt_to_setpoint_adc(chip->cc_cv_threshold_mv, tmp);
+ rc = fg_mem_write(chip, tmp, CC_CV_SETPOINT_REG, 2,
+ CC_CV_SETPOINT_OFFSET, 0);
+ if (rc) {
+ pr_err("failed to write CC_CV_VOLT rc=%d\n", rc);
+ return;
+ }
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Wrote %x %x to address %x for CC_CV setpoint\n",
+ tmp[0], tmp[1], CC_CV_SETPOINT_REG);
+}
+
+#define CBITS_INPUT_FILTER_REG 0x4B4
+#define CBITS_RMEAS1_OFFSET 1
+#define CBITS_RMEAS2_OFFSET 2
+#define CBITS_RMEAS1_DEFAULT_VAL 0x65
+#define CBITS_RMEAS2_DEFAULT_VAL 0x65
+#define IMPTR_FAST_TIME_SHIFT 1
+#define IMPTR_LONG_TIME_SHIFT (1 << 4)
+#define IMPTR_PULSE_CTR_CHG 1
+#define IMPTR_PULSE_CTR_DISCHG (1 << 4)
+static int fg_config_imptr_pulse(struct fg_chip *chip, bool slow)
+{
+ int rc;
+ u8 cntr[2] = {0, 0};
+ u8 val;
+
+ if (slow == chip->imptr_pulse_slow_en) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("imptr_pulse_slow is %sabled already\n",
+ slow ? "en" : "dis");
+ return 0;
+ }
+
+ fg_mem_lock(chip);
+
+ val = slow ? (IMPTR_FAST_TIME_SHIFT | IMPTR_LONG_TIME_SHIFT) :
+ CBITS_RMEAS1_DEFAULT_VAL;
+ rc = fg_mem_write(chip, &val, CBITS_INPUT_FILTER_REG, 1,
+ CBITS_RMEAS1_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write cbits_rmeas1_offset rc=%d\n", rc);
+ goto done;
+ }
+
+ val = slow ? (IMPTR_PULSE_CTR_CHG | IMPTR_PULSE_CTR_DISCHG) :
+ CBITS_RMEAS2_DEFAULT_VAL;
+ rc = fg_mem_write(chip, &val, CBITS_INPUT_FILTER_REG, 1,
+ CBITS_RMEAS2_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write cbits_rmeas2_offset rc=%d\n", rc);
+ goto done;
+ }
+
+ if (slow) {
+ rc = fg_mem_write(chip, cntr, COUNTER_IMPTR_REG, 4,
+ COUNTER_IMPTR_OFFSET, 0);
+ if (rc) {
+ pr_err("failed to write COUNTER_IMPTR rc=%d\n", rc);
+ goto done;
+ }
+
+ rc = fg_mem_write(chip, cntr, COUNTER_PULSE_REG, 2,
+ COUNTER_PULSE_OFFSET, 0);
+ if (rc) {
+ pr_err("failed to write COUNTER_IMPTR rc=%d\n", rc);
+ goto done;
+ }
+ }
+
+ chip->imptr_pulse_slow_en = slow;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("imptr_pulse_slow is %sabled\n", slow ? "en" : "dis");
+done:
+ fg_mem_release(chip);
+ return rc;
+}
+
+#define CURRENT_DELTA_MIN_REG 0x42C
+#define CURRENT_DELTA_MIN_OFFSET 1
+#define SYS_CFG_1_REG 0x4AC
+#define SYS_CFG_1_OFFSET 0
+#define CURRENT_DELTA_MIN_DEFAULT 0x16
+#define CURRENT_DELTA_MIN_500MA 0xCD
+#define RSLOW_CFG_USE_FIX_RSER_VAL BIT(7)
+#define ENABLE_ESR_PULSE_VAL BIT(3)
+static int fg_config_esr_extract(struct fg_chip *chip, bool disable)
+{
+ int rc;
+ u8 val;
+
+ if (disable == chip->esr_extract_disabled) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("ESR extract already %sabled\n",
+ disable ? "dis" : "en");
+ return 0;
+ }
+
+ fg_mem_lock(chip);
+
+ val = disable ? CURRENT_DELTA_MIN_500MA :
+ CURRENT_DELTA_MIN_DEFAULT;
+ rc = fg_mem_write(chip, &val, CURRENT_DELTA_MIN_REG, 1,
+ CURRENT_DELTA_MIN_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write curr_delta_min rc=%d\n", rc);
+ goto done;
+ }
+
+ val = disable ? RSLOW_CFG_USE_FIX_RSER_VAL : 0;
+ rc = fg_mem_masked_write(chip, RSLOW_CFG_REG,
+ RSLOW_CFG_USE_FIX_RSER_VAL, val, RSLOW_CFG_OFFSET);
+ if (rc) {
+ pr_err("unable to write rslow cfg rc= %d\n", rc);
+ goto done;
+ }
+
+ val = disable ? 0 : ENABLE_ESR_PULSE_VAL;
+ rc = fg_mem_masked_write(chip, SYS_CFG_1_REG,
+ ENABLE_ESR_PULSE_VAL, val, SYS_CFG_1_OFFSET);
+ if (rc) {
+ pr_err("unable to write sys_cfg_1 rc= %d\n", rc);
+ goto done;
+ }
+
+ chip->esr_extract_disabled = disable;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("ESR extract is %sabled\n", disable ? "dis" : "en");
+done:
+ fg_mem_release(chip);
+ return rc;
+}
+
+#define ESR_EXTRACT_STOP_SOC 2
+#define IMPTR_PULSE_CONFIG_SOC 5
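+/*
+ * With a charger present and SOC at or below ESR_EXTRACT_STOP_SOC, ESR
+ * extraction is disabled. Once SOC recovers above it, extraction is
+ * re-enabled and the impedance-tracking pulses are slowed down while
+ * SOC is still at or below IMPTR_PULSE_CONFIG_SOC.
+ */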
+static void esr_extract_config_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work, struct fg_chip,
+ esr_extract_config_work);
+ bool input_present = is_input_present(chip);
+ int capacity = get_prop_capacity(chip);
+
+ if (input_present && capacity <= ESR_EXTRACT_STOP_SOC) {
+ fg_config_esr_extract(chip, true);
+ } else if (capacity > ESR_EXTRACT_STOP_SOC) {
+ fg_config_esr_extract(chip, false);
+
+ if (capacity <= IMPTR_PULSE_CONFIG_SOC)
+ fg_config_imptr_pulse(chip, true);
+ else
+ fg_config_imptr_pulse(chip, false);
+ }
+
+ fg_relax(&chip->esr_extract_wakeup_source);
+}
+
+#define LOW_LATENCY BIT(6)
+#define BATT_PROFILE_OFFSET 0x4C0
+#define PROFILE_INTEGRITY_REG 0x53C
+#define PROFILE_INTEGRITY_BIT BIT(0)
+#define FIRST_EST_DONE_BIT BIT(5)
+#define MAX_TRIES_FIRST_EST 3
+#define FIRST_EST_WAIT_MS 2000
+#define PROFILE_LOAD_TIMEOUT_MS 5000
+static int fg_do_restart(struct fg_chip *chip, bool write_profile)
+{
+ int rc, ibat_ua;
+ u8 reg = 0;
+ u8 buf[2];
+ bool tried_once = false;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("restarting fuel gauge...\n");
+
+try_again:
+ if (write_profile) {
+ if (!chip->charging_disabled) {
+ pr_err("Charging not yet disabled!\n");
+ return -EINVAL;
+ }
+
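+ /*
+ * With charging disabled the battery current should read as a
+ * discharge (positive); a negative reading means the charger is still
+ * active, so refresh the SRAM data once and re-check.
+ */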
+ ibat_ua = get_sram_prop_now(chip, FG_DATA_CURRENT);
+ if (ibat_ua == -EINVAL) {
+ pr_err("SRAM not updated yet!\n");
+ return ibat_ua;
+ }
+
+ if (ibat_ua < 0) {
+ pr_warn("Charging enabled?, ibat_ua: %d\n", ibat_ua);
+
+ if (!tried_once) {
+ cancel_delayed_work(&chip->update_sram_data);
+ schedule_delayed_work(&chip->update_sram_data,
+ msecs_to_jiffies(0));
+ msleep(1000);
+ tried_once = true;
+ goto try_again;
+ }
+ }
+ }
+
+ chip->fg_restarting = true;
+ /*
+ * save the temperature if the sw rbias control is active so that there
+ * is no gap of time when there is no valid temperature read after the
+ * restart
+ */
+ if (chip->sw_rbias_ctrl) {
+ rc = fg_mem_read(chip, buf,
+ fg_data[FG_DATA_BATT_TEMP].address,
+ fg_data[FG_DATA_BATT_TEMP].len,
+ fg_data[FG_DATA_BATT_TEMP].offset, 0);
+ if (rc) {
+ pr_err("failed to read batt temp rc=%d\n", rc);
+ goto sub_and_fail;
+ }
+ }
+ /*
+ * release the sram access and configure the correct settings
+ * before re-requesting access.
+ */
+ mutex_lock(&chip->rw_lock);
+ fg_release_access(chip);
+
+ rc = fg_masked_write(chip, chip->soc_base + SOC_BOOT_MOD,
+ NO_OTP_PROF_RELOAD, 0, 1);
+ if (rc) {
+ pr_err("failed to set no otp reload bit\n");
+ goto unlock_and_fail;
+ }
+
+ /* unset the restart bits so the fg doesn't continuously restart */
+ reg = REDO_FIRST_ESTIMATE | RESTART_GO;
+ rc = fg_masked_write(chip, chip->soc_base + SOC_RESTART,
+ reg, 0, 1);
+ if (rc) {
+ pr_err("failed to unset fg restart: %d\n", rc);
+ goto unlock_and_fail;
+ }
+
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip),
+ LOW_LATENCY, LOW_LATENCY, 1);
+ if (rc) {
+ pr_err("failed to set low latency access bit\n");
+ goto unlock_and_fail;
+ }
+ mutex_unlock(&chip->rw_lock);
+
+ /* read once to get a fg cycle in */
+ rc = fg_mem_read(chip, &reg, PROFILE_INTEGRITY_REG, 1, 0, 0);
+ if (rc) {
+ pr_err("failed to read profile integrity rc=%d\n", rc);
+ goto fail;
+ }
+
+ /*
+ * If this is not the first time a profile has been loaded, sleep for
+ * 3 seconds to make sure the NO_OTP_RELOAD is cleared in memory
+ */
+ if (chip->first_profile_loaded)
+ msleep(3000);
+
+ mutex_lock(&chip->rw_lock);
+ fg_release_access(chip);
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip), LOW_LATENCY, 0, 1);
+ if (rc) {
+ pr_err("failed to set low latency access bit\n");
+ goto unlock_and_fail;
+ }
+
+ atomic_add_return(1, &chip->memif_user_cnt);
+ mutex_unlock(&chip->rw_lock);
+
+ if (write_profile) {
+ /* write the battery profile */
+ rc = fg_mem_write(chip, chip->batt_profile, BATT_PROFILE_OFFSET,
+ chip->batt_profile_len, 0, 1);
+ if (rc) {
+ pr_err("failed to write profile rc=%d\n", rc);
+ goto sub_and_fail;
+ }
+ /* write the integrity bits and release access */
+ rc = fg_mem_masked_write(chip, PROFILE_INTEGRITY_REG,
+ PROFILE_INTEGRITY_BIT,
+ PROFILE_INTEGRITY_BIT, 0);
+ if (rc) {
+ pr_err("failed to write profile rc=%d\n", rc);
+ goto sub_and_fail;
+ }
+ }
+
+ /* decrement the user count so that memory access can be released */
+ fg_release_access_if_necessary(chip);
+
+ /*
+ * make sure that the first estimate has completed
+ * in case of a hotswap
+ */
+ rc = wait_for_completion_interruptible_timeout(&chip->first_soc_done,
+ msecs_to_jiffies(PROFILE_LOAD_TIMEOUT_MS));
+ if (rc <= 0) {
+ pr_err("transaction timed out rc=%d\n", rc);
+ rc = -ETIMEDOUT;
+ goto fail;
+ }
+
+ /*
+ * reinitialize the completion so that the driver knows when the restart
+ * finishes
+ */
+ reinit_completion(&chip->first_soc_done);
+
+ if (chip->esr_pulse_tune_en) {
+ fg_stay_awake(&chip->esr_extract_wakeup_source);
+ schedule_work(&chip->esr_extract_config_work);
+ }
+
+ /*
+ * set the restart bits so that the next fg cycle will not reload
+ * the profile
+ */
+ rc = fg_masked_write(chip, chip->soc_base + SOC_BOOT_MOD,
+ NO_OTP_PROF_RELOAD, NO_OTP_PROF_RELOAD, 1);
+ if (rc) {
+ pr_err("failed to set no otp reload bit\n");
+ goto fail;
+ }
+
+ reg = REDO_FIRST_ESTIMATE | RESTART_GO;
+ rc = fg_masked_write(chip, chip->soc_base + SOC_RESTART,
+ reg, reg, 1);
+ if (rc) {
+ pr_err("failed to set fg restart: %d\n", rc);
+ goto fail;
+ }
+
+ /* wait for the first estimate to complete */
+ rc = wait_for_completion_interruptible_timeout(&chip->first_soc_done,
+ msecs_to_jiffies(PROFILE_LOAD_TIMEOUT_MS));
+ if (rc <= 0) {
+ pr_err("transaction timed out rc=%d\n", rc);
+ rc = -ETIMEDOUT;
+ goto fail;
+ }
+ rc = fg_read(chip, &reg, INT_RT_STS(chip->soc_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->soc_base), rc);
+ goto fail;
+ }
+ if ((reg & FIRST_EST_DONE_BIT) == 0)
+ pr_err("Battery profile reloading failed, no first estimate\n");
+
+ rc = fg_masked_write(chip, chip->soc_base + SOC_BOOT_MOD,
+ NO_OTP_PROF_RELOAD, 0, 1);
+ if (rc) {
+ pr_err("failed to set no otp reload bit\n");
+ goto fail;
+ }
+ /* unset the restart bits so the fg doesn't continuously restart */
+ reg = REDO_FIRST_ESTIMATE | RESTART_GO;
+ rc = fg_masked_write(chip, chip->soc_base + SOC_RESTART,
+ reg, 0, 1);
+ if (rc) {
+ pr_err("failed to unset fg restart: %d\n", rc);
+ goto fail;
+ }
+
+ /* restore the battery temperature reading here */
+ if (chip->sw_rbias_ctrl) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("reloaded 0x%02x%02x into batt temp",
+ buf[0], buf[1]);
+ rc = fg_mem_write(chip, buf,
+ fg_data[FG_DATA_BATT_TEMP].address,
+ fg_data[FG_DATA_BATT_TEMP].len,
+ fg_data[FG_DATA_BATT_TEMP].offset, 0);
+ if (rc) {
+ pr_err("failed to write batt temp rc=%d\n", rc);
+ goto fail;
+ }
+ }
+
+ /* Enable charging now as the first estimate is done now */
+ if (chip->charging_disabled) {
+ rc = set_prop_enable_charging(chip, true);
+ if (rc)
+ pr_err("Failed to enable charging, rc=%d\n", rc);
+ else
+ chip->charging_disabled = false;
+ }
+
+ chip->fg_restarting = false;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("done!\n");
+ return 0;
+
+unlock_and_fail:
+ mutex_unlock(&chip->rw_lock);
+ goto fail;
+sub_and_fail:
+ fg_release_access_if_necessary(chip);
+ goto fail;
+fail:
+ chip->fg_restarting = false;
+ return -EINVAL;
+}
+
+#define FG_PROFILE_LEN 128
+#define PROFILE_COMPARE_LEN 32
+#define THERMAL_COEFF_ADDR 0x444
+#define THERMAL_COEFF_OFFSET 0x2
+#define BATTERY_PSY_WAIT_MS 2000
+static int fg_batt_profile_init(struct fg_chip *chip)
+{
+ int rc = 0, ret, len, batt_id;
+ struct device_node *node = chip->pdev->dev.of_node;
+ struct device_node *batt_node, *profile_node;
+ const char *data, *batt_type_str;
+ bool tried_again = false, vbat_in_range, profiles_same;
+ u8 reg = 0;
+
+wait:
+ fg_stay_awake(&chip->profile_wakeup_source);
+ ret = wait_for_completion_interruptible_timeout(&chip->batt_id_avail,
+ msecs_to_jiffies(PROFILE_LOAD_TIMEOUT_MS));
+ /* If we were interrupted wait again one more time. */
+ if (ret == -ERESTARTSYS && !tried_again) {
+ tried_again = true;
+ pr_debug("interrupted, waiting again\n");
+ goto wait;
+ } else if (ret <= 0) {
+ rc = -ETIMEDOUT;
+ pr_err("profile loading timed out rc=%d\n", rc);
+ goto no_profile;
+ }
+
+ batt_node = of_find_node_by_name(node, "qcom,battery-data");
+ if (!batt_node) {
+ pr_warn("No available batterydata, using OTP defaults\n");
+ rc = 0;
+ goto no_profile;
+ }
+
+ batt_id = get_sram_prop_now(chip, FG_DATA_BATT_ID);
+ batt_id /= 1000;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("battery id = %dKOhms\n", batt_id);
+
+ profile_node = of_batterydata_get_best_profile(batt_node, batt_id,
+ fg_batt_type);
+ if (IS_ERR_OR_NULL(profile_node)) {
+ rc = PTR_ERR(profile_node);
+ pr_err("couldn't find profile handle %d\n", rc);
+ goto no_profile;
+ }
+
+ /* read rslow compensation values if they're available */
+ rc = of_property_read_u32(profile_node, "qcom,chg-rs-to-rslow",
+ &chip->rslow_comp.chg_rs_to_rslow);
+ if (rc) {
+ chip->rslow_comp.chg_rs_to_rslow = -EINVAL;
+ if (rc != -EINVAL)
+ pr_err("Could not read rs to rslow: %d\n", rc);
+ }
+ rc = of_property_read_u32(profile_node, "qcom,chg-rslow-comp-c1",
+ &chip->rslow_comp.chg_rslow_comp_c1);
+ if (rc) {
+ chip->rslow_comp.chg_rslow_comp_c1 = -EINVAL;
+ if (rc != -EINVAL)
+ pr_err("Could not read rslow comp c1: %d\n", rc);
+ }
+ rc = of_property_read_u32(profile_node, "qcom,chg-rslow-comp-c2",
+ &chip->rslow_comp.chg_rslow_comp_c2);
+ if (rc) {
+ chip->rslow_comp.chg_rslow_comp_c2 = -EINVAL;
+ if (rc != -EINVAL)
+ pr_err("Could not read rslow comp c2: %d\n", rc);
+ }
+ rc = of_property_read_u32(profile_node, "qcom,chg-rslow-comp-thr",
+ &chip->rslow_comp.chg_rslow_comp_thr);
+ if (rc) {
+ chip->rslow_comp.chg_rslow_comp_thr = -EINVAL;
+ if (rc != -EINVAL)
+ pr_err("Could not read rslow comp thr: %d\n", rc);
+ }
+
+ rc = of_property_read_u32(profile_node, "qcom,max-voltage-uv",
+ &chip->batt_max_voltage_uv);
+
+ if (rc)
+ pr_warn("couldn't find battery max voltage\n");
+
+ /*
+ * Only configure from profile if fg-cc-cv-threshold-mv is not
+ * defined in the charger device node.
+ */
+ if (!of_find_property(chip->pdev->dev.of_node,
+ "qcom,fg-cc-cv-threshold-mv", NULL)) {
+ of_property_read_u32(profile_node,
+ "qcom,fg-cc-cv-threshold-mv",
+ &chip->cc_cv_threshold_mv);
+ }
+
+ data = of_get_property(profile_node, "qcom,fg-profile-data", &len);
+ if (!data) {
+ pr_err("no battery profile loaded\n");
+ rc = 0;
+ goto no_profile;
+ }
+
+ if (len != FG_PROFILE_LEN) {
+ pr_err("battery profile incorrect size: %d\n", len);
+ rc = -EINVAL;
+ goto no_profile;
+ }
+
+ rc = of_property_read_string(profile_node, "qcom,battery-type",
+ &batt_type_str);
+ if (rc) {
+ pr_err("Could not find battery data type: %d\n", rc);
+ rc = 0;
+ goto no_profile;
+ }
+
+ if (!chip->batt_profile)
+ chip->batt_profile = devm_kzalloc(chip->dev,
+ sizeof(char) * len, GFP_KERNEL);
+
+ if (!chip->batt_profile) {
+ pr_err("out of memory\n");
+ rc = -ENOMEM;
+ goto no_profile;
+ }
+
+ rc = fg_mem_read(chip, &reg, PROFILE_INTEGRITY_REG, 1, 0, 1);
+ if (rc) {
+ pr_err("failed to read profile integrity rc=%d\n", rc);
+ goto no_profile;
+ }
+
+ rc = fg_mem_read(chip, chip->batt_profile, BATT_PROFILE_OFFSET,
+ len, 0, 1);
+ if (rc) {
+ pr_err("failed to read profile rc=%d\n", rc);
+ goto no_profile;
+ }
+
+ /* Check whether the charger is ready */
+ if (!is_charger_available(chip))
+ goto reschedule;
+
+ /* Disable charging for a FG cycle before calculating vbat_in_range */
+ if (!chip->charging_disabled) {
+ rc = set_prop_enable_charging(chip, false);
+ if (rc)
+ pr_err("Failed to disable charging, rc=%d\n", rc);
+
+ goto reschedule;
+ }
+
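+ /*
+ * Keep the profile already in SRAM only if its integrity bit is set,
+ * the predicted voltage agrees with the measured one within the
+ * FG_MEM_VBAT_EST_DIFF setting, the battery is not empty and the
+ * stored profile matches the one selected from devicetree; otherwise
+ * fall through and reload the new profile.
+ */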
+ vbat_in_range = get_vbat_est_diff(chip)
+ < settings[FG_MEM_VBAT_EST_DIFF].value * 1000;
+ profiles_same = memcmp(chip->batt_profile, data,
+ PROFILE_COMPARE_LEN) == 0;
+ if (reg & PROFILE_INTEGRITY_BIT) {
+ fg_cap_learning_load_data(chip);
+ if (vbat_in_range && !fg_is_batt_empty(chip) && profiles_same) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Battery profiles same, using default\n");
+ if (fg_est_dump)
+ schedule_work(&chip->dump_sram);
+ goto done;
+ }
+ } else {
+ pr_info("Battery profile not same, clearing data\n");
+ clear_cycle_counter(chip);
+ chip->learning_data.learned_cc_uah = 0;
+ }
+
+ if (fg_est_dump)
+ dump_sram(&chip->dump_sram);
+
+ if ((fg_debug_mask & FG_STATUS) && !vbat_in_range)
+ pr_info("Vbat out of range: v_current_pred: %d, v:%d\n",
+ fg_data[FG_DATA_CPRED_VOLTAGE].value,
+ fg_data[FG_DATA_VOLTAGE].value);
+
+ if ((fg_debug_mask & FG_STATUS) && fg_is_batt_empty(chip))
+ pr_info("battery empty\n");
+
+ if ((fg_debug_mask & FG_STATUS) && !profiles_same)
+ pr_info("profiles differ\n");
+
+ if (fg_debug_mask & FG_STATUS) {
+ pr_info("Using new profile\n");
+ print_hex_dump(KERN_INFO, "FG: loaded profile: ",
+ DUMP_PREFIX_NONE, 16, 1,
+ chip->batt_profile, len, false);
+ }
+
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+
+ memcpy(chip->batt_profile, data, len);
+
+ chip->batt_profile_len = len;
+
+ if (fg_debug_mask & FG_STATUS)
+ print_hex_dump(KERN_INFO, "FG: new profile: ",
+ DUMP_PREFIX_NONE, 16, 1, chip->batt_profile,
+ chip->batt_profile_len, false);
+
+ rc = fg_do_restart(chip, true);
+ if (rc) {
+ pr_err("restart failed: %d\n", rc);
+ goto no_profile;
+ }
+
+ /*
+ * Only configure from profile if thermal-coefficients is not
+ * defined in the FG device node.
+ */
+ if (!of_find_property(chip->pdev->dev.of_node,
+ "qcom,thermal-coefficients", NULL)) {
+ data = of_get_property(profile_node,
+ "qcom,thermal-coefficients", &len);
+ if (data && len == THERMAL_COEFF_N_BYTES) {
+ memcpy(chip->thermal_coefficients, data, len);
+ rc = fg_mem_write(chip, chip->thermal_coefficients,
+ THERMAL_COEFF_ADDR, THERMAL_COEFF_N_BYTES,
+ THERMAL_COEFF_OFFSET, 0);
+ if (rc)
+ pr_err("spmi write failed addr:%03x, ret:%d\n",
+ THERMAL_COEFF_ADDR, rc);
+ else if (fg_debug_mask & FG_STATUS)
+ pr_info("Battery thermal coefficients changed\n");
+ }
+ }
+
+done:
+ if (chip->charging_disabled) {
+ rc = set_prop_enable_charging(chip, true);
+ if (rc)
+ pr_err("Failed to enable charging, rc=%d\n", rc);
+ else
+ chip->charging_disabled = false;
+ }
+
+ if (fg_batt_type)
+ chip->batt_type = fg_batt_type;
+ else
+ chip->batt_type = batt_type_str;
+ chip->first_profile_loaded = true;
+ chip->profile_loaded = true;
+ chip->battery_missing = is_battery_missing(chip);
+ update_chg_iterm(chip);
+ update_cc_cv_setpoint(chip);
+ rc = populate_system_data(chip);
+ if (rc) {
+ pr_err("failed to read ocv properties=%d\n", rc);
+ return rc;
+ }
+ estimate_battery_age(chip, &chip->actual_cap_uah);
+ schedule_work(&chip->status_change_work);
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+ fg_relax(&chip->profile_wakeup_source);
+ pr_info("Battery SOC: %d, V: %duV\n", get_prop_capacity(chip),
+ fg_data[FG_DATA_VOLTAGE].value);
+ return rc;
+no_profile:
+ if (chip->charging_disabled) {
+ rc = set_prop_enable_charging(chip, true);
+ if (rc)
+ pr_err("Failed to enable charging, rc=%d\n", rc);
+ else
+ chip->charging_disabled = false;
+ }
+
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+ fg_relax(&chip->profile_wakeup_source);
+ return rc;
+reschedule:
+ schedule_delayed_work(
+ &chip->batt_profile_init,
+ msecs_to_jiffies(BATTERY_PSY_WAIT_MS));
+ cancel_delayed_work(&chip->update_sram_data);
+ schedule_delayed_work(
+ &chip->update_sram_data,
+ msecs_to_jiffies(0));
+ fg_relax(&chip->profile_wakeup_source);
+ return 0;
+}
+
+static void check_empty_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ check_empty_work.work);
+
+ if (fg_is_batt_empty(chip)) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("EMPTY SOC high\n");
+ chip->soc_empty = true;
+ if (chip->power_supply_registered)
+ power_supply_changed(chip->bms_psy);
+ }
+ fg_relax(&chip->empty_check_wakeup_source);
+}
+
+static void batt_profile_init(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ batt_profile_init.work);
+
+ if (fg_batt_profile_init(chip))
+ pr_err("failed to initialize profile\n");
+}
+
+static void sysfs_restart_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ sysfs_restart_work);
+ int rc;
+
+ rc = fg_do_restart(chip, false);
+ if (rc)
+ pr_err("fg restart failed: %d\n", rc);
+ mutex_lock(&chip->sysfs_restart_lock);
+ fg_restart = 0;
+ mutex_unlock(&chip->sysfs_restart_lock);
+}
+
+#define SRAM_MONOTONIC_SOC_REG 0x574
+#define SRAM_MONOTONIC_SOC_OFFSET 2
+#define SRAM_RELEASE_TIMEOUT_MS 500
+static void charge_full_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ charge_full_work);
+ int rc;
+ u8 buffer[3];
+ int bsoc;
+ int resume_soc_raw = FULL_SOC_RAW - settings[FG_MEM_RESUME_SOC].value;
+ bool disable = false;
+ u8 reg;
+
+ if (chip->status != POWER_SUPPLY_STATUS_FULL) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("battery not full: %d\n", chip->status);
+ disable = true;
+ }
+
+ fg_mem_lock(chip);
+ rc = fg_mem_read(chip, buffer, BATTERY_SOC_REG, 3, 1, 0);
+ if (rc) {
+ pr_err("Unable to read battery soc: %d\n", rc);
+ goto out;
+ }
+ if (buffer[2] <= resume_soc_raw) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("bsoc = 0x%02x <= resume = 0x%02x\n",
+ buffer[2], resume_soc_raw);
+ disable = true;
+ }
+ if (!disable)
+ goto out;
+
+ rc = fg_mem_write(chip, buffer, SOC_FULL_REG, 3,
+ SOC_FULL_OFFSET, 0);
+ if (rc) {
+ pr_err("failed to write SOC_FULL rc=%d\n", rc);
+ goto out;
+ }
+ /* force a full SOC value into the monotonic SOC so capacity reads 100% */
+ buffer[0] = 0xFF;
+ buffer[1] = 0xFF;
+ rc = fg_mem_write(chip, buffer, SRAM_MONOTONIC_SOC_REG, 2,
+ SRAM_MONOTONIC_SOC_OFFSET, 0);
+ if (rc) {
+ pr_err("failed to write SOC_FULL rc=%d\n", rc);
+ goto out;
+ }
+ if (fg_debug_mask & FG_STATUS) {
+ bsoc = buffer[0] | buffer[1] << 8 | buffer[2] << 16;
+ pr_info("wrote %06x into soc full\n", bsoc);
+ }
+ fg_mem_release(chip);
+ /*
+ * wait one cycle to make sure the soc is updated before clearing
+ * the soc mask bit
+ */
+ fg_mem_lock(chip);
+ fg_mem_read(chip, &reg, PROFILE_INTEGRITY_REG, 1, 0, 0);
+out:
+ fg_mem_release(chip);
+ if (disable)
+ chip->charge_full = false;
+}
+
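+/*
+ * Convert the BCL LM and MH current thresholds parsed from DT (in mA) into
+ * ADC codes with BCL_MA_TO_ADC and program them back in a single 4-byte
+ * SRAM write.
+ */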
+static void update_bcl_thresholds(struct fg_chip *chip)
+{
+ u8 data[4];
+ u8 mh_offset = 0, lm_offset = 0;
+ u16 address = 0;
+ int ret = 0;
+
+ address = settings[FG_MEM_BCL_MH_THRESHOLD].address;
+ mh_offset = settings[FG_MEM_BCL_MH_THRESHOLD].offset;
+ lm_offset = settings[FG_MEM_BCL_LM_THRESHOLD].offset;
+ ret = fg_mem_read(chip, data, address, 4, 0, 1);
+ if (ret)
+ pr_err("Error reading BCL LM & MH threshold rc:%d\n", ret);
+ else
+ pr_debug("Old BCL LM threshold:%x MH threshold:%x\n",
+ data[lm_offset], data[mh_offset]);
+ BCL_MA_TO_ADC(settings[FG_MEM_BCL_MH_THRESHOLD].value, data[mh_offset]);
+ BCL_MA_TO_ADC(settings[FG_MEM_BCL_LM_THRESHOLD].value, data[lm_offset]);
+
+ ret = fg_mem_write(chip, data, address, 4, 0, 0);
+ if (ret)
+ pr_err("spmi write failed. addr:%03x, ret:%d\n",
+ address, ret);
+ else
+ pr_debug("New BCL LM threshold:%x MH threshold:%x\n",
+ data[lm_offset], data[mh_offset]);
+}
+
+static int disable_bcl_lpm(struct fg_chip *chip)
+{
+ u8 data[4];
+ u8 lm_offset = 0;
+ u16 address = 0;
+ int rc = 0;
+
+ address = settings[FG_MEM_BCL_LM_THRESHOLD].address;
+ lm_offset = settings[FG_MEM_BCL_LM_THRESHOLD].offset;
+ rc = fg_mem_read(chip, data, address, 4, 0, 1);
+ if (rc) {
+ pr_err("Error reading BCL LM & MH threshold rc:%d\n", rc);
+ return rc;
+ }
+ pr_debug("Old BCL LM threshold:%x\n", data[lm_offset]);
+
+ /* Put BCL always above LPM */
+ BCL_MA_TO_ADC(0, data[lm_offset]);
+
+ rc = fg_mem_write(chip, data, address, 4, 0, 0);
+ if (rc)
+ pr_err("spmi write failed. addr:%03x, rc:%d\n",
+ address, rc);
+ else
+ pr_debug("New BCL LM threshold:%x\n", data[lm_offset]);
+
+ return rc;
+}
+
+static void bcl_hi_power_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ bcl_hi_power_work);
+ int rc;
+
+ if (chip->bcl_lpm_disabled) {
+ rc = disable_bcl_lpm(chip);
+ if (rc)
+ pr_err("failed to disable bcl low mode %d\n",
+ rc);
+ } else {
+ update_bcl_thresholds(chip);
+ }
+}
+
+#define VOLT_UV_TO_VOLTCMP8(volt_uv) \
+ ((volt_uv - 2500000) / 9766)
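+/*
+ * Program the empty-voltage interrupt threshold: the 8-bit comparator code
+ * counts up from 2.5 V in steps of roughly 9.766 mV per LSB, as encoded by
+ * VOLT_UV_TO_VOLTCMP8 above.
+ */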
+static int update_irq_volt_empty(struct fg_chip *chip)
+{
+ u8 data;
+ int volt_mv = settings[FG_MEM_IRQ_VOLT_EMPTY].value;
+
+ data = (u8)VOLT_UV_TO_VOLTCMP8(volt_mv * 1000);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("voltage = %d, converted_raw = %04x\n", volt_mv, data);
+ return fg_mem_write(chip, &data,
+ settings[FG_MEM_IRQ_VOLT_EMPTY].address, 1,
+ settings[FG_MEM_IRQ_VOLT_EMPTY].offset, 0);
+}
+
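+/*
+ * The cutoff voltage and termination current below are written as 16-bit
+ * raw ADC values; each is split into LSB/MSB so the SRAM copy ends up
+ * little-endian.
+ */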
+static int update_cutoff_voltage(struct fg_chip *chip)
+{
+ u8 data[2];
+ u16 converted_voltage_raw;
+ s64 voltage_mv = settings[FG_MEM_CUTOFF_VOLTAGE].value;
+
+ converted_voltage_raw = (s16)MICROUNITS_TO_ADC_RAW(voltage_mv * 1000);
+ data[0] = cpu_to_le16(converted_voltage_raw) & 0xFF;
+ data[1] = cpu_to_le16(converted_voltage_raw) >> 8;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("voltage = %lld, converted_raw = %04x, data = %02x %02x\n",
+ voltage_mv, converted_voltage_raw, data[0], data[1]);
+ return fg_mem_write(chip, data, settings[FG_MEM_CUTOFF_VOLTAGE].address,
+ 2, settings[FG_MEM_CUTOFF_VOLTAGE].offset, 0);
+}
+
+static int update_iterm(struct fg_chip *chip)
+{
+ u8 data[2];
+ u16 converted_current_raw;
+ s64 current_ma = -settings[FG_MEM_TERM_CURRENT].value;
+
+ converted_current_raw = (s16)MICROUNITS_TO_ADC_RAW(current_ma * 1000);
+ data[0] = cpu_to_le16(converted_current_raw) & 0xFF;
+ data[1] = cpu_to_le16(converted_current_raw) >> 8;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("current = %lld, converted_raw = %04x, data = %02x %02x\n",
+ current_ma, converted_current_raw, data[0], data[1]);
+ return fg_mem_write(chip, data, settings[FG_MEM_TERM_CURRENT].address,
+ 2, settings[FG_MEM_TERM_CURRENT].offset, 0);
+}
+
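+/*
+ * Device-tree read helpers: both macros skip the read if an earlier one has
+ * already failed, and treat a missing property (-EINVAL) as non-fatal when
+ * the setting is optional (OF_READ_SETTING) or a default value is supplied
+ * (OF_READ_PROPERTY).
+ */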
+#define OF_READ_SETTING(type, qpnp_dt_property, retval, optional) \
+do { \
+ if (retval) \
+ break; \
+ \
+ retval = of_property_read_u32(chip->pdev->dev.of_node, \
+ "qcom," qpnp_dt_property, \
+ &settings[type].value); \
+ \
+ if ((retval == -EINVAL) && optional) \
+ retval = 0; \
+ else if (retval) \
+ pr_err("Error reading " #qpnp_dt_property \
+ " property rc = %d\n", retval); \
+} while (0)
+
+#define OF_READ_PROPERTY(store, qpnp_dt_property, retval, default_val) \
+do { \
+ if (retval) \
+ break; \
+ \
+ retval = of_property_read_u32(chip->pdev->dev.of_node, \
+ "qcom," qpnp_dt_property, \
+ &store); \
+ \
+ if (retval == -EINVAL) { \
+ retval = 0; \
+ store = default_val; \
+ } else if (retval) { \
+ pr_err("Error reading " #qpnp_dt_property \
+ " property rc = %d\n", retval); \
+ } \
+} while (0)
+
+#define DEFAULT_EVALUATION_CURRENT_MA 1000
+static int fg_of_init(struct fg_chip *chip)
+{
+ int rc = 0, sense_type, len = 0;
+ const char *data;
+ struct device_node *node = chip->pdev->dev.of_node;
+ u32 temp[2] = {0};
+
+ OF_READ_SETTING(FG_MEM_SOFT_HOT, "warm-bat-decidegc", rc, 1);
+ OF_READ_SETTING(FG_MEM_SOFT_COLD, "cool-bat-decidegc", rc, 1);
+ OF_READ_SETTING(FG_MEM_HARD_HOT, "hot-bat-decidegc", rc, 1);
+ OF_READ_SETTING(FG_MEM_HARD_COLD, "cold-bat-decidegc", rc, 1);
+
+ if (of_find_property(node, "qcom,cold-hot-jeita-hysteresis", NULL)) {
+ int hard_hot = 0, soft_hot = 0, hard_cold = 0, soft_cold = 0;
+
+ rc = of_property_read_u32_array(node,
+ "qcom,cold-hot-jeita-hysteresis", temp, 2);
+ if (rc) {
+ pr_err("Error reading cold-hot-jeita-hysteresis rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ chip->jeita_hysteresis_support = true;
+ chip->cold_hysteresis = temp[0];
+ chip->hot_hysteresis = temp[1];
+ hard_hot = settings[FG_MEM_HARD_HOT].value;
+ soft_hot = settings[FG_MEM_SOFT_HOT].value;
+ hard_cold = settings[FG_MEM_HARD_COLD].value;
+ soft_cold = settings[FG_MEM_SOFT_COLD].value;
+ if (((hard_hot - chip->hot_hysteresis) < soft_hot) ||
+ ((hard_cold + chip->cold_hysteresis) > soft_cold)) {
+ chip->jeita_hysteresis_support = false;
+ pr_err("invalid hysteresis: hot_hysteresis = %d cold_hysteresis = %d\n",
+ chip->hot_hysteresis, chip->cold_hysteresis);
+ } else {
+ pr_debug("cold_hysteresis = %d, hot_hysteresis = %d\n",
+ chip->cold_hysteresis, chip->hot_hysteresis);
+ }
+ }
+
+ OF_READ_SETTING(FG_MEM_BCL_LM_THRESHOLD, "bcl-lm-threshold-ma",
+ rc, 1);
+ OF_READ_SETTING(FG_MEM_BCL_MH_THRESHOLD, "bcl-mh-threshold-ma",
+ rc, 1);
+ OF_READ_SETTING(FG_MEM_TERM_CURRENT, "fg-iterm-ma", rc, 1);
+ OF_READ_SETTING(FG_MEM_CHG_TERM_CURRENT, "fg-chg-iterm-ma", rc, 1);
+ OF_READ_SETTING(FG_MEM_CUTOFF_VOLTAGE, "fg-cutoff-voltage-mv", rc, 1);
+ data = of_get_property(chip->pdev->dev.of_node,
+ "qcom,thermal-coefficients", &len);
+ if (data && len == THERMAL_COEFF_N_BYTES) {
+ memcpy(chip->thermal_coefficients, data, len);
+ chip->use_thermal_coefficients = true;
+ }
+ OF_READ_SETTING(FG_MEM_RESUME_SOC, "resume-soc", rc, 1);
+ settings[FG_MEM_RESUME_SOC].value =
+ DIV_ROUND_CLOSEST(settings[FG_MEM_RESUME_SOC].value
+ * FULL_SOC_RAW, FULL_CAPACITY);
+ OF_READ_SETTING(FG_MEM_RESUME_SOC, "resume-soc-raw", rc, 1);
+ OF_READ_SETTING(FG_MEM_IRQ_VOLT_EMPTY, "irq-volt-empty-mv", rc, 1);
+ OF_READ_SETTING(FG_MEM_VBAT_EST_DIFF, "vbat-estimate-diff-mv", rc, 1);
+ OF_READ_SETTING(FG_MEM_DELTA_SOC, "fg-delta-soc", rc, 1);
+ OF_READ_SETTING(FG_MEM_BATT_LOW, "fg-vbatt-low-threshold", rc, 1);
+ OF_READ_SETTING(FG_MEM_THERM_DELAY, "fg-therm-delay-us", rc, 1);
+ OF_READ_PROPERTY(chip->learning_data.max_increment,
+ "cl-max-increment-deciperc", rc, 5);
+ OF_READ_PROPERTY(chip->learning_data.max_decrement,
+ "cl-max-decrement-deciperc", rc, 100);
+ OF_READ_PROPERTY(chip->learning_data.max_temp,
+ "cl-max-temp-decidegc", rc, 450);
+ OF_READ_PROPERTY(chip->learning_data.min_temp,
+ "cl-min-temp-decidegc", rc, 150);
+ OF_READ_PROPERTY(chip->learning_data.max_start_soc,
+ "cl-max-start-capacity", rc, 15);
+ OF_READ_PROPERTY(chip->learning_data.vbat_est_thr_uv,
+ "cl-vbat-est-thr-uv", rc, 40000);
+ OF_READ_PROPERTY(chip->evaluation_current,
+ "aging-eval-current-ma", rc,
+ DEFAULT_EVALUATION_CURRENT_MA);
+ OF_READ_PROPERTY(chip->cc_cv_threshold_mv,
+ "fg-cc-cv-threshold-mv", rc, 0);
+ if (of_property_read_bool(chip->pdev->dev.of_node,
+ "qcom,capacity-learning-on"))
+ chip->batt_aging_mode = FG_AGING_CC;
+ else if (of_property_read_bool(chip->pdev->dev.of_node,
+ "qcom,capacity-estimation-on"))
+ chip->batt_aging_mode = FG_AGING_ESR;
+ else
+ chip->batt_aging_mode = FG_AGING_NONE;
+ if (chip->batt_aging_mode == FG_AGING_CC) {
+ chip->learning_data.feedback_on
+ = of_property_read_bool(chip->pdev->dev.of_node,
+ "qcom,capacity-learning-feedback");
+ }
+ if (fg_debug_mask & FG_AGING)
+ pr_info("battery aging mode: %d\n", chip->batt_aging_mode);
+
+ /* Get the use-otp-profile property */
+ chip->use_otp_profile = of_property_read_bool(chip->pdev->dev.of_node,
+ "qcom,use-otp-profile");
+ chip->hold_soc_while_full
+ = of_property_read_bool(chip->pdev->dev.of_node,
+ "qcom,hold-soc-while-full");
+
+ sense_type = of_property_read_bool(chip->pdev->dev.of_node,
+ "qcom,ext-sense-type");
+ if (rc == 0) {
+ if (fg_sense_type < 0)
+ fg_sense_type = sense_type;
+
+ if (fg_debug_mask & FG_STATUS) {
+ if (fg_sense_type == INTERNAL_CURRENT_SENSE)
+ pr_info("Using internal sense\n");
+ else if (fg_sense_type == EXTERNAL_CURRENT_SENSE)
+ pr_info("Using external sense\n");
+ else
+ pr_info("Using default sense\n");
+ }
+ } else {
+ rc = 0;
+ }
+
+ chip->bad_batt_detection_en = of_property_read_bool(node,
+ "qcom,bad-battery-detection-enable");
+
+ chip->sw_rbias_ctrl = of_property_read_bool(node,
+ "qcom,sw-rbias-control");
+
+ chip->cyc_ctr.en = of_property_read_bool(node,
+ "qcom,cycle-counter-en");
+ if (chip->cyc_ctr.en)
+ chip->cyc_ctr.id = 1;
+
+ chip->esr_pulse_tune_en = of_property_read_bool(node,
+ "qcom,esr-pulse-tuning-en");
+
+ return rc;
+}
+
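+/*
+ * Walk the FG child nodes, skip the ADC and TP-rev peripherals, and request
+ * the SOC, memory-interface and battery interrupts by name.
+ */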
+static int fg_init_irqs(struct fg_chip *chip)
+{
+ int rc = 0;
+ unsigned int base;
+ struct device_node *child;
+ u8 subtype;
+ struct platform_device *pdev = chip->pdev;
+
+ if (of_get_available_child_count(pdev->dev.of_node) == 0) {
+ pr_err("no child nodes\n");
+ return -ENXIO;
+ }
+
+ for_each_available_child_of_node(pdev->dev.of_node, child) {
+ rc = of_property_read_u32(child, "reg", &base);
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "Couldn't find reg in node = %s rc = %d\n",
+ child->full_name, rc);
+ return rc;
+ }
+
+ if ((base == chip->vbat_adc_addr) ||
+ (base == chip->ibat_adc_addr) ||
+ (base == chip->tp_rev_addr))
+ continue;
+
+ rc = fg_read(chip, &subtype,
+ base + REG_OFFSET_PERP_SUBTYPE, 1);
+ if (rc) {
+ pr_err("Peripheral subtype read failed rc=%d\n", rc);
+ return rc;
+ }
+
+ switch (subtype) {
+ case FG_SOC:
+ chip->soc_irq[FULL_SOC].irq = of_irq_get_byname(child,
+ "full-soc");
+ if (chip->soc_irq[FULL_SOC].irq < 0) {
+ pr_err("Unable to get full-soc irq\n");
+ return chip->soc_irq[FULL_SOC].irq;
+ }
+ chip->soc_irq[EMPTY_SOC].irq = of_irq_get_byname(child,
+ "empty-soc");
+ if (chip->soc_irq[EMPTY_SOC].irq < 0) {
+ pr_err("Unable to get empty-soc irq\n");
+ return chip->soc_irq[EMPTY_SOC].irq;
+ }
+ chip->soc_irq[DELTA_SOC].irq = of_irq_get_byname(child,
+ "delta-soc");
+ if (chip->soc_irq[DELTA_SOC].irq < 0) {
+ pr_err("Unable to get delta-soc irq\n");
+ return chip->soc_irq[DELTA_SOC].irq;
+ }
+ chip->soc_irq[FIRST_EST_DONE].irq
+ = of_irq_get_byname(child, "first-est-done");
+ if (chip->soc_irq[FIRST_EST_DONE].irq < 0) {
+ pr_err("Unable to get first-est-done irq\n");
+ return chip->soc_irq[FIRST_EST_DONE].irq;
+ }
+
+ rc = devm_request_irq(chip->dev,
+ chip->soc_irq[FULL_SOC].irq,
+ fg_soc_irq_handler, IRQF_TRIGGER_RISING,
+ "full-soc", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d full-soc: %d\n",
+ chip->soc_irq[FULL_SOC].irq, rc);
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev,
+ chip->soc_irq[EMPTY_SOC].irq,
+ fg_empty_soc_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "empty-soc", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d empty-soc: %d\n",
+ chip->soc_irq[EMPTY_SOC].irq, rc);
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev,
+ chip->soc_irq[DELTA_SOC].irq,
+ fg_soc_irq_handler, IRQF_TRIGGER_RISING,
+ "delta-soc", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d delta-soc: %d\n",
+ chip->soc_irq[DELTA_SOC].irq, rc);
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev,
+ chip->soc_irq[FIRST_EST_DONE].irq,
+ fg_first_soc_irq_handler, IRQF_TRIGGER_RISING,
+ "first-est-done", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d first-est-done: %d\n",
+ chip->soc_irq[FIRST_EST_DONE].irq, rc);
+ return rc;
+ }
+
+ enable_irq_wake(chip->soc_irq[DELTA_SOC].irq);
+ enable_irq_wake(chip->soc_irq[FULL_SOC].irq);
+ enable_irq_wake(chip->soc_irq[EMPTY_SOC].irq);
+ break;
+ case FG_MEMIF:
+ chip->mem_irq[FG_MEM_AVAIL].irq
+ = of_irq_get_byname(child, "mem-avail");
+ if (chip->mem_irq[FG_MEM_AVAIL].irq < 0) {
+ pr_err("Unable to get mem-avail irq\n");
+ return chip->mem_irq[FG_MEM_AVAIL].irq;
+ }
+ rc = devm_request_irq(chip->dev,
+ chip->mem_irq[FG_MEM_AVAIL].irq,
+ fg_mem_avail_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ "mem-avail", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d mem-avail: %d\n",
+ chip->mem_irq[FG_MEM_AVAIL].irq, rc);
+ return rc;
+ }
+ break;
+ case FG_BATT:
+ chip->batt_irq[BATT_MISSING].irq
+ = of_irq_get_byname(child, "batt-missing");
+ if (chip->batt_irq[BATT_MISSING].irq < 0) {
+ pr_err("Unable to get batt-missing irq\n");
+ rc = -EINVAL;
+ return rc;
+ }
+ rc = devm_request_threaded_irq(chip->dev,
+ chip->batt_irq[BATT_MISSING].irq,
+ NULL,
+ fg_batt_missing_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "batt-missing", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d batt-missing: %d\n",
+ chip->batt_irq[BATT_MISSING].irq, rc);
+ return rc;
+ }
+ chip->batt_irq[VBATT_LOW].irq
+ = of_irq_get_byname(child, "vbatt-low");
+ if (chip->batt_irq[VBATT_LOW].irq < 0) {
+ pr_err("Unable to get vbatt-low irq\n");
+ rc = -EINVAL;
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev,
+ chip->batt_irq[VBATT_LOW].irq,
+ fg_vbatt_low_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ "vbatt-low", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d vbatt-low: %d\n",
+ chip->batt_irq[VBATT_LOW].irq, rc);
+ return rc;
+ }
+ disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = false;
+ break;
+ case FG_ADC:
+ break;
+ default:
+ pr_err("Invalid peripheral subtype %d\n", subtype);
+ return -EINVAL;
+ }
+ }
+
+ return rc;
+}
+
+static void fg_cleanup(struct fg_chip *chip)
+{
+ cancel_delayed_work_sync(&chip->update_sram_data);
+ cancel_delayed_work_sync(&chip->update_temp_work);
+ cancel_delayed_work_sync(&chip->update_jeita_setting);
+ cancel_delayed_work_sync(&chip->check_empty_work);
+ cancel_delayed_work_sync(&chip->batt_profile_init);
+ alarm_try_to_cancel(&chip->fg_cap_learning_alarm);
+ cancel_work_sync(&chip->rslow_comp_work);
+ cancel_work_sync(&chip->set_resume_soc_work);
+ cancel_work_sync(&chip->fg_cap_learning_work);
+ cancel_work_sync(&chip->dump_sram);
+ cancel_work_sync(&chip->status_change_work);
+ cancel_work_sync(&chip->cycle_count_work);
+ cancel_work_sync(&chip->update_esr_work);
+ cancel_work_sync(&chip->sysfs_restart_work);
+ cancel_work_sync(&chip->gain_comp_work);
+ cancel_work_sync(&chip->init_work);
+ cancel_work_sync(&chip->charge_full_work);
+ cancel_work_sync(&chip->esr_extract_config_work);
+ mutex_destroy(&chip->rslow_comp.lock);
+ mutex_destroy(&chip->rw_lock);
+ mutex_destroy(&chip->cyc_ctr.lock);
+ mutex_destroy(&chip->learning_data.learning_lock);
+ mutex_destroy(&chip->sysfs_restart_lock);
+ wakeup_source_trash(&chip->resume_soc_wakeup_source.source);
+ wakeup_source_trash(&chip->empty_check_wakeup_source.source);
+ wakeup_source_trash(&chip->memif_wakeup_source.source);
+ wakeup_source_trash(&chip->profile_wakeup_source.source);
+ wakeup_source_trash(&chip->update_temp_wakeup_source.source);
+ wakeup_source_trash(&chip->update_sram_wakeup_source.source);
+ wakeup_source_trash(&chip->gain_comp_wakeup_source.source);
+ wakeup_source_trash(&chip->capacity_learning_wakeup_source.source);
+ wakeup_source_trash(&chip->esr_extract_wakeup_source.source);
+}
+
+static int fg_remove(struct platform_device *pdev)
+{
+ struct fg_chip *chip = dev_get_drvdata(&pdev->dev);
+
+ fg_cleanup(chip);
+ dev_set_drvdata(&pdev->dev, NULL);
+ return 0;
+}
+
+static int fg_memif_data_open(struct inode *inode, struct file *file)
+{
+ struct fg_log_buffer *log;
+ struct fg_trans *trans;
+ u8 *data_buf;
+
+ size_t logbufsize = SZ_4K;
+ size_t databufsize = SZ_4K;
+
+ if (!dbgfs_data.chip) {
+ pr_err("FG driver data not initialized\n");
+ return -EINVAL;
+ }
+
+ /* Per file "transaction" data */
+ trans = kzalloc(sizeof(*trans), GFP_KERNEL);
+ if (!trans) {
+ pr_err("Unable to allocate memory for transaction data\n");
+ return -ENOMEM;
+ }
+
+ /* Allocate log buffer */
+ log = kzalloc(logbufsize, GFP_KERNEL);
+
+ if (!log) {
+ kfree(trans);
+ pr_err("Unable to allocate memory for log buffer\n");
+ return -ENOMEM;
+ }
+
+ log->rpos = 0;
+ log->wpos = 0;
+ log->len = logbufsize - sizeof(*log);
+
+ /* Allocate data buffer */
+ data_buf = kzalloc(databufsize, GFP_KERNEL);
+
+ if (!data_buf) {
+ kfree(trans);
+ kfree(log);
+ pr_err("Unable to allocate memory for data buffer\n");
+ return -ENOMEM;
+ }
+
+ trans->log = log;
+ trans->data = data_buf;
+ trans->cnt = dbgfs_data.cnt;
+ trans->addr = dbgfs_data.addr;
+ trans->chip = dbgfs_data.chip;
+ trans->offset = trans->addr;
+ mutex_init(&trans->memif_dfs_lock);
+
+ file->private_data = trans;
+ return 0;
+}
+
+static int fg_memif_dfs_close(struct inode *inode, struct file *file)
+{
+ struct fg_trans *trans = file->private_data;
+
+ if (trans && trans->log && trans->data) {
+ file->private_data = NULL;
+ mutex_destroy(&trans->memif_dfs_lock);
+ kfree(trans->log);
+ kfree(trans->data);
+ kfree(trans);
+ }
+
+ return 0;
+}
+
+/**
+ * print_to_log: format a string and place into the log buffer
+ * @log: The log buffer to place the result into.
+ * @fmt: The format string to use.
+ * @...: The arguments for the format string.
+ *
+ * The return value is the number of characters written to @log buffer
+ * not including the trailing '\0'.
+ */
+static int print_to_log(struct fg_log_buffer *log, const char *fmt, ...)
+{
+ va_list args;
+ int cnt;
+ char *buf = &log->data[log->wpos];
+ size_t size = log->len - log->wpos;
+
+ va_start(args, fmt);
+ cnt = vscnprintf(buf, size, fmt, args);
+ va_end(args);
+
+ log->wpos += cnt;
+ return cnt;
+}
+
+/**
+ * write_next_line_to_log: Writes a single "line" of data into the log buffer
+ * @trans: Pointer to SRAM transaction data.
+ * @offset: SRAM address offset to start reading from.
+ * @pcnt: Pointer to 'cnt' variable. Indicates the number of bytes to read.
+ *
+ * The 'offset' is a 12-bit SRAM address.
+ *
+ * On a successful read, the pcnt is decremented by the number of data
+ * bytes read from the SRAM. When the cnt reaches 0, all requested bytes have
+ * been read.
+ */
+static int
+write_next_line_to_log(struct fg_trans *trans, int offset, size_t *pcnt)
+{
+ int i, j;
+ u8 data[ITEMS_PER_LINE];
+ struct fg_log_buffer *log = trans->log;
+
+ int cnt = 0;
+ int padding = offset % ITEMS_PER_LINE;
+ int items_to_read = min(ARRAY_SIZE(data) - padding, *pcnt);
+ int items_to_log = min(ITEMS_PER_LINE, padding + items_to_read);
+
+ /* Buffer needs enough space for an entire line */
+ if ((log->len - log->wpos) < MAX_LINE_LENGTH)
+ goto done;
+
+ memcpy(data, trans->data + (offset - trans->addr), items_to_read);
+
+ *pcnt -= items_to_read;
+
+ /* Each line starts with the aligned offset (12-bit address) */
+ cnt = print_to_log(log, "%3.3X ", offset & 0xfff);
+ if (cnt == 0)
+ goto done;
+
+ /* If the offset is unaligned, add padding to right justify items */
+ for (i = 0; i < padding; ++i) {
+ cnt = print_to_log(log, "-- ");
+ if (cnt == 0)
+ goto done;
+ }
+
+ /* Log the data items */
+ for (j = 0; i < items_to_log; ++i, ++j) {
+ cnt = print_to_log(log, "%2.2X ", data[j]);
+ if (cnt == 0)
+ goto done;
+ }
+
+ /* If the last character was a space, then replace it with a newline */
+ if (log->wpos > 0 && log->data[log->wpos - 1] == ' ')
+ log->data[log->wpos - 1] = '\n';
+
+done:
+ return cnt;
+}
+
+/**
+ * get_log_data - reads data from SRAM and saves to the log buffer
+ * @trans: Pointer to SRAM transaction data.
+ *
+ * Returns the number of "items" read or SPMI error code for read failures.
+ */
+static int get_log_data(struct fg_trans *trans)
+{
+ int cnt, rc;
+ int last_cnt;
+ int items_read;
+ int total_items_read = 0;
+ u32 offset = trans->offset;
+ size_t item_cnt = trans->cnt;
+ struct fg_log_buffer *log = trans->log;
+
+ if (item_cnt == 0)
+ return 0;
+
+ if (item_cnt > SZ_4K) {
+ pr_err("Reading too many bytes\n");
+ return -EINVAL;
+ }
+
+ rc = fg_mem_read(trans->chip, trans->data,
+ trans->addr, trans->cnt, 0, 0);
+ if (rc) {
+ pr_err("dump failed: rc = %d\n", rc);
+ return rc;
+ }
+ /* Reset the log buffer 'pointers' */
+ log->wpos = log->rpos = 0;
+
+ /* Keep reading data until the log is full */
+ do {
+ last_cnt = item_cnt;
+ cnt = write_next_line_to_log(trans, offset, &item_cnt);
+ items_read = last_cnt - item_cnt;
+ offset += items_read;
+ total_items_read += items_read;
+ } while (cnt && item_cnt > 0);
+
+ /* Adjust the transaction offset and count */
+ trans->cnt = item_cnt;
+ trans->offset += total_items_read;
+
+ return total_items_read;
+}
+
+/**
+ * fg_memif_dfs_reg_read: reads value(s) from SRAM and fills the user's
+ * buffer with a byte array (coded as a string)
+ * @file: file pointer
+ * @buf: where to put the result
+ * @count: maximum space available in @buf
+ * @ppos: starting position
+ * @return number of user bytes read, or negative error value
+ */
+static ssize_t fg_memif_dfs_reg_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct fg_trans *trans = file->private_data;
+ struct fg_log_buffer *log = trans->log;
+ size_t ret;
+ size_t len;
+
+ mutex_lock(&trans->memif_dfs_lock);
+ /* Is the log buffer empty? */
+ if (log->rpos >= log->wpos) {
+ if (get_log_data(trans) <= 0) {
+ len = 0;
+ goto unlock_mutex;
+ }
+ }
+
+ len = min(count, log->wpos - log->rpos);
+
+ ret = copy_to_user(buf, &log->data[log->rpos], len);
+ if (ret == len) {
+ pr_err("error copy sram register values to user\n");
+ len = -EFAULT;
+ goto unlock_mutex;
+ }
+
+ /* 'ret' is the number of bytes not copied */
+ len -= ret;
+
+ *ppos += len;
+ log->rpos += len;
+
+unlock_mutex:
+ mutex_unlock(&trans->memif_dfs_lock);
+ return len;
+}
+
+/**
+ * fg_memif_dfs_reg_write: write user's byte array (coded as string) to SRAM.
+ * @file: file pointer
+ * @buf: user data to be written.
+ * @count: maximum space available in @buf
+ * @ppos: starting position
+ * @return number of user bytes written, or negative error value
+ */
+static ssize_t fg_memif_dfs_reg_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int bytes_read;
+ int data;
+ int pos = 0;
+ int cnt = 0;
+ u8 *values;
+ ssize_t ret = 0;
+ char *kbuf;
+ u32 offset;
+
+ struct fg_trans *trans = file->private_data;
+
+ mutex_lock(&trans->memif_dfs_lock);
+ offset = trans->offset;
+
+ /* Make a copy of the user data */
+ kbuf = kmalloc(count + 1, GFP_KERNEL);
+ if (!kbuf) {
+ ret = -ENOMEM;
+ goto unlock_mutex;
+ }
+
+ ret = copy_from_user(kbuf, buf, count);
+ if (ret == count) {
+ pr_err("failed to copy data from user\n");
+ ret = -EFAULT;
+ goto free_buf;
+ }
+
+ count -= ret;
+ *ppos += count;
+ kbuf[count] = '\0';
+
+ /* Overwrite the text buffer in place with the parsed raw bytes */
+ values = kbuf;
+
+ /* Parse the data in the buffer. It should be a string of numbers */
+ while ((pos < count) &&
+ sscanf(kbuf + pos, "%i%n", &data, &bytes_read) == 1) {
+ /*
+ * We shouldn't be receiving a string of characters that
+ * exceeds a size of 5 to keep this functionally correct.
+ * Also, we should make sure that pos never gets overflowed
+ * beyond the limit.
+ */
+ if (bytes_read > 5 || bytes_read > INT_MAX - pos) {
+ cnt = 0;
+ ret = -EINVAL;
+ break;
+ }
+ pos += bytes_read;
+ values[cnt++] = data & 0xff;
+ }
+
+ if (!cnt)
+ goto free_buf;
+
+ pr_info("address %x, count %d\n", offset, cnt);
+ /* Perform the write(s) */
+
+ ret = fg_mem_write(trans->chip, values, offset,
+ cnt, 0, 0);
+ if (ret) {
+ pr_err("SPMI write failed, err = %zd\n", ret);
+ } else {
+ ret = count;
+ trans->offset += cnt > 4 ? 4 : cnt;
+ }
+
+free_buf:
+ kfree(kbuf);
+unlock_mutex:
+ mutex_unlock(&trans->memif_dfs_lock);
+ return ret;
+}
+
+static const struct file_operations fg_memif_dfs_reg_fops = {
+ .open = fg_memif_data_open,
+ .release = fg_memif_dfs_close,
+ .read = fg_memif_dfs_reg_read,
+ .write = fg_memif_dfs_reg_write,
+};
+
+/**
+ * fg_dfs_create_fs: create debugfs file system.
+ * @return pointer to root directory or NULL if failed to create fs
+ */
+static struct dentry *fg_dfs_create_fs(void)
+{
+ struct dentry *root, *file;
+
+ pr_debug("Creating FG_MEM debugfs file-system\n");
+ root = debugfs_create_dir(DFS_ROOT_NAME, NULL);
+ if (IS_ERR_OR_NULL(root)) {
+ pr_err("Error creating top level directory err:%ld",
+ (long)root);
+ if (PTR_ERR(root) == -ENODEV)
+ pr_err("debugfs is not enabled in the kernel");
+ return NULL;
+ }
+
+ dbgfs_data.help_msg.size = strlen(dbgfs_data.help_msg.data);
+
+ file = debugfs_create_blob("help", S_IRUGO, root, &dbgfs_data.help_msg);
+ if (!file) {
+ pr_err("error creating help entry\n");
+ goto err_remove_fs;
+ }
+ return root;
+
+err_remove_fs:
+ debugfs_remove_recursive(root);
+ return NULL;
+}
+
+/**
+ * fg_dfs_get_root: return a pointer to the FG debugfs root directory.
+ *
+ * Returns the existing root directory, creating it first if it does not
+ * exist yet. The transaction-configuration files (address, count and data)
+ * are added later by fg_dfs_create().
+ * @return valid pointer on success or NULL
+ */
+struct dentry *fg_dfs_get_root(void)
+{
+ if (dbgfs_data.root)
+ return dbgfs_data.root;
+
+ if (mutex_lock_interruptible(&dbgfs_data.lock) < 0)
+ return NULL;
+ /* critical section */
+ if (!dbgfs_data.root) { /* double checking idiom */
+ dbgfs_data.root = fg_dfs_create_fs();
+ }
+ mutex_unlock(&dbgfs_data.lock);
+ return dbgfs_data.root;
+}
+
+/*
+ * fg_dfs_create: adds the fg_memif debugfs entries (address, count, data)
+ * @return zero on success
+ */
+int fg_dfs_create(struct fg_chip *chip)
+{
+ struct dentry *root;
+ struct dentry *file;
+
+ root = fg_dfs_get_root();
+ if (!root)
+ return -ENOENT;
+
+ dbgfs_data.chip = chip;
+
+ file = debugfs_create_u32("count", DFS_MODE, root, &(dbgfs_data.cnt));
+ if (!file) {
+ pr_err("error creating 'count' entry\n");
+ goto err_remove_fs;
+ }
+
+ file = debugfs_create_x32("address", DFS_MODE,
+ root, &(dbgfs_data.addr));
+ if (!file) {
+ pr_err("error creating 'address' entry\n");
+ goto err_remove_fs;
+ }
+
+ file = debugfs_create_file("data", DFS_MODE, root, &dbgfs_data,
+ &fg_memif_dfs_reg_fops);
+ if (!file) {
+ pr_err("error creating 'data' entry\n");
+ goto err_remove_fs;
+ }
+
+ return 0;
+
+err_remove_fs:
+ debugfs_remove_recursive(root);
+ return -ENOMEM;
+}
+
+#define EXTERNAL_SENSE_OFFSET_REG 0x41C
+#define EXT_OFFSET_TRIM_REG 0xF8
+#define SEC_ACCESS_REG 0xD0
+#define SEC_ACCESS_UNLOCK 0xA5
+#define BCL_TRIM_REV_FIXED 12
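+/*
+ * On parts whose trim revision is older than BCL_TRIM_REV_FIXED, copy the
+ * external sense offset trim out of SRAM and rewrite it into the SOC
+ * peripheral's EXT_OFFSET_TRIM register (after unlocking secure access).
+ */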
+static int bcl_trim_workaround(struct fg_chip *chip)
+{
+ u8 reg;
+ int rc;
+
+ if (chip->tp_rev_addr == 0)
+ return 0;
+
+ rc = fg_read(chip, &reg, chip->tp_rev_addr, 1);
+ if (rc) {
+ pr_err("Failed to read tp reg, rc = %d\n", rc);
+ return rc;
+ }
+ if (reg >= BCL_TRIM_REV_FIXED) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("workaround not applied, tp_rev = %d\n", reg);
+ return 0;
+ }
+
+ rc = fg_mem_read(chip, &reg, EXTERNAL_SENSE_OFFSET_REG, 1, 2, 0);
+ if (rc) {
+ pr_err("Failed to read ext sense offset trim, rc = %d\n", rc);
+ return rc;
+ }
+ rc = fg_masked_write(chip, chip->soc_base + SEC_ACCESS_REG,
+ SEC_ACCESS_UNLOCK, SEC_ACCESS_UNLOCK, 1);
+
+ rc |= fg_masked_write(chip, chip->soc_base + EXT_OFFSET_TRIM_REG,
+ 0xFF, reg, 1);
+ if (rc) {
+ pr_err("Failed to write ext sense offset trim, rc = %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+#define FG_ALG_SYSCTL_1 0x4B0
+#define SOC_CNFG 0x450
+#define SOC_DELTA_OFFSET 3
+#define DELTA_SOC_PERCENT 1
+#define I_TERM_QUAL_BIT BIT(1)
+#define PATCH_NEG_CURRENT_BIT BIT(3)
+#define KI_COEFF_PRED_FULL_ADDR 0x408
+#define KI_COEFF_PRED_FULL_4_0_MSB 0x88
+#define KI_COEFF_PRED_FULL_4_0_LSB 0x00
+#define TEMP_FRAC_SHIFT_REG 0x4A4
+#define FG_ADC_CONFIG_REG 0x4B8
+#define FG_BCL_CONFIG_OFFSET 0x3
+#define BCL_FORCED_HPM_IN_CHARGE BIT(2)
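+/*
+ * Hardware init common to all supported PMICs: programs the termination
+ * current, cutoff and empty-IRQ voltages, BCL thresholds, resume SOC,
+ * delta-SOC, vbatt-low and thermistor delay settings, then restores the
+ * cycle counter and ESR tuning state where enabled.
+ */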
+static int fg_common_hw_init(struct fg_chip *chip)
+{
+ int rc;
+ int resume_soc_raw;
+ u8 val;
+
+ update_iterm(chip);
+ update_cutoff_voltage(chip);
+ update_irq_volt_empty(chip);
+ update_bcl_thresholds(chip);
+
+ resume_soc_raw = settings[FG_MEM_RESUME_SOC].value;
+ if (resume_soc_raw > 0) {
+ rc = fg_set_resume_soc(chip, resume_soc_raw);
+ if (rc) {
+ pr_err("Couldn't set resume SOC for FG\n");
+ return rc;
+ }
+ } else {
+ pr_info("FG auto recharge threshold not specified in DT\n");
+ }
+
+ if (fg_sense_type >= 0) {
+ rc = set_prop_sense_type(chip, fg_sense_type);
+ if (rc) {
+ pr_err("failed to config sense type %d rc=%d\n",
+ fg_sense_type, rc);
+ return rc;
+ }
+ }
+
+ rc = fg_mem_masked_write(chip, settings[FG_MEM_DELTA_SOC].address, 0xFF,
+ soc_to_setpoint(settings[FG_MEM_DELTA_SOC].value),
+ settings[FG_MEM_DELTA_SOC].offset);
+ if (rc) {
+ pr_err("failed to write delta soc rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_mem_masked_write(chip, settings[FG_MEM_BATT_LOW].address, 0xFF,
+ batt_to_setpoint_8b(settings[FG_MEM_BATT_LOW].value),
+ settings[FG_MEM_BATT_LOW].offset);
+ if (rc) {
+ pr_err("failed to write Vbatt_low rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_mem_masked_write(chip, settings[FG_MEM_THERM_DELAY].address,
+ THERM_DELAY_MASK,
+ therm_delay_to_setpoint(settings[FG_MEM_THERM_DELAY].value),
+ settings[FG_MEM_THERM_DELAY].offset);
+ if (rc) {
+ pr_err("failed to write therm_delay rc=%d\n", rc);
+ return rc;
+ }
+
+ if (chip->use_thermal_coefficients) {
+ fg_mem_write(chip, chip->thermal_coefficients,
+ THERMAL_COEFF_ADDR, THERMAL_COEFF_N_BYTES,
+ THERMAL_COEFF_OFFSET, 0);
+ }
+
+ if (!chip->sw_rbias_ctrl) {
+ rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
+ BATT_TEMP_CNTRL_MASK,
+ TEMP_SENSE_ALWAYS_BIT,
+ BATT_TEMP_OFFSET);
+ if (rc) {
+ pr_err("failed to write BATT_TEMP_OFFSET rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ /* Read the cycle counter back from FG SRAM */
+ if (chip->cyc_ctr.en)
+ restore_cycle_counter(chip);
+
+ if (chip->esr_pulse_tune_en) {
+ rc = fg_mem_read(chip, &val, SYS_CFG_1_REG, 1, SYS_CFG_1_OFFSET,
+ 0);
+ if (rc) {
+ pr_err("unable to read sys_cfg_1: %d\n", rc);
+ return rc;
+ }
+
+ if (!(val & ENABLE_ESR_PULSE_VAL))
+ chip->esr_extract_disabled = true;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("ESR extract is %sabled\n",
+ chip->esr_extract_disabled ? "dis" : "en");
+
+ rc = fg_mem_read(chip, &val, CBITS_INPUT_FILTER_REG, 1,
+ CBITS_RMEAS1_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to read cbits_input_filter_reg: %d\n",
+ rc);
+ return rc;
+ }
+
+ if (val & (IMPTR_FAST_TIME_SHIFT | IMPTR_LONG_TIME_SHIFT))
+ chip->imptr_pulse_slow_en = true;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("imptr_pulse_slow is %sabled\n",
+ chip->imptr_pulse_slow_en ? "en" : "dis");
+
+ rc = fg_mem_read(chip, &val, RSLOW_CFG_REG, 1, RSLOW_CFG_OFFSET,
+ 0);
+ if (rc) {
+ pr_err("unable to read rslow cfg: %d\n", rc);
+ return rc;
+ }
+
+ if (val & RSLOW_CFG_ON_VAL)
+ chip->rslow_comp.active = true;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("rslow_comp active is %sabled\n",
+ chip->rslow_comp.active ? "en" : "dis");
+ }
+
+ return 0;
+}
+
+static int fg_8994_hw_init(struct fg_chip *chip)
+{
+ int rc = 0;
+ u8 data[4];
+ u64 esr_value;
+
+ rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
+ PATCH_NEG_CURRENT_BIT,
+ PATCH_NEG_CURRENT_BIT,
+ EXTERNAL_SENSE_OFFSET);
+ if (rc) {
+ pr_err("failed to write patch current bit rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = bcl_trim_workaround(chip);
+ if (rc) {
+ pr_err("failed to redo bcl trim rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_mem_masked_write(chip, FG_ADC_CONFIG_REG,
+ BCL_FORCED_HPM_IN_CHARGE,
+ BCL_FORCED_HPM_IN_CHARGE,
+ FG_BCL_CONFIG_OFFSET);
+ if (rc) {
+ pr_err("failed to force hpm in charge rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_mem_masked_write(chip, FG_ALG_SYSCTL_1, I_TERM_QUAL_BIT, 0, 0);
+
+ data[0] = 0xA2;
+ data[1] = 0x12;
+
+ rc = fg_mem_write(chip, data, TEMP_FRAC_SHIFT_REG, 2, 2, 0);
+ if (rc) {
+ pr_err("failed to write temp ocv constants rc=%d\n", rc);
+ return rc;
+ }
+
+ data[0] = KI_COEFF_PRED_FULL_4_0_LSB;
+ data[1] = KI_COEFF_PRED_FULL_4_0_MSB;
+ fg_mem_write(chip, data, KI_COEFF_PRED_FULL_ADDR, 2, 2, 0);
+
+ esr_value = ESR_DEFAULT_VALUE;
+ rc = fg_mem_write(chip, (u8 *)&esr_value, MAXRSCHANGE_REG, 8,
+ ESR_VALUE_OFFSET, 0);
+ if (rc)
+ pr_err("failed to write default ESR value rc=%d\n", rc);
+ else
+ pr_debug("set default value to esr filter\n");
+
+ return 0;
+}
+
+#define FG_USBID_CONFIG_OFFSET 0x2
+#define DISABLE_USBID_DETECT_BIT BIT(0)
+static int fg_8996_hw_init(struct fg_chip *chip)
+{
+ int rc;
+
+ rc = fg_mem_masked_write(chip, FG_ADC_CONFIG_REG,
+ BCL_FORCED_HPM_IN_CHARGE,
+ BCL_FORCED_HPM_IN_CHARGE,
+ FG_BCL_CONFIG_OFFSET);
+ if (rc) {
+ pr_err("failed to force hpm in charge rc=%d\n", rc);
+ return rc;
+ }
+
+ /* enable USB ID conversions for PMI8996 v1.0 */
+ if (chip->pmic_revision[REVID_DIG_MAJOR] == 1
+ && chip->pmic_revision[REVID_ANA_MAJOR] == 0) {
+ rc = fg_mem_masked_write(chip, FG_ADC_CONFIG_REG,
+ DISABLE_USBID_DETECT_BIT,
+ 0, FG_USBID_CONFIG_OFFSET);
+ if (rc) {
+ pr_err("failed to enable usbid conversions: %d\n", rc);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+static int fg_8950_hw_init(struct fg_chip *chip)
+{
+ int rc;
+
+ rc = fg_mem_masked_write(chip, FG_ADC_CONFIG_REG,
+ BCL_FORCED_HPM_IN_CHARGE,
+ BCL_FORCED_HPM_IN_CHARGE,
+ FG_BCL_CONFIG_OFFSET);
+ if (rc)
+ pr_err("failed to force hpm in charge rc=%d\n", rc);
+
+ return rc;
+}
+
+static int fg_hw_init(struct fg_chip *chip)
+{
+ int rc = 0;
+
+ rc = fg_common_hw_init(chip);
+ if (rc) {
+ pr_err("Unable to initialize FG HW rc=%d\n", rc);
+ return rc;
+ }
+
+ /* add PMIC specific hw init */
+ switch (chip->pmic_subtype) {
+ case PMI8994:
+ rc = fg_8994_hw_init(chip);
+ chip->wa_flag |= PULSE_REQUEST_WA;
+ break;
+ case PMI8996:
+ rc = fg_8996_hw_init(chip);
+ /* Setup workaround flag based on PMIC type */
+ if (fg_sense_type == INTERNAL_CURRENT_SENSE)
+ chip->wa_flag |= IADC_GAIN_COMP_WA;
+ if (chip->pmic_revision[REVID_DIG_MAJOR] > 1)
+ chip->wa_flag |= USE_CC_SOC_REG;
+
+ break;
+ case PMI8950:
+ case PMI8937:
+ rc = fg_8950_hw_init(chip);
+ /* Setup workaround flag based on PMIC type */
+ chip->wa_flag |= BCL_HI_POWER_FOR_CHGLED_WA;
+ if (fg_sense_type == INTERNAL_CURRENT_SENSE)
+ chip->wa_flag |= IADC_GAIN_COMP_WA;
+ if (chip->pmic_revision[REVID_DIG_MAJOR] > 1)
+ chip->wa_flag |= USE_CC_SOC_REG;
+
+ break;
+ }
+ if (rc)
+ pr_err("Unable to initialize PMIC specific FG HW rc=%d\n", rc);
+
+ pr_debug("wa_flag=0x%x\n", chip->wa_flag);
+
+ return rc;
+}
+
+#define DIG_MINOR 0x0
+#define DIG_MAJOR 0x1
+#define ANA_MINOR 0x2
+#define ANA_MAJOR 0x3
+#define IACS_INTR_SRC_SLCT BIT(3)
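+/*
+ * Read the four FG revision bytes and pick the SRAM register offset table
+ * for this digital revision; DIG_REV_3 additionally enables IMA-based SRAM
+ * access and retargets the memory interrupt at IACS_READY.
+ */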
+static int fg_setup_memif_offset(struct fg_chip *chip)
+{
+ int rc;
+
+ rc = fg_read(chip, chip->revision, chip->mem_base + DIG_MINOR, 4);
+ if (rc) {
+ pr_err("Unable to read FG revision rc=%d\n", rc);
+ return rc;
+ }
+
+ switch (chip->revision[DIG_MAJOR]) {
+ case DIG_REV_1:
+ case DIG_REV_2:
+ chip->offset = offset[0].address;
+ break;
+ case DIG_REV_3:
+ chip->offset = offset[1].address;
+ chip->ima_supported = true;
+ break;
+ default:
+ pr_err("Digital Major rev=%d not supported\n",
+ chip->revision[DIG_MAJOR]);
+ return -EINVAL;
+ }
+
+ if (chip->ima_supported) {
+ /*
+ * Change the FG_MEM_INT interrupt to track IACS_READY
+ * condition instead of end-of-transaction. This makes sure
+ * that the next transaction starts only after the hw is ready.
+ */
+ rc = fg_masked_write(chip,
+ chip->mem_base + MEM_INTF_IMA_CFG, IACS_INTR_SRC_SLCT,
+ IACS_INTR_SRC_SLCT, 1);
+ if (rc) {
+ pr_err("failed to configure interrupt source %d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int fg_detect_pmic_type(struct fg_chip *chip)
+{
+ struct pmic_revid_data *pmic_rev_id;
+ struct device_node *revid_dev_node;
+
+ revid_dev_node = of_parse_phandle(chip->pdev->dev.of_node,
+ "qcom,pmic-revid", 0);
+ if (!revid_dev_node) {
+ pr_err("Missing qcom,pmic-revid property - driver failed\n");
+ return -EINVAL;
+ }
+
+ pmic_rev_id = get_revid_data(revid_dev_node);
+ if (IS_ERR_OR_NULL(pmic_rev_id)) {
+ pr_err("Unable to get pmic_revid rc=%ld\n",
+ PTR_ERR(pmic_rev_id));
+ /*
+ * the revid peripheral must be registered, any failure
+ * here only indicates that the rev-id module has not
+ * probed yet.
+ */
+ return -EPROBE_DEFER;
+ }
+
+ switch (pmic_rev_id->pmic_subtype) {
+ case PMI8994:
+ case PMI8950:
+ case PMI8937:
+ case PMI8996:
+ chip->pmic_subtype = pmic_rev_id->pmic_subtype;
+ chip->pmic_revision[REVID_RESERVED] = pmic_rev_id->rev1;
+ chip->pmic_revision[REVID_VARIANT] = pmic_rev_id->rev2;
+ chip->pmic_revision[REVID_ANA_MAJOR] = pmic_rev_id->rev3;
+ chip->pmic_revision[REVID_DIG_MAJOR] = pmic_rev_id->rev4;
+ break;
+ default:
+ pr_err("PMIC subtype %d not supported\n",
+ pmic_rev_id->pmic_subtype);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define INIT_JEITA_DELAY_MS 1000
+
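+/*
+ * One-shot init work scheduled from probe: runs the HW init with SRAM access
+ * held, kicks off the JEITA, SRAM and temperature update work, starts the
+ * battery profile load, and snapshots the default IADC gain when the gain
+ * compensation workaround is in use.
+ */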
+static void delayed_init_work(struct work_struct *work)
+{
+ u8 reg[2];
+ int rc;
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ init_work);
+
+ /* hold memory access until initialization finishes */
+ fg_mem_lock(chip);
+
+ rc = fg_hw_init(chip);
+ if (rc) {
+ pr_err("failed to hw init rc = %d\n", rc);
+ fg_mem_release(chip);
+ fg_cleanup(chip);
+ return;
+ }
+ /* release memory access before update_sram_data is called */
+ fg_mem_release(chip);
+
+ schedule_delayed_work(
+ &chip->update_jeita_setting,
+ msecs_to_jiffies(INIT_JEITA_DELAY_MS));
+
+ if (chip->last_sram_update_time == 0)
+ update_sram_data_work(&chip->update_sram_data.work);
+
+ if (chip->last_temp_update_time == 0)
+ update_temp_data(&chip->update_temp_work.work);
+
+ if (!chip->use_otp_profile)
+ schedule_delayed_work(&chip->batt_profile_init, 0);
+
+ if (chip->wa_flag & IADC_GAIN_COMP_WA) {
+ /* read default gain config */
+ rc = fg_mem_read(chip, reg, K_VCOR_REG, 2, DEF_GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read default gain rc=%d\n", rc);
+ goto done;
+ }
+
+ if (reg[1] || reg[0]) {
+ /*
+ * Default gain register has valid value:
+ * - write to gain register.
+ */
+ rc = fg_mem_write(chip, reg, GAIN_REG, 2,
+ GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to write gain rc=%d\n", rc);
+ goto done;
+ }
+ } else {
+ /*
+ * Default gain register is invalid:
+ * - read gain register for default gain value
+ * - write to default gain register.
+ */
+ rc = fg_mem_read(chip, reg, GAIN_REG, 2,
+ GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read gain rc=%d\n", rc);
+ goto done;
+ }
+ rc = fg_mem_write(chip, reg, K_VCOR_REG, 2,
+ DEF_GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to write default gain rc=%d\n",
+ rc);
+ goto done;
+ }
+ }
+
+ chip->iadc_comp_data.dfl_gain_reg[0] = reg[0];
+ chip->iadc_comp_data.dfl_gain_reg[1] = reg[1];
+ chip->iadc_comp_data.dfl_gain = half_float(reg);
+ chip->input_present = is_input_present(chip);
+ chip->otg_present = is_otg_present(chip);
+ chip->init_done = true;
+
+ pr_debug("IADC gain initial config reg_val 0x%x%x gain %lld\n",
+ reg[1], reg[0], chip->iadc_comp_data.dfl_gain);
+ }
+
+ pr_debug("FG: HW_init success\n");
+
+ return;
+done:
+ fg_cleanup(chip);
+}
+
+static int fg_probe(struct platform_device *pdev)
+{
+ struct device *dev = &(pdev->dev);
+ struct fg_chip *chip;
+ struct device_node *child;
+ unsigned int base;
+ u8 subtype, reg;
+ int rc = 0;
+ struct power_supply_config bms_psy_cfg = {};
+
+ if (!pdev) {
+ pr_err("no valid spmi pointer\n");
+ return -ENODEV;
+ }
+
+ if (!pdev->dev.of_node) {
+ pr_err("device node missing\n");
+ return -ENODEV;
+ }
+
+ chip = devm_kzalloc(dev, sizeof(struct fg_chip), GFP_KERNEL);
+ if (chip == NULL) {
+ pr_err("Can't allocate fg_chip\n");
+ return -ENOMEM;
+ }
+ chip->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!chip->regmap) {
+ dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+ return -EINVAL;
+ }
+
+ chip->pdev = pdev;
+ chip->dev = &(pdev->dev);
+
+ wakeup_source_init(&chip->empty_check_wakeup_source.source,
+ "qpnp_fg_empty_check");
+ wakeup_source_init(&chip->memif_wakeup_source.source,
+ "qpnp_fg_memaccess");
+ wakeup_source_init(&chip->profile_wakeup_source.source,
+ "qpnp_fg_profile");
+ wakeup_source_init(&chip->update_temp_wakeup_source.source,
+ "qpnp_fg_update_temp");
+ wakeup_source_init(&chip->update_sram_wakeup_source.source,
+ "qpnp_fg_update_sram");
+ wakeup_source_init(&chip->resume_soc_wakeup_source.source,
+ "qpnp_fg_set_resume_soc");
+ wakeup_source_init(&chip->gain_comp_wakeup_source.source,
+ "qpnp_fg_gain_comp");
+ wakeup_source_init(&chip->capacity_learning_wakeup_source.source,
+ "qpnp_fg_cap_learning");
+ wakeup_source_init(&chip->esr_extract_wakeup_source.source,
+ "qpnp_fg_esr_extract");
+ mutex_init(&chip->rw_lock);
+ mutex_init(&chip->cyc_ctr.lock);
+ mutex_init(&chip->learning_data.learning_lock);
+ mutex_init(&chip->rslow_comp.lock);
+ mutex_init(&chip->sysfs_restart_lock);
+ INIT_DELAYED_WORK(&chip->update_jeita_setting, update_jeita_setting);
+ INIT_DELAYED_WORK(&chip->update_sram_data, update_sram_data_work);
+ INIT_DELAYED_WORK(&chip->update_temp_work, update_temp_data);
+ INIT_DELAYED_WORK(&chip->check_empty_work, check_empty_work);
+ INIT_DELAYED_WORK(&chip->batt_profile_init, batt_profile_init);
+ INIT_WORK(&chip->rslow_comp_work, rslow_comp_work);
+ INIT_WORK(&chip->fg_cap_learning_work, fg_cap_learning_work);
+ INIT_WORK(&chip->dump_sram, dump_sram);
+ INIT_WORK(&chip->status_change_work, status_change_work);
+ INIT_WORK(&chip->cycle_count_work, update_cycle_count);
+ INIT_WORK(&chip->battery_age_work, battery_age_work);
+ INIT_WORK(&chip->update_esr_work, update_esr_value);
+ INIT_WORK(&chip->set_resume_soc_work, set_resume_soc_work);
+ INIT_WORK(&chip->sysfs_restart_work, sysfs_restart_work);
+ INIT_WORK(&chip->init_work, delayed_init_work);
+ INIT_WORK(&chip->charge_full_work, charge_full_work);
+ INIT_WORK(&chip->gain_comp_work, iadc_gain_comp_work);
+ INIT_WORK(&chip->bcl_hi_power_work, bcl_hi_power_work);
+ INIT_WORK(&chip->esr_extract_config_work, esr_extract_config_work);
+ alarm_init(&chip->fg_cap_learning_alarm, ALARM_BOOTTIME,
+ fg_cap_learning_alarm_cb);
+ init_completion(&chip->sram_access_granted);
+ init_completion(&chip->sram_access_revoked);
+ complete_all(&chip->sram_access_revoked);
+ init_completion(&chip->batt_id_avail);
+ init_completion(&chip->first_soc_done);
+ dev_set_drvdata(&pdev->dev, chip);
+
+ if (of_get_available_child_count(pdev->dev.of_node) == 0) {
+ pr_err("no child nodes\n");
+ rc = -ENXIO;
+ goto of_init_fail;
+ }
+
+ for_each_available_child_of_node(pdev->dev.of_node, child) {
+ rc = of_property_read_u32(child, "reg", &base);
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "Couldn't find reg in node = %s rc = %d\n",
+ child->full_name, rc);
+ goto of_init_fail;
+ }
+
+ if (strcmp("qcom,fg-adc-vbat", child->name) == 0) {
+ chip->vbat_adc_addr = base;
+ continue;
+ } else if (strcmp("qcom,fg-adc-ibat", child->name) == 0) {
+ chip->ibat_adc_addr = base;
+ continue;
+ } else if (strcmp("qcom,revid-tp-rev", child->name) == 0) {
+ chip->tp_rev_addr = base;
+ continue;
+ }
+
+ rc = fg_read(chip, &subtype,
+ base + REG_OFFSET_PERP_SUBTYPE, 1);
+ if (rc) {
+ pr_err("Peripheral subtype read failed rc=%d\n", rc);
+ goto of_init_fail;
+ }
+
+ switch (subtype) {
+ case FG_SOC:
+ chip->soc_base = base;
+ break;
+ case FG_MEMIF:
+ chip->mem_base = base;
+ break;
+ case FG_BATT:
+ chip->batt_base = base;
+ break;
+ default:
+ pr_err("Invalid peripheral subtype=0x%x\n", subtype);
+ rc = -EINVAL;
+ }
+ }
+
+ rc = fg_detect_pmic_type(chip);
+ if (rc) {
+ pr_err("Unable to detect PMIC type rc=%d\n", rc);
+ goto of_init_fail;
+ }
+
+ rc = fg_setup_memif_offset(chip);
+ if (rc) {
+ pr_err("Unable to setup mem_if offsets rc=%d\n", rc);
+ goto of_init_fail;
+ }
+
+ rc = fg_of_init(chip);
+ if (rc) {
+ pr_err("failed to parse devicetree rc=%d\n", rc);
+ goto of_init_fail;
+ }
+
+ if (chip->jeita_hysteresis_support) {
+ rc = fg_init_batt_temp_state(chip);
+ if (rc) {
+ pr_err("failed to get battery status rc=%d\n", rc);
+ goto of_init_fail;
+ }
+ }
+
+ /* check if the first estimate is already finished at this time */
+ if (is_first_est_done(chip))
+ complete_all(&chip->first_soc_done);
+
+ reg = 0xFF;
+ rc = fg_write(chip, &reg, INT_EN_CLR(chip->mem_base), 1);
+ if (rc) {
+ pr_err("failed to clear interrupts %d\n", rc);
+ goto of_init_fail;
+ }
+
+ rc = fg_init_irqs(chip);
+ if (rc) {
+ pr_err("failed to request interrupts %d\n", rc);
+ goto cancel_work;
+ }
+
+ chip->batt_type = default_batt_type;
+
+ chip->bms_psy_d.name = "bms";
+ chip->bms_psy_d.type = POWER_SUPPLY_TYPE_BMS;
+ chip->bms_psy_d.properties = fg_power_props;
+ chip->bms_psy_d.num_properties = ARRAY_SIZE(fg_power_props);
+ chip->bms_psy_d.get_property = fg_power_get_property;
+ chip->bms_psy_d.set_property = fg_power_set_property;
+ chip->bms_psy_d.external_power_changed = fg_external_power_changed;
+ chip->bms_psy_d.property_is_writeable = fg_property_is_writeable;
+
+ bms_psy_cfg.drv_data = chip;
+ bms_psy_cfg.supplied_to = fg_supplicants;
+ bms_psy_cfg.num_supplicants = ARRAY_SIZE(fg_supplicants);
+ bms_psy_cfg.of_node = NULL;
+ chip->bms_psy = devm_power_supply_register(chip->dev,
+ &chip->bms_psy_d,
+ &bms_psy_cfg);
+ if (IS_ERR(chip->bms_psy)) {
+ pr_err("batt failed to register rc = %ld\n",
+ PTR_ERR(chip->bms_psy));
+ goto of_init_fail;
+ }
+ chip->power_supply_registered = true;
+ /*
+ * Just initialize the batt_psy_name here. Power supply
+ * will be obtained later.
+ */
+ chip->batt_psy_name = "battery";
+
+ if (chip->mem_base) {
+ rc = fg_dfs_create(chip);
+ if (rc < 0) {
+ pr_err("failed to create debugfs rc = %d\n", rc);
+ goto cancel_work;
+ }
+ }
+
+ schedule_work(&chip->init_work);
+
+ pr_info("FG Probe success - FG Revision DIG:%d.%d ANA:%d.%d PMIC subtype=%d\n",
+ chip->revision[DIG_MAJOR], chip->revision[DIG_MINOR],
+ chip->revision[ANA_MAJOR], chip->revision[ANA_MINOR],
+ chip->pmic_subtype);
+
+ return rc;
+
+cancel_work:
+ cancel_delayed_work_sync(&chip->update_jeita_setting);
+ cancel_delayed_work_sync(&chip->update_sram_data);
+ cancel_delayed_work_sync(&chip->update_temp_work);
+ cancel_delayed_work_sync(&chip->check_empty_work);
+ cancel_delayed_work_sync(&chip->batt_profile_init);
+ alarm_try_to_cancel(&chip->fg_cap_learning_alarm);
+ cancel_work_sync(&chip->set_resume_soc_work);
+ cancel_work_sync(&chip->fg_cap_learning_work);
+ cancel_work_sync(&chip->dump_sram);
+ cancel_work_sync(&chip->status_change_work);
+ cancel_work_sync(&chip->cycle_count_work);
+ cancel_work_sync(&chip->update_esr_work);
+ cancel_work_sync(&chip->rslow_comp_work);
+ cancel_work_sync(&chip->sysfs_restart_work);
+ cancel_work_sync(&chip->gain_comp_work);
+ cancel_work_sync(&chip->init_work);
+ cancel_work_sync(&chip->charge_full_work);
+ cancel_work_sync(&chip->bcl_hi_power_work);
+ cancel_work_sync(&chip->esr_extract_config_work);
+of_init_fail:
+ mutex_destroy(&chip->rslow_comp.lock);
+ mutex_destroy(&chip->rw_lock);
+ mutex_destroy(&chip->cyc_ctr.lock);
+ mutex_destroy(&chip->learning_data.learning_lock);
+ mutex_destroy(&chip->sysfs_restart_lock);
+ wakeup_source_trash(&chip->resume_soc_wakeup_source.source);
+ wakeup_source_trash(&chip->empty_check_wakeup_source.source);
+ wakeup_source_trash(&chip->memif_wakeup_source.source);
+ wakeup_source_trash(&chip->profile_wakeup_source.source);
+ wakeup_source_trash(&chip->update_temp_wakeup_source.source);
+ wakeup_source_trash(&chip->update_sram_wakeup_source.source);
+ wakeup_source_trash(&chip->gain_comp_wakeup_source.source);
+ wakeup_source_trash(&chip->capacity_learning_wakeup_source.source);
+ wakeup_source_trash(&chip->esr_extract_wakeup_source.source);
+ return rc;
+}
+
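+/*
+ * On resume, reschedule the temperature and SRAM update work for whatever is
+ * left of their periods relative to the last update timestamps, or
+ * immediately if the period has already elapsed.
+ */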
+static void check_and_update_sram_data(struct fg_chip *chip)
+{
+ unsigned long current_time = 0, next_update_time, time_left;
+
+ get_current_time(&current_time);
+
+ next_update_time = chip->last_temp_update_time
+ + (TEMP_PERIOD_UPDATE_MS / 1000);
+
+ if (next_update_time > current_time)
+ time_left = next_update_time - current_time;
+ else
+ time_left = 0;
+
+ schedule_delayed_work(
+ &chip->update_temp_work, msecs_to_jiffies(time_left * 1000));
+
+ next_update_time = chip->last_sram_update_time
+ + (fg_sram_update_period_ms / 1000);
+
+ if (next_update_time > current_time)
+ time_left = next_update_time - current_time;
+ else
+ time_left = 0;
+
+ schedule_delayed_work(
+ &chip->update_sram_data, msecs_to_jiffies(time_left * 1000));
+}
+
+static int fg_suspend(struct device *dev)
+{
+ struct fg_chip *chip = dev_get_drvdata(dev);
+
+ if (!chip->sw_rbias_ctrl)
+ return 0;
+
+ cancel_delayed_work(&chip->update_temp_work);
+ cancel_delayed_work(&chip->update_sram_data);
+
+ return 0;
+}
+
+static int fg_resume(struct device *dev)
+{
+ struct fg_chip *chip = dev_get_drvdata(dev);
+
+ if (!chip->sw_rbias_ctrl)
+ return 0;
+
+ check_and_update_sram_data(chip);
+ return 0;
+}
+
+static const struct dev_pm_ops qpnp_fg_pm_ops = {
+ .suspend = fg_suspend,
+ .resume = fg_resume,
+};
+
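+/*
+ * Module-parameter setter for sense_type: accepts only 0 or 1 and, if the
+ * "bms" power supply is already up, pushes the new sense type to the chip
+ * via set_prop_sense_type().
+ */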
+static int fg_sense_type_set(const char *val, const struct kernel_param *kp)
+{
+ int rc;
+ struct power_supply *bms_psy;
+ struct fg_chip *chip;
+ int old_fg_sense_type = fg_sense_type;
+
+ rc = param_set_int(val, kp);
+ if (rc) {
+ pr_err("Unable to set fg_sense_type: %d\n", rc);
+ return rc;
+ }
+
+ if (fg_sense_type != 0 && fg_sense_type != 1) {
+ pr_err("Bad value %d\n", fg_sense_type);
+ fg_sense_type = old_fg_sense_type;
+ return -EINVAL;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("fg_sense_type set to %d\n", fg_sense_type);
+
+ bms_psy = power_supply_get_by_name("bms");
+ if (!bms_psy) {
+ pr_err("bms psy not found\n");
+ return 0;
+ }
+
+ chip = power_supply_get_drvdata(bms_psy);
+ rc = set_prop_sense_type(chip, fg_sense_type);
+ return rc;
+}
+
+static struct kernel_param_ops fg_sense_type_ops = {
+ .set = fg_sense_type_set,
+ .get = param_get_int,
+};
+
+module_param_cb(sense_type, &fg_sense_type_ops, &fg_sense_type, 0644);
+
+static int fg_restart_set(const char *val, const struct kernel_param *kp)
+{
+ struct power_supply *bms_psy;
+ struct fg_chip *chip;
+
+ bms_psy = power_supply_get_by_name("bms");
+ if (!bms_psy) {
+ pr_err("bms psy not found\n");
+ return 0;
+ }
+ chip = power_supply_get_drvdata(bms_psy);
+
+ mutex_lock(&chip->sysfs_restart_lock);
+ if (fg_restart != 0) {
+ mutex_unlock(&chip->sysfs_restart_lock);
+ return 0;
+ }
+ fg_restart = 1;
+ mutex_unlock(&chip->sysfs_restart_lock);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("fuel gauge restart initiated from sysfs...\n");
+
+ schedule_work(&chip->sysfs_restart_work);
+ return 0;
+}
+
+static struct kernel_param_ops fg_restart_ops = {
+ .set = fg_restart_set,
+ .get = param_get_int,
+};
+
+module_param_cb(restart, &fg_restart_ops, &fg_restart, 0644);
+
+static struct platform_driver fg_driver = {
+ .driver = {
+ .name = QPNP_FG_DEV_NAME,
+ .of_match_table = fg_match_table,
+ .pm = &qpnp_fg_pm_ops,
+ },
+ .probe = fg_probe,
+ .remove = fg_remove,
+};
+
+static int __init fg_init(void)
+{
+ return platform_driver_register(&fg_driver);
+}
+
+static void __exit fg_exit(void)
+{
+ platform_driver_unregister(&fg_driver);
+}
+
+module_init(fg_init);
+module_exit(fg_exit);
+
+MODULE_DESCRIPTION("QPNP Fuel Gauge Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" QPNP_FG_DEV_NAME);
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index ea205100644d..4beaddff47b3 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -188,6 +188,11 @@ static int __weak_chg_icl_ua = 500000;
module_param_named(
weak_chg_icl_ua, __weak_chg_icl_ua, int, S_IRUSR | S_IWUSR);
+static int __try_sink_enabled = 1;
+module_param_named(
+ try_sink_enabled, __try_sink_enabled, int, 0600
+);
+
#define MICRO_1P5A 1500000
#define MICRO_P1A 100000
#define OTG_DEFAULT_DEGLITCH_TIME_MS 50
@@ -1658,6 +1663,18 @@ static int smb2_init_hw(struct smb2 *chip)
return rc;
}
+ /*
+ * Allow the DRP.DFP time to be extended by tPDdebounce.
+ */
+ rc = smblib_masked_write(chg, TAPER_TIMER_SEL_CFG_REG,
+ TYPEC_DRP_DFP_TIME_CFG_BIT,
+ TYPEC_DRP_DFP_TIME_CFG_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure DRP.DFP time rc=%d\n",
+ rc);
+ return rc;
+ }
+
/* configure float charger options */
switch (chip->dt.float_option) {
case 1:
@@ -2236,6 +2253,7 @@ static int smb2_probe(struct platform_device *pdev)
chg->dev = &pdev->dev;
chg->param = v1_params;
chg->debug_mask = &__debug_mask;
+ chg->try_sink_enabled = &__try_sink_enabled;
chg->weak_chg_icl_ua = &__weak_chg_icl_ua;
chg->mode = PARALLEL_MASTER;
chg->irq_info = smb2_irqs;
diff --git a/drivers/power/supply/qcom/qpnp-smbcharger.c b/drivers/power/supply/qcom/qpnp-smbcharger.c
new file mode 100644
index 000000000000..a2863dcf7389
--- /dev/null
+++ b/drivers/power/supply/qcom/qpnp-smbcharger.c
@@ -0,0 +1,8472 @@
+/* Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "SMBCHG: %s: " fmt, __func__
+
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+#include <linux/gpio.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/power_supply.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/bitops.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/machine.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/debugfs.h>
+#include <linux/leds.h>
+#include <linux/rtc.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/batterydata-lib.h>
+#include <linux/of_batterydata.h>
+#include <linux/msm_bcl.h>
+#include <linux/ktime.h>
+#include <linux/extcon.h>
+#include <linux/pmic-voter.h>
+
+/* Mask/Bit helpers */
+#define _SMB_MASK(BITS, POS) \
+ ((unsigned char)(((1 << (BITS)) - 1) << (POS)))
+#define SMB_MASK(LEFT_BIT_POS, RIGHT_BIT_POS) \
+ _SMB_MASK((LEFT_BIT_POS) - (RIGHT_BIT_POS) + 1, \
+ (RIGHT_BIT_POS))
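
Editorial note: the two macros above build a contiguous bit mask between a left and a right bit position. A minimal standalone sketch, not part of the patch, showing what they expand to for the masks used later in this file:

#include <stdio.h>

#define _SMB_MASK(BITS, POS) \
        ((unsigned char)(((1 << (BITS)) - 1) << (POS)))
#define SMB_MASK(LEFT_BIT_POS, RIGHT_BIT_POS) \
        _SMB_MASK((LEFT_BIT_POS) - (RIGHT_BIT_POS) + 1, (RIGHT_BIT_POS))

int main(void)
{
        /* SMB_MASK(4, 0) covers bits 4..0 -> 0x1f (e.g. USBIN_INPUT_MASK) */
        printf("SMB_MASK(4, 0) = 0x%02x\n", SMB_MASK(4, 0));
        /* SMB_MASK(2, 1) covers bits 2..1 -> 0x06 (e.g. CHG_TYPE_MASK) */
        printf("SMB_MASK(2, 1) = 0x%02x\n", SMB_MASK(2, 1));
        return 0;
}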
+/* Config registers */
+struct smbchg_regulator {
+ struct regulator_desc rdesc;
+ struct regulator_dev *rdev;
+};
+
+struct parallel_usb_cfg {
+ struct power_supply *psy;
+ int min_current_thr_ma;
+ int min_9v_current_thr_ma;
+ int allowed_lowering_ma;
+ int current_max_ma;
+ bool avail;
+ struct mutex lock;
+ int initial_aicl_ma;
+ ktime_t last_disabled;
+ bool enabled_once;
+};
+
+struct ilim_entry {
+ int vmin_uv;
+ int vmax_uv;
+ int icl_pt_ma;
+ int icl_lv_ma;
+ int icl_hv_ma;
+};
+
+struct ilim_map {
+ int num;
+ struct ilim_entry *entries;
+};
+
+struct smbchg_version_tables {
+ const int *dc_ilim_ma_table;
+ int dc_ilim_ma_len;
+ const int *usb_ilim_ma_table;
+ int usb_ilim_ma_len;
+ const int *iterm_ma_table;
+ int iterm_ma_len;
+ const int *fcc_comp_table;
+ int fcc_comp_len;
+ const int *aicl_rerun_period_table;
+ int aicl_rerun_period_len;
+ int rchg_thr_mv;
+};
+
+struct smbchg_chip {
+ struct device *dev;
+ struct platform_device *pdev;
+ struct regmap *regmap;
+ int schg_version;
+
+ /* peripheral register address bases */
+ u16 chgr_base;
+ u16 bat_if_base;
+ u16 usb_chgpth_base;
+ u16 dc_chgpth_base;
+ u16 otg_base;
+ u16 misc_base;
+
+ int fake_battery_soc;
+ u8 revision[4];
+
+ /* configuration parameters */
+ int iterm_ma;
+ int usb_max_current_ma;
+ int typec_current_ma;
+ int dc_max_current_ma;
+ int dc_target_current_ma;
+ int cfg_fastchg_current_ma;
+ int fastchg_current_ma;
+ int vfloat_mv;
+ int fastchg_current_comp;
+ int float_voltage_comp;
+ int resume_delta_mv;
+ int safety_time;
+ int prechg_safety_time;
+ int bmd_pin_src;
+ int jeita_temp_hard_limit;
+ int aicl_rerun_period_s;
+ bool use_vfloat_adjustments;
+ bool iterm_disabled;
+ bool bmd_algo_disabled;
+ bool soft_vfloat_comp_disabled;
+ bool chg_enabled;
+ bool charge_unknown_battery;
+ bool chg_inhibit_en;
+ bool chg_inhibit_source_fg;
+ bool low_volt_dcin;
+ bool cfg_chg_led_support;
+ bool cfg_chg_led_sw_ctrl;
+ bool vbat_above_headroom;
+ bool force_aicl_rerun;
+ bool hvdcp3_supported;
+ bool restricted_charging;
+ bool skip_usb_suspend_for_fake_battery;
+ bool hvdcp_not_supported;
+ bool otg_pinctrl;
+ u8 original_usbin_allowance;
+ struct parallel_usb_cfg parallel;
+ struct delayed_work parallel_en_work;
+ struct dentry *debug_root;
+ struct smbchg_version_tables tables;
+
+ /* wipower params */
+ struct ilim_map wipower_default;
+ struct ilim_map wipower_pt;
+ struct ilim_map wipower_div2;
+ struct qpnp_vadc_chip *vadc_dev;
+ bool wipower_dyn_icl_avail;
+ struct ilim_entry current_ilim;
+ struct mutex wipower_config;
+ bool wipower_configured;
+ struct qpnp_adc_tm_btm_param param;
+
+ /* flash current prediction */
+ int rpara_uohm;
+ int rslow_uohm;
+ int vled_max_uv;
+
+ /* vfloat adjustment */
+ int max_vbat_sample;
+ int n_vbat_samples;
+
+ /* status variables */
+ int wake_reasons;
+ int previous_soc;
+ int usb_online;
+ bool dc_present;
+ bool usb_present;
+ bool batt_present;
+ int otg_retries;
+ ktime_t otg_enable_time;
+ bool aicl_deglitch_short;
+ bool safety_timer_en;
+ bool aicl_complete;
+ bool usb_ov_det;
+ bool otg_pulse_skip_dis;
+ const char *battery_type;
+ enum power_supply_type usb_supply_type;
+ bool very_weak_charger;
+ bool parallel_charger_detected;
+ bool chg_otg_enabled;
+ bool flash_triggered;
+ bool flash_active;
+ bool icl_disabled;
+ u32 wa_flags;
+ int usb_icl_delta;
+ bool typec_dfp;
+ unsigned int usb_current_max;
+ unsigned int usb_health;
+
+ /* jeita and temperature */
+ bool batt_hot;
+ bool batt_cold;
+ bool batt_warm;
+ bool batt_cool;
+ unsigned int thermal_levels;
+ unsigned int therm_lvl_sel;
+ unsigned int *thermal_mitigation;
+
+ /* irqs */
+ int batt_hot_irq;
+ int batt_warm_irq;
+ int batt_cool_irq;
+ int batt_cold_irq;
+ int batt_missing_irq;
+ int vbat_low_irq;
+ int chg_hot_irq;
+ int chg_term_irq;
+ int taper_irq;
+ bool taper_irq_enabled;
+ struct mutex taper_irq_lock;
+ int recharge_irq;
+ int fastchg_irq;
+ int wdog_timeout_irq;
+ int power_ok_irq;
+ int dcin_uv_irq;
+ int usbin_uv_irq;
+ int usbin_ov_irq;
+ int src_detect_irq;
+ int otg_fail_irq;
+ int otg_oc_irq;
+ int aicl_done_irq;
+ int usbid_change_irq;
+ int chg_error_irq;
+ bool enable_aicl_wake;
+
+ /* psy */
+ struct power_supply_desc usb_psy_d;
+ struct power_supply *usb_psy;
+ struct power_supply_desc batt_psy_d;
+ struct power_supply *batt_psy;
+ struct power_supply_desc dc_psy_d;
+ struct power_supply *dc_psy;
+ struct power_supply *bms_psy;
+ struct power_supply *typec_psy;
+ int dc_psy_type;
+ const char *bms_psy_name;
+ const char *battery_psy_name;
+
+ struct regulator *dpdm_reg;
+ struct smbchg_regulator otg_vreg;
+ struct smbchg_regulator ext_otg_vreg;
+ struct work_struct usb_set_online_work;
+ struct delayed_work vfloat_adjust_work;
+ struct delayed_work hvdcp_det_work;
+ spinlock_t sec_access_lock;
+ struct mutex therm_lvl_lock;
+ struct mutex usb_set_online_lock;
+ struct mutex pm_lock;
+ /* aicl deglitch workaround */
+ unsigned long first_aicl_seconds;
+ int aicl_irq_count;
+ struct mutex usb_status_lock;
+ bool hvdcp_3_det_ignore_uv;
+ struct completion src_det_lowered;
+ struct completion src_det_raised;
+ struct completion usbin_uv_lowered;
+ struct completion usbin_uv_raised;
+ int pulse_cnt;
+ struct led_classdev led_cdev;
+ bool skip_usb_notification;
+ u32 vchg_adc_channel;
+ struct qpnp_vadc_chip *vchg_vadc_dev;
+
+ /* voters */
+ struct votable *fcc_votable;
+ struct votable *usb_icl_votable;
+ struct votable *dc_icl_votable;
+ struct votable *usb_suspend_votable;
+ struct votable *dc_suspend_votable;
+ struct votable *battchg_suspend_votable;
+ struct votable *hw_aicl_rerun_disable_votable;
+ struct votable *hw_aicl_rerun_enable_indirect_votable;
+ struct votable *aicl_deglitch_short_votable;
+
+ /* extcon for VBUS / ID notification to USB */
+ struct extcon_dev *extcon;
+};
+
+enum qpnp_schg {
+ QPNP_SCHG,
+ QPNP_SCHG_LITE,
+};
+
+static char *version_str[] = {
+ [QPNP_SCHG] = "SCHG",
+ [QPNP_SCHG_LITE] = "SCHG_LITE",
+};
+
+enum pmic_subtype {
+ PMI8994 = 10,
+ PMI8950 = 17,
+ PMI8996 = 19,
+ PMI8937 = 55,
+};
+
+enum smbchg_wa {
+ SMBCHG_AICL_DEGLITCH_WA = BIT(0),
+ SMBCHG_HVDCP_9V_EN_WA = BIT(1),
+ SMBCHG_USB100_WA = BIT(2),
+ SMBCHG_BATT_OV_WA = BIT(3),
+ SMBCHG_CC_ESR_WA = BIT(4),
+ SMBCHG_FLASH_ICL_DISABLE_WA = BIT(5),
+ SMBCHG_RESTART_WA = BIT(6),
+ SMBCHG_FLASH_BUCK_SWITCH_FREQ_WA = BIT(7),
+};
+
+enum print_reason {
+ PR_REGISTER = BIT(0),
+ PR_INTERRUPT = BIT(1),
+ PR_STATUS = BIT(2),
+ PR_DUMP = BIT(3),
+ PR_PM = BIT(4),
+ PR_MISC = BIT(5),
+ PR_WIPOWER = BIT(6),
+ PR_TYPEC = BIT(7),
+};
+
+enum wake_reason {
+ PM_PARALLEL_CHECK = BIT(0),
+ PM_REASON_VFLOAT_ADJUST = BIT(1),
+ PM_ESR_PULSE = BIT(2),
+ PM_PARALLEL_TAPER = BIT(3),
+ PM_DETECT_HVDCP = BIT(4),
+};
+
+/* fcc_voters */
+#define ESR_PULSE_FCC_VOTER "ESR_PULSE_FCC_VOTER"
+#define BATT_TYPE_FCC_VOTER "BATT_TYPE_FCC_VOTER"
+#define RESTRICTED_CHG_FCC_VOTER "RESTRICTED_CHG_FCC_VOTER"
+
+/* ICL VOTERS */
+#define PSY_ICL_VOTER "PSY_ICL_VOTER"
+#define THERMAL_ICL_VOTER "THERMAL_ICL_VOTER"
+#define HVDCP_ICL_VOTER "HVDCP_ICL_VOTER"
+#define USER_ICL_VOTER "USER_ICL_VOTER"
+#define WEAK_CHARGER_ICL_VOTER "WEAK_CHARGER_ICL_VOTER"
+#define SW_AICL_ICL_VOTER "SW_AICL_ICL_VOTER"
+#define CHG_SUSPEND_WORKAROUND_ICL_VOTER "CHG_SUSPEND_WORKAROUND_ICL_VOTER"
+
+/* USB SUSPEND VOTERS */
+/* userspace has suspended charging altogether */
+#define USER_EN_VOTER "USER_EN_VOTER"
+/*
+ * this specific path has been suspended through the power supply
+ * framework
+ */
+#define POWER_SUPPLY_EN_VOTER "POWER_SUPPLY_EN_VOTER"
+/*
+ * the usb driver has suspended this path by setting a current limit
+ * of < 2 mA
+ */
+#define USB_EN_VOTER "USB_EN_VOTER"
+/*
+ * the thermal daemon can suspend a charge path when the system
+ * temperature levels rise
+ */
+#define THERMAL_EN_VOTER "THERMAL_EN_VOTER"
+/*
+ * an external OTG supply is being used, suspend charge path so the
+ * charger does not accidentally try to charge from the external supply.
+ */
+#define OTG_EN_VOTER "OTG_EN_VOTER"
+/*
+ * the charger is very weak, do not draw any current from it
+ */
+#define WEAK_CHARGER_EN_VOTER "WEAK_CHARGER_EN_VOTER"
+/*
+ * fake battery voter, if battery id-resistance around 7.5 Kohm
+ * fake battery voter, used when the battery ID resistance is around 7.5 kohm
+#define FAKE_BATTERY_EN_VOTER "FAKE_BATTERY_EN_VOTER"
+
+/* battchg_enable_voters */
+ /* userspace has disabled battery charging */
+#define BATTCHG_USER_EN_VOTER "BATTCHG_USER_EN_VOTER"
+ /* battery charging disabled while loading battery profiles */
+#define BATTCHG_UNKNOWN_BATTERY_EN_VOTER "BATTCHG_UNKNOWN_BATTERY_EN_VOTER"
+
+/* hw_aicl_rerun_enable_indirect_voters */
+/* enabled via device tree */
+#define DEFAULT_CONFIG_HW_AICL_VOTER "DEFAULT_CONFIG_HW_AICL_VOTER"
+/* Varb workaround voter */
+#define VARB_WORKAROUND_VOTER "VARB_WORKAROUND_VOTER"
+/* SHUTDOWN workaround voter */
+#define SHUTDOWN_WORKAROUND_VOTER "SHUTDOWN_WORKAROUND_VOTER"
+
+/* hw_aicl_rerun_disable_voters */
+/* reflects the aggregated result of the indirect-enable votable's clients */
+#define HW_AICL_RERUN_ENABLE_INDIRECT_VOTER \
+ "HW_AICL_RERUN_ENABLE_INDIRECT_VOTER"
+/* Weak charger voter */
+#define WEAK_CHARGER_HW_AICL_VOTER "WEAK_CHARGER_HW_AICL_VOTER"
+
+/* aicl_short_deglitch_voters */
+/* Varb workaround voter */
+#define VARB_WORKAROUND_SHORT_DEGLITCH_VOTER \
+ "VARB_WRKARND_SHORT_DEGLITCH_VOTER"
+/* QC 2.0 */
+#define HVDCP_SHORT_DEGLITCH_VOTER "HVDCP_SHORT_DEGLITCH_VOTER"
+
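Editorial note: the strings above are client names for the pmic-voter framework used throughout this driver; each named client casts a vote on a votable and the driver acts on the aggregated result (see the vote() and get_effective_result_locked() calls later in this file). A standalone userspace sketch of the aggregation idea, not part of the patch, assuming the usual "lowest active vote wins" rule for current-limit votables:

#include <stdbool.h>
#include <stdio.h>

struct client_vote { const char *client; bool active; int value_ma; };

/* lowest active vote wins; -1 means no voters (the driver checks for < 0) */
static int effective_result(const struct client_vote *v, int n)
{
        int i, best = -1;

        for (i = 0; i < n; i++)
                if (v[i].active && (best < 0 || v[i].value_ma < best))
                        best = v[i].value_ma;
        return best;
}

int main(void)
{
        struct client_vote usb_icl[] = {
                { "PSY_ICL_VOTER",     true,  1500 },
                { "THERMAL_ICL_VOTER", true,  1000 },
                { "USER_ICL_VOTER",    false, 0 },
        };

        /* the thermal vote is the lowest active one, so the effective ICL is 1000 */
        printf("effective usb_icl = %d mA\n", effective_result(usb_icl, 3));
        return 0;
}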
+static const unsigned int smbchg_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_NONE,
+};
+
+static int smbchg_debug_mask;
+module_param_named(
+ debug_mask, smbchg_debug_mask, int, S_IRUSR | S_IWUSR
+);
+
+static int smbchg_parallel_en = 1;
+module_param_named(
+ parallel_en, smbchg_parallel_en, int, S_IRUSR | S_IWUSR
+);
+
+static int smbchg_main_chg_fcc_percent = 50;
+module_param_named(
+ main_chg_fcc_percent, smbchg_main_chg_fcc_percent,
+ int, S_IRUSR | S_IWUSR
+);
+
+static int smbchg_main_chg_icl_percent = 60;
+module_param_named(
+ main_chg_icl_percent, smbchg_main_chg_icl_percent,
+ int, S_IRUSR | S_IWUSR
+);
+
+static int smbchg_default_hvdcp_icl_ma = 1800;
+module_param_named(
+ default_hvdcp_icl_ma, smbchg_default_hvdcp_icl_ma,
+ int, S_IRUSR | S_IWUSR
+);
+
+static int smbchg_default_hvdcp3_icl_ma = 3000;
+module_param_named(
+ default_hvdcp3_icl_ma, smbchg_default_hvdcp3_icl_ma,
+ int, S_IRUSR | S_IWUSR
+);
+
+static int smbchg_default_dcp_icl_ma = 1800;
+module_param_named(
+ default_dcp_icl_ma, smbchg_default_dcp_icl_ma,
+ int, S_IRUSR | S_IWUSR
+);
+
+static int wipower_dyn_icl_en;
+module_param_named(
+ dynamic_icl_wipower_en, wipower_dyn_icl_en,
+ int, S_IRUSR | S_IWUSR
+);
+
+static int wipower_dcin_interval = ADC_MEAS1_INTERVAL_2P0MS;
+module_param_named(
+ wipower_dcin_interval, wipower_dcin_interval,
+ int, S_IRUSR | S_IWUSR
+);
+
+#define WIPOWER_DEFAULT_HYSTERISIS_UV 250000
+static int wipower_dcin_hyst_uv = WIPOWER_DEFAULT_HYSTERISIS_UV;
+module_param_named(
+ wipower_dcin_hyst_uv, wipower_dcin_hyst_uv,
+ int, S_IRUSR | S_IWUSR
+);
+
+#define pr_smb(reason, fmt, ...) \
+ do { \
+ if (smbchg_debug_mask & (reason)) \
+ pr_info(fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define pr_smb_rt(reason, fmt, ...) \
+ do { \
+ if (smbchg_debug_mask & (reason)) \
+ pr_info_ratelimited(fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+static int smbchg_read(struct smbchg_chip *chip, u8 *val,
+ u16 addr, int count)
+{
+ int rc = 0;
+ struct platform_device *pdev = chip->pdev;
+
+ if (addr == 0) {
+ dev_err(chip->dev, "addr cannot be zero addr=0x%02x sid=0x%02x rc=%d\n",
+ addr, to_spmi_device(pdev->dev.parent)->usid, rc);
+ return -EINVAL;
+ }
+
+ rc = regmap_bulk_read(chip->regmap, addr, val, count);
+ if (rc) {
+ dev_err(chip->dev, "spmi read failed addr=0x%02x sid=0x%02x rc=%d\n",
+ addr, to_spmi_device(pdev->dev.parent)->usid,
+ rc);
+ return rc;
+ }
+ return 0;
+}
+
+/*
+ * Writes to the register specified by the base address, limited by the bit mask
+ *
+ * Do not use this function for register writes if possible. Instead use the
+ * smbchg_masked_write function.
+ *
+ * The sec_access_lock must be held for all register writes and this function
+ * does not do that. If this function is used, please hold the spinlock or
+ * random secure access writes may fail.
+ */
+static int smbchg_masked_write_raw(struct smbchg_chip *chip, u16 base, u8 mask,
+ u8 val)
+{
+ int rc;
+
+ rc = regmap_update_bits(chip->regmap, base, mask, val);
+ if (rc) {
+ dev_err(chip->dev, "spmi write failed: addr=%03X, rc=%d\n",
+ base, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/*
+ * Writes to the register specified by the base address, limited by the bit mask
+ *
+ * This function holds a spin lock to ensure that secure access register writes go
+ * through. If the secure access unlock register is armed, any old register
+ * write can unarm the secure access unlock, causing the next write to fail.
+ *
+ * Note: do not use this for sec_access registers. Instead use the function
+ * below: smbchg_sec_masked_write
+ */
+static int smbchg_masked_write(struct smbchg_chip *chip, u16 base, u8 mask,
+ u8 val)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&chip->sec_access_lock, flags);
+ rc = smbchg_masked_write_raw(chip, base, mask, val);
+ spin_unlock_irqrestore(&chip->sec_access_lock, flags);
+
+ return rc;
+}
+
+/*
+ * Unlocks sec access and writes to the register specified.
+ *
+ * This function holds a spin lock to exclude other register writes while
+ * the two writes are taking place.
+ */
+#define SEC_ACCESS_OFFSET 0xD0
+#define SEC_ACCESS_VALUE 0xA5
+#define PERIPHERAL_MASK 0xFF
+static int smbchg_sec_masked_write(struct smbchg_chip *chip, u16 base, u8 mask,
+ u8 val)
+{
+ unsigned long flags;
+ int rc;
+ u16 peripheral_base = base & (~PERIPHERAL_MASK);
+
+ spin_lock_irqsave(&chip->sec_access_lock, flags);
+
+ rc = smbchg_masked_write_raw(chip, peripheral_base + SEC_ACCESS_OFFSET,
+ SEC_ACCESS_VALUE, SEC_ACCESS_VALUE);
+ if (rc) {
+ dev_err(chip->dev, "Unable to unlock sec_access: %d", rc);
+ goto out;
+ }
+
+ rc = smbchg_masked_write_raw(chip, base, mask, val);
+
+out:
+ spin_unlock_irqrestore(&chip->sec_access_lock, flags);
+ return rc;
+}
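
Editorial note: a small standalone sketch, not part of the patch, of the address arithmetic smbchg_sec_masked_write() performs before the unlock write. The base address used here is hypothetical; only the 0xD0 offset and 0xFF mask come from the code above.

#include <stdio.h>

#define SEC_ACCESS_OFFSET       0xD0
#define PERIPHERAL_MASK         0xFF

int main(void)
{
        unsigned int base = 0x13F2;     /* hypothetical sec-access protected register */
        unsigned int peripheral_base = base & ~PERIPHERAL_MASK;

        /* the unlock value 0xA5 is written to this register before the real write */
        printf("peripheral base = 0x%04x, sec access reg = 0x%04x\n",
               peripheral_base, peripheral_base + SEC_ACCESS_OFFSET);
        return 0;
}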
+
+static void smbchg_stay_awake(struct smbchg_chip *chip, int reason)
+{
+ int reasons;
+
+ mutex_lock(&chip->pm_lock);
+ reasons = chip->wake_reasons | reason;
+ if (reasons != 0 && chip->wake_reasons == 0) {
+ pr_smb(PR_PM, "staying awake: 0x%02x (bit %d)\n",
+ reasons, reason);
+ pm_stay_awake(chip->dev);
+ }
+ chip->wake_reasons = reasons;
+ mutex_unlock(&chip->pm_lock);
+}
+
+static void smbchg_relax(struct smbchg_chip *chip, int reason)
+{
+ int reasons;
+
+ mutex_lock(&chip->pm_lock);
+ reasons = chip->wake_reasons & (~reason);
+ if (reasons == 0 && chip->wake_reasons != 0) {
+ pr_smb(PR_PM, "relaxing: 0x%02x (bit %d)\n",
+ reasons, reason);
+ pm_relax(chip->dev);
+ }
+ chip->wake_reasons = reasons;
+ mutex_unlock(&chip->pm_lock);
+}
+
+enum pwr_path_type {
+ UNKNOWN = 0,
+ PWR_PATH_BATTERY = 1,
+ PWR_PATH_USB = 2,
+ PWR_PATH_DC = 3,
+};
+
+#define PWR_PATH 0x08
+#define PWR_PATH_MASK 0x03
+static enum pwr_path_type smbchg_get_pwr_path(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + PWR_PATH, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read PWR_PATH rc = %d\n", rc);
+ return PWR_PATH_BATTERY;
+ }
+
+ return reg & PWR_PATH_MASK;
+}
+
+#define RID_STS 0xB
+#define RID_MASK 0xF
+#define IDEV_STS 0x8
+#define RT_STS 0x10
+#define USBID_MSB 0xE
+#define USBIN_UV_BIT BIT(0)
+#define USBIN_OV_BIT BIT(1)
+#define USBIN_SRC_DET_BIT BIT(2)
+#define FMB_STS_MASK SMB_MASK(3, 0)
+#define USBID_GND_THRESHOLD 0x495
+static bool is_otg_present_schg(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+ u8 usbid_reg[2];
+ u16 usbid_val;
+ /*
+ * After the falling edge of the usbid change interrupt occurs,
+ * there may still be some time before the ADC conversion for USB RID
+ * finishes in the fuel gauge. In the worst case, this could be up to
+ * 15 ms.
+ *
+ * Sleep for 20 ms (minimum msleep time) to wait for the conversion to
+ * finish and the USB RID status register to be updated before trying
+ * to detect OTG insertions.
+ */
+
+ msleep(20);
+
+ /*
+ * There is a problem with USBID conversions on PMI8994 revisions
+ * 2.0.0. As a workaround, check that the cable is not
+ * detected as factory test before enabling OTG.
+ */
+ rc = smbchg_read(chip, &reg, chip->misc_base + IDEV_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read IDEV_STS rc = %d\n", rc);
+ return false;
+ }
+
+ if ((reg & FMB_STS_MASK) != 0) {
+ pr_smb(PR_STATUS, "IDEV_STS = %02x, not ground\n", reg);
+ return false;
+ }
+
+ rc = smbchg_read(chip, usbid_reg, chip->usb_chgpth_base + USBID_MSB, 2);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read USBID rc = %d\n", rc);
+ return false;
+ }
+ usbid_val = (usbid_reg[0] << 8) | usbid_reg[1];
+
+ if (usbid_val > USBID_GND_THRESHOLD) {
+ pr_smb(PR_STATUS, "USBID = 0x%04x, too high to be ground\n",
+ usbid_val);
+ return false;
+ }
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RID_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't read usb rid status rc = %d\n", rc);
+ return false;
+ }
+
+ pr_smb(PR_STATUS, "RID_STS = %02x\n", reg);
+
+ return (reg & RID_MASK) == 0;
+}
+
+#define RID_GND_DET_STS BIT(2)
+static bool is_otg_present_schg_lite(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->otg_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't read otg RT status rc = %d\n", rc);
+ return false;
+ }
+
+ return !!(reg & RID_GND_DET_STS);
+}
+
+static bool is_otg_present(struct smbchg_chip *chip)
+{
+ if (chip->schg_version == QPNP_SCHG_LITE)
+ return is_otg_present_schg_lite(chip);
+
+ return is_otg_present_schg(chip);
+}
+
+#define USBIN_9V BIT(5)
+#define USBIN_UNREG BIT(4)
+#define USBIN_LV BIT(3)
+#define DCIN_9V BIT(2)
+#define DCIN_UNREG BIT(1)
+#define DCIN_LV BIT(0)
+#define INPUT_STS 0x0D
+#define DCIN_UV_BIT BIT(0)
+#define DCIN_OV_BIT BIT(1)
+static bool is_dc_present(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->dc_chgpth_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read dc status rc = %d\n", rc);
+ return false;
+ }
+
+ if ((reg & DCIN_UV_BIT) || (reg & DCIN_OV_BIT))
+ return false;
+
+ return true;
+}
+
+static bool is_usb_present(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb rt status rc = %d\n", rc);
+ return false;
+ }
+ if (!(reg & USBIN_SRC_DET_BIT) || (reg & USBIN_OV_BIT))
+ return false;
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + INPUT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb status rc = %d\n", rc);
+ return false;
+ }
+
+ return !!(reg & (USBIN_9V | USBIN_UNREG | USBIN_LV));
+}
+
+static char *usb_type_str[] = {
+ "SDP", /* bit 0 */
+ "OTHER", /* bit 1 */
+ "DCP", /* bit 2 */
+ "CDP", /* bit 3 */
+ "NONE", /* bit 4 error case */
+};
+
+#define N_TYPE_BITS 4
+#define TYPE_BITS_OFFSET 4
+
+static int get_type(u8 type_reg)
+{
+ unsigned long type = type_reg;
+ type >>= TYPE_BITS_OFFSET;
+ return find_first_bit(&type, N_TYPE_BITS);
+}
+
+/* helper to return the string of USB type */
+static inline char *get_usb_type_name(int type)
+{
+ return usb_type_str[type];
+}
+
+static enum power_supply_type usb_type_enum[] = {
+ POWER_SUPPLY_TYPE_USB, /* bit 0 */
+ POWER_SUPPLY_TYPE_USB_DCP, /* bit 1 */
+ POWER_SUPPLY_TYPE_USB_DCP, /* bit 2 */
+ POWER_SUPPLY_TYPE_USB_CDP, /* bit 3 */
+ POWER_SUPPLY_TYPE_USB_DCP, /* bit 4 error case, report DCP */
+};
+
+/* helper to return enum power_supply_type of USB type */
+static inline enum power_supply_type get_usb_supply_type(int type)
+{
+ return usb_type_enum[type];
+}
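
Editorial note: a standalone sketch, not part of the patch, of the decode performed by get_type() together with the two lookup tables above. The IDEV_STS value 0x40 is hypothetical, and ffs() stands in for the kernel's find_first_bit().

#include <stdio.h>
#include <strings.h>    /* ffs() */

int main(void)
{
        unsigned char idev_sts = 0x40;          /* hypothetical APSD result register */
        int type = ffs(idev_sts >> 4) - 1;      /* bit index within the upper nibble */
        const char *names[] = { "SDP", "OTHER", "DCP", "CDP", "NONE" };

        /* 0x40 >> 4 = 0x4, first set bit is bit 2, so the charger is reported as DCP */
        printf("type bit %d = %s\n", type, names[type]);
        return 0;
}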
+
+static bool is_src_detect_high(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb rt status rc = %d\n", rc);
+ return false;
+ }
+	return !!(reg & USBIN_SRC_DET_BIT);
+}
+
+static void read_usb_type(struct smbchg_chip *chip, char **usb_type_name,
+ enum power_supply_type *usb_supply_type)
+{
+ int rc, type;
+ u8 reg;
+
+ if (!is_src_detect_high(chip)) {
+ pr_smb(PR_MISC, "src det low\n");
+ *usb_type_name = "Absent";
+ *usb_supply_type = POWER_SUPPLY_TYPE_UNKNOWN;
+ return;
+ }
+
+ rc = smbchg_read(chip, &reg, chip->misc_base + IDEV_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read status 5 rc = %d\n", rc);
+ *usb_type_name = "Other";
+ *usb_supply_type = POWER_SUPPLY_TYPE_UNKNOWN;
+ return;
+ }
+ type = get_type(reg);
+ *usb_type_name = get_usb_type_name(type);
+ *usb_supply_type = get_usb_supply_type(type);
+}
+
+#define CHGR_STS 0x0E
+#define BATT_LESS_THAN_2V BIT(4)
+#define CHG_HOLD_OFF_BIT BIT(3)
+#define CHG_TYPE_MASK SMB_MASK(2, 1)
+#define CHG_TYPE_SHIFT 1
+#define BATT_NOT_CHG_VAL 0x0
+#define BATT_PRE_CHG_VAL 0x1
+#define BATT_FAST_CHG_VAL 0x2
+#define BATT_TAPER_CHG_VAL 0x3
+#define CHG_INHIBIT_BIT BIT(1)
+#define BAT_TCC_REACHED_BIT BIT(7)
+static int get_prop_batt_status(struct smbchg_chip *chip)
+{
+ int rc, status = POWER_SUPPLY_STATUS_DISCHARGING;
+ u8 reg = 0, chg_type;
+ bool charger_present, chg_inhibit;
+
+ charger_present = is_usb_present(chip) | is_dc_present(chip) |
+ chip->hvdcp_3_det_ignore_uv;
+ if (!charger_present)
+ return POWER_SUPPLY_STATUS_DISCHARGING;
+
+ rc = smbchg_read(chip, &reg, chip->chgr_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Unable to read RT_STS rc = %d\n", rc);
+ return POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+
+ if (reg & BAT_TCC_REACHED_BIT)
+ return POWER_SUPPLY_STATUS_FULL;
+
+ chg_inhibit = reg & CHG_INHIBIT_BIT;
+ if (chg_inhibit)
+ return POWER_SUPPLY_STATUS_FULL;
+
+ rc = smbchg_read(chip, &reg, chip->chgr_base + CHGR_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Unable to read CHGR_STS rc = %d\n", rc);
+ return POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+
+ if (reg & CHG_HOLD_OFF_BIT) {
+ /*
+ * when chg hold off happens the battery is
+ * not charging
+ */
+ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ goto out;
+ }
+
+ chg_type = (reg & CHG_TYPE_MASK) >> CHG_TYPE_SHIFT;
+
+ if (chg_type == BATT_NOT_CHG_VAL && !chip->hvdcp_3_det_ignore_uv)
+ status = POWER_SUPPLY_STATUS_DISCHARGING;
+ else
+ status = POWER_SUPPLY_STATUS_CHARGING;
+out:
+ pr_smb_rt(PR_MISC, "CHGR_STS = 0x%02x\n", reg);
+ return status;
+}
+
+#define BAT_PRES_STATUS 0x08
+#define BAT_PRES_BIT BIT(7)
+static int get_prop_batt_present(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->bat_if_base + BAT_PRES_STATUS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Unable to read CHGR_STS rc = %d\n", rc);
+ return 0;
+ }
+
+ return !!(reg & BAT_PRES_BIT);
+}
+
+static int get_prop_charge_type(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg, chg_type;
+
+ rc = smbchg_read(chip, &reg, chip->chgr_base + CHGR_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Unable to read CHGR_STS rc = %d\n", rc);
+ return 0;
+ }
+
+ chg_type = (reg & CHG_TYPE_MASK) >> CHG_TYPE_SHIFT;
+ if (chg_type == BATT_NOT_CHG_VAL)
+ return POWER_SUPPLY_CHARGE_TYPE_NONE;
+ else if (chg_type == BATT_TAPER_CHG_VAL)
+ return POWER_SUPPLY_CHARGE_TYPE_TAPER;
+ else if (chg_type == BATT_FAST_CHG_VAL)
+ return POWER_SUPPLY_CHARGE_TYPE_FAST;
+ else if (chg_type == BATT_PRE_CHG_VAL)
+ return POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+
+ return POWER_SUPPLY_CHARGE_TYPE_NONE;
+}
+
+static int set_property_on_fg(struct smbchg_chip *chip,
+ enum power_supply_property prop, int val)
+{
+ int rc;
+ union power_supply_propval ret = {0, };
+
+ if (!chip->bms_psy && chip->bms_psy_name)
+ chip->bms_psy =
+ power_supply_get_by_name((char *)chip->bms_psy_name);
+ if (!chip->bms_psy) {
+ pr_smb(PR_STATUS, "no bms psy found\n");
+ return -EINVAL;
+ }
+
+ ret.intval = val;
+ rc = power_supply_set_property(chip->bms_psy, prop, &ret);
+ if (rc)
+ pr_smb(PR_STATUS,
+ "bms psy does not allow updating prop %d rc = %d\n",
+ prop, rc);
+
+ return rc;
+}
+
+static int get_property_from_fg(struct smbchg_chip *chip,
+ enum power_supply_property prop, int *val)
+{
+ int rc;
+ union power_supply_propval ret = {0, };
+
+ if (!chip->bms_psy && chip->bms_psy_name)
+ chip->bms_psy =
+ power_supply_get_by_name((char *)chip->bms_psy_name);
+ if (!chip->bms_psy) {
+ pr_smb(PR_STATUS, "no bms psy found\n");
+ return -EINVAL;
+ }
+
+ rc = power_supply_get_property(chip->bms_psy, prop, &ret);
+ if (rc) {
+ pr_smb(PR_STATUS,
+ "bms psy doesn't support reading prop %d rc = %d\n",
+ prop, rc);
+ return rc;
+ }
+
+ *val = ret.intval;
+ return rc;
+}
+
+#define DEFAULT_BATT_CAPACITY 50
+static int get_prop_batt_capacity(struct smbchg_chip *chip)
+{
+ int capacity, rc;
+
+ if (chip->fake_battery_soc >= 0)
+ return chip->fake_battery_soc;
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_CAPACITY, &capacity);
+ if (rc) {
+ pr_smb(PR_STATUS, "Couldn't get capacity rc = %d\n", rc);
+ capacity = DEFAULT_BATT_CAPACITY;
+ }
+ return capacity;
+}
+
+#define DEFAULT_BATT_TEMP 200
+static int get_prop_batt_temp(struct smbchg_chip *chip)
+{
+ int temp, rc;
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_TEMP, &temp);
+ if (rc) {
+ pr_smb(PR_STATUS, "Couldn't get temperature rc = %d\n", rc);
+ temp = DEFAULT_BATT_TEMP;
+ }
+ return temp;
+}
+
+#define DEFAULT_BATT_CURRENT_NOW 0
+static int get_prop_batt_current_now(struct smbchg_chip *chip)
+{
+ int ua, rc;
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_CURRENT_NOW, &ua);
+ if (rc) {
+ pr_smb(PR_STATUS, "Couldn't get current rc = %d\n", rc);
+ ua = DEFAULT_BATT_CURRENT_NOW;
+ }
+ return ua;
+}
+
+#define DEFAULT_BATT_VOLTAGE_NOW 0
+static int get_prop_batt_voltage_now(struct smbchg_chip *chip)
+{
+ int uv, rc;
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_VOLTAGE_NOW, &uv);
+ if (rc) {
+ pr_smb(PR_STATUS, "Couldn't get voltage rc = %d\n", rc);
+ uv = DEFAULT_BATT_VOLTAGE_NOW;
+ }
+ return uv;
+}
+
+#define DEFAULT_BATT_VOLTAGE_MAX_DESIGN 4200000
+static int get_prop_batt_voltage_max_design(struct smbchg_chip *chip)
+{
+ int uv, rc;
+
+ rc = get_property_from_fg(chip,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, &uv);
+ if (rc) {
+ pr_smb(PR_STATUS, "Couldn't get voltage rc = %d\n", rc);
+ uv = DEFAULT_BATT_VOLTAGE_MAX_DESIGN;
+ }
+ return uv;
+}
+
+static int get_prop_batt_health(struct smbchg_chip *chip)
+{
+ if (chip->batt_hot)
+ return POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (chip->batt_cold)
+ return POWER_SUPPLY_HEALTH_COLD;
+ else if (chip->batt_warm)
+ return POWER_SUPPLY_HEALTH_WARM;
+ else if (chip->batt_cool)
+ return POWER_SUPPLY_HEALTH_COOL;
+ else
+ return POWER_SUPPLY_HEALTH_GOOD;
+}
+
+static void get_property_from_typec(struct smbchg_chip *chip,
+ enum power_supply_property property,
+ union power_supply_propval *prop)
+{
+ int rc;
+
+ rc = power_supply_get_property(chip->typec_psy,
+ property, prop);
+ if (rc)
+ pr_smb(PR_TYPEC,
+ "typec psy doesn't support reading prop %d rc = %d\n",
+ property, rc);
+}
+
+static void update_typec_status(struct smbchg_chip *chip)
+{
+ union power_supply_propval type = {0, };
+ union power_supply_propval capability = {0, };
+
+ get_property_from_typec(chip, POWER_SUPPLY_PROP_TYPE, &type);
+ if (type.intval != POWER_SUPPLY_TYPE_UNKNOWN) {
+ get_property_from_typec(chip,
+ POWER_SUPPLY_PROP_CURRENT_CAPABILITY,
+ &capability);
+ chip->typec_current_ma = capability.intval;
+ pr_smb(PR_TYPEC, "SMB Type-C mode = %d, current=%d\n",
+ type.intval, capability.intval);
+ } else {
+ pr_smb(PR_TYPEC,
+ "typec detection not completed continuing with USB update\n");
+ }
+}
+
+/*
+ * finds the index of the closest value in the array. If there are two that
+ * are equally close, the lower index will be returned
+ */
+static int find_closest_in_array(const int *arr, int len, int val)
+{
+ int i, closest = 0;
+
+ if (len == 0)
+ return closest;
+ for (i = 0; i < len; i++)
+ if (abs(val - arr[i]) < abs(val - arr[closest]))
+ closest = i;
+
+ return closest;
+}
+
+/* finds the index of the closest smaller value in the array. */
+static int find_smaller_in_array(const int *table, int val, int len)
+{
+ int i;
+
+ for (i = len - 1; i >= 0; i--) {
+ if (val >= table[i])
+ break;
+ }
+
+ return i;
+}
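
Editorial note: a standalone sketch, not part of the patch, showing how find_smaller_in_array() rounds a requested current down to a table entry; the table slice is the start of usb_ilim_ma_table_8994 defined below.

#include <stdio.h>

/* copied verbatim from the driver above */
static int find_smaller_in_array(const int *table, int val, int len)
{
        int i;

        for (i = len - 1; i >= 0; i--)
                if (val >= table[i])
                        break;
        return i;       /* -1 when val is below table[0] */
}

int main(void)
{
        const int ilim[] = { 300, 400, 450, 475, 500, 550, 600 };

        /* a 520 mA request rounds down to index 4 (500 mA) */
        printf("520 -> index %d\n", find_smaller_in_array(ilim, 520, 7));
        /* a 200 mA request has no smaller entry, so the index is -1 */
        printf("200 -> index %d\n", find_smaller_in_array(ilim, 200, 7));
        return 0;
}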
+
+static const int iterm_ma_table_8994[] = {
+ 300,
+ 50,
+ 100,
+ 150,
+ 200,
+ 250,
+ 500,
+ 600
+};
+
+static const int iterm_ma_table_8996[] = {
+ 300,
+ 50,
+ 100,
+ 150,
+ 200,
+ 250,
+ 400,
+ 500
+};
+
+static const int usb_ilim_ma_table_8994[] = {
+ 300,
+ 400,
+ 450,
+ 475,
+ 500,
+ 550,
+ 600,
+ 650,
+ 700,
+ 900,
+ 950,
+ 1000,
+ 1100,
+ 1200,
+ 1400,
+ 1450,
+ 1500,
+ 1600,
+ 1800,
+ 1850,
+ 1880,
+ 1910,
+ 1930,
+ 1950,
+ 1970,
+ 2000,
+ 2050,
+ 2100,
+ 2300,
+ 2400,
+ 2500,
+ 3000
+};
+
+static const int usb_ilim_ma_table_8996[] = {
+ 300,
+ 400,
+ 500,
+ 600,
+ 700,
+ 800,
+ 900,
+ 1000,
+ 1100,
+ 1200,
+ 1300,
+ 1400,
+ 1450,
+ 1500,
+ 1550,
+ 1600,
+ 1700,
+ 1800,
+ 1900,
+ 1950,
+ 2000,
+ 2050,
+ 2100,
+ 2200,
+ 2300,
+ 2400,
+ 2500,
+ 2600,
+ 2700,
+ 2800,
+ 2900,
+ 3000
+};
+
+static int dc_ilim_ma_table_8994[] = {
+ 300,
+ 400,
+ 450,
+ 475,
+ 500,
+ 550,
+ 600,
+ 650,
+ 700,
+ 900,
+ 950,
+ 1000,
+ 1100,
+ 1200,
+ 1400,
+ 1450,
+ 1500,
+ 1600,
+ 1800,
+ 1850,
+ 1880,
+ 1910,
+ 1930,
+ 1950,
+ 1970,
+ 2000,
+};
+
+static int dc_ilim_ma_table_8996[] = {
+ 300,
+ 400,
+ 500,
+ 600,
+ 700,
+ 800,
+ 900,
+ 1000,
+ 1100,
+ 1200,
+ 1300,
+ 1400,
+ 1450,
+ 1500,
+ 1550,
+ 1600,
+ 1700,
+ 1800,
+ 1900,
+ 1950,
+ 2000,
+ 2050,
+ 2100,
+ 2200,
+ 2300,
+ 2400,
+};
+
+static const int fcc_comp_table_8994[] = {
+ 250,
+ 700,
+ 900,
+ 1200,
+};
+
+static const int fcc_comp_table_8996[] = {
+ 250,
+ 1100,
+ 1200,
+ 1500,
+};
+
+static const int aicl_rerun_period[] = {
+ 45,
+ 90,
+ 180,
+ 360,
+};
+
+static const int aicl_rerun_period_schg_lite[] = {
+ 3, /* 2.8s */
+ 6, /* 5.6s */
+ 11, /* 11.3s */
+ 23, /* 22.5s */
+ 45,
+ 90,
+ 180,
+ 360,
+};
+
+static void use_pmi8994_tables(struct smbchg_chip *chip)
+{
+ chip->tables.usb_ilim_ma_table = usb_ilim_ma_table_8994;
+ chip->tables.usb_ilim_ma_len = ARRAY_SIZE(usb_ilim_ma_table_8994);
+ chip->tables.dc_ilim_ma_table = dc_ilim_ma_table_8994;
+ chip->tables.dc_ilim_ma_len = ARRAY_SIZE(dc_ilim_ma_table_8994);
+ chip->tables.iterm_ma_table = iterm_ma_table_8994;
+ chip->tables.iterm_ma_len = ARRAY_SIZE(iterm_ma_table_8994);
+ chip->tables.fcc_comp_table = fcc_comp_table_8994;
+ chip->tables.fcc_comp_len = ARRAY_SIZE(fcc_comp_table_8994);
+ chip->tables.rchg_thr_mv = 200;
+ chip->tables.aicl_rerun_period_table = aicl_rerun_period;
+ chip->tables.aicl_rerun_period_len = ARRAY_SIZE(aicl_rerun_period);
+}
+
+static void use_pmi8996_tables(struct smbchg_chip *chip)
+{
+ chip->tables.usb_ilim_ma_table = usb_ilim_ma_table_8996;
+ chip->tables.usb_ilim_ma_len = ARRAY_SIZE(usb_ilim_ma_table_8996);
+ chip->tables.dc_ilim_ma_table = dc_ilim_ma_table_8996;
+ chip->tables.dc_ilim_ma_len = ARRAY_SIZE(dc_ilim_ma_table_8996);
+ chip->tables.iterm_ma_table = iterm_ma_table_8996;
+ chip->tables.iterm_ma_len = ARRAY_SIZE(iterm_ma_table_8996);
+ chip->tables.fcc_comp_table = fcc_comp_table_8996;
+ chip->tables.fcc_comp_len = ARRAY_SIZE(fcc_comp_table_8996);
+ chip->tables.rchg_thr_mv = 150;
+ chip->tables.aicl_rerun_period_table = aicl_rerun_period;
+ chip->tables.aicl_rerun_period_len = ARRAY_SIZE(aicl_rerun_period);
+}
+
+#define CMD_CHG_REG 0x42
+#define EN_BAT_CHG_BIT BIT(1)
+static int smbchg_charging_en(struct smbchg_chip *chip, bool en)
+{
+ /* The en bit is configured active low */
+ return smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG,
+ EN_BAT_CHG_BIT, en ? 0 : EN_BAT_CHG_BIT);
+}
+
+#define CMD_IL 0x40
+#define USBIN_SUSPEND_BIT BIT(4)
+#define CURRENT_100_MA 100
+#define CURRENT_150_MA 150
+#define CURRENT_500_MA 500
+#define CURRENT_900_MA 900
+#define CURRENT_1500_MA 1500
+#define SUSPEND_CURRENT_MA 2
+#define ICL_OVERRIDE_BIT BIT(2)
+static int smbchg_usb_suspend(struct smbchg_chip *chip, bool suspend)
+{
+ int rc;
+
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ USBIN_SUSPEND_BIT, suspend ? USBIN_SUSPEND_BIT : 0);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set usb suspend rc = %d\n", rc);
+ return rc;
+}
+
+#define DCIN_SUSPEND_BIT BIT(3)
+static int smbchg_dc_suspend(struct smbchg_chip *chip, bool suspend)
+{
+ int rc = 0;
+
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ DCIN_SUSPEND_BIT, suspend ? DCIN_SUSPEND_BIT : 0);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set dc suspend rc = %d\n", rc);
+ return rc;
+}
+
+#define IL_CFG 0xF2
+#define DCIN_INPUT_MASK SMB_MASK(4, 0)
+static int smbchg_set_dc_current_max(struct smbchg_chip *chip, int current_ma)
+{
+ int i;
+ u8 dc_cur_val;
+
+ i = find_smaller_in_array(chip->tables.dc_ilim_ma_table,
+ current_ma, chip->tables.dc_ilim_ma_len);
+
+ if (i < 0) {
+ dev_err(chip->dev, "Cannot find %dma current_table\n",
+ current_ma);
+ return -EINVAL;
+ }
+
+ chip->dc_max_current_ma = chip->tables.dc_ilim_ma_table[i];
+ dc_cur_val = i & DCIN_INPUT_MASK;
+
+ pr_smb(PR_STATUS, "dc current set to %d mA\n",
+ chip->dc_max_current_ma);
+ return smbchg_sec_masked_write(chip, chip->dc_chgpth_base + IL_CFG,
+ DCIN_INPUT_MASK, dc_cur_val);
+}
+
+#define AICL_WL_SEL_CFG 0xF5
+#define AICL_WL_SEL_MASK SMB_MASK(1, 0)
+#define AICL_WL_SEL_SCHG_LITE_MASK SMB_MASK(2, 0)
+static int smbchg_set_aicl_rerun_period_s(struct smbchg_chip *chip,
+ int period_s)
+{
+ int i;
+ u8 reg, mask;
+
+ i = find_smaller_in_array(chip->tables.aicl_rerun_period_table,
+ period_s, chip->tables.aicl_rerun_period_len);
+
+ if (i < 0) {
+ dev_err(chip->dev, "Cannot find %ds in aicl rerun period\n",
+ period_s);
+ return -EINVAL;
+ }
+
+ if (chip->schg_version == QPNP_SCHG_LITE)
+ mask = AICL_WL_SEL_SCHG_LITE_MASK;
+ else
+ mask = AICL_WL_SEL_MASK;
+
+ reg = i & mask;
+
+ pr_smb(PR_STATUS, "aicl rerun period set to %ds\n",
+ chip->tables.aicl_rerun_period_table[i]);
+ return smbchg_sec_masked_write(chip,
+ chip->dc_chgpth_base + AICL_WL_SEL_CFG,
+ mask, reg);
+}
+
+static struct power_supply *get_parallel_psy(struct smbchg_chip *chip)
+{
+ if (!chip->parallel.avail)
+ return NULL;
+ if (chip->parallel.psy)
+ return chip->parallel.psy;
+ chip->parallel.psy = power_supply_get_by_name("usb-parallel");
+ if (!chip->parallel.psy)
+ pr_smb(PR_STATUS, "parallel charger not found\n");
+ return chip->parallel.psy;
+}
+
+static void smbchg_usb_update_online_work(struct work_struct *work)
+{
+ struct smbchg_chip *chip = container_of(work,
+ struct smbchg_chip,
+ usb_set_online_work);
+ bool user_enabled = !get_client_vote(chip->usb_suspend_votable,
+ USER_EN_VOTER);
+ int online;
+
+ online = user_enabled && chip->usb_present && !chip->very_weak_charger;
+
+ mutex_lock(&chip->usb_set_online_lock);
+ if (chip->usb_online != online) {
+ pr_smb(PR_MISC, "setting usb psy online = %d\n", online);
+ chip->usb_online = online;
+ power_supply_changed(chip->usb_psy);
+ }
+ mutex_unlock(&chip->usb_set_online_lock);
+}
+
+#define CHGPTH_CFG 0xF4
+#define CFG_USB_2_3_SEL_BIT BIT(7)
+#define CFG_USB_2 0
+#define CFG_USB_3 BIT(7)
+#define USBIN_INPUT_MASK SMB_MASK(4, 0)
+#define USBIN_MODE_CHG_BIT BIT(0)
+#define USBIN_LIMITED_MODE 0
+#define USBIN_HC_MODE BIT(0)
+#define USB51_MODE_BIT BIT(1)
+#define USB51_100MA 0
+#define USB51_500MA BIT(1)
+static int smbchg_set_high_usb_chg_current(struct smbchg_chip *chip,
+ int current_ma)
+{
+ int i, rc;
+ u8 usb_cur_val;
+
+ if (current_ma == CURRENT_100_MA) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ CFG_USB_2_3_SEL_BIT, CFG_USB_2);
+ if (rc < 0) {
+ pr_err("Couldn't set CFG_USB_2 rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT | USB51_MODE_BIT | ICL_OVERRIDE_BIT,
+ USBIN_LIMITED_MODE | USB51_100MA | ICL_OVERRIDE_BIT);
+ if (rc < 0) {
+ pr_err("Couldn't set ICL_OVERRIDE rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_STATUS,
+ "Forcing 100mA current limit\n");
+ chip->usb_max_current_ma = CURRENT_100_MA;
+ return rc;
+ }
+
+ i = find_smaller_in_array(chip->tables.usb_ilim_ma_table,
+ current_ma, chip->tables.usb_ilim_ma_len);
+ if (i < 0) {
+ dev_err(chip->dev,
+ "Cannot find %dma current_table using %d\n",
+ current_ma, CURRENT_150_MA);
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ CFG_USB_2_3_SEL_BIT, CFG_USB_3);
+ rc |= smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT | USB51_MODE_BIT,
+ USBIN_LIMITED_MODE | USB51_100MA);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set %dmA rc=%d\n",
+ CURRENT_150_MA, rc);
+ else
+ chip->usb_max_current_ma = 150;
+ return rc;
+ }
+
+ usb_cur_val = i & USBIN_INPUT_MASK;
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + IL_CFG,
+ USBIN_INPUT_MASK, usb_cur_val);
+ if (rc < 0) {
+ dev_err(chip->dev, "cannot write to config c rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT, USBIN_HC_MODE);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't write cfg 5 rc = %d\n", rc);
+ chip->usb_max_current_ma = chip->tables.usb_ilim_ma_table[i];
+ return rc;
+}
+
+/*
+ * If APSD results are used:
+ * - if SDP is detected, the hardware looks at the 500 mA setting:
+ *   if set, it draws 500 mA; if unset, it draws 100 mA
+ * - if CDP/DCP is detected, it looks at the 0x0C setting,
+ *   i.e. the values in 0x41[1, 0] do not matter
+ */
+static int smbchg_set_usb_current_max(struct smbchg_chip *chip,
+ int current_ma)
+{
+ int rc = 0;
+
+ /*
+ * if the battery is not present, do not allow the usb ICL to lower in
+ * order to avoid browning out the device during a hotswap.
+ */
+ if (!chip->batt_present && current_ma < chip->usb_max_current_ma) {
+ pr_info_ratelimited("Ignoring usb current->%d, battery is absent\n",
+ current_ma);
+ return 0;
+ }
+ pr_smb(PR_STATUS, "USB current_ma = %d\n", current_ma);
+
+ if (current_ma <= SUSPEND_CURRENT_MA) {
+ /* suspend the usb if current <= 2mA */
+ rc = vote(chip->usb_suspend_votable, USB_EN_VOTER, true, 0);
+ chip->usb_max_current_ma = 0;
+ goto out;
+ } else {
+ rc = vote(chip->usb_suspend_votable, USB_EN_VOTER, false, 0);
+ }
+
+ switch (chip->usb_supply_type) {
+ case POWER_SUPPLY_TYPE_USB:
+ if ((current_ma < CURRENT_150_MA) &&
+ (chip->wa_flags & SMBCHG_USB100_WA))
+ current_ma = CURRENT_150_MA;
+
+ if (current_ma < CURRENT_150_MA) {
+ /* force 100mA */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ CFG_USB_2_3_SEL_BIT, CFG_USB_2);
+ if (rc < 0) {
+ pr_err("Couldn't set CHGPTH_CFG rc = %d\n", rc);
+ goto out;
+ }
+ rc = smbchg_masked_write(chip,
+ chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT | USB51_MODE_BIT,
+ USBIN_LIMITED_MODE | USB51_100MA);
+ if (rc < 0) {
+ pr_err("Couldn't set CMD_IL rc = %d\n", rc);
+ goto out;
+ }
+ chip->usb_max_current_ma = 100;
+ }
+ /* specific current values */
+ if (current_ma == CURRENT_150_MA) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ CFG_USB_2_3_SEL_BIT, CFG_USB_3);
+ if (rc < 0) {
+ pr_err("Couldn't set CHGPTH_CFG rc = %d\n", rc);
+ goto out;
+ }
+ rc = smbchg_masked_write(chip,
+ chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT | USB51_MODE_BIT,
+ USBIN_LIMITED_MODE | USB51_100MA);
+ if (rc < 0) {
+ pr_err("Couldn't set CMD_IL rc = %d\n", rc);
+ goto out;
+ }
+ chip->usb_max_current_ma = 150;
+ }
+ if (current_ma == CURRENT_500_MA) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ CFG_USB_2_3_SEL_BIT, CFG_USB_2);
+ if (rc < 0) {
+ pr_err("Couldn't set CHGPTH_CFG rc = %d\n", rc);
+ goto out;
+ }
+ rc = smbchg_masked_write(chip,
+ chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT | USB51_MODE_BIT,
+ USBIN_LIMITED_MODE | USB51_500MA);
+ if (rc < 0) {
+ pr_err("Couldn't set CMD_IL rc = %d\n", rc);
+ goto out;
+ }
+ chip->usb_max_current_ma = 500;
+ }
+ if (current_ma == CURRENT_900_MA) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ CFG_USB_2_3_SEL_BIT, CFG_USB_3);
+ if (rc < 0) {
+ pr_err("Couldn't set CHGPTH_CFG rc = %d\n", rc);
+ goto out;
+ }
+ rc = smbchg_masked_write(chip,
+ chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT | USB51_MODE_BIT,
+ USBIN_LIMITED_MODE | USB51_500MA);
+ if (rc < 0) {
+ pr_err("Couldn't set CMD_IL rc = %d\n", rc);
+ goto out;
+ }
+ chip->usb_max_current_ma = 900;
+ }
+ break;
+ case POWER_SUPPLY_TYPE_USB_CDP:
+ if (current_ma < CURRENT_1500_MA) {
+ /* use override for CDP */
+ rc = smbchg_masked_write(chip,
+ chip->usb_chgpth_base + CMD_IL,
+ ICL_OVERRIDE_BIT, ICL_OVERRIDE_BIT);
+ if (rc < 0)
+ pr_err("Couldn't set override rc = %d\n", rc);
+ }
+ /* fall through */
+ default:
+ rc = smbchg_set_high_usb_chg_current(chip, current_ma);
+ if (rc < 0)
+ pr_err("Couldn't set %dmA rc = %d\n", current_ma, rc);
+ break;
+ }
+
+out:
+ pr_smb(PR_STATUS, "usb type = %d current set to %d mA\n",
+ chip->usb_supply_type, chip->usb_max_current_ma);
+ return rc;
+}
+
+#define USBIN_HVDCP_STS 0x0C
+#define USBIN_HVDCP_SEL_BIT BIT(4)
+#define USBIN_HVDCP_SEL_9V_BIT BIT(1)
+#define SCHG_LITE_USBIN_HVDCP_SEL_9V_BIT BIT(2)
+#define SCHG_LITE_USBIN_HVDCP_SEL_BIT BIT(0)
+static int smbchg_get_min_parallel_current_ma(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg, hvdcp_sel, hvdcp_sel_9v;
+
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + USBIN_HVDCP_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb status rc = %d\n", rc);
+ return 0;
+ }
+ if (chip->schg_version == QPNP_SCHG_LITE) {
+ hvdcp_sel = SCHG_LITE_USBIN_HVDCP_SEL_BIT;
+ hvdcp_sel_9v = SCHG_LITE_USBIN_HVDCP_SEL_9V_BIT;
+ } else {
+ hvdcp_sel = USBIN_HVDCP_SEL_BIT;
+ hvdcp_sel_9v = USBIN_HVDCP_SEL_9V_BIT;
+ }
+
+ if ((reg & hvdcp_sel) && (reg & hvdcp_sel_9v))
+ return chip->parallel.min_9v_current_thr_ma;
+ return chip->parallel.min_current_thr_ma;
+}
+
+static bool is_hvdcp_present(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg, hvdcp_sel;
+
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + USBIN_HVDCP_STS, 1);
+ if (rc < 0) {
+ pr_err("Couldn't read hvdcp status rc = %d\n", rc);
+ return false;
+ }
+
+ pr_smb(PR_STATUS, "HVDCP_STS = 0x%02x\n", reg);
+ /*
+ * If a valid HVDCP is detected, notify it to the usb_psy only
+ * if USB is still present.
+ */
+ if (chip->schg_version == QPNP_SCHG_LITE)
+ hvdcp_sel = SCHG_LITE_USBIN_HVDCP_SEL_BIT;
+ else
+ hvdcp_sel = USBIN_HVDCP_SEL_BIT;
+
+ if ((reg & hvdcp_sel) && is_usb_present(chip))
+ return true;
+
+ return false;
+}
+
+#define FCC_CFG 0xF2
+#define FCC_500MA_VAL 0x4
+#define FCC_MASK SMB_MASK(4, 0)
+static int smbchg_set_fastchg_current_raw(struct smbchg_chip *chip,
+ int current_ma)
+{
+ int i, rc;
+ u8 cur_val;
+
+ /* the fcc enumerations are the same as the usb currents */
+ i = find_smaller_in_array(chip->tables.usb_ilim_ma_table,
+ current_ma, chip->tables.usb_ilim_ma_len);
+ if (i < 0) {
+ dev_err(chip->dev,
+ "Cannot find %dma current_table using %d\n",
+ current_ma, CURRENT_500_MA);
+
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + FCC_CFG,
+ FCC_MASK,
+ FCC_500MA_VAL);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set %dmA rc=%d\n",
+ CURRENT_500_MA, rc);
+ else
+ chip->fastchg_current_ma = 500;
+ return rc;
+ }
+
+ if (chip->tables.usb_ilim_ma_table[i] == chip->fastchg_current_ma) {
+ pr_smb(PR_STATUS, "skipping fastchg current request: %d\n",
+ chip->fastchg_current_ma);
+ return 0;
+ }
+
+ cur_val = i & FCC_MASK;
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + FCC_CFG,
+ FCC_MASK, cur_val);
+ if (rc < 0) {
+ dev_err(chip->dev, "cannot write to fcc cfg rc = %d\n", rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "fastcharge current requested %d, set to %d\n",
+ current_ma, chip->tables.usb_ilim_ma_table[cur_val]);
+
+ chip->fastchg_current_ma = chip->tables.usb_ilim_ma_table[cur_val];
+ return rc;
+}
+
+#define ICL_STS_1_REG 0x7
+#define ICL_STS_2_REG 0x9
+#define ICL_STS_MASK 0x1F
+#define AICL_SUSP_BIT BIT(6)
+#define AICL_STS_BIT BIT(5)
+#define USBIN_SUSPEND_STS_BIT BIT(3)
+#define USBIN_ACTIVE_PWR_SRC_BIT BIT(1)
+#define DCIN_ACTIVE_PWR_SRC_BIT BIT(0)
+#define PARALLEL_REENABLE_TIMER_MS 1000
+#define PARALLEL_CHG_THRESHOLD_CURRENT 1800
+static bool smbchg_is_usbin_active_pwr_src(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + ICL_STS_2_REG, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Could not read usb icl sts 2: %d\n", rc);
+ return false;
+ }
+
+ return !(reg & USBIN_SUSPEND_STS_BIT)
+ && (reg & USBIN_ACTIVE_PWR_SRC_BIT);
+}
+
+static int smbchg_parallel_usb_charging_en(struct smbchg_chip *chip, bool en)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval pval = {0, };
+
+ if (!parallel_psy || !chip->parallel_charger_detected)
+ return 0;
+
+ pval.intval = en;
+ return power_supply_set_property(parallel_psy,
+ POWER_SUPPLY_PROP_CHARGING_ENABLED, &pval);
+}
+
+#define ESR_PULSE_CURRENT_DELTA_MA 200
+static int smbchg_sw_esr_pulse_en(struct smbchg_chip *chip, bool en)
+{
+ int rc, fg_current_now, icl_ma;
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_CURRENT_NOW,
+ &fg_current_now);
+ if (rc) {
+ pr_smb(PR_STATUS, "bms psy does not support OCV\n");
+ return 0;
+ }
+
+ icl_ma = max(chip->iterm_ma + ESR_PULSE_CURRENT_DELTA_MA,
+ fg_current_now - ESR_PULSE_CURRENT_DELTA_MA);
+ rc = vote(chip->fcc_votable, ESR_PULSE_FCC_VOTER, en, icl_ma);
+ if (rc < 0) {
+ pr_err("Couldn't Vote FCC en = %d rc = %d\n", en, rc);
+ return rc;
+ }
+ rc = smbchg_parallel_usb_charging_en(chip, !en);
+ return rc;
+}
+
+#define USB_AICL_CFG 0xF3
+#define AICL_EN_BIT BIT(2)
+static void smbchg_rerun_aicl(struct smbchg_chip *chip)
+{
+ pr_smb(PR_STATUS, "Rerunning AICL...\n");
+ smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, 0);
+ /* Add a delay so that AICL successfully clears */
+ msleep(50);
+ smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, AICL_EN_BIT);
+}
+
+static void taper_irq_en(struct smbchg_chip *chip, bool en)
+{
+ mutex_lock(&chip->taper_irq_lock);
+ if (en != chip->taper_irq_enabled) {
+ if (en) {
+ enable_irq(chip->taper_irq);
+ enable_irq_wake(chip->taper_irq);
+ } else {
+ disable_irq_wake(chip->taper_irq);
+ disable_irq_nosync(chip->taper_irq);
+ }
+ chip->taper_irq_enabled = en;
+ }
+ mutex_unlock(&chip->taper_irq_lock);
+}
+
+static int smbchg_get_aicl_level_ma(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + ICL_STS_1_REG, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Could not read usb icl sts 1: %d\n", rc);
+ return 0;
+ }
+ if (reg & AICL_SUSP_BIT) {
+ pr_warn("AICL suspended: %02x\n", reg);
+ return 0;
+ }
+ reg &= ICL_STS_MASK;
+ if (reg >= chip->tables.usb_ilim_ma_len) {
+ pr_warn("invalid AICL value: %02x\n", reg);
+ return 0;
+ }
+ return chip->tables.usb_ilim_ma_table[reg];
+}
+
+static void smbchg_parallel_usb_disable(struct smbchg_chip *chip)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval pval = {0, };
+ int fcc_ma, usb_icl_ma;
+
+ if (!parallel_psy || !chip->parallel_charger_detected)
+ return;
+ pr_smb(PR_STATUS, "disabling parallel charger\n");
+ chip->parallel.last_disabled = ktime_get_boottime();
+ taper_irq_en(chip, false);
+ chip->parallel.initial_aicl_ma = 0;
+ chip->parallel.current_max_ma = 0;
+ pval.intval = SUSPEND_CURRENT_MA * 1000;
+ power_supply_set_property(parallel_psy, POWER_SUPPLY_PROP_CURRENT_MAX,
+ &pval);
+
+ pval.intval = false;
+ power_supply_set_property(parallel_psy, POWER_SUPPLY_PROP_PRESENT,
+ &pval);
+
+ fcc_ma = get_effective_result_locked(chip->fcc_votable);
+ usb_icl_ma = get_effective_result_locked(chip->usb_icl_votable);
+ if (fcc_ma < 0)
+ pr_err("no voters for fcc, skip it\n");
+ else
+ smbchg_set_fastchg_current_raw(chip, fcc_ma);
+
+ if (usb_icl_ma < 0)
+ pr_err("no voters for usb_icl, skip it\n");
+ else
+ smbchg_set_usb_current_max(chip, usb_icl_ma);
+
+ smbchg_rerun_aicl(chip);
+}
+
+#define PARALLEL_TAPER_MAX_TRIES 3
+#define PARALLEL_FCC_PERCENT_REDUCTION 75
+#define MINIMUM_PARALLEL_FCC_MA 500
+#define CHG_ERROR_BIT BIT(0)
+#define BAT_TAPER_MODE_BIT BIT(6)
+static void smbchg_parallel_usb_taper(struct smbchg_chip *chip)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval pval = {0, };
+ int parallel_fcc_ma, tries = 0;
+ u8 reg = 0;
+
+ if (!parallel_psy || !chip->parallel_charger_detected)
+ return;
+
+ smbchg_stay_awake(chip, PM_PARALLEL_TAPER);
+try_again:
+ mutex_lock(&chip->parallel.lock);
+ if (chip->parallel.current_max_ma == 0) {
+ pr_smb(PR_STATUS, "Not parallel charging, skipping\n");
+ goto done;
+ }
+ power_supply_get_property(parallel_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+ tries += 1;
+ parallel_fcc_ma = pval.intval / 1000;
+ pr_smb(PR_STATUS, "try #%d parallel charger fcc = %d\n",
+ tries, parallel_fcc_ma);
+ if (parallel_fcc_ma < MINIMUM_PARALLEL_FCC_MA
+ || tries > PARALLEL_TAPER_MAX_TRIES) {
+ smbchg_parallel_usb_disable(chip);
+ goto done;
+ }
+ pval.intval = ((parallel_fcc_ma
+ * PARALLEL_FCC_PERCENT_REDUCTION) / 100);
+ pr_smb(PR_STATUS, "reducing FCC of parallel charger to %d\n",
+ pval.intval);
+ /* Change it to uA */
+ pval.intval *= 1000;
+ power_supply_set_property(parallel_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+ /*
+ * sleep here for 100 ms in order to make sure the charger has a chance
+ * to go back into constant current charging
+ */
+ mutex_unlock(&chip->parallel.lock);
+ msleep(100);
+
+ mutex_lock(&chip->parallel.lock);
+ if (chip->parallel.current_max_ma == 0) {
+ pr_smb(PR_STATUS, "Not parallel charging, skipping\n");
+ goto done;
+ }
+ smbchg_read(chip, &reg, chip->chgr_base + RT_STS, 1);
+ if (reg & BAT_TAPER_MODE_BIT) {
+ mutex_unlock(&chip->parallel.lock);
+ goto try_again;
+ }
+ taper_irq_en(chip, true);
+done:
+ mutex_unlock(&chip->parallel.lock);
+ smbchg_relax(chip, PM_PARALLEL_TAPER);
+}
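
Editorial note: a standalone sketch, not part of the patch, of the taper policy implemented above: the parallel charger's FCC is cut to PARALLEL_FCC_PERCENT_REDUCTION (75%) of its value per attempt, and parallel charging is disabled once it falls below MINIMUM_PARALLEL_FCC_MA (500 mA) or after PARALLEL_TAPER_MAX_TRIES attempts. The starting FCC is hypothetical.

#include <stdio.h>

int main(void)
{
        int fcc_ma = 1400, tries = 0;   /* hypothetical parallel-charger FCC */

        while (++tries <= 3 && fcc_ma >= 500) {
                fcc_ma = fcc_ma * 75 / 100;
                printf("try %d: parallel FCC reduced to %d mA\n", tries, fcc_ma);
        }
        return 0;
}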
+
+static void smbchg_parallel_usb_enable(struct smbchg_chip *chip,
+ int total_current_ma)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval pval = {0, };
+ int new_parallel_cl_ma, set_parallel_cl_ma, new_pmi_cl_ma, rc;
+ int current_table_index, target_icl_ma;
+ int fcc_ma, main_fastchg_current_ma;
+ int target_parallel_fcc_ma, supplied_parallel_fcc_ma;
+ int parallel_chg_fcc_percent;
+
+ if (!parallel_psy || !chip->parallel_charger_detected)
+ return;
+
+ pr_smb(PR_STATUS, "Attempting to enable parallel charger\n");
+ pval.intval = chip->vfloat_mv + 50;
+ rc = power_supply_set_property(parallel_psy,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX, &pval);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set Vflt on parallel psy rc: %d\n", rc);
+ return;
+ }
+ /* Set USB ICL */
+ target_icl_ma = get_effective_result_locked(chip->usb_icl_votable);
+ if (target_icl_ma < 0) {
+ pr_err("no voters for usb_icl, skip it\n");
+ return;
+ }
+ new_parallel_cl_ma = total_current_ma
+ * (100 - smbchg_main_chg_icl_percent) / 100;
+ taper_irq_en(chip, true);
+
+ pval.intval = true;
+ power_supply_set_property(parallel_psy, POWER_SUPPLY_PROP_PRESENT,
+ &pval);
+
+ pval.intval = new_parallel_cl_ma * 1000;
+ power_supply_set_property(parallel_psy, POWER_SUPPLY_PROP_CURRENT_MAX,
+ &pval);
+
+ /* read back the real amount of current we are getting */
+ power_supply_get_property(parallel_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
+ set_parallel_cl_ma = pval.intval / 1000;
+ chip->parallel.current_max_ma = new_parallel_cl_ma;
+ pr_smb(PR_MISC, "Requested ICL = %d from parallel, got %d\n",
+ new_parallel_cl_ma, set_parallel_cl_ma);
+ new_pmi_cl_ma = max(0, target_icl_ma - set_parallel_cl_ma);
+ pr_smb(PR_STATUS, "New Total USB current = %d[%d, %d]\n",
+ total_current_ma, new_pmi_cl_ma,
+ set_parallel_cl_ma);
+ smbchg_set_usb_current_max(chip, new_pmi_cl_ma);
+
+ /* begin splitting the fast charge current */
+ fcc_ma = get_effective_result_locked(chip->fcc_votable);
+ if (fcc_ma < 0) {
+ pr_err("no voters for fcc, skip it\n");
+ return;
+ }
+ parallel_chg_fcc_percent = 100 - smbchg_main_chg_fcc_percent;
+ target_parallel_fcc_ma = (fcc_ma * parallel_chg_fcc_percent) / 100;
+ pval.intval = target_parallel_fcc_ma * 1000;
+ power_supply_set_property(parallel_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+ /* check how much actual current is supplied by the parallel charger */
+ power_supply_get_property(parallel_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+ supplied_parallel_fcc_ma = pval.intval / 1000;
+ pr_smb(PR_MISC, "Requested FCC = %d from parallel, got %d\n",
+ target_parallel_fcc_ma, supplied_parallel_fcc_ma);
+
+ /* then for the main charger, use the left over FCC */
+ current_table_index = find_smaller_in_array(
+ chip->tables.usb_ilim_ma_table,
+ fcc_ma - supplied_parallel_fcc_ma,
+ chip->tables.usb_ilim_ma_len);
+ main_fastchg_current_ma =
+ chip->tables.usb_ilim_ma_table[current_table_index];
+ smbchg_set_fastchg_current_raw(chip, main_fastchg_current_ma);
+ pr_smb(PR_STATUS, "FCC = %d[%d, %d]\n", fcc_ma, main_fastchg_current_ma,
+ supplied_parallel_fcc_ma);
+
+ chip->parallel.enabled_once = true;
+
+ return;
+}
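
Editorial note: a standalone sketch, not part of the patch, of the current split computed above with the default module parameters (smbchg_main_chg_icl_percent = 60, smbchg_main_chg_fcc_percent = 50). The input totals are hypothetical, and the table rounding and read-back that the driver performs are ignored here.

#include <stdio.h>

int main(void)
{
        int total_icl_ma = 2000, fcc_ma = 3000; /* hypothetical totals */
        int main_icl_percent = 60, main_fcc_percent = 50;

        int parallel_icl_ma = total_icl_ma * (100 - main_icl_percent) / 100;
        int main_icl_ma = total_icl_ma - parallel_icl_ma;
        int parallel_fcc_ma = fcc_ma * (100 - main_fcc_percent) / 100;
        int main_fcc_ma = fcc_ma - parallel_fcc_ma;

        /* ICL: main 1200 mA, parallel 800 mA; FCC: main 1500 mA, parallel 1500 mA */
        printf("ICL: main %d mA, parallel %d mA\n", main_icl_ma, parallel_icl_ma);
        printf("FCC: main %d mA, parallel %d mA\n", main_fcc_ma, parallel_fcc_ma);
        return 0;
}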
+
+static bool smbchg_is_parallel_usb_ok(struct smbchg_chip *chip,
+ int *ret_total_current_ma)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval pval = {0, };
+ int min_current_thr_ma, rc, type;
+ int total_current_ma, current_limit_ma, parallel_cl_ma;
+ ktime_t kt_since_last_disable;
+ u8 reg;
+ int fcc_ma = get_effective_result_locked(chip->fcc_votable);
+ const char *fcc_voter
+ = get_effective_client_locked(chip->fcc_votable);
+ int usb_icl_ma = get_effective_result_locked(chip->usb_icl_votable);
+
+ if (!parallel_psy || !smbchg_parallel_en
+ || !chip->parallel_charger_detected) {
+ pr_smb(PR_STATUS, "Parallel charging not enabled\n");
+ return false;
+ }
+
+ if (fcc_ma < 0) {
+ pr_err("no voters for fcc! Can't enable parallel\n");
+ return false;
+ }
+ if (usb_icl_ma < 0) {
+ pr_err("no voters for usb_icl, Can't enable parallel\n");
+ return false;
+ }
+
+ kt_since_last_disable = ktime_sub(ktime_get_boottime(),
+ chip->parallel.last_disabled);
+ if (chip->parallel.current_max_ma == 0
+ && chip->parallel.enabled_once
+ && ktime_to_ms(kt_since_last_disable)
+ < PARALLEL_REENABLE_TIMER_MS) {
+ pr_smb(PR_STATUS, "Only been %lld since disable, skipping\n",
+ ktime_to_ms(kt_since_last_disable));
+ return false;
+ }
+
+ /*
+ * If the battery is not present, try not to change parallel charging
+ * from OFF to ON or from ON to OFF, as it could cause the device to
+ * brown out in the instant that the USB settings are changed.
+ *
+ * Only allow parallel charging check to report false (thereby turning
+ * off parallel charging) if the battery is still there, or if parallel
+ * charging is disabled in the first place.
+ */
+ if (get_prop_charge_type(chip) != POWER_SUPPLY_CHARGE_TYPE_FAST
+ && (get_prop_batt_present(chip)
+ || chip->parallel.current_max_ma == 0)) {
+ pr_smb(PR_STATUS, "Not in fast charge, skipping\n");
+ return false;
+ }
+
+ if (get_prop_batt_health(chip) != POWER_SUPPLY_HEALTH_GOOD) {
+ pr_smb(PR_STATUS, "JEITA active, skipping\n");
+ return false;
+ }
+
+ rc = smbchg_read(chip, &reg, chip->misc_base + IDEV_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read status 5 rc = %d\n", rc);
+ return false;
+ }
+
+ type = get_type(reg);
+ if (get_usb_supply_type(type) == POWER_SUPPLY_TYPE_USB_CDP) {
+ pr_smb(PR_STATUS, "CDP adapter, skipping\n");
+ return false;
+ }
+
+ if (get_usb_supply_type(type) == POWER_SUPPLY_TYPE_USB) {
+ pr_smb(PR_STATUS, "SDP adapter, skipping\n");
+ return false;
+ }
+
+ /*
+ * If USBIN is suspended or not the active power source, do not enable
+ * parallel charging. The device may be charging off of DCIN.
+ */
+ if (!smbchg_is_usbin_active_pwr_src(chip)) {
+ pr_smb(PR_STATUS, "USB not active power source: %02x\n", reg);
+ return false;
+ }
+
+ min_current_thr_ma = smbchg_get_min_parallel_current_ma(chip);
+ if (min_current_thr_ma <= 0) {
+ pr_smb(PR_STATUS, "parallel charger unavailable for thr: %d\n",
+ min_current_thr_ma);
+ return false;
+ }
+
+ if (usb_icl_ma < min_current_thr_ma) {
+ pr_smb(PR_STATUS, "Weak USB chg skip enable: %d < %d\n",
+ usb_icl_ma, min_current_thr_ma);
+ return false;
+ }
+
+ if (!fcc_voter)
+ return false;
+ /*
+ * Suspend the parallel charger if the charging current is < 1800 mA
+ * and is not because of an ESR pulse.
+ */
+ if ((strcmp(fcc_voter, ESR_PULSE_FCC_VOTER) == 0)
+ && fcc_ma < PARALLEL_CHG_THRESHOLD_CURRENT) {
+ pr_smb(PR_STATUS, "FCC %d lower than %d\n",
+ fcc_ma,
+ PARALLEL_CHG_THRESHOLD_CURRENT);
+ return false;
+ }
+
+ current_limit_ma = smbchg_get_aicl_level_ma(chip);
+ if (current_limit_ma <= 0)
+ return false;
+
+ if (chip->parallel.initial_aicl_ma == 0) {
+ if (current_limit_ma < min_current_thr_ma) {
+ pr_smb(PR_STATUS, "Initial AICL very low: %d < %d\n",
+ current_limit_ma, min_current_thr_ma);
+ return false;
+ }
+ chip->parallel.initial_aicl_ma = current_limit_ma;
+ }
+
+ power_supply_get_property(parallel_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
+ parallel_cl_ma = pval.intval / 1000;
+ /*
+	 * Read back the real amount of current we are getting.
+	 * Treat 2 mA as 0 because that is the suspend current setting.
+ */
+ if (parallel_cl_ma <= SUSPEND_CURRENT_MA)
+ parallel_cl_ma = 0;
+
+ /*
+ * Set the parallel charge path's input current limit (ICL)
+ * to the total current / 2
+ */
+ total_current_ma = min(current_limit_ma + parallel_cl_ma, usb_icl_ma);
+
+ if (total_current_ma < chip->parallel.initial_aicl_ma
+ - chip->parallel.allowed_lowering_ma) {
+ pr_smb(PR_STATUS,
+ "Total current reduced a lot: %d (%d + %d) < %d - %d\n",
+ total_current_ma,
+ current_limit_ma, parallel_cl_ma,
+ chip->parallel.initial_aicl_ma,
+ chip->parallel.allowed_lowering_ma);
+ return false;
+ }
+
+ *ret_total_current_ma = total_current_ma;
+ return true;
+}
+
+#define PARALLEL_CHARGER_EN_DELAY_MS 500
+static void smbchg_parallel_usb_en_work(struct work_struct *work)
+{
+ struct smbchg_chip *chip = container_of(work,
+ struct smbchg_chip,
+ parallel_en_work.work);
+ int previous_aicl_ma, total_current_ma, aicl_ma;
+ bool in_progress;
+
+ /* do a check to see if the aicl is stable */
+ previous_aicl_ma = smbchg_get_aicl_level_ma(chip);
+ msleep(PARALLEL_CHARGER_EN_DELAY_MS);
+ aicl_ma = smbchg_get_aicl_level_ma(chip);
+ if (previous_aicl_ma == aicl_ma) {
+ pr_smb(PR_STATUS, "AICL at %d\n", aicl_ma);
+ } else {
+ pr_smb(PR_STATUS,
+ "AICL changed [%d -> %d], recheck %d ms\n",
+ previous_aicl_ma, aicl_ma,
+ PARALLEL_CHARGER_EN_DELAY_MS);
+ goto recheck;
+ }
+
+ mutex_lock(&chip->parallel.lock);
+ in_progress = (chip->parallel.current_max_ma != 0);
+ if (smbchg_is_parallel_usb_ok(chip, &total_current_ma)) {
+ smbchg_parallel_usb_enable(chip, total_current_ma);
+ } else {
+ if (in_progress) {
+ pr_smb(PR_STATUS, "parallel charging unavailable\n");
+ smbchg_parallel_usb_disable(chip);
+ }
+ }
+ mutex_unlock(&chip->parallel.lock);
+ smbchg_relax(chip, PM_PARALLEL_CHECK);
+ return;
+
+recheck:
+ schedule_delayed_work(&chip->parallel_en_work, 0);
+}
+
+static void smbchg_parallel_usb_check_ok(struct smbchg_chip *chip)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+
+ if (!parallel_psy || !chip->parallel_charger_detected)
+ return;
+
+ smbchg_stay_awake(chip, PM_PARALLEL_CHECK);
+ schedule_delayed_work(&chip->parallel_en_work, 0);
+}
+
+static int charging_suspend_vote_cb(struct votable *votable, void *data,
+ int suspend,
+ const char *client)
+{
+ int rc;
+ struct smbchg_chip *chip = data;
+
+ if (suspend < 0) {
+ pr_err("No voters\n");
+ suspend = false;
+ }
+
+ rc = smbchg_charging_en(chip, !suspend);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't configure batt chg: 0x%x rc = %d\n",
+ !suspend, rc);
+ }
+
+ return rc;
+}
+
+static int usb_suspend_vote_cb(struct votable *votable,
+ void *data,
+ int suspend,
+ const char *client)
+{
+ int rc;
+ struct smbchg_chip *chip = data;
+
+ if (suspend < 0) {
+ pr_err("No voters\n");
+ suspend = false;
+ }
+
+ rc = smbchg_usb_suspend(chip, suspend);
+ if (rc < 0)
+ return rc;
+
+ if ((strcmp(client, THERMAL_EN_VOTER) == 0)
+ || (strcmp(client, POWER_SUPPLY_EN_VOTER) == 0)
+ || (strcmp(client, USER_EN_VOTER) == 0)
+ || (strcmp(client, FAKE_BATTERY_EN_VOTER) == 0))
+ smbchg_parallel_usb_check_ok(chip);
+
+ return rc;
+}
+
+static int dc_suspend_vote_cb(struct votable *votable,
+ void *data,
+ int suspend,
+ const char *client)
+{
+ int rc;
+ struct smbchg_chip *chip = data;
+
+ if (suspend < 0) {
+ pr_err("No voters\n");
+ suspend = false;
+ }
+
+ rc = smbchg_dc_suspend(chip, suspend);
+ if (rc < 0)
+ return rc;
+
+ if (chip->dc_psy_type != -EINVAL && chip->dc_psy)
+ power_supply_changed(chip->dc_psy);
+
+ return rc;
+}
+
+static int set_fastchg_current_vote_cb(struct votable *votable,
+ void *data,
+ int fcc_ma,
+ const char *client)
+{
+ struct smbchg_chip *chip = data;
+ int rc;
+
+ if (fcc_ma < 0) {
+ pr_err("No voters\n");
+ return 0;
+ }
+
+ if (chip->parallel.current_max_ma == 0) {
+ rc = smbchg_set_fastchg_current_raw(chip, fcc_ma);
+ if (rc < 0) {
+ pr_err("Can't set FCC fcc_ma=%d rc=%d\n", fcc_ma, rc);
+ return rc;
+ }
+ }
+ /*
+ * check if parallel charging can be enabled, and if enabled,
+ * distribute the fcc
+ */
+ smbchg_parallel_usb_check_ok(chip);
+ return 0;
+}
+
+static int smbchg_set_fastchg_current_user(struct smbchg_chip *chip,
+ int current_ma)
+{
+ int rc = 0;
+
+ pr_smb(PR_STATUS, "User setting FCC to %d\n", current_ma);
+
+ rc = vote(chip->fcc_votable, BATT_TYPE_FCC_VOTER, true, current_ma);
+ if (rc < 0)
+ pr_err("Couldn't vote en rc %d\n", rc);
+ return rc;
+}
+
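+/*
+ * Pick the wipower ILIM entry whose [vmin_uv, vmax_uv] window contains the
+ * given DCIN voltage; fall back to the first default entry if none matches.
+ */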
+static struct ilim_entry *smbchg_wipower_find_entry(struct smbchg_chip *chip,
+ struct ilim_map *map, int uv)
+{
+ int i;
+ struct ilim_entry *ret = &(chip->wipower_default.entries[0]);
+
+ for (i = 0; i < map->num; i++) {
+ if (is_between(map->entries[i].vmin_uv, map->entries[i].vmax_uv,
+ uv))
+ ret = &map->entries[i];
+ }
+ return ret;
+}
+
+#define ZIN_ICL_PT 0xFC
+#define ZIN_ICL_LV 0xFD
+#define ZIN_ICL_HV 0xFE
+#define ZIN_ICL_MASK SMB_MASK(4, 0)
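+/*
+ * Program the selected ZIN_ICL_* register with the index of the closest
+ * dc_ilim table entry at or below the requested current (index 0 if the
+ * request is below the smallest entry).
+ */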
+static int smbchg_dcin_ilim_config(struct smbchg_chip *chip, int offset, int ma)
+{
+ int i, rc;
+
+ i = find_smaller_in_array(chip->tables.dc_ilim_ma_table,
+ ma, chip->tables.dc_ilim_ma_len);
+
+ if (i < 0)
+ i = 0;
+
+ rc = smbchg_sec_masked_write(chip, chip->bat_if_base + offset,
+ ZIN_ICL_MASK, i);
+ if (rc)
+ dev_err(chip->dev, "Couldn't write bat if offset %d value = %d rc = %d\n",
+ offset, i, rc);
+ return rc;
+}
+
+static int smbchg_wipower_ilim_config(struct smbchg_chip *chip,
+ struct ilim_entry *ilim)
+{
+ int rc = 0;
+
+ if (chip->current_ilim.icl_pt_ma != ilim->icl_pt_ma) {
+ rc = smbchg_dcin_ilim_config(chip, ZIN_ICL_PT, ilim->icl_pt_ma);
+ if (rc)
+ dev_err(chip->dev, "failed to write batif offset %d %dma rc = %d\n",
+ ZIN_ICL_PT, ilim->icl_pt_ma, rc);
+ else
+ chip->current_ilim.icl_pt_ma = ilim->icl_pt_ma;
+ }
+
+ if (chip->current_ilim.icl_lv_ma != ilim->icl_lv_ma) {
+ rc = smbchg_dcin_ilim_config(chip, ZIN_ICL_LV, ilim->icl_lv_ma);
+ if (rc)
+ dev_err(chip->dev, "failed to write batif offset %d %dma rc = %d\n",
+ ZIN_ICL_LV, ilim->icl_lv_ma, rc);
+ else
+ chip->current_ilim.icl_lv_ma = ilim->icl_lv_ma;
+ }
+
+ if (chip->current_ilim.icl_hv_ma != ilim->icl_hv_ma) {
+ rc = smbchg_dcin_ilim_config(chip, ZIN_ICL_HV, ilim->icl_hv_ma);
+ if (rc)
+ dev_err(chip->dev, "failed to write batif offset %d %dma rc = %d\n",
+ ZIN_ICL_HV, ilim->icl_hv_ma, rc);
+ else
+ chip->current_ilim.icl_hv_ma = ilim->icl_hv_ma;
+ }
+ return rc;
+}
+
+static void btm_notify_dcin(enum qpnp_tm_state state, void *ctx);
+static int smbchg_wipower_dcin_btm_configure(struct smbchg_chip *chip,
+ struct ilim_entry *ilim)
+{
+ int rc;
+
+ if (ilim->vmin_uv == chip->current_ilim.vmin_uv
+ && ilim->vmax_uv == chip->current_ilim.vmax_uv)
+ return 0;
+
+ chip->param.channel = DCIN;
+ chip->param.btm_ctx = chip;
+ if (wipower_dcin_interval < ADC_MEAS1_INTERVAL_0MS)
+ wipower_dcin_interval = ADC_MEAS1_INTERVAL_0MS;
+
+ if (wipower_dcin_interval > ADC_MEAS1_INTERVAL_16S)
+ wipower_dcin_interval = ADC_MEAS1_INTERVAL_16S;
+
+ chip->param.timer_interval = wipower_dcin_interval;
+ chip->param.threshold_notification = &btm_notify_dcin;
+ chip->param.high_thr = ilim->vmax_uv + wipower_dcin_hyst_uv;
+ chip->param.low_thr = ilim->vmin_uv - wipower_dcin_hyst_uv;
+ chip->param.state_request = ADC_TM_HIGH_LOW_THR_ENABLE;
+ rc = qpnp_vadc_channel_monitor(chip->vadc_dev, &chip->param);
+ if (rc) {
+ dev_err(chip->dev, "Couldn't configure btm for dcin rc = %d\n",
+ rc);
+ } else {
+ chip->current_ilim.vmin_uv = ilim->vmin_uv;
+ chip->current_ilim.vmax_uv = ilim->vmax_uv;
+ pr_smb(PR_STATUS, "btm ilim = (%duV %duV %dmA %dmA %dmA)\n",
+ ilim->vmin_uv, ilim->vmax_uv,
+ ilim->icl_pt_ma, ilim->icl_lv_ma, ilim->icl_hv_ma);
+ }
+ return rc;
+}
+
+static int smbchg_wipower_icl_configure(struct smbchg_chip *chip,
+ int dcin_uv, bool div2)
+{
+ int rc = 0;
+ struct ilim_map *map = div2 ? &chip->wipower_div2 : &chip->wipower_pt;
+ struct ilim_entry *ilim = smbchg_wipower_find_entry(chip, map, dcin_uv);
+
+ rc = smbchg_wipower_ilim_config(chip, ilim);
+ if (rc) {
+ dev_err(chip->dev, "failed to config ilim rc = %d, dcin_uv = %d , div2 = %d, ilim = (%duV %duV %dmA %dmA %dmA)\n",
+ rc, dcin_uv, div2,
+ ilim->vmin_uv, ilim->vmax_uv,
+ ilim->icl_pt_ma, ilim->icl_lv_ma, ilim->icl_hv_ma);
+ return rc;
+ }
+
+ rc = smbchg_wipower_dcin_btm_configure(chip, ilim);
+ if (rc) {
+ dev_err(chip->dev, "failed to config btm rc = %d, dcin_uv = %d , div2 = %d, ilim = (%duV %duV %dmA %dmA %dmA)\n",
+ rc, dcin_uv, div2,
+ ilim->vmin_uv, ilim->vmax_uv,
+ ilim->icl_pt_ma, ilim->icl_lv_ma, ilim->icl_hv_ma);
+ return rc;
+ }
+ chip->wipower_configured = true;
+ return 0;
+}
+
+static void smbchg_wipower_icl_deconfigure(struct smbchg_chip *chip)
+{
+ int rc;
+ struct ilim_entry *ilim = &(chip->wipower_default.entries[0]);
+
+ if (!chip->wipower_configured)
+ return;
+
+ rc = smbchg_wipower_ilim_config(chip, ilim);
+ if (rc)
+ dev_err(chip->dev, "Couldn't config default ilim rc = %d\n",
+ rc);
+
+ rc = qpnp_vadc_end_channel_monitor(chip->vadc_dev);
+ if (rc)
+ dev_err(chip->dev, "Couldn't de configure btm for dcin rc = %d\n",
+ rc);
+
+ chip->wipower_configured = false;
+ chip->current_ilim.vmin_uv = 0;
+ chip->current_ilim.vmax_uv = 0;
+ chip->current_ilim.icl_pt_ma = ilim->icl_pt_ma;
+ chip->current_ilim.icl_lv_ma = ilim->icl_lv_ma;
+ chip->current_ilim.icl_hv_ma = ilim->icl_hv_ma;
+ pr_smb(PR_WIPOWER, "De config btm\n");
+}
+
+#define FV_STS 0x0C
+#define DIV2_ACTIVE BIT(7)
+static void __smbchg_wipower_check(struct smbchg_chip *chip)
+{
+ int chg_type;
+ bool usb_present, dc_present;
+ int rc;
+ int dcin_uv;
+ bool div2;
+ struct qpnp_vadc_result adc_result;
+ u8 reg;
+
+ if (!wipower_dyn_icl_en) {
+ smbchg_wipower_icl_deconfigure(chip);
+ return;
+ }
+
+ chg_type = get_prop_charge_type(chip);
+ usb_present = is_usb_present(chip);
+ dc_present = is_dc_present(chip);
+ if (chg_type != POWER_SUPPLY_CHARGE_TYPE_NONE
+ && !usb_present
+ && dc_present
+ && chip->dc_psy_type == POWER_SUPPLY_TYPE_WIPOWER) {
+ rc = qpnp_vadc_read(chip->vadc_dev, DCIN, &adc_result);
+ if (rc) {
+ pr_smb(PR_STATUS, "error DCIN read rc = %d\n", rc);
+ return;
+ }
+ dcin_uv = adc_result.physical;
+
+ /* check div_by_2 */
+ rc = smbchg_read(chip, &reg, chip->chgr_base + FV_STS, 1);
+ if (rc) {
+ pr_smb(PR_STATUS, "error DCIN read rc = %d\n", rc);
+ return;
+ }
+ div2 = !!(reg & DIV2_ACTIVE);
+
+ pr_smb(PR_WIPOWER,
+ "config ICL chg_type = %d usb = %d dc = %d dcin_uv(adc_code) = %d (0x%x) div2 = %d\n",
+ chg_type, usb_present, dc_present, dcin_uv,
+ adc_result.adc_code, div2);
+ smbchg_wipower_icl_configure(chip, dcin_uv, div2);
+ } else {
+ pr_smb(PR_WIPOWER,
+ "deconfig ICL chg_type = %d usb = %d dc = %d\n",
+ chg_type, usb_present, dc_present);
+ smbchg_wipower_icl_deconfigure(chip);
+ }
+}
+
+static void smbchg_wipower_check(struct smbchg_chip *chip)
+{
+ if (!chip->wipower_dyn_icl_avail)
+ return;
+
+ mutex_lock(&chip->wipower_config);
+ __smbchg_wipower_check(chip);
+ mutex_unlock(&chip->wipower_config);
+}
+
+static void btm_notify_dcin(enum qpnp_tm_state state, void *ctx)
+{
+ struct smbchg_chip *chip = ctx;
+
+ mutex_lock(&chip->wipower_config);
+ pr_smb(PR_WIPOWER, "%s state\n",
+ state == ADC_TM_LOW_STATE ? "low" : "high");
+ chip->current_ilim.vmin_uv = 0;
+ chip->current_ilim.vmax_uv = 0;
+ __smbchg_wipower_check(chip);
+ mutex_unlock(&chip->wipower_config);
+}
+
+static int force_dcin_icl_write(void *data, u64 val)
+{
+ struct smbchg_chip *chip = data;
+
+ smbchg_wipower_check(chip);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(force_dcin_icl_ops, NULL,
+ force_dcin_icl_write, "0x%02llx\n");
+
+/*
+ * set the dc charge path's maximum allowed current draw
+ * that may be limited by the system's thermal level
+ */
+static int set_dc_current_limit_vote_cb(struct votable *votable,
+ void *data,
+ int icl_ma,
+ const char *client)
+{
+ struct smbchg_chip *chip = data;
+
+ if (icl_ma < 0) {
+ pr_err("No voters\n");
+ return 0;
+ }
+
+ return smbchg_set_dc_current_max(chip, icl_ma);
+}
+
+/*
+ * set the usb charge path's maximum allowed current draw
+ * that may be limited by the system's thermal level
+ */
+static int set_usb_current_limit_vote_cb(struct votable *votable,
+ void *data,
+ int icl_ma,
+ const char *client)
+{
+ struct smbchg_chip *chip = data;
+ int rc, aicl_ma;
+ const char *effective_id;
+
+ if (icl_ma < 0) {
+ pr_err("No voters\n");
+ return 0;
+ }
+ effective_id = get_effective_client_locked(chip->usb_icl_votable);
+
+ if (!effective_id)
+ return 0;
+
+ /* disable parallel charging if HVDCP is voting for 300mA */
+ if (strcmp(effective_id, HVDCP_ICL_VOTER) == 0)
+ smbchg_parallel_usb_disable(chip);
+
+ if (chip->parallel.current_max_ma == 0) {
+ rc = smbchg_set_usb_current_max(chip, icl_ma);
+ if (rc) {
+ pr_err("Failed to set usb current max: %d\n", rc);
+ return rc;
+ }
+ }
+
+ /* skip the aicl rerun if hvdcp icl voter is active */
+ if (strcmp(effective_id, HVDCP_ICL_VOTER) == 0)
+ return 0;
+
+ aicl_ma = smbchg_get_aicl_level_ma(chip);
+ if (icl_ma > aicl_ma)
+ smbchg_rerun_aicl(chip);
+ smbchg_parallel_usb_check_ok(chip);
+ return 0;
+}
+
+static int smbchg_system_temp_level_set(struct smbchg_chip *chip,
+ int lvl_sel)
+{
+ int rc = 0;
+ int prev_therm_lvl;
+ int thermal_icl_ma;
+
+ if (!chip->thermal_mitigation) {
+ dev_err(chip->dev, "Thermal mitigation not supported\n");
+ return -EINVAL;
+ }
+
+ if (lvl_sel < 0) {
+ dev_err(chip->dev, "Unsupported level selected %d\n", lvl_sel);
+ return -EINVAL;
+ }
+
+ if (lvl_sel >= chip->thermal_levels) {
+ dev_err(chip->dev, "Unsupported level selected %d forcing %d\n",
+ lvl_sel, chip->thermal_levels - 1);
+ lvl_sel = chip->thermal_levels - 1;
+ }
+
+ if (lvl_sel == chip->therm_lvl_sel)
+ return 0;
+
+ mutex_lock(&chip->therm_lvl_lock);
+ prev_therm_lvl = chip->therm_lvl_sel;
+ chip->therm_lvl_sel = lvl_sel;
+ if (chip->therm_lvl_sel == (chip->thermal_levels - 1)) {
+ /*
+		 * Disable charging if the highest level is selected, by
+		 * putting the DC and USB paths in suspend
+ */
+ rc = vote(chip->dc_suspend_votable, THERMAL_EN_VOTER, true, 0);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set dc suspend rc %d\n", rc);
+ goto out;
+ }
+ rc = vote(chip->usb_suspend_votable, THERMAL_EN_VOTER, true, 0);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set usb suspend rc %d\n", rc);
+ goto out;
+ }
+ goto out;
+ }
+
+ if (chip->therm_lvl_sel == 0) {
+ rc = vote(chip->usb_icl_votable, THERMAL_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't disable USB thermal ICL vote rc=%d\n",
+ rc);
+
+ rc = vote(chip->dc_icl_votable, THERMAL_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't disable DC thermal ICL vote rc=%d\n",
+ rc);
+ } else {
+ thermal_icl_ma =
+ (int)chip->thermal_mitigation[chip->therm_lvl_sel];
+ rc = vote(chip->usb_icl_votable, THERMAL_ICL_VOTER, true,
+ thermal_icl_ma);
+ if (rc < 0)
+ pr_err("Couldn't vote for USB thermal ICL rc=%d\n", rc);
+
+ rc = vote(chip->dc_icl_votable, THERMAL_ICL_VOTER, true,
+ thermal_icl_ma);
+ if (rc < 0)
+ pr_err("Couldn't vote for DC thermal ICL rc=%d\n", rc);
+ }
+
+ if (prev_therm_lvl == chip->thermal_levels - 1) {
+ /*
+		 * If the highest level was previously selected, charging must
+		 * have been disabled. Enable it by taking the DC and USB paths
+ * out of suspend.
+ */
+ rc = vote(chip->dc_suspend_votable, THERMAL_EN_VOTER, false, 0);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set dc suspend rc %d\n", rc);
+ goto out;
+ }
+ rc = vote(chip->usb_suspend_votable, THERMAL_EN_VOTER,
+ false, 0);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set usb suspend rc %d\n", rc);
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&chip->therm_lvl_lock);
+ return rc;
+}
+
+static int smbchg_ibat_ocp_threshold_ua = 4500000;
+module_param(smbchg_ibat_ocp_threshold_ua, int, 0644);
+
+#define UCONV 1000000LL
+#define MCONV 1000LL
+#define FLASH_V_THRESHOLD 3000000
+#define FLASH_VDIP_MARGIN 100000
+#define VPH_FLASH_VDIP (FLASH_V_THRESHOLD + FLASH_VDIP_MARGIN)
+#define BUCK_EFFICIENCY 800LL
+static int smbchg_calc_max_flash_current(struct smbchg_chip *chip)
+{
+ int ocv_uv, esr_uohm, rbatt_uohm, ibat_now, rc;
+ int64_t ibat_flash_ua, avail_flash_ua, avail_flash_power_fw;
+ int64_t ibat_safe_ua, vin_flash_uv, vph_flash_uv;
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_VOLTAGE_OCV, &ocv_uv);
+ if (rc) {
+ pr_smb(PR_STATUS, "bms psy does not support OCV\n");
+ return 0;
+ }
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_RESISTANCE,
+ &esr_uohm);
+ if (rc) {
+ pr_smb(PR_STATUS, "bms psy does not support resistance\n");
+ return 0;
+ }
+
+ rc = msm_bcl_read(BCL_PARAM_CURRENT, &ibat_now);
+ if (rc) {
+ pr_smb(PR_STATUS, "BCL current read failed: %d\n", rc);
+ return 0;
+ }
+
+ rbatt_uohm = esr_uohm + chip->rpara_uohm + chip->rslow_uohm;
+ /*
+	 * Calculate the maximum current that can be pulled out of the battery
+ * before the battery voltage dips below a safe threshold.
+ */
+ ibat_safe_ua = div_s64((ocv_uv - VPH_FLASH_VDIP) * UCONV,
+ rbatt_uohm);
+
+ if (ibat_safe_ua <= smbchg_ibat_ocp_threshold_ua) {
+ /*
+ * If the calculated current is below the OCP threshold, then
+ * use it as the possible flash current.
+ */
+ ibat_flash_ua = ibat_safe_ua - ibat_now;
+ vph_flash_uv = VPH_FLASH_VDIP;
+ } else {
+ /*
+ * If the calculated current is above the OCP threshold, then
+ * use the ocp threshold instead.
+ *
+ * Any higher current will be tripping the battery OCP.
+ */
+ ibat_flash_ua = smbchg_ibat_ocp_threshold_ua - ibat_now;
+ vph_flash_uv = ocv_uv - div64_s64((int64_t)rbatt_uohm
+ * smbchg_ibat_ocp_threshold_ua, UCONV);
+ }
+ /* Calculate the input voltage of the flash module. */
+ vin_flash_uv = max((chip->vled_max_uv + 500000LL),
+ div64_s64((vph_flash_uv * 1200), 1000));
+ /* Calculate the available power for the flash module. */
+ avail_flash_power_fw = BUCK_EFFICIENCY * vph_flash_uv * ibat_flash_ua;
+ /*
+ * Calculate the available amount of current the flash module can draw
+	 * before collapsing the battery (available power / flash input voltage).
+ */
+ avail_flash_ua = div64_s64(avail_flash_power_fw, vin_flash_uv * MCONV);
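+	/*
+	 * Example with illustrative numbers (not measured values): with
+	 * ocv = 3.8 V, rbatt = 150 mohm and vled_max = 3.3 V, ibat_safe is
+	 * roughly 4.67 A, above the 4.5 A OCP threshold, so ibat_flash is
+	 * about 4.5 A (minus the present battery draw) at vph of about
+	 * 3.125 V; vin_flash = max(3.8 V, 1.2 * 3.125 V) = 3.8 V, giving
+	 * avail_flash of roughly 0.8 * 3.125 * 4.5 / 3.8, i.e. about 3 A.
+	 */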
+ pr_smb(PR_MISC,
+ "avail_iflash=%lld, ocv=%d, ibat=%d, rbatt=%d\n",
+ avail_flash_ua, ocv_uv, ibat_now, rbatt_uohm);
+ return (int)avail_flash_ua;
+}
+
+#define FCC_CMP_CFG 0xF3
+#define FCC_COMP_MASK SMB_MASK(1, 0)
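+/*
+ * Look up the requested compensation current in the FCC compensation table
+ * and program its index; values not present in the table are rejected.
+ */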
+static int smbchg_fastchg_current_comp_set(struct smbchg_chip *chip,
+ int comp_current)
+{
+ int rc;
+ u8 i;
+
+ for (i = 0; i < chip->tables.fcc_comp_len; i++)
+ if (comp_current == chip->tables.fcc_comp_table[i])
+ break;
+
+ if (i >= chip->tables.fcc_comp_len)
+ return -EINVAL;
+
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + FCC_CMP_CFG,
+ FCC_COMP_MASK, i);
+
+ if (rc)
+ dev_err(chip->dev, "Couldn't set fastchg current comp rc = %d\n",
+ rc);
+
+ return rc;
+}
+
+#define CFG_TCC_REG 0xF9
+#define CHG_ITERM_MASK SMB_MASK(2, 0)
+static int smbchg_iterm_set(struct smbchg_chip *chip, int iterm_ma)
+{
+ int rc;
+ u8 reg;
+
+ reg = find_closest_in_array(
+ chip->tables.iterm_ma_table,
+ chip->tables.iterm_ma_len,
+ iterm_ma);
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->chgr_base + CFG_TCC_REG,
+ CHG_ITERM_MASK, reg);
+ if (rc) {
+ dev_err(chip->dev,
+ "Couldn't set iterm rc = %d\n", rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "set tcc (%d) to 0x%02x\n",
+ iterm_ma, reg);
+ chip->iterm_ma = iterm_ma;
+
+ return 0;
+}
+
+#define FV_CMP_CFG 0xF5
+#define FV_COMP_MASK SMB_MASK(5, 0)
+static int smbchg_float_voltage_comp_set(struct smbchg_chip *chip, int code)
+{
+ int rc;
+ u8 val;
+
+ val = code & FV_COMP_MASK;
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + FV_CMP_CFG,
+ FV_COMP_MASK, val);
+
+ if (rc)
+ dev_err(chip->dev, "Couldn't set float voltage comp rc = %d\n",
+ rc);
+
+ return rc;
+}
+
+#define VFLOAT_CFG_REG 0xF4
+#define MIN_FLOAT_MV 3600
+#define MAX_FLOAT_MV 4500
+#define VFLOAT_MASK SMB_MASK(5, 0)
+
+#define MID_RANGE_FLOAT_MV_MIN 3600
+#define MID_RANGE_FLOAT_MIN_VAL 0x05
+#define MID_RANGE_FLOAT_STEP_MV 20
+
+#define HIGH_RANGE_FLOAT_MIN_MV 4340
+#define HIGH_RANGE_FLOAT_MIN_VAL 0x2A
+#define HIGH_RANGE_FLOAT_STEP_MV 10
+
+#define VHIGH_RANGE_FLOAT_MIN_MV 4360
+#define VHIGH_RANGE_FLOAT_MIN_VAL 0x2C
+#define VHIGH_RANGE_FLOAT_STEP_MV 20
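+/*
+ * VFLOAT_CFG_REG encodes the float voltage in three ranges with different
+ * step sizes, and requests are rounded down to the nearest step.
+ * Illustrative example: 4350 mV falls in the high range, so the code
+ * written is 0x2A + (4350 - 4340) / 10 = 0x2B with no rounding loss.
+ */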
+static int smbchg_float_voltage_set(struct smbchg_chip *chip, int vfloat_mv)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval prop;
+ int rc, delta;
+ u8 temp;
+
+ if ((vfloat_mv < MIN_FLOAT_MV) || (vfloat_mv > MAX_FLOAT_MV)) {
+ dev_err(chip->dev, "bad float voltage mv =%d asked to set\n",
+ vfloat_mv);
+ return -EINVAL;
+ }
+
+ if (vfloat_mv <= HIGH_RANGE_FLOAT_MIN_MV) {
+ /* mid range */
+ delta = vfloat_mv - MID_RANGE_FLOAT_MV_MIN;
+ temp = MID_RANGE_FLOAT_MIN_VAL + delta
+ / MID_RANGE_FLOAT_STEP_MV;
+ vfloat_mv -= delta % MID_RANGE_FLOAT_STEP_MV;
+ } else if (vfloat_mv <= VHIGH_RANGE_FLOAT_MIN_MV) {
+ /* high range */
+ delta = vfloat_mv - HIGH_RANGE_FLOAT_MIN_MV;
+ temp = HIGH_RANGE_FLOAT_MIN_VAL + delta
+ / HIGH_RANGE_FLOAT_STEP_MV;
+ vfloat_mv -= delta % HIGH_RANGE_FLOAT_STEP_MV;
+ } else {
+ /* very high range */
+ delta = vfloat_mv - VHIGH_RANGE_FLOAT_MIN_MV;
+ temp = VHIGH_RANGE_FLOAT_MIN_VAL + delta
+ / VHIGH_RANGE_FLOAT_STEP_MV;
+ vfloat_mv -= delta % VHIGH_RANGE_FLOAT_STEP_MV;
+ }
+
+ if (parallel_psy) {
+ prop.intval = vfloat_mv + 50;
+ rc = power_supply_set_property(parallel_psy,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX, &prop);
+ if (rc)
+ dev_err(chip->dev, "Couldn't set float voltage on parallel psy rc: %d\n",
+ rc);
+ }
+
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + VFLOAT_CFG_REG,
+ VFLOAT_MASK, temp);
+
+ if (rc)
+ dev_err(chip->dev, "Couldn't set float voltage rc = %d\n", rc);
+ else
+ chip->vfloat_mv = vfloat_mv;
+
+ return rc;
+}
+
+static int smbchg_float_voltage_get(struct smbchg_chip *chip)
+{
+ return chip->vfloat_mv;
+}
+
+#define SFT_CFG 0xFD
+#define SFT_EN_MASK SMB_MASK(5, 4)
+#define SFT_TO_MASK SMB_MASK(3, 2)
+#define PRECHG_SFT_TO_MASK SMB_MASK(1, 0)
+#define SFT_TIMER_DISABLE_BIT BIT(5)
+#define PRECHG_SFT_TIMER_DISABLE_BIT BIT(4)
+#define SAFETY_TIME_MINUTES_SHIFT 2
+static int smbchg_safety_timer_enable(struct smbchg_chip *chip, bool enable)
+{
+ int rc;
+ u8 reg;
+
+ if (enable == chip->safety_timer_en)
+ return 0;
+
+ if (enable)
+ reg = 0;
+ else
+ reg = SFT_TIMER_DISABLE_BIT | PRECHG_SFT_TIMER_DISABLE_BIT;
+
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + SFT_CFG,
+ SFT_EN_MASK, reg);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't %s safety timer rc = %d\n",
+ enable ? "enable" : "disable", rc);
+ return rc;
+ }
+ chip->safety_timer_en = enable;
+ return 0;
+}
+
+enum skip_reason {
+ REASON_OTG_ENABLED = BIT(0),
+ REASON_FLASH_ENABLED = BIT(1)
+};
+
+#define BAT_IF_TRIM7_REG 0xF7
+#define CFG_750KHZ_BIT BIT(1)
+#define MISC_CFG_NTC_VOUT_REG 0xF3
+#define CFG_NTC_VOUT_FSW_BIT BIT(0)
+static int smbchg_switch_buck_frequency(struct smbchg_chip *chip,
+ bool flash_active)
+{
+ int rc;
+
+ if (!(chip->wa_flags & SMBCHG_FLASH_BUCK_SWITCH_FREQ_WA))
+ return 0;
+
+ if (chip->flash_active == flash_active) {
+ pr_smb(PR_STATUS, "Fsw not changed, flash_active: %d\n",
+ flash_active);
+ return 0;
+ }
+
+ /*
+	 * As per the systems team recommendation, before the flash fires, the
+	 * buck switching frequency (Fsw) needs to be increased to 1 MHz. Once
+	 * the flash is disabled, Fsw needs to be set back to 750 kHz.
+ */
+ rc = smbchg_sec_masked_write(chip, chip->misc_base +
+ MISC_CFG_NTC_VOUT_REG, CFG_NTC_VOUT_FSW_BIT,
+ flash_active ? CFG_NTC_VOUT_FSW_BIT : 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set switching frequency multiplier rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smbchg_sec_masked_write(chip, chip->bat_if_base + BAT_IF_TRIM7_REG,
+ CFG_750KHZ_BIT, flash_active ? 0 : CFG_750KHZ_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Cannot set switching freq: %d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_STATUS, "Fsw @ %sHz\n", flash_active ? "1M" : "750K");
+ chip->flash_active = flash_active;
+ return 0;
+}
+
+#define OTG_TRIM6 0xF6
+#define TR_ENB_SKIP_BIT BIT(2)
+#define OTG_EN_BIT BIT(0)
+static int smbchg_otg_pulse_skip_disable(struct smbchg_chip *chip,
+ enum skip_reason reason, bool disable)
+{
+ int rc;
+ bool disabled;
+
+ disabled = !!chip->otg_pulse_skip_dis;
+ pr_smb(PR_STATUS, "%s pulse skip, reason %d\n",
+ disable ? "disabling" : "enabling", reason);
+ if (disable)
+ chip->otg_pulse_skip_dis |= reason;
+ else
+ chip->otg_pulse_skip_dis &= ~reason;
+ if (disabled == !!chip->otg_pulse_skip_dis)
+ return 0;
+ disabled = !!chip->otg_pulse_skip_dis;
+
+ rc = smbchg_sec_masked_write(chip, chip->otg_base + OTG_TRIM6,
+ TR_ENB_SKIP_BIT, disabled ? TR_ENB_SKIP_BIT : 0);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't %s otg pulse skip rc = %d\n",
+ disabled ? "disable" : "enable", rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "%s pulse skip\n", disabled ? "disabled" : "enabled");
+ return 0;
+}
+
+#define LOW_PWR_OPTIONS_REG 0xFF
+#define FORCE_TLIM_BIT BIT(4)
+static int smbchg_force_tlim_en(struct smbchg_chip *chip, bool enable)
+{
+ int rc;
+
+ rc = smbchg_sec_masked_write(chip, chip->otg_base + LOW_PWR_OPTIONS_REG,
+ FORCE_TLIM_BIT, enable ? FORCE_TLIM_BIT : 0);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't %s otg force tlim rc = %d\n",
+ enable ? "enable" : "disable", rc);
+ return rc;
+ }
+ return rc;
+}
+
+static void smbchg_vfloat_adjust_check(struct smbchg_chip *chip)
+{
+ if (!chip->use_vfloat_adjustments)
+ return;
+
+ smbchg_stay_awake(chip, PM_REASON_VFLOAT_ADJUST);
+ pr_smb(PR_STATUS, "Starting vfloat adjustments\n");
+ schedule_delayed_work(&chip->vfloat_adjust_work, 0);
+}
+
+#define FV_STS_REG 0xC
+#define AICL_INPUT_STS_BIT BIT(6)
+static bool smbchg_is_input_current_limited(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->chgr_base + FV_STS_REG, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read FV_STS rc=%d\n", rc);
+ return false;
+ }
+
+ return !!(reg & AICL_INPUT_STS_BIT);
+}
+
+#define SW_ESR_PULSE_MS 1500
+static void smbchg_cc_esr_wa_check(struct smbchg_chip *chip)
+{
+ int rc, esr_count;
+
+ if (!(chip->wa_flags & SMBCHG_CC_ESR_WA))
+ return;
+
+ if (!is_usb_present(chip) && !is_dc_present(chip)) {
+ pr_smb(PR_STATUS, "No inputs present, skipping\n");
+ return;
+ }
+
+ if (get_prop_charge_type(chip) != POWER_SUPPLY_CHARGE_TYPE_FAST) {
+ pr_smb(PR_STATUS, "Not in fast charge, skipping\n");
+ return;
+ }
+
+ if (!smbchg_is_input_current_limited(chip)) {
+ pr_smb(PR_STATUS, "Not input current limited, skipping\n");
+ return;
+ }
+
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_UPDATE_NOW, 1);
+ rc = get_property_from_fg(chip,
+ POWER_SUPPLY_PROP_ESR_COUNT, &esr_count);
+ if (rc) {
+ pr_smb(PR_STATUS,
+ "could not read ESR counter rc = %d\n", rc);
+ return;
+ }
+
+ /*
+ * The esr_count is counting down the number of fuel gauge cycles
+	 * before an ESR pulse is needed.
+ *
+ * After a successful ESR pulse, this count is reset to some
+ * high number like 28. If this reaches 0, then the fuel gauge
+	 * hardware should force an ESR pulse.
+ *
+ * However, if the device is in constant current charge mode while
+ * being input current limited, the ESR pulse will not affect the
+ * battery current, so the measurement will fail.
+ *
+ * As a failsafe, force a manual ESR pulse if this value is read as
+ * 0.
+ */
+ if (esr_count != 0) {
+ pr_smb(PR_STATUS, "ESR count is not zero, skipping\n");
+ return;
+ }
+
+ pr_smb(PR_STATUS, "Lowering charge current for ESR pulse\n");
+ smbchg_stay_awake(chip, PM_ESR_PULSE);
+ smbchg_sw_esr_pulse_en(chip, true);
+ msleep(SW_ESR_PULSE_MS);
+ pr_smb(PR_STATUS, "Raising charge current for ESR pulse\n");
+ smbchg_relax(chip, PM_ESR_PULSE);
+ smbchg_sw_esr_pulse_en(chip, false);
+}
+
+static void smbchg_soc_changed(struct smbchg_chip *chip)
+{
+ smbchg_cc_esr_wa_check(chip);
+}
+
+#define DC_AICL_CFG 0xF3
+#define MISC_TRIM_OPT_15_8 0xF5
+#define USB_AICL_DEGLITCH_MASK (BIT(5) | BIT(4) | BIT(3))
+#define USB_AICL_DEGLITCH_SHORT (BIT(5) | BIT(4) | BIT(3))
+#define USB_AICL_DEGLITCH_LONG 0
+#define DC_AICL_DEGLITCH_MASK (BIT(5) | BIT(4) | BIT(3))
+#define DC_AICL_DEGLITCH_SHORT (BIT(5) | BIT(4) | BIT(3))
+#define DC_AICL_DEGLITCH_LONG 0
+#define AICL_RERUN_MASK (BIT(5) | BIT(4))
+#define AICL_RERUN_ON (BIT(5) | BIT(4))
+#define AICL_RERUN_OFF 0
+
+static int smbchg_hw_aicl_rerun_enable_indirect_cb(struct votable *votable,
+ void *data,
+ int enable,
+ const char *client)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = data;
+
+ if (enable < 0) {
+ pr_err("No voters\n");
+ enable = 0;
+ }
+ /*
+ * If the indirect voting result of all the clients is to enable hw aicl
+ * rerun, then remove our vote to disable hw aicl rerun
+ */
+ rc = vote(chip->hw_aicl_rerun_disable_votable,
+ HW_AICL_RERUN_ENABLE_INDIRECT_VOTER, !enable, 0);
+ if (rc < 0) {
+ pr_err("Couldn't vote for hw rerun rc= %d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int smbchg_hw_aicl_rerun_disable_cb(struct votable *votable, void *data,
+ int disable,
+ const char *client)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = data;
+
+ if (disable < 0) {
+ pr_err("No voters\n");
+ disable = 0;
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->misc_base + MISC_TRIM_OPT_15_8,
+ AICL_RERUN_MASK, disable ? AICL_RERUN_OFF : AICL_RERUN_ON);
+ if (rc < 0)
+ pr_err("Couldn't write to MISC_TRIM_OPTIONS_15_8 rc=%d\n", rc);
+
+ return rc;
+}
+
+static int smbchg_aicl_deglitch_config_cb(struct votable *votable, void *data,
+ int shorter,
+ const char *client)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = data;
+
+ if (shorter < 0) {
+ pr_err("No voters\n");
+ shorter = 0;
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + USB_AICL_CFG,
+ USB_AICL_DEGLITCH_MASK,
+ shorter ? USB_AICL_DEGLITCH_SHORT : USB_AICL_DEGLITCH_LONG);
+ if (rc < 0) {
+ pr_err("Couldn't write to USB_AICL_CFG rc=%d\n", rc);
+ return rc;
+ }
+ rc = smbchg_sec_masked_write(chip,
+ chip->dc_chgpth_base + DC_AICL_CFG,
+ DC_AICL_DEGLITCH_MASK,
+ shorter ? DC_AICL_DEGLITCH_SHORT : DC_AICL_DEGLITCH_LONG);
+ if (rc < 0) {
+ pr_err("Couldn't write to DC_AICL_CFG rc=%d\n", rc);
+ return rc;
+ }
+ return rc;
+}
+
+static void smbchg_aicl_deglitch_wa_en(struct smbchg_chip *chip, bool en)
+{
+ int rc;
+
+ rc = vote(chip->aicl_deglitch_short_votable,
+ VARB_WORKAROUND_VOTER, en, 0);
+ if (rc < 0) {
+ pr_err("Couldn't vote %s deglitch rc=%d\n",
+ en ? "short" : "long", rc);
+ return;
+ }
+ pr_smb(PR_STATUS, "AICL deglitch set to %s\n", en ? "short" : "long");
+
+ rc = vote(chip->hw_aicl_rerun_enable_indirect_votable,
+ VARB_WORKAROUND_VOTER, en, 0);
+ if (rc < 0) {
+ pr_err("Couldn't vote hw aicl rerun rc= %d\n", rc);
+ return;
+ }
+ chip->aicl_deglitch_short = en;
+}
+
+static void smbchg_aicl_deglitch_wa_check(struct smbchg_chip *chip)
+{
+ union power_supply_propval prop = {0,};
+ int rc;
+ bool low_volt_chgr = true;
+
+ if (!(chip->wa_flags & SMBCHG_AICL_DEGLITCH_WA))
+ return;
+
+ if (!is_usb_present(chip) && !is_dc_present(chip)) {
+ pr_smb(PR_STATUS, "Charger removed\n");
+ smbchg_aicl_deglitch_wa_en(chip, false);
+ return;
+ }
+
+ if (!chip->bms_psy)
+ return;
+
+ if (is_usb_present(chip)) {
+ if (is_hvdcp_present(chip))
+ low_volt_chgr = false;
+ } else if (is_dc_present(chip)) {
+ if (chip->dc_psy_type == POWER_SUPPLY_TYPE_WIPOWER)
+ low_volt_chgr = false;
+ else
+ low_volt_chgr = chip->low_volt_dcin;
+ }
+
+ if (!low_volt_chgr) {
+ pr_smb(PR_STATUS, "High volt charger! Don't set deglitch\n");
+ smbchg_aicl_deglitch_wa_en(chip, false);
+ return;
+ }
+
+ /* It is possible that battery voltage went high above threshold
+ * when the charger is inserted and can go low because of system
+ * load. We shouldn't be reconfiguring AICL deglitch when this
+ * happens as it will lead to oscillation again which is being
+ * fixed here. Do it once when the battery voltage crosses the
+ * threshold (e.g. 4.2 V) and clear it only when the charger
+ * is removed.
+ */
+ if (!chip->vbat_above_headroom) {
+ rc = power_supply_get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN, &prop);
+ if (rc < 0) {
+ pr_err("could not read voltage_min, rc=%d\n", rc);
+ return;
+ }
+ chip->vbat_above_headroom = !prop.intval;
+ }
+ smbchg_aicl_deglitch_wa_en(chip, chip->vbat_above_headroom);
+}
+
+#define MISC_TEST_REG 0xE2
+#define BB_LOOP_DISABLE_ICL BIT(2)
+static int smbchg_icl_loop_disable_check(struct smbchg_chip *chip)
+{
+ bool icl_disabled = !chip->chg_otg_enabled && chip->flash_triggered;
+ int rc = 0;
+
+ if ((chip->wa_flags & SMBCHG_FLASH_ICL_DISABLE_WA)
+ && icl_disabled != chip->icl_disabled) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->misc_base + MISC_TEST_REG,
+ BB_LOOP_DISABLE_ICL,
+ icl_disabled ? BB_LOOP_DISABLE_ICL : 0);
+ chip->icl_disabled = icl_disabled;
+ }
+
+ return rc;
+}
+
+#define UNKNOWN_BATT_TYPE "Unknown Battery"
+#define LOADING_BATT_TYPE "Loading Battery Data"
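+/*
+ * Apply the battery profile (float voltage, termination current and,
+ * optionally, fast charge current) whenever the fuel gauge reports a new
+ * battery type.
+ */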
+static int smbchg_config_chg_battery_type(struct smbchg_chip *chip)
+{
+ int rc = 0, max_voltage_uv = 0, fastchg_ma = 0, ret = 0, iterm_ua = 0;
+ struct device_node *batt_node, *profile_node;
+ struct device_node *node = chip->pdev->dev.of_node;
+ union power_supply_propval prop = {0,};
+
+ rc = power_supply_get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_BATTERY_TYPE, &prop);
+ if (rc) {
+ pr_smb(PR_STATUS, "Unable to read battery-type rc=%d\n", rc);
+ return 0;
+ }
+ if (!strcmp(prop.strval, UNKNOWN_BATT_TYPE) ||
+ !strcmp(prop.strval, LOADING_BATT_TYPE)) {
+ pr_smb(PR_MISC, "Battery-type not identified\n");
+ return 0;
+ }
+ /* quit if there is no change in the battery-type from previous */
+ if (chip->battery_type && !strcmp(prop.strval, chip->battery_type))
+ return 0;
+
+ chip->battery_type = prop.strval;
+ batt_node = of_parse_phandle(node, "qcom,battery-data", 0);
+ if (!batt_node) {
+ pr_smb(PR_MISC, "No batterydata available\n");
+ return 0;
+ }
+
+ rc = power_supply_get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_RESISTANCE_ID, &prop);
+ if (rc < 0) {
+ pr_smb(PR_STATUS, "Unable to read battery-id rc=%d\n", rc);
+ return 0;
+ }
+
+ profile_node = of_batterydata_get_best_profile(batt_node,
+ prop.intval / 1000, NULL);
+ if (IS_ERR_OR_NULL(profile_node)) {
+ rc = PTR_ERR(profile_node);
+ pr_err("couldn't find profile handle %d\n", rc);
+ return rc;
+ }
+
+ /* change vfloat */
+ rc = of_property_read_u32(profile_node, "qcom,max-voltage-uv",
+ &max_voltage_uv);
+ if (rc) {
+ pr_warn("couldn't find battery max voltage rc=%d\n", rc);
+ ret = rc;
+ } else {
+ if (chip->vfloat_mv != (max_voltage_uv / 1000)) {
+ pr_info("Vfloat changed from %dmV to %dmV for battery-type %s\n",
+ chip->vfloat_mv, (max_voltage_uv / 1000),
+ chip->battery_type);
+ rc = smbchg_float_voltage_set(chip,
+ (max_voltage_uv / 1000));
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set float voltage rc = %d\n", rc);
+ return rc;
+ }
+ }
+ }
+
+ /* change chg term */
+ rc = of_property_read_u32(profile_node, "qcom,chg-term-ua",
+ &iterm_ua);
+ if (rc && rc != -EINVAL) {
+ pr_warn("couldn't read battery term current=%d\n", rc);
+ ret = rc;
+ } else if (!rc) {
+ if (chip->iterm_ma != (iterm_ua / 1000)
+ && !chip->iterm_disabled) {
+ pr_info("Term current changed from %dmA to %dmA for battery-type %s\n",
+ chip->iterm_ma, (iterm_ua / 1000),
+ chip->battery_type);
+ rc = smbchg_iterm_set(chip,
+ (iterm_ua / 1000));
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set iterm rc = %d\n", rc);
+ return rc;
+ }
+ }
+ chip->iterm_ma = iterm_ua / 1000;
+ }
+
+ /*
+ * Only configure from profile if fastchg-ma is not defined in the
+ * charger device node.
+ */
+ if (!of_find_property(chip->pdev->dev.of_node,
+ "qcom,fastchg-current-ma", NULL)) {
+ rc = of_property_read_u32(profile_node,
+ "qcom,fastchg-current-ma", &fastchg_ma);
+ if (rc) {
+ ret = rc;
+ } else {
+ pr_smb(PR_MISC,
+				"fastchg-ma changed to %dma for battery-type %s\n",
+ fastchg_ma, chip->battery_type);
+ rc = vote(chip->fcc_votable, BATT_TYPE_FCC_VOTER, true,
+ fastchg_ma);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't vote for fastchg current rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+ }
+
+ return ret;
+}
+
+#define MAX_INV_BATT_ID 7700
+#define MIN_INV_BATT_ID 7300
+static void check_battery_type(struct smbchg_chip *chip)
+{
+ union power_supply_propval prop = {0,};
+ bool en;
+
+ if (!chip->bms_psy && chip->bms_psy_name)
+ chip->bms_psy =
+ power_supply_get_by_name((char *)chip->bms_psy_name);
+ if (chip->bms_psy) {
+ power_supply_get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_BATTERY_TYPE, &prop);
+ en = (strcmp(prop.strval, UNKNOWN_BATT_TYPE) != 0
+ || chip->charge_unknown_battery)
+ && (strcmp(prop.strval, LOADING_BATT_TYPE) != 0);
+ vote(chip->battchg_suspend_votable,
+ BATTCHG_UNKNOWN_BATTERY_EN_VOTER, !en, 0);
+
+ if (!chip->skip_usb_suspend_for_fake_battery) {
+ power_supply_get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_RESISTANCE_ID, &prop);
+ /* suspend USB path for invalid battery-id */
+ en = (prop.intval <= MAX_INV_BATT_ID &&
+ prop.intval >= MIN_INV_BATT_ID) ? 1 : 0;
+ vote(chip->usb_suspend_votable, FAKE_BATTERY_EN_VOTER,
+ en, 0);
+ }
+ }
+}
+
+static void smbchg_external_power_changed(struct power_supply *psy)
+{
+ struct smbchg_chip *chip = power_supply_get_drvdata(psy);
+ union power_supply_propval prop = {0,};
+ int rc, current_limit = 0, soc;
+ enum power_supply_type usb_supply_type;
+ char *usb_type_name = "null";
+
+ if (chip->bms_psy_name)
+ chip->bms_psy =
+ power_supply_get_by_name((char *)chip->bms_psy_name);
+
+ smbchg_aicl_deglitch_wa_check(chip);
+ if (chip->bms_psy) {
+ check_battery_type(chip);
+ soc = get_prop_batt_capacity(chip);
+ if (chip->previous_soc != soc) {
+ chip->previous_soc = soc;
+ smbchg_soc_changed(chip);
+ }
+
+ rc = smbchg_config_chg_battery_type(chip);
+ if (rc)
+ pr_smb(PR_MISC,
+ "Couldn't update charger configuration rc=%d\n",
+ rc);
+ }
+
+ rc = power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_CHARGING_ENABLED, &prop);
+ if (rc == 0)
+ vote(chip->usb_suspend_votable, POWER_SUPPLY_EN_VOTER,
+ !prop.intval, 0);
+
+ current_limit = chip->usb_current_max / 1000;
+
+ /* Override if type-c charger used */
+ if (chip->typec_current_ma > 500 &&
+ current_limit < chip->typec_current_ma)
+ current_limit = chip->typec_current_ma;
+
+ read_usb_type(chip, &usb_type_name, &usb_supply_type);
+
+ if (usb_supply_type != POWER_SUPPLY_TYPE_USB)
+ goto skip_current_for_non_sdp;
+
+ pr_smb(PR_MISC, "usb type = %s current_limit = %d\n",
+ usb_type_name, current_limit);
+
+ rc = vote(chip->usb_icl_votable, PSY_ICL_VOTER, true,
+ current_limit);
+ if (rc < 0)
+ pr_err("Couldn't update USB PSY ICL vote rc=%d\n", rc);
+
+skip_current_for_non_sdp:
+ smbchg_vfloat_adjust_check(chip);
+
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+}
+
+static int smbchg_otg_regulator_enable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = rdev_get_drvdata(rdev);
+
+ chip->otg_retries = 0;
+ chip->chg_otg_enabled = true;
+ smbchg_icl_loop_disable_check(chip);
+ smbchg_otg_pulse_skip_disable(chip, REASON_OTG_ENABLED, true);
+
+ /* If pin control mode then return from here */
+ if (chip->otg_pinctrl)
+ return rc;
+
+ /* sleep to make sure the pulse skip is actually disabled */
+ msleep(20);
+ rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG,
+ OTG_EN_BIT, OTG_EN_BIT);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't enable OTG mode rc=%d\n", rc);
+ else
+ chip->otg_enable_time = ktime_get();
+ pr_smb(PR_STATUS, "Enabling OTG Boost\n");
+ return rc;
+}
+
+static int smbchg_otg_regulator_disable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = rdev_get_drvdata(rdev);
+
+ if (!chip->otg_pinctrl) {
+ rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG,
+ OTG_EN_BIT, 0);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't disable OTG mode rc=%d\n",
+ rc);
+ }
+
+ chip->chg_otg_enabled = false;
+ smbchg_otg_pulse_skip_disable(chip, REASON_OTG_ENABLED, false);
+ smbchg_icl_loop_disable_check(chip);
+ pr_smb(PR_STATUS, "Disabling OTG Boost\n");
+ return rc;
+}
+
+static int smbchg_otg_regulator_is_enable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ u8 reg = 0;
+ struct smbchg_chip *chip = rdev_get_drvdata(rdev);
+
+ rc = smbchg_read(chip, &reg, chip->bat_if_base + CMD_CHG_REG, 1);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't read OTG enable bit rc=%d\n", rc);
+ return rc;
+ }
+
+ return (reg & OTG_EN_BIT) ? 1 : 0;
+}
+
+struct regulator_ops smbchg_otg_reg_ops = {
+ .enable = smbchg_otg_regulator_enable,
+ .disable = smbchg_otg_regulator_disable,
+ .is_enabled = smbchg_otg_regulator_is_enable,
+};
+
+#define USBIN_CHGR_CFG 0xF1
+#define ADAPTER_ALLOWANCE_MASK 0x7
+#define USBIN_ADAPTER_9V 0x3
+#define USBIN_ADAPTER_5V_9V_CONT 0x2
+#define USBIN_ADAPTER_5V_UNREGULATED_9V 0x5
+#define HVDCP_EN_BIT BIT(3)
+static int smbchg_external_otg_regulator_enable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = rdev_get_drvdata(rdev);
+
+ rc = vote(chip->usb_suspend_votable, OTG_EN_VOTER, true, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't suspend charger rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_read(chip, &chip->original_usbin_allowance,
+ chip->usb_chgpth_base + USBIN_CHGR_CFG, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb allowance rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * To disallow source detect and usbin_uv interrupts, set the adapter
+ * allowance to 9V, so that the audio boost operating in reverse never
+ * gets detected as a valid input
+ */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_EN_BIT, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't disable HVDCP rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + USBIN_CHGR_CFG,
+ 0xFF, USBIN_ADAPTER_9V);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't write usb allowance rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_STATUS, "Enabling OTG Boost\n");
+ return rc;
+}
+
+static int smbchg_external_otg_regulator_disable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = rdev_get_drvdata(rdev);
+
+ rc = vote(chip->usb_suspend_votable, OTG_EN_VOTER, false, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't unsuspend charger rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * Reenable HVDCP and set the adapter allowance back to the original
+ * value in order to allow normal USBs to be recognized as a valid
+ * input.
+ */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_EN_BIT, HVDCP_EN_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't enable HVDCP rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + USBIN_CHGR_CFG,
+ 0xFF, chip->original_usbin_allowance);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't write usb allowance rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_STATUS, "Disabling OTG Boost\n");
+ return rc;
+}
+
+static int smbchg_external_otg_regulator_is_enable(struct regulator_dev *rdev)
+{
+ struct smbchg_chip *chip = rdev_get_drvdata(rdev);
+
+ return get_client_vote(chip->usb_suspend_votable, OTG_EN_VOTER);
+}
+
+struct regulator_ops smbchg_external_otg_reg_ops = {
+ .enable = smbchg_external_otg_regulator_enable,
+ .disable = smbchg_external_otg_regulator_disable,
+ .is_enabled = smbchg_external_otg_regulator_is_enable,
+};
+
+static int smbchg_regulator_init(struct smbchg_chip *chip)
+{
+ int rc = 0;
+ struct regulator_config cfg = {};
+ struct device_node *regulator_node;
+
+ cfg.dev = chip->dev;
+ cfg.driver_data = chip;
+
+ chip->otg_vreg.rdesc.owner = THIS_MODULE;
+ chip->otg_vreg.rdesc.type = REGULATOR_VOLTAGE;
+ chip->otg_vreg.rdesc.ops = &smbchg_otg_reg_ops;
+ chip->otg_vreg.rdesc.of_match = "qcom,smbcharger-boost-otg";
+ chip->otg_vreg.rdesc.name = "qcom,smbcharger-boost-otg";
+
+ chip->otg_vreg.rdev = devm_regulator_register(chip->dev,
+ &chip->otg_vreg.rdesc, &cfg);
+ if (IS_ERR(chip->otg_vreg.rdev)) {
+ rc = PTR_ERR(chip->otg_vreg.rdev);
+ chip->otg_vreg.rdev = NULL;
+ if (rc != -EPROBE_DEFER)
+ dev_err(chip->dev,
+ "OTG reg failed, rc=%d\n", rc);
+ }
+ if (rc)
+ return rc;
+
+ regulator_node = of_get_child_by_name(chip->dev->of_node,
+ "qcom,smbcharger-external-otg");
+ if (!regulator_node) {
+ dev_dbg(chip->dev, "external-otg node absent\n");
+ return 0;
+ }
+
+ chip->ext_otg_vreg.rdesc.owner = THIS_MODULE;
+ chip->ext_otg_vreg.rdesc.type = REGULATOR_VOLTAGE;
+ chip->ext_otg_vreg.rdesc.ops = &smbchg_external_otg_reg_ops;
+ chip->ext_otg_vreg.rdesc.of_match = "qcom,smbcharger-external-otg";
+ chip->ext_otg_vreg.rdesc.name = "qcom,smbcharger-external-otg";
+ if (of_get_property(chip->dev->of_node, "otg-parent-supply", NULL))
+ chip->ext_otg_vreg.rdesc.supply_name = "otg-parent";
+ cfg.dev = chip->dev;
+ cfg.driver_data = chip;
+
+ chip->ext_otg_vreg.rdev = devm_regulator_register(chip->dev,
+ &chip->ext_otg_vreg.rdesc,
+ &cfg);
+ if (IS_ERR(chip->ext_otg_vreg.rdev)) {
+ rc = PTR_ERR(chip->ext_otg_vreg.rdev);
+ chip->ext_otg_vreg.rdev = NULL;
+ if (rc != -EPROBE_DEFER)
+ dev_err(chip->dev,
+ "external OTG reg failed, rc=%d\n", rc);
+ }
+
+ return rc;
+}
+
+#define CMD_CHG_LED_REG 0x43
+#define CHG_LED_CTRL_BIT BIT(0)
+#define LED_SW_CTRL_BIT 0x1
+#define LED_CHG_CTRL_BIT 0x0
+#define CHG_LED_ON 0x03
+#define CHG_LED_OFF 0x00
+#define LED_BLINKING_PATTERN1 0x01
+#define LED_BLINKING_PATTERN2 0x02
+#define LED_BLINKING_CFG_MASK SMB_MASK(2, 1)
+#define CHG_LED_SHIFT 1
+static int smbchg_chg_led_controls(struct smbchg_chip *chip)
+{
+ u8 reg, mask;
+ int rc;
+
+ if (chip->cfg_chg_led_sw_ctrl) {
+ /* turn-off LED by default for software control */
+ mask = CHG_LED_CTRL_BIT | LED_BLINKING_CFG_MASK;
+ reg = LED_SW_CTRL_BIT;
+ } else {
+ mask = CHG_LED_CTRL_BIT;
+ reg = LED_CHG_CTRL_BIT;
+ }
+
+ rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_LED_REG,
+ mask, reg);
+ if (rc < 0)
+ dev_err(chip->dev,
+ "Couldn't write LED_CTRL_BIT rc=%d\n", rc);
+ return rc;
+}
+
+static void smbchg_chg_led_brightness_set(struct led_classdev *cdev,
+ enum led_brightness value)
+{
+ struct smbchg_chip *chip = container_of(cdev,
+ struct smbchg_chip, led_cdev);
+ union power_supply_propval pval = {0, };
+ u8 reg;
+ int rc;
+
+ reg = (value > LED_OFF) ? CHG_LED_ON << CHG_LED_SHIFT :
+ CHG_LED_OFF << CHG_LED_SHIFT;
+ pval.intval = value > LED_OFF ? 1 : 0;
+ power_supply_set_property(chip->bms_psy, POWER_SUPPLY_PROP_HI_POWER,
+ &pval);
+ pr_smb(PR_STATUS,
+ "set the charger led brightness to value=%d\n",
+ value);
+ rc = smbchg_sec_masked_write(chip,
+ chip->bat_if_base + CMD_CHG_LED_REG,
+ LED_BLINKING_CFG_MASK, reg);
+ if (rc)
+ dev_err(chip->dev, "Couldn't write CHG_LED rc=%d\n",
+ rc);
+}
+
+static enum
+led_brightness smbchg_chg_led_brightness_get(struct led_classdev *cdev)
+{
+ struct smbchg_chip *chip = container_of(cdev,
+ struct smbchg_chip, led_cdev);
+ u8 reg_val, chg_led_sts;
+ int rc;
+
+ rc = smbchg_read(chip, &reg_val, chip->bat_if_base + CMD_CHG_LED_REG,
+ 1);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't read CHG_LED_REG sts rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ chg_led_sts = (reg_val & LED_BLINKING_CFG_MASK) >> CHG_LED_SHIFT;
+
+ pr_smb(PR_STATUS, "chg_led_sts = %02x\n", chg_led_sts);
+
+ return (chg_led_sts == CHG_LED_OFF) ? LED_OFF : LED_FULL;
+}
+
+static void smbchg_chg_led_blink_set(struct smbchg_chip *chip,
+ unsigned long blinking)
+{
+ union power_supply_propval pval = {0, };
+ u8 reg;
+ int rc;
+
+ pval.intval = (blinking == 0) ? 0 : 1;
+ power_supply_set_property(chip->bms_psy, POWER_SUPPLY_PROP_HI_POWER,
+ &pval);
+
+ if (blinking == 0) {
+ reg = CHG_LED_OFF << CHG_LED_SHIFT;
+ } else {
+ if (blinking == 1)
+ reg = LED_BLINKING_PATTERN1 << CHG_LED_SHIFT;
+ else if (blinking == 2)
+ reg = LED_BLINKING_PATTERN2 << CHG_LED_SHIFT;
+ else
+ reg = LED_BLINKING_PATTERN1 << CHG_LED_SHIFT;
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->bat_if_base + CMD_CHG_LED_REG,
+ LED_BLINKING_CFG_MASK, reg);
+ if (rc)
+ dev_err(chip->dev, "Couldn't write CHG_LED rc=%d\n",
+ rc);
+}
+
+static ssize_t smbchg_chg_led_blink_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct led_classdev *cdev = dev_get_drvdata(dev);
+ struct smbchg_chip *chip = container_of(cdev, struct smbchg_chip,
+ led_cdev);
+ unsigned long blinking;
+ ssize_t rc = -EINVAL;
+
+ rc = kstrtoul(buf, 10, &blinking);
+ if (rc)
+ return rc;
+
+ smbchg_chg_led_blink_set(chip, blinking);
+
+ return len;
+}
+
+static DEVICE_ATTR(blink, 0664, NULL, smbchg_chg_led_blink_store);
+
+static struct attribute *led_blink_attributes[] = {
+ &dev_attr_blink.attr,
+ NULL,
+};
+
+static struct attribute_group smbchg_led_attr_group = {
+ .attrs = led_blink_attributes
+};
+
+static int smbchg_register_chg_led(struct smbchg_chip *chip)
+{
+ int rc;
+
+ chip->led_cdev.name = "red";
+ chip->led_cdev.brightness_set = smbchg_chg_led_brightness_set;
+ chip->led_cdev.brightness_get = smbchg_chg_led_brightness_get;
+
+ rc = led_classdev_register(chip->dev, &chip->led_cdev);
+ if (rc) {
+ dev_err(chip->dev, "unable to register charger led, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = sysfs_create_group(&chip->led_cdev.dev->kobj,
+ &smbchg_led_attr_group);
+ if (rc) {
+ dev_err(chip->dev, "led sysfs rc: %d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int vf_adjust_low_threshold = 5;
+module_param(vf_adjust_low_threshold, int, 0644);
+
+static int vf_adjust_high_threshold = 7;
+module_param(vf_adjust_high_threshold, int, 0644);
+
+static int vf_adjust_n_samples = 10;
+module_param(vf_adjust_n_samples, int, 0644);
+
+static int vf_adjust_max_delta_mv = 40;
+module_param(vf_adjust_max_delta_mv, int, 0644);
+
+static int vf_adjust_trim_steps_per_adjust = 1;
+module_param(vf_adjust_trim_steps_per_adjust, int, 0644);
+
+#define CENTER_TRIM_CODE 7
+#define MAX_LIN_CODE 14
+#define MAX_TRIM_CODE 15
+#define SCALE_SHIFT 4
+#define VF_TRIM_OFFSET_MASK SMB_MASK(3, 0)
+#define VF_STEP_SIZE_MV 10
+#define SCALE_LSB_MV 17
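+/*
+ * TRIM_14 packs a 4-bit offset code (10 mV per step) in bits 3:0 and a
+ * 4-bit scale code (17 mV per step) in bits 7:4; both are first remapped
+ * to a linear 0..14 scale before stepping. Worked trace of the code below
+ * (illustrative): from trim 0x07 the offset is already at its linear
+ * maximum, so a +1 step bumps the scale code (+17 mV) and backs the offset
+ * off by one step (-10 mV), returning 0x16 for a net change of about +7 mV.
+ */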
+static int smbchg_trim_add_steps(int prev_trim, int delta_steps)
+{
+ int scale_steps;
+ int linear_offset, linear_scale;
+ int offset_code = prev_trim & VF_TRIM_OFFSET_MASK;
+ int scale_code = (prev_trim & ~VF_TRIM_OFFSET_MASK) >> SCALE_SHIFT;
+
+ if (abs(delta_steps) > 1) {
+ pr_smb(PR_STATUS,
+			"Can't trim multiple steps delta_steps = %d\n",
+ delta_steps);
+ return prev_trim;
+ }
+ if (offset_code <= CENTER_TRIM_CODE)
+ linear_offset = offset_code + CENTER_TRIM_CODE;
+ else if (offset_code > CENTER_TRIM_CODE)
+ linear_offset = MAX_TRIM_CODE - offset_code;
+
+ if (scale_code <= CENTER_TRIM_CODE)
+ linear_scale = scale_code + CENTER_TRIM_CODE;
+ else if (scale_code > CENTER_TRIM_CODE)
+ linear_scale = scale_code - (CENTER_TRIM_CODE + 1);
+
+	/* check if we can accommodate delta steps with just the offset */
+ if (linear_offset + delta_steps >= 0
+ && linear_offset + delta_steps <= MAX_LIN_CODE) {
+ linear_offset += delta_steps;
+
+ if (linear_offset > CENTER_TRIM_CODE)
+ offset_code = linear_offset - CENTER_TRIM_CODE;
+ else
+ offset_code = MAX_TRIM_CODE - linear_offset;
+
+ return (prev_trim & ~VF_TRIM_OFFSET_MASK) | offset_code;
+ }
+
+ /* changing offset cannot satisfy delta steps, change the scale bits */
+ scale_steps = delta_steps > 0 ? 1 : -1;
+
+ if (linear_scale + scale_steps < 0
+ || linear_scale + scale_steps > MAX_LIN_CODE) {
+ pr_smb(PR_STATUS,
+			"Can't trim scale_steps = %d delta_steps = %d\n",
+ scale_steps, delta_steps);
+ return prev_trim;
+ }
+
+ linear_scale += scale_steps;
+
+ if (linear_scale > CENTER_TRIM_CODE)
+ scale_code = linear_scale - CENTER_TRIM_CODE;
+ else
+ scale_code = linear_scale + (CENTER_TRIM_CODE + 1);
+ prev_trim = (prev_trim & VF_TRIM_OFFSET_MASK)
+ | scale_code << SCALE_SHIFT;
+
+ /*
+	 * now that we have changed the scale, which is a 17 mV jump, change
+	 * the offset bits (10 mV) too so the effective change is just 7 mV
+ */
+ delta_steps = -1 * delta_steps;
+
+ linear_offset = clamp(linear_offset + delta_steps, 0, MAX_LIN_CODE);
+ if (linear_offset > CENTER_TRIM_CODE)
+ offset_code = linear_offset - CENTER_TRIM_CODE;
+ else
+ offset_code = MAX_TRIM_CODE - linear_offset;
+
+ return (prev_trim & ~VF_TRIM_OFFSET_MASK) | offset_code;
+}
+
+#define TRIM_14 0xFE
+#define VF_TRIM_MASK 0xFF
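+/*
+ * Convert the requested float voltage delta into 10 mV trim steps (rounded
+ * to the nearest step) and apply them one step at a time, limited to
+ * vf_adjust_trim_steps_per_adjust steps per call.
+ */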
+static int smbchg_adjust_vfloat_mv_trim(struct smbchg_chip *chip,
+ int delta_mv)
+{
+ int sign, delta_steps, rc = 0;
+ u8 prev_trim, new_trim;
+ int i;
+
+ sign = delta_mv > 0 ? 1 : -1;
+ delta_steps = (delta_mv + sign * VF_STEP_SIZE_MV / 2)
+ / VF_STEP_SIZE_MV;
+
+ rc = smbchg_read(chip, &prev_trim, chip->misc_base + TRIM_14, 1);
+ if (rc) {
+ dev_err(chip->dev, "Unable to read trim 14: %d\n", rc);
+ return rc;
+ }
+
+ for (i = 1; i <= abs(delta_steps)
+ && i <= vf_adjust_trim_steps_per_adjust; i++) {
+ new_trim = (u8)smbchg_trim_add_steps(prev_trim,
+ delta_steps > 0 ? 1 : -1);
+ if (new_trim == prev_trim) {
+ pr_smb(PR_STATUS,
+ "VFloat trim unchanged from %02x\n", prev_trim);
+ /* treat no trim change as an error */
+ return -EINVAL;
+ }
+
+ rc = smbchg_sec_masked_write(chip, chip->misc_base + TRIM_14,
+ VF_TRIM_MASK, new_trim);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't change vfloat trim rc=%d\n", rc);
+ }
+ pr_smb(PR_STATUS,
+ "VFlt trim %02x to %02x, delta steps: %d\n",
+ prev_trim, new_trim, delta_steps);
+ prev_trim = new_trim;
+ }
+
+ return rc;
+}
+
+#define VFLOAT_RESAMPLE_DELAY_MS 10000
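+/*
+ * While in taper charge with parallel charging off, sample VBAT every
+ * VFLOAT_RESAMPLE_DELAY_MS, keep the maximum of vf_adjust_n_samples
+ * readings, and adjust the float voltage trim once that maximum deviates
+ * from the target by more than the configured thresholds.
+ */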
+static void smbchg_vfloat_adjust_work(struct work_struct *work)
+{
+ struct smbchg_chip *chip = container_of(work,
+ struct smbchg_chip,
+ vfloat_adjust_work.work);
+ int vbat_uv, vbat_mv, ibat_ua, rc, delta_vfloat_mv;
+ bool taper, enable;
+
+ smbchg_stay_awake(chip, PM_REASON_VFLOAT_ADJUST);
+ taper = (get_prop_charge_type(chip)
+ == POWER_SUPPLY_CHARGE_TYPE_TAPER);
+ enable = taper && (chip->parallel.current_max_ma == 0);
+
+ if (!enable) {
+ pr_smb(PR_MISC,
+ "Stopping vfloat adj taper=%d parallel_ma = %d\n",
+ taper, chip->parallel.current_max_ma);
+ goto stop;
+ }
+
+ if (get_prop_batt_health(chip) != POWER_SUPPLY_HEALTH_GOOD) {
+ pr_smb(PR_STATUS, "JEITA active, skipping\n");
+ goto stop;
+ }
+
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_UPDATE_NOW, 1);
+ rc = get_property_from_fg(chip,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW, &vbat_uv);
+ if (rc) {
+ pr_smb(PR_STATUS,
+ "bms psy does not support voltage rc = %d\n", rc);
+ goto stop;
+ }
+ vbat_mv = vbat_uv / 1000;
+
+ if ((vbat_mv - chip->vfloat_mv) < -1 * vf_adjust_max_delta_mv) {
+ pr_smb(PR_STATUS, "Skip vbat out of range: %d\n", vbat_mv);
+ goto reschedule;
+ }
+
+ rc = get_property_from_fg(chip,
+ POWER_SUPPLY_PROP_CURRENT_NOW, &ibat_ua);
+ if (rc) {
+ pr_smb(PR_STATUS,
+ "bms psy does not support current_now rc = %d\n", rc);
+ goto stop;
+ }
+
+ if (ibat_ua / 1000 > -chip->iterm_ma) {
+ pr_smb(PR_STATUS, "Skip ibat too high: %d\n", ibat_ua);
+ goto reschedule;
+ }
+
+ pr_smb(PR_STATUS, "sample number = %d vbat_mv = %d ibat_ua = %d\n",
+ chip->n_vbat_samples,
+ vbat_mv,
+ ibat_ua);
+
+ chip->max_vbat_sample = max(chip->max_vbat_sample, vbat_mv);
+ chip->n_vbat_samples += 1;
+ if (chip->n_vbat_samples < vf_adjust_n_samples) {
+ pr_smb(PR_STATUS, "Skip %d samples; max = %d\n",
+ chip->n_vbat_samples, chip->max_vbat_sample);
+ goto reschedule;
+ }
+ /* if max vbat > target vfloat, delta_vfloat_mv could be negative */
+ delta_vfloat_mv = chip->vfloat_mv - chip->max_vbat_sample;
+ pr_smb(PR_STATUS, "delta_vfloat_mv = %d, samples = %d, mvbat = %d\n",
+ delta_vfloat_mv, chip->n_vbat_samples, chip->max_vbat_sample);
+ /*
+	 * enough valid samples have been collected, adjust the trim codes
+ * based on maximum of collected vbat samples if necessary
+ */
+ if (delta_vfloat_mv > vf_adjust_high_threshold
+ || delta_vfloat_mv < -1 * vf_adjust_low_threshold) {
+ rc = smbchg_adjust_vfloat_mv_trim(chip, delta_vfloat_mv);
+ if (rc) {
+ pr_smb(PR_STATUS,
+ "Stopping vfloat adj after trim adj rc = %d\n",
+ rc);
+ goto stop;
+ }
+ chip->max_vbat_sample = 0;
+ chip->n_vbat_samples = 0;
+ goto reschedule;
+ }
+
+stop:
+ chip->max_vbat_sample = 0;
+ chip->n_vbat_samples = 0;
+ smbchg_relax(chip, PM_REASON_VFLOAT_ADJUST);
+ return;
+
+reschedule:
+ schedule_delayed_work(&chip->vfloat_adjust_work,
+ msecs_to_jiffies(VFLOAT_RESAMPLE_DELAY_MS));
+ return;
+}
+
+static int smbchg_charging_status_change(struct smbchg_chip *chip)
+{
+ smbchg_vfloat_adjust_check(chip);
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_STATUS,
+ get_prop_batt_status(chip));
+ return 0;
+}
+
+#define BB_CLMP_SEL 0xF8
+#define BB_CLMP_MASK SMB_MASK(1, 0)
+#define BB_CLMP_VFIX_3338MV 0x1
+#define BB_CLMP_VFIX_3512MV 0x2
+static int smbchg_set_optimal_charging_mode(struct smbchg_chip *chip, int type)
+{
+ int rc;
+ bool hvdcp2 = (type == POWER_SUPPLY_TYPE_USB_HVDCP
+ && smbchg_is_usbin_active_pwr_src(chip));
+
+ /*
+	 * Set the charger switching frequency to 1 MHz if HVDCP 2.0,
+	 * or 750 kHz otherwise.
+ */
+ rc = smbchg_sec_masked_write(chip,
+ chip->bat_if_base + BAT_IF_TRIM7_REG,
+ CFG_750KHZ_BIT, hvdcp2 ? 0 : CFG_750KHZ_BIT);
+ if (rc) {
+ dev_err(chip->dev, "Cannot set switching freq: %d\n", rc);
+ return rc;
+ }
+
+ /*
+ * Set the charger switch frequency clamp voltage threshold to 3.338V
+ * if HVDCP 2.0, or 3.512V otherwise.
+ */
+ rc = smbchg_sec_masked_write(chip, chip->bat_if_base + BB_CLMP_SEL,
+ BB_CLMP_MASK,
+ hvdcp2 ? BB_CLMP_VFIX_3338MV : BB_CLMP_VFIX_3512MV);
+ if (rc) {
+		dev_err(chip->dev, "Cannot set switching freq clamp voltage: %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+#define DEFAULT_SDP_MA 100
+#define DEFAULT_CDP_MA 1500
+static int smbchg_change_usb_supply_type(struct smbchg_chip *chip,
+ enum power_supply_type type)
+{
+ int rc, current_limit_ma;
+
+ /*
+ * if the type is not unknown, set the type before changing ICL vote
+ * in order to ensure that the correct current limit registers are
+ * used
+ */
+ if (type != POWER_SUPPLY_TYPE_UNKNOWN)
+ chip->usb_supply_type = type;
+
+	/*
+	 * Type-C only supports STD (900 mA), MEDIUM (1500 mA) and HIGH
+	 * (3000 mA) current modes, so skip all BC 1.2 current limits if an
+	 * external Type-C controller is present.
+	 * Note: SDP current is still set based on USB enumeration
+	 * notifications.
+	 */
+ if (chip->typec_psy && (type != POWER_SUPPLY_TYPE_USB))
+ current_limit_ma = chip->typec_current_ma;
+ else if (type == POWER_SUPPLY_TYPE_USB)
+ current_limit_ma = DEFAULT_SDP_MA;
+ else if (type == POWER_SUPPLY_TYPE_USB_CDP)
+ current_limit_ma = DEFAULT_CDP_MA;
+ else if (type == POWER_SUPPLY_TYPE_USB_HVDCP)
+ current_limit_ma = smbchg_default_hvdcp_icl_ma;
+ else if (type == POWER_SUPPLY_TYPE_USB_HVDCP_3)
+ current_limit_ma = smbchg_default_hvdcp3_icl_ma;
+ else
+ current_limit_ma = smbchg_default_dcp_icl_ma;
+
+ pr_smb(PR_STATUS, "Type %d: setting mA = %d\n",
+ type, current_limit_ma);
+ rc = vote(chip->usb_icl_votable, PSY_ICL_VOTER, true,
+ current_limit_ma);
+ if (rc < 0) {
+ pr_err("Couldn't vote for new USB ICL rc=%d\n", rc);
+ goto out;
+ }
+
+ /* otherwise if it is unknown, set type after the vote */
+ if (type == POWER_SUPPLY_TYPE_UNKNOWN)
+ chip->usb_supply_type = type;
+
+ if (!chip->skip_usb_notification)
+ power_supply_changed(chip->usb_psy);
+
+ /* set the correct buck switching frequency */
+ rc = smbchg_set_optimal_charging_mode(chip, type);
+ if (rc < 0)
+ pr_err("Couldn't set charger optimal mode rc=%d\n", rc);
+
+out:
+ return rc;
+}
+
+#define HVDCP_ADAPTER_SEL_MASK SMB_MASK(5, 4)
+#define HVDCP_5V 0x00
+#define HVDCP_9V 0x10
+#define USB_CMD_HVDCP_1 0x42
+#define FORCE_HVDCP_2p0 BIT(3)
+
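+/*
+ * Walk an HVDCP (QC 2.0) adapter up to 9V: request 5V first, pulse the
+ * FORCE_HVDCP_2p0 bit to force QC 2.0 mode, wait for the adapter to settle,
+ * then request 9V.  The intermediate 5V step avoids an input undervoltage
+ * while the adapter switches modes.
+ */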
+static int force_9v_hvdcp(struct smbchg_chip *chip)
+{
+ int rc;
+
+ /* Force 5V HVDCP */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_5V);
+ if (rc) {
+ pr_err("Couldn't set hvdcp config in chgpath_chg rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Force QC2.0 */
+ rc = smbchg_masked_write(chip,
+ chip->usb_chgpth_base + USB_CMD_HVDCP_1,
+ FORCE_HVDCP_2p0, FORCE_HVDCP_2p0);
+ rc |= smbchg_masked_write(chip,
+ chip->usb_chgpth_base + USB_CMD_HVDCP_1,
+ FORCE_HVDCP_2p0, 0);
+ if (rc < 0) {
+ pr_err("Couldn't force QC2.0 rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Delay to switch into HVDCP 2.0 and avoid UV */
+ msleep(500);
+
+ /* Force 9V HVDCP */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_9V);
+ if (rc)
+ pr_err("Couldn't set hvdcp config in chgpath_chg rc=%d\n", rc);
+
+ return rc;
+}
+
+static void smbchg_hvdcp_det_work(struct work_struct *work)
+{
+ struct smbchg_chip *chip = container_of(work,
+ struct smbchg_chip,
+ hvdcp_det_work.work);
+ int rc;
+
+ if (is_hvdcp_present(chip)) {
+ if (!chip->hvdcp3_supported &&
+ (chip->wa_flags & SMBCHG_HVDCP_9V_EN_WA)) {
+ /* force HVDCP 2.0 */
+ rc = force_9v_hvdcp(chip);
+ if (rc)
+				pr_err("could not force 9V HVDCP, continuing rc=%d\n",
+ rc);
+ }
+ smbchg_change_usb_supply_type(chip,
+ POWER_SUPPLY_TYPE_USB_HVDCP);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_aicl_deglitch_wa_check(chip);
+ }
+ smbchg_relax(chip, PM_DETECT_HVDCP);
+}
+
+static int set_usb_psy_dp_dm(struct smbchg_chip *chip, int state)
+{
+ int rc;
+ u8 reg;
+ union power_supply_propval pval = {0, };
+
+ /*
+	 * Ensure that we are not in the middle of an insertion where usbin_uv
+	 * is low and src_detect hasn't gone high yet. If so, force DP=F DM=F,
+	 * which guarantees proper type detection.
+ */
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RT_STS, 1);
+ if (!rc && !(reg & USBIN_UV_BIT) && !(reg & USBIN_SRC_DET_BIT)) {
+ pr_smb(PR_MISC, "overwriting state = %d with %d\n",
+ state, POWER_SUPPLY_DP_DM_DPF_DMF);
+ if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg))
+ return regulator_enable(chip->dpdm_reg);
+ }
+ pr_smb(PR_MISC, "setting usb psy dp dm = %d\n", state);
+ pval.intval = state;
+ return power_supply_set_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_DP_DM, &pval);
+}
+
+#define APSD_CFG 0xF5
+#define AUTO_SRC_DETECT_EN_BIT BIT(0)
+#define APSD_TIMEOUT_MS 1500
+static void restore_from_hvdcp_detection(struct smbchg_chip *chip)
+{
+ int rc;
+
+ pr_smb(PR_MISC, "Retracting HVDCP vote for ICL\n");
+ rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't retract HVDCP ICL vote rc=%d\n", rc);
+
+ /* switch to 9V HVDCP */
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_9V);
+ if (rc < 0)
+ pr_err("Couldn't configure HVDCP 9V rc=%d\n", rc);
+
+ /* enable HVDCP */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_EN_BIT, HVDCP_EN_BIT);
+ if (rc < 0)
+ pr_err("Couldn't enable HVDCP rc=%d\n", rc);
+
+ /* enable APSD */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + APSD_CFG,
+ AUTO_SRC_DETECT_EN_BIT, AUTO_SRC_DETECT_EN_BIT);
+ if (rc < 0)
+ pr_err("Couldn't enable APSD rc=%d\n", rc);
+
+ /* Reset back to 5V unregulated */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + USBIN_CHGR_CFG,
+ ADAPTER_ALLOWANCE_MASK, USBIN_ADAPTER_5V_UNREGULATED_9V);
+ if (rc < 0)
+ pr_err("Couldn't write usb allowance rc=%d\n", rc);
+
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, AICL_EN_BIT);
+ if (rc < 0)
+ pr_err("Couldn't enable AICL rc=%d\n", rc);
+
+ chip->hvdcp_3_det_ignore_uv = false;
+ chip->pulse_cnt = 0;
+}
+
+#define RESTRICTED_CHG_FCC_PERCENT 50
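+/*
+ * Worked example: with cfg_fastchg_current_ma = 3000 and
+ * RESTRICTED_CHG_FCC_PERCENT = 50 the target is 1500 mA, so the FCC vote
+ * below uses the usb_ilim_ma_table set point closest to 1500 mA without
+ * exceeding it.
+ */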
+static int smbchg_restricted_charging(struct smbchg_chip *chip, bool enable)
+{
+ int current_table_index, fastchg_current;
+ int rc = 0;
+
+	/*
+	 * If enabled, set the FCC to the set point closest to 50% of the
+	 * configured FCC while remaining below it.
+	 */
+ current_table_index = find_smaller_in_array(
+ chip->tables.usb_ilim_ma_table,
+ chip->cfg_fastchg_current_ma
+ * RESTRICTED_CHG_FCC_PERCENT / 100,
+ chip->tables.usb_ilim_ma_len);
+ fastchg_current =
+ chip->tables.usb_ilim_ma_table[current_table_index];
+ rc = vote(chip->fcc_votable, RESTRICTED_CHG_FCC_VOTER, enable,
+ fastchg_current);
+
+ pr_smb(PR_STATUS, "restricted_charging set to %d\n", enable);
+ chip->restricted_charging = enable;
+
+ return rc;
+}
+
+static void handle_usb_removal(struct smbchg_chip *chip)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval pval = {0, };
+ int rc;
+
+ pr_smb(PR_STATUS, "triggered\n");
+ smbchg_aicl_deglitch_wa_check(chip);
+ /* Clear the OV detected status set before */
+ if (chip->usb_ov_det)
+ chip->usb_ov_det = false;
+ /* Clear typec current status */
+ if (chip->typec_psy)
+ chip->typec_current_ma = 0;
+ smbchg_change_usb_supply_type(chip, POWER_SUPPLY_TYPE_UNKNOWN);
+ extcon_set_cable_state_(chip->extcon, EXTCON_USB, chip->usb_present);
+ if (chip->dpdm_reg)
+ regulator_disable(chip->dpdm_reg);
+ schedule_work(&chip->usb_set_online_work);
+
+ pr_smb(PR_MISC, "setting usb psy health UNKNOWN\n");
+ chip->usb_health = POWER_SUPPLY_HEALTH_UNKNOWN;
+ power_supply_changed(chip->usb_psy);
+
+ if (parallel_psy && chip->parallel_charger_detected) {
+ pval.intval = false;
+ power_supply_set_property(parallel_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ }
+ if (chip->parallel.avail && chip->aicl_done_irq
+ && chip->enable_aicl_wake) {
+ disable_irq_wake(chip->aicl_done_irq);
+ chip->enable_aicl_wake = false;
+ }
+ chip->parallel.enabled_once = false;
+ chip->vbat_above_headroom = false;
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ ICL_OVERRIDE_BIT, 0);
+ if (rc < 0)
+ pr_err("Couldn't set override rc = %d\n", rc);
+
+ vote(chip->usb_icl_votable, WEAK_CHARGER_ICL_VOTER, false, 0);
+ chip->usb_icl_delta = 0;
+ vote(chip->usb_icl_votable, SW_AICL_ICL_VOTER, false, 0);
+ vote(chip->aicl_deglitch_short_votable,
+ HVDCP_SHORT_DEGLITCH_VOTER, false, 0);
+ if (!chip->hvdcp_not_supported)
+ restore_from_hvdcp_detection(chip);
+}
+
+static bool is_usbin_uv_high(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb rt status rc = %d\n", rc);
+ return false;
+ }
+	return !!(reg & USBIN_UV_BIT);
+}
+
+#define HVDCP_NOTIFY_MS 2500
+static void handle_usb_insertion(struct smbchg_chip *chip)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval pval = {0, };
+ enum power_supply_type usb_supply_type;
+ int rc;
+ char *usb_type_name = "null";
+
+ pr_smb(PR_STATUS, "triggered\n");
+ /* usb inserted */
+ read_usb_type(chip, &usb_type_name, &usb_supply_type);
+	pr_smb(PR_STATUS,
+		"inserted type = %d (%s)\n", usb_supply_type, usb_type_name);
+
+ smbchg_aicl_deglitch_wa_check(chip);
+ if (chip->typec_psy)
+ update_typec_status(chip);
+ smbchg_change_usb_supply_type(chip, usb_supply_type);
+
+ /* Only notify USB if it's not a charger */
+ if (usb_supply_type == POWER_SUPPLY_TYPE_USB ||
+ usb_supply_type == POWER_SUPPLY_TYPE_USB_CDP)
+ extcon_set_cable_state_(chip->extcon, EXTCON_USB,
+ chip->usb_present);
+
+ /* Notify the USB psy if OV condition is not present */
+ if (!chip->usb_ov_det) {
+ /*
+		 * Note that this could still be a very weak charger if
+		 * handle_usb_insertion() was triggered from the falling
+		 * edge of a USBIN_OV interrupt
+ */
+ pr_smb(PR_MISC, "setting usb psy health %s\n",
+ chip->very_weak_charger
+ ? "UNSPEC_FAILURE" : "GOOD");
+ chip->usb_health = chip->very_weak_charger
+ ? POWER_SUPPLY_HEALTH_UNSPEC_FAILURE
+ : POWER_SUPPLY_HEALTH_GOOD;
+ power_supply_changed(chip->usb_psy);
+ }
+ schedule_work(&chip->usb_set_online_work);
+
+ if (!chip->hvdcp_not_supported &&
+ (usb_supply_type == POWER_SUPPLY_TYPE_USB_DCP)) {
+ cancel_delayed_work_sync(&chip->hvdcp_det_work);
+ smbchg_stay_awake(chip, PM_DETECT_HVDCP);
+ schedule_delayed_work(&chip->hvdcp_det_work,
+ msecs_to_jiffies(HVDCP_NOTIFY_MS));
+ }
+
+ if (parallel_psy) {
+ pval.intval = true;
+ rc = power_supply_set_property(parallel_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ chip->parallel_charger_detected = rc ? false : true;
+ if (rc)
+ pr_debug("parallel-charger absent rc=%d\n", rc);
+ }
+
+ if (chip->parallel.avail && chip->aicl_done_irq
+ && !chip->enable_aicl_wake) {
+ rc = enable_irq_wake(chip->aicl_done_irq);
+ chip->enable_aicl_wake = true;
+ }
+}
+
+void update_usb_status(struct smbchg_chip *chip, bool usb_present, bool force)
+{
+ mutex_lock(&chip->usb_status_lock);
+ if (force) {
+ chip->usb_present = usb_present;
+ chip->usb_present ? handle_usb_insertion(chip)
+ : handle_usb_removal(chip);
+ goto unlock;
+ }
+ if (!chip->usb_present && usb_present) {
+ chip->usb_present = usb_present;
+ handle_usb_insertion(chip);
+ } else if (chip->usb_present && !usb_present) {
+ chip->usb_present = usb_present;
+ handle_usb_removal(chip);
+ }
+
+ /* update FG */
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_STATUS,
+ get_prop_batt_status(chip));
+unlock:
+ mutex_unlock(&chip->usb_status_lock);
+}
+
+static int otg_oc_reset(struct smbchg_chip *chip)
+{
+ int rc;
+
+ rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG,
+ OTG_EN_BIT, 0);
+ if (rc)
+ pr_err("Failed to disable OTG rc=%d\n", rc);
+
+ msleep(20);
+
+ /*
+	 * There is a possibility that a USBID interrupt might have
+	 * occurred, notifying the USB power supply to disable OTG. We
+	 * should not enable OTG in such cases.
+ */
+ if (!is_otg_present(chip)) {
+ pr_smb(PR_STATUS,
+ "OTG is not present, not enabling OTG_EN_BIT\n");
+ goto out;
+ }
+
+ rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG,
+ OTG_EN_BIT, OTG_EN_BIT);
+ if (rc)
+ pr_err("Failed to re-enable OTG rc=%d\n", rc);
+
+out:
+ return rc;
+}
+
+static int get_current_time(unsigned long *now_tm_sec)
+{
+ struct rtc_time tm;
+ struct rtc_device *rtc;
+ int rc;
+
+ rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
+ if (rtc == NULL) {
+ pr_err("%s: unable to open rtc device (%s)\n",
+ __FILE__, CONFIG_RTC_HCTOSYS_DEVICE);
+ return -EINVAL;
+ }
+
+ rc = rtc_read_time(rtc, &tm);
+ if (rc) {
+ pr_err("Error reading rtc device (%s) : %d\n",
+ CONFIG_RTC_HCTOSYS_DEVICE, rc);
+ goto close_time;
+ }
+
+ rc = rtc_valid_tm(&tm);
+ if (rc) {
+ pr_err("Invalid RTC time (%s): %d\n",
+ CONFIG_RTC_HCTOSYS_DEVICE, rc);
+ goto close_time;
+ }
+ rtc_tm_to_time(&tm, now_tm_sec);
+
+close_time:
+ rtc_class_close(rtc);
+ return rc;
+}
+
+#define AICL_IRQ_LIMIT_SECONDS 60
+#define AICL_IRQ_LIMIT_COUNT 25
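+/*
+ * Rate-limit AICL interrupts: if more than AICL_IRQ_LIMIT_COUNT interrupts
+ * (twice that when parallel charging is available) arrive within
+ * AICL_IRQ_LIMIT_SECONDS, treat the adapter as a very weak charger, disable
+ * hardware AICL reruns and fall back to a 100 mA input current limit.
+ */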
+static void increment_aicl_count(struct smbchg_chip *chip)
+{
+ bool bad_charger = false;
+ int max_aicl_count, rc;
+ u8 reg;
+ long elapsed_seconds;
+ unsigned long now_seconds;
+
+ pr_smb(PR_INTERRUPT, "aicl count c:%d dgltch:%d first:%ld\n",
+ chip->aicl_irq_count, chip->aicl_deglitch_short,
+ chip->first_aicl_seconds);
+
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + ICL_STS_1_REG, 1);
+ if (!rc)
+ chip->aicl_complete = reg & AICL_STS_BIT;
+ else
+ chip->aicl_complete = false;
+
+ if (chip->aicl_deglitch_short || chip->force_aicl_rerun) {
+ if (!chip->aicl_irq_count)
+ get_current_time(&chip->first_aicl_seconds);
+ get_current_time(&now_seconds);
+ elapsed_seconds = now_seconds
+ - chip->first_aicl_seconds;
+
+ if (elapsed_seconds > AICL_IRQ_LIMIT_SECONDS) {
+ pr_smb(PR_INTERRUPT,
+ "resetting: elp:%ld first:%ld now:%ld c=%d\n",
+ elapsed_seconds, chip->first_aicl_seconds,
+ now_seconds, chip->aicl_irq_count);
+ chip->aicl_irq_count = 1;
+ get_current_time(&chip->first_aicl_seconds);
+ return;
+ }
+ /*
+ * Double the amount of AICLs allowed if parallel charging is
+ * enabled.
+ */
+ max_aicl_count = AICL_IRQ_LIMIT_COUNT
+ * (chip->parallel.avail ? 2 : 1);
+ chip->aicl_irq_count++;
+
+ if (chip->aicl_irq_count > max_aicl_count) {
+ pr_smb(PR_INTERRUPT, "elp:%ld first:%ld now:%ld c=%d\n",
+ elapsed_seconds, chip->first_aicl_seconds,
+ now_seconds, chip->aicl_irq_count);
+ pr_smb(PR_INTERRUPT, "Disable AICL rerun\n");
+ chip->very_weak_charger = true;
+ bad_charger = true;
+
+ /*
+ * Disable AICL rerun since many interrupts were
+ * triggered in a short time
+ */
+ /* disable hw aicl */
+ rc = vote(chip->hw_aicl_rerun_disable_votable,
+ WEAK_CHARGER_HW_AICL_VOTER, true, 0);
+ if (rc < 0) {
+ pr_err("Couldn't disable hw aicl rerun rc=%d\n",
+ rc);
+ return;
+ }
+
+ /* Vote 100mA current limit */
+ rc = vote(chip->usb_icl_votable, WEAK_CHARGER_ICL_VOTER,
+ true, CURRENT_100_MA);
+ if (rc < 0) {
+ pr_err("Can't vote %d current limit rc=%d\n",
+ CURRENT_100_MA, rc);
+ }
+
+ chip->aicl_irq_count = 0;
+ } else if ((get_prop_charge_type(chip) ==
+ POWER_SUPPLY_CHARGE_TYPE_FAST) &&
+ (reg & AICL_SUSP_BIT)) {
+ /*
+ * If the AICL_SUSP_BIT is on, then AICL reruns have
+ * already been disabled. Set the very weak charger
+ * flag so that the driver reports a bad charger
+ * and does not reenable AICL reruns.
+ */
+ chip->very_weak_charger = true;
+ bad_charger = true;
+ }
+ if (bad_charger) {
+ pr_smb(PR_MISC,
+ "setting usb psy health UNSPEC_FAILURE\n");
+ chip->usb_health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ power_supply_changed(chip->usb_psy);
+ schedule_work(&chip->usb_set_online_work);
+ }
+ }
+}
+
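+/*
+ * Wait for the usbin_uv completion in the requested direction to fire, or
+ * for APSD_TIMEOUT_MS to elapse (retried up to three times if the wait is
+ * interrupted).  Returns 0 only if the real-time UV status then matches the
+ * requested state.
+ */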
+static int wait_for_usbin_uv(struct smbchg_chip *chip, bool high)
+{
+ int rc;
+ int tries = 3;
+ struct completion *completion = &chip->usbin_uv_lowered;
+ bool usbin_uv;
+
+ if (high)
+ completion = &chip->usbin_uv_raised;
+
+ while (tries--) {
+ rc = wait_for_completion_interruptible_timeout(
+ completion,
+ msecs_to_jiffies(APSD_TIMEOUT_MS));
+ if (rc >= 0)
+ break;
+ }
+
+ usbin_uv = is_usbin_uv_high(chip);
+
+ if (high == usbin_uv)
+ return 0;
+
+	pr_err("usbin uv didn't go to a %s state, still at %s, tries = %d, rc = %d\n",
+ high ? "risen" : "lowered",
+ usbin_uv ? "high" : "low",
+ tries, rc);
+ return -EINVAL;
+}
+
+static int wait_for_src_detect(struct smbchg_chip *chip, bool high)
+{
+ int rc;
+ int tries = 3;
+ struct completion *completion = &chip->src_det_lowered;
+ bool src_detect;
+
+ if (high)
+ completion = &chip->src_det_raised;
+
+ while (tries--) {
+ rc = wait_for_completion_interruptible_timeout(
+ completion,
+ msecs_to_jiffies(APSD_TIMEOUT_MS));
+ if (rc >= 0)
+ break;
+ }
+
+ src_detect = is_src_detect_high(chip);
+
+ if (high == src_detect)
+ return 0;
+
+	pr_err("src detect didn't go to a %s state, still at %s, tries = %d, rc = %d\n",
+ high ? "risen" : "lowered",
+ src_detect ? "high" : "low",
+ tries, rc);
+ return -EINVAL;
+}
+
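+/*
+ * Fake a cable removal or insertion without physically toggling VBUS by
+ * flipping the USBIN adapter allowance: restricting the input to 9V-only
+ * makes a 5V source look removed (usbin_uv rises, src_detect falls), while
+ * re-allowing 5V-9V makes it look inserted again.  The completions signalled
+ * by the UV/src_detect interrupt handlers are used to wait for each
+ * transition.
+ */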
+static int fake_insertion_removal(struct smbchg_chip *chip, bool insertion)
+{
+ int rc;
+ bool src_detect;
+ bool usbin_uv;
+
+ if (insertion) {
+ reinit_completion(&chip->src_det_raised);
+ reinit_completion(&chip->usbin_uv_lowered);
+ } else {
+ reinit_completion(&chip->src_det_lowered);
+ reinit_completion(&chip->usbin_uv_raised);
+ }
+
+ /* ensure that usbin uv real time status is in the right state */
+ usbin_uv = is_usbin_uv_high(chip);
+ if (usbin_uv != insertion) {
+ pr_err("Skip faking, usbin uv is already %d\n", usbin_uv);
+ return -EINVAL;
+ }
+
+ /* ensure that src_detect real time status is in the right state */
+ src_detect = is_src_detect_high(chip);
+ if (src_detect == insertion) {
+ pr_err("Skip faking, src detect is already %d\n", src_detect);
+ return -EINVAL;
+ }
+
+ pr_smb(PR_MISC, "Allow only %s charger\n",
+ insertion ? "5-9V" : "9V only");
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + USBIN_CHGR_CFG,
+ ADAPTER_ALLOWANCE_MASK,
+ insertion ?
+ USBIN_ADAPTER_5V_9V_CONT : USBIN_ADAPTER_9V);
+ if (rc < 0) {
+ pr_err("Couldn't write usb allowance rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Waiting on %s usbin uv\n",
+ insertion ? "falling" : "rising");
+ rc = wait_for_usbin_uv(chip, !insertion);
+ if (rc < 0) {
+ pr_err("wait for usbin uv failed rc = %d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Waiting on %s src det\n",
+ insertion ? "rising" : "falling");
+ rc = wait_for_src_detect(chip, insertion);
+ if (rc < 0) {
+ pr_err("wait for src detect failed rc = %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
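+/*
+ * Prepare a QC 3.0 capable adapter for DP/DM pulsing: drop the adapter to
+ * 5V, cap the input current, disable HVDCP and APSD detection, and fake a
+ * removal/insertion cycle so the charger re-latches onto the adapter while
+ * it is held at 5V.  hvdcp_3_det_ignore_uv is set around the fake cycle so
+ * the UV/src_detect handlers do not treat it as a real cable event.
+ */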
+static int smbchg_prepare_for_pulsing(struct smbchg_chip *chip)
+{
+ int rc = 0;
+ u8 reg;
+
+ /* switch to 5V HVDCP */
+ pr_smb(PR_MISC, "Switch to 5V HVDCP\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_5V);
+ if (rc < 0) {
+ pr_err("Couldn't configure HVDCP 5V rc=%d\n", rc);
+ goto out;
+ }
+
+ /* wait for HVDCP to lower to 5V */
+ msleep(500);
+ /*
+	 * Check that the same hvdcp session is still in progress: src_det
+	 * should be high and we should still be in 5V hvdcp.
+ */
+ if (!is_src_detect_high(chip)) {
+		pr_smb(PR_MISC, "src det low after 500 ms sleep\n");
+ goto out;
+ }
+
+ /* disable HVDCP */
+ pr_smb(PR_MISC, "Disable HVDCP\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_EN_BIT, 0);
+ if (rc < 0) {
+ pr_err("Couldn't disable HVDCP rc=%d\n", rc);
+ goto out;
+ }
+
+ pr_smb(PR_MISC, "HVDCP voting for 300mA ICL\n");
+ rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, true, 300);
+ if (rc < 0) {
+ pr_err("Couldn't vote for 300mA HVDCP ICL rc=%d\n", rc);
+ goto out;
+ }
+
+ pr_smb(PR_MISC, "Disable AICL\n");
+ smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, 0);
+
+ chip->hvdcp_3_det_ignore_uv = true;
+ /* fake a removal */
+ pr_smb(PR_MISC, "Faking Removal\n");
+ rc = fake_insertion_removal(chip, false);
+ if (rc < 0) {
+ pr_err("Couldn't fake removal HVDCP Removed rc=%d\n", rc);
+ goto handle_removal;
+ }
+
+ /* disable APSD */
+ pr_smb(PR_MISC, "Disabling APSD\n");
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + APSD_CFG,
+ AUTO_SRC_DETECT_EN_BIT, 0);
+ if (rc < 0) {
+ pr_err("Couldn't disable APSD rc=%d\n", rc);
+ goto out;
+ }
+
+ /* fake an insertion */
+ pr_smb(PR_MISC, "Faking Insertion\n");
+ rc = fake_insertion_removal(chip, true);
+ if (rc < 0) {
+ pr_err("Couldn't fake insertion rc=%d\n", rc);
+ goto handle_removal;
+ }
+ chip->hvdcp_3_det_ignore_uv = false;
+
+ pr_smb(PR_MISC, "Enable AICL\n");
+ smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, AICL_EN_BIT);
+
+ set_usb_psy_dp_dm(chip, POWER_SUPPLY_DP_DM_DP0P6_DMF);
+ /*
+ * DCP will switch to HVDCP in this time by removing the short
+ * between DP DM
+ */
+ msleep(HVDCP_NOTIFY_MS);
+ /*
+ * Check if the same hvdcp session is in progress. src_det should be
+ * high and the usb type should be none since APSD was disabled
+ */
+ if (!is_src_detect_high(chip)) {
+ pr_smb(PR_MISC, "src det low after 2s sleep\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ smbchg_read(chip, &reg, chip->misc_base + IDEV_STS, 1);
+ if ((reg >> TYPE_BITS_OFFSET) != 0) {
+ pr_smb(PR_MISC, "type bits set after 2s sleep - abort\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ set_usb_psy_dp_dm(chip, POWER_SUPPLY_DP_DM_DP0P6_DM3P3);
+	/* Wait 60 ms after entering continuous mode */
+ msleep(60);
+
+ return 0;
+out:
+ chip->hvdcp_3_det_ignore_uv = false;
+ restore_from_hvdcp_detection(chip);
+ return rc;
+handle_removal:
+ chip->hvdcp_3_det_ignore_uv = false;
+ update_usb_status(chip, 0, 0);
+ return rc;
+}
+
+static int smbchg_unprepare_for_pulsing(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg))
+ rc = regulator_enable(chip->dpdm_reg);
+ if (rc < 0) {
+ pr_err("Couldn't enable DP/DM for pulsing rc=%d\n", rc);
+ return rc;
+ }
+
+ /* switch to 9V HVDCP */
+ pr_smb(PR_MISC, "Switch to 9V HVDCP\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_9V);
+ if (rc < 0) {
+ pr_err("Couldn't configure HVDCP 9V rc=%d\n", rc);
+ return rc;
+ }
+
+ /* enable HVDCP */
+ pr_smb(PR_MISC, "Enable HVDCP\n");
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_EN_BIT, HVDCP_EN_BIT);
+ if (rc < 0) {
+ pr_err("Couldn't enable HVDCP rc=%d\n", rc);
+ return rc;
+ }
+
+ /* enable APSD */
+ pr_smb(PR_MISC, "Enabling APSD\n");
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + APSD_CFG,
+ AUTO_SRC_DETECT_EN_BIT, AUTO_SRC_DETECT_EN_BIT);
+ if (rc < 0) {
+ pr_err("Couldn't enable APSD rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Disable AICL */
+ pr_smb(PR_MISC, "Disable AICL\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, 0);
+ if (rc < 0) {
+ pr_err("Couldn't disable AICL rc=%d\n", rc);
+ return rc;
+ }
+
+ /* fake a removal */
+ chip->hvdcp_3_det_ignore_uv = true;
+ pr_smb(PR_MISC, "Faking Removal\n");
+ rc = fake_insertion_removal(chip, false);
+ if (rc < 0) {
+ pr_err("Couldn't fake removal rc=%d\n", rc);
+ goto out;
+ }
+
+ /*
+ * reset the enabled once flag for parallel charging so
+ * parallel charging can immediately restart after the HVDCP pulsing
+ * is complete
+ */
+ chip->parallel.enabled_once = false;
+
+ /* fake an insertion */
+ pr_smb(PR_MISC, "Faking Insertion\n");
+ rc = fake_insertion_removal(chip, true);
+ if (rc < 0) {
+ pr_err("Couldn't fake insertion rc=%d\n", rc);
+ goto out;
+ }
+ chip->hvdcp_3_det_ignore_uv = false;
+
+ /* Enable AICL */
+ pr_smb(PR_MISC, "Enable AICL\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+				AICL_EN_BIT, AICL_EN_BIT);
+ if (rc < 0) {
+ pr_err("Couldn't enable AICL rc=%d\n", rc);
+ return rc;
+ }
+
+out:
+	/*
+	 * Many QC 2.0 chargers collapse before the AICL deglitch timer can
+	 * react, so shorten the AICL deglitch period.
+	 */
+ rc = vote(chip->aicl_deglitch_short_votable,
+ HVDCP_SHORT_DEGLITCH_VOTER, true, 0);
+ if (rc < 0)
+ pr_err("Couldn't reduce aicl deglitch rc=%d\n", rc);
+
+ pr_smb(PR_MISC, "Retracting HVDCP vote for ICL\n");
+ rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't retract HVDCP ICL vote rc=%d\n", rc);
+
+ chip->hvdcp_3_det_ignore_uv = false;
+ if (!is_src_detect_high(chip)) {
+ pr_smb(PR_MISC, "HVDCP removed\n");
+ update_usb_status(chip, 0, 0);
+ }
+ return rc;
+}
+
+#define USB_CMD_APSD 0x41
+#define APSD_RERUN BIT(0)
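+/*
+ * Re-run automatic power source detection (APSD) and wait for the full
+ * detach/attach sequence it generates: usbin_uv rising, src_detect falling,
+ * usbin_uv falling and finally src_detect rising again.
+ */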
+static int rerun_apsd(struct smbchg_chip *chip)
+{
+ int rc;
+
+ reinit_completion(&chip->src_det_raised);
+ reinit_completion(&chip->usbin_uv_lowered);
+ reinit_completion(&chip->src_det_lowered);
+ reinit_completion(&chip->usbin_uv_raised);
+
+ /* re-run APSD */
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + USB_CMD_APSD,
+ APSD_RERUN, APSD_RERUN);
+ if (rc) {
+ pr_err("Couldn't re-run APSD rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Waiting on rising usbin uv\n");
+ rc = wait_for_usbin_uv(chip, true);
+ if (rc < 0) {
+ pr_err("wait for usbin uv failed rc = %d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Waiting on falling src det\n");
+ rc = wait_for_src_detect(chip, false);
+ if (rc < 0) {
+ pr_err("wait for src detect failed rc = %d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Waiting on falling usbin uv\n");
+ rc = wait_for_usbin_uv(chip, false);
+ if (rc < 0) {
+ pr_err("wait for usbin uv failed rc = %d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Waiting on rising src det\n");
+ rc = wait_for_src_detect(chip, true);
+ if (rc < 0) {
+ pr_err("wait for src detect failed rc = %d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+#define SCHG_LITE_USBIN_HVDCP_5_9V 0x8
+#define SCHG_LITE_USBIN_HVDCP_5_9V_SEL_MASK 0x38
+#define SCHG_LITE_USBIN_HVDCP_SEL_IDLE BIT(3)
+static bool is_hvdcp_5v_cont_mode(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg = 0;
+
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + USBIN_HVDCP_STS, 1);
+ if (rc) {
+ pr_err("Unable to read HVDCP status rc=%d\n", rc);
+ return false;
+ }
+
+ pr_smb(PR_STATUS, "HVDCP status = %x\n", reg);
+
+ if (reg & SCHG_LITE_USBIN_HVDCP_SEL_IDLE) {
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + INPUT_STS, 1);
+ if (rc) {
+ pr_err("Unable to read INPUT status rc=%d\n", rc);
+ return false;
+ }
+ pr_smb(PR_STATUS, "INPUT status = %x\n", reg);
+ if ((reg & SCHG_LITE_USBIN_HVDCP_5_9V_SEL_MASK) ==
+ SCHG_LITE_USBIN_HVDCP_5_9V)
+ return true;
+ }
+ return false;
+}
+
+static int smbchg_prepare_for_pulsing_lite(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ /* check if HVDCP is already in 5V continuous mode */
+ if (is_hvdcp_5v_cont_mode(chip)) {
+ pr_smb(PR_MISC, "HVDCP by default is in 5V continuous mode\n");
+ return 0;
+ }
+
+ /* switch to 5V HVDCP */
+ pr_smb(PR_MISC, "Switch to 5V HVDCP\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_5V);
+ if (rc < 0) {
+ pr_err("Couldn't configure HVDCP 5V rc=%d\n", rc);
+ goto out;
+ }
+
+ /* wait for HVDCP to lower to 5V */
+ msleep(500);
+ /*
+	 * Check that the same hvdcp session is still in progress: src_det
+	 * should be high and we should still be in 5V hvdcp.
+ */
+ if (!is_src_detect_high(chip)) {
+		pr_smb(PR_MISC, "src det low after 500 ms sleep\n");
+ goto out;
+ }
+
+ pr_smb(PR_MISC, "HVDCP voting for 300mA ICL\n");
+ rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, true, 300);
+ if (rc < 0) {
+ pr_err("Couldn't vote for 300mA HVDCP ICL rc=%d\n", rc);
+ goto out;
+ }
+
+ pr_smb(PR_MISC, "Disable AICL\n");
+ smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, 0);
+
+ chip->hvdcp_3_det_ignore_uv = true;
+
+ /* re-run APSD */
+ rc = rerun_apsd(chip);
+ if (rc) {
+ pr_err("APSD rerun failed\n");
+ goto out;
+ }
+
+ chip->hvdcp_3_det_ignore_uv = false;
+
+ pr_smb(PR_MISC, "Enable AICL\n");
+ smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, AICL_EN_BIT);
+ /*
+ * DCP will switch to HVDCP in this time by removing the short
+ * between DP DM
+ */
+ msleep(HVDCP_NOTIFY_MS);
+ /*
+ * Check if the same hvdcp session is in progress. src_det should be
+ * high and the usb type should be none since APSD was disabled
+ */
+ if (!is_src_detect_high(chip)) {
+ pr_smb(PR_MISC, "src det low after 2s sleep\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+	/* We are done if HVDCP is now in 5V continuous mode */
+	if (!is_hvdcp_5v_cont_mode(chip)) {
+		pr_err("HVDCP could not be set in 5V continuous mode\n");
+		rc = -EINVAL;
+		goto out;
+ }
+
+ return 0;
+out:
+ chip->hvdcp_3_det_ignore_uv = false;
+ restore_from_hvdcp_detection(chip);
+ return rc;
+}
+
+static int smbchg_unprepare_for_pulsing_lite(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ pr_smb(PR_MISC, "Forcing 9V HVDCP 2.0\n");
+ rc = force_9v_hvdcp(chip);
+ if (rc) {
+ pr_err("Failed to force 9V HVDCP=%d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Retracting HVDCP vote for ICL\n");
+ rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't retract HVDCP ICL vote rc=%d\n", rc);
+
+ return rc;
+}
+
+#define CMD_HVDCP_2 0x43
+#define SINGLE_INCREMENT BIT(0)
+#define SINGLE_DECREMENT BIT(1)
+static int smbchg_dp_pulse_lite(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ pr_smb(PR_MISC, "Increment DP\n");
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_HVDCP_2,
+ SINGLE_INCREMENT, SINGLE_INCREMENT);
+ if (rc)
+ pr_err("Single-increment failed rc=%d\n", rc);
+
+ return rc;
+}
+
+static int smbchg_dm_pulse_lite(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ pr_smb(PR_MISC, "Decrement DM\n");
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_HVDCP_2,
+ SINGLE_DECREMENT, SINGLE_DECREMENT);
+ if (rc)
+ pr_err("Single-decrement failed rc=%d\n", rc);
+
+ return rc;
+}
+
+static int smbchg_hvdcp3_confirmed(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ /*
+ * reset the enabled once flag for parallel charging because this is
+ * effectively a new insertion.
+ */
+ chip->parallel.enabled_once = false;
+
+ pr_smb(PR_MISC, "Retracting HVDCP vote for ICL\n");
+ rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't retract HVDCP ICL vote rc=%d\n", rc);
+
+ smbchg_change_usb_supply_type(chip, POWER_SUPPLY_TYPE_USB_HVDCP_3);
+
+ return rc;
+}
+
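+/*
+ * QC 3.0 DP/DM control requested through the POWER_SUPPLY_PROP_DP_DM
+ * property: PREPARE/UNPREPARE bracket a pulsing session, DP_PULSE/DM_PULSE
+ * step the adapter voltage up/down by one increment (tracked in pulse_cnt),
+ * and ICL_UP/ICL_DOWN adjust the software AICL vote in 100 mA steps.
+ */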
+static int smbchg_dp_dm(struct smbchg_chip *chip, int val)
+{
+ int rc = 0;
+ int target_icl_vote_ma;
+
+ switch (val) {
+ case POWER_SUPPLY_DP_DM_PREPARE:
+ if (!is_hvdcp_present(chip)) {
+ pr_err("No pulsing unless HVDCP\n");
+ return -ENODEV;
+ }
+ if (chip->schg_version == QPNP_SCHG_LITE)
+ rc = smbchg_prepare_for_pulsing_lite(chip);
+ else
+ rc = smbchg_prepare_for_pulsing(chip);
+ break;
+ case POWER_SUPPLY_DP_DM_UNPREPARE:
+ if (chip->schg_version == QPNP_SCHG_LITE)
+ rc = smbchg_unprepare_for_pulsing_lite(chip);
+ else
+ rc = smbchg_unprepare_for_pulsing(chip);
+ break;
+ case POWER_SUPPLY_DP_DM_CONFIRMED_HVDCP3:
+ rc = smbchg_hvdcp3_confirmed(chip);
+ break;
+ case POWER_SUPPLY_DP_DM_DP_PULSE:
+ if (chip->schg_version == QPNP_SCHG)
+ rc = set_usb_psy_dp_dm(chip,
+ POWER_SUPPLY_DP_DM_DP_PULSE);
+ else
+ rc = smbchg_dp_pulse_lite(chip);
+ if (!rc)
+ chip->pulse_cnt++;
+ pr_smb(PR_MISC, "pulse_cnt = %d\n", chip->pulse_cnt);
+ break;
+ case POWER_SUPPLY_DP_DM_DM_PULSE:
+ if (chip->schg_version == QPNP_SCHG)
+ rc = set_usb_psy_dp_dm(chip,
+ POWER_SUPPLY_DP_DM_DM_PULSE);
+ else
+ rc = smbchg_dm_pulse_lite(chip);
+ if (!rc && chip->pulse_cnt)
+ chip->pulse_cnt--;
+ pr_smb(PR_MISC, "pulse_cnt = %d\n", chip->pulse_cnt);
+ break;
+ case POWER_SUPPLY_DP_DM_HVDCP3_SUPPORTED:
+ chip->hvdcp3_supported = true;
+ pr_smb(PR_MISC, "HVDCP3 supported\n");
+ break;
+ case POWER_SUPPLY_DP_DM_ICL_DOWN:
+ chip->usb_icl_delta -= 100;
+ target_icl_vote_ma = get_client_vote(chip->usb_icl_votable,
+ PSY_ICL_VOTER);
+ vote(chip->usb_icl_votable, SW_AICL_ICL_VOTER, true,
+ target_icl_vote_ma + chip->usb_icl_delta);
+ break;
+ case POWER_SUPPLY_DP_DM_ICL_UP:
+ chip->usb_icl_delta += 100;
+ target_icl_vote_ma = get_client_vote(chip->usb_icl_votable,
+ PSY_ICL_VOTER);
+ vote(chip->usb_icl_votable, SW_AICL_ICL_VOTER, true,
+ target_icl_vote_ma + chip->usb_icl_delta);
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+static void update_typec_capability_status(struct smbchg_chip *chip,
+ const union power_supply_propval *val)
+{
+ pr_smb(PR_TYPEC, "typec capability = %dma\n", val->intval);
+
+ pr_debug("changing ICL from %dma to %dma\n", chip->typec_current_ma,
+ val->intval);
+ chip->typec_current_ma = val->intval;
+ smbchg_change_usb_supply_type(chip, chip->usb_supply_type);
+}
+
+static void update_typec_otg_status(struct smbchg_chip *chip, int mode,
+ bool force)
+{
+ union power_supply_propval pval = {0, };
+ pr_smb(PR_TYPEC, "typec mode = %d\n", mode);
+
+ if (mode == POWER_SUPPLY_TYPE_DFP) {
+ chip->typec_dfp = true;
+ pval.intval = 1;
+ extcon_set_cable_state_(chip->extcon, EXTCON_USB_HOST,
+ chip->typec_dfp);
+ /* update FG */
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_STATUS,
+ get_prop_batt_status(chip));
+ } else if (force || chip->typec_dfp) {
+ chip->typec_dfp = false;
+ pval.intval = 0;
+ extcon_set_cable_state_(chip->extcon, EXTCON_USB_HOST,
+ chip->typec_dfp);
+ /* update FG */
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_STATUS,
+ get_prop_batt_status(chip));
+ }
+}
+
+static int smbchg_usb_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct smbchg_chip *chip = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ val->intval = chip->usb_current_max;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = chip->usb_present;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = chip->usb_online;
+ break;
+ case POWER_SUPPLY_PROP_TYPE:
+ val->intval = chip->usb_supply_type;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = chip->usb_health;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int smbchg_usb_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct smbchg_chip *chip = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ chip->usb_current_max = val->intval;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ chip->usb_online = val->intval;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ power_supply_changed(psy);
+ return 0;
+}
+
+static int
+smbchg_usb_is_writeable(struct power_supply *psy, enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ return 1;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static char *smbchg_usb_supplicants[] = {
+ "battery",
+ "bms",
+};
+
+static enum power_supply_property smbchg_usb_properties[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_TYPE,
+ POWER_SUPPLY_PROP_HEALTH,
+};
+
+#define CHARGE_OUTPUT_VTG_RATIO 840
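+/*
+ * Estimate the input current from the VCHG ADC reading.  The conversion
+ * below assumes the VCHG pin voltage tracks the input current with a ratio
+ * of CHARGE_OUTPUT_VTG_RATIO (840); the exact units of adc_result.physical
+ * depend on how the VADC channel is configured.
+ */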
+static int smbchg_get_iusb(struct smbchg_chip *chip)
+{
+ int rc, iusb_ua = -EINVAL;
+ struct qpnp_vadc_result adc_result;
+
+ if (!is_usb_present(chip) && !is_dc_present(chip))
+ return 0;
+
+ if (chip->vchg_vadc_dev && chip->vchg_adc_channel != -EINVAL) {
+ rc = qpnp_vadc_read(chip->vchg_vadc_dev,
+ chip->vchg_adc_channel, &adc_result);
+ if (rc) {
+ pr_smb(PR_STATUS,
+ "error in VCHG (channel-%d) read rc = %d\n",
+ chip->vchg_adc_channel, rc);
+ return 0;
+ }
+ iusb_ua = div_s64(adc_result.physical * 1000,
+ CHARGE_OUTPUT_VTG_RATIO);
+ }
+
+ return iusb_ua;
+}
+
+static enum power_supply_property smbchg_battery_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
+ POWER_SUPPLY_PROP_FLASH_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_MAX,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_NOW,
+ POWER_SUPPLY_PROP_FLASH_ACTIVE,
+ POWER_SUPPLY_PROP_FLASH_TRIGGER,
+ POWER_SUPPLY_PROP_DP_DM,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+ POWER_SUPPLY_PROP_RERUN_AICL,
+ POWER_SUPPLY_PROP_RESTRICTED_CHARGING,
+};
+
+static int smbchg_battery_set_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ const union power_supply_propval *val)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = power_supply_get_drvdata(psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED:
+ vote(chip->battchg_suspend_votable, BATTCHG_USER_EN_VOTER,
+ !val->intval, 0);
+ break;
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ rc = vote(chip->usb_suspend_votable, USER_EN_VOTER,
+ !val->intval, 0);
+ rc = vote(chip->dc_suspend_votable, USER_EN_VOTER,
+ !val->intval, 0);
+ chip->chg_enabled = val->intval;
+ schedule_work(&chip->usb_set_online_work);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ chip->fake_battery_soc = val->intval;
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ break;
+ case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+ smbchg_system_temp_level_set(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ rc = smbchg_set_fastchg_current_user(chip, val->intval / 1000);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ rc = smbchg_float_voltage_set(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE:
+ rc = smbchg_safety_timer_enable(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_FLASH_ACTIVE:
+ rc = smbchg_switch_buck_frequency(chip, val->intval);
+ if (rc) {
+ pr_err("Couldn't switch buck frequency, rc=%d\n", rc);
+ /*
+ * Trigger a panic if there is an error while switching
+ * buck frequency. This will prevent LS FET damage.
+ */
+ BUG_ON(1);
+ }
+
+ rc = smbchg_otg_pulse_skip_disable(chip,
+ REASON_FLASH_ENABLED, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_FLASH_TRIGGER:
+ chip->flash_triggered = !!val->intval;
+ smbchg_icl_loop_disable_check(chip);
+ break;
+ case POWER_SUPPLY_PROP_FORCE_TLIM:
+ rc = smbchg_force_tlim_en(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_DP_DM:
+ rc = smbchg_dp_dm(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_RERUN_AICL:
+ smbchg_rerun_aicl(chip);
+ break;
+ case POWER_SUPPLY_PROP_RESTRICTED_CHARGING:
+ rc = smbchg_restricted_charging(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_CAPABILITY:
+ if (chip->typec_psy)
+ update_typec_capability_status(chip, val);
+ break;
+ case POWER_SUPPLY_PROP_TYPEC_MODE:
+ if (chip->typec_psy)
+ update_typec_otg_status(chip, val->intval, false);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int smbchg_battery_is_writeable(struct power_supply *psy,
+ enum power_supply_property prop)
+{
+ int rc;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED:
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ case POWER_SUPPLY_PROP_CAPACITY:
+ case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ case POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE:
+ case POWER_SUPPLY_PROP_DP_DM:
+ case POWER_SUPPLY_PROP_RERUN_AICL:
+ case POWER_SUPPLY_PROP_RESTRICTED_CHARGING:
+ rc = 1;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+static int smbchg_battery_get_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct smbchg_chip *chip = power_supply_get_drvdata(psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = get_prop_batt_status(chip);
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = get_prop_batt_present(chip);
+ break;
+ case POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED:
+ val->intval
+ = get_effective_result(chip->battchg_suspend_votable);
+ if (val->intval < 0) /* no votes */
+ val->intval = 1;
+ else
+ val->intval = !val->intval;
+ break;
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ val->intval = chip->chg_enabled;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ val->intval = get_prop_charge_type(chip);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ val->intval = smbchg_float_voltage_get(chip);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = get_prop_batt_health(chip);
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_FLASH_CURRENT_MAX:
+ val->intval = smbchg_calc_max_flash_current(chip);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ val->intval = chip->fastchg_current_ma * 1000;
+ break;
+ case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+ val->intval = chip->therm_lvl_sel;
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_MAX:
+ val->intval = smbchg_get_aicl_level_ma(chip) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED:
+ val->intval = (int)chip->aicl_complete;
+ break;
+ case POWER_SUPPLY_PROP_RESTRICTED_CHARGING:
+ val->intval = (int)chip->restricted_charging;
+ break;
+ /* properties from fg */
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = get_prop_batt_capacity(chip);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = get_prop_batt_current_now(chip);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = get_prop_batt_voltage_now(chip);
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = get_prop_batt_temp(chip);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = get_prop_batt_voltage_max_design(chip);
+ break;
+ case POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE:
+ val->intval = chip->safety_timer_en;
+ break;
+ case POWER_SUPPLY_PROP_FLASH_ACTIVE:
+ val->intval = chip->otg_pulse_skip_dis;
+ break;
+ case POWER_SUPPLY_PROP_FLASH_TRIGGER:
+ val->intval = chip->flash_triggered;
+ break;
+ case POWER_SUPPLY_PROP_DP_DM:
+ val->intval = chip->pulse_cnt;
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+ val->intval = smbchg_is_input_current_limited(chip);
+ break;
+ case POWER_SUPPLY_PROP_RERUN_AICL:
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_NOW:
+ val->intval = smbchg_get_iusb(chip);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static char *smbchg_dc_supplicants[] = {
+ "bms",
+};
+
+static enum power_supply_property smbchg_dc_properties[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+};
+
+static int smbchg_dc_set_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ const union power_supply_propval *val)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = power_supply_get_drvdata(psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ rc = vote(chip->dc_suspend_votable, POWER_SUPPLY_EN_VOTER,
+ !val->intval, 0);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ rc = vote(chip->dc_icl_votable, USER_ICL_VOTER, true,
+ val->intval / 1000);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int smbchg_dc_get_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct smbchg_chip *chip = power_supply_get_drvdata(psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = is_dc_present(chip);
+ break;
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ val->intval = get_effective_result(chip->dc_suspend_votable);
+ if (val->intval < 0) /* no votes */
+ val->intval = 1;
+ else
+ val->intval = !val->intval;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ /* return if dc is charging the battery */
+ val->intval = (smbchg_get_pwr_path(chip) == PWR_PATH_DC)
+ && (get_prop_batt_status(chip)
+ == POWER_SUPPLY_STATUS_CHARGING);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ val->intval = chip->dc_max_current_ma * 1000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int smbchg_dc_is_writeable(struct power_supply *psy,
+ enum power_supply_property prop)
+{
+ int rc;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ rc = 1;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+#define HOT_BAT_HARD_BIT BIT(0)
+#define HOT_BAT_SOFT_BIT BIT(1)
+#define COLD_BAT_HARD_BIT BIT(2)
+#define COLD_BAT_SOFT_BIT BIT(3)
+#define BAT_OV_BIT BIT(4)
+#define BAT_LOW_BIT BIT(5)
+#define BAT_MISSING_BIT BIT(6)
+#define BAT_TERM_MISSING_BIT BIT(7)
+static irqreturn_t batt_hot_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1);
+ chip->batt_hot = !!(reg & HOT_BAT_HARD_BIT);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ smbchg_wipower_check(chip);
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_HEALTH,
+ get_prop_batt_health(chip));
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t batt_cold_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1);
+ chip->batt_cold = !!(reg & COLD_BAT_HARD_BIT);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ smbchg_wipower_check(chip);
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_HEALTH,
+ get_prop_batt_health(chip));
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t batt_warm_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1);
+ chip->batt_warm = !!(reg & HOT_BAT_SOFT_BIT);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_HEALTH,
+ get_prop_batt_health(chip));
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t batt_cool_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1);
+ chip->batt_cool = !!(reg & COLD_BAT_SOFT_BIT);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_HEALTH,
+ get_prop_batt_health(chip));
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t batt_pres_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1);
+ chip->batt_present = !(reg & BAT_MISSING_BIT);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_HEALTH,
+ get_prop_batt_health(chip));
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t vbat_low_handler(int irq, void *_chip)
+{
+ pr_warn_ratelimited("vbat low\n");
+ return IRQ_HANDLED;
+}
+
+#define CHG_COMP_SFT_BIT BIT(3)
+static irqreturn_t chg_error_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ int rc = 0;
+ u8 reg;
+
+ pr_smb(PR_INTERRUPT, "chg-error triggered\n");
+
+ rc = smbchg_read(chip, &reg, chip->chgr_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Unable to read RT_STS rc = %d\n", rc);
+ } else {
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ if (reg & CHG_COMP_SFT_BIT)
+ set_property_on_fg(chip,
+ POWER_SUPPLY_PROP_SAFETY_TIMER_EXPIRED,
+ 1);
+ }
+
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ smbchg_wipower_check(chip);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fastchg_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+
+ pr_smb(PR_INTERRUPT, "p2f triggered\n");
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ smbchg_wipower_check(chip);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t chg_hot_handler(int irq, void *_chip)
+{
+ pr_warn_ratelimited("chg hot\n");
+ smbchg_wipower_check(_chip);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t chg_term_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+
+ pr_smb(PR_INTERRUPT, "tcc triggered\n");
+ /*
+	 * Charge termination is pulse-triggered, not level-triggered, so the
+	 * TCC bit in RT_STS may already be cleared by the time this interrupt
+	 * is handled. Instead of relying on that bit to determine whether
+	 * charge termination happened, simply notify the FG whenever this
+	 * interrupt fires.
+ */
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_CHARGE_DONE, 1);
+
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t taper_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ taper_irq_en(chip, false);
+ smbchg_read(chip, &reg, chip->chgr_base + RT_STS, 1);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ smbchg_parallel_usb_taper(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ smbchg_wipower_check(chip);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t recharge_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->chgr_base + RT_STS, 1);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wdog_timeout_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->misc_base + RT_STS, 1);
+ pr_warn_ratelimited("wdog timeout rt_stat = 0x%02x\n", reg);
+ if (chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ return IRQ_HANDLED;
+}
+
+/**
+ * power_ok_handler() - called when the switcher turns on or turns off
+ * @irq: interrupt number
+ * @_chip: pointer to smbchg_chip
+ */
+static irqreturn_t power_ok_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->misc_base + RT_STS, 1);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ return IRQ_HANDLED;
+}
+
+/**
+ * dcin_uv_handler() - called when the dc voltage crosses the uv threshold
+ * @irq: interrupt number
+ * @_chip: pointer to smbchg_chip
+ */
+#define DCIN_UNSUSPEND_DELAY_MS 1000
+static irqreturn_t dcin_uv_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ bool dc_present = is_dc_present(chip);
+
+ pr_smb(PR_STATUS, "chip->dc_present = %d dc_present = %d\n",
+ chip->dc_present, dc_present);
+
+ if (chip->dc_present != dc_present) {
+ /* dc changed */
+ chip->dc_present = dc_present;
+ if (chip->dc_psy_type != -EINVAL && chip->batt_psy)
+ power_supply_changed(chip->dc_psy);
+ smbchg_charging_status_change(chip);
+ smbchg_aicl_deglitch_wa_check(chip);
+ chip->vbat_above_headroom = false;
+ }
+
+ smbchg_wipower_check(chip);
+ return IRQ_HANDLED;
+}
+
+/**
+ * usbin_ov_handler() - this is called when an overvoltage condition occurs
+ * @irq: interrupt number
+ * @_chip: pointer to smbchg_chip
+ */
+static irqreturn_t usbin_ov_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ int rc;
+ u8 reg;
+ bool usb_present;
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb rt status rc = %d\n", rc);
+ goto out;
+ }
+
+ /* OV condition is detected. Notify it to USB psy */
+ if (reg & USBIN_OV_BIT) {
+ chip->usb_ov_det = true;
+ pr_smb(PR_MISC, "setting usb psy health OV\n");
+ chip->usb_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ power_supply_changed(chip->usb_psy);
+ } else {
+ chip->usb_ov_det = false;
+ /* If USB is present, then handle the USB insertion */
+ usb_present = is_usb_present(chip);
+ if (usb_present)
+ update_usb_status(chip, usb_present, false);
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+/**
+ * usbin_uv_handler() - called when the USB input voltage crosses the UV
+ *			threshold (typically on charger removal)
+ * @irq: interrupt number
+ * @_chip: pointer to smbchg_chip
+ */
+#define ICL_MODE_MASK SMB_MASK(5, 4)
+#define ICL_MODE_HIGH_CURRENT 0
+static irqreturn_t usbin_uv_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ int aicl_level = smbchg_get_aicl_level_ma(chip);
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RT_STS, 1);
+ if (rc) {
+		pr_err("could not read rt sts: %d\n", rc);
+ goto out;
+ }
+
+ pr_smb(PR_STATUS,
+ "%s chip->usb_present = %d rt_sts = 0x%02x hvdcp_3_det_ignore_uv = %d aicl = %d\n",
+ chip->hvdcp_3_det_ignore_uv ? "Ignoring":"",
+ chip->usb_present, reg, chip->hvdcp_3_det_ignore_uv,
+ aicl_level);
+
+ /*
+ * set usb_psy's dp=f dm=f if this is a new insertion, i.e. it is
+ * not already src_detected and usbin_uv is seen falling
+ */
+ if (!(reg & USBIN_UV_BIT) && !(reg & USBIN_SRC_DET_BIT)) {
+ pr_smb(PR_MISC, "setting usb dp=f dm=f\n");
+ if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg))
+ rc = regulator_enable(chip->dpdm_reg);
+ if (rc < 0) {
+ pr_err("Couldn't enable DP/DM for pulsing rc=%d\n", rc);
+			goto out;
+ }
+ }
+
+ if (reg & USBIN_UV_BIT)
+ complete_all(&chip->usbin_uv_raised);
+ else
+ complete_all(&chip->usbin_uv_lowered);
+
+ if (chip->hvdcp_3_det_ignore_uv)
+ goto out;
+
+ if ((reg & USBIN_UV_BIT) && (reg & USBIN_SRC_DET_BIT)) {
+ pr_smb(PR_STATUS, "Very weak charger detected\n");
+ chip->very_weak_charger = true;
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + ICL_STS_2_REG, 1);
+ if (rc) {
+ dev_err(chip->dev, "Could not read usb icl sts 2: %d\n",
+ rc);
+ goto out;
+ }
+ if ((reg & ICL_MODE_MASK) != ICL_MODE_HIGH_CURRENT) {
+ /*
+ * If AICL is not even enabled, this is either an
+ * SDP or a grossly out of spec charger. Do not
+ * draw any current from it.
+ */
+ rc = vote(chip->usb_suspend_votable,
+ WEAK_CHARGER_EN_VOTER, true, 0);
+ if (rc < 0)
+				pr_err("could not disable charger: %d\n", rc);
+ } else if (aicl_level == chip->tables.usb_ilim_ma_table[0]) {
+ /*
+ * we are in a situation where the adapter is not able
+ * to supply even 300mA. Disable hw aicl reruns else it
+ * is only a matter of time when we get back here again
+ */
+ rc = vote(chip->hw_aicl_rerun_disable_votable,
+ WEAK_CHARGER_HW_AICL_VOTER, true, 0);
+ if (rc < 0)
+ pr_err("Couldn't disable hw aicl rerun rc=%d\n",
+ rc);
+ }
+ pr_smb(PR_MISC, "setting usb psy health UNSPEC_FAILURE\n");
+ chip->usb_health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ power_supply_changed(chip->usb_psy);
+ schedule_work(&chip->usb_set_online_work);
+ }
+
+ smbchg_wipower_check(chip);
+out:
+ return IRQ_HANDLED;
+}
+
+/**
+ * src_detect_handler() - called on the rising edge when the USB charger type
+ *			is detected, and on the falling edge when the USB
+ *			voltage falls below the coarse detect voltage (1V).
+ *			Used to handle USB charger insertion and removal.
+ * @irq: interrupt number
+ * @_chip: pointer to smbchg_chip
+ */
+static irqreturn_t src_detect_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ bool usb_present = is_usb_present(chip);
+ bool src_detect = is_src_detect_high(chip);
+ int rc;
+
+ pr_smb(PR_STATUS,
+ "%s chip->usb_present = %d usb_present = %d src_detect = %d hvdcp_3_det_ignore_uv=%d\n",
+ chip->hvdcp_3_det_ignore_uv ? "Ignoring":"",
+ chip->usb_present, usb_present, src_detect,
+ chip->hvdcp_3_det_ignore_uv);
+
+ if (src_detect)
+ complete_all(&chip->src_det_raised);
+ else
+ complete_all(&chip->src_det_lowered);
+
+ if (chip->hvdcp_3_det_ignore_uv)
+ goto out;
+
+ /*
+ * When VBAT is above the AICL threshold (4.25V) - 180mV (4.07V),
+ * an input collapse due to AICL will actually cause an USBIN_UV
+ * interrupt to fire as well.
+ *
+ * Handle USB insertions and removals in the source detect handler
+ * instead of the USBIN_UV handler since the latter is untrustworthy
+ * when the battery voltage is high.
+ */
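+ /* any src-det edge invalidates a previous weak-charger detection */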
+ chip->very_weak_charger = false;
+ /*
+ * a src detect marks a new insertion or a real removal,
+ * vote for enable aicl hw reruns
+ */
+ rc = vote(chip->hw_aicl_rerun_disable_votable,
+ WEAK_CHARGER_HW_AICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't enable hw aicl rerun rc=%d\n", rc);
+
+ rc = vote(chip->usb_suspend_votable, WEAK_CHARGER_EN_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("could not enable charger: %d\n", rc);
+
+ if (src_detect) {
+ update_usb_status(chip, usb_present, 0);
+ } else {
+ update_usb_status(chip, 0, false);
+ chip->aicl_irq_count = 0;
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+/**
+ * otg_oc_handler() - called when the usb otg goes over current
+ */
+#define NUM_OTG_RETRIES 5
+#define OTG_OC_RETRY_DELAY_US 50000
+static irqreturn_t otg_oc_handler(int irq, void *_chip)
+{
+ int rc;
+ struct smbchg_chip *chip = _chip;
+ s64 elapsed_us = ktime_us_delta(ktime_get(), chip->otg_enable_time);
+
+ pr_smb(PR_INTERRUPT, "triggered\n");
+
+ if (chip->schg_version == QPNP_SCHG_LITE) {
+ pr_warn("OTG OC triggered - OTG disabled\n");
+ return IRQ_HANDLED;
+ }
+
+ if (elapsed_us > OTG_OC_RETRY_DELAY_US)
+ chip->otg_retries = 0;
+
+ /*
+ * Due to a HW bug in the PMI8994 charger, the current inrush that
+ * occurs when connecting certain OTG devices can cause the OTG
+ * overcurrent protection to trip.
+ *
+ * The workaround is to retry enabling the OTG when an overcurrent
+ * interrupt fires, up to NUM_OTG_RETRIES times in quick succession.
+ */
+ if (chip->otg_retries < NUM_OTG_RETRIES) {
+ chip->otg_retries += 1;
+ pr_smb(PR_STATUS,
+ "Retrying OTG enable. Try #%d, elapsed_us %lld\n",
+ chip->otg_retries, elapsed_us);
+ rc = otg_oc_reset(chip);
+ if (rc)
+ pr_err("Failed to reset OTG OC state rc=%d\n", rc);
+ chip->otg_enable_time = ktime_get();
+ }
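+ /* if the retry budget is exhausted, no further reset is attempted here */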
+ return IRQ_HANDLED;
+}
+
+/**
+ * otg_fail_handler() - called when the usb otg fails
+ * (when vbat < OTG UVLO threshold)
+ */
+static irqreturn_t otg_fail_handler(int irq, void *_chip)
+{
+ pr_smb(PR_INTERRUPT, "triggered\n");
+ return IRQ_HANDLED;
+}
+
+/**
+ * aicl_done_handler() - called when the usb AICL algorithm is finished
+ * and a current is set.
+ */
+static irqreturn_t aicl_done_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ bool usb_present = is_usb_present(chip);
+ int aicl_level = smbchg_get_aicl_level_ma(chip);
+
+ pr_smb(PR_INTERRUPT, "triggered, aicl: %d\n", aicl_level);
+
+ increment_aicl_count(chip);
+
+ if (usb_present)
+ smbchg_parallel_usb_check_ok(chip);
+
+ if (chip->aicl_complete && chip->batt_psy)
+ power_supply_changed(chip->batt_psy);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * usbid_change_handler() - called when the usb RID changes.
+ * This is used mostly for detecting OTG
+ */
+static irqreturn_t usbid_change_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ bool otg_present;
+
+ pr_smb(PR_INTERRUPT, "triggered\n");
+
+ otg_present = is_otg_present(chip);
+ pr_smb(PR_MISC, "setting usb psy OTG = %d\n",
+ otg_present ? 1 : 0);
+
+ extcon_set_cable_state_(chip->extcon, EXTCON_USB_HOST, otg_present);
+
+ if (otg_present)
+ pr_smb(PR_STATUS, "OTG detected\n");
+
+ /* update FG */
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_STATUS,
+ get_prop_batt_status(chip));
+
+ return IRQ_HANDLED;
+}
+
+static int determine_initial_status(struct smbchg_chip *chip)
+{
+ union power_supply_propval type = {0, };
+
+ /*
+ * It is okay to read the interrupt status here since
+ * interrupts aren't requested. reading interrupt status
+ * clears the interrupt so be careful to read interrupt
+ * status only in interrupt handling code
+ */
+
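+ /* synthesize the initial state by calling the handlers directly (irq = 0) */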
+ batt_pres_handler(0, chip);
+ batt_hot_handler(0, chip);
+ batt_warm_handler(0, chip);
+ batt_cool_handler(0, chip);
+ batt_cold_handler(0, chip);
+ if (chip->typec_psy) {
+ get_property_from_typec(chip, POWER_SUPPLY_PROP_TYPE, &type);
+ update_typec_otg_status(chip, type.intval, true);
+ } else {
+ usbid_change_handler(0, chip);
+ }
+ src_detect_handler(0, chip);
+
+ chip->usb_present = is_usb_present(chip);
+ chip->dc_present = is_dc_present(chip);
+
+ if (chip->usb_present) {
+ int rc = 0;
+ pr_smb(PR_MISC, "setting usb dp=f dm=f\n");
+ if (chip->dpdm_reg && !regulator_is_enabled(chip->dpdm_reg)) {
+ rc = regulator_enable(chip->dpdm_reg);
+ if (rc < 0) {
+ pr_err("Couldn't enable DP/DM for pulsing rc=%d\n", rc);
+ return rc;
+ }
+ }
+ handle_usb_insertion(chip);
+ } else {
+ handle_usb_removal(chip);
+ }
+
+ return 0;
+}
+
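+/*
+ * Pre-charge and fast-charge safety timeouts, in minutes; matched against
+ * the qcom,*charging-timeout-mins device tree properties parsed below.
+ */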
+static int prechg_time[] = {
+ 24,
+ 48,
+ 96,
+ 192,
+};
+static int chg_time[] = {
+ 192,
+ 384,
+ 768,
+ 1536,
+};
+
+enum bpd_type {
+ BPD_TYPE_BAT_NONE,
+ BPD_TYPE_BAT_ID,
+ BPD_TYPE_BAT_THM,
+ BPD_TYPE_BAT_THM_BAT_ID,
+ BPD_TYPE_DEFAULT,
+};
+
+static const char * const bpd_label[] = {
+ [BPD_TYPE_BAT_NONE] = "bpd_none",
+ [BPD_TYPE_BAT_ID] = "bpd_id",
+ [BPD_TYPE_BAT_THM] = "bpd_thm",
+ [BPD_TYPE_BAT_THM_BAT_ID] = "bpd_thm_id",
+};
+
+static inline int get_bpd(const char *name)
+{
+ int i = 0;
+ for (i = 0; i < ARRAY_SIZE(bpd_label); i++) {
+ if (strcmp(bpd_label[i], name) == 0)
+ return i;
+ }
+ return -EINVAL;
+}
+
+#define REVISION1_REG 0x0
+#define DIG_MINOR 0
+#define DIG_MAJOR 1
+#define ANA_MINOR 2
+#define ANA_MAJOR 3
+#define CHGR_CFG1 0xFB
+#define RECHG_THRESHOLD_SRC_BIT BIT(1)
+#define TERM_I_SRC_BIT BIT(2)
+#define TERM_SRC_FG BIT(2)
+#define CHG_INHIB_CFG_REG 0xF7
+#define CHG_INHIBIT_50MV_VAL 0x00
+#define CHG_INHIBIT_100MV_VAL 0x01
+#define CHG_INHIBIT_200MV_VAL 0x02
+#define CHG_INHIBIT_300MV_VAL 0x03
+#define CHG_INHIBIT_MASK 0x03
+#define USE_REGISTER_FOR_CURRENT BIT(2)
+#define CHGR_CFG2 0xFC
+#define CHG_EN_SRC_BIT BIT(7)
+#define CHG_EN_POLARITY_BIT BIT(6)
+#define P2F_CHG_TRAN BIT(5)
+#define CHG_BAT_OV_ECC BIT(4)
+#define I_TERM_BIT BIT(3)
+#define AUTO_RECHG_BIT BIT(2)
+#define CHARGER_INHIBIT_BIT BIT(0)
+#define USB51_COMMAND_POL BIT(2)
+#define USB51AC_CTRL BIT(1)
+#define TR_8OR32B 0xFE
+#define BUCK_8_16_FREQ_BIT BIT(0)
+#define BM_CFG 0xF3
+#define BATT_MISSING_ALGO_BIT BIT(2)
+#define BMD_PIN_SRC_MASK SMB_MASK(1, 0)
+#define PIN_SRC_SHIFT 0
+#define CHGR_CFG 0xFF
+#define RCHG_LVL_BIT BIT(0)
+#define VCHG_EN_BIT BIT(1)
+#define VCHG_INPUT_CURRENT_BIT BIT(3)
+#define CFG_AFVC 0xF6
+#define VFLOAT_COMP_ENABLE_MASK SMB_MASK(2, 0)
+#define TR_RID_REG 0xFA
+#define FG_INPUT_FET_DELAY_BIT BIT(3)
+#define TRIM_OPTIONS_7_0 0xF6
+#define INPUT_MISSING_POLLER_EN_BIT BIT(3)
+#define CHGR_CCMP_CFG 0xFA
+#define JEITA_TEMP_HARD_LIMIT_BIT BIT(5)
+#define HVDCP_ADAPTER_SEL_MASK SMB_MASK(5, 4)
+#define HVDCP_ADAPTER_SEL_9V_BIT BIT(4)
+#define HVDCP_AUTH_ALG_EN_BIT BIT(6)
+#define CMD_APSD 0x41
+#define APSD_RERUN_BIT BIT(0)
+#define OTG_CFG 0xF1
+#define HICCUP_ENABLED_BIT BIT(6)
+#define OTG_PIN_POLARITY_BIT BIT(4)
+#define OTG_PIN_ACTIVE_LOW BIT(4)
+#define OTG_EN_CTRL_MASK SMB_MASK(3, 2)
+#define OTG_PIN_CTRL_RID_DIS 0x04
+#define OTG_CMD_CTRL_RID_EN 0x08
+#define AICL_ADC_BIT BIT(6)
+static void batt_ov_wa_check(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ /* disable the 'battery OV disables charging' feature */
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG2,
+ CHG_BAT_OV_ECC, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set chgr_cfg2 rc=%d\n", rc);
+ return;
+ }
+
+ /*
+ * if battery OV is set:
+ * restart charging by disable/enable charging
+ */
+ rc = smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't read Battery RT status rc = %d\n", rc);
+ return;
+ }
+
+ if (reg & BAT_OV_BIT) {
+ rc = smbchg_charging_en(chip, false);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't disable charging: rc = %d\n", rc);
+ return;
+ }
+
+ /* delay for charging-disable to take effect */
+ msleep(200);
+
+ rc = smbchg_charging_en(chip, true);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't enable charging: rc = %d\n", rc);
+ return;
+ }
+ }
+}
+
+static int smbchg_hw_init(struct smbchg_chip *chip)
+{
+ int rc, i;
+ u8 reg, mask;
+
+ rc = smbchg_read(chip, chip->revision,
+ chip->misc_base + REVISION1_REG, 4);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read revision rc=%d\n",
+ rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "Charger Revision DIG: %d.%d; ANA: %d.%d\n",
+ chip->revision[DIG_MAJOR], chip->revision[DIG_MINOR],
+ chip->revision[ANA_MAJOR], chip->revision[ANA_MINOR]);
+
+ /* Setup 9V HVDCP */
+ if (!chip->hvdcp_not_supported) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_9V);
+ if (rc < 0) {
+ pr_err("Couldn't set hvdcp config in chgpath_chg rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ if (chip->aicl_rerun_period_s > 0) {
+ rc = smbchg_set_aicl_rerun_period_s(chip,
+ chip->aicl_rerun_period_s);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set AICL rerun timer rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + TR_RID_REG,
+ FG_INPUT_FET_DELAY_BIT, FG_INPUT_FET_DELAY_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't disable fg input fet delay rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smbchg_sec_masked_write(chip, chip->misc_base + TRIM_OPTIONS_7_0,
+ INPUT_MISSING_POLLER_EN_BIT,
+ INPUT_MISSING_POLLER_EN_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't enable input missing poller rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /*
+ * Do not force using current from the register i.e. use auto
+ * power source detect (APSD) mA ratings for the initial current values.
+ *
+ * If this is set, AICL will not rerun at 9V for HVDCPs
+ */
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ USE_REGISTER_FOR_CURRENT, 0);
+
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set input limit cmd rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * set chg en by cmd register, set chg en by writing bit 1,
+ * enable auto pre to fast, enable auto recharge by default.
+ * enable current termination and charge inhibition based on
+ * the device tree configuration.
+ */
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG2,
+ CHG_EN_SRC_BIT | CHG_EN_POLARITY_BIT | P2F_CHG_TRAN
+ | I_TERM_BIT | AUTO_RECHG_BIT | CHARGER_INHIBIT_BIT,
+ CHG_EN_POLARITY_BIT
+ | (chip->chg_inhibit_en ? CHARGER_INHIBIT_BIT : 0)
+ | (chip->iterm_disabled ? I_TERM_BIT : 0));
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set chgr_cfg2 rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * enable battery charging to make sure it hasn't been changed earlier
+ * by the bootloader.
+ */
+ rc = smbchg_charging_en(chip, true);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't enable battery charging=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * Based on the configuration, use the analog sensors or the fuelgauge
+ * adc for recharge threshold source.
+ */
+
+ if (chip->chg_inhibit_source_fg)
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG1,
+ TERM_I_SRC_BIT | RECHG_THRESHOLD_SRC_BIT,
+ TERM_SRC_FG | RECHG_THRESHOLD_SRC_BIT);
+ else
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG1,
+ TERM_I_SRC_BIT | RECHG_THRESHOLD_SRC_BIT, 0);
+
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set chgr_cfg1 rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * control USB suspend via command bits and set correct 100/500mA
+ * polarity on the usb current
+ */
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ USB51_COMMAND_POL | USB51AC_CTRL, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set usb_chgpth cfg rc=%d\n", rc);
+ return rc;
+ }
+
+ check_battery_type(chip);
+
+ /* set the float voltage */
+ if (chip->vfloat_mv != -EINVAL) {
+ rc = smbchg_float_voltage_set(chip, chip->vfloat_mv);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set float voltage rc = %d\n", rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "set vfloat to %d\n", chip->vfloat_mv);
+ }
+
+ /* set the fast charge current compensation */
+ if (chip->fastchg_current_comp != -EINVAL) {
+ rc = smbchg_fastchg_current_comp_set(chip,
+ chip->fastchg_current_comp);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set fastchg current comp rc = %d\n",
+ rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "set fastchg current comp to %d\n",
+ chip->fastchg_current_comp);
+ }
+
+ /* set the float voltage compensation */
+ if (chip->float_voltage_comp != -EINVAL) {
+ rc = smbchg_float_voltage_comp_set(chip,
+ chip->float_voltage_comp);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set float voltage comp rc = %d\n",
+ rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "set float voltage comp to %d\n",
+ chip->float_voltage_comp);
+ }
+
+ /* set iterm */
+ if (chip->iterm_ma != -EINVAL) {
+ if (chip->iterm_disabled) {
+ dev_err(chip->dev, "Error: Both iterm_disabled and iterm_ma set\n");
+ return -EINVAL;
+ } else {
+ smbchg_iterm_set(chip, chip->iterm_ma);
+ }
+ }
+
+ /* set the safety timers */
+ if (chip->safety_time != -EINVAL) {
+ reg = (chip->safety_time > 0 ? 0 : SFT_TIMER_DISABLE_BIT) |
+ (chip->prechg_safety_time > 0
+ ? 0 : PRECHG_SFT_TIMER_DISABLE_BIT);
+
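+ /* pick the smallest supported timeout that covers the requested value */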
+ for (i = 0; i < ARRAY_SIZE(chg_time); i++) {
+ if (chip->safety_time <= chg_time[i]) {
+ reg |= i << SAFETY_TIME_MINUTES_SHIFT;
+ break;
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(prechg_time); i++) {
+ if (chip->prechg_safety_time <= prechg_time[i]) {
+ reg |= i;
+ break;
+ }
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->chgr_base + SFT_CFG,
+ SFT_EN_MASK | SFT_TO_MASK |
+ (chip->prechg_safety_time > 0
+ ? PRECHG_SFT_TO_MASK : 0), reg);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set safety timer rc = %d\n",
+ rc);
+ return rc;
+ }
+ chip->safety_timer_en = true;
+ } else {
+ rc = smbchg_read(chip, &reg, chip->chgr_base + SFT_CFG, 1);
+ if (rc < 0)
+ dev_err(chip->dev, "Unable to read SFT_CFG rc = %d\n",
+ rc);
+ else if (!(reg & SFT_EN_MASK))
+ chip->safety_timer_en = true;
+ }
+
+ /* configure jeita temperature hard limit */
+ if (chip->jeita_temp_hard_limit >= 0) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->chgr_base + CHGR_CCMP_CFG,
+ JEITA_TEMP_HARD_LIMIT_BIT,
+ chip->jeita_temp_hard_limit
+ ? 0 : JEITA_TEMP_HARD_LIMIT_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set jeita temp hard limit rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ /* make the buck switch faster to prevent some vbus oscillation */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + TR_8OR32B,
+ BUCK_8_16_FREQ_BIT, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set buck frequency rc = %d\n", rc);
+ return rc;
+ }
+
+ /* battery missing detection */
+ mask = BATT_MISSING_ALGO_BIT;
+ reg = chip->bmd_algo_disabled ? 0 : BATT_MISSING_ALGO_BIT;
+ if (chip->bmd_pin_src < BPD_TYPE_DEFAULT) {
+ mask |= BMD_PIN_SRC_MASK;
+ reg |= chip->bmd_pin_src << PIN_SRC_SHIFT;
+ }
+ rc = smbchg_sec_masked_write(chip,
+ chip->bat_if_base + BM_CFG, mask, reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set batt_missing config = %d\n",
+ rc);
+ return rc;
+ }
+
+ if (chip->vchg_adc_channel != -EINVAL) {
+ /* configure and enable VCHG */
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG,
+ VCHG_INPUT_CURRENT_BIT | VCHG_EN_BIT,
+ VCHG_INPUT_CURRENT_BIT | VCHG_EN_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set recharge rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ smbchg_charging_status_change(chip);
+
+ vote(chip->usb_suspend_votable, USER_EN_VOTER, !chip->chg_enabled, 0);
+ vote(chip->dc_suspend_votable, USER_EN_VOTER, !chip->chg_enabled, 0);
+ /* resume threshold */
+ if (chip->resume_delta_mv != -EINVAL) {
+
+ /*
+ * Configure only if the recharge threshold source is not
+ * fuel gauge ADC.
+ */
+ if (!chip->chg_inhibit_source_fg) {
+ if (chip->resume_delta_mv < 100)
+ reg = CHG_INHIBIT_50MV_VAL;
+ else if (chip->resume_delta_mv < 200)
+ reg = CHG_INHIBIT_100MV_VAL;
+ else if (chip->resume_delta_mv < 300)
+ reg = CHG_INHIBIT_200MV_VAL;
+ else
+ reg = CHG_INHIBIT_300MV_VAL;
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->chgr_base + CHG_INHIB_CFG_REG,
+ CHG_INHIBIT_MASK, reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set inhibit val rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->chgr_base + CHGR_CFG,
+ RCHG_LVL_BIT,
+ (chip->resume_delta_mv
+ < chip->tables.rchg_thr_mv)
+ ? 0 : RCHG_LVL_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set recharge rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ /* DC path current settings */
+ if (chip->dc_psy_type != -EINVAL) {
+ rc = vote(chip->dc_icl_votable, PSY_ICL_VOTER, true,
+ chip->dc_target_current_ma);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't vote for initial DC ICL rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+
+ /*
+ * on some devices the battery is powered via external sources which
+ * could raise its voltage above the float voltage. smbchargers go
+ * in to reverse boost in such a situation and the workaround is to
+ * disable float voltage compensation (note that the battery will appear
+ * hot/cold when powered via external source).
+ */
+ if (chip->soft_vfloat_comp_disabled) {
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + CFG_AFVC,
+ VFLOAT_COMP_ENABLE_MASK, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't disable soft vfloat rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = vote(chip->fcc_votable, BATT_TYPE_FCC_VOTER, true,
+ chip->cfg_fastchg_current_ma);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't vote fastchg ma rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_read(chip, &chip->original_usbin_allowance,
+ chip->usb_chgpth_base + USBIN_CHGR_CFG, 1);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't read usb allowance rc=%d\n", rc);
+
+ if (chip->wipower_dyn_icl_avail) {
+ rc = smbchg_wipower_ilim_config(chip,
+ &(chip->wipower_default.entries[0]));
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set default wipower ilim = %d\n",
+ rc);
+ return rc;
+ }
+ }
+ /* unsuspend dc path, it could be suspended by the bootloader */
+ rc = smbchg_dc_suspend(chip, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't unsuspend dc path= %d\n", rc);
+ return rc;
+ }
+
+ if (chip->force_aicl_rerun) {
+ /* vote to enable hw aicl */
+ rc = vote(chip->hw_aicl_rerun_enable_indirect_votable,
+ DEFAULT_CONFIG_HW_AICL_VOTER, true, 0);
+ if (rc < 0) {
+ pr_err("Couldn't vote enable hw aicl rerun rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ if (chip->schg_version == QPNP_SCHG_LITE) {
+ /* enable OTG hiccup mode */
+ rc = smbchg_sec_masked_write(chip, chip->otg_base + OTG_CFG,
+ HICCUP_ENABLED_BIT, HICCUP_ENABLED_BIT);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set OTG OC config rc = %d\n",
+ rc);
+ }
+
+ if (chip->otg_pinctrl) {
+ /* configure OTG enable to pin control active low */
+ rc = smbchg_sec_masked_write(chip, chip->otg_base + OTG_CFG,
+ OTG_PIN_POLARITY_BIT | OTG_EN_CTRL_MASK,
+ OTG_PIN_ACTIVE_LOW | OTG_PIN_CTRL_RID_DIS);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set OTG EN config rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ if (chip->wa_flags & SMBCHG_BATT_OV_WA)
+ batt_ov_wa_check(chip);
+
+ /* turn off AICL adc for improved accuracy */
+ rc = smbchg_sec_masked_write(chip,
+ chip->misc_base + MISC_TRIM_OPT_15_8, AICL_ADC_BIT, 0);
+ if (rc)
+ pr_err("Couldn't write to MISC_TRIM_OPTIONS_15_8 rc=%d\n",
+ rc);
+
+ return rc;
+}
+
+static const struct of_device_id smbchg_match_table[] = {
+ {
+ .compatible = "qcom,qpnp-smbcharger",
+ },
+ { },
+};
+
+#define DC_MA_MIN 300
+#define DC_MA_MAX 2000
+#define OF_PROP_READ(chip, prop, dt_property, retval, optional) \
+do { \
+ if (retval) \
+ break; \
+ if (optional) \
+ prop = -EINVAL; \
+ \
+ retval = of_property_read_u32(chip->pdev->dev.of_node, \
+ "qcom," dt_property , \
+ &prop); \
+ \
+ if ((retval == -EINVAL) && optional) \
+ retval = 0; \
+ else if (retval) \
+ dev_err(chip->dev, "Error reading " #dt_property \
+ " property rc = %d\n", retval); \
+} while (0)
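+/*
+ * Example: OF_PROP_READ(chip, chip->iterm_ma, "iterm-ma", rc, 1) reads
+ * "qcom,iterm-ma"; an optional property that is absent leaves the value
+ * at -EINVAL and does not set rc.
+ */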
+
+#define ILIM_ENTRIES 3
+#define VOLTAGE_RANGE_ENTRIES 2
+#define RANGE_ENTRY (ILIM_ENTRIES + VOLTAGE_RANGE_ENTRIES)
+static int smb_parse_wipower_map_dt(struct smbchg_chip *chip,
+ struct ilim_map *map, char *property)
+{
+ struct device_node *node = chip->dev->of_node;
+ int total_elements, size;
+ struct property *prop;
+ const __be32 *data;
+ int num, i;
+
+ prop = of_find_property(node, property, &size);
+ if (!prop) {
+ dev_err(chip->dev, "%s missing\n", property);
+ return -EINVAL;
+ }
+
+ total_elements = size / sizeof(int);
+ if (total_elements % RANGE_ENTRY) {
+ dev_err(chip->dev, "%s table not in multiple of %d, total elements = %d\n",
+ property, RANGE_ENTRY, total_elements);
+ return -EINVAL;
+ }
+
+ data = prop->value;
+ num = total_elements / RANGE_ENTRY;
+ map->entries = devm_kzalloc(chip->dev,
+ num * sizeof(struct ilim_entry), GFP_KERNEL);
+ if (!map->entries) {
+ dev_err(chip->dev, "kzalloc failed for default ilim\n");
+ return -ENOMEM;
+ }
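+ /* each table row: <vmin_uv vmax_uv icl_pt_ma icl_lv_ma icl_hv_ma> */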
+ for (i = 0; i < num; i++) {
+ map->entries[i].vmin_uv = be32_to_cpup(data++);
+ map->entries[i].vmax_uv = be32_to_cpup(data++);
+ map->entries[i].icl_pt_ma = be32_to_cpup(data++);
+ map->entries[i].icl_lv_ma = be32_to_cpup(data++);
+ map->entries[i].icl_hv_ma = be32_to_cpup(data++);
+ }
+ map->num = num;
+ return 0;
+}
+
+static int smb_parse_wipower_dt(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ chip->wipower_dyn_icl_avail = false;
+
+ if (!chip->vadc_dev)
+ goto err;
+
+ rc = smb_parse_wipower_map_dt(chip, &chip->wipower_default,
+ "qcom,wipower-default-ilim-map");
+ if (rc) {
+ dev_err(chip->dev, "failed to parse wipower-default-ilim-map rc = %d\n",
+ rc);
+ goto err;
+ }
+
+ rc = smb_parse_wipower_map_dt(chip, &chip->wipower_pt,
+ "qcom,wipower-pt-ilim-map");
+ if (rc) {
+ dev_err(chip->dev, "failed to parse wipower-pt-ilim-map rc = %d\n",
+ rc);
+ goto err;
+ }
+
+ rc = smb_parse_wipower_map_dt(chip, &chip->wipower_div2,
+ "qcom,wipower-div2-ilim-map");
+ if (rc) {
+ dev_err(chip->dev, "failed to parse wipower-div2-ilim-map rc = %d\n",
+ rc);
+ goto err;
+ }
+ chip->wipower_dyn_icl_avail = true;
+ return 0;
+err:
+ chip->wipower_default.num = 0;
+ chip->wipower_pt.num = 0;
+ chip->wipower_div2.num = 0;
+ if (chip->wipower_default.entries)
+ devm_kfree(chip->dev, chip->wipower_default.entries);
+ if (chip->wipower_pt.entries)
+ devm_kfree(chip->dev, chip->wipower_pt.entries);
+ if (chip->wipower_div2.entries)
+ devm_kfree(chip->dev, chip->wipower_div2.entries);
+ chip->wipower_default.entries = NULL;
+ chip->wipower_pt.entries = NULL;
+ chip->wipower_div2.entries = NULL;
+ chip->vadc_dev = NULL;
+ return rc;
+}
+
+#define DEFAULT_VLED_MAX_UV 3500000
+#define DEFAULT_FCC_MA 2000
+static int smb_parse_dt(struct smbchg_chip *chip)
+{
+ int rc = 0, ocp_thresh = -EINVAL;
+ struct device_node *node = chip->dev->of_node;
+ const char *dc_psy_type, *bpd;
+
+ if (!node) {
+ dev_err(chip->dev, "device tree info. missing\n");
+ return -EINVAL;
+ }
+
+ /* read optional u32 properties */
+ OF_PROP_READ(chip, ocp_thresh,
+ "ibat-ocp-threshold-ua", rc, 1);
+ if (ocp_thresh >= 0)
+ smbchg_ibat_ocp_threshold_ua = ocp_thresh;
+ OF_PROP_READ(chip, chip->iterm_ma, "iterm-ma", rc, 1);
+ OF_PROP_READ(chip, chip->cfg_fastchg_current_ma,
+ "fastchg-current-ma", rc, 1);
+ if (chip->cfg_fastchg_current_ma == -EINVAL)
+ chip->cfg_fastchg_current_ma = DEFAULT_FCC_MA;
+ OF_PROP_READ(chip, chip->vfloat_mv, "float-voltage-mv", rc, 1);
+ OF_PROP_READ(chip, chip->safety_time, "charging-timeout-mins", rc, 1);
+ OF_PROP_READ(chip, chip->vled_max_uv, "vled-max-uv", rc, 1);
+ if (chip->vled_max_uv < 0)
+ chip->vled_max_uv = DEFAULT_VLED_MAX_UV;
+ OF_PROP_READ(chip, chip->rpara_uohm, "rparasitic-uohm", rc, 1);
+ if (chip->rpara_uohm < 0)
+ chip->rpara_uohm = 0;
+ OF_PROP_READ(chip, chip->prechg_safety_time, "precharging-timeout-mins",
+ rc, 1);
+ OF_PROP_READ(chip, chip->fastchg_current_comp, "fastchg-current-comp",
+ rc, 1);
+ OF_PROP_READ(chip, chip->float_voltage_comp, "float-voltage-comp",
+ rc, 1);
+ if (chip->safety_time != -EINVAL &&
+ (chip->safety_time > chg_time[ARRAY_SIZE(chg_time) - 1])) {
+ dev_err(chip->dev, "Bad charging-timeout-mins %d\n",
+ chip->safety_time);
+ return -EINVAL;
+ }
+ if (chip->prechg_safety_time != -EINVAL &&
+ (chip->prechg_safety_time >
+ prechg_time[ARRAY_SIZE(prechg_time) - 1])) {
+ dev_err(chip->dev, "Bad precharging-timeout-mins %d\n",
+ chip->prechg_safety_time);
+ return -EINVAL;
+ }
+ OF_PROP_READ(chip, chip->resume_delta_mv, "resume-delta-mv", rc, 1);
+ OF_PROP_READ(chip, chip->parallel.min_current_thr_ma,
+ "parallel-usb-min-current-ma", rc, 1);
+ OF_PROP_READ(chip, chip->parallel.min_9v_current_thr_ma,
+ "parallel-usb-9v-min-current-ma", rc, 1);
+ OF_PROP_READ(chip, chip->parallel.allowed_lowering_ma,
+ "parallel-allowed-lowering-ma", rc, 1);
+ if (chip->parallel.min_current_thr_ma != -EINVAL
+ && chip->parallel.min_9v_current_thr_ma != -EINVAL)
+ chip->parallel.avail = true;
+ /*
+ * use the dt values if they exist, otherwise do not touch the params
+ */
+ of_property_read_u32(node, "qcom,parallel-main-chg-fcc-percent",
+ &smbchg_main_chg_fcc_percent);
+ of_property_read_u32(node, "qcom,parallel-main-chg-icl-percent",
+ &smbchg_main_chg_icl_percent);
+ pr_smb(PR_STATUS, "parallel usb thr: %d, 9v thr: %d\n",
+ chip->parallel.min_current_thr_ma,
+ chip->parallel.min_9v_current_thr_ma);
+ OF_PROP_READ(chip, chip->jeita_temp_hard_limit,
+ "jeita-temp-hard-limit", rc, 1);
+ OF_PROP_READ(chip, chip->aicl_rerun_period_s,
+ "aicl-rerun-period-s", rc, 1);
+ OF_PROP_READ(chip, chip->vchg_adc_channel,
+ "vchg-adc-channel-id", rc, 1);
+
+ /* read boolean configuration properties */
+ chip->use_vfloat_adjustments = of_property_read_bool(node,
+ "qcom,autoadjust-vfloat");
+ chip->bmd_algo_disabled = of_property_read_bool(node,
+ "qcom,bmd-algo-disabled");
+ chip->iterm_disabled = of_property_read_bool(node,
+ "qcom,iterm-disabled");
+ chip->soft_vfloat_comp_disabled = of_property_read_bool(node,
+ "qcom,soft-vfloat-comp-disabled");
+ chip->chg_enabled = !(of_property_read_bool(node,
+ "qcom,charging-disabled"));
+ chip->charge_unknown_battery = of_property_read_bool(node,
+ "qcom,charge-unknown-battery");
+ chip->chg_inhibit_en = of_property_read_bool(node,
+ "qcom,chg-inhibit-en");
+ chip->chg_inhibit_source_fg = of_property_read_bool(node,
+ "qcom,chg-inhibit-fg");
+ chip->low_volt_dcin = of_property_read_bool(node,
+ "qcom,low-volt-dcin");
+ chip->force_aicl_rerun = of_property_read_bool(node,
+ "qcom,force-aicl-rerun");
+ chip->skip_usb_suspend_for_fake_battery = of_property_read_bool(node,
+ "qcom,skip-usb-suspend-for-fake-battery");
+
+ /* parse the battery missing detection pin source */
+ rc = of_property_read_string(chip->pdev->dev.of_node,
+ "qcom,bmd-pin-src", &bpd);
+ if (rc) {
+ /* Select BAT_THM as default BPD scheme */
+ chip->bmd_pin_src = BPD_TYPE_DEFAULT;
+ rc = 0;
+ } else {
+ chip->bmd_pin_src = get_bpd(bpd);
+ if (chip->bmd_pin_src < 0) {
+ dev_err(chip->dev,
+ "failed to determine bpd schema rc=%d\n",
+ chip->bmd_pin_src);
+ return chip->bmd_pin_src;
+ }
+ }
+
+ /* parse the dc power supply configuration */
+ rc = of_property_read_string(node, "qcom,dc-psy-type", &dc_psy_type);
+ if (rc) {
+ chip->dc_psy_type = -EINVAL;
+ rc = 0;
+ } else {
+ if (strcmp(dc_psy_type, "Mains") == 0)
+ chip->dc_psy_type = POWER_SUPPLY_TYPE_MAINS;
+ else if (strcmp(dc_psy_type, "Wireless") == 0)
+ chip->dc_psy_type = POWER_SUPPLY_TYPE_WIRELESS;
+ else if (strcmp(dc_psy_type, "Wipower") == 0)
+ chip->dc_psy_type = POWER_SUPPLY_TYPE_WIPOWER;
+ }
+ if (chip->dc_psy_type != -EINVAL) {
+ OF_PROP_READ(chip, chip->dc_target_current_ma,
+ "dc-psy-ma", rc, 0);
+ if (rc)
+ return rc;
+ if (chip->dc_target_current_ma < DC_MA_MIN
+ || chip->dc_target_current_ma > DC_MA_MAX) {
+ dev_err(chip->dev, "Bad dc mA %d\n",
+ chip->dc_target_current_ma);
+ return -EINVAL;
+ }
+ }
+
+ if (chip->dc_psy_type == POWER_SUPPLY_TYPE_WIPOWER)
+ smb_parse_wipower_dt(chip);
+
+ /* read the bms power supply name */
+ rc = of_property_read_string(node, "qcom,bms-psy-name",
+ &chip->bms_psy_name);
+ if (rc)
+ chip->bms_psy_name = NULL;
+
+ /* read the battery power supply name */
+ rc = of_property_read_string(node, "qcom,battery-psy-name",
+ &chip->battery_psy_name);
+ if (rc)
+ chip->battery_psy_name = "battery";
+
+ /* Get the charger led support property */
+ chip->cfg_chg_led_sw_ctrl =
+ of_property_read_bool(node, "qcom,chg-led-sw-controls");
+ chip->cfg_chg_led_support =
+ of_property_read_bool(node, "qcom,chg-led-support");
+
+ if (of_find_property(node, "qcom,thermal-mitigation",
+ &chip->thermal_levels)) {
+ chip->thermal_mitigation = devm_kzalloc(chip->dev,
+ chip->thermal_levels,
+ GFP_KERNEL);
+
+ if (chip->thermal_mitigation == NULL) {
+ dev_err(chip->dev, "thermal mitigation kzalloc() failed.\n");
+ return -ENOMEM;
+ }
+
+ chip->thermal_levels /= sizeof(int);
+ rc = of_property_read_u32_array(node,
+ "qcom,thermal-mitigation",
+ chip->thermal_mitigation, chip->thermal_levels);
+ if (rc) {
+ dev_err(chip->dev,
+ "Couldn't read therm limits rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ chip->skip_usb_notification
+ = of_property_read_bool(node,
+ "qcom,skip-usb-notification");
+
+ chip->otg_pinctrl = of_property_read_bool(node, "qcom,otg-pinctrl");
+
+ return 0;
+}
+
+#define SUBTYPE_REG 0x5
+#define SMBCHG_CHGR_SUBTYPE 0x1
+#define SMBCHG_OTG_SUBTYPE 0x8
+#define SMBCHG_BAT_IF_SUBTYPE 0x3
+#define SMBCHG_USB_CHGPTH_SUBTYPE 0x4
+#define SMBCHG_DC_CHGPTH_SUBTYPE 0x5
+#define SMBCHG_MISC_SUBTYPE 0x7
+#define SMBCHG_LITE_CHGR_SUBTYPE 0x51
+#define SMBCHG_LITE_OTG_SUBTYPE 0x58
+#define SMBCHG_LITE_BAT_IF_SUBTYPE 0x53
+#define SMBCHG_LITE_USB_CHGPTH_SUBTYPE 0x54
+#define SMBCHG_LITE_DC_CHGPTH_SUBTYPE 0x55
+#define SMBCHG_LITE_MISC_SUBTYPE 0x57
+static int smbchg_request_irq(struct smbchg_chip *chip,
+ struct device_node *child,
+ int irq_num, char *irq_name,
+ irqreturn_t (irq_handler)(int irq, void *_chip),
+ int flags)
+{
+ int rc;
+
+ irq_num = of_irq_get_byname(child, irq_name);
+ if (irq_num < 0) {
+ dev_err(chip->dev, "Unable to get %s irq\n", irq_name);
+ return -ENXIO;
+ }
+ rc = devm_request_threaded_irq(chip->dev,
+ irq_num, NULL, irq_handler, flags, irq_name,
+ chip);
+ if (rc < 0) {
+ dev_err(chip->dev, "Unable to request %s irq: %d\n",
+ irq_name, rc);
+ return -ENXIO;
+ }
+ return 0;
+}
+
+static int smbchg_request_irqs(struct smbchg_chip *chip)
+{
+ int rc = 0;
+ unsigned int base;
+ struct device_node *child;
+ u8 subtype;
+ unsigned long flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
+ | IRQF_ONESHOT;
+
+ if (of_get_available_child_count(chip->pdev->dev.of_node) == 0) {
+ pr_err("no child nodes\n");
+ return -ENXIO;
+ }
+
+ for_each_available_child_of_node(chip->pdev->dev.of_node, child) {
+ rc = of_property_read_u32(child, "reg", &base);
+ if (rc < 0) {
+ rc = 0;
+ continue;
+ }
+
+ rc = smbchg_read(chip, &subtype, base + SUBTYPE_REG, 1);
+ if (rc) {
+ dev_err(chip->dev, "Peripheral subtype read failed rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ switch (subtype) {
+ case SMBCHG_CHGR_SUBTYPE:
+ case SMBCHG_LITE_CHGR_SUBTYPE:
+ rc = smbchg_request_irq(chip, child,
+ chip->chg_error_irq, "chg-error",
+ chg_error_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child, chip->taper_irq,
+ "chg-taper-thr", taper_handler,
+ (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+ if (rc < 0)
+ return rc;
+ disable_irq_nosync(chip->taper_irq);
+ rc = smbchg_request_irq(chip, child, chip->chg_term_irq,
+ "chg-tcc-thr", chg_term_handler,
+ (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child, chip->recharge_irq,
+ "chg-rechg-thr", recharge_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child, chip->fastchg_irq,
+ "chg-p2f-thr", fastchg_handler, flags);
+ if (rc < 0)
+ return rc;
+ enable_irq_wake(chip->chg_term_irq);
+ enable_irq_wake(chip->chg_error_irq);
+ enable_irq_wake(chip->fastchg_irq);
+ break;
+ case SMBCHG_BAT_IF_SUBTYPE:
+ case SMBCHG_LITE_BAT_IF_SUBTYPE:
+ rc = smbchg_request_irq(chip, child, chip->batt_hot_irq,
+ "batt-hot", batt_hot_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->batt_warm_irq,
+ "batt-warm", batt_warm_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->batt_cool_irq,
+ "batt-cool", batt_cool_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->batt_cold_irq,
+ "batt-cold", batt_cold_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->batt_missing_irq,
+ "batt-missing", batt_pres_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->vbat_low_irq,
+ "batt-low", vbat_low_handler, flags);
+ if (rc < 0)
+ return rc;
+
+ enable_irq_wake(chip->batt_hot_irq);
+ enable_irq_wake(chip->batt_warm_irq);
+ enable_irq_wake(chip->batt_cool_irq);
+ enable_irq_wake(chip->batt_cold_irq);
+ enable_irq_wake(chip->batt_missing_irq);
+ enable_irq_wake(chip->vbat_low_irq);
+ break;
+ case SMBCHG_USB_CHGPTH_SUBTYPE:
+ case SMBCHG_LITE_USB_CHGPTH_SUBTYPE:
+ rc = smbchg_request_irq(chip, child,
+ chip->usbin_uv_irq,
+ "usbin-uv", usbin_uv_handler,
+ flags | IRQF_EARLY_RESUME);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->usbin_ov_irq,
+ "usbin-ov", usbin_ov_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->src_detect_irq,
+ "usbin-src-det",
+ src_detect_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->aicl_done_irq,
+ "aicl-done",
+ aicl_done_handler,
+ (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+ if (rc < 0)
+ return rc;
+
+ if (chip->schg_version != QPNP_SCHG_LITE) {
+ rc = smbchg_request_irq(chip, child,
+ chip->otg_fail_irq, "otg-fail",
+ otg_fail_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->otg_oc_irq, "otg-oc",
+ otg_oc_handler,
+ (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->usbid_change_irq, "usbid-change",
+ usbid_change_handler,
+ (IRQF_TRIGGER_FALLING | IRQF_ONESHOT));
+ if (rc < 0)
+ return rc;
+ enable_irq_wake(chip->otg_oc_irq);
+ enable_irq_wake(chip->usbid_change_irq);
+ enable_irq_wake(chip->otg_fail_irq);
+ }
+ enable_irq_wake(chip->usbin_uv_irq);
+ enable_irq_wake(chip->usbin_ov_irq);
+ enable_irq_wake(chip->src_detect_irq);
+ if (chip->parallel.avail && chip->usb_present) {
+ rc = enable_irq_wake(chip->aicl_done_irq);
+ chip->enable_aicl_wake = true;
+ }
+ break;
+ case SMBCHG_DC_CHGPTH_SUBTYPE:
+ case SMBCHG_LITE_DC_CHGPTH_SUBTYPE:
+ rc = smbchg_request_irq(chip, child, chip->dcin_uv_irq,
+ "dcin-uv", dcin_uv_handler, flags);
+ if (rc < 0)
+ return rc;
+ enable_irq_wake(chip->dcin_uv_irq);
+ break;
+ case SMBCHG_MISC_SUBTYPE:
+ case SMBCHG_LITE_MISC_SUBTYPE:
+ rc = smbchg_request_irq(chip, child, chip->power_ok_irq,
+ "power-ok", power_ok_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child, chip->chg_hot_irq,
+ "temp-shutdown", chg_hot_handler, flags);
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child, chip->wdog_timeout_irq,
+ "wdog-timeout",
+ wdog_timeout_handler, flags);
+ if (rc < 0)
+ return rc;
+ enable_irq_wake(chip->chg_hot_irq);
+ enable_irq_wake(chip->wdog_timeout_irq);
+ break;
+ case SMBCHG_OTG_SUBTYPE:
+ break;
+ case SMBCHG_LITE_OTG_SUBTYPE:
+ rc = smbchg_request_irq(chip, child,
+ chip->usbid_change_irq, "usbid-change",
+ usbid_change_handler,
+ (IRQF_TRIGGER_FALLING | IRQF_ONESHOT));
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->otg_oc_irq, "otg-oc",
+ otg_oc_handler,
+ (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+ if (rc < 0)
+ return rc;
+ rc = smbchg_request_irq(chip, child,
+ chip->otg_fail_irq, "otg-fail",
+ otg_fail_handler, flags);
+ if (rc < 0)
+ return rc;
+ enable_irq_wake(chip->usbid_change_irq);
+ enable_irq_wake(chip->otg_oc_irq);
+ enable_irq_wake(chip->otg_fail_irq);
+ break;
+ }
+ }
+
+ return rc;
+}
+
+#define REQUIRE_BASE(chip, base, rc) \
+do { \
+ if (!rc && !chip->base) { \
+ dev_err(chip->dev, "Missing " #base "\n"); \
+ rc = -EINVAL; \
+ } \
+} while (0)
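+/* used below to flag any mandatory peripheral base address missing from DT */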
+
+static int smbchg_parse_peripherals(struct smbchg_chip *chip)
+{
+ int rc = 0;
+ unsigned int base;
+ struct device_node *child;
+ u8 subtype;
+
+ if (of_get_available_child_count(chip->pdev->dev.of_node) == 0) {
+ pr_err("no child nodes\n");
+ return -ENXIO;
+ }
+
+ for_each_available_child_of_node(chip->pdev->dev.of_node, child) {
+ rc = of_property_read_u32(child, "reg", &base);
+ if (rc < 0) {
+ rc = 0;
+ continue;
+ }
+
+ rc = smbchg_read(chip, &subtype, base + SUBTYPE_REG, 1);
+ if (rc) {
+ dev_err(chip->dev, "Peripheral subtype read failed rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ switch (subtype) {
+ case SMBCHG_CHGR_SUBTYPE:
+ case SMBCHG_LITE_CHGR_SUBTYPE:
+ chip->chgr_base = base;
+ break;
+ case SMBCHG_BAT_IF_SUBTYPE:
+ case SMBCHG_LITE_BAT_IF_SUBTYPE:
+ chip->bat_if_base = base;
+ break;
+ case SMBCHG_USB_CHGPTH_SUBTYPE:
+ case SMBCHG_LITE_USB_CHGPTH_SUBTYPE:
+ chip->usb_chgpth_base = base;
+ break;
+ case SMBCHG_DC_CHGPTH_SUBTYPE:
+ case SMBCHG_LITE_DC_CHGPTH_SUBTYPE:
+ chip->dc_chgpth_base = base;
+ break;
+ case SMBCHG_MISC_SUBTYPE:
+ case SMBCHG_LITE_MISC_SUBTYPE:
+ chip->misc_base = base;
+ break;
+ case SMBCHG_OTG_SUBTYPE:
+ case SMBCHG_LITE_OTG_SUBTYPE:
+ chip->otg_base = base;
+ break;
+ }
+ }
+
+ REQUIRE_BASE(chip, chgr_base, rc);
+ REQUIRE_BASE(chip, bat_if_base, rc);
+ REQUIRE_BASE(chip, usb_chgpth_base, rc);
+ REQUIRE_BASE(chip, dc_chgpth_base, rc);
+ REQUIRE_BASE(chip, misc_base, rc);
+
+ return rc;
+}
+
+static inline void dump_reg(struct smbchg_chip *chip, u16 addr,
+ const char *name)
+{
+ u8 reg;
+
+ smbchg_read(chip, &reg, addr, 1);
+ pr_smb(PR_DUMP, "%s - %04X = %02X\n", name, addr, reg);
+}
+
+/* dumps useful registers for debug */
+static void dump_regs(struct smbchg_chip *chip)
+{
+ u16 addr;
+
+ /* charger peripheral */
+ for (addr = 0xB; addr <= 0x10; addr++)
+ dump_reg(chip, chip->chgr_base + addr, "CHGR Status");
+ for (addr = 0xF0; addr <= 0xFF; addr++)
+ dump_reg(chip, chip->chgr_base + addr, "CHGR Config");
+ /* battery interface peripheral */
+ dump_reg(chip, chip->bat_if_base + RT_STS, "BAT_IF Status");
+ dump_reg(chip, chip->bat_if_base + CMD_CHG_REG, "BAT_IF Command");
+ for (addr = 0xF0; addr <= 0xFB; addr++)
+ dump_reg(chip, chip->bat_if_base + addr, "BAT_IF Config");
+ /* usb charge path peripheral */
+ for (addr = 0x7; addr <= 0x10; addr++)
+ dump_reg(chip, chip->usb_chgpth_base + addr, "USB Status");
+ dump_reg(chip, chip->usb_chgpth_base + CMD_IL, "USB Command");
+ for (addr = 0xF0; addr <= 0xF5; addr++)
+ dump_reg(chip, chip->usb_chgpth_base + addr, "USB Config");
+ /* dc charge path peripheral */
+ dump_reg(chip, chip->dc_chgpth_base + RT_STS, "DC Status");
+ for (addr = 0xF0; addr <= 0xF6; addr++)
+ dump_reg(chip, chip->dc_chgpth_base + addr, "DC Config");
+ /* misc peripheral */
+ dump_reg(chip, chip->misc_base + IDEV_STS, "MISC Status");
+ dump_reg(chip, chip->misc_base + RT_STS, "MISC Status");
+ for (addr = 0xF0; addr <= 0xF3; addr++)
+ dump_reg(chip, chip->misc_base + addr, "MISC CFG");
+}
+
+static int create_debugfs_entries(struct smbchg_chip *chip)
+{
+ struct dentry *ent;
+
+ chip->debug_root = debugfs_create_dir("qpnp-smbcharger", NULL);
+ if (!chip->debug_root) {
+ dev_err(chip->dev, "Couldn't create debug dir\n");
+ return -EINVAL;
+ }
+
+ ent = debugfs_create_file("force_dcin_icl_check",
+ S_IFREG | S_IWUSR | S_IRUGO,
+ chip->debug_root, chip,
+ &force_dcin_icl_ops);
+ if (!ent) {
+ dev_err(chip->dev,
+ "Couldn't create force dcin icl check file\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int smbchg_check_chg_version(struct smbchg_chip *chip)
+{
+ struct pmic_revid_data *pmic_rev_id;
+ struct device_node *revid_dev_node;
+ int rc;
+
+ revid_dev_node = of_parse_phandle(chip->pdev->dev.of_node,
+ "qcom,pmic-revid", 0);
+ if (!revid_dev_node) {
+ pr_err("Missing qcom,pmic-revid property - driver failed\n");
+ return -EINVAL;
+ }
+
+ pmic_rev_id = get_revid_data(revid_dev_node);
+ if (IS_ERR(pmic_rev_id)) {
+ rc = PTR_ERR(pmic_rev_id);
+ if (rc != -EPROBE_DEFER)
+ pr_err("Unable to get pmic_revid rc=%d\n", rc);
+ return rc;
+ }
+
+ switch (pmic_rev_id->pmic_subtype) {
+ case PMI8994:
+ chip->wa_flags |= SMBCHG_AICL_DEGLITCH_WA
+ | SMBCHG_BATT_OV_WA
+ | SMBCHG_CC_ESR_WA
+ | SMBCHG_RESTART_WA;
+ use_pmi8994_tables(chip);
+ chip->schg_version = QPNP_SCHG;
+ break;
+ case PMI8950:
+ case PMI8937:
+ chip->wa_flags |= SMBCHG_BATT_OV_WA;
+ if (pmic_rev_id->rev4 < 2) /* PMI8950 1.0 */ {
+ chip->wa_flags |= SMBCHG_AICL_DEGLITCH_WA;
+ } else { /* rev > PMI8950 v1.0 */
+ chip->wa_flags |= SMBCHG_HVDCP_9V_EN_WA
+ | SMBCHG_USB100_WA;
+ }
+ use_pmi8994_tables(chip);
+ chip->tables.aicl_rerun_period_table =
+ aicl_rerun_period_schg_lite;
+ chip->tables.aicl_rerun_period_len =
+ ARRAY_SIZE(aicl_rerun_period_schg_lite);
+
+ chip->schg_version = QPNP_SCHG_LITE;
+ if (pmic_rev_id->pmic_subtype == PMI8937)
+ chip->hvdcp_not_supported = true;
+ break;
+ case PMI8996:
+ chip->wa_flags |= SMBCHG_CC_ESR_WA
+ | SMBCHG_FLASH_ICL_DISABLE_WA
+ | SMBCHG_RESTART_WA
+ | SMBCHG_FLASH_BUCK_SWITCH_FREQ_WA;
+ use_pmi8996_tables(chip);
+ chip->schg_version = QPNP_SCHG;
+ break;
+ default:
+ pr_err("PMIC subtype %d not supported, WA flags not set\n",
+ pmic_rev_id->pmic_subtype);
+ }
+
+ pr_smb(PR_STATUS, "pmic=%s, wa_flags=0x%x, hvdcp_supported=%s\n",
+ pmic_rev_id->pmic_name, chip->wa_flags,
+ chip->hvdcp_not_supported ? "false" : "true");
+
+ return 0;
+}
+
+static void rerun_hvdcp_det_if_necessary(struct smbchg_chip *chip)
+{
+ enum power_supply_type usb_supply_type;
+ char *usb_type_name;
+ int rc;
+
+ if (!(chip->wa_flags & SMBCHG_RESTART_WA))
+ return;
+
+ read_usb_type(chip, &usb_type_name, &usb_supply_type);
+ if (usb_supply_type == POWER_SUPPLY_TYPE_USB_DCP
+ && !is_hvdcp_present(chip)) {
+ pr_smb(PR_STATUS, "DCP found rerunning APSD\n");
+ rc = vote(chip->usb_icl_votable,
+ CHG_SUSPEND_WORKAROUND_ICL_VOTER, true, 300);
+ if (rc < 0)
+ pr_err("Couldn't vote for 300mA for suspend wa, going ahead rc=%d\n",
+ rc);
+
+ pr_smb(PR_STATUS, "Faking Removal\n");
+ fake_insertion_removal(chip, false);
+ msleep(500);
+ pr_smb(PR_STATUS, "Faking Insertion\n");
+ fake_insertion_removal(chip, true);
+
+ read_usb_type(chip, &usb_type_name, &usb_supply_type);
+ if (usb_supply_type != POWER_SUPPLY_TYPE_USB_DCP) {
+ msleep(500);
+ pr_smb(PR_STATUS, "Fake Removal again as type!=DCP\n");
+ fake_insertion_removal(chip, false);
+ msleep(500);
+ pr_smb(PR_STATUS, "Fake Insert again as type!=DCP\n");
+ fake_insertion_removal(chip, true);
+ }
+
+ rc = vote(chip->usb_icl_votable,
+ CHG_SUSPEND_WORKAROUND_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't vote for 0 for suspend wa, going ahead rc=%d\n",
+ rc);
+ }
+}
+
+static int smbchg_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct smbchg_chip *chip;
+ struct power_supply *typec_psy = NULL;
+ struct qpnp_vadc_chip *vadc_dev, *vchg_vadc_dev;
+ const char *typec_psy_name;
+ struct power_supply_config usb_psy_cfg = {};
+ struct power_supply_config batt_psy_cfg = {};
+ struct power_supply_config dc_psy_cfg = {};
+
+ if (of_property_read_bool(pdev->dev.of_node, "qcom,external-typec")) {
+ /* read the type power supply name */
+ rc = of_property_read_string(pdev->dev.of_node,
+ "qcom,typec-psy-name", &typec_psy_name);
+ if (rc) {
+ pr_err("failed to get prop typec-psy-name rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ typec_psy = power_supply_get_by_name(typec_psy_name);
+ if (!typec_psy) {
+ pr_smb(PR_STATUS,
+ "Type-C supply not found, deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+ }
+
+ vadc_dev = NULL;
+ if (of_find_property(pdev->dev.of_node, "qcom,dcin-vadc", NULL)) {
+ vadc_dev = qpnp_get_vadc(&pdev->dev, "dcin");
+ if (IS_ERR(vadc_dev)) {
+ rc = PTR_ERR(vadc_dev);
+ if (rc != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Couldn't get vadc rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ vchg_vadc_dev = NULL;
+ if (of_find_property(pdev->dev.of_node, "qcom,vchg_sns-vadc", NULL)) {
+ vchg_vadc_dev = qpnp_get_vadc(&pdev->dev, "vchg_sns");
+ if (IS_ERR(vchg_vadc_dev)) {
+ rc = PTR_ERR(vchg_vadc_dev);
+ if (rc != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Couldn't get vadc 'vchg' rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!chip->regmap) {
+ dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+ return -EINVAL;
+ }
+
+ chip->fcc_votable = create_votable("BATT_FCC",
+ VOTE_MIN,
+ set_fastchg_current_vote_cb, chip);
+ if (IS_ERR(chip->fcc_votable)) {
+ rc = PTR_ERR(chip->fcc_votable);
+ goto votables_cleanup;
+ }
+
+ chip->usb_icl_votable = create_votable("USB_ICL",
+ VOTE_MIN,
+ set_usb_current_limit_vote_cb, chip);
+ if (IS_ERR(chip->usb_icl_votable)) {
+ rc = PTR_ERR(chip->usb_icl_votable);
+ goto votables_cleanup;
+ }
+
+ chip->dc_icl_votable = create_votable("DCIN_ICL",
+ VOTE_MIN,
+ set_dc_current_limit_vote_cb, chip);
+ if (IS_ERR(chip->dc_icl_votable)) {
+ rc = PTR_ERR(chip->dc_icl_votable);
+ goto votables_cleanup;
+ }
+
+ chip->usb_suspend_votable = create_votable("USB_SUSPEND",
+ VOTE_SET_ANY,
+ usb_suspend_vote_cb, chip);
+ if (IS_ERR(chip->usb_suspend_votable)) {
+ rc = PTR_ERR(chip->usb_suspend_votable);
+ goto votables_cleanup;
+ }
+
+ chip->dc_suspend_votable = create_votable("DC_SUSPEND",
+ VOTE_SET_ANY,
+ dc_suspend_vote_cb, chip);
+ if (IS_ERR(chip->dc_suspend_votable)) {
+ rc = PTR_ERR(chip->dc_suspend_votable);
+ goto votables_cleanup;
+ }
+
+ chip->battchg_suspend_votable = create_votable("BATTCHG_SUSPEND",
+ VOTE_SET_ANY,
+ charging_suspend_vote_cb, chip);
+ if (IS_ERR(chip->battchg_suspend_votable)) {
+ rc = PTR_ERR(chip->battchg_suspend_votable);
+ goto votables_cleanup;
+ }
+
+ chip->hw_aicl_rerun_disable_votable = create_votable("HWAICL_DISABLE",
+ VOTE_SET_ANY,
+ smbchg_hw_aicl_rerun_disable_cb, chip);
+ if (IS_ERR(chip->hw_aicl_rerun_disable_votable)) {
+ rc = PTR_ERR(chip->hw_aicl_rerun_disable_votable);
+ goto votables_cleanup;
+ }
+
+ chip->hw_aicl_rerun_enable_indirect_votable = create_votable(
+ "HWAICL_ENABLE_INDIRECT",
+ VOTE_SET_ANY,
+ smbchg_hw_aicl_rerun_enable_indirect_cb, chip);
+ if (IS_ERR(chip->hw_aicl_rerun_enable_indirect_votable)) {
+ rc = PTR_ERR(chip->hw_aicl_rerun_enable_indirect_votable);
+ goto votables_cleanup;
+ }
+
+ chip->aicl_deglitch_short_votable = create_votable(
+ "HWAICL_SHORT_DEGLITCH",
+ VOTE_SET_ANY,
+ smbchg_aicl_deglitch_config_cb, chip);
+ if (IS_ERR(chip->aicl_deglitch_short_votable)) {
+ rc = PTR_ERR(chip->aicl_deglitch_short_votable);
+ goto votables_cleanup;
+ }
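+ /*
+ * All charging enables and current limits are arbitrated through the
+ * votables created above; individual callers only add or remove votes.
+ */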
+
+ INIT_WORK(&chip->usb_set_online_work, smbchg_usb_update_online_work);
+ INIT_DELAYED_WORK(&chip->parallel_en_work,
+ smbchg_parallel_usb_en_work);
+ INIT_DELAYED_WORK(&chip->vfloat_adjust_work, smbchg_vfloat_adjust_work);
+ INIT_DELAYED_WORK(&chip->hvdcp_det_work, smbchg_hvdcp_det_work);
+ init_completion(&chip->src_det_lowered);
+ init_completion(&chip->src_det_raised);
+ init_completion(&chip->usbin_uv_lowered);
+ init_completion(&chip->usbin_uv_raised);
+ chip->vadc_dev = vadc_dev;
+ chip->vchg_vadc_dev = vchg_vadc_dev;
+ chip->pdev = pdev;
+ chip->dev = &pdev->dev;
+
+ chip->typec_psy = typec_psy;
+ chip->fake_battery_soc = -EINVAL;
+ chip->usb_online = -EINVAL;
+ dev_set_drvdata(&pdev->dev, chip);
+
+ spin_lock_init(&chip->sec_access_lock);
+ mutex_init(&chip->therm_lvl_lock);
+ mutex_init(&chip->usb_set_online_lock);
+ mutex_init(&chip->parallel.lock);
+ mutex_init(&chip->taper_irq_lock);
+ mutex_init(&chip->pm_lock);
+ mutex_init(&chip->wipower_config);
+ mutex_init(&chip->usb_status_lock);
+ device_init_wakeup(chip->dev, true);
+
+ rc = smbchg_parse_peripherals(chip);
+ if (rc) {
+ dev_err(chip->dev, "Error parsing DT peripherals: %d\n", rc);
+ goto votables_cleanup;
+ }
+
+ rc = smbchg_check_chg_version(chip);
+ if (rc) {
+ pr_err("Unable to check schg version rc=%d\n", rc);
+ goto votables_cleanup;
+ }
+
+ rc = smb_parse_dt(chip);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Unable to parse DT nodes: %d\n", rc);
+ goto votables_cleanup;
+ }
+
+ rc = smbchg_regulator_init(chip);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "Couldn't initialize regulator rc=%d\n", rc);
+ goto votables_cleanup;
+ }
+
+ chip->extcon = devm_extcon_dev_allocate(chip->dev, smbchg_extcon_cable);
+ if (IS_ERR(chip->extcon)) {
+ dev_err(chip->dev, "failed to allocate extcon device\n");
+ rc = PTR_ERR(chip->extcon);
+ goto votables_cleanup;
+ }
+
+ rc = devm_extcon_dev_register(chip->dev, chip->extcon);
+ if (rc) {
+ dev_err(chip->dev, "failed to register extcon device\n");
+ goto votables_cleanup;
+ }
+
+ chip->usb_psy_d.name = "usb";
+ chip->usb_psy_d.type = POWER_SUPPLY_TYPE_USB;
+ chip->usb_psy_d.get_property = smbchg_usb_get_property;
+ chip->usb_psy_d.set_property = smbchg_usb_set_property;
+ chip->usb_psy_d.properties = smbchg_usb_properties;
+ chip->usb_psy_d.num_properties = ARRAY_SIZE(smbchg_usb_properties);
+ chip->usb_psy_d.property_is_writeable = smbchg_usb_is_writeable;
+
+ usb_psy_cfg.drv_data = chip;
+ usb_psy_cfg.supplied_to = smbchg_usb_supplicants;
+ usb_psy_cfg.num_supplicants = ARRAY_SIZE(smbchg_usb_supplicants);
+
+ chip->usb_psy = devm_power_supply_register(chip->dev,
+ &chip->usb_psy_d, &usb_psy_cfg);
+ if (IS_ERR(chip->usb_psy)) {
+ dev_err(&pdev->dev, "Unable to register usb_psy rc = %ld\n",
+ PTR_ERR(chip->usb_psy));
+ rc = PTR_ERR(chip->usb_psy);
+ goto votables_cleanup;
+ }
+
+ if (of_find_property(chip->dev->of_node, "dpdm-supply", NULL)) {
+ chip->dpdm_reg = devm_regulator_get(chip->dev, "dpdm");
+ if (IS_ERR(chip->dpdm_reg)) {
+ rc = PTR_ERR(chip->dpdm_reg);
+ goto votables_cleanup;
+ }
+ }
+
+ rc = smbchg_hw_init(chip);
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "Unable to initialize hardware rc = %d\n", rc);
+ goto out;
+ }
+
+ rc = determine_initial_status(chip);
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "Unable to determine init status rc = %d\n", rc);
+ goto out;
+ }
+
+ chip->previous_soc = -EINVAL;
+ chip->batt_psy_d.name = chip->battery_psy_name;
+ chip->batt_psy_d.type = POWER_SUPPLY_TYPE_BATTERY;
+ chip->batt_psy_d.get_property = smbchg_battery_get_property;
+ chip->batt_psy_d.set_property = smbchg_battery_set_property;
+ chip->batt_psy_d.properties = smbchg_battery_properties;
+ chip->batt_psy_d.num_properties = ARRAY_SIZE(smbchg_battery_properties);
+ chip->batt_psy_d.external_power_changed = smbchg_external_power_changed;
+ chip->batt_psy_d.property_is_writeable = smbchg_battery_is_writeable;
+
+ batt_psy_cfg.drv_data = chip;
+ batt_psy_cfg.num_supplicants = 0;
+ chip->batt_psy = devm_power_supply_register(chip->dev,
+ &chip->batt_psy_d,
+ &batt_psy_cfg);
+ if (IS_ERR(chip->batt_psy)) {
+ dev_err(&pdev->dev,
+ "Unable to register batt_psy rc = %ld\n",
+ PTR_ERR(chip->batt_psy));
+ rc = PTR_ERR(chip->batt_psy);
+ goto out;
+ }
+
+ if (chip->dc_psy_type != -EINVAL) {
+ chip->dc_psy_d.name = "dc";
+ chip->dc_psy_d.type = chip->dc_psy_type;
+ chip->dc_psy_d.get_property = smbchg_dc_get_property;
+ chip->dc_psy_d.set_property = smbchg_dc_set_property;
+ chip->dc_psy_d.property_is_writeable = smbchg_dc_is_writeable;
+ chip->dc_psy_d.properties = smbchg_dc_properties;
+ chip->dc_psy_d.num_properties
+ = ARRAY_SIZE(smbchg_dc_properties);
+
+ dc_psy_cfg.drv_data = chip;
+ dc_psy_cfg.num_supplicants
+ = ARRAY_SIZE(smbchg_dc_supplicants);
+ dc_psy_cfg.supplied_to = smbchg_dc_supplicants;
+
+ chip->dc_psy = devm_power_supply_register(chip->dev,
+ &chip->dc_psy_d,
+ &dc_psy_cfg);
+ if (IS_ERR(chip->dc_psy)) {
+ dev_err(&pdev->dev,
+ "Unable to register dc_psy rc = %ld\n",
+ PTR_ERR(chip->dc_psy));
+ rc = PTR_ERR(chip->dc_psy);
+ goto out;
+ }
+ }
+
+ if (chip->cfg_chg_led_support &&
+ chip->schg_version == QPNP_SCHG_LITE) {
+ rc = smbchg_register_chg_led(chip);
+ if (rc) {
+ dev_err(chip->dev,
+ "Unable to register charger led: %d\n",
+ rc);
+ goto out;
+ }
+
+ rc = smbchg_chg_led_controls(chip);
+ if (rc) {
+ dev_err(chip->dev,
+ "Failed to set charger led control bit: %d\n",
+ rc);
+ goto unregister_led_class;
+ }
+ }
+
+ rc = smbchg_request_irqs(chip);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Unable to request irqs rc = %d\n", rc);
+ goto unregister_led_class;
+ }
+
+ rerun_hvdcp_det_if_necessary(chip);
+
+ dump_regs(chip);
+ create_debugfs_entries(chip);
+ dev_info(chip->dev,
+ "SMBCHG successfully probed. Charger version=%s Revision DIG:%d.%d ANA:%d.%d batt=%d dc=%d usb=%d\n",
+ version_str[chip->schg_version],
+ chip->revision[DIG_MAJOR], chip->revision[DIG_MINOR],
+ chip->revision[ANA_MAJOR], chip->revision[ANA_MINOR],
+ get_prop_batt_present(chip),
+ chip->dc_present, chip->usb_present);
+ return 0;
+
+unregister_led_class:
+ if (chip->cfg_chg_led_support && chip->schg_version == QPNP_SCHG_LITE)
+ led_classdev_unregister(&chip->led_cdev);
+out:
+ handle_usb_removal(chip);
+votables_cleanup:
+ if (chip->aicl_deglitch_short_votable)
+ destroy_votable(chip->aicl_deglitch_short_votable);
+ if (chip->hw_aicl_rerun_enable_indirect_votable)
+ destroy_votable(chip->hw_aicl_rerun_enable_indirect_votable);
+ if (chip->hw_aicl_rerun_disable_votable)
+ destroy_votable(chip->hw_aicl_rerun_disable_votable);
+ if (chip->battchg_suspend_votable)
+ destroy_votable(chip->battchg_suspend_votable);
+ if (chip->dc_suspend_votable)
+ destroy_votable(chip->dc_suspend_votable);
+ if (chip->usb_suspend_votable)
+ destroy_votable(chip->usb_suspend_votable);
+ if (chip->dc_icl_votable)
+ destroy_votable(chip->dc_icl_votable);
+ if (chip->usb_icl_votable)
+ destroy_votable(chip->usb_icl_votable);
+ if (chip->fcc_votable)
+ destroy_votable(chip->fcc_votable);
+ return rc;
+}
+
+static int smbchg_remove(struct platform_device *pdev)
+{
+ struct smbchg_chip *chip = dev_get_drvdata(&pdev->dev);
+
+ debugfs_remove_recursive(chip->debug_root);
+
+ destroy_votable(chip->aicl_deglitch_short_votable);
+ destroy_votable(chip->hw_aicl_rerun_enable_indirect_votable);
+ destroy_votable(chip->hw_aicl_rerun_disable_votable);
+ destroy_votable(chip->battchg_suspend_votable);
+ destroy_votable(chip->dc_suspend_votable);
+ destroy_votable(chip->usb_suspend_votable);
+ destroy_votable(chip->dc_icl_votable);
+ destroy_votable(chip->usb_icl_votable);
+ destroy_votable(chip->fcc_votable);
+
+ return 0;
+}
+
+static void smbchg_shutdown(struct platform_device *pdev)
+{
+ struct smbchg_chip *chip = dev_get_drvdata(&pdev->dev);
+ int rc;
+
+ if (!(chip->wa_flags & SMBCHG_RESTART_WA))
+ return;
+
+ if (!is_hvdcp_present(chip))
+ return;
+
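+ /*
+ * SMBCHG_RESTART_WA with an HVDCP adapter attached: force the adapter
+ * back to 5V and rerun detection so the charger is left in a known 5V
+ * state across the reboot.
+ */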
+ pr_smb(PR_MISC, "Disable Parallel\n");
+ mutex_lock(&chip->parallel.lock);
+ smbchg_parallel_en = 0;
+ smbchg_parallel_usb_disable(chip);
+ mutex_unlock(&chip->parallel.lock);
+
+ pr_smb(PR_MISC, "Disable all interrupts\n");
+ disable_irq(chip->aicl_done_irq);
+ disable_irq(chip->batt_cold_irq);
+ disable_irq(chip->batt_cool_irq);
+ disable_irq(chip->batt_hot_irq);
+ disable_irq(chip->batt_missing_irq);
+ disable_irq(chip->batt_warm_irq);
+ disable_irq(chip->chg_error_irq);
+ disable_irq(chip->chg_hot_irq);
+ disable_irq(chip->chg_term_irq);
+ disable_irq(chip->dcin_uv_irq);
+ disable_irq(chip->fastchg_irq);
+ disable_irq(chip->otg_fail_irq);
+ disable_irq(chip->otg_oc_irq);
+ disable_irq(chip->power_ok_irq);
+ disable_irq(chip->recharge_irq);
+ disable_irq(chip->src_detect_irq);
+ disable_irq(chip->taper_irq);
+ disable_irq(chip->usbid_change_irq);
+ disable_irq(chip->usbin_ov_irq);
+ disable_irq(chip->usbin_uv_irq);
+ disable_irq(chip->vbat_low_irq);
+ disable_irq(chip->wdog_timeout_irq);
+
+ /* remove all votes for short deglitch */
+ vote(chip->aicl_deglitch_short_votable,
+ VARB_WORKAROUND_SHORT_DEGLITCH_VOTER, false, 0);
+ vote(chip->aicl_deglitch_short_votable,
+ HVDCP_SHORT_DEGLITCH_VOTER, false, 0);
+
+ /* vote to ensure AICL rerun is enabled */
+ rc = vote(chip->hw_aicl_rerun_enable_indirect_votable,
+ SHUTDOWN_WORKAROUND_VOTER, true, 0);
+ if (rc < 0)
+ pr_err("Couldn't vote to enable indirect AICL rerun\n");
+ rc = vote(chip->hw_aicl_rerun_disable_votable,
+ WEAK_CHARGER_HW_AICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't vote to enable AICL rerun\n");
+
+ /* switch to 5V HVDCP */
+ pr_smb(PR_MISC, "Switch to 5V HVDCP\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_5V);
+ if (rc < 0) {
+ pr_err("Couldn't configure HVDCP 5V rc=%d\n", rc);
+ return;
+ }
+
+ pr_smb(PR_MISC, "Wait 500mS to lower to 5V\n");
+ /* wait for HVDCP to lower to 5V */
+ msleep(500);
+ /*
+ * Check if the same hvdcp session is in progress: src_det should be
+ * high and we should still be in 5V hvdcp.
+ */
+ if (!is_src_detect_high(chip)) {
+ pr_smb(PR_MISC, "src det low after 500mS sleep\n");
+ return;
+ }
+
+ /* disable HVDCP */
+ pr_smb(PR_MISC, "Disable HVDCP\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_EN_BIT, 0);
+ if (rc < 0)
+ pr_err("Couldn't disable HVDCP rc=%d\n", rc);
+
+ chip->hvdcp_3_det_ignore_uv = true;
+ /* fake a removal */
+ pr_smb(PR_MISC, "Faking Removal\n");
+ rc = fake_insertion_removal(chip, false);
+ if (rc < 0)
+ pr_err("Couldn't fake removal HVDCP Removed rc=%d\n", rc);
+
+ /* fake an insertion */
+ pr_smb(PR_MISC, "Faking Insertion\n");
+ rc = fake_insertion_removal(chip, true);
+ if (rc < 0)
+ pr_err("Couldn't fake insertion rc=%d\n", rc);
+
+ pr_smb(PR_MISC, "Wait 1S to settle\n");
+ msleep(1000);
+ chip->hvdcp_3_det_ignore_uv = false;
+
+ pr_smb(PR_STATUS, "wrote power off configurations\n");
+}
+
+static const struct dev_pm_ops smbchg_pm_ops = {
+};
+
+MODULE_DEVICE_TABLE(spmi, smbchg_id);
+
+static struct platform_driver smbchg_driver = {
+ .driver = {
+ .name = "qpnp-smbcharger",
+ .owner = THIS_MODULE,
+ .of_match_table = smbchg_match_table,
+ .pm = &smbchg_pm_ops,
+ },
+ .probe = smbchg_probe,
+ .remove = smbchg_remove,
+ .shutdown = smbchg_shutdown,
+};
+
+static int __init smbchg_init(void)
+{
+ return platform_driver_register(&smbchg_driver);
+}
+
+static void __exit smbchg_exit(void)
+{
+ platform_driver_unregister(&smbchg_driver);
+}
+
+module_init(smbchg_init);
+module_exit(smbchg_exit);
+
+MODULE_DESCRIPTION("QPNP SMB Charger");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qpnp-smbcharger");
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index df7aabfd7e2e..93512f155c52 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -3734,8 +3734,165 @@ irqreturn_t smblib_handle_usb_source_change(int irq, void *data)
return IRQ_HANDLED;
}
+static int typec_try_sink(struct smb_charger *chg)
+{
+ union power_supply_propval val;
+ bool debounce_done, vbus_detected, sink;
+ u8 stat;
+ int exit_mode = ATTACHED_SRC, rc;
+
+ /* ignore typec interrupt while try.snk WIP */
+ chg->try_sink_active = true;
+
+ /* force SNK mode */
+ val.intval = POWER_SUPPLY_TYPEC_PR_SINK;
+ rc = smblib_set_prop_typec_power_role(chg, &val);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set UFP mode rc=%d\n", rc);
+ goto try_sink_exit;
+ }
+
+ /* reduce Tccdebounce time to ~20ms */
+ rc = smblib_masked_write(chg, MISC_CFG_REG,
+ TCC_DEBOUNCE_20MS_BIT, TCC_DEBOUNCE_20MS_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set MISC_CFG_REG rc=%d\n", rc);
+ goto try_sink_exit;
+ }
+
+ /*
+ * give opportunity to the other side to be a SRC,
+ * for tDRPTRY + Tccdebounce time
+ */
+ msleep(120);
+
+ rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n",
+ rc);
+ goto try_sink_exit;
+ }
+
+ debounce_done = stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT;
+
+ if (!debounce_done)
+ /*
+ * The other side didn't switch to source; either it is an
+ * adamant sink or it has been removed. Go back to showing Rp.
+ */
+ goto try_wait_src;
+
+ /*
+ * We are in force sink mode and the other side has switched to
+ * showing Rp. Configure DRP in case the other side removes Rp so we
+ * can quickly (20ms) switch to showing our Rp. Note that the spec
+ * requires us to show Rp for 80mS while the DRP DFP residency is just
+ * 54mS, but 54mS is plenty of time for us to react and force Rp for
+ * the remaining 26mS.
+ */
+ val.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+ rc = smblib_set_prop_typec_power_role(chg, &val);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set DFP mode rc=%d\n",
+ rc);
+ goto try_sink_exit;
+ }
+
+ /*
+ * while other side is Rp, wait for VBUS from it; exit if other side
+ * removes Rp
+ */
+ do {
+ rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n",
+ rc);
+ goto try_sink_exit;
+ }
+
+ debounce_done = stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT;
+ vbus_detected = stat & TYPEC_VBUS_STATUS_BIT;
+
+ /* Successfully transitioned to ATTACHED.SNK */
+ if (vbus_detected && debounce_done) {
+ exit_mode = ATTACHED_SINK;
+ goto try_sink_exit;
+ }
+
+ /*
+ * Ensure sink since drp may put us in source if other
+ * side switches back to Rd
+ */
+ sink = !(stat & UFP_DFP_MODE_STATUS_BIT);
+
+ usleep_range(1000, 2000);
+ } while (debounce_done && sink);
+
+try_wait_src:
+ /*
+ * Transition to the trywait.SRC state. Check if the other side still
+ * wants to be a SNK or has been removed.
+ */
+ val.intval = POWER_SUPPLY_TYPEC_PR_SOURCE;
+ rc = smblib_set_prop_typec_power_role(chg, &val);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set UFP mode rc=%d\n", rc);
+ goto try_sink_exit;
+ }
+
+ /* Need to be in this state for tDRPTRY time, 75ms~150ms */
+ msleep(80);
+
+ rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+ goto try_sink_exit;
+ }
+
+ debounce_done = stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT;
+
+ if (debounce_done)
+ /* the other side wants to be a sink */
+ exit_mode = ATTACHED_SRC;
+ else
+ /* the other side is detached */
+ exit_mode = UNATTACHED_SINK;
+
+try_sink_exit:
+ /* release forcing of SRC/SNK mode */
+ val.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+ rc = smblib_set_prop_typec_power_role(chg, &val);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't set DFP mode rc=%d\n", rc);
+
+ /* revert Tccdebounce time back to ~120ms */
+ rc = smblib_masked_write(chg, MISC_CFG_REG, TCC_DEBOUNCE_20MS_BIT, 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't set MISC_CFG_REG rc=%d\n", rc);
+
+ chg->try_sink_active = false;
+
+ return exit_mode;
+}
+
static void typec_sink_insertion(struct smb_charger *chg)
{
+ int exit_mode;
+
+ /*
+ * Try.SNK entry status - ATTACHWAIT.SRC state and detected Rd-open
+ * or RD-Ra for TccDebounce time.
+ */
+
+ if (*chg->try_sink_enabled) {
+ exit_mode = typec_try_sink(chg);
+
+ if (exit_mode != ATTACHED_SRC) {
+ smblib_usb_typec_change(chg);
+ return;
+ }
+ }
+
/* when a sink is inserted we should not wait on hvdcp timeout to
* enable pd
*/
@@ -3993,7 +4150,7 @@ static void smblib_handle_typec_cc_state_change(struct smb_charger *chg)
smblib_typec_mode_name[chg->typec_mode]);
}
-static void smblib_usb_typec_change(struct smb_charger *chg)
+void smblib_usb_typec_change(struct smb_charger *chg)
{
int rc;
@@ -4029,7 +4186,8 @@ irqreturn_t smblib_handle_usb_typec_change(int irq, void *data)
return IRQ_HANDLED;
}
- if (chg->cc2_detach_wa_active || chg->typec_en_dis_active) {
+ if (chg->cc2_detach_wa_active || chg->typec_en_dis_active ||
+ chg->try_sink_active) {
smblib_dbg(chg, PR_INTERRUPT, "Ignoring since %s active\n",
chg->cc2_detach_wa_active ?
"cc2_detach_wa" : "typec_en_dis");
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 19c0d19106d6..f292ca09f532 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -128,6 +128,12 @@ enum smb_irq_index {
SMB_IRQ_MAX,
};
+enum try_sink_exit_mode {
+ ATTACHED_SRC = 0,
+ ATTACHED_SINK,
+ UNATTACHED_SINK,
+};
+
struct smb_irq_info {
const char *name;
const irq_handler_t handler;
@@ -232,6 +238,7 @@ struct smb_charger {
struct smb_params param;
struct smb_iio iio;
int *debug_mask;
+ int *try_sink_enabled;
enum smb_mode mode;
struct smb_chg_freq chg_freq;
int smb_version;
@@ -341,6 +348,7 @@ struct smb_charger {
u32 wa_flags;
bool cc2_detach_wa_active;
bool typec_en_dis_active;
+ bool try_sink_active;
int boost_current_ua;
int temp_speed_reading_count;
@@ -518,6 +526,7 @@ int smblib_get_prop_pr_swap_in_progress(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg,
const union power_supply_propval *val);
+void smblib_usb_typec_change(struct smb_charger *chg);
int smblib_init(struct smb_charger *chg);
int smblib_deinit(struct smb_charger *chg);
diff --git a/drivers/power/supply/qcom/smb-reg.h b/drivers/power/supply/qcom/smb-reg.h
index d8671ab1fd06..4ddb085e9300 100644
--- a/drivers/power/supply/qcom/smb-reg.h
+++ b/drivers/power/supply/qcom/smb-reg.h
@@ -624,6 +624,7 @@ enum {
#define TAPER_TIMER_SEL_CFG_REG (USBIN_BASE + 0x64)
#define TYPEC_SPARE_CFG_BIT BIT(7)
+#define TYPEC_DRP_DFP_TIME_CFG_BIT BIT(5)
#define TAPER_TIMER_SEL_MASK GENMASK(1, 0)
#define USBIN_LOAD_CFG_REG (USBIN_BASE + 0x65)
diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c
index da72b0b59c3a..6dd425abc5a5 100644
--- a/drivers/pps/clients/pps-gpio.c
+++ b/drivers/pps/clients/pps-gpio.c
@@ -44,6 +44,7 @@ struct pps_gpio_device_data {
bool assert_falling_edge;
bool capture_clear;
unsigned int gpio_pin;
+ bool use_system_time_ts;
};
/*
@@ -56,11 +57,14 @@ static irqreturn_t pps_gpio_irq_handler(int irq, void *data)
struct pps_event_time ts;
int rising_edge;
- /* Get the time stamp first */
- get_monotonic_boottime(&ts.ts_real);
-
info = data;
+ /* Get the time stamp first */
+ if (!info->use_system_time_ts)
+ get_monotonic_boottime(&ts.ts_real);
+ else
+ pps_get_ts(&ts);
+
rising_edge = gpio_get_value(info->gpio_pin);
if ((rising_edge && !info->assert_falling_edge) ||
(!rising_edge && info->assert_falling_edge))
@@ -119,6 +123,9 @@ static int pps_gpio_probe(struct platform_device *pdev)
if (of_get_property(np, "assert-falling-edge", NULL))
data->assert_falling_edge = true;
+
+ if (of_get_property(np, "use-system-time-ts", NULL))
+ data->use_system_time_ts = true;
}
/* GPIO setup */
diff --git a/drivers/regulator/qpnp-labibb-regulator.c b/drivers/regulator/qpnp-labibb-regulator.c
index 72c697bdcd29..4cef8904a76a 100644
--- a/drivers/regulator/qpnp-labibb-regulator.c
+++ b/drivers/regulator/qpnp-labibb-regulator.c
@@ -1238,6 +1238,7 @@ static int qpnp_ibb_output_voltage_at_one_pulse_v2(struct qpnp_labibb *labibb,
return rc;
}
+/* For PMI8998 and earlier PMICs */
static const struct ibb_ver_ops ibb_ops_v1 = {
.set_default_voltage = qpnp_ibb_set_default_voltage_v1,
.set_voltage = qpnp_ibb_set_voltage_v1,
@@ -1249,6 +1250,7 @@ static const struct ibb_ver_ops ibb_ops_v1 = {
.voltage_at_one_pulse = qpnp_ibb_output_voltage_at_one_pulse_v1,
};
+/* For PM660A and later PMICs */
static const struct ibb_ver_ops ibb_ops_v2 = {
.set_default_voltage = qpnp_ibb_set_default_voltage_v2,
.set_voltage = qpnp_ibb_set_voltage_v2,
@@ -1358,8 +1360,9 @@ static int qpnp_lab_ps_ctl_v2(struct qpnp_labibb *labibb,
u32 thresh, bool enable)
{
int rc = 0;
- u8 val;
+ u8 val, mask;
+ mask = LAB_PS_CTL_EN;
if (enable) {
for (val = 0; val < ARRAY_SIZE(lab_ps_thresh_table_v2); val++)
if (lab_ps_thresh_table_v2[val] == thresh)
@@ -1371,13 +1374,13 @@ static int qpnp_lab_ps_ctl_v2(struct qpnp_labibb *labibb,
}
val |= LAB_PS_CTL_EN;
+ mask |= LAB_PS_THRESH_MASK;
} else {
val = 0;
}
- rc = qpnp_labibb_write(labibb, labibb->lab_base +
- REG_LAB_PS_CTL, &val, 1);
-
+ rc = qpnp_labibb_masked_write(labibb, labibb->lab_base +
+ REG_LAB_PS_CTL, mask, val);
if (rc < 0)
pr_err("write register %x failed rc = %d\n",
REG_LAB_PS_CTL, rc);
@@ -1385,12 +1388,18 @@ static int qpnp_lab_ps_ctl_v2(struct qpnp_labibb *labibb,
return rc;
}
+/* For PMI8996 and earlier PMICs */
static const struct lab_ver_ops lab_ops_v1 = {
.set_default_voltage = qpnp_lab_set_default_voltage_v1,
.ps_ctl = qpnp_lab_ps_ctl_v1,
};
-static const struct lab_ver_ops lab_ops_v2 = {
+static const struct lab_ver_ops pmi8998_lab_ops = {
+ .set_default_voltage = qpnp_lab_set_default_voltage_v1,
+ .ps_ctl = qpnp_lab_ps_ctl_v2,
+};
+
+static const struct lab_ver_ops pm660_lab_ops = {
.set_default_voltage = qpnp_lab_set_default_voltage_v2,
.ps_ctl = qpnp_lab_ps_ctl_v2,
};
@@ -3263,7 +3272,7 @@ static int qpnp_ibb_dt_init(struct qpnp_labibb *labibb,
struct device_node *of_node)
{
int rc = 0;
- u32 i, tmp = 0;
+ u32 i = 0, tmp = 0;
u8 val, mask;
/*
@@ -3297,37 +3306,48 @@ static int qpnp_ibb_dt_init(struct qpnp_labibb *labibb,
rc = of_property_read_u32(of_node,
"qcom,qpnp-ibb-lab-pwrdn-delay", &tmp);
if (!rc) {
- for (val = 0; val < ARRAY_SIZE(ibb_pwrdn_dly_table); val++)
- if (ibb_pwrdn_dly_table[val] == tmp)
- break;
+ if (tmp > 0) {
+ for (i = 0; i < ARRAY_SIZE(ibb_pwrdn_dly_table); i++) {
+ if (ibb_pwrdn_dly_table[i] == tmp)
+ break;
+ }
- if (val == ARRAY_SIZE(ibb_pwrdn_dly_table)) {
- pr_err("Invalid value in qcom,qpnp-ibb-lab-pwrdn-delay\n");
- return -EINVAL;
+ if (i == ARRAY_SIZE(ibb_pwrdn_dly_table)) {
+ pr_err("Invalid value in qcom,qpnp-ibb-lab-pwrdn-delay\n");
+ return -EINVAL;
+ }
}
labibb->ibb_vreg.pwrdn_dly = tmp;
- val |= IBB_PWRUP_PWRDN_CTL_1_EN_DLY2;
+
+ if (tmp > 0)
+ val = i | IBB_PWRUP_PWRDN_CTL_1_EN_DLY2;
+
mask |= IBB_PWRUP_PWRDN_CTL_1_EN_DLY2;
}
rc = of_property_read_u32(of_node,
"qcom,qpnp-ibb-lab-pwrup-delay", &tmp);
if (!rc) {
- for (i = 0; i < ARRAY_SIZE(ibb_pwrup_dly_table); i++)
- if (ibb_pwrup_dly_table[i] == tmp)
- break;
+ if (tmp > 0) {
+ for (i = 0; i < ARRAY_SIZE(ibb_pwrup_dly_table); i++) {
+ if (ibb_pwrup_dly_table[i] == tmp)
+ break;
+ }
- if (i == ARRAY_SIZE(ibb_pwrup_dly_table)) {
- pr_err("Invalid value in qcom,qpnp-ibb-lab-pwrup-delay\n");
- return -EINVAL;
+ if (i == ARRAY_SIZE(ibb_pwrup_dly_table)) {
+ pr_err("Invalid value in qcom,qpnp-ibb-lab-pwrup-delay\n");
+ return -EINVAL;
+ }
}
labibb->ibb_vreg.pwrup_dly = tmp;
+ if (tmp > 0)
+ val |= IBB_PWRUP_PWRDN_CTL_1_EN_DLY1;
+
val |= (i << IBB_PWRUP_PWRDN_CTL_1_DLY1_SHIFT);
- val |= (IBB_PWRUP_PWRDN_CTL_1_EN_DLY1 |
- IBB_PWRUP_PWRDN_CTL_1_LAB_VREG_OK);
+ val |= IBB_PWRUP_PWRDN_CTL_1_LAB_VREG_OK;
mask |= (IBB_PWRUP_PWRDN_CTL_1_EN_DLY1 |
IBB_PWRUP_PWRDN_CTL_1_DLY1_MASK |
IBB_PWRUP_PWRDN_CTL_1_LAB_VREG_OK);
@@ -4000,7 +4020,10 @@ static int qpnp_labibb_regulator_probe(struct platform_device *pdev)
if (labibb->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE) {
labibb->ibb_ver_ops = &ibb_ops_v2;
- labibb->lab_ver_ops = &lab_ops_v2;
+ labibb->lab_ver_ops = &pm660_lab_ops;
+ } else if (labibb->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE) {
+ labibb->ibb_ver_ops = &ibb_ops_v1;
+ labibb->lab_ver_ops = &pmi8998_lab_ops;
} else {
labibb->ibb_ver_ops = &ibb_ops_v1;
labibb->lab_ver_ops = &lab_ops_v1;
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 9bb934ed2a7a..dbe70002b4fb 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -362,6 +362,14 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
}
EXPORT_SYMBOL_GPL(rtc_set_alarm);
+static void rtc_alarm_disable(struct rtc_device *rtc)
+{
+ if (!rtc->ops || !rtc->ops->alarm_irq_enable)
+ return;
+
+ rtc->ops->alarm_irq_enable(rtc->dev.parent, false);
+}
+
/* Called once per device from rtc_device_register */
int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
@@ -389,7 +397,11 @@ int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
rtc->aie_timer.enabled = 1;
timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
+ } else if (alarm->enabled && (rtc_tm_to_ktime(now).tv64 >=
+ rtc->aie_timer.node.expires.tv64)){
+ rtc_alarm_disable(rtc);
}
+
mutex_unlock(&rtc->ops_lock);
return err;
}
@@ -782,14 +794,6 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
return 0;
}
-static void rtc_alarm_disable(struct rtc_device *rtc)
-{
- if (!rtc->ops || !rtc->ops->alarm_irq_enable)
- return;
-
- rtc->ops->alarm_irq_enable(rtc->dev.parent, false);
-}
-
/**
* rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
* @rtc rtc device
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index d5bf36ec8a75..34367d172961 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,7 +3,7 @@
*
* Debug traces for zfcp.
*
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2017
*/
#define KMSG_COMPONENT "zfcp"
@@ -447,6 +447,7 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
struct scatterlist *resp_entry = ct_els->resp;
+ struct fc_ct_hdr *resph;
struct fc_gpn_ft_resp *acc;
int max_entries, x, last = 0;
@@ -473,6 +474,13 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
return len; /* not GPN_FT response so do not cap */
acc = sg_virt(resp_entry);
+
+ /* cap all but accept CT responses to at least the CT header */
+ resph = (struct fc_ct_hdr *)acc;
+ if ((ct_els->status) ||
+ (resph->ct_cmd != cpu_to_be16(FC_FS_ACC)))
+ return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD);
+
max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
+ 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
* to account for header as 1st pseudo "entry" */;
@@ -555,8 +563,8 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
rec->scsi_retries = sc->retries;
rec->scsi_allowed = sc->allowed;
rec->scsi_id = sc->device->id;
- /* struct zfcp_dbf_scsi needs to be updated to handle 64bit LUNs */
rec->scsi_lun = (u32)sc->device->lun;
+ rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
rec->host_scribble = (unsigned long)sc->host_scribble;
memcpy(rec->scsi_opcode, sc->cmnd,
@@ -564,19 +572,32 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
if (fsf) {
rec->fsf_req_id = fsf->req_id;
+ rec->pl_len = FCP_RESP_WITH_EXT;
fcp_rsp = (struct fcp_resp_with_ext *)
&(fsf->qtcb->bottom.io.fcp_rsp);
+ /* mandatory parts of FCP_RSP IU in this SCSI record */
memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
+ rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
}
if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
- rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE,
- (u16)ZFCP_DBF_PAY_MAX_REC);
- zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len,
- "fcp_sns", fsf->req_id);
+ rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
}
+ /* complete FCP_RSP IU in associated PAYload record
+ * but only if there are optional parts
+ */
+ if (fcp_rsp->resp.fr_flags != 0)
+ zfcp_dbf_pl_write(
+ dbf, fcp_rsp,
+ /* at least one full PAY record
+ * but not beyond hardware response field
+ */
+ min_t(u16, max_t(u16, rec->pl_len,
+ ZFCP_DBF_PAY_MAX_REC),
+ FSF_FCP_RSP_SIZE),
+ "fcp_riu", fsf->req_id);
}
debug_event(dbf->scsi, level, rec, sizeof(*rec));
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index db186d44cfaf..b60667c145fd 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -2,7 +2,7 @@
* zfcp device driver
* debug feature declarations
*
- * Copyright IBM Corp. 2008, 2016
+ * Copyright IBM Corp. 2008, 2017
*/
#ifndef ZFCP_DBF_H
@@ -204,7 +204,7 @@ enum zfcp_dbf_scsi_id {
* @id: unique number of recovery record type
* @tag: identifier string specifying the location of initiation
* @scsi_id: scsi device id
- * @scsi_lun: scsi device logical unit number
+ * @scsi_lun: scsi device logical unit number, low part of 64 bit, old 32 bit
* @scsi_result: scsi result
* @scsi_retries: current retry number of scsi request
* @scsi_allowed: allowed retries
@@ -214,6 +214,7 @@ enum zfcp_dbf_scsi_id {
* @host_scribble: LLD specific data attached to SCSI request
* @pl_len: length of payload stored as zfcp_dbf_pay
* @fsf_rsp: response for fsf request
+ * @scsi_lun_64_hi: scsi device logical unit number, high part of 64 bit
*/
struct zfcp_dbf_scsi {
u8 id;
@@ -230,6 +231,7 @@ struct zfcp_dbf_scsi {
u64 host_scribble;
u16 pl_len;
struct fcp_resp_with_ext fcp_rsp;
+ u32 scsi_lun_64_hi;
} __packed;
/**
@@ -323,7 +325,11 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
{
struct fsf_qtcb *qtcb = req->qtcb;
- if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
+ if (unlikely(req->status & (ZFCP_STATUS_FSFREQ_DISMISSED |
+ ZFCP_STATUS_FSFREQ_ERROR))) {
+ zfcp_dbf_hba_fsf_resp("fs_rerr", 3, req);
+
+ } else if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
(qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
@@ -401,7 +407,8 @@ void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd,
* @flag: indicates type of reset (Target Reset, Logical Unit Reset)
*/
static inline
-void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
+void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag,
+ struct zfcp_fsf_req *fsf_req)
{
char tmp_tag[ZFCP_DBF_TAG_LEN];
@@ -411,7 +418,7 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
memcpy(tmp_tag, "lr_", 3);
memcpy(&tmp_tag[3], tag, 4);
- _zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
+ _zfcp_dbf_scsi(tmp_tag, 1, scmnd, fsf_req);
}
/**
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index df2b541c8287..a2275825186f 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -4,7 +4,7 @@
* Fibre Channel related definitions and inline functions for the zfcp
* device driver
*
- * Copyright IBM Corp. 2009
+ * Copyright IBM Corp. 2009, 2017
*/
#ifndef ZFCP_FC_H
@@ -279,6 +279,10 @@ void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
!(rsp_flags & FCP_SNS_LEN_VAL) &&
fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
set_host_byte(scsi, DID_ERROR);
+ } else if (unlikely(rsp_flags & FCP_RESID_OVER)) {
+ /* FCP_DL was not sufficient for SCSI data length */
+ if (fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
+ set_host_byte(scsi, DID_ERROR);
}
}
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 27ff38f839fc..1964391db904 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -928,8 +928,8 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
switch (header->fsf_status) {
case FSF_GOOD:
- zfcp_dbf_san_res("fsscth2", req);
ct->status = 0;
+ zfcp_dbf_san_res("fsscth2", req);
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
zfcp_fsf_class_not_supp(req);
@@ -1109,8 +1109,8 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
switch (header->fsf_status) {
case FSF_GOOD:
- zfcp_dbf_san_res("fsselh1", req);
send_els->status = 0;
+ zfcp_dbf_san_res("fsselh1", req);
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
zfcp_fsf_class_not_supp(req);
@@ -2258,7 +2258,8 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
- if (scsi_prot_sg_count(scsi_cmnd)) {
+ if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
+ scsi_prot_sg_count(scsi_cmnd)) {
zfcp_qdio_set_data_div(qdio, &req->qdio_req,
scsi_prot_sg_count(scsi_cmnd));
retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 07ffdbb5107f..9bd9b9a29dfc 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2017
*/
#define KMSG_COMPONENT "zfcp"
@@ -273,25 +273,29 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
zfcp_erp_wait(adapter);
ret = fc_block_scsi_eh(scpnt);
- if (ret)
+ if (ret) {
+ zfcp_dbf_scsi_devreset("fiof", scpnt, tm_flags, NULL);
return ret;
+ }
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
- zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
+ zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags, NULL);
return SUCCESS;
}
}
- if (!fsf_req)
+ if (!fsf_req) {
+ zfcp_dbf_scsi_devreset("reqf", scpnt, tm_flags, NULL);
return FAILED;
+ }
wait_for_completion(&fsf_req->completion);
if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
- zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
+ zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags, fsf_req);
retval = FAILED;
} else {
- zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
+ zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags, fsf_req);
zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
}
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index e6fb97cb12f4..7c28dc1cb0dd 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -456,7 +456,7 @@ static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
static int clariion_std_inquiry(struct scsi_device *sdev,
struct clariion_dh_data *csdev)
{
- int err;
+ int err = SCSI_DH_OK;
char *sp_model;
err = send_inquiry_cmd(sdev, 0, csdev);
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index 1910100638a2..00602abec0ea 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -66,6 +66,9 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
{
static const char * const strings[] = RNC_STATES;
+ if (state >= ARRAY_SIZE(strings))
+ return "UNKNOWN";
+
return strings[state];
}
#undef C
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 0e6aaef9a038..c74f74ab981c 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1054,7 +1054,10 @@ stop_rr_fcf_flogi:
lpfc_sli4_unreg_all_rpis(vport);
}
}
- lpfc_issue_reg_vfi(vport);
+
+ /* Do not register VFI if the driver aborted FLOGI */
+ if (!lpfc_error_lost_link(irsp))
+ lpfc_issue_reg_vfi(vport);
lpfc_nlp_put(ndlp);
goto out;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 17c440b9d086..6835bae33ec4 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1824,9 +1824,12 @@ static void megasas_complete_outstanding_ioctls(struct megasas_instance *instanc
if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
if (cmd_mfi->sync_cmd &&
- cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
+ (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
+ cmd_mfi->frame->hdr.cmd_status =
+ MFI_STAT_WRONG_STATE;
megasas_complete_cmd(instance,
cmd_mfi, DID_OK);
+ }
}
}
} else {
@@ -5094,6 +5097,14 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
prev_aen.word =
le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
+ if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
+ (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
+ dev_info(&instance->pdev->dev,
+ "%s %d out of range class %d sent by application\n",
+ __func__, __LINE__, curr_aen.members.class);
+ return 0;
+ }
+
/*
* A class whose enum value is smaller is inclusive of all
* higher values. If a PROGRESS (= -1) was previously
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 1ed85dfc008d..ac12ee844bfc 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -404,6 +404,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
return -EINVAL;
if (start > ha->optrom_size)
return -EINVAL;
+ if (size > ha->optrom_size - start)
+ size = ha->optrom_size - start;
mutex_lock(&ha->optrom_mutex);
switch (val) {
@@ -429,8 +431,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
}
ha->optrom_region_start = start;
- ha->optrom_region_size = start + size > ha->optrom_size ?
- ha->optrom_size - start : size;
+ ha->optrom_region_size = start + size;
ha->optrom_state = QLA_SREADING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
@@ -503,8 +504,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
}
ha->optrom_region_start = start;
- ha->optrom_region_size = start + size > ha->optrom_size ?
- ha->optrom_size - start : size;
+ ha->optrom_region_size = start + size;
ha->optrom_state = QLA_SWRITING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index e4b3d8f4fd85..bb4ed7b1f5df 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -3697,7 +3697,7 @@ iscsi_if_rx(struct sk_buff *skb)
uint32_t group;
nlh = nlmsg_hdr(skb);
- if (nlh->nlmsg_len < sizeof(*nlh) ||
+ if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) ||
skb->len < nlh->nlmsg_len) {
break;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 8aa202faafb5..d0d31415c79b 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2800,8 +2800,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_write_same(sdkp, buffer);
}
- sdkp->first_scan = 0;
-
/*
* We now have all cache related info, determine how we deal
* with flush requests.
@@ -2816,7 +2814,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
/*
- * Use the device's preferred I/O size for reads and writes
+ * Determine the device's preferred I/O size for reads and writes
* unless the reported value is unreasonably small, large, or
* garbage.
*/
@@ -2830,8 +2828,19 @@ static int sd_revalidate_disk(struct gendisk *disk)
rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
(sector_t)BLK_DEF_MAX_SECTORS);
- /* Combine with controller limits */
- q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
+ /* Do not exceed controller limit */
+ rw_max = min(rw_max, queue_max_hw_sectors(q));
+
+ /*
+ * Only update max_sectors if previously unset or if the current value
+ * exceeds the capabilities of the hardware.
+ */
+ if (sdkp->first_scan ||
+ q->limits.max_sectors > q->limits.max_dev_sectors ||
+ q->limits.max_sectors > q->limits.max_hw_sectors)
+ q->limits.max_sectors = rw_max;
+
+ sdkp->first_scan = 0;
set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
sd_config_write_same(sdkp);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 29d8c74e85e3..b0e2e292e3cb 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -133,7 +133,7 @@ struct sg_device; /* forward declarations */
struct sg_fd;
typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
- struct sg_request *nextrp; /* NULL -> tail request (slist) */
+ struct list_head entry; /* list entry */
struct sg_fd *parentfp; /* NULL -> not in use */
Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */
@@ -153,11 +153,11 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
struct sg_device *parentdp; /* owning device */
wait_queue_head_t read_wait; /* queue read until command done */
rwlock_t rq_list_lock; /* protect access to list in req_arr */
+ struct mutex f_mutex; /* protect against changes in this fd */
int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
Sg_scatter_hold reserve; /* buffer held for this file descriptor */
- unsigned save_scat_len; /* original length of trunc. scat. element */
- Sg_request *headrp; /* head of request slist, NULL->empty */
+ struct list_head rq_list; /* head of request list */
struct fasync_struct *async_qp; /* used by asynchronous notification */
Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
char low_dma; /* as in parent but possibly overridden to 1 */
@@ -166,6 +166,7 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
char mmap_called; /* 0 -> mmap() never called on this fd */
+ char res_in_use; /* 1 -> 'reserve' array in use */
struct kref f_ref;
struct execute_work ew;
} Sg_fd;
@@ -209,7 +210,6 @@ static void sg_remove_sfp(struct kref *);
static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
-static int sg_res_in_use(Sg_fd * sfp);
static Sg_device *sg_get_dev(int dev);
static void sg_device_destroy(struct kref *kref);
@@ -625,6 +625,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
}
buf += SZ_SG_HEADER;
__get_user(opcode, buf);
+ mutex_lock(&sfp->f_mutex);
if (sfp->next_cmd_len > 0) {
cmd_size = sfp->next_cmd_len;
sfp->next_cmd_len = 0; /* reset so only this write() effected */
@@ -633,6 +634,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
if ((opcode >= 0xc0) && old_hdr.twelve_byte)
cmd_size = 12;
}
+ mutex_unlock(&sfp->f_mutex);
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
/* Determine buffer size. */
@@ -732,7 +734,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
sg_remove_request(sfp, srp);
return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
}
- if (sg_res_in_use(sfp)) {
+ if (sfp->res_in_use) {
sg_remove_request(sfp, srp);
return -EBUSY; /* reserve buffer already being used */
}
@@ -837,6 +839,39 @@ static int max_sectors_bytes(struct request_queue *q)
return max_sectors << 9;
}
+static void
+sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
+{
+ Sg_request *srp;
+ int val;
+ unsigned int ms;
+
+ val = 0;
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
+ if (val >= SG_MAX_QUEUE)
+ break;
+ rinfo[val].req_state = srp->done + 1;
+ rinfo[val].problem =
+ srp->header.masked_status &
+ srp->header.host_status &
+ srp->header.driver_status;
+ if (srp->done)
+ rinfo[val].duration =
+ srp->header.duration;
+ else {
+ ms = jiffies_to_msecs(jiffies);
+ rinfo[val].duration =
+ (ms > srp->header.duration) ?
+ (ms - srp->header.duration) : 0;
+ }
+ rinfo[val].orphan = srp->orphan;
+ rinfo[val].sg_io_owned = srp->sg_io_owned;
+ rinfo[val].pack_id = srp->header.pack_id;
+ rinfo[val].usr_ptr = srp->header.usr_ptr;
+ val++;
+ }
+}
+
static long
sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
@@ -902,7 +937,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return result;
if (val) {
sfp->low_dma = 1;
- if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
+ if ((0 == sfp->low_dma) && !sfp->res_in_use) {
val = (int) sfp->reserve.bufflen;
sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);
@@ -948,7 +983,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
return -EFAULT;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (srp = sfp->headrp; srp; srp = srp->nextrp) {
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned)) {
read_unlock_irqrestore(&sfp->rq_list_lock,
iflags);
@@ -961,7 +996,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return 0;
case SG_GET_NUM_WAITING:
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
+ val = 0;
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned))
++val;
}
@@ -977,12 +1013,18 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return -EINVAL;
val = min_t(int, val,
max_sectors_bytes(sdp->device->request_queue));
+ mutex_lock(&sfp->f_mutex);
if (val != sfp->reserve.bufflen) {
- if (sg_res_in_use(sfp) || sfp->mmap_called)
+ if (sfp->mmap_called ||
+ sfp->res_in_use) {
+ mutex_unlock(&sfp->f_mutex);
return -EBUSY;
+ }
+
sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);
}
+ mutex_unlock(&sfp->f_mutex);
return 0;
case SG_GET_RESERVED_SIZE:
val = min_t(int, sfp->reserve.bufflen,
@@ -1023,42 +1065,15 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return -EFAULT;
else {
sg_req_info_t *rinfo;
- unsigned int ms;
- rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
- GFP_KERNEL);
+ rinfo = kzalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
+ GFP_KERNEL);
if (!rinfo)
return -ENOMEM;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
- ++val, srp = srp ? srp->nextrp : srp) {
- memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
- if (srp) {
- rinfo[val].req_state = srp->done + 1;
- rinfo[val].problem =
- srp->header.masked_status &
- srp->header.host_status &
- srp->header.driver_status;
- if (srp->done)
- rinfo[val].duration =
- srp->header.duration;
- else {
- ms = jiffies_to_msecs(jiffies);
- rinfo[val].duration =
- (ms > srp->header.duration) ?
- (ms - srp->header.duration) : 0;
- }
- rinfo[val].orphan = srp->orphan;
- rinfo[val].sg_io_owned =
- srp->sg_io_owned;
- rinfo[val].pack_id =
- srp->header.pack_id;
- rinfo[val].usr_ptr =
- srp->header.usr_ptr;
- }
- }
+ sg_fill_request_table(sfp, rinfo);
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- result = __copy_to_user(p, rinfo,
+ result = __copy_to_user(p, rinfo,
SZ_SG_REQ_INFO * SG_MAX_QUEUE);
result = result ? -EFAULT : 0;
kfree(rinfo);
@@ -1164,7 +1179,7 @@ sg_poll(struct file *filp, poll_table * wait)
return POLLERR;
poll_wait(filp, &sfp->read_wait, wait);
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (srp = sfp->headrp; srp; srp = srp->nextrp) {
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
/* if any read waiting, flag it */
if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
res = POLLIN | POLLRDNORM;
@@ -1245,6 +1260,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
unsigned long req_sz, len, sa;
Sg_scatter_hold *rsv_schp;
int k, length;
+ int ret = 0;
if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
return -ENXIO;
@@ -1255,8 +1271,11 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
if (vma->vm_pgoff)
return -EINVAL; /* want no offset */
rsv_schp = &sfp->reserve;
- if (req_sz > rsv_schp->bufflen)
- return -ENOMEM; /* cannot map more than reserved buffer */
+ mutex_lock(&sfp->f_mutex);
+ if (req_sz > rsv_schp->bufflen) {
+ ret = -ENOMEM; /* cannot map more than reserved buffer */
+ goto out;
+ }
sa = vma->vm_start;
length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
@@ -1270,7 +1289,9 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = sfp;
vma->vm_ops = &sg_mmap_vm_ops;
- return 0;
+out:
+ mutex_unlock(&sfp->f_mutex);
+ return ret;
}
static void
@@ -1734,13 +1755,25 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
md = &map_data;
if (md) {
- if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
+ mutex_lock(&sfp->f_mutex);
+ if (dxfer_len <= rsv_schp->bufflen &&
+ !sfp->res_in_use) {
+ sfp->res_in_use = 1;
sg_link_reserve(sfp, srp, dxfer_len);
- else {
+ } else if (hp->flags & SG_FLAG_MMAP_IO) {
+ res = -EBUSY; /* sfp->res_in_use == 1 */
+ if (dxfer_len > rsv_schp->bufflen)
+ res = -ENOMEM;
+ mutex_unlock(&sfp->f_mutex);
+ return res;
+ } else {
res = sg_build_indirect(req_schp, sfp, dxfer_len);
- if (res)
+ if (res) {
+ mutex_unlock(&sfp->f_mutex);
return res;
+ }
}
+ mutex_unlock(&sfp->f_mutex);
md->pages = req_schp->pages;
md->page_order = req_schp->page_order;
@@ -2029,8 +2062,9 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
req_schp->pages = NULL;
req_schp->page_order = 0;
req_schp->sglist_len = 0;
- sfp->save_scat_len = 0;
srp->res_used = 0;
+ /* Called without mutex lock to avoid deadlock */
+ sfp->res_in_use = 0;
}
static Sg_request *
@@ -2040,7 +2074,7 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id)
unsigned long iflags;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (resp = sfp->headrp; resp; resp = resp->nextrp) {
+ list_for_each_entry(resp, &sfp->rq_list, entry) {
/* look for requests that are ready + not SG_IO owned */
if ((1 == resp->done) && (!resp->sg_io_owned) &&
((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
@@ -2058,70 +2092,45 @@ sg_add_request(Sg_fd * sfp)
{
int k;
unsigned long iflags;
- Sg_request *resp;
Sg_request *rp = sfp->req_arr;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
- resp = sfp->headrp;
- if (!resp) {
- memset(rp, 0, sizeof (Sg_request));
- rp->parentfp = sfp;
- resp = rp;
- sfp->headrp = resp;
- } else {
- if (0 == sfp->cmd_q)
- resp = NULL; /* command queuing disallowed */
- else {
- for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
- if (!rp->parentfp)
- break;
- }
- if (k < SG_MAX_QUEUE) {
- memset(rp, 0, sizeof (Sg_request));
- rp->parentfp = sfp;
- while (resp->nextrp)
- resp = resp->nextrp;
- resp->nextrp = rp;
- resp = rp;
- } else
- resp = NULL;
+ if (!list_empty(&sfp->rq_list)) {
+ if (!sfp->cmd_q)
+ goto out_unlock;
+
+ for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
+ if (!rp->parentfp)
+ break;
}
+ if (k >= SG_MAX_QUEUE)
+ goto out_unlock;
}
- if (resp) {
- resp->nextrp = NULL;
- resp->header.duration = jiffies_to_msecs(jiffies);
- }
+ memset(rp, 0, sizeof (Sg_request));
+ rp->parentfp = sfp;
+ rp->header.duration = jiffies_to_msecs(jiffies);
+ list_add_tail(&rp->entry, &sfp->rq_list);
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- return resp;
+ return rp;
+out_unlock:
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return NULL;
}
/* Return of 1 for found; 0 for not found */
static int
sg_remove_request(Sg_fd * sfp, Sg_request * srp)
{
- Sg_request *prev_rp;
- Sg_request *rp;
unsigned long iflags;
int res = 0;
- if ((!sfp) || (!srp) || (!sfp->headrp))
+ if (!sfp || !srp || list_empty(&sfp->rq_list))
return res;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
- prev_rp = sfp->headrp;
- if (srp == prev_rp) {
- sfp->headrp = prev_rp->nextrp;
- prev_rp->parentfp = NULL;
+ if (!list_empty(&srp->entry)) {
+ list_del(&srp->entry);
+ srp->parentfp = NULL;
res = 1;
- } else {
- while ((rp = prev_rp->nextrp)) {
- if (srp == rp) {
- prev_rp->nextrp = rp->nextrp;
- rp->parentfp = NULL;
- res = 1;
- break;
- }
- prev_rp = rp;
- }
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return res;
@@ -2140,8 +2149,9 @@ sg_add_sfp(Sg_device * sdp)
init_waitqueue_head(&sfp->read_wait);
rwlock_init(&sfp->rq_list_lock);
-
+ INIT_LIST_HEAD(&sfp->rq_list);
kref_init(&sfp->f_ref);
+ mutex_init(&sfp->f_mutex);
sfp->timeout = SG_DEFAULT_TIMEOUT;
sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
sfp->force_packid = SG_DEF_FORCE_PACK_ID;
@@ -2180,10 +2190,13 @@ sg_remove_sfp_usercontext(struct work_struct *work)
{
struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
struct sg_device *sdp = sfp->parentdp;
+ Sg_request *srp;
/* Cleanup any responses which were never read(). */
- while (sfp->headrp)
- sg_finish_rem_req(sfp->headrp);
+ while (!list_empty(&sfp->rq_list)) {
+ srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
+ sg_finish_rem_req(srp);
+ }
if (sfp->reserve.bufflen > 0) {
SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
@@ -2217,20 +2230,6 @@ sg_remove_sfp(struct kref *kref)
schedule_work(&sfp->ew.work);
}
-static int
-sg_res_in_use(Sg_fd * sfp)
-{
- const Sg_request *srp;
- unsigned long iflags;
-
- read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (srp = sfp->headrp; srp; srp = srp->nextrp)
- if (srp->res_used)
- break;
- read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- return srp ? 1 : 0;
-}
-
#ifdef CONFIG_SCSI_PROC_FS
static int
sg_idr_max_id(int id, void *p, void *data)
@@ -2600,7 +2599,7 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
/* must be called while holding sg_index_lock */
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
- int k, m, new_interface, blen, usg;
+ int k, new_interface, blen, usg;
Sg_request *srp;
Sg_fd *fp;
const sg_io_hdr_t *hp;
@@ -2620,13 +2619,11 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
(int) fp->cmd_q, (int) fp->force_packid,
(int) fp->keep_orphan);
- for (m = 0, srp = fp->headrp;
- srp != NULL;
- ++m, srp = srp->nextrp) {
+ list_for_each_entry(srp, &fp->rq_list, entry) {
hp = &srp->header;
new_interface = (hp->interface_id == '\0') ? 0 : 1;
if (srp->res_used) {
- if (new_interface &&
+ if (new_interface &&
(SG_FLAG_MMAP_IO & hp->flags))
cp = " mmap>> ";
else
@@ -2657,7 +2654,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
(int) srp->data.cmd_opcode);
}
- if (0 == m)
+ if (list_empty(&fp->rq_list))
seq_puts(s, " No requests active\n");
read_unlock(&fp->rq_list_lock);
}
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index cd5c1c060481..6df2841cb7f9 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1511,6 +1511,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
ret = storvsc_do_io(dev, cmd_request);
if (ret == -EAGAIN) {
+ if (payload_sz > sizeof(cmd_request->mpb))
+ kfree(payload);
/* no more space */
return SCSI_MLQUEUE_DEVICE_BUSY;
}
diff --git a/drivers/scsi/ufs/ufs-qcom-debugfs.c b/drivers/scsi/ufs/ufs-qcom-debugfs.c
index 4547a6dbdb23..494ecd1c5f79 100644
--- a/drivers/scsi/ufs/ufs-qcom-debugfs.c
+++ b/drivers/scsi/ufs/ufs-qcom-debugfs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -111,12 +111,15 @@ static ssize_t ufs_qcom_dbg_testbus_cfg_write(struct file *file,
loff_t *ppos)
{
struct ufs_qcom_host *host = file->f_mapping->host->i_private;
- char configuration[TESTBUS_CFG_BUFF_LINE_SIZE] = {0};
+ char configuration[TESTBUS_CFG_BUFF_LINE_SIZE] = {'\0'};
loff_t buff_pos = 0;
char *comma;
int ret = 0;
int major;
int minor;
+ unsigned long flags;
+ struct ufs_hba *hba = host->hba;
+
ret = simple_write_to_buffer(configuration, TESTBUS_CFG_BUFF_LINE_SIZE,
&buff_pos, ubuf, cnt);
@@ -125,6 +128,7 @@ static ssize_t ufs_qcom_dbg_testbus_cfg_write(struct file *file,
__func__);
goto out;
}
+ configuration[ret] = '\0';
comma = strnchr(configuration, TESTBUS_CFG_BUFF_LINE_SIZE, ',');
if (!comma || comma == configuration) {
@@ -142,8 +146,15 @@ static ssize_t ufs_qcom_dbg_testbus_cfg_write(struct file *file,
goto out;
}
+ if (!ufs_qcom_testbus_cfg_is_ok(host, major, minor)) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
host->testbus.select_major = (u8)major;
host->testbus.select_minor = (u8)minor;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
/*
* Sanity check of the {major, minor} tuple is done in the
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 47106f937371..f429547aef7b 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -2563,12 +2563,13 @@ static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
host->testbus.select_minor = 37;
}
-static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
+bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host,
+ u8 select_major, u8 select_minor)
{
- if (host->testbus.select_major >= TSTBUS_MAX) {
+ if (select_major >= TSTBUS_MAX) {
dev_err(host->hba->dev,
"%s: UFS_CFG1[TEST_BUS_SEL] may not equal 0x%05X\n",
- __func__, host->testbus.select_major);
+ __func__, select_major);
return false;
}
@@ -2577,10 +2578,10 @@ static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
* mappings of select_minor, since there is no harm in
* configuring a non-existent select_minor
*/
- if (host->testbus.select_minor > 0xFF) {
+ if (select_minor > 0xFF) {
dev_err(host->hba->dev,
"%s: 0x%05X is not a legal testbus option\n",
- __func__, host->testbus.select_minor);
+ __func__, select_minor);
return false;
}
@@ -2594,16 +2595,16 @@ static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
*/
int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
{
- int reg;
- int offset;
+ int reg = 0;
+ int offset, ret = 0, testbus_sel_offset = 19;
u32 mask = TEST_BUS_SUB_SEL_MASK;
+ unsigned long flags;
+ struct ufs_hba *hba;
if (!host)
return -EINVAL;
-
- if (!ufs_qcom_testbus_cfg_is_ok(host))
- return -EPERM;
-
+ hba = host->hba;
+ spin_lock_irqsave(hba->host->host_lock, flags);
switch (host->testbus.select_major) {
case TSTBUS_UAWM:
reg = UFS_TEST_BUS_CTRL_0;
@@ -2661,21 +2662,27 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
*/
}
mask <<= offset;
-
- ufshcd_rmwl(host->hba, TEST_BUS_SEL,
- (u32)host->testbus.select_major << 19,
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (reg) {
+ ufshcd_rmwl(host->hba, TEST_BUS_SEL,
+ (u32)host->testbus.select_major << testbus_sel_offset,
REG_UFS_CFG1);
- ufshcd_rmwl(host->hba, mask,
+ ufshcd_rmwl(host->hba, mask,
(u32)host->testbus.select_minor << offset,
reg);
+ } else {
+ dev_err(hba->dev, "%s: Problem setting minor\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
ufs_qcom_enable_test_bus(host);
/*
* Make sure the test bus configuration is
* committed before returning.
*/
mb();
-
- return 0;
+out:
+ return ret;
}
static void ufs_qcom_testbus_read(struct ufs_hba *hba)
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 9d532691f001..fd98a3381d61 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -100,7 +100,7 @@ enum {
/* bit definitions for REG_UFS_CFG1 register */
#define QUNIPRO_SEL UFS_BIT(0)
#define TEST_BUS_EN BIT(18)
-#define TEST_BUS_SEL GENMASK(22, 19)
+#define TEST_BUS_SEL 0x780000
#define UFS_REG_TEST_BUS_EN BIT(30)
/* bit definitions for REG_UFS_CFG2 register */
@@ -391,6 +391,8 @@ ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg)
#define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba)
#define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba)
+bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host, u8 select_major,
+ u8 select_minor);
int ufs_qcom_testbus_config(struct ufs_qcom_host *host);
void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, void *priv,
void (*print_fn)(struct ufs_hba *hba, int offset, int num_regs,
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 829876226689..a2136c6863d3 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -616,6 +616,15 @@ config MSM_QDSP6_APRV2
used by audio driver to configure QDSP6's
ASM, ADM and AFE.
+config MSM_QDSP6_APRV2_VM
+ bool "Audio QDSP6 APRv2 virtualization support"
+ depends on MSM_HAB
+ help
+ Enable APRv2 IPC protocol support over
+ HAB between application processor and
+ QDSP6. APR is used by audio driver to
+ configure QDSP6's ASM, ADM and AFE.
+
config MSM_QDSP6_APRV3
bool "Audio QDSP6 APRv3 support"
depends on MSM_SMD
@@ -800,7 +809,8 @@ config MSM_EVENT_TIMER
config MSM_AVTIMER
tristate "Avtimer Driver"
- depends on MSM_QDSP6_APRV2 || MSM_QDSP6_APRV3 || MSM_QDSP6_APRV2_GLINK
+ depends on MSM_QDSP6_APRV2 || MSM_QDSP6_APRV3 || MSM_QDSP6_APRV2_GLINK || \
+ MSM_QDSP6_APRV2_VM
help
This driver gets the Q6 out of power collapsed state and
exposes ioctl control to read avtimer tick.
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index ad9bf3a2232d..b8464fdfd310 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -4294,6 +4294,12 @@ static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr)
rwref_read_get(&xprt_ptr->xprt_state_lhb0);
ctx = get_first_ch_ctx(xprt_ptr);
while (ctx) {
+ spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb3, flags);
+ spin_lock(&ctx->tx_lists_lock_lhc3);
+ if (!list_empty(&ctx->tx_active))
+ glink_qos_done_ch_tx(ctx);
+ spin_unlock(&ctx->tx_lists_lock_lhc3);
+ spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3, flags);
rwref_write_get_atomic(&ctx->ch_state_lhb2, true);
if (ctx->local_open_state == GLINK_CHANNEL_OPENED ||
ctx->local_open_state == GLINK_CHANNEL_OPENING) {
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index e9a097151141..83efbbe25e6b 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -2347,7 +2347,7 @@ static int icnss_driver_event_pd_service_down(struct icnss_priv *priv,
if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state))
goto out;
- if (test_bit(ICNSS_PD_RESTART, &priv->state)) {
+ if (test_bit(ICNSS_PD_RESTART, &priv->state) && event_data->crashed) {
icnss_pr_err("PD Down while recovery inprogress, crashed: %d, state: 0x%lx\n",
event_data->crashed, priv->state);
ICNSS_ASSERT(0);
@@ -2651,7 +2651,9 @@ event_post:
clear_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state);
fw_down_data.crashed = event_data->crashed;
- icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN, &fw_down_data);
+ if (test_bit(ICNSS_FW_READY, &priv->state))
+ icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN,
+ &fw_down_data);
icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
ICNSS_EVENT_SYNC, event_data);
done:
diff --git a/drivers/soc/qcom/ipc_router_glink_xprt.c b/drivers/soc/qcom/ipc_router_glink_xprt.c
index 0c588c586306..d12031901b34 100644
--- a/drivers/soc/qcom/ipc_router_glink_xprt.c
+++ b/drivers/soc/qcom/ipc_router_glink_xprt.c
@@ -31,6 +31,7 @@ static int ipc_router_glink_xprt_debug_mask;
module_param_named(debug_mask, ipc_router_glink_xprt_debug_mask,
int, S_IRUGO | S_IWUSR | S_IWGRP);
+#define IPCRTR_INTENT_REQ_TIMEOUT_MS 5000
#if defined(DEBUG)
#define D(x...) do { \
if (ipc_router_glink_xprt_debug_mask) \
@@ -43,6 +44,7 @@ if (ipc_router_glink_xprt_debug_mask) \
#define MIN_FRAG_SZ (IPC_ROUTER_HDR_SIZE + sizeof(union rr_control_msg))
#define IPC_RTR_XPRT_NAME_LEN (2 * GLINK_NAME_SIZE)
#define PIL_SUBSYSTEM_NAME_LEN 32
+#define IPC_RTR_WS_NAME_LEN ((2 * GLINK_NAME_SIZE) + 4)
#define MAX_NUM_LO_INTENTS 5
#define MAX_NUM_MD_INTENTS 3
@@ -59,6 +61,7 @@ if (ipc_router_glink_xprt_debug_mask) \
* @transport: Physical Transport Name as identified by Glink.
* @pil_edge: Edge name understood by PIL.
* @ipc_rtr_xprt_name: XPRT Name to be registered with IPC Router.
+ * @notify_rx_ws_name: Name of wakesource used in notify rx path.
* @xprt: IPC Router XPRT structure to contain XPRT specific info.
* @ch_hndl: Opaque Channel handle returned by GLink.
* @xprt_wq: Workqueue to queue read & other XPRT related works.
@@ -79,6 +82,7 @@ struct ipc_router_glink_xprt {
char transport[GLINK_NAME_SIZE];
char pil_edge[PIL_SUBSYSTEM_NAME_LEN];
char ipc_rtr_xprt_name[IPC_RTR_XPRT_NAME_LEN];
+ char notify_rx_ws_name[IPC_RTR_WS_NAME_LEN];
struct msm_ipc_router_xprt xprt;
void *ch_hndl;
struct workqueue_struct *xprt_wq;
@@ -604,6 +608,7 @@ static void glink_xprt_ch_open(struct ipc_router_glink_xprt *glink_xprtp)
open_cfg.notify_state = glink_xprt_notify_state;
open_cfg.notify_rx_intent_req = glink_xprt_notify_rx_intent_req;
open_cfg.priv = glink_xprtp;
+ open_cfg.rx_intent_req_timeout_ms = IPCRTR_INTENT_REQ_TIMEOUT_MS;
glink_xprtp->pil = msm_ipc_load_subsystem(glink_xprtp);
glink_xprtp->ch_hndl = glink_open(&open_cfg);
@@ -763,8 +768,10 @@ static int ipc_router_glink_config_init(
kfree(glink_xprtp);
return -EFAULT;
}
-
- wakeup_source_init(&glink_xprtp->notify_rxv_ws, xprt_wq_name);
+ scnprintf(glink_xprtp->notify_rx_ws_name, IPC_RTR_WS_NAME_LEN,
+ "%s_%s_rx", glink_xprtp->ch_name, glink_xprtp->edge);
+ wakeup_source_init(&glink_xprtp->notify_rxv_ws,
+ glink_xprtp->notify_rx_ws_name);
mutex_lock(&glink_xprt_list_lock_lha1);
list_add(&glink_xprtp->list, &glink_xprt_list);
mutex_unlock(&glink_xprt_list_lock_lha1);
diff --git a/drivers/soc/qcom/qdsp6v2/Makefile b/drivers/soc/qcom/qdsp6v2/Makefile
index 8c5b0d0e81c8..90feb8b659d1 100644
--- a/drivers/soc/qcom/qdsp6v2/Makefile
+++ b/drivers/soc/qcom/qdsp6v2/Makefile
@@ -2,7 +2,9 @@ obj-$(CONFIG_MSM_QDSP6_APRV2) += apr.o apr_v2.o apr_tal.o voice_svc.o
obj-$(CONFIG_MSM_QDSP6_APRV3) += apr.o apr_v3.o apr_tal.o voice_svc.o
obj-$(CONFIG_MSM_QDSP6_APRV2_GLINK) += apr.o apr_v2.o apr_tal_glink.o voice_svc.o
obj-$(CONFIG_MSM_QDSP6_APRV3_GLINK) += apr.o apr_v3.o apr_tal_glink.o voice_svc.o
+obj-$(CONFIG_MSM_QDSP6_APRV2_VM) += apr_vm.o apr_v2.o voice_svc.o
obj-$(CONFIG_SND_SOC_MSM_QDSP6V2_INTF) += msm_audio_ion.o
+obj-$(CONFIG_SND_SOC_QDSP6V2_VM) += msm_audio_ion_vm.o
obj-$(CONFIG_MSM_ADSP_LOADER) += adsp-loader.o
obj-$(CONFIG_MSM_QDSP6_SSR) += audio_ssr.o
obj-$(CONFIG_MSM_QDSP6_PDR) += audio_pdr.o
diff --git a/drivers/soc/qcom/qdsp6v2/apr_vm.c b/drivers/soc/qcom/qdsp6v2/apr_vm.c
new file mode 100644
index 000000000000..d0ea7b22717a
--- /dev/null
+++ b/drivers/soc/qcom/qdsp6v2/apr_vm.c
@@ -0,0 +1,1270 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/scm.h>
+#include <sound/apr_audio-v2.h>
+#include <linux/qdsp6v2/apr.h>
+#include <linux/qdsp6v2/apr_tal.h>
+#include <linux/qdsp6v2/aprv2_vm.h>
+#include <linux/qdsp6v2/dsp_debug.h>
+#include <linux/qdsp6v2/audio_notifier.h>
+#include <linux/ipc_logging.h>
+#include <linux/habmm.h>
+
+#define APR_PKT_IPC_LOG_PAGE_CNT 2
+#define APR_VM_CB_THREAD_NAME "apr_vm_cb_thread"
+#define APR_TX_BUF_SIZE 4096
+#define APR_RX_BUF_SIZE 4096
+
+static struct apr_q6 q6;
+static struct apr_client client[APR_DEST_MAX][APR_CLIENT_MAX];
+static void *apr_pkt_ctx;
+static wait_queue_head_t dsp_wait;
+static wait_queue_head_t modem_wait;
+static bool is_modem_up;
+static bool is_initial_boot;
+/* Subsystem restart: QDSP6 data, functions */
+static struct workqueue_struct *apr_reset_workqueue;
+static void apr_reset_deregister(struct work_struct *work);
+static void dispatch_event(unsigned long code, uint16_t proc);
+struct apr_reset_work {
+ void *handle;
+ struct work_struct work;
+};
+
+static bool apr_cf_debug;
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *debugfs_apr_debug;
+static ssize_t apr_debug_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char cmd;
+
+ if (copy_from_user(&cmd, ubuf, 1))
+ return -EFAULT;
+
+ apr_cf_debug = (cmd == '1') ? true : false;
+
+ return cnt;
+}
+
+static const struct file_operations apr_debug_ops = {
+ .write = apr_debug_write,
+};
+#endif
+
+#define APR_PKT_INFO(x...) \
+do { \
+ if (apr_pkt_ctx) \
+ ipc_log_string(apr_pkt_ctx, "<APR>: "x); \
+} while (0)
+
+/* hab handle */
+static uint32_t hab_handle_tx;
+static uint32_t hab_handle_rx;
+static char apr_tx_buf[APR_TX_BUF_SIZE];
+static char apr_rx_buf[APR_RX_BUF_SIZE];
+
+/* apr callback thread task */
+static struct task_struct *apr_vm_cb_thread_task;
+static int pid;
+
+
+struct apr_svc_table {
+ char name[64];
+ int idx;
+ int id;
+ int dest_svc;
+ int client_id;
+ int handle;
+};
+
+/*
+ * The source service id is assigned dynamically through APR registration:
+ * 1. build the registration string name, e.g. "qcom.apps.lnx." + name
+ * 2. register with the APR backend, retrieve the dynamically assigned
+ *    src svc address and apr handle, and store them in the svc table.
+ */
+
+static struct mutex m_lock_tbl_qdsp6;
+
+static struct apr_svc_table svc_tbl_qdsp6[] = {
+ {
+ .name = "AFE",
+ .idx = 0,
+ .id = 0,
+ .dest_svc = APR_SVC_AFE,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "ASM",
+ .idx = 1,
+ .id = 0,
+ .dest_svc = APR_SVC_ASM,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "ADM",
+ .idx = 2,
+ .id = 0,
+ .dest_svc = APR_SVC_ADM,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "CORE",
+ .idx = 3,
+ .id = 0,
+ .dest_svc = APR_SVC_ADSP_CORE,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "TEST",
+ .idx = 4,
+ .id = 0,
+ .dest_svc = APR_SVC_TEST_CLIENT,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "MVM",
+ .idx = 5,
+ .id = 0,
+ .dest_svc = APR_SVC_ADSP_MVM,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "CVS",
+ .idx = 6,
+ .id = 0,
+ .dest_svc = APR_SVC_ADSP_CVS,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "CVP",
+ .idx = 7,
+ .id = 0,
+ .dest_svc = APR_SVC_ADSP_CVP,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "USM",
+ .idx = 8,
+ .id = 0,
+ .dest_svc = APR_SVC_USM,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+ {
+ .name = "VIDC",
+ .idx = 9,
+ .id = 0,
+ .dest_svc = APR_SVC_VIDC,
+ .handle = 0,
+ },
+ {
+ .name = "LSM",
+ .idx = 10,
+ .id = 0,
+ .dest_svc = APR_SVC_LSM,
+ .client_id = APR_CLIENT_AUDIO,
+ .handle = 0,
+ },
+};
+
+static struct mutex m_lock_tbl_voice;
+
+static struct apr_svc_table svc_tbl_voice[] = {
+ {
+ .name = "VSM",
+ .idx = 0,
+ .id = 0,
+ .dest_svc = APR_SVC_VSM,
+ .client_id = APR_CLIENT_VOICE,
+ .handle = 0,
+ },
+ {
+ .name = "VPM",
+ .idx = 1,
+ .id = 0,
+ .dest_svc = APR_SVC_VPM,
+ .client_id = APR_CLIENT_VOICE,
+ .handle = 0,
+ },
+ {
+ .name = "MVS",
+ .idx = 2,
+ .id = 0,
+ .dest_svc = APR_SVC_MVS,
+ .client_id = APR_CLIENT_VOICE,
+ .handle = 0,
+ },
+ {
+ .name = "MVM",
+ .idx = 3,
+ .id = 0,
+ .dest_svc = APR_SVC_MVM,
+ .client_id = APR_CLIENT_VOICE,
+ .handle = 0,
+ },
+ {
+ .name = "CVS",
+ .idx = 4,
+ .id = 0,
+ .dest_svc = APR_SVC_CVS,
+ .client_id = APR_CLIENT_VOICE,
+ .handle = 0,
+ },
+ {
+ .name = "CVP",
+ .idx = 5,
+ .id = 0,
+ .dest_svc = APR_SVC_CVP,
+ .client_id = APR_CLIENT_VOICE,
+ .handle = 0,
+ },
+ {
+ .name = "SRD",
+ .idx = 6,
+ .id = 0,
+ .dest_svc = APR_SVC_SRD,
+ .client_id = APR_CLIENT_VOICE,
+ .handle = 0,
+ },
+ {
+ .name = "TEST",
+ .idx = 7,
+ .id = 0,
+ .dest_svc = APR_SVC_TEST_CLIENT,
+ .client_id = APR_CLIENT_VOICE,
+ .handle = 0,
+ },
+};
+
+enum apr_subsys_state apr_get_modem_state(void)
+{
+ return atomic_read(&q6.modem_state);
+}
+
+void apr_set_modem_state(enum apr_subsys_state state)
+{
+ atomic_set(&q6.modem_state, state);
+}
+
+enum apr_subsys_state apr_cmpxchg_modem_state(enum apr_subsys_state prev,
+ enum apr_subsys_state new)
+{
+ return atomic_cmpxchg(&q6.modem_state, prev, new);
+}
+
+static void apr_modem_down(unsigned long opcode)
+{
+ apr_set_modem_state(APR_SUBSYS_DOWN);
+ dispatch_event(opcode, APR_DEST_MODEM);
+}
+
+static void apr_modem_up(void)
+{
+ if (apr_cmpxchg_modem_state(APR_SUBSYS_DOWN, APR_SUBSYS_UP) ==
+ APR_SUBSYS_DOWN)
+ wake_up(&modem_wait);
+ is_modem_up = 1;
+}
+
+enum apr_subsys_state apr_get_q6_state(void)
+{
+ return atomic_read(&q6.q6_state);
+}
+EXPORT_SYMBOL(apr_get_q6_state);
+
+int apr_set_q6_state(enum apr_subsys_state state)
+{
+ pr_debug("%s: setting adsp state %d\n", __func__, state);
+ if (state < APR_SUBSYS_DOWN || state > APR_SUBSYS_LOADED)
+ return -EINVAL;
+ atomic_set(&q6.q6_state, state);
+ return 0;
+}
+EXPORT_SYMBOL(apr_set_q6_state);
+
+enum apr_subsys_state apr_cmpxchg_q6_state(enum apr_subsys_state prev,
+ enum apr_subsys_state new)
+{
+ return atomic_cmpxchg(&q6.q6_state, prev, new);
+}
+
+static void apr_adsp_down(unsigned long opcode)
+{
+ apr_set_q6_state(APR_SUBSYS_DOWN);
+ dispatch_event(opcode, APR_DEST_QDSP6);
+}
+
+static void apr_adsp_up(void)
+{
+ if (apr_cmpxchg_q6_state(APR_SUBSYS_DOWN, APR_SUBSYS_LOADED) ==
+ APR_SUBSYS_DOWN)
+ wake_up(&dsp_wait);
+}
+
+int apr_wait_for_device_up(int dest_id)
+{
+ int rc = -1;
+
+ if (dest_id == APR_DEST_MODEM)
+ rc = wait_event_interruptible_timeout(modem_wait,
+ (apr_get_modem_state() == APR_SUBSYS_UP),
+ (1 * HZ));
+ else if (dest_id == APR_DEST_QDSP6)
+ rc = wait_event_interruptible_timeout(dsp_wait,
+ (apr_get_q6_state() == APR_SUBSYS_UP),
+ (1 * HZ));
+ else
+ pr_err("%s: unknown dest_id %d\n", __func__, dest_id);
+ /* returns the remaining wait time */
+ return rc;
+}
+
+static int apr_vm_nb_receive(int32_t handle, void *dest_buff,
+ uint32_t *size_bytes, uint32_t timeout)
+{
+ int rc;
+ uint32_t dest_buff_bytes = *size_bytes;
+ unsigned long delay = jiffies + (HZ / 2);
+
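+ /*
+ * Poll the channel in non-blocking mode: habmm_socket_recv()
+ * returns -EAGAIN while no data is pending, so keep retrying
+ * for up to ~500 ms (HZ / 2) until a response arrives.
+ */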
+ do {
+ *size_bytes = dest_buff_bytes;
+ rc = habmm_socket_recv(handle,
+ dest_buff,
+ size_bytes,
+ timeout,
+ HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING);
+ } while (time_before(jiffies, delay) && (rc == -EAGAIN) &&
+ (*size_bytes == 0));
+
+ return rc;
+}
+
+static int apr_vm_cb_process_evt(char *buf, int len)
+{
+ struct apr_client_data data;
+ struct apr_client *apr_client;
+ struct apr_svc *c_svc;
+ struct apr_hdr *hdr;
+ uint16_t hdr_size;
+ uint16_t msg_type;
+ uint16_t ver;
+ uint16_t src;
+ uint16_t svc;
+ uint16_t clnt;
+ int i;
+ int temp_port = 0;
+ uint32_t *ptr;
+ uint32_t evt_id;
+
+ pr_debug("APR: len = %d\n", len);
+ ptr = (uint32_t *)buf;
+ pr_debug("\n*****************\n");
+ for (i = 0; i < len/4; i++)
+ pr_debug("%x ", ptr[i]);
+ pr_debug("\n");
+ pr_debug("\n*****************\n");
+
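+ /*
+ * Validate the event wrapper and the embedded APR header before
+ * dispatching: event id, header version and size, packet size,
+ * message type and the source/destination domains are all
+ * checked below.
+ */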
+ if (!buf || len <= APR_HDR_SIZE + sizeof(uint32_t)) {
+ pr_err("APR: Improper apr pkt received: %p %d\n", buf, len);
+ return -EINVAL;
+ }
+
+ evt_id = *((int32_t *)buf);
+ if (evt_id != APRV2_VM_EVT_RX_PKT_AVAILABLE) {
+ pr_err("APR: Wrong evt id: %d\n", evt_id);
+ return -EINVAL;
+ }
+ hdr = (struct apr_hdr *)(buf + sizeof(uint32_t));
+
+ ver = hdr->hdr_field;
+ ver = (ver & 0x000F);
+ if (ver > APR_PKT_VER + 1) {
+ pr_err("APR: Wrong version: %d\n", ver);
+ return -EINVAL;
+ }
+
+ hdr_size = hdr->hdr_field;
+ hdr_size = ((hdr_size & 0x00F0) >> 0x4) * 4;
+ if (hdr_size < APR_HDR_SIZE) {
+ pr_err("APR: Wrong hdr size:%d\n", hdr_size);
+ return -EINVAL;
+ }
+
+ if (hdr->pkt_size < APR_HDR_SIZE) {
+ pr_err("APR: Wrong paket size\n");
+ return -EINVAL;
+ }
+
+ msg_type = hdr->hdr_field;
+ msg_type = (msg_type >> 0x08) & 0x0003;
+ if (msg_type >= APR_MSG_TYPE_MAX && msg_type != APR_BASIC_RSP_RESULT) {
+ pr_err("APR: Wrong message type: %d\n", msg_type);
+ return -EINVAL;
+ }
+
+ /*
+ * dest_svc is assigned dynamically by the APR service,
+ * so there is no need to range-check its value here.
+ */
+ if (hdr->src_domain >= APR_DOMAIN_MAX ||
+ hdr->dest_domain >= APR_DOMAIN_MAX ||
+ hdr->src_svc >= APR_SVC_MAX) {
+ pr_err("APR: Wrong APR header\n");
+ return -EINVAL;
+ }
+
+ svc = hdr->dest_svc;
+ if (hdr->src_domain == APR_DOMAIN_MODEM)
+ clnt = APR_CLIENT_VOICE;
+ else if (hdr->src_domain == APR_DOMAIN_ADSP)
+ clnt = APR_CLIENT_AUDIO;
+ else {
+ pr_err("APR: Pkt from wrong source: %d\n", hdr->src_domain);
+ return -EINVAL;
+ }
+
+ src = apr_get_data_src(hdr);
+ if (src == APR_DEST_MAX)
+ return -EINVAL;
+
+ pr_debug("src =%d clnt = %d\n", src, clnt);
+ apr_client = &client[src][clnt];
+ for (i = 0; i < APR_SVC_MAX; i++)
+ if (apr_client->svc[i].id == svc) {
+ pr_debug("svc_id = %d\n", apr_client->svc[i].id);
+ c_svc = &apr_client->svc[i];
+ break;
+ }
+
+ if (i == APR_SVC_MAX) {
+ pr_err("APR: service is not registered\n");
+ return -ENXIO;
+ }
+
+ pr_debug("svc_idx = %d\n", i);
+ pr_debug("%x %x %x %p %p\n", c_svc->id, c_svc->dest_id,
+ c_svc->client_id, c_svc->fn, c_svc->priv);
+
+ data.payload_size = hdr->pkt_size - hdr_size;
+ data.opcode = hdr->opcode;
+ data.src = src;
+ data.src_port = hdr->src_port;
+ data.dest_port = hdr->dest_port;
+ data.token = hdr->token;
+ data.msg_type = msg_type;
+ if (data.payload_size > 0)
+ data.payload = (char *)hdr + hdr_size;
+
+ if (unlikely(apr_cf_debug)) {
+ if (hdr->opcode == APR_BASIC_RSP_RESULT && data.payload) {
+ uint32_t *ptr = data.payload;
+
+ APR_PKT_INFO(
+ "Rx: src_addr[0x%X] dest_addr[0x%X] opcode[0x%X] token[0x%X] rc[0x%X]",
+ (hdr->src_domain << 8) | hdr->src_svc,
+ (hdr->dest_domain << 8) | hdr->dest_svc,
+ hdr->opcode, hdr->token, ptr[1]);
+ } else {
+ APR_PKT_INFO(
+ "Rx: src_addr[0x%X] dest_addr[0x%X] opcode[0x%X] token[0x%X]",
+ (hdr->src_domain << 8) | hdr->src_svc,
+ (hdr->dest_domain << 8) | hdr->dest_svc, hdr->opcode,
+ hdr->token);
+ }
+ }
+
+ temp_port = ((data.dest_port >> 8) * 8) + (data.dest_port & 0xFF);
+ pr_debug("port = %d t_port = %d\n", data.src_port, temp_port);
+ if (c_svc->port_cnt && c_svc->port_fn[temp_port])
+ c_svc->port_fn[temp_port](&data, c_svc->port_priv[temp_port]);
+ else if (c_svc->fn)
+ c_svc->fn(&data, c_svc->priv);
+ else
+ pr_err("APR: Rxed a packet for NULL callback\n");
+
+ return 0;
+}
+
+static int apr_vm_cb_thread(void *data)
+{
+ uint32_t apr_rx_buf_len;
+ struct aprv2_vm_ack_rx_pkt_available_t apr_ack;
+ int status = 0;
+ int ret = 0;
+
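+ /*
+ * Block on the RX HAB channel for incoming APR events, hand each
+ * event to apr_vm_cb_process_evt() for dispatch to the registered
+ * client callback, and ack the event back with the dispatch status.
+ */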
+ while (1) {
+ apr_rx_buf_len = sizeof(apr_rx_buf);
+ ret = habmm_socket_recv(hab_handle_rx,
+ (void *)&apr_rx_buf,
+ &apr_rx_buf_len,
+ 0xFFFFFFFF,
+ 0);
+ if (ret) {
+ pr_err("%s: habmm_socket_recv failed %d\n",
+ __func__, ret);
+ /*
+ * TODO: depending on the HAB error code, a retry
+ * mechanism may be needed here instead of simply
+ * breaking out when the receive fails.
+ */
+ break;
+ }
+
+ status = apr_vm_cb_process_evt(apr_rx_buf, apr_rx_buf_len);
+
+ apr_ack.status = status;
+ ret = habmm_socket_send(hab_handle_rx,
+ (void *)&apr_ack,
+ sizeof(apr_ack),
+ 0);
+ if (ret) {
+ pr_err("%s: habmm_socket_send failed %d\n",
+ __func__, ret);
+ /* TODO: break if send failed ? */
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int apr_vm_get_svc(const char *svc_name, int domain_id, int *client_id,
+ int *svc_idx, int *svc_id, int *dest_svc, int *handle)
+{
+ int i;
+ int size;
+ struct apr_svc_table *tbl;
+ struct mutex *lock;
+ struct aprv2_vm_cmd_register_rsp_t apr_rsp;
+ uint32_t apr_len;
+ int ret = 0;
+ struct {
+ uint32_t cmd_id;
+ struct aprv2_vm_cmd_register_t reg_cmd;
+ } tx_data;
+
+ if (domain_id == APR_DOMAIN_ADSP) {
+ tbl = svc_tbl_qdsp6;
+ size = ARRAY_SIZE(svc_tbl_qdsp6);
+ lock = &m_lock_tbl_qdsp6;
+ } else {
+ tbl = svc_tbl_voice;
+ size = ARRAY_SIZE(svc_tbl_voice);
+ lock = &m_lock_tbl_voice;
+ }
+
+ mutex_lock(lock);
+ for (i = 0; i < size; i++) {
+ if (!strcmp(svc_name, tbl[i].name)) {
+ *client_id = tbl[i].client_id;
+ *svc_idx = tbl[i].idx;
+ if (!tbl[i].id && !tbl[i].handle) {
+ /* need to register a new service */
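+ /*
+ * Registration handshake: send APRV2_VM_CMDID_REGISTER with
+ * the "qcom.apps.lnx.<name>" service name over the TX channel,
+ * then block for the response carrying the dynamically
+ * assigned source service address and the APR handle.
+ */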
+ memset((void *) &tx_data, 0, sizeof(tx_data));
+
+ apr_len = sizeof(tx_data);
+ tx_data.cmd_id = APRV2_VM_CMDID_REGISTER;
+ tx_data.reg_cmd.name_size = snprintf(
+ tx_data.reg_cmd.name,
+ APRV2_VM_MAX_DNS_SIZE,
+ "qcom.apps.lnx.%s",
+ svc_name);
+ tx_data.reg_cmd.addr = 0;
+ ret = habmm_socket_send(hab_handle_tx,
+ (void *) &tx_data,
+ apr_len,
+ 0);
+ if (ret) {
+ pr_err("%s: habmm_socket_send failed %d\n",
+ __func__, ret);
+ mutex_unlock(lock);
+ return ret;
+ }
+ /* wait for response */
+ apr_len = sizeof(apr_rsp);
+ ret = apr_vm_nb_receive(hab_handle_tx,
+ (void *)&apr_rsp,
+ &apr_len,
+ 0xFFFFFFFF);
+ if (ret) {
+ pr_err("%s: apr_vm_nb_receive failed %d\n",
+ __func__, ret);
+ mutex_unlock(lock);
+ return ret;
+ }
+ if (apr_rsp.status) {
+ pr_err("%s: apr_vm_nb_receive status %d\n",
+ __func__, apr_rsp.status);
+ ret = apr_rsp.status;
+ mutex_unlock(lock);
+ return ret;
+ }
+ /* update svc table */
+ tbl[i].handle = apr_rsp.handle;
+ tbl[i].id = apr_rsp.addr &
+ APRV2_VM_PKT_SERVICE_ID_MASK;
+ }
+ *svc_id = tbl[i].id;
+ *dest_svc = tbl[i].dest_svc;
+ *handle = tbl[i].handle;
+ break;
+ }
+ }
+ mutex_unlock(lock);
+
+ pr_debug("%s: svc_name = %s client_id = %d domain_id = %d\n",
+ __func__, svc_name, *client_id, domain_id);
+ pr_debug("%s: src_svc = %d dest_svc = %d handle = %d\n",
+ __func__, *svc_id, *dest_svc, *handle);
+
+ if (i == size) {
+ pr_err("%s: APR: Wrong svc name %s\n", __func__, svc_name);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int apr_vm_rel_svc(int domain_id, int svc_id, int handle)
+{
+ int i;
+ int size;
+ struct apr_svc_table *tbl;
+ struct mutex *lock;
+ struct aprv2_vm_cmd_deregister_rsp_t apr_rsp;
+ uint32_t apr_len;
+ int ret = 0;
+ struct {
+ uint32_t cmd_id;
+ struct aprv2_vm_cmd_deregister_t dereg_cmd;
+ } tx_data;
+
+ if (domain_id == APR_DOMAIN_ADSP) {
+ tbl = svc_tbl_qdsp6;
+ size = ARRAY_SIZE(svc_tbl_qdsp6);
+ lock = &m_lock_tbl_qdsp6;
+ } else {
+ tbl = svc_tbl_voice;
+ size = ARRAY_SIZE(svc_tbl_voice);
+ lock = &m_lock_tbl_voice;
+ }
+
+ mutex_lock(lock);
+ for (i = 0; i < size; i++) {
+ if (tbl[i].id == svc_id && tbl[i].handle == handle) {
+ /* need to deregister a service */
+ memset((void *) &tx_data, 0, sizeof(tx_data));
+
+ apr_len = sizeof(tx_data);
+ tx_data.cmd_id = APRV2_VM_CMDID_DEREGISTER;
+ tx_data.dereg_cmd.handle = handle;
+ ret = habmm_socket_send(hab_handle_tx,
+ (void *) &tx_data,
+ apr_len,
+ 0);
+ if (ret)
+ pr_err("%s: habmm_socket_send failed %d\n",
+ __func__, ret);
+ /*
+ * TODO: if the send failed, we should not wait for the
+ * response; should the table entry be cleared regardless?
+ */
+ /* wait for response */
+ apr_len = sizeof(apr_rsp);
+ ret = apr_vm_nb_receive(hab_handle_tx,
+ (void *)&apr_rsp,
+ &apr_len,
+ 0xFFFFFFFF);
+ if (ret)
+ pr_err("%s: apr_vm_nb_receive failed %d\n",
+ __func__, ret);
+ if (apr_rsp.status) {
+ pr_err("%s: apr_vm_nb_receive status %d\n",
+ __func__, apr_rsp.status);
+ ret = apr_rsp.status;
+ }
+ /* clear svc table */
+ tbl[i].handle = 0;
+ tbl[i].id = 0;
+ break;
+ }
+ }
+ mutex_unlock(lock);
+
+ if (i == size) {
+ pr_err("%s: APR: Wrong svc id %d handle %d\n",
+ __func__, svc_id, handle);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int apr_send_pkt(void *handle, uint32_t *buf)
+{
+ struct apr_svc *svc = handle;
+ struct apr_hdr *hdr;
+ unsigned long flags;
+ uint32_t *cmd_id = (uint32_t *)apr_tx_buf;
+ struct aprv2_vm_cmd_async_send_t *apr_send =
+ (struct aprv2_vm_cmd_async_send_t *)(apr_tx_buf +
+ sizeof(uint32_t));
+ uint32_t apr_send_len;
+ struct aprv2_vm_cmd_async_send_rsp_t apr_rsp;
+ uint32_t apr_rsp_len;
+ int ret = 0;
+
+ if (!handle || !buf) {
+ pr_err("APR: Wrong parameters\n");
+ return -EINVAL;
+ }
+ if (svc->need_reset) {
+ pr_err("APR: send_pkt service need reset\n");
+ return -ENETRESET;
+ }
+
+ if ((svc->dest_id == APR_DEST_QDSP6) &&
+ (apr_get_q6_state() != APR_SUBSYS_LOADED)) {
+ pr_err("%s: Still dsp is not Up\n", __func__);
+ return -ENETRESET;
+ } else if ((svc->dest_id == APR_DEST_MODEM) &&
+ (apr_get_modem_state() == APR_SUBSYS_DOWN)) {
+ pr_err("%s: Still Modem is not Up\n", __func__);
+ return -ENETRESET;
+ }
+
+ spin_lock_irqsave(&svc->w_lock, flags);
+ if (!svc->id || !svc->vm_handle) {
+ pr_err("APR: Still service is not yet opened\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ hdr = (struct apr_hdr *)buf;
+
+ hdr->src_domain = APR_DOMAIN_APPS;
+ hdr->src_svc = svc->id;
+ hdr->dest_domain = svc->dest_domain;
+ hdr->dest_svc = svc->vm_dest_svc;
+
+ if (unlikely(apr_cf_debug)) {
+ APR_PKT_INFO(
+ "Tx: src_addr[0x%X] dest_addr[0x%X] opcode[0x%X] token[0x%X]",
+ (hdr->src_domain << 8) | hdr->src_svc,
+ (hdr->dest_domain << 8) | hdr->dest_svc, hdr->opcode,
+ hdr->token);
+ }
+
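+ /*
+ * The outgoing APR packet is wrapped as [cmd_id | vm handle |
+ * APR packet] in apr_tx_buf and pushed over the TX HAB channel;
+ * the send is treated as synchronous and we wait for the ack
+ * below before returning the packet size.
+ */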
+ memset((void *)&apr_tx_buf, 0, sizeof(apr_tx_buf));
+ /* pkt_size + cmd_id + handle */
+ apr_send_len = hdr->pkt_size + sizeof(uint32_t) * 2;
+ *cmd_id = APRV2_VM_CMDID_ASYNC_SEND;
+ apr_send->handle = svc->vm_handle;
+
+ /* bounds check: the packet must fit in the tx buffer */
+ if (hdr->pkt_size > APR_TX_BUF_SIZE - (sizeof(uint32_t) * 2)) {
+ pr_err("APR: Wrong pkt size %d\n", hdr->pkt_size);
+ ret = -ENOMEM;
+ goto done;
+ }
+ memcpy(&apr_send->pkt_header, buf, hdr->pkt_size);
+
+ ret = habmm_socket_send(hab_handle_tx,
+ (void *)&apr_tx_buf,
+ apr_send_len,
+ 0);
+ if (ret) {
+ pr_err("%s: habmm_socket_send failed %d\n",
+ __func__, ret);
+ goto done;
+ }
+ /* wait for response */
+ apr_rsp_len = sizeof(apr_rsp);
+ ret = apr_vm_nb_receive(hab_handle_tx,
+ (void *)&apr_rsp,
+ &apr_rsp_len,
+ 0xFFFFFFFF);
+ if (ret) {
+ pr_err("%s: apr_vm_nb_receive failed %d\n",
+ __func__, ret);
+ goto done;
+ }
+ if (apr_rsp.status) {
+ pr_err("%s: apr_vm_nb_receive status %d\n",
+ __func__, apr_rsp.status);
+ /* should translate status properly */
+ ret = -ECOMM;
+ goto done;
+ }
+
+ /* upon successful send, return packet size */
+ ret = hdr->pkt_size;
+
+done:
+ spin_unlock_irqrestore(&svc->w_lock, flags);
+ return ret;
+}
+
+struct apr_svc *apr_register(char *dest, char *svc_name, apr_fn svc_fn,
+ uint32_t src_port, void *priv)
+{
+ struct apr_client *clnt;
+ int client_id = 0;
+ int svc_idx = 0;
+ int svc_id = 0;
+ int dest_id = 0;
+ int domain_id = 0;
+ int temp_port = 0;
+ struct apr_svc *svc = NULL;
+ int rc = 0;
+ bool can_open_channel = true;
+ int dest_svc = 0;
+ int handle = 0;
+
+ if (!dest || !svc_name || !svc_fn)
+ return NULL;
+
+ if (!strcmp(dest, "ADSP"))
+ domain_id = APR_DOMAIN_ADSP;
+ else if (!strcmp(dest, "MODEM")) {
+ /* Don't request SMD channels if the destination is MODEM,
+ * as these channels are no longer used; these clients only
+ * listen for MODEM SSR events.
+ */
+ can_open_channel = false;
+ domain_id = APR_DOMAIN_MODEM;
+ } else {
+ pr_err("APR: wrong destination\n");
+ goto done;
+ }
+
+ dest_id = apr_get_dest_id(dest);
+
+ if (dest_id == APR_DEST_QDSP6) {
+ if (apr_get_q6_state() != APR_SUBSYS_LOADED) {
+ pr_err("%s: adsp not up\n", __func__);
+ return NULL;
+ }
+ pr_debug("%s: adsp Up\n", __func__);
+ } else if (dest_id == APR_DEST_MODEM) {
+ if (apr_get_modem_state() == APR_SUBSYS_DOWN) {
+ if (is_modem_up) {
+ pr_err("%s: modem shutdown due to SSR, ret",
+ __func__);
+ return NULL;
+ }
+ pr_debug("%s: Wait for modem to bootup\n", __func__);
+ rc = apr_wait_for_device_up(APR_DEST_MODEM);
+ if (rc == 0) {
+ pr_err("%s: Modem is not Up\n", __func__);
+ return NULL;
+ }
+ }
+ pr_debug("%s: modem Up\n", __func__);
+ }
+
+ if (apr_vm_get_svc(svc_name, domain_id, &client_id, &svc_idx, &svc_id,
+ &dest_svc, &handle)) {
+ pr_err("%s: apr_vm_get_svc failed\n", __func__);
+ goto done;
+ }
+
+ clnt = &client[dest_id][client_id];
+ svc = &clnt->svc[svc_idx];
+ mutex_lock(&svc->m_lock);
+ clnt->id = client_id;
+ if (svc->need_reset) {
+ mutex_unlock(&svc->m_lock);
+ pr_err("APR: Service needs reset\n");
+ goto done;
+ }
+ svc->id = svc_id;
+ svc->vm_dest_svc = dest_svc;
+ svc->dest_id = dest_id;
+ svc->client_id = client_id;
+ svc->dest_domain = domain_id;
+ svc->pkt_owner = APR_PKT_OWNER_DRIVER;
+ svc->vm_handle = handle;
+
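+ /*
+ * A real source port is folded into a flat index
+ * ((port >> 8) * 8 + low byte) into the per-port callback
+ * tables; 0xFFFFFFFF means the service-level callback is
+ * used instead.
+ */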
+ if (src_port != 0xFFFFFFFF) {
+ temp_port = ((src_port >> 8) * 8) + (src_port & 0xFF);
+ pr_debug("port = %d t_port = %d\n", src_port, temp_port);
+ if (temp_port >= APR_MAX_PORTS || temp_port < 0) {
+ pr_err("APR: temp_port out of bounds\n");
+ mutex_unlock(&svc->m_lock);
+ return NULL;
+ }
+ if (!svc->port_cnt && !svc->svc_cnt)
+ clnt->svc_cnt++;
+ svc->port_cnt++;
+ svc->port_fn[temp_port] = svc_fn;
+ svc->port_priv[temp_port] = priv;
+ } else {
+ if (!svc->fn) {
+ if (!svc->port_cnt && !svc->svc_cnt)
+ clnt->svc_cnt++;
+ svc->fn = svc_fn;
+ if (svc->port_cnt)
+ svc->svc_cnt++;
+ svc->priv = priv;
+ }
+ }
+
+ mutex_unlock(&svc->m_lock);
+done:
+ return svc;
+}
+
+static void apr_reset_deregister(struct work_struct *work)
+{
+ struct apr_svc *handle = NULL;
+ struct apr_reset_work *apr_reset =
+ container_of(work, struct apr_reset_work, work);
+
+ handle = apr_reset->handle;
+ pr_debug("%s:handle[%pK]\n", __func__, handle);
+ apr_deregister(handle);
+ kfree(apr_reset);
+}
+
+int apr_deregister(void *handle)
+{
+ struct apr_svc *svc = handle;
+ struct apr_client *clnt;
+ uint16_t dest_id;
+ uint16_t client_id;
+
+ if (!handle)
+ return -EINVAL;
+
+ mutex_lock(&svc->m_lock);
+ dest_id = svc->dest_id;
+ client_id = svc->client_id;
+ clnt = &client[dest_id][client_id];
+
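+ /*
+ * Drop one port/service reference; the service is released from
+ * the backend (apr_vm_rel_svc) only once both the port count and
+ * the service count reach zero.
+ */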
+ if (svc->port_cnt > 0 || svc->svc_cnt > 0) {
+ if (svc->port_cnt)
+ svc->port_cnt--;
+ else if (svc->svc_cnt)
+ svc->svc_cnt--;
+ if (!svc->port_cnt && !svc->svc_cnt) {
+ client[dest_id][client_id].svc_cnt--;
+ svc->need_reset = 0x0;
+ }
+ } else if (client[dest_id][client_id].svc_cnt > 0) {
+ client[dest_id][client_id].svc_cnt--;
+ if (!client[dest_id][client_id].svc_cnt) {
+ svc->need_reset = 0x0;
+ pr_debug("%s: service is reset %p\n", __func__, svc);
+ }
+ }
+
+ if (!svc->port_cnt && !svc->svc_cnt) {
+ if (apr_vm_rel_svc(svc->dest_domain, svc->id, svc->vm_handle))
+ pr_err("%s: apr_vm_rel_svc failed\n", __func__);
+ svc->priv = NULL;
+ svc->id = 0;
+ svc->vm_dest_svc = 0;
+ svc->fn = NULL;
+ svc->dest_id = 0;
+ svc->client_id = 0;
+ svc->need_reset = 0x0;
+ svc->vm_handle = 0;
+ }
+ mutex_unlock(&svc->m_lock);
+
+ return 0;
+}
+
+void apr_reset(void *handle)
+{
+ struct apr_reset_work *apr_reset_worker = NULL;
+
+ if (!handle)
+ return;
+ pr_debug("%s: handle[%pK]\n", __func__, handle);
+
+ if (apr_reset_workqueue == NULL) {
+ pr_err("%s: apr_reset_workqueue is NULL\n", __func__);
+ return;
+ }
+
+ apr_reset_worker = kzalloc(sizeof(struct apr_reset_work),
+ GFP_ATOMIC);
+
+ if (apr_reset_worker == NULL) {
+ pr_err("%s: mem failure\n", __func__);
+ return;
+ }
+
+ apr_reset_worker->handle = handle;
+ INIT_WORK(&apr_reset_worker->work, apr_reset_deregister);
+ queue_work(apr_reset_workqueue, &apr_reset_worker->work);
+}
+
+/* Dispatch the Reset events to Modem and audio clients */
+static void dispatch_event(unsigned long code, uint16_t proc)
+{
+ struct apr_client *apr_client;
+ struct apr_client_data data;
+ struct apr_svc *svc;
+ uint16_t clnt;
+ int i, j;
+
+ memset(&data, 0, sizeof(data));
+ data.opcode = RESET_EVENTS;
+ data.reset_event = code;
+
+ /* Service domain can be different from the processor */
+ data.reset_proc = apr_get_reset_domain(proc);
+
+ clnt = APR_CLIENT_AUDIO;
+ apr_client = &client[proc][clnt];
+ for (i = 0; i < APR_SVC_MAX; i++) {
+ mutex_lock(&apr_client->svc[i].m_lock);
+ if (apr_client->svc[i].fn) {
+ apr_client->svc[i].need_reset = 0x1;
+ apr_client->svc[i].fn(&data, apr_client->svc[i].priv);
+ }
+ if (apr_client->svc[i].port_cnt) {
+ svc = &(apr_client->svc[i]);
+ svc->need_reset = 0x1;
+ for (j = 0; j < APR_MAX_PORTS; j++)
+ if (svc->port_fn[j])
+ svc->port_fn[j](&data,
+ svc->port_priv[j]);
+ }
+ mutex_unlock(&apr_client->svc[i].m_lock);
+ }
+
+ clnt = APR_CLIENT_VOICE;
+ apr_client = &client[proc][clnt];
+ for (i = 0; i < APR_SVC_MAX; i++) {
+ mutex_lock(&apr_client->svc[i].m_lock);
+ if (apr_client->svc[i].fn) {
+ apr_client->svc[i].need_reset = 0x1;
+ apr_client->svc[i].fn(&data, apr_client->svc[i].priv);
+ }
+ if (apr_client->svc[i].port_cnt) {
+ svc = &(apr_client->svc[i]);
+ svc->need_reset = 0x1;
+ for (j = 0; j < APR_MAX_PORTS; j++)
+ if (svc->port_fn[j])
+ svc->port_fn[j](&data,
+ svc->port_priv[j]);
+ }
+ mutex_unlock(&apr_client->svc[i].m_lock);
+ }
+}
+
+static int apr_notifier_service_cb(struct notifier_block *this,
+ unsigned long opcode, void *data)
+{
+ struct audio_notifier_cb_data *cb_data = data;
+
+ if (cb_data == NULL) {
+ pr_err("%s: Callback data is NULL!\n", __func__);
+ goto done;
+ }
+
+ pr_debug("%s: Service opcode 0x%lx, domain %d\n",
+ __func__, opcode, cb_data->domain);
+
+ switch (opcode) {
+ case AUDIO_NOTIFIER_SERVICE_DOWN:
+ /*
+ * Use flag to ignore down notifications during
+ * initial boot. There is no benefit from error
+ * recovery notifications during initial boot
+ * up since everything is expected to be down.
+ */
+ if (is_initial_boot) {
+ is_initial_boot = false;
+ break;
+ }
+ if (cb_data->domain == AUDIO_NOTIFIER_MODEM_DOMAIN)
+ apr_modem_down(opcode);
+ else
+ apr_adsp_down(opcode);
+ break;
+ case AUDIO_NOTIFIER_SERVICE_UP:
+ is_initial_boot = false;
+ if (cb_data->domain == AUDIO_NOTIFIER_MODEM_DOMAIN)
+ apr_modem_up();
+ else
+ apr_adsp_up();
+ break;
+ default:
+ break;
+ }
+done:
+ return NOTIFY_OK;
+}
+
+static struct notifier_block adsp_service_nb = {
+ .notifier_call = apr_notifier_service_cb,
+ .priority = 0,
+};
+
+static struct notifier_block modem_service_nb = {
+ .notifier_call = apr_notifier_service_cb,
+ .priority = 0,
+};
+
+static void apr_vm_set_subsys_state(void)
+{
+ /* Set the default subsystem state in the VM environment.
+ * Both the q6 and the modem should be in the LOADED state,
+ * since the VM boots up at a late stage, after PM is up.
+ */
+ apr_set_q6_state(APR_SUBSYS_LOADED);
+ apr_set_modem_state(APR_SUBSYS_LOADED);
+}
+
+static int __init apr_init(void)
+{
+ int i, j, k;
+ int ret;
+
+ /* open apr channel tx and rx, store as global */
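+ /*
+ * MM_AUD_1 carries the synchronous command/response traffic
+ * (register/deregister and async-send acks); MM_AUD_2 carries
+ * incoming APR packets and is drained by the callback thread
+ * created below.
+ */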
+ ret = habmm_socket_open(&hab_handle_tx,
+ MM_AUD_1,
+ 0xFFFFFFFF,
+ HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_FE);
+ if (ret) {
+ pr_err("%s: habmm_socket_open tx failed %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = habmm_socket_open(&hab_handle_rx,
+ MM_AUD_2,
+ 0xFFFFFFFF,
+ HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_FE);
+ if (ret) {
+ pr_err("%s: habmm_socket_open rx failed %d\n", __func__, ret);
+ habmm_socket_close(hab_handle_tx);
+ return ret;
+ }
+ pr_info("%s: hab_handle_tx %x hab_handle_rx %x\n",
+ __func__, hab_handle_tx, hab_handle_rx);
+
+ /* create apr ch rx cb thread */
+ apr_vm_cb_thread_task = kthread_run(apr_vm_cb_thread,
+ NULL,
+ APR_VM_CB_THREAD_NAME);
+ if (IS_ERR(apr_vm_cb_thread_task)) {
+ ret = PTR_ERR(apr_vm_cb_thread_task);
+ pr_err("%s: kthread_run failed %d\n", __func__, ret);
+ habmm_socket_close(hab_handle_tx);
+ habmm_socket_close(hab_handle_rx);
+ return ret;
+ }
+ pid = apr_vm_cb_thread_task->pid;
+ pr_info("%s: apr_vm_cb_thread started pid %d\n",
+ __func__, pid);
+
+ mutex_init(&m_lock_tbl_qdsp6);
+ mutex_init(&m_lock_tbl_voice);
+
+ for (i = 0; i < APR_DEST_MAX; i++)
+ for (j = 0; j < APR_CLIENT_MAX; j++) {
+ mutex_init(&client[i][j].m_lock);
+ for (k = 0; k < APR_SVC_MAX; k++) {
+ mutex_init(&client[i][j].svc[k].m_lock);
+ spin_lock_init(&client[i][j].svc[k].w_lock);
+ }
+ }
+
+ apr_vm_set_subsys_state();
+ mutex_init(&q6.lock);
+ apr_reset_workqueue = create_singlethread_workqueue("apr_driver");
+ if (!apr_reset_workqueue) {
+ habmm_socket_close(hab_handle_tx);
+ habmm_socket_close(hab_handle_rx);
+ kthread_stop(apr_vm_cb_thread_task);
+ return -ENOMEM;
+ }
+
+ apr_pkt_ctx = ipc_log_context_create(APR_PKT_IPC_LOG_PAGE_CNT,
+ "apr", 0);
+ if (!apr_pkt_ctx)
+ pr_err("%s: Unable to create ipc log context\n", __func__);
+
+ is_initial_boot = true;
+ subsys_notif_register("apr_adsp", AUDIO_NOTIFIER_ADSP_DOMAIN,
+ &adsp_service_nb);
+ subsys_notif_register("apr_modem", AUDIO_NOTIFIER_MODEM_DOMAIN,
+ &modem_service_nb);
+
+ return 0;
+}
+device_initcall(apr_init);
+
+static int __init apr_late_init(void)
+{
+ int ret = 0;
+
+ init_waitqueue_head(&dsp_wait);
+ init_waitqueue_head(&modem_wait);
+
+ return ret;
+}
+late_initcall(apr_late_init);
+
+static void __exit apr_exit(void)
+{
+ habmm_socket_close(hab_handle_tx);
+ habmm_socket_close(hab_handle_rx);
+ kthread_stop(apr_vm_cb_thread_task);
+}
+__exitcall(apr_exit);
+
+#ifdef CONFIG_DEBUG_FS
+static int __init apr_debug_init(void)
+{
+ debugfs_apr_debug = debugfs_create_file("msm_apr_debug",
+ S_IFREG | S_IRUGO, NULL, NULL,
+ &apr_debug_ops);
+ return 0;
+}
+device_initcall(apr_debug_init);
+#endif
diff --git a/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c b/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c
new file mode 100644
index 000000000000..a3aa8823d8ce
--- /dev/null
+++ b/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c
@@ -0,0 +1,838 @@
+/*
+ * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/msm_audio_ion.h>
+#include <linux/habmm.h>
+#include "../../../staging/android/ion/ion_priv.h"
+#include "../../../staging/android/ion/ion_hvenv_driver.h"
+
+#define MSM_AUDIO_ION_PROBED (1 << 0)
+
+#define MSM_AUDIO_SMMU_VM_CMD_MAP 0x00000001
+#define MSM_AUDIO_SMMU_VM_CMD_UNMAP 0x00000002
+#define MSM_AUDIO_SMMU_VM_HAB_MINOR_ID 1
+
+struct msm_audio_ion_private {
+ bool smmu_enabled;
+ bool audioheap_enabled;
+ u8 device_status;
+ struct list_head smmu_map_list;
+ struct mutex smmu_map_mutex;
+};
+
+struct msm_audio_smmu_map_data {
+ struct ion_client *client;
+ struct ion_handle *handle;
+ u32 export_id;
+ struct list_head list;
+};
+
+struct msm_audio_smmu_vm_map_cmd {
+ int cmd_id;
+ u32 export_id;
+ u32 buf_size;
+};
+
+struct msm_audio_smmu_vm_map_cmd_rsp {
+ int status;
+ u64 addr;
+};
+
+struct msm_audio_smmu_vm_unmap_cmd {
+ int cmd_id;
+ u32 export_id;
+};
+
+struct msm_audio_smmu_vm_unmap_cmd_rsp {
+ int status;
+};
+
+static struct msm_audio_ion_private msm_audio_ion_data = {0,};
+static u32 msm_audio_ion_hab_handle;
+
+static int msm_audio_ion_get_phys(struct ion_client *client,
+ struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len,
+ void *vaddr);
+
+static int msm_audio_ion_smmu_map(struct ion_client *client,
+ struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len, void *vaddr)
+{
+ int rc;
+ u32 export_id;
+ u32 cmd_rsp_size;
+ bool exported = false;
+ struct msm_audio_smmu_vm_map_cmd_rsp cmd_rsp;
+ struct msm_audio_smmu_map_data *map_data = NULL;
+ struct msm_audio_smmu_vm_map_cmd smmu_map_cmd;
+
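+ /*
+ * SMMU map sequence: export the kernel mapping of the buffer over
+ * HAB, send a MAP command with the export id and size to the
+ * backend, and wait for a response carrying the mapped address
+ * that is handed back to the caller.
+ */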
+ rc = ion_handle_get_size(client, handle, len);
+ if (rc) {
+ pr_err("%s: ion_handle_get_size failed, client = %pK, handle = %pK, rc = %d\n",
+ __func__, client, handle, rc);
+ goto err;
+ }
+
+ /* Data required to track per buffer mapping */
+ map_data = kzalloc(sizeof(*map_data), GFP_KERNEL);
+ if (!map_data) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ /* Export the buffer to physical VM */
+ rc = habmm_export(msm_audio_ion_hab_handle, vaddr, *len,
+ &export_id, 0);
+ if (rc) {
+ pr_err("%s: habmm_export failed vaddr = %pK, len = %zd, rc = %d\n",
+ __func__, vaddr, *len, rc);
+ goto err;
+ }
+
+ exported = true;
+ smmu_map_cmd.cmd_id = MSM_AUDIO_SMMU_VM_CMD_MAP;
+ smmu_map_cmd.export_id = export_id;
+ smmu_map_cmd.buf_size = *len;
+
+ mutex_lock(&(msm_audio_ion_data.smmu_map_mutex));
+ rc = habmm_socket_send(msm_audio_ion_hab_handle,
+ (void *)&smmu_map_cmd, sizeof(smmu_map_cmd), 0);
+ if (rc) {
+ pr_err("%s: habmm_socket_send failed %d\n",
+ __func__, rc);
+ mutex_unlock(&(msm_audio_ion_data.smmu_map_mutex));
+ goto err;
+ }
+
+ cmd_rsp_size = sizeof(cmd_rsp);
+ rc = habmm_socket_recv(msm_audio_ion_hab_handle,
+ (void *)&cmd_rsp,
+ &cmd_rsp_size,
+ 0xFFFFFFFF,
+ 0);
+ if (rc) {
+ pr_err("%s: habmm_socket_recv failed %d\n",
+ __func__, rc);
+ mutex_unlock(&(msm_audio_ion_data.smmu_map_mutex));
+ goto err;
+ }
+ mutex_unlock(&(msm_audio_ion_data.smmu_map_mutex));
+
+ if (cmd_rsp_size != sizeof(cmd_rsp)) {
+ pr_err("%s: invalid size for cmd rsp %lu, expected %lu\n",
+ __func__, cmd_rsp_size, sizeof(cmd_rsp));
+ rc = -EIO;
+ goto err;
+ }
+
+ if (cmd_rsp.status) {
+ pr_err("%s: SMMU map command failed %d\n",
+ __func__, cmd_rsp.status);
+ rc = cmd_rsp.status;
+ goto err;
+ }
+
+ *addr = (ion_phys_addr_t)cmd_rsp.addr;
+
+ map_data->client = client;
+ map_data->handle = handle;
+ map_data->export_id = export_id;
+
+ mutex_lock(&(msm_audio_ion_data.smmu_map_mutex));
+ list_add_tail(&(map_data->list),
+ &(msm_audio_ion_data.smmu_map_list));
+ mutex_unlock(&(msm_audio_ion_data.smmu_map_mutex));
+
+ return 0;
+
+err:
+ if (exported)
+ (void)habmm_unexport(msm_audio_ion_hab_handle, export_id, 0);
+
+ kfree(map_data);
+
+ return rc;
+}
+
+static int msm_audio_ion_smmu_unmap(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ int rc;
+ bool found = false;
+ u32 cmd_rsp_size;
+ struct msm_audio_smmu_vm_unmap_cmd_rsp cmd_rsp;
+ struct msm_audio_smmu_map_data *map_data, *next;
+ struct msm_audio_smmu_vm_unmap_cmd smmu_unmap_cmd;
+
+ /*
+ * Though list_for_each_entry_safe is delete safe, lock
+ * should be explicitly acquired to avoid race condition
+ * on adding elements to the list.
+ */
+ mutex_lock(&(msm_audio_ion_data.smmu_map_mutex));
+ list_for_each_entry_safe(map_data, next,
+ &(msm_audio_ion_data.smmu_map_list), list) {
+
+ if (map_data->handle == handle && map_data->client == client) {
+ found = true;
+ smmu_unmap_cmd.cmd_id = MSM_AUDIO_SMMU_VM_CMD_UNMAP;
+ smmu_unmap_cmd.export_id = map_data->export_id;
+
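+ /*
+ * Ask the backend to undo the mapping for this export id,
+ * wait for its status, then drop the local HAB export and
+ * the bookkeeping entry.
+ */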
+ rc = habmm_socket_send(msm_audio_ion_hab_handle,
+ (void *)&smmu_unmap_cmd,
+ sizeof(smmu_unmap_cmd), 0);
+ if (rc) {
+ pr_err("%s: habmm_socket_send failed %d\n",
+ __func__, rc);
+ goto err;
+ }
+
+ cmd_rsp_size = sizeof(cmd_rsp);
+ rc = habmm_socket_recv(msm_audio_ion_hab_handle,
+ (void *)&cmd_rsp,
+ &cmd_rsp_size,
+ 0xFFFFFFFF,
+ 0);
+ if (rc) {
+ pr_err("%s: habmm_socket_recv failed %d\n",
+ __func__, rc);
+ goto err;
+ }
+
+ if (cmd_rsp_size != sizeof(cmd_rsp)) {
+ pr_err("%s: invalid size for cmd rsp %lu\n",
+ __func__, cmd_rsp_size);
+ rc = -EIO;
+ goto err;
+ }
+
+ if (cmd_rsp.status) {
+ pr_err("%s: SMMU unmap command failed %d\n",
+ __func__, cmd_rsp.status);
+ rc = cmd_rsp.status;
+ goto err;
+ }
+
+ rc = habmm_unexport(msm_audio_ion_hab_handle,
+ map_data->export_id, 0xFFFFFFFF);
+ if (rc) {
+ pr_err("%s: habmm_unexport failed export_id = %d, rc = %d\n",
+ __func__, map_data->export_id, rc);
+ }
+
+ list_del(&(map_data->list));
+ kfree(map_data);
+ break;
+ }
+ }
+ mutex_unlock(&(msm_audio_ion_data.smmu_map_mutex));
+
+ if (!found) {
+ pr_err("%s: cannot find map_data ion_handle %pK, ion_client %pK\n",
+ __func__, handle, client);
+ rc = -EINVAL;
+ }
+
+ return rc;
+
+err:
+ if (found) {
+ (void)habmm_unexport(msm_audio_ion_hab_handle,
+ map_data->export_id, 0xFFFFFFFF);
+ list_del(&(map_data->list));
+ kfree(map_data);
+ }
+
+ mutex_unlock(&(msm_audio_ion_data.smmu_map_mutex));
+ return rc;
+}
+
+int msm_audio_ion_alloc(const char *name, struct ion_client **client,
+ struct ion_handle **handle, size_t bufsz,
+ ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr)
+{
+ int rc = -EINVAL;
+ unsigned long err_ion_ptr = 0;
+
+ if ((msm_audio_ion_data.smmu_enabled == true) &&
+ !(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
+ pr_debug("%s:probe is not done, deferred\n", __func__);
+ return -EPROBE_DEFER;
+ }
+ if (!name || !client || !handle || !paddr || !vaddr
+ || !bufsz || !pa_len) {
+ pr_err("%s: Invalid params\n", __func__);
+ return -EINVAL;
+ }
+ *client = msm_audio_ion_client_create(name);
+ if (IS_ERR_OR_NULL((void *)(*client))) {
+ pr_err("%s: ION create client for AUDIO failed\n", __func__);
+ goto err;
+ }
+
+ *handle = ion_alloc(*client, bufsz, SZ_4K,
+ ION_HEAP(ION_AUDIO_HEAP_ID), 0);
+ if (IS_ERR_OR_NULL((void *) (*handle))) {
+ if (msm_audio_ion_data.smmu_enabled == true) {
+ pr_debug("system heap is used");
+ msm_audio_ion_data.audioheap_enabled = 0;
+ *handle = ion_alloc(*client, bufsz, SZ_4K,
+ ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
+ }
+ if (IS_ERR_OR_NULL((void *) (*handle))) {
+ if (IS_ERR((void *)(*handle)))
+ err_ion_ptr = PTR_ERR((int *)(*handle));
+ pr_err("%s:ION alloc fail err ptr=%ld, smmu_enabled=%d\n",
+ __func__, err_ion_ptr, msm_audio_ion_data.smmu_enabled);
+ rc = -ENOMEM;
+ goto err_ion_client;
+ }
+ } else {
+ pr_debug("audio heap is used");
+ msm_audio_ion_data.audioheap_enabled = 1;
+ }
+
+ *vaddr = ion_map_kernel(*client, *handle);
+ if (IS_ERR_OR_NULL((void *)*vaddr)) {
+ pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
+ goto err_ion_handle;
+ }
+ pr_debug("%s: mapped address = %pK, size=%zd\n", __func__,
+ *vaddr, bufsz);
+
+ if (bufsz != 0) {
+ pr_debug("%s: memset to 0 %pK %zd\n", __func__, *vaddr, bufsz);
+ memset((void *)*vaddr, 0, bufsz);
+ }
+
+ rc = msm_audio_ion_get_phys(*client, *handle, paddr, pa_len, *vaddr);
+ if (rc) {
+ pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
+ __func__, rc);
+ goto err_get_phys;
+ }
+
+ return rc;
+
+err_get_phys:
+ ion_unmap_kernel(*client, *handle);
+err_ion_handle:
+ ion_free(*client, *handle);
+err_ion_client:
+ msm_audio_ion_client_destroy(*client);
+ *handle = NULL;
+ *client = NULL;
+err:
+ return rc;
+}
+EXPORT_SYMBOL(msm_audio_ion_alloc);
+
+int msm_audio_ion_phys_free(struct ion_client *client,
+ struct ion_handle *handle,
+ ion_phys_addr_t *paddr,
+ size_t *pa_len, u8 assign_type)
+{
+ if (!(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
+ pr_debug("%s:probe is not done, deferred\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
+ if (!client || !handle || !paddr || !pa_len) {
+ pr_err("%s: Invalid params\n", __func__);
+ return -EINVAL;
+ }
+
+ /* hyp assign is not supported in VM */
+
+ ion_free(client, handle);
+ ion_client_destroy(client);
+
+ return 0;
+}
+
+int msm_audio_ion_phys_assign(const char *name, struct ion_client **client,
+ struct ion_handle **handle, int fd,
+ ion_phys_addr_t *paddr,
+ size_t *pa_len, u8 assign_type)
+{
+ int ret;
+
+ if (!(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
+ pr_debug("%s:probe is not done, deferred\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
+ if (!name || !client || !handle || !paddr || !pa_len) {
+ pr_err("%s: Invalid params\n", __func__);
+ return -EINVAL;
+ }
+
+ *client = msm_audio_ion_client_create(name);
+ if (IS_ERR_OR_NULL((void *)(*client))) {
+ pr_err("%s: ION create client failed\n", __func__);
+ return -EINVAL;
+ }
+
+ *handle = ion_import_dma_buf(*client, fd);
+ if (IS_ERR_OR_NULL((void *) (*handle))) {
+ pr_err("%s: ion import dma buffer failed\n",
+ __func__);
+ ret = -EINVAL;
+ goto err_destroy_client;
+ }
+
+ ret = ion_phys(*client, *handle, paddr, pa_len);
+ if (ret) {
+ pr_err("%s: could not get physical address for handle, ret = %d\n",
+ __func__, ret);
+ goto err_ion_handle;
+ }
+
+ /* hyp assign is not supported in VM */
+
+ return ret;
+
+err_ion_handle:
+ ion_free(*client, *handle);
+
+err_destroy_client:
+ ion_client_destroy(*client);
+ *client = NULL;
+ *handle = NULL;
+
+ return ret;
+}
+
+int msm_audio_ion_import(const char *name, struct ion_client **client,
+ struct ion_handle **handle, int fd,
+ unsigned long *ionflag, size_t bufsz,
+ ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr)
+{
+ int rc = 0;
+
+ if ((msm_audio_ion_data.smmu_enabled == true) &&
+ !(msm_audio_ion_data.device_status & MSM_AUDIO_ION_PROBED)) {
+ pr_debug("%s:probe is not done, deferred\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
+ if (!name || !client || !handle || !paddr || !vaddr || !pa_len) {
+ pr_err("%s: Invalid params\n", __func__);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ *client = msm_audio_ion_client_create(name);
+ if (IS_ERR_OR_NULL((void *)(*client))) {
+ pr_err("%s: ION create client for AUDIO failed\n", __func__);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ /* name should be audio_acdb_client or Audio_Dec_Client,
+ * bufsz should be 0 and fd shouldn't be 0 as of now
+ */
+ *handle = ion_import_dma_buf(*client, fd);
+ pr_debug("%s: DMA Buf name=%s, fd=%d handle=%pK\n", __func__,
+ name, fd, *handle);
+ if (IS_ERR_OR_NULL((void *) (*handle))) {
+ pr_err("%s: ion import dma buffer failed\n",
+ __func__);
+ rc = -EINVAL;
+ goto err_destroy_client;
+ }
+
+ if (ionflag != NULL) {
+ rc = ion_handle_get_flags(*client, *handle, ionflag);
+ if (rc) {
+ pr_err("%s: could not get flags for the handle\n",
+ __func__);
+ goto err_ion_handle;
+ }
+ }
+
+ *vaddr = ion_map_kernel(*client, *handle);
+ if (IS_ERR_OR_NULL((void *)*vaddr)) {
+ pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
+ rc = -ENOMEM;
+ goto err_ion_handle;
+ }
+ pr_debug("%s: mapped address = %pK, size=%zd\n", __func__,
+ *vaddr, bufsz);
+
+ rc = msm_audio_ion_get_phys(*client, *handle, paddr, pa_len, *vaddr);
+ if (rc) {
+ pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
+ __func__, rc);
+ goto err_get_phys;
+ }
+
+ return 0;
+
+err_get_phys:
+ ion_unmap_kernel(*client, *handle);
+err_ion_handle:
+ ion_free(*client, *handle);
+err_destroy_client:
+ msm_audio_ion_client_destroy(*client);
+ *client = NULL;
+ *handle = NULL;
+err:
+ return rc;
+}
+
+int msm_audio_ion_free(struct ion_client *client, struct ion_handle *handle)
+{
+ int ret = 0;
+
+ if (!client || !handle) {
+ pr_err("%s Invalid params\n", __func__);
+ return -EINVAL;
+ }
+
+ if (msm_audio_ion_data.smmu_enabled) {
+ ret = msm_audio_ion_smmu_unmap(client, handle);
+ if (ret)
+ pr_err("%s: smmu unmap failed with ret %d\n",
+ __func__, ret);
+ }
+
+ ion_unmap_kernel(client, handle);
+
+ ion_free(client, handle);
+ msm_audio_ion_client_destroy(client);
+ return ret;
+}
+EXPORT_SYMBOL(msm_audio_ion_free);
+
+int msm_audio_ion_mmap(struct audio_buffer *ab,
+ struct vm_area_struct *vma)
+{
+ struct sg_table *table;
+ unsigned long addr = vma->vm_start;
+ unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
+ struct scatterlist *sg;
+ unsigned int i;
+ struct page *page;
+ int ret;
+
+ pr_debug("%s\n", __func__);
+
+ table = ion_sg_table(ab->client, ab->handle);
+
+ if (IS_ERR(table)) {
+ pr_err("%s: Unable to get sg_table from ion: %ld\n",
+ __func__, PTR_ERR(table));
+ return PTR_ERR(table);
+ } else if (!table) {
+ pr_err("%s: sg_list is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ /* uncached */
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ /* We need to check if a page is associated with this sg list because:
+ * If the allocation came from a carveout we currently don't have
+ * pages associated with carved out memory. This might change in the
+ * future and we can remove this check and the else statement.
+ */
+ page = sg_page(table->sgl);
+ if (page) {
+ pr_debug("%s: page is NOT null\n", __func__);
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ unsigned long remainder = vma->vm_end - addr;
+ unsigned long len = sg->length;
+
+ page = sg_page(sg);
+
+ if (offset >= len) {
+ offset -= len;
+ continue;
+ } else if (offset) {
+ page += offset / PAGE_SIZE;
+ len -= offset;
+ offset = 0;
+ }
+ len = min(len, remainder);
+ pr_debug("vma=%pK, addr=%x len=%ld vm_start=%x vm_end=%x vm_page_prot=%ld\n",
+ vma, (unsigned int)addr, len,
+ (unsigned int)vma->vm_start,
+ (unsigned int)vma->vm_end,
+ (unsigned long int)vma->vm_page_prot);
+ remap_pfn_range(vma, addr, page_to_pfn(page), len,
+ vma->vm_page_prot);
+ addr += len;
+ if (addr >= vma->vm_end)
+ return 0;
+ }
+ } else {
+ ion_phys_addr_t phys_addr;
+ size_t phys_len;
+ size_t va_len = 0;
+
+ pr_debug("%s: page is NULL\n", __func__);
+
+ ret = ion_phys(ab->client, ab->handle, &phys_addr, &phys_len);
+ if (ret) {
+ pr_err("%s: Unable to get phys address from ION buffer: %d\n",
+ __func__, ret);
+ return ret;
+ }
+ pr_debug("phys=%pK len=%zd\n", &phys_addr, phys_len);
+ pr_debug("vma=%pK, vm_start=%x vm_end=%x vm_pgoff=%ld vm_page_prot=%ld\n",
+ vma, (unsigned int)vma->vm_start,
+ (unsigned int)vma->vm_end, vma->vm_pgoff,
+ (unsigned long int)vma->vm_page_prot);
+ va_len = vma->vm_end - vma->vm_start;
+ if ((offset > phys_len) || (va_len > phys_len-offset)) {
+ pr_err("wrong offset size %ld, lens= %zd, va_len=%zd\n",
+ offset, phys_len, va_len);
+ return -EINVAL;
+ }
+ ret = remap_pfn_range(vma, vma->vm_start,
+ __phys_to_pfn(phys_addr) + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ }
+ return 0;
+}
+
+
+bool msm_audio_ion_is_smmu_available(void)
+{
+ return msm_audio_ion_data.smmu_enabled;
+}
+
+/* TODO: move this back to the static section */
+struct ion_client *msm_audio_ion_client_create(const char *name)
+{
+ struct ion_client *pclient = NULL;
+
+ pclient = hvenv_ion_client_create(name);
+ return pclient;
+}
+
+
+void msm_audio_ion_client_destroy(struct ion_client *client)
+{
+ pr_debug("%s: client = %pK smmu_enabled = %d\n", __func__,
+ client, msm_audio_ion_data.smmu_enabled);
+
+ ion_client_destroy(client);
+}
+
+int msm_audio_ion_import_legacy(const char *name, struct ion_client *client,
+ struct ion_handle **handle, int fd,
+ unsigned long *ionflag, size_t bufsz,
+ ion_phys_addr_t *paddr, size_t *pa_len, void **vaddr)
+{
+ int rc = 0;
+
+ if (!name || !client || !handle || !paddr || !vaddr || !pa_len) {
+ pr_err("%s: Invalid params\n", __func__);
+ rc = -EINVAL;
+ goto err;
+ }
+ /* the client is already created and passed in on the legacy path */
+ /* name should be audio_acdb_client or Audio_Dec_Client,
+ * bufsz should be 0 and fd shouldn't be 0 as of now
+ */
+ *handle = ion_import_dma_buf(client, fd);
+ pr_debug("%s: DMA Buf name=%s, fd=%d handle=%pK\n", __func__,
+ name, fd, *handle);
+ if (IS_ERR_OR_NULL((void *)(*handle))) {
+ pr_err("%s: ion import dma buffer failed\n",
+ __func__);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ if (ionflag != NULL) {
+ rc = ion_handle_get_flags(client, *handle, ionflag);
+ if (rc) {
+ pr_err("%s: could not get flags for the handle\n",
+ __func__);
+ rc = -EINVAL;
+ goto err_ion_handle;
+ }
+ }
+
+ /* TODO: add a check for whether SMMU is enabled or not */
+ *vaddr = ion_map_kernel(client, *handle);
+ if (IS_ERR_OR_NULL((void *)*vaddr)) {
+ pr_err("%s: ION memory mapping for AUDIO failed\n", __func__);
+ rc = -EINVAL;
+ goto err_ion_handle;
+ }
+
+ if (bufsz != 0)
+ memset((void *)*vaddr, 0, bufsz);
+
+ rc = msm_audio_ion_get_phys(client, *handle, paddr, pa_len, *vaddr);
+ if (rc) {
+ pr_err("%s: ION Get Physical for AUDIO failed, rc = %d\n",
+ __func__, rc);
+ goto err_get_phys;
+ }
+
+ return 0;
+
+err_get_phys:
+ ion_unmap_kernel(client, *handle);
+err_ion_handle:
+ ion_free(client, *handle);
+err:
+ return rc;
+}
+
+int msm_audio_ion_free_legacy(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ ion_unmap_kernel(client, handle);
+
+ ion_free(client, handle);
+ /* no client destroy in the legacy path */
+ return 0;
+}
+
+static int msm_audio_ion_get_phys(struct ion_client *client,
+ struct ion_handle *handle,
+ ion_phys_addr_t *addr, size_t *len, void *vaddr)
+{
+ int rc = 0;
+
+ pr_debug("%s: smmu_enabled = %d\n", __func__,
+ msm_audio_ion_data.smmu_enabled);
+
+ if (msm_audio_ion_data.smmu_enabled) {
+ rc = msm_audio_ion_smmu_map(client, handle, addr, len, vaddr);
+ if (rc) {
+ pr_err("%s: failed to do smmu map, err = %d\n",
+ __func__, rc);
+ goto err;
+ }
+ } else {
+ rc = ion_phys(client, handle, addr, len);
+ }
+
+ pr_debug("%s: phys=%pK, len=%zd, rc=%d\n",
+ __func__, &(*addr), *len, rc);
+err:
+ return rc;
+}
+
+static const struct of_device_id msm_audio_ion_dt_match[] = {
+ { .compatible = "qcom,msm-audio-ion-vm" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, msm_audio_ion_dt_match);
+
+u32 msm_audio_populate_upper_32_bits(ion_phys_addr_t pa)
+{
+ return upper_32_bits(pa);
+}
+
+static int msm_audio_ion_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ const char *msm_audio_ion_dt = "qcom,smmu-enabled";
+ bool smmu_enabled;
+ struct device *dev = &pdev->dev;
+
+ if (dev->of_node == NULL) {
+ pr_err("%s: device tree is not found\n",
+ __func__);
+ msm_audio_ion_data.smmu_enabled = 0;
+ return 0;
+ }
+
+ smmu_enabled = of_property_read_bool(dev->of_node,
+ msm_audio_ion_dt);
+ msm_audio_ion_data.smmu_enabled = smmu_enabled;
+
+ pr_info("%s: SMMU is %s\n", __func__,
+ (smmu_enabled) ? "Enabled" : "Disabled");
+
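+ /*
+ * With SMMU enabled, every map/unmap request is proxied to the
+ * backend over a dedicated HAB channel, so open it here and
+ * initialize the list and mutex that track active mappings.
+ */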
+ if (smmu_enabled) {
+ rc = habmm_socket_open(&msm_audio_ion_hab_handle,
+ HAB_MMID_CREATE(MM_AUD_3,
+ MSM_AUDIO_SMMU_VM_HAB_MINOR_ID),
+ 0xFFFFFFFF,
+ HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_FE);
+ if (rc) {
+ pr_err("%s: habmm_socket_open failed %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ pr_info("%s: msm_audio_ion_hab_handle %x\n",
+ __func__, msm_audio_ion_hab_handle);
+
+ INIT_LIST_HEAD(&msm_audio_ion_data.smmu_map_list);
+ mutex_init(&(msm_audio_ion_data.smmu_map_mutex));
+ }
+
+ if (!rc)
+ msm_audio_ion_data.device_status |= MSM_AUDIO_ION_PROBED;
+
+ return rc;
+}
+
+static int msm_audio_ion_remove(struct platform_device *pdev)
+{
+ if (msm_audio_ion_data.smmu_enabled) {
+ if (msm_audio_ion_hab_handle)
+ habmm_socket_close(msm_audio_ion_hab_handle);
+
+ mutex_destroy(&(msm_audio_ion_data.smmu_map_mutex));
+ }
+ msm_audio_ion_data.smmu_enabled = 0;
+ msm_audio_ion_data.device_status = 0;
+
+ return 0;
+}
+
+static struct platform_driver msm_audio_ion_driver = {
+ .driver = {
+ .name = "msm-audio-ion-vm",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_audio_ion_dt_match,
+ },
+ .probe = msm_audio_ion_probe,
+ .remove = msm_audio_ion_remove,
+};
+
+static int __init msm_audio_ion_init(void)
+{
+ return platform_driver_register(&msm_audio_ion_driver);
+}
+module_init(msm_audio_ion_init);
+
+static void __exit msm_audio_ion_exit(void)
+{
+ platform_driver_unregister(&msm_audio_ion_driver);
+}
+module_exit(msm_audio_ion_exit);
+
+MODULE_DESCRIPTION("MSM Audio ION VM module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger.c b/drivers/staging/android/fiq_debugger/fiq_debugger.c
index 15ff4e149d75..08113a342eed 100644
--- a/drivers/staging/android/fiq_debugger/fiq_debugger.c
+++ b/drivers/staging/android/fiq_debugger/fiq_debugger.c
@@ -402,7 +402,7 @@ static void fiq_debugger_work(struct work_struct *work)
cmd += 6;
while (*cmd == ' ')
cmd++;
- if ((cmd != '\0') && sysrq_on())
+ if ((*cmd != '\0') && sysrq_on())
kernel_restart(cmd);
else
kernel_restart(NULL);
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 20314ff08be0..abc66908681d 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -205,11 +205,9 @@ static int ad7192_setup(struct ad7192_state *st,
struct iio_dev *indio_dev = spi_get_drvdata(st->sd.spi);
unsigned long long scale_uv;
int i, ret, id;
- u8 ones[6];
/* reset the serial interface */
- memset(&ones, 0xFF, 6);
- ret = spi_write(st->sd.spi, &ones, 6);
+ ret = ad_sd_reset(&st->sd, 48);
if (ret < 0)
goto out;
usleep_range(500, 1000); /* Wait for at least 500us */
diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
index 60871f3022b1..12a3893b98fd 100644
--- a/drivers/staging/rts5208/rtsx_scsi.c
+++ b/drivers/staging/rts5208/rtsx_scsi.c
@@ -414,7 +414,7 @@ void set_sense_data(struct rtsx_chip *chip, unsigned int lun, u8 err_code,
sense->ascq = ascq;
if (sns_key_info0 != 0) {
sense->sns_key_info[0] = SKSV | sns_key_info0;
- sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 8;
+ sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 4;
sense->sns_key_info[2] = sns_key_info1 & 0x0f;
}
}
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 6c88fb021444..4eeb82cf79e4 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -44,10 +44,8 @@ void iscsit_set_dataout_sequence_values(
*/
if (cmd->unsolicited_data) {
cmd->seq_start_offset = cmd->write_data_done;
- cmd->seq_end_offset = (cmd->write_data_done +
- ((cmd->se_cmd.data_length >
- conn->sess->sess_ops->FirstBurstLength) ?
- conn->sess->sess_ops->FirstBurstLength : cmd->se_cmd.data_length));
+ cmd->seq_end_offset = min(cmd->se_cmd.data_length,
+ conn->sess->sess_ops->FirstBurstLength);
return;
}
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 416006a3384c..c2c9b9361d64 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -218,7 +218,7 @@ struct msm_hs_wakeup {
};
struct msm_hs_port {
- bool startup_locked;
+ atomic_t startup_locked;
struct uart_port uport;
unsigned long imr_reg; /* shadow value of UARTDM_IMR */
struct clk *clk;
@@ -649,7 +649,6 @@ static int msm_serial_loopback_enable_set(void *data, u64 val)
unsigned long flags;
int ret = 0;
- msm_uport->startup_locked = true;
msm_hs_resource_vote(msm_uport);
if (val) {
@@ -669,7 +668,6 @@ static int msm_serial_loopback_enable_set(void *data, u64 val)
}
/* Calling CLOCK API. Hence mb() requires here. */
mb();
- msm_uport->startup_locked = false;
msm_hs_resource_unvote(msm_uport);
return 0;
}
@@ -681,13 +679,11 @@ static int msm_serial_loopback_enable_get(void *data, u64 *val)
unsigned long flags;
int ret = 0;
- msm_uport->startup_locked = true;
msm_hs_resource_vote(msm_uport);
spin_lock_irqsave(&uport->lock, flags);
ret = msm_hs_read(&msm_uport->uport, UART_DM_MR2);
spin_unlock_irqrestore(&uport->lock, flags);
- msm_uport->startup_locked = false;
msm_hs_resource_unvote(msm_uport);
@@ -1372,12 +1368,9 @@ static void msm_hs_stop_rx_locked(struct uart_port *uport)
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
- MSM_HS_WARN("%s(): Clocks are off\n", __func__);
- /* Make sure resource_on doesn't get called */
- if (msm_hs_clk_bus_vote(msm_uport))
- MSM_HS_ERR("%s:Failed clock vote\n", __func__);
- msm_hs_disable_rx(uport);
- msm_hs_clk_bus_unvote(msm_uport);
+ MSM_HS_WARN("%s(): Clocks are off, Rx still active\n",
+ __func__);
+ return;
} else
msm_hs_disable_rx(uport);
@@ -1421,7 +1414,7 @@ void tx_timeout_handler(unsigned long arg)
if (UARTDM_ISR_CURRENT_CTS_BMSK & isr)
MSM_HS_WARN("%s(): CTS Disabled, ISR 0x%x", __func__, isr);
dump_uart_hs_registers(msm_uport);
- /* Stop further loging */
+ /* Stop further logging */
MSM_HS_ERR("%s(): Stop IPC logging\n", __func__);
}
@@ -1868,12 +1861,6 @@ static void msm_hs_start_tx_locked(struct uart_port *uport)
struct msm_hs_tx *tx = &msm_uport->tx;
unsigned int isr;
- if (msm_uport->startup_locked) {
- MSM_HS_DBG("%s(): No Tx Request, startup_locked=%d\n",
- __func__, msm_uport->startup_locked);
- return;
- }
-
/* Bail if transfer in progress */
if (tx->flush < FLUSH_STOP || tx->dma_in_flight) {
MSM_HS_INFO("%s(): retry, flush %d, dma_in_flight %d\n",
@@ -1881,9 +1868,12 @@ static void msm_hs_start_tx_locked(struct uart_port *uport)
if (msm_uport->pm_state == MSM_HS_PM_ACTIVE) {
isr = msm_hs_read(uport, UART_DM_ISR);
- if (UARTDM_ISR_CURRENT_CTS_BMSK & isr)
- MSM_HS_DBG("%s():CTS 1: Peer is Busy, ISR 0x%x",
- __func__, isr);
+ if (UARTDM_ISR_CURRENT_CTS_BMSK & isr) {
+ MSM_HS_DBG("%s():CTS 1: Peer is Busy\n",
+ __func__);
+ MSM_HS_DBG("%s():ISR 0x%x\n",
+ __func__, isr);
+ }
} else
MSM_HS_WARN("%s(): Clocks are off\n", __func__);
@@ -2364,11 +2354,11 @@ void msm_hs_resource_on(struct msm_hs_port *msm_uport)
unsigned int data;
unsigned long flags;
- if (msm_uport->startup_locked) {
- MSM_HS_WARN("%s(): startup_locked=%d\n",
- __func__, msm_uport->startup_locked);
+ if (atomic_read(&msm_uport->startup_locked)) {
+ MSM_HS_DBG("%s(): Port open in progress\n", __func__);
return;
}
+ msm_hs_disable_flow_control(uport, false);
if (msm_uport->rx.flush == FLUSH_SHUTDOWN ||
msm_uport->rx.flush == FLUSH_STOP) {
@@ -2387,6 +2377,8 @@ void msm_hs_resource_on(struct msm_hs_port *msm_uport)
spin_unlock_irqrestore(&uport->lock, flags);
}
msm_hs_spsconnect_tx(msm_uport);
+
+ msm_hs_enable_flow_control(uport, false);
}
/* Request to turn off uart clock once pending TX is flushed */
@@ -2679,7 +2671,7 @@ static int msm_hs_startup(struct uart_port *uport)
struct sps_pipe *sps_pipe_handle_tx = tx->cons.pipe_handle;
struct sps_pipe *sps_pipe_handle_rx = rx->prod.pipe_handle;
- msm_uport->startup_locked = true;
+ atomic_set(&msm_uport->startup_locked, 1);
rfr_level = uport->fifosize;
if (rfr_level > 16)
rfr_level -= 16;
@@ -2809,7 +2801,7 @@ static int msm_hs_startup(struct uart_port *uport)
atomic_set(&msm_uport->client_req_state, 0);
LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
"%s: Client_Count 0\n", __func__);
- msm_uport->startup_locked = false;
+ atomic_set(&msm_uport->startup_locked, 0);
msm_hs_start_rx_locked(uport);
spin_unlock_irqrestore(&uport->lock, flags);
@@ -2826,6 +2818,7 @@ unconfig_uart_gpios:
free_uart_irq:
free_irq(uport->irq, msm_uport);
unvote_exit:
+ atomic_set(&msm_uport->startup_locked, 0);
msm_hs_resource_unvote(msm_uport);
MSM_HS_ERR("%s(): Error return\n", __func__);
return ret;
@@ -3238,8 +3231,6 @@ static void msm_hs_pm_suspend(struct device *dev)
msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
msm_hs_resource_off(msm_uport);
obs_manage_irq(msm_uport, false);
- if (!atomic_read(&msm_uport->client_req_state))
- enable_wakeup_interrupt(msm_uport);
msm_hs_clk_bus_unvote(msm_uport);
/* For OBS, don't use wakeup interrupt, set gpio to suspended state */
@@ -3251,6 +3242,8 @@ static void msm_hs_pm_suspend(struct device *dev)
__func__);
}
+ if (!atomic_read(&msm_uport->client_req_state))
+ enable_wakeup_interrupt(msm_uport);
LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
"%s: PM State Suspended client_count %d\n", __func__,
client_count);
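
The msm_serial_hs hunks above replace a plain bool flag with an atomic_t so the resource-on path can test it safely without taking the port lock. A minimal sketch of the same pattern, using hypothetical demo_* names rather than the driver's real structures:

#include <linux/atomic.h>

struct demo_port {
	atomic_t startup_locked;	/* 1 while the open path owns the hardware */
};

static void demo_startup(struct demo_port *p)
{
	atomic_set(&p->startup_locked, 1);	/* block resource_on while configuring */
	/* ... program the hardware ... */
	atomic_set(&p->startup_locked, 0);	/* also cleared on every error path */
}

static void demo_resource_on(struct demo_port *p)
{
	if (atomic_read(&p->startup_locked))
		return;				/* port open in progress, skip restore */
	/* ... restore flow control, reconnect pipes ... */
}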
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index 4e603d060e80..59828d819145 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -398,6 +398,12 @@ static struct uart_driver sunhv_reg = {
static struct uart_port *sunhv_port;
+void sunhv_migrate_hvcons_irq(int cpu)
+{
+ /* Migrate the hvcons irq to the requested cpu */

+ irq_force_affinity(sunhv_port->irq, cpumask_of(cpu));
+}
+
/* Copy 's' into the con_write_page, decoding "\n" into
* "\r\n" along the way. We have to return two lengths
* because the caller needs to know how much to advance
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index fb31eecb708d..8f3566cde3eb 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -362,6 +362,32 @@ int tty_insert_flip_string_flags(struct tty_port *port,
EXPORT_SYMBOL(tty_insert_flip_string_flags);
/**
+ * __tty_insert_flip_char - Add one character to the tty buffer
+ * @port: tty port
+ * @ch: character
+ * @flag: flag byte
+ *
+ * Queue a single byte to the tty buffering, with an optional flag.
+ * This is the slow path of tty_insert_flip_char.
+ */
+int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag)
+{
+ struct tty_buffer *tb;
+ int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0;
+
+ if (!__tty_buffer_request_room(port, 1, flags))
+ return 0;
+
+ tb = port->buf.tail;
+ if (~tb->flags & TTYB_NORMAL)
+ *flag_buf_ptr(tb, tb->used) = flag;
+ *char_buf_ptr(tb, tb->used++) = ch;
+
+ return 1;
+}
+EXPORT_SYMBOL(__tty_insert_flip_char);
+
+/**
* tty_schedule_flip - push characters to ldisc
* @port: tty port to push from
*
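
__tty_insert_flip_char() added above is the slow path taken when the tail buffer has no room; the common case goes through the tty_insert_flip_char() inline helper. A hedged sketch of how a serial driver typically feeds received bytes to the line discipline (demo_rx_byte is illustrative, not from this patch):

#include <linux/tty_flip.h>

/* Push one received byte (plus error flag) toward the line discipline. */
static void demo_rx_byte(struct tty_port *port, unsigned char ch, bool frame_err)
{
	char flag = frame_err ? TTY_FRAME : TTY_NORMAL;

	/* Inline fast path; falls back to __tty_insert_flip_char() when the
	 * tail buffer is full or carries per-character flags. */
	tty_insert_flip_char(port, ch, flag);
	tty_flip_buffer_push(port);
}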
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index 0cf149edddd8..f36a1ac3bfbd 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -134,9 +134,9 @@ void ci_handle_vbus_change(struct ci_hdrc *ci)
if (!ci->is_otg)
return;
- if (hw_read_otgsc(ci, OTGSC_BSV))
+ if (hw_read_otgsc(ci, OTGSC_BSV) && !ci->vbus_active)
usb_gadget_vbus_connect(&ci->gadget);
- else
+ else if (!hw_read_otgsc(ci, OTGSC_BSV) && ci->vbus_active)
usb_gadget_vbus_disconnect(&ci->gadget);
}
@@ -175,14 +175,21 @@ static void ci_handle_id_switch(struct ci_hdrc *ci)
ci_role_stop(ci);
- if (role == CI_ROLE_GADGET)
+ if (role == CI_ROLE_GADGET &&
+ IS_ERR(ci->platdata->vbus_extcon.edev))
/*
- * wait vbus lower than OTGSC_BSV before connecting
- * to host
+ * Wait for vbus to drop below OTGSC_BSV before connecting
+ * to the host. If the connection status comes from an
+ * external connector instead of the register, we don't need
+ * to care about vbus on the board, since it will not affect
+ * the external connector status.
*/
hw_wait_vbus_lower_bsv(ci);
ci_role_start(ci, role);
+ /* vbus change may have already occurred */
+ if (role == CI_ROLE_GADGET)
+ ci_handle_vbus_change(ci);
}
}
/**
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index b1298f093f13..dd5c038c71fd 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -609,15 +609,23 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
} else if (header->bDescriptorType ==
USB_DT_INTERFACE_ASSOCIATION) {
+ struct usb_interface_assoc_descriptor *d;
+
+ d = (struct usb_interface_assoc_descriptor *)header;
+ if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE) {
+ dev_warn(ddev,
+ "config %d has an invalid interface association descriptor of length %d, skipping\n",
+ cfgno, d->bLength);
+ continue;
+ }
+
if (iad_num == USB_MAXIADS) {
dev_warn(ddev, "found more Interface "
"Association Descriptors "
"than allocated for in "
"configuration %d\n", cfgno);
} else {
- config->intf_assoc[iad_num] =
- (struct usb_interface_assoc_descriptor
- *)header;
+ config->intf_assoc[iad_num] = d;
iad_num++;
}
@@ -818,7 +826,7 @@ int usb_get_configuration(struct usb_device *dev)
}
if (dev->quirks & USB_QUIRK_DELAY_INIT)
- msleep(100);
+ msleep(200);
result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
bigbuffer, length);
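
The config.c hunk validates bLength before trusting a descriptor cast, which is the general rule for anything walked out of a raw configuration buffer. A minimal sketch of that validation pattern (demo_check_iad is a hypothetical helper):

#include <linux/usb/ch9.h>

/*
 * Return the descriptor only if its declared length covers the structure
 * we are about to dereference; otherwise reject it.
 */
static struct usb_interface_assoc_descriptor *
demo_check_iad(struct usb_descriptor_header *header)
{
	struct usb_interface_assoc_descriptor *d;

	if (header->bDescriptorType != USB_DT_INTERFACE_ASSOCIATION)
		return NULL;

	d = (struct usb_interface_assoc_descriptor *)header;
	if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE)
		return NULL;	/* malformed: shorter than the struct itself */

	return d;
}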
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 54d2d6b604c0..bd9419213d06 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -519,6 +519,8 @@ static void async_completed(struct urb *urb)
if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
as->status != -ENOENT)
cancel_bulk_urbs(ps, as->bulk_addr);
+
+ wake_up(&ps->wait);
spin_unlock(&ps->lock);
if (signr) {
@@ -526,8 +528,6 @@ static void async_completed(struct urb *urb)
put_pid(pid);
put_cred(cred);
}
-
- wake_up(&ps->wait);
}
static void destroy_async(struct usb_dev_state *ps, struct list_head *list)
@@ -1417,7 +1417,11 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
totlen += isopkt[u].length;
}
u *= sizeof(struct usb_iso_packet_descriptor);
- uurb->buffer_length = totlen;
+ if (totlen <= uurb->buffer_length)
+ uurb->buffer_length = totlen;
+ else
+ WARN_ONCE(1, "uurb->buffer_length is too short %d vs %d",
+ totlen, uurb->buffer_length);
break;
default:
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 370ad9690349..1f685ea17d7f 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4774,7 +4774,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
goto loop;
if (udev->quirks & USB_QUIRK_DELAY_INIT)
- msleep(1000);
+ msleep(2000);
/* consecutive bus-powered hubs aren't reliable; they can
* violate the voltage drop budget. if the new child has
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 574da2b4529c..82806e311202 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -57,8 +57,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Microsoft LifeCam-VX700 v2.0 */
{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
- /* Logitech HD Pro Webcams C920 and C930e */
+ /* Logitech HD Pro Webcams C920, C920-C and C930e */
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
/* Logitech ConferenceCam CC3000e */
@@ -217,6 +218,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x1a0a, 0x0200), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ /* Corsair Strafe RGB */
+ { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Acer C120 LED Projector */
{ USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 22ba45f40f0b..723314583ccf 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -2095,6 +2095,8 @@ static DEVICE_ATTR_RO(suspended);
static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
{
struct usb_composite_dev *cdev = get_gadget_data(gadget);
+ struct usb_gadget_strings *gstr = cdev->driver->strings[0];
+ struct usb_string *dev_str = gstr->strings;
/* composite_disconnect() must already have been called
* by the underlying peripheral controller driver!
@@ -2114,6 +2116,9 @@ static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
composite_dev_cleanup(cdev);
+ if (dev_str[USB_GADGET_MANUFACTURER_IDX].s == cdev->def_manufacturer)
+ dev_str[USB_GADGET_MANUFACTURER_IDX].s = "";
+
kfree(cdev->def_manufacturer);
kfree(cdev);
set_gadget_data(gadget, NULL);
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 9b7274821d0b..7edc981c78b6 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -306,8 +306,6 @@ struct fsg_common {
struct completion thread_notifier;
struct task_struct *thread_task;
- /* Callback functions. */
- const struct fsg_operations *ops;
/* Gadget's private data. */
void *private_data;
@@ -2539,6 +2537,7 @@ static void handle_exception(struct fsg_common *common)
static int fsg_main_thread(void *common_)
{
struct fsg_common *common = common_;
+ int i;
/*
* Allow the thread to be killed by a signal, but set the signal mask
@@ -2600,21 +2599,16 @@ static int fsg_main_thread(void *common_)
common->thread_task = NULL;
spin_unlock_irq(&common->lock);
- if (!common->ops || !common->ops->thread_exits
- || common->ops->thread_exits(common) < 0) {
- int i;
+ /* Eject media from all LUNs */
- down_write(&common->filesem);
- for (i = 0; i < ARRAY_SIZE(common->luns); --i) {
- struct fsg_lun *curlun = common->luns[i];
- if (!curlun || !fsg_lun_is_open(curlun))
- continue;
+ down_write(&common->filesem);
+ for (i = 0; i < ARRAY_SIZE(common->luns); i++) {
+ struct fsg_lun *curlun = common->luns[i];
+ if (curlun && fsg_lun_is_open(curlun))
fsg_lun_close(curlun);
- curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
- }
- up_write(&common->filesem);
}
+ up_write(&common->filesem);
/* Let fsg_unbind() know the thread has exited */
complete_and_exit(&common->thread_notifier, 0);
@@ -2820,13 +2814,6 @@ void fsg_common_remove_luns(struct fsg_common *common)
}
EXPORT_SYMBOL_GPL(fsg_common_remove_luns);
-void fsg_common_set_ops(struct fsg_common *common,
- const struct fsg_operations *ops)
-{
- common->ops = ops;
-}
-EXPORT_SYMBOL_GPL(fsg_common_set_ops);
-
void fsg_common_free_buffers(struct fsg_common *common)
{
_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
diff --git a/drivers/usb/gadget/function/f_mass_storage.h b/drivers/usb/gadget/function/f_mass_storage.h
index b6a9918eaefb..dfa2176f43c2 100644
--- a/drivers/usb/gadget/function/f_mass_storage.h
+++ b/drivers/usb/gadget/function/f_mass_storage.h
@@ -60,17 +60,6 @@ struct fsg_module_parameters {
struct fsg_common;
/* FSF callback functions */
-struct fsg_operations {
- /*
- * Callback function to call when thread exits. If no
- * callback is set or it returns value lower then zero MSF
- * will force eject all LUNs it operates on (including those
- * marked as non-removable or with prevent_medium_removal flag
- * set).
- */
- int (*thread_exits)(struct fsg_common *common);
-};
-
struct fsg_lun_opts {
struct config_group group;
struct fsg_lun *lun;
@@ -141,9 +130,6 @@ void fsg_common_remove_lun(struct fsg_lun *lun);
void fsg_common_remove_luns(struct fsg_common *common);
-void fsg_common_set_ops(struct fsg_common *common,
- const struct fsg_operations *ops);
-
int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
unsigned int id, const char *name,
const char **name_pfx);
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 2a7d57cd14cb..8be8d10a0384 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1652,6 +1652,8 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
DBG(c->cdev, "ncm unbind\n");
+ opts->bound = false;
+
hrtimer_cancel(&ncm->task_timer);
tasklet_kill(&ncm->tx_tasklet);
@@ -1662,7 +1664,6 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
usb_ep_free_request(ncm->notify, ncm->notify_req);
gether_cleanup(netdev_priv(opts->net));
- opts->bound = false;
}
static struct usb_function *ncm_alloc(struct usb_function_instance *fi)
diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h
index 4f47289fcf7c..0468459a5c0f 100644
--- a/drivers/usb/gadget/function/u_ether_configfs.h
+++ b/drivers/usb/gadget/function/u_ether_configfs.h
@@ -35,6 +35,11 @@
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
int result; \
\
+ if (opts->bound == false) { \
+ pr_err("Gadget function do not bind yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
result = gether_get_dev_addr(opts->net, page, PAGE_SIZE); \
mutex_unlock(&opts->lock); \
@@ -48,6 +53,11 @@
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
int ret; \
\
+ if (opts->bound == false) { \
+ pr_err("Gadget function do not bind yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
mutex_unlock(&opts->lock); \
@@ -70,6 +80,11 @@
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
int result; \
\
+ if (opts->bound == false) { \
+ pr_err("Gadget function do not bind yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
result = gether_get_host_addr(opts->net, page, PAGE_SIZE); \
mutex_unlock(&opts->lock); \
@@ -83,6 +98,11 @@
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
int ret; \
\
+ if (opts->bound == false) { \
+ pr_err("Gadget function do not bind yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
mutex_unlock(&opts->lock); \
@@ -105,6 +125,11 @@
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
unsigned qmult; \
\
+ if (opts->bound == false) { \
+ pr_err("Gadget function do not bind yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
qmult = gether_get_qmult(opts->net); \
mutex_unlock(&opts->lock); \
@@ -118,6 +143,11 @@
u8 val; \
int ret; \
\
+ if (opts->bound == false) { \
+ pr_err("Gadget function do not bind yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
ret = -EBUSY; \
@@ -144,6 +174,11 @@ out: \
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
int ret; \
\
+ if (opts->bound == false) { \
+ pr_err("Gadget function do not bind yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
ret = gether_get_ifname(opts->net, page, PAGE_SIZE); \
mutex_unlock(&opts->lock); \
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 43ce2cfcdb4d..b6df47aa25af 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -27,7 +27,7 @@
#include <linux/mmu_context.h>
#include <linux/aio.h>
#include <linux/uio.h>
-
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/moduleparam.h>
@@ -116,6 +116,7 @@ enum ep0_state {
struct dev_data {
spinlock_t lock;
atomic_t count;
+ int udc_usage;
enum ep0_state state; /* P: lock */
struct usb_gadgetfs_event event [N_EVENT];
unsigned ev_next;
@@ -512,9 +513,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
INIT_WORK(&priv->work, ep_user_copy_worker);
schedule_work(&priv->work);
}
- spin_unlock(&epdata->dev->lock);
usb_ep_free_request(ep, req);
+ spin_unlock(&epdata->dev->lock);
put_ep(epdata);
}
@@ -938,9 +939,11 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
struct usb_request *req = dev->req;
if ((retval = setup_req (ep, req, 0)) == 0) {
+ ++dev->udc_usage;
spin_unlock_irq (&dev->lock);
retval = usb_ep_queue (ep, req, GFP_KERNEL);
spin_lock_irq (&dev->lock);
+ --dev->udc_usage;
}
dev->state = STATE_DEV_CONNECTED;
@@ -982,11 +985,14 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
retval = -EIO;
else {
len = min (len, (size_t)dev->req->actual);
-// FIXME don't call this with the spinlock held ...
+ ++dev->udc_usage;
+ spin_unlock_irq(&dev->lock);
if (copy_to_user (buf, dev->req->buf, len))
retval = -EFAULT;
else
retval = len;
+ spin_lock_irq(&dev->lock);
+ --dev->udc_usage;
clean_req (dev->gadget->ep0, dev->req);
/* NOTE userspace can't yet choose to stall */
}
@@ -1130,6 +1136,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
retval = setup_req (dev->gadget->ep0, dev->req, len);
if (retval == 0) {
dev->state = STATE_DEV_CONNECTED;
+ ++dev->udc_usage;
spin_unlock_irq (&dev->lock);
if (copy_from_user (dev->req->buf, buf, len))
retval = -EFAULT;
@@ -1140,10 +1147,10 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
dev->gadget->ep0, dev->req,
GFP_KERNEL);
}
+ spin_lock_irq(&dev->lock);
+ --dev->udc_usage;
if (retval < 0) {
- spin_lock_irq (&dev->lock);
clean_req (dev->gadget->ep0, dev->req);
- spin_unlock_irq (&dev->lock);
} else
retval = len;
@@ -1240,9 +1247,21 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
struct usb_gadget *gadget = dev->gadget;
long ret = -ENOTTY;
- if (gadget->ops->ioctl)
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_DEV_OPENED ||
+ dev->state == STATE_DEV_UNBOUND) {
+ /* Not bound to a UDC */
+ } else if (gadget->ops->ioctl) {
+ ++dev->udc_usage;
+ spin_unlock_irq(&dev->lock);
+
ret = gadget->ops->ioctl (gadget, code, value);
+ spin_lock_irq(&dev->lock);
+ --dev->udc_usage;
+ }
+ spin_unlock_irq(&dev->lock);
+
return ret;
}
@@ -1460,10 +1479,12 @@ delegate:
if (value < 0)
break;
+ ++dev->udc_usage;
spin_unlock (&dev->lock);
value = usb_ep_queue (gadget->ep0, dev->req,
GFP_KERNEL);
spin_lock (&dev->lock);
+ --dev->udc_usage;
if (value < 0) {
clean_req (gadget->ep0, dev->req);
break;
@@ -1487,8 +1508,12 @@ delegate:
req->length = value;
req->zero = value < w_length;
+ ++dev->udc_usage;
spin_unlock (&dev->lock);
value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
+ spin_lock(&dev->lock);
+ --dev->udc_usage;
+ spin_unlock(&dev->lock);
if (value < 0) {
DBG (dev, "ep_queue --> %d\n", value);
req->status = 0;
@@ -1515,21 +1540,24 @@ static void destroy_ep_files (struct dev_data *dev)
/* break link to FS */
ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
list_del_init (&ep->epfiles);
+ spin_unlock_irq (&dev->lock);
+
dentry = ep->dentry;
ep->dentry = NULL;
parent = d_inode(dentry->d_parent);
/* break link to controller */
+ mutex_lock(&ep->lock);
if (ep->state == STATE_EP_ENABLED)
(void) usb_ep_disable (ep->ep);
ep->state = STATE_EP_UNBOUND;
usb_ep_free_request (ep->ep, ep->req);
ep->ep = NULL;
+ mutex_unlock(&ep->lock);
+
wake_up (&ep->wait);
put_ep (ep);
- spin_unlock_irq (&dev->lock);
-
/* break link to dcache */
mutex_lock (&parent->i_mutex);
d_delete (dentry);
@@ -1600,6 +1628,11 @@ gadgetfs_unbind (struct usb_gadget *gadget)
spin_lock_irq (&dev->lock);
dev->state = STATE_DEV_UNBOUND;
+ while (dev->udc_usage > 0) {
+ spin_unlock_irq(&dev->lock);
+ usleep_range(1000, 2000);
+ spin_lock_irq(&dev->lock);
+ }
spin_unlock_irq (&dev->lock);
destroy_ep_files (dev);
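
The gadgetfs changes above introduce a udc_usage counter so the spinlock can be dropped around calls into the UDC while unbind still waits for those calls to drain. A stripped-down sketch of this "usage count plus sleep-wait" pattern, with hypothetical demo_* names:

#include <linux/spinlock.h>
#include <linux/delay.h>

struct demo_dev {
	spinlock_t lock;
	int udc_usage;		/* callers currently inside the UDC, lock dropped */
};

static void demo_call_udc(struct demo_dev *dev)
{
	spin_lock_irq(&dev->lock);
	++dev->udc_usage;
	spin_unlock_irq(&dev->lock);

	/* ... call into the UDC without holding the spinlock ... */

	spin_lock_irq(&dev->lock);
	--dev->udc_usage;
	spin_unlock_irq(&dev->lock);
}

static void demo_unbind(struct demo_dev *dev)
{
	spin_lock_irq(&dev->lock);
	while (dev->udc_usage > 0) {	/* wait for in-flight UDC calls */
		spin_unlock_irq(&dev->lock);
		usleep_range(1000, 2000);
		spin_lock_irq(&dev->lock);
	}
	spin_unlock_irq(&dev->lock);
}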
diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
index 99aa22c81770..b0099d7c3886 100644
--- a/drivers/usb/gadget/legacy/mass_storage.c
+++ b/drivers/usb/gadget/legacy/mass_storage.c
@@ -107,15 +107,6 @@ static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS;
FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
-static unsigned long msg_registered;
-static void msg_cleanup(void);
-
-static int msg_thread_exits(struct fsg_common *common)
-{
- msg_cleanup();
- return 0;
-}
-
static int msg_do_config(struct usb_configuration *c)
{
struct fsg_opts *opts;
@@ -154,9 +145,6 @@ static struct usb_configuration msg_config_driver = {
static int msg_bind(struct usb_composite_dev *cdev)
{
- static const struct fsg_operations ops = {
- .thread_exits = msg_thread_exits,
- };
struct fsg_opts *opts;
struct fsg_config config;
int status;
@@ -173,8 +161,6 @@ static int msg_bind(struct usb_composite_dev *cdev)
if (status)
goto fail;
- fsg_common_set_ops(opts->common, &ops);
-
status = fsg_common_set_cdev(opts->common, cdev, config.can_stall);
if (status)
goto fail_set_cdev;
@@ -210,7 +196,6 @@ static int msg_bind(struct usb_composite_dev *cdev)
usb_composite_overwrite_options(cdev, &coverwrite);
dev_info(&cdev->gadget->dev,
DRIVER_DESC ", version: " DRIVER_VERSION "\n");
- set_bit(0, &msg_registered);
return 0;
fail_otg_desc:
@@ -261,9 +246,8 @@ static int __init msg_init(void)
}
module_init(msg_init);
-static void msg_cleanup(void)
+static void __exit msg_cleanup(void)
{
- if (test_and_clear_bit(0, &msg_registered))
- usb_composite_unregister(&msg_driver);
+ usb_composite_unregister(&msg_driver);
}
module_exit(msg_cleanup);
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index f92f5aff0dd5..585cb8734f50 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -28,6 +28,8 @@
#include <asm/gpio.h>
#include "atmel_usba_udc.h"
+#define USBA_VBUS_IRQFLAGS (IRQF_ONESHOT \
+ | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING)
#ifdef CONFIG_USB_GADGET_DEBUG_FS
#include <linux/debugfs.h>
@@ -2185,7 +2187,7 @@ static int usba_udc_probe(struct platform_device *pdev)
IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(&pdev->dev,
gpio_to_irq(udc->vbus_pin), NULL,
- usba_vbus_irq_thread, IRQF_ONESHOT,
+ usba_vbus_irq_thread, USBA_VBUS_IRQFLAGS,
"atmel_usba_udc", udc);
if (ret) {
udc->vbus_pin = -ENODEV;
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 64f404a1a072..8080a11947b7 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -237,6 +237,8 @@ struct dummy_hcd {
struct usb_device *udev;
struct list_head urbp_list;
+ struct urbp *next_frame_urbp;
+
u32 stream_en_ep;
u8 num_stream[30 / 2];
@@ -253,11 +255,13 @@ struct dummy {
*/
struct dummy_ep ep[DUMMY_ENDPOINTS];
int address;
+ int callback_usage;
struct usb_gadget gadget;
struct usb_gadget_driver *driver;
struct dummy_request fifo_req;
u8 fifo_buf[FIFO_SIZE];
u16 devstatus;
+ unsigned ints_enabled:1;
unsigned udc_suspended:1;
unsigned pullup:1;
@@ -416,6 +420,7 @@ static void set_link_state_by_speed(struct dummy_hcd *dum_hcd)
static void set_link_state(struct dummy_hcd *dum_hcd)
{
struct dummy *dum = dum_hcd->dum;
+ unsigned int power_bit;
dum_hcd->active = 0;
if (dum->pullup)
@@ -426,32 +431,43 @@ static void set_link_state(struct dummy_hcd *dum_hcd)
return;
set_link_state_by_speed(dum_hcd);
+ power_bit = (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3 ?
+ USB_SS_PORT_STAT_POWER : USB_PORT_STAT_POWER);
if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0 ||
dum_hcd->active)
dum_hcd->resuming = 0;
/* Currently !connected or in reset */
- if ((dum_hcd->port_status & USB_PORT_STAT_CONNECTION) == 0 ||
+ if ((dum_hcd->port_status & power_bit) == 0 ||
(dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) {
- unsigned disconnect = USB_PORT_STAT_CONNECTION &
+ unsigned int disconnect = power_bit &
dum_hcd->old_status & (~dum_hcd->port_status);
- unsigned reset = USB_PORT_STAT_RESET &
+ unsigned int reset = USB_PORT_STAT_RESET &
(~dum_hcd->old_status) & dum_hcd->port_status;
/* Report reset and disconnect events to the driver */
- if (dum->driver && (disconnect || reset)) {
+ if (dum->ints_enabled && (disconnect || reset)) {
stop_activity(dum);
+ ++dum->callback_usage;
+ spin_unlock(&dum->lock);
if (reset)
usb_gadget_udc_reset(&dum->gadget, dum->driver);
else
dum->driver->disconnect(&dum->gadget);
+ spin_lock(&dum->lock);
+ --dum->callback_usage;
}
- } else if (dum_hcd->active != dum_hcd->old_active) {
+ } else if (dum_hcd->active != dum_hcd->old_active &&
+ dum->ints_enabled) {
+ ++dum->callback_usage;
+ spin_unlock(&dum->lock);
if (dum_hcd->old_active && dum->driver->suspend)
dum->driver->suspend(&dum->gadget);
else if (!dum_hcd->old_active && dum->driver->resume)
dum->driver->resume(&dum->gadget);
+ spin_lock(&dum->lock);
+ --dum->callback_usage;
}
dum_hcd->old_status = dum_hcd->port_status;
@@ -967,8 +983,11 @@ static int dummy_udc_start(struct usb_gadget *g,
* can't enumerate without help from the driver we're binding.
*/
+ spin_lock_irq(&dum->lock);
dum->devstatus = 0;
dum->driver = driver;
+ dum->ints_enabled = 1;
+ spin_unlock_irq(&dum->lock);
return 0;
}
@@ -979,6 +998,16 @@ static int dummy_udc_stop(struct usb_gadget *g)
struct dummy *dum = dum_hcd->dum;
spin_lock_irq(&dum->lock);
+ dum->ints_enabled = 0;
+ stop_activity(dum);
+
+ /* emulate synchronize_irq(): wait for callbacks to finish */
+ while (dum->callback_usage > 0) {
+ spin_unlock_irq(&dum->lock);
+ usleep_range(1000, 2000);
+ spin_lock_irq(&dum->lock);
+ }
+
dum->driver = NULL;
spin_unlock_irq(&dum->lock);
@@ -1032,7 +1061,12 @@ static int dummy_udc_probe(struct platform_device *pdev)
memzero_explicit(&dum->gadget, sizeof(struct usb_gadget));
dum->gadget.name = gadget_name;
dum->gadget.ops = &dummy_ops;
- dum->gadget.max_speed = USB_SPEED_SUPER;
+ if (mod_data.is_super_speed)
+ dum->gadget.max_speed = USB_SPEED_SUPER;
+ else if (mod_data.is_high_speed)
+ dum->gadget.max_speed = USB_SPEED_HIGH;
+ else
+ dum->gadget.max_speed = USB_SPEED_FULL;
dum->gadget.dev.parent = &pdev->dev;
init_dummy_udc_hw(dum);
@@ -1241,6 +1275,8 @@ static int dummy_urb_enqueue(
list_add_tail(&urbp->urbp_list, &dum_hcd->urbp_list);
urb->hcpriv = urbp;
+ if (!dum_hcd->next_frame_urbp)
+ dum_hcd->next_frame_urbp = urbp;
if (usb_pipetype(urb->pipe) == PIPE_CONTROL)
urb->error_count = 1; /* mark as a new urb */
@@ -1517,6 +1553,8 @@ static struct dummy_ep *find_endpoint(struct dummy *dum, u8 address)
if (!is_active((dum->gadget.speed == USB_SPEED_SUPER ?
dum->ss_hcd : dum->hs_hcd)))
return NULL;
+ if (!dum->ints_enabled)
+ return NULL;
if ((address & ~USB_DIR_IN) == 0)
return &dum->ep[0];
for (i = 1; i < DUMMY_ENDPOINTS; i++) {
@@ -1758,6 +1796,7 @@ static void dummy_timer(unsigned long _dum_hcd)
spin_unlock_irqrestore(&dum->lock, flags);
return;
}
+ dum_hcd->next_frame_urbp = NULL;
for (i = 0; i < DUMMY_ENDPOINTS; i++) {
if (!ep_info[i].name)
@@ -1774,6 +1813,10 @@ restart:
int type;
int status = -EINPROGRESS;
+ /* stop when we reach URBs queued after the timer interrupt */
+ if (urbp == dum_hcd->next_frame_urbp)
+ break;
+
urb = urbp->urb;
if (urb->unlinked)
goto return_urb;
@@ -1853,10 +1896,12 @@ restart:
* until setup() returns; no reentrancy issues etc.
*/
if (value > 0) {
+ ++dum->callback_usage;
spin_unlock(&dum->lock);
value = dum->driver->setup(&dum->gadget,
&setup);
spin_lock(&dum->lock);
+ --dum->callback_usage;
if (value >= 0) {
/* no delays (max 64KB data stage) */
@@ -2564,8 +2609,6 @@ static struct hc_driver dummy_hcd = {
.product_desc = "Dummy host controller",
.hcd_priv_size = sizeof(struct dummy_hcd),
- .flags = HCD_USB3 | HCD_SHARED,
-
.reset = dummy_setup,
.start = dummy_start,
.stop = dummy_stop,
@@ -2594,8 +2637,12 @@ static int dummy_hcd_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "%s, driver " DRIVER_VERSION "\n", driver_desc);
dum = *((void **)dev_get_platdata(&pdev->dev));
- if (!mod_data.is_super_speed)
+ if (mod_data.is_super_speed)
+ dummy_hcd.flags = HCD_USB3 | HCD_SHARED;
+ else if (mod_data.is_high_speed)
dummy_hcd.flags = HCD_USB2;
+ else
+ dummy_hcd.flags = HCD_USB11;
hs_hcd = usb_create_hcd(&dummy_hcd, &pdev->dev, dev_name(&pdev->dev));
if (!hs_hcd)
return -ENOMEM;
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 03b9a372636f..89e9494c3245 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -133,29 +133,30 @@ static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
pinfo->sb_type.gen = AMD_CHIPSET_SB700;
else if (rev >= 0x40 && rev <= 0x4f)
pinfo->sb_type.gen = AMD_CHIPSET_SB800;
- }
- pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
- 0x145c, NULL);
- if (pinfo->smbus_dev) {
- pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
} else {
pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
- if (!pinfo->smbus_dev) {
- pinfo->sb_type.gen = NOT_AMD_CHIPSET;
- return 0;
+ if (pinfo->smbus_dev) {
+ rev = pinfo->smbus_dev->revision;
+ if (rev >= 0x11 && rev <= 0x14)
+ pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
+ else if (rev >= 0x15 && rev <= 0x18)
+ pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
+ else if (rev >= 0x39 && rev <= 0x3a)
+ pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
+ } else {
+ pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+ 0x145c, NULL);
+ if (pinfo->smbus_dev) {
+ rev = pinfo->smbus_dev->revision;
+ pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
+ } else {
+ pinfo->sb_type.gen = NOT_AMD_CHIPSET;
+ return 0;
+ }
}
-
- rev = pinfo->smbus_dev->revision;
- if (rev >= 0x11 && rev <= 0x14)
- pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
- else if (rev >= 0x15 && rev <= 0x18)
- pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
- else if (rev >= 0x39 && rev <= 0x3a)
- pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
}
-
pinfo->sb_type.rev = rev;
return 1;
}
@@ -968,7 +969,7 @@ EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
*
* Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
* It signals to the BIOS that the OS wants control of the host controller,
- * and then waits 5 seconds for the BIOS to hand over control.
+ * and then waits 1 second for the BIOS to hand over control.
* If we timeout, assume the BIOS is broken and take control anyway.
*/
static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
@@ -1014,9 +1015,9 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
if (val & XHCI_HC_BIOS_OWNED) {
writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
- /* Wait for 5 seconds with 10 microsecond polling interval */
+ /* Wait for 1 second with 10 microsecond polling interval */
timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
- 0, 5000, 10);
+ 0, 1000000, 10);
/* Assume a buggy BIOS and take HC ownership anyway */
if (timeout) {
@@ -1045,7 +1046,7 @@ hc_init:
* operational or runtime registers. Wait 5 seconds and no more.
*/
timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
- 5000, 10);
+ 5000000, 10);
/* Assume a buggy HC and start HC initialization anyway */
if (timeout) {
val = readl(op_reg_base + XHCI_STS_OFFSET);
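
The timeout fixes above matter because handshake() takes its timeout in the same microsecond units as its polling interval, so passing 5000 with a 10 µs step polls for only 5 ms. A hedged sketch of such a poll loop (the real helper lives in pci-quirks.c; the name and shape here are illustrative):

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Poll @ptr until (*ptr & mask) == done; @wait_usec and @delay_usec are both in µs. */
static int demo_handshake(void __iomem *ptr, u32 mask, u32 done,
			  int wait_usec, int delay_usec)
{
	do {
		if ((readl(ptr) & mask) == done)
			return 0;
		udelay(delay_usec);
		wait_usec -= delay_usec;
	} while (wait_usec > 0);

	return -ETIMEDOUT;	/* e.g. the BIOS never released ownership */
}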
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 7fc97d930657..c665806983be 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1490,7 +1490,7 @@ struct xhci_bus_state {
static inline unsigned int hcd_index(struct usb_hcd *hcd)
{
- if (hcd->speed == HCD_USB3)
+ if (hcd->speed >= HCD_USB3)
return 0;
else
return 1;
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 36e5b5c530bd..8bb9367ada45 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -285,11 +285,26 @@ static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
struct usbhs_fifo *fifo)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ int ret = 0;
- if (!usbhs_pipe_is_dcp(pipe))
- usbhsf_fifo_barrier(priv, fifo);
+ if (!usbhs_pipe_is_dcp(pipe)) {
+ /*
+ * This driver checks the pipe condition first to avoid -EBUSY
+ * from usbhsf_fifo_barrier() with about 10 msec delay in
+ * the interrupt handler if the pipe is RX direction and empty.
+ */
+ if (usbhs_pipe_is_dir_in(pipe))
+ ret = usbhs_pipe_is_accessible(pipe);
+ if (!ret)
+ ret = usbhsf_fifo_barrier(priv, fifo);
+ }
- usbhs_write(priv, fifo->ctr, BCLR);
+ /*
+ * if non-DCP pipe, this driver should set BCLR when
+ * usbhsf_fifo_barrier() returns 0.
+ */
+ if (!ret)
+ usbhs_write(priv, fifo->ctr, BCLR);
}
static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
@@ -843,9 +858,9 @@ static void xfer_work(struct work_struct *work)
fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
usbhs_pipe_running(pipe, 1);
- usbhsf_dma_start(pipe, fifo);
usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
dma_async_issue_pending(chan);
+ usbhsf_dma_start(pipe, fifo);
usbhs_pipe_enable(pipe);
xfer_work_end:
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index 3806e7014199..2938153fe7b1 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -189,6 +189,7 @@ static int usb_console_setup(struct console *co, char *options)
tty_kref_put(tty);
reset_open_count:
port->port.count = 0;
+ info->port = NULL;
usb_autopm_put_interface(serial->interface);
error_get_interface:
usb_serial_put(serial);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 41a6513646de..1f5ecf905b7d 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -170,6 +170,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+ { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */
{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
{ USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index e0385d6c0abb..30344efc123f 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1015,6 +1015,8 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
{ USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
+ { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 4fcf1cecb6d7..f9d15bd62785 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -610,6 +610,13 @@
#define ADI_GNICEPLUS_PID 0xF001
/*
+ * Cypress WICED USB UART
+ */
+#define CYPRESS_VID 0x04B4
+#define CYPRESS_WICED_BT_USB_PID 0x009B
+#define CYPRESS_WICED_WL_USB_PID 0xF900
+
+/*
* Microchip Technology, Inc.
*
* MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index e56cdb436de3..4581fa1dec98 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -234,11 +234,16 @@ static int read_mos_reg(struct usb_serial *serial, unsigned int serial_portnum,
status = usb_control_msg(usbdev, pipe, request, requesttype, value,
index, buf, 1, MOS_WDR_TIMEOUT);
- if (status == 1)
+ if (status == 1) {
*data = *buf;
- else if (status < 0)
+ } else {
dev_err(&usbdev->dev,
"mos7720: usb_control_msg() failed: %d\n", status);
+ if (status >= 0)
+ status = -EIO;
+ *data = 0;
+ }
+
kfree(buf);
return status;
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index d17685cc00c9..ed883a7ad533 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -285,9 +285,15 @@ static int mos7840_get_reg_sync(struct usb_serial_port *port, __u16 reg,
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
MCS_RD_RTYPE, 0, reg, buf, VENDOR_READ_LENGTH,
MOS_WDR_TIMEOUT);
+ if (ret < VENDOR_READ_LENGTH) {
+ if (ret >= 0)
+ ret = -EIO;
+ goto out;
+ }
+
*val = buf[0];
dev_dbg(&port->dev, "%s offset is %x, return val %x\n", __func__, reg, *val);
-
+out:
kfree(buf);
return ret;
}
@@ -353,8 +359,13 @@ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
MCS_RD_RTYPE, Wval, reg, buf, VENDOR_READ_LENGTH,
MOS_WDR_TIMEOUT);
+ if (ret < VENDOR_READ_LENGTH) {
+ if (ret >= 0)
+ ret = -EIO;
+ goto out;
+ }
*val = buf[0];
-
+out:
kfree(buf);
return ret;
}
@@ -1490,10 +1501,10 @@ static int mos7840_tiocmget(struct tty_struct *tty)
return -ENODEV;
status = mos7840_get_uart_reg(port, MODEM_STATUS_REGISTER, &msr);
- if (status != 1)
+ if (status < 0)
return -EIO;
status = mos7840_get_uart_reg(port, MODEM_CONTROL_REGISTER, &mcr);
- if (status != 1)
+ if (status < 0)
return -EIO;
result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0)
| ((mcr & MCR_RTS) ? TIOCM_RTS : 0)
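
The mos7720/mos7840 hunks normalize usb_control_msg() results: a short read is converted to -EIO so callers only need to test for a negative value. A minimal sketch of that convention, with a hypothetical vendor-register read helper (the request value 0x0d is illustrative only):

#include <linux/usb.h>
#include <linux/slab.h>

/* Read one vendor register; returns 0 on success or a negative errno. */
static int demo_read_reg(struct usb_device *udev, u16 reg, u8 *val)
{
	u8 *buf;
	int ret;

	buf = kmalloc(1, GFP_KERNEL);	/* DMA-able bounce buffer */
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      0x0d, USB_DIR_IN | USB_TYPE_VENDOR,
			      0, reg, buf, 1, 5000);
	if (ret < 1) {
		if (ret >= 0)
			ret = -EIO;	/* short read: report as I/O error */
	} else {
		*val = buf[0];
		ret = 0;
	}

	kfree(buf);
	return ret;
}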
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index fe123153b1a5..db3d34c2c82e 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -522,6 +522,7 @@ static void option_instat_callback(struct urb *urb);
/* TP-LINK Incorporated products */
#define TPLINK_VENDOR_ID 0x2357
+#define TPLINK_PRODUCT_LTE 0x000D
#define TPLINK_PRODUCT_MA180 0x0201
/* Changhong products */
@@ -2011,6 +2012,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) }, /* TP-Link LTE Module */
{ USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */
@@ -2023,6 +2025,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d0e, 0xff) }, /* D-Link DWM-157 C1 */
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 652b4334b26d..e1c1e329c877 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -174,6 +174,10 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
{DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
+ {DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */
+ {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */
+ {DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */
+ {DEVICE_SWI(0x413c, 0x81d2)}, /* Dell Wireless 5818 */
/* Huawei devices */
{DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
index f58caa9e6a27..a155cd02bce2 100644
--- a/drivers/usb/storage/uas-detect.h
+++ b/drivers/usb/storage/uas-detect.h
@@ -9,7 +9,8 @@ static int uas_is_interface(struct usb_host_interface *intf)
intf->desc.bInterfaceProtocol == USB_PR_UAS);
}
-static int uas_find_uas_alt_setting(struct usb_interface *intf)
+static struct usb_host_interface *uas_find_uas_alt_setting(
+ struct usb_interface *intf)
{
int i;
@@ -17,10 +18,10 @@ static int uas_find_uas_alt_setting(struct usb_interface *intf)
struct usb_host_interface *alt = &intf->altsetting[i];
if (uas_is_interface(alt))
- return alt->desc.bAlternateSetting;
+ return alt;
}
- return -ENODEV;
+ return NULL;
}
static int uas_find_endpoints(struct usb_host_interface *alt,
@@ -58,14 +59,14 @@ static int uas_use_uas_driver(struct usb_interface *intf,
struct usb_device *udev = interface_to_usbdev(intf);
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
unsigned long flags = id->driver_info;
- int r, alt;
-
+ struct usb_host_interface *alt;
+ int r;
alt = uas_find_uas_alt_setting(intf);
- if (alt < 0)
+ if (!alt)
return 0;
- r = uas_find_endpoints(&intf->altsetting[alt], eps);
+ r = uas_find_endpoints(alt, eps);
if (r < 0)
return 0;
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index e26e32169a36..f952635ebe5f 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -849,14 +849,14 @@ MODULE_DEVICE_TABLE(usb, uas_usb_ids);
static int uas_switch_interface(struct usb_device *udev,
struct usb_interface *intf)
{
- int alt;
+ struct usb_host_interface *alt;
alt = uas_find_uas_alt_setting(intf);
- if (alt < 0)
- return alt;
+ if (!alt)
+ return -ENODEV;
- return usb_set_interface(udev,
- intf->altsetting[0].desc.bInterfaceNumber, alt);
+ return usb_set_interface(udev, alt->desc.bInterfaceNumber,
+ alt->desc.bAlternateSetting);
}
static int uas_configure_endpoints(struct uas_dev_info *devinfo)
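
The uas changes return the alternate-setting structure itself instead of its bAlternateSetting number, since that number is an identifier, not a valid index into intf->altsetting[]. A short sketch of the corrected usage (demo_switch_to_uas is hypothetical):

#include <linux/usb.h>
#include <linux/errno.h>

/* Switch to the UAS alternate setting found by uas_find_uas_alt_setting(). */
static int demo_switch_to_uas(struct usb_device *udev,
			      struct usb_host_interface *alt)
{
	if (!alt)
		return -ENODEV;

	return usb_set_interface(udev, alt->desc.bInterfaceNumber,
				 alt->desc.bAlternateSetting);
}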
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 640a2e2ec04d..fb96755550ec 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1379,6 +1379,13 @@ UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_SANE_SENSE ),
+/* Reported by Kris Lindgren <kris.lindgren@gmail.com> */
+UNUSUAL_DEV( 0x0bc2, 0x3332, 0x0000, 0x9999,
+ "Seagate",
+ "External",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_WP_DETECT ),
+
UNUSUAL_DEV( 0x0d49, 0x7310, 0x0000, 0x9999,
"Maxtor",
"USB to SATA",
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index e75bbe5a10cd..1212b4b3c5a9 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -827,6 +827,8 @@ static int hwarc_probe(struct usb_interface *iface,
if (iface->cur_altsetting->desc.bNumEndpoints < 1)
return -ENODEV;
+ if (!usb_endpoint_xfer_int(&iface->cur_altsetting->endpoint[0].desc))
+ return -ENODEV;
result = -ENOMEM;
uwb_rc = uwb_rc_alloc();
diff --git a/drivers/uwb/uwbd.c b/drivers/uwb/uwbd.c
index bdcb13cc1d54..5c9828370217 100644
--- a/drivers/uwb/uwbd.c
+++ b/drivers/uwb/uwbd.c
@@ -303,18 +303,22 @@ static int uwbd(void *param)
/** Start the UWB daemon */
void uwbd_start(struct uwb_rc *rc)
{
- rc->uwbd.task = kthread_run(uwbd, rc, "uwbd");
- if (rc->uwbd.task == NULL)
+ struct task_struct *task = kthread_run(uwbd, rc, "uwbd");
+ if (IS_ERR(task)) {
+ rc->uwbd.task = NULL;
printk(KERN_ERR "UWB: Cannot start management daemon; "
"UWB won't work\n");
- else
+ } else {
+ rc->uwbd.task = task;
rc->uwbd.pid = rc->uwbd.task->pid;
+ }
}
/* Stop the UWB daemon and free any unprocessed events */
void uwbd_stop(struct uwb_rc *rc)
{
- kthread_stop(rc->uwbd.task);
+ if (rc->uwbd.task)
+ kthread_stop(rc->uwbd.task);
uwbd_flush(rc);
}
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index f34ed47fcaf8..7f658fa4d22a 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -1861,7 +1861,7 @@ static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
#if defined(DEBUG) && defined(CONFIG_FB_ATY_CT)
case ATYIO_CLKR:
if (M64_HAS(INTEGRATED)) {
- struct atyclk clk;
+ struct atyclk clk = { 0 };
union aty_pll *pll = &par->pll;
u32 dsp_config = pll->ct.dsp_config;
u32 dsp_on_off = pll->ct.dsp_on_off;
diff --git a/drivers/video/fbdev/msm/mdss_dp.c b/drivers/video/fbdev/msm/mdss_dp.c
index 6b003239ed34..71584dff75cb 100644
--- a/drivers/video/fbdev/msm/mdss_dp.c
+++ b/drivers/video/fbdev/msm/mdss_dp.c
@@ -1568,6 +1568,19 @@ static int mdss_dp_on_irq(struct mdss_dp_drv_pdata *dp_drv, bool lt_needed)
{
int ret = 0;
char ln_map[4];
+ bool connected;
+
+ mutex_lock(&dp_drv->attention_lock);
+ connected = dp_drv->cable_connected;
+ mutex_unlock(&dp_drv->attention_lock);
+
+ /*
+ * If the DP cable is disconnected, avoid link training or turning on the DP path
+ */
+ if (!connected) {
+ pr_err("DP sink not connected\n");
+ return -EINVAL;
+ }
/* wait until link training is completed */
pr_debug("enter, lt_needed=%s\n", lt_needed ? "true" : "false");
@@ -1609,6 +1622,13 @@ static int mdss_dp_on_irq(struct mdss_dp_drv_pdata *dp_drv, bool lt_needed)
dp_drv->power_on = true;
ret = mdss_dp_setup_main_link(dp_drv, lt_needed);
+ if (ret) {
+ if (ret == -ENODEV || ret == -EINVAL) {
+ pr_err("main link setup failed\n");
+ mutex_unlock(&dp_drv->train_mutex);
+ return ret;
+ }
+ }
exit_loop:
mutex_unlock(&dp_drv->train_mutex);
@@ -2209,7 +2229,7 @@ static int mdss_dp_process_hpd_high(struct mdss_dp_drv_pdata *dp)
ret = mdss_dp_dpcd_cap_read(dp);
if (ret || !mdss_dp_aux_is_link_rate_valid(dp->dpcd.max_link_rate) ||
!mdss_dp_aux_is_lane_count_valid(dp->dpcd.max_lane_count)) {
- if (ret == EDP_AUX_ERR_TOUT) {
+ if ((ret == -ENODEV) || (ret == EDP_AUX_ERR_TOUT)) {
pr_err("DPCD read timedout, skip connect notification\n");
goto end;
}
@@ -2241,6 +2261,9 @@ static int mdss_dp_process_hpd_high(struct mdss_dp_drv_pdata *dp)
read_edid:
ret = mdss_dp_edid_read(dp);
if (ret) {
+ if (ret == -ENODEV)
+ goto end;
+
pr_err("edid read error, setting default resolution\n");
goto notify;
}
@@ -3558,9 +3581,33 @@ static void mdss_dp_reset_event_list(struct mdss_dp_drv_pdata *dp)
static void mdss_dp_reset_sw_state(struct mdss_dp_drv_pdata *dp)
{
+ int ret = 0;
+
pr_debug("enter\n");
mdss_dp_reset_event_list(dp);
+
+ /*
+ * The IRQ_HPD attention event handler first turns on the DP path, then
+ * notifies CONNECT_IRQ_HPD and waits for userspace to trigger UNBLANK.
+ * In such cases, if the cable is disconnected before the UNBLANK call
+ * and DISCONNECT is notified immediately, userspace might not sense any
+ * change in connection status, leaving the DP controller ON.
+ *
+ * To avoid such cases, wait for the connection event to complete before
+ * sending the disconnection event.
+ */
+ if (atomic_read(&dp->notification_pending)) {
+ pr_debug("waiting for the pending notitfication\n");
+ ret = wait_for_completion_timeout(&dp->notification_comp, HZ);
+ if (ret <= 0) {
+ pr_err("%s timed out\n",
+ mdss_dp_notification_status_to_string(
+ dp->hpd_notification_status));
+ }
+ }
+
atomic_set(&dp->notification_pending, 0);
+ /* complete any waiting completions */
complete_all(&dp->notification_comp);
}
diff --git a/drivers/video/fbdev/msm/mdss_dp_aux.c b/drivers/video/fbdev/msm/mdss_dp_aux.c
index 86946adfeeb0..407f230ca71e 100644
--- a/drivers/video/fbdev/msm/mdss_dp_aux.c
+++ b/drivers/video/fbdev/msm/mdss_dp_aux.c
@@ -411,7 +411,8 @@ retry:
if (!connected) {
pr_err("dp cable disconnected\n");
- break;
+ ret = -ENODEV;
+ goto end;
}
dp->aux_error_num = EDP_AUX_ERR_NONE;
@@ -877,7 +878,7 @@ void dp_extract_edid_detailed_timing_description(struct edp_edid *edid,
static int dp_aux_chan_ready(struct mdss_dp_drv_pdata *ep)
{
- int cnt, ret;
+ int cnt, ret = 0;
char data = 0;
for (cnt = 5; cnt; cnt--) {
@@ -886,6 +887,10 @@ static int dp_aux_chan_ready(struct mdss_dp_drv_pdata *ep)
ret, mdss_dp_get_aux_error(ep->aux_error_num));
if (ret >= 0)
break;
+
+ if (ret == -ENODEV)
+ return ret;
+
msleep(100);
}
@@ -973,6 +978,7 @@ int mdss_dp_edid_read(struct mdss_dp_drv_pdata *dp)
u32 checksum = 0;
bool phy_aux_update_requested = false;
bool ext_block_parsing_done = false;
+ bool connected = false;
ret = dp_aux_chan_ready(dp);
if (ret) {
@@ -992,6 +998,15 @@ int mdss_dp_edid_read(struct mdss_dp_drv_pdata *dp)
u8 segment;
u8 edid_buf[EDID_BLOCK_SIZE] = {0};
+ mutex_lock(&dp->attention_lock);
+ connected = dp->cable_connected;
+ mutex_unlock(&dp->attention_lock);
+
+ if (!connected) {
+ pr_err("DP sink not connected\n");
+ return -ENODEV;
+ }
+
/*
* Write the segment first.
* Segment = 0, for blocks 0 and 1
@@ -1243,7 +1258,7 @@ int mdss_dp_aux_link_status_read(struct mdss_dp_drv_pdata *ep, int len)
rlen = dp_aux_read_buf(ep, 0x202, len, 0);
if (rlen < len) {
pr_err("edp aux read failed\n");
- return 0;
+ return rlen;
}
rp = &ep->rxp;
bp = rp->data;
@@ -2459,21 +2474,24 @@ static int dp_start_link_train_1(struct mdss_dp_drv_pdata *ep)
usleep_time = ep->dpcd.training_read_interval;
usleep_range(usleep_time, usleep_time);
- mdss_dp_aux_link_status_read(ep, 6);
+ ret = mdss_dp_aux_link_status_read(ep, 6);
+ if (ret == -ENODEV)
+ break;
+
if (mdss_dp_aux_clock_recovery_done(ep)) {
ret = 0;
break;
}
if (ep->v_level == DPCD_LINK_VOLTAGE_MAX) {
- ret = -1;
+ ret = -EAGAIN;
break; /* quit */
}
if (old_v_level == ep->v_level) {
tries++;
if (tries >= maximum_retries) {
- ret = -1;
+ ret = -EAGAIN;
break; /* quit */
}
} else {
@@ -2511,7 +2529,9 @@ static int dp_start_link_train_2(struct mdss_dp_drv_pdata *ep)
usleep_time = ep->dpcd.training_read_interval;
usleep_range(usleep_time, usleep_time);
- mdss_dp_aux_link_status_read(ep, 6);
+ ret = mdss_dp_aux_link_status_read(ep, 6);
+ if (ret == -ENODEV)
+ break;
if (mdss_dp_aux_channel_eq_done(ep)) {
ret = 0;
@@ -2519,7 +2539,7 @@ static int dp_start_link_train_2(struct mdss_dp_drv_pdata *ep)
}
if (tries > maximum_retries) {
- ret = -1;
+ ret = -EAGAIN;
break;
}
tries++;
@@ -2584,7 +2604,7 @@ int mdss_dp_link_train(struct mdss_dp_drv_pdata *dp)
ret = dp_start_link_train_1(dp);
if (ret < 0) {
- if (!dp_link_rate_down_shift(dp)) {
+ if ((ret == -EAGAIN) && !dp_link_rate_down_shift(dp)) {
pr_debug("retry with lower rate\n");
dp_clear_training_pattern(dp);
return -EAGAIN;
@@ -2603,7 +2623,7 @@ int mdss_dp_link_train(struct mdss_dp_drv_pdata *dp)
ret = dp_start_link_train_2(dp);
if (ret < 0) {
- if (!dp_link_rate_down_shift(dp)) {
+ if ((ret == -EAGAIN) && !dp_link_rate_down_shift(dp)) {
pr_debug("retry with lower rate\n");
dp_clear_training_pattern(dp);
return -EAGAIN;
@@ -2640,7 +2660,7 @@ int mdss_dp_dpcd_status_read(struct mdss_dp_drv_pdata *ep)
ret = mdss_dp_aux_link_status_read(ep, 6);
- if (ret) {
+ if (ret > 0) {
sp = &ep->link_status;
ret = sp->port_0_in_sync; /* 1 == sync */
}
diff --git a/drivers/video/fbdev/msm/mdss_dsi_host.c b/drivers/video/fbdev/msm/mdss_dsi_host.c
index fca1d37b40bb..88f6b9040651 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_host.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_host.c
@@ -1488,11 +1488,15 @@ static int mdss_dsi_wait4video_eng_busy(struct mdss_dsi_ctrl_pdata *ctrl)
{
int ret = 0;
u32 v_total = 0, v_blank = 0, sleep_ms = 0, fps = 0;
- struct mdss_panel_info *pinfo = &ctrl->panel_data.panel_info;
+ struct mdss_panel_info *pinfo;
- if (ctrl->panel_mode == DSI_CMD_MODE)
+ /* for dsi 2.1 and above dma scheduling is used */
+ if ((!ctrl) || (ctrl->panel_mode == DSI_CMD_MODE) ||
+ (ctrl->shared_data->hw_rev > MDSS_DSI_HW_REV_200))
return ret;
+ pinfo = &ctrl->panel_data.panel_info;
+
if (ctrl->ctrl_state & CTRL_STATE_MDP_ACTIVE) {
mdss_dsi_wait4video_done(ctrl);
v_total = mdss_panel_get_vtotal(pinfo);
@@ -1512,12 +1516,39 @@ static int mdss_dsi_wait4video_eng_busy(struct mdss_dsi_ctrl_pdata *ctrl)
return ret;
}
+static void mdss_dsi_schedule_dma_cmd(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ u32 v_blank, val = 0x0;
+ struct mdss_panel_info *pinfo;
+
+ /* for dsi 2.0 and below dma scheduling is not supported */
+ if ((!ctrl) || (ctrl->panel_mode == DSI_CMD_MODE) ||
+ (ctrl->shared_data->hw_rev < MDSS_DSI_HW_REV_201))
+ return;
+
+ pinfo = &ctrl->panel_data.panel_info;
+ v_blank = pinfo->lcdc.v_back_porch + pinfo->lcdc.v_pulse_width;
+
+ /* DMA_SCHEDULE_CTRL */
+ val = MIPI_INP(ctrl->ctrl_io.base + 0x100);
+ val = val | (1 << 28); /* DMA_SCHEDULE_EN */
+ MIPI_OUTP(ctrl->ctrl_io.base + 0x100, val);
+ val |= (pinfo->yres + v_blank);
+ MIPI_OUTP(ctrl->ctrl_io.base + 0x100, val); /* DMA_SCHEDULE_LINE */
+ wmb();
+
+ pr_debug("%s schedule at line %x", __func__, val);
+ MDSS_XLOG(ctrl->ndx, val);
+}
+
static void mdss_dsi_wait4active_region(struct mdss_dsi_ctrl_pdata *ctrl)
{
int in_blanking = 0;
int retry_count = 0;
- if (ctrl->panel_mode != DSI_VIDEO_MODE)
+ /* for dsi 2.1 and above dma scheduling is used */
+ if ((!ctrl) || (ctrl->panel_mode != DSI_VIDEO_MODE) ||
+ (ctrl->shared_data->hw_rev > MDSS_DSI_HW_REV_200))
return;
while (retry_count != MAX_BTA_WAIT_RETRY) {
@@ -2204,6 +2235,10 @@ static int mdss_dsi_cmd_dma_tx(struct mdss_dsi_ctrl_pdata *ctrl,
MIPI_OUTP((ctrl->ctrl_base) + 0x04c, len);
wmb();
+ /* schedule dma cmds at start of blanking region */
+ mdss_dsi_schedule_dma_cmd(ctrl);
+
+ /* DSI_CMD_MODE_DMA_SW_TRIGGER */
MIPI_OUTP((ctrl->ctrl_base) + 0x090, 0x01);
wmb();
MDSS_XLOG(ctrl->dma_addr, len);
diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
index 5bf931ce1353..978098f71761 100644
--- a/drivers/watchdog/kempld_wdt.c
+++ b/drivers/watchdog/kempld_wdt.c
@@ -140,12 +140,19 @@ static int kempld_wdt_set_stage_timeout(struct kempld_wdt_data *wdt_data,
unsigned int timeout)
{
struct kempld_device_data *pld = wdt_data->pld;
- u32 prescaler = kempld_prescaler[PRESCALER_21];
+ u32 prescaler;
u64 stage_timeout64;
u32 stage_timeout;
u32 remainder;
u8 stage_cfg;
+#if GCC_VERSION < 40400
+ /* work around a bug compiling do_div() */
+ prescaler = READ_ONCE(kempld_prescaler[PRESCALER_21]);
+#else
+ prescaler = kempld_prescaler[PRESCALER_21];
+#endif
+
if (!stage)
return -EINVAL;
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 8a58bbc14de2..f7b19c25c3a4 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -680,3 +680,22 @@ xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
return 0;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
+
+/*
+ * Create userspace mapping for the DMA-coherent memory.
+ * This function should be called with the pages from the current domain only,
+ * passing pages mapped from other domains would lead to memory corruption.
+ */
+int
+xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
+{
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ if (__generic_dma_ops(dev)->mmap)
+ return __generic_dma_ops(dev)->mmap(dev, vma, cpu_addr,
+ dma_addr, size, attrs);
+#endif
+ return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap);
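
xen_swiotlb_dma_mmap() above is intended to be plugged into the per-architecture dma_map_ops so that dma_mmap_coherent() on ARM/ARM64 forwards to the generic ops. A hedged sketch of how such a table might be wired up; the field name follows struct dma_map_ops of this kernel generation, and the demo_* table is illustrative, not the exact Xen code:

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>

/* Declaration matching the helper added above. */
int xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			 void *cpu_addr, dma_addr_t dma_addr, size_t size,
			 struct dma_attrs *attrs);

/* Illustrative ops table: only .mmap is shown; a real table also fills
 * .alloc, .free, .map_page and friends. */
static struct dma_map_ops demo_xen_swiotlb_dma_ops = {
	.mmap = xen_swiotlb_dma_mmap,
};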