Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/adsprpc.c | 4
-rw-r--r--  drivers/char/diag/diagfwd_cntl.c | 1
-rw-r--r--  drivers/char/diag/diagfwd_glink.c | 6
-rw-r--r--  drivers/char/diag/diagfwd_peripheral.c | 3
-rw-r--r--  drivers/cpufreq/cpufreq_interactive.c | 9
-rw-r--r--  drivers/crypto/msm/ice.c | 5
-rw-r--r--  drivers/crypto/msm/qce50.c | 20
-rw-r--r--  drivers/crypto/msm/qce50.h | 4
-rw-r--r--  drivers/crypto/msm/qcrypto.c | 14
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 59
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.h | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 4
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c | 29
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h | 7
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c | 5
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 101
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 12
-rw-r--r--  drivers/gpu/drm/msm/msm_mmu.h | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_smmu.c | 24
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_connector.c | 5
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_crtc.c | 242
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_crtc.h | 2
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_catalog.c | 5
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_kms.c | 90
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_kms.h | 4
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_plane.c | 41
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_splash.c | 636
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_splash.h | 124
-rw-r--r--  drivers/gpu/msm/adreno.c | 4
-rw-r--r--  drivers/gpu/msm/adreno_a5xx_preempt.c | 43
-rw-r--r--  drivers/gpu/msm/adreno_dispatch.c | 4
-rw-r--r--  drivers/gpu/msm/kgsl_drawobj.c | 28
-rw-r--r--  drivers/gpu/msm/kgsl_pool.c | 22
-rw-r--r--  drivers/gpu/msm/kgsl_pwrctrl.c | 17
-rw-r--r--  drivers/gpu/msm/kgsl_pwrctrl.h | 18
-rw-r--r--  drivers/input/misc/hbtp_input.c | 4
-rw-r--r--  drivers/input/touchscreen/st/fts.c | 19
-rw-r--r--  drivers/iommu/arm-smmu.c | 3
-rw-r--r--  drivers/media/platform/msm/ais/fd/msm_fd_dev.c | 6
-rw-r--r--  drivers/media/platform/msm/ais/isp/msm_isp.h | 1
-rw-r--r--  drivers/media/platform/msm/ais/isp/msm_isp47.c | 4
-rw-r--r--  drivers/media/platform/msm/ais/sensor/flash/msm_flash.c | 41
-rw-r--r--  drivers/media/platform/msm/ais/sensor/ois/msm_ois.c | 2
-rw-r--r--  drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c | 6
-rw-r--r--  drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c | 3
-rw-r--r--  drivers/media/platform/msm/camera_v2/msm.c | 11
-rw-r--r--  drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c | 87
-rw-r--r--  drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c | 25
-rw-r--r--  drivers/misc/qseecom.c | 11
-rw-r--r--  drivers/mmc/card/block.c | 7
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c | 12
-rw-r--r--  drivers/net/wireless/ath/ath10k/snoc.c | 4
-rw-r--r--  drivers/net/wireless/ath/wil6210/cfg80211.c | 2
-rw-r--r--  drivers/net/wireless/cnss/cnss_pci.c | 51
-rw-r--r--  drivers/net/wireless/cnss2/debug.c | 93
-rw-r--r--  drivers/net/wireless/cnss2/main.c | 21
-rw-r--r--  drivers/net/wireless/cnss2/pci.c | 32
-rw-r--r--  drivers/net/wireless/cnss2/qmi.c | 37
-rw-r--r--  drivers/net/wireless/wcnss/wcnss_vreg.c | 32
-rw-r--r--  drivers/net/wireless/wcnss/wcnss_wlan.c | 156
-rw-r--r--  drivers/pci/host/pci-msm.c | 33
-rw-r--r--  drivers/perf/arm_pmu.c | 1
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c | 3
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c | 62
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_rt.c | 2
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c | 9
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa.c | 32
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c | 3
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_i.h | 1
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c | 5
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h | 25
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c | 466
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_rt.c | 3
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_utils.c | 22
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c | 16
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h | 9
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h | 3
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c | 14
-rw-r--r--  drivers/platform/msm/mhi_uci/mhi_uci.c | 113
-rw-r--r--  drivers/power/power_supply_sysfs.c | 2
-rw-r--r--  drivers/power/supply/qcom/fg-core.h | 11
-rw-r--r--  drivers/power/supply/qcom/fg-util.c | 34
-rw-r--r--  drivers/power/supply/qcom/qpnp-fg-gen3.c | 136
-rw-r--r--  drivers/power/supply/qcom/qpnp-qnovo.c | 373
-rw-r--r--  drivers/power/supply/qcom/qpnp-smb2.c | 7
-rw-r--r--  drivers/power/supply/qcom/smb-lib.c | 293
-rw-r--r--  drivers/power/supply/qcom/smb-lib.h | 1
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 7
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 14
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 2
-rw-r--r--  drivers/soc/qcom/icnss.c | 60
-rw-r--r--  drivers/soc/qcom/jtagv8-etm.c | 13
-rw-r--r--  drivers/soc/qcom/jtagv8.c | 7
-rw-r--r--  drivers/soc/qcom/scm.c | 35
-rw-r--r--  drivers/soc/qcom/scm_qcpe.c | 15
-rw-r--r--  drivers/soc/qcom/secure_buffer.c | 5
-rw-r--r--  drivers/tty/serial/msm_serial_hs.c | 5
-rw-r--r--  drivers/usb/dwc3/dwc3-msm.c | 31
-rw-r--r--  drivers/usb/gadget/function/f_gsi.c | 6
-rw-r--r--  drivers/usb/gadget/function/f_qc_rndis.c | 6
-rw-r--r--  drivers/usb/gadget/function/f_rndis.c | 6
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp_aux.c | 10
-rw-r--r--  drivers/video/fbdev/msm/mdss_dsi.c | 2
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_edid.c | 13
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp.c | 5
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_ctl.c | 1
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_debug.c | 15
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_intf_video.c | 9
-rw-r--r--  drivers/video/fbdev/msm/mdss_rotator.c | 12
112 files changed, 3528 insertions(+), 700 deletions(-)
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 8017961783f7..781fa96726e2 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -1456,7 +1456,7 @@ static void smd_event_handler(void *priv, unsigned event)
switch (event) {
case SMD_EVENT_OPEN:
- complete(&me->channel[cid].work);
+ complete(&me->channel[cid].workport);
break;
case SMD_EVENT_CLOSE:
fastrpc_notify_drivers(me, cid);
@@ -1477,7 +1477,7 @@ static void fastrpc_init(struct fastrpc_apps *me)
me->channel = &gcinfo[0];
for (i = 0; i < NUM_CHANNELS; i++) {
init_completion(&me->channel[i].work);
- init_completion(&me->channel[i].workport);
+ init_completion(&me->channel[i].workport);
me->channel[i].sesscount = 0;
}
}
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index 4ae2158b5a6b..74777212e4cf 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -67,7 +67,6 @@ void diag_cntl_channel_close(struct diagfwd_info *p_info)
driver->feature[peripheral].sent_feature_mask = 0;
driver->feature[peripheral].rcvd_feature_mask = 0;
- flush_workqueue(driver->cntl_wq);
reg_dirty |= PERIPHERAL_MASK(peripheral);
diag_cmd_remove_reg_by_proc(peripheral);
driver->feature[peripheral].stm_support = DISABLE_STM;
diff --git a/drivers/char/diag/diagfwd_glink.c b/drivers/char/diag/diagfwd_glink.c
index 42182e3a939d..f1f8f0b2b34b 100644
--- a/drivers/char/diag/diagfwd_glink.c
+++ b/drivers/char/diag/diagfwd_glink.c
@@ -375,8 +375,10 @@ static void diag_glink_notify_rx_work_fn(struct work_struct *work)
struct diag_glink_read_work, work);
struct diag_glink_info *glink_info = read_work->glink_info;
- if (!glink_info || !glink_info->hdl)
+ if (!glink_info || !glink_info->hdl) {
+ kfree(read_work);
return;
+ }
diagfwd_channel_read_done(glink_info->fwd_ctxt,
(unsigned char *)(read_work->ptr_read_done),
@@ -388,6 +390,7 @@ static void diag_glink_notify_rx_work_fn(struct work_struct *work)
"diag: Rx done for packet %pK of len: %d periph: %d ch: %d\n",
read_work->ptr_rx_done, (int)read_work->ptr_read_size,
glink_info->peripheral, glink_info->type);
+ kfree(read_work);
}
static void diag_glink_notify_rx(void *hdl, const void *priv,
@@ -411,6 +414,7 @@ static void diag_glink_notify_rx(void *hdl, const void *priv,
if (!read_work) {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"diag: Could not allocate read_work\n");
+ glink_rx_done(glink_info->hdl, ptr, true);
return;
}
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index 6860de0d2288..78b8452b19b3 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -1285,6 +1285,9 @@ int diagfwd_channel_close(struct diagfwd_info *fwd_info)
if (!fwd_info)
return -EIO;
+ if (fwd_info->type == TYPE_CNTL)
+ flush_workqueue(driver->cntl_wq);
+
mutex_lock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
fwd_info->ch_open = 0;
if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->close)
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index b91e115462ae..abbee61c99c8 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -479,6 +479,7 @@ static void cpufreq_interactive_timer(unsigned long data)
bool skip_hispeed_logic, skip_min_sample_time;
bool jump_to_max_no_ts = false;
bool jump_to_max = false;
+ bool start_hyst = true;
if (!down_read_trylock(&ppol->enable_sem))
return;
@@ -588,8 +589,12 @@ static void cpufreq_interactive_timer(unsigned long data)
}
if (now - ppol->max_freq_hyst_start_time <
- tunables->max_freq_hysteresis)
+ tunables->max_freq_hysteresis) {
+ if (new_freq < ppol->policy->max &&
+ ppol->policy->max <= tunables->hispeed_freq)
+ start_hyst = false;
new_freq = max(tunables->hispeed_freq, new_freq);
+ }
if (!skip_hispeed_logic &&
ppol->target_freq >= tunables->hispeed_freq &&
@@ -646,7 +651,7 @@ static void cpufreq_interactive_timer(unsigned long data)
ppol->floor_validate_time = now;
}
- if (new_freq >= ppol->policy->max && !jump_to_max_no_ts)
+ if (start_hyst && new_freq >= ppol->policy->max && !jump_to_max_no_ts)
ppol->max_freq_hyst_start_time = now;
if (ppol->target_freq == new_freq &&
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
index 49165daa807f..490f8d9ddb9f 100644
--- a/drivers/crypto/msm/ice.c
+++ b/drivers/crypto/msm/ice.c
@@ -975,7 +975,8 @@ static int qcom_ice_secure_ice_init(struct ice_device *ice_dev)
static int qcom_ice_update_sec_cfg(struct ice_device *ice_dev)
{
- int ret = 0, scm_ret = 0;
+ int ret = 0;
+ u64 scm_ret = 0;
/* scm command buffer structure */
struct qcom_scm_cmd_buf {
@@ -1001,7 +1002,7 @@ static int qcom_ice_update_sec_cfg(struct ice_device *ice_dev)
cbuf.device_id = ICE_TZ_DEV_ID;
ret = scm_restore_sec_cfg(cbuf.device_id, cbuf.spare, &scm_ret);
if (ret || scm_ret) {
- pr_err("%s: failed, ret %d scm_ret %d\n",
+ pr_err("%s: failed, ret %d scm_ret %llu\n",
__func__, ret, scm_ret);
if (!ret)
ret = scm_ret;
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index 4ab8ca143f6c..b44f926a6ba0 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -2155,6 +2155,10 @@ static int _sha_complete(struct qce_device *pce_dev, int req_info)
pce_sps_data = &preq_info->ce_sps;
qce_callback = preq_info->qce_cb;
areq = (struct ahash_request *) preq_info->areq;
+ if (!areq) {
+ pr_err("sha operation error. areq is NULL\n");
+ return -ENXIO;
+ }
qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
DMA_TO_DEVICE);
memcpy(digest, (char *)(&pce_sps_data->result->auth_iv[0]),
@@ -2970,7 +2974,7 @@ static inline int qce_alloc_req_info(struct qce_device *pce_dev)
request_index++;
if (request_index >= MAX_QCE_BAM_REQ)
request_index = 0;
- if (xchg(&pce_dev->ce_request_info[request_index].
+ if (atomic_xchg(&pce_dev->ce_request_info[request_index].
in_use, true) == false) {
pce_dev->ce_request_index = request_index;
return request_index;
@@ -2986,7 +2990,8 @@ static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
bool is_complete)
{
pce_dev->ce_request_info[req_info].xfer_type = QCE_XFER_TYPE_LAST;
- if (xchg(&pce_dev->ce_request_info[req_info].in_use, false) == true) {
+ if (atomic_xchg(&pce_dev->ce_request_info[req_info].in_use,
+ false) == true) {
if (req_info < MAX_QCE_BAM_REQ && is_complete)
atomic_dec(&pce_dev->no_of_queued_req);
} else
@@ -4610,7 +4615,7 @@ static int qce_dummy_req(struct qce_device *pce_dev)
{
int ret = 0;
- if (!(xchg(&pce_dev->ce_request_info[DUMMY_REQ_INDEX].
+ if (!(atomic_xchg(&pce_dev->ce_request_info[DUMMY_REQ_INDEX].
in_use, true) == false))
return -EBUSY;
ret = qce_process_sha_req(pce_dev, NULL);
@@ -5969,7 +5974,7 @@ void *qce_open(struct platform_device *pdev, int *rc)
}
for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++)
- pce_dev->ce_request_info[i].in_use = false;
+ atomic_set(&pce_dev->ce_request_info[i].in_use, false);
pce_dev->ce_request_index = 0;
pce_dev->memsize = 10 * PAGE_SIZE * MAX_QCE_ALLOC_BAM_REQ;
@@ -6133,12 +6138,13 @@ EXPORT_SYMBOL(qce_hw_support);
void qce_dump_req(void *handle)
{
int i;
+ bool req_in_use;
struct qce_device *pce_dev = (struct qce_device *)handle;
for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
- pr_info("qce_dump_req %d %d\n", i,
- pce_dev->ce_request_info[i].in_use);
- if (pce_dev->ce_request_info[i].in_use == true)
+ req_in_use = atomic_read(&pce_dev->ce_request_info[i].in_use);
+ pr_info("qce_dump_req %d %d\n", i, req_in_use);
+ if (req_in_use == true)
_qce_dump_descr_fifos(pce_dev, i);
}
}
diff --git a/drivers/crypto/msm/qce50.h b/drivers/crypto/msm/qce50.h
index 6dba3664ff08..ab0d21da72c5 100644
--- a/drivers/crypto/msm/qce50.h
+++ b/drivers/crypto/msm/qce50.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -214,7 +214,7 @@ struct ce_sps_data {
};
struct ce_request_info {
- bool in_use;
+ atomic_t in_use;
bool in_prog;
enum qce_xfer_type_enum xfer_type;
struct ce_sps_data ce_sps;
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index 5b364f053b1b..f38fc422b35e 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -3972,6 +3972,7 @@ static int _sha1_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int len)
{
struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+ int ret = 0;
memset(&sha_ctx->authkey[0], 0, SHA1_BLOCK_SIZE);
if (len <= SHA1_BLOCK_SIZE) {
memcpy(&sha_ctx->authkey[0], key, len);
@@ -3979,16 +3980,19 @@ static int _sha1_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
} else {
sha_ctx->alg = QCE_HASH_SHA1;
sha_ctx->diglen = SHA1_DIGEST_SIZE;
- _sha_hmac_setkey(tfm, key, len);
+ ret = _sha_hmac_setkey(tfm, key, len);
+ if (ret)
+ pr_err("SHA1 hmac setkey failed\n");
sha_ctx->authkey_in_len = SHA1_BLOCK_SIZE;
}
- return 0;
+ return ret;
}
static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int len)
{
struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+ int ret = 0;
memset(&sha_ctx->authkey[0], 0, SHA256_BLOCK_SIZE);
if (len <= SHA256_BLOCK_SIZE) {
@@ -3997,11 +4001,13 @@ static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
} else {
sha_ctx->alg = QCE_HASH_SHA256;
sha_ctx->diglen = SHA256_DIGEST_SIZE;
- _sha_hmac_setkey(tfm, key, len);
+ ret = _sha_hmac_setkey(tfm, key, len);
+ if (ret)
+ pr_err("SHA256 hmac setkey failed\n");
sha_ctx->authkey_in_len = SHA256_BLOCK_SIZE;
}
- return 0;
+ return ret;
}
static int _sha_hmac_init_ihash(struct ahash_request *req,
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 84125b3d1f95..4c082fff2fc5 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -48,6 +48,7 @@ msm_drm-y := \
sde/sde_backlight.o \
sde/sde_color_processing.o \
sde/sde_vbif.o \
+ sde/sde_splash.o \
sde_dbg_evtlog.o \
sde_io_util.o \
dba_bridge.o \
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index e24827590b7c..c085e173232b 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -409,8 +409,8 @@ static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
gpu->funcs->pm_resume(gpu);
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A3XX_RBBM_STATUS));
- gpu->funcs->pm_suspend(gpu);
adreno_show(gpu, m);
+ gpu->funcs->pm_suspend(gpu);
}
#endif
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index b612c9a18faf..624c2a87d593 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -447,9 +447,9 @@ static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A4XX_RBBM_STATUS));
- gpu->funcs->pm_suspend(gpu);
adreno_show(gpu, m);
+ gpu->funcs->pm_suspend(gpu);
}
#endif
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 37323e962c2c..45a38b247727 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -375,6 +375,7 @@ static const struct {
void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
unsigned int i;
for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
@@ -391,6 +392,11 @@ void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
+
+ if (state)
+ set_bit(A5XX_HWCG_ENABLED, &a5xx_gpu->flags);
+ else
+ clear_bit(A5XX_HWCG_ENABLED, &a5xx_gpu->flags);
}
static int a5xx_me_init(struct msm_gpu *gpu)
@@ -1168,6 +1174,10 @@ static int a5xx_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;
+ /* If we are already up, don't mess with what works */
+ if (gpu->active_cnt > 1)
+ return 0;
+
/* Turn the RBCCU domain first to limit the chances of voltage droop */
gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);
@@ -1198,22 +1208,27 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- /* Clear the VBIF pipe before shutting down */
+ /* Only do this next bit if we are about to go down */
+ if (gpu->active_cnt == 1) {
+ /* Clear the VBIF pipe before shutting down */
- gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
- spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF);
+ gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
+ spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF)
+ == 0xF);
- gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);
+ gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);
- /*
- * Reset the VBIF before power collapse to avoid issue with FIFO
- * entries
- */
-
- if (adreno_is_a530(adreno_gpu)) {
- /* These only need to be done for A530 */
- gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
- gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);
+ /*
+ * Reset the VBIF before power collapse to avoid issue with FIFO
+ * entries
+ */
+ if (adreno_is_a530(adreno_gpu)) {
+ /* These only need to be done for A530 */
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD,
+ 0x003C0000);
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD,
+ 0x00000000);
+ }
}
return msm_gpu_pm_suspend(gpu);
@@ -1233,13 +1248,29 @@ static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
#ifdef CONFIG_DEBUG_FS
static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ bool enabled = test_bit(A5XX_HWCG_ENABLED, &a5xx_gpu->flags);
+
gpu->funcs->pm_resume(gpu);
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A5XX_RBBM_STATUS));
- gpu->funcs->pm_suspend(gpu);
+
+ /*
+ * Temporarily disable hardware clock gating before going into
+ * adreno_show to avoid issues while reading the registers
+ */
+
+ if (enabled)
+ a5xx_set_hwcg(gpu, false);
adreno_show(gpu, m);
+
+ if (enabled)
+ a5xx_set_hwcg(gpu, true);
+
+ gpu->funcs->pm_suspend(gpu);
}
#endif
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index f8b00982fe86..e637237fa811 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -23,6 +23,7 @@
enum {
A5XX_ZAP_SHADER_LOADED = 1,
+ A5XX_HWCG_ENABLED = 2,
};
struct a5xx_gpu {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 2273b06b59a6..04e0056f2a49 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -297,8 +297,6 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
seq_printf(m, "rb wptr: %d\n", get_wptr(ring));
}
- gpu->funcs->pm_resume(gpu);
-
/* dump these out in a form that can be parsed by demsm: */
seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
@@ -311,8 +309,6 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
}
}
-
- gpu->funcs->pm_suspend(gpu);
}
#endif
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
index e8b3e85603e4..f1c44b30575f 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
@@ -1253,6 +1253,13 @@ static int _sde_hdmi_hpd_enable(struct sde_hdmi *sde_hdmi)
uint32_t hpd_ctrl;
int i, ret;
unsigned long flags;
+ struct drm_connector *connector;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+
+ connector = hdmi->connector;
+ priv = connector->dev->dev_private;
+ sde_kms = to_sde_kms(priv->kms);
for (i = 0; i < config->hpd_reg_cnt; i++) {
ret = regulator_enable(hdmi->hpd_regs[i]);
@@ -1292,9 +1299,11 @@ static int _sde_hdmi_hpd_enable(struct sde_hdmi *sde_hdmi)
}
}
- sde_hdmi_set_mode(hdmi, false);
- _sde_hdmi_phy_reset(hdmi);
- sde_hdmi_set_mode(hdmi, true);
+ if (!sde_kms->splash_info.handoff) {
+ sde_hdmi_set_mode(hdmi, false);
+ _sde_hdmi_phy_reset(hdmi);
+ sde_hdmi_set_mode(hdmi, true);
+ }
hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
@@ -2863,6 +2872,7 @@ int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc)
struct msm_drm_private *priv = NULL;
struct hdmi *hdmi;
struct platform_device *pdev;
+ struct sde_kms *sde_kms;
DBG("");
if (!display || !display->drm_dev || !enc) {
@@ -2921,6 +2931,19 @@ int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc)
enc->bridge = hdmi->bridge;
priv->bridges[priv->num_bridges++] = hdmi->bridge;
+ /*
+ * After initialising the HDMI bridge, check whether early
+ * display is enabled for HDMI. If it is, increase the refcount
+ * of the HDMI power clocks so that clock_late_init skips the
+ * clock-disable step when it finds clk.count == 1.
+ */
+ sde_kms = to_sde_kms(priv->kms);
+ if (sde_kms->splash_info.handoff) {
+ sde_hdmi_bridge_power_on(hdmi->bridge);
+ hdmi->power_on = true;
+ }
+
mutex_unlock(&display->display_lock);
return 0;
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
index bafb2b949a6b..f2dd5351913b 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
@@ -357,6 +357,13 @@ int sde_hdmi_set_property(struct drm_connector *connector,
int property_index,
uint64_t value,
void *display);
+/**
+ * sde_hdmi_bridge_power_on -- A wrapper of _sde_hdmi_bridge_power_on.
+ * @bridge: Handle to the drm bridge.
+ *
+ * Return: void.
+ */
+void sde_hdmi_bridge_power_on(struct drm_bridge *bridge);
/**
* sde_hdmi_get_property() - get the connector properties
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
index 62dd3aaf7078..e4eb531c12aa 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
@@ -841,6 +841,11 @@ static bool _sde_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
return true;
}
+void sde_hdmi_bridge_power_on(struct drm_bridge *bridge)
+{
+ _sde_hdmi_bridge_power_on(bridge);
+}
+
static const struct drm_bridge_funcs _sde_hdmi_bridge_funcs = {
.pre_enable = _sde_hdmi_bridge_pre_enable,
.enable = _sde_hdmi_bridge_enable,
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 924ce64206f4..c8b11425a817 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1923,8 +1923,75 @@ static struct drm_driver msm_driver = {
#ifdef CONFIG_PM_SLEEP
static int msm_pm_suspend(struct device *dev)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
+ struct drm_device *ddev;
+ struct drm_modeset_acquire_ctx *ctx;
+ struct drm_connector *conn;
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ struct msm_drm_private *priv;
+ int ret = 0;
+
+ if (!dev)
+ return -EINVAL;
+
+ ddev = dev_get_drvdata(dev);
+ if (!ddev || !ddev->dev_private)
+ return -EINVAL;
+
+ priv = ddev->dev_private;
+ SDE_EVT32(0);
+
+ /* acquire modeset lock(s) */
+ drm_modeset_lock_all(ddev);
+ ctx = ddev->mode_config.acquire_ctx;
+
+ /* save current state for resume */
+ if (priv->suspend_state)
+ drm_atomic_state_free(priv->suspend_state);
+ priv->suspend_state = drm_atomic_helper_duplicate_state(ddev, ctx);
+ if (IS_ERR_OR_NULL(priv->suspend_state)) {
+ DRM_ERROR("failed to back up suspend state\n");
+ priv->suspend_state = NULL;
+ goto unlock;
+ }
+
+ /* create atomic state to disable all CRTCs */
+ state = drm_atomic_state_alloc(ddev);
+ if (IS_ERR_OR_NULL(state)) {
+ DRM_ERROR("failed to allocate crtc disable state\n");
+ goto unlock;
+ }
+
+ state->acquire_ctx = ctx;
+ drm_for_each_connector(conn, ddev) {
+
+ if (!conn->state || !conn->state->crtc ||
+ conn->dpms != DRM_MODE_DPMS_ON)
+ continue;
+
+ /* force CRTC to be inactive */
+ crtc_state = drm_atomic_get_crtc_state(state,
+ conn->state->crtc);
+ if (IS_ERR_OR_NULL(crtc_state)) {
+ DRM_ERROR("failed to get crtc %d state\n",
+ conn->state->crtc->base.id);
+ drm_atomic_state_free(state);
+ goto unlock;
+ }
+ crtc_state->active = false;
+ }
+ /* commit the "disable all" state */
+ ret = drm_atomic_commit(state);
+ if (ret < 0) {
+ DRM_ERROR("failed to disable crtcs, %d\n", ret);
+ drm_atomic_state_free(state);
+ }
+
+unlock:
+ drm_modeset_unlock_all(ddev);
+
+ /* disable hot-plug polling */
drm_kms_helper_poll_disable(ddev);
return 0;
@@ -1932,8 +1999,38 @@ static int msm_pm_suspend(struct device *dev)
static int msm_pm_resume(struct device *dev)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
+ struct drm_device *ddev;
+ struct msm_drm_private *priv;
+ int ret;
+
+ if (!dev)
+ return -EINVAL;
+
+ ddev = dev_get_drvdata(dev);
+ if (!ddev || !ddev->dev_private)
+ return -EINVAL;
+
+ priv = ddev->dev_private;
+
+ SDE_EVT32(priv->suspend_state != NULL);
+
+ drm_mode_config_reset(ddev);
+
+ drm_modeset_lock_all(ddev);
+
+ if (priv->suspend_state) {
+ priv->suspend_state->acquire_ctx =
+ ddev->mode_config.acquire_ctx;
+ ret = drm_atomic_commit(priv->suspend_state);
+ if (ret < 0) {
+ DRM_ERROR("failed to restore state, %d\n", ret);
+ drm_atomic_state_free(priv->suspend_state);
+ }
+ priv->suspend_state = NULL;
+ }
+ drm_modeset_unlock_all(ddev);
+ /* enable hot-plug polling */
drm_kms_helper_poll_enable(ddev);
return 0;
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 08868fce1cb0..49b6029c3342 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -367,6 +367,9 @@ struct msm_drm_private {
struct msm_vblank_ctrl vblank_ctrl;
+ /* saved atomic state during system suspend */
+ struct drm_atomic_state *suspend_state;
+
/* list of clients waiting for events */
struct list_head client_event_list;
};
@@ -414,6 +417,15 @@ void __msm_fence_worker(struct work_struct *work);
(_cb)->func = _func; \
} while (0)
+static inline bool msm_is_suspend_state(struct drm_device *dev)
+{
+ if (!dev || !dev->dev_private)
+ return false;
+
+ return ((struct msm_drm_private *)dev->dev_private)->suspend_state !=
+ NULL;
+}
+
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool async);
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 8148d3e9e850..cd3a710f8f27 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -46,6 +46,8 @@ struct msm_mmu_funcs {
void (*destroy)(struct msm_mmu *mmu);
void (*enable)(struct msm_mmu *mmu);
void (*disable)(struct msm_mmu *mmu);
+ int (*set_property)(struct msm_mmu *mmu,
+ enum iommu_attr attr, void *data);
};
struct msm_mmu {
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index eb68eb977aa7..4247243055b6 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -170,12 +170,36 @@ static void msm_smmu_destroy(struct msm_mmu *mmu)
kfree(smmu);
}
+/* Callers can use this API to set an attribute on the SMMU domain. */
+static int msm_smmu_set_property(struct msm_mmu *mmu,
+ enum iommu_attr attr, void *data)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+ struct iommu_domain *domain;
+ int ret = 0;
+
+ if (!client)
+ return -EINVAL;
+
+ domain = client->mmu_mapping->domain;
+ if (!domain)
+ return -EINVAL;
+
+ ret = iommu_domain_set_attr(domain, attr, data);
+ if (ret)
+ DRM_ERROR("set domain attribute failed\n");
+
+ return ret;
+}
+
static const struct msm_mmu_funcs funcs = {
.attach = msm_smmu_attach,
.detach = msm_smmu_detach,
.map = msm_smmu_map,
.unmap = msm_smmu_unmap,
.destroy = msm_smmu_destroy,
+ .set_property = msm_smmu_set_property,
};
static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 875513d2840f..6cc54d15beb2 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -604,6 +604,7 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
struct sde_kms *sde_kms;
struct sde_kms_info *info;
struct sde_connector *c_conn = NULL;
+ struct sde_splash_info *sinfo;
int rc;
if (!dev || !dev->dev_private || !encoder) {
@@ -757,6 +758,10 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
SDE_DEBUG("connector %d attach encoder %d\n",
c_conn->base.base.id, encoder->base.id);
+ sinfo = &sde_kms->splash_info;
+ if (sinfo && sinfo->handoff)
+ sde_splash_setup_connector_count(sinfo, connector_type);
+
priv->connectors[priv->num_connectors++] = &c_conn->base;
return &c_conn->base;
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 5323c0194594..9ab216213d8e 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -57,7 +57,17 @@
static inline struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
{
- struct msm_drm_private *priv = crtc->dev->dev_private;
+ struct msm_drm_private *priv;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ SDE_ERROR("invalid crtc\n");
+ return NULL;
+ }
+ priv = crtc->dev->dev_private;
+ if (!priv || !priv->kms) {
+ SDE_ERROR("invalid kms\n");
+ return NULL;
+ }
return to_sde_kms(priv->kms);
}
@@ -77,10 +87,10 @@ static void sde_crtc_destroy(struct drm_crtc *crtc)
sde_cp_crtc_destroy_properties(crtc);
debugfs_remove_recursive(sde_crtc->debugfs_root);
- mutex_destroy(&sde_crtc->crtc_lock);
sde_fence_deinit(&sde_crtc->output_fence);
drm_crtc_cleanup(crtc);
+ mutex_destroy(&sde_crtc->crtc_lock);
kfree(sde_crtc);
}
@@ -590,14 +600,22 @@ void sde_crtc_complete_commit(struct drm_crtc *crtc,
{
struct sde_crtc *sde_crtc;
struct sde_crtc_state *cstate;
+ struct drm_connector *conn;
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
int i;
- if (!crtc || !crtc->state) {
+ if (!crtc || !crtc->state || !crtc->dev) {
SDE_ERROR("invalid crtc\n");
return;
}
+ dev = crtc->dev;
+ priv = dev->dev_private;
+
sde_crtc = to_sde_crtc(crtc);
+ sde_kms = _sde_crtc_get_kms(crtc);
cstate = to_sde_crtc_state(crtc->state);
SDE_EVT32(DRMID(crtc));
@@ -606,6 +624,20 @@ void sde_crtc_complete_commit(struct drm_crtc *crtc,
for (i = 0; i < cstate->num_connectors; ++i)
sde_connector_complete_commit(cstate->connectors[i]);
+
+ if (!sde_kms->splash_info.handoff &&
+ sde_kms->splash_info.lk_is_exited) {
+ mutex_lock(&dev->mode_config.mutex);
+ drm_for_each_connector(conn, crtc->dev) {
+ if (conn->state->crtc != crtc)
+ continue;
+
+ sde_splash_clean_up_free_resource(priv->kms,
+ &priv->phandle,
+ conn->connector_type);
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+ }
}
/**
@@ -941,6 +973,112 @@ end:
}
/**
+ * _sde_crtc_vblank_enable_nolock - update power resource and vblank request
+ * @sde_crtc: Pointer to sde crtc structure
+ * @enable: Whether to enable/disable vblanks
+ */
+static void _sde_crtc_vblank_enable_nolock(
+ struct sde_crtc *sde_crtc, bool enable)
+{
+ struct drm_device *dev;
+ struct drm_crtc *crtc;
+ struct drm_encoder *enc;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+
+ if (!sde_crtc) {
+ SDE_ERROR("invalid crtc\n");
+ return;
+ }
+
+ crtc = &sde_crtc->base;
+ dev = crtc->dev;
+ priv = dev->dev_private;
+
+ if (!priv->kms) {
+ SDE_ERROR("invalid kms\n");
+ return;
+ }
+ sde_kms = to_sde_kms(priv->kms);
+
+ if (enable) {
+ sde_power_resource_enable(&priv->phandle,
+ sde_kms->core_client, true);
+ list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+ if (enc->crtc != crtc)
+ continue;
+
+ SDE_EVT32(DRMID(crtc), DRMID(enc), enable);
+
+ sde_encoder_register_vblank_callback(enc,
+ sde_crtc_vblank_cb, (void *)crtc);
+ }
+ } else {
+ list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+ if (enc->crtc != crtc)
+ continue;
+
+ SDE_EVT32(DRMID(crtc), DRMID(enc), enable);
+
+ sde_encoder_register_vblank_callback(enc, NULL, NULL);
+ }
+ sde_power_resource_enable(&priv->phandle,
+ sde_kms->core_client, false);
+ }
+}
+
+/**
+ * _sde_crtc_set_suspend - notify crtc of suspend enable/disable
+ * @crtc: Pointer to drm crtc object
+ * @enable: true to enable suspend, false to indicate resume
+ */
+static void _sde_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
+{
+ struct sde_crtc *sde_crtc;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ SDE_ERROR("invalid crtc\n");
+ return;
+ }
+ sde_crtc = to_sde_crtc(crtc);
+ priv = crtc->dev->dev_private;
+
+ if (!priv->kms) {
+ SDE_ERROR("invalid crtc kms\n");
+ return;
+ }
+ sde_kms = to_sde_kms(priv->kms);
+
+ SDE_DEBUG("crtc%d suspend = %d\n", crtc->base.id, enable);
+
+ mutex_lock(&sde_crtc->crtc_lock);
+
+ /*
+ * Update CP on suspend/resume transitions
+ */
+ if (enable && !sde_crtc->suspend)
+ sde_cp_crtc_suspend(crtc);
+ else if (!enable && sde_crtc->suspend)
+ sde_cp_crtc_resume(crtc);
+
+ /*
+ * If the vblank refcount != 0, release a power reference on suspend
+ * and take it back during resume (if it is still != 0).
+ */
+ if (sde_crtc->suspend == enable)
+ SDE_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
+ crtc->base.id, enable);
+ else if (atomic_read(&sde_crtc->vblank_refcount) != 0)
+ _sde_crtc_vblank_enable_nolock(sde_crtc, !enable);
+
+ sde_crtc->suspend = enable;
+
+ mutex_unlock(&sde_crtc->crtc_lock);
+}
+
+/**
* sde_crtc_duplicate_state - state duplicate hook
* @crtc: Pointer to drm crtc structure
* @Returns: Pointer to new drm_crtc_state structure
@@ -990,6 +1128,10 @@ static void sde_crtc_reset(struct drm_crtc *crtc)
return;
}
+ /* revert suspend actions, if necessary */
+ if (msm_is_suspend_state(crtc->dev))
+ _sde_crtc_set_suspend(crtc, false);
+
/* remove previous state, if present */
if (crtc->state) {
sde_crtc_destroy_state(crtc, crtc->state);
@@ -1013,37 +1155,67 @@ static void sde_crtc_reset(struct drm_crtc *crtc)
crtc->state = &cstate->base;
}
+static int _sde_crtc_vblank_no_lock(struct sde_crtc *sde_crtc, bool en)
+{
+ if (!sde_crtc) {
+ SDE_ERROR("invalid crtc\n");
+ return -EINVAL;
+ } else if (en && atomic_inc_return(&sde_crtc->vblank_refcount) == 1) {
+ SDE_DEBUG("crtc%d vblank enable\n", sde_crtc->base.base.id);
+ if (!sde_crtc->suspend)
+ _sde_crtc_vblank_enable_nolock(sde_crtc, true);
+ } else if (!en && atomic_read(&sde_crtc->vblank_refcount) < 1) {
+ SDE_ERROR("crtc%d invalid vblank disable\n",
+ sde_crtc->base.base.id);
+ return -EINVAL;
+ } else if (!en && atomic_dec_return(&sde_crtc->vblank_refcount) == 0) {
+ SDE_DEBUG("crtc%d vblank disable\n", sde_crtc->base.base.id);
+ if (!sde_crtc->suspend)
+ _sde_crtc_vblank_enable_nolock(sde_crtc, false);
+ } else {
+ SDE_DEBUG("crtc%d vblank %s refcount:%d\n",
+ sde_crtc->base.base.id,
+ en ? "enable" : "disable",
+ atomic_read(&sde_crtc->vblank_refcount));
+ }
+
+ return 0;
+}
+
static void sde_crtc_disable(struct drm_crtc *crtc)
{
- struct msm_drm_private *priv;
- struct sde_crtc *sde_crtc;
struct drm_encoder *encoder;
+ struct sde_crtc *sde_crtc;
struct sde_kms *sde_kms;
+ struct msm_drm_private *priv;
- if (!crtc) {
+ if (!crtc || !crtc->dev || !crtc->state) {
SDE_ERROR("invalid crtc\n");
return;
}
sde_crtc = to_sde_crtc(crtc);
sde_kms = _sde_crtc_get_kms(crtc);
+ if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private) {
+ SDE_ERROR("invalid kms handle\n");
+ return;
+ }
priv = sde_kms->dev->dev_private;
SDE_DEBUG("crtc%d\n", crtc->base.id);
+ if (msm_is_suspend_state(crtc->dev))
+ _sde_crtc_set_suspend(crtc, true);
+
mutex_lock(&sde_crtc->crtc_lock);
SDE_EVT32(DRMID(crtc));
- if (atomic_read(&sde_crtc->vblank_refcount)) {
+ if (atomic_read(&sde_crtc->vblank_refcount) && !sde_crtc->suspend) {
SDE_ERROR("crtc%d invalid vblank refcount\n",
crtc->base.id);
- SDE_EVT32(DRMID(crtc));
- drm_for_each_encoder(encoder, crtc->dev) {
- if (encoder->crtc != crtc)
- continue;
- sde_encoder_register_vblank_callback(encoder, NULL,
- NULL);
- }
- atomic_set(&sde_crtc->vblank_refcount, 0);
+ SDE_EVT32(DRMID(crtc), atomic_read(&sde_crtc->vblank_refcount));
+ while (atomic_read(&sde_crtc->vblank_refcount))
+ if (_sde_crtc_vblank_no_lock(sde_crtc, false))
+ break;
}
if (atomic_read(&sde_crtc->frame_pending)) {
@@ -1262,40 +1434,20 @@ end:
int sde_crtc_vblank(struct drm_crtc *crtc, bool en)
{
- struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
- struct drm_encoder *encoder;
- struct drm_device *dev = crtc->dev;
+ struct sde_crtc *sde_crtc;
+ int rc;
- if (en && atomic_inc_return(&sde_crtc->vblank_refcount) == 1) {
- SDE_DEBUG("crtc%d vblank enable\n", crtc->base.id);
- } else if (!en && atomic_read(&sde_crtc->vblank_refcount) < 1) {
- SDE_ERROR("crtc%d invalid vblank disable\n", crtc->base.id);
+ if (!crtc) {
+ SDE_ERROR("invalid crtc\n");
return -EINVAL;
- } else if (!en && atomic_dec_return(&sde_crtc->vblank_refcount) == 0) {
- SDE_DEBUG("crtc%d vblank disable\n", crtc->base.id);
- } else {
- SDE_DEBUG("crtc%d vblank %s refcount:%d\n",
- crtc->base.id,
- en ? "enable" : "disable",
- atomic_read(&sde_crtc->vblank_refcount));
- return 0;
}
+ sde_crtc = to_sde_crtc(crtc);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc != crtc)
- continue;
-
- SDE_EVT32(DRMID(crtc), en);
-
- if (en)
- sde_encoder_register_vblank_callback(encoder,
- sde_crtc_vblank_cb, (void *)crtc);
- else
- sde_encoder_register_vblank_callback(encoder, NULL,
- NULL);
- }
+ mutex_lock(&sde_crtc->crtc_lock);
+ rc = _sde_crtc_vblank_no_lock(sde_crtc, en);
+ mutex_unlock(&sde_crtc->crtc_lock);
- return 0;
+ return rc;
}
void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc,
@@ -1739,6 +1891,7 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev,
crtc->dev = dev;
atomic_set(&sde_crtc->vblank_refcount, 0);
+ mutex_init(&sde_crtc->crtc_lock);
spin_lock_init(&sde_crtc->spin_lock);
atomic_set(&sde_crtc->frame_pending, 0);
@@ -1760,7 +1913,6 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev,
snprintf(sde_crtc->name, SDE_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
/* initialize output fence support */
- mutex_init(&sde_crtc->crtc_lock);
sde_fence_init(&sde_crtc->output_fence, sde_crtc->name, crtc->base.id);
/* initialize debugfs support */
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index aaa815c76c4e..6b8483d574b1 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -82,6 +82,7 @@ struct sde_crtc_frame_event {
* @vblank_cb_count : count of vblank callback since last reset
* @vblank_cb_time : ktime at vblank count reset
* @vblank_refcount : reference count for vblank enable request
+ * @suspend : whether or not a suspend operation is in progress
* @feature_list : list of color processing features supported on a crtc
* @active_list : list of color processing features are active
* @dirty_list : list of color processing features are dirty
@@ -117,6 +118,7 @@ struct sde_crtc {
u32 vblank_cb_count;
ktime_t vblank_cb_time;
atomic_t vblank_refcount;
+ bool suspend;
struct list_head feature_list;
struct list_head active_list;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 635c2e4e954b..a185eb338134 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -689,6 +689,7 @@ static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg,
{
sblk->maxupscale = MAX_SSPP_UPSCALE;
sblk->maxdwnscale = MAX_SSPP_DOWNSCALE;
+ sblk->format_list = plane_formats_yuv;
sspp->id = SSPP_VIG0 + *vig_count;
sspp->clk_ctrl = SDE_CLK_CTRL_VIG0 + *vig_count;
sspp->type = SSPP_TYPE_VIG;
@@ -759,6 +760,7 @@ static void _sde_sspp_setup_rgb(struct sde_mdss_cfg *sde_cfg,
{
sblk->maxupscale = MAX_SSPP_UPSCALE;
sblk->maxdwnscale = MAX_SSPP_DOWNSCALE;
+ sblk->format_list = plane_formats;
sspp->id = SSPP_RGB0 + *rgb_count;
sspp->clk_ctrl = SDE_CLK_CTRL_RGB0 + *rgb_count;
sspp->type = SSPP_TYPE_RGB;
@@ -799,6 +801,7 @@ static void _sde_sspp_setup_cursor(struct sde_mdss_cfg *sde_cfg,
set_bit(SDE_SSPP_CURSOR, &sspp->features);
sblk->maxupscale = SSPP_UNITY_SCALE;
sblk->maxdwnscale = SSPP_UNITY_SCALE;
+ sblk->format_list = cursor_formats;
sspp->id = SSPP_CURSOR0 + *cursor_count;
sspp->clk_ctrl = SDE_CLK_CTRL_CURSOR0 + *cursor_count;
sspp->type = SSPP_TYPE_CURSOR;
@@ -812,6 +815,7 @@ static void _sde_sspp_setup_dma(struct sde_mdss_cfg *sde_cfg,
{
sblk->maxupscale = SSPP_UNITY_SCALE;
sblk->maxdwnscale = SSPP_UNITY_SCALE;
+ sblk->format_list = plane_formats;
sspp->id = SSPP_DMA0 + *dma_count;
sspp->clk_ctrl = SDE_CLK_CTRL_DMA0 + *dma_count;
sspp->type = SSPP_TYPE_DMA;
@@ -1291,6 +1295,7 @@ static int sde_wb_parse_dt(struct device_node *np,
wb->xin_id = PROP_VALUE_ACCESS(prop_value, WB_XIN_ID, i);
wb->vbif_idx = VBIF_NRT;
wb->len = PROP_VALUE_ACCESS(prop_value, WB_LEN, 0);
+ wb->format_list = wb2_formats;
if (!prop_exists[WB_LEN])
wb->len = DEFAULT_SDE_HW_BLOCK_LEN;
sblk->maxlinewidth = sde_cfg->max_wb_linewidth;
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 031493aa42b8..a84d65195363 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -328,24 +328,12 @@ static int sde_debugfs_danger_init(struct sde_kms *sde_kms,
static int sde_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
- struct sde_kms *sde_kms = to_sde_kms(kms);
- struct drm_device *dev = sde_kms->dev;
- struct msm_drm_private *priv = dev->dev_private;
-
- sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
-
return sde_crtc_vblank(crtc, true);
}
static void sde_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
- struct sde_kms *sde_kms = to_sde_kms(kms);
- struct drm_device *dev = sde_kms->dev;
- struct msm_drm_private *priv = dev->dev_private;
-
sde_crtc_vblank(crtc, false);
-
- sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
}
static void sde_kms_prepare_commit(struct msm_kms *kms,
@@ -355,6 +343,9 @@ static void sde_kms_prepare_commit(struct msm_kms *kms,
struct drm_device *dev = sde_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
+ if (sde_kms->splash_info.handoff)
+ sde_splash_clean_up_exit_lk(kms);
+
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
}
@@ -997,8 +988,15 @@ static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
sde_hw_catalog_deinit(sde_kms->catalog);
sde_kms->catalog = NULL;
+ if (sde_kms->splash_info.handoff) {
+ if (sde_kms->core_client)
+ sde_splash_destroy(&sde_kms->splash_info,
+ &priv->phandle, sde_kms->core_client);
+ }
+
if (sde_kms->core_client)
- sde_power_client_destroy(&priv->phandle, sde_kms->core_client);
+ sde_power_client_destroy(&priv->phandle,
+ sde_kms->core_client);
sde_kms->core_client = NULL;
if (sde_kms->vbif[VBIF_NRT])
@@ -1110,6 +1108,24 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
continue;
}
+ /* Attaching the SMMU means the IOMMU HW starts working immediately.
+ * However, the display HW programmed by LK is still accessing memory
+ * while the memory map has not been set up yet.
+ * So first set the DOMAIN_ATTR_EARLY_MAP attribute to 1 to bypass
+ * stage 1 translation in the IOMMU HW.
+ */
+ if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
+ sde_kms->splash_info.handoff) {
+ ret = mmu->funcs->set_property(mmu,
+ DOMAIN_ATTR_EARLY_MAP,
+ &sde_kms->splash_info.handoff);
+ if (ret) {
+ SDE_ERROR("failed to set map att: %d\n", ret);
+ mmu->funcs->destroy(mmu);
+ goto fail;
+ }
+ }
+
aspace = msm_gem_smmu_address_space_create(sde_kms->dev->dev,
mmu, "sde");
if (IS_ERR(aspace)) {
@@ -1127,6 +1143,19 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
goto fail;
}
+ /*
+ * It is now safe to map the physical memory block that LK accesses.
+ */
+ if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
+ sde_kms->splash_info.handoff) {
+ ret = sde_splash_smmu_map(sde_kms->dev, mmu,
+ &sde_kms->splash_info);
+ if (ret) {
+ SDE_ERROR("map rsv mem failed: %d\n", ret);
+ msm_gem_address_space_put(aspace);
+ goto fail;
+ }
+ }
}
return 0;
@@ -1141,6 +1170,7 @@ static int sde_kms_hw_init(struct msm_kms *kms)
struct sde_kms *sde_kms;
struct drm_device *dev;
struct msm_drm_private *priv;
+ struct sde_splash_info *sinfo;
int i, rc = -EINVAL;
if (!kms) {
@@ -1230,6 +1260,35 @@ static int sde_kms_hw_init(struct msm_kms *kms)
goto power_error;
}
+ /*
+ * Read the DISP_INTF_SEL register to check
+ * whether early display is enabled in LK.
+ */
+ rc = sde_splash_get_handoff_status(kms);
+ if (rc) {
+ SDE_ERROR("get early splash status failed: %d\n", rc);
+ goto power_error;
+ }
+
+ /*
+ * When LK has enabled early display, sde_splash_parse_dt and
+ * sde_splash_init must be called. The first function parses the
+ * mandatory memory node used by the splash feature. The second
+ * function first votes for bandwidth, because the display hardware
+ * is already accessing the AHB data bus (otherwise the device would
+ * reboot), and then checks that the memory is reserved.
+ */
+ sinfo = &sde_kms->splash_info;
+ if (sinfo->handoff) {
+ rc = sde_splash_parse_dt(dev);
+ if (rc) {
+ SDE_ERROR("parse dt for splash info failed: %d\n", rc);
+ goto power_error;
+ }
+
+ sde_splash_init(&priv->phandle, kms);
+ }
+
for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
u32 vbif_idx = sde_kms->catalog->vbif[i].id;
@@ -1304,7 +1363,10 @@ static int sde_kms_hw_init(struct msm_kms *kms)
*/
dev->mode_config.allow_fb_modifiers = true;
- sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+ if (!sde_kms->splash_info.handoff)
+ sde_power_resource_enable(&priv->phandle,
+ sde_kms->core_client, false);
+
return 0;
drm_obj_init_err:
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index 44f6be959ac9..d929e48a3fe8 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -34,6 +34,7 @@
#include "sde_power_handle.h"
#include "sde_irq.h"
#include "sde_core_perf.h"
+#include "sde_splash.h"
#define DRMID(x) ((x) ? (x)->base.id : -1)
@@ -157,6 +158,9 @@ struct sde_kms {
bool has_danger_ctrl;
void **hdmi_displays;
int hdmi_display_count;
+
+ /* splash handoff structure */
+ struct sde_splash_info splash_info;
};
struct vsync_info {
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 6fe1d1629d22..6e2ccfa8e428 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -34,6 +34,14 @@
#include "sde_plane.h"
#include "sde_color_processing.h"
+static bool suspend_blank = true;
+module_param(suspend_blank, bool, 0400);
+MODULE_PARM_DESC(suspend_blank,
+ "If set, active planes will force their outputs to black,\n"
+ "by temporarily enabling the color fill, when recovering\n"
+ "from a system resume instead of attempting to display the\n"
+ "last provided frame buffer.");
+
#define SDE_DEBUG_PLANE(pl, fmt, ...) SDE_DEBUG("plane%d " fmt,\
(pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
@@ -138,6 +146,7 @@ struct sde_plane {
struct sde_debugfs_regset32 debugfs_src;
struct sde_debugfs_regset32 debugfs_scaler;
struct sde_debugfs_regset32 debugfs_csc;
+ bool debugfs_default_scale;
};
#define to_sde_plane(x) container_of(x, struct sde_plane, base)
@@ -686,9 +695,20 @@ static int _sde_plane_setup_scaler3_lut(struct sde_phy_plane *pp,
struct sde_plane_state *pstate)
{
struct sde_plane *psde = pp->sde_plane;
- struct sde_hw_scaler3_cfg *cfg = pp->scaler3_cfg;
+ struct sde_hw_scaler3_cfg *cfg;
int ret = 0;
+ if (!pp || !pp->scaler3_cfg) {
+ SDE_ERROR("invalid args\n");
+ return -EINVAL;
+ } else if (!pstate) {
+ /* pstate is expected to be null on forced color fill */
+ SDE_DEBUG("null pstate\n");
+ return -EINVAL;
+ }
+
+ cfg = pp->scaler3_cfg;
+
cfg->dir_lut = msm_property_get_blob(
&psde->property_info,
pstate->property_blobs, &cfg->dir_len,
@@ -723,6 +743,7 @@ static void _sde_plane_setup_scaler3(struct sde_phy_plane *pp,
}
memset(scale_cfg, 0, sizeof(*scale_cfg));
+ memset(&pp->pixel_ext, 0, sizeof(struct sde_hw_pixel_ext));
decimated = DECIMATED_DIMENSION(src_w,
pp->pipe_cfg.horz_decimation);
@@ -1070,7 +1091,8 @@ static void _sde_plane_setup_scaler(struct sde_phy_plane *pp,
int error;
error = _sde_plane_setup_scaler3_lut(pp, pstate);
- if (error || !pp->pixel_ext_usr) {
+ if (error || !pp->pixel_ext_usr ||
+ psde->debugfs_default_scale) {
memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
/* calculate default config for QSEED3 */
_sde_plane_setup_scaler3(pp,
@@ -1081,7 +1103,8 @@ static void _sde_plane_setup_scaler(struct sde_phy_plane *pp,
pp->scaler3_cfg, fmt,
chroma_subsmpl_h, chroma_subsmpl_v);
}
- } else if (!pp->pixel_ext_usr) {
+ } else if (!pp->pixel_ext_usr || !pstate ||
+ psde->debugfs_default_scale) {
uint32_t deci_dim, i;
/* calculate default configuration for QSEED2 */
@@ -1701,8 +1724,8 @@ void sde_plane_flush(struct drm_plane *plane)
*/
list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
if (psde->is_error)
- /* force white frame with 0% alpha pipe output on error */
- _sde_plane_color_fill(pp, 0xFFFFFF, 0x0);
+ /* force white frame with 100% alpha pipe output on error */
+ _sde_plane_color_fill(pp, 0xFFFFFF, 0xFF);
else if (pp->color_fill & SDE_PLANE_COLOR_FILL_FLAG)
/* force 100% alpha */
_sde_plane_color_fill(pp, pp->color_fill, 0xFF);
@@ -1711,6 +1734,10 @@ void sde_plane_flush(struct drm_plane *plane)
pp->pipe_hw->ops.setup_csc(pp->pipe_hw, pp->csc_ptr);
}
+ /* force black color fill during suspend */
+ if (msm_is_suspend_state(plane->dev) && suspend_blank)
+ _sde_plane_color_fill(pp, 0x0, 0x0);
+
/* flag h/w flush complete */
if (plane->state)
to_sde_plane_state(plane->state)->pending = false;
@@ -2582,6 +2609,10 @@ static void _sde_plane_init_debugfs(struct sde_plane *psde,
sde_debugfs_create_regset32("scaler_blk", S_IRUGO,
psde->debugfs_root,
&psde->debugfs_scaler);
+ debugfs_create_bool("default_scaling",
+ 0644,
+ psde->debugfs_root,
+ &psde->debugfs_default_scale);
sde_debugfs_setup_regset32(&psde->debugfs_csc,
sblk->csc_blk.base + cfg->base,
diff --git a/drivers/gpu/drm/msm/sde/sde_splash.c b/drivers/gpu/drm/msm/sde/sde_splash.c
new file mode 100644
index 000000000000..79989f3ac1e9
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_splash.c
@@ -0,0 +1,636 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/of_address.h>
+#include <linux/debugfs.h>
+#include <linux/memblock.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "sde_kms.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+#include "sde_hw_intf.h"
+#include "sde_hw_catalog.h"
+
+#define MDP_SSPP_TOP0_OFF 0x1000
+#define DISP_INTF_SEL 0x004
+#define SPLIT_DISPLAY_EN 0x2F4
+
+/* scratch registers */
+#define SCRATCH_REGISTER_0 0x014
+#define SCRATCH_REGISTER_1 0x018
+#define SCRATCH_REGISTER_2 0x01C
+
+#define SDE_LK_RUNNING_VALUE 0xC001CAFE
+#define SDE_LK_SHUT_DOWN_VALUE 0xDEADDEAD
+#define SDE_LK_EXIT_VALUE 0xDEADBEEF
+
+#define SDE_LK_EXIT_MAX_LOOP 20
+/*
+ * The memory reserved at boot cannot be released through the __init
+ * free functions, so we free it ourselves with free_reserved_page().
+ */
+static void _sde_splash_free_bootup_memory_to_system(phys_addr_t phys,
+ size_t size)
+{
+ unsigned long pfn_start, pfn_end, pfn_idx;
+
+ memblock_free(phys, size);
+
+ pfn_start = phys >> PAGE_SHIFT;
+ pfn_end = (phys + size) >> PAGE_SHIFT;
+
+ for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++)
+ free_reserved_page(pfn_to_page(pfn_idx));
+}
+
+static int _sde_splash_parse_dt_get_lk_pool_node(struct drm_device *dev,
+ struct sde_splash_info *sinfo)
+{
+ struct device_node *parent, *node;
+ struct resource r;
+ int ret = 0;
+
+ if (!sinfo)
+ return -EINVAL;
+
+ parent = of_find_node_by_path("/reserved-memory");
+ if (!parent)
+ return -EINVAL;
+
+ node = of_find_node_by_name(parent, "lk_pool");
+ if (!node) {
+ SDE_ERROR("mem reservation for lk_pool is not presented\n");
+ ret = -EINVAL;
+ goto parent_node_err;
+ }
+
+ /* find the mode */
+ if (of_address_to_resource(node, 0, &r)) {
+ ret = -EINVAL;
+ goto child_node_err;
+ }
+
+ sinfo->lk_pool_paddr = (dma_addr_t)r.start;
+ sinfo->lk_pool_size = r.end - r.start;
+
+ DRM_INFO("lk_pool: addr:%pK, size:%pK\n",
+ (void *)sinfo->lk_pool_paddr,
+ (void *)sinfo->lk_pool_size);
+
+child_node_err:
+ of_node_put(node);
+
+parent_node_err:
+ of_node_put(parent);
+
+ return ret;
+}
+
+static int _sde_splash_parse_dt_get_display_node(struct drm_device *dev,
+ struct sde_splash_info *sinfo)
+{
+ unsigned long size = 0;
+ dma_addr_t start;
+ struct device_node *node;
+ int ret = 0, i = 0, len = 0;
+
+ /* get reserved memory for display module */
+ if (of_get_property(dev->dev->of_node, "contiguous-region", &len))
+ sinfo->splash_mem_num = len / sizeof(u32);
+ else
+ sinfo->splash_mem_num = 0;
+
+ sinfo->splash_mem_paddr =
+ kmalloc(sizeof(phys_addr_t) * sinfo->splash_mem_num,
+ GFP_KERNEL);
+ if (!sinfo->splash_mem_paddr) {
+ SDE_ERROR("alloc splash_mem_paddr failed\n");
+ return -ENOMEM;
+ }
+
+ sinfo->splash_mem_size =
+ kmalloc(sizeof(size_t) * sinfo->splash_mem_num,
+ GFP_KERNEL);
+ if (!sinfo->splash_mem_size) {
+ SDE_ERROR("alloc splash_mem_size failed\n");
+ goto error;
+ }
+
+ sinfo->obj = kmalloc(sizeof(struct drm_gem_object *) *
+ sinfo->splash_mem_num, GFP_KERNEL);
+ if (!sinfo->obj) {
+ SDE_ERROR("construct splash gem objects failed\n");
+ goto error;
+ }
+
+ for (i = 0; i < sinfo->splash_mem_num; i++) {
+ node = of_parse_phandle(dev->dev->of_node,
+ "contiguous-region", i);
+
+ if (node) {
+ struct resource r;
+
+ ret = of_address_to_resource(node, 0, &r);
+ if (ret)
+ return ret;
+
+ size = r.end - r.start;
+ start = (dma_addr_t)r.start;
+
+ sinfo->splash_mem_paddr[i] = start;
+ sinfo->splash_mem_size[i] = size;
+
+ DRM_INFO("blk: %d, addr:%pK, size:%pK\n",
+ i, (void *)sinfo->splash_mem_paddr[i],
+ (void *)sinfo->splash_mem_size[i]);
+
+ of_node_put(node);
+ }
+ }
+
+ return ret;
+
+error:
+ kfree(sinfo->splash_mem_paddr);
+ sinfo->splash_mem_paddr = NULL;
+
+ kfree(sinfo->splash_mem_size);
+ sinfo->splash_mem_size = NULL;
+
+ return -ENOMEM;
+}
+
+static bool _sde_splash_lk_check(struct sde_hw_intr *intr)
+{
+ return (SDE_LK_RUNNING_VALUE == SDE_REG_READ(&intr->hw,
+ SCRATCH_REGISTER_1)) ? true : false;
+}
+
+/**
+ * _sde_splash_notify_lk_exit.
+ *
+ * Function to monitor LK's status and tell it to exit.
+ */
+static void _sde_splash_notify_lk_exit(struct sde_hw_intr *intr)
+{
+ int i = 0;
+
+ /* First, write the exit signal to the scratch register. */
+ SDE_REG_WRITE(&intr->hw, SCRATCH_REGISTER_1, SDE_LK_SHUT_DOWN_VALUE);
+
+ while ((SDE_LK_EXIT_VALUE !=
+ SDE_REG_READ(&intr->hw, SCRATCH_REGISTER_1)) &&
+ (++i < SDE_LK_EXIT_MAX_LOOP)) {
+ DRM_INFO("wait for LK's exit");
+ msleep(20);
+ }
+
+ if (i == SDE_LK_EXIT_MAX_LOOP)
+ SDE_ERROR("Loop LK's exit failed\n");
+}
+
+static int _sde_splash_gem_new(struct drm_device *dev,
+ struct sde_splash_info *sinfo)
+{
+ int i, ret;
+
+ for (i = 0; i < sinfo->splash_mem_num; i++) {
+ sinfo->obj[i] = msm_gem_new(dev,
+ sinfo->splash_mem_size[i], MSM_BO_UNCACHED);
+
+ if (IS_ERR(sinfo->obj[i])) {
+ ret = PTR_ERR(sinfo->obj[i]);
+ SDE_ERROR("failed to allocate gem, ret=%d\n", ret);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ for (i = 0; i < sinfo->splash_mem_num; i++) {
+ if (sinfo->obj[i])
+ msm_gem_free_object(sinfo->obj[i]);
+ sinfo->obj[i] = NULL;
+ }
+
+ return ret;
+}
+
+static int _sde_splash_get_pages(struct drm_gem_object *obj, phys_addr_t phys)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct page **p;
+ dma_addr_t paddr;
+ int npages = obj->size >> PAGE_SHIFT;
+ int i;
+
+ p = drm_malloc_ab(npages, sizeof(struct page *));
+ if (!p)
+ return -ENOMEM;
+
+ paddr = phys;
+
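+ /* The splash region is physically contiguous, so the page array can
+ * be built directly from the physical address and wrapped in an
+ * sg table for the SMMU map call.
+ */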
+ for (i = 0; i < npages; i++) {
+ p[i] = phys_to_page(paddr);
+ paddr += PAGE_SIZE;
+ }
+
+ msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
+ if (IS_ERR(msm_obj->sgt)) {
+ SDE_ERROR("failed to allocate sgt\n");
+ return -ENOMEM;
+ }
+
+ msm_obj->pages = p;
+
+ return 0;
+}
+
+static void _sde_splash_destroy_gem_object(struct msm_gem_object *msm_obj)
+{
+ if (msm_obj->pages) {
+ sg_free_table(msm_obj->sgt);
+ kfree(msm_obj->sgt);
+ drm_free_large(msm_obj->pages);
+ msm_obj->pages = NULL;
+ }
+}
+
+static void _sde_splash_destroy_splash_node(struct sde_splash_info *sinfo)
+{
+ kfree(sinfo->splash_mem_paddr);
+ sinfo->splash_mem_paddr = NULL;
+
+ kfree(sinfo->splash_mem_size);
+ sinfo->splash_mem_size = NULL;
+}
+
+static int _sde_splash_free_resource(struct msm_mmu *mmu,
+ struct sde_splash_info *sinfo, enum splash_connector_type conn)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(sinfo->obj[conn]);
+
+ if (!msm_obj)
+ return -EINVAL;
+
+ if (mmu->funcs && mmu->funcs->unmap)
+ mmu->funcs->unmap(mmu, sinfo->splash_mem_paddr[conn],
+ msm_obj->sgt, NULL);
+
+ _sde_splash_free_bootup_memory_to_system(sinfo->splash_mem_paddr[conn],
+ sinfo->splash_mem_size[conn]);
+
+ _sde_splash_destroy_gem_object(msm_obj);
+
+ return 0;
+}
+
+__ref int sde_splash_init(struct sde_power_handle *phandle, struct msm_kms *kms)
+{
+ struct sde_kms *sde_kms;
+ struct sde_splash_info *sinfo;
+ int i = 0;
+
+ if (!phandle || !kms) {
+ SDE_ERROR("invalid phandle/kms\n");
+ return -EINVAL;
+ }
+
+ sde_kms = to_sde_kms(kms);
+ sinfo = &sde_kms->splash_info;
+
+ sinfo->dsi_connector_cnt = 0;
+ sinfo->hdmi_connector_cnt = 0;
+
+ sde_power_data_bus_bandwidth_ctrl(phandle,
+ sde_kms->core_client, true);
+
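+ /* The splash regions handed over by the bootloader must still be
+ * marked reserved in memblock; bail out otherwise, since the buffers
+ * could already be in use by the system.
+ */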
+ for (i = 0; i < sinfo->splash_mem_num; i++) {
+ if (!memblock_is_reserved(sinfo->splash_mem_paddr[i])) {
+ SDE_ERROR("failed to reserve memory\n");
+
+ /* withdraw the bandwidth vote on failure */
+ sde_power_data_bus_bandwidth_ctrl(phandle,
+ sde_kms->core_client, false);
+
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+void sde_splash_destroy(struct sde_splash_info *sinfo,
+ struct sde_power_handle *phandle,
+ struct sde_power_client *pclient)
+{
+ struct msm_gem_object *msm_obj;
+ int i = 0;
+
+ if (!sinfo || !phandle || !pclient) {
+ SDE_ERROR("invalid sde_kms/phandle/pclient\n");
+ return;
+ }
+
+ for (i = 0; i < sinfo->splash_mem_num; i++) {
+ msm_obj = to_msm_bo(sinfo->obj[i]);
+
+ if (msm_obj)
+ _sde_splash_destroy_gem_object(msm_obj);
+ }
+
+ sde_power_data_bus_bandwidth_ctrl(phandle, pclient, false);
+
+ _sde_splash_destroy_splash_node(sinfo);
+}
+
+/*
+ * sde_splash_parse_dt.
+ * Parse the two kinds of reserved memory nodes used for early splash:
+ * first the reserved memory holding the display buffers, and then
+ * the memory node that LK's code and stack are running from.
+ */
+int sde_splash_parse_dt(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct sde_kms *sde_kms;
+ struct sde_splash_info *sinfo;
+
+ if (!priv || !priv->kms) {
+ SDE_ERROR("Invalid kms\n");
+ return -EINVAL;
+ }
+
+ sde_kms = to_sde_kms(priv->kms);
+ sinfo = &sde_kms->splash_info;
+
+ if (_sde_splash_parse_dt_get_display_node(dev, sinfo)) {
+ SDE_ERROR("get display node failed\n");
+ return -EINVAL;
+ }
+
+ if (_sde_splash_parse_dt_get_lk_pool_node(dev, sinfo)) {
+ SDE_ERROR("get LK pool node failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int sde_splash_get_handoff_status(struct msm_kms *kms)
+{
+ uint32_t intf_sel = 0;
+ uint32_t split_display = 0;
+ uint32_t num_of_display_on = 0;
+ uint32_t i = 0;
+ struct sde_kms *sde_kms = to_sde_kms(kms);
+ struct sde_rm *rm;
+ struct sde_hw_blk_reg_map *c;
+ struct sde_splash_info *sinfo;
+ struct sde_mdss_cfg *catalog;
+
+ sinfo = &sde_kms->splash_info;
+ if (!sinfo) {
+ SDE_ERROR("%s(%d): invalid splash info\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ rm = &sde_kms->rm;
+
+ if (!rm || !rm->hw_mdp) {
+ SDE_ERROR("invalid rm.\n");
+ return -EINVAL;
+ }
+
+ c = &rm->hw_mdp->hw;
+ if (c) {
+ intf_sel = SDE_REG_READ(c, DISP_INTF_SEL);
+ split_display = SDE_REG_READ(c, SPLIT_DISPLAY_EN);
+ }
+
+ catalog = sde_kms->catalog;
+
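+ /* Each interface occupies one byte of DISP_INTF_SEL; a non-zero byte
+ * means the bootloader left that interface enabled.
+ */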
+ if (intf_sel != 0) {
+ for (i = 0; i < catalog->intf_count; i++)
+ if ((intf_sel >> i*8) & 0x000000FF)
+ num_of_display_on++;
+
+ /*
+ * For split display enabled - DSI0, DSI1 interfaces are
+ * considered as single display. So decrement
+ * 'num_of_display_on' by 1
+ */
+ if (split_display)
+ num_of_display_on--;
+ }
+
+ if (num_of_display_on) {
+ sinfo->handoff = true;
+ sinfo->program_scratch_regs = true;
+ } else {
+ sinfo->handoff = false;
+ sinfo->program_scratch_regs = false;
+ }
+
+ sinfo->lk_is_exited = false;
+
+ return 0;
+}
+
+int sde_splash_smmu_map(struct drm_device *dev, struct msm_mmu *mmu,
+ struct sde_splash_info *sinfo)
+{
+ struct msm_gem_object *msm_obj;
+ int i = 0, ret = 0;
+
+ if (!mmu || !sinfo)
+ return -EINVAL;
+
+ /* First, construct drm_gem_objects for the splash memory regions. */
+ if (_sde_splash_gem_new(dev, sinfo))
+ return -ENOMEM;
+
+ /* Then construct an sg table for each region for the smmu map call. */
+ for (i = 0; i < sinfo->splash_mem_num; i++) {
+ if (_sde_splash_get_pages(sinfo->obj[i],
+ sinfo->splash_mem_paddr[i]))
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < sinfo->splash_mem_num; i++) {
+ msm_obj = to_msm_bo(sinfo->obj[i]);
+
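+ /* Map the splash buffer read-only and non-executable; during handoff
+ * the region is only scanned out, never written through this mapping.
+ */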
+ if (mmu->funcs && mmu->funcs->map) {
+ ret = mmu->funcs->map(mmu, sinfo->splash_mem_paddr[i],
+ msm_obj->sgt, IOMMU_READ | IOMMU_NOEXEC, NULL);
+
+ if (!ret) {
+ SDE_ERROR("Map blk %d @%pK failed.\n",
+ i, (void *)sinfo->splash_mem_paddr[i]);
+ return ret;
+ }
+ }
+ }
+
+ return ret ? 0 : -ENOMEM;
+}
+
+void sde_splash_setup_connector_count(struct sde_splash_info *sinfo,
+ int connector_type)
+{
+ switch (connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ sinfo->hdmi_connector_cnt++;
+ break;
+ case DRM_MODE_CONNECTOR_DSI:
+ sinfo->dsi_connector_cnt++;
+ break;
+ default:
+ SDE_ERROR("invalid connector_type %d\n", connector_type);
+ }
+}
+
+int sde_splash_clean_up_free_resource(struct msm_kms *kms,
+ struct sde_power_handle *phandle, int connector_type)
+{
+ struct sde_kms *sde_kms;
+ struct sde_splash_info *sinfo;
+ struct msm_mmu *mmu;
+ int ret = 0;
+ static bool hdmi_is_released;
+ static bool dsi_is_released;
+
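+ /* hdmi_is_released and dsi_is_released guard against decrementing the
+ * connector counts and freeing a connector's resources more than once.
+ */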
+ if (!phandle || !kms) {
+ SDE_ERROR("invalid phandle/kms.\n");
+ return -EINVAL;
+ }
+
+ sde_kms = to_sde_kms(kms);
+ sinfo = &sde_kms->splash_info;
+ if (!sinfo) {
+ SDE_ERROR("%s(%d): invalid splash info\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ /* Once both the HDMI and DSI resources have been freed:
+ * 1. Destroy the splash node objects.
+ * 2. Release the memory that LK's stack is running on.
+ * 3. Withdraw the AHB data bus bandwidth vote.
+ */
+ if (sinfo->hdmi_connector_cnt == 0 &&
+ sinfo->dsi_connector_cnt == 0) {
+ DRM_INFO("HDMI and DSI resource handoff is completed\n");
+
+ sinfo->lk_is_exited = false;
+
+ _sde_splash_destroy_splash_node(sinfo);
+
+ _sde_splash_free_bootup_memory_to_system(sinfo->lk_pool_paddr,
+ sinfo->lk_pool_size);
+
+ sde_power_data_bus_bandwidth_ctrl(phandle,
+ sde_kms->core_client, false);
+
+ return 0;
+ }
+
+ mmu = sde_kms->aspace[0]->mmu;
+
+ switch (connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ if (!hdmi_is_released)
+ sinfo->hdmi_connector_cnt--;
+
+ if ((sinfo->hdmi_connector_cnt == 0) && (!hdmi_is_released)) {
+ hdmi_is_released = true;
+
+ ret = _sde_splash_free_resource(mmu,
+ sinfo, SPLASH_HDMI);
+ }
+ break;
+ case DRM_MODE_CONNECTOR_DSI:
+ if (!dsi_is_released)
+ sinfo->dsi_connector_cnt--;
+
+ if ((sinfo->dsi_connector_cnt == 0) && (!dsi_is_released)) {
+ dsi_is_released = true;
+
+ ret = _sde_splash_free_resource(mmu,
+ sinfo, SPLASH_DSI);
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ SDE_ERROR("%s: invalid connector_type %d\n",
+ __func__, connector_type);
+ }
+
+ return ret;
+}
+
+/*
+ * This function will:
+ * 1. Notify LK to exit and wait until it has done so.
+ * 2. Set DOMAIN_ATTR_EARLY_MAP to 1 to enable stage 1 translation in the iommu.
+ */
+int sde_splash_clean_up_exit_lk(struct msm_kms *kms)
+{
+ struct sde_splash_info *sinfo;
+ struct msm_mmu *mmu;
+ struct sde_kms *sde_kms = to_sde_kms(kms);
+ int ret;
+
+ sinfo = &sde_kms->splash_info;
+
+ if (!sinfo) {
+ SDE_ERROR("%s(%d): invalid splash info\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ /* Monitor LK's status and tell it to exit. */
+ if (sinfo->program_scratch_regs) {
+ if (_sde_splash_lk_check(sde_kms->hw_intr))
+ _sde_splash_notify_lk_exit(sde_kms->hw_intr);
+
+ sinfo->handoff = false;
+ sinfo->program_scratch_regs = false;
+ }
+
+ if (!sde_kms->aspace[0] || !sde_kms->aspace[0]->mmu) {
+ /* Do not return an error here, so that the
+ * "lk_is_exited" flag below is still set.
+ */
+ SDE_ERROR("invalid mmu\n");
+ WARN_ON(1);
+ } else {
+ mmu = sde_kms->aspace[0]->mmu;
+ /* After LK has exited, set early domain map attribute
+ * to 1 to enable stage 1 translation in iommu driver.
+ */
+ if (mmu->funcs && mmu->funcs->set_property) {
+ ret = mmu->funcs->set_property(mmu,
+ DOMAIN_ATTR_EARLY_MAP, &sinfo->handoff);
+
+ if (ret)
+ SDE_ERROR("set_property failed\n");
+ }
+ }
+
+ sinfo->lk_is_exited = true;
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_splash.h b/drivers/gpu/drm/msm/sde/sde_splash.h
new file mode 100644
index 000000000000..47965693f0b8
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_splash.h
@@ -0,0 +1,124 @@
+/**
+ * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef SDE_SPLASH_H_
+#define SDE_SPLASH_H_
+
+#include "msm_kms.h"
+#include "msm_mmu.h"
+
+enum splash_connector_type {
+ SPLASH_DSI = 0,
+ SPLASH_HDMI,
+};
+
+struct sde_splash_info {
+ /* handoff flag */
+ bool handoff;
+
+ /* flag of display scratch registers */
+ bool program_scratch_regs;
+
+ /* to indicate LK is totally exited */
+ bool lk_is_exited;
+
+ /* number of memory nodes used for display buffers */
+ uint32_t splash_mem_num;
+
+ /* physical address of memory node for display buffer */
+ phys_addr_t *splash_mem_paddr;
+
+ /* size of each memory node */
+ size_t *splash_mem_size;
+
+ /* constructed gem objects for smmu mapping */
+ struct drm_gem_object **obj;
+
+ /* physical address of lk pool */
+ phys_addr_t lk_pool_paddr;
+
+ /* memory size of lk pool */
+ size_t lk_pool_size;
+
+ /* registered hdmi connector count */
+ uint32_t hdmi_connector_cnt;
+
+ /* registered dsi connector count */
+ uint32_t dsi_connector_cnt;
+};
+
+/* APIs for early splash handoff functions */
+
+/**
+ * sde_splash_get_handoff_status.
+ *
+ * This function will read the DISP_INTF_SEL register to get
+ * the status of early splash.
+ */
+int sde_splash_get_handoff_status(struct msm_kms *kms);
+
+/**
+ * sde_splash_init
+ *
+ * This function votes for data bus bandwidth and verifies the reserved memory.
+ */
+int sde_splash_init(struct sde_power_handle *phandle, struct msm_kms *kms);
+
+/**
+ * sde_splash_setup_connector_count
+ *
+ * Count the registered DSI and HDMI connectors respectively.
+ */
+void sde_splash_setup_connector_count(struct sde_splash_info *sinfo,
+ int connector_type);
+
+/**
+ * sde_splash_clean_up_exit_lk.
+ *
+ * Tell LK to exit, and clean up the resource.
+ */
+int sde_splash_clean_up_exit_lk(struct msm_kms *kms);
+
+/**
+ * sde_splash_clean_up_free_resource.
+ *
+ * Free the HDMI or DSI splash resources according
+ * to the input connector_type.
+ */
+int sde_splash_clean_up_free_resource(struct msm_kms *kms,
+ struct sde_power_handle *phandle, int connector_type);
+
+/**
+ * sde_splash_parse_dt.
+ *
+ * Parse reserved memory block from DT for early splash.
+ */
+int sde_splash_parse_dt(struct drm_device *dev);
+
+/**
+ * sde_splash_smmu_map.
+ *
+ * Map the physical memory that LK was using into the iommu.
+ */
+int sde_splash_smmu_map(struct drm_device *dev, struct msm_mmu *mmu,
+ struct sde_splash_info *sinfo);
+
+/**
+ * sde_splash_destroy
+ *
+ * Destroy the splash resources in the failure case.
+ */
+void sde_splash_destroy(struct sde_splash_info *sinfo,
+ struct sde_power_handle *phandle,
+ struct sde_power_client *pclient);
+
+#endif
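For orientation, a minimal sketch of how a caller might sequence the API above; the wrapper function and the assumption that struct sde_kms embeds a struct msm_kms base are illustrative, not part of this patch:

static int example_splash_handoff(struct drm_device *dev,
		struct sde_kms *sde_kms,
		struct sde_power_handle *phandle,
		struct msm_mmu *mmu)
{
	struct msm_kms *kms = &sde_kms->base;
	struct sde_splash_info *sinfo = &sde_kms->splash_info;
	int ret;

	/* parse the splash and lk_pool reserved-memory nodes */
	ret = sde_splash_parse_dt(dev);
	if (ret)
		return ret;

	/* read DISP_INTF_SEL to see whether the bootloader lit a display */
	ret = sde_splash_get_handoff_status(kms);
	if (ret || !sinfo->handoff)
		return ret;

	/* vote data bus bandwidth and verify the memory is still reserved */
	ret = sde_splash_init(phandle, kms);
	if (ret)
		return ret;

	/* map the splash buffers into the display SMMU */
	ret = sde_splash_smmu_map(dev, mmu, sinfo);
	if (ret)
		return ret;

	/* count the connectors taking part in the handoff */
	sde_splash_setup_connector_count(sinfo, DRM_MODE_CONNECTOR_DSI);

	/* tell LK to exit and re-enable stage 1 translation */
	ret = sde_splash_clean_up_exit_lk(kms);
	if (ret)
		return ret;

	/* once the kernel owns the display, release its splash resources */
	return sde_splash_clean_up_free_resource(kms, phandle,
			DRM_MODE_CONNECTOR_DSI);
}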
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 6521ec01413e..afa71116c691 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1309,6 +1309,10 @@ static int _adreno_start(struct adreno_device *adreno_dev)
/* make sure ADRENO_DEVICE_STARTED is not set here */
BUG_ON(test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv));
+ /* disallow l2pc during wake up to improve GPU wake up time */
+ kgsl_pwrctrl_update_l2pc(&adreno_dev->dev,
+ KGSL_L2PC_WAKEUP_TIMEOUT);
+
pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
pmqos_wakeup_vote);
diff --git a/drivers/gpu/msm/adreno_a5xx_preempt.c b/drivers/gpu/msm/adreno_a5xx_preempt.c
index 0e56731b16e2..883a9810fbf4 100644
--- a/drivers/gpu/msm/adreno_a5xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a5xx_preempt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -537,13 +537,42 @@ static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev)
KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED,
"smmu_info");
}
+
+static void a5xx_preemption_iommu_close(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
+
+ kgsl_free_global(device, &iommu->smmu_info);
+}
+
#else
static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev)
{
return -ENODEV;
}
+
+static void a5xx_preemption_iommu_close(struct adreno_device *adreno_dev)
+{
+}
#endif
+static void a5xx_preemption_close(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_preemption *preempt = &adreno_dev->preempt;
+ struct adreno_ringbuffer *rb;
+ unsigned int i;
+
+ del_timer(&preempt->timer);
+ kgsl_free_global(device, &preempt->counters);
+ a5xx_preemption_iommu_close(adreno_dev);
+
+ FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
+ kgsl_free_global(device, &rb->preemption_desc);
+ }
+}
+
int a5xx_preemption_init(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -568,7 +597,7 @@ int a5xx_preemption_init(struct adreno_device *adreno_dev)
A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0,
"preemption_counters");
if (ret)
- return ret;
+ goto err;
addr = preempt->counters.gpuaddr;
@@ -576,10 +605,16 @@ int a5xx_preemption_init(struct adreno_device *adreno_dev)
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
ret = a5xx_preemption_ringbuffer_init(adreno_dev, rb, addr);
if (ret)
- return ret;
+ goto err;
addr += A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE;
}
- return a5xx_preemption_iommu_init(adreno_dev);
+ ret = a5xx_preemption_iommu_init(adreno_dev);
+
+err:
+ if (ret)
+ a5xx_preemption_close(device);
+
+ return ret;
}
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 8902c3175c79..1a94e71f5c1d 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -1460,7 +1460,9 @@ int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv,
spin_unlock(&drawctxt->lock);
- kgsl_pwrctrl_update_l2pc(&adreno_dev->dev);
+ if (device->pwrctrl.l2pc_update_queue)
+ kgsl_pwrctrl_update_l2pc(&adreno_dev->dev,
+ KGSL_L2PC_QUEUE_TIMEOUT);
/* Add the context to the dispatcher pending list */
dispatcher_queue_context(adreno_dev, drawctxt);
diff --git a/drivers/gpu/msm/kgsl_drawobj.c b/drivers/gpu/msm/kgsl_drawobj.c
index f8f0e7ccb0d3..8dc2ebd26cf9 100644
--- a/drivers/gpu/msm/kgsl_drawobj.c
+++ b/drivers/gpu/msm/kgsl_drawobj.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -348,7 +348,13 @@ static int drawobj_add_sync_fence(struct kgsl_device *device,
struct kgsl_cmd_syncpoint_fence *sync = priv;
struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
struct kgsl_drawobj_sync_event *event;
+ struct sync_fence *fence = NULL;
unsigned int id;
+ int ret = 0;
+
+ fence = sync_fence_fdget(sync->fd);
+ if (fence == NULL)
+ return -EINVAL;
kref_get(&drawobj->refcount);
@@ -364,11 +370,13 @@ static int drawobj_add_sync_fence(struct kgsl_device *device,
set_bit(event->id, &syncobj->pending);
+ trace_syncpoint_fence(syncobj, fence->name);
+
event->handle = kgsl_sync_fence_async_wait(sync->fd,
drawobj_sync_fence_func, event);
if (IS_ERR_OR_NULL(event->handle)) {
- int ret = PTR_ERR(event->handle);
+ ret = PTR_ERR(event->handle);
clear_bit(event->id, &syncobj->pending);
event->handle = NULL;
@@ -376,18 +384,16 @@ static int drawobj_add_sync_fence(struct kgsl_device *device,
drawobj_put(drawobj);
/*
- * If ret == 0 the fence was already signaled - print a trace
- * message so we can track that
+ * Print a syncpoint_fence_expire trace if
+ * the fence is already signaled or there is
+ * a failure in registering the fence waiter.
*/
- if (ret == 0)
- trace_syncpoint_fence_expire(syncobj, "signaled");
-
- return ret;
+ trace_syncpoint_fence_expire(syncobj, (ret < 0) ?
+ "error" : fence->name);
}
- trace_syncpoint_fence(syncobj, event->handle->name);
-
- return 0;
+ sync_fence_put(fence);
+ return ret;
}
/* drawobj_add_sync_timestamp() - Add a new sync point for a sync obj
diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c
index c31a85b07447..685ce3ea968b 100644
--- a/drivers/gpu/msm/kgsl_pool.c
+++ b/drivers/gpu/msm/kgsl_pool.c
@@ -65,26 +65,19 @@ _kgsl_get_pool_from_order(unsigned int order)
/* Map the page into kernel and zero it out */
static void
-_kgsl_pool_zero_page(struct page *p, unsigned int pool_order)
+_kgsl_pool_zero_page(struct page *p)
{
- int i;
-
- for (i = 0; i < (1 << pool_order); i++) {
- struct page *page = nth_page(p, i);
- void *addr = kmap_atomic(page);
+ void *addr = kmap_atomic(p);
- memset(addr, 0, PAGE_SIZE);
- dmac_flush_range(addr, addr + PAGE_SIZE);
- kunmap_atomic(addr);
- }
+ memset(addr, 0, PAGE_SIZE);
+ dmac_flush_range(addr, addr + PAGE_SIZE);
+ kunmap_atomic(addr);
}
/* Add a page to specified pool */
static void
_kgsl_pool_add_page(struct kgsl_page_pool *pool, struct page *p)
{
- _kgsl_pool_zero_page(p, pool->pool_order);
-
spin_lock(&pool->list_lock);
list_add_tail(&p->lru, &pool->page_list);
pool->page_count++;
@@ -329,7 +322,6 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages,
} else
return -ENOMEM;
}
- _kgsl_pool_zero_page(page, order);
goto done;
}
@@ -349,7 +341,6 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages,
page = alloc_pages(gfp_mask, order);
if (page == NULL)
return -ENOMEM;
- _kgsl_pool_zero_page(page, order);
goto done;
}
}
@@ -379,13 +370,12 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages,
} else
return -ENOMEM;
}
-
- _kgsl_pool_zero_page(page, order);
}
done:
for (j = 0; j < (*page_size >> PAGE_SHIFT); j++) {
p = nth_page(page, j);
+ _kgsl_pool_zero_page(p);
pages[pcount] = p;
pcount++;
}
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index ad8b0131bb46..8c998a5d791b 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -43,13 +43,6 @@
#define DEFAULT_BUS_P 25
-/*
- * The effective duration of qos request in usecs. After
- * timeout, qos request is cancelled automatically.
- * Kept 80ms default, inline with default GPU idle time.
- */
-#define KGSL_L2PC_CPU_TIMEOUT (80 * 1000)
-
/* Order deeply matters here because reasons. New entries go on the end */
static const char * const clocks[] = {
"src_clk",
@@ -520,12 +513,14 @@ EXPORT_SYMBOL(kgsl_pwrctrl_set_constraint);
/**
* kgsl_pwrctrl_update_l2pc() - Update existing qos request
* @device: Pointer to the kgsl_device struct
+ * @timeout_us: the effective duration of qos request in usecs.
*
* Updates an existing qos request to avoid L2PC on the
* CPUs (which are selected through dtsi) on which GPU
* thread is running. This would help for performance.
*/
-void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device)
+void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device,
+ unsigned long timeout_us)
{
int cpu;
@@ -539,7 +534,7 @@ void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device)
pm_qos_update_request_timeout(
&device->pwrctrl.l2pc_cpus_qos,
device->pwrctrl.pm_qos_cpu_mask_latency,
- KGSL_L2PC_CPU_TIMEOUT);
+ timeout_us);
}
}
EXPORT_SYMBOL(kgsl_pwrctrl_update_l2pc);
@@ -2201,6 +2196,10 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
kgsl_property_read_u32(device, "qcom,l2pc-cpu-mask",
&pwr->l2pc_cpus_mask);
+ pwr->l2pc_update_queue = of_property_read_bool(
+ device->pdev->dev.of_node,
+ "qcom,l2pc-update-queue");
+
pm_runtime_enable(&pdev->dev);
ocmem_bus_node = of_find_node_by_name(
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 42f918b80fcd..5c0071544f60 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -51,6 +51,19 @@
#define KGSL_PWR_DEL_LIMIT 1
#define KGSL_PWR_SET_LIMIT 2
+/*
+ * The effective duration of qos request in usecs at queue time.
+ * After timeout, qos request is cancelled automatically.
+ * Kept 80ms default, inline with default GPU idle time.
+ */
+#define KGSL_L2PC_QUEUE_TIMEOUT (80 * 1000)
+
+/*
+ * The effective duration of qos request in usecs at wakeup time.
+ * After timeout, qos request is cancelled automatically.
+ */
+#define KGSL_L2PC_WAKEUP_TIMEOUT (10 * 1000)
+
enum kgsl_pwrctrl_timer_type {
KGSL_PWR_IDLE_TIMER,
};
@@ -128,6 +141,7 @@ struct kgsl_regulator {
* @irq_name - resource name for the IRQ
* @clk_stats - structure of clock statistics
* @l2pc_cpus_mask - mask to avoid L2PC on masked CPUs
+ * @l2pc_update_queue - Boolean flag to avoid L2PC on masked CPUs at queue time
* @l2pc_cpus_qos - qos structure to avoid L2PC on CPUs
* @pm_qos_req_dma - the power management quality of service structure
* @pm_qos_active_latency - allowed CPU latency in microseconds when active
@@ -183,6 +197,7 @@ struct kgsl_pwrctrl {
const char *irq_name;
struct kgsl_clk_stats clk_stats;
unsigned int l2pc_cpus_mask;
+ bool l2pc_update_queue;
struct pm_qos_request l2pc_cpus_qos;
struct pm_qos_request pm_qos_req_dma;
unsigned int pm_qos_active_latency;
@@ -249,5 +264,6 @@ int kgsl_active_count_wait(struct kgsl_device *device, int count);
void kgsl_pwrctrl_busy_time(struct kgsl_device *device, u64 time, u64 busy);
void kgsl_pwrctrl_set_constraint(struct kgsl_device *device,
struct kgsl_pwr_constraint *pwrc, uint32_t id);
-void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device);
+void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device,
+ unsigned long timeout_us);
#endif /* __KGSL_PWRCTRL_H */
diff --git a/drivers/input/misc/hbtp_input.c b/drivers/input/misc/hbtp_input.c
index 56f2732334db..30da797a85dc 100644
--- a/drivers/input/misc/hbtp_input.c
+++ b/drivers/input/misc/hbtp_input.c
@@ -249,6 +249,10 @@ static int hbtp_input_release(struct inode *inode, struct file *file)
return -ENOTTY;
}
hbtp->count--;
+ if (!completion_done(&hbtp->power_suspend_sig))
+ complete(&hbtp->power_suspend_sig);
+ if (!completion_done(&hbtp->power_resume_sig))
+ complete(&hbtp->power_resume_sig);
if (hbtp->power_sig_enabled)
hbtp->power_sig_enabled = false;
mutex_unlock(&hbtp->mutex);
diff --git a/drivers/input/touchscreen/st/fts.c b/drivers/input/touchscreen/st/fts.c
index 78bdd24af28b..08bfb83a9447 100644
--- a/drivers/input/touchscreen/st/fts.c
+++ b/drivers/input/touchscreen/st/fts.c
@@ -1003,7 +1003,10 @@ static unsigned char *fts_status_event_handler(
case FTS_WATER_MODE_ON:
case FTS_WATER_MODE_OFF:
default:
- logError(1, "%s %s Received unhandled status event = %02X %02X %02X %02X %02X %02X %02X %02X\n", tag, __func__, event[0], event[1], event[2], event[3], event[4], event[5], event[6], event[7]);
+ logError(0,
+ "%s %s Received unhandled status event = %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ tag, __func__, event[0], event[1], event[2],
+ event[3], event[4], event[5], event[6], event[7]);
break;
}
@@ -1755,8 +1758,6 @@ static int fts_fb_state_chg_callback(struct notifier_block *nb, unsigned long va
info->resume_bit = 1;
- fts_system_reset();
-
fts_mode_handler(info, 0);
info->sensor_sleep = false;
@@ -1959,9 +1960,9 @@ static int parse_dt(struct device *dev, struct fts_i2c_platform_data *bdata)
bdata->bus_reg_name = name;
logError(0, "%s bus_reg_name = %s\n", tag, name);
- if (of_property_read_bool(np, "st, reset-gpio")) {
+ if (of_property_read_bool(np, "st,reset-gpio")) {
bdata->reset_gpio = of_get_named_gpio_flags(np,
- "st, reset-gpio", 0, NULL);
+ "st,reset-gpio", 0, NULL);
logError(0, "%s reset_gpio =%d\n", tag, bdata->reset_gpio);
} else {
bdata->reset_gpio = GPIO_NOT_DEFINED;
@@ -2210,7 +2211,13 @@ static int fts_probe(struct i2c_client *client,
}
#endif
- queue_delayed_work(info->fwu_workqueue, &info->fwu_work, msecs_to_jiffies(EXP_FN_WORK_DELAY_MS));
+ /* To auto-update the firmware at probe time,
+ * uncomment the queue_delayed_work() call below.
+ */
+
+ /* queue_delayed_work(info->fwu_workqueue, &info->fwu_work,
+ * msecs_to_jiffies(EXP_FN_WORK_DELAY_MS));
+ */
logError(1, "%s Probe Finished!\n", tag);
return OK;
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index c6f74b149706..ce18a512b76a 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1727,7 +1727,8 @@ static void arm_smmu_pgtbl_unlock(struct arm_smmu_domain *smmu_domain,
static int arm_smmu_restore_sec_cfg(struct arm_smmu_device *smmu)
{
- int ret, scm_ret;
+ int ret;
+ u64 scm_ret;
if (!arm_smmu_is_static_cb(smmu))
return 0;
diff --git a/drivers/media/platform/msm/ais/fd/msm_fd_dev.c b/drivers/media/platform/msm/ais/fd/msm_fd_dev.c
index 420083f019cf..4024748e6afa 100644
--- a/drivers/media/platform/msm/ais/fd/msm_fd_dev.c
+++ b/drivers/media/platform/msm/ais/fd/msm_fd_dev.c
@@ -1053,14 +1053,18 @@ static int msm_fd_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
a->value = ctx->format.size->work_size;
break;
case V4L2_CID_FD_WORK_MEMORY_FD:
+ mutex_lock(&ctx->fd_device->recovery_lock);
if (ctx->work_buf.fd != -1)
msm_fd_hw_unmap_buffer(&ctx->work_buf);
if (a->value >= 0) {
ret = msm_fd_hw_map_buffer(&ctx->mem_pool,
a->value, &ctx->work_buf);
- if (ret < 0)
+ if (ret < 0) {
+ mutex_unlock(&ctx->fd_device->recovery_lock);
return ret;
+ }
}
+ mutex_unlock(&ctx->fd_device->recovery_lock);
break;
default:
return -EINVAL;
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp.h b/drivers/media/platform/msm/ais/isp/msm_isp.h
index 72a76d178aa8..86974eeb4a32 100644
--- a/drivers/media/platform/msm/ais/isp/msm_isp.h
+++ b/drivers/media/platform/msm/ais/isp/msm_isp.h
@@ -355,6 +355,7 @@ struct msm_vfe_hardware_info {
uint32_t dmi_reg_offset;
uint32_t min_ab;
uint32_t min_ib;
+ uint32_t regulator_num;
const char *regulator_names[];
};
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp47.c b/drivers/media/platform/msm/ais/isp/msm_isp47.c
index d63282f80aca..d33dc758aef9 100644
--- a/drivers/media/platform/msm/ais/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/ais/isp/msm_isp47.c
@@ -2537,8 +2537,7 @@ int msm_vfe47_get_regulators(struct vfe_device *vfe_dev)
int rc = 0;
int i;
- vfe_dev->vfe_num_regulators =
- sizeof(*vfe_dev->hw_info->regulator_names) / sizeof(char *);
+ vfe_dev->vfe_num_regulators = vfe_dev->hw_info->regulator_num;
vfe_dev->regulator_info = kzalloc(sizeof(struct msm_cam_regulator) *
vfe_dev->vfe_num_regulators, GFP_KERNEL);
@@ -2811,6 +2810,7 @@ struct msm_vfe_hardware_info vfe47_hw_info = {
.dmi_reg_offset = 0xC2C,
.axi_hw_info = &msm_vfe47_axi_hw_info,
.stats_hw_info = &msm_vfe47_stats_hw_info,
+ .regulator_num = 3,
.regulator_names = {"vdd", "camss-vdd", "mmagic-vdd"},
};
EXPORT_SYMBOL(vfe47_hw_info);
diff --git a/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c b/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c
index 734b37172421..024677e1b755 100644
--- a/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c
+++ b/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c
@@ -504,23 +504,46 @@ static int32_t msm_flash_init(
}
flash_ctrl->flash_state = MSM_CAMERA_FLASH_INIT;
-
CDBG("Exit");
return 0;
}
-#ifdef CONFIG_COMPAT
static int32_t msm_flash_init_prepare(
struct msm_flash_ctrl_t *flash_ctrl,
struct msm_flash_cfg_data_t *flash_data)
{
+ #ifdef CONFIG_COMPAT
+ struct msm_flash_cfg_data_t flash_data_k;
+ struct msm_flash_init_info_t flash_init_info;
+ int32_t i = 0;
+
+ if (!is_compat_task()) {
+ /* For the 64-bit use case, the data must be copied to local memory. */
+ flash_data_k.cfg_type = flash_data->cfg_type;
+ for (i = 0; i < MAX_LED_TRIGGERS; i++) {
+ flash_data_k.flash_current[i] =
+ flash_data->flash_current[i];
+ flash_data_k.flash_duration[i] =
+ flash_data->flash_duration[i];
+ }
+
+ flash_data_k.cfg.flash_init_info = &flash_init_info;
+ if (copy_from_user(&flash_init_info,
+ (void __user *)(flash_data->cfg.flash_init_info),
+ sizeof(struct msm_flash_init_info_t))) {
+ pr_err("%s copy_from_user failed %d\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+ return msm_flash_init(flash_ctrl, &flash_data_k);
+ }
+ /*
+ * For the 32-bit use case, the userspace data has already been
+ * copied to local memory in msm_flash_subdev_do_ioctl(),
+ * so no copy_from_user() is needed here.
+ */
return msm_flash_init(flash_ctrl, flash_data);
-}
#else
-static int32_t msm_flash_init_prepare(
- struct msm_flash_ctrl_t *flash_ctrl,
- struct msm_flash_cfg_data_t *flash_data)
-{
struct msm_flash_cfg_data_t flash_data_k;
struct msm_flash_init_info_t flash_init_info;
int32_t i = 0;
@@ -535,15 +558,15 @@ static int32_t msm_flash_init_prepare(
flash_data_k.cfg.flash_init_info = &flash_init_info;
if (copy_from_user(&flash_init_info,
- (void *)(flash_data->cfg.flash_init_info),
+ (void __user *)(flash_data->cfg.flash_init_info),
sizeof(struct msm_flash_init_info_t))) {
pr_err("%s copy_from_user failed %d\n",
__func__, __LINE__);
return -EFAULT;
}
return msm_flash_init(flash_ctrl, &flash_data_k);
-}
#endif
+}
static int32_t msm_flash_low(
struct msm_flash_ctrl_t *flash_ctrl,
diff --git a/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c b/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c
index 236660dca3fb..aa7658f359ac 100644
--- a/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c
+++ b/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c
@@ -781,6 +781,7 @@ static long msm_ois_subdev_do_ioctl(
u32 = (struct msm_ois_cfg_data32 *)arg;
parg = arg;
+
switch (cmd) {
case VIDIOC_MSM_OIS_CFG32:
cmd = VIDIOC_MSM_OIS_CFG;
@@ -818,7 +819,6 @@ static long msm_ois_subdev_do_ioctl(
settings.reg_setting =
compat_ptr(settings32.reg_setting);
- ois_data.cfgtype = u32->cfgtype;
ois_data.cfg.settings = &settings;
parg = &ois_data;
break;
diff --git a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
index a04d7ca73fe1..63c3595a3f85 100644
--- a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
+++ b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
@@ -1057,14 +1057,18 @@ static int msm_fd_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
a->value = ctx->format.size->work_size;
break;
case V4L2_CID_FD_WORK_MEMORY_FD:
+ mutex_lock(&ctx->fd_device->recovery_lock);
if (ctx->work_buf.fd != -1)
msm_fd_hw_unmap_buffer(&ctx->work_buf);
if (a->value >= 0) {
ret = msm_fd_hw_map_buffer(&ctx->mem_pool,
a->value, &ctx->work_buf);
- if (ret < 0)
+ if (ret < 0) {
+ mutex_unlock(&ctx->fd_device->recovery_lock);
return ret;
+ }
}
+ mutex_unlock(&ctx->fd_device->recovery_lock);
break;
default:
return -EINVAL;
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
index 3f900ded090a..9c3bd7b41ce9 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
@@ -1025,6 +1025,9 @@ static void msm_ispif_config_stereo(struct ispif_device *ispif,
enum msm_ispif_vfe_intf vfe_intf;
uint32_t stereo_3d_threshold = STEREO_DEFAULT_3D_THRESHOLD;
+ if (params->num > MAX_PARAM_ENTRIES)
+ return;
+
for (i = 0; i < params->num; i++) {
vfe_intf = params->entries[i].vfe_intf;
if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
diff --git a/drivers/media/platform/msm/camera_v2/msm.c b/drivers/media/platform/msm/camera_v2/msm.c
index 9cb7d5299ef8..4e5dc66d94a9 100644
--- a/drivers/media/platform/msm/camera_v2/msm.c
+++ b/drivers/media/platform/msm/camera_v2/msm.c
@@ -287,6 +287,7 @@ void msm_delete_stream(unsigned int session_id, unsigned int stream_id)
return;
while (1) {
+ unsigned long wl_flags;
if (try_count > 5) {
pr_err("%s : not able to delete stream %d\n",
@@ -294,18 +295,20 @@ void msm_delete_stream(unsigned int session_id, unsigned int stream_id)
break;
}
- write_lock(&session->stream_rwlock);
+ write_lock_irqsave(&session->stream_rwlock, wl_flags);
try_count++;
stream = msm_queue_find(&session->stream_q, struct msm_stream,
list, __msm_queue_find_stream, &stream_id);
if (!stream) {
- write_unlock(&session->stream_rwlock);
+ write_unlock_irqrestore(&session->stream_rwlock,
+ wl_flags);
return;
}
if (msm_vb2_get_stream_state(stream) != 1) {
- write_unlock(&session->stream_rwlock);
+ write_unlock_irqrestore(&session->stream_rwlock,
+ wl_flags);
continue;
}
@@ -315,7 +318,7 @@ void msm_delete_stream(unsigned int session_id, unsigned int stream_id)
kfree(stream);
stream = NULL;
spin_unlock_irqrestore(&(session->stream_q.lock), flags);
- write_unlock(&session->stream_rwlock);
+ write_unlock_irqrestore(&session->stream_rwlock, wl_flags);
break;
}
diff --git a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
index 719b14226067..e271c7fcd1b6 100644
--- a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
+++ b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
@@ -47,22 +47,23 @@ int msm_vb2_buf_init(struct vb2_buffer *vb)
struct msm_session *session;
struct msm_vb2_buffer *msm_vb2_buf;
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ unsigned long rl_flags;
session = msm_get_session_from_vb2q(vb->vb2_queue);
if (IS_ERR_OR_NULL(session))
return -EINVAL;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream_from_vb2q(vb->vb2_queue);
if (!stream) {
pr_err("%s: Couldn't find stream\n", __func__);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return -EINVAL;
}
msm_vb2_buf = container_of(vbuf, struct msm_vb2_buffer, vb2_v4l2_buf);
msm_vb2_buf->in_freeq = 0;
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return 0;
}
@@ -71,7 +72,7 @@ static void msm_vb2_buf_queue(struct vb2_buffer *vb)
struct msm_vb2_buffer *msm_vb2;
struct msm_stream *stream;
struct msm_session *session;
- unsigned long flags;
+ unsigned long flags, rl_flags;
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
msm_vb2 = container_of(vbuf, struct msm_vb2_buffer, vb2_v4l2_buf);
@@ -84,19 +85,19 @@ static void msm_vb2_buf_queue(struct vb2_buffer *vb)
if (IS_ERR_OR_NULL(session))
return;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream_from_vb2q(vb->vb2_queue);
if (!stream) {
pr_err("%s:%d] NULL stream", __func__, __LINE__);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return;
}
spin_lock_irqsave(&stream->stream_lock, flags);
list_add_tail(&msm_vb2->list, &stream->queued_list);
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
}
static void msm_vb2_buf_finish(struct vb2_buffer *vb)
@@ -104,26 +105,26 @@ static void msm_vb2_buf_finish(struct vb2_buffer *vb)
struct msm_vb2_buffer *msm_vb2;
struct msm_stream *stream;
struct msm_session *session;
- unsigned long flags;
+ unsigned long flags, rl_flags;
struct msm_vb2_buffer *msm_vb2_entry, *temp;
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
msm_vb2 = container_of(vbuf, struct msm_vb2_buffer, vb2_v4l2_buf);
if (!msm_vb2) {
pr_err("%s:%d] vb2_buf NULL", __func__, __LINE__);
- return;
+ return;
}
session = msm_get_session_from_vb2q(vb->vb2_queue);
if (IS_ERR_OR_NULL(session))
return;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream_from_vb2q(vb->vb2_queue);
if (!stream) {
pr_err("%s:%d] NULL stream", __func__, __LINE__);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return;
}
@@ -136,7 +137,7 @@ static void msm_vb2_buf_finish(struct vb2_buffer *vb)
}
}
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return;
}
@@ -145,19 +146,19 @@ static void msm_vb2_stop_stream(struct vb2_queue *q)
struct msm_vb2_buffer *msm_vb2, *temp;
struct msm_stream *stream;
struct msm_session *session;
- unsigned long flags;
+ unsigned long flags, rl_flags;
struct vb2_v4l2_buffer *vb2_v4l2_buf;
session = msm_get_session_from_vb2q(q);
if (IS_ERR_OR_NULL(session))
return;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream_from_vb2q(q);
if (!stream) {
pr_err_ratelimited("%s:%d] NULL stream", __func__, __LINE__);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return;
}
@@ -177,7 +178,7 @@ static void msm_vb2_stop_stream(struct vb2_queue *q)
msm_vb2->in_freeq = 0;
}
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
}
int msm_vb2_get_stream_state(struct msm_stream *stream)
@@ -255,17 +256,17 @@ static struct vb2_v4l2_buffer *msm_vb2_get_buf(int session_id,
struct msm_session *session;
struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL;
struct msm_vb2_buffer *msm_vb2 = NULL;
- unsigned long flags;
+ unsigned long flags, rl_flags;
session = msm_get_session(session_id);
if (IS_ERR_OR_NULL(session))
return NULL;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream(session, stream_id);
if (IS_ERR_OR_NULL(stream)) {
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return NULL;
}
@@ -291,7 +292,7 @@ static struct vb2_v4l2_buffer *msm_vb2_get_buf(int session_id,
vb2_v4l2_buf = NULL;
end:
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return vb2_v4l2_buf;
}
@@ -302,18 +303,18 @@ static struct vb2_v4l2_buffer *msm_vb2_get_buf_by_idx(int session_id,
struct msm_session *session;
struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL;
struct msm_vb2_buffer *msm_vb2 = NULL;
- unsigned long flags;
+ unsigned long flags, rl_flags;
session = msm_get_session(session_id);
if (IS_ERR_OR_NULL(session))
return NULL;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream(session, stream_id);
if (IS_ERR_OR_NULL(stream)) {
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return NULL;
}
@@ -337,7 +338,7 @@ static struct vb2_v4l2_buffer *msm_vb2_get_buf_by_idx(int session_id,
vb2_v4l2_buf = NULL;
end:
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return vb2_v4l2_buf;
}
@@ -349,17 +350,17 @@ static int msm_vb2_put_buf(struct vb2_v4l2_buffer *vb, int session_id,
struct msm_vb2_buffer *msm_vb2;
struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL;
int rc = 0;
- unsigned long flags;
+ unsigned long flags, rl_flags;
session = msm_get_session(session_id);
if (IS_ERR_OR_NULL(session))
return -EINVAL;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream(session, stream_id);
if (IS_ERR_OR_NULL(stream)) {
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return -EINVAL;
}
@@ -374,7 +375,8 @@ static int msm_vb2_put_buf(struct vb2_v4l2_buffer *vb, int session_id,
pr_err("VB buffer is INVALID vb=%pK, ses_id=%d, str_id=%d\n",
vb, session_id, stream_id);
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock,
+ rl_flags);
return -EINVAL;
}
msm_vb2 =
@@ -391,7 +393,7 @@ static int msm_vb2_put_buf(struct vb2_v4l2_buffer *vb, int session_id,
rc = -EINVAL;
}
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return rc;
}
@@ -399,7 +401,7 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id,
unsigned int stream_id, uint32_t sequence,
struct timeval *ts, uint32_t reserved)
{
- unsigned long flags;
+ unsigned long flags, rl_flags;
struct msm_vb2_buffer *msm_vb2;
struct msm_stream *stream;
struct msm_session *session;
@@ -410,11 +412,11 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id,
if (IS_ERR_OR_NULL(session))
return -EINVAL;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream(session, stream_id);
if (IS_ERR_OR_NULL(stream)) {
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return -EINVAL;
}
@@ -429,7 +431,8 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id,
pr_err("VB buffer is INVALID ses_id=%d, str_id=%d, vb=%pK\n",
session_id, stream_id, vb);
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock,
+ rl_flags);
return -EINVAL;
}
msm_vb2 =
@@ -450,7 +453,7 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id,
rc = -EINVAL;
}
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return rc;
}
@@ -461,18 +464,18 @@ long msm_vb2_return_buf_by_idx(int session_id, unsigned int stream_id,
struct msm_session *session;
struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL;
struct msm_vb2_buffer *msm_vb2 = NULL;
- unsigned long flags;
+ unsigned long flags, rl_flags;
long rc = -EINVAL;
session = msm_get_session(session_id);
if (IS_ERR_OR_NULL(session))
return rc;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream(session, stream_id);
if (IS_ERR_OR_NULL(stream)) {
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return -EINVAL;
}
@@ -501,14 +504,14 @@ long msm_vb2_return_buf_by_idx(int session_id, unsigned int stream_id,
end:
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return rc;
}
EXPORT_SYMBOL(msm_vb2_return_buf_by_idx);
static int msm_vb2_flush_buf(int session_id, unsigned int stream_id)
{
- unsigned long flags;
+ unsigned long flags, rl_flags;
struct msm_vb2_buffer *msm_vb2;
struct msm_stream *stream;
struct msm_session *session;
@@ -518,11 +521,11 @@ static int msm_vb2_flush_buf(int session_id, unsigned int stream_id)
if (IS_ERR_OR_NULL(session))
return -EINVAL;
- read_lock(&session->stream_rwlock);
+ read_lock_irqsave(&session->stream_rwlock, rl_flags);
stream = msm_get_stream(session, stream_id);
if (IS_ERR_OR_NULL(stream)) {
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return -EINVAL;
}
@@ -534,7 +537,7 @@ static int msm_vb2_flush_buf(int session_id, unsigned int stream_id)
msm_vb2->in_freeq = 0;
}
spin_unlock_irqrestore(&stream->stream_lock, flags);
- read_unlock(&session->stream_rwlock);
+ read_unlock_irqrestore(&session->stream_rwlock, rl_flags);
return 0;
}
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c
index 3f079fe2c173..457bd1730232 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -575,6 +575,8 @@ int msm_camera_get_dt_power_setting_data(struct device_node *of_node,
ps[i].seq_val = SENSOR_GPIO_CUSTOM1;
else if (!strcmp(seq_name, "sensor_gpio_custom2"))
ps[i].seq_val = SENSOR_GPIO_CUSTOM2;
+ else if (!strcmp(seq_name, "sensor_gpio_custom3"))
+ ps[i].seq_val = SENSOR_GPIO_CUSTOM3;
else
rc = -EILSEQ;
break;
@@ -1078,6 +1080,27 @@ int msm_camera_init_gpio_pin_tbl(struct device_node *of_node,
rc = 0;
}
+ rc = of_property_read_u32(of_node, "qcom,gpio-custom3", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-custom3 failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-custom3 invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_CUSTOM3] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_CUSTOM3] = 1;
+ CDBG("%s qcom,gpio-custom3 %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_CUSTOM3]);
+ } else {
+ rc = 0;
+ }
+
return rc;
ERROR:
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 66f3c9f609f2..cf897947fff2 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -8434,9 +8434,10 @@ out:
*/
static int qseecom_check_whitelist_feature(void)
{
- int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
+ u64 version = 0;
+ int ret = scm_get_feat_version(FEATURE_ID_WHITELIST, &version);
- return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
+ return (ret == 0) && (version >= MAKE_WHITELIST_VERSION(1, 0, 0));
}
static int qseecom_probe(struct platform_device *pdev)
@@ -8530,7 +8531,11 @@ static int qseecom_probe(struct platform_device *pdev)
qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
pr_err("Ion client cannot be created\n");
- rc = -ENOMEM;
+
+ if (qseecom.ion_clnt != ERR_PTR(-EPROBE_DEFER))
+ rc = -ENOMEM;
+ else
+ rc = -EPROBE_DEFER;
goto exit_del_cdev;
}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 01e5502917f7..2a58061f3de6 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -4623,6 +4623,10 @@ static int mmc_blk_probe(struct mmc_card *card)
dev_set_drvdata(&card->dev, md);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ mmc_set_bus_resume_policy(card->host, 1);
+#endif
+
if (mmc_add_disk(md))
goto out;
@@ -4666,6 +4670,9 @@ static void mmc_blk_remove(struct mmc_card *card)
pm_runtime_put_noidle(&card->dev);
mmc_blk_remove_req(md);
dev_set_drvdata(&card->dev, NULL);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ mmc_set_bus_resume_policy(card->host, 0);
+#endif
}
static int _mmc_blk_suspend(struct mmc_card *card, bool wait)
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 6906bddb229f..10b33840e5e5 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -1886,6 +1886,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err_wmi_detach;
}
+ /* If firmware indicates Full Rx Reorder support it must be used in a
+ * slightly different manner. Let HTT code know.
+ */
+ ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
+ ar->wmi.svc_map));
+
status = ath10k_htt_rx_alloc(&ar->htt);
if (status) {
ath10k_err(ar, "failed to alloc htt rx: %d\n", status);
@@ -1997,12 +2003,6 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
}
}
- /* If firmware indicates Full Rx Reorder support it must be used in a
- * slightly different manner. Let HTT code know.
- */
- ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
- ar->wmi.svc_map));
-
status = ath10k_htt_rx_ring_refill(ar);
if (status) {
ath10k_err(ar, "failed to refill htt rx ring: %d\n", status);
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 9406b6f71a6b..75e81991913a 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -107,8 +107,8 @@ static struct ce_attr host_ce_config_wlan[] = {
{
.flags = CE_ATTR_FLAGS,
.src_nentries = 0,
- .src_sz_max = 512,
- .dest_nentries = 512,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
.recv_cb = ath10k_snoc_htt_rx_cb,
},
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 63bb7686b811..94861020af12 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -960,7 +960,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
wil_hex_dump_misc("mgmt tx frame ", DUMP_PREFIX_OFFSET, 16, 1, buf,
len, true);
- if (len < sizeof(struct ieee80211_mgmt))
+ if (len < sizeof(struct ieee80211_hdr_3addr))
return -EINVAL;
cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
diff --git a/drivers/net/wireless/cnss/cnss_pci.c b/drivers/net/wireless/cnss/cnss_pci.c
index 22c59d8c3c45..c9412ca6e794 100644
--- a/drivers/net/wireless/cnss/cnss_pci.c
+++ b/drivers/net/wireless/cnss/cnss_pci.c
@@ -134,7 +134,6 @@ static DEFINE_SPINLOCK(pci_link_down_lock);
#define FW_IMAGE_MISSION (0x02)
#define FW_IMAGE_BDATA (0x03)
#define FW_IMAGE_PRINT (0x04)
-#define FW_SETUP_DELAY 2000
#define SEG_METADATA (0x01)
#define SEG_NON_PAGED (0x02)
@@ -274,10 +273,8 @@ static struct cnss_data {
u32 fw_dma_size;
u32 fw_seg_count;
struct segment_memory fw_seg_mem[MAX_NUM_OF_SEGMENTS];
- atomic_t fw_store_in_progress;
/* Firmware setup complete lock */
struct mutex fw_setup_stat_lock;
- struct completion fw_setup_complete;
void *bdata_cpu;
dma_addr_t bdata_dma;
u32 bdata_dma_size;
@@ -1374,15 +1371,6 @@ int cnss_get_fw_image(struct image_desc_info *image_desc_info)
!penv->fw_seg_count || !penv->bdata_seg_count)
return -EINVAL;
- /* Check for firmware setup trigger by usersapce is in progress
- * and wait for complition of firmware setup.
- */
-
- if (atomic_read(&penv->fw_store_in_progress)) {
- wait_for_completion_timeout(&penv->fw_setup_complete,
- msecs_to_jiffies(FW_SETUP_DELAY));
- }
-
mutex_lock(&penv->fw_setup_stat_lock);
image_desc_info->fw_addr = penv->fw_dma;
image_desc_info->fw_size = penv->fw_dma_size;
@@ -1627,7 +1615,9 @@ static int cnss_wlan_pci_probe(struct pci_dev *pdev,
goto err_pcie_suspend;
}
+ mutex_lock(&penv->fw_setup_stat_lock);
cnss_wlan_fw_mem_alloc(pdev);
+ mutex_unlock(&penv->fw_setup_stat_lock);
ret = device_create_file(&penv->pldev->dev, &dev_attr_wlan_setup);
@@ -1874,17 +1864,11 @@ static ssize_t fw_image_setup_store(struct device *dev,
if (!penv)
return -ENODEV;
- if (atomic_read(&penv->fw_store_in_progress)) {
- pr_info("%s: Firmware setup in progress\n", __func__);
- return 0;
- }
-
- atomic_set(&penv->fw_store_in_progress, 1);
- init_completion(&penv->fw_setup_complete);
+ mutex_lock(&penv->fw_setup_stat_lock);
+ pr_info("%s: Firmware setup in progress\n", __func__);
if (kstrtoint(buf, 0, &val)) {
- atomic_set(&penv->fw_store_in_progress, 0);
- complete(&penv->fw_setup_complete);
+ mutex_unlock(&penv->fw_setup_stat_lock);
return -EINVAL;
}
@@ -1895,8 +1879,7 @@ static ssize_t fw_image_setup_store(struct device *dev,
if (ret != 0) {
pr_err("%s: Invalid parsing of FW image files %d",
__func__, ret);
- atomic_set(&penv->fw_store_in_progress, 0);
- complete(&penv->fw_setup_complete);
+ mutex_unlock(&penv->fw_setup_stat_lock);
return -EINVAL;
}
penv->fw_image_setup = val;
@@ -1906,9 +1889,8 @@ static ssize_t fw_image_setup_store(struct device *dev,
penv->bmi_test = val;
}
- atomic_set(&penv->fw_store_in_progress, 0);
- complete(&penv->fw_setup_complete);
-
+ pr_info("%s: Firmware setup completed\n", __func__);
+ mutex_unlock(&penv->fw_setup_stat_lock);
return count;
}
@@ -2007,16 +1989,21 @@ int cnss_get_codeswap_struct(struct codeswap_codeseg_info *swap_seg)
{
struct codeswap_codeseg_info *cnss_seg_info = penv->cnss_seg_info;
+ mutex_lock(&penv->fw_setup_stat_lock);
if (!cnss_seg_info) {
swap_seg = NULL;
+ mutex_unlock(&penv->fw_setup_stat_lock);
return -ENOENT;
}
+
if (!atomic_read(&penv->fw_available)) {
pr_debug("%s: fw is not available\n", __func__);
+ mutex_unlock(&penv->fw_setup_stat_lock);
return -ENOENT;
}
*swap_seg = *cnss_seg_info;
+ mutex_unlock(&penv->fw_setup_stat_lock);
return 0;
}
@@ -2035,15 +2022,6 @@ static void cnss_wlan_memory_expansion(void)
u_int32_t total_length = 0;
struct pci_dev *pdev;
- /* Check for firmware setup trigger by usersapce is in progress
- * and wait for complition of firmware setup.
- */
-
- if (atomic_read(&penv->fw_store_in_progress)) {
- wait_for_completion_timeout(&penv->fw_setup_complete,
- msecs_to_jiffies(FW_SETUP_DELAY));
- }
-
mutex_lock(&penv->fw_setup_stat_lock);
filename = cnss_wlan_get_evicted_data_file();
pdev = penv->pdev;
@@ -2859,6 +2837,7 @@ static int cnss_probe(struct platform_device *pdev)
penv->vreg_info.wlan_reg = NULL;
penv->vreg_info.state = VREG_OFF;
penv->pci_register_again = false;
+ mutex_init(&penv->fw_setup_stat_lock);
ret = cnss_wlan_get_resources(pdev);
if (ret)
@@ -3016,8 +2995,6 @@ skip_ramdump:
memset(phys_to_virt(0), 0, SZ_4K);
#endif
- atomic_set(&penv->fw_store_in_progress, 0);
- mutex_init(&penv->fw_setup_stat_lock);
ret = device_create_file(dev, &dev_attr_fw_image_setup);
if (ret) {
pr_err("cnss: fw_image_setup sys file creation failed\n");
diff --git a/drivers/net/wireless/cnss2/debug.c b/drivers/net/wireless/cnss2/debug.c
index 360ab31c61dd..c3bcb38f428f 100644
--- a/drivers/net/wireless/cnss2/debug.c
+++ b/drivers/net/wireless/cnss2/debug.c
@@ -15,6 +15,7 @@
#include <linux/debugfs.h>
#include "main.h"
#include "debug.h"
+#include "pci.h"
#define CNSS_IPC_LOG_PAGES 32
@@ -121,13 +122,90 @@ static int cnss_stats_open(struct inode *inode, struct file *file)
}
static const struct file_operations cnss_stats_fops = {
- .read = seq_read,
- .release = single_release,
- .open = cnss_stats_open,
- .owner = THIS_MODULE,
- .llseek = seq_lseek,
+ .read = seq_read,
+ .release = single_release,
+ .open = cnss_stats_open,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
};
+static ssize_t cnss_dev_boot_debug_write(struct file *fp,
+ const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ struct cnss_plat_data *plat_priv =
+ ((struct seq_file *)fp->private_data)->private;
+ char buf[64];
+ char *cmd;
+ unsigned int len = 0;
+ int ret = 0;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ cmd = buf;
+
+ if (sysfs_streq(cmd, "on")) {
+ ret = cnss_power_on_device(plat_priv);
+ } else if (sysfs_streq(cmd, "enumerate")) {
+ ret = cnss_pci_init(plat_priv);
+ } else if (sysfs_streq(cmd, "download")) {
+ ret = cnss_pci_start_mhi(plat_priv->bus_priv);
+ } else {
+ cnss_pr_err("Device boot debugfs command is invalid\n");
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static int cnss_dev_boot_debug_show(struct seq_file *s, void *data)
+{
+ seq_puts(s, "\nUsage: echo <action> > <debugfs_path>/cnss/dev_boot\n");
+ seq_puts(s, "<action> can be one of below:\n");
+ seq_puts(s, "on: turn on device power, assert WLAN_EN\n");
+ seq_puts(s, "enumerate: de-assert PERST, enumerate PCIe\n");
+ seq_puts(s, "download: download FW and do QMI handshake with FW\n");
+
+ return 0;
+}
+
+static int cnss_dev_boot_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cnss_dev_boot_debug_show, inode->i_private);
+}
+
+static const struct file_operations cnss_dev_boot_debug_fops = {
+ .read = seq_read,
+ .write = cnss_dev_boot_debug_write,
+ .release = single_release,
+ .open = cnss_dev_boot_debug_open,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
+};
+
+#ifdef CONFIG_CNSS2_DEBUG
+static int cnss_create_debug_only_node(struct cnss_plat_data *plat_priv)
+{
+ struct dentry *root_dentry = plat_priv->root_dentry;
+
+ debugfs_create_file("dev_boot", 0600, root_dentry, plat_priv,
+ &cnss_dev_boot_debug_fops);
+
+ return 0;
+}
+#else
+static int cnss_create_debug_only_node(struct cnss_plat_data *plat_priv)
+{
+ return 0;
+}
+#endif
+
int cnss_debugfs_create(struct cnss_plat_data *plat_priv)
{
int ret = 0;
@@ -139,11 +217,16 @@ int cnss_debugfs_create(struct cnss_plat_data *plat_priv)
cnss_pr_err("Unable to create debugfs %d\n", ret);
goto out;
}
+
plat_priv->root_dentry = root_dentry;
+
debugfs_create_file("pin_connect_result", 0644, root_dentry, plat_priv,
&cnss_pin_connect_fops);
debugfs_create_file("stats", 0644, root_dentry, plat_priv,
&cnss_stats_fops);
+
+ cnss_create_debug_only_node(plat_priv);
+
out:
return ret;
}
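cnss_dev_boot_debug_write() above uses the usual bounded debugfs write pattern: copy at most sizeof(buf) - 1 bytes from user space, NUL-terminate, then compare commands with sysfs_streq(). A small self-contained sketch of that pattern; example_power_on() stands in for whatever action the command triggers and is an assumed helper, not part of the driver:

/* Hedged sketch of a bounded debugfs command parser. */
static ssize_t example_debug_write(struct file *fp, const char __user *user_buf,
				   size_t count, loff_t *off)
{
	char buf[64];
	size_t len;
	int ret;

	len = min(count, sizeof(buf) - 1);	/* never overrun the stack buffer */
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;
	buf[len] = '\0';			/* sysfs_streq() needs a terminated string */

	if (sysfs_streq(buf, "on"))
		ret = example_power_on();	/* placeholder action */
	else
		ret = -EINVAL;

	return ret ? ret : count;
}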
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index e114d0c51a07..8838a1319629 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -57,6 +57,7 @@ MODULE_PARM_DESC(enable_waltest, "Enable to handle firmware waltest");
enum cnss_debug_quirks {
LINK_DOWN_SELF_RECOVERY,
+ SKIP_DEVICE_BOOT,
};
unsigned long quirks;
@@ -2232,13 +2233,15 @@ static int cnss_probe(struct platform_device *plat_dev)
if (ret)
goto reset_ctx;
- ret = cnss_power_on_device(plat_priv);
- if (ret)
- goto free_res;
+ if (!test_bit(SKIP_DEVICE_BOOT, &quirks)) {
+ ret = cnss_power_on_device(plat_priv);
+ if (ret)
+ goto free_res;
- ret = cnss_pci_init(plat_priv);
- if (ret)
- goto power_off;
+ ret = cnss_pci_init(plat_priv);
+ if (ret)
+ goto power_off;
+ }
ret = cnss_register_esoc(plat_priv);
if (ret)
@@ -2291,9 +2294,11 @@ unreg_bus_scale:
unreg_esoc:
cnss_unregister_esoc(plat_priv);
deinit_pci:
- cnss_pci_deinit(plat_priv);
+ if (!test_bit(SKIP_DEVICE_BOOT, &quirks))
+ cnss_pci_deinit(plat_priv);
power_off:
- cnss_power_off_device(plat_priv);
+ if (!test_bit(SKIP_DEVICE_BOOT, &quirks))
+ cnss_power_off_device(plat_priv);
free_res:
cnss_put_resources(plat_priv);
reset_ctx:
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index edc39af2d361..ce5a7b2bc88e 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -787,36 +787,6 @@ int cnss_pci_get_bar_info(struct cnss_pci_data *pci_priv, void __iomem **va,
return 0;
}
-#ifdef CONFIG_CNSS_QCA6290
-#define PCI_MAX_BAR_SIZE 0xD00000
-
-static void __iomem *cnss_pci_iomap(struct pci_dev *dev, int bar,
- unsigned long maxlen)
-{
- resource_size_t start = pci_resource_start(dev, bar);
- resource_size_t len = PCI_MAX_BAR_SIZE;
- unsigned long flags = pci_resource_flags(dev, bar);
-
- if (!len || !start)
- return NULL;
-
- if ((flags & IORESOURCE_IO) || (flags & IORESOURCE_MEM)) {
- if (flags & IORESOURCE_CACHEABLE && !(flags & IORESOURCE_IO))
- return ioremap(start, len);
- else
- return ioremap_nocache(start, len);
- }
-
- return NULL;
-}
-#else
-static void __iomem *cnss_pci_iomap(struct pci_dev *dev, int bar,
- unsigned long maxlen)
-{
- return pci_iomap(dev, bar, maxlen);
-}
-#endif
-
static struct cnss_msi_config msi_config = {
.total_vectors = 32,
.total_users = 4,
@@ -1003,7 +973,7 @@ static int cnss_pci_enable_bus(struct cnss_pci_data *pci_priv)
pci_set_master(pci_dev);
- pci_priv->bar = cnss_pci_iomap(pci_dev, PCI_BAR_NUM, 0);
+ pci_priv->bar = pci_iomap(pci_dev, PCI_BAR_NUM, 0);
if (!pci_priv->bar) {
cnss_pr_err("Failed to do PCI IO map!\n");
ret = -EIO;
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
index 99163d51a497..d1c0423b4517 100644
--- a/drivers/net/wireless/cnss2/qmi.c
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -50,6 +50,30 @@ enum cnss_bdf_type {
CNSS_BDF_ELF,
};
+static char *cnss_qmi_mode_to_str(enum wlfw_driver_mode_enum_v01 mode)
+{
+ switch (mode) {
+ case QMI_WLFW_MISSION_V01:
+ return "MISSION";
+ case QMI_WLFW_FTM_V01:
+ return "FTM";
+ case QMI_WLFW_EPPING_V01:
+ return "EPPING";
+ case QMI_WLFW_WALTEST_V01:
+ return "WALTEST";
+ case QMI_WLFW_OFF_V01:
+ return "OFF";
+ case QMI_WLFW_CCPM_V01:
+ return "CCPM";
+ case QMI_WLFW_QVIT_V01:
+ return "QVIT";
+ case QMI_WLFW_CALIBRATION_V01:
+ return "CALIBRATION";
+ default:
+ return "UNKNOWN";
+ }
+}
+
static void cnss_wlfw_clnt_notifier_work(struct work_struct *work)
{
struct cnss_plat_data *plat_priv =
@@ -585,8 +609,8 @@ int cnss_wlfw_wlan_mode_send_sync(struct cnss_plat_data *plat_priv,
if (!plat_priv)
return -ENODEV;
- cnss_pr_dbg("Sending mode message, state: 0x%lx, mode: %d\n",
- plat_priv->driver_state, mode);
+ cnss_pr_dbg("Sending mode message, mode: %s(%d), state: 0x%lx\n",
+ cnss_qmi_mode_to_str(mode), mode, plat_priv->driver_state);
if (mode == QMI_WLFW_OFF_V01 &&
test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
@@ -617,14 +641,15 @@ int cnss_wlfw_wlan_mode_send_sync(struct cnss_plat_data *plat_priv,
cnss_pr_dbg("WLFW service is disconnected while sending mode off request.\n");
return 0;
}
- cnss_pr_err("Failed to send mode request, mode: %d, err = %d\n",
- mode, ret);
+ cnss_pr_err("Failed to send mode request, mode: %s(%d), err: %d\n",
+ cnss_qmi_mode_to_str(mode), mode, ret);
goto out;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- cnss_pr_err("Mode request failed, mode: %d, result: %d err: %d\n",
- mode, resp.resp.result, resp.resp.error);
+ cnss_pr_err("Mode request failed, mode: %s(%d), result: %d, err: %d\n",
+ cnss_qmi_mode_to_str(mode), mode, resp.resp.result,
+ resp.resp.error);
ret = resp.resp.result;
goto out;
}
diff --git a/drivers/net/wireless/wcnss/wcnss_vreg.c b/drivers/net/wireless/wcnss/wcnss_vreg.c
index 82b90ad00f8b..d0a74744f70a 100644
--- a/drivers/net/wireless/wcnss/wcnss_vreg.c
+++ b/drivers/net/wireless/wcnss/wcnss_vreg.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2015, 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -417,13 +417,18 @@ static void wcnss_vregs_off(struct vregs_info regulators[], uint size,
if (regulators[i].state == VREG_NULL_CONFIG)
continue;
+ if (cfg->wcn_external_gpio_support) {
+ if (!memcmp(regulators[i].name, VDD_PA, sizeof(VDD_PA)))
+ continue;
+ }
+
/* Remove PWM mode */
if (regulators[i].state & VREG_OPTIMUM_MODE_MASK) {
- rc = regulator_set_optimum_mode(
- regulators[i].regulator, 0);
- if (rc < 0)
- pr_err("regulator_set_optimum_mode(%s) failed (%d)\n",
- regulators[i].name, rc);
+ rc = regulator_set_load(regulators[i].regulator, 0);
+ if (rc < 0) {
+ pr_err("regulator set load(%s) failed (%d)\n",
+ regulators[i].name, rc);
+ }
}
/* Set voltage to lowest level */
@@ -478,7 +483,12 @@ static int wcnss_vregs_on(struct device *dev,
}
for (i = 0; i < size; i++) {
- /* Get regulator source */
+ if (cfg->wcn_external_gpio_support) {
+ if (!memcmp(regulators[i].name, VDD_PA, sizeof(VDD_PA)))
+ continue;
+ }
+
+ /* Get regulator source */
regulators[i].regulator =
regulator_get(dev, regulators[i].name);
if (IS_ERR(regulators[i].regulator)) {
@@ -518,11 +528,11 @@ static int wcnss_vregs_on(struct device *dev,
/* Vote for PWM/PFM mode if needed */
if (voltage_level[i].uA_load && (reg_cnt > 0)) {
- rc = regulator_set_optimum_mode(regulators[i].regulator,
- voltage_level[i].uA_load);
+ rc = regulator_set_load(regulators[i].regulator,
+ voltage_level[i].uA_load);
if (rc < 0) {
- pr_err("regulator_set_optimum_mode(%s) failed (%d)\n",
- regulators[i].name, rc);
+ pr_err("regulator set load(%s) failed (%d)\n",
+ regulators[i].name, rc);
goto fail;
}
regulators[i].state |= VREG_OPTIMUM_MODE_MASK;
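The wcnss_vreg.c hunks above replace the retired regulator_set_optimum_mode() helper with regulator_set_load(), which takes the expected load in uA and lets the regulator core pick PWM or PFM mode. A hedged consumer-side sketch of the call order; the supply name and levels are illustrative:

/* Hedged sketch: voting voltage and load on a regulator with the current API. */
static int example_vreg_on(struct device *dev)
{
	struct regulator *reg;
	int rc;

	reg = devm_regulator_get(dev, "vdd-example");	/* supply name is an assumption */
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	rc = regulator_set_voltage(reg, 1800000, 1800000);
	if (rc < 0)
		return rc;

	rc = regulator_set_load(reg, 100000);	/* replaces regulator_set_optimum_mode() */
	if (rc < 0)
		return rc;

	return regulator_enable(reg);
}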
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index 505a9e016777..e99d46ca51b0 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -36,6 +36,7 @@
#include <linux/qpnp/qpnp-adc.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_qos.h>
+#include <linux/bitops.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/subsystem_notif.h>
@@ -56,6 +57,7 @@
#define WCNSS_PM_QOS_TIMEOUT 15000
#define IS_CAL_DATA_PRESENT 0
#define WAIT_FOR_CBC_IND 2
+#define WCNSS_DUAL_BAND_CAPABILITY_OFFSET BIT(8)
/* module params */
#define WCNSS_CONFIG_UNSPECIFIED (-1)
@@ -119,6 +121,8 @@ static DEFINE_SPINLOCK(reg_spinlock);
#define PRONTO_PMU_COM_CSR_OFFSET 0x1040
#define PRONTO_PMU_SOFT_RESET_OFFSET 0x104C
+#define PRONTO_QFUSE_DUAL_BAND_OFFSET 0x0018
+
#define A2XB_CFG_OFFSET 0x00
#define A2XB_INT_SRC_OFFSET 0x0c
#define A2XB_TSTBUS_CTRL_OFFSET 0x14
@@ -381,6 +385,7 @@ static struct {
void __iomem *pronto_saw2_base;
void __iomem *pronto_pll_base;
void __iomem *pronto_mcu_base;
+ void __iomem *pronto_qfuse;
void __iomem *wlan_tx_status;
void __iomem *wlan_tx_phy_aborts;
void __iomem *wlan_brdg_err_source;
@@ -397,7 +402,7 @@ static struct {
int user_cal_read;
int user_cal_available;
u32 user_cal_rcvd;
- int user_cal_exp_size;
+ u32 user_cal_exp_size;
int iris_xo_mode_set;
int fw_vbatt_state;
char wlan_nv_macAddr[WLAN_MAC_ADDR_SIZE];
@@ -423,6 +428,9 @@ static struct {
int pc_disabled;
struct delayed_work wcnss_pm_qos_del_req;
struct mutex pm_qos_mutex;
+ struct clk *snoc_wcnss;
+ unsigned int snoc_wcnss_clock_freq;
+ bool is_dual_band_disabled;
} *penv = NULL;
static ssize_t wcnss_wlan_macaddr_store(struct device *dev,
@@ -595,7 +603,31 @@ void wcnss_pronto_is_a2xb_bus_stall(void *tst_addr, u32 fifo_mask, char *type)
}
}
-/* Log pronto debug registers before sending reset interrupt */
+int wcnss_get_dual_band_capability_info(struct platform_device *pdev)
+{
+ u32 reg = 0;
+ struct resource *res;
+
+ res = platform_get_resource_byname(
+ pdev, IORESOURCE_MEM, "pronto_qfuse");
+ if (!res)
+ return -EINVAL;
+
+ penv->pronto_qfuse = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(penv->pronto_qfuse))
+ return -ENOMEM;
+
+ reg = readl_relaxed(penv->pronto_qfuse +
+ PRONTO_QFUSE_DUAL_BAND_OFFSET);
+ if (reg & WCNSS_DUAL_BAND_CAPABILITY_OFFSET)
+ penv->is_dual_band_disabled = true;
+ else
+ penv->is_dual_band_disabled = false;
+
+ return 0;
+}
+
+/* Log pronto debug registers during SSR Timeout CB */
void wcnss_pronto_log_debug_regs(void)
{
void __iomem *reg_addr, *tst_addr, *tst_ctrl_addr;
@@ -1683,6 +1715,14 @@ int wcnss_wlan_iris_xo_mode(void)
}
EXPORT_SYMBOL(wcnss_wlan_iris_xo_mode);
+int wcnss_wlan_dual_band_disabled(void)
+{
+ if (penv && penv->pdev)
+ return penv->is_dual_band_disabled;
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(wcnss_wlan_dual_band_disabled);
void wcnss_suspend_notify(void)
{
@@ -2717,23 +2757,23 @@ wcnss_trigger_config(struct platform_device *pdev)
int is_pronto_vadc;
int is_pronto_v3;
int pil_retry = 0;
- int has_pronto_hw = of_property_read_bool(pdev->dev.of_node,
- "qcom,has-pronto-hw");
-
- is_pronto_vadc = of_property_read_bool(pdev->dev.of_node,
- "qcom,is-pronto-vadc");
+ struct wcnss_wlan_config *wlan_cfg = &penv->wlan_config;
+ struct device_node *node = (&pdev->dev)->of_node;
+ int has_pronto_hw = of_property_read_bool(node, "qcom,has-pronto-hw");
- is_pronto_v3 = of_property_read_bool(pdev->dev.of_node,
- "qcom,is-pronto-v3");
+ is_pronto_vadc = of_property_read_bool(node, "qcom,is-pronto-vadc");
+ is_pronto_v3 = of_property_read_bool(node, "qcom,is-pronto-v3");
- penv->is_vsys_adc_channel = of_property_read_bool(pdev->dev.of_node,
- "qcom,has-vsys-adc-channel");
+ penv->is_vsys_adc_channel =
+ of_property_read_bool(node, "qcom,has-vsys-adc-channel");
+ penv->is_a2xb_split_reg =
+ of_property_read_bool(node, "qcom,has-a2xb-split-reg");
- penv->is_a2xb_split_reg = of_property_read_bool(pdev->dev.of_node,
- "qcom,has-a2xb-split-reg");
+ wlan_cfg->wcn_external_gpio_support =
+ of_property_read_bool(node, "qcom,wcn-external-gpio-support");
- if (of_property_read_u32(pdev->dev.of_node,
- "qcom,wlan-rx-buff-count", &penv->wlan_rx_buff_count)) {
+ if (of_property_read_u32(node, "qcom,wlan-rx-buff-count",
+ &penv->wlan_rx_buff_count)) {
penv->wlan_rx_buff_count = WCNSS_DEF_WLAN_RX_BUFF_COUNT;
}
@@ -2794,15 +2834,18 @@ wcnss_trigger_config(struct platform_device *pdev)
goto fail;
}
- index++;
- ret = wcnss_dt_parse_vreg_level(&pdev->dev, index,
- "qcom,iris-vddpa-current",
- "qcom,iris-vddpa-voltage-level",
- penv->wlan_config.iris_vlevel);
-
- if (ret) {
- dev_err(&pdev->dev, "error reading voltage-level property\n");
- goto fail;
+ if (!wlan_cfg->wcn_external_gpio_support) {
+ index++;
+ ret = wcnss_dt_parse_vreg_level(
+ &pdev->dev, index,
+ "qcom,iris-vddpa-current",
+ "qcom,iris-vddpa-voltage-level",
+ penv->wlan_config.iris_vlevel);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "error reading voltage-level property\n");
+ goto fail;
+ }
}
index++;
@@ -2825,8 +2868,8 @@ wcnss_trigger_config(struct platform_device *pdev)
pdata = pdev->dev.platform_data;
if (WCNSS_CONFIG_UNSPECIFIED == has_48mhz_xo) {
if (has_pronto_hw) {
- has_48mhz_xo = of_property_read_bool(pdev->dev.of_node,
- "qcom,has-48mhz-xo");
+ has_48mhz_xo =
+ of_property_read_bool(node, "qcom,has-48mhz-xo");
} else {
has_48mhz_xo = pdata->has_48mhz_xo;
}
@@ -2837,8 +2880,8 @@ wcnss_trigger_config(struct platform_device *pdev)
penv->wlan_config.is_pronto_v3 = is_pronto_v3;
if (WCNSS_CONFIG_UNSPECIFIED == has_autodetect_xo && has_pronto_hw) {
- has_autodetect_xo = of_property_read_bool(pdev->dev.of_node,
- "qcom,has-autodetect-xo");
+ has_autodetect_xo =
+ of_property_read_bool(node, "qcom,has-autodetect-xo");
}
penv->thermal_mitigation = 0;
@@ -3118,6 +3161,16 @@ wcnss_trigger_config(struct platform_device *pdev)
__func__);
goto fail_ioremap2;
}
+
+ if (of_property_read_bool(node,
+ "qcom,is-dual-band-disabled")) {
+ ret = wcnss_get_dual_band_capability_info(pdev);
+ if (ret) {
+ pr_err(
+ "%s: failed to get dual band info\n", __func__);
+ goto fail_ioremap2;
+ }
+ }
}
penv->adc_tm_dev = qpnp_get_adc_tm(&penv->pdev->dev, "wcnss");
@@ -3129,6 +3182,21 @@ wcnss_trigger_config(struct platform_device *pdev)
penv->fw_vbatt_state = WCNSS_CONFIG_UNSPECIFIED;
}
+ penv->snoc_wcnss = devm_clk_get(&penv->pdev->dev, "snoc_wcnss");
+ if (IS_ERR(penv->snoc_wcnss)) {
+ pr_err("%s: couldn't get snoc_wcnss\n", __func__);
+ penv->snoc_wcnss = NULL;
+ } else {
+ if (of_property_read_u32(pdev->dev.of_node,
+ "qcom,snoc-wcnss-clock-freq",
+ &penv->snoc_wcnss_clock_freq)) {
+ pr_debug("%s: wcnss snoc clock frequency is not defined\n",
+ __func__);
+ devm_clk_put(&penv->pdev->dev, penv->snoc_wcnss);
+ penv->snoc_wcnss = NULL;
+ }
+ }
+
if (penv->wlan_config.is_pronto_vadc) {
penv->vadc_dev = qpnp_get_vadc(&penv->pdev->dev, "wcnss");
@@ -3191,6 +3259,38 @@ fail:
return ret;
}
+/* The WLAN driver needs to vote for the SNOC clock directly.
+ * It calls wcnss_snoc_vote() to enable or disable the snoc_wcnss
+ * clock on its behalf.
+ */
+void wcnss_snoc_vote(bool clk_chk_en)
+{
+ int rc;
+
+ if (!penv->snoc_wcnss) {
+ pr_err("%s: couldn't get clk snoc_wcnss\n", __func__);
+ return;
+ }
+
+ if (clk_chk_en) {
+ rc = clk_set_rate(penv->snoc_wcnss,
+ penv->snoc_wcnss_clock_freq);
+ if (rc) {
+ pr_err("%s: snoc_wcnss_clk-clk_set_rate failed =%d\n",
+ __func__, rc);
+ return;
+ }
+
+ if (clk_prepare_enable(penv->snoc_wcnss)) {
+ pr_err("%s: snoc_wcnss clk enable failed\n", __func__);
+ return;
+ }
+ } else {
+ clk_disable_unprepare(penv->snoc_wcnss);
+ }
+}
+EXPORT_SYMBOL(wcnss_snoc_vote);
+
/* wlan prop driver cannot invoke cancel_work_sync
* function directly, so to invoke this function it
* call wcnss_flush_work function
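wcnss_snoc_vote() added above follows the standard clock consumer sequence: set the rate, then prepare and enable, and balance with clk_disable_unprepare() when the vote is dropped. A minimal sketch of that sequence with an illustrative clock handle and frequency:

/* Hedged sketch of the clock vote/unvote sequence used by wcnss_snoc_vote(). */
static int example_clk_vote(struct clk *clk, unsigned long freq, bool enable)
{
	int rc;

	if (!enable) {
		clk_disable_unprepare(clk);	/* balances the clk_prepare_enable() below */
		return 0;
	}

	rc = clk_set_rate(clk, freq);		/* rate must be set before enabling */
	if (rc)
		return rc;

	return clk_prepare_enable(clk);
}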
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index 217c7ce3f57b..8e66cd5770b5 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -278,6 +278,7 @@
#define PERST_PROPAGATION_DELAY_US_MIN 1000
#define PERST_PROPAGATION_DELAY_US_MAX 1005
+#define SWITCH_DELAY_MAX 20
#define REFCLK_STABILIZATION_DELAY_US_MIN 1000
#define REFCLK_STABILIZATION_DELAY_US_MAX 1005
#define LINK_UP_TIMEOUT_US_MIN 5000
@@ -626,6 +627,7 @@ struct msm_pcie_dev_t {
bool ext_ref_clk;
bool common_phy;
uint32_t ep_latency;
+ uint32_t switch_latency;
uint32_t wr_halt_size;
uint32_t cpl_timeout;
uint32_t current_bdf;
@@ -1735,7 +1737,8 @@ static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
static int msm_pcie_restore_sec_config(struct msm_pcie_dev_t *dev)
{
- int ret, scm_ret;
+ int ret;
+ u64 scm_ret;
if (!dev) {
pr_err("PCIe: the input pcie dev is NULL.\n");
@@ -1745,7 +1748,7 @@ static int msm_pcie_restore_sec_config(struct msm_pcie_dev_t *dev)
ret = scm_restore_sec_cfg(dev->scm_dev_id, 0, &scm_ret);
if (ret || scm_ret) {
PCIE_ERR(dev,
- "PCIe: RC%d failed(%d) to restore sec config, scm_ret=%d\n",
+ "PCIe: RC%d failed(%d) to restore sec config, scm_ret=%llu\n",
dev->rc_idx, ret, scm_ret);
return ret ? ret : -EINVAL;
}
@@ -1984,6 +1987,8 @@ static void msm_pcie_show_status(struct msm_pcie_dev_t *dev)
dev->common_phy);
PCIE_DBG_FS(dev, "ep_latency: %dms\n",
dev->ep_latency);
+ PCIE_DBG_FS(dev, "switch_latency: %dms\n",
+ dev->switch_latency);
PCIE_DBG_FS(dev, "wr_halt_size: 0x%x\n",
dev->wr_halt_size);
PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n",
@@ -4675,7 +4680,15 @@ int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options)
goto link_fail;
}
- msleep(500);
+ if (dev->switch_latency) {
+ PCIE_DBG(dev, "switch_latency: %dms\n",
+ dev->switch_latency);
+ if (dev->switch_latency <= SWITCH_DELAY_MAX)
+ usleep_range(dev->switch_latency * 1000,
+ dev->switch_latency * 1000);
+ else
+ msleep(dev->switch_latency);
+ }
msm_pcie_config_controller(dev);
@@ -6279,6 +6292,20 @@ static int msm_pcie_probe(struct platform_device *pdev)
PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: ep-latency: 0x%x.\n",
rc_idx, msm_pcie_dev[rc_idx].ep_latency);
+ msm_pcie_dev[rc_idx].switch_latency = 0;
+ ret = of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,switch-latency",
+ &msm_pcie_dev[rc_idx].switch_latency);
+
+ if (ret)
+ PCIE_DBG(&msm_pcie_dev[rc_idx],
+ "RC%d: switch-latency does not exist.\n",
+ rc_idx);
+ else
+ PCIE_DBG(&msm_pcie_dev[rc_idx],
+ "RC%d: switch-latency: 0x%x.\n",
+ rc_idx, msm_pcie_dev[rc_idx].switch_latency);
+
msm_pcie_dev[rc_idx].wr_halt_size = 0;
ret = of_property_read_u32(pdev->dev.of_node,
"qcom,wr-halt-size",
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 63ec68e6ac2a..39400dda27c2 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -552,6 +552,7 @@ static void armpmu_init(struct arm_pmu *armpmu)
.stop = armpmu_stop,
.read = armpmu_read,
.filter_match = armpmu_filter_match,
+ .events_across_hotplug = 1,
};
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
index 51806cec1e4d..49aa7f25347d 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
@@ -649,8 +649,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
return 0;
ipa_insert_failed:
- if (offset)
- list_move(&offset->link,
+ list_move(&offset->link,
&htbl->head_free_offset_list[offset->bin]);
entry->offset_entry = NULL;
list_del(&entry->link);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c
index dd591407d10f..5228b2db1410 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1436,6 +1436,66 @@ struct elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = {
start_ipv6_filter_idx),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ rule_id_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ rule_id_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_IPA_MAX_FILTERS_V01,
+ .elem_size = sizeof(uint32_t),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ rule_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ dst_pipe_id_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ dst_pipe_id_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_IPA_MAX_CLIENT_DST_PIPES_V01,
+ .elem_size = sizeof(uint32_t),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ dst_pipe_id),
+ },
+ {
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
index 011ca300cc09..0531919487d7 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -1399,7 +1399,7 @@ int ipa2_put_rt_tbl(u32 rt_tbl_hdl)
{
struct ipa_rt_tbl *entry;
enum ipa_ip_type ip = IPA_IP_MAX;
- int result;
+ int result = 0;
mutex_lock(&ipa_ctx->lock);
entry = ipa_id_find(rt_tbl_hdl);
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 834712a71ac6..5dbd43b44540 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -1878,7 +1878,9 @@ void q6_deinitialize_rm(void)
if (ret < 0)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_Q6_PROD, ret);
- destroy_workqueue(ipa_rm_q6_workqueue);
+
+ if (ipa_rm_q6_workqueue)
+ destroy_workqueue(ipa_rm_q6_workqueue);
}
static void wake_tx_queue(struct work_struct *work)
@@ -2187,7 +2189,10 @@ timer_init_err:
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_WWAN_0_PROD, ret);
create_rsrc_err:
- q6_deinitialize_rm();
+
+ if (!atomic_read(&is_ssr))
+ q6_deinitialize_rm();
+
q6_init_err:
free_netdev(ipa_netdevs[0]);
ipa_netdevs[0] = NULL;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 7ed83fb74fcc..62e263a139cc 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -1654,6 +1654,21 @@ int ipa3_setup_dflt_rt_tables(void)
return 0;
}
+static int ipa3_clkon_cfg_wa(void)
+{
+ struct ipahal_reg_clkon_cfg clkon_cfg = { 0 };
+ int ret = 0;
+
+ clkon_cfg.cgc_open_misc = 1;
+
+ if (ipa3_cfg_clkon_cfg(&clkon_cfg)) {
+ IPAERR("fail to set cgc_open_misc = 1\n");
+ ret = -EPERM;
+ }
+
+ return ret;
+}
+
static int ipa3_setup_exception_path(void)
{
struct ipa_ioc_add_hdr *hdr;
@@ -3962,6 +3977,8 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
/* Prevent consequent calls from trying to load the FW again. */
if (ipa3_ctx->ipa_initialization_complete)
return 0;
+	/* proxy vote for the modem is now taken in ipa3_post_init() */
+ IPA_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE");
if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
memset(&gsi_props, 0, sizeof(gsi_props));
@@ -4063,8 +4080,14 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
ipa3_trigger_ipa_ready_cbs();
complete_all(&ipa3_ctx->init_completion_obj);
- pr_info("IPA driver initialization was successful.\n");
+ /* WA to disable MISC clock gating for IPA_HW_v3_1 */
+ if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_1) {
+ pr_info(" WA to set cgc_open_misc = 1\n");
+ ipa3_clkon_cfg_wa();
+ }
+
+ pr_info("IPA driver initialization was successful\n");
return 0;
fail_teth_bridge_driver_init:
@@ -4275,7 +4298,6 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
int i;
struct ipa3_flt_tbl *flt_tbl;
struct ipa3_rt_tbl_set *rset;
- struct ipa_active_client_logging_info log_info;
IPADBG("IPA Driver initialization started\n");
@@ -4465,8 +4487,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
mutex_init(&ipa3_ctx->ipa3_active_clients.mutex);
spin_lock_init(&ipa3_ctx->ipa3_active_clients.spinlock);
- IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
- ipa3_active_clients_log_inc(&log_info, false);
+ /* move proxy vote for modem to ipa3_post_init() */
ipa3_ctx->ipa3_active_clients.cnt = 1;
/* Assign resource limitation to each group */
@@ -4702,7 +4723,6 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
init_completion(&ipa3_ctx->init_completion_obj);
init_completion(&ipa3_ctx->uc_loaded_completion_obj);
-
/*
* For GSI, we can't register the GSI driver yet, as it expects
* the GSI FW to be up and running before the registration.
@@ -4745,6 +4765,8 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
IPADBG("ipa cdev added successful. major:%d minor:%d\n",
MAJOR(ipa3_ctx->dev_num),
MINOR(ipa3_ctx->dev_num));
+	/* proxy vote for modem is added in ipa3_post_init() phase */
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return 0;
fail_cdev_add:
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index 7c3b5838242e..ce35ba02154d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -426,8 +426,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
return 0;
ipa_insert_failed:
- if (offset)
- list_move(&offset->link,
+ list_move(&offset->link,
&htbl->head_free_offset_list[offset->bin]);
entry->offset_entry = NULL;
list_del(&entry->link);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index a890e88a8f61..4278dc45aad2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -1877,6 +1877,7 @@ void ipa3_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size);
#else
#define IPA_DUMP_BUFF(base, phy_base, size)
#endif
+int ipa3_cfg_clkon_cfg(struct ipahal_reg_clkon_cfg *clkon_cfg);
int ipa3_init_mem_partition(struct device_node *dev_node);
int ipa3_controller_static_bind(struct ipa3_controller *controller,
enum ipa_hw_type ipa_hw_type);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 690a9db67ff0..571852c076ea 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -807,6 +807,11 @@ int ipa3_qmi_filter_notify_send(
return -EINVAL;
}
+ if (req->source_pipe_index == -1) {
+ IPAWANERR("Source pipe index invalid\n");
+ return -EINVAL;
+ }
+
mutex_lock(&ipa3_qmi_lock);
if (ipa3_qmi_ctx != NULL) {
/* cache the qmi_filter_request */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
index d5d850309696..e6f1e2ce0b75 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -121,6 +121,31 @@ extern struct elem_info ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[];
extern struct elem_info ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[];
extern struct elem_info ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[];
+ extern struct elem_info
+ ipa3_ul_firewall_rule_type_data_v01_ei[];
+ extern struct elem_info
+ ipa3_ul_firewall_config_result_type_data_v01_ei[];
+ extern struct elem_info
+ ipa3_per_client_stats_info_type_data_v01_ei[];
+ extern struct elem_info
+ ipa3_enable_per_client_stats_req_msg_data_v01_ei[];
+ extern struct elem_info
+ ipa3_enable_per_client_stats_resp_msg_data_v01_ei[];
+ extern struct elem_info
+ ipa3_get_stats_per_client_req_msg_data_v01_ei[];
+ extern struct elem_info
+ ipa3_get_stats_per_client_resp_msg_data_v01_ei[];
+ extern struct elem_info
+ ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei[];
+ extern struct elem_info
+ ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei[];
+ extern struct elem_info
+ ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[];
+
/**
* struct ipa3_rmnet_context - IPA rmnet context
* @ipa_rmnet_ssr: support modem SSR
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
index 6a5cb4891c02..746863732dc5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,6 +16,8 @@
#include <soc/qcom/msm_qmi_interface.h>
+#include "ipa_qmi_service.h"
+
/* Type Definitions */
static struct elem_info ipa3_hdr_tbl_info_type_data_v01_ei[] = {
{
@@ -1756,6 +1758,36 @@ struct elem_info ipa3_fltr_installed_notif_req_msg_data_v01_ei[] = {
rule_id),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ dst_pipe_id_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ dst_pipe_id_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_IPA_MAX_CLIENT_DST_PIPES_V01,
+ .elem_size = sizeof(uint32_t),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ dst_pipe_id),
+ },
+ {
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
@@ -2923,3 +2955,435 @@ struct elem_info ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[] = {
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
+
+struct elem_info ipa3_per_client_stats_info_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ src_pipe_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_ul_ipv4_bytes),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_ul_ipv6_bytes),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_dl_ipv4_bytes),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_dl_ipv6_bytes),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_ul_ipv4_pkts),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_ul_ipv6_pkts),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_dl_ipv4_pkts),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_dl_ipv6_pkts),
+
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_ul_firewall_rule_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ul_firewall_rule_type_v01,
+ ip_type),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct ipa_filter_rule_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_ul_firewall_rule_type_v01,
+ filter_rule),
+ .ei_array = ipa3_filter_rule_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_ul_firewall_config_result_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ul_firewall_config_result_type_v01,
+ is_success),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ul_firewall_config_result_type_v01,
+ mux_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_enable_per_client_stats_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ ipa_enable_per_client_stats_req_msg_v01,
+ enable_per_client_stats),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_enable_per_client_stats_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_enable_per_client_stats_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_get_stats_per_client_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_req_msg_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_req_msg_v01,
+ src_pipe_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_req_msg_v01,
+ reset_stats_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_req_msg_v01,
+ reset_stats),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_get_stats_per_client_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_resp_msg_v01,
+ per_client_stats_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_resp_msg_v01,
+ per_client_stats_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_MAX_PER_CLIENTS_V01,
+ .elem_size =
+ sizeof(struct ipa_per_client_stats_info_type_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_resp_msg_v01,
+ per_client_stats_list),
+ .ei_array =
+ ipa3_per_client_stats_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info
+ ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ firewall_rules_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_MAX_UL_FIREWALL_RULES_V01,
+ .elem_size = sizeof(struct ipa_ul_firewall_rule_type_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x1,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ firewall_rules_list),
+ .ei_array =
+ ipa3_ul_firewall_rule_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x2,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ mux_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ disable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ disable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ are_blacklist_filters_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ are_blacklist_filters),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info
+ ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info
+ ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(
+ struct ipa_ul_firewall_config_result_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_ind_msg_v01,
+ result),
+ .ei_array =
+ ipa3_ul_firewall_config_result_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index bc7cc7060545..8e790c89ed13 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -1479,7 +1479,7 @@ int ipa3_put_rt_tbl(u32 rt_tbl_hdl)
{
struct ipa3_rt_tbl *entry;
enum ipa_ip_type ip = IPA_IP_MAX;
- int result;
+ int result = 0;
mutex_lock(&ipa3_ctx->lock);
entry = ipa3_id_find(rt_tbl_hdl);
@@ -1501,6 +1501,7 @@ int ipa3_put_rt_tbl(u32 rt_tbl_hdl)
ip = IPA_IP_v6;
else {
WARN_ON(1);
+ result = -EINVAL;
goto ret;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 1fa1196dda63..e8bd0cd2ffb5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -795,6 +795,28 @@ void _ipa_sram_settings_read_v3_0(void)
}
/**
+ * ipa3_cfg_clkon_cfg() - configure IPA clkon_cfg
+ * @clkon_cfg: IPA clkon_cfg
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa3_cfg_clkon_cfg(struct ipahal_reg_clkon_cfg *clkon_cfg)
+{
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ IPADBG("cgc_open_misc = %d\n",
+ clkon_cfg->cgc_open_misc);
+
+ ipahal_write_reg_fields(IPA_CLKON_CFG, clkon_cfg);
+
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return 0;
+}
+
+/**
* ipa3_cfg_route() - configure IPA route
* @route: IPA route
*
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 585f9e6bd492..0db9f30181a7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -19,6 +19,7 @@
#include "ipahal_reg_i.h"
static const char *ipareg_name_to_str[IPA_REG_MAX] = {
+ __stringify(IPA_CLKON_CFG),
__stringify(IPA_ROUTE),
__stringify(IPA_IRQ_STTS_EE_n),
__stringify(IPA_IRQ_EN_EE_n),
@@ -858,6 +859,18 @@ static void ipareg_construct_endp_init_hdr_n(enum ipahal_reg_name reg,
IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK);
}
+static void ipareg_construct_clkon_cfg(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ struct ipahal_reg_clkon_cfg *clkon_cfg;
+
+ clkon_cfg = (struct ipahal_reg_clkon_cfg *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, clkon_cfg->cgc_open_misc,
+ IPA_CLKON_CFG_CGC_OPEN_MISC_SHFT,
+ IPA_CLKON_CFG_CGC_OPEN_MISC_BMSK);
+}
+
static void ipareg_construct_route(enum ipahal_reg_name reg,
const void *fields, u32 *val)
{
@@ -1159,6 +1172,9 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
/* IPAv3.1 */
+ [IPA_HW_v3_1][IPA_CLKON_CFG] = {
+ ipareg_construct_clkon_cfg, ipareg_parse_dummy,
+ 0x00000044, 0},
[IPA_HW_v3_1][IPA_IRQ_SUSPEND_INFO_EE_n] = {
ipareg_construct_dummy, ipareg_parse_dummy,
0x00003030, 0x1000},
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
index c37415f13380..4db09475d5e2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -22,6 +22,7 @@
* array as well.
*/
enum ipahal_reg_name {
+ IPA_CLKON_CFG,
IPA_ROUTE,
IPA_IRQ_STTS_EE_n,
IPA_IRQ_EN_EE_n,
@@ -90,6 +91,14 @@ enum ipahal_reg_name {
};
/*
+ * struct ipahal_reg_clkon_cfg - IPA clock on configuration register
+ * @cgc_open_misc: clock gating needs for MISC
+ */
+struct ipahal_reg_clkon_cfg {
+ u32 cgc_open_misc;
+};
+
+/*
* struct ipahal_reg_route - IPA route register
* @route_dis: route disable
* @route_def_pipe: route default pipe
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
index ac97e5ac0494..37458c4d0697 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
@@ -21,6 +21,9 @@ int ipahal_reg_init(enum ipa_hw_type ipa_hw_type);
#define IPA_GETFIELD_FROM_REG(reg, shift, mask) \
(((reg) & (mask)) >> (shift))
+/* IPA_CLKON_CFG register */
+#define IPA_CLKON_CFG_CGC_OPEN_MISC_SHFT 0x3
+#define IPA_CLKON_CFG_CGC_OPEN_MISC_BMSK 0x8
/* IPA_ROUTE register */
#define IPA_ROUTE_ROUTE_DIS_SHFT 0x0
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 039bc7da5153..8fbde6675070 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -702,6 +702,11 @@ static int ipa3_wwan_add_ul_flt_rule_to_ipa(void)
/* send ipa_fltr_installed_notif_req_msg_v01 to Q6*/
req->source_pipe_index =
ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_PROD);
+ if (req->source_pipe_index == IPA_EP_NOT_ALLOCATED) {
+ IPAWANERR("ep mapping failed\n");
+ retval = -EFAULT;
+ }
+
req->install_status = QMI_RESULT_SUCCESS_V01;
req->rule_id_valid = 1;
req->rule_id_len = rmnet_ipa3_ctx->num_q6_rules;
@@ -1947,7 +1952,9 @@ void ipa3_q6_deinitialize_rm(void)
if (ret < 0)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_Q6_PROD, ret);
- destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq);
+
+ if (rmnet_ipa3_ctx->rm_q6_wq)
+ destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq);
}
static void ipa3_wake_tx_queue(struct work_struct *work)
@@ -2287,7 +2294,10 @@ timer_init_err:
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_WWAN_0_PROD, ret);
create_rsrc_err:
- ipa3_q6_deinitialize_rm();
+
+ if (!atomic_read(&rmnet_ipa3_ctx->is_ssr))
+ ipa3_q6_deinitialize_rm();
+
q6_init_err:
free_netdev(dev);
rmnet_ipa3_ctx->wwan_priv = NULL;
diff --git a/drivers/platform/msm/mhi_uci/mhi_uci.c b/drivers/platform/msm/mhi_uci/mhi_uci.c
index 4563810d7e5b..5b50666d30a2 100644
--- a/drivers/platform/msm/mhi_uci/mhi_uci.c
+++ b/drivers/platform/msm/mhi_uci/mhi_uci.c
@@ -326,73 +326,6 @@ static int mhi_init_inbound(struct uci_client *client_handle)
return ret_val;
}
-static int mhi_uci_send_packet(struct mhi_client_handle **client_handle,
- void *buf,
- u32 size)
-{
- u32 nr_avail_trbs = 0;
- u32 i = 0;
- void *data_loc = NULL;
- unsigned long memcpy_result = 0;
- int data_left_to_insert = 0;
- size_t data_to_insert_now = 0;
- u32 data_inserted_so_far = 0;
- int ret_val = 0;
- struct uci_client *uci_handle;
- struct uci_buf *uci_buf;
-
- uci_handle = container_of(client_handle, struct uci_client,
- out_attr.mhi_handle);
-
- nr_avail_trbs = atomic_read(&uci_handle->out_attr.avail_pkts);
- data_left_to_insert = size;
-
- for (i = 0; i < nr_avail_trbs; ++i) {
- data_to_insert_now = min_t(size_t, data_left_to_insert,
- uci_handle->out_attr.max_packet_size);
- data_loc = kmalloc(data_to_insert_now + sizeof(*uci_buf),
- GFP_KERNEL);
- if (!data_loc) {
- uci_log(uci_handle->uci_ipc_log, UCI_DBG_VERBOSE,
- "Failed to allocate memory 0x%zx\n",
- data_to_insert_now);
- return -ENOMEM;
- }
- uci_buf = data_loc + data_to_insert_now;
- uci_buf->data = data_loc;
- uci_buf->pkt_id = uci_handle->out_attr.pkt_count++;
- memcpy_result = copy_from_user(uci_buf->data,
- buf + data_inserted_so_far,
- data_to_insert_now);
- if (memcpy_result)
- goto error_xfer;
-
- uci_log(uci_handle->uci_ipc_log, UCI_DBG_VERBOSE,
- "At trb i = %d/%d, size = %lu, id %llu chan %d\n",
- i, nr_avail_trbs, data_to_insert_now, uci_buf->pkt_id,
- uci_handle->out_attr.chan_id);
- ret_val = mhi_queue_xfer(*client_handle, uci_buf->data,
- data_to_insert_now, MHI_EOT);
- if (ret_val) {
- goto error_xfer;
- } else {
- data_left_to_insert -= data_to_insert_now;
- data_inserted_so_far += data_to_insert_now;
- atomic_inc(&uci_handle->out_pkt_pend_ack);
- atomic_dec(&uci_handle->out_attr.avail_pkts);
- list_add_tail(&uci_buf->node,
- &uci_handle->out_attr.buf_head);
- }
- if (!data_left_to_insert)
- break;
- }
- return data_inserted_so_far;
-
-error_xfer:
- kfree(uci_buf->data);
- return data_inserted_so_far;
-}
-
static int mhi_uci_send_status_cmd(struct uci_client *client)
{
void *buf = NULL;
@@ -963,18 +896,11 @@ static ssize_t mhi_uci_client_write(struct file *file,
goto sys_interrupt;
}
- while (bytes_transferrd != count) {
- ret_val = mhi_uci_send_packet(&chan_attr->mhi_handle,
- (void *)buf, count);
- if (ret_val < 0)
- goto sys_interrupt;
+ while (count) {
+ size_t xfer_size;
+ void *data_loc = NULL;
+ struct uci_buf *uci_buf;
- bytes_transferrd += ret_val;
- if (bytes_transferrd == count)
- break;
- uci_log(uci_handle->uci_ipc_log, UCI_DBG_VERBOSE,
- "No descriptors available, did we poll, chan %d?\n",
- chan);
mutex_unlock(&chan_attr->chan_lock);
ret_val = wait_event_interruptible(chan_attr->wq,
(atomic_read(&chan_attr->avail_pkts) ||
@@ -991,6 +917,37 @@ static ssize_t mhi_uci_client_write(struct file *file,
ret_val = -ERESTARTSYS;
goto sys_interrupt;
}
+
+ xfer_size = min_t(size_t, count, chan_attr->max_packet_size);
+ data_loc = kmalloc(xfer_size + sizeof(*uci_buf), GFP_KERNEL);
+ if (!data_loc) {
+ uci_log(uci_handle->uci_ipc_log, UCI_DBG_VERBOSE,
+ "Failed to allocate memory %lu\n", xfer_size);
+ ret_val = -ENOMEM;
+ goto sys_interrupt;
+ }
+
+ uci_buf = data_loc + xfer_size;
+ uci_buf->data = data_loc;
+ uci_buf->pkt_id = uci_handle->out_attr.pkt_count++;
+ ret_val = copy_from_user(uci_buf->data, buf, xfer_size);
+ if (unlikely(ret_val)) {
+ kfree(uci_buf->data);
+ goto sys_interrupt;
+ }
+ ret_val = mhi_queue_xfer(chan_attr->mhi_handle, uci_buf->data,
+ xfer_size, MHI_EOT);
+ if (unlikely(ret_val)) {
+ kfree(uci_buf->data);
+ goto sys_interrupt;
+ }
+
+ bytes_transferrd += xfer_size;
+ count -= xfer_size;
+ buf += xfer_size;
+ atomic_inc(&uci_handle->out_pkt_pend_ack);
+ atomic_dec(&uci_handle->out_attr.avail_pkts);
+ list_add_tail(&uci_buf->node, &uci_handle->out_attr.buf_head);
}
mutex_unlock(&chan_attr->chan_lock);
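The rewritten write loop above makes a single kmalloc() per transfer and places the struct uci_buf bookkeeping immediately after the payload, so one kfree(uci_buf->data) releases both. A minimal sketch of that single-allocation layout; struct and field names are illustrative, not the driver's:

/* Hedged sketch: carve payload plus trailing metadata out of one allocation. */
struct example_buf {
	struct list_head node;
	void *data;
};

static struct example_buf *example_buf_alloc(size_t payload_len)
{
	void *data = kmalloc(payload_len + sizeof(struct example_buf), GFP_KERNEL);
	struct example_buf *buf;

	if (!data)
		return NULL;

	buf = data + payload_len;	/* metadata lives right behind the payload */
	buf->data = data;		/* kfree(buf->data) frees payload and metadata together */
	return buf;
}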
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 0dd6017245fa..cf99eb9c2ba0 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -297,6 +297,8 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(hw_current_max),
POWER_SUPPLY_ATTR(real_type),
POWER_SUPPLY_ATTR(pr_swap),
+ POWER_SUPPLY_ATTR(cc_step),
+ POWER_SUPPLY_ATTR(cc_step_sel),
/* Local extensions of type int64_t */
POWER_SUPPLY_ATTR(charge_counter_ext),
/* Properties of type `const char *' */
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index 947108c1410e..719f09a7c372 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -81,6 +81,8 @@
#define BATT_THERM_NUM_COEFFS 3
+#define MAX_CC_STEPS 20
+
/* Debug flag definitions */
enum fg_debug_flag {
FG_IRQ = BIT(0), /* Show interrupts */
@@ -309,11 +311,16 @@ struct fg_irq_info {
};
struct fg_circ_buf {
- int arr[20];
+ int arr[10];
int size;
int head;
};
+struct fg_cc_step_data {
+ int arr[MAX_CC_STEPS];
+ int sel;
+};
+
struct fg_pt {
s32 x;
s32 y;
@@ -374,6 +381,7 @@ struct fg_chip {
struct fg_cyc_ctr_data cyc_ctr;
struct notifier_block nb;
struct fg_cap_learning cl;
+ struct fg_cc_step_data cc_step;
struct mutex bus_lock;
struct mutex sram_rw_lock;
struct mutex batt_avg_lock;
@@ -475,5 +483,6 @@ extern bool is_qnovo_en(struct fg_chip *chip);
extern void fg_circ_buf_add(struct fg_circ_buf *, int);
extern void fg_circ_buf_clr(struct fg_circ_buf *);
extern int fg_circ_buf_avg(struct fg_circ_buf *, int *);
+extern int fg_circ_buf_median(struct fg_circ_buf *, int *);
extern int fg_lerp(const struct fg_pt *, size_t, s32, s32 *);
#endif
diff --git a/drivers/power/supply/qcom/fg-util.c b/drivers/power/supply/qcom/fg-util.c
index 9635044e02a5..0cb1dea7113b 100644
--- a/drivers/power/supply/qcom/fg-util.c
+++ b/drivers/power/supply/qcom/fg-util.c
@@ -10,6 +10,7 @@
* GNU General Public License for more details.
*/
+#include <linux/sort.h>
#include "fg-core.h"
void fg_circ_buf_add(struct fg_circ_buf *buf, int val)
@@ -39,6 +40,39 @@ int fg_circ_buf_avg(struct fg_circ_buf *buf, int *avg)
return 0;
}
+static int cmp_int(const void *a, const void *b)
+{
+ return *(int *)a - *(int *)b;
+}
+
+int fg_circ_buf_median(struct fg_circ_buf *buf, int *median)
+{
+ int *temp;
+
+ if (buf->size == 0)
+ return -ENODATA;
+
+ if (buf->size == 1) {
+ *median = buf->arr[0];
+ return 0;
+ }
+
+ temp = kmalloc_array(buf->size, sizeof(*temp), GFP_KERNEL);
+ if (!temp)
+ return -ENOMEM;
+
+ memcpy(temp, buf->arr, buf->size * sizeof(*temp));
+ sort(temp, buf->size, sizeof(*temp), cmp_int, NULL);
+
+ if (buf->size % 2)
+ *median = temp[buf->size / 2];
+ else
+ *median = (temp[buf->size / 2 - 1] + temp[buf->size / 2]) / 2;
+
+ kfree(temp);
+ return 0;
+}
+
int fg_lerp(const struct fg_pt *pts, size_t tablesize, s32 input, s32 *output)
{
int i;
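fg_circ_buf_median() above sorts a copy of the samples and, for an even count, averages the two middle values; for the samples {4, 1, 7, 2} the sorted copy is {1, 2, 4, 7} and the reported median is (2 + 4) / 2 = 3. A small plain-C sketch of the same rule, written as standalone userspace code so it can be compiled and run directly:

/* Hedged sketch (userspace C) of the even/odd median rule used above. */
#include <stdio.h>
#include <stdlib.h>

static int cmp_int(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

int main(void)
{
	int samples[] = { 4, 1, 7, 2 };
	size_t n = sizeof(samples) / sizeof(samples[0]);
	int median;

	qsort(samples, n, sizeof(samples[0]), cmp_int);
	if (n % 2)
		median = samples[n / 2];
	else
		median = (samples[n / 2 - 1] + samples[n / 2]) / 2;

	printf("median = %d\n", median);	/* prints: median = 3 */
	return 0;
}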
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index e03681ec13cc..527b6afbe93a 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -75,6 +75,8 @@
#define ESR_TIMER_CHG_MAX_OFFSET 0
#define ESR_TIMER_CHG_INIT_WORD 18
#define ESR_TIMER_CHG_INIT_OFFSET 2
+#define ESR_EXTRACTION_ENABLE_WORD 19
+#define ESR_EXTRACTION_ENABLE_OFFSET 0
#define PROFILE_LOAD_WORD 24
#define PROFILE_LOAD_OFFSET 0
#define ESR_RSLOW_DISCHG_WORD 34
@@ -1611,7 +1613,7 @@ static int fg_set_recharge_voltage(struct fg_chip *chip, int voltage_mv)
static int fg_charge_full_update(struct fg_chip *chip)
{
union power_supply_propval prop = {0, };
- int rc, msoc, bsoc, recharge_soc;
+ int rc, msoc, bsoc, recharge_soc, msoc_raw;
u8 full_soc[2] = {0xFF, 0xFF};
if (!chip->dt.hold_soc_while_full)
@@ -1647,6 +1649,7 @@ static int fg_charge_full_update(struct fg_chip *chip)
pr_err("Error in getting msoc, rc=%d\n", rc);
goto out;
}
+ msoc_raw = DIV_ROUND_CLOSEST(msoc * FULL_SOC_RAW, FULL_CAPACITY);
fg_dbg(chip, FG_STATUS, "msoc: %d bsoc: %x health: %d status: %d full: %d\n",
msoc, bsoc, chip->health, chip->charge_status,
@@ -1670,7 +1673,7 @@ static int fg_charge_full_update(struct fg_chip *chip)
fg_dbg(chip, FG_STATUS, "Terminated charging @ SOC%d\n",
msoc);
}
- } else if ((bsoc >> 8) <= recharge_soc && chip->charge_full) {
+ } else if (msoc_raw < recharge_soc && chip->charge_full) {
chip->delta_soc = FULL_CAPACITY - msoc;
/*
@@ -1700,8 +1703,8 @@ static int fg_charge_full_update(struct fg_chip *chip)
rc);
goto out;
}
- fg_dbg(chip, FG_STATUS, "bsoc: %d recharge_soc: %d delta_soc: %d\n",
- bsoc >> 8, recharge_soc, chip->delta_soc);
+ fg_dbg(chip, FG_STATUS, "msoc_raw = %d bsoc: %d recharge_soc: %d delta_soc: %d\n",
+ msoc_raw, bsoc >> 8, recharge_soc, chip->delta_soc);
} else {
goto out;
}
@@ -3032,6 +3035,89 @@ static int fg_esr_validate(struct fg_chip *chip)
return 0;
}
+static int fg_force_esr_meas(struct fg_chip *chip)
+{
+ int rc;
+ int esr_uohms;
+
+ /* force esr extraction enable */
+ rc = fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD,
+ ESR_EXTRACTION_ENABLE_OFFSET, BIT(0), BIT(0),
+ FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("failed to enable esr extn rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip),
+ LD_REG_CTRL_BIT, 0);
+ if (rc < 0) {
+ pr_err("Error in configuring qnovo_cfg rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_masked_write(chip, BATT_INFO_TM_MISC1(chip),
+ ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT,
+ ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT);
+ if (rc < 0) {
+ pr_err("Error in configuring force ESR rc=%d\n", rc);
+ return rc;
+ }
+
+ /* wait 1.5 seconds for hw to measure ESR */
+ msleep(1500);
+ rc = fg_masked_write(chip, BATT_INFO_TM_MISC1(chip),
+ ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT,
+ 0);
+ if (rc < 0) {
+ pr_err("Error in restoring force ESR rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip),
+ LD_REG_CTRL_BIT, LD_REG_CTRL_BIT);
+ if (rc < 0) {
+ pr_err("Error in restoring qnovo_cfg rc=%d\n", rc);
+ return rc;
+ }
+
+ /* force esr extraction disable */
+ rc = fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD,
+ ESR_EXTRACTION_ENABLE_OFFSET, BIT(0), 0,
+ FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("failed to disable esr extn rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_get_battery_resistance(chip, &esr_uohms);
+ fg_dbg(chip, FG_STATUS, "ESR uohms = %d\n", esr_uohms);
+
+ return rc;
+}
+
+static int fg_prepare_for_qnovo(struct fg_chip *chip, int qnovo_enable)
+{
+ int rc;
+
+ /* disable ESR extraction when Qnovo is enabled, re-enable it otherwise */
+ rc = fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD,
+ ESR_EXTRACTION_ENABLE_OFFSET,
+ BIT(0), qnovo_enable ? 0 : BIT(0),
+ FG_IMA_DEFAULT);
+ if (rc < 0)
+ pr_err("Error in configuring esr extraction rc=%d\n", rc);
+
+ rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip),
+ LD_REG_CTRL_BIT,
+ qnovo_enable ? LD_REG_CTRL_BIT : 0);
+ if (rc < 0) {
+ pr_err("Error in configuring qnovo_cfg rc=%d\n", rc);
+ return rc;
+ }
+ fg_dbg(chip, FG_STATUS, "Prepared for Qnovo\n");
+ return 0;
+}
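A hedged usage sketch: judging from the property handlers added below and the Qnovo changes later in this patch, a client can trigger this forced ESR measurement by writing the POWER_SUPPLY_PROP_RESISTANCE property of the "bms" power supply. Illustrative only:

	union power_supply_propval pval = { 0, };
	struct power_supply *bms_psy = power_supply_get_by_name("bms");

	/* Maps to fg_force_esr_meas() via fg_psy_set_property(). */
	if (bms_psy)
		power_supply_set_property(bms_psy,
				POWER_SUPPLY_PROP_RESISTANCE, &pval);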
/* PSY CALLBACKS STAY HERE */
static int fg_psy_get_property(struct power_supply *psy,
@@ -3108,6 +3194,19 @@ static int fg_psy_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
rc = fg_get_sram_prop(chip, FG_SRAM_VBATT_FULL, &pval->intval);
break;
+ case POWER_SUPPLY_PROP_CC_STEP:
+ if ((chip->cc_step.sel >= 0) &&
+ (chip->cc_step.sel < MAX_CC_STEPS)) {
+ pval->intval = chip->cc_step.arr[chip->cc_step.sel];
+ } else {
+ pr_err("cc_step_sel is out of bounds [0, %d]\n",
+ chip->cc_step.sel);
+ return -EINVAL;
+ }
+ break;
+ case POWER_SUPPLY_PROP_CC_STEP_SEL:
+ pval->intval = chip->cc_step.sel;
+ break;
default:
pr_err("unsupported property %d\n", psp);
rc = -EINVAL;
@@ -3140,6 +3239,31 @@ static int fg_psy_set_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
rc = fg_set_constant_chg_voltage(chip, pval->intval);
break;
+ case POWER_SUPPLY_PROP_RESISTANCE:
+ rc = fg_force_esr_meas(chip);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE:
+ rc = fg_prepare_for_qnovo(chip, pval->intval);
+ break;
+ case POWER_SUPPLY_PROP_CC_STEP:
+ if ((chip->cc_step.sel >= 0) &&
+ (chip->cc_step.sel < MAX_CC_STEPS)) {
+ chip->cc_step.arr[chip->cc_step.sel] = pval->intval;
+ } else {
+ pr_err("cc_step_sel is out of bounds [0, %d]\n",
+ chip->cc_step.sel);
+ return -EINVAL;
+ }
+ break;
+ case POWER_SUPPLY_PROP_CC_STEP_SEL:
+ if ((pval->intval >= 0) && (pval->intval < MAX_CC_STEPS)) {
+ chip->cc_step.sel = pval->intval;
+ } else {
+ pr_err("cc_step_sel is out of bounds [0, %d]\n",
+ pval->intval);
+ return -EINVAL;
+ }
+ break;
default:
break;
}
@@ -3153,6 +3277,8 @@ static int fg_property_is_writeable(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ case POWER_SUPPLY_PROP_CC_STEP:
+ case POWER_SUPPLY_PROP_CC_STEP_SEL:
return 1;
default:
break;
@@ -3213,6 +3339,8 @@ static enum power_supply_property fg_psy_props[] = {
POWER_SUPPLY_PROP_SOC_REPORTING_READY,
POWER_SUPPLY_PROP_DEBUG_BATTERY,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ POWER_SUPPLY_PROP_CC_STEP,
+ POWER_SUPPLY_PROP_CC_STEP_SEL,
};
static const struct power_supply_desc fg_psy_desc = {
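The new CC-step properties form a small two-step interface: a client first selects an index with POWER_SUPPLY_PROP_CC_STEP_SEL and then reads or writes the entry at that index with POWER_SUPPLY_PROP_CC_STEP. A hedged sketch of that sequence (the "bms" supply name and the chosen index are assumptions for illustration):

	union power_supply_propval sel = { .intval = 2 };	/* hypothetical index */
	union power_supply_propval val = { 0, };
	struct power_supply *fg_psy = power_supply_get_by_name("bms");

	if (fg_psy &&
	    !power_supply_set_property(fg_psy,
				POWER_SUPPLY_PROP_CC_STEP_SEL, &sel))
		power_supply_get_property(fg_psy,
				POWER_SUPPLY_PROP_CC_STEP, &val);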
diff --git a/drivers/power/supply/qcom/qpnp-qnovo.c b/drivers/power/supply/qcom/qpnp-qnovo.c
index eb97eb0ff2ac..cf90f9041935 100644
--- a/drivers/power/supply/qcom/qpnp-qnovo.c
+++ b/drivers/power/supply/qcom/qpnp-qnovo.c
@@ -20,6 +20,7 @@
#include <linux/of_irq.h>
#include <linux/qpnp/qpnp-revid.h>
#include <linux/pmic-voter.h>
+#include <linux/delay.h>
#define QNOVO_REVISION1 0x00
#define QNOVO_REVISION2 0x01
@@ -114,6 +115,17 @@
#define OK_TO_QNOVO_VOTER "ok_to_qnovo_voter"
#define QNOVO_VOTER "qnovo_voter"
+#define FG_AVAILABLE_VOTER "FG_AVAILABLE_VOTER"
+#define QNOVO_OVERALL_VOTER "QNOVO_OVERALL_VOTER"
+#define QNI_PT_VOTER "QNI_PT_VOTER"
+#define ESR_VOTER "ESR_VOTER"
+
+#define HW_OK_TO_QNOVO_VOTER "HW_OK_TO_QNOVO_VOTER"
+#define CHG_READY_VOTER "CHG_READY_VOTER"
+#define USB_READY_VOTER "USB_READY_VOTER"
+#define DC_READY_VOTER "DC_READY_VOTER"
+
+#define PT_RESTART_VOTER "PT_RESTART_VOTER"
struct qnovo_dt_props {
bool external_rsense;
@@ -127,6 +139,10 @@ struct qnovo {
struct qnovo_dt_props dt;
struct device *dev;
struct votable *disable_votable;
+ struct votable *pt_dis_votable;
+ struct votable *not_ok_to_qnovo_votable;
+ struct votable *chg_ready_votable;
+ struct votable *awake_votable;
struct class qnovo_class;
struct pmic_revid_data *pmic_rev_id;
u32 wa_flags;
@@ -138,10 +154,18 @@ struct qnovo {
s64 v_gain_mega;
struct notifier_block nb;
struct power_supply *batt_psy;
+ struct power_supply *bms_psy;
+ struct power_supply *usb_psy;
+ struct power_supply *dc_psy;
struct work_struct status_change_work;
int fv_uV_request;
int fcc_uA_request;
- bool ok_to_qnovo;
+ int usb_present;
+ int dc_present;
+ struct delayed_work usb_debounce_work;
+ struct delayed_work dc_debounce_work;
+
+ struct delayed_work ptrain_restart_work;
};
static int debug_mask;
@@ -229,6 +253,39 @@ static bool is_batt_available(struct qnovo *chip)
return true;
}
+static bool is_fg_available(struct qnovo *chip)
+{
+ if (!chip->bms_psy)
+ chip->bms_psy = power_supply_get_by_name("bms");
+
+ if (!chip->bms_psy)
+ return false;
+
+ return true;
+}
+
+static bool is_usb_available(struct qnovo *chip)
+{
+ if (!chip->usb_psy)
+ chip->usb_psy = power_supply_get_by_name("usb");
+
+ if (!chip->usb_psy)
+ return false;
+
+ return true;
+}
+
+static bool is_dc_available(struct qnovo *chip)
+{
+ if (!chip->dc_psy)
+ chip->dc_psy = power_supply_get_by_name("dc");
+
+ if (!chip->dc_psy)
+ return false;
+
+ return true;
+}
+
static int qnovo_batt_psy_update(struct qnovo *chip, bool disable)
{
union power_supply_propval pval = {0};
@@ -281,10 +338,86 @@ static int qnovo_disable_cb(struct votable *votable, void *data, int disable,
return -EINVAL;
}
+ /*
+ * FG must be available before Qnovo can be enabled; the
+ * FG_AVAILABLE_VOTER vote keeps it disabled otherwise.
+ */
+
+ if (is_fg_available(chip))
+ power_supply_set_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE,
+ &pval);
+
+ vote(chip->pt_dis_votable, QNOVO_OVERALL_VOTER, disable, 0);
rc = qnovo_batt_psy_update(chip, disable);
return rc;
}
+static int pt_dis_votable_cb(struct votable *votable, void *data, int disable,
+ const char *client)
+{
+ struct qnovo *chip = data;
+ int rc;
+
+ if (disable) {
+ cancel_delayed_work_sync(&chip->ptrain_restart_work);
+ vote(chip->awake_votable, PT_RESTART_VOTER, false, 0);
+ }
+
+ rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT,
+ (bool)disable ? 0 : QNOVO_PTRAIN_EN_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't %s pulse train rc=%d\n",
+ (bool)disable ? "disable" : "enable", rc);
+ return rc;
+ }
+
+ if (!disable) {
+ vote(chip->awake_votable, PT_RESTART_VOTER, true, 0);
+ schedule_delayed_work(&chip->ptrain_restart_work,
+ msecs_to_jiffies(20));
+ }
+
+ return 0;
+}
+
+static int not_ok_to_qnovo_cb(struct votable *votable, void *data,
+ int not_ok_to_qnovo,
+ const char *client)
+{
+ struct qnovo *chip = data;
+
+ vote(chip->disable_votable, OK_TO_QNOVO_VOTER, not_ok_to_qnovo, 0);
+ if (not_ok_to_qnovo)
+ vote(chip->disable_votable, USER_VOTER, true, 0);
+
+ kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
+ return 0;
+}
+
+static int chg_ready_cb(struct votable *votable, void *data, int ready,
+ const char *client)
+{
+ struct qnovo *chip = data;
+
+ vote(chip->not_ok_to_qnovo_votable, CHG_READY_VOTER, !ready, 0);
+
+ return 0;
+}
+
+static int awake_cb(struct votable *votable, void *data, int awake,
+ const char *client)
+{
+ struct qnovo *chip = data;
+
+ if (awake)
+ pm_stay_awake(chip->dev);
+ else
+ pm_relax(chip->dev);
+
+ return 0;
+}
+
static int qnovo_parse_dt(struct qnovo *chip)
{
struct device_node *node = chip->dev->of_node;
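For orientation, the new votables chain into each other: a true vote on the "not ok" votable disables Qnovo, and disabling Qnovo in turn disables the pulse train. A hedged sketch of how a hardware fault propagates through the callbacks defined above (illustration only; it restates calls already present in this patch):

static void example_hw_fault(struct qnovo *chip)
{
	/* qnovo_update_status() reports a hardware error ... */
	vote(chip->not_ok_to_qnovo_votable, HW_OK_TO_QNOVO_VOTER, true, 0);
	/*
	 * ... not_ok_to_qnovo_cb() then votes OK_TO_QNOVO_VOTER on
	 * disable_votable, qnovo_disable_cb() votes QNOVO_OVERALL_VOTER
	 * on pt_dis_votable, and its callback clears QNOVO_PTRAIN_EN_BIT.
	 */
}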
@@ -626,8 +759,9 @@ static ssize_t ok_to_qnovo_show(struct class *c, struct class_attribute *attr,
char *buf)
{
struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+ int val = get_effective_result(chip->not_ok_to_qnovo_votable);
- return snprintf(buf, PAGE_SIZE, "%d\n", chip->ok_to_qnovo);
+ return snprintf(buf, PAGE_SIZE, "%d\n", !val);
}
static ssize_t qnovo_enable_show(struct class *c, struct class_attribute *attr,
@@ -656,21 +790,10 @@ static ssize_t qnovo_enable_store(struct class *c, struct class_attribute *attr,
static ssize_t pt_enable_show(struct class *c, struct class_attribute *attr,
char *ubuf)
{
- int i = attr - qnovo_attributes;
struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
- u8 buf[2] = {0, 0};
- u16 regval;
- int rc;
-
- rc = qnovo_read(chip, params[i].start_addr, buf, params[i].num_regs);
- if (rc < 0) {
- pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
- return -EINVAL;
- }
- regval = buf[1] << 8 | buf[0];
+ int val = get_effective_result(chip->pt_dis_votable);
- return snprintf(ubuf, PAGE_SIZE, "%d\n",
- (int)(regval & QNOVO_PTRAIN_EN_BIT));
+ return snprintf(ubuf, PAGE_SIZE, "%d\n", !val);
}
static ssize_t pt_enable_store(struct class *c, struct class_attribute *attr,
@@ -678,21 +801,12 @@ static ssize_t pt_enable_store(struct class *c, struct class_attribute *attr,
{
struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
unsigned long val;
- int rc = 0;
-
- if (get_effective_result(chip->disable_votable))
- return -EINVAL;
if (kstrtoul(ubuf, 0, &val))
return -EINVAL;
- rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT,
- (bool)val ? QNOVO_PTRAIN_EN_BIT : 0);
- if (rc < 0) {
- dev_err(chip->dev, "Couldn't %s pulse train rc=%d\n",
- (bool)val ? "enable" : "disable", rc);
- return rc;
- }
+ /* val == 0 means userspace wants to disable pt, so vote true to disable */
+ vote(chip->pt_dis_votable, QNI_PT_VOTER, val ? false : true, 0);
return count;
}
@@ -1116,41 +1230,146 @@ static int qnovo_update_status(struct qnovo *chip)
{
u8 val = 0;
int rc;
- bool ok_to_qnovo;
- bool changed = false;
+ bool hw_ok_to_qnovo;
rc = qnovo_read(chip, QNOVO_ERROR_STS2, &val, 1);
if (rc < 0) {
pr_err("Couldn't read error sts rc = %d\n", rc);
- ok_to_qnovo = false;
+ hw_ok_to_qnovo = false;
} else {
/*
* For CV mode keep qnovo enabled, userspace is expected to
* disable it after few runs
*/
- ok_to_qnovo = (val == ERR_CV_MODE || val == 0) ? true : false;
+ hw_ok_to_qnovo = (val == ERR_CV_MODE || val == 0) ?
+ true : false;
}
- if (chip->ok_to_qnovo ^ ok_to_qnovo) {
+ vote(chip->not_ok_to_qnovo_votable, HW_OK_TO_QNOVO_VOTER,
+ !hw_ok_to_qnovo, 0);
+ return 0;
+}
- vote(chip->disable_votable, OK_TO_QNOVO_VOTER, !ok_to_qnovo, 0);
- if (!ok_to_qnovo)
- vote(chip->disable_votable, USER_VOTER, true, 0);
+static void usb_debounce_work(struct work_struct *work)
+{
+ struct qnovo *chip = container_of(work,
+ struct qnovo, usb_debounce_work.work);
- chip->ok_to_qnovo = ok_to_qnovo;
- changed = true;
- }
+ vote(chip->chg_ready_votable, USB_READY_VOTER, true, 0);
+ vote(chip->awake_votable, USB_READY_VOTER, false, 0);
+}
- return changed;
+static void dc_debounce_work(struct work_struct *work)
+{
+ struct qnovo *chip = container_of(work,
+ struct qnovo, dc_debounce_work.work);
+
+ vote(chip->chg_ready_votable, DC_READY_VOTER, true, 0);
+ vote(chip->awake_votable, DC_READY_VOTER, false, 0);
}
+#define DEBOUNCE_MS 15000 /* 15 seconds */
static void status_change_work(struct work_struct *work)
{
struct qnovo *chip = container_of(work,
struct qnovo, status_change_work);
+ union power_supply_propval pval;
+ bool usb_present = false, dc_present = false;
+ int rc;
+
+ if (is_fg_available(chip))
+ vote(chip->disable_votable, FG_AVAILABLE_VOTER, false, 0);
+
+ if (is_usb_available(chip)) {
+ rc = power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ usb_present = (rc < 0) ? 0 : pval.intval;
+ }
+
+ if (chip->usb_present && !usb_present) {
+ /* removal */
+ chip->usb_present = 0;
+ cancel_delayed_work_sync(&chip->usb_debounce_work);
+ vote(chip->awake_votable, USB_READY_VOTER, false, 0);
+ vote(chip->chg_ready_votable, USB_READY_VOTER, false, 0);
+ } else if (!chip->usb_present && usb_present) {
+ /* insertion */
+ chip->usb_present = 1;
+ vote(chip->awake_votable, USB_READY_VOTER, true, 0);
+ schedule_delayed_work(&chip->usb_debounce_work,
+ msecs_to_jiffies(DEBOUNCE_MS));
+ }
+
+ if (is_dc_available(chip)) {
+ rc = power_supply_get_property(chip->dc_psy,
+ POWER_SUPPLY_PROP_PRESENT,
+ &pval);
+ dc_present = (rc < 0) ? 0 : pval.intval;
+ }
- if (qnovo_update_status(chip))
- kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
+ if (usb_present)
+ dc_present = 0;
+
+ if (chip->dc_present && !dc_present) {
+ /* removal */
+ chip->dc_present = 0;
+ cancel_delayed_work_sync(&chip->dc_debounce_work);
+ vote(chip->awake_votable, DC_READY_VOTER, false, 0);
+ vote(chip->chg_ready_votable, DC_READY_VOTER, false, 0);
+ } else if (!chip->dc_present && dc_present) {
+ /* insertion */
+ chip->dc_present = 1;
+ vote(chip->awake_votable, DC_READY_VOTER, true, 0);
+ schedule_delayed_work(&chip->dc_debounce_work,
+ msecs_to_jiffies(DEBOUNCE_MS));
+ }
+
+ qnovo_update_status(chip);
+}
+
+static void ptrain_restart_work(struct work_struct *work)
+{
+ struct qnovo *chip = container_of(work,
+ struct qnovo, ptrain_restart_work.work);
+ u8 pt_t1, pt_t2;
+ int rc;
+
+ rc = qnovo_read(chip, QNOVO_PTTIME_STS, &pt_t1, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read QNOVO_PTTIME_STS rc = %d\n",
+ rc);
+ goto clean_up;
+ }
+
+ /* pttime increments every 2 seconds */
+ msleep(2100);
+
+ rc = qnovo_read(chip, QNOVO_PTTIME_STS, &pt_t2, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read QNOVO_PTTIME_STS rc = %d\n",
+ rc);
+ goto clean_up;
+ }
+
+ if (pt_t1 != pt_t2)
+ goto clean_up;
+
+ /* Toggle pt enable to restart pulse train */
+ rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't disable pulse train rc=%d\n", rc);
+ goto clean_up;
+ }
+ msleep(1000);
+ rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT,
+ QNOVO_PTRAIN_EN_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't enable pulse train rc=%d\n", rc);
+ goto clean_up;
+ }
+
+clean_up:
+ vote(chip->awake_votable, PT_RESTART_VOTER, false, 0);
}
static int qnovo_notifier_call(struct notifier_block *nb,
@@ -1162,7 +1381,10 @@ static int qnovo_notifier_call(struct notifier_block *nb,
if (ev != PSY_EVENT_PROP_CHANGED)
return NOTIFY_OK;
- if (strcmp(psy->desc->name, "battery") == 0)
+ if (strcmp(psy->desc->name, "battery") == 0
+ || strcmp(psy->desc->name, "bms") == 0
+ || strcmp(psy->desc->name, "usb") == 0
+ || strcmp(psy->desc->name, "dc") == 0)
schedule_work(&chip->status_change_work);
return NOTIFY_OK;
@@ -1171,7 +1393,23 @@ static int qnovo_notifier_call(struct notifier_block *nb,
static irqreturn_t handle_ptrain_done(int irq, void *data)
{
struct qnovo *chip = data;
+ union power_supply_propval pval = {0};
+ /*
+ * hw resets pt_en bit once ptrain_done triggers.
+ * vote on behalf of QNI to disable it such that
+ * once QNI enables it, the votable state changes
+ * and the callback that sets it is indeed invoked
+ */
+ vote(chip->pt_dis_votable, QNI_PT_VOTER, true, 0);
+
+ vote(chip->pt_dis_votable, ESR_VOTER, true, 0);
+ if (is_fg_available(chip))
+ power_supply_set_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_RESISTANCE,
+ &pval);
+
+ vote(chip->pt_dis_votable, ESR_VOTER, false, 0);
qnovo_update_status(chip);
kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
return IRQ_HANDLED;
@@ -1186,6 +1424,11 @@ static int qnovo_hw_init(struct qnovo *chip)
u8 val;
vote(chip->disable_votable, USER_VOTER, true, 0);
+ vote(chip->disable_votable, FG_AVAILABLE_VOTER, true, 0);
+
+ vote(chip->pt_dis_votable, QNI_PT_VOTER, true, 0);
+ vote(chip->pt_dis_votable, QNOVO_OVERALL_VOTER, true, 0);
+ vote(chip->pt_dis_votable, ESR_VOTER, false, 0);
val = 0;
rc = qnovo_write(chip, QNOVO_STRM_CTRL, &val, 1);
@@ -1349,12 +1592,45 @@ static int qnovo_probe(struct platform_device *pdev)
goto cleanup;
}
+ chip->pt_dis_votable = create_votable("QNOVO_PT_DIS", VOTE_SET_ANY,
+ pt_dis_votable_cb, chip);
+ if (IS_ERR(chip->pt_dis_votable)) {
+ rc = PTR_ERR(chip->pt_dis_votable);
+ goto destroy_disable_votable;
+ }
+
+ chip->not_ok_to_qnovo_votable = create_votable("QNOVO_NOT_OK",
+ VOTE_SET_ANY,
+ not_ok_to_qnovo_cb, chip);
+ if (IS_ERR(chip->not_ok_to_qnovo_votable)) {
+ rc = PTR_ERR(chip->not_ok_to_qnovo_votable);
+ goto destroy_pt_dis_votable;
+ }
+
+ chip->chg_ready_votable = create_votable("QNOVO_CHG_READY",
+ VOTE_SET_ANY,
+ chg_ready_cb, chip);
+ if (IS_ERR(chip->chg_ready_votable)) {
+ rc = PTR_ERR(chip->chg_ready_votable);
+ goto destroy_not_ok_to_qnovo_votable;
+ }
+
+ chip->awake_votable = create_votable("QNOVO_AWAKE", VOTE_SET_ANY,
+ awake_cb, chip);
+ if (IS_ERR(chip->awake_votable)) {
+ rc = PTR_ERR(chip->awake_votable);
+ goto destroy_chg_ready_votable;
+ }
+
INIT_WORK(&chip->status_change_work, status_change_work);
+ INIT_DELAYED_WORK(&chip->dc_debounce_work, dc_debounce_work);
+ INIT_DELAYED_WORK(&chip->usb_debounce_work, usb_debounce_work);
+ INIT_DELAYED_WORK(&chip->ptrain_restart_work, ptrain_restart_work);
rc = qnovo_hw_init(chip);
if (rc < 0) {
pr_err("Couldn't initialize hardware rc=%d\n", rc);
- goto destroy_votable;
+ goto destroy_awake_votable;
}
rc = qnovo_register_notifier(chip);
@@ -1390,7 +1666,15 @@ static int qnovo_probe(struct platform_device *pdev)
unreg_notifier:
power_supply_unreg_notifier(&chip->nb);
-destroy_votable:
+destroy_awake_votable:
+ destroy_votable(chip->awake_votable);
+destroy_chg_ready_votable:
+ destroy_votable(chip->chg_ready_votable);
+destroy_not_ok_to_qnovo_votable:
+ destroy_votable(chip->not_ok_to_qnovo_votable);
+destroy_pt_dis_votable:
+ destroy_votable(chip->pt_dis_votable);
+destroy_disable_votable:
destroy_votable(chip->disable_votable);
cleanup:
platform_set_drvdata(pdev, NULL);
@@ -1403,6 +1687,9 @@ static int qnovo_remove(struct platform_device *pdev)
class_unregister(&chip->qnovo_class);
power_supply_unreg_notifier(&chip->nb);
+ destroy_votable(chip->chg_ready_votable);
+ destroy_votable(chip->not_ok_to_qnovo_votable);
+ destroy_votable(chip->pt_dis_votable);
destroy_votable(chip->disable_votable);
platform_set_drvdata(pdev, NULL);
return 0;
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 7de9429cae8b..630d02eb7a67 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -1641,6 +1641,13 @@ static int smb2_init_hw(struct smb2 *chip)
return rc;
}
+ rc = smblib_read(chg, USBIN_OPTIONS_2_CFG_REG, &chg->float_cfg);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't read float charger options rc=%d\n",
+ rc);
+ return rc;
+ }
+
switch (chip->dt.chg_inhibit_thr_mv) {
case 50:
rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 52775a3bdf1b..2c55ae819cc9 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -488,6 +488,45 @@ static int smblib_set_usb_pd_allowed_voltage(struct smb_charger *chg,
/********************
* HELPER FUNCTIONS *
********************/
+static int smblib_request_dpdm(struct smb_charger *chg, bool enable)
+{
+ int rc = 0;
+
+ /* fetch the DPDM regulator */
+ if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
+ "dpdm-supply", NULL)) {
+ chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
+ if (IS_ERR(chg->dpdm_reg)) {
+ rc = PTR_ERR(chg->dpdm_reg);
+ smblib_err(chg, "Couldn't get dpdm regulator rc=%d\n",
+ rc);
+ chg->dpdm_reg = NULL;
+ return rc;
+ }
+ }
+
+ if (enable) {
+ if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
+ smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
+ rc = regulator_enable(chg->dpdm_reg);
+ if (rc < 0)
+ smblib_err(chg,
+ "Couldn't enable dpdm regulator rc=%d\n",
+ rc);
+ }
+ } else {
+ if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
+ smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
+ rc = regulator_disable(chg->dpdm_reg);
+ if (rc < 0)
+ smblib_err(chg,
+ "Couldn't disable dpdm regulator rc=%d\n",
+ rc);
+ }
+ }
+
+ return rc;
+}
static void smblib_rerun_apsd(struct smb_charger *chg)
{
@@ -514,10 +553,17 @@ static const struct apsd_result *smblib_update_usb_type(struct smb_charger *chg)
const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
/* if PD is active, APSD is disabled so won't have a valid result */
- if (chg->pd_active)
+ if (chg->pd_active) {
chg->real_charger_type = POWER_SUPPLY_TYPE_USB_PD;
- else
+ } else {
+ /*
+ * Update the real charger type only if it is not a FLOAT
+ * charger that was detected as SDP
+ */
+ if (!(apsd_result->pst == POWER_SUPPLY_TYPE_USB_FLOAT &&
+ chg->real_charger_type == POWER_SUPPLY_TYPE_USB))
chg->real_charger_type = apsd_result->pst;
+ }
smblib_dbg(chg, PR_MISC, "APSD=%s PD=%d\n",
apsd_result->name, chg->pd_active);
@@ -600,13 +646,9 @@ static void smblib_uusb_removal(struct smb_charger *chg)
cancel_delayed_work_sync(&chg->pl_enable_work);
- if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
- smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
- rc = regulator_disable(chg->dpdm_reg);
- if (rc < 0)
- smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n",
- rc);
- }
+ rc = smblib_request_dpdm(chg, false);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't to disable DPDM rc=%d\n", rc);
if (chg->wa_flags & BOOST_BACK_WA) {
data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data;
@@ -698,24 +740,9 @@ int smblib_rerun_apsd_if_required(struct smb_charger *chg)
if (!val.intval)
return 0;
- /* fetch the DPDM regulator */
- if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
- "dpdm-supply", NULL)) {
- chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
- if (IS_ERR(chg->dpdm_reg)) {
- smblib_err(chg, "Couldn't get dpdm regulator rc=%ld\n",
- PTR_ERR(chg->dpdm_reg));
- chg->dpdm_reg = NULL;
- }
- }
-
- if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
- smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
- rc = regulator_enable(chg->dpdm_reg);
- if (rc < 0)
- smblib_err(chg, "Couldn't enable dpdm regulator rc=%d\n",
- rc);
- }
+ rc = smblib_request_dpdm(chg, true);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't to enable DPDM rc=%d\n", rc);
chg->uusb_apsd_rerun_done = true;
smblib_rerun_apsd(chg);
@@ -785,6 +812,7 @@ static int set_sdp_current(struct smb_charger *chg, int icl_ua)
{
int rc;
u8 icl_options;
+ const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
/* power source is SDP */
switch (icl_ua) {
@@ -809,6 +837,21 @@ static int set_sdp_current(struct smb_charger *chg, int icl_ua)
return -EINVAL;
}
+ if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB &&
+ apsd_result->pst == POWER_SUPPLY_TYPE_USB_FLOAT) {
+ /*
+ * if an SDP was detected as a FLOAT charger, change the
+ * float charger configuration to SDP
+ */
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+ FORCE_FLOAT_SDP_CFG_BIT, FORCE_FLOAT_SDP_CFG_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set float ICL options rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
rc = smblib_masked_write(chg, USBIN_ICL_OPTIONS_REG,
CFG_USB3P0_SEL_BIT | USB51_MODE_BIT, icl_options);
if (rc < 0) {
@@ -1528,8 +1571,8 @@ int smblib_get_prop_batt_status(struct smb_charger *chg,
union power_supply_propval *val)
{
union power_supply_propval pval = {0, };
- bool usb_online, dc_online;
- u8 stat;
+ bool usb_online, dc_online, qnovo_en;
+ u8 stat, pt_en_cmd;
int rc;
rc = smblib_get_prop_usb_online(chg, &pval);
@@ -1597,11 +1640,22 @@ int smblib_get_prop_batt_status(struct smb_charger *chg,
smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
rc);
return rc;
- }
+ }
stat &= ENABLE_TRICKLE_BIT | ENABLE_PRE_CHARGING_BIT |
ENABLE_FAST_CHARGING_BIT | ENABLE_FULLON_MODE_BIT;
- if (!stat)
+
+ rc = smblib_read(chg, QNOVO_PT_ENABLE_CMD_REG, &pt_en_cmd);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read QNOVO_PT_ENABLE_CMD_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ qnovo_en = (bool)(pt_en_cmd & QNOVO_PT_ENABLE_CMD_BIT);
+
+ /* ignore stat7 when qnovo is enabled */
+ if (!qnovo_en && !stat)
val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
return 0;
@@ -2402,6 +2456,31 @@ int smblib_get_prop_die_health(struct smb_charger *chg,
return 0;
}
+#define SDP_CURRENT_UA 500000
+#define CDP_CURRENT_UA 1500000
+#define DCP_CURRENT_UA 1500000
+#define HVDCP_CURRENT_UA 3000000
+#define TYPEC_DEFAULT_CURRENT_UA 900000
+#define TYPEC_MEDIUM_CURRENT_UA 1500000
+#define TYPEC_HIGH_CURRENT_UA 3000000
+static int get_rp_based_dcp_current(struct smb_charger *chg, int typec_mode)
+{
+ int rp_ua;
+
+ switch (typec_mode) {
+ case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
+ rp_ua = TYPEC_HIGH_CURRENT_UA;
+ break;
+ case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
+ case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
+ /* fall through */
+ default:
+ rp_ua = DCP_CURRENT_UA;
+ }
+
+ return rp_ua;
+}
+
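A short worked example of the Rp mapping above (illustrative only): a source advertising the high Rp level yields the 3 A ceiling, while default or medium Rp falls back to the DCP level.

	/* Hypothetical call: Rp-3.0A source -> 3 A input current ceiling. */
	int rp_ua = get_rp_based_dcp_current(chg, POWER_SUPPLY_TYPEC_SOURCE_HIGH);
	/* rp_ua == TYPEC_HIGH_CURRENT_UA (3000000 uA);
	 * POWER_SUPPLY_TYPEC_SOURCE_DEFAULT would give DCP_CURRENT_UA. */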
/*******************
* USB PSY SETTERS *
* *****************/
@@ -2419,14 +2498,54 @@ int smblib_set_prop_pd_current_max(struct smb_charger *chg,
return rc;
}
+static int smblib_handle_usb_current(struct smb_charger *chg,
+ int usb_current)
+{
+ int rc = 0, rp_ua, typec_mode;
+
+ if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
+ if (usb_current == -ETIMEDOUT) {
+ /*
+ * Valid FLOAT charger, report the current based
+ * on Rp
+ */
+ typec_mode = smblib_get_prop_typec_mode(chg);
+ rp_ua = get_rp_based_dcp_current(chg, typec_mode);
+ rc = vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER,
+ true, rp_ua);
+ if (rc < 0)
+ return rc;
+ } else {
+ /*
+ * FLOAT charger detected as SDP by USB driver,
+ * charge with the requested current and update the
+ * real_charger_type
+ */
+ chg->real_charger_type = POWER_SUPPLY_TYPE_USB;
+ rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
+ true, usb_current);
+ if (rc < 0)
+ return rc;
+ rc = vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER,
+ false, 0);
+ if (rc < 0)
+ return rc;
+ }
+ } else {
+ rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
+ true, usb_current);
+ }
+
+ return rc;
+}
+
int smblib_set_prop_usb_current_max(struct smb_charger *chg,
const union power_supply_propval *val)
{
int rc = 0;
if (!chg->pd_active) {
- rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
- true, val->intval);
+ rc = smblib_handle_usb_current(chg, val->intval);
} else if (chg->system_suspend_supported) {
if (val->intval <= USBIN_25MA)
rc = vote(chg->usb_icl_votable,
@@ -2875,14 +2994,6 @@ int smblib_get_prop_fcc_delta(struct smb_charger *chg,
/***********************
* USB MAIN PSY SETTERS *
*************************/
-
-#define SDP_CURRENT_UA 500000
-#define CDP_CURRENT_UA 1500000
-#define DCP_CURRENT_UA 1500000
-#define HVDCP_CURRENT_UA 3000000
-#define TYPEC_DEFAULT_CURRENT_UA 900000
-#define TYPEC_MEDIUM_CURRENT_UA 1500000
-#define TYPEC_HIGH_CURRENT_UA 3000000
int smblib_get_charge_current(struct smb_charger *chg,
int *total_current_ua)
{
@@ -3173,25 +3284,10 @@ void smblib_usb_plugin_locked(struct smb_charger *chg)
smblib_set_opt_freq_buck(chg, vbus_rising ? chg->chg_freq.freq_5V :
chg->chg_freq.freq_removal);
- /* fetch the DPDM regulator */
- if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
- "dpdm-supply", NULL)) {
- chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
- if (IS_ERR(chg->dpdm_reg)) {
- smblib_err(chg, "Couldn't get dpdm regulator rc=%ld\n",
- PTR_ERR(chg->dpdm_reg));
- chg->dpdm_reg = NULL;
- }
- }
-
if (vbus_rising) {
- if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
- smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
- rc = regulator_enable(chg->dpdm_reg);
- if (rc < 0)
- smblib_err(chg, "Couldn't enable dpdm regulator rc=%d\n",
- rc);
- }
+ rc = smblib_request_dpdm(chg, true);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't to enable DPDM rc=%d\n", rc);
/* Schedule work to enable parallel charger */
vote(chg->awake_votable, PL_DELAY_VOTER, true, 0);
@@ -3211,13 +3307,9 @@ void smblib_usb_plugin_locked(struct smb_charger *chg)
}
}
- if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
- smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
- rc = regulator_disable(chg->dpdm_reg);
- if (rc < 0)
- smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n",
- rc);
- }
+ rc = smblib_request_dpdm(chg, false);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
}
if (chg->micro_usb_mode)
@@ -3438,24 +3530,6 @@ static void smblib_handle_hvdcp_detect_done(struct smb_charger *chg,
rising ? "rising" : "falling");
}
-static int get_rp_based_dcp_current(struct smb_charger *chg, int typec_mode)
-{
- int rp_ua;
-
- switch (typec_mode) {
- case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
- rp_ua = TYPEC_HIGH_CURRENT_UA;
- break;
- case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
- case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
- /* fall through */
- default:
- rp_ua = DCP_CURRENT_UA;
- }
-
- return rp_ua;
-}
-
static void smblib_force_legacy_icl(struct smb_charger *chg, int pst)
{
int typec_mode;
@@ -3481,11 +3555,17 @@ static void smblib_force_legacy_icl(struct smb_charger *chg, int pst)
vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 1500000);
break;
case POWER_SUPPLY_TYPE_USB_DCP:
- case POWER_SUPPLY_TYPE_USB_FLOAT:
typec_mode = smblib_get_prop_typec_mode(chg);
rp_ua = get_rp_based_dcp_current(chg, typec_mode);
vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, rp_ua);
break;
+ case POWER_SUPPLY_TYPE_USB_FLOAT:
+ /*
+ * limit ICL to 100mA; the USB driver will enumerate to check
+ * if this is an SDP and set the current appropriately
+ */
+ vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 100000);
+ break;
case POWER_SUPPLY_TYPE_USB_HVDCP:
case POWER_SUPPLY_TYPE_USB_HVDCP_3:
vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 3000000);
@@ -3617,13 +3697,9 @@ static void smblib_handle_typec_removal(struct smb_charger *chg)
chg->cc2_detach_wa_active = false;
- if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
- smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
- rc = regulator_disable(chg->dpdm_reg);
- if (rc < 0)
- smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n",
- rc);
- }
+ rc = smblib_request_dpdm(chg, false);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
if (chg->wa_flags & BOOST_BACK_WA) {
data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data;
@@ -3680,6 +3756,13 @@ static void smblib_handle_typec_removal(struct smb_charger *chg)
chg->pd_hard_reset = 0;
chg->typec_legacy_valid = false;
+ /* write back the default FLOAT charger configuration */
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+ (u8)FLOAT_OPTIONS_MASK, chg->float_cfg);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't write float charger options rc=%d\n",
+ rc);
+
/* reset back to 120mS tCC debounce */
rc = smblib_masked_write(chg, MISC_CFG_REG, TCC_DEBOUNCE_20MS_BIT, 0);
if (rc < 0)
@@ -3759,10 +3842,14 @@ static void smblib_handle_typec_insertion(struct smb_charger *chg)
smblib_err(chg, "Couldn't disable APSD_START_ON_CC rc=%d\n",
rc);
- if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT)
+ if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT) {
typec_sink_insertion(chg);
- else
+ } else {
+ rc = smblib_request_dpdm(chg, true);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't to enable DPDM rc=%d\n", rc);
typec_sink_removal(chg);
+ }
}
static void smblib_handle_rp_change(struct smb_charger *chg, int typec_mode)
@@ -3775,6 +3862,24 @@ static void smblib_handle_rp_change(struct smb_charger *chg, int typec_mode)
return;
/*
+ * if APSD indicates FLOAT and the USB stack had detected SDP,
+ * do not respond to Rp changes since we cannot confirm that
+ * it is a legacy cable
+ */
+ if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB)
+ return;
+ /*
+ * We want the ICL vote @ 100mA for a FLOAT charger
+ * until the detection by the USB stack is complete.
+ * Ignore the Rp changes unless there is a
+ * pre-existing valid vote.
+ */
+ if (apsd->pst == POWER_SUPPLY_TYPE_USB_FLOAT &&
+ get_client_vote(chg->usb_icl_votable,
+ LEGACY_UNKNOWN_VOTER) <= 100000)
+ return;
+
+ /*
* handle Rp change for DCP/FLOAT/OCP.
* Update the current only if the Rp is different from
* the last Rp value.
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 43a795085755..8c810352f78f 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -326,6 +326,7 @@ struct smb_charger {
int typec_mode;
int usb_icl_change_irq_enabled;
u32 jeita_status;
+ u8 float_cfg;
/* workaround flag */
u32 wa_flags;
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index aae796678ffe..47106f937371 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -2416,7 +2416,8 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
*/
static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg)
{
- int ret = 0, scm_ret = 0;
+ int ret = 0;
+ u64 scm_ret = 0;
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
/* scm command buffer structure */
@@ -2457,7 +2458,7 @@ static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg)
cbuf.device_id = UFS_TZ_DEV_ID;
ret = scm_restore_sec_cfg(cbuf.device_id, cbuf.spare, &scm_ret);
if (ret || scm_ret) {
- dev_dbg(hba->dev, "%s: failed, ret %d scm_ret %d\n",
+ dev_dbg(hba->dev, "%s: failed, ret %d scm_ret %llu\n",
__func__, ret, scm_ret);
if (!ret)
ret = scm_ret;
@@ -2466,7 +2467,7 @@ static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg)
}
out:
- dev_dbg(hba->dev, "%s: ip: restore_sec_cfg %d, op: restore_sec_cfg %d, ret %d scm_ret %d\n",
+ dev_dbg(hba->dev, "%s: ip: restore_sec_cfg %d, op: restore_sec_cfg %d, ret %d scm_ret %llu\n",
__func__, restore_sec_cfg, host->sec_cfg_updated, ret, scm_ret);
return ret;
}
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index a2f2cc0c2c51..c23023f43d30 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1483,7 +1483,7 @@ start:
hba->clk_gating.state = REQ_CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
- queue_work(hba->clk_gating.ungating_workq,
+ queue_work(hba->clk_gating.clk_gating_workq,
&hba->clk_gating.ungate_work);
/*
* fall through to check if we should wait for this
@@ -1751,7 +1751,8 @@ static enum hrtimer_restart ufshcd_clkgate_hrtimer_handler(
struct ufs_hba *hba = container_of(timer, struct ufs_hba,
clk_gating.gate_hrtimer);
- schedule_work(&hba->clk_gating.gate_work);
+ queue_work(hba->clk_gating.clk_gating_workq,
+ &hba->clk_gating.gate_work);
return HRTIMER_NORESTART;
}
@@ -1759,7 +1760,7 @@ static enum hrtimer_restart ufshcd_clkgate_hrtimer_handler(
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
struct ufs_clk_gating *gating = &hba->clk_gating;
- char wq_name[sizeof("ufs_clk_ungating_00")];
+ char wq_name[sizeof("ufs_clk_gating_00")];
hba->clk_gating.state = CLKS_ON;
@@ -1788,9 +1789,10 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
hrtimer_init(&gating->gate_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
gating->gate_hrtimer.function = ufshcd_clkgate_hrtimer_handler;
- snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_ungating_%d",
+ snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
hba->host->host_no);
- hba->clk_gating.ungating_workq = create_singlethread_workqueue(wq_name);
+ hba->clk_gating.clk_gating_workq =
+ create_singlethread_workqueue(wq_name);
gating->is_enabled = true;
@@ -1854,7 +1856,7 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
ufshcd_cancel_gate_work(hba);
cancel_work_sync(&hba->clk_gating.ungate_work);
- destroy_workqueue(hba->clk_gating.ungating_workq);
+ destroy_workqueue(hba->clk_gating.clk_gating_workq);
}
static void ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba, u32 delay)
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index d66205ff9f5d..da3ad78d3405 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -431,7 +431,7 @@ struct ufs_clk_gating {
struct device_attribute enable_attr;
bool is_enabled;
int active_reqs;
- struct workqueue_struct *ungating_workq;
+ struct workqueue_struct *clk_gating_workq;
};
/* Hibern8 state */
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index c74cd3b814cd..bf815cb68f90 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -84,35 +84,59 @@ module_param(qmi_timeout, ulong, 0600);
} while (0)
#define icnss_pr_err(_fmt, ...) do { \
- pr_err(_fmt, ##__VA_ARGS__); \
- icnss_ipc_log_string("ERR: " pr_fmt(_fmt), \
- ##__VA_ARGS__); \
+ printk("%s" pr_fmt(_fmt), KERN_ERR, ##__VA_ARGS__); \
+ icnss_ipc_log_string("%s" pr_fmt(_fmt), "", \
+ ##__VA_ARGS__); \
} while (0)
#define icnss_pr_warn(_fmt, ...) do { \
- pr_warn(_fmt, ##__VA_ARGS__); \
- icnss_ipc_log_string("WRN: " pr_fmt(_fmt), \
- ##__VA_ARGS__); \
+ printk("%s" pr_fmt(_fmt), KERN_WARNING, ##__VA_ARGS__); \
+ icnss_ipc_log_string("%s" pr_fmt(_fmt), "", \
+ ##__VA_ARGS__); \
} while (0)
#define icnss_pr_info(_fmt, ...) do { \
- pr_info(_fmt, ##__VA_ARGS__); \
- icnss_ipc_log_string("INF: " pr_fmt(_fmt), \
- ##__VA_ARGS__); \
+ printk("%s" pr_fmt(_fmt), KERN_INFO, ##__VA_ARGS__); \
+ icnss_ipc_log_string("%s" pr_fmt(_fmt), "", \
+ ##__VA_ARGS__); \
} while (0)
+#if defined(CONFIG_DYNAMIC_DEBUG)
#define icnss_pr_dbg(_fmt, ...) do { \
- pr_debug(_fmt, ##__VA_ARGS__); \
- icnss_ipc_log_string("DBG: " pr_fmt(_fmt), \
- ##__VA_ARGS__); \
+ pr_debug(_fmt, ##__VA_ARGS__); \
+ icnss_ipc_log_string(pr_fmt(_fmt), ##__VA_ARGS__); \
} while (0)
#define icnss_pr_vdbg(_fmt, ...) do { \
- pr_debug(_fmt, ##__VA_ARGS__); \
- icnss_ipc_log_long_string("DBG: " pr_fmt(_fmt), \
- ##__VA_ARGS__); \
+ pr_debug(_fmt, ##__VA_ARGS__); \
+ icnss_ipc_log_long_string(pr_fmt(_fmt), ##__VA_ARGS__); \
+ } while (0)
+#elif defined(DEBUG)
+#define icnss_pr_dbg(_fmt, ...) do { \
+ printk("%s" pr_fmt(_fmt), KERN_DEBUG, ##__VA_ARGS__); \
+ icnss_ipc_log_string("%s" pr_fmt(_fmt), "", \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#define icnss_pr_vdbg(_fmt, ...) do { \
+ printk("%s" pr_fmt(_fmt), KERN_DEBUG, ##__VA_ARGS__); \
+ icnss_ipc_log_long_string("%s" pr_fmt(_fmt), "", \
+ ##__VA_ARGS__); \
+ } while (0)
+#else
+#define icnss_pr_dbg(_fmt, ...) do { \
+ no_printk("%s" pr_fmt(_fmt), KERN_DEBUG, ##__VA_ARGS__); \
+ icnss_ipc_log_string("%s" pr_fmt(_fmt), "", \
+ ##__VA_ARGS__); \
} while (0)
+#define icnss_pr_vdbg(_fmt, ...) do { \
+ no_printk("%s" pr_fmt(_fmt), KERN_DEBUG, ##__VA_ARGS__); \
+ icnss_ipc_log_long_string("%s" pr_fmt(_fmt), "", \
+ ##__VA_ARGS__); \
+ } while (0)
+#endif
+
#ifdef CONFIG_ICNSS_DEBUG
#define ICNSS_ASSERT(_condition) do { \
if (!(_condition)) { \
@@ -2579,21 +2603,22 @@ static int icnss_service_notifier_notify(struct notifier_block *nb,
if (event_data == NULL)
return notifier_from_errno(-ENOMEM);
+ event_data->crashed = true;
+
if (state == NULL) {
- event_data->crashed = true;
priv->stats.recovery.root_pd_crash++;
goto event_post;
}
switch (*state) {
case ROOT_PD_WDOG_BITE:
- event_data->crashed = true;
event_data->wdog_bite = true;
priv->stats.recovery.root_pd_crash++;
break;
case ROOT_PD_SHUTDOWN:
cause = ICNSS_ROOT_PD_SHUTDOWN;
priv->stats.recovery.root_pd_shutdown++;
+ event_data->crashed = false;
break;
case USER_PD_STATE_CHANGE:
if (test_bit(ICNSS_HOST_TRIGGERED_PDR, &priv->state)) {
@@ -2605,7 +2630,6 @@ static int icnss_service_notifier_notify(struct notifier_block *nb,
}
break;
default:
- event_data->crashed = true;
priv->stats.recovery.root_pd_crash++;
break;
}
diff --git a/drivers/soc/qcom/jtagv8-etm.c b/drivers/soc/qcom/jtagv8-etm.c
index 2c15f7896c82..63432e6026e2 100644
--- a/drivers/soc/qcom/jtagv8-etm.c
+++ b/drivers/soc/qcom/jtagv8-etm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1503,6 +1503,7 @@ static int jtag_mm_etm_callback(struct notifier_block *nfb,
void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
+ u64 version = 0;
if (!etm[cpu])
goto out;
@@ -1524,8 +1525,8 @@ static int jtag_mm_etm_callback(struct notifier_block *nfb,
goto out;
}
if (etm_arch_supported(etm[cpu]->arch)) {
- if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) <
- TZ_DBG_ETM_VER)
+ if (!scm_get_feat_version(TZ_DBG_ETM_FEAT_ID, &version)
+ && version < TZ_DBG_ETM_VER)
etm[cpu]->save_restore_enabled = true;
else
pr_info("etm save-restore supported by TZ\n");
@@ -1615,8 +1616,10 @@ static int jtag_mm_etm_probe(struct platform_device *pdev, uint32_t cpu)
mutex_lock(&etmdata->mutex);
if (etmdata->init && !etmdata->enable) {
if (etm_arch_supported(etmdata->arch)) {
- if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) <
- TZ_DBG_ETM_VER)
+ u64 version = 0;
+
+ if (!scm_get_feat_version(TZ_DBG_ETM_FEAT_ID, &version)
+ && (version < TZ_DBG_ETM_VER))
etmdata->save_restore_enabled = true;
else
pr_info("etm save-restore supported by TZ\n");
diff --git a/drivers/soc/qcom/jtagv8.c b/drivers/soc/qcom/jtagv8.c
index 94c391eabaea..f09ccce8f9c3 100644
--- a/drivers/soc/qcom/jtagv8.c
+++ b/drivers/soc/qcom/jtagv8.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -974,7 +974,7 @@ static struct notifier_block jtag_cpu_pm_notifier = {
static int __init msm_jtag_dbg_init(void)
{
int ret;
-
+ u64 version = 0;
if (msm_jtag_fuse_apps_access_disabled())
return -EPERM;
@@ -982,7 +982,8 @@ static int __init msm_jtag_dbg_init(void)
dbg_init_arch_data();
if (dbg_arch_supported(dbg.arch)) {
- if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) < TZ_DBG_ETM_VER) {
+ if (!scm_get_feat_version(TZ_DBG_ETM_FEAT_ID, &version) &&
+ version < TZ_DBG_ETM_VER) {
dbg.save_restore_enabled = true;
} else {
pr_info("dbg save-restore supported by TZ\n");
diff --git a/drivers/soc/qcom/scm.c b/drivers/soc/qcom/scm.c
index 31de6e5c173c..cc3f5d6a7c89 100644
--- a/drivers/soc/qcom/scm.c
+++ b/drivers/soc/qcom/scm.c
@@ -1117,54 +1117,55 @@ int scm_is_call_available(u32 svc_id, u32 cmd_id)
ret = scm_call(SCM_SVC_INFO, IS_CALL_AVAIL_CMD, &svc_cmd,
sizeof(svc_cmd), &ret_val, sizeof(ret_val));
- if (ret)
- return ret;
+ if (!ret && ret_val)
+ return 1;
+ else
+ return 0;
- return ret_val;
}
desc.arginfo = SCM_ARGS(1);
desc.args[0] = SCM_SIP_FNID(svc_id, cmd_id);
+ desc.ret[0] = 0;
ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO, IS_CALL_AVAIL_CMD), &desc);
- if (ret)
- return ret;
+ if (!ret && desc.ret[0])
+ return 1;
+ else
+ return 0;
- return desc.ret[0];
}
EXPORT_SYMBOL(scm_is_call_available);
#define GET_FEAT_VERSION_CMD 3
-int scm_get_feat_version(u32 feat)
+int scm_get_feat_version(u32 feat, u64 *scm_ret)
{
struct scm_desc desc = {0};
int ret;
if (!is_scm_armv8()) {
if (scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD)) {
- u32 version;
- if (!scm_call(SCM_SVC_INFO, GET_FEAT_VERSION_CMD, &feat,
- sizeof(feat), &version, sizeof(version)))
- return version;
+ ret = scm_call(SCM_SVC_INFO, GET_FEAT_VERSION_CMD,
+ &feat, sizeof(feat), scm_ret, sizeof(*scm_ret));
+ return ret;
}
- return 0;
}
ret = scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD);
if (ret <= 0)
- return 0;
+ return -EAGAIN;
desc.args[0] = feat;
desc.arginfo = SCM_ARGS(1);
ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO, GET_FEAT_VERSION_CMD),
&desc);
- if (!ret)
- return desc.ret[0];
- return 0;
+ *scm_ret = desc.ret[0];
+
+ return ret;
}
EXPORT_SYMBOL(scm_get_feat_version);
#define RESTORE_SEC_CFG 2
-int scm_restore_sec_cfg(u32 device_id, u32 spare, int *scm_ret)
+int scm_restore_sec_cfg(u32 device_id, u32 spare, u64 *scm_ret)
{
struct scm_desc desc = {0};
int ret;
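Note the changed calling convention: scm_get_feat_version() and scm_restore_sec_cfg() now return an error code and hand the value back through a u64 out-parameter, so callers must check the return code before trusting the value, as the jtagv8 and secure_buffer call sites in this patch now do. A minimal sketch:

	u64 version = 0;

	/* Only use 'version' when the call itself succeeded. */
	if (!scm_get_feat_version(TZ_DBG_ETM_FEAT_ID, &version) &&
	    version < TZ_DBG_ETM_VER)
		etmdata->save_restore_enabled = true;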
diff --git a/drivers/soc/qcom/scm_qcpe.c b/drivers/soc/qcom/scm_qcpe.c
index 54a978157bda..0263350ae931 100644
--- a/drivers/soc/qcom/scm_qcpe.c
+++ b/drivers/soc/qcom/scm_qcpe.c
@@ -219,7 +219,7 @@ static int scm_call_qcpe(u32 fn_id, struct scm_desc *desc)
if (!opened) {
ret = habmm_socket_open(&handle, MM_QCPE_VM1, 0, 0);
if (ret != HAB_OK) {
- pr_err("scm_call2: habmm_socket_open failed with ret = %d",
+ pr_err("scm_call_qcpe: habmm_socket_open failed with ret = %d",
ret);
return ret;
}
@@ -239,19 +239,26 @@ static int scm_call_qcpe(u32 fn_id, struct scm_desc *desc)
return ret;
size_bytes = sizeof(smc_params);
+ memset(&smc_params, 0x0, sizeof(smc_params));
ret = habmm_socket_recv(handle, &smc_params, &size_bytes, 0, 0);
if (ret != HAB_OK)
return ret;
+ if (size_bytes != sizeof(smc_params)) {
+ pr_err("scm_call_qcpe: expected size: %lu, actual=%u\n",
+ sizeof(smc_params), size_bytes);
+ return SCM_ERROR;
+ }
+
desc->ret[0] = smc_params.x1;
desc->ret[1] = smc_params.x2;
desc->ret[2] = smc_params.x3;
- pr_info("scm_call_qcpe: OUT: 0x%llx, 0x%llx, 0x%llx",
- desc->ret[0], desc->ret[1], desc->ret[2]);
+ pr_info("scm_call_qcpe: OUT: 0x%llx, 0x%llx, 0x%llx, 0x%llx",
+ smc_params.x0, desc->ret[0], desc->ret[1], desc->ret[2]);
- return 0;
+ return smc_params.x0;
}
static u32 smc(u32 cmd_addr)
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index 4307937d9f6d..e7a00cdb5b03 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -424,13 +424,14 @@ const char *msm_secure_vmid_to_string(int secure_vmid)
bool msm_secure_v2_is_supported(void)
{
- int version = scm_get_feat_version(FEATURE_ID_CP);
+ u64 version;
+ int ret = scm_get_feat_version(FEATURE_ID_CP, &version);
/*
* if the version is < 1.1.0 then dynamic buffer allocation is
* not supported
*/
- return version >= MAKE_CP_VERSION(1, 1, 0);
+ return (ret == 0) && (version >= MAKE_CP_VERSION(1, 1, 0));
}
static int __init alloc_secure_shared_memory(void)
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index fc9faaee3170..b80e75fc3521 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -2255,7 +2255,7 @@ void disable_wakeup_interrupt(struct msm_hs_port *msm_uport)
return;
if (msm_uport->wakeup.enabled) {
- disable_irq_nosync(msm_uport->wakeup.irq);
+ disable_irq(msm_uport->wakeup.irq);
enable_irq(uport->irq);
spin_lock_irqsave(&uport->lock, flags);
msm_uport->wakeup.enabled = false;
@@ -2614,8 +2614,7 @@ static int msm_hs_startup(struct uart_port *uport)
msm_hs_resource_vote(msm_uport);
if (is_use_low_power_wakeup(msm_uport)) {
- ret = request_threaded_irq(msm_uport->wakeup.irq, NULL,
- msm_hs_wakeup_isr,
+ ret = request_irq(msm_uport->wakeup.irq, msm_hs_wakeup_isr,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"msm_hs_wakeup", msm_uport);
if (unlikely(ret)) {
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index c3077ac11709..70db070b05d8 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -2634,8 +2634,6 @@ done:
static void check_for_sdp_connection(struct work_struct *w)
{
- int ret;
- union power_supply_propval pval = {0};
struct dwc3_msm *mdwc =
container_of(w, struct dwc3_msm, sdp_check.work);
struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
@@ -2646,21 +2644,6 @@ static void check_for_sdp_connection(struct work_struct *w)
/* floating D+/D- lines detected */
if (dwc->gadget.state < USB_STATE_DEFAULT &&
dwc3_gadget_get_link_state(dwc) != DWC3_LINK_STATE_CMPLY) {
- if (!mdwc->usb_psy) {
- mdwc->usb_psy = power_supply_get_by_name("usb");
- if (!mdwc->usb_psy) {
- dev_dbg(mdwc->dev,
- "Could not get usb power_supply\n");
- return;
- }
- }
- pval.intval = -ETIMEDOUT;
- ret = power_supply_set_property(mdwc->usb_psy,
- POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
- if (ret)
- dev_dbg(mdwc->dev,
- "power supply error when setting property\n");
-
mdwc->vbus_active = 0;
dbg_event(0xFF, "Q RW SPD CHK", mdwc->vbus_active);
queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
@@ -3708,14 +3691,16 @@ static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned mA)
return 0;
psy_type = get_psy_type(mdwc);
- if (psy_type != POWER_SUPPLY_TYPE_USB &&
- psy_type != POWER_SUPPLY_TYPE_USB_FLOAT)
+ if (psy_type == POWER_SUPPLY_TYPE_USB) {
+ dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
+ /* Set max current limit in uA */
+ pval.intval = 1000 * mA;
+ } else if (psy_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
+ pval.intval = -ETIMEDOUT;
+ } else {
return 0;
+ }
- dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
-
- /* Set max current limit in uA */
- pval.intval = 1000 * mA;
ret = power_supply_set_property(mdwc->usb_psy,
POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
if (ret) {
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 3f903d4776b4..19fe6c8cb25a 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -1579,6 +1579,12 @@ static void gsi_rndis_command_complete(struct usb_ep *ep,
struct f_gsi *rndis = req->context;
int status;
+ if (req->status != 0) {
+ log_event_err("RNDIS command completion error %d\n",
+ req->status);
+ return;
+ }
+
status = rndis_msg_parser(rndis->params, (u8 *) req->buf);
if (status < 0)
log_event_err("RNDIS command error %d, %d/%d",
diff --git a/drivers/usb/gadget/function/f_qc_rndis.c b/drivers/usb/gadget/function/f_qc_rndis.c
index 434af820e827..a28bcd084dc3 100644
--- a/drivers/usb/gadget/function/f_qc_rndis.c
+++ b/drivers/usb/gadget/function/f_qc_rndis.c
@@ -545,6 +545,12 @@ static void rndis_qc_command_complete(struct usb_ep *ep,
rndis_init_msg_type *buf;
u32 ul_max_xfer_size, dl_max_xfer_size;
+ if (req->status != 0) {
+ pr_err("%s: RNDIS command completion error %d\n",
+ __func__, req->status);
+ return;
+ }
+
spin_lock(&rndis_lock);
rndis = _rndis_qc;
if (!rndis || !rndis->notify || !rndis->notify->driver_data) {
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index 13888821109d..0917bc500023 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -463,6 +463,12 @@ static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req)
int status;
rndis_init_msg_type *buf;
+ if (req->status != 0) {
+ pr_err("%s: RNDIS command completion error:%d\n",
+ __func__, req->status);
+ return;
+ }
+
/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
// spin_lock(&dev->lock);
status = rndis_msg_parser(rndis->params, (u8 *) req->buf);
diff --git a/drivers/video/fbdev/msm/mdss_dp_aux.c b/drivers/video/fbdev/msm/mdss_dp_aux.c
index 23a63121b78c..786fe10055da 100644
--- a/drivers/video/fbdev/msm/mdss_dp_aux.c
+++ b/drivers/video/fbdev/msm/mdss_dp_aux.c
@@ -395,6 +395,7 @@ static int dp_aux_rw_cmds_retry(struct mdss_dp_drv_pdata *dp,
int i;
u32 aux_cfg1_config_count;
int ret;
+ bool connected = false;
aux_cfg1_config_count = mdss_dp_phy_aux_get_config_cnt(dp,
PHY_AUX_CFG1);
@@ -404,6 +405,15 @@ retry:
do {
struct edp_cmd cmd1 = *cmd;
+ mutex_lock(&dp->attention_lock);
+ connected = dp->cable_connected;
+ mutex_unlock(&dp->attention_lock);
+
+ if (!connected) {
+ pr_err("dp cable disconnected\n");
+ break;
+ }
+
dp->aux_error_num = EDP_AUX_ERR_NONE;
pr_debug("Trying %s, iteration count: %d\n",
mdss_dp_aux_transaction_to_string(transaction),
diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c
index 4ac10ab494e5..f9a71375c207 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.c
+++ b/drivers/video/fbdev/msm/mdss_dsi.c
@@ -2720,8 +2720,6 @@ static int mdss_dsi_event_handler(struct mdss_panel_data *pdata,
ctrl_pdata->update_phy_timing);
rc = mdss_dsi_on(pdata);
- mdss_dsi_op_mode_config(pdata->panel_info.mipi.mode,
- pdata);
break;
case MDSS_EVENT_UNBLANK:
if (ctrl_pdata->on_cmds.link_state == DSI_LP_MODE)
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.c b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
index abd2192997e5..2143c2bdb84b 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_edid.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
@@ -156,10 +156,13 @@ struct hdmi_edid_ctrl {
};
static bool hdmi_edid_is_mode_supported(struct hdmi_edid_ctrl *edid_ctrl,
- struct msm_hdmi_mode_timing_info *timing)
+ struct msm_hdmi_mode_timing_info *timing, u32 out_format)
{
+ u32 pclk = hdmi_tx_setup_tmds_clk_rate(timing->pixel_freq,
+ out_format, false);
+
if (!timing->supported ||
- timing->pixel_freq > edid_ctrl->init_data.max_pclk_khz)
+ pclk > edid_ctrl->init_data.max_pclk_khz)
return false;
return true;
@@ -936,7 +939,8 @@ static void hdmi_edid_add_sink_y420_format(struct hdmi_edid_ctrl *edid_ctrl,
u32 ret = hdmi_get_supported_mode(&timing,
&edid_ctrl->init_data.ds_data,
video_format);
- u32 supported = hdmi_edid_is_mode_supported(edid_ctrl, &timing);
+ u32 supported = hdmi_edid_is_mode_supported(edid_ctrl,
+ &timing, MDP_Y_CBCR_H2V2);
struct hdmi_edid_sink_data *sink = &edid_ctrl->sink_data;
if (video_format >= HDMI_VFRMT_MAX) {
@@ -1704,7 +1708,8 @@ static void hdmi_edid_add_sink_video_format(struct hdmi_edid_ctrl *edid_ctrl,
u32 ret = hdmi_get_supported_mode(&timing,
&edid_ctrl->init_data.ds_data,
video_format);
- u32 supported = hdmi_edid_is_mode_supported(edid_ctrl, &timing);
+ u32 supported = hdmi_edid_is_mode_supported(edid_ctrl,
+ &timing, MDP_RGBA_8888);
struct hdmi_edid_sink_data *sink_data = &edid_ctrl->sink_data;
struct disp_mode_info *disp_mode_list = sink_data->disp_mode_list;
u32 i = 0;
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index 6fb32761a767..4f8c399762fe 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -1797,7 +1797,8 @@ static inline int mdss_mdp_irq_clk_register(struct mdss_data_type *mdata,
static void __mdss_restore_sec_cfg(struct mdss_data_type *mdata)
{
- int ret, scm_ret = 0;
+ int ret;
+ u64 scm_ret = 0;
if (test_bit(MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED, mdata->mdss_caps_map))
return;
@@ -1808,7 +1809,7 @@ static void __mdss_restore_sec_cfg(struct mdss_data_type *mdata)
ret = scm_restore_sec_cfg(SEC_DEVICE_MDSS, 0, &scm_ret);
if (ret || scm_ret)
- pr_warn("scm_restore_sec_cfg failed %d %d\n",
+ pr_warn("scm_restore_sec_cfg failed %d %llu\n",
ret, scm_ret);
__mdss_mdp_reg_access_clk_enable(mdata, false);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index 0165c48a0467..54fb21a5f35d 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -696,7 +696,6 @@ static u32 get_pipe_mdp_clk_rate(struct mdss_mdp_pipe *pipe,
if (flags & PERF_CALC_PIPE_APPLY_CLK_FUDGE)
rate = mdss_mdp_clk_fudge_factor(mixer, rate);
- rate = min(mdata->max_mdp_clk_rate, rate);
return rate;
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_debug.c b/drivers/video/fbdev/msm/mdss_mdp_debug.c
index 1035d23fe9ce..09d1dab0d180 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_debug.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_debug.c
@@ -1863,12 +1863,14 @@ static void __print_buf(struct seq_file *s, struct mdss_mdp_data *buf,
seq_puts(s, "\n");
}
-static void __dump_pipe(struct seq_file *s, struct mdss_mdp_pipe *pipe)
+static void __dump_pipe(struct seq_file *s, struct mdss_mdp_pipe *pipe,
+ struct msm_fb_data_type *mfd)
{
struct mdss_mdp_data *buf;
int format;
int smps[4];
int i;
+ struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
seq_printf(s, "\nSSPP #%d type=%s ndx=%x flags=0x%16llx play_cnt=%u xin_id=%d\n",
pipe->num, mdss_mdp_pipetype2str(pipe->type),
@@ -1923,11 +1925,14 @@ static void __dump_pipe(struct seq_file *s, struct mdss_mdp_pipe *pipe)
seq_puts(s, "Data:\n");
+ mutex_lock(&mdp5_data->list_lock);
list_for_each_entry(buf, &pipe->buf_queue, pipe_list)
__print_buf(s, buf, false);
+ mutex_unlock(&mdp5_data->list_lock);
}
-static void __dump_mixer(struct seq_file *s, struct mdss_mdp_mixer *mixer)
+static void __dump_mixer(struct seq_file *s, struct mdss_mdp_mixer *mixer,
+ struct msm_fb_data_type *mfd)
{
struct mdss_mdp_pipe *pipe;
int i, cnt = 0;
@@ -1944,7 +1949,7 @@ static void __dump_mixer(struct seq_file *s, struct mdss_mdp_mixer *mixer)
for (i = 0; i < ARRAY_SIZE(mixer->stage_pipe); i++) {
pipe = mixer->stage_pipe[i];
if (pipe) {
- __dump_pipe(s, pipe);
+ __dump_pipe(s, pipe, mfd);
cnt++;
}
}
@@ -2019,8 +2024,8 @@ static void __dump_ctl(struct seq_file *s, struct mdss_mdp_ctl *ctl)
seq_printf(s, "Play Count=%u Underrun Count=%u\n",
ctl->play_cnt, ctl->underrun_cnt);
- __dump_mixer(s, ctl->mixer_left);
- __dump_mixer(s, ctl->mixer_right);
+ __dump_mixer(s, ctl->mixer_left, ctl->mfd);
+ __dump_mixer(s, ctl->mixer_right, ctl->mfd);
}
static int __dump_mdp(struct seq_file *s, struct mdss_data_type *mdata)
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
index c31c0149866a..bdf6705ef597 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
@@ -643,8 +643,13 @@ static int mdss_mdp_video_timegen_setup(struct mdss_mdp_ctl *ctl,
display_hctl = (hsync_end_x << 16) | hsync_start_x;
den_polarity = 0;
- hsync_polarity = p->h_polarity;
- vsync_polarity = p->v_polarity;
+ if (ctx->intf_type == MDSS_INTF_HDMI) {
+ hsync_polarity = p->h_polarity;
+ vsync_polarity = p->v_polarity;
+ } else {
+ hsync_polarity = 0;
+ vsync_polarity = 0;
+ }
polarity_ctl = (den_polarity << 2) | /* DEN Polarity */
(vsync_polarity << 1) | /* VSYNC Polarity */
(hsync_polarity << 0); /* HSYNC Polarity */
diff --git a/drivers/video/fbdev/msm/mdss_rotator.c b/drivers/video/fbdev/msm/mdss_rotator.c
index 61b0518d3ee6..2028222748c3 100644
--- a/drivers/video/fbdev/msm/mdss_rotator.c
+++ b/drivers/video/fbdev/msm/mdss_rotator.c
@@ -373,6 +373,15 @@ static bool mdss_rotator_is_work_pending(struct mdss_rot_mgr *mgr,
return false;
}
+static void mdss_rotator_install_fence_fd(struct mdss_rot_entry_container *req)
+{
+ int i = 0;
+
+ for (i = 0; i < req->count; i++)
+ sync_fence_install(req->entries[i].output_fence,
+ req->entries[i].output_fence_fd);
+}
+
static int mdss_rotator_create_fence(struct mdss_rot_entry *entry)
{
int ret = 0, fd;
@@ -411,7 +420,6 @@ static int mdss_rotator_create_fence(struct mdss_rot_entry *entry)
goto get_fd_err;
}
- sync_fence_install(fence, fd);
rot_timeline->next_value++;
mutex_unlock(&rot_timeline->lock);
@@ -2240,6 +2248,7 @@ static int mdss_rotator_handle_request(struct mdss_rot_mgr *mgr,
goto handle_request_err1;
}
+ mdss_rotator_install_fence_fd(req);
mdss_rotator_queue_request(mgr, private, req);
mutex_unlock(&mgr->lock);
@@ -2400,6 +2409,7 @@ static int mdss_rotator_handle_request32(struct mdss_rot_mgr *mgr,
goto handle_request32_err1;
}
+ mdss_rotator_install_fence_fd(req);
mdss_rotator_queue_request(mgr, private, req);
mutex_unlock(&mgr->lock);