-rw-r--r--  Documentation/devicetree/bindings/gpu/adreno.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/pci/msm_pcie.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt | 21
-rw-r--r--  arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi | 4
-rw-r--r--  drivers/char/adsprpc.c | 3
-rw-r--r--  drivers/crypto/msm/ice.c | 5
-rw-r--r--  drivers/crypto/msm/qce50.c | 20
-rw-r--r--  drivers/crypto/msm/qce50.h | 4
-rw-r--r--  drivers/crypto/msm/qcrypto.c | 14
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.h | 8
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_preempt.c | 17
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_kms.c | 26
-rw-r--r--  drivers/gpu/msm/adreno.c | 4
-rw-r--r--  drivers/gpu/msm/adreno.h | 47
-rw-r--r--  drivers/gpu/msm/adreno_a3xx.c | 6
-rw-r--r--  drivers/gpu/msm/adreno_a4xx.c | 6
-rw-r--r--  drivers/gpu/msm/adreno_a5xx.c | 8
-rw-r--r--  drivers/gpu/msm/adreno_dispatch.c | 4
-rw-r--r--  drivers/gpu/msm/adreno_ringbuffer.c | 7
-rw-r--r--  drivers/gpu/msm/adreno_trace.h | 25
-rw-r--r--  drivers/gpu/msm/kgsl_pool.c | 22
-rw-r--r--  drivers/gpu/msm/kgsl_pwrctrl.c | 17
-rw-r--r--  drivers/gpu/msm/kgsl_pwrctrl.h | 18
-rw-r--r--  drivers/input/touchscreen/st/fts.c | 19
-rw-r--r--  drivers/iommu/arm-smmu.c | 3
-rw-r--r--  drivers/media/platform/msm/ais/fd/msm_fd_dev.c | 6
-rw-r--r--  drivers/media/platform/msm/ais/isp/msm_isp.h | 1
-rw-r--r--  drivers/media/platform/msm/ais/isp/msm_isp47.c | 4
-rw-r--r--  drivers/misc/qseecom.c | 5
-rw-r--r--  drivers/mmc/card/block.c | 9
-rw-r--r--  drivers/net/ethernet/msm/msm_rmnet_mhi.c | 9
-rw-r--r--  drivers/net/wireless/cnss/cnss_pci.c | 138
-rw-r--r--  drivers/net/wireless/wcnss/wcnss_vreg.c | 32
-rw-r--r--  drivers/net/wireless/wcnss/wcnss_wlan.c | 154
-rw-r--r--  drivers/pci/host/pci-msm.c | 33
-rw-r--r--  drivers/perf/arm_pmu.c | 1
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c | 3
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c | 62
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_rt.c | 2
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c | 9
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c | 3
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c | 5
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h | 25
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c | 466
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_rt.c | 3
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c | 14
-rw-r--r--  drivers/platform/msm/mhi/mhi_iface.c | 2
-rw-r--r--  drivers/platform/msm/mhi/mhi_init.c | 4
-rw-r--r--  drivers/platform/msm/mhi/mhi_mmio_ops.c | 5
-rw-r--r--  drivers/platform/msm/mhi/mhi_sys.c | 4
-rw-r--r--  drivers/platform/msm/mhi_uci/mhi_uci.c | 3
-rw-r--r--  drivers/power/power_supply_sysfs.c | 2
-rw-r--r--  drivers/power/supply/qcom/fg-core.h | 36
-rw-r--r--  drivers/power/supply/qcom/fg-util.c | 34
-rw-r--r--  drivers/power/supply/qcom/qpnp-fg-gen3.c | 817
-rw-r--r--  drivers/power/supply/qcom/qpnp-qnovo.c | 373
-rw-r--r--  drivers/power/supply/qcom/qpnp-smb2.c | 3
-rw-r--r--  drivers/power/supply/qcom/smb-lib.c | 24
-rw-r--r--  drivers/power/supply/qcom/smb-lib.h | 1
-rw-r--r--  drivers/power/supply/qcom/step-chg-jeita.c | 321
-rw-r--r--  drivers/power/supply/qcom/step-chg-jeita.h | 2
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 7
-rw-r--r--  drivers/soc/qcom/jtagv8-etm.c | 13
-rw-r--r--  drivers/soc/qcom/jtagv8.c | 7
-rw-r--r--  drivers/soc/qcom/scm.c | 35
-rw-r--r--  drivers/soc/qcom/secure_buffer.c | 5
-rw-r--r--  drivers/staging/android/ion/ion_cma_heap.c | 24
-rw-r--r--  drivers/usb/gadget/function/f_gsi.c | 6
-rw-r--r--  drivers/usb/gadget/function/f_qc_rndis.c | 6
-rw-r--r--  drivers/usb/gadget/function/f_rndis.c | 6
-rw-r--r--  drivers/video/fbdev/msm/mdss_dsi.c | 17
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_tx.c | 18
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp.c | 5
-rw-r--r--  include/linux/power_supply.h | 2
-rw-r--r--  include/linux/wcnss_wlan.h | 7
-rw-r--r--  include/soc/qcom/scm.h | 8
-rw-r--r--  include/uapi/linux/ipa_qmi_service_v01.h | 303
-rw-r--r--  kernel/events/core.c | 52
-rw-r--r--  kernel/events/hw_breakpoint.c | 2
-rw-r--r--  kernel/sched/rt.c | 15
-rw-r--r--  net/rmnet_data/rmnet_data_handlers.c | 10
-rw-r--r--  net/wireless/nl80211.c | 8
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c | 60
85 files changed, 2847 insertions, 707 deletions
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index d8c3a7c35465..32de5ce3da7e 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -141,6 +141,9 @@ Optional Properties:
rendering thread is running on masked CPUs.
Bit 0 is for CPU-0, bit 1 is for CPU-1...
+- qcom,l2pc-update-queue:
+ Disables L2PC on masked CPUs at queue time when it's true.
+
- qcom,snapshot-size:
Specify the size of snapshot in bytes. This will override
snapshot size defined in the driver code.
diff --git a/Documentation/devicetree/bindings/pci/msm_pcie.txt b/Documentation/devicetree/bindings/pci/msm_pcie.txt
index fc019bda50a7..bf3ad8a71c26 100644
--- a/Documentation/devicetree/bindings/pci/msm_pcie.txt
+++ b/Documentation/devicetree/bindings/pci/msm_pcie.txt
@@ -97,6 +97,9 @@ Optional Properties:
and assign for each endpoint.
- qcom,ep-latency: The time (unit: ms) to wait for the PCIe endpoint to become
stable after power on, before de-assert the PERST to the endpoint.
+ - qcom,switch-latency: The time (unit: ms) to wait for the PCIe endpoint's link
+ training with switch downstream port after the link between switch upstream
+ port and RC is up.
- qcom,wr-halt-size: With base 2, this exponent determines the size of the
data that PCIe core will halt on for each write transaction.
- qcom,cpl-timeout: Completion timeout value. This value specifies the time range
@@ -276,6 +279,7 @@ Example:
qcom,smmu-exist;
qcom,smmu-sid-base = <0x1480>;
qcom,ep-latency = <100>;
+ qcom,switch-latency = <100>;
qcom,wr-halt-size = <0xa>; /* 1KB */
qcom,cpl-timeout = <0x2>;
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
index 3527779ef93c..f01eae10bf4f 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
@@ -178,6 +178,10 @@ Charger specific properties:
Definition: WD bark-timeout in seconds. The possible values are
16, 32, 64, 128. If not defined it defaults to 64.
+- qcom,sw-jeita-enable
+ Usage: optional
+ Value type: bool
+ Definition: Boolean flag which when present enables sw compensation for jeita
=============================================
Second Level Nodes - SMB2 Charger Peripherals
diff --git a/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt b/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt
index 061c1d16ad24..77d6bf06ee26 100644
--- a/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt
+++ b/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt
@@ -12,7 +12,7 @@ Required properties:
"riva_ccu_base", "pronto_a2xb_base", "pronto_ccpu_base",
"pronto_saw2_base", "wlan_tx_phy_aborts","wlan_brdg_err_source",
"wlan_tx_status", "alarms_txctl", "alarms_tactl",
- "pronto_mcu_base".
+ "pronto_mcu_base", "pronto_qfuse".
- interupts: Pronto to Apps interrupts for tx done and rx pending.
- qcom,pronto-vddmx-supply: regulator to supply pronto pll.
- qcom,pronto-vddcx-supply: voltage corner regulator to supply WLAN/BT/FM
@@ -29,7 +29,7 @@ Required properties:
- qcom,wcnss-vadc: VADC handle for battery voltage notification APIs.
- pinctrl-<n> : Pinctrl states as described in bindings/pinctrl/pinctrl-bindings.txt
- pinctrl-names : Names corresponding to the numbered pinctrl states
-- clocks: from common clock binding: handle to xo and rf_clk clocks.
+- clocks: from common clock binding: handle to xo, rf_clk and wcnss snoc clocks.
- clock-names: Names of all the clocks that are accessed by the subsystem
- qcom,vdd-voltage-level: This property represents (nominal, min, max) voltage
for iris and pronto regulators in milli-volts.
@@ -39,11 +39,16 @@ iris and pronto regulators in micro-amps.
Optional properties:
- qcom,has-autodetect-xo: boolean flag to determine whether Iris XO auto detect
should be performed during boot up.
+- qcom,snoc-wcnss-clock-freq: indicates the wcnss snoc clock frequency in Hz.
+If wcnss_snoc clock is specified in the list of clocks, this property needs
+to be set to make it functional.
- qcom,wlan-rx-buff-count: WLAN RX buffer count is a configurable value,
using a smaller count for this buffer will reduce the memory usage.
- qcom,is-pronto-v3: boolean flag to determine the pronto hardware version
in use. subsequently correct workqueue will be used by DXE engine to push frames
in TX data path.
+- qcom,is-dual-band-disable: boolean flag to determine the WLAN dual band
+ capability.
- qcom,is-pronto-vadc: boolean flag to determine Battery voltage feature
support for pronto hardware.
- qcom,wcnss-pm : <Core rail LDO#, PA rail LDO#, XO settling time,
@@ -59,6 +64,8 @@ support for pronto hardware.
to use for VBATT feature.
- qcom,has-a2xb-split-reg: boolean flag to determine A2xb split timeout limit
register is available or not.
+- qcom,wcn-external-gpio-support: boolean flag to determine 3.3v gpio support
+for pronto hardware for a target.
Example:
@@ -80,6 +87,7 @@ Example:
gpios = <&msmgpio 36 0>, <&msmgpio 37 0>, <&msmgpio 38 0>,
<&msmgpio 39 0>, <&msmgpio 40 0>;
+ qcom,wcn-external-gpio-support;
qcom,has-48mhz-xo;
qcom,is-pronto-vt;
qcom,wlan-rx-buff-count = <512>;
@@ -94,7 +102,12 @@ Example:
clocks = <&clock_rpm clk_xo_wlan_clk>,
<&clock_rpm clk_rf_clk2>,
<&clock_debug clk_gcc_debug_mux>,
- <&clock_gcc clk_wcnss_m_clk>;
- clock-names = "xo", "rf_clk", "measure", "wcnss_debug";
+ <&clock_gcc clk_wcnss_m_clk>,
+ <&clock_gcc clk_snoc_wcnss_a_clk>;
+
+ clock-names = "xo", "rf_clk", "measure", "wcnss_debug",
+ "snoc_wcnss";
+
+ qcom,snoc-wcnss-clock-freq = <200000000>;
qcom,wcnss-pm = <11 21 1200 1 1 6>;
};
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
index 51a225b82f47..ff3b7b80c449 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -54,6 +54,8 @@
qcom,ulps-enabled;
qcom,dcs-cmd-by-left;
qcom,mdss-dsi-tx-eot-append;
+ qcom,mdss-pan-physical-width-dimension = <68>;
+ qcom,mdss-pan-physical-height-dimension = <121>;
qcom,adjust-timer-wakeup-ms = <1>;
qcom,mdss-dsi-on-command = [
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
index 02c87067f212..933746b8abe7 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -46,6 +46,8 @@
qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 20>;
qcom,mdss-dsi-tx-eot-append;
+ qcom,mdss-pan-physical-width-dimension = <68>;
+ qcom,mdss-pan-physical-height-dimension = <121>;
qcom,adjust-timer-wakeup-ms = <1>;
qcom,mdss-dsi-on-command = [
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 781fa96726e2..8560a2b731b5 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -2500,6 +2500,9 @@ static int fastrpc_channel_open(struct fastrpc_file *fl)
kref_init(&me->channel[cid].kref);
pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name,
MAJOR(me->dev_no), cid);
+ err = glink_queue_rx_intent(me->channel[cid].chan, NULL, 64);
+ if (err)
+ pr_info("adsprpc: initial intent failed for %d\n", cid);
if (cid == 0 && me->channel[cid].ssrcount !=
me->channel[cid].prevssrcount) {
if (fastrpc_mmap_remove_ssr(fl))
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
index 49165daa807f..490f8d9ddb9f 100644
--- a/drivers/crypto/msm/ice.c
+++ b/drivers/crypto/msm/ice.c
@@ -975,7 +975,8 @@ static int qcom_ice_secure_ice_init(struct ice_device *ice_dev)
static int qcom_ice_update_sec_cfg(struct ice_device *ice_dev)
{
- int ret = 0, scm_ret = 0;
+ int ret = 0;
+ u64 scm_ret = 0;
/* scm command buffer structure */
struct qcom_scm_cmd_buf {
@@ -1001,7 +1002,7 @@ static int qcom_ice_update_sec_cfg(struct ice_device *ice_dev)
cbuf.device_id = ICE_TZ_DEV_ID;
ret = scm_restore_sec_cfg(cbuf.device_id, cbuf.spare, &scm_ret);
if (ret || scm_ret) {
- pr_err("%s: failed, ret %d scm_ret %d\n",
+ pr_err("%s: failed, ret %d scm_ret %llu\n",
__func__, ret, scm_ret);
if (!ret)
ret = scm_ret;
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index 4ab8ca143f6c..b44f926a6ba0 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -2155,6 +2155,10 @@ static int _sha_complete(struct qce_device *pce_dev, int req_info)
pce_sps_data = &preq_info->ce_sps;
qce_callback = preq_info->qce_cb;
areq = (struct ahash_request *) preq_info->areq;
+ if (!areq) {
+ pr_err("sha operation error. areq is NULL\n");
+ return -ENXIO;
+ }
qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
DMA_TO_DEVICE);
memcpy(digest, (char *)(&pce_sps_data->result->auth_iv[0]),
@@ -2970,7 +2974,7 @@ static inline int qce_alloc_req_info(struct qce_device *pce_dev)
request_index++;
if (request_index >= MAX_QCE_BAM_REQ)
request_index = 0;
- if (xchg(&pce_dev->ce_request_info[request_index].
+ if (atomic_xchg(&pce_dev->ce_request_info[request_index].
in_use, true) == false) {
pce_dev->ce_request_index = request_index;
return request_index;
@@ -2986,7 +2990,8 @@ static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
bool is_complete)
{
pce_dev->ce_request_info[req_info].xfer_type = QCE_XFER_TYPE_LAST;
- if (xchg(&pce_dev->ce_request_info[req_info].in_use, false) == true) {
+ if (atomic_xchg(&pce_dev->ce_request_info[req_info].in_use,
+ false) == true) {
if (req_info < MAX_QCE_BAM_REQ && is_complete)
atomic_dec(&pce_dev->no_of_queued_req);
} else
@@ -4610,7 +4615,7 @@ static int qce_dummy_req(struct qce_device *pce_dev)
{
int ret = 0;
- if (!(xchg(&pce_dev->ce_request_info[DUMMY_REQ_INDEX].
+ if (!(atomic_xchg(&pce_dev->ce_request_info[DUMMY_REQ_INDEX].
in_use, true) == false))
return -EBUSY;
ret = qce_process_sha_req(pce_dev, NULL);
@@ -5969,7 +5974,7 @@ void *qce_open(struct platform_device *pdev, int *rc)
}
for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++)
- pce_dev->ce_request_info[i].in_use = false;
+ atomic_set(&pce_dev->ce_request_info[i].in_use, false);
pce_dev->ce_request_index = 0;
pce_dev->memsize = 10 * PAGE_SIZE * MAX_QCE_ALLOC_BAM_REQ;
@@ -6133,12 +6138,13 @@ EXPORT_SYMBOL(qce_hw_support);
void qce_dump_req(void *handle)
{
int i;
+ bool req_in_use;
struct qce_device *pce_dev = (struct qce_device *)handle;
for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
- pr_info("qce_dump_req %d %d\n", i,
- pce_dev->ce_request_info[i].in_use);
- if (pce_dev->ce_request_info[i].in_use == true)
+ req_in_use = atomic_read(&pce_dev->ce_request_info[i].in_use);
+ pr_info("qce_dump_req %d %d\n", i, req_in_use);
+ if (req_in_use == true)
_qce_dump_descr_fifos(pce_dev, i);
}
}
diff --git a/drivers/crypto/msm/qce50.h b/drivers/crypto/msm/qce50.h
index 6dba3664ff08..ab0d21da72c5 100644
--- a/drivers/crypto/msm/qce50.h
+++ b/drivers/crypto/msm/qce50.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -214,7 +214,7 @@ struct ce_sps_data {
};
struct ce_request_info {
- bool in_use;
+ atomic_t in_use;
bool in_prog;
enum qce_xfer_type_enum xfer_type;
struct ce_sps_data ce_sps;
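Note on the qce50.c/qce50.h hunks above: the per-request in_use flag changes from a plain bool claimed with xchg() to an atomic_t claimed with atomic_xchg(), so marking a slot busy is a well-defined atomic read-modify-write. The following is a minimal, self-contained userspace model of that slot-claiming pattern using C11 <stdatomic.h> rather than the kernel's atomic_t; the names and the slot count are illustrative, not the driver's.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_REQ 4	/* illustrative; the driver uses MAX_QCE_BAM_REQ */

static atomic_bool in_use[MAX_REQ];

/*
 * Claim a free slot: atomic_exchange() returns the previous value, so
 * the slot is ours only if it was previously false. This mirrors the
 * atomic_xchg() on ce_request_info[i].in_use in the patch.
 */
static int alloc_req_slot(void)
{
	for (int i = 0; i < MAX_REQ; i++) {
		if (atomic_exchange(&in_use[i], true) == false)
			return i;
	}
	return -1;	/* all slots busy */
}

static void free_req_slot(int i)
{
	atomic_store(&in_use[i], false);
}

int main(void)
{
	int a = alloc_req_slot();
	int b = alloc_req_slot();

	printf("claimed slots %d and %d\n", a, b);
	free_req_slot(a);
	free_req_slot(b);
	return 0;
}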
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index 5b364f053b1b..f38fc422b35e 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -3972,6 +3972,7 @@ static int _sha1_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int len)
{
struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+ int ret = 0;
memset(&sha_ctx->authkey[0], 0, SHA1_BLOCK_SIZE);
if (len <= SHA1_BLOCK_SIZE) {
memcpy(&sha_ctx->authkey[0], key, len);
@@ -3979,16 +3980,19 @@ static int _sha1_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
} else {
sha_ctx->alg = QCE_HASH_SHA1;
sha_ctx->diglen = SHA1_DIGEST_SIZE;
- _sha_hmac_setkey(tfm, key, len);
+ ret = _sha_hmac_setkey(tfm, key, len);
+ if (ret)
+ pr_err("SHA1 hmac setkey failed\n");
sha_ctx->authkey_in_len = SHA1_BLOCK_SIZE;
}
- return 0;
+ return ret;
}
static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int len)
{
struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+ int ret = 0;
memset(&sha_ctx->authkey[0], 0, SHA256_BLOCK_SIZE);
if (len <= SHA256_BLOCK_SIZE) {
@@ -3997,11 +4001,13 @@ static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
} else {
sha_ctx->alg = QCE_HASH_SHA256;
sha_ctx->diglen = SHA256_DIGEST_SIZE;
- _sha_hmac_setkey(tfm, key, len);
+ ret = _sha_hmac_setkey(tfm, key, len);
+ if (ret)
+ pr_err("SHA256 hmac setkey failed\n");
sha_ctx->authkey_in_len = SHA256_BLOCK_SIZE;
}
- return 0;
+ return ret;
}
static int _sha_hmac_init_ihash(struct ahash_request *req,
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index e637237fa811..c30b65785ab6 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -70,6 +70,8 @@ struct a5xx_gpu {
* PREEMPT_NONE - no preemption in progress. Next state START.
* PREEMPT_START - The trigger is evaulating if preemption is possible. Next
* states: TRIGGERED, NONE
+ * PREEMPT_ABORT - An intermediate state before moving back to NONE. Next
+ * state: NONE.
* PREEMPT_TRIGGERED: A preemption has been executed on the hardware. Next
* states: FAULTED, PENDING
* PREEMPT_FAULTED: A preemption timed out (never completed). This will trigger
@@ -81,6 +83,7 @@ struct a5xx_gpu {
enum preempt_state {
PREEMPT_NONE = 0,
PREEMPT_START,
+ PREEMPT_ABORT,
PREEMPT_TRIGGERED,
PREEMPT_FAULTED,
PREEMPT_PENDING,
@@ -184,7 +187,10 @@ int a5xx_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
/* Return true if we are in a preempt state */
static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
{
- return !(atomic_read(&a5xx_gpu->preempt_state) == PREEMPT_NONE);
+ int preempt_state = atomic_read(&a5xx_gpu->preempt_state);
+
+ return !(preempt_state == PREEMPT_NONE ||
+ preempt_state == PREEMPT_ABORT);
}
int a5xx_counters_init(struct adreno_gpu *adreno_gpu);
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 6ab3ba076c2f..44d4ca35fa09 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -128,9 +128,20 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
* one do nothing except to update the wptr to the latest and greatest
*/
if (!ring || (a5xx_gpu->cur_ring == ring)) {
- update_wptr(gpu, ring);
-
- /* Set the state back to NONE */
+ /*
+ * Its possible that while a preemption request is in progress
+ * from an irq context, a user context trying to submit might
+ * fail to update the write pointer, because it determines
+ * that the preempt state is not PREEMPT_NONE.
+ *
+ * Close the race by introducing an intermediate
+ * state PREEMPT_ABORT to let the submit path
+ * know that the ringbuffer is not going to change
+ * and can safely update the write pointer.
+ */
+
+ set_preempt_state(a5xx_gpu, PREEMPT_ABORT);
+ update_wptr(gpu, a5xx_gpu->cur_ring);
set_preempt_state(a5xx_gpu, PREEMPT_NONE);
return;
}
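The comment added in a5xx_preempt.c above describes a race between the irq-context preempt trigger and a user context submitting work: the submitter skips updating the write pointer whenever the preempt state is not NONE. The sketch below is a loose, self-contained C11 model of the state-machine idea only (it is not the driver code and ignores the driver's locking): the trigger passes through an intermediate ABORT state while it restores the write pointer itself, and the in_preempt()-style check treats both NONE and ABORT as safe for the submitter.

#include <stdatomic.h>
#include <stdio.h>

enum preempt_state { PREEMPT_NONE, PREEMPT_START, PREEMPT_ABORT, PREEMPT_TRIGGERED };

static _Atomic enum preempt_state state = PREEMPT_NONE;
static _Atomic unsigned int wptr;

/* Submit path: only touch the write pointer when no ring switch can
 * happen. With the patch, ABORT counts as "no switch pending" too. */
static int in_preempt(void)
{
	enum preempt_state s = atomic_load(&state);

	return !(s == PREEMPT_NONE || s == PREEMPT_ABORT);
}

static void submit(unsigned int new_wptr)
{
	if (!in_preempt())
		atomic_store(&wptr, new_wptr);
	/* else: the preempt completion picks up the latest wptr later */
}

/* Trigger path (irq context in the driver): when preemption turns out
 * to be unnecessary, pass through ABORT so a racing submit() is not
 * silently dropped while the trigger restores the wptr itself. */
static void abort_trigger(unsigned int ring_wptr)
{
	atomic_store(&state, PREEMPT_ABORT);
	atomic_store(&wptr, ring_wptr);
	atomic_store(&state, PREEMPT_NONE);
}

int main(void)
{
	submit(16);
	abort_trigger(16);
	submit(32);
	printf("wptr = %u\n", atomic_load(&wptr));
	return 0;
}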
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 705ec2d0dfa2..a84d65195363 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -1260,12 +1260,6 @@ static int sde_kms_hw_init(struct msm_kms *kms)
goto power_error;
}
- rc = sde_splash_parse_dt(dev);
- if (rc) {
- SDE_ERROR("parse dt for splash info failed: %d\n", rc);
- goto power_error;
- }
-
/*
* Read the DISP_INTF_SEL register to check
* whether early display is enabled in LK.
@@ -1277,15 +1271,23 @@ static int sde_kms_hw_init(struct msm_kms *kms)
}
/*
- * when LK has enabled early display, sde_splash_init should be
- * called first. This function will first do bandwidth voting job
- * because display hardware is accessing AHB data bus, otherwise
- * device reboot will happen. Second is to check if the memory is
- * reserved.
+ * when LK has enabled early display, sde_splash_parse_dt and
+ * sde_splash_init must be called. The first function is to parse the
+ * mandatory memory node for splash function, and the second function
+ * will first do bandwidth voting job, because display hardware is now
+ * accessing AHB data bus, otherwise device reboot will happen, and then
+ * to check if the memory is reserved.
*/
sinfo = &sde_kms->splash_info;
- if (sinfo->handoff)
+ if (sinfo->handoff) {
+ rc = sde_splash_parse_dt(dev);
+ if (rc) {
+ SDE_ERROR("parse dt for splash info failed: %d\n", rc);
+ goto power_error;
+ }
+
sde_splash_init(&priv->phandle, kms);
+ }
for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
u32 vbif_idx = sde_kms->catalog->vbif[i].id;
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 6521ec01413e..afa71116c691 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1309,6 +1309,10 @@ static int _adreno_start(struct adreno_device *adreno_dev)
/* make sure ADRENO_DEVICE_STARTED is not set here */
BUG_ON(test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv));
+ /* disallow l2pc during wake up to improve GPU wake up time */
+ kgsl_pwrctrl_update_l2pc(&adreno_dev->dev,
+ KGSL_L2PC_WAKEUP_TIMEOUT);
+
pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
pmqos_wakeup_vote);
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 218d08e6dfc3..4a0acdcf8844 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -568,6 +568,8 @@ enum adreno_regs {
ADRENO_REG_RBBM_RBBM_CTL,
ADRENO_REG_UCHE_INVALIDATE0,
ADRENO_REG_UCHE_INVALIDATE1,
+ ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO,
+ ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI,
ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
ADRENO_REG_RBBM_SECVID_TRUST_CONTROL,
@@ -1508,21 +1510,60 @@ static inline void adreno_ringbuffer_set_pagetable(struct adreno_ringbuffer *rb,
spin_unlock_irqrestore(&rb->preempt_lock, flags);
}
+static inline bool is_power_counter_overflow(struct adreno_device *adreno_dev,
+ unsigned int reg, unsigned int prev_val, unsigned int *perfctr_pwr_hi)
+{
+ unsigned int val;
+ bool ret = false;
+
+ /*
+ * If prev_val is zero, it is first read after perf counter reset.
+ * So set perfctr_pwr_hi register to zero.
+ */
+ if (prev_val == 0) {
+ *perfctr_pwr_hi = 0;
+ return ret;
+ }
+ adreno_readreg(adreno_dev, ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI, &val);
+ if (val != *perfctr_pwr_hi) {
+ *perfctr_pwr_hi = val;
+ ret = true;
+ }
+ return ret;
+}
+
static inline unsigned int counter_delta(struct kgsl_device *device,
unsigned int reg, unsigned int *counter)
{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
unsigned int val;
unsigned int ret = 0;
+ bool overflow = true;
+ static unsigned int perfctr_pwr_hi;
/* Read the value */
kgsl_regread(device, reg, &val);
+ if (adreno_is_a5xx(adreno_dev) && reg == adreno_getreg
+ (adreno_dev, ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO))
+ overflow = is_power_counter_overflow(adreno_dev, reg,
+ *counter, &perfctr_pwr_hi);
+
/* Return 0 for the first read */
if (*counter != 0) {
- if (val < *counter)
- ret = (0xFFFFFFFF - *counter) + val;
- else
+ if (val >= *counter) {
ret = val - *counter;
+ } else if (overflow == true) {
+ ret = (0xFFFFFFFF - *counter) + val;
+ } else {
+ /*
+ * Since KGSL got abnormal value from the counter,
+ * We will drop the value from being accumulated.
+ */
+ pr_warn_once("KGSL: Abnormal value :0x%x (0x%x) from perf counter : 0x%x\n",
+ val, *counter, reg);
+ return 0;
+ }
}
*counter = val;
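The reworked counter_delta() above distinguishes a genuine 32-bit wrap of the RBBM power counter from a bogus readback by consulting the _HI half of the counter. As a worked example with hypothetical samples: if the previous LO value was 0xFFFFFF00 and the new one is 0x00000010, the code computes (0xFFFFFFFF - 0xFFFFFF00) + 0x10 = 0x10F, but only when the HI register advanced; otherwise the sample is dropped. Below is a small standalone C sketch of that arithmetic, not the kgsl code itself.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the patch's delta logic with made-up sample values. */
static uint32_t counter_delta(uint32_t prev, uint32_t val, bool hi_advanced)
{
	if (prev == 0)
		return 0;			/* first read after reset */
	if (val >= prev)
		return val - prev;		/* normal case */
	if (hi_advanced)
		return (0xFFFFFFFFu - prev) + val;	/* 32-bit wrap */
	return 0;				/* abnormal readback: drop it */
}

int main(void)
{
	printf("wrap delta   = 0x%" PRIx32 "\n",
	       counter_delta(0xFFFFFF00u, 0x10u, true));
	printf("bogus sample = 0x%" PRIx32 "\n",
	       counter_delta(0xFFFFFF00u, 0x10u, false));
	return 0;
}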
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 423071811b43..0e3e5b64bdc7 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1530,6 +1530,10 @@ static unsigned int a3xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
A3XX_UCHE_CACHE_INVALIDATE0_REG),
ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE1,
A3XX_UCHE_CACHE_INVALIDATE1_REG),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO,
+ A3XX_RBBM_PERFCTR_RBBM_0_LO),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI,
+ A3XX_RBBM_PERFCTR_RBBM_0_HI),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
A3XX_RBBM_PERFCTR_LOAD_VALUE_LO),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
diff --git a/drivers/gpu/msm/adreno_a4xx.c b/drivers/gpu/msm/adreno_a4xx.c
index 5ca04e522270..6170cc263e4a 100644
--- a/drivers/gpu/msm/adreno_a4xx.c
+++ b/drivers/gpu/msm/adreno_a4xx.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -806,6 +806,10 @@ static unsigned int a4xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SW_RESET_CMD, A4XX_RBBM_SW_RESET_CMD),
ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE0, A4XX_UCHE_INVALIDATE0),
ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE1, A4XX_UCHE_INVALIDATE1),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO,
+ A4XX_RBBM_PERFCTR_RBBM_0_LO),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI,
+ A4XX_RBBM_PERFCTR_RBBM_0_HI),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
A4XX_RBBM_PERFCTR_LOAD_VALUE_LO),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 0715022be6e3..f4dfae1a115f 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -715,6 +715,10 @@ static int _load_gpmu_firmware(struct adreno_device *adreno_dev)
if (ret)
goto err;
+ /* Integer overflow check for cmd_size */
+ if (data[2] > (data[0] - 2))
+ goto err;
+
cmds = data + data[2] + 3;
cmd_size = data[0] - data[2] - 2;
@@ -3069,6 +3073,10 @@ static unsigned int a5xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD2,
A5XX_RBBM_BLOCK_SW_RESET_CMD2),
ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE0, A5XX_UCHE_INVALIDATE0),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO,
+ A5XX_RBBM_PERFCTR_RBBM_0_LO),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI,
+ A5XX_RBBM_PERFCTR_RBBM_0_HI),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO,
A5XX_RBBM_PERFCTR_LOAD_VALUE_LO),
ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI,
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 8902c3175c79..1a94e71f5c1d 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -1460,7 +1460,9 @@ int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv,
spin_unlock(&drawctxt->lock);
- kgsl_pwrctrl_update_l2pc(&adreno_dev->dev);
+ if (device->pwrctrl.l2pc_update_queue)
+ kgsl_pwrctrl_update_l2pc(&adreno_dev->dev,
+ KGSL_L2PC_QUEUE_TIMEOUT);
/* Add the context to the dispatcher pending list */
dispatcher_queue_context(adreno_dev, drawctxt);
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index d79d9613043f..ddc53edce3c1 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -26,6 +26,7 @@
#include "adreno_iommu.h"
#include "adreno_pm4types.h"
#include "adreno_ringbuffer.h"
+#include "adreno_trace.h"
#include "a3xx_reg.h"
#include "adreno_a5xx.h"
@@ -58,6 +59,7 @@ static void _cff_write_ringbuffer(struct adreno_ringbuffer *rb)
}
static void adreno_get_submit_time(struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb,
struct adreno_submit_time *time)
{
unsigned long flags;
@@ -87,6 +89,9 @@ static void adreno_get_submit_time(struct adreno_device *adreno_dev,
} else
time->ticks = 0;
+ /* Trace the GPU time to create a mapping to ftrace time */
+ trace_adreno_cmdbatch_sync(rb->drawctxt_active, time->ticks);
+
/* Get the kernel clock for time since boot */
time->ktime = local_clock();
@@ -128,7 +133,7 @@ void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb,
_cff_write_ringbuffer(rb);
if (time != NULL)
- adreno_get_submit_time(adreno_dev, time);
+ adreno_get_submit_time(adreno_dev, rb, time);
adreno_ringbuffer_wptr(adreno_dev, rb);
}
diff --git a/drivers/gpu/msm/adreno_trace.h b/drivers/gpu/msm/adreno_trace.h
index 16ca0980cfbe..74c4c4e6e1fa 100644
--- a/drivers/gpu/msm/adreno_trace.h
+++ b/drivers/gpu/msm/adreno_trace.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -148,6 +148,29 @@ TRACE_EVENT(adreno_cmdbatch_retired,
)
);
+TRACE_EVENT(adreno_cmdbatch_sync,
+ TP_PROTO(struct adreno_context *drawctxt,
+ uint64_t ticks),
+ TP_ARGS(drawctxt, ticks),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned int, timestamp)
+ __field(uint64_t, ticks)
+ __field(int, prio)
+ ),
+ TP_fast_assign(
+ __entry->id = drawctxt->base.id;
+ __entry->timestamp = drawctxt->timestamp;
+ __entry->ticks = ticks;
+ __entry->prio = drawctxt->base.priority;
+ ),
+ TP_printk(
+ "ctx=%u ctx_prio=%d ts=%u ticks=%lld",
+ __entry->id, __entry->prio, __entry->timestamp,
+ __entry->ticks
+ )
+);
+
TRACE_EVENT(adreno_cmdbatch_fault,
TP_PROTO(struct kgsl_drawobj_cmd *cmdobj, unsigned int fault),
TP_ARGS(cmdobj, fault),
diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c
index c31a85b07447..685ce3ea968b 100644
--- a/drivers/gpu/msm/kgsl_pool.c
+++ b/drivers/gpu/msm/kgsl_pool.c
@@ -65,26 +65,19 @@ _kgsl_get_pool_from_order(unsigned int order)
/* Map the page into kernel and zero it out */
static void
-_kgsl_pool_zero_page(struct page *p, unsigned int pool_order)
+_kgsl_pool_zero_page(struct page *p)
{
- int i;
-
- for (i = 0; i < (1 << pool_order); i++) {
- struct page *page = nth_page(p, i);
- void *addr = kmap_atomic(page);
+ void *addr = kmap_atomic(p);
- memset(addr, 0, PAGE_SIZE);
- dmac_flush_range(addr, addr + PAGE_SIZE);
- kunmap_atomic(addr);
- }
+ memset(addr, 0, PAGE_SIZE);
+ dmac_flush_range(addr, addr + PAGE_SIZE);
+ kunmap_atomic(addr);
}
/* Add a page to specified pool */
static void
_kgsl_pool_add_page(struct kgsl_page_pool *pool, struct page *p)
{
- _kgsl_pool_zero_page(p, pool->pool_order);
-
spin_lock(&pool->list_lock);
list_add_tail(&p->lru, &pool->page_list);
pool->page_count++;
@@ -329,7 +322,6 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages,
} else
return -ENOMEM;
}
- _kgsl_pool_zero_page(page, order);
goto done;
}
@@ -349,7 +341,6 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages,
page = alloc_pages(gfp_mask, order);
if (page == NULL)
return -ENOMEM;
- _kgsl_pool_zero_page(page, order);
goto done;
}
}
@@ -379,13 +370,12 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages,
} else
return -ENOMEM;
}
-
- _kgsl_pool_zero_page(page, order);
}
done:
for (j = 0; j < (*page_size >> PAGE_SHIFT); j++) {
p = nth_page(page, j);
+ _kgsl_pool_zero_page(p);
pages[pcount] = p;
pcount++;
}
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index ad8b0131bb46..8c998a5d791b 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -43,13 +43,6 @@
#define DEFAULT_BUS_P 25
-/*
- * The effective duration of qos request in usecs. After
- * timeout, qos request is cancelled automatically.
- * Kept 80ms default, inline with default GPU idle time.
- */
-#define KGSL_L2PC_CPU_TIMEOUT (80 * 1000)
-
/* Order deeply matters here because reasons. New entries go on the end */
static const char * const clocks[] = {
"src_clk",
@@ -520,12 +513,14 @@ EXPORT_SYMBOL(kgsl_pwrctrl_set_constraint);
/**
* kgsl_pwrctrl_update_l2pc() - Update existing qos request
* @device: Pointer to the kgsl_device struct
+ * @timeout_us: the effective duration of qos request in usecs.
*
* Updates an existing qos request to avoid L2PC on the
* CPUs (which are selected through dtsi) on which GPU
* thread is running. This would help for performance.
*/
-void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device)
+void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device,
+ unsigned long timeout_us)
{
int cpu;
@@ -539,7 +534,7 @@ void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device)
pm_qos_update_request_timeout(
&device->pwrctrl.l2pc_cpus_qos,
device->pwrctrl.pm_qos_cpu_mask_latency,
- KGSL_L2PC_CPU_TIMEOUT);
+ timeout_us);
}
}
EXPORT_SYMBOL(kgsl_pwrctrl_update_l2pc);
@@ -2201,6 +2196,10 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
kgsl_property_read_u32(device, "qcom,l2pc-cpu-mask",
&pwr->l2pc_cpus_mask);
+ pwr->l2pc_update_queue = of_property_read_bool(
+ device->pdev->dev.of_node,
+ "qcom,l2pc-update-queue");
+
pm_runtime_enable(&pdev->dev);
ocmem_bus_node = of_find_node_by_name(
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 42f918b80fcd..5c0071544f60 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -51,6 +51,19 @@
#define KGSL_PWR_DEL_LIMIT 1
#define KGSL_PWR_SET_LIMIT 2
+/*
+ * The effective duration of qos request in usecs at queue time.
+ * After timeout, qos request is cancelled automatically.
+ * Kept 80ms default, inline with default GPU idle time.
+ */
+#define KGSL_L2PC_QUEUE_TIMEOUT (80 * 1000)
+
+/*
+ * The effective duration of qos request in usecs at wakeup time.
+ * After timeout, qos request is cancelled automatically.
+ */
+#define KGSL_L2PC_WAKEUP_TIMEOUT (10 * 1000)
+
enum kgsl_pwrctrl_timer_type {
KGSL_PWR_IDLE_TIMER,
};
@@ -128,6 +141,7 @@ struct kgsl_regulator {
* @irq_name - resource name for the IRQ
* @clk_stats - structure of clock statistics
* @l2pc_cpus_mask - mask to avoid L2PC on masked CPUs
+ * @l2pc_update_queue - Boolean flag to avoid L2PC on masked CPUs at queue time
* @l2pc_cpus_qos - qos structure to avoid L2PC on CPUs
* @pm_qos_req_dma - the power management quality of service structure
* @pm_qos_active_latency - allowed CPU latency in microseconds when active
@@ -183,6 +197,7 @@ struct kgsl_pwrctrl {
const char *irq_name;
struct kgsl_clk_stats clk_stats;
unsigned int l2pc_cpus_mask;
+ bool l2pc_update_queue;
struct pm_qos_request l2pc_cpus_qos;
struct pm_qos_request pm_qos_req_dma;
unsigned int pm_qos_active_latency;
@@ -249,5 +264,6 @@ int kgsl_active_count_wait(struct kgsl_device *device, int count);
void kgsl_pwrctrl_busy_time(struct kgsl_device *device, u64 time, u64 busy);
void kgsl_pwrctrl_set_constraint(struct kgsl_device *device,
struct kgsl_pwr_constraint *pwrc, uint32_t id);
-void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device);
+void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device,
+ unsigned long timeout_us);
#endif /* __KGSL_PWRCTRL_H */
diff --git a/drivers/input/touchscreen/st/fts.c b/drivers/input/touchscreen/st/fts.c
index 78bdd24af28b..08bfb83a9447 100644
--- a/drivers/input/touchscreen/st/fts.c
+++ b/drivers/input/touchscreen/st/fts.c
@@ -1003,7 +1003,10 @@ static unsigned char *fts_status_event_handler(
case FTS_WATER_MODE_ON:
case FTS_WATER_MODE_OFF:
default:
- logError(1, "%s %s Received unhandled status event = %02X %02X %02X %02X %02X %02X %02X %02X\n", tag, __func__, event[0], event[1], event[2], event[3], event[4], event[5], event[6], event[7]);
+ logError(0,
+ "%s %s Received unhandled status event = %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ tag, __func__, event[0], event[1], event[2],
+ event[3], event[4], event[5], event[6], event[7]);
break;
}
@@ -1755,8 +1758,6 @@ static int fts_fb_state_chg_callback(struct notifier_block *nb, unsigned long va
info->resume_bit = 1;
- fts_system_reset();
-
fts_mode_handler(info, 0);
info->sensor_sleep = false;
@@ -1959,9 +1960,9 @@ static int parse_dt(struct device *dev, struct fts_i2c_platform_data *bdata)
bdata->bus_reg_name = name;
logError(0, "%s bus_reg_name = %s\n", tag, name);
- if (of_property_read_bool(np, "st, reset-gpio")) {
+ if (of_property_read_bool(np, "st,reset-gpio")) {
bdata->reset_gpio = of_get_named_gpio_flags(np,
- "st, reset-gpio", 0, NULL);
+ "st,reset-gpio", 0, NULL);
logError(0, "%s reset_gpio =%d\n", tag, bdata->reset_gpio);
} else {
bdata->reset_gpio = GPIO_NOT_DEFINED;
@@ -2210,7 +2211,13 @@ static int fts_probe(struct i2c_client *client,
}
#endif
- queue_delayed_work(info->fwu_workqueue, &info->fwu_work, msecs_to_jiffies(EXP_FN_WORK_DELAY_MS));
+ /*if wanna auto-update FW when probe,
+ * please don't comment the following code
+ */
+
+ /* queue_delayed_work(info->fwu_workqueue, &info->fwu_work,
+ * msecs_to_jiffies(EXP_FN_WORK_DELAY_MS));
+ */
logError(1, "%s Probe Finished!\n", tag);
return OK;
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index c6f74b149706..ce18a512b76a 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1727,7 +1727,8 @@ static void arm_smmu_pgtbl_unlock(struct arm_smmu_domain *smmu_domain,
static int arm_smmu_restore_sec_cfg(struct arm_smmu_device *smmu)
{
- int ret, scm_ret;
+ int ret;
+ u64 scm_ret;
if (!arm_smmu_is_static_cb(smmu))
return 0;
diff --git a/drivers/media/platform/msm/ais/fd/msm_fd_dev.c b/drivers/media/platform/msm/ais/fd/msm_fd_dev.c
index 420083f019cf..4024748e6afa 100644
--- a/drivers/media/platform/msm/ais/fd/msm_fd_dev.c
+++ b/drivers/media/platform/msm/ais/fd/msm_fd_dev.c
@@ -1053,14 +1053,18 @@ static int msm_fd_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
a->value = ctx->format.size->work_size;
break;
case V4L2_CID_FD_WORK_MEMORY_FD:
+ mutex_lock(&ctx->fd_device->recovery_lock);
if (ctx->work_buf.fd != -1)
msm_fd_hw_unmap_buffer(&ctx->work_buf);
if (a->value >= 0) {
ret = msm_fd_hw_map_buffer(&ctx->mem_pool,
a->value, &ctx->work_buf);
- if (ret < 0)
+ if (ret < 0) {
+ mutex_unlock(&ctx->fd_device->recovery_lock);
return ret;
+ }
}
+ mutex_unlock(&ctx->fd_device->recovery_lock);
break;
default:
return -EINVAL;
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp.h b/drivers/media/platform/msm/ais/isp/msm_isp.h
index 72a76d178aa8..86974eeb4a32 100644
--- a/drivers/media/platform/msm/ais/isp/msm_isp.h
+++ b/drivers/media/platform/msm/ais/isp/msm_isp.h
@@ -355,6 +355,7 @@ struct msm_vfe_hardware_info {
uint32_t dmi_reg_offset;
uint32_t min_ab;
uint32_t min_ib;
+ uint32_t regulator_num;
const char *regulator_names[];
};
diff --git a/drivers/media/platform/msm/ais/isp/msm_isp47.c b/drivers/media/platform/msm/ais/isp/msm_isp47.c
index d63282f80aca..d33dc758aef9 100644
--- a/drivers/media/platform/msm/ais/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/ais/isp/msm_isp47.c
@@ -2537,8 +2537,7 @@ int msm_vfe47_get_regulators(struct vfe_device *vfe_dev)
int rc = 0;
int i;
- vfe_dev->vfe_num_regulators =
- sizeof(*vfe_dev->hw_info->regulator_names) / sizeof(char *);
+ vfe_dev->vfe_num_regulators = vfe_dev->hw_info->regulator_num;
vfe_dev->regulator_info = kzalloc(sizeof(struct msm_cam_regulator) *
vfe_dev->vfe_num_regulators, GFP_KERNEL);
@@ -2811,6 +2810,7 @@ struct msm_vfe_hardware_info vfe47_hw_info = {
.dmi_reg_offset = 0xC2C,
.axi_hw_info = &msm_vfe47_axi_hw_info,
.stats_hw_info = &msm_vfe47_stats_hw_info,
+ .regulator_num = 3,
.regulator_names = {"vdd", "camss-vdd", "mmagic-vdd"},
};
EXPORT_SYMBOL(vfe47_hw_info);
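The msm_isp47.c change above replaces a sizeof-based regulator count with an explicit regulator_num field. Since regulator_names is a flexible array member, sizeof(*regulator_names) / sizeof(char *) always evaluates to 1 no matter how many names the initializer supplies, which is presumably why the count had to become explicit. A self-contained C demonstration of the pitfall, using simplified stand-in types rather than the ISP driver's:

#include <stdio.h>

struct hw_info {
	unsigned int regulator_num;	/* explicit count, as in the patch */
	const char *regulator_names[];	/* flexible array member */
};

/* Initializing a flexible array member in a static object is a GNU
 * extension, the same one the kernel's vfe47_hw_info relies on. */
static const struct hw_info vfe_hw_info = {
	.regulator_num = 3,
	.regulator_names = {"vdd", "camss-vdd", "mmagic-vdd"},
};

int main(void)
{
	/* sizeof of one element divided by sizeof(char *) is always 1 ... */
	printf("sizeof-based count: %zu\n",
	       sizeof(*vfe_hw_info.regulator_names) / sizeof(char *));
	/* ... so the real count has to be carried explicitly. */
	printf("explicit count:     %u\n", vfe_hw_info.regulator_num);
	return 0;
}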
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 7bc5b5ad1122..cf897947fff2 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -8434,9 +8434,10 @@ out:
*/
static int qseecom_check_whitelist_feature(void)
{
- int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
+ u64 version = 0;
+ int ret = scm_get_feat_version(FEATURE_ID_WHITELIST, &version);
- return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
+ return (ret == 0) && (version >= MAKE_WHITELIST_VERSION(1, 0, 0));
}
static int qseecom_probe(struct platform_device *pdev)
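The qseecom change above reflects the reworked SCM helpers (see also ice.c and arm-smmu.c earlier in this diff): scm_get_feat_version() now returns an error code and hands the feature version back through a u64 pointer, so callers must check both. Below is a standalone model of that calling convention; the query function and the version macro are made-up stand-ins, not the real SCM or qseecom API.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for MAKE_WHITELIST_VERSION(); the real macro's
 * bit layout lives in qseecom.c. */
#define MAKE_VERSION(major, minor, patch) \
	(((uint64_t)(major) << 22) | ((minor) << 12) | (patch))

/* Hypothetical stand-in for scm_get_feat_version(id, &version). */
static int get_feat_version(uint32_t feature_id, uint64_t *version)
{
	(void)feature_id;
	if (!version)
		return -1;
	*version = MAKE_VERSION(1, 2, 0);	/* pretend firmware reply */
	return 0;
}

static int check_whitelist_feature(void)
{
	uint64_t version = 0;
	int ret = get_feat_version(0x10, &version);

	/* Trust the version only if the call itself succeeded. */
	return (ret == 0) && (version >= MAKE_VERSION(1, 0, 0));
}

int main(void)
{
	printf("whitelist feature supported: %d\n", check_whitelist_feature());
	return 0;
}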
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 01e5502917f7..d39b4056c169 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1173,7 +1173,7 @@ idata_free:
cmd_done:
mmc_blk_put(md);
- if (card->cmdq_init)
+ if (card && card->cmdq_init)
wake_up(&card->host->cmdq_ctx.wait);
return err;
}
@@ -4623,6 +4623,10 @@ static int mmc_blk_probe(struct mmc_card *card)
dev_set_drvdata(&card->dev, md);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ mmc_set_bus_resume_policy(card->host, 1);
+#endif
+
if (mmc_add_disk(md))
goto out;
@@ -4666,6 +4670,9 @@ static void mmc_blk_remove(struct mmc_card *card)
pm_runtime_put_noidle(&card->dev);
mmc_blk_remove_req(md);
dev_set_drvdata(&card->dev, NULL);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ mmc_set_bus_resume_policy(card->host, 0);
+#endif
}
static int _mmc_blk_suspend(struct mmc_card *card, bool wait)
diff --git a/drivers/net/ethernet/msm/msm_rmnet_mhi.c b/drivers/net/ethernet/msm/msm_rmnet_mhi.c
index 60b7a64c2edb..de14dcc6f4ed 100644
--- a/drivers/net/ethernet/msm/msm_rmnet_mhi.c
+++ b/drivers/net/ethernet/msm/msm_rmnet_mhi.c
@@ -942,10 +942,13 @@ net_dev_reg_fail:
netif_napi_del(&(rmnet_mhi_ptr->napi));
free_netdev(rmnet_mhi_ptr->dev);
net_dev_alloc_fail:
- mhi_close_channel(rmnet_mhi_ptr->rx_client_handle);
- rmnet_mhi_ptr->dev = NULL;
+ if (rmnet_mhi_ptr->rx_client_handle) {
+ mhi_close_channel(rmnet_mhi_ptr->rx_client_handle);
+ rmnet_mhi_ptr->dev = NULL;
+ }
mhi_rx_chan_start_fail:
- mhi_close_channel(rmnet_mhi_ptr->tx_client_handle);
+ if (rmnet_mhi_ptr->tx_client_handle)
+ mhi_close_channel(rmnet_mhi_ptr->tx_client_handle);
mhi_tx_chan_start_fail:
rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Exited ret %d.\n", ret);
return ret;
diff --git a/drivers/net/wireless/cnss/cnss_pci.c b/drivers/net/wireless/cnss/cnss_pci.c
index 22c59d8c3c45..d150c2dd0f0f 100644
--- a/drivers/net/wireless/cnss/cnss_pci.c
+++ b/drivers/net/wireless/cnss/cnss_pci.c
@@ -114,6 +114,8 @@
#define WLAN_VREG_SP2T_MIN 2700000
#define POWER_ON_DELAY 2
+#define WLAN_VREG_IO_DELAY_MIN 100
+#define WLAN_VREG_IO_DELAY_MAX 1000
#define WLAN_ENABLE_DELAY 10
#define WLAN_RECOVERY_DELAY 1
#define PCIE_ENABLE_DELAY 100
@@ -134,7 +136,6 @@ static DEFINE_SPINLOCK(pci_link_down_lock);
#define FW_IMAGE_MISSION (0x02)
#define FW_IMAGE_BDATA (0x03)
#define FW_IMAGE_PRINT (0x04)
-#define FW_SETUP_DELAY 2000
#define SEG_METADATA (0x01)
#define SEG_NON_PAGED (0x02)
@@ -274,10 +275,8 @@ static struct cnss_data {
u32 fw_dma_size;
u32 fw_seg_count;
struct segment_memory fw_seg_mem[MAX_NUM_OF_SEGMENTS];
- atomic_t fw_store_in_progress;
/* Firmware setup complete lock */
struct mutex fw_setup_stat_lock;
- struct completion fw_setup_complete;
void *bdata_cpu;
dma_addr_t bdata_dma;
u32 bdata_dma_size;
@@ -307,13 +306,6 @@ static int cnss_wlan_vreg_on(struct cnss_wlan_vreg_info *vreg_info)
}
}
- ret = regulator_enable(vreg_info->wlan_reg);
- if (ret) {
- pr_err("%s: regulator enable failed for WLAN power\n",
- __func__);
- goto error_enable;
- }
-
if (vreg_info->wlan_reg_io) {
ret = regulator_enable(vreg_info->wlan_reg_io);
if (ret) {
@@ -321,6 +313,8 @@ static int cnss_wlan_vreg_on(struct cnss_wlan_vreg_info *vreg_info)
__func__);
goto error_enable_reg_io;
}
+
+ usleep_range(WLAN_VREG_IO_DELAY_MIN, WLAN_VREG_IO_DELAY_MAX);
}
if (vreg_info->wlan_reg_xtal_aon) {
@@ -341,6 +335,13 @@ static int cnss_wlan_vreg_on(struct cnss_wlan_vreg_info *vreg_info)
}
}
+ ret = regulator_enable(vreg_info->wlan_reg);
+ if (ret) {
+ pr_err("%s: regulator enable failed for WLAN power\n",
+ __func__);
+ goto error_enable;
+ }
+
if (vreg_info->wlan_reg_sp2t) {
ret = regulator_enable(vreg_info->wlan_reg_sp2t);
if (ret) {
@@ -377,6 +378,8 @@ error_enable_ant_switch:
if (vreg_info->wlan_reg_sp2t)
regulator_disable(vreg_info->wlan_reg_sp2t);
error_enable_reg_sp2t:
+ regulator_disable(vreg_info->wlan_reg);
+error_enable:
if (vreg_info->wlan_reg_xtal)
regulator_disable(vreg_info->wlan_reg_xtal);
error_enable_reg_xtal:
@@ -386,8 +389,6 @@ error_enable_reg_xtal_aon:
if (vreg_info->wlan_reg_io)
regulator_disable(vreg_info->wlan_reg_io);
error_enable_reg_io:
- regulator_disable(vreg_info->wlan_reg);
-error_enable:
if (vreg_info->wlan_reg_core)
regulator_disable(vreg_info->wlan_reg_core);
error_enable_reg_core:
@@ -425,6 +426,13 @@ static int cnss_wlan_vreg_off(struct cnss_wlan_vreg_info *vreg_info)
}
}
+ ret = regulator_disable(vreg_info->wlan_reg);
+ if (ret) {
+ pr_err("%s: regulator disable failed for WLAN power\n",
+ __func__);
+ goto error_disable;
+ }
+
if (vreg_info->wlan_reg_xtal) {
ret = regulator_disable(vreg_info->wlan_reg_xtal);
if (ret) {
@@ -452,13 +460,6 @@ static int cnss_wlan_vreg_off(struct cnss_wlan_vreg_info *vreg_info)
}
}
- ret = regulator_disable(vreg_info->wlan_reg);
- if (ret) {
- pr_err("%s: regulator disable failed for WLAN power\n",
- __func__);
- goto error_disable;
- }
-
if (vreg_info->wlan_reg_core) {
ret = regulator_disable(vreg_info->wlan_reg_core);
if (ret) {
@@ -718,24 +719,6 @@ static int cnss_wlan_get_resources(struct platform_device *pdev)
}
}
- vreg_info->wlan_reg = regulator_get(&pdev->dev, WLAN_VREG_NAME);
-
- if (IS_ERR(vreg_info->wlan_reg)) {
- if (PTR_ERR(vreg_info->wlan_reg) == -EPROBE_DEFER)
- pr_err("%s: vreg probe defer\n", __func__);
- else
- pr_err("%s: vreg regulator get failed\n", __func__);
- ret = PTR_ERR(vreg_info->wlan_reg);
- goto err_reg_get;
- }
-
- ret = regulator_enable(vreg_info->wlan_reg);
-
- if (ret) {
- pr_err("%s: vreg initial vote failed\n", __func__);
- goto err_reg_enable;
- }
-
if (of_get_property(pdev->dev.of_node,
WLAN_VREG_IO_NAME"-supply", NULL)) {
vreg_info->wlan_reg_io = regulator_get(&pdev->dev,
@@ -755,12 +738,33 @@ static int cnss_wlan_get_resources(struct platform_device *pdev)
__func__);
goto err_reg_io_enable;
}
+
+ usleep_range(WLAN_VREG_IO_DELAY_MIN,
+ WLAN_VREG_IO_DELAY_MAX);
}
}
if (cnss_enable_xtal_ldo(pdev))
goto err_reg_xtal_enable;
+ vreg_info->wlan_reg = regulator_get(&pdev->dev, WLAN_VREG_NAME);
+
+ if (IS_ERR(vreg_info->wlan_reg)) {
+ if (PTR_ERR(vreg_info->wlan_reg) == -EPROBE_DEFER)
+ pr_err("%s: vreg probe defer\n", __func__);
+ else
+ pr_err("%s: vreg regulator get failed\n", __func__);
+ ret = PTR_ERR(vreg_info->wlan_reg);
+ goto err_reg_get;
+ }
+
+ ret = regulator_enable(vreg_info->wlan_reg);
+
+ if (ret) {
+ pr_err("%s: vreg initial vote failed\n", __func__);
+ goto err_reg_enable;
+ }
+
if (of_get_property(pdev->dev.of_node,
WLAN_VREG_SP2T_NAME"-supply", NULL)) {
vreg_info->wlan_reg_sp2t =
@@ -929,7 +933,11 @@ err_reg_sp2t_enable:
err_reg_sp2t_set:
if (vreg_info->wlan_reg_sp2t)
regulator_put(vreg_info->wlan_reg_sp2t);
+ regulator_disable(vreg_info->wlan_reg);
+err_reg_enable:
+ regulator_put(vreg_info->wlan_reg);
+err_reg_get:
cnss_disable_xtal_ldo(pdev);
err_reg_xtal_enable:
@@ -940,12 +948,6 @@ err_reg_io_enable:
err_reg_io_set:
if (vreg_info->wlan_reg_io)
regulator_put(vreg_info->wlan_reg_io);
- regulator_disable(vreg_info->wlan_reg);
-
-err_reg_enable:
- regulator_put(vreg_info->wlan_reg);
-
-err_reg_get:
if (vreg_info->wlan_reg_core)
regulator_disable(vreg_info->wlan_reg_core);
@@ -975,13 +977,13 @@ static void cnss_wlan_release_resources(void)
regulator_put(vreg_info->ant_switch);
if (vreg_info->wlan_reg_sp2t)
regulator_put(vreg_info->wlan_reg_sp2t);
+ regulator_put(vreg_info->wlan_reg);
if (vreg_info->wlan_reg_xtal)
regulator_put(vreg_info->wlan_reg_xtal);
if (vreg_info->wlan_reg_xtal_aon)
regulator_put(vreg_info->wlan_reg_xtal_aon);
if (vreg_info->wlan_reg_io)
regulator_put(vreg_info->wlan_reg_io);
- regulator_put(vreg_info->wlan_reg);
if (vreg_info->wlan_reg_core)
regulator_put(vreg_info->wlan_reg_core);
vreg_info->state = VREG_OFF;
@@ -1374,15 +1376,6 @@ int cnss_get_fw_image(struct image_desc_info *image_desc_info)
!penv->fw_seg_count || !penv->bdata_seg_count)
return -EINVAL;
- /* Check for firmware setup trigger by usersapce is in progress
- * and wait for complition of firmware setup.
- */
-
- if (atomic_read(&penv->fw_store_in_progress)) {
- wait_for_completion_timeout(&penv->fw_setup_complete,
- msecs_to_jiffies(FW_SETUP_DELAY));
- }
-
mutex_lock(&penv->fw_setup_stat_lock);
image_desc_info->fw_addr = penv->fw_dma;
image_desc_info->fw_size = penv->fw_dma_size;
@@ -1627,7 +1620,9 @@ static int cnss_wlan_pci_probe(struct pci_dev *pdev,
goto err_pcie_suspend;
}
+ mutex_lock(&penv->fw_setup_stat_lock);
cnss_wlan_fw_mem_alloc(pdev);
+ mutex_unlock(&penv->fw_setup_stat_lock);
ret = device_create_file(&penv->pldev->dev, &dev_attr_wlan_setup);
@@ -1874,17 +1869,11 @@ static ssize_t fw_image_setup_store(struct device *dev,
if (!penv)
return -ENODEV;
- if (atomic_read(&penv->fw_store_in_progress)) {
- pr_info("%s: Firmware setup in progress\n", __func__);
- return 0;
- }
-
- atomic_set(&penv->fw_store_in_progress, 1);
- init_completion(&penv->fw_setup_complete);
+ mutex_lock(&penv->fw_setup_stat_lock);
+ pr_info("%s: Firmware setup in progress\n", __func__);
if (kstrtoint(buf, 0, &val)) {
- atomic_set(&penv->fw_store_in_progress, 0);
- complete(&penv->fw_setup_complete);
+ mutex_unlock(&penv->fw_setup_stat_lock);
return -EINVAL;
}
@@ -1895,8 +1884,7 @@ static ssize_t fw_image_setup_store(struct device *dev,
if (ret != 0) {
pr_err("%s: Invalid parsing of FW image files %d",
__func__, ret);
- atomic_set(&penv->fw_store_in_progress, 0);
- complete(&penv->fw_setup_complete);
+ mutex_unlock(&penv->fw_setup_stat_lock);
return -EINVAL;
}
penv->fw_image_setup = val;
@@ -1906,9 +1894,8 @@ static ssize_t fw_image_setup_store(struct device *dev,
penv->bmi_test = val;
}
- atomic_set(&penv->fw_store_in_progress, 0);
- complete(&penv->fw_setup_complete);
-
+ pr_info("%s: Firmware setup completed\n", __func__);
+ mutex_unlock(&penv->fw_setup_stat_lock);
return count;
}
@@ -2007,16 +1994,21 @@ int cnss_get_codeswap_struct(struct codeswap_codeseg_info *swap_seg)
{
struct codeswap_codeseg_info *cnss_seg_info = penv->cnss_seg_info;
+ mutex_lock(&penv->fw_setup_stat_lock);
if (!cnss_seg_info) {
swap_seg = NULL;
+ mutex_unlock(&penv->fw_setup_stat_lock);
return -ENOENT;
}
+
if (!atomic_read(&penv->fw_available)) {
pr_debug("%s: fw is not available\n", __func__);
+ mutex_unlock(&penv->fw_setup_stat_lock);
return -ENOENT;
}
*swap_seg = *cnss_seg_info;
+ mutex_unlock(&penv->fw_setup_stat_lock);
return 0;
}
@@ -2035,15 +2027,6 @@ static void cnss_wlan_memory_expansion(void)
u_int32_t total_length = 0;
struct pci_dev *pdev;
- /* Check for firmware setup trigger by usersapce is in progress
- * and wait for complition of firmware setup.
- */
-
- if (atomic_read(&penv->fw_store_in_progress)) {
- wait_for_completion_timeout(&penv->fw_setup_complete,
- msecs_to_jiffies(FW_SETUP_DELAY));
- }
-
mutex_lock(&penv->fw_setup_stat_lock);
filename = cnss_wlan_get_evicted_data_file();
pdev = penv->pdev;
@@ -2859,6 +2842,7 @@ static int cnss_probe(struct platform_device *pdev)
penv->vreg_info.wlan_reg = NULL;
penv->vreg_info.state = VREG_OFF;
penv->pci_register_again = false;
+ mutex_init(&penv->fw_setup_stat_lock);
ret = cnss_wlan_get_resources(pdev);
if (ret)
@@ -3016,8 +3000,6 @@ skip_ramdump:
memset(phys_to_virt(0), 0, SZ_4K);
#endif
- atomic_set(&penv->fw_store_in_progress, 0);
- mutex_init(&penv->fw_setup_stat_lock);
ret = device_create_file(dev, &dev_attr_fw_image_setup);
if (ret) {
pr_err("cnss: fw_image_setup sys file creation failed\n");
diff --git a/drivers/net/wireless/wcnss/wcnss_vreg.c b/drivers/net/wireless/wcnss/wcnss_vreg.c
index 82b90ad00f8b..d0a74744f70a 100644
--- a/drivers/net/wireless/wcnss/wcnss_vreg.c
+++ b/drivers/net/wireless/wcnss/wcnss_vreg.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2015, 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -417,13 +417,18 @@ static void wcnss_vregs_off(struct vregs_info regulators[], uint size,
if (regulators[i].state == VREG_NULL_CONFIG)
continue;
+ if (cfg->wcn_external_gpio_support) {
+ if (!memcmp(regulators[i].name, VDD_PA, sizeof(VDD_PA)))
+ continue;
+ }
+
/* Remove PWM mode */
if (regulators[i].state & VREG_OPTIMUM_MODE_MASK) {
- rc = regulator_set_optimum_mode(
- regulators[i].regulator, 0);
- if (rc < 0)
- pr_err("regulator_set_optimum_mode(%s) failed (%d)\n",
- regulators[i].name, rc);
+ rc = regulator_set_load(regulators[i].regulator, 0);
+ if (rc < 0) {
+ pr_err("regulator set load(%s) failed (%d)\n",
+ regulators[i].name, rc);
+ }
}
/* Set voltage to lowest level */
@@ -478,7 +483,12 @@ static int wcnss_vregs_on(struct device *dev,
}
for (i = 0; i < size; i++) {
- /* Get regulator source */
+ if (cfg->wcn_external_gpio_support) {
+ if (!memcmp(regulators[i].name, VDD_PA, sizeof(VDD_PA)))
+ continue;
+ }
+
+ /* Get regulator source */
regulators[i].regulator =
regulator_get(dev, regulators[i].name);
if (IS_ERR(regulators[i].regulator)) {
@@ -518,11 +528,11 @@ static int wcnss_vregs_on(struct device *dev,
/* Vote for PWM/PFM mode if needed */
if (voltage_level[i].uA_load && (reg_cnt > 0)) {
- rc = regulator_set_optimum_mode(regulators[i].regulator,
- voltage_level[i].uA_load);
+ rc = regulator_set_load(regulators[i].regulator,
+ voltage_level[i].uA_load);
if (rc < 0) {
- pr_err("regulator_set_optimum_mode(%s) failed (%d)\n",
- regulators[i].name, rc);
+ pr_err("regulator set load(%s) failed (%d)\n",
+ regulators[i].name, rc);
goto fail;
}
regulators[i].state |= VREG_OPTIMUM_MODE_MASK;
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index 9347882fba92..e99d46ca51b0 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -36,6 +36,7 @@
#include <linux/qpnp/qpnp-adc.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_qos.h>
+#include <linux/bitops.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/subsystem_notif.h>
@@ -56,6 +57,7 @@
#define WCNSS_PM_QOS_TIMEOUT 15000
#define IS_CAL_DATA_PRESENT 0
#define WAIT_FOR_CBC_IND 2
+#define WCNSS_DUAL_BAND_CAPABILITY_OFFSET BIT(8)
/* module params */
#define WCNSS_CONFIG_UNSPECIFIED (-1)
@@ -119,6 +121,8 @@ static DEFINE_SPINLOCK(reg_spinlock);
#define PRONTO_PMU_COM_CSR_OFFSET 0x1040
#define PRONTO_PMU_SOFT_RESET_OFFSET 0x104C
+#define PRONTO_QFUSE_DUAL_BAND_OFFSET 0x0018
+
#define A2XB_CFG_OFFSET 0x00
#define A2XB_INT_SRC_OFFSET 0x0c
#define A2XB_TSTBUS_CTRL_OFFSET 0x14
@@ -381,6 +385,7 @@ static struct {
void __iomem *pronto_saw2_base;
void __iomem *pronto_pll_base;
void __iomem *pronto_mcu_base;
+ void __iomem *pronto_qfuse;
void __iomem *wlan_tx_status;
void __iomem *wlan_tx_phy_aborts;
void __iomem *wlan_brdg_err_source;
@@ -423,6 +428,9 @@ static struct {
int pc_disabled;
struct delayed_work wcnss_pm_qos_del_req;
struct mutex pm_qos_mutex;
+ struct clk *snoc_wcnss;
+ unsigned int snoc_wcnss_clock_freq;
+ bool is_dual_band_disabled;
} *penv = NULL;
static ssize_t wcnss_wlan_macaddr_store(struct device *dev,
@@ -595,7 +603,31 @@ void wcnss_pronto_is_a2xb_bus_stall(void *tst_addr, u32 fifo_mask, char *type)
}
}
-/* Log pronto debug registers before sending reset interrupt */
+int wcnss_get_dual_band_capability_info(struct platform_device *pdev)
+{
+ u32 reg = 0;
+ struct resource *res;
+
+ res = platform_get_resource_byname(
+ pdev, IORESOURCE_MEM, "pronto_qfuse");
+ if (!res)
+ return -EINVAL;
+
+ penv->pronto_qfuse = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(penv->pronto_qfuse))
+ return -ENOMEM;
+
+ reg = readl_relaxed(penv->pronto_qfuse +
+ PRONTO_QFUSE_DUAL_BAND_OFFSET);
+ if (reg & WCNSS_DUAL_BAND_CAPABILITY_OFFSET)
+ penv->is_dual_band_disabled = true;
+ else
+ penv->is_dual_band_disabled = false;
+
+ return 0;
+}
+
+/* Log pronto debug registers during SSR Timeout CB */
void wcnss_pronto_log_debug_regs(void)
{
void __iomem *reg_addr, *tst_addr, *tst_ctrl_addr;
@@ -1683,6 +1715,14 @@ int wcnss_wlan_iris_xo_mode(void)
}
EXPORT_SYMBOL(wcnss_wlan_iris_xo_mode);
+int wcnss_wlan_dual_band_disabled(void)
+{
+ if (penv && penv->pdev)
+ return penv->is_dual_band_disabled;
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(wcnss_wlan_dual_band_disabled);
void wcnss_suspend_notify(void)
{
@@ -2717,23 +2757,23 @@ wcnss_trigger_config(struct platform_device *pdev)
int is_pronto_vadc;
int is_pronto_v3;
int pil_retry = 0;
- int has_pronto_hw = of_property_read_bool(pdev->dev.of_node,
- "qcom,has-pronto-hw");
-
- is_pronto_vadc = of_property_read_bool(pdev->dev.of_node,
- "qcom,is-pronto-vadc");
+ struct wcnss_wlan_config *wlan_cfg = &penv->wlan_config;
+ struct device_node *node = (&pdev->dev)->of_node;
+ int has_pronto_hw = of_property_read_bool(node, "qcom,has-pronto-hw");
- is_pronto_v3 = of_property_read_bool(pdev->dev.of_node,
- "qcom,is-pronto-v3");
+ is_pronto_vadc = of_property_read_bool(node, "qcom,is-pronto-vadc");
+ is_pronto_v3 = of_property_read_bool(node, "qcom,is-pronto-v3");
- penv->is_vsys_adc_channel = of_property_read_bool(pdev->dev.of_node,
- "qcom,has-vsys-adc-channel");
+ penv->is_vsys_adc_channel =
+ of_property_read_bool(node, "qcom,has-vsys-adc-channel");
+ penv->is_a2xb_split_reg =
+ of_property_read_bool(node, "qcom,has-a2xb-split-reg");
- penv->is_a2xb_split_reg = of_property_read_bool(pdev->dev.of_node,
- "qcom,has-a2xb-split-reg");
+ wlan_cfg->wcn_external_gpio_support =
+ of_property_read_bool(node, "qcom,wcn-external-gpio-support");
- if (of_property_read_u32(pdev->dev.of_node,
- "qcom,wlan-rx-buff-count", &penv->wlan_rx_buff_count)) {
+ if (of_property_read_u32(node, "qcom,wlan-rx-buff-count",
+ &penv->wlan_rx_buff_count)) {
penv->wlan_rx_buff_count = WCNSS_DEF_WLAN_RX_BUFF_COUNT;
}
@@ -2794,15 +2834,18 @@ wcnss_trigger_config(struct platform_device *pdev)
goto fail;
}
- index++;
- ret = wcnss_dt_parse_vreg_level(&pdev->dev, index,
- "qcom,iris-vddpa-current",
- "qcom,iris-vddpa-voltage-level",
- penv->wlan_config.iris_vlevel);
-
- if (ret) {
- dev_err(&pdev->dev, "error reading voltage-level property\n");
- goto fail;
+ if (!wlan_cfg->wcn_external_gpio_support) {
+ index++;
+ ret = wcnss_dt_parse_vreg_level(
+ &pdev->dev, index,
+ "qcom,iris-vddpa-current",
+ "qcom,iris-vddpa-voltage-level",
+ penv->wlan_config.iris_vlevel);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "error reading voltage-level property\n");
+ goto fail;
+ }
}
index++;
@@ -2825,8 +2868,8 @@ wcnss_trigger_config(struct platform_device *pdev)
pdata = pdev->dev.platform_data;
if (WCNSS_CONFIG_UNSPECIFIED == has_48mhz_xo) {
if (has_pronto_hw) {
- has_48mhz_xo = of_property_read_bool(pdev->dev.of_node,
- "qcom,has-48mhz-xo");
+ has_48mhz_xo =
+ of_property_read_bool(node, "qcom,has-48mhz-xo");
} else {
has_48mhz_xo = pdata->has_48mhz_xo;
}
@@ -2837,8 +2880,8 @@ wcnss_trigger_config(struct platform_device *pdev)
penv->wlan_config.is_pronto_v3 = is_pronto_v3;
if (WCNSS_CONFIG_UNSPECIFIED == has_autodetect_xo && has_pronto_hw) {
- has_autodetect_xo = of_property_read_bool(pdev->dev.of_node,
- "qcom,has-autodetect-xo");
+ has_autodetect_xo =
+ of_property_read_bool(node, "qcom,has-autodetect-xo");
}
penv->thermal_mitigation = 0;
@@ -3118,6 +3161,16 @@ wcnss_trigger_config(struct platform_device *pdev)
__func__);
goto fail_ioremap2;
}
+
+ if (of_property_read_bool(node,
+ "qcom,is-dual-band-disabled")) {
+ ret = wcnss_get_dual_band_capability_info(pdev);
+ if (ret) {
+ pr_err(
+ "%s: failed to get dual band info\n", __func__);
+ goto fail_ioremap2;
+ }
+ }
}
penv->adc_tm_dev = qpnp_get_adc_tm(&penv->pdev->dev, "wcnss");
@@ -3129,6 +3182,21 @@ wcnss_trigger_config(struct platform_device *pdev)
penv->fw_vbatt_state = WCNSS_CONFIG_UNSPECIFIED;
}
+ penv->snoc_wcnss = devm_clk_get(&penv->pdev->dev, "snoc_wcnss");
+ if (IS_ERR(penv->snoc_wcnss)) {
+ pr_err("%s: couldn't get snoc_wcnss\n", __func__);
+ penv->snoc_wcnss = NULL;
+ } else {
+ if (of_property_read_u32(pdev->dev.of_node,
+ "qcom,snoc-wcnss-clock-freq",
+ &penv->snoc_wcnss_clock_freq)) {
+ pr_debug("%s: wcnss snoc clock frequency is not defined\n",
+ __func__);
+ devm_clk_put(&penv->pdev->dev, penv->snoc_wcnss);
+ penv->snoc_wcnss = NULL;
+ }
+ }
+
if (penv->wlan_config.is_pronto_vadc) {
penv->vadc_dev = qpnp_get_vadc(&penv->pdev->dev, "wcnss");
@@ -3191,6 +3259,38 @@ fail:
return ret;
}
+/* The driver needs to vote for the snoc clock directly.
+ * wcnss_snoc_vote() is called to enable or disable
+ * the snoc clock.
+ */
+void wcnss_snoc_vote(bool clk_chk_en)
+{
+ int rc;
+
+ if (!penv->snoc_wcnss) {
+ pr_err("%s: couldn't get clk snoc_wcnss\n", __func__);
+ return;
+ }
+
+ if (clk_chk_en) {
+ rc = clk_set_rate(penv->snoc_wcnss,
+ penv->snoc_wcnss_clock_freq);
+ if (rc) {
+ pr_err("%s: snoc_wcnss_clk-clk_set_rate failed =%d\n",
+ __func__, rc);
+ return;
+ }
+
+ if (clk_prepare_enable(penv->snoc_wcnss)) {
+ pr_err("%s: snoc_wcnss clk enable failed\n", __func__);
+ return;
+ }
+ } else {
+ clk_disable_unprepare(penv->snoc_wcnss);
+ }
+}
+EXPORT_SYMBOL(wcnss_snoc_vote);
+
/* The wlan prop driver cannot invoke the cancel_work_sync()
 * function directly, so it calls the wcnss_flush_work()
 * function to do so
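
The new wcnss_snoc_vote() export above follows the usual clk voting sequence: set the rate first, then prepare and enable for a vote, and disable/unprepare to drop it, bailing out early if the clock or its DT frequency was never obtained. The sketch below mirrors that control flow in plain, runnable C; mock_clk_set_rate() and friends are hypothetical stand-ins for the kernel clk API, and the 200 MHz figure is only an example, not the driver's value.

#include <stdbool.h>
#include <stdio.h>

struct mock_clk { const char *name; unsigned long rate; bool enabled; };

/* Hypothetical stand-ins for clk_set_rate()/clk_prepare_enable()/clk_disable_unprepare(). */
static int mock_clk_set_rate(struct mock_clk *clk, unsigned long rate)
{
	clk->rate = rate;
	return 0;
}

static int mock_clk_prepare_enable(struct mock_clk *clk)
{
	clk->enabled = true;
	printf("%s enabled at %lu Hz\n", clk->name, clk->rate);
	return 0;
}

static void mock_clk_disable_unprepare(struct mock_clk *clk)
{
	clk->enabled = false;
	printf("%s disabled\n", clk->name);
}

static void snoc_vote(struct mock_clk *clk, unsigned long freq, bool enable)
{
	if (!clk || !freq) {	/* clock or DT frequency missing: nothing to vote */
		printf("snoc clock not available, skipping vote\n");
		return;
	}

	if (enable) {
		if (mock_clk_set_rate(clk, freq))	/* rate must be set before enabling */
			return;
		mock_clk_prepare_enable(clk);
	} else {
		mock_clk_disable_unprepare(clk);
	}
}

int main(void)
{
	struct mock_clk snoc = { "snoc_wcnss", 0, false };

	snoc_vote(&snoc, 200000000UL, true);	/* example frequency only */
	snoc_vote(&snoc, 200000000UL, false);
	return 0;
}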
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index 217c7ce3f57b..8e66cd5770b5 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -278,6 +278,7 @@
#define PERST_PROPAGATION_DELAY_US_MIN 1000
#define PERST_PROPAGATION_DELAY_US_MAX 1005
+#define SWITCH_DELAY_MAX 20
#define REFCLK_STABILIZATION_DELAY_US_MIN 1000
#define REFCLK_STABILIZATION_DELAY_US_MAX 1005
#define LINK_UP_TIMEOUT_US_MIN 5000
@@ -626,6 +627,7 @@ struct msm_pcie_dev_t {
bool ext_ref_clk;
bool common_phy;
uint32_t ep_latency;
+ uint32_t switch_latency;
uint32_t wr_halt_size;
uint32_t cpl_timeout;
uint32_t current_bdf;
@@ -1735,7 +1737,8 @@ static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
static int msm_pcie_restore_sec_config(struct msm_pcie_dev_t *dev)
{
- int ret, scm_ret;
+ int ret;
+ u64 scm_ret;
if (!dev) {
pr_err("PCIe: the input pcie dev is NULL.\n");
@@ -1745,7 +1748,7 @@ static int msm_pcie_restore_sec_config(struct msm_pcie_dev_t *dev)
ret = scm_restore_sec_cfg(dev->scm_dev_id, 0, &scm_ret);
if (ret || scm_ret) {
PCIE_ERR(dev,
- "PCIe: RC%d failed(%d) to restore sec config, scm_ret=%d\n",
+ "PCIe: RC%d failed(%d) to restore sec config, scm_ret=%llu\n",
dev->rc_idx, ret, scm_ret);
return ret ? ret : -EINVAL;
}
@@ -1984,6 +1987,8 @@ static void msm_pcie_show_status(struct msm_pcie_dev_t *dev)
dev->common_phy);
PCIE_DBG_FS(dev, "ep_latency: %dms\n",
dev->ep_latency);
+ PCIE_DBG_FS(dev, "switch_latency: %dms\n",
+ dev->switch_latency);
PCIE_DBG_FS(dev, "wr_halt_size: 0x%x\n",
dev->wr_halt_size);
PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n",
@@ -4675,7 +4680,15 @@ int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options)
goto link_fail;
}
- msleep(500);
+ if (dev->switch_latency) {
+ PCIE_DBG(dev, "switch_latency: %dms\n",
+ dev->switch_latency);
+ if (dev->switch_latency <= SWITCH_DELAY_MAX)
+ usleep_range(dev->switch_latency * 1000,
+ dev->switch_latency * 1000);
+ else
+ msleep(dev->switch_latency);
+ }
msm_pcie_config_controller(dev);
@@ -6279,6 +6292,20 @@ static int msm_pcie_probe(struct platform_device *pdev)
PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: ep-latency: 0x%x.\n",
rc_idx, msm_pcie_dev[rc_idx].ep_latency);
+ msm_pcie_dev[rc_idx].switch_latency = 0;
+ ret = of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,switch-latency",
+ &msm_pcie_dev[rc_idx].switch_latency);
+
+ if (ret)
+ PCIE_DBG(&msm_pcie_dev[rc_idx],
+ "RC%d: switch-latency does not exist.\n",
+ rc_idx);
+ else
+ PCIE_DBG(&msm_pcie_dev[rc_idx],
+ "RC%d: switch-latency: 0x%x.\n",
+ rc_idx, msm_pcie_dev[rc_idx].switch_latency);
+
msm_pcie_dev[rc_idx].wr_halt_size = 0;
ret = of_property_read_u32(pdev->dev.of_node,
"qcom,wr-halt-size",
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 63ec68e6ac2a..39400dda27c2 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -552,6 +552,7 @@ static void armpmu_init(struct arm_pmu *armpmu)
.stop = armpmu_stop,
.read = armpmu_read,
.filter_match = armpmu_filter_match,
+ .events_across_hotplug = 1,
};
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
index 51806cec1e4d..49aa7f25347d 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
@@ -649,8 +649,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
return 0;
ipa_insert_failed:
- if (offset)
- list_move(&offset->link,
+ list_move(&offset->link,
&htbl->head_free_offset_list[offset->bin]);
entry->offset_entry = NULL;
list_del(&entry->link);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c
index dd591407d10f..5228b2db1410 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1436,6 +1436,66 @@ struct elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = {
start_ipv6_filter_idx),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ rule_id_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ rule_id_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_IPA_MAX_FILTERS_V01,
+ .elem_size = sizeof(uint32_t),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ rule_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ dst_pipe_id_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ dst_pipe_id_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_IPA_MAX_CLIENT_DST_PIPES_V01,
+ .elem_size = sizeof(uint32_t),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ dst_pipe_id),
+ },
+ {
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
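
The TLV descriptors added above follow a fixed pattern for an optional variable-length field: a QMI_OPT_FLAG byte marking presence, a QMI_DATA_LEN byte carrying the element count, and the VAR_LEN_ARRAY payload itself, all sharing one tlv_type and locating their storage via offsetof(). The self-contained sketch below walks a miniature descriptor table of the same shape and prints the offsets it resolves; the struct and field names are invented for illustration and are not the QMI message definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_IDS 4	/* illustrative bound, not QMI_IPA_MAX_CLIENT_DST_PIPES_V01 */

/* Invented message layout mirroring the valid/len/array triplet. */
struct demo_msg {
	uint8_t  ids_valid;
	uint8_t  ids_len;
	uint32_t ids[MAX_IDS];
};

enum demo_type { DEMO_OPT_FLAG, DEMO_DATA_LEN, DEMO_U32_ARRAY, DEMO_EOTI };

struct demo_elem_info {
	enum demo_type data_type;
	unsigned int   elem_len;
	size_t         elem_size;
	uint8_t        tlv_type;
	size_t         offset;
};

static const struct demo_elem_info demo_ei[] = {
	{ DEMO_OPT_FLAG,  1,       sizeof(uint8_t),  0x18, offsetof(struct demo_msg, ids_valid) },
	{ DEMO_DATA_LEN,  1,       sizeof(uint8_t),  0x18, offsetof(struct demo_msg, ids_len) },
	{ DEMO_U32_ARRAY, MAX_IDS, sizeof(uint32_t), 0x18, offsetof(struct demo_msg, ids) },
	{ DEMO_EOTI,      0,       0,                0,    0 },
};

int main(void)
{
	/* Walk the table the way an encoder would: stop at the EOTI terminator. */
	for (const struct demo_elem_info *ei = demo_ei; ei->data_type != DEMO_EOTI; ei++)
		printf("tlv 0x%02x: type %d, %u elem(s) of %zu byte(s) at offset %zu\n",
		       ei->tlv_type, ei->data_type, ei->elem_len, ei->elem_size, ei->offset);
	return 0;
}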
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
index 011ca300cc09..0531919487d7 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -1399,7 +1399,7 @@ int ipa2_put_rt_tbl(u32 rt_tbl_hdl)
{
struct ipa_rt_tbl *entry;
enum ipa_ip_type ip = IPA_IP_MAX;
- int result;
+ int result = 0;
mutex_lock(&ipa_ctx->lock);
entry = ipa_id_find(rt_tbl_hdl);
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 834712a71ac6..5dbd43b44540 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -1878,7 +1878,9 @@ void q6_deinitialize_rm(void)
if (ret < 0)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_Q6_PROD, ret);
- destroy_workqueue(ipa_rm_q6_workqueue);
+
+ if (ipa_rm_q6_workqueue)
+ destroy_workqueue(ipa_rm_q6_workqueue);
}
static void wake_tx_queue(struct work_struct *work)
@@ -2187,7 +2189,10 @@ timer_init_err:
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_WWAN_0_PROD, ret);
create_rsrc_err:
- q6_deinitialize_rm();
+
+ if (!atomic_read(&is_ssr))
+ q6_deinitialize_rm();
+
q6_init_err:
free_netdev(ipa_netdevs[0]);
ipa_netdevs[0] = NULL;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index 7c3b5838242e..ce35ba02154d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -426,8 +426,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
return 0;
ipa_insert_failed:
- if (offset)
- list_move(&offset->link,
+ list_move(&offset->link,
&htbl->head_free_offset_list[offset->bin]);
entry->offset_entry = NULL;
list_del(&entry->link);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 690a9db67ff0..571852c076ea 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -807,6 +807,11 @@ int ipa3_qmi_filter_notify_send(
return -EINVAL;
}
+ if (req->source_pipe_index == -1) {
+ IPAWANERR("Source pipe index invalid\n");
+ return -EINVAL;
+ }
+
mutex_lock(&ipa3_qmi_lock);
if (ipa3_qmi_ctx != NULL) {
/* cache the qmi_filter_request */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
index d5d850309696..e6f1e2ce0b75 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -121,6 +121,31 @@ extern struct elem_info ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[];
extern struct elem_info ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[];
extern struct elem_info ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[];
+ extern struct elem_info
+ ipa3_ul_firewall_rule_type_data_v01_ei[];
+ extern struct elem_info
+ ipa3_ul_firewall_config_result_type_data_v01_ei[];
+ extern struct elem_info
+ ipa3_per_client_stats_info_type_data_v01_ei[];
+ extern struct elem_info
+ ipa3_enable_per_client_stats_req_msg_data_v01_ei[];
+ extern struct elem_info
+ ipa3_enable_per_client_stats_resp_msg_data_v01_ei[];
+ extern struct elem_info
+ ipa3_get_stats_per_client_req_msg_data_v01_ei[];
+ extern struct elem_info
+ ipa3_get_stats_per_client_resp_msg_data_v01_ei[];
+ extern struct elem_info
+ ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei[];
+ extern struct elem_info
+ ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei[];
+ extern struct elem_info
+ ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[];
+
/**
* struct ipa3_rmnet_context - IPA rmnet context
* @ipa_rmnet_ssr: support modem SSR
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
index 6a5cb4891c02..746863732dc5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,6 +16,8 @@
#include <soc/qcom/msm_qmi_interface.h>
+#include "ipa_qmi_service.h"
+
/* Type Definitions */
static struct elem_info ipa3_hdr_tbl_info_type_data_v01_ei[] = {
{
@@ -1756,6 +1758,36 @@ struct elem_info ipa3_fltr_installed_notif_req_msg_data_v01_ei[] = {
rule_id),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ dst_pipe_id_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ dst_pipe_id_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_IPA_MAX_CLIENT_DST_PIPES_V01,
+ .elem_size = sizeof(uint32_t),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct ipa_fltr_installed_notif_req_msg_v01,
+ dst_pipe_id),
+ },
+ {
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
@@ -2923,3 +2955,435 @@ struct elem_info ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[] = {
.tlv_type = QMI_COMMON_TLV_TYPE,
},
};
+
+struct elem_info ipa3_per_client_stats_info_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ src_pipe_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_ul_ipv4_bytes),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_ul_ipv6_bytes),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_dl_ipv4_bytes),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint64_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_dl_ipv6_bytes),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_ul_ipv4_pkts),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_ul_ipv6_pkts),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_dl_ipv4_pkts),
+
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_per_client_stats_info_type_v01,
+ num_dl_ipv6_pkts),
+
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_ul_firewall_rule_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ul_firewall_rule_type_v01,
+ ip_type),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct ipa_filter_rule_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(struct ipa_ul_firewall_rule_type_v01,
+ filter_rule),
+ .ei_array = ipa3_filter_rule_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_ul_firewall_config_result_type_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ul_firewall_config_result_type_v01,
+ is_success),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ .offset = offsetof(
+ struct ipa_ul_firewall_config_result_type_v01,
+ mux_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_enable_per_client_stats_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ ipa_enable_per_client_stats_req_msg_v01,
+ enable_per_client_stats),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_enable_per_client_stats_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_enable_per_client_stats_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_get_stats_per_client_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_req_msg_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_req_msg_v01,
+ src_pipe_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_req_msg_v01,
+ reset_stats_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_req_msg_v01,
+ reset_stats),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info ipa3_get_stats_per_client_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_resp_msg_v01,
+ per_client_stats_list_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_resp_msg_v01,
+ per_client_stats_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_MAX_PER_CLIENTS_V01,
+ .elem_size =
+ sizeof(struct ipa_per_client_stats_info_type_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_get_stats_per_client_resp_msg_v01,
+ per_client_stats_list),
+ .ei_array =
+ ipa3_per_client_stats_info_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info
+ ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ firewall_rules_list_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_IPA_MAX_UL_FIREWALL_RULES_V01,
+ .elem_size = sizeof(struct ipa_ul_firewall_rule_type_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x1,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ firewall_rules_list),
+ .ei_array =
+ ipa3_ul_firewall_rule_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint32_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x2,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ mux_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ disable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ disable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ are_blacklist_filters_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01,
+ are_blacklist_filters),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info
+ ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info
+ ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(
+ struct ipa_ul_firewall_config_result_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct ipa_configure_ul_firewall_rules_ind_msg_v01,
+ result),
+ .ei_array =
+ ipa3_ul_firewall_config_result_type_data_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index bc7cc7060545..8e790c89ed13 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -1479,7 +1479,7 @@ int ipa3_put_rt_tbl(u32 rt_tbl_hdl)
{
struct ipa3_rt_tbl *entry;
enum ipa_ip_type ip = IPA_IP_MAX;
- int result;
+ int result = 0;
mutex_lock(&ipa3_ctx->lock);
entry = ipa3_id_find(rt_tbl_hdl);
@@ -1501,6 +1501,7 @@ int ipa3_put_rt_tbl(u32 rt_tbl_hdl)
ip = IPA_IP_v6;
else {
WARN_ON(1);
+ result = -EINVAL;
goto ret;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 039bc7da5153..8fbde6675070 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -702,6 +702,11 @@ static int ipa3_wwan_add_ul_flt_rule_to_ipa(void)
/* send ipa_fltr_installed_notif_req_msg_v01 to Q6*/
req->source_pipe_index =
ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_PROD);
+ if (req->source_pipe_index == IPA_EP_NOT_ALLOCATED) {
+ IPAWANERR("ep mapping failed\n");
+ retval = -EFAULT;
+ }
+
req->install_status = QMI_RESULT_SUCCESS_V01;
req->rule_id_valid = 1;
req->rule_id_len = rmnet_ipa3_ctx->num_q6_rules;
@@ -1947,7 +1952,9 @@ void ipa3_q6_deinitialize_rm(void)
if (ret < 0)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_Q6_PROD, ret);
- destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq);
+
+ if (rmnet_ipa3_ctx->rm_q6_wq)
+ destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq);
}
static void ipa3_wake_tx_queue(struct work_struct *work)
@@ -2287,7 +2294,10 @@ timer_init_err:
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_WWAN_0_PROD, ret);
create_rsrc_err:
- ipa3_q6_deinitialize_rm();
+
+ if (!atomic_read(&rmnet_ipa3_ctx->is_ssr))
+ ipa3_q6_deinitialize_rm();
+
q6_init_err:
free_netdev(dev);
rmnet_ipa3_ctx->wwan_priv = NULL;
diff --git a/drivers/platform/msm/mhi/mhi_iface.c b/drivers/platform/msm/mhi/mhi_iface.c
index a5936ea5a6aa..ce6d1257cfbb 100644
--- a/drivers/platform/msm/mhi/mhi_iface.c
+++ b/drivers/platform/msm/mhi/mhi_iface.c
@@ -509,7 +509,7 @@ static int __exit mhi_plat_remove(struct platform_device *pdev)
static int __init mhi_init(void)
{
- int r;
+ int r = -EAGAIN;
struct mhi_device_driver *mhi_dev_drv;
mhi_dev_drv = kmalloc(sizeof(*mhi_dev_drv), GFP_KERNEL);
diff --git a/drivers/platform/msm/mhi/mhi_init.c b/drivers/platform/msm/mhi/mhi_init.c
index b6edf707798b..a95579241524 100644
--- a/drivers/platform/msm/mhi/mhi_init.c
+++ b/drivers/platform/msm/mhi/mhi_init.c
@@ -141,7 +141,7 @@ int init_mhi_dev_mem(struct mhi_device_ctxt *mhi_dev_ctxt)
size_t mhi_mem_index = 0, ring_len;
void *dev_mem_start;
dma_addr_t dma_dev_mem_start;
- int i, r;
+ int i;
mhi_dev_ctxt->dev_space.dev_mem_len =
calculate_mhi_space(mhi_dev_ctxt);
@@ -244,7 +244,7 @@ err_ev_alloc:
mhi_dev_ctxt->dev_space.dev_mem_len,
mhi_dev_ctxt->dev_space.dev_mem_start,
mhi_dev_ctxt->dev_space.dma_dev_mem_start);
- return r;
+ return -EFAULT;
}
static int mhi_init_events(struct mhi_device_ctxt *mhi_dev_ctxt)
diff --git a/drivers/platform/msm/mhi/mhi_mmio_ops.c b/drivers/platform/msm/mhi/mhi_mmio_ops.c
index a991a2e68b34..18d0334ce1ec 100644
--- a/drivers/platform/msm/mhi/mhi_mmio_ops.c
+++ b/drivers/platform/msm/mhi/mhi_mmio_ops.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -109,7 +109,6 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
u64 pcie_dword_val = 0;
u32 pcie_word_val = 0;
u32 i = 0;
- int ret_val;
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"~~~ Initializing MMIO ~~~\n");
@@ -131,7 +130,7 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
if (mhi_dev_ctxt->core.mhi_ver != MHI_VERSION) {
mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Bad MMIO version, 0x%x\n", mhi_dev_ctxt->core.mhi_ver);
- return ret_val;
+ return -ENXIO;
}
/* Enable the channels */
diff --git a/drivers/platform/msm/mhi/mhi_sys.c b/drivers/platform/msm/mhi/mhi_sys.c
index 1d9282627d4e..b1434daf1f60 100644
--- a/drivers/platform/msm/mhi/mhi_sys.c
+++ b/drivers/platform/msm/mhi/mhi_sys.c
@@ -329,7 +329,7 @@ uintptr_t mhi_p2v_addr(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_RING_TYPE type,
u32 chan, uintptr_t phy_ptr)
{
- uintptr_t virtual_ptr;
+ uintptr_t virtual_ptr = 0;
struct mhi_ring_ctxt *cs = &mhi_dev_ctxt->dev_space.ring_ctxt;
switch (type) {
@@ -358,7 +358,7 @@ dma_addr_t mhi_v2p_addr(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_RING_TYPE type,
u32 chan, uintptr_t va_ptr)
{
- dma_addr_t phy_ptr;
+ dma_addr_t phy_ptr = 0;
struct mhi_ring_ctxt *cs = &mhi_dev_ctxt->dev_space.ring_ctxt;
switch (type) {
diff --git a/drivers/platform/msm/mhi_uci/mhi_uci.c b/drivers/platform/msm/mhi_uci/mhi_uci.c
index 5b50666d30a2..9c35eeb177d9 100644
--- a/drivers/platform/msm/mhi_uci/mhi_uci.c
+++ b/drivers/platform/msm/mhi_uci/mhi_uci.c
@@ -1030,7 +1030,7 @@ error_dts:
static void process_rs232_state(struct uci_client *ctrl_client,
struct mhi_result *result)
{
- struct rs232_ctrl_msg *rs232_pkt;
+ struct rs232_ctrl_msg *rs232_pkt = result->buf_addr;
struct uci_client *client = NULL;
struct mhi_uci_ctxt_t *uci_ctxt = ctrl_client->uci_ctxt;
u32 msg_id;
@@ -1051,7 +1051,6 @@ static void process_rs232_state(struct uci_client *ctrl_client,
sizeof(struct rs232_ctrl_msg));
goto error_size;
}
- rs232_pkt = result->buf_addr;
MHI_GET_CTRL_DEST_ID(CTRL_DEST_ID, rs232_pkt, chan);
for (i = 0; i < MHI_SOFTWARE_CLIENT_LIMIT; i++)
if (chan == uci_ctxt->client_handles[i].out_attr.chan_id ||
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 0dd6017245fa..cf99eb9c2ba0 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -297,6 +297,8 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(hw_current_max),
POWER_SUPPLY_ATTR(real_type),
POWER_SUPPLY_ATTR(pr_swap),
+ POWER_SUPPLY_ATTR(cc_step),
+ POWER_SUPPLY_ATTR(cc_step_sel),
/* Local extensions of type int64_t */
POWER_SUPPLY_ATTR(charge_counter_ext),
/* Properties of type `const char *' */
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index 947108c1410e..d3932ca1f338 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -49,7 +49,7 @@
#define SRAM_READ "fg_sram_read"
#define SRAM_WRITE "fg_sram_write"
#define PROFILE_LOAD "fg_profile_load"
-#define DELTA_SOC "fg_delta_soc"
+#define TTF_PRIMING "fg_ttf_priming"
/* Delta BSOC irq votable reasons */
#define DELTA_BSOC_IRQ_VOTER "fg_delta_bsoc_irq"
@@ -81,6 +81,8 @@
#define BATT_THERM_NUM_COEFFS 3
+#define MAX_CC_STEPS 20
+
/* Debug flag definitions */
enum fg_debug_flag {
FG_IRQ = BIT(0), /* Show interrupts */
@@ -228,6 +230,11 @@ enum esr_timer_config {
NUM_ESR_TIMERS,
};
+enum ttf_mode {
+ TTF_MODE_NORMAL = 0,
+ TTF_MODE_QNOVO,
+};
+
/* DT parameters for FG device */
struct fg_dt_props {
bool force_load_profile;
@@ -309,16 +316,31 @@ struct fg_irq_info {
};
struct fg_circ_buf {
- int arr[20];
+ int arr[10];
int size;
int head;
};
+struct fg_cc_step_data {
+ int arr[MAX_CC_STEPS];
+ int sel;
+};
+
struct fg_pt {
s32 x;
s32 y;
};
+struct ttf {
+ struct fg_circ_buf ibatt;
+ struct fg_circ_buf vbatt;
+ struct fg_cc_step_data cc_step;
+ struct mutex lock;
+ int mode;
+ int last_ttf;
+ s64 last_ms;
+};
+
static const struct fg_pt fg_ln_table[] = {
{ 1000, 0 },
{ 2000, 693 },
@@ -358,6 +380,7 @@ struct fg_chip {
struct power_supply *usb_psy;
struct power_supply *dc_psy;
struct power_supply *parallel_psy;
+ struct power_supply *pc_port_psy;
struct iio_channel *batt_id_chan;
struct iio_channel *die_temp_chan;
struct fg_memif *sram;
@@ -374,9 +397,9 @@ struct fg_chip {
struct fg_cyc_ctr_data cyc_ctr;
struct notifier_block nb;
struct fg_cap_learning cl;
+ struct ttf ttf;
struct mutex bus_lock;
struct mutex sram_rw_lock;
- struct mutex batt_avg_lock;
struct mutex charge_full_lock;
u32 batt_soc_base;
u32 batt_info_base;
@@ -389,6 +412,7 @@ struct fg_chip {
int prev_charge_status;
int charge_done;
int charge_type;
+ int online_status;
int last_soc;
int last_batt_temp;
int health;
@@ -413,11 +437,8 @@ struct fg_chip {
struct completion soc_ready;
struct delayed_work profile_load_work;
struct work_struct status_change_work;
- struct work_struct cycle_count_work;
- struct delayed_work batt_avg_work;
+ struct delayed_work ttf_work;
struct delayed_work sram_dump_work;
- struct fg_circ_buf ibatt_circ_buf;
- struct fg_circ_buf vbatt_circ_buf;
};
/* Debugfs data structures are below */
@@ -475,5 +496,6 @@ extern bool is_qnovo_en(struct fg_chip *chip);
extern void fg_circ_buf_add(struct fg_circ_buf *, int);
extern void fg_circ_buf_clr(struct fg_circ_buf *);
extern int fg_circ_buf_avg(struct fg_circ_buf *, int *);
+extern int fg_circ_buf_median(struct fg_circ_buf *, int *);
extern int fg_lerp(const struct fg_pt *, size_t, s32, s32 *);
#endif
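
MAX_CC_STEPS and the new fg_cc_step_data above carve the SOC range into equal steps so Qnovo-style charging can expose a per-step current limit; the gen3 TTF code later in this patch walks these steps to sum a constant-current time estimate. The sketch below is a simplified, standalone version of that summation, using made-up capacity, current, and step values purely to show the arithmetic (the driver additionally converts its uA step values to mA).

#include <stdio.h>

#define MAX_CC_STEPS 20

static int min_int(int a, int b) { return a < b ? a : b; }
static int max_int(int a, int b) { return a > b ? a : b; }

/*
 * Sum a constant-current time estimate over the remaining SOC steps.
 * act_cap_mah: usable capacity in mAh; msoc: current SOC in percent;
 * ibatt_avg_ma: measured average charge current in mA;
 * cc_step_ma[]: per-step current limits in mA.
 */
static long cc_time_estimate(int act_cap_mah, int msoc, int ibatt_avg_ma,
			     const int cc_step_ma[MAX_CC_STEPS])
{
	int soc_per_step = 100 / MAX_CC_STEPS;
	long t_total = 0;
	int i;

	for (i = msoc / soc_per_step; i < MAX_CC_STEPS - 1; i++) {
		int msoc_this = (i == msoc / soc_per_step) ? msoc : i * soc_per_step;
		int msoc_next = (i + 1) * soc_per_step;
		/* honour the per-step limit and derate by 15% for discharge pulses */
		int ibatt_ma = min_int(cc_step_ma[i], ibatt_avg_ma) * 85 / 100;
		int divisor = max_int(100, ibatt_ma * 100);

		t_total += (long)act_cap_mah * (msoc_next - msoc_this) * 3600 / divisor;
	}
	return t_total;
}

int main(void)
{
	int steps[MAX_CC_STEPS];
	int i;

	for (i = 0; i < MAX_CC_STEPS; i++)
		steps[i] = 3000 - i * 100;	/* made-up tapering current profile */

	printf("estimated CC time: %ld s\n", cc_time_estimate(3000, 42, 2500, steps));
	return 0;
}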
diff --git a/drivers/power/supply/qcom/fg-util.c b/drivers/power/supply/qcom/fg-util.c
index 9635044e02a5..0cb1dea7113b 100644
--- a/drivers/power/supply/qcom/fg-util.c
+++ b/drivers/power/supply/qcom/fg-util.c
@@ -10,6 +10,7 @@
* GNU General Public License for more details.
*/
+#include <linux/sort.h>
#include "fg-core.h"
void fg_circ_buf_add(struct fg_circ_buf *buf, int val)
@@ -39,6 +40,39 @@ int fg_circ_buf_avg(struct fg_circ_buf *buf, int *avg)
return 0;
}
+static int cmp_int(const void *a, const void *b)
+{
+ return *(int *)a - *(int *)b;
+}
+
+int fg_circ_buf_median(struct fg_circ_buf *buf, int *median)
+{
+ int *temp;
+
+ if (buf->size == 0)
+ return -ENODATA;
+
+ if (buf->size == 1) {
+ *median = buf->arr[0];
+ return 0;
+ }
+
+ temp = kmalloc_array(buf->size, sizeof(*temp), GFP_KERNEL);
+ if (!temp)
+ return -ENOMEM;
+
+ memcpy(temp, buf->arr, buf->size * sizeof(*temp));
+ sort(temp, buf->size, sizeof(*temp), cmp_int, NULL);
+
+ if (buf->size % 2)
+ *median = temp[buf->size / 2];
+ else
+ *median = (temp[buf->size / 2 - 1] + temp[buf->size / 2]) / 2;
+
+ kfree(temp);
+ return 0;
+}
+
int fg_lerp(const struct fg_pt *pts, size_t tablesize, s32 input, s32 *output)
{
int i;
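
fg_circ_buf_median() above sorts a copy of the buffered samples and returns the middle element (or the mean of the two middle elements for an even count), which rejects outliers better than the running average it replaces in the TTF path. The sketch below is the same computation as a standalone userspace routine, using qsort() in place of the kernel's sort() and made-up sample data.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int cmp_int(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

/* Median of 'size' samples; returns -1 on empty input or allocation failure, 0 on success. */
static int buf_median(const int *arr, int size, int *median)
{
	int *tmp;

	if (size == 0)
		return -1;

	tmp = malloc(size * sizeof(*tmp));
	if (!tmp)
		return -1;

	memcpy(tmp, arr, size * sizeof(*tmp));
	qsort(tmp, size, sizeof(*tmp), cmp_int);

	if (size % 2)
		*median = tmp[size / 2];
	else
		*median = (tmp[size / 2 - 1] + tmp[size / 2]) / 2;

	free(tmp);
	return 0;
}

int main(void)
{
	/* made-up battery-current samples in uA, including one outlier */
	int ibatt[] = { -812000, -805000, -799000, -1500000, -808000 };
	int median;

	if (!buf_median(ibatt, sizeof(ibatt) / sizeof(ibatt[0]), &median))
		printf("median ibatt = %d uA\n", median);
	return 0;
}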
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index 265f98288745..cb2c3888ddd7 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -75,6 +75,8 @@
#define ESR_TIMER_CHG_MAX_OFFSET 0
#define ESR_TIMER_CHG_INIT_WORD 18
#define ESR_TIMER_CHG_INIT_OFFSET 2
+#define ESR_EXTRACTION_ENABLE_WORD 19
+#define ESR_EXTRACTION_ENABLE_OFFSET 0
#define PROFILE_LOAD_WORD 24
#define PROFILE_LOAD_OFFSET 0
#define ESR_RSLOW_DISCHG_WORD 34
@@ -1171,6 +1173,42 @@ static bool batt_psy_initialized(struct fg_chip *chip)
return true;
}
+static bool usb_psy_initialized(struct fg_chip *chip)
+{
+ if (chip->usb_psy)
+ return true;
+
+ chip->usb_psy = power_supply_get_by_name("usb");
+ if (!chip->usb_psy)
+ return false;
+
+ return true;
+}
+
+static bool pc_port_psy_initialized(struct fg_chip *chip)
+{
+ if (chip->pc_port_psy)
+ return true;
+
+ chip->pc_port_psy = power_supply_get_by_name("pc_port");
+ if (!chip->pc_port_psy)
+ return false;
+
+ return true;
+}
+
+static bool dc_psy_initialized(struct fg_chip *chip)
+{
+ if (chip->dc_psy)
+ return true;
+
+ chip->dc_psy = power_supply_get_by_name("dc");
+ if (!chip->dc_psy)
+ return false;
+
+ return true;
+}
+
static bool is_parallel_charger_available(struct fg_chip *chip)
{
if (!chip->parallel_psy)
@@ -2131,102 +2169,67 @@ static int fg_esr_timer_config(struct fg_chip *chip, bool sleep)
return 0;
}
-static void fg_batt_avg_update(struct fg_chip *chip)
-{
- if (chip->charge_status == chip->prev_charge_status)
- return;
-
- cancel_delayed_work_sync(&chip->batt_avg_work);
- fg_circ_buf_clr(&chip->ibatt_circ_buf);
- fg_circ_buf_clr(&chip->vbatt_circ_buf);
-
- if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING ||
- chip->charge_status == POWER_SUPPLY_STATUS_DISCHARGING)
- schedule_delayed_work(&chip->batt_avg_work,
- msecs_to_jiffies(2000));
-}
-
-static void status_change_work(struct work_struct *work)
+static void fg_ttf_update(struct fg_chip *chip)
{
- struct fg_chip *chip = container_of(work,
- struct fg_chip, status_change_work);
+ int rc;
+ int delay_ms;
union power_supply_propval prop = {0, };
- int rc, batt_temp;
+ int online = 0;
- if (!batt_psy_initialized(chip)) {
- fg_dbg(chip, FG_STATUS, "Charger not available?!\n");
- goto out;
- }
+ if (usb_psy_initialized(chip)) {
+ rc = power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_ONLINE, &prop);
+ if (rc < 0) {
+ pr_err("Couldn't read usb ONLINE prop rc=%d\n", rc);
+ return;
+ }
- rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS,
- &prop);
- if (rc < 0) {
- pr_err("Error in getting charging status, rc=%d\n", rc);
- goto out;
+ online = online || prop.intval;
}
- chip->prev_charge_status = chip->charge_status;
- chip->charge_status = prop.intval;
- rc = power_supply_get_property(chip->batt_psy,
- POWER_SUPPLY_PROP_CHARGE_TYPE, &prop);
- if (rc < 0) {
- pr_err("Error in getting charge type, rc=%d\n", rc);
- goto out;
- }
+ if (pc_port_psy_initialized(chip)) {
+ rc = power_supply_get_property(chip->pc_port_psy,
+ POWER_SUPPLY_PROP_ONLINE, &prop);
+ if (rc < 0) {
+ pr_err("Couldn't read pc_port ONLINE prop rc=%d\n", rc);
+ return;
+ }
- chip->charge_type = prop.intval;
- rc = power_supply_get_property(chip->batt_psy,
- POWER_SUPPLY_PROP_CHARGE_DONE, &prop);
- if (rc < 0) {
- pr_err("Error in getting charge_done, rc=%d\n", rc);
- goto out;
+ online = online || prop.intval;
}
- chip->charge_done = prop.intval;
- if (chip->cyc_ctr.en)
- schedule_work(&chip->cycle_count_work);
-
- fg_cap_learning_update(chip);
-
- rc = fg_charge_full_update(chip);
- if (rc < 0)
- pr_err("Error in charge_full_update, rc=%d\n", rc);
-
- rc = fg_adjust_recharge_soc(chip);
- if (rc < 0)
- pr_err("Error in adjusting recharge_soc, rc=%d\n", rc);
-
- rc = fg_adjust_ki_coeff_dischg(chip);
- if (rc < 0)
- pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc);
-
- rc = fg_esr_fcc_config(chip);
- if (rc < 0)
- pr_err("Error in adjusting FCC for ESR, rc=%d\n", rc);
-
- rc = fg_esr_timer_config(chip, false);
- if (rc < 0)
- pr_err("Error in configuring ESR timer, rc=%d\n", rc);
-
- rc = fg_get_battery_temp(chip, &batt_temp);
- if (!rc) {
- rc = fg_slope_limit_config(chip, batt_temp);
- if (rc < 0)
- pr_err("Error in configuring slope limiter rc:%d\n",
- rc);
+ if (dc_psy_initialized(chip)) {
+ rc = power_supply_get_property(chip->dc_psy,
+ POWER_SUPPLY_PROP_ONLINE, &prop);
+ if (rc < 0) {
+ pr_err("Couldn't read dc ONLINE prop rc=%d\n", rc);
+ return;
+ }
- rc = fg_adjust_ki_coeff_full_soc(chip, batt_temp);
- if (rc < 0)
- pr_err("Error in configuring ki_coeff_full_soc rc:%d\n",
- rc);
+ online = online || prop.intval;
}
- fg_batt_avg_update(chip);
-out:
- fg_dbg(chip, FG_POWER_SUPPLY, "charge_status:%d charge_type:%d charge_done:%d\n",
- chip->charge_status, chip->charge_type, chip->charge_done);
- pm_relax(chip->dev);
+ if (chip->online_status == online)
+ return;
+
+ chip->online_status = online;
+ if (online)
+ /* wait 35 seconds for the input to settle */
+ delay_ms = 35000;
+ else
+ /* wait 5 seconds for current to settle during discharge */
+ delay_ms = 5000;
+
+ vote(chip->awake_votable, TTF_PRIMING, true, 0);
+ cancel_delayed_work_sync(&chip->ttf_work);
+ mutex_lock(&chip->ttf.lock);
+ fg_circ_buf_clr(&chip->ttf.ibatt);
+ fg_circ_buf_clr(&chip->ttf.vbatt);
+ chip->ttf.last_ttf = 0;
+ chip->ttf.last_ms = 0;
+ mutex_unlock(&chip->ttf.lock);
+ schedule_delayed_work(&chip->ttf_work, msecs_to_jiffies(delay_ms));
}
static void restore_cycle_counter(struct fg_chip *chip)
@@ -2234,6 +2237,9 @@ static void restore_cycle_counter(struct fg_chip *chip)
int rc = 0, i;
u8 data[2];
+ if (!chip->cyc_ctr.en)
+ return;
+
mutex_lock(&chip->cyc_ctr.lock);
for (i = 0; i < BUCKET_COUNT; i++) {
rc = fg_sram_read(chip, CYCLE_COUNT_WORD + (i / 2),
@@ -2287,20 +2293,25 @@ static int fg_inc_store_cycle_ctr(struct fg_chip *chip, int bucket)
rc = fg_sram_write(chip, CYCLE_COUNT_WORD + (bucket / 2),
CYCLE_COUNT_OFFSET + (bucket % 2) * 2, data, 2,
FG_IMA_DEFAULT);
- if (rc < 0)
+ if (rc < 0) {
pr_err("failed to write BATT_CYCLE[%d] rc=%d\n",
bucket, rc);
- else
- chip->cyc_ctr.count[bucket] = cyc_count;
+ return rc;
+ }
+
+ chip->cyc_ctr.count[bucket] = cyc_count;
+ fg_dbg(chip, FG_STATUS, "Stored count %d in bucket %d\n", cyc_count,
+ bucket);
+
return rc;
}
-static void cycle_count_work(struct work_struct *work)
+static void fg_cycle_counter_update(struct fg_chip *chip)
{
int rc = 0, bucket, i, batt_soc;
- struct fg_chip *chip = container_of(work,
- struct fg_chip,
- cycle_count_work);
+
+ if (!chip->cyc_ctr.en)
+ return;
mutex_lock(&chip->cyc_ctr.lock);
rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &batt_soc);
@@ -2312,45 +2323,30 @@ static void cycle_count_work(struct work_struct *work)
/* We need only the most significant byte here */
batt_soc = (u32)batt_soc >> 24;
- if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING) {
- /* Find out which bucket the SOC falls in */
- bucket = batt_soc / BUCKET_SOC_PCT;
- pr_debug("batt_soc: %d bucket: %d\n", batt_soc, bucket);
+ /* Find out which bucket the SOC falls in */
+ bucket = batt_soc / BUCKET_SOC_PCT;
- /*
- * If we've started counting for the previous bucket,
- * then store the counter for that bucket if the
- * counter for current bucket is getting started.
- */
- if (bucket > 0 && chip->cyc_ctr.started[bucket - 1] &&
- !chip->cyc_ctr.started[bucket]) {
- rc = fg_inc_store_cycle_ctr(chip, bucket - 1);
- if (rc < 0) {
- pr_err("Error in storing cycle_ctr rc: %d\n",
- rc);
- goto out;
- } else {
- chip->cyc_ctr.started[bucket - 1] = false;
- chip->cyc_ctr.last_soc[bucket - 1] = 0;
- }
- }
+ if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING) {
if (!chip->cyc_ctr.started[bucket]) {
chip->cyc_ctr.started[bucket] = true;
chip->cyc_ctr.last_soc[bucket] = batt_soc;
}
- } else {
+ } else if (chip->charge_done || !is_input_present(chip)) {
for (i = 0; i < BUCKET_COUNT; i++) {
if (chip->cyc_ctr.started[i] &&
- batt_soc > chip->cyc_ctr.last_soc[i]) {
+ batt_soc > chip->cyc_ctr.last_soc[i] + 2) {
rc = fg_inc_store_cycle_ctr(chip, i);
if (rc < 0)
pr_err("Error in storing cycle_ctr rc: %d\n",
rc);
chip->cyc_ctr.last_soc[i] = 0;
+ chip->cyc_ctr.started[i] = false;
}
- chip->cyc_ctr.started[i] = false;
}
}
+
+ fg_dbg(chip, FG_STATUS, "batt_soc: %d bucket: %d chg_status: %d\n",
+ batt_soc, bucket, chip->charge_status);
out:
mutex_unlock(&chip->cyc_ctr.lock);
}
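
The reworked cycle counter above splits the raw SOC range into BUCKET_COUNT buckets of BUCKET_SOC_PCT units each: a bucket is marked "started" when charging begins inside it, and its counter is only committed once charging completes (or input is removed) with the SOC more than two raw units above where the bucket started. The runnable sketch below models that started/commit state machine; the bucket count and width are assumed values for illustration, since the actual defines are outside this hunk.

#include <stdbool.h>
#include <stdio.h>

#define BUCKET_COUNT   8	/* assumed value for illustration */
#define BUCKET_SOC_PCT 32	/* raw SOC units per bucket, assumed */

struct cyc_ctr {
	int  count[BUCKET_COUNT];
	int  last_soc[BUCKET_COUNT];
	bool started[BUCKET_COUNT];
};

/* Called while charging: mark the bucket the raw SOC currently falls in. */
static void cyc_update_charging(struct cyc_ctr *c, int batt_soc)
{
	int bucket = batt_soc / BUCKET_SOC_PCT;

	if (!c->started[bucket]) {
		c->started[bucket] = true;
		c->last_soc[bucket] = batt_soc;
	}
}

/* Called when charging is done or input is removed: commit qualifying buckets. */
static void cyc_update_done(struct cyc_ctr *c, int batt_soc)
{
	int i;

	for (i = 0; i < BUCKET_COUNT; i++) {
		if (c->started[i] && batt_soc > c->last_soc[i] + 2) {
			c->count[i]++;	/* the driver also persists this to SRAM */
			c->last_soc[i] = 0;
			c->started[i] = false;
		}
	}
}

int main(void)
{
	struct cyc_ctr c = { {0}, {0}, {false} };

	cyc_update_charging(&c, 100);	/* charging starts in bucket 3 */
	cyc_update_done(&c, 140);	/* SOC rose well past the start point */
	printf("bucket 3 count = %d\n", c.count[3]);
	return 0;
}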
@@ -2371,6 +2367,83 @@ static int fg_get_cycle_count(struct fg_chip *chip)
return count;
}
+static void status_change_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip, status_change_work);
+ union power_supply_propval prop = {0, };
+ int rc, batt_temp;
+
+ if (!batt_psy_initialized(chip)) {
+ fg_dbg(chip, FG_STATUS, "Charger not available?!\n");
+ goto out;
+ }
+
+ rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS,
+ &prop);
+ if (rc < 0) {
+ pr_err("Error in getting charging status, rc=%d\n", rc);
+ goto out;
+ }
+
+ chip->prev_charge_status = chip->charge_status;
+ chip->charge_status = prop.intval;
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CHARGE_TYPE, &prop);
+ if (rc < 0) {
+ pr_err("Error in getting charge type, rc=%d\n", rc);
+ goto out;
+ }
+
+ chip->charge_type = prop.intval;
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CHARGE_DONE, &prop);
+ if (rc < 0) {
+ pr_err("Error in getting charge_done, rc=%d\n", rc);
+ goto out;
+ }
+
+ chip->charge_done = prop.intval;
+ fg_cycle_counter_update(chip);
+ fg_cap_learning_update(chip);
+
+ rc = fg_charge_full_update(chip);
+ if (rc < 0)
+ pr_err("Error in charge_full_update, rc=%d\n", rc);
+
+ rc = fg_adjust_recharge_soc(chip);
+ if (rc < 0)
+ pr_err("Error in adjusting recharge_soc, rc=%d\n", rc);
+
+ rc = fg_adjust_ki_coeff_dischg(chip);
+ if (rc < 0)
+ pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc);
+
+ rc = fg_esr_fcc_config(chip);
+ if (rc < 0)
+ pr_err("Error in adjusting FCC for ESR, rc=%d\n", rc);
+
+ rc = fg_get_battery_temp(chip, &batt_temp);
+ if (!rc) {
+ rc = fg_slope_limit_config(chip, batt_temp);
+ if (rc < 0)
+ pr_err("Error in configuring slope limiter rc:%d\n",
+ rc);
+
+ rc = fg_adjust_ki_coeff_full_soc(chip, batt_temp);
+ if (rc < 0)
+ pr_err("Error in configuring ki_coeff_full_soc rc:%d\n",
+ rc);
+ }
+
+ fg_ttf_update(chip);
+
+out:
+ fg_dbg(chip, FG_POWER_SUPPLY, "charge_status:%d charge_type:%d charge_done:%d\n",
+ chip->charge_status, chip->charge_type, chip->charge_done);
+ pm_relax(chip->dev);
+}
+
static int fg_bp_params_config(struct fg_chip *chip)
{
int rc = 0;
@@ -2731,45 +2804,19 @@ static struct kernel_param_ops fg_restart_ops = {
module_param_cb(restart, &fg_restart_ops, &fg_restart, 0644);
-#define BATT_AVG_POLL_PERIOD_MS 10000
-static void batt_avg_work(struct work_struct *work)
-{
- struct fg_chip *chip = container_of(work, struct fg_chip,
- batt_avg_work.work);
- int rc, ibatt_now, vbatt_now;
-
- mutex_lock(&chip->batt_avg_lock);
- rc = fg_get_battery_current(chip, &ibatt_now);
- if (rc < 0) {
- pr_err("failed to get battery current, rc=%d\n", rc);
- goto reschedule;
- }
-
- rc = fg_get_battery_voltage(chip, &vbatt_now);
- if (rc < 0) {
- pr_err("failed to get battery voltage, rc=%d\n", rc);
- goto reschedule;
- }
-
- fg_circ_buf_add(&chip->ibatt_circ_buf, ibatt_now);
- fg_circ_buf_add(&chip->vbatt_circ_buf, vbatt_now);
-
-reschedule:
- mutex_unlock(&chip->batt_avg_lock);
- schedule_delayed_work(&chip->batt_avg_work,
- msecs_to_jiffies(BATT_AVG_POLL_PERIOD_MS));
-}
-
#define HOURS_TO_SECONDS 3600
#define OCV_SLOPE_UV 10869
#define MILLI_UNIT 1000
#define MICRO_UNIT 1000000
-static int fg_get_time_to_full(struct fg_chip *chip, int *val)
+#define NANO_UNIT 1000000000
+static int fg_get_time_to_full_locked(struct fg_chip *chip, int *val)
{
- int rc, ibatt_avg, vbatt_avg, rbatt, msoc, ocv_cc2cv, full_soc,
- act_cap_uah;
- s32 i_cc2cv, soc_cc2cv, ln_val, centi_tau_scale;
- s64 t_predicted_cc = 0, t_predicted_cv = 0;
+ int rc, ibatt_avg, vbatt_avg, rbatt, msoc, full_soc, act_cap_mah,
+ i_cc2cv, soc_cc2cv, tau, divisor, iterm, ttf_mode,
+ i, soc_per_step, msoc_this_step, msoc_next_step,
+ ibatt_this_step, t_predicted_this_step, ttf_slope,
+ t_predicted_cv, t_predicted = 0;
+ s64 delta_ms;
if (chip->bp.float_volt_uv <= 0) {
pr_err("battery profile is not loaded\n");
@@ -2788,48 +2835,53 @@ static int fg_get_time_to_full(struct fg_chip *chip, int *val)
}
fg_dbg(chip, FG_TTF, "msoc=%d\n", msoc);
+ /* the battery is considered full if the SOC is 100% */
if (msoc >= 100) {
*val = 0;
return 0;
}
- mutex_lock(&chip->batt_avg_lock);
- rc = fg_circ_buf_avg(&chip->ibatt_circ_buf, &ibatt_avg);
- if (rc < 0) {
- /* try to get instantaneous current */
- rc = fg_get_battery_current(chip, &ibatt_avg);
- if (rc < 0) {
- mutex_unlock(&chip->batt_avg_lock);
- pr_err("failed to get battery current, rc=%d\n", rc);
- return rc;
- }
+ if (is_qnovo_en(chip))
+ ttf_mode = TTF_MODE_QNOVO;
+ else
+ ttf_mode = TTF_MODE_NORMAL;
+
+ /* when switching TTF algorithms the TTF needs to be reset */
+ if (chip->ttf.mode != ttf_mode) {
+ fg_circ_buf_clr(&chip->ttf.ibatt);
+ fg_circ_buf_clr(&chip->ttf.vbatt);
+ chip->ttf.last_ttf = 0;
+ chip->ttf.last_ms = 0;
+ chip->ttf.mode = ttf_mode;
}
- rc = fg_circ_buf_avg(&chip->vbatt_circ_buf, &vbatt_avg);
+ /* at least 10 samples are required to produce a stable IBATT */
+ if (chip->ttf.ibatt.size < 10) {
+ *val = -1;
+ return 0;
+ }
+
+ rc = fg_circ_buf_median(&chip->ttf.ibatt, &ibatt_avg);
if (rc < 0) {
- /* try to get instantaneous voltage */
- rc = fg_get_battery_voltage(chip, &vbatt_avg);
- if (rc < 0) {
- mutex_unlock(&chip->batt_avg_lock);
- pr_err("failed to get battery voltage, rc=%d\n", rc);
- return rc;
- }
+ pr_err("failed to get IBATT AVG rc=%d\n", rc);
+ return rc;
}
- mutex_unlock(&chip->batt_avg_lock);
- fg_dbg(chip, FG_TTF, "vbatt_avg=%d\n", vbatt_avg);
+ rc = fg_circ_buf_median(&chip->ttf.vbatt, &vbatt_avg);
+ if (rc < 0) {
+ pr_err("failed to get VBATT AVG rc=%d\n", rc);
+ return rc;
+ }
- /* clamp ibatt_avg to -150mA */
- if (ibatt_avg > -150000)
- ibatt_avg = -150000;
- fg_dbg(chip, FG_TTF, "ibatt_avg=%d\n", ibatt_avg);
+ ibatt_avg = -ibatt_avg / MILLI_UNIT;
+ vbatt_avg /= MILLI_UNIT;
- /* reverse polarity to be consistent with unsigned current settings */
- ibatt_avg = abs(ibatt_avg);
+ /* clamp ibatt_avg to iterm */
+ if (ibatt_avg < abs(chip->dt.sys_term_curr_ma))
+ ibatt_avg = abs(chip->dt.sys_term_curr_ma);
- /* estimated battery current at the CC to CV transition */
- i_cc2cv = div_s64((s64)ibatt_avg * vbatt_avg, chip->bp.float_volt_uv);
- fg_dbg(chip, FG_TTF, "i_cc2cv=%d\n", i_cc2cv);
+ fg_dbg(chip, FG_TTF, "ibatt_avg=%d\n", ibatt_avg);
+ fg_dbg(chip, FG_TTF, "vbatt_avg=%d\n", vbatt_avg);
rc = fg_get_battery_resistance(chip, &rbatt);
if (rc < 0) {
@@ -2837,19 +2889,14 @@ static int fg_get_time_to_full(struct fg_chip *chip, int *val)
return rc;
}
- /* clamp rbatt to 50mOhms */
- if (rbatt < 50000)
- rbatt = 50000;
-
+ rbatt /= MILLI_UNIT;
fg_dbg(chip, FG_TTF, "rbatt=%d\n", rbatt);
- rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_uah);
+ rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_mah);
if (rc < 0) {
pr_err("failed to get ACT_BATT_CAP rc=%d\n", rc);
return rc;
}
- act_cap_uah *= MILLI_UNIT;
- fg_dbg(chip, FG_TTF, "actual_capacity_uah=%d\n", act_cap_uah);
rc = fg_get_sram_prop(chip, FG_SRAM_FULL_SOC, &full_soc);
if (rc < 0) {
@@ -2858,69 +2905,148 @@ static int fg_get_time_to_full(struct fg_chip *chip, int *val)
}
full_soc = DIV_ROUND_CLOSEST(((u16)full_soc >> 8) * FULL_CAPACITY,
FULL_SOC_RAW);
- fg_dbg(chip, FG_TTF, "full_soc=%d\n", full_soc);
+ act_cap_mah = full_soc * act_cap_mah / 100;
+ fg_dbg(chip, FG_TTF, "act_cap_mah=%d\n", act_cap_mah);
+
+ /* estimated battery current at the CC to CV transition */
+ switch (chip->ttf.mode) {
+ case TTF_MODE_NORMAL:
+ i_cc2cv = ibatt_avg * vbatt_avg /
+ max(MILLI_UNIT, chip->bp.float_volt_uv / MILLI_UNIT);
+ break;
+ case TTF_MODE_QNOVO:
+ i_cc2cv = min(
+ chip->ttf.cc_step.arr[MAX_CC_STEPS - 1] / MILLI_UNIT,
+ ibatt_avg * vbatt_avg /
+ max(MILLI_UNIT, chip->bp.float_volt_uv / MILLI_UNIT));
+ break;
+ default:
+ pr_err("TTF mode %d is not supported\n", chip->ttf.mode);
+ break;
+ }
+ fg_dbg(chip, FG_TTF, "i_cc2cv=%d\n", i_cc2cv);
/* if we are already in CV state then we can skip estimating CC */
if (chip->charge_type == POWER_SUPPLY_CHARGE_TYPE_TAPER)
- goto skip_cc_estimate;
-
- /* if the charger is current limited then use power approximation */
- if (ibatt_avg > chip->bp.fastchg_curr_ma * MILLI_UNIT - 50000)
- ocv_cc2cv = div_s64((s64)rbatt * ibatt_avg, MICRO_UNIT);
- else
- ocv_cc2cv = div_s64((s64)rbatt * i_cc2cv, MICRO_UNIT);
- ocv_cc2cv = chip->bp.float_volt_uv - ocv_cc2cv;
- fg_dbg(chip, FG_TTF, "ocv_cc2cv=%d\n", ocv_cc2cv);
+ goto cv_estimate;
- soc_cc2cv = div_s64(chip->bp.float_volt_uv - ocv_cc2cv, OCV_SLOPE_UV);
/* estimated SOC at the CC to CV transition */
+ soc_cc2cv = DIV_ROUND_CLOSEST(rbatt * i_cc2cv, OCV_SLOPE_UV);
soc_cc2cv = 100 - soc_cc2cv;
fg_dbg(chip, FG_TTF, "soc_cc2cv=%d\n", soc_cc2cv);
- /* the esimated SOC may be lower than the current SOC */
- if (soc_cc2cv - msoc <= 0)
- goto skip_cc_estimate;
+ switch (chip->ttf.mode) {
+ case TTF_MODE_NORMAL:
+ if (soc_cc2cv - msoc <= 0)
+ goto cv_estimate;
+
+ divisor = max(100, (ibatt_avg + i_cc2cv) / 2 * 100);
+ t_predicted = div_s64((s64)act_cap_mah * (soc_cc2cv - msoc) *
+ HOURS_TO_SECONDS, divisor);
+ break;
+ case TTF_MODE_QNOVO:
+ soc_per_step = 100 / MAX_CC_STEPS;
+ for (i = msoc / soc_per_step; i < MAX_CC_STEPS - 1; ++i) {
+ msoc_next_step = (i + 1) * soc_per_step;
+ if (i == msoc / soc_per_step)
+ msoc_this_step = msoc;
+ else
+ msoc_this_step = i * soc_per_step;
+
+ /* scale ibatt by 85% to account for discharge pulses */
+ ibatt_this_step = min(
+ chip->ttf.cc_step.arr[i] / MILLI_UNIT,
+ ibatt_avg) * 85 / 100;
+ divisor = max(100, ibatt_this_step * 100);
+ t_predicted_this_step = div_s64((s64)act_cap_mah *
+ (msoc_next_step - msoc_this_step) *
+ HOURS_TO_SECONDS, divisor);
+ t_predicted += t_predicted_this_step;
+ fg_dbg(chip, FG_TTF, "[%d, %d] ma=%d t=%d\n",
+ msoc_this_step, msoc_next_step,
+ ibatt_this_step, t_predicted_this_step);
+ }
+ break;
+ default:
+ pr_err("TTF mode %d is not supported\n", chip->ttf.mode);
+ break;
+ }
- t_predicted_cc = div_s64((s64)full_soc * act_cap_uah, 100);
- t_predicted_cc = div_s64(t_predicted_cc * (soc_cc2cv - msoc), 100);
- t_predicted_cc *= HOURS_TO_SECONDS;
- t_predicted_cc = div_s64(t_predicted_cc, (ibatt_avg + i_cc2cv) / 2);
+cv_estimate:
+ fg_dbg(chip, FG_TTF, "t_predicted_cc=%d\n", t_predicted);
-skip_cc_estimate:
- fg_dbg(chip, FG_TTF, "t_predicted_cc=%lld\n", t_predicted_cc);
+ iterm = max(100, abs(chip->dt.sys_term_curr_ma) + 200);
+ fg_dbg(chip, FG_TTF, "iterm=%d\n", iterm);
- /* CV estimate starts here */
- if (chip->charge_type >= POWER_SUPPLY_CHARGE_TYPE_TAPER)
- ln_val = ibatt_avg / (abs(chip->dt.sys_term_curr_ma) + 200);
+ if (chip->charge_type == POWER_SUPPLY_CHARGE_TYPE_TAPER)
+ tau = max(MILLI_UNIT, ibatt_avg * MILLI_UNIT / iterm);
else
- ln_val = i_cc2cv / (abs(chip->dt.sys_term_curr_ma) + 200);
+ tau = max(MILLI_UNIT, i_cc2cv * MILLI_UNIT / iterm);
- if (msoc < 95)
- centi_tau_scale = 100;
- else
- centi_tau_scale = 20 * (100 - msoc);
-
- fg_dbg(chip, FG_TTF, "ln_in=%d\n", ln_val);
- rc = fg_lerp(fg_ln_table, ARRAY_SIZE(fg_ln_table), ln_val, &ln_val);
- fg_dbg(chip, FG_TTF, "ln_out=%d\n", ln_val);
- t_predicted_cv = div_s64((s64)act_cap_uah * rbatt, MICRO_UNIT);
- t_predicted_cv = div_s64(t_predicted_cv * centi_tau_scale, 100);
- t_predicted_cv = div_s64(t_predicted_cv * ln_val, MILLI_UNIT);
- t_predicted_cv = div_s64(t_predicted_cv * HOURS_TO_SECONDS, MICRO_UNIT);
- fg_dbg(chip, FG_TTF, "t_predicted_cv=%lld\n", t_predicted_cv);
- *val = t_predicted_cc + t_predicted_cv;
+ rc = fg_lerp(fg_ln_table, ARRAY_SIZE(fg_ln_table), tau, &tau);
+ if (rc < 0) {
+ pr_err("failed to interpolate tau rc=%d\n", rc);
+ return rc;
+ }
+
+ /* tau is scaled linearly from 95% to 100% SOC */
+ if (msoc >= 95)
+ tau = tau * 2 * (100 - msoc) / 10;
+
+ fg_dbg(chip, FG_TTF, "tau=%d\n", tau);
+ t_predicted_cv = div_s64((s64)act_cap_mah * rbatt * tau *
+ HOURS_TO_SECONDS, NANO_UNIT);
+ fg_dbg(chip, FG_TTF, "t_predicted_cv=%d\n", t_predicted_cv);
+ t_predicted += t_predicted_cv;
+
+ fg_dbg(chip, FG_TTF, "t_predicted_prefilter=%d\n", t_predicted);
+ if (chip->ttf.last_ms != 0) {
+ delta_ms = ktime_ms_delta(ktime_get_boottime(),
+ ms_to_ktime(chip->ttf.last_ms));
+ if (delta_ms > 10000) {
+ ttf_slope = div64_s64(
+ (s64)(t_predicted - chip->ttf.last_ttf) *
+ MICRO_UNIT, delta_ms);
+ if (ttf_slope > -100)
+ ttf_slope = -100;
+ else if (ttf_slope < -2000)
+ ttf_slope = -2000;
+
+ t_predicted = div_s64(
+ (s64)ttf_slope * delta_ms, MICRO_UNIT) +
+ chip->ttf.last_ttf;
+ fg_dbg(chip, FG_TTF, "ttf_slope=%d\n", ttf_slope);
+ } else {
+ t_predicted = chip->ttf.last_ttf;
+ }
+ }
+
+ /* clamp the ttf to 0 */
+ if (t_predicted < 0)
+ t_predicted = 0;
+
+ fg_dbg(chip, FG_TTF, "t_predicted_postfilter=%d\n", t_predicted);
+ *val = t_predicted;
return 0;
}
+static int fg_get_time_to_full(struct fg_chip *chip, int *val)
+{
+ int rc;
+
+ mutex_lock(&chip->ttf.lock);
+ rc = fg_get_time_to_full_locked(chip, val);
+ mutex_unlock(&chip->ttf.lock);
+ return rc;
+}
+
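The block above rate-limits the reported time-to-full: once a reference point exists, the new estimate is converted to a slope over the elapsed time and clamped so the TTF can only fall at between roughly 0.1 s and 2 s per elapsed second. A self-contained userspace sketch of that filter (ttf_filter() is a hypothetical helper and the numbers are chosen for illustration, not taken from hardware):

#include <stdio.h>
#include <stdint.h>

#define MICRO_UNIT	1000000LL

/* clamp the rate of change of the reported time-to-full */
static int ttf_filter(int t_predicted, int last_ttf, int64_t delta_ms)
{
	int64_t slope;

	if (delta_ms <= 10000)		/* ignore samples closer than 10 s */
		return last_ttf;

	slope = (int64_t)(t_predicted - last_ttf) * MICRO_UNIT / delta_ms;
	if (slope > -100)
		slope = -100;
	else if (slope < -2000)
		slope = -2000;

	return (int)(slope * delta_ms / MICRO_UNIT) + last_ttf;
}

int main(void)
{
	/* raw estimate jumps from 3600 s to 2000 s over 60 s: clamped */
	printf("%d\n", ttf_filter(2000, 3600, 60000));	/* prints 3480 */
	return 0;
}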
#define CENTI_ICORRECT_C0 105
#define CENTI_ICORRECT_C1 20
static int fg_get_time_to_empty(struct fg_chip *chip, int *val)
{
- int rc, ibatt_avg, msoc, act_cap_uah;
- s32 divisor;
- s64 t_predicted;
+ int rc, ibatt_avg, msoc, full_soc, act_cap_mah, divisor;
- rc = fg_circ_buf_avg(&chip->ibatt_circ_buf, &ibatt_avg);
+ rc = fg_circ_buf_median(&chip->ttf.ibatt, &ibatt_avg);
if (rc < 0) {
/* try to get instantaneous current */
rc = fg_get_battery_current(chip, &ibatt_avg);
@@ -2930,31 +3056,36 @@ static int fg_get_time_to_empty(struct fg_chip *chip, int *val)
}
}
- /* clamp ibatt_avg to 150mA */
- if (ibatt_avg < 150000)
- ibatt_avg = 150000;
+ ibatt_avg /= MILLI_UNIT;
+ /* clamp ibatt_avg to 100mA */
+ if (ibatt_avg < 100)
+ ibatt_avg = 100;
+
+ rc = fg_get_prop_capacity(chip, &msoc);
+ if (rc < 0) {
+ pr_err("Error in getting capacity, rc=%d\n", rc);
+ return rc;
+ }
- rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_uah);
+ rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_mah);
if (rc < 0) {
pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc);
return rc;
}
- act_cap_uah *= MILLI_UNIT;
- rc = fg_get_prop_capacity(chip, &msoc);
+ rc = fg_get_sram_prop(chip, FG_SRAM_FULL_SOC, &full_soc);
if (rc < 0) {
- pr_err("Error in getting capacity, rc=%d\n", rc);
+ pr_err("failed to get full soc rc=%d\n", rc);
return rc;
}
+ full_soc = DIV_ROUND_CLOSEST(((u16)full_soc >> 8) * FULL_CAPACITY,
+ FULL_SOC_RAW);
+ act_cap_mah = full_soc * act_cap_mah / 100;
- t_predicted = div_s64((s64)msoc * act_cap_uah, 100);
- t_predicted *= HOURS_TO_SECONDS;
divisor = CENTI_ICORRECT_C0 * 100 + CENTI_ICORRECT_C1 * msoc;
- divisor = div_s64((s64)divisor * ibatt_avg, 10000);
- if (divisor > 0)
- t_predicted = div_s64(t_predicted, divisor);
-
- *val = t_predicted;
+ divisor = ibatt_avg * divisor / 100;
+ divisor = max(100, divisor);
+ *val = act_cap_mah * msoc * HOURS_TO_SECONDS / divisor;
return 0;
}
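The reworked time-to-empty math above reduces to capacity * SOC * 3600 divided by a corrected average current, where the correction grows with SOC through CENTI_ICORRECT_C0/C1. A simplified standalone sketch with made-up numbers; it omits the FULL_SOC scaling of the capacity that the driver performs:

#include <stdio.h>

#define HOURS_TO_SECONDS	3600
#define CENTI_ICORRECT_C0	105
#define CENTI_ICORRECT_C1	20

static int time_to_empty_s(int act_cap_mah, int msoc, int ibatt_avg_ma)
{
	int divisor;

	if (ibatt_avg_ma < 100)		/* clamp the average current as above */
		ibatt_avg_ma = 100;

	divisor = CENTI_ICORRECT_C0 * 100 + CENTI_ICORRECT_C1 * msoc;
	divisor = ibatt_avg_ma * divisor / 100;
	if (divisor < 100)
		divisor = 100;

	return act_cap_mah * msoc * HOURS_TO_SECONDS / divisor;
}

int main(void)
{
	/* 3000 mAh pack at 50% SOC discharging at 500 mA */
	printf("%d s\n", time_to_empty_s(3000, 50, 500));
	return 0;
}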
@@ -3033,6 +3164,150 @@ static int fg_esr_validate(struct fg_chip *chip)
return 0;
}
+static int fg_force_esr_meas(struct fg_chip *chip)
+{
+ int rc;
+ int esr_uohms;
+
+ /* force esr extraction enable */
+ rc = fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD,
+ ESR_EXTRACTION_ENABLE_OFFSET, BIT(0), BIT(0),
+ FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("failed to enable esr extn rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip),
+ LD_REG_CTRL_BIT, 0);
+ if (rc < 0) {
+ pr_err("Error in configuring qnovo_cfg rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_masked_write(chip, BATT_INFO_TM_MISC1(chip),
+ ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT,
+ ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT);
+ if (rc < 0) {
+ pr_err("Error in configuring force ESR rc=%d\n", rc);
+ return rc;
+ }
+
+ /* wait 1.5 seconds for hw to measure ESR */
+ msleep(1500);
+ rc = fg_masked_write(chip, BATT_INFO_TM_MISC1(chip),
+ ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT,
+ 0);
+ if (rc < 0) {
+ pr_err("Error in restoring force ESR rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip),
+ LD_REG_CTRL_BIT, LD_REG_CTRL_BIT);
+ if (rc < 0) {
+ pr_err("Error in restoring qnovo_cfg rc=%d\n", rc);
+ return rc;
+ }
+
+ /* force esr extraction disable */
+ rc = fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD,
+ ESR_EXTRACTION_ENABLE_OFFSET, BIT(0), 0,
+ FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("failed to disable esr extn rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_get_battery_resistance(chip, &esr_uohms);
+ fg_dbg(chip, FG_STATUS, "ESR uohms = %d\n", esr_uohms);
+
+ return rc;
+}
+
+static int fg_prepare_for_qnovo(struct fg_chip *chip, int qnovo_enable)
+{
+ int rc;
+
+ /* disable forced ESR extraction while Qnovo is enabled */
+ rc = fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD,
+ ESR_EXTRACTION_ENABLE_OFFSET,
+ BIT(0), qnovo_enable ? 0 : BIT(0),
+ FG_IMA_DEFAULT);
+ if (rc < 0)
+ pr_err("Error in configuring esr extraction rc=%d\n", rc);
+
+ rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip),
+ LD_REG_CTRL_BIT,
+ qnovo_enable ? LD_REG_CTRL_BIT : 0);
+ if (rc < 0) {
+ pr_err("Error in configuring qnovo_cfg rc=%d\n", rc);
+ return rc;
+ }
+ fg_dbg(chip, FG_STATUS, "Prepared for Qnovo\n");
+ return 0;
+}
+
+static void ttf_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work, struct fg_chip,
+ ttf_work.work);
+ int rc, ibatt_now, vbatt_now, ttf;
+ ktime_t ktime_now;
+
+ mutex_lock(&chip->ttf.lock);
+ if (chip->charge_status != POWER_SUPPLY_STATUS_CHARGING &&
+ chip->charge_status != POWER_SUPPLY_STATUS_DISCHARGING)
+ goto end_work;
+
+ rc = fg_get_battery_current(chip, &ibatt_now);
+ if (rc < 0) {
+ pr_err("failed to get battery current, rc=%d\n", rc);
+ goto end_work;
+ }
+
+ rc = fg_get_battery_voltage(chip, &vbatt_now);
+ if (rc < 0) {
+ pr_err("failed to get battery voltage, rc=%d\n", rc);
+ goto end_work;
+ }
+
+ fg_circ_buf_add(&chip->ttf.ibatt, ibatt_now);
+ fg_circ_buf_add(&chip->ttf.vbatt, vbatt_now);
+
+ if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING) {
+ rc = fg_get_time_to_full_locked(chip, &ttf);
+ if (rc < 0) {
+ pr_err("failed to get ttf, rc=%d\n", rc);
+ goto end_work;
+ }
+
+ /* keep the wake lock and prime the IBATT and VBATT buffers */
+ if (ttf < 0) {
+ /* delay for one FG cycle */
+ schedule_delayed_work(&chip->ttf_work,
+ msecs_to_jiffies(1500));
+ mutex_unlock(&chip->ttf.lock);
+ return;
+ }
+
+ /* update the TTF reference point every minute */
+ ktime_now = ktime_get_boottime();
+ if (ktime_ms_delta(ktime_now,
+ ms_to_ktime(chip->ttf.last_ms)) > 60000 ||
+ chip->ttf.last_ms == 0) {
+ chip->ttf.last_ttf = ttf;
+ chip->ttf.last_ms = ktime_to_ms(ktime_now);
+ }
+ }
+
+ /* recurse every 10 seconds */
+ schedule_delayed_work(&chip->ttf_work, msecs_to_jiffies(10000));
+end_work:
+ vote(chip->awake_votable, TTF_PRIMING, false, 0);
+ mutex_unlock(&chip->ttf.lock);
+}
+
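ttf_work() now feeds battery current and voltage samples into circular buffers, and the TTF/TTE paths read them back through fg_circ_buf_median(), whose body is not part of this hunk. A generic sketch of a median-of-buffer helper, included only to illustrate why a median tolerates the occasional current spike better than a plain average:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int cmp_int(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

static int buf_median(const int *buf, int len)
{
	int tmp[16];

	len = len > 16 ? 16 : len;
	memcpy(tmp, buf, len * sizeof(int));
	qsort(tmp, len, sizeof(int), cmp_int);
	return (len & 1) ? tmp[len / 2] :
			   (tmp[len / 2 - 1] + tmp[len / 2]) / 2;
}

int main(void)
{
	int ibatt_ua[] = { 510000, 495000, 2100000, 505000, 500000 };

	printf("%d uA\n", buf_median(ibatt_ua, 5));	/* spike ignored */
	return 0;
}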
/* PSY CALLBACKS STAY HERE */
static int fg_psy_get_property(struct power_supply *psy,
@@ -3109,6 +3384,20 @@ static int fg_psy_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
rc = fg_get_sram_prop(chip, FG_SRAM_VBATT_FULL, &pval->intval);
break;
+ case POWER_SUPPLY_PROP_CC_STEP:
+ if ((chip->ttf.cc_step.sel >= 0) &&
+ (chip->ttf.cc_step.sel < MAX_CC_STEPS)) {
+ pval->intval =
+ chip->ttf.cc_step.arr[chip->ttf.cc_step.sel];
+ } else {
+ pr_err("cc_step_sel is out of bounds [0, %d]\n",
+ chip->ttf.cc_step.sel);
+ return -EINVAL;
+ }
+ break;
+ case POWER_SUPPLY_PROP_CC_STEP_SEL:
+ pval->intval = chip->ttf.cc_step.sel;
+ break;
default:
pr_err("unsupported property %d\n", psp);
rc = -EINVAL;
@@ -3141,6 +3430,32 @@ static int fg_psy_set_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
rc = fg_set_constant_chg_voltage(chip, pval->intval);
break;
+ case POWER_SUPPLY_PROP_RESISTANCE:
+ rc = fg_force_esr_meas(chip);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE:
+ rc = fg_prepare_for_qnovo(chip, pval->intval);
+ break;
+ case POWER_SUPPLY_PROP_CC_STEP:
+ if ((chip->ttf.cc_step.sel >= 0) &&
+ (chip->ttf.cc_step.sel < MAX_CC_STEPS)) {
+ chip->ttf.cc_step.arr[chip->ttf.cc_step.sel] =
+ pval->intval;
+ } else {
+ pr_err("cc_step_sel is out of bounds [0, %d]\n",
+ chip->ttf.cc_step.sel);
+ return -EINVAL;
+ }
+ break;
+ case POWER_SUPPLY_PROP_CC_STEP_SEL:
+ if ((pval->intval >= 0) && (pval->intval < MAX_CC_STEPS)) {
+ chip->ttf.cc_step.sel = pval->intval;
+ } else {
+ pr_err("cc_step_sel is out of bounds [0, %d]\n",
+ pval->intval);
+ return -EINVAL;
+ }
+ break;
default:
break;
}
@@ -3154,6 +3469,8 @@ static int fg_property_is_writeable(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ case POWER_SUPPLY_PROP_CC_STEP:
+ case POWER_SUPPLY_PROP_CC_STEP_SEL:
return 1;
default:
break;
@@ -3214,6 +3531,8 @@ static enum power_supply_property fg_psy_props[] = {
POWER_SUPPLY_PROP_SOC_REPORTING_READY,
POWER_SUPPLY_PROP_DEBUG_BATTERY,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ POWER_SUPPLY_PROP_CC_STEP,
+ POWER_SUPPLY_PROP_CC_STEP_SEL,
};
static const struct power_supply_desc fg_psy_desc = {
@@ -3424,8 +3743,7 @@ static int fg_hw_init(struct fg_chip *chip)
return rc;
}
- if (chip->cyc_ctr.en)
- restore_cycle_counter(chip);
+ restore_cycle_counter(chip);
if (chip->dt.jeita_hyst_temp >= 0) {
val = chip->dt.jeita_hyst_temp << JEITA_TEMP_HYST_SHIFT;
@@ -3699,8 +4017,7 @@ static irqreturn_t fg_delta_msoc_irq_handler(int irq, void *data)
int rc;
fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
- if (chip->cyc_ctr.en)
- schedule_work(&chip->cycle_count_work);
+ fg_cycle_counter_update(chip);
if (chip->cl.active)
fg_cap_learning_update(chip);
@@ -4395,6 +4712,7 @@ static int fg_gen3_probe(struct platform_device *pdev)
chip->charge_status = -EINVAL;
chip->prev_charge_status = -EINVAL;
chip->ki_coeff_full_soc = -EINVAL;
+ chip->online_status = -EINVAL;
chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
if (!chip->regmap) {
dev_err(chip->dev, "Parent regmap is unavailable\n");
@@ -4463,14 +4781,13 @@ static int fg_gen3_probe(struct platform_device *pdev)
mutex_init(&chip->sram_rw_lock);
mutex_init(&chip->cyc_ctr.lock);
mutex_init(&chip->cl.lock);
- mutex_init(&chip->batt_avg_lock);
+ mutex_init(&chip->ttf.lock);
mutex_init(&chip->charge_full_lock);
init_completion(&chip->soc_update);
init_completion(&chip->soc_ready);
INIT_DELAYED_WORK(&chip->profile_load_work, profile_load_work);
INIT_WORK(&chip->status_change_work, status_change_work);
- INIT_WORK(&chip->cycle_count_work, cycle_count_work);
- INIT_DELAYED_WORK(&chip->batt_avg_work, batt_avg_work);
+ INIT_DELAYED_WORK(&chip->ttf_work, ttf_work);
INIT_DELAYED_WORK(&chip->sram_dump_work, sram_dump_work);
rc = fg_memif_init(chip);
@@ -4567,7 +4884,7 @@ static int fg_gen3_suspend(struct device *dev)
if (rc < 0)
pr_err("Error in configuring ESR timer, rc=%d\n", rc);
- cancel_delayed_work_sync(&chip->batt_avg_work);
+ cancel_delayed_work_sync(&chip->ttf_work);
if (fg_sram_dump)
cancel_delayed_work_sync(&chip->sram_dump_work);
return 0;
@@ -4582,9 +4899,7 @@ static int fg_gen3_resume(struct device *dev)
if (rc < 0)
pr_err("Error in configuring ESR timer, rc=%d\n", rc);
- fg_circ_buf_clr(&chip->ibatt_circ_buf);
- fg_circ_buf_clr(&chip->vbatt_circ_buf);
- schedule_delayed_work(&chip->batt_avg_work, 0);
+ schedule_delayed_work(&chip->ttf_work, 0);
if (fg_sram_dump)
schedule_delayed_work(&chip->sram_dump_work,
msecs_to_jiffies(fg_sram_dump_period_ms));
diff --git a/drivers/power/supply/qcom/qpnp-qnovo.c b/drivers/power/supply/qcom/qpnp-qnovo.c
index eb97eb0ff2ac..cf90f9041935 100644
--- a/drivers/power/supply/qcom/qpnp-qnovo.c
+++ b/drivers/power/supply/qcom/qpnp-qnovo.c
@@ -20,6 +20,7 @@
#include <linux/of_irq.h>
#include <linux/qpnp/qpnp-revid.h>
#include <linux/pmic-voter.h>
+#include <linux/delay.h>
#define QNOVO_REVISION1 0x00
#define QNOVO_REVISION2 0x01
@@ -114,6 +115,17 @@
#define OK_TO_QNOVO_VOTER "ok_to_qnovo_voter"
#define QNOVO_VOTER "qnovo_voter"
+#define FG_AVAILABLE_VOTER "FG_AVAILABLE_VOTER"
+#define QNOVO_OVERALL_VOTER "QNOVO_OVERALL_VOTER"
+#define QNI_PT_VOTER "QNI_PT_VOTER"
+#define ESR_VOTER "ESR_VOTER"
+
+#define HW_OK_TO_QNOVO_VOTER "HW_OK_TO_QNOVO_VOTER"
+#define CHG_READY_VOTER "CHG_READY_VOTER"
+#define USB_READY_VOTER "USB_READY_VOTER"
+#define DC_READY_VOTER "DC_READY_VOTER"
+
+#define PT_RESTART_VOTER "PT_RESTART_VOTER"
struct qnovo_dt_props {
bool external_rsense;
@@ -127,6 +139,10 @@ struct qnovo {
struct qnovo_dt_props dt;
struct device *dev;
struct votable *disable_votable;
+ struct votable *pt_dis_votable;
+ struct votable *not_ok_to_qnovo_votable;
+ struct votable *chg_ready_votable;
+ struct votable *awake_votable;
struct class qnovo_class;
struct pmic_revid_data *pmic_rev_id;
u32 wa_flags;
@@ -138,10 +154,18 @@ struct qnovo {
s64 v_gain_mega;
struct notifier_block nb;
struct power_supply *batt_psy;
+ struct power_supply *bms_psy;
+ struct power_supply *usb_psy;
+ struct power_supply *dc_psy;
struct work_struct status_change_work;
int fv_uV_request;
int fcc_uA_request;
- bool ok_to_qnovo;
+ int usb_present;
+ int dc_present;
+ struct delayed_work usb_debounce_work;
+ struct delayed_work dc_debounce_work;
+
+ struct delayed_work ptrain_restart_work;
};
static int debug_mask;
@@ -229,6 +253,39 @@ static bool is_batt_available(struct qnovo *chip)
return true;
}
+static bool is_fg_available(struct qnovo *chip)
+{
+ if (!chip->bms_psy)
+ chip->bms_psy = power_supply_get_by_name("bms");
+
+ if (!chip->bms_psy)
+ return false;
+
+ return true;
+}
+
+static bool is_usb_available(struct qnovo *chip)
+{
+ if (!chip->usb_psy)
+ chip->usb_psy = power_supply_get_by_name("usb");
+
+ if (!chip->usb_psy)
+ return false;
+
+ return true;
+}
+
+static bool is_dc_available(struct qnovo *chip)
+{
+ if (!chip->dc_psy)
+ chip->dc_psy = power_supply_get_by_name("dc");
+
+ if (!chip->dc_psy)
+ return false;
+
+ return true;
+}
+
static int qnovo_batt_psy_update(struct qnovo *chip, bool disable)
{
union power_supply_propval pval = {0};
@@ -281,10 +338,86 @@ static int qnovo_disable_cb(struct votable *votable, void *data, int disable,
return -EINVAL;
}
+ /*
+ * FG must be available before Qnovo is enabled; the FG_AVAILABLE_VOTER
+ * will not allow enabling it otherwise.
+ */
+
+ if (is_fg_available(chip))
+ power_supply_set_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE,
+ &pval);
+
+ vote(chip->pt_dis_votable, QNOVO_OVERALL_VOTER, disable, 0);
rc = qnovo_batt_psy_update(chip, disable);
return rc;
}
+static int pt_dis_votable_cb(struct votable *votable, void *data, int disable,
+ const char *client)
+{
+ struct qnovo *chip = data;
+ int rc;
+
+ if (disable) {
+ cancel_delayed_work_sync(&chip->ptrain_restart_work);
+ vote(chip->awake_votable, PT_RESTART_VOTER, false, 0);
+ }
+
+ rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT,
+ (bool)disable ? 0 : QNOVO_PTRAIN_EN_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't %s pulse train rc=%d\n",
+ (bool)disable ? "disable" : "enable", rc);
+ return rc;
+ }
+
+ if (!disable) {
+ vote(chip->awake_votable, PT_RESTART_VOTER, true, 0);
+ schedule_delayed_work(&chip->ptrain_restart_work,
+ msecs_to_jiffies(20));
+ }
+
+ return 0;
+}
+
+static int not_ok_to_qnovo_cb(struct votable *votable, void *data,
+ int not_ok_to_qnovo,
+ const char *client)
+{
+ struct qnovo *chip = data;
+
+ vote(chip->disable_votable, OK_TO_QNOVO_VOTER, not_ok_to_qnovo, 0);
+ if (not_ok_to_qnovo)
+ vote(chip->disable_votable, USER_VOTER, true, 0);
+
+ kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
+ return 0;
+}
+
+static int chg_ready_cb(struct votable *votable, void *data, int ready,
+ const char *client)
+{
+ struct qnovo *chip = data;
+
+ vote(chip->not_ok_to_qnovo_votable, CHG_READY_VOTER, !ready, 0);
+
+ return 0;
+}
+
+static int awake_cb(struct votable *votable, void *data, int awake,
+ const char *client)
+{
+ struct qnovo *chip = data;
+
+ if (awake)
+ pm_stay_awake(chip->dev);
+ else
+ pm_relax(chip->dev);
+
+ return 0;
+}
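The callbacks above (pt_dis, not_ok_to_qnovo, chg_ready, awake) are all driven by VOTE_SET_ANY votables created later in probe. A toy, userspace-only sketch of that aggregation model (toy_votable and toy_vote() are hypothetical, not the kernel's pmic-voter API): the effective value is the OR of all client votes and the callback runs only when the effective value changes.

#include <stdio.h>
#include <stdbool.h>

#define MAX_CLIENTS 8

struct toy_votable {
	bool votes[MAX_CLIENTS];
	bool effective;
	void (*cb)(bool effective);
};

static void toy_vote(struct toy_votable *v, int client, bool val)
{
	bool eff = false;
	int i;

	v->votes[client] = val;
	for (i = 0; i < MAX_CLIENTS; i++)
		eff |= v->votes[i];

	if (eff != v->effective) {
		v->effective = eff;
		v->cb(eff);		/* fires only on a real change */
	}
}

static void pt_dis_cb(bool disable)
{
	printf("pulse train %s\n", disable ? "disabled" : "enabled");
}

int main(void)
{
	struct toy_votable pt_dis = { .cb = pt_dis_cb };

	toy_vote(&pt_dis, 0, true);	/* QNI_PT_VOTER -> disabled */
	toy_vote(&pt_dis, 1, true);	/* ESR_VOTER    -> no change */
	toy_vote(&pt_dis, 1, false);	/* still disabled by client 0 */
	toy_vote(&pt_dis, 0, false);	/* last vote gone -> enabled */
	return 0;
}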
+
static int qnovo_parse_dt(struct qnovo *chip)
{
struct device_node *node = chip->dev->of_node;
@@ -626,8 +759,9 @@ static ssize_t ok_to_qnovo_show(struct class *c, struct class_attribute *attr,
char *buf)
{
struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+ int val = get_effective_result(chip->not_ok_to_qnovo_votable);
- return snprintf(buf, PAGE_SIZE, "%d\n", chip->ok_to_qnovo);
+ return snprintf(buf, PAGE_SIZE, "%d\n", !val);
}
static ssize_t qnovo_enable_show(struct class *c, struct class_attribute *attr,
@@ -656,21 +790,10 @@ static ssize_t qnovo_enable_store(struct class *c, struct class_attribute *attr,
static ssize_t pt_enable_show(struct class *c, struct class_attribute *attr,
char *ubuf)
{
- int i = attr - qnovo_attributes;
struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
- u8 buf[2] = {0, 0};
- u16 regval;
- int rc;
-
- rc = qnovo_read(chip, params[i].start_addr, buf, params[i].num_regs);
- if (rc < 0) {
- pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
- return -EINVAL;
- }
- regval = buf[1] << 8 | buf[0];
+ int val = get_effective_result(chip->pt_dis_votable);
- return snprintf(ubuf, PAGE_SIZE, "%d\n",
- (int)(regval & QNOVO_PTRAIN_EN_BIT));
+ return snprintf(ubuf, PAGE_SIZE, "%d\n", !val);
}
static ssize_t pt_enable_store(struct class *c, struct class_attribute *attr,
@@ -678,21 +801,12 @@ static ssize_t pt_enable_store(struct class *c, struct class_attribute *attr,
{
struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
unsigned long val;
- int rc = 0;
-
- if (get_effective_result(chip->disable_votable))
- return -EINVAL;
if (kstrtoul(ubuf, 0, &val))
return -EINVAL;
- rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT,
- (bool)val ? QNOVO_PTRAIN_EN_BIT : 0);
- if (rc < 0) {
- dev_err(chip->dev, "Couldn't %s pulse train rc=%d\n",
- (bool)val ? "enable" : "disable", rc);
- return rc;
- }
+ /* val == 0 means userspace wants PT disabled, so vote true to disable */
+ vote(chip->pt_dis_votable, QNI_PT_VOTER, val ? false : true, 0);
return count;
}
@@ -1116,41 +1230,146 @@ static int qnovo_update_status(struct qnovo *chip)
{
u8 val = 0;
int rc;
- bool ok_to_qnovo;
- bool changed = false;
+ bool hw_ok_to_qnovo;
rc = qnovo_read(chip, QNOVO_ERROR_STS2, &val, 1);
if (rc < 0) {
pr_err("Couldn't read error sts rc = %d\n", rc);
- ok_to_qnovo = false;
+ hw_ok_to_qnovo = false;
} else {
/*
* For CV mode keep qnovo enabled, userspace is expected to
* disable it after few runs
*/
- ok_to_qnovo = (val == ERR_CV_MODE || val == 0) ? true : false;
+ hw_ok_to_qnovo = (val == ERR_CV_MODE || val == 0) ?
+ true : false;
}
- if (chip->ok_to_qnovo ^ ok_to_qnovo) {
+ vote(chip->not_ok_to_qnovo_votable, HW_OK_TO_QNOVO_VOTER,
+ !hw_ok_to_qnovo, 0);
+ return 0;
+}
- vote(chip->disable_votable, OK_TO_QNOVO_VOTER, !ok_to_qnovo, 0);
- if (!ok_to_qnovo)
- vote(chip->disable_votable, USER_VOTER, true, 0);
+static void usb_debounce_work(struct work_struct *work)
+{
+ struct qnovo *chip = container_of(work,
+ struct qnovo, usb_debounce_work.work);
- chip->ok_to_qnovo = ok_to_qnovo;
- changed = true;
- }
+ vote(chip->chg_ready_votable, USB_READY_VOTER, true, 0);
+ vote(chip->awake_votable, USB_READY_VOTER, false, 0);
+}
- return changed;
+static void dc_debounce_work(struct work_struct *work)
+{
+ struct qnovo *chip = container_of(work,
+ struct qnovo, dc_debounce_work.work);
+
+ vote(chip->chg_ready_votable, DC_READY_VOTER, true, 0);
+ vote(chip->awake_votable, DC_READY_VOTER, false, 0);
}
+#define DEBOUNCE_MS 15000 /* 15 seconds */
static void status_change_work(struct work_struct *work)
{
struct qnovo *chip = container_of(work,
struct qnovo, status_change_work);
+ union power_supply_propval pval;
+ bool usb_present = false, dc_present = false;
+ int rc;
+
+ if (is_fg_available(chip))
+ vote(chip->disable_votable, FG_AVAILABLE_VOTER, false, 0);
+
+ if (is_usb_available(chip)) {
+ rc = power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ usb_present = (rc < 0) ? 0 : pval.intval;
+ }
+
+ if (chip->usb_present && !usb_present) {
+ /* removal */
+ chip->usb_present = 0;
+ cancel_delayed_work_sync(&chip->usb_debounce_work);
+ vote(chip->awake_votable, USB_READY_VOTER, false, 0);
+ vote(chip->chg_ready_votable, USB_READY_VOTER, false, 0);
+ } else if (!chip->usb_present && usb_present) {
+ /* insertion */
+ chip->usb_present = 1;
+ vote(chip->awake_votable, USB_READY_VOTER, true, 0);
+ schedule_delayed_work(&chip->usb_debounce_work,
+ msecs_to_jiffies(DEBOUNCE_MS));
+ }
+
+ if (is_dc_available(chip)) {
+ rc = power_supply_get_property(chip->dc_psy,
+ POWER_SUPPLY_PROP_PRESENT,
+ &pval);
+ dc_present = (rc < 0) ? 0 : pval.intval;
+ }
- if (qnovo_update_status(chip))
- kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
+ if (usb_present)
+ dc_present = 0;
+
+ if (chip->dc_present && !dc_present) {
+ /* removal */
+ chip->dc_present = 0;
+ cancel_delayed_work_sync(&chip->dc_debounce_work);
+ vote(chip->awake_votable, DC_READY_VOTER, false, 0);
+ vote(chip->chg_ready_votable, DC_READY_VOTER, false, 0);
+ } else if (!chip->dc_present && dc_present) {
+ /* insertion */
+ chip->dc_present = 1;
+ vote(chip->awake_votable, DC_READY_VOTER, true, 0);
+ schedule_delayed_work(&chip->dc_debounce_work,
+ msecs_to_jiffies(DEBOUNCE_MS));
+ }
+
+ qnovo_update_status(chip);
+}
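status_change_work() above debounces charger insertion for DEBOUNCE_MS (15 s) before declaring the charger ready, while removal takes effect immediately. A compact sketch of that state machine with stubbed work scheduling (toy_port and its helpers are hypothetical):

#include <stdbool.h>
#include <stdio.h>

struct toy_port {
	bool present;
	bool ready;
	bool work_pending;
};

static void toy_debounce_expired(struct toy_port *p)
{
	p->work_pending = false;
	p->ready = true;		/* chg_ready_votable: vote ready */
}

static void toy_present_changed(struct toy_port *p, bool present)
{
	if (p->present && !present) {		/* removal */
		p->present = false;
		p->work_pending = false;	/* cancel_delayed_work_sync() */
		p->ready = false;
	} else if (!p->present && present) {	/* insertion */
		p->present = true;
		p->work_pending = true;		/* schedule after DEBOUNCE_MS */
	}
}

int main(void)
{
	struct toy_port usb = { 0 };

	toy_present_changed(&usb, true);
	toy_debounce_expired(&usb);	/* would fire DEBOUNCE_MS later */
	printf("usb ready=%d\n", usb.ready);
	toy_present_changed(&usb, false);
	printf("usb ready=%d\n", usb.ready);
	return 0;
}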
+
+static void ptrain_restart_work(struct work_struct *work)
+{
+ struct qnovo *chip = container_of(work,
+ struct qnovo, ptrain_restart_work.work);
+ u8 pt_t1, pt_t2;
+ int rc;
+
+ rc = qnovo_read(chip, QNOVO_PTTIME_STS, &pt_t1, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read QNOVO_PTTIME_STS rc = %d\n",
+ rc);
+ goto clean_up;
+ }
+
+ /* pttime increments every 2 seconds */
+ msleep(2100);
+
+ rc = qnovo_read(chip, QNOVO_PTTIME_STS, &pt_t2, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read QNOVO_PTTIME_STS rc = %d\n",
+ rc);
+ goto clean_up;
+ }
+
+ if (pt_t1 != pt_t2)
+ goto clean_up;
+
+ /* Toggle pt enable to restart pulse train */
+ rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't disable pulse train rc=%d\n", rc);
+ goto clean_up;
+ }
+ msleep(1000);
+ rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT,
+ QNOVO_PTRAIN_EN_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't enable pulse train rc=%d\n", rc);
+ goto clean_up;
+ }
+
+clean_up:
+ vote(chip->awake_votable, PT_RESTART_VOTER, false, 0);
}
static int qnovo_notifier_call(struct notifier_block *nb,
@@ -1162,7 +1381,10 @@ static int qnovo_notifier_call(struct notifier_block *nb,
if (ev != PSY_EVENT_PROP_CHANGED)
return NOTIFY_OK;
- if (strcmp(psy->desc->name, "battery") == 0)
+ if (strcmp(psy->desc->name, "battery") == 0
+ || strcmp(psy->desc->name, "bms") == 0
+ || strcmp(psy->desc->name, "usb") == 0
+ || strcmp(psy->desc->name, "dc") == 0)
schedule_work(&chip->status_change_work);
return NOTIFY_OK;
@@ -1171,7 +1393,23 @@ static int qnovo_notifier_call(struct notifier_block *nb,
static irqreturn_t handle_ptrain_done(int irq, void *data)
{
struct qnovo *chip = data;
+ union power_supply_propval pval = {0};
+ /*
+ * hw resets pt_en bit once ptrain_done triggers.
+ * vote on behalf of QNI to disable it such that
+ * once QNI enables it, the votable state changes
+ * and the callback that sets it is indeed invoked
+ */
+ vote(chip->pt_dis_votable, QNI_PT_VOTER, true, 0);
+
+ vote(chip->pt_dis_votable, ESR_VOTER, true, 0);
+ if (is_fg_available(chip))
+ power_supply_set_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_RESISTANCE,
+ &pval);
+
+ vote(chip->pt_dis_votable, ESR_VOTER, false, 0);
qnovo_update_status(chip);
kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
return IRQ_HANDLED;
@@ -1186,6 +1424,11 @@ static int qnovo_hw_init(struct qnovo *chip)
u8 val;
vote(chip->disable_votable, USER_VOTER, true, 0);
+ vote(chip->disable_votable, FG_AVAILABLE_VOTER, true, 0);
+
+ vote(chip->pt_dis_votable, QNI_PT_VOTER, true, 0);
+ vote(chip->pt_dis_votable, QNOVO_OVERALL_VOTER, true, 0);
+ vote(chip->pt_dis_votable, ESR_VOTER, false, 0);
val = 0;
rc = qnovo_write(chip, QNOVO_STRM_CTRL, &val, 1);
@@ -1349,12 +1592,45 @@ static int qnovo_probe(struct platform_device *pdev)
goto cleanup;
}
+ chip->pt_dis_votable = create_votable("QNOVO_PT_DIS", VOTE_SET_ANY,
+ pt_dis_votable_cb, chip);
+ if (IS_ERR(chip->pt_dis_votable)) {
+ rc = PTR_ERR(chip->pt_dis_votable);
+ goto destroy_disable_votable;
+ }
+
+ chip->not_ok_to_qnovo_votable = create_votable("QNOVO_NOT_OK",
+ VOTE_SET_ANY,
+ not_ok_to_qnovo_cb, chip);
+ if (IS_ERR(chip->not_ok_to_qnovo_votable)) {
+ rc = PTR_ERR(chip->not_ok_to_qnovo_votable);
+ goto destroy_pt_dis_votable;
+ }
+
+ chip->chg_ready_votable = create_votable("QNOVO_CHG_READY",
+ VOTE_SET_ANY,
+ chg_ready_cb, chip);
+ if (IS_ERR(chip->chg_ready_votable)) {
+ rc = PTR_ERR(chip->chg_ready_votable);
+ goto destroy_not_ok_to_qnovo_votable;
+ }
+
+ chip->awake_votable = create_votable("QNOVO_AWAKE", VOTE_SET_ANY,
+ awake_cb, chip);
+ if (IS_ERR(chip->awake_votable)) {
+ rc = PTR_ERR(chip->awake_votable);
+ goto destroy_chg_ready_votable;
+ }
+
INIT_WORK(&chip->status_change_work, status_change_work);
+ INIT_DELAYED_WORK(&chip->dc_debounce_work, dc_debounce_work);
+ INIT_DELAYED_WORK(&chip->usb_debounce_work, usb_debounce_work);
+ INIT_DELAYED_WORK(&chip->ptrain_restart_work, ptrain_restart_work);
rc = qnovo_hw_init(chip);
if (rc < 0) {
pr_err("Couldn't initialize hardware rc=%d\n", rc);
- goto destroy_votable;
+ goto destroy_awake_votable;
}
rc = qnovo_register_notifier(chip);
@@ -1390,7 +1666,15 @@ static int qnovo_probe(struct platform_device *pdev)
unreg_notifier:
power_supply_unreg_notifier(&chip->nb);
-destroy_votable:
+destroy_awake_votable:
+ destroy_votable(chip->awake_votable);
+destroy_chg_ready_votable:
+ destroy_votable(chip->chg_ready_votable);
+destroy_not_ok_to_qnovo_votable:
+ destroy_votable(chip->not_ok_to_qnovo_votable);
+destroy_pt_dis_votable:
+ destroy_votable(chip->pt_dis_votable);
+destroy_disable_votable:
destroy_votable(chip->disable_votable);
cleanup:
platform_set_drvdata(pdev, NULL);
@@ -1403,6 +1687,9 @@ static int qnovo_remove(struct platform_device *pdev)
class_unregister(&chip->qnovo_class);
power_supply_unreg_notifier(&chip->nb);
+ destroy_votable(chip->chg_ready_votable);
+ destroy_votable(chip->not_ok_to_qnovo_votable);
+ destroy_votable(chip->pt_dis_votable);
destroy_votable(chip->disable_votable);
platform_set_drvdata(pdev, NULL);
return 0;
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 630d02eb7a67..1139f33866c4 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -210,6 +210,9 @@ static int smb2_parse_dt(struct smb2 *chip)
chg->step_chg_enabled = of_property_read_bool(node,
"qcom,step-charging-enable");
+ chg->sw_jeita_enabled = of_property_read_bool(node,
+ "qcom,sw-jeita-enable");
+
rc = of_property_read_u32(node, "qcom,wd-bark-time-secs",
&chip->dt.wd_bark_time);
if (rc < 0 || chip->dt.wd_bark_time < MIN_WD_BARK_TIME)
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 0ed07aa13855..f4d286c6a324 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -1571,8 +1571,8 @@ int smblib_get_prop_batt_status(struct smb_charger *chg,
union power_supply_propval *val)
{
union power_supply_propval pval = {0, };
- bool usb_online, dc_online;
- u8 stat;
+ bool usb_online, dc_online, qnovo_en;
+ u8 stat, pt_en_cmd;
int rc;
rc = smblib_get_prop_usb_online(chg, &pval);
@@ -1640,11 +1640,22 @@ int smblib_get_prop_batt_status(struct smb_charger *chg,
smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
rc);
return rc;
- }
+ }
stat &= ENABLE_TRICKLE_BIT | ENABLE_PRE_CHARGING_BIT |
ENABLE_FAST_CHARGING_BIT | ENABLE_FULLON_MODE_BIT;
- if (!stat)
+
+ rc = smblib_read(chg, QNOVO_PT_ENABLE_CMD_REG, &pt_en_cmd);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read QNOVO_PT_ENABLE_CMD_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ qnovo_en = (bool)(pt_en_cmd & QNOVO_PT_ENABLE_CMD_BIT);
+
+ /* ignore stat7 when qnovo is enabled */
+ if (!qnovo_en && !stat)
val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
return 0;
@@ -4059,7 +4070,7 @@ irqreturn_t smblib_handle_wdog_bark(int irq, void *data)
if (rc < 0)
smblib_err(chg, "Couldn't pet the dog rc=%d\n", rc);
- if (chg->step_chg_enabled)
+ if (chg->step_chg_enabled || chg->sw_jeita_enabled)
power_supply_changed(chg->batt_psy);
return IRQ_HANDLED;
@@ -4697,7 +4708,8 @@ int smblib_init(struct smb_charger *chg)
return rc;
}
- rc = qcom_step_chg_init(chg->step_chg_enabled);
+ rc = qcom_step_chg_init(chg->step_chg_enabled,
+ chg->sw_jeita_enabled);
if (rc < 0) {
smblib_err(chg, "Couldn't init qcom_step_chg_init rc=%d\n",
rc);
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 8c810352f78f..c91f9eaae86b 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -306,6 +306,7 @@ struct smb_charger {
int dcp_icl_ua;
int fake_capacity;
bool step_chg_enabled;
+ bool sw_jeita_enabled;
bool is_hdc;
bool chg_done;
bool micro_usb_mode;
diff --git a/drivers/power/supply/qcom/step-chg-jeita.c b/drivers/power/supply/qcom/step-chg-jeita.c
index a2c08be960be..cba01608afb3 100644
--- a/drivers/power/supply/qcom/step-chg-jeita.c
+++ b/drivers/power/supply/qcom/step-chg-jeita.c
@@ -20,7 +20,7 @@
#define MAX_STEP_CHG_ENTRIES 8
#define STEP_CHG_VOTER "STEP_CHG_VOTER"
-#define STATUS_CHANGE_VOTER "STATUS_CHANGE_VOTER"
+#define JEITA_VOTER "JEITA_VOTER"
#define is_between(left, right, value) \
(((left) >= (right) && (left) >= (value) \
@@ -28,23 +28,44 @@
|| ((left) <= (right) && (left) <= (value) \
&& (value) <= (right)))
-struct step_chg_data {
- u32 vbatt_soc_low;
- u32 vbatt_soc_high;
- u32 fcc_ua;
+struct range_data {
+ u32 low_threshold;
+ u32 high_threshold;
+ u32 value;
};
struct step_chg_cfg {
- u32 psy_prop;
- char *prop_name;
- struct step_chg_data cfg[MAX_STEP_CHG_ENTRIES];
+ u32 psy_prop;
+ char *prop_name;
+ int hysteresis;
+ struct range_data fcc_cfg[MAX_STEP_CHG_ENTRIES];
+};
+
+struct jeita_fcc_cfg {
+ u32 psy_prop;
+ char *prop_name;
+ int hysteresis;
+ struct range_data fcc_cfg[MAX_STEP_CHG_ENTRIES];
+};
+
+struct jeita_fv_cfg {
+ u32 psy_prop;
+ char *prop_name;
+ int hysteresis;
+ struct range_data fv_cfg[MAX_STEP_CHG_ENTRIES];
};
struct step_chg_info {
- ktime_t last_update_time;
+ ktime_t step_last_update_time;
+ ktime_t jeita_last_update_time;
bool step_chg_enable;
+ bool sw_jeita_enable;
+ int jeita_fcc_index;
+ int jeita_fv_index;
+ int step_index;
struct votable *fcc_votable;
+ struct votable *fv_votable;
struct wakeup_source *step_chg_ws;
struct power_supply *batt_psy;
struct delayed_work status_change_work;
@@ -53,32 +74,70 @@ struct step_chg_info {
static struct step_chg_info *the_chip;
+#define STEP_CHG_HYSTERISIS_DELAY_US 5000000 /* 5 secs */
+
/*
* Step Charging Configuration
* Update the table based on the battery profile
* Supports VBATT and SOC based source
+ * Ranges must be listed in increasing order and must not overlap.
*/
static struct step_chg_cfg step_chg_config = {
- .psy_prop = POWER_SUPPLY_PROP_VOLTAGE_NOW,
- .prop_name = "VBATT",
- .cfg = {
+ .psy_prop = POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ .prop_name = "VBATT",
+ .hysteresis = 100000, /* 100mV */
+ .fcc_cfg = {
/* VBAT_LOW VBAT_HIGH FCC */
{3600000, 4000000, 3000000},
- {4000000, 4200000, 2800000},
- {4200000, 4400000, 2000000},
+ {4001000, 4200000, 2800000},
+ {4201000, 4400000, 2000000},
},
+ /*
+ * SOC STEP-CHG configuration example.
+ *
+ * .psy_prop = POWER_SUPPLY_PROP_CAPACITY,
+ * .prop_name = "SOC",
+ * .fcc_cfg = {
+ * //SOC_LOW SOC_HIGH FCC
+ * {20, 70, 3000000},
+ * {70, 90, 2750000},
+ * {90, 100, 2500000},
+ * },
+ */
+};
+
/*
- * SOC STEP-CHG configuration example.
- *
- * .psy_prop = POWER_SUPPLY_PROP_CAPACITY,
- * .prop_name = "SOC",
- * .cfg = {
- * //SOC_LOW SOC_HIGH FCC
- * {20, 70, 3000000},
- * {70, 90, 2750000},
- * {90, 100, 2500000},
- * },
+ * Jeita Charging Configuration
+ * Update the table based on the battery profile
+ * Please ensure that the TEMP ranges are programmed in the hw so that
+ * an interrupt is issued and the consequent psy-changed notification
+ * makes us react immediately.
+ * Ranges must be listed in increasing order and must not overlap.
+ * Gaps are okay.
*/
+static struct jeita_fcc_cfg jeita_fcc_config = {
+ .psy_prop = POWER_SUPPLY_PROP_TEMP,
+ .prop_name = "BATT_TEMP",
+ .hysteresis = 10, /* 1degC hysteresis */
+ .fcc_cfg = {
+ /* TEMP_LOW TEMP_HIGH FCC */
+ {0, 100, 600000},
+ {101, 200, 2000000},
+ {201, 450, 3000000},
+ {451, 550, 600000},
+ },
+};
+
+static struct jeita_fv_cfg jeita_fv_config = {
+ .psy_prop = POWER_SUPPLY_PROP_TEMP,
+ .prop_name = "BATT_TEMP",
+ .hysteresis = 10, /* 1degC hysteresis */
+ .fv_cfg = {
+ /* TEMP_LOW TEMP_HIGH FV */
+ {0, 100, 4200000},
+ {101, 450, 4400000},
+ {451, 550, 4200000},
+ },
};
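All three tables above assume their ranges are listed in increasing order and do not overlap (gaps are allowed). A small sketch of a sanity check one could run over such a table at init time (toy_range and toy_ranges_valid() are hypothetical, not part of the patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_range {
	uint32_t low;
	uint32_t high;
	uint32_t value;
};

static bool toy_ranges_valid(const struct toy_range *r, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (r[i].low > r[i].high)
			return false;
		if (i && r[i].low <= r[i - 1].high)
			return false;	/* overlap or out of order */
	}
	return true;
}

int main(void)
{
	const struct toy_range jeita_fcc[] = {
		{   0, 100,  600000 },
		{ 101, 200, 2000000 },
		{ 201, 450, 3000000 },
		{ 451, 550,  600000 },
	};

	printf("valid=%d\n", toy_ranges_valid(jeita_fcc, 4));
	return 0;
}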
static bool is_batt_available(struct step_chg_info *chip)
@@ -92,22 +151,67 @@ static bool is_batt_available(struct step_chg_info *chip)
return true;
}
-static int get_fcc(int threshold)
+static int get_val(struct range_data *range, int hysteresis, int current_index,
+ int threshold,
+ int *new_index, int *val)
{
int i;
+ *new_index = -EINVAL;
+ /* first find the matching index without hysteresis */
for (i = 0; i < MAX_STEP_CHG_ENTRIES; i++)
- if (is_between(step_chg_config.cfg[i].vbatt_soc_low,
- step_chg_config.cfg[i].vbatt_soc_high, threshold))
- return step_chg_config.cfg[i].fcc_ua;
+ if (is_between(range[i].low_threshold,
+ range[i].high_threshold, threshold)) {
+ *new_index = i;
+ *val = range[i].value;
+ }
+
+ /* if nothing was found, return -ENODATA */
+ if (*new_index == -EINVAL)
+ return -ENODATA;
+ /*
+ * If we don't have a current_index, return this newly found value.
+ * There is no hysteresis for an out-of-range to in-range transition.
+ */
+ if (current_index == -EINVAL)
+ return 0;
- return -ENODATA;
+ /*
+ * Check for hysteresis if it is in the neighbourhood
+ * of our current index.
+ */
+ if (*new_index == current_index + 1) {
+ if (threshold < range[*new_index].low_threshold + hysteresis) {
+ /*
+ * Stay in the current index, threshold is not higher
+ * by hysteresis amount
+ */
+ *new_index = current_index;
+ *val = range[current_index].value;
+ }
+ } else if (*new_index == current_index - 1) {
+ if (threshold > range[*new_index].high_threshold - hysteresis) {
+ /*
+ * stay in the current index, threshold is not lower
+ * by hysteresis amount
+ */
+ *new_index = current_index;
+ *val = range[current_index].value;
+ }
+ }
+ return 0;
}
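get_val() only moves to an adjacent range once the reading has crossed the boundary by the hysteresis amount, which prevents the FCC/FV from flapping near a threshold. A standalone, simplified re-implementation that walks the JEITA FCC table with 1 degC (10 deci-degC) hysteresis; it ignores the no-match and non-adjacent cases the real helper also handles:

#include <stdio.h>

struct toy_range { int low, high, value; };

static const struct toy_range fcc[] = {
	{   0, 100,  600000 },
	{ 101, 200, 2000000 },
	{ 201, 450, 3000000 },
	{ 451, 550,  600000 },
};

/* simplified version of get_val(): same hysteresis rule on adjacent moves */
static int toy_get_index(int cur, int threshold, int hyst)
{
	int i, new_idx = -1;

	for (i = 0; i < 4; i++)
		if (threshold >= fcc[i].low && threshold <= fcc[i].high)
			new_idx = i;

	if (new_idx < 0 || cur < 0)
		return new_idx;

	if (new_idx == cur + 1 && threshold < fcc[new_idx].low + hyst)
		new_idx = cur;		/* not higher by hysteresis yet */
	else if (new_idx == cur - 1 && threshold > fcc[new_idx].high - hyst)
		new_idx = cur;		/* not lower by hysteresis yet */

	return new_idx;
}

int main(void)
{
	int idx = 2;				/* charging in the 20.1-45 C band */

	idx = toy_get_index(idx, 195, 10);	/* stays at 2: 195 > 200 - 10 */
	printf("idx=%d fcc=%d\n", idx, fcc[idx].value);
	idx = toy_get_index(idx, 189, 10);	/* drops to index 1 */
	printf("idx=%d fcc=%d\n", idx, fcc[idx].value);
	return 0;
}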
static int handle_step_chg_config(struct step_chg_info *chip)
{
union power_supply_propval pval = {0, };
int rc = 0, fcc_ua = 0;
+ u64 elapsed_us;
+
+ elapsed_us = ktime_us_delta(ktime_get(), chip->step_last_update_time);
+ if (elapsed_us < STEP_CHG_HYSTERISIS_DELAY_US)
+ goto reschedule;
rc = power_supply_get_property(chip->batt_psy,
POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED, &pval);
@@ -119,7 +223,7 @@ static int handle_step_chg_config(struct step_chg_info *chip)
if (!chip->step_chg_enable) {
if (chip->fcc_votable)
vote(chip->fcc_votable, STEP_CHG_VOTER, false, 0);
- return 0;
+ goto update_time;
}
rc = power_supply_get_property(chip->batt_psy,
@@ -130,48 +234,144 @@ static int handle_step_chg_config(struct step_chg_info *chip)
return rc;
}
- chip->fcc_votable = find_votable("FCC");
- if (!chip->fcc_votable)
- return -EINVAL;
-
- fcc_ua = get_fcc(pval.intval);
- if (fcc_ua < 0) {
+ rc = get_val(step_chg_config.fcc_cfg, step_chg_config.hysteresis,
+ chip->step_index,
+ pval.intval,
+ &chip->step_index,
+ &fcc_ua);
+ if (rc < 0) {
/* remove the vote if no step-based fcc is found */
- vote(chip->fcc_votable, STEP_CHG_VOTER, false, 0);
- return 0;
+ if (chip->fcc_votable)
+ vote(chip->fcc_votable, STEP_CHG_VOTER, false, 0);
+ goto update_time;
}
+ if (!chip->fcc_votable)
+ chip->fcc_votable = find_votable("FCC");
+ if (!chip->fcc_votable)
+ return -EINVAL;
+
vote(chip->fcc_votable, STEP_CHG_VOTER, true, fcc_ua);
pr_debug("%s = %d Step-FCC = %duA\n",
step_chg_config.prop_name, pval.intval, fcc_ua);
+update_time:
+ chip->step_last_update_time = ktime_get();
return 0;
+
+reschedule:
+ /* reschedule 1000 us after the remaining debounce time */
+ return (STEP_CHG_HYSTERISIS_DELAY_US - elapsed_us + 1000);
+}
+
+static int handle_jeita(struct step_chg_info *chip)
+{
+ union power_supply_propval pval = {0, };
+ int rc = 0, fcc_ua = 0, fv_uv = 0;
+ u64 elapsed_us;
+
+ if (!chip->sw_jeita_enable) {
+ if (chip->fcc_votable)
+ vote(chip->fcc_votable, JEITA_VOTER, false, 0);
+ if (chip->fv_votable)
+ vote(chip->fv_votable, JEITA_VOTER, false, 0);
+ return 0;
+ }
+
+ elapsed_us = ktime_us_delta(ktime_get(), chip->jeita_last_update_time);
+ if (elapsed_us < STEP_CHG_HYSTERISIS_DELAY_US)
+ goto reschedule;
+
+ rc = power_supply_get_property(chip->batt_psy,
+ jeita_fcc_config.psy_prop, &pval);
+ if (rc < 0) {
+ pr_err("Couldn't read %s property rc=%d\n",
+ jeita_fcc_config.prop_name, rc);
+ return rc;
+ }
+
+ rc = get_val(jeita_fcc_config.fcc_cfg, jeita_fcc_config.hysteresis,
+ chip->jeita_fcc_index,
+ pval.intval,
+ &chip->jeita_fcc_index,
+ &fcc_ua);
+ if (rc < 0) {
+ /* remove the vote if no JEITA-based fcc is found */
+ if (chip->fcc_votable)
+ vote(chip->fcc_votable, JEITA_VOTER, false, 0);
+ goto update_time;
+ }
+
+ if (!chip->fcc_votable)
+ chip->fcc_votable = find_votable("FCC");
+ if (!chip->fcc_votable)
+ /* changing FCC is a must */
+ return -EINVAL;
+
+ vote(chip->fcc_votable, JEITA_VOTER, true, fcc_ua);
+
+ rc = get_val(jeita_fv_config.fv_cfg, jeita_fv_config.hysteresis,
+ chip->jeita_fv_index,
+ pval.intval,
+ &chip->jeita_fv_index,
+ &fv_uv);
+ if (rc < 0) {
+ /* remove the vote if no JEITA-based fv is found */
+ if (chip->fv_votable)
+ vote(chip->fv_votable, JEITA_VOTER, false, 0);
+ goto update_time;
+ }
+
+ chip->fv_votable = find_votable("FV");
+ if (!chip->fv_votable)
+ goto update_time;
+
+ vote(chip->fv_votable, JEITA_VOTER, true, fv_uv);
+
+ pr_debug("%s = %d FCC = %duA FV = %duV\n",
+ jeita_fcc_config.prop_name, pval.intval, fcc_ua, fv_uv);
+
+update_time:
+ chip->jeita_last_update_time = ktime_get();
+ return 0;
+
+reschedule:
+ /* reschedule 1000 us after the remaining debounce time */
+ return (STEP_CHG_HYSTERISIS_DELAY_US - elapsed_us + 1000);
}
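Both handle_jeita() and handle_step_chg_config() share the same return convention: 0 means the pass ran (or was intentionally skipped), while a positive value is the number of microseconds after which the caller should retry, i.e. the remaining debounce time plus a 1000 us margin. A trivial sketch of that convention (toy_debounce() is a hypothetical helper):

#include <stdint.h>
#include <stdio.h>

#define DELAY_US 5000000ULL		/* STEP_CHG_HYSTERISIS_DELAY_US */

static uint64_t toy_debounce(uint64_t elapsed_us)
{
	if (elapsed_us < DELAY_US)
		return DELAY_US - elapsed_us + 1000;	/* retry later */
	return 0;			/* ok to run the update now */
}

int main(void)
{
	printf("%llu\n", (unsigned long long)toy_debounce(1200000));
	printf("%llu\n", (unsigned long long)toy_debounce(6000000));
	return 0;
}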
-#define STEP_CHG_HYSTERISIS_DELAY_US 5000000 /* 5 secs */
static void status_change_work(struct work_struct *work)
{
struct step_chg_info *chip = container_of(work,
struct step_chg_info, status_change_work.work);
int rc = 0;
- u64 elapsed_us;
-
- elapsed_us = ktime_us_delta(ktime_get(), chip->last_update_time);
- if (elapsed_us < STEP_CHG_HYSTERISIS_DELAY_US)
- goto release_ws;
+ int reschedule_us;
+ int reschedule_jeita_work_us = 0;
+ int reschedule_step_work_us = 0;
if (!is_batt_available(chip))
- goto release_ws;
+ return;
+
+ /* skip elapsed_us debounce for handling battery temperature */
+ rc = handle_jeita(chip);
+ if (rc > 0)
+ reschedule_jeita_work_us = rc;
+ else if (rc < 0)
+ pr_err("Couldn't handle sw jeita rc = %d\n", rc);
rc = handle_step_chg_config(chip);
+ if (rc > 0)
+ reschedule_step_work_us = rc;
if (rc < 0)
- goto release_ws;
+ pr_err("Couldn't handle step rc = %d\n", rc);
- chip->last_update_time = ktime_get();
-
-release_ws:
- __pm_relax(chip->step_chg_ws);
+ reschedule_us = min(reschedule_jeita_work_us, reschedule_step_work_us);
+ if (reschedule_us == 0)
+ __pm_relax(chip->step_chg_ws);
+ else
+ schedule_delayed_work(&chip->status_change_work,
+ usecs_to_jiffies(reschedule_us));
}
static int step_chg_notifier_call(struct notifier_block *nb,
@@ -205,7 +405,7 @@ static int step_chg_register_notifier(struct step_chg_info *chip)
return 0;
}
-int qcom_step_chg_init(bool step_chg_enable)
+int qcom_step_chg_init(bool step_chg_enable, bool sw_jeita_enable)
{
int rc;
struct step_chg_info *chip;
@@ -226,6 +426,11 @@ int qcom_step_chg_init(bool step_chg_enable)
}
chip->step_chg_enable = step_chg_enable;
+ chip->sw_jeita_enable = sw_jeita_enable;
+
+ chip->step_index = -EINVAL;
+ chip->jeita_fcc_index = -EINVAL;
+ chip->jeita_fv_index = -EINVAL;
if (step_chg_enable && (!step_chg_config.psy_prop ||
!step_chg_config.prop_name)) {
@@ -234,6 +439,20 @@ int qcom_step_chg_init(bool step_chg_enable)
return -ENODATA;
}
+ if (sw_jeita_enable && (!jeita_fcc_config.psy_prop ||
+ !jeita_fcc_config.prop_name)) {
+ /* fail if step-chg configuration is invalid */
+ pr_err("Jeita TEMP configuration not defined - fail\n");
+ return -ENODATA;
+ }
+
+ if (sw_jeita_enable && (!jeita_fv_config.psy_prop ||
+ !jeita_fv_config.prop_name)) {
+ /* fail if step-chg configuration is invalid */
+ pr_err("Jeita TEMP configuration not defined - fail\n");
+ return -ENODATA;
+ }
+
INIT_DELAYED_WORK(&chip->status_change_work, status_change_work);
rc = step_chg_register_notifier(chip);
diff --git a/drivers/power/supply/qcom/step-chg-jeita.h b/drivers/power/supply/qcom/step-chg-jeita.h
index 928eeb7a670b..53335c3c2c70 100644
--- a/drivers/power/supply/qcom/step-chg-jeita.h
+++ b/drivers/power/supply/qcom/step-chg-jeita.h
@@ -12,6 +12,6 @@
#ifndef __STEP_CHG_H__
#define __STEP_CHG_H__
-int qcom_step_chg_init(bool);
+int qcom_step_chg_init(bool, bool);
void qcom_step_chg_deinit(void);
#endif /* __STEP_CHG_H__ */
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index aae796678ffe..47106f937371 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -2416,7 +2416,8 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
*/
static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg)
{
- int ret = 0, scm_ret = 0;
+ int ret = 0;
+ u64 scm_ret = 0;
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
/* scm command buffer structrue */
@@ -2457,7 +2458,7 @@ static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg)
cbuf.device_id = UFS_TZ_DEV_ID;
ret = scm_restore_sec_cfg(cbuf.device_id, cbuf.spare, &scm_ret);
if (ret || scm_ret) {
- dev_dbg(hba->dev, "%s: failed, ret %d scm_ret %d\n",
+ dev_dbg(hba->dev, "%s: failed, ret %d scm_ret %llu\n",
__func__, ret, scm_ret);
if (!ret)
ret = scm_ret;
@@ -2466,7 +2467,7 @@ static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg)
}
out:
- dev_dbg(hba->dev, "%s: ip: restore_sec_cfg %d, op: restore_sec_cfg %d, ret %d scm_ret %d\n",
+ dev_dbg(hba->dev, "%s: ip: restore_sec_cfg %d, op: restore_sec_cfg %d, ret %d scm_ret %llu\n",
__func__, restore_sec_cfg, host->sec_cfg_updated, ret, scm_ret);
return ret;
}
diff --git a/drivers/soc/qcom/jtagv8-etm.c b/drivers/soc/qcom/jtagv8-etm.c
index 2c15f7896c82..63432e6026e2 100644
--- a/drivers/soc/qcom/jtagv8-etm.c
+++ b/drivers/soc/qcom/jtagv8-etm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1503,6 +1503,7 @@ static int jtag_mm_etm_callback(struct notifier_block *nfb,
void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
+ u64 version = 0;
if (!etm[cpu])
goto out;
@@ -1524,8 +1525,8 @@ static int jtag_mm_etm_callback(struct notifier_block *nfb,
goto out;
}
if (etm_arch_supported(etm[cpu]->arch)) {
- if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) <
- TZ_DBG_ETM_VER)
+ if (!scm_get_feat_version(TZ_DBG_ETM_FEAT_ID, &version)
+ && version < TZ_DBG_ETM_VER)
etm[cpu]->save_restore_enabled = true;
else
pr_info("etm save-restore supported by TZ\n");
@@ -1615,8 +1616,10 @@ static int jtag_mm_etm_probe(struct platform_device *pdev, uint32_t cpu)
mutex_lock(&etmdata->mutex);
if (etmdata->init && !etmdata->enable) {
if (etm_arch_supported(etmdata->arch)) {
- if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) <
- TZ_DBG_ETM_VER)
+ u64 version = 0;
+
+ if (!scm_get_feat_version(TZ_DBG_ETM_FEAT_ID, &version)
+ && (version < TZ_DBG_ETM_VER))
etmdata->save_restore_enabled = true;
else
pr_info("etm save-restore supported by TZ\n");
diff --git a/drivers/soc/qcom/jtagv8.c b/drivers/soc/qcom/jtagv8.c
index 94c391eabaea..f09ccce8f9c3 100644
--- a/drivers/soc/qcom/jtagv8.c
+++ b/drivers/soc/qcom/jtagv8.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -974,7 +974,7 @@ static struct notifier_block jtag_cpu_pm_notifier = {
static int __init msm_jtag_dbg_init(void)
{
int ret;
-
+ u64 version = 0;
if (msm_jtag_fuse_apps_access_disabled())
return -EPERM;
@@ -982,7 +982,8 @@ static int __init msm_jtag_dbg_init(void)
dbg_init_arch_data();
if (dbg_arch_supported(dbg.arch)) {
- if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) < TZ_DBG_ETM_VER) {
+ if (!scm_get_feat_version(TZ_DBG_ETM_FEAT_ID, &version) &&
+ version < TZ_DBG_ETM_VER) {
dbg.save_restore_enabled = true;
} else {
pr_info("dbg save-restore supported by TZ\n");
diff --git a/drivers/soc/qcom/scm.c b/drivers/soc/qcom/scm.c
index 31de6e5c173c..cc3f5d6a7c89 100644
--- a/drivers/soc/qcom/scm.c
+++ b/drivers/soc/qcom/scm.c
@@ -1117,54 +1117,55 @@ int scm_is_call_available(u32 svc_id, u32 cmd_id)
ret = scm_call(SCM_SVC_INFO, IS_CALL_AVAIL_CMD, &svc_cmd,
sizeof(svc_cmd), &ret_val, sizeof(ret_val));
- if (ret)
- return ret;
+ if (!ret && ret_val)
+ return 1;
+ else
+ return 0;
- return ret_val;
}
desc.arginfo = SCM_ARGS(1);
desc.args[0] = SCM_SIP_FNID(svc_id, cmd_id);
+ desc.ret[0] = 0;
ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO, IS_CALL_AVAIL_CMD), &desc);
- if (ret)
- return ret;
+ if (!ret && desc.ret[0])
+ return 1;
+ else
+ return 0;
- return desc.ret[0];
}
EXPORT_SYMBOL(scm_is_call_available);
#define GET_FEAT_VERSION_CMD 3
-int scm_get_feat_version(u32 feat)
+int scm_get_feat_version(u32 feat, u64 *scm_ret)
{
struct scm_desc desc = {0};
int ret;
if (!is_scm_armv8()) {
if (scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD)) {
- u32 version;
- if (!scm_call(SCM_SVC_INFO, GET_FEAT_VERSION_CMD, &feat,
- sizeof(feat), &version, sizeof(version)))
- return version;
+ ret = scm_call(SCM_SVC_INFO, GET_FEAT_VERSION_CMD,
+ &feat, sizeof(feat), scm_ret, sizeof(*scm_ret));
+ return ret;
}
- return 0;
}
ret = scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD);
if (ret <= 0)
- return 0;
+ return -EAGAIN;
desc.args[0] = feat;
desc.arginfo = SCM_ARGS(1);
ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO, GET_FEAT_VERSION_CMD),
&desc);
- if (!ret)
- return desc.ret[0];
- return 0;
+ *scm_ret = desc.ret[0];
+
+ return ret;
}
EXPORT_SYMBOL(scm_get_feat_version);
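With this change the feature version comes back through a u64 pointer and the int return value only signals success, so callers such as jtagv8 and secure_buffer now check both instead of treating the return value itself as the version. A self-contained usage sketch with a stubbed scm_get_feat_version(); the TZ_DBG_ETM_* constant values below are placeholders, not the real definitions:

#include <stdint.h>
#include <stdio.h>

#define TZ_DBG_ETM_FEAT_ID	0x08		/* placeholder value */
#define TZ_DBG_ETM_VER		0x400000	/* placeholder value */

/* stand-in for the real SCM call */
static int scm_get_feat_version(uint32_t feat, uint64_t *scm_ret)
{
	(void)feat;
	*scm_ret = 0x300000;	/* pretend TZ reports an old version */
	return 0;
}

int main(void)
{
	uint64_t version = 0;
	int save_restore_enabled = 0;

	if (!scm_get_feat_version(TZ_DBG_ETM_FEAT_ID, &version) &&
	    version < TZ_DBG_ETM_VER)
		save_restore_enabled = 1;

	printf("save_restore_enabled=%d\n", save_restore_enabled);
	return 0;
}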
#define RESTORE_SEC_CFG 2
-int scm_restore_sec_cfg(u32 device_id, u32 spare, int *scm_ret)
+int scm_restore_sec_cfg(u32 device_id, u32 spare, u64 *scm_ret)
{
struct scm_desc desc = {0};
int ret;
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index 4307937d9f6d..e7a00cdb5b03 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -424,13 +424,14 @@ const char *msm_secure_vmid_to_string(int secure_vmid)
bool msm_secure_v2_is_supported(void)
{
- int version = scm_get_feat_version(FEATURE_ID_CP);
+ u64 version;
+ int ret = scm_get_feat_version(FEATURE_ID_CP, &version);
/*
* if the version is < 1.1.0 then dynamic buffer allocation is
* not supported
*/
- return version >= MAKE_CP_VERSION(1, 1, 0);
+ return (ret == 0) && (version >= MAKE_CP_VERSION(1, 1, 0));
}
static int __init alloc_secure_shared_memory(void)
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index dfb6d2f77af1..62d2667ee2f6 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -237,10 +237,12 @@ void ion_cma_heap_destroy(struct ion_heap *heap)
static void ion_secure_cma_free(struct ion_buffer *buffer)
{
- int ret = 0;
+ int i, ret = 0;
int source_vm;
int dest_vmid;
int dest_perms;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
struct ion_cma_buffer_info *info = buffer->priv_virt;
source_vm = get_secure_vmid(buffer->flags);
@@ -251,14 +253,17 @@ static void ion_secure_cma_free(struct ion_buffer *buffer)
dest_vmid = VMID_HLOS;
dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
- ret = hyp_assign_table(info->table, &source_vm, 1,
- &dest_vmid, &dest_perms, 1);
+ sgt = info->table;
+ ret = hyp_assign_table(sgt, &source_vm, 1, &dest_vmid, &dest_perms, 1);
if (ret) {
pr_err("%s: Not freeing memory since assign failed\n",
__func__);
return;
}
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ ClearPagePrivate(sg_page(sgt->sgl));
+
ion_cma_free(buffer);
}
@@ -266,11 +271,13 @@ static int ion_secure_cma_allocate(struct ion_heap *heap,
struct ion_buffer *buffer, unsigned long len,
unsigned long align, unsigned long flags)
{
- int ret = 0;
+ int i, ret = 0;
int source_vm;
int dest_vm;
int dest_perms;
struct ion_cma_buffer_info *info;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
source_vm = VMID_HLOS;
dest_vm = get_secure_vmid(flags);
@@ -292,12 +299,17 @@ static int ion_secure_cma_allocate(struct ion_heap *heap,
}
info = buffer->priv_virt;
- ret = hyp_assign_table(info->table, &source_vm, 1,
- &dest_vm, &dest_perms, 1);
+ sgt = info->table;
+ ret = hyp_assign_table(sgt, &source_vm, 1, &dest_vm, &dest_perms, 1);
if (ret) {
pr_err("%s: Assign call failed\n", __func__);
goto err;
}
+
+ /* Set the private bit to indicate that we've secured this */
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ SetPagePrivate(sg_page(sgt->sgl));
+
return ret;
err:
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 3f903d4776b4..19fe6c8cb25a 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -1579,6 +1579,12 @@ static void gsi_rndis_command_complete(struct usb_ep *ep,
struct f_gsi *rndis = req->context;
int status;
+ if (req->status != 0) {
+ log_event_err("RNDIS command completion error %d\n",
+ req->status);
+ return;
+ }
+
status = rndis_msg_parser(rndis->params, (u8 *) req->buf);
if (status < 0)
log_event_err("RNDIS command error %d, %d/%d",
diff --git a/drivers/usb/gadget/function/f_qc_rndis.c b/drivers/usb/gadget/function/f_qc_rndis.c
index 434af820e827..a28bcd084dc3 100644
--- a/drivers/usb/gadget/function/f_qc_rndis.c
+++ b/drivers/usb/gadget/function/f_qc_rndis.c
@@ -545,6 +545,12 @@ static void rndis_qc_command_complete(struct usb_ep *ep,
rndis_init_msg_type *buf;
u32 ul_max_xfer_size, dl_max_xfer_size;
+ if (req->status != 0) {
+ pr_err("%s: RNDIS command completion error %d\n",
+ __func__, req->status);
+ return;
+ }
+
spin_lock(&rndis_lock);
rndis = _rndis_qc;
if (!rndis || !rndis->notify || !rndis->notify->driver_data) {
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index 13888821109d..0917bc500023 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -463,6 +463,12 @@ static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req)
int status;
rndis_init_msg_type *buf;
+ if (req->status != 0) {
+ pr_err("%s: RNDIS command completion error:%d\n",
+ __func__, req->status);
+ return;
+ }
+
/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
// spin_lock(&dev->lock);
status = rndis_msg_parser(rndis->params, (u8 *) req->buf);
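All three RNDIS command-completion handlers gain the same guard: if the request completed with a non-zero status the buffer was never filled (the transfer failed or was cancelled), so it must not be parsed. A minimal sketch of the pattern with a stubbed request type (toy_request is not the real struct usb_request):

#include <stdio.h>

struct toy_request {
	int status;			/* 0 on success, -Exxx otherwise */
	void *buf;
};

static void toy_command_complete(struct toy_request *req)
{
	if (req->status != 0) {
		fprintf(stderr, "command completion error %d\n", req->status);
		return;			/* do not touch req->buf */
	}
	/* ... parse the RNDIS message in req->buf here ... */
	printf("parsing command\n");
}

int main(void)
{
	struct toy_request ok = { 0, (void *)"msg" };
	struct toy_request bad = { -104 /* -ECONNRESET */, 0 };

	toy_command_complete(&ok);
	toy_command_complete(&bad);
	return 0;
}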
diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c
index f9a71375c207..5f7e7c6bcde0 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.c
+++ b/drivers/video/fbdev/msm/mdss_dsi.c
@@ -3266,21 +3266,6 @@ end:
return rc;
}
-static void mdss_dsi_ctrl_validate_lane_swap_config(
- struct mdss_dsi_ctrl_pdata *ctrl)
-{
- struct mipi_panel_info *mipi = &ctrl->panel_data.panel_info.mipi;
-
- if (!mipi->data_lane0)
- ctrl->lane_map[DSI_LOGICAL_LANE_0] = DSI_PHYSICAL_LANE_INVALID;
- if (!mipi->data_lane1)
- ctrl->lane_map[DSI_LOGICAL_LANE_1] = DSI_PHYSICAL_LANE_INVALID;
- if (!mipi->data_lane2)
- ctrl->lane_map[DSI_LOGICAL_LANE_2] = DSI_PHYSICAL_LANE_INVALID;
- if (!mipi->data_lane3)
- ctrl->lane_map[DSI_LOGICAL_LANE_3] = DSI_PHYSICAL_LANE_INVALID;
-}
-
static int mdss_dsi_ctrl_validate_config(struct mdss_dsi_ctrl_pdata *ctrl)
{
int rc = 0;
@@ -3290,8 +3275,6 @@ static int mdss_dsi_ctrl_validate_config(struct mdss_dsi_ctrl_pdata *ctrl)
goto error;
}
- mdss_dsi_ctrl_validate_lane_swap_config(ctrl);
-
/*
* check to make sure that the byte interface clock is specified for
* DSI ctrl version 2 and above.
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
index 9ee0c27b225e..bda93bf0558a 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
@@ -272,20 +272,6 @@ static void hdmi_tx_cable_notify_work(struct work_struct *work)
mutex_unlock(&hdmi_ctrl->tx_lock);
} /* hdmi_tx_cable_notify_work */
-static bool hdmi_tx_is_cea_format(int mode)
-{
- bool cea_fmt;
-
- if ((mode > 0) && (mode <= HDMI_EVFRMT_END))
- cea_fmt = true;
- else
- cea_fmt = false;
-
- DEV_DBG("%s: %s\n", __func__, cea_fmt ? "Yes" : "No");
-
- return cea_fmt;
-}
-
static inline bool hdmi_tx_is_hdcp_enabled(struct hdmi_tx_ctrl *hdmi_ctrl)
{
return hdmi_ctrl->hdcp_feature_on &&
@@ -3192,9 +3178,7 @@ static int hdmi_tx_power_on(struct hdmi_tx_ctrl *hdmi_ctrl)
}
hdmi_ctrl->panel.vic = hdmi_ctrl->vic;
-
- if (!hdmi_tx_is_dvi_mode(hdmi_ctrl) &&
- hdmi_tx_is_cea_format(hdmi_ctrl->vic))
+ if (!hdmi_tx_is_dvi_mode(hdmi_ctrl))
hdmi_ctrl->panel.infoframe = true;
else
hdmi_ctrl->panel.infoframe = false;
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index 6fb32761a767..4f8c399762fe 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -1797,7 +1797,8 @@ static inline int mdss_mdp_irq_clk_register(struct mdss_data_type *mdata,
static void __mdss_restore_sec_cfg(struct mdss_data_type *mdata)
{
- int ret, scm_ret = 0;
+ int ret;
+ u64 scm_ret = 0;
if (test_bit(MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED, mdata->mdss_caps_map))
return;
@@ -1808,7 +1809,7 @@ static void __mdss_restore_sec_cfg(struct mdss_data_type *mdata)
ret = scm_restore_sec_cfg(SEC_DEVICE_MDSS, 0, &scm_ret);
if (ret || scm_ret)
- pr_warn("scm_restore_sec_cfg failed %d %d\n",
+ pr_warn("scm_restore_sec_cfg failed %d %llu\n",
ret, scm_ret);
__mdss_mdp_reg_access_clk_enable(mdata, false);
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index b584e353306d..6721be921d87 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -249,6 +249,8 @@ enum power_supply_property {
POWER_SUPPLY_PROP_HW_CURRENT_MAX,
POWER_SUPPLY_PROP_REAL_TYPE,
POWER_SUPPLY_PROP_PR_SWAP,
+ POWER_SUPPLY_PROP_CC_STEP,
+ POWER_SUPPLY_PROP_CC_STEP_SEL,
/* Local extensions of type int64_t */
POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
/* Properties of type `const char *' */
diff --git a/include/linux/wcnss_wlan.h b/include/linux/wcnss_wlan.h
index c93364b861d9..e0c47f8b06bf 100644
--- a/include/linux/wcnss_wlan.h
+++ b/include/linux/wcnss_wlan.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -38,6 +38,7 @@ struct vregs_level {
};
struct wcnss_wlan_config {
+ bool wcn_external_gpio_support;
int use_48mhz_xo;
int is_pronto_vadc;
int is_pronto_v3;
@@ -74,6 +75,8 @@ enum {
#define HAVE_WCNSS_CAL_DOWNLOAD 1
#define HAVE_CBC_DONE 1
#define HAVE_WCNSS_RX_BUFF_COUNT 1
+#define HAVE_WCNSS_SNOC_HIGH_FREQ_VOTING 1
+#define HAVE_WCNSS_5G_DISABLE 1
#define WLAN_MAC_ADDR_SIZE (6)
#define WLAN_RF_REG_ADDR_START_OFFSET 0x3
#define WLAN_RF_REG_DATA_START_OFFSET 0xf
@@ -132,12 +135,14 @@ void wcnss_riva_dump_pmic_regs(void);
int wcnss_xo_auto_detect_enabled(void);
u32 wcnss_get_wlan_rx_buff_count(void);
int wcnss_wlan_iris_xo_mode(void);
+int wcnss_wlan_dual_band_disabled(void);
void wcnss_flush_work(struct work_struct *work);
void wcnss_flush_delayed_work(struct delayed_work *dwork);
void wcnss_init_work(struct work_struct *work , void *callbackptr);
void wcnss_init_delayed_work(struct delayed_work *dwork , void *callbackptr);
int wcnss_get_iris_name(char *iris_version);
void wcnss_dump_stack(struct task_struct *task);
+void wcnss_snoc_vote(bool clk_chk_en);
#ifdef CONFIG_WCNSS_REGISTER_DUMP_ON_BITE
void wcnss_log_debug_regs_on_bite(void);
diff --git a/include/soc/qcom/scm.h b/include/soc/qcom/scm.h
index af389305207f..f0a3124dae00 100644
--- a/include/soc/qcom/scm.h
+++ b/include/soc/qcom/scm.h
@@ -121,9 +121,9 @@ extern s32 scm_call_atomic5_3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3,
extern u32 scm_get_version(void);
extern int scm_is_call_available(u32 svc_id, u32 cmd_id);
-extern int scm_get_feat_version(u32 feat);
+extern int scm_get_feat_version(u32 feat, u64 *scm_ret);
extern bool is_scm_armv8(void);
-extern int scm_restore_sec_cfg(u32 device_id, u32 spare, int *scm_ret);
+extern int scm_restore_sec_cfg(u32 device_id, u32 spare, u64 *scm_ret);
extern u32 scm_io_read(phys_addr_t address);
extern int scm_io_write(phys_addr_t address, u32 val);
extern bool scm_is_secure_device(void);
@@ -205,7 +205,7 @@ static inline int scm_is_call_available(u32 svc_id, u32 cmd_id)
return 0;
}
-static inline int scm_get_feat_version(u32 feat)
+static inline int scm_get_feat_version(u32 feat, u64 *scm_ret)
{
return 0;
}
@@ -215,7 +215,7 @@ static inline bool is_scm_armv8(void)
return true;
}
-static inline int scm_restore_sec_cfg(u32 device_id, u32 spare, int *scm_ret)
+static inline int scm_restore_sec_cfg(u32 device_id, u32 spare, u64 *scm_ret)
{
return 0;
}
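Both prototypes now return the SCM result through a u64 out-parameter, while the function's return value only reports whether the call itself failed. A minimal sketch of the updated calling pattern (EXAMPLE_FEAT_ID is a placeholder, not a real feature ID):

#include <linux/kernel.h>
#include <soc/qcom/scm.h>

#define EXAMPLE_FEAT_ID 0x0	/* placeholder feature identifier */

static int example_query_feat_version(u64 *version)
{
	int ret;

	/* The version is delivered through the u64 pointer; the return
	 * value only carries the call status.
	 */
	ret = scm_get_feat_version(EXAMPLE_FEAT_ID, version);
	if (ret)
		pr_err("scm_get_feat_version failed: %d\n", ret);

	return ret;
}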
diff --git a/include/uapi/linux/ipa_qmi_service_v01.h b/include/uapi/linux/ipa_qmi_service_v01.h
index 60867630e1a1..dc46ee0f29a2 100644
--- a/include/uapi/linux/ipa_qmi_service_v01.h
+++ b/include/uapi/linux/ipa_qmi_service_v01.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -47,6 +47,12 @@
#define QMI_IPA_MAX_FILTERS_EX_V01 128
#define QMI_IPA_MAX_PIPES_V01 20
#define QMI_IPA_MAX_APN_V01 8
+#define QMI_IPA_MAX_PER_CLIENTS_V01 64
+/* Currently only 1 is used, but the maximum is defined as 8 for
+ * scalability.
+ */
+#define QMI_IPA_MAX_CLIENT_DST_PIPES_V01 8
+#define QMI_IPA_MAX_UL_FIREWALL_RULES_V01 64
#define IPA_INT_MAX ((int)(~0U>>1))
#define IPA_INT_MIN (-IPA_INT_MAX - 1)
@@ -984,6 +990,16 @@ struct ipa_fltr_installed_notif_req_msg_v01 {
* failure, the Rule Ids in this list must be set to a reserved
* index (255).
*/
+
+ /* Optional */
+ /* List of destination pipe IDs. */
+ uint8_t dst_pipe_id_valid;
+ /* Must be set to true if dst_pipe_id is being passed. */
+ uint32_t dst_pipe_id_len;
+ /* Must be set to # of elements in dst_pipe_id. */
+ uint32_t dst_pipe_id[QMI_IPA_MAX_CLIENT_DST_PIPES_V01];
+ /* Provides the list of destination pipe IDs for a source pipe. */
+
}; /* Message */
/* Response Message; This is the message that is exchanged between the
@@ -1622,6 +1638,273 @@ struct ipa_install_fltr_rule_resp_ex_msg_v01 {
*/
}; /* Message */
+/*
+ * Request Message; Requests the modem IPA driver to enable or
+ * disable collection of per client statistics.
+ */
+struct ipa_enable_per_client_stats_req_msg_v01 {
+
+ /* Mandatory */
+	/* Collect statistics per client. */
+ uint8_t enable_per_client_stats;
+ /*
+ * Indicates whether to start or stop collecting
+ * per client statistics.
+ */
+}; /* Message */
+
+/*
+ * Response Message; Requests the modem IPA driver to enable or disable
+ * collection of per client statistics.
+ */
+struct ipa_enable_per_client_stats_resp_msg_v01 {
+
+ /* Mandatory */
+ /* Result Code */
+ struct ipa_qmi_response_type_v01 resp;
+ /* Standard response type. */
+}; /* Message */
+
+struct ipa_per_client_stats_info_type_v01 {
+
+ uint32_t client_id;
+ /*
+	 * ID of the client on the APPS processor side for which the modem
+	 * processor needs to send uplink/downlink statistics.
+ */
+
+ uint32_t src_pipe_id;
+ /*
+	 * IPA consumer pipe on which the client on the APPS side sent
+	 * uplink data to the modem.
+ */
+
+ uint64_t num_ul_ipv4_bytes;
+ /*
+ * Accumulated number of uplink IPv4 bytes for a client.
+ */
+
+ uint64_t num_ul_ipv6_bytes;
+ /*
+ * Accumulated number of uplink IPv6 bytes for a client.
+ */
+
+ uint64_t num_dl_ipv4_bytes;
+ /*
+ * Accumulated number of downlink IPv4 bytes for a client.
+ */
+
+ uint64_t num_dl_ipv6_bytes;
+ /*
+	 * Accumulated number of downlink IPv6 bytes for a client.
+ */
+
+
+ uint32_t num_ul_ipv4_pkts;
+ /*
+ * Accumulated number of uplink IPv4 packets for a client.
+ */
+
+ uint32_t num_ul_ipv6_pkts;
+ /*
+ * Accumulated number of uplink IPv6 packets for a client.
+ */
+
+ uint32_t num_dl_ipv4_pkts;
+ /*
+ * Accumulated number of downlink IPv4 packets for a client.
+ */
+
+ uint32_t num_dl_ipv6_pkts;
+ /*
+ * Accumulated number of downlink IPv6 packets for a client.
+ */
+}; /* Type */
+
+/*
+ * Request Message; Requests the modem IPA driver to provide statistics
+ * for a given client.
+ */
+struct ipa_get_stats_per_client_req_msg_v01 {
+
+ /* Mandatory */
+ /* Client id */
+ uint32_t client_id;
+ /*
+	 * ID of the client on the APPS processor side for which the modem
+	 * processor needs to send uplink/downlink statistics. If the client
+	 * ID is specified as 0xffffffff, Q6 sends the stats for all clients
+	 * of the specified source pipe.
+ */
+
+ /* Mandatory */
+ /* Source pipe id */
+ uint32_t src_pipe_id;
+ /*
+	 * IPA consumer pipe on which the client on the APPS side sent
+	 * uplink data to the modem. In the future, this can be extended to
+	 * accept 0xffffffff as the source pipe ID, in which case Q6 will
+	 * send the stats of all clients across all tethered pipes.
+ */
+
+ /* Optional */
+ /* Reset client statistics. */
+ uint8_t reset_stats_valid;
+ /* Must be set to true if reset_stats is being passed. */
+ uint8_t reset_stats;
+ /*
+ * Option to reset the statistics currently collected by modem for this
+ * particular client.
+ */
+}; /* Message */
+
+/*
+ * Response Message; Requests the modem IPA driver to provide statistics
+ * for a given client.
+ */
+struct ipa_get_stats_per_client_resp_msg_v01 {
+
+ /* Mandatory */
+ /* Result Code */
+ struct ipa_qmi_response_type_v01 resp;
+ /* Standard response type. */
+
+ /* Optional */
+	/* Per-client Statistics List */
+ uint8_t per_client_stats_list_valid;
+ /* Must be set to true if per_client_stats_list is being passed. */
+ uint32_t per_client_stats_list_len;
+ /* Must be set to # of elements in per_client_stats_list. */
+ struct ipa_per_client_stats_info_type_v01
+ per_client_stats_list[QMI_IPA_MAX_PER_CLIENTS_V01];
+ /*
+ * List of all per client statistics that are retrieved.
+ */
+}; /* Message */
+
+struct ipa_ul_firewall_rule_type_v01 {
+
+ enum ipa_ip_type_enum_v01 ip_type;
+ /*
+ * IP type for which this rule is applicable.
+ * The driver must identify the filter table (v6 or v4), and this
+ * field is essential for that. Values:
+ * - QMI_IPA_IP_TYPE_INVALID (0) -- Invalid IP type identifier
+ * - QMI_IPA_IP_TYPE_V4 (1) -- IPv4 type
+ * - QMI_IPA_IP_TYPE_V6 (2) -- IPv6 type
+ */
+
+ struct ipa_filter_rule_type_v01 filter_rule;
+ /*
+ * Rules in the filter specification. These rules are the
+ * ones that are matched against fields in the packet.
+ * Currently we only send IPv6 whitelist rules to Q6.
+ */
+}; /* Type */
+
+/*
+ * Request Message; Requests the remote IPA driver to install uplink
+ * firewall rules.
+ */
+struct ipa_configure_ul_firewall_rules_req_msg_v01 {
+
+ /* Optional */
+ /* Uplink Firewall Specification */
+ uint32_t firewall_rules_list_len;
+ /* Must be set to # of elements in firewall_rules_list. */
+ struct ipa_ul_firewall_rule_type_v01
+ firewall_rules_list[QMI_IPA_MAX_UL_FIREWALL_RULES_V01];
+ /*
+ * List of uplink firewall specifications of filters that must be
+ * installed.
+ */
+
+ uint32_t mux_id;
+ /*
+ * QMAP Mux ID. As a part of the QMAP protocol,
+ * several data calls may be multiplexed over the same physical
+ * transport channel. This identifier is used to identify one
+ * such data call. The maximum value for this identifier is 255.
+ */
+
+ /* Optional */
+ uint8_t disable_valid;
+	/* Must be set to true if disable is being passed. */
+ uint8_t disable;
+ /*
+ * Indicates whether uplink firewall needs to be enabled or disabled.
+ */
+
+ /* Optional */
+ uint8_t are_blacklist_filters_valid;
+ /* Must be set to true if are_blacklist_filters is being passed. */
+ uint8_t are_blacklist_filters;
+ /*
+ * Indicates whether the filters received as part of this message are
+	 * blacklist filters, i.e. uplink packets matching these rules are
+	 * dropped.
+ */
+}; /* Message */
+
+/*
+ * Response Message; Requests the remote IPA driver to install
+ * uplink firewall rules.
+ */
+struct ipa_configure_ul_firewall_rules_resp_msg_v01 {
+
+ /* Mandatory */
+ /* Result Code */
+ struct ipa_qmi_response_type_v01 resp;
+ /*
+	 * Standard response type. Contains the following data members:
+ * qmi_result_type -- QMI_RESULT_SUCCESS or QMI_RESULT_FAILURE
+ * qmi_error_type -- Error code. Possible error code values are
+ * described in the error codes section of each message definition.
+ */
+}; /* Message */
+
+enum ipa_ul_firewall_status_enum_v01 {
+ IPA_UL_FIREWALL_STATUS_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* To force a 32 bit signed enum. Do not change or use. */
+ QMI_IPA_UL_FIREWALL_STATUS_SUCCESS_V01 = 0,
+ /* Indicates that the uplink firewall rules
+ * are configured successfully.
+ */
+ QMI_IPA_UL_FIREWALL_STATUS_FAILURE_V01 = 1,
+ /* Indicates that the uplink firewall rules
+ * are not configured successfully.
+ */
+ IPA_UL_FIREWALL_STATUS_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum. Do not change or use. */
+};
+
+struct ipa_ul_firewall_config_result_type_v01 {
+
+ enum ipa_ul_firewall_status_enum_v01 is_success;
+ /*
+ * Indicates whether the uplink firewall rules are configured
+ * successfully.
+ */
+
+ uint32_t mux_id;
+ /*
+ * QMAP Mux ID. As a part of the QMAP protocol,
+ * several data calls may be multiplexed over the same physical
+ * transport channel. This identifier is used to identify one
+ * such data call. The maximum value for this identifier is 255.
+ */
+};
+
+/*
+ * Indication Message; Reports the result of installing the uplink
+ * firewall rules.
+ */
+struct ipa_configure_ul_firewall_rules_ind_msg_v01 {
+
+ struct ipa_ul_firewall_config_result_type_v01 result;
+}; /* Message */
+
+
/*Service Message Definition*/
#define QMI_IPA_INDICATION_REGISTER_REQ_V01 0x0020
#define QMI_IPA_INDICATION_REGISTER_RESP_V01 0x0020
@@ -1655,6 +1938,13 @@ struct ipa_install_fltr_rule_resp_ex_msg_v01 {
#define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01 0x0035
#define QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_V01 0x0037
#define QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_V01 0x0037
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01 0x0038
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_V01 0x0038
+#define QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01 0x0039
+#define QMI_IPA_GET_STATS_PER_CLIENT_RESP_V01 0x0039
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01 0x003A
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_V01 0x003A
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_V01 0x003A
/* add for max length*/
#define QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01 134
@@ -1663,7 +1953,7 @@ struct ipa_install_fltr_rule_resp_ex_msg_v01 {
#define QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01 7
#define QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01 22369
#define QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01 783
-#define QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01 834
+#define QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01 870
#define QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01 7
#define QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01 7
#define QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_MAX_MSG_LEN_V01 15
@@ -1696,6 +1986,15 @@ struct ipa_install_fltr_rule_resp_ex_msg_v01 {
#define QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_MAX_MSG_LEN_V01 22685
#define QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_MAX_MSG_LEN_V01 523
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_MAX_MSG_LEN_V01 4
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_MAX_MSG_LEN_V01 7
+
+#define QMI_IPA_GET_STATS_PER_CLIENT_REQ_MAX_MSG_LEN_V01 18
+#define QMI_IPA_GET_STATS_PER_CLIENT_RESP_MAX_MSG_LEN_V01 3595
+
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_MAX_MSG_LEN_V01 9875
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_MAX_MSG_LEN_V01 11
/* Service Object Accessor */
#endif/* IPA_QMI_SERVICE_V01_H */
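As an illustrative sketch only (not part of this patch), the new per-client statistics request could be filled and its response walked as follows. The wildcard client ID 0xffffffff and the QMI_IPA_MAX_PER_CLIENTS_V01 bound come from the definitions above; the helper names and the source-pipe value are assumptions.

#include <linux/ipa_qmi_service_v01.h>
#include <linux/string.h>

static void example_fill_stats_req(struct ipa_get_stats_per_client_req_msg_v01 *req,
				   uint32_t src_pipe)
{
	memset(req, 0, sizeof(*req));
	req->client_id = 0xffffffff;	/* all clients on this source pipe */
	req->src_pipe_id = src_pipe;
	req->reset_stats_valid = 1;
	req->reset_stats = 0;		/* query without clearing counters */
}

static uint64_t example_sum_dl_bytes(const struct ipa_get_stats_per_client_resp_msg_v01 *resp)
{
	uint64_t total = 0;
	uint32_t i;

	if (!resp->per_client_stats_list_valid)
		return 0;

	/* Bound the walk by both the reported length and the array size. */
	for (i = 0; i < resp->per_client_stats_list_len &&
		    i < QMI_IPA_MAX_PER_CLIENTS_V01; i++)
		total += resp->per_client_stats_list[i].num_dl_ipv4_bytes +
			 resp->per_client_stats_list[i].num_dl_ipv6_bytes;

	return total;
}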
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7fee87daac56..69f8f683138a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1693,7 +1693,33 @@ static int __perf_remove_from_context(void *info)
return 0;
}
-/*
+
+#ifdef CONFIG_SMP
+static void perf_retry_remove(struct perf_event *event,
+ struct remove_event *rep)
+{
+ int up_ret;
+ /*
+ * CPU was offline. Bring it online so we can
+ * gracefully exit a perf context.
+ */
+ up_ret = cpu_up(event->cpu);
+ if (!up_ret)
+ /* Try the remove call once again. */
+ cpu_function_call(event->cpu, __perf_remove_from_context,
+ rep);
+ else
+ pr_err("Failed to bring up CPU: %d, ret: %d\n",
+ event->cpu, up_ret);
+}
+#else
+static void perf_retry_remove(struct perf_event *event,
+ struct remove_event *rep)
+{
+}
+#endif
+
+/*
* Remove the event from a task's (or a CPU's) list of events.
*
* CPU events are removed with a smp call. For task events we only
@@ -1728,6 +1754,9 @@ static void __ref perf_remove_from_context(struct perf_event *event,
*/
ret = cpu_function_call(event->cpu, __perf_remove_from_context,
&re);
+ if (ret == -ENXIO)
+ perf_retry_remove(event, &re);
+
return;
}
@@ -3408,22 +3437,27 @@ u64 perf_event_read_local(struct perf_event *event)
static int perf_event_read(struct perf_event *event, bool group)
{
- int ret = 0;
+ int event_cpu, ret = 0;
/*
* If event is enabled and currently active on a CPU, update the
* value in the event structure:
*/
+ event_cpu = READ_ONCE(event->oncpu);
+
if (event->state == PERF_EVENT_STATE_ACTIVE &&
- !cpu_isolated(event->oncpu)) {
+ !cpu_isolated(event_cpu)) {
struct perf_read_data data = {
.event = event,
.group = group,
.ret = 0,
};
+
+ if ((unsigned int)event_cpu >= nr_cpu_ids)
+ return 0;
if (!event->attr.exclude_idle ||
- !per_cpu(is_idle, event->oncpu)) {
- smp_call_function_single(event->oncpu,
+ !per_cpu(is_idle, event_cpu)) {
+ smp_call_function_single(event_cpu,
__perf_event_read, &data, 1);
ret = data.ret;
}
@@ -7109,6 +7143,8 @@ static struct pmu perf_swevent = {
.start = perf_swevent_start,
.stop = perf_swevent_stop,
.read = perf_swevent_read,
+
+ .events_across_hotplug = 1,
};
#ifdef CONFIG_EVENT_TRACING
@@ -7230,6 +7266,8 @@ static struct pmu perf_tracepoint = {
.start = perf_swevent_start,
.stop = perf_swevent_stop,
.read = perf_swevent_read,
+
+ .events_across_hotplug = 1,
};
static inline void perf_tp_register(void)
@@ -7517,6 +7555,8 @@ static struct pmu perf_cpu_clock = {
.start = cpu_clock_event_start,
.stop = cpu_clock_event_stop,
.read = cpu_clock_event_read,
+
+ .events_across_hotplug = 1,
};
/*
@@ -7598,6 +7638,8 @@ static struct pmu perf_task_clock = {
.start = task_clock_event_start,
.stop = task_clock_event_stop,
.read = task_clock_event_read,
+
+ .events_across_hotplug = 1,
};
static void perf_pmu_nop_void(struct pmu *pmu)
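The perf_event_read() hunk above amounts to a defensive pattern for cross-CPU calls: snapshot event->oncpu once with READ_ONCE(), reject stale or out-of-range values before touching per-CPU state, and only then issue the IPI. A stand-alone sketch of that pattern (example_read() and example_fn() are hypothetical stand-ins):

#include <linux/perf_event.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

static void example_fn(void *info)
{
	/* Runs on the target CPU; body elided in this sketch. */
}

static int example_read(struct perf_event *event)
{
	int cpu = READ_ONCE(event->oncpu);

	/*
	 * oncpu is -1 when the event is not scheduled in, and the CPU may
	 * have gone away since it was recorded; the unsigned comparison
	 * against nr_cpu_ids rejects both cases.
	 */
	if ((unsigned int)cpu >= nr_cpu_ids)
		return 0;

	return smp_call_function_single(cpu, example_fn, event, 1);
}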
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 92ce5f4ccc26..7da5b674d16e 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -614,6 +614,8 @@ static struct pmu perf_breakpoint = {
.start = hw_breakpoint_start,
.stop = hw_breakpoint_stop,
.read = hw_breakpoint_pmu_read,
+
+ .events_across_hotplug = 1,
};
int __init init_hw_breakpoint(void)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index c03d51a017bf..ee095f4e7230 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1823,6 +1823,7 @@ static int find_lowest_rq_hmp(struct task_struct *task)
* the best one based on our affinity and topology.
*/
+retry:
for_each_sched_cluster(cluster) {
if (boost_on_big && cluster->capacity != max_possible_capacity)
continue;
@@ -1830,6 +1831,15 @@ static int find_lowest_rq_hmp(struct task_struct *task)
cpumask_and(&candidate_mask, &cluster->cpus, lowest_mask);
cpumask_andnot(&candidate_mask, &candidate_mask,
cpu_isolated_mask);
+ /*
+ * When placement boost is active, if there is no eligible CPU
+ * in the highest capacity cluster, we fallback to the other
+ * clusters. So clear the CPUs of the traversed cluster from
+ * the lowest_mask.
+ */
+ if (unlikely(boost_on_big))
+ cpumask_andnot(lowest_mask, lowest_mask,
+ &cluster->cpus);
if (cpumask_empty(&candidate_mask))
continue;
@@ -1869,6 +1879,11 @@ static int find_lowest_rq_hmp(struct task_struct *task)
break;
}
+ if (unlikely(boost_on_big && best_cpu == -1)) {
+ boost_on_big = 0;
+ goto retry;
+ }
+
return best_cpu;
}
#endif /* CONFIG_SCHED_HMP */
diff --git a/net/rmnet_data/rmnet_data_handlers.c b/net/rmnet_data/rmnet_data_handlers.c
index ae60f35b363d..b17556c346ce 100644
--- a/net/rmnet_data/rmnet_data_handlers.c
+++ b/net/rmnet_data/rmnet_data_handlers.c
@@ -476,10 +476,12 @@ static rx_handler_result_t _rmnet_map_ingress_handler(struct sk_buff *skb,
if (likely((ckresult == RMNET_MAP_CHECKSUM_OK)
|| (ckresult == RMNET_MAP_CHECKSUM_SKIPPED)))
skb->ip_summed |= CHECKSUM_UNNECESSARY;
- else if (ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION
- && ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT
- && ckresult != RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET
- && ckresult != RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET) {
+ else if (ckresult !=
+ RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION &&
+ ckresult != RMNET_MAP_CHECKSUM_VALIDATION_FAILED &&
+ ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT &&
+ ckresult != RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET &&
+ ckresult != RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET) {
rmnet_kfree_skb(skb,
RMNET_STATS_SKBFREE_INGRESS_BAD_MAP_CKSUM);
return RX_HANDLER_CONSUMED;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index d0d09c290ff8..2a9ec3e05c73 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -302,8 +302,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
[NL80211_ATTR_PID] = { .type = NLA_U32 },
[NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
- [NL80211_ATTR_PMKID] = { .type = NLA_BINARY,
- .len = WLAN_PMKID_LEN },
+ [NL80211_ATTR_PMKID] = { .len = WLAN_PMKID_LEN },
[NL80211_ATTR_DURATION] = { .type = NLA_U32 },
[NL80211_ATTR_COOKIE] = { .type = NLA_U64 },
[NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED },
@@ -359,6 +358,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 },
[NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 },
[NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 },
+ [NL80211_ATTR_LOCAL_MESH_POWER_MODE] = {. type = NLA_U32 },
[NL80211_ATTR_ACL_POLICY] = {. type = NLA_U32 },
[NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED },
[NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 },
@@ -6124,6 +6124,10 @@ static int validate_scan_freqs(struct nlattr *freqs)
struct nlattr *attr1, *attr2;
int n_channels = 0, tmp1, tmp2;
+ nla_for_each_nested(attr1, freqs, tmp1)
+ if (nla_len(attr1) != sizeof(u32))
+ return 0;
+
nla_for_each_nested(attr1, freqs, tmp1) {
n_channels++;
/*
diff --git a/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c
index e312a879b86a..1286d3185780 100644
--- a/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c
@@ -155,7 +155,7 @@ int msm_audio_effects_virtualizer_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"VIRT ENABLE", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_VIRTUALIZER;
*updt_params++ =
@@ -183,7 +183,7 @@ int msm_audio_effects_virtualizer_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"VIRT STRENGTH", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_VIRTUALIZER;
*updt_params++ =
@@ -211,7 +211,7 @@ int msm_audio_effects_virtualizer_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"VIRT OUT_TYPE", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_VIRTUALIZER;
*updt_params++ =
@@ -239,7 +239,7 @@ int msm_audio_effects_virtualizer_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"VIRT GAIN_ADJUST", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_VIRTUALIZER;
*updt_params++ =
@@ -318,7 +318,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_ENABLE", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -346,7 +346,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_MODE", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -374,7 +374,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_PRESET", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -402,7 +402,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_WET_MIX", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -430,7 +430,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_GAIN_ADJUST", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -458,7 +458,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_ROOM_LEVEL", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -486,7 +486,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_ROOM_HF_LEVEL", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -514,7 +514,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_DECAY_TIME", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -542,7 +542,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_DECAY_HF_RATIO", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -570,7 +570,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_REFLECTIONS_LEVEL", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -598,7 +598,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_REFLECTIONS_DELAY", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -626,7 +626,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_LEVEL", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -654,7 +654,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_DELAY", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -682,7 +682,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_DIFFUSION", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -710,7 +710,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"REVERB_DENSITY", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_REVERB;
*updt_params++ =
@@ -790,7 +790,7 @@ int msm_audio_effects_bass_boost_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"BASS_BOOST_ENABLE", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_BASS_BOOST;
*updt_params++ =
@@ -818,7 +818,7 @@ int msm_audio_effects_bass_boost_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"BASS_BOOST_MODE", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_BASS_BOOST;
*updt_params++ =
@@ -846,7 +846,7 @@ int msm_audio_effects_bass_boost_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"BASS_BOOST_STRENGTH", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_BASS_BOOST;
*updt_params++ =
@@ -924,7 +924,7 @@ int msm_audio_effects_pbe_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"PBE_ENABLE", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_PBE;
*updt_params++ =
@@ -950,7 +950,7 @@ int msm_audio_effects_pbe_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"PBE_PARAM", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_PBE;
*updt_params++ =
@@ -1035,7 +1035,7 @@ int msm_audio_effects_popless_eq_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"EQ_ENABLE", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
*updt_params++ =
@@ -1103,7 +1103,7 @@ int msm_audio_effects_popless_eq_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"EQ_CONFIG", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
*updt_params++ =
@@ -1154,7 +1154,7 @@ int msm_audio_effects_popless_eq_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"EQ_BAND_INDEX", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
*updt_params++ =
@@ -1186,7 +1186,7 @@ int msm_audio_effects_popless_eq_handler(struct audio_client *ac,
MAX_INBAND_PARAM_SZ,
"EQ_SINGLE_BAND_FREQ", rc);
if (rc != 0)
- break;
+ goto invalid_config;
*updt_params++ =
AUDPROC_MODULE_ID_POPLESS_EQUALIZER;
*updt_params++ =
@@ -1276,7 +1276,7 @@ static int __msm_audio_effects_volume_handler(struct audio_client *ac,
"VOLUME/VOLUME2_GAIN_2CH",
rc);
if (rc != 0)
- break;
+ goto invalid_config;
if (instance == SOFT_VOLUME_INSTANCE_2)
*updt_params++ =
ASM_MODULE_ID_VOL_CTRL2;
@@ -1325,7 +1325,7 @@ static int __msm_audio_effects_volume_handler(struct audio_client *ac,
"VOLUME/VOLUME2_GAIN_MASTER",
rc);
if (rc != 0)
- break;
+ goto invalid_config;
if (instance == SOFT_VOLUME_INSTANCE_2)
*updt_params++ =
ASM_MODULE_ID_VOL_CTRL2;