-rw-r--r--  Documentation/devicetree/bindings/net/wireless/qcom,wcn3990-wifi.txt | 17
-rw-r--r--  arch/arm/Kconfig | 2
-rw-r--r--  arch/arm/boot/dts/qcom/msm-audio.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/qcom/msm-smb138x.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-sde.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/qcom/sda636-pm660a-qrd-hdk.dts | 2
-rw-r--r--  arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts | 2
-rw-r--r--  arch/arm/boot/dts/qcom/sdm630-qrd.dtsi | 2
-rw-r--r--  arch/arm64/configs/msm-auto_defconfig | 20
-rw-r--r--  drivers/android/binder.c | 1
-rw-r--r--  drivers/crypto/msm/ice.c | 14
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 13
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_connector.c | 8
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_connector.h | 17
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_crtc.c | 13
-rw-r--r--  drivers/gpu/msm/adreno_a5xx.c | 25
-rw-r--r--  drivers/iio/adc/qcom-rradc.c | 53
-rw-r--r--  drivers/iommu/arm-smmu.c | 14
-rw-r--r--  drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c | 4
-rw-r--r--  drivers/mmc/card/block.c | 8
-rw-r--r--  drivers/mmc/core/core.c | 24
-rw-r--r--  drivers/mmc/core/sd.c | 5
-rw-r--r--  drivers/mmc/host/sdhci-msm.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath10k/snoc.c | 355
-rw-r--r--  drivers/net/wireless/ath/ath10k/snoc.h | 35
-rw-r--r--  drivers/net/wireless/ath/wil6210/ftm.c | 43
-rw-r--r--  drivers/net/wireless/ath/wil6210/ftm.h | 2
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c | 14
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c | 12
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c | 7
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_flt.c | 7
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_rt.c | 7
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c | 15
-rw-r--r--  drivers/power/supply/qcom/qpnp-fg-gen3.c | 1
-rw-r--r--  drivers/power/supply/qcom/step-chg-jeita.c | 14
-rw-r--r--  drivers/soc/qcom/glink.c | 4
-rw-r--r--  drivers/soc/qcom/spcom.c | 16
-rw-r--r--  drivers/soc/qcom/wcd-dsp-glink.c | 19
-rw-r--r--  drivers/usb/pd/policy_engine.c | 603
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp.c | 48
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp.h | 6
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp_aux.c | 9
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_tx.c | 144
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_tx.h | 4
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_util.c | 49
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_util.h | 10
-rw-r--r--  include/linux/mmc/core.h | 1
-rw-r--r--  include/linux/mmc/host.h | 1
-rw-r--r--  include/linux/rcutree.h | 2
-rw-r--r--  include/uapi/linux/msm_mdp_ext.h | 22
-rw-r--r--  kernel/cgroup.c | 5
-rw-r--r--  kernel/locking/osq_lock.c | 26
-rw-r--r--  kernel/rcu/tree.c | 44
-rw-r--r--  kernel/rcu/tree_plugin.h | 14
-rw-r--r--  kernel/sched/core.c | 9
-rw-r--r--  net/netfilter/xt_socket.c | 4
-rw-r--r--  net/wireless/db.txt | 2
-rw-r--r--  sound/soc/codecs/wcd-dsp-mgr.c | 3
-rw-r--r--  sound/soc/codecs/wcd9335.c | 26
-rw-r--r--  sound/soc/codecs/wcd934x/wcd934x.c | 28
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c | 9
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-lsm-client.c | 2
64 files changed, 1585 insertions, 301 deletions
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,wcn3990-wifi.txt b/Documentation/devicetree/bindings/net/wireless/qcom,wcn3990-wifi.txt
index acc850773210..c1a8d1bd697d 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,wcn3990-wifi.txt
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,wcn3990-wifi.txt
@@ -11,13 +11,24 @@ Required properties:
- compatible: "qcom,wcn3990-wifi";
- reg: Memory regions defined as starting address and size
- reg-names: Names of the memory regions defined in reg entry
+ - clocks: List of clock phandles
+ - clock-names: List of clock names corresponding to the "clocks" property
- interrupts: Copy engine interrupt table
+Optional properties:
+ - <supply-name>-supply: phandle to the regulator device tree node
+ optional "supply-name" is "vdd-0.8-cx-mx".
+ - qcom,<supply>-config: Specifies voltage levels for supply. Should be
+ specified in pairs (min, max), units uV. There can
+ be optional load in uA and Regulator settle delay in
+ uS.
Example:
msm_ath10k_wlan: qcom,msm_ath10k_wlan@18800000 {
compatible = "qcom,wcn3990-wifi";
reg = <0x18800000 0x800000>;
reg-names = "membase";
+ clocks = <&clock_gcc clk_aggre2_noc_clk>;
+ clock-names = "smmu_aggre2_noc_clk";
interrupts =
<0 130 0 /* CE0 */ >,
<0 131 0 /* CE1 */ >,
@@ -31,4 +42,10 @@ Example:
<0 139 0 /* CE9 */ >,
<0 140 0 /* CE10 */ >,
<0 141 0 /* CE11 */ >;
+ vdd-0.8-cx-mx-supply = <&pm8998_l5>;
+ vdd-1.8-xo-supply = <&pm8998_l7_pin_ctrl>;
+ vdd-1.3-rfa-supply = <&pm8998_l17_pin_ctrl>;
+ vdd-3.3-ch0-supply = <&pm8998_l25_pin_ctrl>;
+ qcom,vdd-0.8-cx-mx-config = <800000 800000>;
+ qcom,vdd-3.3-ch0-config = <3104000 3312000>;
};
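
Note on the binding text above: each qcom,<supply>-config entry is a <min max> voltage pair in uV, optionally followed by a load in uA and a regulator settle delay in uS. A minimal sketch of how a driver might parse such a property with of_get_property() is shown here; the struct and function names are illustrative assumptions only, while the real consumer added by this series is ath10k_get_vreg_info() in drivers/net/wireless/ath/ath10k/snoc.c.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/of.h>

/* Hypothetical holder for one supply's parsed configuration. */
struct wlan_vreg_cfg {
	u32 min_uv;
	u32 max_uv;
	u32 load_ua;		/* optional third cell, 0 if absent */
	u32 settle_delay_us;	/* optional fourth cell, 0 if absent */
};

static int wlan_parse_vreg_cfg(struct device_node *np, const char *supply,
			       struct wlan_vreg_cfg *cfg)
{
	char prop_name[32];
	const __be32 *prop;
	int len;

	snprintf(prop_name, sizeof(prop_name), "qcom,%s-config", supply);

	prop = of_get_property(np, prop_name, &len);
	if (!prop || len < 2 * sizeof(__be32))
		return -EINVAL;	/* at least the <min max> pair is required */

	cfg->min_uv = be32_to_cpup(&prop[0]);
	cfg->max_uv = be32_to_cpup(&prop[1]);
	cfg->load_ua = (len >= 3 * sizeof(__be32)) ? be32_to_cpup(&prop[2]) : 0;
	cfg->settle_delay_us =
		(len >= 4 * sizeof(__be32)) ? be32_to_cpup(&prop[3]) : 0;

	return 0;
}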
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 588393412271..22b546e0f845 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1776,7 +1776,7 @@ source "mm/Kconfig"
choice
prompt "Virtual Memory Reclaim"
- default NO_VM_RECLAIM
+ default ENABLE_VMALLOC_SAVING
help
Select the method of reclaiming virtual memory
diff --git a/arch/arm/boot/dts/qcom/msm-audio.dtsi b/arch/arm/boot/dts/qcom/msm-audio.dtsi
index 3a7514397139..75aea7280e6c 100644
--- a/arch/arm/boot/dts/qcom/msm-audio.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-audio.dtsi
@@ -383,6 +383,7 @@
qcom,msm-cpudai-auxpcm-data = <0>, <0>;
qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>, <2048000>;
qcom,msm-auxpcm-interface = "primary";
+ qcom,msm-cpudai-afe-clk-ver = <2>;
};
dai_sec_auxpcm: qcom,msm-sec-auxpcm {
diff --git a/arch/arm/boot/dts/qcom/msm-smb138x.dtsi b/arch/arm/boot/dts/qcom/msm-smb138x.dtsi
index c156e91dfcf9..fa21dd7995eb 100644
--- a/arch/arm/boot/dts/qcom/msm-smb138x.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-smb138x.dtsi
@@ -88,7 +88,7 @@
};
};
- smb138x_parallel_slave: qcom,smb138x-parallel-slave@1000 {
+ smb1381_charger: qcom,smb1381-charger@1000 {
compatible = "qcom,smb138x-parallel-slave";
qcom,pmic-revid = <&smb138x_revid>;
reg = <0x1000 0x700>;
@@ -129,7 +129,7 @@
};
};
-&smb138x_parallel_slave {
+&smb1381_charger {
smb138x_vbus: qcom,smb138x-vbus {
status = "disabled";
regulator-name = "smb138x-vbus";
diff --git a/arch/arm/boot/dts/qcom/msm8996-sde.dtsi b/arch/arm/boot/dts/qcom/msm8996-sde.dtsi
index b0688668e667..11c45606f6c2 100644
--- a/arch/arm/boot/dts/qcom/msm8996-sde.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-sde.dtsi
@@ -183,7 +183,7 @@
};
smmu_kms_unsec: qcom,smmu_kms_unsec_cb {
- compatible = "qcom,smmu_kms_unsec";
+ compatible = "qcom,smmu_sde_unsec";
iommus = <&mdp_smmu 0>;
};
diff --git a/arch/arm/boot/dts/qcom/msm8998.dtsi b/arch/arm/boot/dts/qcom/msm8998.dtsi
index eafa6b841c17..76e3282d327e 100644
--- a/arch/arm/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998.dtsi
@@ -3106,6 +3106,8 @@
compatible = "qcom,wcn3990-wifi";
reg = <0x18800000 0x800000>;
reg-names = "membase";
+ clocks = <&clock_gcc clk_rf_clk2_pin>;
+ clock-names = "cxo_ref_clk_pin";
interrupts =
<0 413 0 /* CE0 */ >,
<0 414 0 /* CE1 */ >,
@@ -3119,6 +3121,12 @@
<0 423 0 /* CE9 */ >,
<0 424 0 /* CE10 */ >,
<0 425 0 /* CE11 */ >;
+ vdd-0.8-cx-mx-supply = <&pm8998_l5>;
+ vdd-1.8-xo-supply = <&pm8998_l7_pin_ctrl>;
+ vdd-1.3-rfa-supply = <&pm8998_l17_pin_ctrl>;
+ vdd-3.3-ch0-supply = <&pm8998_l25_pin_ctrl>;
+ qcom,vdd-0.8-cx-mx-config = <800000 800000>;
+ qcom,vdd-3.3-ch0-config = <3104000 3312000>;
};
qcom,icnss@18800000 {
diff --git a/arch/arm/boot/dts/qcom/sda636-pm660a-qrd-hdk.dts b/arch/arm/boot/dts/qcom/sda636-pm660a-qrd-hdk.dts
index f4a9592bf4ff..ccc1be75f39b 100644
--- a/arch/arm/boot/dts/qcom/sda636-pm660a-qrd-hdk.dts
+++ b/arch/arm/boot/dts/qcom/sda636-pm660a-qrd-hdk.dts
@@ -98,7 +98,7 @@
};
};
- smb138x_parallel_slave: qcom,smb138x-parallel-slave@1000 {
+ smb1381_charger: qcom,smb1381-charger@1000 {
compatible = "qcom,smb138x-parallel-slave";
qcom,pmic-revid = <&smb138x_revid>;
reg = <0x1000 0x700>;
diff --git a/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts b/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts
index 5f44b4c32c98..0d7b6c0341b5 100644
--- a/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts
+++ b/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts
@@ -98,7 +98,7 @@
};
};
- smb138x_parallel_slave: qcom,smb138x-parallel-slave@1000 {
+ smb1381_charger: qcom,smb1381-charger@1000 {
compatible = "qcom,smb138x-parallel-slave";
qcom,pmic-revid = <&smb138x_revid>;
reg = <0x1000 0x700>;
diff --git a/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi b/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi
index af3c5d1b51da..384e24d221c4 100644
--- a/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi
@@ -92,7 +92,7 @@
};
};
- smb138x_parallel_slave: qcom,smb138x-parallel-slave@1000 {
+ smb1381_charger: qcom,smb1381-charger@1000 {
compatible = "qcom,smb138x-parallel-slave";
qcom,pmic-revid = <&smb138x_revid>;
reg = <0x1000 0x700>;
diff --git a/arch/arm64/configs/msm-auto_defconfig b/arch/arm64/configs/msm-auto_defconfig
index 85916a545d95..8f8e696f8866 100644
--- a/arch/arm64/configs/msm-auto_defconfig
+++ b/arch/arm64/configs/msm-auto_defconfig
@@ -1,4 +1,5 @@
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_FHANDLE=y
CONFIG_AUDIT=y
# CONFIG_AUDITSYSCALL is not set
CONFIG_NO_HZ=y
@@ -232,6 +233,8 @@ CONFIG_CFG80211_INTERNAL_REGDB=y
CONFIG_RFKILL=y
CONFIG_IPC_ROUTER=y
CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
CONFIG_DMA_CMA=y
CONFIG_ZRAM=y
@@ -311,7 +314,6 @@ CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_MSM_ADSPRPC=y
CONFIG_MSM_RDBG=m
-CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MUX=y
CONFIG_I2C_QUP=y
@@ -348,7 +350,6 @@ CONFIG_THERMAL_TSENS8974=y
CONFIG_THERMAL_QPNP_ADC_TM=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_WCD9335_CODEC=y
-CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_FAN53555=y
CONFIG_REGULATOR_MAX20010=y
@@ -380,15 +381,11 @@ CONFIG_MSM_AIS_CAMERA_SENSOR=y
# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
CONFIG_VIDEO_ADV7481=y
CONFIG_QCOM_KGSL=y
+CONFIG_DRM=y
CONFIG_MSM_BA_V4L2=y
-CONFIG_FB=y
-CONFIG_FB_MSM=y
-CONFIG_FB_MSM_MDSS=y
-CONFIG_FB_MSM_MDSS_WRITEBACK=y
-CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
-CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
+CONFIG_MSM_DBA=y
+CONFIG_MSM_DBA_ADV7533=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_BACKLIGHT_GENERIC=m
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
@@ -472,7 +469,7 @@ CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ANDROID_TIMED_GPIO=y
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
-CONFIG_SW_SYNC_USER=y
+CONFIG_SYNC=y
CONFIG_ION=y
CONFIG_ION_MSM=y
CONFIG_QPNP_REVID=y
@@ -521,7 +518,6 @@ CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
CONFIG_MSM_GLINK_PKT=y
CONFIG_MSM_SPM=y
CONFIG_MSM_L2_SPM=y
-CONFIG_QCOM_SCM=y
CONFIG_QCOM_SCM_XPU=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
@@ -576,7 +572,6 @@ CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_FUSE_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_ECRYPT_FS=y
CONFIG_ECRYPT_FS_MESSAGING=y
@@ -593,6 +588,7 @@ CONFIG_DEBUG_OBJECTS_TIMERS=y
CONFIG_DEBUG_OBJECTS_WORK=y
CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_SLUB_DEBUG_ON=y
CONFIG_DEBUG_KMEMLEAK=y
CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
CONFIG_DEBUG_STACK_USAGE=y
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 13598d807de0..34f45abe0181 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -3253,6 +3253,7 @@ static void binder_transaction(struct binder_proc *proc,
err_dead_proc_or_thread:
return_error = BR_DEAD_REPLY;
return_error_line = __LINE__;
+ binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
index 490f8d9ddb9f..68b6a26f00b8 100644
--- a/drivers/crypto/msm/ice.c
+++ b/drivers/crypto/msm/ice.c
@@ -869,7 +869,7 @@ static int qcom_ice_restore_key_config(struct ice_device *ice_dev)
static int qcom_ice_init_clocks(struct ice_device *ice)
{
int ret = -EINVAL;
- struct ice_clk_info *clki;
+ struct ice_clk_info *clki = NULL;
struct device *dev = ice->pdev;
struct list_head *head = &ice->clk_list_head;
@@ -913,7 +913,7 @@ out:
static int qcom_ice_enable_clocks(struct ice_device *ice, bool enable)
{
int ret = 0;
- struct ice_clk_info *clki;
+ struct ice_clk_info *clki = NULL;
struct device *dev = ice->pdev;
struct list_head *head = &ice->clk_list_head;
@@ -1590,12 +1590,14 @@ struct platform_device *qcom_ice_get_pdevice(struct device_node *node)
if (ice_dev->pdev->of_node == node) {
pr_info("%s: found ice device %pK\n", __func__,
ice_dev);
+ ice_pdev = to_platform_device(ice_dev->pdev);
break;
}
}
- ice_pdev = to_platform_device(ice_dev->pdev);
- pr_info("%s: matching platform device %pK\n", __func__, ice_pdev);
+ if (ice_pdev)
+ pr_info("%s: matching platform device %pK\n", __func__,
+ ice_pdev);
out:
return ice_pdev;
}
@@ -1615,11 +1617,11 @@ static struct ice_device *get_ice_device_from_storage_type
if (!strcmp(ice_dev->ice_instance_type, storage_type)) {
pr_debug("%s: found ice device %pK\n",
__func__, ice_dev);
- break;
+ return ice_dev;
}
}
out:
- return ice_dev;
+ return NULL;
}
static int enable_ice_setup(struct ice_device *ice_dev)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 7d660ba56594..9dbd86eff816 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -424,7 +424,7 @@ static struct hdmi_platform_config hdmi_tx_8994_config = {
static struct hdmi_platform_config hdmi_tx_8996_config = {
.phy_init = NULL,
HDMI_CFG(pwr_reg, none),
- HDMI_CFG(hpd_reg, none),
+ HDMI_CFG(hpd_reg, 8x74),
HDMI_CFG(pwr_clk, 8x74),
HDMI_CFG(hpd_clk, 8x74),
.hpd_freq = hpd_clk_freq_8x74,
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 83b34a071ced..c61753311771 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -185,9 +185,14 @@ static void vblank_ctrl_worker(struct kthread_work *work)
struct msm_kms *kms = priv->kms;
struct vblank_event *vbl_ev, *tmp;
unsigned long flags;
+ struct kthread_worker *worker = work->worker;
+ struct msm_drm_commit *commit = container_of(worker,
+ struct msm_drm_commit, worker);
spin_lock_irqsave(&vbl_ctrl->lock, flags);
list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
+ if (vbl_ev->crtc_id != commit->crtc_id)
+ continue;
list_del(&vbl_ev->node);
spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
@@ -673,10 +678,10 @@ static int msm_open(struct drm_device *dev, struct drm_file *file)
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- if (ctx)
+ if (ctx) {
INIT_LIST_HEAD(&ctx->counters);
-
- msm_submitqueue_init(ctx);
+ msm_submitqueue_init(ctx);
+ }
file->driver_priv = ctx;
@@ -2146,7 +2151,9 @@ static int msm_pdev_probe(struct platform_device *pdev)
#ifdef CONFIG_OF
add_components(&pdev->dev, &match, "connectors");
+#ifndef CONFIG_QCOM_KGSL
add_components(&pdev->dev, &match, "gpus");
+#endif
#else
/* For non-DT case, it kinda sucks. We don't actually have a way
* to know whether or not we are waiting for certain devices (or if
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 90a6b19ccf40..6a741a7ce0f6 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -534,13 +534,7 @@ static int sde_connector_atomic_get_property(struct drm_connector *connector,
idx = msm_property_index(&c_conn->property_info, property);
if (idx == CONNECTOR_PROP_RETIRE_FENCE)
- /*
- * Set a fence offset if not a virtual connector, so that the
- * fence signals after one additional commit rather than at the
- * end of the current one.
- */
- rc = sde_fence_create(&c_conn->retire_fence, val,
- c_conn->connector_type != DRM_MODE_CONNECTOR_VIRTUAL);
+ rc = sde_fence_create(&c_conn->retire_fence, val, 0);
else
/* get cached property value */
rc = msm_property_atomic_get(&c_conn->property_info,
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index f9b8c3966d74..0f563ac25da8 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -390,5 +390,22 @@ enum sde_csc_type sde_connector_get_csc_type(struct drm_connector *conn);
*/
int sde_connector_get_dpms(struct drm_connector *connector);
+/**
+ * sde_connector_needs_offset - adjust the output fence offset based on
+ * display type
+ * @connector: Pointer to drm connector object
+ * Returns: true if offset is required, false for all other cases.
+ */
+static inline bool sde_connector_needs_offset(struct drm_connector *connector)
+{
+ struct sde_connector *c_conn;
+
+ if (!connector)
+ return false;
+
+ c_conn = to_sde_connector(connector);
+ return (c_conn->connector_type != DRM_MODE_CONNECTOR_VIRTUAL);
+}
+
#endif /* _SDE_CONNECTOR_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index a0417a0dd12e..30e9d688396f 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -1674,19 +1674,28 @@ static int sde_crtc_atomic_get_property(struct drm_crtc *crtc,
struct sde_crtc *sde_crtc;
struct sde_crtc_state *cstate;
int i, ret = -EINVAL;
+ bool conn_offset = 0;
if (!crtc || !state) {
SDE_ERROR("invalid argument(s)\n");
} else {
sde_crtc = to_sde_crtc(crtc);
cstate = to_sde_crtc_state(state);
+
+ for (i = 0; i < cstate->num_connectors; ++i) {
+ conn_offset = sde_connector_needs_offset(
+ cstate->connectors[i]);
+ if (conn_offset)
+ break;
+ }
+
i = msm_property_index(&sde_crtc->property_info, property);
if (i == CRTC_PROP_OUTPUT_FENCE) {
int offset = sde_crtc_get_property(cstate,
CRTC_PROP_OUTPUT_FENCE_OFFSET);
- ret = sde_fence_create(
- &sde_crtc->output_fence, val, offset);
+ ret = sde_fence_create(&sde_crtc->output_fence, val,
+ offset + conn_offset);
if (ret)
SDE_ERROR("fence create failed\n");
} else {
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 3fb13c7a0814..78f74b883877 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -65,8 +65,8 @@ static const struct adreno_vbif_platform a5xx_vbif_platforms[] = {
};
static void a5xx_irq_storm_worker(struct work_struct *work);
-static int _read_fw2_block_header(uint32_t *header, uint32_t id,
- uint32_t major, uint32_t minor);
+static int _read_fw2_block_header(uint32_t *header, uint32_t remain,
+ uint32_t id, uint32_t major, uint32_t minor);
static void a5xx_gpmu_reset(struct work_struct *work);
static int a5xx_gpmu_init(struct adreno_device *adreno_dev);
@@ -709,6 +709,7 @@ static int _load_gpmu_firmware(struct adreno_device *adreno_dev)
if (data[1] != GPMU_FIRMWARE_ID)
goto err;
ret = _read_fw2_block_header(&data[2],
+ data[0] - 2,
GPMU_FIRMWARE_ID,
adreno_dev->gpucore->gpmu_major,
adreno_dev->gpucore->gpmu_minor);
@@ -1231,8 +1232,8 @@ void a5xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
kgsl_regwrite(device, A5XX_RBBM_ISDB_CNT, on ? 0x00000182 : 0x00000180);
}
-static int _read_fw2_block_header(uint32_t *header, uint32_t id,
- uint32_t major, uint32_t minor)
+static int _read_fw2_block_header(uint32_t *header, uint32_t remain,
+ uint32_t id, uint32_t major, uint32_t minor)
{
uint32_t header_size;
int i = 1;
@@ -1242,7 +1243,8 @@ static int _read_fw2_block_header(uint32_t *header, uint32_t id,
header_size = header[0];
/* Headers have limited size and always occur as pairs of words */
- if (header_size > MAX_HEADER_SIZE || header_size % 2)
+ if (header_size > MAX_HEADER_SIZE || header_size >= remain ||
+ header_size % 2 || header_size == 0)
return -EINVAL;
/* Sequences must have an identifying id first thing in their header */
if (id == GPMU_SEQUENCE_ID) {
@@ -1306,8 +1308,8 @@ static void _load_regfile(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
const struct firmware *fw;
- uint32_t block_size = 0, block_total = 0, fw_size;
- uint32_t *block;
+ uint64_t block_size = 0, block_total = 0;
+ uint32_t fw_size, *block;
int ret = -EINVAL;
if (!adreno_dev->gpucore->regfw_name)
@@ -1329,7 +1331,8 @@ static void _load_regfile(struct adreno_device *adreno_dev)
/* All offset numbers calculated from file description */
while (block_total < fw_size) {
block_size = block[0];
- if (block_size >= fw_size || block_size < 2)
+ if (((block_total + block_size) >= fw_size)
+ || block_size < 5)
goto err;
if (block[1] != GPMU_SEQUENCE_ID)
goto err;
@@ -1337,6 +1340,7 @@ static void _load_regfile(struct adreno_device *adreno_dev)
/* For now ignore blocks other than the LM sequence */
if (block[4] == LM_SEQUENCE_ID) {
ret = _read_fw2_block_header(&block[2],
+ block_size - 2,
GPMU_SEQUENCE_ID,
adreno_dev->gpucore->lm_major,
adreno_dev->gpucore->lm_minor);
@@ -1344,6 +1348,9 @@ static void _load_regfile(struct adreno_device *adreno_dev)
goto err;
adreno_dev->lm_fw = fw;
+
+ if (block[2] > (block_size - 2))
+ goto err;
adreno_dev->lm_sequence = block + block[2] + 3;
adreno_dev->lm_size = block_size - block[2] - 2;
}
@@ -1356,7 +1363,7 @@ static void _load_regfile(struct adreno_device *adreno_dev)
err:
release_firmware(fw);
KGSL_PWR_ERR(device,
- "Register file failed to load sz=%d bsz=%d header=%d\n",
+ "Register file failed to load sz=%d bsz=%llu header=%d\n",
fw_size, block_size, ret);
return;
}
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index 28ab4e52dab5..b3aa73f1a5a1 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -22,6 +22,7 @@
#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/qpnp/qpnp-revid.h>
+#include <linux/power_supply.h>
#define FG_ADC_RR_EN_CTL 0x46
#define FG_ADC_RR_SKIN_TEMP_LSB 0x50
@@ -192,8 +193,7 @@
#define FG_RR_ADC_STS_CHANNEL_READING_MASK 0x3
#define FG_RR_ADC_STS_CHANNEL_STS 0x2
-#define FG_RR_CONV_CONTINUOUS_TIME_MIN_US 50000
-#define FG_RR_CONV_CONTINUOUS_TIME_MAX_US 51000
+#define FG_RR_CONV_CONTINUOUS_TIME_MIN_MS 50
#define FG_RR_CONV_MAX_RETRY_CNT 50
#define FG_RR_TP_REV_VERSION1 21
#define FG_RR_TP_REV_VERSION2 29
@@ -235,6 +235,7 @@ struct rradc_chip {
struct device_node *revid_dev_node;
struct pmic_revid_data *pmic_fab_id;
int volt;
+ struct power_supply *usb_trig;
};
struct rradc_channels {
@@ -726,6 +727,24 @@ static int rradc_disable_continuous_mode(struct rradc_chip *chip)
return rc;
}
+static bool rradc_is_usb_present(struct rradc_chip *chip)
+{
+ union power_supply_propval pval;
+ int rc;
+ bool usb_present = false;
+
+ if (!chip->usb_trig) {
+ pr_debug("USB property not present\n");
+ return usb_present;
+ }
+
+ rc = power_supply_get_property(chip->usb_trig,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ usb_present = (rc < 0) ? 0 : pval.intval;
+
+ return usb_present;
+}
+
static int rradc_check_status_ready_with_retry(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u8 *buf, u16 status)
{
@@ -745,8 +764,18 @@ static int rradc_check_status_ready_with_retry(struct rradc_chip *chip,
(retry_cnt < FG_RR_CONV_MAX_RETRY_CNT)) {
pr_debug("%s is not ready; nothing to read:0x%x\n",
rradc_chans[prop->channel].datasheet_name, buf[0]);
- usleep_range(FG_RR_CONV_CONTINUOUS_TIME_MIN_US,
- FG_RR_CONV_CONTINUOUS_TIME_MAX_US);
+
+ if (((prop->channel == RR_ADC_CHG_TEMP) ||
+ (prop->channel == RR_ADC_SKIN_TEMP) ||
+ (prop->channel == RR_ADC_USBIN_I) ||
+ (prop->channel == RR_ADC_DIE_TEMP)) &&
+ ((!rradc_is_usb_present(chip)))) {
+ pr_debug("USB not present for %d\n", prop->channel);
+ rc = -ENODATA;
+ break;
+ }
+
+ msleep(FG_RR_CONV_CONTINUOUS_TIME_MIN_MS);
retry_cnt++;
rc = rradc_read(chip, status, buf, 1);
if (rc < 0) {
@@ -764,7 +793,7 @@ static int rradc_check_status_ready_with_retry(struct rradc_chip *chip,
static int rradc_read_channel_with_continuous_mode(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u8 *buf)
{
- int rc = 0;
+ int rc = 0, ret = 0;
u16 status = 0;
rc = rradc_enable_continuous_mode(chip);
@@ -777,23 +806,25 @@ static int rradc_read_channel_with_continuous_mode(struct rradc_chip *chip,
rc = rradc_read(chip, status, buf, 1);
if (rc < 0) {
pr_err("status read failed:%d\n", rc);
- return rc;
+ ret = rc;
+ goto disable;
}
rc = rradc_check_status_ready_with_retry(chip, prop,
buf, status);
if (rc < 0) {
pr_err("Status read failed:%d\n", rc);
- return rc;
+ ret = rc;
}
+disable:
rc = rradc_disable_continuous_mode(chip);
if (rc < 0) {
pr_err("Failed to switch to non continuous mode\n");
- return rc;
+ ret = rc;
}
- return rc;
+ return ret;
}
static int rradc_enable_batt_id_channel(struct rradc_chip *chip, bool enable)
@@ -1149,6 +1180,10 @@ static int rradc_probe(struct platform_device *pdev)
indio_dev->channels = chip->iio_chans;
indio_dev->num_channels = chip->nchannels;
+ chip->usb_trig = power_supply_get_by_name("usb");
+ if (!chip->usb_trig)
+ pr_debug("Error obtaining usb power supply\n");
+
return devm_iio_device_register(dev, indio_dev);
}
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index b30739de79e7..62230d30d101 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1956,10 +1956,20 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
- arm_smmu_tlb_inv_context(smmu_domain);
-
arm_smmu_disable_clocks(smmu_domain->smmu);
+ if (smmu_domain->pgtbl_ops) {
+ free_io_pgtable_ops(smmu_domain->pgtbl_ops);
+ /* unassign any freed page table memory */
+ if (arm_smmu_is_master_side_secure(smmu_domain)) {
+ arm_smmu_secure_domain_lock(smmu_domain);
+ arm_smmu_secure_pool_destroy(smmu_domain);
+ arm_smmu_unassign_table(smmu_domain);
+ arm_smmu_secure_domain_unlock(smmu_domain);
+ }
+ smmu_domain->pgtbl_ops = NULL;
+ }
+
free_irqs:
if (cfg->irptndx != INVALID_IRPTNDX) {
irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
diff --git a/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c b/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c
index f6d7f5fb8d32..8a49c7cf9f4a 100644
--- a/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c
+++ b/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c
@@ -424,7 +424,7 @@ int msm_camera_config_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
curr_vreg = &cam_vreg[j];
reg_ptr[j] = regulator_get(dev,
curr_vreg->reg_name);
- if (IS_ERR(reg_ptr[j])) {
+ if (IS_ERR_OR_NULL(reg_ptr[j])) {
pr_err("%s: %s get failed\n",
__func__,
curr_vreg->reg_name);
@@ -531,7 +531,7 @@ int msm_camera_enable_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
continue;
} else
j = i;
- if (IS_ERR(reg_ptr[j])) {
+ if (IS_ERR_OR_NULL(reg_ptr[j])) {
pr_err("%s: %s null regulator\n",
__func__, cam_vreg[j].reg_name);
goto disable_vreg;
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index d053409b67e5..063e00517660 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1224,16 +1224,16 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
- mmc_put_card(card);
-
- err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
-
if (mmc_card_cmdq(card)) {
if (mmc_cmdq_halt(card->host, false))
pr_err("%s: %s: cmdq unhalt failed\n",
mmc_hostname(card->host), __func__);
}
+ mmc_put_card(card);
+
+ err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
+
cmd_done:
mmc_blk_put(md);
cmd_err:
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 0da9c5caea13..372f1fbbde4c 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -3301,6 +3301,13 @@ static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
pm_wakeup_event(mmc_dev(host), 5000);
host->detect_change = 1;
+ /*
+ * Change in cd_gpio state, so make sure detection part is
+ * not overided because of manual resume.
+ */
+ if (cd_irq && mmc_bus_manual_resume(host))
+ host->ignore_bus_resume_flags = true;
+
mmc_schedule_delayed_work(&host->detect, delay);
}
@@ -4165,6 +4172,18 @@ int mmc_detect_card_removed(struct mmc_host *host)
}
EXPORT_SYMBOL(mmc_detect_card_removed);
+/*
+ * This should be called to make sure that detect work(mmc_rescan)
+ * is completed.Drivers may use this function from async schedule/probe
+ * contexts to make sure that the bootdevice detection is completed on
+ * completion of async_schedule.
+ */
+void mmc_flush_detect_work(struct mmc_host *host)
+{
+ flush_delayed_work(&host->detect);
+}
+EXPORT_SYMBOL(mmc_flush_detect_work);
+
void mmc_rescan(struct work_struct *work)
{
unsigned long flags;
@@ -4199,6 +4218,8 @@ void mmc_rescan(struct work_struct *work)
host->bus_ops->detect(host);
host->detect_change = 0;
+ if (host->ignore_bus_resume_flags)
+ host->ignore_bus_resume_flags = false;
/*
* Let mmc_bus_put() free the bus/bus_ops if we've found that
@@ -4456,7 +4477,8 @@ int mmc_pm_notify(struct notifier_block *notify_block,
spin_lock_irqsave(&host->lock, flags);
host->rescan_disable = 0;
- if (mmc_bus_manual_resume(host)) {
+ if (mmc_bus_manual_resume(host) &&
+ !host->ignore_bus_resume_flags) {
spin_unlock_irqrestore(&host->lock, flags);
break;
}
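
The mmc_flush_detect_work() comment above says host drivers may call it from async schedule/probe contexts so that boot-device detection (mmc_rescan) has completed by the time the async work finishes. A hedged sketch of such a caller follows; the function name and surrounding structure are assumptions, and the real caller added in this series is sdhci_msm_probe() in drivers/mmc/host/sdhci-msm.c.

#include <linux/async.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

/* Hypothetical async probe tail for an MMC host driver. */
static void my_host_complete_probe(void *data, async_cookie_t cookie)
{
	struct mmc_host *mmc = data;

	/*
	 * Block until this host's detect work (mmc_rescan) has run, so
	 * the boot device is fully enumerated before callers waiting on
	 * async_synchronize_full() (e.g. rootfs mount) proceed.
	 */
	mmc_flush_detect_work(mmc);
}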
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 9bef77ba29fd..21836eac001e 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1237,7 +1237,10 @@ static int mmc_sd_suspend(struct mmc_host *host)
if (!err) {
pm_runtime_disable(&host->card->dev);
pm_runtime_set_suspended(&host->card->dev);
- }
+ /* if suspend fails, force mmc_detect_change during resume */
+ } else if (mmc_bus_manual_resume(host))
+ host->ignore_bus_resume_flags = true;
+
MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
return err;
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index df3fce93b6d1..45d2f69f5f1a 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -4747,6 +4747,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
mmc_hostname(host->mmc), __func__, ret);
device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
}
+ if (sdhci_msm_is_bootdevice(&pdev->dev))
+ mmc_flush_detect_work(host->mmc);
+
/* Successful initialization */
goto out;
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index f5aa88a76f17..c42d7eebf465 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -27,6 +27,8 @@
#include "qmi.h"
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
#define WCN3990_MAX_IRQ 12
@@ -48,6 +50,7 @@ const char *ce_name[WCN3990_MAX_IRQ] = {
#define ATH10K_SNOC_TARGET_WAIT 3000
#define ATH10K_SNOC_NUM_WARM_RESET_ATTEMPTS 3
#define SNOC_HIF_POWER_DOWN_DELAY 30
+#define ATH10K_MAX_PROP_SIZE 32
static void ath10k_snoc_buffer_cleanup(struct ath10k *ar);
static int ath10k_snoc_request_irq(struct ath10k *ar);
@@ -1248,6 +1251,326 @@ int ath10k_snoc_pm_notifier(struct notifier_block *nb,
return NOTIFY_DONE;
}
+static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev,
+ struct ath10k_wcn3990_vreg_info *vreg_info)
+{
+ int ret = 0;
+ char prop_name[ATH10K_MAX_PROP_SIZE];
+ struct regulator *reg;
+ const __be32 *prop;
+ int len = 0;
+ int i;
+
+ reg = devm_regulator_get_optional(dev, vreg_info->name);
+ if (PTR_ERR(reg) == -EPROBE_DEFER) {
+ ath10k_err(ar, "EPROBE_DEFER for regulator: %s\n",
+ vreg_info->name);
+ ret = PTR_ERR(reg);
+ goto out;
+ }
+
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+
+ if (vreg_info->required) {
+ ath10k_err(ar, "Regulator %s doesn't exist: %d\n",
+ vreg_info->name, ret);
+ goto out;
+ } else {
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "Optional regulator %s doesn't exist: %d\n",
+ vreg_info->name, ret);
+ goto done;
+ }
+ }
+
+ vreg_info->reg = reg;
+
+ snprintf(prop_name, ATH10K_MAX_PROP_SIZE,
+ "qcom,%s-config", vreg_info->name);
+
+ prop = of_get_property(dev->of_node, prop_name, &len);
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Got regulator cfg,prop: %s, len: %d\n",
+ prop_name, len);
+
+ if (!prop || len < (2 * sizeof(__be32))) {
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Property %s %s\n", prop_name,
+ prop ? "invalid format" : "doesn't exist");
+ goto done;
+ }
+
+ for (i = 0; (i * sizeof(__be32)) < len; i++) {
+ switch (i) {
+ case 0:
+ vreg_info->min_v = be32_to_cpup(&prop[0]);
+ break;
+ case 1:
+ vreg_info->max_v = be32_to_cpup(&prop[1]);
+ break;
+ case 2:
+ vreg_info->load_ua = be32_to_cpup(&prop[2]);
+ break;
+ case 3:
+ vreg_info->settle_delay = be32_to_cpup(&prop[3]);
+ break;
+ default:
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s, ignoring val %d\n",
+ prop_name, i);
+ break;
+ }
+ }
+
+done:
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "vreg: %s, min_v: %u, max_v: %u, load: %u, delay: %lu\n",
+ vreg_info->name, vreg_info->min_v, vreg_info->max_v,
+ vreg_info->load_ua, vreg_info->settle_delay);
+
+ return 0;
+
+out:
+ return ret;
+}
+
+static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev,
+ struct ath10k_wcn3990_clk_info *clk_info)
+{
+ struct clk *handle;
+ int ret = 0;
+
+ handle = devm_clk_get(dev, clk_info->name);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ if (clk_info->required) {
+ ath10k_err(ar, "Clock %s isn't available: %d\n",
+ clk_info->name, ret);
+ goto out;
+ } else {
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Ignoring clk %s: %d\n",
+ clk_info->name,
+ ret);
+ ret = 0;
+ goto out;
+ }
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Clock: %s, freq: %u\n",
+ clk_info->name, clk_info->freq);
+
+ clk_info->handle = handle;
+out:
+ return ret;
+}
+
+static int ath10k_wcn3990_vreg_on(struct ath10k *ar)
+{
+ int ret = 0;
+ struct ath10k_wcn3990_vreg_info *vreg_info;
+ int i;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ for (i = 0; i < ATH10K_WCN3990_VREG_INFO_SIZE; i++) {
+ vreg_info = &ar_snoc->vreg[i];
+
+ if (!vreg_info->reg)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Regulator %s being enabled\n",
+ vreg_info->name);
+
+ ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
+ vreg_info->max_v);
+ if (ret) {
+ ath10k_err(ar,
+ "vreg %s, set failed:min:%u,max:%u,ret: %d\n",
+ vreg_info->name, vreg_info->min_v,
+ vreg_info->max_v, ret);
+ break;
+ }
+
+ if (vreg_info->load_ua) {
+ ret = regulator_set_load(vreg_info->reg,
+ vreg_info->load_ua);
+ if (ret < 0) {
+ ath10k_err(ar,
+ "Reg %s, can't set load:%u,ret: %d\n",
+ vreg_info->name,
+ vreg_info->load_ua, ret);
+ break;
+ }
+ }
+
+ ret = regulator_enable(vreg_info->reg);
+ if (ret) {
+ ath10k_err(ar, "Regulator %s, can't enable: %d\n",
+ vreg_info->name, ret);
+ break;
+ }
+
+ if (vreg_info->settle_delay)
+ udelay(vreg_info->settle_delay);
+ }
+
+ if (!ret)
+ return 0;
+
+ for (; i >= 0; i--) {
+ vreg_info = &ar_snoc->vreg[i];
+
+ if (!vreg_info->reg)
+ continue;
+
+ regulator_disable(vreg_info->reg);
+ regulator_set_load(vreg_info->reg, 0);
+ regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
+ }
+
+ return ret;
+}
+
+static int ath10k_wcn3990_vreg_off(struct ath10k *ar)
+{
+ int ret = 0;
+ struct ath10k_wcn3990_vreg_info *vreg_info;
+ int i;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ for (i = ATH10K_WCN3990_VREG_INFO_SIZE - 1; i >= 0; i--) {
+ vreg_info = &ar_snoc->vreg[i];
+
+ if (!vreg_info->reg)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Regulator %s being disabled\n",
+ vreg_info->name);
+
+ ret = regulator_disable(vreg_info->reg);
+ if (ret)
+ ath10k_err(ar, "Regulator %s, can't disable: %d\n",
+ vreg_info->name, ret);
+
+ ret = regulator_set_load(vreg_info->reg, 0);
+ if (ret < 0)
+ ath10k_err(ar, "Regulator %s, can't set load: %d\n",
+ vreg_info->name, ret);
+
+ ret = regulator_set_voltage(vreg_info->reg, 0,
+ vreg_info->max_v);
+ if (ret)
+ ath10k_err(ar, "Regulator %s, can't set voltage: %d\n",
+ vreg_info->name, ret);
+ }
+
+ return ret;
+}
+
+static int ath10k_wcn3990_clk_init(struct ath10k *ar)
+{
+ struct ath10k_wcn3990_clk_info *clk_info;
+ int i;
+ int ret = 0;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ for (i = 0; i < ATH10K_WCN3990_CLK_INFO_SIZE; i++) {
+ clk_info = &ar_snoc->clk[i];
+
+ if (!clk_info->handle)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Clock %s being enabled\n",
+ clk_info->name);
+
+ if (clk_info->freq) {
+ ret = clk_set_rate(clk_info->handle, clk_info->freq);
+
+ if (ret) {
+ ath10k_err(ar, "Clk %s,set err: %u,ret: %d\n",
+ clk_info->name, clk_info->freq,
+ ret);
+ break;
+ }
+ }
+
+ ret = clk_prepare_enable(clk_info->handle);
+ if (ret) {
+ ath10k_err(ar, "Clock %s, can't enable: %d\n",
+ clk_info->name, ret);
+ break;
+ }
+ }
+
+ if (ret == 0)
+ return 0;
+
+ for (; i >= 0; i--) {
+ clk_info = &ar_snoc->clk[i];
+
+ if (!clk_info->handle)
+ continue;
+
+ clk_disable_unprepare(clk_info->handle);
+ }
+
+ return ret;
+}
+
+static int ath10k_wcn3990_clk_deinit(struct ath10k *ar)
+{
+ struct ath10k_wcn3990_clk_info *clk_info;
+ int i;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ for (i = 0; i < ATH10K_WCN3990_CLK_INFO_SIZE; i++) {
+ clk_info = &ar_snoc->clk[i];
+
+ if (!clk_info->handle)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Clock %s being disabled\n",
+ clk_info->name);
+
+ clk_disable_unprepare(clk_info->handle);
+ }
+
+ return 0;
+}
+
+static int ath10k_hw_power_on(struct ath10k *ar)
+{
+ int ret = 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "HW Power on\n");
+
+ ret = ath10k_wcn3990_vreg_on(ar);
+ if (ret)
+ goto out;
+
+ ret = ath10k_wcn3990_clk_init(ar);
+ if (ret)
+ goto vreg_off;
+
+ return ret;
+
+vreg_off:
+ ath10k_wcn3990_vreg_off(ar);
+out:
+ return ret;
+}
+
+static int ath10k_hw_power_off(struct ath10k *ar)
+{
+ int ret = 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "HW Power off\n");
+
+ ath10k_wcn3990_clk_deinit(ar);
+
+ ret = ath10k_wcn3990_vreg_off(ar);
+
+ return ret;
+}
+
static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
.tx_sg = ath10k_snoc_hif_tx_sg,
.start = ath10k_snoc_hif_start,
@@ -1275,6 +1598,7 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
enum ath10k_hw_rev hw_rev;
struct device *dev;
u32 chip_id;
+ u32 i;
dev = &pdev->dev;
hw_rev = ATH10K_HW_WCN3990;
@@ -1308,22 +1632,43 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
setup_timer(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry,
(unsigned long)ar);
+ memcpy(ar_snoc->vreg, vreg_cfg, sizeof(vreg_cfg));
+ for (i = 0; i < ATH10K_WCN3990_VREG_INFO_SIZE; i++) {
+ ret = ath10k_get_vreg_info(ar, dev, &ar_snoc->vreg[i]);
+ if (ret)
+ goto err_core_destroy;
+ }
+
+ memcpy(ar_snoc->clk, clk_cfg, sizeof(clk_cfg));
+ for (i = 0; i < ATH10K_WCN3990_CLK_INFO_SIZE; i++) {
+ ret = ath10k_get_clk_info(ar, dev, &ar_snoc->clk[i]);
+ if (ret)
+ goto err_core_destroy;
+ }
+
+ ret = ath10k_hw_power_on(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to power on device: %d\n", ret);
+ goto err_stop_qmi_service;
+ }
+
ret = ath10k_snoc_claim(ar);
if (ret) {
ath10k_err(ar, "failed to claim device: %d\n", ret);
- goto err_stop_qmi_service;
+ goto err_hw_power_off;
}
+
ret = ath10k_snoc_bus_configure(ar);
if (ret) {
ath10k_err(ar, "failed to configure bus: %d\n", ret);
- goto err_stop_qmi_service;
+ goto err_hw_power_off;
}
ret = ath10k_snoc_alloc_pipes(ar);
if (ret) {
ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
ret);
- goto err_stop_qmi_service;
+ goto err_hw_power_off;
}
netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
@@ -1359,6 +1704,9 @@ err_free_irq:
err_free_pipes:
ath10k_snoc_free_pipes(ar);
+err_hw_power_off:
+ ath10k_hw_power_off(ar);
+
err_stop_qmi_service:
ath10k_snoc_stop_qmi_service(ar);
@@ -1389,6 +1737,7 @@ static int ath10k_snoc_remove(struct platform_device *pdev)
ath10k_snoc_release_resource(ar);
ath10k_snoc_free_pipes(ar);
ath10k_snoc_stop_qmi_service(ar);
+ ath10k_hw_power_off(ar);
ath10k_core_destroy(ar);
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
index d6e05ba18cb8..a02cb2ad928e 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.h
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -17,6 +17,7 @@
#include "ce.h"
#include "pci.h"
#include "qmi.h"
+#include <linux/kernel.h>
#include <soc/qcom/service-locator.h>
#define ATH10K_SNOC_RX_POST_RETRY_MS 50
#define CE_POLL_PIPE 4
@@ -112,6 +113,38 @@ struct ath10k_snoc_ce_irq {
u32 irq_line;
};
+struct ath10k_wcn3990_vreg_info {
+ struct regulator *reg;
+ const char *name;
+ u32 min_v;
+ u32 max_v;
+ u32 load_ua;
+ unsigned long settle_delay;
+ bool required;
+};
+
+struct ath10k_wcn3990_clk_info {
+ struct clk *handle;
+ const char *name;
+ u32 freq;
+ bool required;
+};
+
+static struct ath10k_wcn3990_vreg_info vreg_cfg[] = {
+ {NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, false},
+ {NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false},
+ {NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false},
+ {NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false},
+};
+
+#define ATH10K_WCN3990_VREG_INFO_SIZE ARRAY_SIZE(vreg_cfg)
+
+static struct ath10k_wcn3990_clk_info clk_cfg[] = {
+ {NULL, "cxo_ref_clk_pin", 0, false},
+};
+
+#define ATH10K_WCN3990_CLK_INFO_SIZE ARRAY_SIZE(clk_cfg)
+
/* struct ath10k_snoc: SNOC info struct
* @dev: device structure
* @ar:ath10k base structure
@@ -157,6 +190,8 @@ struct ath10k_snoc {
atomic_t fw_crashed;
atomic_t pm_ops_inprogress;
struct ath10k_snoc_qmi_config qmi_cfg;
+ struct ath10k_wcn3990_vreg_info vreg[ATH10K_WCN3990_VREG_INFO_SIZE];
+ struct ath10k_wcn3990_clk_info clk[ATH10K_WCN3990_CLK_INFO_SIZE];
};
struct ath10k_event_pd_down_data {
diff --git a/drivers/net/wireless/ath/wil6210/ftm.c b/drivers/net/wireless/ath/wil6210/ftm.c
index 6891a38d7a59..d856e091a5de 100644
--- a/drivers/net/wireless/ath/wil6210/ftm.c
+++ b/drivers/net/wireless/ath/wil6210/ftm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -38,6 +38,9 @@
/* initial token to use on non-secure FTM measurement */
#define WIL_TOF_FTM_DEFAULT_INITIAL_TOKEN 2
+/* maximum AOA burst period, limited by FW */
+#define WIL_AOA_MAX_BURST_PERIOD 255
+
#define WIL_TOF_FTM_MAX_LCI_LENGTH (240)
#define WIL_TOF_FTM_MAX_LCR_LENGTH (240)
@@ -62,6 +65,7 @@ nla_policy wil_nl80211_ftm_peer_policy[
[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS] = { .type = NLA_U32 },
[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS] = { .type = NLA_NESTED },
[QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID] = { .type = NLA_U8 },
+ [QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD] = { .type = NLA_U16 },
[QCA_WLAN_VENDOR_ATTR_FTM_PEER_FREQ] = { .type = NLA_U32 },
};
@@ -311,8 +315,8 @@ wil_ftm_cfg80211_start_session(struct wil6210_priv *wil,
struct wmi_tof_session_start_cmd *cmd;
mutex_lock(&wil->ftm.lock);
- if (wil->ftm.session_started) {
- wil_err(wil, "FTM session already running\n");
+ if (wil->ftm.session_started || wil->ftm.aoa_started) {
+ wil_err(wil, "FTM or AOA session already running\n");
rc = -EAGAIN;
goto out;
}
@@ -356,6 +360,7 @@ wil_ftm_cfg80211_start_session(struct wil6210_priv *wil,
}
cmd->session_id = cpu_to_le32(WIL_FTM_FW_SESSION_ID);
+ cmd->aoa_type = request->aoa_type;
cmd->num_of_dest = cpu_to_le16(request->n_peers);
for (i = 0; i < request->n_peers; i++) {
ether_addr_copy(cmd->ftm_dest_info[i].dst_mac,
@@ -398,6 +403,8 @@ wil_ftm_cfg80211_start_session(struct wil6210_priv *wil,
request->peers[i].params.burst_duration;
cmd->ftm_dest_info[i].burst_period =
cpu_to_le16(request->peers[i].params.burst_period);
+ cmd->ftm_dest_info[i].num_burst_per_aoa_meas =
+ request->peers[i].aoa_burst_period;
}
rc = wmi_send(wil, WMI_TOF_SESSION_START_CMDID, cmd, cmd_len);
@@ -482,8 +489,8 @@ wil_aoa_cfg80211_start_measurement(struct wil6210_priv *wil,
mutex_lock(&wil->ftm.lock);
- if (wil->ftm.aoa_started) {
- wil_err(wil, "AOA measurement already running\n");
+ if (wil->ftm.aoa_started || wil->ftm.session_started) {
+ wil_err(wil, "AOA or FTM measurement already running\n");
rc = -EAGAIN;
goto out;
}
@@ -524,8 +531,8 @@ void wil_aoa_cfg80211_meas_result(struct wil6210_priv *wil,
mutex_lock(&wil->ftm.lock);
- if (!wil->ftm.aoa_started) {
- wil_info(wil, "AOA not started, not sending result\n");
+ if (!wil->ftm.aoa_started && !wil->ftm.session_started) {
+ wil_info(wil, "AOA/FTM not started, not sending result\n");
goto out;
}
@@ -749,6 +756,7 @@ int wil_ftm_start_session(struct wiphy *wiphy, struct wireless_dev *wdev,
struct nlattr *tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAX + 1];
struct nlattr *peer;
int rc, n_peers = 0, index = 0, tmp;
+ u32 aoa_type = 0;
if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities))
return -ENOTSUPP;
@@ -770,6 +778,14 @@ int wil_ftm_start_session(struct wiphy *wiphy, struct wireless_dev *wdev,
return -EINVAL;
}
+ if (tb[QCA_WLAN_VENDOR_ATTR_AOA_TYPE]) {
+ aoa_type = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_AOA_TYPE]);
+ if (aoa_type >= QCA_WLAN_VENDOR_ATTR_AOA_TYPE_MAX) {
+ wil_err(wil, "invalid AOA type: %d\n", aoa_type);
+ return -EINVAL;
+ }
+ }
+
nla_for_each_nested(peer, tb[QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS],
tmp)
n_peers++;
@@ -793,6 +809,7 @@ int wil_ftm_start_session(struct wiphy *wiphy, struct wireless_dev *wdev,
request->session_cookie =
nla_get_u64(tb[QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE]);
+ request->aoa_type = aoa_type;
request->n_peers = n_peers;
nla_for_each_nested(peer, tb[QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS],
tmp) {
@@ -821,6 +838,18 @@ int wil_ftm_start_session(struct wiphy *wiphy, struct wireless_dev *wdev,
if (tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID])
request->peers[index].secure_token_id = nla_get_u8(
tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID]);
+ if (tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD]) {
+ request->peers[index].aoa_burst_period = nla_get_u16(
+ tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD]);
+ if (request->peers[index].aoa_burst_period >
+ WIL_AOA_MAX_BURST_PERIOD) {
+ wil_err(wil, "Invalid AOA burst period at index: %d\n",
+ index);
+ rc = -EINVAL;
+ goto out;
+ }
+ }
+
rc = wil_ftm_parse_meas_params(
wil,
tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS],
diff --git a/drivers/net/wireless/ath/wil6210/ftm.h b/drivers/net/wireless/ath/wil6210/ftm.h
index 8efa292d5ff4..21923c27ec06 100644
--- a/drivers/net/wireless/ath/wil6210/ftm.h
+++ b/drivers/net/wireless/ath/wil6210/ftm.h
@@ -437,12 +437,14 @@ struct wil_ftm_meas_peer_info {
u32 flags; /* enum qca_wlan_vendor_attr_ftm_peer_meas_flags */
struct wil_ftm_meas_params params;
u8 secure_token_id;
+ u16 aoa_burst_period; /* 0 if no AOA, >0 every <value> bursts */
};
/* session request, passed to wil_ftm_cfg80211_start_session */
struct wil_ftm_session_request {
u64 session_cookie;
u32 n_peers;
+ u32 aoa_type; /* enum qca_wlan_vendor_attr_aoa_type */
/* keep last, variable size according to n_peers */
struct wil_ftm_meas_peer_info peers[0];
};
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index 4275e3d26157..8b8ed72c2076 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -812,10 +812,11 @@ static ssize_t ipa_read_flt(struct file *file, char __user *ubuf, size_t count,
eq = true;
} else {
rt_tbl = ipa_id_find(entry->rule.rt_tbl_hdl);
- if (rt_tbl)
- rt_tbl_idx = rt_tbl->idx;
+ if (rt_tbl == NULL ||
+ rt_tbl->cookie != IPA_RT_TBL_COOKIE)
+ rt_tbl_idx = ~0;
else
- rt_tbl_idx = ~0;
+ rt_tbl_idx = rt_tbl->idx;
bitmap = entry->rule.attrib.attrib_mask;
eq = false;
}
@@ -842,10 +843,11 @@ static ssize_t ipa_read_flt(struct file *file, char __user *ubuf, size_t count,
eq = true;
} else {
rt_tbl = ipa_id_find(entry->rule.rt_tbl_hdl);
- if (rt_tbl)
- rt_tbl_idx = rt_tbl->idx;
- else
+ if (rt_tbl == NULL ||
+ rt_tbl->cookie != IPA_RT_TBL_COOKIE)
rt_tbl_idx = ~0;
+ else
+ rt_tbl_idx = rt_tbl->idx;
bitmap = entry->rule.attrib.attrib_mask;
eq = false;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index c01c43330d5f..12b43882ed5b 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -411,12 +411,15 @@ int copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
{
int i, j;
+ /* prevent multi-threads accessing num_q6_rule */
+ mutex_lock(&add_mux_channel_lock);
if (rule_req->filter_spec_list_valid == true) {
num_q6_rule = rule_req->filter_spec_list_len;
IPAWANDBG("Received (%d) install_flt_req\n", num_q6_rule);
} else {
num_q6_rule = 0;
IPAWANERR("got no UL rules from modem\n");
+ mutex_unlock(&add_mux_channel_lock);
return -EINVAL;
}
@@ -610,9 +613,11 @@ failure:
num_q6_rule = 0;
memset(ipa_qmi_ctx->q6_ul_filter_rule, 0,
sizeof(ipa_qmi_ctx->q6_ul_filter_rule));
+ mutex_unlock(&add_mux_channel_lock);
return -EINVAL;
success:
+ mutex_unlock(&add_mux_channel_lock);
return 0;
}
@@ -1622,9 +1627,12 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* already got Q6 UL filter rules*/
if (ipa_qmi_ctx &&
ipa_qmi_ctx->modem_cfg_emb_pipe_flt
- == false)
+ == false) {
+ /* protect num_q6_rule */
+ mutex_lock(&add_mux_channel_lock);
rc = wwan_add_ul_flt_rule_to_ipa();
- else
+ mutex_unlock(&add_mux_channel_lock);
+ } else
rc = 0;
egress_set = true;
if (rc)
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index c7ab616cb5b8..c686dc6a407c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -867,10 +867,11 @@ static ssize_t ipa3_read_flt(struct file *file, char __user *ubuf, size_t count,
eq = true;
} else {
rt_tbl = ipa3_id_find(entry->rule.rt_tbl_hdl);
- if (rt_tbl)
- rt_tbl_idx = rt_tbl->idx;
+ if (rt_tbl == NULL ||
+ rt_tbl->cookie != IPA_RT_TBL_COOKIE)
+ rt_tbl_idx = ~0;
else
- rt_tbl_idx = ~0;
+ rt_tbl_idx = rt_tbl->idx;
bitmap = entry->rule.attrib.attrib_mask;
eq = false;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index c2fb87ab757b..a03d8978c6c2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -1157,6 +1157,13 @@ int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules)
goto bail;
}
+ if (entry->cookie != IPA_FLT_COOKIE) {
+ IPAERR_RL("Invalid cookie value = %u flt hdl id = %d\n",
+ entry->cookie, rules->add_after_hdl);
+ result = -EINVAL;
+ goto bail;
+ }
+
if (entry->tbl != tbl) {
IPAERR_RL("given entry does not match the table\n");
result = -EINVAL;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index ff57e3bd48f0..b7ba04519a33 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -1152,6 +1152,13 @@ int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules)
goto bail;
}
+ if (entry->cookie != IPA_RT_RULE_COOKIE) {
+ IPAERR_RL("Invalid cookie value = %u rule %d in rt tbls\n",
+ entry->cookie, rules->add_after_hdl);
+ ret = -EINVAL;
+ goto bail;
+ }
+
if (entry->tbl != tbl) {
IPAERR_RL("given rt rule does not match the table\n");
ret = -EINVAL;
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index e2ecfd715038..8b0a4223e4d3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -423,6 +423,8 @@ int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
{
int i, j;
+ /* prevent multi-threads accessing rmnet_ipa3_ctx->num_q6_rules */
+ mutex_lock(&rmnet_ipa3_ctx->add_mux_channel_lock);
if (rule_req->filter_spec_ex_list_valid == true) {
rmnet_ipa3_ctx->num_q6_rules =
rule_req->filter_spec_ex_list_len;
@@ -431,6 +433,8 @@ int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
} else {
rmnet_ipa3_ctx->num_q6_rules = 0;
IPAWANERR("got no UL rules from modem\n");
+ mutex_unlock(&rmnet_ipa3_ctx->
+ add_mux_channel_lock);
return -EINVAL;
}
@@ -633,9 +637,13 @@ failure:
rmnet_ipa3_ctx->num_q6_rules = 0;
memset(ipa3_qmi_ctx->q6_ul_filter_rule, 0,
sizeof(ipa3_qmi_ctx->q6_ul_filter_rule));
+ mutex_unlock(&rmnet_ipa3_ctx->
+ add_mux_channel_lock);
return -EINVAL;
success:
+ mutex_unlock(&rmnet_ipa3_ctx->
+ add_mux_channel_lock);
return 0;
}
@@ -1437,8 +1445,13 @@ static int handle3_egress_format(struct net_device *dev,
if (rmnet_ipa3_ctx->num_q6_rules != 0) {
/* already got Q6 UL filter rules*/
- if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt == false)
+ if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt == false) {
+ /* prevent multi-threads accessing num_q6_rules */
+ mutex_lock(&rmnet_ipa3_ctx->add_mux_channel_lock);
rc = ipa3_wwan_add_ul_flt_rule_to_ipa();
+ mutex_unlock(&rmnet_ipa3_ctx->
+ add_mux_channel_lock);
+ }
if (rc)
IPAWANERR("install UL rules failed\n");
else
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index ea01a38015f3..79a80b6e6c7c 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -3727,6 +3727,7 @@ static int fg_notifier_cb(struct notifier_block *nb,
return NOTIFY_OK;
if ((strcmp(psy->desc->name, "battery") == 0)
+ || (strcmp(psy->desc->name, "parallel") == 0)
|| (strcmp(psy->desc->name, "usb") == 0)) {
/*
* We cannot vote for awake votable here as that takes
diff --git a/drivers/power/supply/qcom/step-chg-jeita.c b/drivers/power/supply/qcom/step-chg-jeita.c
index 06ecc7ea6e8a..acc0d772d44d 100644
--- a/drivers/power/supply/qcom/step-chg-jeita.c
+++ b/drivers/power/supply/qcom/step-chg-jeita.c
@@ -356,11 +356,21 @@ static void status_change_work(struct work_struct *work)
int reschedule_us;
int reschedule_jeita_work_us = 0;
int reschedule_step_work_us = 0;
+ union power_supply_propval pval = {0, };
+
+ if (!is_batt_available(chip)) {
+ __pm_relax(chip->step_chg_ws);
+ return;
+ }
- if (!is_batt_available(chip))
+ /* skip jeita and step if not charging */
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_STATUS, &pval);
+ if (pval.intval != POWER_SUPPLY_STATUS_CHARGING) {
+ __pm_relax(chip->step_chg_ws);
return;
+ }
- /* skip elapsed_us debounce for handling battery temperature */
rc = handle_jeita(chip);
if (rc > 0)
reschedule_jeita_work_us = rc;
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index f3debd14c27b..ad9bf3a2232d 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -1667,6 +1667,8 @@ void ch_purge_intent_lists(struct channel_ctx *ctx)
&ctx->local_rx_intent_list, list) {
ctx->notify_rx_abort(ctx, ctx->user_priv,
ptr_intent->pkt_priv);
+ ctx->transport_ptr->ops->deallocate_rx_intent(
+ ctx->transport_ptr->ops, ptr_intent);
list_del(&ptr_intent->list);
kfree(ptr_intent);
}
@@ -3765,6 +3767,8 @@ static void glink_dummy_xprt_ctx_release(struct rwref_lock *xprt_st_lock)
GLINK_INFO("%s: freeing transport [%s->%s]context\n", __func__,
xprt_ctx->name,
xprt_ctx->edge);
+ kfree(xprt_ctx->ops);
+ xprt_ctx->ops = NULL;
kfree(xprt_ctx);
}
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index a49848808078..68199d9adb02 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -493,13 +493,10 @@ static void spcom_notify_state(void *handle, const void *priv, unsigned event)
ch->glink_state = event;
- /*
- * if spcom_notify_state() is called within glink_open()
- * then ch->glink_handle is not updated yet.
- */
- if (!ch->glink_handle) {
- pr_debug("update glink_handle, ch [%s].\n", ch->name);
- ch->glink_handle = handle;
+ if (!handle) {
+ pr_err("inavlid glink_handle, ch [%s].\n", ch->name);
+ mutex_unlock(&ch->lock);
+ return;
}
/* signal before unlock mutex & before calling glink */
@@ -512,8 +509,7 @@ static void spcom_notify_state(void *handle, const void *priv, unsigned event)
*/
pr_debug("call glink_queue_rx_intent() ch [%s].\n", ch->name);
- ret = glink_queue_rx_intent(ch->glink_handle,
- ch, ch->rx_buf_size);
+ ret = glink_queue_rx_intent(handle, ch, ch->rx_buf_size);
if (ret) {
pr_err("glink_queue_rx_intent() err [%d]\n", ret);
} else {
@@ -1028,10 +1024,12 @@ static int spcom_get_next_request_size(struct spcom_channel *ch)
ch->name, ch->actual_rx_size);
goto exit_ready;
}
+ mutex_unlock(&ch->lock); /* unlock while waiting */
pr_debug("Wait for Rx Done, ch [%s].\n", ch->name);
wait_for_completion(&ch->rx_done);
+ mutex_lock(&ch->lock); /* re-lock after waiting */
/* Check Rx Abort on SP reset */
if (ch->rx_abort) {
pr_err("rx aborted.\n");
diff --git a/drivers/soc/qcom/wcd-dsp-glink.c b/drivers/soc/qcom/wcd-dsp-glink.c
index 85c2b92f5474..ee88a8aaf850 100644
--- a/drivers/soc/qcom/wcd-dsp-glink.c
+++ b/drivers/soc/qcom/wcd-dsp-glink.c
@@ -634,6 +634,21 @@ static int wdsp_glink_ch_info_init(struct wdsp_glink_priv *wpriv,
memcpy(&ch[i]->ch_cfg, payload, ch_cfg_size);
payload += ch_cfg_size;
+ /* check ch name is valid string or not */
+ for (j = 0; j < WDSP_CH_NAME_MAX_LEN; j++) {
+ if (ch[i]->ch_cfg.name[j] == '\0')
+ break;
+ }
+
+ if (j == WDSP_CH_NAME_MAX_LEN) {
+ dev_err_ratelimited(wpriv->dev, "%s: Wrong channel name\n",
+ __func__);
+ kfree(ch[i]);
+ ch[i] = NULL;
+ ret = -EINVAL;
+ goto err_ch_mem;
+ }
+
mutex_init(&ch[i]->mutex);
ch[i]->wpriv = wpriv;
INIT_WORK(&ch[i]->lcl_ch_open_wrk, wdsp_glink_lcl_ch_open_wrk);
@@ -906,8 +921,6 @@ static ssize_t wdsp_glink_write(struct file *file, const char __user *buf,
ret = -EINVAL;
goto free_buf;
}
- dev_dbg(wpriv->dev, "%s: requested ch_name: %s, pkt_size: %zd\n",
- __func__, cpkt->ch_name, pkt_max_size);
for (i = 0; i < wpriv->no_of_channels; i++) {
if (wpriv->ch && wpriv->ch[i] &&
(!strcmp(cpkt->ch_name,
@@ -922,6 +935,8 @@ static ssize_t wdsp_glink_write(struct file *file, const char __user *buf,
ret = -EINVAL;
goto free_buf;
}
+ dev_dbg(wpriv->dev, "%s: requested ch_name: %s, pkt_size: %zd\n",
+ __func__, cpkt->ch_name, pkt_max_size);
ret = wait_event_timeout(tx_buf->ch->ch_connect_wait,
(tx_buf->ch->channel_state ==
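[Editor's note, not part of the patch] The channel-name check added in wdsp_glink_ch_info_init() above is a bounded scan for a terminating NUL in an untrusted buffer; strnlen() expresses the same idea. A minimal user-space sketch, with WDSP_CH_NAME_MAX_LEN assumed to be 50 purely for illustration:

#include <stdio.h>
#include <string.h>

#define WDSP_CH_NAME_MAX_LEN	50	/* assumed value, illustration only */

/* Returns 1 when the buffer holds a NUL within the bound, 0 otherwise */
static int ch_name_valid(const char *name)
{
	return strnlen(name, WDSP_CH_NAME_MAX_LEN) < WDSP_CH_NAME_MAX_LEN;
}

int main(void)
{
	char good[WDSP_CH_NAME_MAX_LEN] = "valid_channel";	/* terminated */
	char bad[WDSP_CH_NAME_MAX_LEN];

	memset(bad, 'A', sizeof(bad));	/* no terminator anywhere */
	printf("good=%d bad=%d\n", ch_name_valid(good), ch_name_valid(bad));
	return 0;
}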
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index f9f47da8a88b..3c0386ee5875 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -125,6 +125,12 @@ enum usbpd_control_msg_type {
MSG_VCONN_SWAP,
MSG_WAIT,
MSG_SOFT_RESET,
+ MSG_NOT_SUPPORTED = 0x10,
+ MSG_GET_SOURCE_CAP_EXTENDED,
+ MSG_GET_STATUS,
+ MSG_FR_SWAP,
+ MSG_GET_PPS_STATUS,
+ MSG_GET_COUNTRY_CODES,
};
enum usbpd_data_msg_type {
@@ -132,9 +138,29 @@ enum usbpd_data_msg_type {
MSG_REQUEST,
MSG_BIST,
MSG_SINK_CAPABILITIES,
+ MSG_BATTERY_STATUS,
+ MSG_ALERT,
+ MSG_GET_COUNTRY_INFO,
MSG_VDM = 0xF,
};
+enum usbpd_ext_msg_type {
+ MSG_SOURCE_CAPABILITIES_EXTENDED = 1,
+ MSG_STATUS,
+ MSG_GET_BATTERY_CAP,
+ MSG_GET_BATTERY_STATUS,
+ MSG_BATTERY_CAPABILITIES,
+ MSG_GET_MANUFACTURER_INFO,
+ MSG_MANUFACTURER_INFO,
+ MSG_SECURITY_REQUEST,
+ MSG_SECURITY_RESPONSE,
+ MSG_FIRMWARE_UPDATE_REQUEST,
+ MSG_FIRMWARE_UPDATE_RESPONSE,
+ MSG_PPS_STATUS,
+ MSG_COUNTRY_INFO,
+ MSG_COUNTRY_CODES,
+};
+
enum vdm_state {
VDM_NONE,
DISCOVERED_ID,
@@ -198,13 +224,30 @@ static void *usbpd_ipc_log;
#define PD_MAX_DATA_OBJ 7
+#define PD_SRC_CAP_EXT_DB_LEN 24
+#define PD_STATUS_DB_LEN 5
+#define PD_BATTERY_CAP_DB_LEN 9
+
+#define PD_MAX_EXT_MSG_LEN 260
+#define PD_MAX_EXT_MSG_LEGACY_LEN 26
+
#define PD_MSG_HDR(type, dr, pr, id, cnt, rev) \
- (((type) & 0xF) | ((dr) << 5) | (rev << 6) | \
+ (((type) & 0x1F) | ((dr) << 5) | (rev << 6) | \
((pr) << 8) | ((id) << 9) | ((cnt) << 12))
-#define PD_MSG_HDR_COUNT(hdr) (((hdr) >> 12) & 7)
-#define PD_MSG_HDR_TYPE(hdr) ((hdr) & 0xF)
-#define PD_MSG_HDR_ID(hdr) (((hdr) >> 9) & 7)
-#define PD_MSG_HDR_REV(hdr) (((hdr) >> 6) & 3)
+#define PD_MSG_HDR_COUNT(hdr) (((hdr) >> 12) & 7)
+#define PD_MSG_HDR_TYPE(hdr) ((hdr) & 0x1F)
+#define PD_MSG_HDR_ID(hdr) (((hdr) >> 9) & 7)
+#define PD_MSG_HDR_REV(hdr) (((hdr) >> 6) & 3)
+#define PD_MSG_HDR_EXTENDED BIT(15)
+#define PD_MSG_HDR_IS_EXTENDED(hdr) ((hdr) & PD_MSG_HDR_EXTENDED)
+
+#define PD_MSG_EXT_HDR(chunked, num, req, size) \
+ (((chunked) << 15) | (((num) & 0xF) << 11) | \
+ ((req) << 10) | ((size) & 0x1FF))
+#define PD_MSG_EXT_HDR_IS_CHUNKED(ehdr) ((ehdr) & 0x8000)
+#define PD_MSG_EXT_HDR_CHUNK_NUM(ehdr) (((ehdr) >> 11) & 0xF)
+#define PD_MSG_EXT_HDR_REQ_CHUNK(ehdr) ((ehdr) & 0x400)
+#define PD_MSG_EXT_HDR_DATA_SIZE(ehdr) ((ehdr) & 0x1FF)
#define PD_RDO_FIXED(obj, gb, mismatch, usb_comm, no_usb_susp, curr1, curr2) \
(((obj) << 28) | ((gb) << 27) | ((mismatch) << 26) | \
@@ -291,19 +334,24 @@ static const u32 default_src_caps[] = { 0x36019096 }; /* VSafe5V @ 1.5A */
static const u32 default_snk_caps[] = { 0x2601912C }; /* VSafe5V @ 3A */
struct vdm_tx {
- u32 data[7];
+ u32 data[PD_MAX_DATA_OBJ];
int size;
};
struct rx_msg {
- u8 type;
- u8 len;
- u32 payload[7];
+ u16 hdr;
+ u16 data_len; /* size of payload in bytes */
struct list_head entry;
+ u8 payload[];
};
-#define IS_DATA(m, t) ((m) && ((m)->len) && ((m)->type == (t)))
-#define IS_CTRL(m, t) ((m) && !((m)->len) && ((m)->type == (t)))
+#define IS_DATA(m, t) ((m) && !PD_MSG_HDR_IS_EXTENDED((m)->hdr) && \
+ PD_MSG_HDR_COUNT((m)->hdr) && \
+ (PD_MSG_HDR_TYPE((m)->hdr) == (t)))
+#define IS_CTRL(m, t) ((m) && !PD_MSG_HDR_COUNT((m)->hdr) && \
+ (PD_MSG_HDR_TYPE((m)->hdr) == (t)))
+#define IS_EXT(m, t) ((m) && PD_MSG_HDR_IS_EXTENDED((m)->hdr) && \
+ (PD_MSG_HDR_TYPE((m)->hdr) == (t)))
struct usbpd {
struct device dev;
@@ -318,8 +366,10 @@ struct usbpd {
bool hard_reset_recvd;
struct list_head rx_q;
spinlock_t rx_lock;
+ struct rx_msg *rx_ext_msg;
u32 received_pdos[PD_MAX_DATA_OBJ];
+ u32 received_ado;
u16 src_cap_id;
u8 selected_pdo;
u8 requested_pdo;
@@ -351,6 +401,8 @@ struct usbpd {
bool pd_phy_opened;
bool send_request;
struct completion is_ready;
+ struct completion tx_chunk_request;
+ u8 next_tx_chunk;
struct mutex swap_lock;
struct dual_role_phy_instance *dual_role;
@@ -377,6 +429,19 @@ struct usbpd {
struct list_head svid_handlers;
struct list_head instance;
+
+ /* ext msg support */
+ bool send_get_src_cap_ext;
+ u8 src_cap_ext_db[PD_SRC_CAP_EXT_DB_LEN];
+ bool send_get_pps_status;
+ u32 pps_status_db;
+ u8 status_db[PD_STATUS_DB_LEN];
+ bool send_get_battery_cap;
+ u8 get_battery_cap_db;
+ u8 battery_cap_db[PD_BATTERY_CAP_DB_LEN];
+ u8 get_battery_status_db;
+ bool send_get_battery_status;
+ u32 battery_sts_dobj;
};
static LIST_HEAD(_usbpd); /* useful for debugging */
@@ -498,6 +563,57 @@ static int pd_send_msg(struct usbpd *pd, u8 msg_type, const u32 *data,
return 0;
}
+static int pd_send_ext_msg(struct usbpd *pd, u8 msg_type,
+ const u8 *data, size_t data_len, enum pd_sop_type sop)
+{
+ int ret;
+ size_t len_remain, chunk_len;
+ u8 chunked_payload[PD_MAX_DATA_OBJ * sizeof(u32)] = {0};
+ u16 hdr;
+ u16 ext_hdr;
+ u8 num_objs;
+
+ if (data_len > PD_MAX_EXT_MSG_LEN) {
+ usbpd_warn(&pd->dev, "Extended message length exceeds max, truncating...\n");
+ data_len = PD_MAX_EXT_MSG_LEN;
+ }
+
+ pd->next_tx_chunk = 0;
+ len_remain = data_len;
+ do {
+ ext_hdr = PD_MSG_EXT_HDR(1, pd->next_tx_chunk++, 0, data_len);
+ memcpy(chunked_payload, &ext_hdr, sizeof(ext_hdr));
+
+ chunk_len = min_t(size_t, len_remain,
+ PD_MAX_EXT_MSG_LEGACY_LEN);
+ memcpy(chunked_payload + sizeof(ext_hdr), data, chunk_len);
+
+ num_objs = DIV_ROUND_UP(chunk_len + sizeof(u16), sizeof(u32));
+ len_remain -= chunk_len;
+
+ reinit_completion(&pd->tx_chunk_request);
+ hdr = PD_MSG_HDR(msg_type, pd->current_dr, pd->current_pr,
+ pd->tx_msgid, num_objs, pd->spec_rev) |
+ PD_MSG_HDR_EXTENDED;
+ ret = pd_phy_write(hdr, chunked_payload,
+ num_objs * sizeof(u32), sop);
+ if (ret)
+ return ret;
+
+ pd->tx_msgid = (pd->tx_msgid + 1) & PD_MAX_MSG_ID;
+
+ /* Wait for request chunk */
+ if (len_remain &&
+ !wait_for_completion_timeout(&pd->tx_chunk_request,
+ msecs_to_jiffies(SENDER_RESPONSE_TIME))) {
+ usbpd_err(&pd->dev, "Timed out waiting for chunk request\n");
+ return -EPROTO;
+ }
+ } while (len_remain);
+
+ return 0;
+}
+
static int pd_select_pdo(struct usbpd *pd, int pdo_pos, int uv, int ua)
{
int curr;
@@ -629,6 +745,150 @@ static void phy_sig_received(struct usbpd *pd, enum pd_sig_type sig)
kick_sm(pd, 0);
}
+struct pd_request_chunk {
+ struct work_struct w;
+ struct usbpd *pd;
+ u8 msg_type;
+ u8 chunk_num;
+ enum pd_sop_type sop;
+};
+
+static void pd_request_chunk_work(struct work_struct *w)
+{
+ struct pd_request_chunk *req =
+ container_of(w, struct pd_request_chunk, w);
+ struct usbpd *pd = req->pd;
+ unsigned long flags;
+ int ret;
+ u8 payload[4] = {0}; /* ext_hdr + padding */
+ u16 hdr = PD_MSG_HDR(req->msg_type, pd->current_dr, pd->current_pr,
+ pd->tx_msgid, 1, pd->spec_rev) | PD_MSG_HDR_EXTENDED;
+
+ *(u16 *)payload = PD_MSG_EXT_HDR(1, req->chunk_num, 1, 0);
+
+ ret = pd_phy_write(hdr, payload, sizeof(payload), req->sop);
+ if (!ret) {
+ pd->tx_msgid = (pd->tx_msgid + 1) & PD_MAX_MSG_ID;
+ } else {
+ usbpd_err(&pd->dev, "could not send chunk request\n");
+
+ /* queue what we have anyway */
+ spin_lock_irqsave(&pd->rx_lock, flags);
+ list_add_tail(&pd->rx_ext_msg->entry, &pd->rx_q);
+ spin_unlock_irqrestore(&pd->rx_lock, flags);
+
+ pd->rx_ext_msg = NULL;
+ }
+
+ kfree(req);
+}
+
+static struct rx_msg *pd_ext_msg_received(struct usbpd *pd, u16 header, u8 *buf,
+ size_t len, enum pd_sop_type sop)
+{
+ struct rx_msg *rx_msg;
+ u16 bytes_to_copy;
+ u16 ext_hdr = *(u16 *)buf;
+ u8 chunk_num;
+
+ if (!PD_MSG_EXT_HDR_IS_CHUNKED(ext_hdr)) {
+ usbpd_err(&pd->dev, "unchunked extended messages unsupported\n");
+ return NULL;
+ }
+
+ /* request for next Tx chunk */
+ if (PD_MSG_EXT_HDR_REQ_CHUNK(ext_hdr)) {
+ if (PD_MSG_EXT_HDR_DATA_SIZE(ext_hdr) ||
+ PD_MSG_EXT_HDR_CHUNK_NUM(ext_hdr) !=
+ pd->next_tx_chunk) {
+ usbpd_err(&pd->dev, "invalid request chunk ext header 0x%02x\n",
+ ext_hdr);
+ return NULL;
+ }
+
+ if (!completion_done(&pd->tx_chunk_request))
+ complete(&pd->tx_chunk_request);
+
+ return NULL;
+ }
+
+ chunk_num = PD_MSG_EXT_HDR_CHUNK_NUM(ext_hdr);
+ if (!chunk_num) {
+ /* allocate new message if first chunk */
+ rx_msg = kzalloc(sizeof(*rx_msg) +
+ PD_MSG_EXT_HDR_DATA_SIZE(ext_hdr),
+ GFP_KERNEL);
+ if (!rx_msg)
+ return NULL;
+
+ rx_msg->hdr = header;
+ rx_msg->data_len = PD_MSG_EXT_HDR_DATA_SIZE(ext_hdr);
+
+ if (rx_msg->data_len > PD_MAX_EXT_MSG_LEN) {
+ usbpd_warn(&pd->dev, "Extended message length exceeds max, truncating...\n");
+ rx_msg->data_len = PD_MAX_EXT_MSG_LEN;
+ }
+ } else {
+ if (!pd->rx_ext_msg) {
+ usbpd_err(&pd->dev, "missing first rx_ext_msg chunk\n");
+ return NULL;
+ }
+
+ rx_msg = pd->rx_ext_msg;
+ }
+
+ /*
+ * The amount to copy is derived as follows:
+ *
+ * - if extended data_len < 26, then copy data_len bytes
+ * - for chunks 0..N-2, copy 26 bytes
+ * - for the last chunk (N-1), copy the remainder
+ */
+ bytes_to_copy =
+ min((rx_msg->data_len - chunk_num * PD_MAX_EXT_MSG_LEGACY_LEN),
+ PD_MAX_EXT_MSG_LEGACY_LEN);
+
+ /* check against received length to avoid overrun */
+ if (bytes_to_copy > len - sizeof(ext_hdr)) {
+ usbpd_warn(&pd->dev, "not enough bytes in chunk, expected:%u received:%zu\n",
+ bytes_to_copy, len - sizeof(ext_hdr));
+ bytes_to_copy = len - sizeof(ext_hdr);
+ }
+
+ memcpy(rx_msg->payload + chunk_num * PD_MAX_EXT_MSG_LEGACY_LEN, buf + 2,
+ bytes_to_copy);
+
+ /* request next chunk? */
+ if ((rx_msg->data_len - chunk_num * PD_MAX_EXT_MSG_LEGACY_LEN) >
+ PD_MAX_EXT_MSG_LEGACY_LEN) {
+ struct pd_request_chunk *req;
+
+ if (pd->rx_ext_msg && pd->rx_ext_msg != rx_msg) {
+ usbpd_dbg(&pd->dev, "stale previous rx_ext_msg?\n");
+ kfree(pd->rx_ext_msg);
+ }
+
+ pd->rx_ext_msg = rx_msg;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ goto queue_rx; /* return what we have anyway */
+
+ INIT_WORK(&req->w, pd_request_chunk_work);
+ req->pd = pd;
+ req->msg_type = PD_MSG_HDR_TYPE(header);
+ req->chunk_num = chunk_num + 1;
+ req->sop = sop;
+ queue_work(pd->wq, &req->w);
+
+ return NULL;
+ }
+
+queue_rx:
+ pd->rx_ext_msg = NULL;
+ return rx_msg; /* queue it for usbpd_sm */
+}
+
static void phy_msg_received(struct usbpd *pd, enum pd_sop_type sop,
u8 *buf, size_t len)
{
@@ -676,21 +936,31 @@ static void phy_msg_received(struct usbpd *pd, enum pd_sop_type sop,
return;
}
- rx_msg = kzalloc(sizeof(*rx_msg), GFP_KERNEL);
- if (!rx_msg)
- return;
+	/* if the partner's spec rev is older, drop ours down to match it */
+ if (PD_MSG_HDR_REV(header) < pd->spec_rev)
+ pd->spec_rev = PD_MSG_HDR_REV(header);
+
+ usbpd_dbg(&pd->dev, "received message: type(%d) num_objs(%d)\n",
+ PD_MSG_HDR_TYPE(header), PD_MSG_HDR_COUNT(header));
+
+ if (!PD_MSG_HDR_IS_EXTENDED(header)) {
+ rx_msg = kzalloc(sizeof(*rx_msg) + len, GFP_KERNEL);
+ if (!rx_msg)
+ return;
- rx_msg->type = PD_MSG_HDR_TYPE(header);
- rx_msg->len = PD_MSG_HDR_COUNT(header);
- memcpy(&rx_msg->payload, buf, min(len, sizeof(rx_msg->payload)));
+ rx_msg->hdr = header;
+ rx_msg->data_len = len;
+ memcpy(rx_msg->payload, buf, len);
+ } else {
+ rx_msg = pd_ext_msg_received(pd, header, buf, len, sop);
+ if (!rx_msg)
+ return;
+ }
spin_lock_irqsave(&pd->rx_lock, flags);
list_add_tail(&rx_msg->entry, &pd->rx_q);
spin_unlock_irqrestore(&pd->rx_lock, flags);
- usbpd_dbg(&pd->dev, "received message: type(%d) len(%d)\n",
- rx_msg->type, rx_msg->len);
-
kick_sm(pd, 0);
}
@@ -1140,11 +1410,13 @@ EXPORT_SYMBOL(usbpd_send_svdm);
static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg)
{
- u32 vdm_hdr = rx_msg->payload[0];
- u32 *vdos = &rx_msg->payload[1];
+ u32 vdm_hdr =
+ rx_msg->data_len >= sizeof(u32) ? ((u32 *)rx_msg->payload)[0] : 0;
+
+ u32 *vdos = (u32 *)&rx_msg->payload[sizeof(u32)];
u16 svid = VDM_HDR_SVID(vdm_hdr);
u16 *psvid;
- u8 i, num_vdos = rx_msg->len - 1; /* num objects minus header */
+ u8 i, num_vdos = PD_MSG_HDR_COUNT(rx_msg->hdr) - 1;
u8 cmd = SVDM_HDR_CMD(vdm_hdr);
u8 cmd_type = SVDM_HDR_CMD_TYPE(vdm_hdr);
bool has_dp = false;
@@ -1757,7 +2029,7 @@ static void usbpd_sm(struct work_struct *w)
case PE_SRC_SEND_CAPABILITIES_WAIT:
if (IS_DATA(rx_msg, MSG_REQUEST)) {
- pd->rdo = rx_msg->payload[0];
+ pd->rdo = *(u32 *)rx_msg->payload;
usbpd_set_state(pd, PE_SRC_NEGOTIATE_CAPABILITY);
} else if (rx_msg) {
usbpd_err(&pd->dev, "Unexpected message received\n");
@@ -1780,7 +2052,7 @@ static void usbpd_sm(struct work_struct *w)
usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
}
} else if (IS_DATA(rx_msg, MSG_REQUEST)) {
- pd->rdo = rx_msg->payload[0];
+ pd->rdo = *(u32 *)rx_msg->payload;
usbpd_set_state(pd, PE_SRC_NEGOTIATE_CAPABILITY);
} else if (IS_CTRL(rx_msg, MSG_DR_SWAP)) {
if (pd->vdm_state == MODE_ENTERED) {
@@ -1822,6 +2094,15 @@ static void usbpd_sm(struct work_struct *w)
vconn_swap(pd);
} else if (IS_DATA(rx_msg, MSG_VDM)) {
handle_vdm_rx(pd, rx_msg);
+ } else if (rx_msg && pd->spec_rev == USBPD_REV_30) {
+ /* unhandled messages */
+ ret = pd_send_msg(pd, MSG_NOT_SUPPORTED, NULL, 0,
+ SOP_MSG);
+ if (ret) {
+ usbpd_err(&pd->dev, "Error sending Not supported\n");
+ usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
+ }
+ break;
} else if (pd->send_pr_swap) {
pd->send_pr_swap = false;
ret = pd_send_msg(pd, MSG_PR_SWAP, NULL, 0, SOP_MSG);
@@ -2062,7 +2343,8 @@ static void usbpd_sm(struct work_struct *w)
usbpd_err(&pd->dev, "Error sending Sink Caps\n");
usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
}
- } else if (IS_CTRL(rx_msg, MSG_GET_SOURCE_CAP)) {
+ } else if (IS_CTRL(rx_msg, MSG_GET_SOURCE_CAP) &&
+ pd->spec_rev == USBPD_REV_20) {
ret = pd_send_msg(pd, MSG_SOURCE_CAPABILITIES,
default_src_caps,
ARRAY_SIZE(default_src_caps), SOP_MSG);
@@ -2085,7 +2367,8 @@ static void usbpd_sm(struct work_struct *w)
}
dr_swap(pd);
- } else if (IS_CTRL(rx_msg, MSG_PR_SWAP)) {
+ } else if (IS_CTRL(rx_msg, MSG_PR_SWAP) &&
+ pd->spec_rev == USBPD_REV_20) {
/* lock in current mode */
set_power_role(pd, pd->current_pr);
@@ -2103,7 +2386,8 @@ static void usbpd_sm(struct work_struct *w)
POWER_SUPPLY_PROP_PR_SWAP, &val);
usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
break;
- } else if (IS_CTRL(rx_msg, MSG_VCONN_SWAP)) {
+ } else if (IS_CTRL(rx_msg, MSG_VCONN_SWAP) &&
+ pd->spec_rev == USBPD_REV_20) {
/*
* if VCONN is connected to VBUS, make sure we are
* not in high voltage contract, otherwise reject.
@@ -2131,6 +2415,120 @@ static void usbpd_sm(struct work_struct *w)
vconn_swap(pd);
} else if (IS_DATA(rx_msg, MSG_VDM)) {
handle_vdm_rx(pd, rx_msg);
+ } else if (pd->send_get_src_cap_ext && is_sink_tx_ok(pd)) {
+ pd->send_get_src_cap_ext = false;
+ ret = pd_send_msg(pd, MSG_GET_SOURCE_CAP_EXTENDED, NULL,
+ 0, SOP_MSG);
+ if (ret) {
+ dev_err(&pd->dev,
+ "Error sending get_src_cap_ext\n");
+ usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+ break;
+ }
+ kick_sm(pd, SENDER_RESPONSE_TIME);
+ } else if (rx_msg &&
+ IS_EXT(rx_msg, MSG_SOURCE_CAPABILITIES_EXTENDED)) {
+ if (rx_msg->data_len != PD_SRC_CAP_EXT_DB_LEN) {
+ usbpd_err(&pd->dev, "Invalid src cap ext db\n");
+ break;
+ }
+ memcpy(&pd->src_cap_ext_db, rx_msg->payload,
+ sizeof(pd->src_cap_ext_db));
+ complete(&pd->is_ready);
+ } else if (pd->send_get_pps_status && is_sink_tx_ok(pd)) {
+ pd->send_get_pps_status = false;
+ ret = pd_send_msg(pd, MSG_GET_PPS_STATUS, NULL,
+ 0, SOP_MSG);
+ if (ret) {
+ dev_err(&pd->dev,
+ "Error sending get_pps_status\n");
+ usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+ break;
+ }
+ kick_sm(pd, SENDER_RESPONSE_TIME);
+ } else if (rx_msg &&
+ IS_EXT(rx_msg, MSG_PPS_STATUS)) {
+ if (rx_msg->data_len != sizeof(pd->pps_status_db)) {
+ usbpd_err(&pd->dev, "Invalid pps status db\n");
+ break;
+ }
+ memcpy(&pd->pps_status_db, rx_msg->payload,
+ sizeof(pd->pps_status_db));
+ complete(&pd->is_ready);
+ } else if (IS_DATA(rx_msg, MSG_ALERT)) {
+ if (rx_msg->data_len != sizeof(pd->received_ado)) {
+ usbpd_err(&pd->dev, "Invalid ado\n");
+ break;
+ }
+ memcpy(&pd->received_ado, rx_msg->payload,
+ sizeof(pd->received_ado));
+ ret = pd_send_msg(pd, MSG_GET_STATUS, NULL,
+ 0, SOP_MSG);
+ if (ret) {
+ dev_err(&pd->dev,
+ "Error sending get_status\n");
+ usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+ break;
+ }
+ kick_sm(pd, SENDER_RESPONSE_TIME);
+ } else if (rx_msg &&
+ IS_EXT(rx_msg, MSG_STATUS)) {
+ if (rx_msg->data_len != PD_STATUS_DB_LEN) {
+ usbpd_err(&pd->dev, "Invalid status db\n");
+ break;
+ }
+ memcpy(&pd->status_db, rx_msg->payload,
+ sizeof(pd->status_db));
+ kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
+ } else if (pd->send_get_battery_cap && is_sink_tx_ok(pd)) {
+ pd->send_get_battery_cap = false;
+ ret = pd_send_ext_msg(pd, MSG_GET_BATTERY_CAP,
+ &pd->get_battery_cap_db, 1, SOP_MSG);
+ if (ret) {
+ dev_err(&pd->dev,
+ "Error sending get_battery_cap\n");
+ usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+ break;
+ }
+ kick_sm(pd, SENDER_RESPONSE_TIME);
+ } else if (rx_msg &&
+ IS_EXT(rx_msg, MSG_BATTERY_CAPABILITIES)) {
+ if (rx_msg->data_len != PD_BATTERY_CAP_DB_LEN) {
+ usbpd_err(&pd->dev, "Invalid battery cap db\n");
+ break;
+ }
+ memcpy(&pd->battery_cap_db, rx_msg->payload,
+ sizeof(pd->battery_cap_db));
+ complete(&pd->is_ready);
+ } else if (pd->send_get_battery_status && is_sink_tx_ok(pd)) {
+ pd->send_get_battery_status = false;
+ ret = pd_send_ext_msg(pd, MSG_GET_BATTERY_STATUS,
+ &pd->get_battery_status_db, 1, SOP_MSG);
+ if (ret) {
+ dev_err(&pd->dev,
+ "Error sending get_battery_status\n");
+ usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+ break;
+ }
+ kick_sm(pd, SENDER_RESPONSE_TIME);
+ } else if (rx_msg &&
+ IS_EXT(rx_msg, MSG_BATTERY_STATUS)) {
+ if (rx_msg->data_len != sizeof(pd->battery_sts_dobj)) {
+ usbpd_err(&pd->dev, "Invalid bat sts dobj\n");
+ break;
+ }
+ memcpy(&pd->battery_sts_dobj, rx_msg->payload,
+ sizeof(pd->battery_sts_dobj));
+ complete(&pd->is_ready);
+ } else if (rx_msg && pd->spec_rev == USBPD_REV_30) {
+ /* unhandled messages */
+ ret = pd_send_msg(pd, MSG_NOT_SUPPORTED, NULL, 0,
+ SOP_MSG);
+ if (ret) {
+ usbpd_err(&pd->dev, "Error sending Not supported\n");
+ usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+ }
+ break;
} else if (pd->send_request) {
pd->send_request = false;
usbpd_set_state(pd, PE_SNK_SELECT_CAPABILITY);
@@ -2779,6 +3177,10 @@ static int usbpd_uevent(struct device *dev, struct kobj_uevent_env *env)
"explicit" : "implicit");
add_uevent_var(env, "ALT_MODE=%d", pd->vdm_state == MODE_ENTERED);
+ add_uevent_var(env, "ADO=%08x", pd->received_ado);
+ for (i = 0; i < PD_STATUS_DB_LEN; i++)
+ add_uevent_var(env, "SDB%d=%08x", i, pd->status_db[i]);
+
return 0;
}
@@ -3126,6 +3528,145 @@ static ssize_t hard_reset_store(struct device *dev,
}
static DEVICE_ATTR_WO(hard_reset);
+static int trigger_tx_msg(struct usbpd *pd, bool *msg_tx_flag)
+{
+ int ret = 0;
+
+ /* Only allowed if we are already in explicit sink contract */
+ if (pd->current_state != PE_SNK_READY || !is_sink_tx_ok(pd)) {
+ usbpd_err(&pd->dev, "%s: Cannot send msg\n", __func__);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ reinit_completion(&pd->is_ready);
+ *msg_tx_flag = true;
+ kick_sm(pd, 0);
+
+ /* wait for operation to complete */
+ if (!wait_for_completion_timeout(&pd->is_ready,
+ msecs_to_jiffies(1000))) {
+ usbpd_err(&pd->dev, "%s: request timed out\n", __func__);
+ ret = -ETIMEDOUT;
+ }
+
+out:
+ *msg_tx_flag = false;
+ return ret;
+
+}
+
+static ssize_t get_src_cap_ext_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i, ret, len = 0;
+ struct usbpd *pd = dev_get_drvdata(dev);
+
+ if (pd->spec_rev == USBPD_REV_20)
+ return -EINVAL;
+
+ ret = trigger_tx_msg(pd, &pd->send_get_src_cap_ext);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < PD_SRC_CAP_EXT_DB_LEN; i++)
+ len += snprintf(buf + len, PAGE_SIZE - len, "%d\n",
+ pd->src_cap_ext_db[i]);
+ return len;
+}
+static DEVICE_ATTR_RO(get_src_cap_ext);
+
+static ssize_t get_pps_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ struct usbpd *pd = dev_get_drvdata(dev);
+
+ if (pd->spec_rev == USBPD_REV_20)
+ return -EINVAL;
+
+ ret = trigger_tx_msg(pd, &pd->send_get_pps_status);
+ if (ret)
+ return ret;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", pd->pps_status_db);
+}
+static DEVICE_ATTR_RO(get_pps_status);
+
+static ssize_t rx_ado_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct usbpd *pd = dev_get_drvdata(dev);
+
+ /* dump the ADO as a hex string */
+ return snprintf(buf, PAGE_SIZE, "%08x\n", pd->received_ado);
+}
+static DEVICE_ATTR_RO(rx_ado);
+
+static ssize_t get_battery_cap_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct usbpd *pd = dev_get_drvdata(dev);
+ int val, ret;
+
+ if (pd->spec_rev == USBPD_REV_20 || sscanf(buf, "%d\n", &val) != 1) {
+ pd->get_battery_cap_db = -EINVAL;
+ return -EINVAL;
+ }
+
+ pd->get_battery_cap_db = val;
+
+ ret = trigger_tx_msg(pd, &pd->send_get_battery_cap);
+
+ return ret ? ret : size;
+}
+
+static ssize_t get_battery_cap_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i, len = 0;
+ struct usbpd *pd = dev_get_drvdata(dev);
+
+ if (pd->get_battery_cap_db == -EINVAL)
+ return -EINVAL;
+
+ for (i = 0; i < PD_BATTERY_CAP_DB_LEN; i++)
+ len += snprintf(buf + len, PAGE_SIZE - len, "%d\n",
+ pd->battery_cap_db[i]);
+ return len;
+}
+static DEVICE_ATTR_RW(get_battery_cap);
+
+static ssize_t get_battery_status_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct usbpd *pd = dev_get_drvdata(dev);
+ int val, ret;
+
+ if (pd->spec_rev == USBPD_REV_20 || sscanf(buf, "%d\n", &val) != 1) {
+ pd->get_battery_status_db = -EINVAL;
+ return -EINVAL;
+ }
+
+ pd->get_battery_status_db = val;
+
+ ret = trigger_tx_msg(pd, &pd->send_get_battery_status);
+
+ return ret ? ret : size;
+}
+
+static ssize_t get_battery_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usbpd *pd = dev_get_drvdata(dev);
+
+ if (pd->get_battery_status_db == -EINVAL)
+ return -EINVAL;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", pd->battery_sts_dobj);
+}
+static DEVICE_ATTR_RW(get_battery_status);
+
static struct attribute *usbpd_attrs[] = {
&dev_attr_contract.attr,
&dev_attr_initial_pr.attr,
@@ -3145,6 +3686,11 @@ static struct attribute *usbpd_attrs[] = {
&dev_attr_rdo.attr,
&dev_attr_rdo_h.attr,
&dev_attr_hard_reset.attr,
+ &dev_attr_get_src_cap_ext.attr,
+ &dev_attr_get_pps_status.attr,
+ &dev_attr_rx_ado.attr,
+ &dev_attr_get_battery_cap.attr,
+ &dev_attr_get_battery_status.attr,
NULL,
};
ATTRIBUTE_GROUPS(usbpd);
@@ -3375,6 +3921,7 @@ struct usbpd *usbpd_create(struct device *parent)
INIT_LIST_HEAD(&pd->rx_q);
INIT_LIST_HEAD(&pd->svid_handlers);
init_completion(&pd->is_ready);
+ init_completion(&pd->tx_chunk_request);
pd->psy_nb.notifier_call = psy_changed;
ret = power_supply_reg_notifier(&pd->psy_nb);
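[Editor's note, not part of the patch] The chunked extended-message support added above splits payloads into 26-byte chunks and repeats the total Data Size in every extended header, advancing only the chunk number. A stand-alone sketch of that packing, with the macros and limits copied from the hunk and data_len = 60 chosen arbitrarily:

#include <stdio.h>

/* Constants and bit-packing macros copied from the policy_engine.c hunk */
#define PD_MAX_EXT_MSG_LEN		260
#define PD_MAX_EXT_MSG_LEGACY_LEN	26

#define PD_MSG_EXT_HDR(chunked, num, req, size) \
	(((chunked) << 15) | (((num) & 0xF) << 11) | \
	((req) << 10) | ((size) & 0x1FF))
#define PD_MSG_EXT_HDR_CHUNK_NUM(ehdr)	(((ehdr) >> 11) & 0xF)
#define PD_MSG_EXT_HDR_DATA_SIZE(ehdr)	((ehdr) & 0x1FF)

int main(void)
{
	unsigned int data_len = 60;	/* hypothetical extended payload size */
	unsigned int sent = 0, chunk = 0;

	/* mirror the truncation done by pd_send_ext_msg() */
	if (data_len > PD_MAX_EXT_MSG_LEN)
		data_len = PD_MAX_EXT_MSG_LEN;

	while (sent < data_len) {
		unsigned int bytes = data_len - sent;
		unsigned int ehdr;

		if (bytes > PD_MAX_EXT_MSG_LEGACY_LEN)
			bytes = PD_MAX_EXT_MSG_LEGACY_LEN;

		/* Data Size always carries the total length; only the
		 * chunk number advances from one chunk to the next. */
		ehdr = PD_MSG_EXT_HDR(1, chunk, 0, data_len) & 0xFFFF;
		printf("chunk %u: ehdr=0x%04x num=%u total=%u carries %u bytes\n",
		       chunk, ehdr, PD_MSG_EXT_HDR_CHUNK_NUM(ehdr),
		       PD_MSG_EXT_HDR_DATA_SIZE(ehdr), bytes);

		sent += bytes;
		chunk++;
	}
	return 0;
}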
diff --git a/drivers/video/fbdev/msm/mdss_dp.c b/drivers/video/fbdev/msm/mdss_dp.c
index 4c6a5e73406b..bc325a91a9bf 100644
--- a/drivers/video/fbdev/msm/mdss_dp.c
+++ b/drivers/video/fbdev/msm/mdss_dp.c
@@ -68,6 +68,7 @@ static int mdss_dp_process_phy_test_pattern_request(
struct mdss_dp_drv_pdata *dp);
static int mdss_dp_send_audio_notification(
struct mdss_dp_drv_pdata *dp, int val);
+static void mdss_dp_reset_sw_state(struct mdss_dp_drv_pdata *dp);
static inline void mdss_dp_reset_sink_count(struct mdss_dp_drv_pdata *dp)
{
@@ -1489,7 +1490,12 @@ static int mdss_dp_setup_main_link(struct mdss_dp_drv_pdata *dp, bool train)
pr_debug("enter\n");
mdss_dp_mainlink_ctrl(&dp->ctrl_io, true);
- mdss_dp_aux_set_sink_power_state(dp, SINK_POWER_ON);
+ ret = mdss_dp_aux_send_psm_request(dp, false);
+ if (ret) {
+ pr_err("Failed to exit low power mode, rc=%d\n", ret);
+ goto end;
+ }
+
reinit_completion(&dp->video_comp);
if (mdss_dp_is_phy_test_pattern_requested(dp))
@@ -1576,15 +1582,6 @@ static int mdss_dp_on_irq(struct mdss_dp_drv_pdata *dp_drv, bool lt_needed)
dp_drv->power_on = true;
- if (dp_drv->psm_enabled) {
- ret = mdss_dp_aux_send_psm_request(dp_drv, false);
- if (ret) {
- pr_err("Failed to exit low power mode, rc=%d\n",
- ret);
- goto exit_loop;
- }
- }
-
ret = mdss_dp_setup_main_link(dp_drv, lt_needed);
exit_loop:
@@ -1653,15 +1650,6 @@ int mdss_dp_on_hpd(struct mdss_dp_drv_pdata *dp_drv)
mdss_dp_configure_source_params(dp_drv, ln_map);
- if (dp_drv->psm_enabled) {
- ret = mdss_dp_aux_send_psm_request(dp_drv, false);
- if (ret) {
- pr_err("Failed to exit low power mode, rc=%d\n", ret);
- goto exit;
- }
- }
-
-
link_training:
dp_drv->power_on = true;
@@ -2989,6 +2977,7 @@ static int mdss_dp_sysfs_create(struct mdss_dp_drv_pdata *dp,
static void mdss_dp_mainlink_push_idle(struct mdss_panel_data *pdata)
{
+ bool cable_connected;
struct mdss_dp_drv_pdata *dp_drv = NULL;
const int idle_pattern_completion_timeout_ms = 3 * HZ / 100;
@@ -3009,6 +2998,14 @@ static void mdss_dp_mainlink_push_idle(struct mdss_panel_data *pdata)
return;
}
+ /* power down the sink if cable is still connected */
+ mutex_lock(&dp_drv->attention_lock);
+ cable_connected = dp_drv->cable_connected;
+ mutex_unlock(&dp_drv->attention_lock);
+ if (cable_connected && dp_drv->alt_mode.dp_status.hpd_high) {
+ if (mdss_dp_aux_send_psm_request(dp_drv, true))
+ pr_err("Failed to enter low power mode\n");
+ }
reinit_completion(&dp_drv->idle_comp);
mdss_dp_state_ctrl(&dp_drv->ctrl_io, ST_PUSH_IDLE);
if (!wait_for_completion_timeout(&dp_drv->idle_comp,
@@ -3129,6 +3126,10 @@ static int mdss_dp_event_handler(struct mdss_panel_data *pdata,
pr_err("DP Controller not powered on\n");
break;
}
+ if (!atomic_read(&dp->notification_pending)) {
+ pr_debug("blank when cable is connected\n");
+ kthread_park(dp->ev_thread);
+ }
if (dp_is_hdcp_enabled(dp)) {
dp->hdcp_status = HDCP_STATE_INACTIVE;
@@ -3168,8 +3169,10 @@ static int mdss_dp_event_handler(struct mdss_panel_data *pdata,
* when you connect DP sink while the
* device is in suspend state.
*/
- if ((!dp->power_on) && (dp->dp_initialized))
+ if ((!dp->power_on) && (dp->dp_initialized)) {
rc = mdss_dp_host_deinit(dp);
+ kthread_park(dp->ev_thread);
+ }
/*
* For DP suspend/resume use case, CHECK_PARAMS is
@@ -3181,8 +3184,11 @@ static int mdss_dp_event_handler(struct mdss_panel_data *pdata,
dp->suspend_vic = dp->vic;
break;
case MDSS_EVENT_RESUME:
- if (dp->suspend_vic != HDMI_VFRMT_UNKNOWN)
+ if (dp->suspend_vic != HDMI_VFRMT_UNKNOWN) {
dp_init_panel_info(dp, dp->suspend_vic);
+ mdss_dp_reset_sw_state(dp);
+ kthread_unpark(dp->ev_thread);
+ }
break;
default:
pr_debug("unhandled event=%d\n", event);
diff --git a/drivers/video/fbdev/msm/mdss_dp.h b/drivers/video/fbdev/msm/mdss_dp.h
index afa8e3db590f..983f5e34a515 100644
--- a/drivers/video/fbdev/msm/mdss_dp.h
+++ b/drivers/video/fbdev/msm/mdss_dp.h
@@ -218,10 +218,6 @@ struct dp_alt_mode {
#define ST_SEND_VIDEO BIT(7)
#define ST_PUSH_IDLE BIT(8)
-/* sink power state */
-#define SINK_POWER_ON 1
-#define SINK_POWER_OFF 2
-
#define DP_LINK_RATE_162 6 /* 1.62G = 270M * 6 */
#define DP_LINK_RATE_270 10 /* 2.70G = 270M * 10 */
#define DP_LINK_RATE_540 20 /* 5.40G = 270M * 20 */
@@ -1181,11 +1177,9 @@ void dp_aux_native_handler(struct mdss_dp_drv_pdata *dp, u32 isr);
void mdss_dp_aux_init(struct mdss_dp_drv_pdata *ep);
void mdss_dp_fill_link_cfg(struct mdss_dp_drv_pdata *ep);
-void mdss_dp_sink_power_down(struct mdss_dp_drv_pdata *ep);
void mdss_dp_lane_power_ctrl(struct mdss_dp_drv_pdata *ep, int up);
void mdss_dp_config_ctrl(struct mdss_dp_drv_pdata *ep);
char mdss_dp_gen_link_clk(struct mdss_dp_drv_pdata *dp);
-int mdss_dp_aux_set_sink_power_state(struct mdss_dp_drv_pdata *ep, char state);
int mdss_dp_aux_send_psm_request(struct mdss_dp_drv_pdata *dp, bool enable);
void mdss_dp_aux_send_test_response(struct mdss_dp_drv_pdata *ep);
void *mdss_dp_get_hdcp_data(struct device *dev);
diff --git a/drivers/video/fbdev/msm/mdss_dp_aux.c b/drivers/video/fbdev/msm/mdss_dp_aux.c
index c0632e8241a0..86946adfeeb0 100644
--- a/drivers/video/fbdev/msm/mdss_dp_aux.c
+++ b/drivers/video/fbdev/msm/mdss_dp_aux.c
@@ -2556,15 +2556,6 @@ static int dp_link_rate_down_shift(struct mdss_dp_drv_pdata *ep)
return ret;
}
-int mdss_dp_aux_set_sink_power_state(struct mdss_dp_drv_pdata *ep, char state)
-{
- int ret;
-
- ret = dp_aux_write_buf(ep, 0x600, &state, 1, 0);
- pr_debug("state=%d ret=%d\n", state, ret);
- return ret;
-}
-
static void dp_clear_training_pattern(struct mdss_dp_drv_pdata *ep)
{
int usleep_time;
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
index af95a4a6dccd..a5a407708334 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
@@ -113,6 +113,7 @@ static void hdmi_tx_fps_work(struct work_struct *work);
static int hdmi_tx_pinctrl_set_state(struct hdmi_tx_ctrl *hdmi_ctrl,
enum hdmi_tx_power_module_type module, bool active);
static void hdmi_panel_set_hdr_infoframe(struct hdmi_tx_ctrl *hdmi_ctrl);
+static void hdmi_panel_clear_hdr_infoframe(struct hdmi_tx_ctrl *hdmi_ctrl);
static int hdmi_tx_audio_info_setup(struct platform_device *pdev,
struct msm_ext_disp_audio_setup_params *params);
static int hdmi_tx_get_audio_edid_blk(struct platform_device *pdev,
@@ -1276,6 +1277,7 @@ static ssize_t hdmi_tx_sysfs_wta_hdr_stream(struct device *dev,
{
int ret = 0;
struct hdmi_tx_ctrl *ctrl = NULL;
+ u8 hdr_op;
ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
if (!ctrl) {
@@ -1296,36 +1298,43 @@ static ssize_t hdmi_tx_sysfs_wta_hdr_stream(struct device *dev,
goto end;
}
- memcpy(&ctrl->hdr_data, buf, sizeof(struct mdp_hdr_stream));
+ memcpy(&ctrl->hdr_ctrl, buf, sizeof(struct mdp_hdr_stream_ctrl));
pr_debug("%s: 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
__func__,
- ctrl->hdr_data.eotf,
- ctrl->hdr_data.display_primaries_x[0],
- ctrl->hdr_data.display_primaries_y[0],
- ctrl->hdr_data.display_primaries_x[1],
- ctrl->hdr_data.display_primaries_y[1],
- ctrl->hdr_data.display_primaries_x[2],
- ctrl->hdr_data.display_primaries_y[2]);
+ ctrl->hdr_ctrl.hdr_stream.eotf,
+ ctrl->hdr_ctrl.hdr_stream.display_primaries_x[0],
+ ctrl->hdr_ctrl.hdr_stream.display_primaries_y[0],
+ ctrl->hdr_ctrl.hdr_stream.display_primaries_x[1],
+ ctrl->hdr_ctrl.hdr_stream.display_primaries_y[1],
+ ctrl->hdr_ctrl.hdr_stream.display_primaries_x[2],
+ ctrl->hdr_ctrl.hdr_stream.display_primaries_y[2]);
pr_debug("%s: 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
__func__,
- ctrl->hdr_data.white_point_x,
- ctrl->hdr_data.white_point_y,
- ctrl->hdr_data.max_luminance,
- ctrl->hdr_data.min_luminance,
- ctrl->hdr_data.max_content_light_level,
- ctrl->hdr_data.max_average_light_level);
+ ctrl->hdr_ctrl.hdr_stream.white_point_x,
+ ctrl->hdr_ctrl.hdr_stream.white_point_y,
+ ctrl->hdr_ctrl.hdr_stream.max_luminance,
+ ctrl->hdr_ctrl.hdr_stream.min_luminance,
+ ctrl->hdr_ctrl.hdr_stream.max_content_light_level,
+ ctrl->hdr_ctrl.hdr_stream.max_average_light_level);
pr_debug("%s: 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
__func__,
- ctrl->hdr_data.pixel_encoding,
- ctrl->hdr_data.colorimetry,
- ctrl->hdr_data.range,
- ctrl->hdr_data.bits_per_component,
- ctrl->hdr_data.content_type);
+ ctrl->hdr_ctrl.hdr_stream.pixel_encoding,
+ ctrl->hdr_ctrl.hdr_stream.colorimetry,
+ ctrl->hdr_ctrl.hdr_stream.range,
+ ctrl->hdr_ctrl.hdr_stream.bits_per_component,
+ ctrl->hdr_ctrl.hdr_stream.content_type);
+ hdr_op = hdmi_hdr_get_ops(ctrl->curr_hdr_state,
+ ctrl->hdr_ctrl.hdr_state);
- hdmi_panel_set_hdr_infoframe(ctrl);
+ if (hdr_op == HDR_SEND_INFO)
+ hdmi_panel_set_hdr_infoframe(ctrl);
+ else if (hdr_op == HDR_CLEAR_INFO)
+ hdmi_panel_clear_hdr_infoframe(ctrl);
+
+ ctrl->curr_hdr_state = ctrl->hdr_ctrl.hdr_state;
ret = strnlen(buf, PAGE_SIZE);
end:
@@ -2113,6 +2122,8 @@ static int hdmi_tx_init_features(struct hdmi_tx_ctrl *hdmi_ctrl,
goto err;
}
+ /* reset HDR state */
+ hdmi_ctrl->curr_hdr_state = HDR_DISABLE;
return 0;
err:
hdmi_tx_deinit_features(hdmi_ctrl, deinit_features);
@@ -2878,11 +2889,12 @@ static void hdmi_panel_set_hdr_infoframe(struct hdmi_tx_ctrl *ctrl)
packet_header = type_code | (version << 8) | (length << 16);
DSS_REG_W(io, HDMI_GENERIC0_HDR, packet_header);
- packet_payload = (ctrl->hdr_data.eotf << 8);
+ packet_payload = (ctrl->hdr_ctrl.hdr_stream.eotf << 8);
if (hdmi_tx_metadata_type_one(ctrl)) {
- packet_payload |= (descriptor_id << 16)
- | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_x[0])
- << 24);
+ packet_payload |=
+ (descriptor_id << 16)
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_x[0]) << 24);
DSS_REG_W(io, HDMI_GENERIC0_0, packet_payload);
} else {
pr_debug("%s: Metadata Type 1 not supported\n", __func__);
@@ -2891,44 +2903,56 @@ static void hdmi_panel_set_hdr_infoframe(struct hdmi_tx_ctrl *ctrl)
}
packet_payload =
- (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_x[0]))
- | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_y[0]) << 8)
- | (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_y[0]) << 16)
- | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_x[1]) << 24);
+ (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.display_primaries_x[0]))
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_y[0]) << 8)
+ | (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_y[0]) << 16)
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_x[1]) << 24);
DSS_REG_W(io, HDMI_GENERIC0_1, packet_payload);
packet_payload =
- (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_x[1]))
- | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_y[1]) << 8)
- | (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_y[1]) << 16)
- | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_x[2]) << 24);
+ (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.display_primaries_x[1]))
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_y[1]) << 8)
+ | (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_y[1]) << 16)
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_x[2]) << 24);
DSS_REG_W(io, HDMI_GENERIC0_2, packet_payload);
packet_payload =
- (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_x[2]))
- | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_y[2]) << 8)
- | (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_y[2]) << 16)
- | (HDMI_GET_LSB(ctrl->hdr_data.white_point_x) << 24);
+ (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.display_primaries_x[2]))
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_y[2]) << 8)
+ | (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_y[2]) << 16)
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.white_point_x) << 24);
DSS_REG_W(io, HDMI_GENERIC0_3, packet_payload);
packet_payload =
- (HDMI_GET_MSB(ctrl->hdr_data.white_point_x))
- | (HDMI_GET_LSB(ctrl->hdr_data.white_point_y) << 8)
- | (HDMI_GET_MSB(ctrl->hdr_data.white_point_y) << 16)
- | (HDMI_GET_LSB(ctrl->hdr_data.max_luminance) << 24);
+ (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.white_point_x))
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.white_point_y) << 8)
+ | (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.white_point_y) << 16)
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.max_luminance) << 24);
DSS_REG_W(io, HDMI_GENERIC0_4, packet_payload);
packet_payload =
- (HDMI_GET_MSB(ctrl->hdr_data.max_luminance))
- | (HDMI_GET_LSB(ctrl->hdr_data.min_luminance) << 8)
- | (HDMI_GET_MSB(ctrl->hdr_data.min_luminance) << 16)
- | (HDMI_GET_LSB(ctrl->hdr_data.max_content_light_level) << 24);
+ (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.max_luminance))
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.min_luminance) << 8)
+ | (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.min_luminance) << 16)
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.
+ max_content_light_level) << 24);
DSS_REG_W(io, HDMI_GENERIC0_5, packet_payload);
packet_payload =
- (HDMI_GET_MSB(ctrl->hdr_data.max_content_light_level))
- | (HDMI_GET_LSB(ctrl->hdr_data.max_average_light_level) << 8)
- | (HDMI_GET_MSB(ctrl->hdr_data.max_average_light_level) << 16);
+ (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.
+ max_content_light_level))
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.
+ max_average_light_level) << 8)
+ | (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.
+ max_average_light_level) << 16);
DSS_REG_W(io, HDMI_GENERIC0_6, packet_payload);
enable_packet_control:
@@ -2943,6 +2967,32 @@ enable_packet_control:
DSS_REG_W(io, HDMI_GEN_PKT_CTRL, packet_control);
}
+static void hdmi_panel_clear_hdr_infoframe(struct hdmi_tx_ctrl *ctrl)
+{
+ u32 packet_control = 0;
+ struct dss_io_data *io = NULL;
+
+ if (!ctrl) {
+ pr_err("%s: invalid input\n", __func__);
+ return;
+ }
+
+ if (!hdmi_tx_is_hdr_supported(ctrl)) {
+ pr_err("%s: Sink does not support HDR\n", __func__);
+ return;
+ }
+
+ io = &ctrl->pdata.io[HDMI_TX_CORE_IO];
+ if (!io->base) {
+		pr_err("%s: core io not initialized\n", __func__);
+ return;
+ }
+
+ packet_control = DSS_REG_R_ND(io, HDMI_GEN_PKT_CTRL);
+ packet_control &= ~HDMI_GEN_PKT_CTRL_CLR_MASK;
+ DSS_REG_W(io, HDMI_GEN_PKT_CTRL, packet_control);
+}
+
static int hdmi_tx_audio_info_setup(struct platform_device *pdev,
struct msm_ext_disp_audio_setup_params *params)
{
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.h b/drivers/video/fbdev/msm/mdss_hdmi_tx.h
index 3469b8a5819f..ad02003631f6 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.h
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.h
@@ -21,6 +21,7 @@
#include "mdss_hdmi_audio.h"
#define MAX_SWITCH_NAME_SIZE 5
+#define HDMI_GEN_PKT_CTRL_CLR_MASK 0x7
enum hdmi_tx_io_type {
HDMI_TX_CORE_IO,
@@ -90,7 +91,7 @@ struct hdmi_tx_ctrl {
struct msm_ext_disp_audio_setup_params audio_params;
struct msm_ext_disp_init_data ext_audio_data;
struct work_struct fps_work;
- struct mdp_hdr_stream hdr_data;
+ struct mdp_hdr_stream_ctrl hdr_ctrl;
spinlock_t hpd_state_lock;
@@ -116,6 +117,7 @@ struct hdmi_tx_ctrl {
u8 hdcp_status;
u8 spd_vendor_name[9];
u8 spd_product_description[17];
+ u8 curr_hdr_state;
bool hdcp_feature_on;
bool hpd_disabled;
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.c b/drivers/video/fbdev/msm/mdss_hdmi_util.c
index 827013d06412..5bc46d8c8f92 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_util.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_util.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/msm_mdp.h>
+#include <linux/msm_mdp_ext.h>
#include "mdss_hdmi_util.h"
#define RESOLUTION_NAME_STR_LEN 30
@@ -1811,3 +1812,51 @@ int hdmi_hdcp2p2_ddc_read_rxstatus(struct hdmi_tx_ddc_ctrl *ctrl)
return rc;
}
+
+u8 hdmi_hdr_get_ops(u8 curr_state, u8 new_state)
+{
+
+	/* There are four valid state transitions:
+ * 1. HDR_DISABLE -> HDR_ENABLE
+ *
+ * In this transition, we shall start sending
+ * HDR metadata with metadata from the HDR clip
+ *
+ * 2. HDR_ENABLE -> HDR_RESET
+ *
+ * In this transition, we will keep sending
+ * HDR metadata but with EOTF and metadata as 0
+ *
+ * 3. HDR_RESET -> HDR_ENABLE
+ *
+ * In this transition, we will start sending
+ * HDR metadata with metadata from the HDR clip
+ *
+ * 4. HDR_RESET -> HDR_DISABLE
+ *
+ * In this transition, we will stop sending
+ * metadata to the sink and clear PKT_CTRL register
+ * bits.
+ */
+
+ if ((curr_state == HDR_DISABLE)
+ && (new_state == HDR_ENABLE)) {
+ pr_debug("State changed HDR_DISABLE ---> HDR_ENABLE\n");
+ return HDR_SEND_INFO;
+ } else if ((curr_state == HDR_ENABLE)
+ && (new_state == HDR_RESET)) {
+ pr_debug("State changed HDR_ENABLE ---> HDR_RESET\n");
+ return HDR_SEND_INFO;
+ } else if ((curr_state == HDR_RESET)
+ && (new_state == HDR_ENABLE)) {
+ pr_debug("State changed HDR_RESET ---> HDR_ENABLE\n");
+ return HDR_SEND_INFO;
+ } else if ((curr_state == HDR_RESET)
+ && (new_state == HDR_DISABLE)) {
+ pr_debug("State changed HDR_RESET ---> HDR_DISABLE\n");
+ return HDR_CLEAR_INFO;
+ }
+
+ pr_debug("Unsupported OR no state change\n");
+ return HDR_UNSUPPORTED_OP;
+}
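[Editor's note, not part of the patch] The full transition table that hdmi_hdr_get_ops() implements, written out as a stand-alone sketch. The HDR_* state values are taken from the msm_mdp_ext.h hunk later in this commit and the hdmi_hdr_op values from the mdss_hdmi_util.h hunk; everything else is illustrative.

#include <stdio.h>

/* State values copied from the msm_mdp_ext.h hunk */
#define HDR_ENABLE	(1 << 0)
#define HDR_DISABLE	(1 << 1)
#define HDR_RESET	(1 << 2)

/* Return values copied from the mdss_hdmi_util.h hunk */
enum hdmi_hdr_op {
	HDR_UNSUPPORTED_OP,
	HDR_SEND_INFO,
	HDR_CLEAR_INFO
};

/* Mirrors the decision made by hdmi_hdr_get_ops() above */
static enum hdmi_hdr_op hdr_op(unsigned int curr, unsigned int next)
{
	if (curr == HDR_DISABLE && next == HDR_ENABLE)
		return HDR_SEND_INFO;	/* start sending clip metadata */
	if (curr == HDR_ENABLE && next == HDR_RESET)
		return HDR_SEND_INFO;	/* keep sending, EOTF/metadata zeroed */
	if (curr == HDR_RESET && next == HDR_ENABLE)
		return HDR_SEND_INFO;	/* resume clip metadata */
	if (curr == HDR_RESET && next == HDR_DISABLE)
		return HDR_CLEAR_INFO;	/* stop sending, clear PKT_CTRL bits */
	return HDR_UNSUPPORTED_OP;
}

int main(void)
{
	static const char * const ops[] = {
		"HDR_UNSUPPORTED_OP", "HDR_SEND_INFO", "HDR_CLEAR_INFO"
	};
	static const unsigned int states[] = { HDR_ENABLE, HDR_DISABLE, HDR_RESET };
	static const char * const names[] = { "ENABLE", "DISABLE", "RESET" };
	unsigned int i, j;

	for (i = 0; i < 3; i++)
		for (j = 0; j < 3; j++)
			printf("%-7s -> %-7s : %s\n", names[i], names[j],
			       ops[hdr_op(states[i], states[j])]);
	return 0;
}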
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.h b/drivers/video/fbdev/msm/mdss_hdmi_util.h
index 4fd659616bcc..fe554f8e9e67 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_util.h
+++ b/drivers/video/fbdev/msm/mdss_hdmi_util.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -425,6 +425,12 @@ enum hdmi_tx_hdcp2p2_rxstatus_intr_mask {
RXSTATUS_REAUTH_REQ = BIT(14),
};
+enum hdmi_hdr_op {
+ HDR_UNSUPPORTED_OP,
+ HDR_SEND_INFO,
+ HDR_CLEAR_INFO
+};
+
struct hdmi_tx_hdcp2p2_ddc_data {
enum hdmi_tx_hdcp2p2_rxstatus_intr_mask intr_mask;
u32 timeout_ms;
@@ -518,5 +524,5 @@ void hdmi_hdcp2p2_ddc_disable(struct hdmi_tx_ddc_ctrl *ctrl);
int hdmi_hdcp2p2_ddc_read_rxstatus(struct hdmi_tx_ddc_ctrl *ctrl);
int hdmi_utils_get_timeout_in_hysnc(struct msm_hdmi_mode_timing_info *timing,
u32 timeout_ms);
-
+u8 hdmi_hdr_get_ops(u8 curr_state, u8 new_state);
#endif /* __HDMI_UTIL_H__ */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 0065ffc9322b..08b3b8348fd7 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -170,6 +170,7 @@ extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
extern int mmc_set_auto_bkops(struct mmc_card *card, bool enable);
extern int mmc_suspend_clk_scaling(struct mmc_host *host);
+extern void mmc_flush_detect_work(struct mmc_host *);
#define MMC_ERASE_ARG 0x00000000
#define MMC_SECURE_ERASE_ARG 0x80000000
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index aea4c0f2ef5f..65a188eeeeb6 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -519,6 +519,7 @@ struct mmc_host {
unsigned int bus_resume_flags;
#define MMC_BUSRESUME_MANUAL_RESUME (1 << 0)
#define MMC_BUSRESUME_NEEDS_RESUME (1 << 1)
+ bool ignore_bus_resume_flags;
unsigned int sdio_irqs;
struct task_struct *sdio_irq_thread;
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 60d15a080d7c..9d3eda39bcd2 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -37,7 +37,7 @@ void rcu_cpu_stall_reset(void);
/*
* Note a virtualization-based context switch. This is simply a
* wrapper around rcu_note_context_switch(), which allows TINY_RCU
- * to save a few bytes.
+ * to save a few bytes. The caller must have disabled interrupts.
*/
static inline void rcu_virt_note_context_switch(int cpu)
{
diff --git a/include/uapi/linux/msm_mdp_ext.h b/include/uapi/linux/msm_mdp_ext.h
index da9ee3bcc525..61b5f8eaa7f9 100644
--- a/include/uapi/linux/msm_mdp_ext.h
+++ b/include/uapi/linux/msm_mdp_ext.h
@@ -821,4 +821,26 @@ struct mdp_hdr_stream {
uint32_t content_type;
uint32_t reserved[5];
};
+
+/* HDR HDMI states: HDR_ENABLE, HDR_DISABLE and HDR_RESET are 1, 2 and 4 */
+#define HDR_ENABLE (1 << 0)
+#define HDR_DISABLE (1 << 1)
+#define HDR_RESET (1 << 2)
+
+/*
+ * HDR Control
+ * This encapsulates the HDR metadata along with a state control for that
+ * metadata, as required by the HDMI spec to send the relevant metadata
+ * depending on the state of HDR playback.
+ * hdr_state: Controls the HDR state; takes the values HDR_ENABLE,
+ * HDR_DISABLE and HDR_RESET.
+ * hdr_stream: Metadata sent by userspace for the HDR clip.
+ */
+
+#define DRM_MSM_EXT_PANEL_HDR_CTRL
+struct mdp_hdr_stream_ctrl {
+ __u8 hdr_state; /* HDR state */
+ struct mdp_hdr_stream hdr_stream; /* HDR metadata */
+};
+
#endif
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 077bb52e2d47..3fdb7545852e 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2799,6 +2799,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
int retval = 0;
mutex_lock(&cgroup_mutex);
+ percpu_down_write(&cgroup_threadgroup_rwsem);
for_each_root(root) {
struct cgroup *from_cgrp;
@@ -2813,6 +2814,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
if (retval)
break;
}
+ percpu_up_write(&cgroup_threadgroup_rwsem);
mutex_unlock(&cgroup_mutex);
return retval;
@@ -4072,6 +4074,8 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
mutex_lock(&cgroup_mutex);
+ percpu_down_write(&cgroup_threadgroup_rwsem);
+
/* all tasks in @from are being moved, all csets are source */
spin_lock_irq(&css_set_lock);
list_for_each_entry(link, &from->cset_links, cset_link)
@@ -4100,6 +4104,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
} while (task && !ret);
out_err:
cgroup_migrate_finish(&preloaded_csets);
+ percpu_up_write(&cgroup_threadgroup_rwsem);
mutex_unlock(&cgroup_mutex);
return ret;
}
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index 1e6a51cc25c4..99b8d991126f 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -106,32 +106,6 @@ bool osq_lock(struct optimistic_spin_queue *lock)
prev = decode_cpu(old);
node->prev = prev;
-
- /*
- * We need to avoid reordering of link updation sequence of osq.
- * A case in which the status of optimistic spin queue is
- * CPU6->CPU2 in which CPU6 has acquired the lock. At this point
- * if CPU0 comes in to acquire osq_lock, it will update the tail
- * count. After tail count update if CPU2 starts to unqueue itself
- * from optimistic spin queue, it will find updated tail count with
- * CPU0 and update CPU2 node->next to NULL in osq_wait_next(). If
- * reordering of following stores happen then prev->next where prev
- * being CPU2 would be updated to point to CPU0 node:
- * node->prev = prev;
- * WRITE_ONCE(prev->next, node);
- *
- * At this point if next instruction
- * WRITE_ONCE(next->prev, prev);
- * in CPU2 path is committed before the update of CPU0 node->prev =
- * prev then CPU0 node->prev will point to CPU6 node. At this point
- * if CPU0 path's node->prev = prev is committed resulting in change
- * of CPU0 prev back to CPU2 node. CPU2 node->next is NULL, so if
- * CPU0 gets into unqueue path of osq_lock it will keep spinning
- * in infinite loop as condition prev->next == node will never be
- * true.
- */
- smp_mb();
-
WRITE_ONCE(prev->next, node);
/*
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 2cb46d51d715..1ba183e7987c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -248,24 +248,17 @@ static int rcu_gp_in_progress(struct rcu_state *rsp)
*/
void rcu_sched_qs(void)
{
- unsigned long flags;
-
- if (__this_cpu_read(rcu_sched_data.cpu_no_qs.s)) {
- trace_rcu_grace_period(TPS("rcu_sched"),
- __this_cpu_read(rcu_sched_data.gpnum),
- TPS("cpuqs"));
- __this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
- if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
- return;
- local_irq_save(flags);
- if (__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)) {
- __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
- rcu_report_exp_rdp(&rcu_sched_state,
- this_cpu_ptr(&rcu_sched_data),
- true);
- }
- local_irq_restore(flags);
- }
+ if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
+ return;
+ trace_rcu_grace_period(TPS("rcu_sched"),
+ __this_cpu_read(rcu_sched_data.gpnum),
+ TPS("cpuqs"));
+ __this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
+ if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
+ return;
+ __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
+ rcu_report_exp_rdp(&rcu_sched_state,
+ this_cpu_ptr(&rcu_sched_data), true);
}
void rcu_bh_qs(void)
@@ -302,17 +295,16 @@ EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);
* We inform the RCU core by emulating a zero-duration dyntick-idle
* period, which we in turn do by incrementing the ->dynticks counter
* by two.
+ *
+ * The caller must have disabled interrupts.
*/
static void rcu_momentary_dyntick_idle(void)
{
- unsigned long flags;
struct rcu_data *rdp;
struct rcu_dynticks *rdtp;
int resched_mask;
struct rcu_state *rsp;
- local_irq_save(flags);
-
/*
* Yes, we can lose flag-setting operations. This is OK, because
* the flag will be set again after some delay.
@@ -342,13 +334,12 @@ static void rcu_momentary_dyntick_idle(void)
smp_mb__after_atomic(); /* Later stuff after QS. */
break;
}
- local_irq_restore(flags);
}
/*
* Note a context switch. This is a quiescent state for RCU-sched,
* and requires special handling for preemptible RCU.
- * The caller must have disabled preemption.
+ * The caller must have disabled interrupts.
*/
void rcu_note_context_switch(void)
{
@@ -378,9 +369,14 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch);
*/
void rcu_all_qs(void)
{
+ unsigned long flags;
+
barrier(); /* Avoid RCU read-side critical sections leaking down. */
- if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
+ if (unlikely(raw_cpu_read(rcu_sched_qs_mask))) {
+ local_irq_save(flags);
rcu_momentary_dyntick_idle();
+ local_irq_restore(flags);
+ }
this_cpu_inc(rcu_qs_ctr);
barrier(); /* Avoid RCU read-side critical sections leaking up. */
}
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 32cbe72bf545..c6fc11d626f8 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -147,8 +147,8 @@ static void __init rcu_bootup_announce(void)
* the corresponding expedited grace period will also be the end of the
* normal grace period.
*/
-static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp,
- unsigned long flags) __releases(rnp->lock)
+static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
+ __releases(rnp->lock) /* But leaves rrupts disabled. */
{
int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
(rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
@@ -236,7 +236,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp,
rnp->gp_tasks = &t->rcu_node_entry;
if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
rnp->exp_tasks = &t->rcu_node_entry;
- raw_spin_unlock(&rnp->lock);
+ raw_spin_unlock(&rnp->lock); /* rrupts remain disabled. */
/*
* Report the quiescent state for the expedited GP. This expedited
@@ -251,7 +251,6 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp,
} else {
WARN_ON_ONCE(t->rcu_read_unlock_special.b.exp_need_qs);
}
- local_irq_restore(flags);
}
/*
@@ -286,12 +285,11 @@ static void rcu_preempt_qs(void)
* predating the current grace period drain, in other words, until
* rnp->gp_tasks becomes NULL.
*
- * Caller must disable preemption.
+ * Caller must disable interrupts.
*/
static void rcu_preempt_note_context_switch(void)
{
struct task_struct *t = current;
- unsigned long flags;
struct rcu_data *rdp;
struct rcu_node *rnp;
@@ -301,7 +299,7 @@ static void rcu_preempt_note_context_switch(void)
/* Possibly blocking in an RCU read-side critical section. */
rdp = this_cpu_ptr(rcu_state_p->rda);
rnp = rdp->mynode;
- raw_spin_lock_irqsave(&rnp->lock, flags);
+ raw_spin_lock(&rnp->lock);
smp_mb__after_unlock_lock();
t->rcu_read_unlock_special.b.blocked = true;
t->rcu_blocked_node = rnp;
@@ -318,7 +316,7 @@ static void rcu_preempt_note_context_switch(void)
(rnp->qsmask & rdp->grpmask)
? rnp->gpnum
: rnp->gpnum + 1);
- rcu_preempt_ctxt_queue(rnp, rdp, flags);
+ rcu_preempt_ctxt_queue(rnp, rdp);
} else if (t->rcu_read_lock_nesting < 0 &&
t->rcu_read_unlock_special.s) {
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4ecca604e64b..2dbe599d34d5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -621,8 +621,7 @@ void resched_cpu(int cpu)
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
- if (!raw_spin_trylock_irqsave(&rq->lock, flags))
- return;
+ raw_spin_lock_irqsave(&rq->lock, flags);
resched_curr(rq);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
@@ -3512,7 +3511,6 @@ static void __sched notrace __schedule(bool preempt)
cpu = smp_processor_id();
rq = cpu_rq(cpu);
- rcu_note_context_switch();
prev = rq->curr;
/*
@@ -3531,13 +3529,16 @@ static void __sched notrace __schedule(bool preempt)
if (sched_feat(HRTICK))
hrtick_clear(rq);
+ local_irq_disable();
+ rcu_note_context_switch();
+
/*
* Make sure that signal_pending_state()->signal_pending() below
* can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
* done by the caller to avoid the race with signal_wake_up().
*/
smp_mb__before_spinlock();
- raw_spin_lock_irq(&rq->lock);
+ raw_spin_lock(&rq->lock);
lockdep_pin_lock(&rq->lock);
rq->clock_skip_update <<= 1; /* promote REQ to ACT */
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 8a2a489b2cd3..ede54061c554 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -237,7 +237,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
transparent = xt_socket_sk_is_transparent(sk);
if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
- transparent)
+ transparent && sk_fullsock(sk))
pskb->mark = sk->sk_mark;
sock_gen_put(sk);
@@ -419,7 +419,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
transparent = xt_socket_sk_is_transparent(sk);
if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
- transparent)
+ transparent && sk_fullsock(sk))
pskb->mark = sk->sk_mark;
if (sk != skb->sk)
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index 07dd98607413..86005410a22f 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -1014,7 +1014,7 @@ country MY: DFS-FCC
(5170 - 5250 @ 80), (24), AUTO-BW
(5250 - 5330 @ 80), (24), DFS, AUTO-BW
(5490 - 5650 @ 160), (24), DFS
- (5735 - 5815 @ 80), (24)
+ (5735 - 5835 @ 80), (24)
# 60 gHz band channels 1-3
(57240 - 63720 @ 2160), (40)
diff --git a/sound/soc/codecs/wcd-dsp-mgr.c b/sound/soc/codecs/wcd-dsp-mgr.c
index 1613c5baa9c7..f995bf22c1c3 100644
--- a/sound/soc/codecs/wcd-dsp-mgr.c
+++ b/sound/soc/codecs/wcd-dsp-mgr.c
@@ -25,7 +25,8 @@
static char *wdsp_get_cmpnt_type_string(enum wdsp_cmpnt_type);
/* Component related macros */
-#define WDSP_GET_COMPONENT(wdsp, x) (&(wdsp->cmpnts[x]))
+#define WDSP_GET_COMPONENT(wdsp, x) ((x >= WDSP_CMPNT_TYPE_MAX || x < 0) ? \
+ NULL : (&(wdsp->cmpnts[x])))
#define WDSP_GET_CMPNT_TYPE_STR(x) wdsp_get_cmpnt_type_string(x)
/*
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index 10883b0939d6..2bc911e63e12 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -4073,6 +4073,8 @@ static int tasha_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0xC0, 0xC0);
}
set_bit(HPH_PA_DELAY, &tasha->status_mask);
+ if (!(strcmp(w->name, "HPHR PA")))
+ snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x40, 0x40);
break;
case SND_SOC_DAPM_POST_PMU:
if (!(strcmp(w->name, "ANC HPHR PA"))) {
@@ -4127,6 +4129,8 @@ static int tasha_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
tasha_codec_hph_post_pa_config(tasha, hph_mode, event);
if (!(strcmp(w->name, "ANC HPHR PA")))
snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x40, 0x00);
+ if (!(strcmp(w->name, "HPHR PA")))
+ snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x40, 0x00);
break;
case SND_SOC_DAPM_POST_PMD:
/* 5ms sleep is required after PA is disabled as per
@@ -4166,6 +4170,8 @@ static int tasha_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
(test_bit(HPH_PA_DELAY, &tasha->status_mask))) {
snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0xC0, 0xC0);
}
+ if (!(strcmp(w->name, "HPHL PA")))
+ snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x80, 0x80);
set_bit(HPH_PA_DELAY, &tasha->status_mask);
break;
case SND_SOC_DAPM_POST_PMU:
@@ -4222,6 +4228,8 @@ static int tasha_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
tasha_codec_hph_post_pa_config(tasha, hph_mode, event);
if (!(strcmp(w->name, "ANC HPHL PA")))
snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x80, 0x00);
+ if (!(strcmp(w->name, "HPHL PA")))
+ snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x80, 0x00);
break;
case SND_SOC_DAPM_POST_PMD:
/* 5ms sleep is required after PA is disabled as per
@@ -4544,6 +4552,10 @@ static int tasha_codec_hphr_dac_event(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
+ if (!(strcmp(w->name, "RX INT2 DAC"))) {
+ snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x20, 0x20);
+ snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x10, 0x10);
+ }
if (tasha->anc_func) {
ret = tasha_codec_enable_anc(w, kcontrol, event);
/* 40 msec delay is needed to avoid click and pop */
@@ -4582,6 +4594,8 @@ static int tasha_codec_hphr_dac_event(struct snd_soc_dapm_widget *w,
}
break;
case SND_SOC_DAPM_PRE_PMD:
+ if (!(strcmp(w->name, "RX INT2 DAC")))
+ snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x30, 0x00);
if ((hph_mode == CLS_H_LP) &&
(TASHA_IS_1_1(wcd9xxx))) {
snd_soc_update_bits(codec, WCD9335_HPH_L_DAC_CTL,
@@ -11094,12 +11108,12 @@ static const struct snd_soc_dapm_widget tasha_dapm_widgets[] = {
0, 0, tasha_codec_ear_dac_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_DAC_E("RX INT1 DAC", NULL, WCD9335_ANA_HPH,
- 5, 0, tasha_codec_hphl_dac_event,
+ SND_SOC_DAPM_DAC_E("RX INT1 DAC", NULL, SND_SOC_NOPM,
+ 0, 0, tasha_codec_hphl_dac_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_DAC_E("RX INT2 DAC", NULL, WCD9335_ANA_HPH,
- 4, 0, tasha_codec_hphr_dac_event,
+ SND_SOC_DAPM_DAC_E("RX INT2 DAC", NULL, SND_SOC_NOPM,
+ 0, 0, tasha_codec_hphr_dac_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_DAC_E("RX INT3 DAC", NULL, SND_SOC_NOPM,
@@ -11114,11 +11128,11 @@ static const struct snd_soc_dapm_widget tasha_dapm_widgets[] = {
SND_SOC_DAPM_DAC_E("RX INT6 DAC", NULL, SND_SOC_NOPM,
0, 0, tasha_codec_lineout_dac_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_PGA_E("HPHL PA", WCD9335_ANA_HPH, 7, 0, NULL, 0,
+ SND_SOC_DAPM_PGA_E("HPHL PA", SND_SOC_NOPM, 0, 0, NULL, 0,
tasha_codec_enable_hphl_pa,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_PGA_E("HPHR PA", WCD9335_ANA_HPH, 6, 0, NULL, 0,
+ SND_SOC_DAPM_PGA_E("HPHR PA", SND_SOC_NOPM, 0, 0, NULL, 0,
tasha_codec_enable_hphr_pa,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
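In the wcd9335 hunks the HPH PA and RX INT1/INT2 DAC widgets move to SND_SOC_NOPM, so DAPM no longer flips the WCD9335_ANA_HPH enable bits itself; the event callbacks now set and clear those bits explicitly (the wcd934x hunks below mirror the same pattern). A condensed sketch of that arrangement, assuming the old-style snd_soc_codec API used by this kernel and the register macro from the driver's own headers; it is not the driver's full handler:

#include <sound/soc.h>
#include <sound/soc-dapm.h>

/* WCD9335_ANA_HPH comes from the codec's register header (not shown). */
static int hphr_pa_event(struct snd_soc_dapm_widget *w,
			 struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		/* set the HPHR enable bit by hand before power-up */
		snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x40, 0x40);
		break;
	case SND_SOC_DAPM_POST_PMU:
		/* clear it once the power-up sequencing is complete */
		snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x40, 0x00);
		break;
	}
	return 0;
}

/* Widget registered on SND_SOC_NOPM: DAPM no longer owns the enable bit. */
static const struct snd_soc_dapm_widget example_widgets[] = {
	SND_SOC_DAPM_PGA_E("HPHR PA", SND_SOC_NOPM, 0, 0, NULL, 0,
			   hphr_pa_event,
			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
};

Keeping the bit toggles inside the event callback lets the driver order them against the other writes in the same case blocks, which a register-backed widget would not allow.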
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index 26320fd01a5a..bfe471e73503 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -2014,6 +2014,8 @@ static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
snd_soc_update_bits(codec, WCD934X_ANA_RX_SUPPLIES,
0x02, 0x02);
}
+ if (!(strcmp(w->name, "HPHR PA")))
+ snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x40, 0x40);
break;
case SND_SOC_DAPM_POST_PMU:
if ((!(strcmp(w->name, "ANC HPHR PA")))) {
@@ -2112,6 +2114,8 @@ static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
0x10, 0x10);
if (!(strcmp(w->name, "ANC HPHR PA")))
snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x40, 0x00);
+ if (!(strcmp(w->name, "HPHR PA")))
+ snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x40, 0x00);
break;
case SND_SOC_DAPM_POST_PMD:
/*
@@ -2161,6 +2165,8 @@ static int tavil_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
(test_bit(HPH_PA_DELAY, &tavil->status_mask)))
snd_soc_update_bits(codec, WCD934X_ANA_HPH,
0xC0, 0xC0);
+ if (!(strcmp(w->name, "HPHL PA")))
+ snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x80, 0x80);
set_bit(HPH_PA_DELAY, &tavil->status_mask);
if (dsd_conf &&
(snd_soc_read(codec, WCD934X_CDC_DSD0_PATH_CTL) & 0x01)) {
@@ -2266,6 +2272,8 @@ static int tavil_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
if (!(strcmp(w->name, "ANC HPHL PA")))
snd_soc_update_bits(codec, WCD934X_ANA_HPH,
0x80, 0x00);
+ if (!(strcmp(w->name, "HPHL PA")))
+ snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x80, 0x00);
break;
case SND_SOC_DAPM_POST_PMD:
/*
@@ -2418,6 +2426,10 @@ static int tavil_codec_hphr_dac_event(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
+ if (!(strcmp(w->name, "RX INT2 DAC"))) {
+ snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x20, 0x20);
+ snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x10, 0x10);
+ }
if (tavil->anc_func) {
ret = tavil_codec_enable_anc(w, kcontrol, event);
/* 40 msec delay is needed to avoid click and pop */
@@ -2458,6 +2470,10 @@ static int tavil_codec_hphr_dac_event(struct snd_soc_dapm_widget *w,
WCD934X_CDC_RX2_RX_PATH_CFG0,
0x10, 0x10);
break;
+ case SND_SOC_DAPM_PRE_PMD:
+ if (!(strcmp(w->name, "RX INT2 DAC")))
+ snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x30, 0x00);
+ break;
case SND_SOC_DAPM_POST_PMD:
/* 1000us required as per HW requirement */
usleep_range(1000, 1100);
@@ -7365,12 +7381,12 @@ static const struct snd_soc_dapm_widget tavil_dapm_widgets[] = {
0, 0, tavil_codec_ear_dac_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_DAC_E("RX INT1 DAC", NULL, WCD934X_ANA_HPH,
- 5, 0, tavil_codec_hphl_dac_event,
+ SND_SOC_DAPM_DAC_E("RX INT1 DAC", NULL, SND_SOC_NOPM,
+ 0, 0, tavil_codec_hphl_dac_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_DAC_E("RX INT2 DAC", NULL, WCD934X_ANA_HPH,
- 4, 0, tavil_codec_hphr_dac_event,
+ SND_SOC_DAPM_DAC_E("RX INT2 DAC", NULL, SND_SOC_NOPM,
+ 0, 0, tavil_codec_hphr_dac_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_DAC_E("RX INT3 DAC", NULL, SND_SOC_NOPM,
@@ -7383,11 +7399,11 @@ static const struct snd_soc_dapm_widget tavil_dapm_widgets[] = {
SND_SOC_DAPM_PGA_E("EAR PA", WCD934X_ANA_EAR, 7, 0, NULL, 0,
tavil_codec_enable_ear_pa,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_PGA_E("HPHL PA", WCD934X_ANA_HPH, 7, 0, NULL, 0,
+ SND_SOC_DAPM_PGA_E("HPHL PA", SND_SOC_NOPM, 0, 0, NULL, 0,
tavil_codec_enable_hphl_pa,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_PGA_E("HPHR PA", WCD934X_ANA_HPH, 6, 0, NULL, 0,
+ SND_SOC_DAPM_PGA_E("HPHR PA", SND_SOC_NOPM, 0, 0, NULL, 0,
tavil_codec_enable_hphr_pa,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
index c462f682e160..471be3294881 100644
--- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
@@ -397,12 +397,9 @@ static int msm_compr_set_volume(struct snd_compr_stream *cstream,
} else {
gain_list[0] = volume_l;
gain_list[1] = volume_r;
- /* force sending FR/FL/FC volume for mono */
- if (prtd->num_channels == 1) {
- gain_list[2] = volume_l;
- num_channels = 3;
- use_default = true;
- }
+ gain_list[2] = volume_l;
+ num_channels = 3;
+ use_default = true;
rc = q6asm_set_multich_gain(prtd->audio_client, num_channels,
gain_list, chmap, use_default);
}
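With the mono-only condition removed, msm_compr_set_volume() always sends a three-entry FL/FR/FC gain list, reusing the left gain for the centre slot and forcing the default channel map. A trivial standalone sketch of that gain-list preparation; the helper name and channel ordering here are assumptions for illustration, not the q6asm ABI:

#include <stdint.h>
#include <stdio.h>

/* Build the 3-entry gain list unconditionally, mirroring the left gain
 * into the centre slot (illustrative helper, not driver code).
 */
static int build_gain_list(uint32_t vol_l, uint32_t vol_r,
			   uint32_t gain_list[3])
{
	gain_list[0] = vol_l;	/* front left  */
	gain_list[1] = vol_r;	/* front right */
	gain_list[2] = vol_l;	/* front centre, reuse left gain */
	return 3;		/* channel count handed to the DSP call */
}

int main(void)
{
	uint32_t gains[3];
	int n = build_gain_list(0x2000, 0x2000, gains);

	printf("%d gains: 0x%x 0x%x 0x%x\n", n, gains[0], gains[1], gains[2]);
	return 0;
}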
diff --git a/sound/soc/msm/qdsp6v2/msm-lsm-client.c b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
index 35270e3340ec..ae6767d26921 100644
--- a/sound/soc/msm/qdsp6v2/msm-lsm-client.c
+++ b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
@@ -1167,7 +1167,7 @@ static int msm_lsm_ioctl_shared(struct snd_pcm_substream *substream,
case SNDRV_LSM_SET_FWK_MODE_CONFIG: {
u32 mode;
- if (copy_from_user(&mode, arg, sizeof(mode))) {
+ if (copy_from_user(&mode, (void __user *) arg, sizeof(mode))) {
dev_err(rtd->dev, "%s: %s: copy_frm_user failed\n",
__func__, "LSM_SET_FWK_MODE_CONFIG");
return -EFAULT;