author     Linux Build Service Account <lnxbuild@quicinc.com>               2017-06-07 20:57:53 -0700
committer  Gerrit - the friendly Code Review server <code-review@localhost> 2017-06-07 20:57:53 -0700
commit     9b5ee09c398abca451d2a5550f07726b99492eed
tree       665b97bab9db21c201e0815aad266483f01d6553
parent     04587e2ca3ba1211f8cebb393ecb871c9d97cf01
parent     90bf661847845b2e3e9c52cdbb257ff3f707ca1c
Merge "mhi: core: Add support for host triggered device ram dump"
 drivers/platform/msm/mhi/mhi_bhi.c    | 117
 drivers/platform/msm/mhi/mhi_bhi.h    |   1
 drivers/platform/msm/mhi/mhi_pm.c     | 105
 drivers/platform/msm/mhi/mhi_states.c |   5
 include/linux/msm_mhi.h               |   2
 5 files changed, 198 insertions(+), 32 deletions(-)
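
The merge adds two externally visible ways to collect a device RAM dump: a graceful path (trigger SYS_ERR, then transfer the dump from process context) and a panic path that busy-polls the BHIE registers. Below is a minimal, hedged sketch of how a bus master might drive these through the new mhi_pm_control_device() commands; collect_ramdump() and the wait step are illustrative placeholders, not part of this patch.

	/* Illustrative only: drive the new RDDM commands from a bus-master
	 * client. The wait between TRIGGER_RDDM and RDDM depends on how the
	 * client learns that the device has entered RDDM. */
	#include <linux/msm_mhi.h>

	static int collect_ramdump(struct mhi_device *mhi_dev, bool in_panic)
	{
		int ret;

		if (in_panic)
			/* panic path: bhi_rddm(..., true) busy-polls with udelay() */
			return mhi_pm_control_device(mhi_dev,
						     MHI_DEV_CTRL_RDDM_KERNEL_PANIC);

		/* ask the device to enter SYS_ERR/RDDM */
		ret = mhi_pm_control_device(mhi_dev, MHI_DEV_CTRL_TRIGGER_RDDM);
		if (ret)
			return ret;

		/* ... wait for the client's RDDM-ready indication here ... */

		/* graceful transfer from process context */
		return mhi_pm_control_device(mhi_dev, MHI_DEV_CTRL_RDDM);
	}
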
diff --git a/drivers/platform/msm/mhi/mhi_bhi.c b/drivers/platform/msm/mhi/mhi_bhi.c
index e1c50e1273ac..4354b2600472 100644
--- a/drivers/platform/msm/mhi/mhi_bhi.c
+++ b/drivers/platform/msm/mhi/mhi_bhi.c
@@ -249,6 +249,13 @@ int bhi_rddm(struct mhi_device_ctxt *mhi_dev_ctxt, bool in_panic)
{
struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
struct bhie_vec_table *rddm_table = &bhi_ctxt->rddm_table;
+ struct bhie_mem_info *bhie_mem_info;
+ u32 rx_sequence, val, current_seq;
+ u32 timeout = (bhi_ctxt->poll_timeout * 1000) / BHIE_RDDM_DELAY_TIME_US;
+ int i;
+ u32 cur_exec, prev_exec = 0;
+ u32 state, prev_state = 0;
+ u32 rx_status, prev_status = 0;
if (!rddm_table->bhie_mem_info) {
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "RDDM table == NULL\n");
@@ -258,9 +265,93 @@ int bhi_rddm(struct mhi_device_ctxt *mhi_dev_ctxt, bool in_panic)
if (!in_panic)
return bhi_rddm_graceful(mhi_dev_ctxt);
- mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
- "RDDM collection in panic not yet supported\n");
- return -EINVAL;
+ /*
+ * This code should only be executed during a kernel panic; we
+ * expect other cores to be shutting down while we execute the
+ * RDDM transfer. After returning from this function, we expect
+ * the device to reset.
+ */
+
+ /* Trigger device into RDDM */
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "pm_state:0x%x mhi_state:%s\n",
+ mhi_dev_ctxt->mhi_pm_state,
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Register access not allowed\n");
+ return -EIO;
+ }
+
+ /*
+ * Normally we only set mhi_pm_state after grabbing pm_xfer_lock as a
+ * writer, via mhi_tryset_pm_state. Since we're in a kernel panic, we
+ * set the pm state without grabbing the xfer lock. We set pm_state to
+ * LD as a safety precaution: if another core is in the middle of a
+ * register access, this should deter it. However, there is no
+ * guarantee the change will take effect.
+ */
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
+ /* change should take effect immediately */
+ smp_wmb();
+
+ bhie_mem_info = &rddm_table->
+ bhie_mem_info[rddm_table->segment_count - 1];
+ rx_sequence = rddm_table->sequence++;
+
+ /* program the vector table */
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Programming RXVEC table\n");
+ val = HIGH_WORD(bhie_mem_info->phys_addr);
+ mhi_reg_write(mhi_dev_ctxt, bhi_ctxt->bhi_base,
+ BHIE_RXVECADDR_HIGH_OFFS, val);
+ val = LOW_WORD(bhie_mem_info->phys_addr);
+ mhi_reg_write(mhi_dev_ctxt, bhi_ctxt->bhi_base, BHIE_RXVECADDR_LOW_OFFS,
+ val);
+ val = (u32)bhie_mem_info->size;
+ mhi_reg_write(mhi_dev_ctxt, bhi_ctxt->bhi_base, BHIE_RXVECSIZE_OFFS,
+ val);
+ mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base, BHIE_RXVECDB_OFFS,
+ BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
+ rx_sequence);
+
+ /* trigger device into rddm */
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Triggering Device into RDDM mode\n");
+ mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_SYS_ERR);
+ i = 0;
+
+ while (timeout--) {
+ cur_exec = mhi_reg_read(bhi_ctxt->bhi_base, BHI_EXECENV);
+ state = mhi_get_m_state(mhi_dev_ctxt);
+ rx_status = mhi_reg_read(bhi_ctxt->bhi_base,
+ BHIE_RXVECSTATUS_OFFS);
+ /* log when register values change, or roughly once per second (every 1024 polls) */
+ if (cur_exec != prev_exec || state != prev_state ||
+ rx_status != prev_status || !(i & (SZ_1K - 1))) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "EXECENV:0x%x MHISTATE:0x%x RXSTATUS:0x%x\n",
+ cur_exec, state, rx_status);
+ prev_exec = cur_exec;
+ prev_state = state;
+ prev_status = rx_status;
+ }
+ current_seq = (rx_status & BHIE_TXVECSTATUS_SEQNUM_BMSK) >>
+ BHIE_TXVECSTATUS_SEQNUM_SHFT;
+ rx_status = (rx_status & BHIE_TXVECSTATUS_STATUS_BMSK) >>
+ BHIE_TXVECSTATUS_STATUS_SHFT;
+
+ if ((rx_status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) &&
+ (current_seq == rx_sequence)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "rddm transfer completed\n");
+ return 0;
+ }
+ udelay(BHIE_RDDM_DELAY_TIME_US);
+ i++;
+ }
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR, "rddm transfer timeout\n");
+
+ return -EIO;
}
static int bhi_load_firmware(struct mhi_device_ctxt *mhi_dev_ctxt,
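
For reference, the panic loop's iteration budget comes from the BHI poll timeout (in ms) divided by the new per-iteration delay (in µs). A worked example, assuming bhi_ctxt->poll_timeout keeps the BHI_POLL_TIMEOUT_MS default of 2000 ms (the actual value is set elsewhere in the driver):

	/* worked example, assuming bhi_ctxt->poll_timeout == BHI_POLL_TIMEOUT_MS */
	u32 poll_timeout_ms = 2000;				/* bhi_ctxt->poll_timeout */
	u32 delay_us = 1000;					/* BHIE_RDDM_DELAY_TIME_US */
	u32 iterations = (poll_timeout_ms * 1000) / delay_us;	/* 2000 polls */
	/* 2000 x udelay(1000) ~= 2 s worst-case busy-wait; the log gate
	 * !(i & (SZ_1K - 1)) fires every 1024 polls, roughly once per second */
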
@@ -439,7 +530,6 @@ void bhi_firmware_download(struct work_struct *work)
struct bhi_ctxt_t *bhi_ctxt;
struct bhie_mem_info mem_info;
int ret;
- long timeout;
mhi_dev_ctxt = container_of(work, struct mhi_device_ctxt,
bhi_ctxt.fw_load_work);
@@ -448,7 +538,14 @@ void bhi_firmware_download(struct work_struct *work)
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Enter\n");
wait_event_interruptible(*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
- mhi_dev_ctxt->mhi_state == MHI_STATE_BHI);
+ mhi_dev_ctxt->mhi_state == MHI_STATE_BHI ||
+ mhi_dev_ctxt->mhi_pm_state == MHI_PM_LD_ERR_FATAL_DETECT);
+ if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_LD_ERR_FATAL_DETECT ||
+ mhi_dev_ctxt->mhi_state != MHI_STATE_BHI) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "MHI is not in valid state for firmware download\n");
+ return;
+ }
/* PBL image is the first segment in firmware vector table */
mem_info = *bhi_ctxt->fw_table.bhie_mem_info;
@@ -462,10 +559,12 @@ void bhi_firmware_download(struct work_struct *work)
mhi_init_state_transition(mhi_dev_ctxt,
STATE_TRANSITION_RESET);
- timeout = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
- mhi_dev_ctxt->dev_exec_env == MHI_EXEC_ENV_BHIE,
- msecs_to_jiffies(bhi_ctxt->poll_timeout));
- if (!timeout) {
+ wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
+ mhi_dev_ctxt->dev_exec_env == MHI_EXEC_ENV_BHIE ||
+ mhi_dev_ctxt->mhi_pm_state == MHI_PM_LD_ERR_FATAL_DETECT,
+ msecs_to_jiffies(bhi_ctxt->poll_timeout));
+ if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_LD_ERR_FATAL_DETECT ||
+ mhi_dev_ctxt->dev_exec_env != MHI_EXEC_ENV_BHIE) {
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to Enter EXEC_ENV_BHIE\n");
return;
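
The firmware-download waits above follow the same shape as the PM waits changed later in this series: each blocking wait also returns once pm_state latches MHI_PM_LD_ERR_FATAL_DETECT, and the NOTIFY_LINK_ERROR handler (see mhi_pm.c below) wakes every wait queue so no waiter is stranded. A generic sketch of that pattern; ready_condition() and timeout_ms are illustrative stand-ins, not driver symbols:

	/* generic shape of the wait-or-fatal pattern used throughout this patch */
	static int wait_ready_or_linkdown(struct mhi_device_ctxt *mhi_dev_ctxt,
					  u32 timeout_ms)
	{
		wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
				   ready_condition(mhi_dev_ctxt) ||
				   mhi_dev_ctxt->mhi_pm_state ==
							MHI_PM_LD_ERR_FATAL_DETECT,
				   msecs_to_jiffies(timeout_ms));

		if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_LD_ERR_FATAL_DETECT ||
		    !ready_condition(mhi_dev_ctxt))
			return -EIO;	/* link dead or condition never came true */

		return 0;
	}
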
diff --git a/drivers/platform/msm/mhi/mhi_bhi.h b/drivers/platform/msm/mhi/mhi_bhi.h
index 8f7b3d69347c..8f9bc52bbbe0 100644
--- a/drivers/platform/msm/mhi/mhi_bhi.h
+++ b/drivers/platform/msm/mhi/mhi_bhi.h
@@ -87,6 +87,7 @@
#define BHI_POLL_SLEEP_TIME_MS 100
#define BHI_POLL_TIMEOUT_MS 2000
+#define BHIE_RDDM_DELAY_TIME_US (1000)
int bhi_probe(struct mhi_device_ctxt *mhi_dev_ctxt);
void bhi_firmware_download(struct work_struct *work);
diff --git a/drivers/platform/msm/mhi/mhi_pm.c b/drivers/platform/msm/mhi/mhi_pm.c
index caa34eadf8ea..ad9a6fd6b278 100644
--- a/drivers/platform/msm/mhi/mhi_pm.c
+++ b/drivers/platform/msm/mhi/mhi_pm.c
@@ -22,6 +22,22 @@
#include "mhi_hwio.h"
#include "mhi_bhi.h"
+static const char *const mhi_dev_ctrl_str[MHI_DEV_CTRL_MAXCMD] = {
+ [MHI_DEV_CTRL_INIT] = "INIT",
+ [MHI_DEV_CTRL_DE_INIT] = "DE-INIT",
+ [MHI_DEV_CTRL_SUSPEND] = "SUSPEND",
+ [MHI_DEV_CTRL_RESUME] = "RESUME",
+ [MHI_DEV_CTRL_POWER_OFF] = "OFF",
+ [MHI_DEV_CTRL_POWER_ON] = "ON",
+ [MHI_DEV_CTRL_TRIGGER_RDDM] = "TRIGGER RDDM",
+ [MHI_DEV_CTRL_RDDM] = "RDDM",
+ [MHI_DEV_CTRL_RDDM_KERNEL_PANIC] = "RDDM IN PANIC",
+ [MHI_DEV_CTRL_NOTIFY_LINK_ERROR] = "LD",
+};
+
+#define TO_MHI_DEV_CTRL_STR(cmd) ((cmd >= MHI_DEV_CTRL_MAXCMD) ? "INVALID" : \
+ mhi_dev_ctrl_str[cmd])
+
/* Write only sysfs attributes */
static DEVICE_ATTR(MHI_M0, S_IWUSR, NULL, sysfs_init_m0);
static DEVICE_ATTR(MHI_M3, S_IWUSR, NULL, sysfs_init_m3);
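
The lookup table above is sized by the new MHI_DEV_CTRL_MAXCMD sentinel (added to include/linux/msm_mhi.h at the end of this diff), and TO_MHI_DEV_CTRL_STR() bounds-checks against it before indexing. The same pattern in isolation, with illustrative names; parenthesizing the macro argument, as below, is slightly more defensive than the driver's version:

	/* illustrative example of the enum-sentinel + string-table pattern */
	enum example_cmd { CMD_FOO, CMD_BAR, CMD_MAX };

	static const char * const example_cmd_str[CMD_MAX] = {
		[CMD_FOO] = "FOO",
		[CMD_BAR] = "BAR",
	};

	#define TO_EXAMPLE_CMD_STR(c) \
		(((c) >= CMD_MAX) ? "INVALID" : example_cmd_str[(c)])
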
@@ -97,12 +113,14 @@ static int mhi_pm_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, false);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
- mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
- mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
- msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
- if (!r) {
+ mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
+ mhi_dev_ctxt->mhi_state == MHI_STATE_M1 ||
+ mhi_dev_ctxt->mhi_pm_state == MHI_PM_LD_ERR_FATAL_DETECT,
+ msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
+ if (!r || mhi_dev_ctxt->mhi_pm_state == MHI_PM_LD_ERR_FATAL_DETECT) {
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
- "Failed to get M0||M1 event, timeout, current state:%s\n",
+ "Failed to get M0||M1 event or LD pm_state:0x%x state:%s\n",
+ mhi_dev_ctxt->mhi_pm_state,
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
return -EIO;
}
@@ -122,9 +140,10 @@ static int mhi_pm_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt,
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Waiting for M3 completion.\n");
r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
- mhi_dev_ctxt->mhi_state == MHI_STATE_M3,
- msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
- if (!r) {
+ mhi_dev_ctxt->mhi_state == MHI_STATE_M3 ||
+ mhi_dev_ctxt->mhi_pm_state == MHI_PM_LD_ERR_FATAL_DETECT,
+ msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
+ if (!r || mhi_dev_ctxt->mhi_pm_state == MHI_PM_LD_ERR_FATAL_DETECT) {
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to get M3 event, timeout, current state:%s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
@@ -158,12 +177,13 @@ static int mhi_pm_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M0);
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
- mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
- mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
- msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
- if (!r) {
+ mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
+ mhi_dev_ctxt->mhi_state == MHI_STATE_M1 ||
+ mhi_dev_ctxt->mhi_pm_state == MHI_PM_LD_ERR_FATAL_DETECT,
+ msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
+ if (!r || mhi_dev_ctxt->mhi_pm_state == MHI_PM_LD_ERR_FATAL_DETECT) {
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
- "Failed to get M0 event, timeout\n");
+ "Failed to get M0 event, timeout or LD\n");
r = -EIO;
} else
r = 0;
@@ -295,13 +315,16 @@ static int mhi_pm_slave_mode_power_on(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, false);
read_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- ret_val = wait_for_completion_timeout(&mhi_dev_ctxt->cmd_complete,
- msecs_to_jiffies(timeout));
- if (!ret_val || mhi_dev_ctxt->dev_exec_env != MHI_EXEC_ENV_AMSS)
+ wait_for_completion_timeout(&mhi_dev_ctxt->cmd_complete,
+ msecs_to_jiffies(timeout));
+ if (mhi_dev_ctxt->dev_exec_env != MHI_EXEC_ENV_AMSS)
ret_val = -EIO;
else
ret_val = 0;
+ /* wait for firmware download to complete */
+ flush_work(&mhi_dev_ctxt->bhi_ctxt.fw_load_work);
+
if (ret_val) {
read_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
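
The flush_work() added above closes a window where slave-mode power-on could return while bhi_firmware_download() was still running on the fw_load_work item. A hedged sketch of the ordering the change enforces; the queueing site shown here is an assumption, not part of this hunk:

	/* assumed init-time wiring (not shown in this diff) */
	INIT_WORK(&mhi_dev_ctxt->bhi_ctxt.fw_load_work, bhi_firmware_download);
	schedule_work(&mhi_dev_ctxt->bhi_ctxt.fw_load_work);

	/* power-on path: block until the worker has either finished the
	 * download or bailed out on MHI_PM_LD_ERR_FATAL_DETECT */
	flush_work(&mhi_dev_ctxt->bhi_ctxt.fw_load_work);
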
@@ -537,16 +560,16 @@ void mhi_link_state_cb(struct msm_pcie_notify *notify)
}
}
-int mhi_pm_control_device(struct mhi_device *mhi_device,
- enum mhi_dev_ctrl ctrl)
+int mhi_pm_control_device(struct mhi_device *mhi_device, enum mhi_dev_ctrl ctrl)
{
struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->mhi_dev_ctxt;
+ unsigned long flags;
if (!mhi_dev_ctxt)
return -EINVAL;
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Entered with cmd:%d\n", ctrl);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered with cmd:%s\n",
+ TO_MHI_DEV_CTRL_STR(ctrl));
switch (ctrl) {
case MHI_DEV_CTRL_INIT:
@@ -560,12 +583,46 @@ int mhi_pm_control_device(struct mhi_device *mhi_device,
case MHI_DEV_CTRL_POWER_OFF:
mhi_pm_slave_mode_power_off(mhi_dev_ctxt);
break;
+ case MHI_DEV_CTRL_TRIGGER_RDDM:
+ write_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ write_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock,
+ flags);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "failed to trigger rddm, no register access in state:0x%x\n",
+ mhi_dev_ctxt->mhi_pm_state);
+ return -EIO;
+ }
+ mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_SYS_ERR);
+ write_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ break;
case MHI_DEV_CTRL_RDDM:
return bhi_rddm(mhi_dev_ctxt, false);
+ case MHI_DEV_CTRL_RDDM_KERNEL_PANIC:
+ return bhi_rddm(mhi_dev_ctxt, true);
case MHI_DEV_CTRL_DE_INIT:
- if (mhi_dev_ctxt->mhi_pm_state != MHI_PM_DISABLE)
+ if (mhi_dev_ctxt->mhi_pm_state != MHI_PM_DISABLE) {
+ enum MHI_PM_STATE cur_state;
+ /*
+ * If the bus master calls DE_INIT before calling POWER_OFF,
+ * a critical failure occurred during the POWER_ON state
+ * transition and the external PCIe device may not respond
+ * to the host. Force the PM state to the PCIe link-down
+ * state before starting the shutdown process to avoid
+ * touching the PCIe link.
+ */
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ cur_state = mhi_tryset_pm_state(mhi_dev_ctxt,
+ MHI_PM_LD_ERR_FATAL_DETECT);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ if (unlikely(cur_state != MHI_PM_LD_ERR_FATAL_DETECT)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to transition to state 0x%x from 0x%x\n",
+ MHI_PM_LD_ERR_FATAL_DETECT, cur_state);
+ }
process_disable_transition(MHI_PM_SHUTDOWN_PROCESS,
mhi_dev_ctxt);
+ }
bhi_exit(mhi_dev_ctxt);
break;
case MHI_DEV_CTRL_NOTIFY_LINK_ERROR:
@@ -580,6 +637,12 @@ int mhi_pm_control_device(struct mhi_device *mhi_device,
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Failed to transition to state 0x%x from 0x%x\n",
MHI_PM_LD_ERR_FATAL_DETECT, cur_state);
+
+ /* wake up all threads that are waiting for state change events */
+ complete(&mhi_dev_ctxt->cmd_complete);
+ wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
+ wake_up(mhi_dev_ctxt->mhi_ev_wq.m0_event);
+ wake_up(mhi_dev_ctxt->mhi_ev_wq.m3_event);
break;
}
default:
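
With MHI_DEV_CTRL_RDDM_KERNEL_PANIC now routed to bhi_rddm(..., true), a client can request the dump from atomic panic context. A hedged sketch of such a hook; the notifier and the saved device handle are illustrative, since this patch does not add a panic notifier itself:

	#include <linux/kernel.h>
	#include <linux/notifier.h>
	#include <linux/msm_mhi.h>

	static struct mhi_device *client_mhi_dev;	/* saved by the client at probe (assumption) */

	static int client_mhi_panic_cb(struct notifier_block *nb,
				       unsigned long event, void *unused)
	{
		/* in_panic path: registers are polled with udelay(), no sleeping */
		mhi_pm_control_device(client_mhi_dev, MHI_DEV_CTRL_RDDM_KERNEL_PANIC);
		return NOTIFY_DONE;
	}

	static struct notifier_block client_mhi_panic_nb = {
		.notifier_call = client_mhi_panic_cb,
	};

	/* at client init:
	 *	atomic_notifier_chain_register(&panic_notifier_list,
	 *				       &client_mhi_panic_nb);
	 */
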
diff --git a/drivers/platform/msm/mhi/mhi_states.c b/drivers/platform/msm/mhi/mhi_states.c
index 2906393cbd5c..ea2a91bd2d06 100644
--- a/drivers/platform/msm/mhi/mhi_states.c
+++ b/drivers/platform/msm/mhi/mhi_states.c
@@ -147,7 +147,8 @@ void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
* M1 -> M3_ENTER --> M3
* L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
* L2: SHUTDOWN_PROCESS -> DISABLE -> SSR_PENDING (via SSR Notification only)
- * L3: LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS
+ * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
+ * LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS
*/
static const struct mhi_pm_transitions const mhi_state_transitions[] = {
/* L0 States */
@@ -216,7 +217,7 @@ static const struct mhi_pm_transitions const mhi_state_transitions[] = {
/* L3 States */
{
MHI_PM_LD_ERR_FATAL_DETECT,
- MHI_PM_SHUTDOWN_PROCESS
+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS
},
/* From SSR notification only */
{
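
The L3 entry now includes MHI_PM_LD_ERR_FATAL_DETECT in its own allowed mask, so a second link-error notification (or the forced transition in the DE_INIT path above) is treated as a valid no-op rather than a rejected transition. mhi_tryset_pm_state() is not part of this diff; a hedged sketch of how a from-state/allowed-mask table like this is typically consulted, with assumed field names:

	/* hedged sketch; struct field names are assumptions, not from the driver */
	static bool pm_transition_allowed(enum MHI_PM_STATE from,
					  enum MHI_PM_STATE to)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(mhi_state_transitions); i++)
			if (mhi_state_transitions[i].from_state == from)
				return !!(mhi_state_transitions[i].to_states & to);

		return false;
	}
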
diff --git a/include/linux/msm_mhi.h b/include/linux/msm_mhi.h
index 01fe2e78b9d5..1704cb93e6a3 100644
--- a/include/linux/msm_mhi.h
+++ b/include/linux/msm_mhi.h
@@ -160,9 +160,11 @@ enum mhi_dev_ctrl {
MHI_DEV_CTRL_RESUME,
MHI_DEV_CTRL_POWER_OFF,
MHI_DEV_CTRL_POWER_ON,
+ MHI_DEV_CTRL_TRIGGER_RDDM,
MHI_DEV_CTRL_RDDM,
MHI_DEV_CTRL_RDDM_KERNEL_PANIC,
MHI_DEV_CTRL_NOTIFY_LINK_ERROR,
+ MHI_DEV_CTRL_MAXCMD,
};
enum mhi_rddm_segment {