author    Linux Build Service Account <lnxbuild@localhost>    2017-04-04 03:08:08 -0700
committer Gerrit - the friendly Code Review server <code-review@localhost>    2017-04-04 03:08:07 -0700
commit    a2440d7fa4422edc3aed637b31dcc755245d51bd (patch)
tree      211e08354932dd1b723568928ef712e2da18a0b3 /drivers/platform
parent    123020a779fd3c7744212e2dffa56853d780e2fb (diff)
parent    f13bb7cfebc2357881fb3c81d43691639db62dd0 (diff)
Merge "mhi: core: add support for subsystem restart and shutdown"
Diffstat (limited to 'drivers/platform')
-rw-r--r--  drivers/platform/msm/mhi/mhi.h          81
-rw-r--r--  drivers/platform/msm/mhi/mhi_bhi.c     176
-rw-r--r--  drivers/platform/msm/mhi/mhi_bhi.h       3
-rw-r--r--  drivers/platform/msm/mhi/mhi_event.c     8
-rw-r--r--  drivers/platform/msm/mhi/mhi_iface.c     1
-rw-r--r--  drivers/platform/msm/mhi/mhi_isr.c     117
-rw-r--r--  drivers/platform/msm/mhi/mhi_main.c    183
-rw-r--r--  drivers/platform/msm/mhi/mhi_pm.c      171
-rw-r--r--  drivers/platform/msm/mhi/mhi_ssr.c     368
-rw-r--r--  drivers/platform/msm/mhi/mhi_states.c  399
-rw-r--r--  drivers/platform/msm/mhi/mhi_sys.c      18
11 files changed, 1094 insertions(+), 431 deletions(-)
diff --git a/drivers/platform/msm/mhi/mhi.h b/drivers/platform/msm/mhi/mhi.h
index 4bce96102525..60e02fcb5e4b 100644
--- a/drivers/platform/msm/mhi/mhi.h
+++ b/drivers/platform/msm/mhi/mhi.h
@@ -95,9 +95,12 @@ struct bhi_ctxt_t {
u32 poll_timeout;
/* BHI/E vector table */
bool manage_boot; /* fw download done by MHI host */
+ bool support_rddm;
struct work_struct fw_load_work;
struct firmware_info firmware_info;
struct bhie_vec_table fw_table;
+ struct bhie_vec_table rddm_table;
+ size_t rddm_size;
};
enum MHI_CHAN_DIR {
@@ -140,12 +143,6 @@ enum MHI_CHAIN {
MHI_TRE_CHAIN_reserved = 0x80000000
};
-enum MHI_EVENT_RING_STATE {
- MHI_EVENT_RING_UINIT = 0x0,
- MHI_EVENT_RING_INIT = 0x1,
- MHI_EVENT_RING_reserved = 0x80000000
-};
-
enum MHI_STATE {
MHI_STATE_RESET = 0x0,
MHI_STATE_READY = 0x1,
@@ -154,9 +151,8 @@ enum MHI_STATE {
MHI_STATE_M2 = 0x4,
MHI_STATE_M3 = 0x5,
MHI_STATE_BHI = 0x7,
- MHI_STATE_SYS_ERR = 0x8,
- MHI_STATE_LIMIT = 0x9,
- MHI_STATE_reserved = 0x80000000
+ MHI_STATE_SYS_ERR = 0xFF,
+ MHI_STATE_LIMIT,
};
enum MHI_BRSTMODE {
@@ -168,22 +164,36 @@ enum MHI_BRSTMODE {
};
enum MHI_PM_STATE {
- MHI_PM_DISABLE = 0x0, /* MHI is not enabled */
- MHI_PM_POR = 0x1, /* Power On Reset State */
- MHI_PM_M0 = 0x2,
- MHI_PM_M1 = 0x4,
- MHI_PM_M1_M2_TRANSITION = 0x8, /* Register access not allowed */
- MHI_PM_M2 = 0x10,
- MHI_PM_M3_ENTER = 0x20,
- MHI_PM_M3 = 0x40,
- MHI_PM_M3_EXIT = 0x80,
+ MHI_PM_DISABLE = BIT(0), /* MHI is not enabled */
+ MHI_PM_POR = BIT(1), /* Power On Reset State */
+ MHI_PM_M0 = BIT(2),
+ MHI_PM_M1 = BIT(3),
+ MHI_PM_M1_M2_TRANSITION = BIT(4), /* Register access not allowed */
+ MHI_PM_M2 = BIT(5),
+ MHI_PM_M3_ENTER = BIT(6),
+ MHI_PM_M3 = BIT(7),
+ MHI_PM_M3_EXIT = BIT(8),
+ MHI_PM_SYS_ERR_DETECT = BIT(9),
+ MHI_PM_SYS_ERR_PROCESS = BIT(10),
+ MHI_PM_SHUTDOWN_PROCESS = BIT(11),
+ MHI_PM_LD_ERR_FATAL_DETECT = BIT(12), /* Link not accessible */
+ MHI_PM_SSR_PENDING = BIT(13)
+};
+
+struct mhi_pm_transitions {
+ enum MHI_PM_STATE from_state;
+ u32 to_states;
};
#define MHI_DB_ACCESS_VALID(pm_state) (pm_state & (MHI_PM_M0 | MHI_PM_M1))
#define MHI_WAKE_DB_ACCESS_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
MHI_PM_M1 | MHI_PM_M2))
-#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state > MHI_PM_DISABLE) && \
- (pm_state < MHI_PM_M3_EXIT))
+#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
+ MHI_PM_M1 | MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
+ MHI_PM_SHUTDOWN_PROCESS)))
+#define MHI_EVENT_ACCESS_INVALID(pm_state) (pm_state == MHI_PM_DISABLE || \
+ pm_state >= MHI_PM_SYS_ERR_DETECT)
struct __packed mhi_event_ctxt {
u32 mhi_intmodt;
u32 mhi_event_er_type;
@@ -239,7 +249,6 @@ enum MHI_PKT_TYPE {
MHI_PKT_TYPE_TX_EVENT = 0x22,
MHI_PKT_TYPE_EE_EVENT = 0x40,
MHI_PKT_TYPE_STALE_EVENT, /* Internal event */
- MHI_PKT_TYPE_SYS_ERR_EVENT = 0xFF,
};
struct __packed mhi_tx_pkt {
@@ -393,7 +402,8 @@ enum STATE_TRANSITION {
STATE_TRANSITION_LINK_DOWN,
STATE_TRANSITION_WAKE,
STATE_TRANSITION_BHIE,
- STATE_TRANSITION_SYS_ERR,
+ STATE_TRANSITION_RDDM,
+ STATE_TRANSITION_SYS_ERR = MHI_STATE_SYS_ERR,
STATE_TRANSITION_MAX
};
@@ -402,7 +412,8 @@ enum MHI_EXEC_ENV {
MHI_EXEC_ENV_SBL = 0x1,
MHI_EXEC_ENV_AMSS = 0x2,
MHI_EXEC_ENV_BHIE = 0x3,
- MHI_EXEC_ENV_reserved = 0x80000000
+ MHI_EXEC_ENV_RDDM = 0x4,
+ MHI_EXEC_ENV_DISABLE_TRANSITION, /* local EE, not related to mhi spec */
};
struct mhi_chan_info {
@@ -480,7 +491,7 @@ struct mhi_counters {
};
struct mhi_flags {
- u32 mhi_initialized;
+ bool mhi_initialized;
u32 link_up;
bool bb_required;
};
@@ -546,6 +557,7 @@ struct mhi_device_ctxt {
struct mhi_event_ring_cfg *ev_ring_props;
struct work_struct st_thread_worker;
struct work_struct process_m1_worker;
+ struct work_struct process_sys_err_worker;
struct mhi_wait_queues mhi_ev_wq;
struct dev_mmio_info mmio_info;
@@ -587,7 +599,8 @@ struct mhi_device_ctxt {
void (*assert_wake)(struct mhi_device_ctxt *mhi_dev_ctxt,
bool force_set);
void (*deassert_wake)(struct mhi_device_ctxt *mhi_dev_ctxt);
-
+ void (*status_cb)(enum MHI_CB_REASON, void *priv);
+ void *priv_data; /* private data for bus master */
struct completion cmd_complete;
};
@@ -612,7 +625,6 @@ struct mhi_event_ring_cfg {
*/
u32 priority;
enum MHI_RING_CLASS class;
- enum MHI_EVENT_RING_STATE state;
irqreturn_t (*mhi_handler_ptr)(int , void *);
};
#define MHI_EV_PRIORITY_TASKLET (1)
@@ -673,13 +685,12 @@ enum MHI_EVENT_CCS get_cmd_pkt(struct mhi_device_ctxt *mhi_dev_ctxt,
union mhi_cmd_pkt **cmd_pkt, u32 event_index);
int parse_cmd_event(struct mhi_device_ctxt *ctxt,
union mhi_event_pkt *event, u32 event_index);
-int mhi_test_for_device_ready(
- struct mhi_device_ctxt *mhi_dev_ctxt);
-int mhi_test_for_device_reset(
- struct mhi_device_ctxt *mhi_dev_ctxt);
+int mhi_test_for_device_ready(struct mhi_device_ctxt *mhi_dev_ctxt);
+int mhi_test_for_device_reset(struct mhi_device_ctxt *mhi_dev_ctxt);
int validate_ring_el_addr(struct mhi_ring *ring, uintptr_t addr);
int validate_ev_el_addr(struct mhi_ring *ring, uintptr_t addr);
void mhi_state_change_worker(struct work_struct *work);
+void mhi_sys_err_worker(struct work_struct *work);
int mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION new_state);
int mhi_wait_for_mdm(struct mhi_device_ctxt *mhi_dev_ctxt);
@@ -709,7 +720,7 @@ int mhi_reg_notifiers(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_cpu_notifier_cb(struct notifier_block *nfb, unsigned long action,
void *hcpu);
int init_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt);
-int mhi_turn_off_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt);
+int mhi_turn_off_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt, bool graceful);
int mhi_turn_on_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt);
@@ -757,5 +768,13 @@ void mhi_ev_task(unsigned long data);
void process_event_ring(struct work_struct *work);
int process_m0_transition(struct mhi_device_ctxt *mhi_dev_ctxt);
int process_m3_transition(struct mhi_device_ctxt *mhi_dev_ctxt);
+enum MHI_PM_STATE __must_check mhi_tryset_pm_state(struct mhi_device_ctxt *,
+ enum MHI_PM_STATE);
+void mhi_reset_chan(struct mhi_device_ctxt *mhi_dev_ctxt, int chan);
+void free_tre_ring(struct mhi_device_ctxt *mhi_dev_ctxt, int chan);
+void process_disable_transition(enum MHI_PM_STATE transition_state,
+ struct mhi_device_ctxt *mhi_dev_ctxt);
+bool mhi_in_sys_err(struct mhi_device_ctxt *mhi_dev_ctxt);
+void bhi_exit(struct mhi_device_ctxt *mhi_dev_ctxt);
#endif
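
Side note on the reworked enum MHI_PM_STATE above: encoding each PM state as a single BIT() lets the validity checks become one mask test instead of the old ordering comparison, and the new error/shutdown states slot in without renumbering the sequence. A minimal sketch of how callers in the hunks below use these macros (condensed from this series, not new API):

	read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
		/* register access is forbidden in this PM state */
		read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
		return -EIO;
	}
	/* ... safe to touch MMIO here ... */
	read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
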
diff --git a/drivers/platform/msm/mhi/mhi_bhi.c b/drivers/platform/msm/mhi/mhi_bhi.c
index 0cc8967757ec..3bc8205b5f0f 100644
--- a/drivers/platform/msm/mhi/mhi_bhi.c
+++ b/drivers/platform/msm/mhi/mhi_bhi.c
@@ -137,17 +137,36 @@ static int bhi_alloc_pbl_xfer(struct mhi_device_ctxt *mhi_dev_ctxt,
return 0;
}
-/* Load firmware via bhie protocol */
-static int bhi_load_bhie_firmware(struct mhi_device_ctxt *mhi_dev_ctxt)
+/* transfer firmware or ramdump via bhie protocol */
+static int bhi_bhie_transfer(struct mhi_device_ctxt *mhi_dev_ctxt,
+ struct bhie_vec_table *vec_table,
+ bool tx_vec_table)
{
struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
- struct bhie_vec_table *fw_table = &bhi_ctxt->fw_table;
+ /* last element is the vector table */
const struct bhie_mem_info *bhie_mem_info =
- &fw_table->bhie_mem_info[fw_table->segment_count - 1];
+ &vec_table->bhie_mem_info[vec_table->segment_count - 1];
u32 val;
- const u32 tx_sequence = fw_table->sequence++;
+ const u32 tx_sequence = vec_table->sequence++;
unsigned long timeout;
rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
+ unsigned int bhie_vecaddr_high_offs, bhie_vecaddr_low_offs,
+ bhie_vecsize_offs, bhie_vecdb_offs,
+ bhie_vecstatus_offs;
+
+ if (tx_vec_table) {
+ bhie_vecaddr_high_offs = BHIE_TXVECADDR_HIGH_OFFS;
+ bhie_vecaddr_low_offs = BHIE_TXVECADDR_LOW_OFFS;
+ bhie_vecsize_offs = BHIE_TXVECSIZE_OFFS;
+ bhie_vecdb_offs = BHIE_TXVECDB_OFFS;
+ bhie_vecstatus_offs = BHIE_TXVECSTATUS_OFFS;
+ } else {
+ bhie_vecaddr_high_offs = BHIE_RXVECADDR_HIGH_OFFS;
+ bhie_vecaddr_low_offs = BHIE_RXVECADDR_LOW_OFFS;
+ bhie_vecsize_offs = BHIE_RXVECSIZE_OFFS;
+ bhie_vecdb_offs = BHIE_RXVECDB_OFFS;
+ bhie_vecstatus_offs = BHIE_RXVECSTATUS_OFFS;
+ }
/* Program TX/RX Vector table */
read_lock_bh(pm_xfer_lock);
@@ -157,27 +176,17 @@ static int bhi_load_bhie_firmware(struct mhi_device_ctxt *mhi_dev_ctxt)
}
val = HIGH_WORD(bhie_mem_info->phys_addr);
- mhi_reg_write(mhi_dev_ctxt,
- bhi_ctxt->bhi_base,
- BHIE_TXVECADDR_HIGH_OFFS,
- val);
+ mhi_reg_write(mhi_dev_ctxt, bhi_ctxt->bhi_base,
+ bhie_vecaddr_high_offs, val);
val = LOW_WORD(bhie_mem_info->phys_addr);
- mhi_reg_write(mhi_dev_ctxt,
- bhi_ctxt->bhi_base,
- BHIE_TXVECADDR_LOW_OFFS,
- val);
+ mhi_reg_write(mhi_dev_ctxt, bhi_ctxt->bhi_base,
+ bhie_vecaddr_low_offs, val);
val = (u32)bhie_mem_info->size;
- mhi_reg_write(mhi_dev_ctxt,
- bhi_ctxt->bhi_base,
- BHIE_TXVECSIZE_OFFS,
- val);
+ mhi_reg_write(mhi_dev_ctxt, bhi_ctxt->bhi_base, bhie_vecsize_offs, val);
/* Ring DB to begin Xfer */
- mhi_reg_write_field(mhi_dev_ctxt,
- bhi_ctxt->bhi_base,
- BHIE_TXVECDB_OFFS,
- BHIE_TXVECDB_SEQNUM_BMSK,
- BHIE_TXVECDB_SEQNUM_SHFT,
+ mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base, bhie_vecdb_offs,
+ BHIE_TXVECDB_SEQNUM_BMSK, BHIE_TXVECDB_SEQNUM_SHFT,
tx_sequence);
read_unlock_bh(pm_xfer_lock);
@@ -190,10 +199,10 @@ static int bhi_load_bhie_firmware(struct mhi_device_ctxt *mhi_dev_ctxt)
read_unlock_bh(pm_xfer_lock);
return -EIO;
}
- val = mhi_reg_read(bhi_ctxt->bhi_base, BHIE_TXVECSTATUS_OFFS);
+ val = mhi_reg_read(bhi_ctxt->bhi_base, bhie_vecstatus_offs);
read_unlock_bh(pm_xfer_lock);
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "TXVEC_STATUS:0x%x\n", val);
+ "%sVEC_STATUS:0x%x\n", tx_vec_table ? "TX" : "RX", val);
current_seq = (val & BHIE_TXVECSTATUS_SEQNUM_BMSK) >>
BHIE_TXVECSTATUS_SEQNUM_SHFT;
status = (val & BHIE_TXVECSTATUS_STATUS_BMSK) >>
@@ -201,17 +210,60 @@ static int bhi_load_bhie_firmware(struct mhi_device_ctxt *mhi_dev_ctxt)
if ((status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) &&
(current_seq == tx_sequence)) {
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Image transfer complete\n");
+ "%s transfer complete\n",
+ tx_vec_table ? "image" : "rddm");
return 0;
}
msleep(BHI_POLL_SLEEP_TIME_MS);
}
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
- "Error xfering image via BHIE\n");
+ "Error xfer %s via BHIE\n", tx_vec_table ? "image" : "rddm");
return -EIO;
}
+static int bhi_rddm_graceful(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ int ret;
+ struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+ struct bhie_vec_table *rddm_table = &bhi_ctxt->rddm_table;
+ enum MHI_EXEC_ENV exec_env = mhi_dev_ctxt->dev_exec_env;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Entered with pm_state:0x%x exec_env:0x%x mhi_state:%s\n",
+ mhi_dev_ctxt->mhi_pm_state, exec_env,
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+
+ if (exec_env != MHI_EXEC_ENV_RDDM) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Not in RDDM exec env, exec_env:0x%x\n", exec_env);
+ return -EIO;
+ }
+
+ ret = bhi_bhie_transfer(mhi_dev_ctxt, rddm_table, false);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "rddm transfer status:%d\n", ret);
+ return ret;
+}
+
+/* collect ramdump from device using bhie protocol */
+int bhi_rddm(struct mhi_device_ctxt *mhi_dev_ctxt, bool in_panic)
+{
+ struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+ struct bhie_vec_table *rddm_table = &bhi_ctxt->rddm_table;
+
+ if (!rddm_table->bhie_mem_info) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "RDDM table == NULL\n");
+ return -ENOMEM;
+ }
+
+ if (!in_panic)
+ return bhi_rddm_graceful(mhi_dev_ctxt);
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "RDDM collection in panic not yet supported\n");
+ return -EINVAL;
+}
+
static int bhi_load_firmware(struct mhi_device_ctxt *mhi_dev_ctxt)
{
struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
@@ -425,7 +477,8 @@ void bhi_firmware_download(struct work_struct *work)
return;
}
- ret = bhi_load_bhie_firmware(mhi_dev_ctxt);
+ ret = bhi_bhie_transfer(mhi_dev_ctxt, &mhi_dev_ctxt->bhi_ctxt.fw_table,
+ true);
if (ret) {
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to Load amss firmware\n");
@@ -437,6 +490,7 @@ int bhi_probe(struct mhi_device_ctxt *mhi_dev_ctxt)
struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
struct firmware_info *fw_info = &bhi_ctxt->firmware_info;
struct bhie_vec_table *fw_table = &bhi_ctxt->fw_table;
+ struct bhie_vec_table *rddm_table = &bhi_ctxt->rddm_table;
const struct firmware *firmware;
struct scatterlist *itr;
int ret, i;
@@ -503,7 +557,75 @@ int bhi_probe(struct mhi_device_ctxt *mhi_dev_ctxt)
fw_table->sequence++;
release_firmware(firmware);
+ /* allocate memory and setup rddm table */
+ if (bhi_ctxt->support_rddm) {
+ ret = bhi_alloc_bhie_xfer(mhi_dev_ctxt, bhi_ctxt->rddm_size,
+ rddm_table);
+ if (!ret) {
+ for (i = 0, itr = &rddm_table->sg_list[1];
+ i < rddm_table->segment_count - 1; i++, itr++) {
+ size_t size = rddm_table->bhie_mem_info[i].size;
+
+ rddm_table->bhi_vec_entry[i].phys_addr =
+ rddm_table->bhie_mem_info[i].phys_addr;
+ rddm_table->bhi_vec_entry[i].size = size;
+ sg_set_buf(itr, rddm_table->
+ bhie_mem_info[i].aligned, size);
+ sg_dma_address(itr) =
+ rddm_table->bhie_mem_info[i].phys_addr;
+ sg_dma_len(itr) = size;
+ }
+ rddm_table->sequence++;
+ } else {
+ /* out of memory for rddm; not a fatal error */
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Could not successfully allocate mem for rddm\n");
+ }
+ }
+
/* Schedule a worker thread and wait for BHI Event */
schedule_work(&bhi_ctxt->fw_load_work);
return 0;
}
+
+void bhi_exit(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+ struct bhie_vec_table *fw_table = &bhi_ctxt->fw_table;
+ struct bhie_vec_table *rddm_table = &bhi_ctxt->rddm_table;
+ struct device *dev = &mhi_dev_ctxt->plat_dev->dev;
+ struct bhie_mem_info *bhie_mem_info;
+ int i;
+
+ if (bhi_ctxt->manage_boot == false)
+ return;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "freeing firmware and rddm memory\n");
+
+ /* free memory allocated for firmware */
+ kfree(fw_table->sg_list);
+ fw_table->sg_list = NULL;
+ bhie_mem_info = fw_table->bhie_mem_info;
+ for (i = 0; i < fw_table->segment_count; i++, bhie_mem_info++)
+ dma_free_coherent(dev, bhie_mem_info->alloc_size,
+ bhie_mem_info->pre_aligned,
+ bhie_mem_info->dma_handle);
+ fw_table->bhie_mem_info = NULL;
+ /* vector table is the last entry in bhie_mem_info */
+ fw_table->bhi_vec_entry = NULL;
+
+ if (!rddm_table->bhie_mem_info)
+ return;
+
+ /* free memory allocated for rddm */
+ kfree(rddm_table->sg_list);
+ rddm_table->sg_list = NULL;
+ bhie_mem_info = rddm_table->bhie_mem_info;
+ for (i = 0; i < rddm_table->segment_count; i++, bhie_mem_info++)
+ dma_free_coherent(dev, bhie_mem_info->alloc_size,
+ bhie_mem_info->pre_aligned,
+ bhie_mem_info->dma_handle);
+ rddm_table->bhie_mem_info = NULL;
+ rddm_table->bhi_vec_entry = NULL;
+}
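
The refactor above folds the old bhi_load_bhie_firmware() into bhi_bhie_transfer(), which programs either the TX (host-to-device firmware) or RX (device-to-host ramdump) vector registers depending on tx_vec_table. One reviewer-level observation: the status parsing still uses the BHIE_TXVECSTATUS_* masks for both directions, which is only correct if the TX and RX status register layouts match. Condensed from this file, the two call sites are:

	/* firmware download over BHIE: TX vector registers */
	ret = bhi_bhie_transfer(mhi_dev_ctxt, &bhi_ctxt->fw_table, true);

	/* ramdump (RDDM) collection over BHIE: RX vector registers */
	ret = bhi_bhie_transfer(mhi_dev_ctxt, &bhi_ctxt->rddm_table, false);
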
diff --git a/drivers/platform/msm/mhi/mhi_bhi.h b/drivers/platform/msm/mhi/mhi_bhi.h
index 15137ba5dfdf..8f7b3d69347c 100644
--- a/drivers/platform/msm/mhi/mhi_bhi.h
+++ b/drivers/platform/msm/mhi/mhi_bhi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -90,5 +90,6 @@
int bhi_probe(struct mhi_device_ctxt *mhi_dev_ctxt);
void bhi_firmware_download(struct work_struct *work);
+int bhi_rddm(struct mhi_device_ctxt *mhi_dev_ctxt, bool in_panic);
#endif
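
bhi_rddm() is the entry point the PM layer uses for dump collection; in this series its only caller is mhi_pm_control_device() (see mhi_pm.c below), roughly:

	case MHI_DEV_CTRL_RDDM:
		/* graceful path only; in_panic currently returns -EINVAL */
		return bhi_rddm(mhi_dev_ctxt, false);
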
diff --git a/drivers/platform/msm/mhi/mhi_event.c b/drivers/platform/msm/mhi/mhi_event.c
index ae677bae63dc..ea324339eac7 100644
--- a/drivers/platform/msm/mhi/mhi_event.c
+++ b/drivers/platform/msm/mhi/mhi_event.c
@@ -226,8 +226,7 @@ int init_local_ev_ring_by_type(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered\n");
for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; i++) {
if (GET_EV_PROPS(EV_TYPE,
- mhi_dev_ctxt->ev_ring_props[i].flags) == type &&
- !mhi_dev_ctxt->ev_ring_props[i].state) {
+ mhi_dev_ctxt->ev_ring_props[i].flags) == type) {
ret_val = mhi_init_local_event_ring(mhi_dev_ctxt,
mhi_dev_ctxt->ev_ring_props[i].nr_desc,
i);
@@ -292,7 +291,6 @@ int mhi_init_local_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
break;
}
}
- mhi_dev_ctxt->ev_ring_props[ring_index].state = MHI_EVENT_RING_INIT;
spin_unlock_irqrestore(lock, flags);
return ret_val;
}
@@ -309,6 +307,7 @@ void mhi_reset_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
&mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[index];
local_ev_ctxt =
&mhi_dev_ctxt->mhi_local_event_ctxt[index];
+ spin_lock_irq(&local_ev_ctxt->ring_lock);
ev_ctxt->mhi_event_read_ptr = ev_ctxt->mhi_event_ring_base_addr;
ev_ctxt->mhi_event_write_ptr = ev_ctxt->mhi_event_ring_base_addr;
local_ev_ctxt->rp = local_ev_ctxt->base;
@@ -317,6 +316,5 @@ void mhi_reset_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
ev_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[index];
ev_ctxt->mhi_event_read_ptr = ev_ctxt->mhi_event_ring_base_addr;
ev_ctxt->mhi_event_write_ptr = ev_ctxt->mhi_event_ring_base_addr;
- /* Flush writes to MMIO */
- wmb();
+ spin_unlock_irq(&local_ev_ctxt->ring_lock);
}
diff --git a/drivers/platform/msm/mhi/mhi_iface.c b/drivers/platform/msm/mhi/mhi_iface.c
index f1c562974816..64a09a2f9fbb 100644
--- a/drivers/platform/msm/mhi/mhi_iface.c
+++ b/drivers/platform/msm/mhi/mhi_iface.c
@@ -189,6 +189,7 @@ static int mhi_pci_probe(struct pci_dev *pcie_device,
mhi_dev_ctxt->mhi_pm_state = MHI_PM_DISABLE;
INIT_WORK(&mhi_dev_ctxt->process_m1_worker, process_m1_transition);
INIT_WORK(&mhi_dev_ctxt->st_thread_worker, mhi_state_change_worker);
+ INIT_WORK(&mhi_dev_ctxt->process_sys_err_worker, mhi_sys_err_worker);
mutex_init(&mhi_dev_ctxt->pm_lock);
rwlock_init(&mhi_dev_ctxt->pm_xfer_lock);
spin_lock_init(&mhi_dev_ctxt->dev_wake_lock);
diff --git a/drivers/platform/msm/mhi/mhi_isr.c b/drivers/platform/msm/mhi/mhi_isr.c
index 9aa9aeb7e646..70e4393f2f59 100644
--- a/drivers/platform/msm/mhi/mhi_isr.c
+++ b/drivers/platform/msm/mhi/mhi_isr.c
@@ -23,16 +23,18 @@ static int mhi_process_event_ring(
union mhi_event_pkt *local_rp = NULL;
union mhi_event_pkt *device_rp = NULL;
union mhi_event_pkt event_to_process;
- int ret_val = 0;
+ int count = 0;
struct mhi_event_ctxt *ev_ctxt = NULL;
unsigned long flags;
struct mhi_ring *local_ev_ctxt =
&mhi_dev_ctxt->mhi_local_event_ctxt[ev_index];
- mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "enter ev_index:%u\n", ev_index);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Enter ev_index:%u\n", ev_index);
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- if (unlikely(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE)) {
- mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR, "Invalid MHI PM State\n");
+ if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_dev_ctxt->mhi_pm_state))) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "No event access, PM_STATE:0x%x\n",
+ mhi_dev_ctxt->mhi_pm_state);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
return -EIO;
}
@@ -98,6 +100,7 @@ static int mhi_process_event_ring(
{
u32 chan;
struct mhi_ring *ring;
+ unsigned long flags;
__pm_stay_awake(&mhi_dev_ctxt->w_lock);
chan = MHI_EV_READ_CHID(EV_CHID, &event_to_process);
@@ -107,12 +110,12 @@ static int mhi_process_event_ring(
break;
}
ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
- spin_lock_bh(&ring->ring_lock);
+ spin_lock_irqsave(&ring->ring_lock, flags);
if (ring->ch_state == MHI_CHAN_STATE_ENABLED)
parse_xfer_event(mhi_dev_ctxt,
&event_to_process,
ev_index);
- spin_unlock_bh(&ring->ring_lock);
+ spin_unlock_irqrestore(&ring->ring_lock, flags);
__pm_relax(&mhi_dev_ctxt->w_lock);
event_quota--;
break;
@@ -136,18 +139,41 @@ static int mhi_process_event_ring(
mhi_dev_ctxt->mhi_state =
mhi_get_m_state(mhi_dev_ctxt);
if (mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
- mhi_dev_ctxt->mhi_pm_state = MHI_PM_M1;
- mhi_dev_ctxt->counters.m0_m1++;
- schedule_work(&mhi_dev_ctxt->
- process_m1_worker);
+ enum MHI_PM_STATE state;
+
+ state = mhi_tryset_pm_state
+ (mhi_dev_ctxt, MHI_PM_M1);
+ if (state == MHI_PM_M1) {
+ mhi_dev_ctxt->counters.m0_m1++;
+ schedule_work
+ (&mhi_dev_ctxt->
+ process_m1_worker);
+ }
}
write_unlock_irqrestore(&mhi_dev_ctxt->
- pm_xfer_lock,
- flags);
+ pm_xfer_lock, flags);
break;
case STATE_TRANSITION_M3:
process_m3_transition(mhi_dev_ctxt);
break;
+ case STATE_TRANSITION_SYS_ERR:
+ {
+ enum MHI_PM_STATE new_state;
+ unsigned long flags;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "MHI System Error Detected\n");
+ write_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock,
+ flags);
+ new_state = mhi_tryset_pm_state
+ (mhi_dev_ctxt, MHI_PM_SYS_ERR_DETECT);
+ write_unlock_irqrestore
+ (&mhi_dev_ctxt->pm_xfer_lock, flags);
+ if (new_state == MHI_PM_SYS_ERR_DETECT)
+ schedule_work(&mhi_dev_ctxt->
+ process_sys_err_worker);
+ break;
+ }
default:
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Unsupported STE received ring 0x%x State:%s\n",
@@ -158,28 +184,36 @@ static int mhi_process_event_ring(
}
case MHI_PKT_TYPE_EE_EVENT:
{
- enum STATE_TRANSITION new_state;
+ enum STATE_TRANSITION new_state = 0;
+ enum MHI_EXEC_ENV event =
+ MHI_READ_EXEC_ENV(&event_to_process);
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "MHI EEE received ring 0x%x\n", ev_index);
+ "MHI EE received ring 0x%x event:0x%x\n",
+ ev_index, event);
__pm_stay_awake(&mhi_dev_ctxt->w_lock);
__pm_relax(&mhi_dev_ctxt->w_lock);
- switch (MHI_READ_EXEC_ENV(&event_to_process)) {
+ switch (event) {
case MHI_EXEC_ENV_SBL:
new_state = STATE_TRANSITION_SBL;
- mhi_init_state_transition(mhi_dev_ctxt,
- new_state);
break;
case MHI_EXEC_ENV_AMSS:
new_state = STATE_TRANSITION_AMSS;
- mhi_init_state_transition(mhi_dev_ctxt,
- new_state);
break;
case MHI_EXEC_ENV_BHIE:
new_state = STATE_TRANSITION_BHIE;
+ break;
+ case MHI_EXEC_ENV_RDDM:
+ new_state = STATE_TRANSITION_RDDM;
+ break;
+ default:
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Invalid EE Event 0x%x received\n",
+ event);
+ }
+ if (new_state)
mhi_init_state_transition(mhi_dev_ctxt,
new_state);
- }
break;
}
case MHI_PKT_TYPE_STALE_EVENT:
@@ -187,11 +221,6 @@ static int mhi_process_event_ring(
"Stale Event received for chan:%u\n",
MHI_EV_READ_CHID(EV_CHID, local_rp));
break;
- case MHI_PKT_TYPE_SYS_ERR_EVENT:
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "MHI System Error Detected. Triggering Reset\n");
- BUG();
- break;
default:
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Unsupported packet type code 0x%x\n",
@@ -207,13 +236,13 @@ static int mhi_process_event_ring(
ev_index,
ev_ctxt->mhi_event_read_ptr);
spin_unlock_irqrestore(&local_ev_ctxt->ring_lock, flags);
- ret_val = 0;
+ count++;
}
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "exit ev_index:%u\n", ev_index);
- return ret_val;
+ return count;
}
void mhi_ev_task(unsigned long data)
@@ -222,10 +251,40 @@ void mhi_ev_task(unsigned long data)
struct mhi_device_ctxt *mhi_dev_ctxt =
mhi_ring->mhi_dev_ctxt;
int ev_index = mhi_ring->index;
+ const int CTRL_EV = 0; /* event ring for ctrl events */
+ int ret;
mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Enter\n");
+
/* Process event ring */
- mhi_process_event_ring(mhi_dev_ctxt, ev_index, U32_MAX);
+ ret = mhi_process_event_ring(mhi_dev_ctxt, ev_index, U32_MAX);
+ /*
+ * If we received an MSI for the primary event ring with no events to
+ * process, check the status register to see if the device entered the
+ * SYSERR state
+ */
+ if (ev_index == CTRL_EV && !ret) {
+ bool in_sys_err = false;
+ unsigned long flags;
+ enum MHI_PM_STATE new_state;
+
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ if (MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state))
+ in_sys_err = mhi_in_sys_err(mhi_dev_ctxt);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+
+ if (in_sys_err) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "MHI System Error Detected\n");
+ write_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ new_state = mhi_tryset_pm_state(mhi_dev_ctxt,
+ MHI_PM_SYS_ERR_DETECT);
+ write_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock,
+ flags);
+ if (new_state == MHI_PM_SYS_ERR_DETECT)
+ schedule_work(&mhi_dev_ctxt->
+ process_sys_err_worker);
+ }
+ }
enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, ev_index));
mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Exit\n");
@@ -258,7 +317,7 @@ struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle)
ret_val = mhi_process_event_ring(client_config->mhi_dev_ctxt,
client_config->event_ring_index,
1);
- if (ret_val)
+ if (ret_val < 0)
mhi_log(client_config->mhi_dev_ctxt, MHI_MSG_INFO,
"NAPI failed to process event ring\n");
return &(client_config->result);
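
The return contract of mhi_process_event_ring() changes in this file: it used to return 0/-errno and now returns the number of events consumed, or a negative errno when event access is forbidden. That is what lets the two call sites above behave differently. A sketch of the new contract, assuming the hunks apply as shown:

	int n = mhi_process_event_ring(mhi_dev_ctxt, ev_index, U32_MAX);

	if (n < 0) {
		/* PM state forbids event access (e.g. SYS_ERR detected) */
	} else if (n == 0 && ev_index == 0) {
		/* spurious MSI on the control ring: poll for SYSERR */
	} else {
		/* n events were processed normally */
	}
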
diff --git a/drivers/platform/msm/mhi/mhi_main.c b/drivers/platform/msm/mhi/mhi_main.c
index 644004672cd2..46baf7332900 100644
--- a/drivers/platform/msm/mhi/mhi_main.c
+++ b/drivers/platform/msm/mhi/mhi_main.c
@@ -30,11 +30,6 @@
#include "mhi_bhi.h"
#include "mhi_trace.h"
-static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
- union mhi_cmd_pkt *cmd_pkt);
-static void disable_bb_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
- struct mhi_ring *bb_ctxt);
-
static int enable_bb_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_ring *bb_ctxt,
int nr_el,
@@ -306,6 +301,47 @@ static int populate_tre_ring(struct mhi_client_config *client_config)
return 0;
}
+void mhi_notify_client(struct mhi_client_handle *client_handle,
+ enum MHI_CB_REASON reason)
+{
+ struct mhi_cb_info cb_info = {0};
+ struct mhi_result result = {0};
+ struct mhi_client_config *client_config;
+
+ cb_info.result = NULL;
+ cb_info.cb_reason = reason;
+
+ if (client_handle == NULL)
+ return;
+
+ client_config = client_handle->client_config;
+
+ if (client_config->client_info.mhi_client_cb) {
+ result.user_data = client_config->user_data;
+ cb_info.chan = client_config->chan_info.chan_nr;
+ cb_info.result = &result;
+ mhi_log(client_config->mhi_dev_ctxt, MHI_MSG_INFO,
+ "Calling back for chan %d, reason %d\n",
+ cb_info.chan,
+ reason);
+ client_config->client_info.mhi_client_cb(&cb_info);
+ }
+}
+
+void mhi_notify_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
+ enum MHI_CB_REASON reason)
+{
+ int i;
+ struct mhi_client_handle *client_handle = NULL;
+
+ for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
+ if (VALID_CHAN_NR(i)) {
+ client_handle = mhi_dev_ctxt->client_handle_list[i];
+ mhi_notify_client(client_handle, reason);
+ }
+ }
+}
+
int mhi_open_channel(struct mhi_client_handle *client_handle)
{
int ret_val = 0;
@@ -389,10 +425,10 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
ret_val = 0;
}
- spin_lock(&cfg->event_lock);
+ spin_lock_irq(&cfg->event_lock);
cmd_event_pkt = cfg->cmd_event_pkt;
cmd_pkt = cfg->cmd_pkt;
- spin_unlock(&cfg->event_lock);
+ spin_unlock_irq(&cfg->event_lock);
ev_code = MHI_EV_READ_CODE(EV_TRB_CODE,
((union mhi_event_pkt *)&cmd_event_pkt));
@@ -628,10 +664,7 @@ void mhi_close_channel(struct mhi_client_handle *client_handle)
}
error_completion:
- ret_val = reset_chan_cmd(mhi_dev_ctxt, &cmd_pkt);
- if (ret_val)
- mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
- "Error resetting cmd ret:%d\n", ret_val);
+ mhi_reset_chan(mhi_dev_ctxt, chan);
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
@@ -1391,11 +1424,8 @@ int recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
}
-static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
- union mhi_cmd_pkt *cmd_pkt)
+void mhi_reset_chan(struct mhi_device_ctxt *mhi_dev_ctxt, int chan)
{
- u32 chan = 0;
- int ret_val = 0;
struct mhi_ring *local_chan_ctxt;
struct mhi_ring *ev_ring;
struct mhi_chan_ctxt *chan_ctxt;
@@ -1405,14 +1435,6 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
union mhi_event_pkt *local_rp = NULL;
union mhi_event_pkt *device_rp = NULL;
- MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
-
- if (!VALID_CHAN_NR(chan)) {
- mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
- "Bad channel number for CCE\n");
- return -EINVAL;
- }
-
local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
ev_ring = &mhi_dev_ctxt->
@@ -1420,7 +1442,7 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
ev_ctxt = &mhi_dev_ctxt->
dev_space.ring_ctxt.ec_list[chan_ctxt->mhi_event_ring_index];
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Processed cmd reset event\n");
+ "Marking all events for chan:%d as stale\n", chan);
/* Clear all stale events related to Channel */
spin_lock_irqsave(&ev_ring->ring_lock, flags);
@@ -1483,7 +1505,6 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
chan_ctxt->mhi_trb_write_ptr = chan_ctxt->mhi_trb_ring_base_addr;
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Reset complete.\n");
- return ret_val;
}
enum MHI_EVENT_CCS get_cmd_pkt(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -1510,11 +1531,11 @@ int mhi_poll_inbound(struct mhi_client_handle *client_handle,
struct mhi_tx_pkt *pending_trb = 0;
struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
struct mhi_ring *local_chan_ctxt = NULL;
- struct mhi_chan_cfg *cfg;
struct mhi_ring *bb_ctxt = NULL;
struct mhi_buf_info *bb = NULL;
struct mhi_client_config *client_config;
- int chan = 0, r = 0;
+ int chan = 0, r = -EIO;
+ unsigned long flags;
if (!client_handle || !result)
return -EINVAL;
@@ -1525,36 +1546,38 @@ int mhi_poll_inbound(struct mhi_client_handle *client_handle,
chan = client_config->chan_info.chan_nr;
local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
- cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
bb_ctxt = &mhi_dev_ctxt->chan_bb_list[chan];
- mutex_lock(&cfg->chan_lock);
- if (bb_ctxt->rp != bb_ctxt->ack_rp) {
- pending_trb = (struct mhi_tx_pkt *)(local_chan_ctxt->ack_rp);
- result->flags = pending_trb->info;
- bb = bb_ctxt->ack_rp;
- if (bb->bb_active) {
- mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
- "Bounce buffer active chan %d, copying data\n",
- chan);
+ spin_lock_irqsave(&local_chan_ctxt->ring_lock, flags);
+ if (local_chan_ctxt->ch_state == MHI_CHAN_STATE_ENABLED) {
+ if (bb_ctxt->rp != bb_ctxt->ack_rp) {
+ pending_trb =
+ (struct mhi_tx_pkt *)(local_chan_ctxt->ack_rp);
+ result->flags = pending_trb->info;
+ bb = bb_ctxt->ack_rp;
+ if (bb->bb_active) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "Bounce buffer active chan %d, copying data\n",
+ chan);
+ }
+ result->buf_addr = bb->client_buf;
+ result->bytes_xferd = bb->filled_size;
+ result->transaction_status = 0;
+ r = delete_element(local_chan_ctxt,
+ &local_chan_ctxt->ack_rp,
+ &local_chan_ctxt->rp, NULL);
+ WARN_ON(r);
+ r = delete_element(bb_ctxt,
+ &bb_ctxt->ack_rp,
+ &bb_ctxt->rp, NULL);
+ WARN_ON(r);
+ } else {
+ result->buf_addr = 0;
+ result->bytes_xferd = 0;
+ r = -ENODATA;
}
- result->buf_addr = bb->client_buf;
- result->bytes_xferd = bb->filled_size;
- result->transaction_status = 0;
- r = delete_element(local_chan_ctxt,
- &local_chan_ctxt->ack_rp,
- &local_chan_ctxt->rp, NULL);
- BUG_ON(r);
- r = delete_element(bb_ctxt,
- &bb_ctxt->ack_rp,
- &bb_ctxt->rp, NULL);
- BUG_ON(r);
- } else {
- result->buf_addr = 0;
- result->bytes_xferd = 0;
- r = -ENODATA;
}
- mutex_unlock(&cfg->chan_lock);
+ spin_unlock_irqrestore(&local_chan_ctxt->ring_lock, flags);
mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
"Exited Result: Buf addr: 0x%p Bytes xfed 0x%zx chan %d\n",
result->buf_addr, result->bytes_xferd, chan);
@@ -1647,9 +1670,10 @@ void mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt,
if (unlikely(force_set)) {
spin_lock_irqsave(&mhi_dev_ctxt->dev_wake_lock, flags);
atomic_inc(&mhi_dev_ctxt->counters.device_wake);
- mhi_write_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.chan_db_addr,
- MHI_DEV_WAKE_DB, 1);
+ if (MHI_WAKE_DB_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state))
+ mhi_write_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.chan_db_addr,
+ MHI_DEV_WAKE_DB, 1);
spin_unlock_irqrestore(&mhi_dev_ctxt->dev_wake_lock, flags);
} else {
if (likely(atomic_add_unless(&mhi_dev_ctxt->
@@ -1744,7 +1768,7 @@ EXPORT_SYMBOL(mhi_deregister_channel);
int mhi_register_device(struct mhi_device *mhi_device,
const char *node_name,
- unsigned long user_data)
+ void *user_data)
{
const struct device_node *of_node;
struct mhi_device_ctxt *mhi_dev_ctxt = NULL, *itr;
@@ -1793,6 +1817,7 @@ int mhi_register_device(struct mhi_device *mhi_device,
mhi_dev_ctxt->mhi_pm_state = MHI_PM_DISABLE;
INIT_WORK(&mhi_dev_ctxt->process_m1_worker, process_m1_transition);
INIT_WORK(&mhi_dev_ctxt->st_thread_worker, mhi_state_change_worker);
+ INIT_WORK(&mhi_dev_ctxt->process_sys_err_worker, mhi_sys_err_worker);
mutex_init(&mhi_dev_ctxt->pm_lock);
rwlock_init(&mhi_dev_ctxt->pm_xfer_lock);
spin_lock_init(&mhi_dev_ctxt->dev_wake_lock);
@@ -1828,11 +1853,15 @@ int mhi_register_device(struct mhi_device *mhi_device,
if (!core_info->bar0_base || !core_info->irq_base)
return -EINVAL;
+ if (mhi_device->support_rddm && !mhi_device->rddm_size)
+ return -EINVAL;
mhi_dev_ctxt->bus_master_rt_get = mhi_device->pm_runtime_get;
- mhi_dev_ctxt->bus_master_rt_put = mhi_device->pm_runtime_noidle;
- if (!mhi_dev_ctxt->bus_master_rt_get ||
- !mhi_dev_ctxt->bus_master_rt_put)
+ mhi_dev_ctxt->bus_master_rt_put = mhi_device->pm_runtime_put_noidle;
+ mhi_dev_ctxt->status_cb = mhi_device->status_cb;
+ mhi_dev_ctxt->priv_data = user_data;
+ if (!mhi_dev_ctxt->bus_master_rt_get || !mhi_dev_ctxt->bus_master_rt_put
+ || !mhi_dev_ctxt->status_cb)
return -EINVAL;
ret = mhi_ctxt_init(mhi_dev_ctxt);
@@ -1849,12 +1878,44 @@ int mhi_register_device(struct mhi_device *mhi_device,
mhi_dev_ctxt->runtime_get = mhi_slave_mode_runtime_get;
mhi_dev_ctxt->runtime_put = mhi_slave_mode_runtime_put;
mhi_device->mhi_dev_ctxt = mhi_dev_ctxt;
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exit success\n");
+ /* Store RDDM information */
+ if (mhi_device->support_rddm) {
+ mhi_dev_ctxt->bhi_ctxt.support_rddm = true;
+ mhi_dev_ctxt->bhi_ctxt.rddm_size = mhi_device->rddm_size;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Device support rddm of size:0x%lx bytes\n",
+ mhi_dev_ctxt->bhi_ctxt.rddm_size);
+ }
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exit success\n");
return 0;
}
EXPORT_SYMBOL(mhi_register_device);
+int mhi_xfer_rddm(struct mhi_device *mhi_device, enum mhi_rddm_segment seg,
+ struct scatterlist **sg_list)
+{
+ struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->mhi_dev_ctxt;
+ struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+ int segments = 0;
+
+ *sg_list = NULL;
+ switch (seg) {
+ case MHI_RDDM_FW_SEGMENT:
+ *sg_list = bhi_ctxt->fw_table.sg_list;
+ segments = bhi_ctxt->fw_table.segment_count;
+ break;
+ case MHI_RDDM_RD_SEGMENT:
+ *sg_list = bhi_ctxt->rddm_table.sg_list;
+ segments = bhi_ctxt->rddm_table.segment_count;
+ break;
+ }
+ return segments;
+}
+EXPORT_SYMBOL(mhi_xfer_rddm);
+
void mhi_process_db_brstmode(struct mhi_device_ctxt *mhi_dev_ctxt,
void __iomem *io_addr,
uintptr_t chan,
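
mhi_xfer_rddm() hands the bus master the pre-allocated scatterlists rather than copying the dump. A hypothetical consumer sketch (loop body and message text are illustrative, not from the patch):

	struct scatterlist *sg_list, *itr;
	int i, nr;

	nr = mhi_xfer_rddm(mhi_device, MHI_RDDM_RD_SEGMENT, &sg_list);
	for_each_sg(sg_list, itr, nr, i)
		pr_info("rddm segment %d: dma addr %pad, len %u\n",
			i, &sg_dma_address(itr), sg_dma_len(itr));
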
diff --git a/drivers/platform/msm/mhi/mhi_pm.c b/drivers/platform/msm/mhi/mhi_pm.c
index d7a4f7aa93ef..caa34eadf8ea 100644
--- a/drivers/platform/msm/mhi/mhi_pm.c
+++ b/drivers/platform/msm/mhi/mhi_pm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -62,6 +62,7 @@ static int mhi_pm_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt,
bool force_m3)
{
int r = 0;
+ enum MHI_PM_STATE new_state;
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
@@ -79,13 +80,20 @@ static int mhi_pm_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt,
}
if (unlikely(atomic_read(&mhi_dev_ctxt->counters.device_wake) &&
- force_m3 == false)){
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Busy, Aborting M3\n");
+ force_m3 == false)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Busy, Aborting M3\n");
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
return -EBUSY;
}
+ if (unlikely(!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state))) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Error, no register access, PM_STATE:0x%x\n",
+ mhi_dev_ctxt->mhi_pm_state);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ return -EIO;
+ }
+
mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, false);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
@@ -93,7 +101,7 @@ static int mhi_pm_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
if (!r) {
- mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to get M0||M1 event, timeout, current state:%s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
return -EIO;
@@ -102,7 +110,14 @@ static int mhi_pm_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Allowing M3 State\n");
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
- mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3_ENTER;
+ new_state = mhi_tryset_pm_state(mhi_dev_ctxt, MHI_PM_M3_ENTER);
+ if (unlikely(new_state != MHI_PM_M3_ENTER)) {
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Error setting PM_STATE from 0x%x to 0x%x\n",
+ new_state, MHI_PM_M3_ENTER);
+ return -EIO;
+ }
mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3);
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Waiting for M3 completion.\n");
@@ -110,7 +125,7 @@ static int mhi_pm_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_dev_ctxt->mhi_state == MHI_STATE_M3,
msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
if (!r) {
- mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to get M3 event, timeout, current state:%s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
return -EIO;
@@ -122,6 +137,7 @@ static int mhi_pm_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt,
static int mhi_pm_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int r;
+ enum MHI_PM_STATE cur_state;
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Entered with State:0x%x %s\n",
@@ -129,11 +145,16 @@ static int mhi_pm_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt)
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3_EXIT;
- write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ cur_state = mhi_tryset_pm_state(mhi_dev_ctxt, MHI_PM_M3_EXIT);
+ if (unlikely(cur_state != MHI_PM_M3_EXIT)) {
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Error setting PM_STATE from 0x%x to 0x%x\n",
+ cur_state, MHI_PM_M3_EXIT);
+ return -EAGAIN;
+ }
/* Set and wait for M0 Event */
- write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M0);
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
@@ -164,7 +185,7 @@ int mhi_runtime_suspend(struct device *dev)
mutex_unlock(&mhi_dev_ctxt->pm_lock);
return r;
}
- r = mhi_turn_off_pcie_link(mhi_dev_ctxt);
+ r = mhi_turn_off_pcie_link(mhi_dev_ctxt, true);
if (r) {
mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to Turn off link ret:%d\n", r);
@@ -294,6 +315,21 @@ unlock_pm_lock:
return ret_val;
}
+static void mhi_pm_slave_mode_power_off(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Entered with pm_state:0x%x MHI_STATE:%s\n",
+ mhi_dev_ctxt->mhi_pm_state,
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+
+ if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "MHI already in disabled state\n");
+ return;
+ }
+ process_disable_transition(MHI_PM_SHUTDOWN_PROCESS, mhi_dev_ctxt);
+}
+
static int mhi_pm_slave_mode_suspend(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int r;
@@ -367,7 +403,7 @@ ssize_t sysfs_init_m3(struct device *dev, struct device_attribute *attr,
return count;
}
-int mhi_turn_off_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
+int mhi_turn_off_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt, bool graceful)
{
struct pci_dev *pcie_dev;
int r = 0;
@@ -376,22 +412,23 @@ int mhi_turn_off_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
pcie_dev = mhi_dev_ctxt->pcie_device;
if (0 == mhi_dev_ctxt->flags.link_up) {
- mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Link already marked as down, nothing to do\n");
goto exit;
}
- r = pci_save_state(pcie_dev);
- if (r) {
- mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
- "Failed to save pcie state ret: %d\n", r);
- }
- mhi_dev_ctxt->core.pcie_state = pci_store_saved_state(pcie_dev);
- pci_disable_device(pcie_dev);
- r = pci_set_power_state(pcie_dev, PCI_D3hot);
- if (r) {
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Failed to set pcie power state to D3hot ret:%d\n", r);
+ if (graceful) {
+ r = pci_save_state(pcie_dev);
+ if (r)
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Failed to save pcie state ret: %d\n", r);
+ mhi_dev_ctxt->core.pcie_state = pci_store_saved_state(pcie_dev);
+ pci_disable_device(pcie_dev);
+ r = pci_set_power_state(pcie_dev, PCI_D3hot);
+ if (r)
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Failed to set pcie power state to D3hot ret:%d\n",
+ r);
}
r = msm_pcie_pm_control(MSM_PCIE_SUSPEND,
@@ -430,21 +467,26 @@ int mhi_turn_on_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Could not set bus frequency ret: %d\n", r);
- r = msm_pcie_pm_control(MSM_PCIE_RESUME,
- pcie_dev->bus->number,
- pcie_dev,
- NULL,
- 0);
+ r = msm_pcie_pm_control(MSM_PCIE_RESUME, pcie_dev->bus->number,
+ pcie_dev, NULL, 0);
if (r) {
mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Failed to resume pcie bus ret %d\n", r);
goto exit;
}
+ r = pci_set_power_state(pcie_dev, PCI_D0);
+ if (r) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Failed to set PCI_D0 state ret:%d\n", r);
+ goto exit;
+ }
r = pci_enable_device(pcie_dev);
- if (r)
- mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ if (r) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Failed to enable device ret:%d\n", r);
+ goto exit;
+ }
pci_load_and_free_saved_state(pcie_dev,
&mhi_dev_ctxt->core.pcie_state);
@@ -457,6 +499,44 @@ exit:
return r;
}
+void mhi_link_state_cb(struct msm_pcie_notify *notify)
+{
+ struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
+
+ if (!notify || !notify->data) {
+ pr_err("%s: incomplete handle received\n", __func__);
+ return;
+ }
+
+ mhi_dev_ctxt = notify->data;
+ switch (notify->event) {
+ case MSM_PCIE_EVENT_LINKDOWN:
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Received MSM_PCIE_EVENT_LINKDOWN\n");
+ break;
+ case MSM_PCIE_EVENT_LINKUP:
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Received MSM_PCIE_EVENT_LINKUP\n");
+ mhi_dev_ctxt->counters.link_up_cntr++;
+ break;
+ case MSM_PCIE_EVENT_WAKEUP:
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Received MSM_PCIE_EVENT_WAKE\n");
+ __pm_stay_awake(&mhi_dev_ctxt->w_lock);
+ __pm_relax(&mhi_dev_ctxt->w_lock);
+
+ if (mhi_dev_ctxt->flags.mhi_initialized) {
+ mhi_dev_ctxt->runtime_get(mhi_dev_ctxt);
+ mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
+ }
+ break;
+ default:
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Received bad link event\n");
+ return;
+ }
+}
+
int mhi_pm_control_device(struct mhi_device *mhi_device,
enum mhi_dev_ctrl ctrl)
{
@@ -477,9 +557,34 @@ int mhi_pm_control_device(struct mhi_device *mhi_device,
return mhi_pm_slave_mode_suspend(mhi_dev_ctxt);
case MHI_DEV_CTRL_RESUME:
return mhi_pm_slave_mode_resume(mhi_dev_ctxt);
- default:
+ case MHI_DEV_CTRL_POWER_OFF:
+ mhi_pm_slave_mode_power_off(mhi_dev_ctxt);
+ break;
+ case MHI_DEV_CTRL_RDDM:
+ return bhi_rddm(mhi_dev_ctxt, false);
+ case MHI_DEV_CTRL_DE_INIT:
+ if (mhi_dev_ctxt->mhi_pm_state != MHI_PM_DISABLE)
+ process_disable_transition(MHI_PM_SHUTDOWN_PROCESS,
+ mhi_dev_ctxt);
+ bhi_exit(mhi_dev_ctxt);
+ break;
+ case MHI_DEV_CTRL_NOTIFY_LINK_ERROR:
+ {
+ enum MHI_PM_STATE cur_state;
+
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ cur_state = mhi_tryset_pm_state(mhi_dev_ctxt,
+ MHI_PM_LD_ERR_FATAL_DETECT);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ if (unlikely(cur_state != MHI_PM_LD_ERR_FATAL_DETECT))
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Failed to transition to state 0x%x from 0x%x\n",
+ MHI_PM_LD_ERR_FATAL_DETECT, cur_state);
break;
}
- return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+ return 0;
}
EXPORT_SYMBOL(mhi_pm_control_device);
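
Taken together, mhi_pm_control_device() now gives a slave-mode bus master the full lifecycle. A sketch of a teardown sequence under the new controls (the ordering here is an assumption; the patch only defines the individual verbs):

	/* orderly power down: runs process_disable_transition(SHUTDOWN) */
	mhi_pm_control_device(mhi_device, MHI_DEV_CTRL_POWER_OFF);

	/* final teardown: shutdown if still needed, then bhi_exit()
	 * frees the firmware and rddm tables */
	mhi_pm_control_device(mhi_device, MHI_DEV_CTRL_DE_INIT);

	/* or, on an unrecoverable link error reported out of band: */
	mhi_pm_control_device(mhi_device, MHI_DEV_CTRL_NOTIFY_LINK_ERROR);
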
diff --git a/drivers/platform/msm/mhi/mhi_ssr.c b/drivers/platform/msm/mhi/mhi_ssr.c
index 22481dede21a..9f18b1e7ef85 100644
--- a/drivers/platform/msm/mhi/mhi_ssr.c
+++ b/drivers/platform/msm/mhi/mhi_ssr.c
@@ -13,12 +13,8 @@
#include <linux/pm_runtime.h>
#include <mhi_sys.h>
#include <mhi.h>
-#include <mhi_bhi.h>
-#include <mhi_hwio.h>
-
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/subsystem_notif.h>
-
#include <linux/esoc_client.h>
static int mhi_ssr_notify_cb(struct notifier_block *nb,
@@ -26,35 +22,45 @@ static int mhi_ssr_notify_cb(struct notifier_block *nb,
{
struct mhi_device_ctxt *mhi_dev_ctxt =
container_of(nb, struct mhi_device_ctxt, mhi_ssr_nb);
+ enum MHI_PM_STATE cur_state;
+ struct notif_data *notif_data = (struct notif_data *)data;
+ bool crashed = notif_data->crashed;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Received ESOC notifcation:%lu crashed:%d\n", action, crashed);
switch (action) {
- case SUBSYS_BEFORE_POWERUP:
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Received Subsystem event BEFORE_POWERUP\n");
- break;
- case SUBSYS_AFTER_POWERUP:
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Received Subsystem event AFTER_POWERUP\n");
- break;
- case SUBSYS_POWERUP_FAILURE:
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Received Subsystem event POWERUP_FAILURE\n");
- break;
case SUBSYS_BEFORE_SHUTDOWN:
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Received Subsystem event BEFORE_SHUTDOWN\n");
+ /*
+ * Update internal state only; we'll clean up the MHI context
+ * after the device shuts down completely.
+ */
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ cur_state = mhi_tryset_pm_state(mhi_dev_ctxt,
+ MHI_PM_LD_ERR_FATAL_DETECT);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ if (unlikely(cur_state != MHI_PM_LD_ERR_FATAL_DETECT))
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Failed to transition to state 0x%x from 0x%x\n",
+ MHI_PM_LD_ERR_FATAL_DETECT, cur_state);
break;
case SUBSYS_AFTER_SHUTDOWN:
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Received Subsystem event AFTER_SHUTDOWN\n");
- break;
- case SUBSYS_RAMDUMP_NOTIFICATION:
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Received Subsystem event RAMDUMP\n");
+ if (mhi_dev_ctxt->mhi_pm_state != MHI_PM_DISABLE)
+ process_disable_transition(MHI_PM_SHUTDOWN_PROCESS,
+ mhi_dev_ctxt);
+ mutex_lock(&mhi_dev_ctxt->pm_lock);
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ cur_state = mhi_tryset_pm_state(mhi_dev_ctxt,
+ MHI_PM_SSR_PENDING);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+ if (unlikely(cur_state != MHI_PM_SSR_PENDING))
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Failed to transition to state 0x%x from 0x%x\n",
+ MHI_PM_SSR_PENDING, cur_state);
break;
default:
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Received ESOC notifcation %d, NOT handling\n",
- (int)action);
+ "Not handling esoc notification:%lu\n", action);
break;
}
return NOTIFY_OK;
@@ -91,128 +97,242 @@ int mhi_esoc_register(struct mhi_device_ctxt *mhi_dev_ctxt)
return ret_val;
}
-void mhi_notify_client(struct mhi_client_handle *client_handle,
- enum MHI_CB_REASON reason)
+/* handles the sys_err and shutdown transitions */
+void process_disable_transition(enum MHI_PM_STATE transition_state,
+ struct mhi_device_ctxt *mhi_dev_ctxt)
{
- struct mhi_cb_info cb_info = {0};
- struct mhi_result result = {0};
- struct mhi_client_config *client_config;
+ enum MHI_PM_STATE cur_state, prev_state;
+ struct mhi_client_handle *client_handle;
+ struct mhi_ring *ch_ring, *bb_ring, *cmd_ring;
+ struct mhi_cmd_ctxt *cmd_ctxt;
+ struct mhi_chan_cfg *chan_cfg;
+ rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
+ enum MHI_CB_REASON reason;
+ u32 timeout = mhi_dev_ctxt->poll_reset_timeout_ms;
+ int i;
+ int ret;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Enter with pm_state:0x%x MHI_STATE:%s transition_state:0x%x\n",
+ mhi_dev_ctxt->mhi_pm_state,
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
+ transition_state);
- cb_info.result = NULL;
- cb_info.cb_reason = reason;
+ mutex_lock(&mhi_dev_ctxt->pm_lock);
+ write_lock_irq(pm_xfer_lock);
+ prev_state = mhi_dev_ctxt->mhi_pm_state;
+ cur_state = mhi_tryset_pm_state(mhi_dev_ctxt, transition_state);
+ if (cur_state == transition_state) {
+ mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_DISABLE_TRANSITION;
+ mhi_dev_ctxt->flags.mhi_initialized = false;
+ }
+ write_unlock_irq(pm_xfer_lock);
- if (client_handle == NULL)
+ /* Not handling sys_err; could be in the middle of shutdown */
+ if (unlikely(cur_state != transition_state)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Failed to transition to state 0x%x from 0x%x\n",
+ transition_state, cur_state);
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
return;
+ }
- client_config = client_handle->client_config;
-
- if (client_config->client_info.mhi_client_cb) {
- result.user_data = client_config->user_data;
- cb_info.chan = client_config->chan_info.chan_nr;
- cb_info.result = &result;
- mhi_log(client_config->mhi_dev_ctxt, MHI_MSG_INFO,
- "Calling back for chan %d, reason %d\n",
- cb_info.chan,
- reason);
- client_config->client_info.mhi_client_cb(&cb_info);
+ /*
+ * If we're shutting down, trigger the device into MHI reset
+ * so we can guarantee the device will not access host DDR
+ * during reset
+ */
+ if (cur_state == MHI_PM_SHUTDOWN_PROCESS &&
+ MHI_REG_ACCESS_VALID(prev_state)) {
+ read_lock_bh(pm_xfer_lock);
+ mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_RESET);
+ read_unlock_bh(pm_xfer_lock);
+ mhi_test_for_device_reset(mhi_dev_ctxt);
}
-}
-void mhi_notify_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
- enum MHI_CB_REASON reason)
-{
- int i;
- struct mhi_client_handle *client_handle = NULL;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Waiting for all pending event ring processing to complete\n");
+ for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; i++) {
+ tasklet_kill(&mhi_dev_ctxt->mhi_local_event_ctxt[i].ev_task);
+ flush_work(&mhi_dev_ctxt->mhi_local_event_ctxt[i].ev_worker);
+ }
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Notifying all clients and resetting channels\n");
- for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
- if (VALID_CHAN_NR(i)) {
- client_handle = mhi_dev_ctxt->client_handle_list[i];
+ if (cur_state == MHI_PM_SHUTDOWN_PROCESS)
+ reason = MHI_CB_MHI_SHUTDOWN;
+ else
+ reason = MHI_CB_SYS_ERROR;
+ ch_ring = mhi_dev_ctxt->mhi_local_chan_ctxt;
+ chan_cfg = mhi_dev_ctxt->mhi_chan_cfg;
+ bb_ring = mhi_dev_ctxt->chan_bb_list;
+ for (i = 0; i < MHI_MAX_CHANNELS;
+ i++, ch_ring++, chan_cfg++, bb_ring++) {
+ enum MHI_CHAN_STATE ch_state;
+
+ client_handle = mhi_dev_ctxt->client_handle_list[i];
+ if (client_handle)
mhi_notify_client(client_handle, reason);
+
+ mutex_lock(&chan_cfg->chan_lock);
+ spin_lock_irq(&ch_ring->ring_lock);
+ ch_state = ch_ring->ch_state;
+ ch_ring->ch_state = MHI_CHAN_STATE_DISABLED;
+ spin_unlock_irq(&ch_ring->ring_lock);
+
+ /* Reset channel and free ring */
+ if (ch_state == MHI_CHAN_STATE_ENABLED) {
+ mhi_reset_chan(mhi_dev_ctxt, i);
+ free_tre_ring(mhi_dev_ctxt, i);
+ bb_ring->rp = bb_ring->base;
+ bb_ring->wp = bb_ring->base;
+ bb_ring->ack_rp = bb_ring->base;
}
+ mutex_unlock(&chan_cfg->chan_lock);
}
-}
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Finished notifying clients\n");
-int set_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt)
-{
- u32 pcie_word_val = 0;
- int r = 0;
+ /* Release lock and wait for all pending threads to complete */
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Waiting for all pending threads to complete\n");
+ complete(&mhi_dev_ctxt->cmd_complete);
+ flush_work(&mhi_dev_ctxt->process_m1_worker);
+ flush_work(&mhi_dev_ctxt->st_thread_worker);
+ if (mhi_dev_ctxt->bhi_ctxt.manage_boot)
+ flush_work(&mhi_dev_ctxt->bhi_ctxt.fw_load_work);
+ if (cur_state == MHI_PM_SHUTDOWN_PROCESS)
+ flush_work(&mhi_dev_ctxt->process_sys_err_worker);
- mhi_dev_ctxt->bhi_ctxt.bhi_base = mhi_dev_ctxt->core.bar0_base;
- pcie_word_val = mhi_reg_read(mhi_dev_ctxt->bhi_ctxt.bhi_base, BHIOFF);
+ mutex_lock(&mhi_dev_ctxt->pm_lock);
- /* confirm it's a valid reading */
- if (unlikely(pcie_word_val == U32_MAX)) {
- mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
- "Invalid BHI Offset:0x%x\n", pcie_word_val);
- return -EIO;
- }
- mhi_dev_ctxt->bhi_ctxt.bhi_base += pcie_word_val;
- pcie_word_val = mhi_reg_read(mhi_dev_ctxt->bhi_ctxt.bhi_base,
- BHI_EXECENV);
- mhi_dev_ctxt->dev_exec_env = pcie_word_val;
- if (pcie_word_val == MHI_EXEC_ENV_AMSS) {
- mhi_dev_ctxt->base_state = STATE_TRANSITION_RESET;
- } else if (pcie_word_val == MHI_EXEC_ENV_PBL) {
- mhi_dev_ctxt->base_state = STATE_TRANSITION_BHI;
- } else {
- mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
- "Invalid EXEC_ENV: 0x%x\n",
- pcie_word_val);
- r = -EIO;
+ /*
+ * Shutdown has higher priority than sys_err and can be called
+ * middle of sys error, check current state to confirm state
+ * was not changed.
+ */
+ if (mhi_dev_ctxt->mhi_pm_state != cur_state) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "PM State transitioned to 0x%x while processing 0x%x\n",
+ mhi_dev_ctxt->mhi_pm_state, transition_state);
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+ return;
}
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "EXEC_ENV: %d Base state %d\n",
- pcie_word_val, mhi_dev_ctxt->base_state);
- return r;
-}
-void mhi_link_state_cb(struct msm_pcie_notify *notify)
-{
- struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
+ /* Check all counts to make sure 0 */
+ WARN_ON(atomic_read(&mhi_dev_ctxt->counters.device_wake));
+ WARN_ON(atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
+ if (mhi_dev_ctxt->core.pci_master)
+ WARN_ON(atomic_read(&mhi_dev_ctxt->pcie_device->dev.
+ power.usage_count));
- if (!notify || !notify->data) {
- pr_err("%s: incomplete handle received\n", __func__);
- return;
+ /* Reset Event rings and CMD rings */
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Resetting ev ctxt and cmd ctxt\n");
+
+ cmd_ring = mhi_dev_ctxt->mhi_local_cmd_ctxt;
+ cmd_ctxt = mhi_dev_ctxt->dev_space.ring_ctxt.cmd_ctxt;
+ for (i = 0; i < NR_OF_CMD_RINGS; i++, cmd_ring++) {
+ cmd_ring->rp = cmd_ring->base;
+ cmd_ring->wp = cmd_ring->base;
+ cmd_ctxt->mhi_cmd_ring_read_ptr =
+ cmd_ctxt->mhi_cmd_ring_base_addr;
+ cmd_ctxt->mhi_cmd_ring_write_ptr =
+ cmd_ctxt->mhi_cmd_ring_base_addr;
}
+ for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; i++)
+ mhi_reset_ev_ctxt(mhi_dev_ctxt, i);
- mhi_dev_ctxt = notify->data;
- switch (notify->event) {
- case MSM_PCIE_EVENT_LINKDOWN:
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Received MSM_PCIE_EVENT_LINKDOWN\n");
- break;
- case MSM_PCIE_EVENT_LINKUP:
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Received MSM_PCIE_EVENT_LINKUP\n");
- mhi_dev_ctxt->counters.link_up_cntr++;
- break;
- case MSM_PCIE_EVENT_WAKEUP:
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Received MSM_PCIE_EVENT_WAKE\n");
- __pm_stay_awake(&mhi_dev_ctxt->w_lock);
- __pm_relax(&mhi_dev_ctxt->w_lock);
+ /*
+ * If we're the bus master, disable runtime suspend;
+ * we will enable it again during the AMSS transition
+ */
+ if (mhi_dev_ctxt->core.pci_master)
+ pm_runtime_forbid(&mhi_dev_ctxt->pcie_device->dev);
+
+ if (cur_state == MHI_PM_SYS_ERR_PROCESS) {
+ bool trigger_reset = false;
- if (mhi_dev_ctxt->flags.mhi_initialized) {
- mhi_dev_ctxt->runtime_get(mhi_dev_ctxt);
- mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
- }
- break;
- default:
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Received bad link event\n");
- return;
+ "Triggering device reset\n");
+ reinit_completion(&mhi_dev_ctxt->cmd_complete);
+ write_lock_irq(pm_xfer_lock);
+ /* Link can go down while processing SYS_ERR */
+ if (MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_RESET);
+ mhi_init_state_transition(mhi_dev_ctxt,
+ STATE_TRANSITION_RESET);
+ trigger_reset = true;
}
+ write_unlock_irq(pm_xfer_lock);
+
+ if (trigger_reset) {
+ /*
+			 * Keep MHI in the active (M0) state until the device
+			 * enters the AMSS/RDDM exec env. Otherwise the modem
+			 * hits an error fatal if the host tries to enter M1
+			 * before AMSS/RDDM is reached.
+ */
+ read_lock_bh(pm_xfer_lock);
+ mhi_assert_device_wake(mhi_dev_ctxt, false);
+ read_unlock_bh(pm_xfer_lock);
+
+			/* Wait until the device enters the AMSS/RDDM exec env */
+ ret = wait_for_completion_timeout
+ (&mhi_dev_ctxt->cmd_complete,
+ msecs_to_jiffies(timeout));
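+			/* A return of 0 means the wait timed out */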
+ if (!ret || (mhi_dev_ctxt->dev_exec_env !=
+ MHI_EXEC_ENV_AMSS &&
+ mhi_dev_ctxt->dev_exec_env !=
+ MHI_EXEC_ENV_RDDM)) {
+
+ /*
+				 * Device did not reset properly; notify the
+				 * bus master.
+ */
+ if (!mhi_dev_ctxt->core.pci_master) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Notifying bus master Sys Error Status\n");
+ mhi_dev_ctxt->status_cb(
+ MHI_CB_SYS_ERROR,
+ mhi_dev_ctxt->priv_data);
+ }
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
+ }
+ }
+ } else {
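+		/* Shutdown path: transition straight to DISABLE */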
+ write_lock_irq(pm_xfer_lock);
+ cur_state = mhi_tryset_pm_state(mhi_dev_ctxt, MHI_PM_DISABLE);
+ write_unlock_irq(pm_xfer_lock);
+ if (unlikely(cur_state != MHI_PM_DISABLE))
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+				"Error transitioning from state:0x%x to:0x%x\n",
+ cur_state, MHI_PM_DISABLE);
+
+ if (mhi_dev_ctxt->core.pci_master &&
+ cur_state == MHI_PM_DISABLE)
+ mhi_turn_off_pcie_link(mhi_dev_ctxt,
+ MHI_REG_ACCESS_VALID(prev_state));
+ }
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Exit with pm_state:0x%x exec_env:0x%x mhi_state:%s\n",
+ mhi_dev_ctxt->mhi_pm_state, mhi_dev_ctxt->dev_exec_env,
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
}
-int init_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt)
+void mhi_sys_err_worker(struct work_struct *work)
{
- int r = 0;
+ struct mhi_device_ctxt *mhi_dev_ctxt =
+ container_of(work, struct mhi_device_ctxt,
+ process_sys_err_worker);
- r = mhi_init_state_transition(mhi_dev_ctxt, mhi_dev_ctxt->base_state);
- if (r) {
- mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
- "Failed to start state change event, to %d\n",
- mhi_dev_ctxt->base_state);
- }
- return r;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Enter with pm_state:0x%x MHI_STATE:%s\n",
+ mhi_dev_ctxt->mhi_pm_state,
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+
+ process_disable_transition(MHI_PM_SYS_ERR_PROCESS, mhi_dev_ctxt);
}
diff --git a/drivers/platform/msm/mhi/mhi_states.c b/drivers/platform/msm/mhi/mhi_states.c
index a4da6c21b50d..c0c23c4e0756 100644
--- a/drivers/platform/msm/mhi/mhi_states.c
+++ b/drivers/platform/msm/mhi/mhi_states.c
@@ -13,6 +13,7 @@
#include "mhi_sys.h"
#include "mhi_hwio.h"
#include "mhi_trace.h"
+#include "mhi_bhi.h"
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -33,6 +34,7 @@ const char *state_transition_str(enum STATE_TRANSITION state)
[STATE_TRANSITION_LINK_DOWN] = "LINK_DOWN",
[STATE_TRANSITION_WAKE] = "WAKE",
[STATE_TRANSITION_BHIE] = "BHIE",
+ [STATE_TRANSITION_RDDM] = "RDDM",
[STATE_TRANSITION_SYS_ERR] = "SYS_ERR",
};
@@ -40,6 +42,53 @@ const char *state_transition_str(enum STATE_TRANSITION state)
mhi_states_transition_str[state] : "Invalid";
}
+int set_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ u32 pcie_word_val = 0;
+ int r = 0;
+
+ mhi_dev_ctxt->bhi_ctxt.bhi_base = mhi_dev_ctxt->core.bar0_base;
+ pcie_word_val = mhi_reg_read(mhi_dev_ctxt->bhi_ctxt.bhi_base, BHIOFF);
+
+ /* confirm it's a valid reading */
+ if (unlikely(pcie_word_val == U32_MAX)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Invalid BHI Offset:0x%x\n", pcie_word_val);
+ return -EIO;
+ }
+ mhi_dev_ctxt->bhi_ctxt.bhi_base += pcie_word_val;
+ pcie_word_val = mhi_reg_read(mhi_dev_ctxt->bhi_ctxt.bhi_base,
+ BHI_EXECENV);
+ mhi_dev_ctxt->dev_exec_env = pcie_word_val;
+ if (pcie_word_val == MHI_EXEC_ENV_AMSS) {
+ mhi_dev_ctxt->base_state = STATE_TRANSITION_RESET;
+ } else if (pcie_word_val == MHI_EXEC_ENV_PBL) {
+ mhi_dev_ctxt->base_state = STATE_TRANSITION_BHI;
+ } else {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Invalid EXEC_ENV: 0x%x\n",
+ pcie_word_val);
+ r = -EIO;
+ }
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "EXEC_ENV: %d Base state %d\n",
+ pcie_word_val, mhi_dev_ctxt->base_state);
+ return r;
+}
+
+int init_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ int r = 0;
+
+ r = mhi_init_state_transition(mhi_dev_ctxt, mhi_dev_ctxt->base_state);
+ if (r) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Failed to start state change event, to %d\n",
+ mhi_dev_ctxt->base_state);
+ }
+ return r;
+}
+
enum MHI_STATE mhi_get_m_state(struct mhi_device_ctxt *mhi_dev_ctxt)
{
u32 state = mhi_reg_read_field(mhi_dev_ctxt->mmio_info.mmio_addr,
@@ -47,7 +96,16 @@ enum MHI_STATE mhi_get_m_state(struct mhi_device_ctxt *mhi_dev_ctxt)
MHISTATUS_MHISTATE_MASK,
MHISTATUS_MHISTATE_SHIFT);
- return (state >= MHI_STATE_LIMIT) ? MHI_STATE_LIMIT : state;
+ return state;
+}
+
+bool mhi_in_sys_err(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ u32 state = mhi_reg_read_field(mhi_dev_ctxt->mmio_info.mmio_addr,
+ MHISTATUS, MHISTATUS_SYSERR_MASK,
+ MHISTATUS_SYSERR_SHIFT);
+
+ return (state) ? true : false;
}
void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -69,6 +127,140 @@ void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRL);
}
+/*
+ * Not all MHI state transitions are synchronous; linkdown, SSR, and
+ * shutdown can happen asynchronously at any time. This function moves
+ * to the new state only if the transition is valid.
+ *
+ * Priority increases as we go down the levels: for example, while in
+ * any L0 state, a state from L1, L2, or L3 can be set. The notable
+ * exception to this rule is DISABLE, from which we can transition only
+ * to the POR or SSR_PENDING state. Conversely, while in an L2 state we
+ * cannot jump back to an L1 or L0 state.
+ * Valid transitions:
+ * L0: DISABLE <--> POR
+ *     DISABLE <--> SSR_PENDING
+ *     POR <--> POR
+ *     POR -> M0 -> M1 -> M1_M2 -> M2 -> M0
+ *     M1_M2 -> M0 (device can trigger it)
+ *     M0 -> M3_ENTER -> M3 -> M3_EXIT -> M0
+ *     M1 -> M3_ENTER -> M3
+ * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS -> POR
+ * L2: SHUTDOWN_PROCESS -> DISABLE -> SSR_PENDING (via SSR notification only)
+ * L3: LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS
+ */
+static const struct mhi_pm_transitions mhi_state_transitions[] = {
+ /* L0 States */
+ {
+ MHI_PM_DISABLE,
+ MHI_PM_POR | MHI_PM_SSR_PENDING
+ },
+ {
+ MHI_PM_POR,
+ MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M0,
+ MHI_PM_M1 | MHI_PM_M3_ENTER | MHI_PM_SYS_ERR_DETECT |
+ MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M1,
+ MHI_PM_M1_M2_TRANSITION | MHI_PM_M3_ENTER |
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M1_M2_TRANSITION,
+ MHI_PM_M2 | MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT |
+ MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M2,
+ MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M3_ENTER,
+ MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M3,
+ MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
+ MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_M3_EXIT,
+ MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ /* L1 States */
+ {
+ MHI_PM_SYS_ERR_DETECT,
+ MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_SYS_ERR_PROCESS,
+ MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ /* L2 States */
+ {
+ MHI_PM_SHUTDOWN_PROCESS,
+ MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ /* L3 States */
+ {
+ MHI_PM_LD_ERR_FATAL_DETECT,
+ MHI_PM_SHUTDOWN_PROCESS
+ },
+ /* From SSR notification only */
+ {
+ MHI_PM_SSR_PENDING,
+ MHI_PM_DISABLE
+ }
+};
+
+enum MHI_PM_STATE __must_check mhi_tryset_pm_state(
+ struct mhi_device_ctxt *mhi_dev_ctxt,
+ enum MHI_PM_STATE state)
+{
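+	/*
+	 * Each PM state is a single bit, and mhi_state_transitions is
+	 * ordered by bit position, so the highest set bit of the current
+	 * state doubles as its index into the transition table.
+	 */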
+ unsigned long cur_state = mhi_dev_ctxt->mhi_pm_state;
+ int index = find_last_bit(&cur_state, 32);
+
+ if (unlikely(index >= ARRAY_SIZE(mhi_state_transitions))) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+			"cur_state:0x%lx outside of mhi_state_transitions\n",
+ cur_state);
+ return cur_state;
+ }
+
+ if (unlikely(mhi_state_transitions[index].from_state != cur_state)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "index:%u cur_state:0x%lx != actual_state: 0x%x\n",
+ index, cur_state,
+ mhi_state_transitions[index].from_state);
+ return cur_state;
+ }
+
+ if (unlikely(!(mhi_state_transitions[index].to_states & state))) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Not allowing pm state transition from:0x%lx to:0x%x state\n",
+ cur_state, state);
+ return cur_state;
+ }
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "Transition to pm state from:0x%lx to:0x%x\n",
+ cur_state, state);
+ mhi_dev_ctxt->mhi_pm_state = state;
+ return mhi_dev_ctxt->mhi_pm_state;
+}
+
static void conditional_chan_db_write(
struct mhi_device_ctxt *mhi_dev_ctxt, u32 chan)
{
@@ -158,20 +350,10 @@ static void ring_all_ev_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
}
}
-static int process_bhie_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
- enum STATE_TRANSITION cur_work_item)
-{
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered\n");
- mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_BHIE;
- wake_up(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited\n");
-
- return 0;
-}
-
int process_m0_transition(struct mhi_device_ctxt *mhi_dev_ctxt)
{
unsigned long flags;
+ enum MHI_PM_STATE cur_state;
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Entered With State %s\n",
@@ -190,8 +372,14 @@ int process_m0_transition(struct mhi_device_ctxt *mhi_dev_ctxt)
write_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
mhi_dev_ctxt->mhi_state = MHI_STATE_M0;
- mhi_dev_ctxt->mhi_pm_state = MHI_PM_M0;
+ cur_state = mhi_tryset_pm_state(mhi_dev_ctxt, MHI_PM_M0);
write_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ if (unlikely(cur_state != MHI_PM_M0)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to transition to state 0x%x from 0x%x\n",
+ MHI_PM_M0, cur_state);
+ return -EIO;
+ }
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, true);
@@ -212,6 +400,7 @@ int process_m0_transition(struct mhi_device_ctxt *mhi_dev_ctxt)
void process_m1_transition(struct work_struct *work)
{
struct mhi_device_ctxt *mhi_dev_ctxt;
+ enum MHI_PM_STATE cur_state;
mhi_dev_ctxt = container_of(work,
struct mhi_device_ctxt,
@@ -224,15 +413,18 @@ void process_m1_transition(struct work_struct *work)
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
/* We either Entered M3 or we did M3->M0 Exit */
- if (mhi_dev_ctxt->mhi_pm_state != MHI_PM_M1) {
- write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- mutex_unlock(&mhi_dev_ctxt->pm_lock);
- return;
- }
+ if (mhi_dev_ctxt->mhi_pm_state != MHI_PM_M1)
+ goto invalid_pm_state;
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Transitioning to M2 Transition\n");
- mhi_dev_ctxt->mhi_pm_state = MHI_PM_M1_M2_TRANSITION;
+ cur_state = mhi_tryset_pm_state(mhi_dev_ctxt, MHI_PM_M1_M2_TRANSITION);
+ if (unlikely(cur_state != MHI_PM_M1_M2_TRANSITION)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to transition to state 0x%x from 0x%x\n",
+ MHI_PM_M1_M2_TRANSITION, cur_state);
+ goto invalid_pm_state;
+ }
mhi_dev_ctxt->counters.m1_m2++;
mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M2);
@@ -245,7 +437,13 @@ void process_m1_transition(struct work_struct *work)
if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_M1_M2_TRANSITION) {
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Entered M2 State\n");
- mhi_dev_ctxt->mhi_pm_state = MHI_PM_M2;
+ cur_state = mhi_tryset_pm_state(mhi_dev_ctxt, MHI_PM_M2);
+ if (unlikely(cur_state != MHI_PM_M2)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to transition to state 0x%x from 0x%x\n",
+ MHI_PM_M2, cur_state);
+ goto invalid_pm_state;
+ }
}
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
@@ -263,11 +461,17 @@ void process_m1_transition(struct work_struct *work)
pm_request_autosuspend(&mhi_dev_ctxt->pcie_device->dev);
}
mutex_unlock(&mhi_dev_ctxt->pm_lock);
+ return;
+
+invalid_pm_state:
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
}
int process_m3_transition(struct mhi_device_ctxt *mhi_dev_ctxt)
{
unsigned long flags;
+ enum MHI_PM_STATE cur_state;
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Entered with State %s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
@@ -285,25 +489,18 @@ int process_m3_transition(struct mhi_device_ctxt *mhi_dev_ctxt)
write_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
mhi_dev_ctxt->mhi_state = MHI_STATE_M3;
- mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3;
+ cur_state = mhi_tryset_pm_state(mhi_dev_ctxt, MHI_PM_M3);
write_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ if (unlikely(cur_state != MHI_PM_M3)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to transition to state 0x%x from 0x%x\n",
+ MHI_PM_M3, cur_state);
+ return -EIO;
+ }
wake_up(mhi_dev_ctxt->mhi_ev_wq.m3_event);
return 0;
}
-static int process_bhi_transition(
- struct mhi_device_ctxt *mhi_dev_ctxt,
- enum STATE_TRANSITION cur_work_item)
-{
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered\n");
- write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_dev_ctxt->mhi_state = MHI_STATE_BHI;
- write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited\n");
- return 0;
-}
-
static int process_ready_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
@@ -313,15 +510,12 @@ static int process_ready_transition(
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Processing READY state transition\n");
- r = mhi_reset_all_thread_queues(mhi_dev_ctxt);
- if (r) {
- mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
- "Failed to reset thread queues\n");
- return r;
- }
-
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->mhi_state = MHI_STATE_READY;
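+	/* The link may have gone down while this work was queued */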
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ return -EIO;
+ }
r = mhi_init_mmio(mhi_dev_ctxt);
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
/* Initialize MMIO */
@@ -341,6 +535,10 @@ static int process_ready_transition(
}
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ return -EIO;
+ }
mhi_reg_write_field(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRL,
MHICTRL_MHISTATE_MASK,
@@ -350,30 +548,25 @@ static int process_ready_transition(
return r;
}
-static void mhi_reset_chan_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
- int chan)
-{
- struct mhi_chan_ctxt *chan_ctxt =
- &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
- struct mhi_ring *local_chan_ctxt =
- &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
- chan_ctxt->mhi_trb_read_ptr = chan_ctxt->mhi_trb_ring_base_addr;
- chan_ctxt->mhi_trb_write_ptr = chan_ctxt->mhi_trb_ring_base_addr;
- local_chan_ctxt->rp = local_chan_ctxt->base;
- local_chan_ctxt->wp = local_chan_ctxt->base;
- local_chan_ctxt->ack_rp = local_chan_ctxt->base;
-}
-
static int process_reset_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- int r = 0, i = 0;
+ int r = 0;
+ enum MHI_PM_STATE cur_state;
+
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Processing RESET state transition\n");
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
+ cur_state = mhi_tryset_pm_state(mhi_dev_ctxt, MHI_PM_POR);
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ if (unlikely(cur_state != MHI_PM_POR)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+			"Error transitioning from state:0x%x to:0x%x\n",
+ cur_state, MHI_PM_POR);
+ return -EIO;
+ }
mhi_dev_ctxt->counters.mhi_reset_cntr++;
r = mhi_test_for_device_reset(mhi_dev_ctxt);
@@ -387,25 +580,6 @@ static int process_reset_transition(
return r;
}
- for (i = 0; i < NR_OF_CMD_RINGS; ++i) {
- mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp =
- mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base;
- mhi_dev_ctxt->mhi_local_cmd_ctxt[i].wp =
- mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base;
- mhi_dev_ctxt->dev_space.ring_ctxt.cmd_ctxt[i].
- mhi_cmd_ring_read_ptr =
- mhi_v2p_addr(mhi_dev_ctxt,
- MHI_RING_TYPE_CMD_RING,
- i,
- (uintptr_t)mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp);
- }
- for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i)
- mhi_reset_ev_ctxt(mhi_dev_ctxt, i);
-
- for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
- if (VALID_CHAN_NR(i))
- mhi_reset_chan_ctxt(mhi_dev_ctxt, i);
- }
r = mhi_init_state_transition(mhi_dev_ctxt,
STATE_TRANSITION_READY);
if (0 != r)
@@ -441,19 +615,6 @@ static void enable_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Done.\n");
}
-static int process_sbl_transition(
- struct mhi_device_ctxt *mhi_dev_ctxt,
- enum STATE_TRANSITION cur_work_item)
-{
-
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Enabled\n");
- write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_SBL;
- write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
- return 0;
-}
-
static int process_amss_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
@@ -465,26 +626,19 @@ static int process_amss_transition(
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_AMSS;
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->flags.mhi_initialized = true;
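+	/* Wake waiters (e.g. the sys_err handler) on cmd_complete */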
+ complete(&mhi_dev_ctxt->cmd_complete);
- if (!mhi_dev_ctxt->flags.mhi_initialized) {
- r = mhi_add_elements_to_event_rings(mhi_dev_ctxt,
+ r = mhi_add_elements_to_event_rings(mhi_dev_ctxt,
cur_work_item);
- mhi_dev_ctxt->flags.mhi_initialized = 1;
- if (r) {
- mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
- "Failed to set local chan state ret %d\n", r);
- mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
- return r;
- }
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "Notifying clients that MHI is enabled\n");
- enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
- } else {
- mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
- "MHI is initialized\n");
+ if (r) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Failed to set local chan state ret %d\n", r);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
+ return r;
}
+ enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
- complete(&mhi_dev_ctxt->cmd_complete);
/*
* runtime_allow will decrement usage_count, counts were
@@ -508,7 +662,7 @@ static int process_amss_transition(
return 0;
}
-static int process_stt_work_item(
+void process_stt_work_item(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
@@ -520,7 +674,10 @@ static int process_stt_work_item(
trace_mhi_state(cur_work_item);
switch (cur_work_item) {
case STATE_TRANSITION_BHI:
- r = process_bhi_transition(mhi_dev_ctxt, cur_work_item);
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->mhi_state = MHI_STATE_BHI;
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
break;
case STATE_TRANSITION_RESET:
r = process_reset_transition(mhi_dev_ctxt, cur_work_item);
@@ -529,13 +686,34 @@ static int process_stt_work_item(
r = process_ready_transition(mhi_dev_ctxt, cur_work_item);
break;
case STATE_TRANSITION_SBL:
- r = process_sbl_transition(mhi_dev_ctxt, cur_work_item);
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_SBL;
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
break;
case STATE_TRANSITION_AMSS:
r = process_amss_transition(mhi_dev_ctxt, cur_work_item);
break;
case STATE_TRANSITION_BHIE:
- r = process_bhie_transition(mhi_dev_ctxt, cur_work_item);
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_BHIE;
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ wake_up(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
+ break;
+ case STATE_TRANSITION_RDDM:
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_RDDM;
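+		/* Wake vote is not needed while the device is in RDDM */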
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ complete(&mhi_dev_ctxt->cmd_complete);
+
+		/* Notify the bus master that the device entered RDDM mode */
+ if (!mhi_dev_ctxt->core.pci_master) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Notifying bus master RDDM Status\n");
+ mhi_dev_ctxt->status_cb(MHI_CB_RDDM,
+ mhi_dev_ctxt->priv_data);
+ }
break;
default:
mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
@@ -543,12 +721,11 @@ static int process_stt_work_item(
state_transition_str(cur_work_item));
break;
}
- return r;
}
void mhi_state_change_worker(struct work_struct *work)
{
- int r = 0;
+ int r;
struct mhi_device_ctxt *mhi_dev_ctxt = container_of(work,
struct mhi_device_ctxt,
st_thread_worker);
@@ -564,7 +741,7 @@ void mhi_state_change_worker(struct work_struct *work)
MHI_ASSERT(r == 0,
"Failed to delete element from STT workqueue\n");
spin_unlock_irq(work_q->q_lock);
- r = process_stt_work_item(mhi_dev_ctxt, cur_work_item);
+ process_stt_work_item(mhi_dev_ctxt, cur_work_item);
}
}
diff --git a/drivers/platform/msm/mhi/mhi_sys.c b/drivers/platform/msm/mhi/mhi_sys.c
index 3389de2f95b3..1d9282627d4e 100644
--- a/drivers/platform/msm/mhi/mhi_sys.c
+++ b/drivers/platform/msm/mhi/mhi_sys.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -35,15 +35,15 @@ module_param(mhi_ipc_log_lvl, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(mhi_ipc_log_lvl, "dbg lvl");
const char * const mhi_states_str[MHI_STATE_LIMIT] = {
- "RESET",
- "READY",
- "M0",
- "M1",
- "M2",
- "M3",
+ [MHI_STATE_RESET] = "RESET",
+ [MHI_STATE_READY] = "READY",
+ [MHI_STATE_M0] = "M0",
+ [MHI_STATE_M1] = "M1",
+ [MHI_STATE_M2] = "M2",
+ [MHI_STATE_M3] = "M3",
"Reserved: 0x06",
- "BHI",
- "SYS_ERR",
+ [MHI_STATE_BHI] = "BHI",
+ [MHI_STATE_SYS_ERR] = "SYS_ERR",
};
static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,