author     Siddartha Mohanadoss <smohanad@codeaurora.org>   2016-04-07 14:59:33 -0700
committer  Jeevan Shriram <jshriram@codeaurora.org>         2016-04-13 11:12:11 -0700
commit     4cb670d8db1073c3076d93e88ea299d94c81189e (patch)
tree       81cface98f8947d077bec684898fd5a29a6ee936
parent     00eb45fc95b968b9fa0f4228106c18a9efd6464b (diff)
msm: mhi_dev: Add MHI device driver
The Modem Host Interface (MHI) device driver lets clients exchange control
and data packets, such as IP data, control messages and diagnostic data,
between the host and the device. It follows the MHI specification for data
transfer. The driver interfaces with the IPA driver for hardware-accelerated
channels and with the PCIe endpoint driver for communication between the
host and the device. It exposes generic IO read/write/open/close system
calls to userspace and kernel APIs to kernel clients for communicating and
transferring data between the host and the device.

Change-Id: I64990a972cbf7c2022d638c35f7517071de67f19
Signed-off-by: Siddartha Mohanadoss <smohanad@codeaurora.org>
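As a rough illustration of the userspace interface described above, the sketch
below exercises one of the software channels exposed by mhi_uci.c through the
generic open/read/write/close calls. It is a minimal sketch only: the device
node name "/dev/mhi_pipe_14" is an assumption made for this example, not a
name defined by this patch.

    /* Hypothetical userspace usage of an MHI software channel.
     * The node name "/dev/mhi_pipe_14" is assumed for illustration;
     * the real name depends on how mhi_uci.c registers its devices.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
    	char rx[4096];
    	const char tx[] = "ping";
    	ssize_t n;
    	int fd = open("/dev/mhi_pipe_14", O_RDWR);

    	if (fd < 0) {
    		perror("open");
    		return 1;
    	}

    	/* Queue a packet for the host on the outbound channel */
    	if (write(fd, tx, strlen(tx)) < 0)
    		perror("write");

    	/* Block until the host queues data on the inbound channel */
    	n = read(fd, rx, sizeof(rx));
    	if (n > 0)
    		printf("received %zd bytes from host\n", n);

    	close(fd);
    	return 0;
    }

Kernel clients would instead use the exported mhi_dev_open_channel(),
mhi_dev_read_channel() and mhi_dev_write_channel() APIs added in mhi.c.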
-rw-r--r--  Documentation/devicetree/bindings/mhi/msm_mhi_dev.txt |   34
-rw-r--r--  drivers/platform/msm/Kconfig                          |   10
-rw-r--r--  drivers/platform/msm/Makefile                         |    1
-rw-r--r--  drivers/platform/msm/mhi_dev/Makefile                 |    6
-rw-r--r--  drivers/platform/msm/mhi_dev/mhi.c                    | 1952
-rw-r--r--  drivers/platform/msm/mhi_dev/mhi.h                    | 1126
-rw-r--r--  drivers/platform/msm/mhi_dev/mhi_hwio.h               |  191
-rw-r--r--  drivers/platform/msm/mhi_dev/mhi_mmio.c               |  999
-rw-r--r--  drivers/platform/msm/mhi_dev/mhi_ring.c               |  438
-rw-r--r--  drivers/platform/msm/mhi_dev/mhi_sm.c                 | 1319
-rw-r--r--  drivers/platform/msm/mhi_dev/mhi_sm.h                 |   51
-rw-r--r--  drivers/platform/msm/mhi_dev/mhi_uci.c                |  835
12 files changed, 6962 insertions, 0 deletions
diff --git a/Documentation/devicetree/bindings/mhi/msm_mhi_dev.txt b/Documentation/devicetree/bindings/mhi/msm_mhi_dev.txt
new file mode 100644
index 000000000000..49d33a3c4440
--- /dev/null
+++ b/Documentation/devicetree/bindings/mhi/msm_mhi_dev.txt
@@ -0,0 +1,34 @@
+MSM MHI DEV
+
+MSM MHI DEV enables communication with the host over a PCIe link using the
+Modem Host Interface protocol. The driver interfaces with the IPA for
+enabling the HW-accelerated channel path and provides an interface for
+software channels to communicate between the host and the device.
+
+Required properties:
+ - compatible: should be "qcom,msm-mhi-dev" for MHI device driver.
+ - reg: MHI MMIO physical register space.
+ - reg-names: resource names used for the MHI MMIO physical address region,
+ IPA uC command and event ring doorbell mailbox addresses.
+ Should be "mhi_mmio_base" for MHI MMIO physical address,
+ "ipa_uc_mbox_crdb" for IPA uC Command Ring doorbell,
+ "ipa_uc_mbox_erdb" for IPA uC Event Ring doorbell passed to
+ the IPA driver.
+ - qcom,mhi-ifc-id: ID of HW interface via which MHI on device side
+ communicates with host side.
+ - qcom,mhi-ep-msi: End point MSI number.
+ - qcom,mhi-version: MHI specification version supported by the device.
+
+Example:
+
+ mhi: qcom,msm-mhi-dev {
+ compatible = "qcom,msm-mhi-dev";
+ reg = <0xfc527000 0x1000>,
+ <0xfd4fa000 0x1>,
+ <0xfd4fa080 0x1>;
+ reg-names = "mhi_mmio_base", "ipa_uc_mbox_crdb",
+ "ipa_uc_mbox_erdb";
+ qcom,mhi-ifc-id = <0x030017cb>;
+ qcom,mhi-ep-msi = <1>;
+ qcom,mhi-version = <0x1000000>;
+ };
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 19510e5c2279..18ae7fa5454b 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -171,6 +171,16 @@ config MSM_MHI_DEBUG
throughput as individual MHI packets and state transitions
will be logged.
+config MSM_MHI_DEV
+ tristate "Modem Device Interface Driver"
+ depends on EP_PCIE && IPA
+ help
+ This kernel module interacts with a PCIe root complex that
+ supports the MHI protocol. MHI is a data transmission protocol
+ involving communication between a host and a device over shared
+ memory. The driver interacts with the IPA to support transfers
+ on the HW-accelerated channels between the host and the device.
+
config MSM_11AD
tristate "Platform driver for 11ad chip"
depends on PCI
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index c33f5e53c1b3..d5e87c209c21 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_MSM_11AD) += msm_11ad/
obj-$(CONFIG_SEEMP_CORE) += seemp_core/
obj-$(CONFIG_SSM) += ssm.o
obj-$(CONFIG_USB_BAM) += usb_bam.o
+obj-$(CONFIG_MSM_MHI_DEV) += mhi_dev/
diff --git a/drivers/platform/msm/mhi_dev/Makefile b/drivers/platform/msm/mhi_dev/Makefile
new file mode 100644
index 000000000000..c1969e20426d
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/Makefile
@@ -0,0 +1,6 @@
+# Makefile for MHI driver
+obj-y += mhi_mmio.o
+obj-y += mhi.o
+obj-y += mhi_ring.o
+obj-y += mhi_uci.o
+obj-y += mhi_sm.o
diff --git a/drivers/platform/msm/mhi_dev/mhi.c b/drivers/platform/msm/mhi_dev/mhi.c
new file mode 100644
index 000000000000..142263be23aa
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/mhi.c
@@ -0,0 +1,1952 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/msm_ep_pcie.h>
+#include <linux/ipa.h>
+#include <linux/vmalloc.h>
+
+#include "mhi.h"
+#include "mhi_hwio.h"
+#include "mhi_sm.h"
+
+/* Wait time on the device for Host to set M0 state */
+#define MHI_M0_WAIT_MIN_USLEEP 20000000
+#define MHI_M0_WAIT_MAX_USLEEP 25000000
+#define MHI_DEV_M0_MAX_CNT 30
+/* Wait time before suspend/resume is complete */
+#define MHI_SUSPEND_WAIT_MIN 3100
+#define MHI_SUSPEND_WAIT_MAX 3200
+#define MHI_SUSPEND_WAIT_TIMEOUT 500
+#define MHI_MASK_CH_EV_LEN 32
+#define MHI_RING_CMD_ID 0
+#define MHI_RING_PRIMARY_EVT_ID 1
+#define MHI_1K_SIZE 0x1000
+/* Per the updated specification, HW-accelerated event rings span NER - 2 to NER - 1 */
+#define MHI_HW_ACC_EVT_RING_START 2
+#define MHI_HW_ACC_EVT_RING_END 1
+
+#define MHI_HOST_REGION_NUM 2
+
+#define MHI_MMIO_CTRL_INT_STATUS_A7_MSK 0x1
+#define MHI_MMIO_CTRL_CRDB_STATUS_MSK 0x2
+
+#define HOST_ADDR(lsb, msb) ((lsb) | ((uint64_t)(msb) << 32))
+#define HOST_ADDR_LSB(addr) (addr & 0xFFFFFFFF)
+#define HOST_ADDR_MSB(addr) ((addr >> 32) & 0xFFFFFFFF)
+
+#define MHI_IPC_LOG_PAGES (100)
+enum mhi_msg_level mhi_msg_lvl = MHI_MSG_ERROR;
+enum mhi_msg_level mhi_ipc_msg_lvl = MHI_MSG_VERBOSE;
+void *mhi_ipc_log;
+
+static struct mhi_dev *mhi_ctx;
+static void mhi_hwc_cb(void *priv, enum ipa_mhi_event_type event,
+ unsigned long data);
+static void mhi_ring_init_cb(void *user_data);
+
+void mhi_dev_read_from_host(struct mhi_addr *host, dma_addr_t dev, size_t size)
+{
+ int rc = 0;
+ uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0;
+
+ host_addr_pa = ((u64) host->host_pa) | bit_40;
+
+ mhi_log(MHI_MSG_ERROR, "device 0x%x <<-- host 0x%llx, size %d\n",
+ dev, host_addr_pa, size);
+
+ rc = ipa_dma_sync_memcpy((u64) dev, host_addr_pa, (int) size);
+ if (rc)
+ pr_err("error while reading from host:%d\n", rc);
+}
+EXPORT_SYMBOL(mhi_dev_read_from_host);
+
+void mhi_dev_write_to_host(struct mhi_addr *host, void *dev, size_t size,
+ struct mhi_dev *mhi)
+{
+ int rc = 0;
+ uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0;
+
+ if (!mhi) {
+ pr_err("invalid MHI ctx\n");
+ return;
+ }
+
+ host_addr_pa = ((u64) host->host_pa) | bit_40;
+ /* Copy the device content to a local device physical address */
+ memcpy(mhi->dma_cache, dev, size);
+ mhi_log(MHI_MSG_ERROR, "device 0x%llx --> host 0x%llx, size %d\n",
+ (uint64_t) mhi->cache_dma_handle, host_addr_pa, (int) size);
+
+ rc = ipa_dma_sync_memcpy(host_addr_pa, (u64) mhi->cache_dma_handle,
+ (int) size);
+ if (rc)
+ pr_err("error while writing to host:%d\n", rc);
+}
+EXPORT_SYMBOL(mhi_dev_write_to_host);
+
+int mhi_transfer_host_to_device(void *dev, uint64_t host_pa, uint32_t len,
+ struct mhi_dev *mhi)
+{
+ int rc = 0;
+ uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0;
+
+ if (!mhi) {
+ pr_err("Invalid mhi device\n");
+ return -EINVAL;
+ }
+
+ if (!dev) {
+ pr_err("Invalid virt device\n");
+ return -EINVAL;
+ }
+
+ if (!host_pa) {
+ pr_err("Invalid host pa device\n");
+ return -EINVAL;
+ }
+
+ host_addr_pa = host_pa | bit_40;
+ mhi_log(MHI_MSG_ERROR, "device 0x%llx <-- host 0x%llx, size %d\n",
+ (uint64_t) mhi->read_dma_handle, host_addr_pa, (int) len);
+ rc = ipa_dma_sync_memcpy((u64) mhi->read_dma_handle,
+ host_addr_pa, (int) len);
+ if (rc) {
+ pr_err("error while reading from host:%d\n", rc);
+ return rc;
+ }
+
+ memcpy(dev, mhi->read_handle, len);
+
+ return rc;
+}
+EXPORT_SYMBOL(mhi_transfer_host_to_device);
+
+int mhi_transfer_device_to_host(uint64_t host_addr, void *dev, uint32_t len,
+ struct mhi_dev *mhi)
+{
+ int rc = 0;
+ uint64_t bit_40 = ((u64) 1) << 40, host_addr_pa = 0;
+
+ if (!mhi || !dev || !host_addr) {
+ pr_err("%s: Invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ host_addr_pa = host_addr | bit_40;
+ memcpy(mhi->write_handle, dev, len);
+
+ mhi_log(MHI_MSG_ERROR, "device 0x%llx ---> host 0x%llx, size %d\n",
+ (uint64_t) mhi->write_dma_handle, host_addr_pa, (int) len);
+ rc = ipa_dma_sync_memcpy(host_addr_pa,
+ (u64) mhi->write_dma_handle,
+ (int) len);
+ if (rc)
+ pr_err("error while writing to host:%d\n", rc);
+
+ return rc;
+}
+EXPORT_SYMBOL(mhi_transfer_device_to_host);
+
+int mhi_dev_is_list_empty(void)
+{
+
+ if (list_empty(&mhi_ctx->event_ring_list) &&
+ list_empty(&mhi_ctx->process_ring_list))
+ return 0;
+ else
+ return 1;
+}
+EXPORT_SYMBOL(mhi_dev_is_list_empty);
+
+static void mhi_dev_get_erdb_db_cfg(struct mhi_dev *mhi,
+ struct ep_pcie_db_config *erdb_cfg)
+{
+ switch (mhi->cfg.event_rings) {
+ case NUM_CHANNELS:
+ erdb_cfg->base = HW_CHANNEL_BASE;
+ erdb_cfg->end = HW_CHANNEL_END;
+ break;
+ default:
+ erdb_cfg->base = mhi->cfg.event_rings -
+ MHI_HW_ACC_EVT_RING_START;
+ erdb_cfg->end = mhi->cfg.event_rings -
+ MHI_HW_ACC_EVT_RING_END;
+ break;
+ }
+}
+
+int mhi_pcie_config_db_routing(struct mhi_dev *mhi)
+{
+ int rc = 0;
+ struct ep_pcie_db_config chdb_cfg, erdb_cfg;
+
+ if (!mhi) {
+ pr_err("Invalid MHI context\n");
+ return -EINVAL;
+ }
+
+ /* Configure Doorbell routing */
+ chdb_cfg.base = HW_CHANNEL_BASE;
+ chdb_cfg.end = HW_CHANNEL_END;
+ chdb_cfg.tgt_addr = (uint32_t) mhi->ipa_uc_mbox_crdb;
+
+ mhi_dev_get_erdb_db_cfg(mhi, &erdb_cfg);
+
+ mhi_log(MHI_MSG_ERROR,
+ "Event rings 0x%x => er_base 0x%x, er_end %d\n",
+ mhi->cfg.event_rings, erdb_cfg.base, erdb_cfg.end);
+ erdb_cfg.tgt_addr = (uint32_t) mhi->ipa_uc_mbox_erdb;
+ ep_pcie_config_db_routing(mhi_ctx->phandle, chdb_cfg, erdb_cfg);
+
+ return rc;
+}
+EXPORT_SYMBOL(mhi_pcie_config_db_routing);
+
+static int mhi_hwc_init(struct mhi_dev *mhi)
+{
+ int rc = 0;
+ struct ep_pcie_msi_config cfg;
+ struct ipa_mhi_init_params ipa_init_params;
+ struct ep_pcie_db_config erdb_cfg;
+
+ /* Call IPA HW_ACC Init with MSI Address and db routing info */
+ rc = ep_pcie_get_msi_config(mhi_ctx->phandle, &cfg);
+ if (rc) {
+ pr_err("Error retrieving PCIe MSI config\n");
+ return rc;
+ }
+
+ rc = mhi_pcie_config_db_routing(mhi);
+ if (rc) {
+ pr_err("Error configuring DB routing\n");
+ return rc;
+ }
+
+ mhi_dev_get_erdb_db_cfg(mhi, &erdb_cfg);
+ mhi_log(MHI_MSG_ERROR,
+ "Event rings 0x%x => er_base 0x%x, er_end %d\n",
+ mhi->cfg.event_rings, erdb_cfg.base, erdb_cfg.end);
+
+ erdb_cfg.tgt_addr = (uint32_t) mhi->ipa_uc_mbox_erdb;
+ memset(&ipa_init_params, 0, sizeof(ipa_init_params));
+ ipa_init_params.msi.addr_hi = cfg.upper;
+ ipa_init_params.msi.addr_low = cfg.lower;
+ ipa_init_params.msi.data = cfg.data;
+ ipa_init_params.msi.mask = ((1 << cfg.msg_num) - 1);
+ ipa_init_params.first_er_idx = erdb_cfg.base;
+ ipa_init_params.first_ch_idx = HW_CHANNEL_BASE;
+ ipa_init_params.mmio_addr = ((uint32_t) mhi_ctx->mmio_base_pa_addr);
+ ipa_init_params.assert_bit40 = true;
+
+ mhi_log(MHI_MSG_ERROR,
+ "MMIO Addr 0x%x, MSI config: U:0x%x L: 0x%x D: 0x%x\n",
+ ipa_init_params.mmio_addr, cfg.upper, cfg.lower, cfg.data);
+ ipa_init_params.notify = mhi_hwc_cb;
+ ipa_init_params.priv = mhi;
+
+ rc = ipa_mhi_init(&ipa_init_params);
+ if (rc) {
+ pr_err("Error initializing IPA\n");
+ return rc;
+ }
+
+ return rc;
+}
+
+static int mhi_hwc_start(struct mhi_dev *mhi)
+{
+ int rc = 0;
+ struct ipa_mhi_start_params ipa_start_params;
+
+ memset(&ipa_start_params, 0, sizeof(ipa_start_params));
+
+ ipa_start_params.channel_context_array_addr =
+ mhi->ch_ctx_shadow.host_pa;
+ ipa_start_params.event_context_array_addr =
+ mhi->ev_ctx_shadow.host_pa;
+
+ rc = ipa_mhi_start(&ipa_start_params);
+ if (rc)
+ pr_err("Error starting IPA (rc = 0x%X)\n", rc);
+
+ return rc;
+}
+
+static void mhi_hwc_cb(void *priv, enum ipa_mhi_event_type event,
+ unsigned long data)
+{
+ int rc = 0;
+
+ switch (event) {
+ case IPA_MHI_EVENT_READY:
+ mhi_log(MHI_MSG_ERROR,
+ "HW Channel uC is ready event=0x%X\n", event);
+ rc = mhi_hwc_start(mhi_ctx);
+ if (rc) {
+ pr_err("hwc_init start failed with %d\n", rc);
+ return;
+ }
+
+ rc = mhi_dev_mmio_enable_chdb_interrupts(mhi_ctx);
+ if (rc) {
+ pr_err("Failed to enable channel db\n");
+ return;
+ }
+
+ rc = mhi_dev_mmio_enable_ctrl_interrupt(mhi_ctx);
+ if (rc) {
+ pr_err("Failed to enable control interrupt\n");
+ return;
+ }
+
+ rc = mhi_dev_mmio_enable_cmdb_interrupt(mhi_ctx);
+
+ if (rc) {
+ pr_err("Failed to enable command db\n");
+ return;
+ }
+ break;
+ case IPA_MHI_EVENT_DATA_AVAILABLE:
+ rc = mhi_dev_notify_sm_event(MHI_DEV_EVENT_HW_ACC_WAKEUP);
+ if (rc) {
+ pr_err("Event HW_ACC_WAKEUP failed with %d\n", rc);
+ return;
+ }
+ break;
+ default:
+ pr_err("HW Channel uC unknown event 0x%X\n", event);
+ break;
+ }
+}
+
+static int mhi_hwc_chcmd(struct mhi_dev *mhi, uint chid,
+ enum mhi_dev_ring_element_type_id type)
+{
+ int rc = 0;
+ struct ipa_mhi_connect_params connect_params;
+
+ memset(&connect_params, 0, sizeof(connect_params));
+
+ switch (type) {
+ case MHI_DEV_RING_EL_STOP:
+ rc = ipa_mhi_disconnect_pipe(
+ mhi->ipa_clnt_hndl[chid-HW_CHANNEL_BASE]);
+ if (rc)
+ pr_err("Stopping HW Channel%d failed 0x%X\n",
+ chid, rc);
+ break;
+ case MHI_DEV_RING_EL_START:
+ connect_params.channel_id = chid;
+ connect_params.sys.skip_ep_cfg = true;
+ if ((chid % 2) == 0x0)
+ connect_params.sys.client = IPA_CLIENT_MHI_PROD;
+ else
+ connect_params.sys.client = IPA_CLIENT_MHI_CONS;
+
+ rc = ipa_mhi_connect_pipe(&connect_params,
+ &mhi->ipa_clnt_hndl[chid-HW_CHANNEL_BASE]);
+ if (rc)
+ pr_err("HW Channel%d start failed 0x%X\n",
+ chid, rc);
+ break;
+ case MHI_DEV_RING_EL_INVALID:
+ default:
+ pr_err("Invalid Ring Element type = 0x%X\n", type);
+ break;
+ }
+
+ return rc;
+}
+
+static void mhi_dev_core_ack_ctrl_interrupts(struct mhi_dev *dev,
+ uint32_t *int_value)
+{
+ int rc = 0;
+
+ rc = mhi_dev_mmio_read(dev, MHI_CTRL_INT_STATUS_A7, int_value);
+ if (rc) {
+ pr_err("Failed to read A7 status\n");
+ return;
+ }
+
+ rc = mhi_dev_mmio_write(dev, MHI_CTRL_INT_CLEAR_A7, *int_value);
+ if (rc) {
+ pr_err("Failed to clear A7 status\n");
+ return;
+ }
+}
+
+static void mhi_dev_fetch_ch_ctx(struct mhi_dev *mhi, uint32_t ch_id)
+{
+ struct mhi_addr addr;
+
+ addr.host_pa = mhi->ch_ctx_shadow.host_pa +
+ sizeof(struct mhi_dev_ch_ctx) * ch_id;
+ addr.size = sizeof(struct mhi_dev_ch_ctx);
+ /* Fetch the channel ctx (*dst, *src, size) */
+ mhi_dev_read_from_host(&addr, mhi->ch_ctx_cache_dma_handle +
+ (sizeof(struct mhi_dev_ch_ctx) * ch_id),
+ sizeof(struct mhi_dev_ch_ctx));
+}
+
+int mhi_dev_syserr(struct mhi_dev *mhi)
+{
+
+ if (!mhi) {
+ pr_err("%s: Invalid MHI ctx\n", __func__);
+ return -EINVAL;
+ }
+
+ mhi_dev_dump_mmio(mhi);
+ pr_err("MHI dev sys error\n");
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_syserr);
+
+int mhi_dev_send_event(struct mhi_dev *mhi, int evnt_ring,
+ union mhi_dev_ring_element_type *el)
+{
+ int rc = 0;
+ uint64_t evnt_ring_idx = mhi->ev_ring_start + evnt_ring;
+ struct mhi_dev_ring *ring = &mhi->ring[evnt_ring_idx];
+ union mhi_dev_ring_ctx *ctx;
+ struct ep_pcie_msi_config cfg;
+ struct mhi_addr msi_addr;
+ uint32_t msi = 0;
+ struct mhi_addr host_rp_addr;
+
+ rc = ep_pcie_get_msi_config(mhi->phandle,
+ &cfg);
+ if (rc) {
+ pr_err("Error retrieving PCIe MSI config\n");
+ return rc;
+ }
+
+ if (evnt_ring_idx > mhi->cfg.event_rings) {
+ pr_err("Invalid event ring idx: %lld\n", evnt_ring_idx);
+ return -EINVAL;
+ }
+
+ if (mhi_ring_get_state(ring) == RING_STATE_UINT) {
+ ctx = (union mhi_dev_ring_ctx *)&mhi->ev_ctx_cache[evnt_ring];
+ rc = mhi_ring_start(ring, ctx, mhi);
+ if (rc) {
+ mhi_log(MHI_MSG_ERROR,
+ "error starting event ring %d\n", evnt_ring);
+ return rc;
+ }
+ }
+
+ mutex_lock(&mhi->mhi_event_lock);
+ /* add the ring element */
+ mhi_dev_add_element(ring, el);
+
+ ring->ring_ctx_shadow->ev.rp = (ring->rd_offset *
+ sizeof(union mhi_dev_ring_element_type)) +
+ ring->ring_ctx->generic.rbase;
+
+ mhi_log(MHI_MSG_ERROR, "ev.rp = %llx for %lld\n",
+ ring->ring_ctx_shadow->ev.rp, evnt_ring_idx);
+
+ host_rp_addr.host_pa = (mhi->ev_ctx_shadow.host_pa +
+ sizeof(struct mhi_dev_ev_ctx) *
+ evnt_ring) + (uint32_t) &ring->ring_ctx->ev.rp -
+ (uint32_t) ring->ring_ctx;
+ mhi_dev_write_to_host(&host_rp_addr, &ring->ring_ctx_shadow->ev.rp,
+ sizeof(uint64_t),
+ mhi);
+
+ /*
+ * rp update in host memory should be flushed
+ * before sending a MSI to the host
+ */
+ wmb();
+
+ mutex_unlock(&mhi->mhi_event_lock);
+ mhi_log(MHI_MSG_ERROR, "event sent:\n");
+ mhi_log(MHI_MSG_ERROR, "evnt ptr : 0x%llx\n", el->evt_tr_comp.ptr);
+ mhi_log(MHI_MSG_ERROR, "evnt len : 0x%x\n", el->evt_tr_comp.len);
+ mhi_log(MHI_MSG_ERROR, "evnt code :0x%x\n", el->evt_tr_comp.code);
+ mhi_log(MHI_MSG_ERROR, "evnt type :0x%x\n", el->evt_tr_comp.type);
+ mhi_log(MHI_MSG_ERROR, "evnt chid :0x%x\n", el->evt_tr_comp.chid);
+
+ msi_addr.host_pa = (uint64_t)((uint64_t)cfg.upper << 32) |
+ (uint64_t)cfg.lower;
+ msi = cfg.data + mhi_ctx->mhi_ep_msi_num;
+ mhi_log(MHI_MSG_ERROR, "Sending MSI %d to 0x%llx as data = 0x%x\n",
+ mhi_ctx->mhi_ep_msi_num, msi_addr.host_pa, msi);
+ mhi_dev_write_to_host(&msi_addr, &msi, 4, mhi);
+
+ return rc;
+}
+
+static int mhi_dev_send_completion_event(struct mhi_dev_channel *ch,
+ uint32_t rd_ofst, uint32_t len,
+ enum mhi_dev_cmd_completion_code code)
+{
+ int rc = 0;
+ union mhi_dev_ring_element_type compl_event;
+ struct mhi_dev *mhi = ch->ring->mhi_dev;
+
+ compl_event.evt_tr_comp.chid = ch->ch_id;
+ compl_event.evt_tr_comp.type =
+ MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT;
+ compl_event.evt_tr_comp.len = len;
+ compl_event.evt_tr_comp.code = code;
+ compl_event.evt_tr_comp.ptr = ch->ring->ring_ctx->generic.rbase +
+ rd_ofst * sizeof(struct mhi_dev_transfer_ring_element);
+
+ rc = mhi_dev_send_event(mhi,
+ mhi->ch_ctx_cache[ch->ch_id].err_indx, &compl_event);
+
+ return rc;
+}
+
+int mhi_dev_send_state_change_event(struct mhi_dev *mhi,
+ enum mhi_dev_state state)
+{
+ union mhi_dev_ring_element_type event;
+ int rc = 0;
+
+ event.evt_state_change.type = MHI_DEV_RING_EL_MHI_STATE_CHG;
+ event.evt_state_change.mhistate = state;
+
+ rc = mhi_dev_send_event(mhi, 0, &event);
+ if (rc) {
+ pr_err("Sending state change event failed\n");
+ return rc;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(mhi_dev_send_state_change_event);
+
+int mhi_dev_send_ee_event(struct mhi_dev *mhi, enum mhi_dev_execenv exec_env)
+{
+ union mhi_dev_ring_element_type event;
+ int rc = 0;
+
+ event.evt_ee_state.type = MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY;
+ event.evt_ee_state.execenv = exec_env;
+
+ rc = mhi_dev_send_event(mhi, 0, &event);
+ if (rc) {
+ pr_err("Sending EE change event failed\n");
+ return rc;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(mhi_dev_send_ee_event);
+
+int mhi_dev_trigger_hw_acc_wakeup(struct mhi_dev *mhi)
+{
+ int rc = 0;
+
+ /*
+ * Expected usage: when there is HW ACC traffic, the IPA uC notifies
+ * Q6 -> IPA A7 -> MHI core -> MHI SM
+ */
+ rc = mhi_dev_notify_sm_event(MHI_DEV_EVENT_HW_ACC_WAKEUP);
+ if (rc) {
+ pr_err("error sending SM event\n");
+ return rc;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(mhi_dev_trigger_hw_acc_wakeup);
+
+static int mhi_dev_send_cmd_comp_event(struct mhi_dev *mhi)
+{
+ int rc = 0;
+ union mhi_dev_ring_element_type event;
+
+ /* send the command completion event to the host */
+ event.evt_cmd_comp.ptr = mhi->cmd_ctx_cache->rbase
+ + (mhi->ring[MHI_RING_CMD_ID].rd_offset *
+ (sizeof(union mhi_dev_ring_element_type)));
+ mhi_log(MHI_MSG_ERROR, "evt cmd comp ptr :%d\n",
+ (uint32_t) event.evt_cmd_comp.ptr);
+ event.evt_cmd_comp.type = MHI_DEV_RING_EL_CMD_COMPLETION_EVT;
+ event.evt_cmd_comp.code = MHI_CMD_COMPL_CODE_SUCCESS;
+
+ rc = mhi_dev_send_event(mhi, 0, &event);
+ if (rc)
+ pr_err("sending command completion event failed\n");
+
+ return rc;
+}
+
+static int mhi_dev_process_stop_cmd(struct mhi_dev_ring *ring, uint32_t ch_id,
+ struct mhi_dev *mhi)
+{
+ int rc = 0;
+ struct mhi_addr host_addr;
+
+ if (ring->rd_offset != ring->wr_offset &&
+ mhi->ch_ctx_cache[ch_id].ch_type ==
+ MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL) {
+ mhi_log(MHI_MSG_INFO, "Pending transaction to be processed\n");
+ return 0;
+ } else if (mhi->ch_ctx_cache[ch_id].ch_type ==
+ MHI_DEV_CH_TYPE_INBOUND_CHANNEL &&
+ mhi->ch[ch_id].wr_request_active) {
+ return 0;
+ }
+
+ /* set the channel to stop */
+ mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_STOP;
+
+ host_addr.host_pa = mhi->ch_ctx_shadow.host_pa +
+ sizeof(struct mhi_dev_ch_ctx) * ch_id;
+ /* update the channel state in the host */
+ mhi_dev_write_to_host(&host_addr, &mhi->ch_ctx_cache[ch_id].ch_state,
+ sizeof(enum mhi_dev_ch_ctx_state), mhi);
+
+ /* send the completion event to the host */
+ rc = mhi_dev_send_cmd_comp_event(mhi);
+ if (rc)
+ pr_err("Error sending command completion event\n");
+
+ return rc;
+}
+
+static void mhi_dev_process_cmd_ring(struct mhi_dev *mhi,
+ union mhi_dev_ring_element_type *el, void *ctx)
+{
+ int rc = 0;
+ uint32_t ch_id = 0;
+ union mhi_dev_ring_element_type event;
+ struct mhi_addr host_addr;
+
+ ch_id = el->generic.chid;
+ mhi_log(MHI_MSG_ERROR, "for channel:%d and cmd:%d\n",
+ ch_id, el->generic.type);
+
+ switch (el->generic.type) {
+ case MHI_DEV_RING_EL_START:
+ mhi_log(MHI_MSG_ERROR, "received start cmd for channel %d\n",
+ ch_id);
+ if (ch_id >= (HW_CHANNEL_BASE)) {
+ rc = mhi_hwc_chcmd(mhi, ch_id, el->generic.type);
+ if (rc) {
+ pr_err("Error with HW channel cmd :%d\n", rc);
+ return;
+ }
+ goto send_start_completion_event;
+ }
+
+ /* fetch the channel context from host */
+ mhi_dev_fetch_ch_ctx(mhi, ch_id);
+
+ /* Initialize and configure the corresponding channel ring */
+ rc = mhi_ring_start(&mhi->ring[mhi->ch_ring_start + ch_id],
+ (union mhi_dev_ring_ctx *)&mhi->ch_ctx_cache[ch_id],
+ mhi);
+ if (rc) {
+ mhi_log(MHI_MSG_ERROR,
+ "start ring failed for ch %d\n", ch_id);
+ return;
+ }
+
+ mhi->ring[mhi->ch_ring_start + ch_id].state =
+ RING_STATE_PENDING;
+
+ /* set the channel to running */
+ mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_RUNNING;
+ mhi->ch[ch_id].ch_id = ch_id;
+ mhi->ch[ch_id].ring = &mhi->ring[mhi->ch_ring_start + ch_id];
+ mhi->ch[ch_id].ch_type = mhi->ch_ctx_cache[ch_id].ch_type;
+
+ /* enable doorbell interrupts for the channel */
+ rc = mhi_dev_mmio_enable_chdb_a7(mhi, ch_id);
+ if (rc) {
+ pr_err("Failed to enable channel db\n");
+ return;
+ }
+
+ host_addr.host_pa = mhi->ch_ctx_shadow.host_pa +
+ sizeof(struct mhi_dev_ch_ctx) * ch_id;
+ mhi_dev_write_to_host(&host_addr,
+ &mhi->ch_ctx_cache[ch_id].ch_state,
+ sizeof(enum mhi_dev_ch_ctx_state), mhi);
+
+send_start_completion_event:
+ rc = mhi_dev_send_cmd_comp_event(mhi);
+ if (rc)
+ pr_err("Error sending command completion event\n");
+
+ break;
+ case MHI_DEV_RING_EL_STOP:
+ if (ch_id >= HW_CHANNEL_BASE) {
+ rc = mhi_hwc_chcmd(mhi, ch_id, el->generic.type);
+ if (rc) {
+ mhi_log(MHI_MSG_ERROR,
+ "send channel stop cmd event failed\n");
+ return;
+ }
+
+ /* send the completion event to the host */
+ event.evt_cmd_comp.ptr = mhi->cmd_ctx_cache->rbase +
+ (mhi->ring[MHI_RING_CMD_ID].rd_offset *
+ (sizeof(union mhi_dev_ring_element_type)));
+ event.evt_cmd_comp.type =
+ MHI_DEV_RING_EL_CMD_COMPLETION_EVT;
+ if (rc == 0)
+ event.evt_cmd_comp.code =
+ MHI_CMD_COMPL_CODE_SUCCESS;
+ else
+ event.evt_cmd_comp.code =
+ MHI_CMD_COMPL_CODE_UNDEFINED;
+
+ rc = mhi_dev_send_event(mhi, 0, &event);
+ if (rc) {
+ pr_err("stop event send failed\n");
+ return;
+ }
+ } else {
+ /*
+ * Mark the channel as pending stop and process the
+ * stop command: if there are no pending transactions
+ * on the ring, the stopped channel state is written
+ * to the host now; otherwise the stop completes once
+ * the pending transfer is done.
+ */
+ mhi->ch[ch_id].state = MHI_DEV_CH_PENDING_STOP;
+ rc = mhi_dev_process_stop_cmd(
+ &mhi->ring[mhi->ch_ring_start + ch_id],
+ ch_id, mhi);
+ if (rc) {
+ pr_err("stop event send failed\n");
+ return;
+ }
+ }
+ break;
+ case MHI_DEV_RING_EL_RESET:
+ /* hard stop and set the channel to stop */
+ mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_STOP;
+ host_addr.host_pa = mhi->ch_ctx_shadow.host_pa +
+ sizeof(struct mhi_dev_ch_ctx) * ch_id;
+
+ /* update the channel state in the host */
+ mhi_dev_write_to_host(&host_addr,
+ &mhi->ch_ctx_cache[ch_id].ch_state,
+ sizeof(enum mhi_dev_ch_ctx_state), mhi);
+
+ /* send the completion event to the host */
+ rc = mhi_dev_send_cmd_comp_event(mhi);
+ if (rc)
+ pr_err("Error sending command completion event\n");
+ break;
+ default:
+ pr_err("%s: Invalid command:%d\n", __func__, el->generic.type);
+ break;
+ }
+}
+
+static void mhi_dev_process_tre_ring(struct mhi_dev *mhi,
+ union mhi_dev_ring_element_type *el, void *ctx)
+{
+ struct mhi_dev_ring *ring = (struct mhi_dev_ring *)ctx;
+ struct mhi_dev_channel *ch;
+ struct mhi_dev_client_cb_reason reason;
+
+ if (ring->id < mhi->ch_ring_start) {
+ mhi_log(MHI_MSG_ERROR,
+ "invalid channel ring id (%d), should be < %d\n",
+ ring->id, mhi->ch_ring_start);
+ return;
+ }
+
+ ch = &mhi->ch[ring->id - mhi->ch_ring_start];
+ reason.ch_id = ch->ch_id;
+ reason.reason = MHI_DEV_TRE_AVAILABLE;
+
+ /* Invoke a callback to let the client know its data is ready.
+ * Copy this event to the client's context so that it can be
+ * sent out once the client has fetched the data. Update the rp
+ * before sending the data as part of the event completion
+ */
+ if (ch->active_client && ch->active_client->event_trigger != NULL)
+ ch->active_client->event_trigger(&reason);
+}
+
+static void mhi_dev_process_ring_pending(struct work_struct *work)
+{
+ struct mhi_dev *mhi = container_of(work,
+ struct mhi_dev, pending_work);
+ struct list_head *cp, *q;
+ struct mhi_dev_ring *ring;
+ struct mhi_dev_channel *ch;
+ int rc = 0;
+
+ mutex_lock(&mhi_ctx->mhi_lock);
+ rc = mhi_dev_process_ring(&mhi->ring[mhi->cmd_ring_idx]);
+ if (rc) {
+ mhi_log(MHI_MSG_ERROR, "error processing command ring\n");
+ goto exit;
+ }
+
+ list_for_each_safe(cp, q, &mhi->process_ring_list) {
+ ring = list_entry(cp, struct mhi_dev_ring, list);
+ list_del(cp);
+ mhi_log(MHI_MSG_ERROR, "processing ring %d\n", ring->id);
+ rc = mhi_dev_process_ring(ring);
+ if (rc) {
+ mhi_log(MHI_MSG_ERROR,
+ "error processing ring %d\n", ring->id);
+ goto exit;
+ }
+
+ if (ring->id < mhi->ch_ring_start) {
+ mhi_log(MHI_MSG_ERROR,
+ "ring (%d) is not a channel ring\n", ring->id);
+ goto exit;
+ }
+
+ ch = &mhi->ch[ring->id - mhi->ch_ring_start];
+ rc = mhi_dev_mmio_enable_chdb_a7(mhi, ch->ch_id);
+ if (rc) {
+ mhi_log(MHI_MSG_ERROR,
+ "error enabling chdb interrupt for %d\n", ch->ch_id);
+ goto exit;
+ }
+ }
+
+exit:
+ mutex_unlock(&mhi_ctx->mhi_lock);
+}
+
+static int mhi_dev_get_event_notify(enum mhi_dev_state state,
+ enum mhi_dev_event *event)
+{
+ int rc = 0;
+
+ switch (state) {
+ case MHI_DEV_M0_STATE:
+ *event = MHI_DEV_EVENT_M0_STATE;
+ break;
+ case MHI_DEV_M1_STATE:
+ *event = MHI_DEV_EVENT_M1_STATE;
+ break;
+ case MHI_DEV_M2_STATE:
+ *event = MHI_DEV_EVENT_M2_STATE;
+ break;
+ case MHI_DEV_M3_STATE:
+ *event = MHI_DEV_EVENT_M3_STATE;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static void mhi_dev_queue_channel_db(struct mhi_dev *mhi,
+ uint32_t chintr_value, uint32_t ch_num)
+{
+ struct mhi_dev_ring *ring;
+ int rc = 0;
+
+ for (; chintr_value; ch_num++, chintr_value >>= 1) {
+ if (chintr_value & 1) {
+ ring = &mhi->ring[ch_num + mhi->ch_ring_start];
+ if (ring->state == RING_STATE_UINT) {
+ pr_err("Channel not opened for %d\n", ch_num);
+ break;
+ }
+ mhi_ring_set_state(ring, RING_STATE_PENDING);
+ list_add(&ring->list, &mhi->process_ring_list);
+ rc = mhi_dev_mmio_disable_chdb_a7(mhi, ch_num);
+ if (rc) {
+ pr_err("Error disabling chdb\n");
+ return;
+ }
+ queue_work(mhi->pending_ring_wq, &mhi->pending_work);
+ }
+ }
+}
+
+static void mhi_dev_check_channel_interrupt(struct mhi_dev *mhi)
+{
+ int i, rc = 0;
+ uint32_t chintr_value = 0, ch_num = 0;
+
+ rc = mhi_dev_mmio_read_chdb_status_interrupts(mhi);
+ if (rc) {
+ pr_err("Failed to read channel db status\n");
+ return;
+ }
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
+ ch_num = i * MHI_MASK_CH_EV_LEN;
+ chintr_value = mhi->chdb[i].status;
+ if (chintr_value) {
+ mhi_log(MHI_MSG_ERROR,
+ "processing id: %d, ch interrupt 0x%x\n",
+ i, chintr_value);
+ mhi_dev_queue_channel_db(mhi, chintr_value, ch_num);
+ rc = mhi_dev_mmio_write(mhi, MHI_CHDB_INT_CLEAR_A7_n(i),
+ mhi->chdb[i].status);
+ if (rc) {
+ pr_err("Error writing interrupt clear for A7\n");
+ return;
+ }
+ }
+ }
+}
+
+static void mhi_dev_scheduler(struct work_struct *work)
+{
+ struct mhi_dev *mhi = container_of(work,
+ struct mhi_dev, chdb_ctrl_work);
+ int rc = 0;
+ uint32_t int_value = 0;
+ struct mhi_dev_ring *ring;
+ enum mhi_dev_state state;
+ enum mhi_dev_event event = 0;
+
+ mutex_lock(&mhi_ctx->mhi_lock);
+ /* Check for interrupts */
+ mhi_dev_core_ack_ctrl_interrupts(mhi, &int_value);
+
+ if (int_value & MHI_MMIO_CTRL_INT_STATUS_A7_MSK) {
+ mhi_log(MHI_MSG_ERROR,
+ "processing ctrl interrupt with %d\n", int_value);
+ rc = mhi_dev_mmio_get_mhi_state(mhi, &state);
+ if (rc) {
+ pr_err("%s: get mhi state failed\n", __func__);
+ mutex_unlock(&mhi_ctx->mhi_lock);
+ return;
+ }
+
+ rc = mhi_dev_get_event_notify(state, &event);
+ if (rc) {
+ pr_err("unsupported state :%d\n", state);
+ mutex_unlock(&mhi_ctx->mhi_lock);
+ return;
+ }
+
+ rc = mhi_dev_notify_sm_event(event);
+ if (rc) {
+ pr_err("error sending SM event\n");
+ mutex_unlock(&mhi_ctx->mhi_lock);
+ return;
+ }
+ }
+
+ if (int_value & MHI_MMIO_CTRL_CRDB_STATUS_MSK) {
+ mhi_log(MHI_MSG_ERROR,
+ "processing cmd db interrupt with %d\n", int_value);
+ ring = &mhi->ring[MHI_RING_CMD_ID];
+ ring->state = RING_STATE_PENDING;
+ queue_work(mhi->pending_ring_wq, &mhi->pending_work);
+ }
+
+ /* get the specific channel interrupts */
+ mhi_dev_check_channel_interrupt(mhi);
+
+ mutex_unlock(&mhi_ctx->mhi_lock);
+ ep_pcie_mask_irq_event(mhi->phandle,
+ EP_PCIE_INT_EVT_MHI_A7, true);
+}
+
+void mhi_dev_notify_a7_event(struct mhi_dev *mhi)
+{
+ schedule_work(&mhi->chdb_ctrl_work);
+ mhi_log(MHI_MSG_ERROR, "mhi irq triggered\n");
+}
+EXPORT_SYMBOL(mhi_dev_notify_a7_event);
+
+int mhi_dev_config_outbound_iatu(struct mhi_dev *mhi)
+{
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_config_outbound_iatu);
+
+static int mhi_dev_cache_host_cfg(struct mhi_dev *mhi)
+{
+ int rc = 0;
+ struct platform_device *pdev;
+ uint64_t addr1 = 0;
+
+ pdev = mhi->pdev;
+
+ /* Get host memory region configuration */
+ mhi_dev_get_mhi_addr(mhi);
+
+ mhi->ctrl_base.host_pa = HOST_ADDR(mhi->host_addr.ctrl_base_lsb,
+ mhi->host_addr.ctrl_base_msb);
+ mhi->data_base.host_pa = HOST_ADDR(mhi->host_addr.data_base_lsb,
+ mhi->host_addr.data_base_msb);
+
+ addr1 = HOST_ADDR(mhi->host_addr.ctrl_limit_lsb,
+ mhi->host_addr.ctrl_limit_msb);
+ mhi->ctrl_base.size = addr1 - mhi->ctrl_base.host_pa;
+ addr1 = HOST_ADDR(mhi->host_addr.data_limit_lsb,
+ mhi->host_addr.data_limit_msb);
+ mhi->data_base.size = addr1 - mhi->data_base.host_pa;
+
+ /* Get Channel, event and command context base pointer */
+ rc = mhi_dev_mmio_get_chc_base(mhi);
+ if (rc) {
+ pr_err("Fetching channel context failed\n");
+ return rc;
+ }
+
+ rc = mhi_dev_mmio_get_erc_base(mhi);
+ if (rc) {
+ pr_err("Fetching event ring context failed\n");
+ return rc;
+ }
+
+ rc = mhi_dev_mmio_get_crc_base(mhi);
+ if (rc) {
+ pr_err("Fetching command ring context failed\n");
+ return rc;
+ }
+
+ rc = mhi_dev_update_ner(mhi);
+ if (rc) {
+ pr_err("Fetching NER failed\n");
+ return rc;
+ }
+
+ mhi->cmd_ctx_shadow.size = sizeof(struct mhi_dev_cmd_ctx);
+ mhi->ev_ctx_shadow.size = sizeof(struct mhi_dev_ev_ctx) *
+ mhi->cfg.event_rings;
+ mhi->ch_ctx_shadow.size = sizeof(struct mhi_dev_ch_ctx) *
+ mhi->cfg.channels;
+
+ mhi->cmd_ctx_cache = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct mhi_dev_cmd_ctx),
+ &mhi->cmd_ctx_cache_dma_handle,
+ GFP_KERNEL);
+ if (!mhi->cmd_ctx_cache) {
+ pr_err("no memory while allocating cmd ctx\n");
+ return -ENOMEM;
+ }
+ memset(mhi->cmd_ctx_cache, 0, sizeof(struct mhi_dev_cmd_ctx));
+
+ mhi->ev_ctx_cache = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct mhi_dev_ev_ctx) *
+ mhi->cfg.event_rings,
+ &mhi->ev_ctx_cache_dma_handle,
+ GFP_KERNEL);
+ if (!mhi->ev_ctx_cache)
+ return -ENOMEM;
+
+ mhi->ch_ctx_cache = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct mhi_dev_ch_ctx) *
+ mhi->cfg.channels,
+ &mhi->ch_ctx_cache_dma_handle,
+ GFP_KERNEL);
+ if (!mhi_ctx->ch_ctx_cache)
+ return -ENOMEM;
+
+ /* Cache the command and event context */
+ mhi_dev_read_from_host(&mhi->cmd_ctx_shadow,
+ mhi->cmd_ctx_cache_dma_handle,
+ mhi->cmd_ctx_shadow.size);
+
+ mhi_dev_read_from_host(&mhi->ev_ctx_shadow,
+ mhi->ev_ctx_cache_dma_handle,
+ mhi->ev_ctx_shadow.size);
+
+ mhi_log(MHI_MSG_ERROR,
+ "cmd ring_base:0x%llx, rp:0x%llx, wp:0x%llx\n",
+ mhi->cmd_ctx_cache->rbase,
+ mhi->cmd_ctx_cache->rp,
+ mhi->cmd_ctx_cache->wp);
+ mhi_log(MHI_MSG_ERROR,
+ "ev ring_base:0x%llx, rp:0x%llx, wp:0x%llx\n",
+ mhi_ctx->ev_ctx_cache->rbase,
+ mhi->ev_ctx_cache->rp,
+ mhi->ev_ctx_cache->wp);
+
+ rc = mhi_ring_start(&mhi->ring[0],
+ (union mhi_dev_ring_ctx *)mhi->cmd_ctx_cache, mhi);
+ if (rc) {
+ pr_err("error in ring start\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+int mhi_dev_suspend(struct mhi_dev *mhi)
+{
+ int ch_id = 0, rc = 0;
+ struct mhi_addr host_addr;
+
+ mutex_lock(&mhi_ctx->mhi_write_test);
+ atomic_set(&mhi->is_suspended, 1);
+
+ for (ch_id = 0; ch_id < mhi->cfg.channels; ch_id++) {
+ if (mhi->ch_ctx_cache[ch_id].ch_state !=
+ MHI_DEV_CH_STATE_RUNNING)
+ continue;
+
+ mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_SUSPENDED;
+
+ host_addr.host_pa = mhi->ch_ctx_shadow.host_pa +
+ sizeof(struct mhi_dev_ch_ctx) * ch_id;
+
+ /* update the channel state in the host */
+ mhi_dev_write_to_host(&host_addr,
+ &mhi->ch_ctx_cache[ch_id].ch_state,
+ sizeof(enum mhi_dev_ch_ctx_state), mhi);
+
+ }
+
+ rc = ipa_dma_disable();
+ if (rc)
+ pr_err("Disable IPA failed\n");
+
+ mutex_unlock(&mhi_ctx->mhi_write_test);
+
+ return rc;
+}
+EXPORT_SYMBOL(mhi_dev_suspend);
+
+int mhi_dev_resume(struct mhi_dev *mhi)
+{
+ int ch_id = 0, rc = 0;
+ struct mhi_addr host_addr;
+
+ rc = ipa_dma_enable();
+ if (rc) {
+ pr_err("IPA enable failed\n");
+ return rc;
+ }
+
+ for (ch_id = 0; ch_id < mhi->cfg.channels; ch_id++) {
+ if (mhi->ch_ctx_cache[ch_id].ch_state !=
+ MHI_DEV_CH_STATE_SUSPENDED)
+ continue;
+
+ mhi->ch_ctx_cache[ch_id].ch_state = MHI_DEV_CH_STATE_RUNNING;
+ host_addr.host_pa = mhi->ch_ctx_shadow.host_pa +
+ sizeof(struct mhi_dev_ch_ctx) * ch_id;
+
+ /* update the channel state in the host */
+ mhi_dev_write_to_host(&host_addr,
+ &mhi->ch_ctx_cache[ch_id].ch_state,
+ sizeof(enum mhi_dev_ch_ctx_state), mhi);
+ }
+
+ atomic_set(&mhi->is_suspended, 0);
+
+ return rc;
+}
+EXPORT_SYMBOL(mhi_dev_resume);
+
+static int mhi_dev_ring_init(struct mhi_dev *dev)
+{
+ int i = 0;
+
+ mhi_log(MHI_MSG_INFO, "initializing all rings");
+ dev->cmd_ring_idx = 0;
+ dev->ev_ring_start = 1;
+ dev->ch_ring_start = dev->ev_ring_start + dev->cfg.event_rings;
+
+ /* Initialize CMD ring */
+ mhi_ring_init(&dev->ring[dev->cmd_ring_idx],
+ RING_TYPE_CMD, dev->cmd_ring_idx);
+
+ mhi_ring_set_cb(&dev->ring[dev->cmd_ring_idx],
+ mhi_dev_process_cmd_ring);
+
+ /* Initialize Event ring */
+ for (i = dev->ev_ring_start; i < (dev->cfg.event_rings
+ + dev->ev_ring_start); i++)
+ mhi_ring_init(&dev->ring[i], RING_TYPE_ER, i);
+
+ /* Initialize CH */
+ for (i = dev->ch_ring_start; i < (dev->cfg.channels
+ + dev->ch_ring_start); i++) {
+ mhi_ring_init(&dev->ring[i], RING_TYPE_CH, i);
+ mhi_ring_set_cb(&dev->ring[i], mhi_dev_process_tre_ring);
+ }
+
+
+ return 0;
+}
+
+int mhi_dev_open_channel(uint32_t chan_id,
+ struct mhi_dev_client **handle_client,
+ void (*mhi_dev_client_cb_reason)
+ (struct mhi_dev_client_cb_reason *cb))
+{
+ int rc = 0;
+ struct mhi_dev_channel *ch;
+ struct platform_device *pdev;
+
+ pdev = mhi_ctx->pdev;
+ ch = &mhi_ctx->ch[chan_id];
+
+ mutex_lock(&ch->ch_lock);
+
+ if (ch->active_client) {
+ mhi_log(MHI_MSG_ERROR,
+ "Channel (%d) already opened by client\n", chan_id);
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ /* Initialize the channel, client and state information */
+ *handle_client = kzalloc(sizeof(struct mhi_dev_client), GFP_KERNEL);
+ if (!(*handle_client)) {
+ dev_err(&pdev->dev, "can not allocate mhi_dev memory\n");
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ ch->active_client = (*handle_client);
+ (*handle_client)->channel = ch;
+ (*handle_client)->event_trigger = mhi_dev_client_cb_reason;
+
+ if (ch->state == MHI_DEV_CH_UNINT) {
+ ch->ring = &mhi_ctx->ring[chan_id + mhi_ctx->ch_ring_start];
+ ch->state = MHI_DEV_CH_PENDING_START;
+ } else if (ch->state == MHI_DEV_CH_CLOSED)
+ ch->state = MHI_DEV_CH_STARTED;
+ else if (ch->state == MHI_DEV_CH_STOPPED)
+ ch->state = MHI_DEV_CH_PENDING_START;
+
+exit:
+ mutex_unlock(&ch->ch_lock);
+ return rc;
+}
+EXPORT_SYMBOL(mhi_dev_open_channel);
+
+int mhi_dev_channel_isempty(struct mhi_dev_client *handle)
+{
+ struct mhi_dev_channel *ch;
+ int rc;
+
+ ch = handle->channel;
+
+ rc = ch->ring->rd_offset == ch->ring->wr_offset;
+
+ return rc;
+}
+EXPORT_SYMBOL(mhi_dev_channel_isempty);
+
+int mhi_dev_close_channel(struct mhi_dev_client *handle)
+{
+ struct mhi_dev_channel *ch;
+ int rc = 0;
+
+ ch = handle->channel;
+
+ mutex_lock(&ch->ch_lock);
+ if (ch->state != MHI_DEV_CH_PENDING_START) {
+ if (ch->ch_type == MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL &&
+ !mhi_dev_channel_isempty(handle)) {
+ mhi_log(MHI_MSG_ERROR,
+ "Trying to close an active channel (%d)\n",
+ ch->ch_id);
+ mutex_unlock(&ch->ch_lock);
+ rc = -EAGAIN;
+ goto exit;
+ } else if (ch->tre_loc) {
+ mhi_log(MHI_MSG_ERROR,
+ "Trying to close channel (%d) when a TRE is active",
+ ch->ch_id);
+ mutex_unlock(&ch->ch_lock);
+ rc = -EAGAIN;
+ goto exit;
+ }
+ }
+
+ ch->state = MHI_DEV_CH_CLOSED;
+ ch->active_client = NULL;
+ kfree(handle);
+exit:
+ mutex_unlock(&ch->ch_lock);
+ return rc;
+}
+EXPORT_SYMBOL(mhi_dev_close_channel);
+
+static int mhi_dev_check_tre_bytes_left(struct mhi_dev_channel *ch,
+ struct mhi_dev_ring *ring, union mhi_dev_ring_element_type *el,
+ uint32_t *chain)
+{
+ uint32_t td_done = 0;
+
+ /*
+ * A full TRE worth of data was consumed.
+ * Check if we are at a TD boundary.
+ */
+ if (ch->tre_bytes_left == 0) {
+ if (el->tre.chain) {
+ if (el->tre.ieob)
+ mhi_dev_send_completion_event(ch,
+ ring->rd_offset, el->tre.len,
+ MHI_CMD_COMPL_CODE_EOB);
+ *chain = 1;
+ } else {
+ if (el->tre.ieot)
+ mhi_dev_send_completion_event(
+ ch, ring->rd_offset, el->tre.len,
+ MHI_CMD_COMPL_CODE_EOT);
+ td_done = 1;
+ *chain = 0;
+ }
+ mhi_dev_ring_inc_index(ring, ring->rd_offset);
+ ch->tre_bytes_left = 0;
+ ch->tre_loc = 0;
+ }
+
+ return td_done;
+}
+
+int mhi_dev_read_channel(struct mhi_dev_client *handle_client,
+ void *buf, uint32_t buf_size, uint32_t *chain)
+{
+ struct mhi_dev_channel *ch;
+ struct mhi_dev_ring *ring;
+ union mhi_dev_ring_element_type *el;
+ uint32_t ch_id;
+ size_t bytes_to_read, addr_offset;
+ uint64_t read_from_loc;
+ ssize_t bytes_read = 0;
+ uint32_t write_to_loc = 0;
+ size_t usr_buf_remaining = buf_size;
+ int td_done = 0, rc = 0;
+
+ if (!handle_client) {
+ mhi_log(MHI_MSG_ERROR, "invalid client handle\n");
+ return -ENXIO;
+ }
+
+ ch = handle_client->channel;
+ ring = ch->ring;
+ ch_id = ch->ch_id;
+ *chain = 0;
+
+ mutex_lock(&ch->ch_lock);
+
+ do {
+ el = &ring->ring_cache[ring->rd_offset];
+ if (ch->tre_loc) {
+ bytes_to_read = min(usr_buf_remaining,
+ ch->tre_bytes_left);
+ *chain = 1;
+ mhi_log(MHI_MSG_ERROR,
+ "remaining buffered data size %d\n",
+ (int) ch->tre_bytes_left);
+ } else {
+ if (ring->rd_offset == ring->wr_offset) {
+ mhi_log(MHI_MSG_ERROR,
+ "nothing to read, returning\n");
+ bytes_read = 0;
+ goto exit;
+ }
+
+ if (ch->state == MHI_DEV_CH_STOPPED) {
+ mhi_log(MHI_MSG_ERROR,
+ "channel (%d) already stopped\n",
+ ch_id);
+ bytes_read = -1;
+ goto exit;
+ }
+
+ ch->tre_loc = el->tre.data_buf_ptr;
+ ch->tre_size = el->tre.len;
+ ch->tre_bytes_left = ch->tre_size;
+
+ mhi_log(MHI_MSG_ERROR,
+ "user_buf_remaining %d, ch->tre_size %d\n",
+ usr_buf_remaining, ch->tre_size);
+ bytes_to_read = min(usr_buf_remaining, ch->tre_size);
+ }
+
+ addr_offset = ch->tre_size - ch->tre_bytes_left;
+ read_from_loc = ch->tre_loc + addr_offset;
+ write_to_loc = (uint32_t) buf + (buf_size - usr_buf_remaining);
+
+ mhi_log(MHI_MSG_ERROR, "reading %d bytes from chan %d\n",
+ bytes_to_read, ch_id);
+
+ mhi_transfer_host_to_device((void *) write_to_loc,
+ read_from_loc, bytes_to_read, mhi_ctx);
+
+ bytes_read += bytes_to_read;
+ ch->tre_bytes_left -= bytes_to_read;
+ usr_buf_remaining -= bytes_to_read;
+ td_done = mhi_dev_check_tre_bytes_left(ch, ring, el, chain);
+ } while (usr_buf_remaining && !td_done);
+
+ if (td_done && ch->state == MHI_DEV_CH_PENDING_STOP) {
+ ch->state = MHI_DEV_CH_STOPPED;
+ rc = mhi_dev_process_stop_cmd(ring, ch_id, mhi_ctx);
+ if (rc) {
+ mhi_log(MHI_MSG_ERROR,
+ "Error while stopping channel (%d)\n", ch_id);
+ bytes_read = -1;
+ }
+ }
+exit:
+ mutex_unlock(&ch->ch_lock);
+ return bytes_read;
+}
+EXPORT_SYMBOL(mhi_dev_read_channel);
+
+static void skip_to_next_td(struct mhi_dev_channel *ch)
+{
+ struct mhi_dev_ring *ring = ch->ring;
+ union mhi_dev_ring_element_type *el;
+ uint32_t td_boundary_reached = 0;
+
+ ch->skip_td = 1;
+ el = &ring->ring_cache[ring->rd_offset];
+ while (ring->rd_offset != ring->wr_offset) {
+ if (td_boundary_reached) {
+ ch->skip_td = 0;
+ break;
+ }
+ if (!el->tre.chain)
+ td_boundary_reached = 1;
+ mhi_dev_ring_inc_index(ring, ring->rd_offset);
+ el = &ring->ring_cache[ring->rd_offset];
+ }
+}
+
+int mhi_dev_write_channel(struct mhi_dev_client *handle_client,
+ void *buf, size_t buf_size)
+{
+ struct mhi_dev_channel *ch;
+ struct mhi_dev_ring *ring;
+ union mhi_dev_ring_element_type *el;
+ enum mhi_dev_cmd_completion_code code = MHI_CMD_COMPL_CODE_INVALID;
+ int rc = 0;
+ uint64_t ch_id, skip_tres = 0, write_to_loc;
+ uint32_t read_from_loc;
+ size_t usr_buf_remaining = buf_size;
+ size_t usr_buf_offset = 0;
+ size_t bytes_to_write = 0;
+ size_t bytes_written = 0;
+ uint32_t tre_len = 0, suspend_wait_timeout = 0;
+
+ if (!handle_client) {
+ pr_err("%s: invalid client handle\n", __func__);
+ return -ENXIO;
+ }
+
+ if (!buf) {
+ pr_err("%s: invalid buffer to write data\n", __func__);
+ return -ENXIO;
+ }
+
+ mutex_lock(&mhi_ctx->mhi_write_test);
+
+ if (atomic_read(&mhi_ctx->is_suspended)) {
+ /*
+ * Expected usage is when there is a write
+ * to the MHI core -> notify SM.
+ */
+ rc = mhi_dev_notify_sm_event(MHI_DEV_EVENT_CORE_WAKEUP);
+ if (rc) {
+ pr_err("error sending core wakeup event\n");
+ mutex_unlock(&mhi_ctx->mhi_write_test);
+ return rc;
+ }
+ }
+
+ atomic_inc(&mhi_ctx->write_active);
+ while (atomic_read(&mhi_ctx->is_suspended) &&
+ suspend_wait_timeout < MHI_SUSPEND_WAIT_TIMEOUT) {
+ /* wait for the suspend to finish */
+ usleep_range(MHI_SUSPEND_WAIT_MIN, MHI_SUSPEND_WAIT_MAX);
+ suspend_wait_timeout++;
+ }
+
+ ch = handle_client->channel;
+ ch->wr_request_active = true;
+
+ ring = ch->ring;
+ ch_id = ch->ch_id;
+
+ mutex_lock(&ch->ch_lock);
+
+ if (ch->state == MHI_DEV_CH_STOPPED) {
+ mhi_log(MHI_MSG_ERROR,
+ "channel (%lld) already stopped\n", ch_id);
+ bytes_written = -1;
+ goto exit;
+ }
+
+ if (ch->state == MHI_DEV_CH_PENDING_STOP) {
+ if (mhi_dev_process_stop_cmd(ring, ch_id, mhi_ctx) < 0)
+ bytes_written = -1;
+ goto exit;
+ }
+
+ if (ch->skip_td)
+ skip_to_next_td(ch);
+
+ do {
+ if (ring->rd_offset == ring->wr_offset) {
+ mhi_log(MHI_MSG_INFO, "No TREs available\n");
+ break;
+ }
+
+ el = &ring->ring_cache[ring->rd_offset];
+ tre_len = el->tre.len;
+
+ bytes_to_write = min(usr_buf_remaining, tre_len);
+ usr_buf_offset = buf_size - usr_buf_remaining;
+ read_from_loc = (uint32_t) buf + usr_buf_offset;
+ write_to_loc = el->tre.data_buf_ptr;
+ mhi_transfer_device_to_host(write_to_loc,
+ (void *) read_from_loc,
+ bytes_to_write, mhi_ctx);
+ bytes_written += bytes_to_write;
+ usr_buf_remaining -= bytes_to_write;
+
+ if (usr_buf_remaining) {
+ if (!el->tre.chain)
+ code = MHI_CMD_COMPL_CODE_OVERFLOW;
+ else if (el->tre.ieob)
+ code = MHI_CMD_COMPL_CODE_EOB;
+ } else {
+ if (el->tre.chain)
+ skip_tres = 1;
+ code = MHI_CMD_COMPL_CODE_EOT;
+ }
+
+ if (mhi_dev_send_completion_event(ch,
+ ring->rd_offset, bytes_to_write, code) < 0) {
+ mhi_log(MHI_MSG_ERROR,
+ "error sending completion event ch_id:%lld\n",
+ ch_id);
+ }
+
+ if (ch->state == MHI_DEV_CH_PENDING_STOP)
+ break;
+
+ mhi_dev_ring_inc_index(ring, ring->rd_offset);
+ } while (!skip_tres && usr_buf_remaining);
+
+ if (skip_tres)
+ skip_to_next_td(ch);
+
+ if (ch->state == MHI_DEV_CH_PENDING_STOP) {
+ rc = mhi_dev_process_stop_cmd(ring, ch_id, mhi_ctx);
+ if (rc) {
+ mhi_log(MHI_MSG_ERROR,
+ "channel (%lld) stop failed\n", ch_id);
+ }
+ }
+exit:
+ mutex_unlock(&ch->ch_lock);
+ atomic_dec(&mhi_ctx->write_active);
+ mutex_unlock(&mhi_ctx->mhi_write_test);
+ return bytes_written;
+}
+EXPORT_SYMBOL(mhi_dev_write_channel);
+
+static void mhi_dev_enable(struct work_struct *work)
+{
+ int rc = 0;
+ struct ep_pcie_msi_config msi_cfg;
+ struct mhi_dev *mhi = container_of(work,
+ struct mhi_dev, ring_init_cb_work);
+
+ enum mhi_dev_state state;
+ uint32_t max_cnt = 0;
+
+
+ rc = ipa_dma_init();
+ if (rc) {
+ pr_err("ipa dma init failed\n");
+ return;
+ }
+
+ rc = ipa_dma_enable();
+ if (rc) {
+ pr_err("ipa enable failed\n");
+ return;
+ }
+
+ rc = mhi_dev_ring_init(mhi);
+ if (rc) {
+ pr_err("MHI dev ring init failed\n");
+ return;
+ }
+
+ /* Invoke MHI SM when device is in RESET state */
+ mhi_dev_sm_init(mhi);
+
+ /* set the env before setting the ready bit */
+ rc = mhi_dev_mmio_set_env(mhi, MHI_ENV_VALUE);
+ if (rc) {
+ pr_err("%s: env setting failed\n", __func__);
+ return;
+ }
+ mhi_uci_init();
+
+ /* All set...let's notify the host */
+ mhi_dev_sm_set_ready();
+
+ rc = ep_pcie_get_msi_config(mhi->phandle, &msi_cfg);
+ if (rc)
+ pr_warn("MHI: error getting MSI config\n");
+
+ rc = mhi_dev_mmio_get_mhi_state(mhi, &state);
+ if (rc) {
+ pr_err("%s: get mhi state failed\n", __func__);
+ return;
+ }
+
+ while (state != MHI_DEV_M0_STATE && max_cnt < MHI_DEV_M0_MAX_CNT) {
+ /* Wait for Host to set the M0 state */
+ usleep_range(MHI_M0_WAIT_MIN_USLEEP, MHI_M0_WAIT_MAX_USLEEP);
+ rc = mhi_dev_mmio_get_mhi_state(mhi, &state);
+ if (rc) {
+ pr_err("%s: get mhi state failed\n", __func__);
+ return;
+ }
+ max_cnt++;
+ }
+
+ mhi_log(MHI_MSG_INFO, "state:%d\n", state);
+
+ if (state == MHI_DEV_M0_STATE) {
+ rc = mhi_dev_cache_host_cfg(mhi);
+ if (rc) {
+ pr_err("Failed to cache the host config\n");
+ return;
+ }
+
+ rc = mhi_dev_mmio_set_env(mhi, MHI_ENV_VALUE);
+ if (rc) {
+ pr_err("%s: env setting failed\n", __func__);
+ return;
+ }
+ } else {
+ pr_err("MHI device failed to enter M0\n");
+ return;
+ }
+
+ rc = mhi_hwc_init(mhi_ctx);
+ if (rc) {
+ pr_err("error during hwc_init\n");
+ return;
+ }
+}
+
+static void mhi_ring_init_cb(void *data)
+{
+ struct mhi_dev *mhi = data;
+
+ if (!mhi) {
+ pr_err("Invalid MHI ctx\n");
+ return;
+ }
+
+ queue_work(mhi->ring_init_wq, &mhi->ring_init_cb_work);
+}
+
+static int get_device_tree_data(struct platform_device *pdev)
+{
+ struct mhi_dev *mhi;
+ int rc = 0;
+ struct resource *res_mem = NULL;
+
+ mhi = devm_kzalloc(&pdev->dev,
+ sizeof(struct mhi_dev), GFP_KERNEL);
+ if (!mhi)
+ return -ENOMEM;
+
+ mhi->pdev = pdev;
+ mhi->dev = &pdev->dev;
+ res_mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "mhi_mmio_base");
+ if (!res_mem) {
+ rc = -EINVAL;
+ pr_err("Request MHI MMIO physical memory region failed\n");
+ return rc;
+ }
+
+ mhi->mmio_base_pa_addr = res_mem->start;
+ mhi->mmio_base_addr = ioremap_nocache(res_mem->start, MHI_1K_SIZE);
+ if (!mhi->mmio_base_addr) {
+ pr_err("Failed to IO map MMIO registers.\n");
+ rc = -EINVAL;
+ return rc;
+ }
+
+ res_mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "ipa_uc_mbox_crdb");
+ if (!res_mem) {
+ rc = -EINVAL;
+ pr_err("Request IPA_UC_MBOX CRDB physical region failed\n");
+ return rc;
+ }
+
+ mhi->ipa_uc_mbox_crdb = res_mem->start;
+
+ res_mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "ipa_uc_mbox_erdb");
+ if (!res_mem) {
+ rc = -EINVAL;
+ pr_err("Request IPA_UC_MBOX ERDB physical region failed\n");
+ return rc;
+ }
+
+ mhi->ipa_uc_mbox_erdb = res_mem->start;
+
+ mhi_ctx = mhi;
+
+ rc = of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,mhi-ifc-id",
+ &mhi_ctx->ifc_id);
+
+ if (rc) {
+ pr_err("qcom,mhi-ifc-id does not exist.\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,mhi-ep-msi",
+ &mhi_ctx->mhi_ep_msi_num);
+ if (rc) {
+ pr_err("qcom,mhi-ep-msi does not exist.\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,mhi-version",
+ &mhi_ctx->mhi_version);
+ if (rc) {
+ pr_err("qcom,mhi-version does not exist.\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int mhi_init(struct mhi_dev *mhi)
+{
+ int rc = 0, i = 0;
+ struct platform_device *pdev = mhi->pdev;
+
+
+ rc = mhi_dev_mmio_init(mhi);
+ if (rc) {
+ pr_err("Failed to update the MMIO init\n");
+ return rc;
+ }
+
+
+ mhi->ring = devm_kzalloc(&pdev->dev,
+ (sizeof(struct mhi_dev_ring) *
+ (mhi->cfg.channels + mhi->cfg.event_rings + 1)),
+ GFP_KERNEL);
+ if (!mhi->ring)
+ return -ENOMEM;
+
+ mhi->ch = devm_kzalloc(&pdev->dev,
+ (sizeof(struct mhi_dev_channel) *
+ (mhi->cfg.channels)), GFP_KERNEL);
+ if (!mhi->ch)
+ return -ENOMEM;
+
+ for (i = 0; i < mhi->cfg.channels; i++)
+ mutex_init(&mhi->ch[i].ch_lock);
+
+ mhi->mmio_backup = devm_kzalloc(&pdev->dev, MHI_DEV_MMIO_RANGE,
+ GFP_KERNEL);
+ if (!mhi->mmio_backup)
+ return -ENOMEM;
+
+ mhi_ipc_log = ipc_log_context_create(MHI_IPC_LOG_PAGES, "mhi", 0);
+ if (mhi_ipc_log == NULL) {
+ dev_err(&pdev->dev,
+ "Failed to create IPC logging context\n");
+ }
+
+ return 0;
+}
+
+static int mhi_dev_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+
+ if (pdev->dev.of_node) {
+ rc = get_device_tree_data(pdev);
+ if (rc) {
+ pr_err("Error reading MHI Dev DT\n");
+ return rc;
+ }
+ }
+
+ mhi_ctx->phandle = ep_pcie_get_phandle(mhi_ctx->ifc_id);
+ if (!mhi_ctx->phandle) {
+ pr_err("PCIe driver is not ready yet.\n");
+ return -EPROBE_DEFER;
+ }
+
+ if (ep_pcie_get_linkstatus(mhi_ctx->phandle) != EP_PCIE_LINK_ENABLED) {
+ pr_err("PCIe link is not ready to use.\n");
+ return -EPROBE_DEFER;
+ }
+
+ INIT_WORK(&mhi_ctx->chdb_ctrl_work, mhi_dev_scheduler);
+
+ mhi_ctx->pending_ring_wq = alloc_workqueue("mhi_pending_wq",
+ WQ_HIGHPRI, 0);
+ if (!mhi_ctx->pending_ring_wq) {
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ INIT_WORK(&mhi_ctx->pending_work, mhi_dev_process_ring_pending);
+
+ INIT_WORK(&mhi_ctx->ring_init_cb_work, mhi_dev_enable);
+
+ mhi_ctx->ring_init_wq = alloc_workqueue("mhi_ring_init_cb_wq",
+ WQ_HIGHPRI, 0);
+ if (!mhi_ctx->ring_init_wq) {
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ INIT_LIST_HEAD(&mhi_ctx->event_ring_list);
+ INIT_LIST_HEAD(&mhi_ctx->process_ring_list);
+ mutex_init(&mhi_ctx->mhi_lock);
+ mutex_init(&mhi_ctx->mhi_event_lock);
+ mutex_init(&mhi_ctx->mhi_write_test);
+
+ rc = mhi_init(mhi_ctx);
+ if (rc)
+ return rc;
+
+ mhi_ctx->dma_cache = dma_alloc_coherent(&pdev->dev,
+ (TRB_MAX_DATA_SIZE * 4),
+ &mhi_ctx->cache_dma_handle, GFP_KERNEL);
+ if (!mhi_ctx->dma_cache)
+ return -ENOMEM;
+
+ mhi_ctx->read_handle = dma_alloc_coherent(&pdev->dev,
+ (TRB_MAX_DATA_SIZE * 4),
+ &mhi_ctx->read_dma_handle,
+ GFP_KERNEL);
+ if (!mhi_ctx->read_handle)
+ return -ENOMEM;
+
+ mhi_ctx->write_handle = dma_alloc_coherent(&pdev->dev,
+ (TRB_MAX_DATA_SIZE * 24),
+ &mhi_ctx->write_dma_handle,
+ GFP_KERNEL);
+ if (!mhi_ctx->write_handle)
+ return -ENOMEM;
+
+ rc = mhi_dev_mmio_write(mhi_ctx, MHIVER, mhi_ctx->mhi_version);
+ if (rc) {
+ pr_err("Failed to update the MHI version\n");
+ return rc;
+ }
+
+ mhi_ctx->event_reg.events = EP_PCIE_EVENT_PM_D3_HOT |
+ EP_PCIE_EVENT_PM_D3_COLD |
+ EP_PCIE_EVENT_PM_D0 |
+ EP_PCIE_EVENT_PM_RST_DEAST |
+ EP_PCIE_EVENT_MHI_A7 |
+ EP_PCIE_EVENT_LINKDOWN;
+ mhi_ctx->event_reg.user = mhi_ctx;
+ mhi_ctx->event_reg.mode = EP_PCIE_TRIGGER_CALLBACK;
+ mhi_ctx->event_reg.callback = mhi_dev_sm_pcie_handler;
+
+ rc = ep_pcie_register_event(mhi_ctx->phandle, &mhi_ctx->event_reg);
+ if (rc) {
+ pr_err("Failed to register for events from PCIe\n");
+ return rc;
+ }
+
+ pr_err("Registering with IPA\n");
+
+ rc = ipa_register_ipa_ready_cb(mhi_ring_init_cb, mhi_ctx);
+ if (rc < 0) {
+ if (rc == -EEXIST) {
+ mhi_ring_init_cb(mhi_ctx);
+ } else {
+ pr_err("Error calling IPA cb with %d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int mhi_dev_remove(struct platform_device *pdev)
+{
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id mhi_dev_match_table[] = {
+ { .compatible = "qcom,msm-mhi-dev" },
+ {}
+};
+
+static struct platform_driver mhi_dev_driver = {
+ .driver = {
+ .name = "qcom,msm-mhi-dev",
+ .of_match_table = mhi_dev_match_table,
+ },
+ .probe = mhi_dev_probe,
+ .remove = mhi_dev_remove,
+};
+
+module_param(mhi_msg_lvl, uint, S_IRUGO | S_IWUSR);
+module_param(mhi_ipc_msg_lvl, uint, S_IRUGO | S_IWUSR);
+
+MODULE_PARM_DESC(mhi_msg_lvl, "mhi msg lvl");
+MODULE_PARM_DESC(mhi_ipc_msg_lvl, "mhi ipc msg lvl");
+
+static int __init mhi_dev_init(void)
+{
+ return platform_driver_register(&mhi_dev_driver);
+}
+module_init(mhi_dev_init);
+
+static void __exit mhi_dev_exit(void)
+{
+ platform_driver_unregister(&mhi_dev_driver);
+}
+module_exit(mhi_dev_exit);
+
+MODULE_DESCRIPTION("MHI device driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/msm/mhi_dev/mhi.h b/drivers/platform/msm/mhi_dev/mhi.h
new file mode 100644
index 000000000000..6b3c6a8a78b2
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/mhi.h
@@ -0,0 +1,1126 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MHI_H
+#define __MHI_H
+
+#include <linux/msm_ep_pcie.h>
+#include <linux/types.h>
+#include <linux/ipc_logging.h>
+#include <linux/dma-mapping.h>
+
+/**
+ * MHI control data structures allocated by the host, including
+ * channel context array, event context array, command context and rings.
+ */
+
+/* Channel context state */
+enum mhi_dev_ch_ctx_state {
+ MHI_DEV_CH_STATE_DISABLED,
+ MHI_DEV_CH_STATE_ENABLED,
+ MHI_DEV_CH_STATE_RUNNING,
+ MHI_DEV_CH_STATE_SUSPENDED,
+ MHI_DEV_CH_STATE_STOP,
+ MHI_DEV_CH_STATE_ERROR,
+ MHI_DEV_CH_STATE_RESERVED,
+ MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF
+};
+
+/* Channel type */
+enum mhi_dev_ch_ctx_type {
+ MHI_DEV_CH_TYPE_NONE,
+ MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL,
+ MHI_DEV_CH_TYPE_INBOUND_CHANNEL,
+ MHI_DEV_CH_RESERVED
+};
+
+/* Channel context type */
+struct mhi_dev_ch_ctx {
+ enum mhi_dev_ch_ctx_state ch_state;
+ enum mhi_dev_ch_ctx_type ch_type;
+ uint32_t err_indx;
+ uint64_t rbase;
+ uint64_t rlen;
+ uint64_t rp;
+ uint64_t wp;
+} __packed;
+
+enum mhi_dev_ring_element_type_id {
+ MHI_DEV_RING_EL_INVALID = 0,
+ MHI_DEV_RING_EL_NOOP = 1,
+ MHI_DEV_RING_EL_TRANSFER = 2,
+ MHI_DEV_RING_EL_RESET = 16,
+ MHI_DEV_RING_EL_STOP = 17,
+ MHI_DEV_RING_EL_START = 18,
+ MHI_DEV_RING_EL_MHI_STATE_CHG = 32,
+ MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33,
+ MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34,
+ MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64,
+ MHI_DEV_RING_EL_UNDEF
+};
+
+enum mhi_dev_ring_state {
+ RING_STATE_UINT = 0,
+ RING_STATE_IDLE,
+ RING_STATE_PENDING,
+};
+
+enum mhi_dev_ring_type {
+ RING_TYPE_CMD = 0,
+ RING_TYPE_ER,
+ RING_TYPE_CH,
+ RING_TYPE_INVAL
+};
+
+/* Event context interrupt moderation */
+enum mhi_dev_evt_ctx_int_mod_timer {
+ MHI_DEV_EVT_INT_MODERATION_DISABLED
+};
+
+/* Event ring type */
+enum mhi_dev_evt_ctx_event_ring_type {
+ MHI_DEV_EVT_TYPE_DEFAULT,
+ MHI_DEV_EVT_TYPE_VALID,
+ MHI_DEV_EVT_RESERVED
+};
+
+/* Event ring context type */
+struct mhi_dev_ev_ctx {
+ uint32_t res1:16;
+ enum mhi_dev_evt_ctx_int_mod_timer intmodt:16;
+ enum mhi_dev_evt_ctx_event_ring_type ertype;
+ uint32_t msivec;
+ uint64_t rbase;
+ uint64_t rlen;
+ uint64_t rp;
+ uint64_t wp;
+} __packed;
+
+/* Command context */
+struct mhi_dev_cmd_ctx {
+ uint32_t res1;
+ uint32_t res2;
+ uint32_t res3;
+ uint64_t rbase;
+ uint64_t rlen;
+ uint64_t rp;
+ uint64_t wp;
+} __packed;
+
+/* generic context */
+struct mhi_dev_gen_ctx {
+ uint32_t res1;
+ uint32_t res2;
+ uint32_t res3;
+ uint64_t rbase;
+ uint64_t rlen;
+ uint64_t rp;
+ uint64_t wp;
+} __packed;
+
+/* Transfer ring element */
+struct mhi_dev_transfer_ring_element {
+ uint64_t data_buf_ptr;
+ uint32_t len:16;
+ uint32_t res1:16;
+ uint32_t chain:1;
+ uint32_t res2:7;
+ uint32_t ieob:1;
+ uint32_t ieot:1;
+ uint32_t bei:1;
+ uint32_t res3:5;
+ enum mhi_dev_ring_element_type_id type:8;
+ uint32_t res4:8;
+} __packed;
+
+/* Command ring element */
+/* Command ring No op command */
+struct mhi_dev_cmd_ring_op {
+ uint64_t res1;
+ uint32_t res2;
+ uint32_t res3:16;
+ enum mhi_dev_ring_element_type_id type:8;
+ uint32_t chid:8;
+} __packed;
+
+/* Command ring reset channel command */
+struct mhi_dev_cmd_ring_reset_channel_cmd {
+ uint64_t res1;
+ uint32_t res2;
+ uint32_t res3:16;
+ enum mhi_dev_ring_element_type_id type:8;
+ uint32_t chid:8;
+} __packed;
+
+/* Command ring stop channel command */
+struct mhi_dev_cmd_ring_stop_channel_cmd {
+ uint64_t res1;
+ uint32_t res2;
+ uint32_t res3:16;
+ enum mhi_dev_ring_element_type_id type:8;
+ uint32_t chid:8;
+} __packed;
+
+/* Command ring start channel command */
+struct mhi_dev_cmd_ring_start_channel_cmd {
+ uint64_t res1;
+ uint32_t seqnum;
+ uint32_t reliable:1;
+ uint32_t res2:15;
+ enum mhi_dev_ring_element_type_id type:8;
+ uint32_t chid:8;
+} __packed;
+
+enum mhi_dev_cmd_completion_code {
+ MHI_CMD_COMPL_CODE_INVALID = 0,
+ MHI_CMD_COMPL_CODE_SUCCESS = 1,
+ MHI_CMD_COMPL_CODE_EOT = 2,
+ MHI_CMD_COMPL_CODE_OVERFLOW = 3,
+ MHI_CMD_COMPL_CODE_EOB = 4,
+ MHI_CMD_COMPL_CODE_UNDEFINED = 16,
+ MHI_CMD_COMPL_CODE_RING_EL = 17,
+ MHI_CMD_COMPL_CODE_RES
+};
+
+/* Event ring elements */
+/* Transfer completion event */
+struct mhi_dev_event_ring_transfer_completion {
+ uint64_t ptr;
+ uint32_t len:16;
+ uint32_t res1:8;
+ enum mhi_dev_cmd_completion_code code:8;
+ uint32_t res2:16;
+ enum mhi_dev_ring_element_type_id type:8;
+ uint32_t chid:8;
+} __packed;
+
+/* Command completion event */
+struct mhi_dev_event_ring_cmd_completion {
+ uint64_t ptr;
+ uint32_t res1:24;
+ enum mhi_dev_cmd_completion_code code:8;
+ uint32_t res2:16;
+ enum mhi_dev_ring_element_type_id type:8;
+ uint32_t res3:8;
+} __packed;
+
+enum mhi_dev_state {
+ MHI_DEV_RESET_STATE = 0,
+ MHI_DEV_READY_STATE,
+ MHI_DEV_M0_STATE,
+ MHI_DEV_M1_STATE,
+ MHI_DEV_M2_STATE,
+ MHI_DEV_M3_STATE,
+ MHI_DEV_MAX_STATE,
+ MHI_DEV_SYSERR_STATE = 0xff
+};
+
+/* MHI state change event */
+struct mhi_dev_event_ring_state_change {
+ uint64_t ptr;
+ uint32_t res1:24;
+ enum mhi_dev_state mhistate:8;
+ uint32_t res2:16;
+ enum mhi_dev_ring_element_type_id type:8;
+ uint32_t res3:8;
+} __packed;
+
+enum mhi_dev_execenv {
+ MHI_DEV_SBL_EE = 1,
+ MHI_DEV_AMSS_EE = 2,
+ MHI_DEV_UNRESERVED
+};
+
+/* EE state change event */
+struct mhi_dev_event_ring_ee_state_change {
+ uint64_t ptr;
+ uint32_t res1:24;
+ enum mhi_dev_execenv execenv:8;
+ uint32_t res2:16;
+ enum mhi_dev_ring_element_type_id type:8;
+ uint32_t res3:8;
+} __packed;
+
+/* Generic cmd to parse common details like type and channel id */
+struct mhi_dev_ring_generic {
+ uint64_t ptr;
+ uint32_t res1:24;
+ enum mhi_dev_state mhistate:8;
+ uint32_t res2:16;
+ enum mhi_dev_ring_element_type_id type:8;
+ uint32_t chid:8;
+} __packed;
+
+struct mhi_config {
+ uint32_t mhi_reg_len;
+ uint32_t version;
+ uint32_t event_rings;
+ uint32_t channels;
+ uint32_t chdb_offset;
+ uint32_t erdb_offset;
+};
+
+#define NUM_CHANNELS 128
+#define HW_CHANNEL_BASE 100
+#define HW_CHANNEL_END 107
+#define MHI_ENV_VALUE 2
+#define MHI_MASK_ROWS_CH_EV_DB 4
+#define TRB_MAX_DATA_SIZE 4096
+
+/* Possible ring element types */
+union mhi_dev_ring_element_type {
+ struct mhi_dev_cmd_ring_op cmd_no_op;
+ struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset;
+ struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop;
+ struct mhi_dev_cmd_ring_start_channel_cmd cmd_start;
+ struct mhi_dev_transfer_ring_element tre;
+ struct mhi_dev_event_ring_transfer_completion evt_tr_comp;
+ struct mhi_dev_event_ring_cmd_completion evt_cmd_comp;
+ struct mhi_dev_event_ring_state_change evt_state_change;
+ struct mhi_dev_event_ring_ee_state_change evt_ee_state;
+ struct mhi_dev_ring_generic generic;
+};
+
+/* Transfer ring element type */
+union mhi_dev_ring_ctx {
+ struct mhi_dev_cmd_ctx cmd;
+ struct mhi_dev_ev_ctx ev;
+ struct mhi_dev_ch_ctx ch;
+ struct mhi_dev_gen_ctx generic;
+};
+
+/* MHI host Control and data address region */
+struct mhi_host_addr {
+ uint32_t ctrl_base_lsb;
+ uint32_t ctrl_base_msb;
+ uint32_t ctrl_limit_lsb;
+ uint32_t ctrl_limit_msb;
+ uint32_t data_base_lsb;
+ uint32_t data_base_msb;
+ uint32_t data_limit_lsb;
+ uint32_t data_limit_msb;
+};
+
+/* MHI physical and virtual address region */
+struct mhi_meminfo {
+ struct device *dev;
+ uintptr_t pa_aligned;
+ uintptr_t pa_unaligned;
+ uintptr_t va_aligned;
+ uintptr_t va_unaligned;
+ uintptr_t size;
+};
+
+struct mhi_addr {
+ uint64_t host_pa;
+ uintptr_t device_pa;
+ uintptr_t device_va;
+ uint32_t size;
+};
+
+struct mhi_interrupt_state {
+ uint32_t mask;
+ uint32_t status;
+};
+
+enum mhi_dev_channel_state {
+ MHI_DEV_CH_UNINT,
+ MHI_DEV_CH_STARTED,
+ MHI_DEV_CH_PENDING_START,
+ MHI_DEV_CH_PENDING_STOP,
+ MHI_DEV_CH_STOPPED,
+ MHI_DEV_CH_CLOSED,
+};
+
+enum mhi_dev_ch_operation {
+ MHI_DEV_OPEN_CH,
+ MHI_DEV_CLOSE_CH,
+ MHI_DEV_READ_CH,
+ MHI_DEV_READ_WR,
+ MHI_DEV_POLL,
+};
+
+struct mhi_dev_channel;
+
+struct mhi_dev_ring {
+ struct list_head list;
+ struct mhi_dev *mhi_dev;
+
+ uint32_t id;
+ uint32_t rd_offset;
+ uint32_t wr_offset;
+ uint32_t ring_size;
+
+ enum mhi_dev_ring_type type;
+ enum mhi_dev_ring_state state;
+
+ /* device virtual address location of the cached host ring ctx data */
+ union mhi_dev_ring_element_type *ring_cache;
+ /* Physical address of the cached ring copy on the device side */
+ dma_addr_t ring_cache_dma_handle;
+ /* Physical address of the host where we will write/read to/from */
+ struct mhi_addr ring_shadow;
+	/* Ring context - cmd, event or transfer ring along with its rp/wp */
+ union mhi_dev_ring_ctx *ring_ctx;
+ /* ring_ctx_shadow -> tracking ring_ctx in the host */
+ union mhi_dev_ring_ctx *ring_ctx_shadow;
+ void (*ring_cb)(struct mhi_dev *dev,
+ union mhi_dev_ring_element_type *el,
+ void *ctx);
+};
+
+static inline void mhi_dev_ring_inc_index(struct mhi_dev_ring *ring,
+ uint32_t rd_offset)
+{
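+	/* advance the cached read offset, wrapping at the ring boundary */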
+ ring->rd_offset++;
+ if (ring->rd_offset == ring->ring_size)
+ ring->rd_offset = 0;
+}
+
+/* trace information planned to be used for read/write */
+#define TRACE_DATA_MAX 128
+#define MHI_DEV_DATA_MAX 512
+
+#define MHI_DEV_MMIO_RANGE 0xc80
+
+enum cb_reason {
+ MHI_DEV_TRE_AVAILABLE = 0,
+};
+
+struct mhi_dev_client_cb_reason {
+ uint32_t ch_id;
+ enum cb_reason reason;
+};
+
+struct mhi_dev_client {
+ struct list_head list;
+ struct mhi_dev_channel *channel;
+ void (*event_trigger)(struct mhi_dev_client_cb_reason *cb);
+
+ /* mhi_dev calls are fully synchronous -- only one call may be
+ * active per client at a time for now.
+ */
+ struct mutex write_lock;
+ wait_queue_head_t wait;
+
+ /* trace logs */
+ spinlock_t tr_lock;
+ unsigned tr_head;
+ unsigned tr_tail;
+ struct mhi_dev_trace *tr_log;
+
+ /* client buffers */
+ struct mhi_dev_iov *iov;
+ uint32_t nr_iov;
+};
+
+struct mhi_dev_channel {
+ struct list_head list;
+ struct list_head clients;
+ /* synchronization for changing channel state,
+ * adding/removing clients, mhi_dev callbacks, etc
+ */
+ spinlock_t lock;
+
+ struct mhi_dev_ring *ring;
+
+ enum mhi_dev_channel_state state;
+ uint32_t ch_id;
+ enum mhi_dev_ch_ctx_type ch_type;
+ struct mutex ch_lock;
+ /* client which the current inbound/outbound message is for */
+ struct mhi_dev_client *active_client;
+
+ /* current TRE being processed */
+ uint64_t tre_loc;
+ /* current TRE size */
+ uint32_t tre_size;
+ /* tre bytes left to read/write */
+ uint32_t tre_bytes_left;
+ /* td size being read/written from/to so far */
+ uint32_t td_size;
+ bool wr_request_active;
+ bool skip_td;
+};
+
+/* MHI device structure */
+struct mhi_dev {
+ struct platform_device *pdev;
+ struct device *dev;
+ /* MHI MMIO related members */
+ phys_addr_t mmio_base_pa_addr;
+ void *mmio_base_addr;
+ phys_addr_t ipa_uc_mbox_crdb;
+ phys_addr_t ipa_uc_mbox_erdb;
+
+ uint32_t *mmio_backup;
+ struct mhi_config cfg;
+ bool mmio_initialized;
+
+ /* Host control base information */
+ struct mhi_host_addr host_addr;
+ struct mhi_addr ctrl_base;
+ struct mhi_addr data_base;
+ struct mhi_addr ch_ctx_shadow;
+ struct mhi_dev_ch_ctx *ch_ctx_cache;
+ dma_addr_t ch_ctx_cache_dma_handle;
+ struct mhi_addr ev_ctx_shadow;
+ struct mhi_dev_ch_ctx *ev_ctx_cache;
+ dma_addr_t ev_ctx_cache_dma_handle;
+
+ struct mhi_addr cmd_ctx_shadow;
+ struct mhi_dev_ch_ctx *cmd_ctx_cache;
+ dma_addr_t cmd_ctx_cache_dma_handle;
+ struct mhi_dev_ring *ring;
+ struct mhi_dev_channel *ch;
+
+ int ctrl_int;
+ int cmd_int;
+ /* CHDB and EVDB device interrupt state */
+ struct mhi_interrupt_state chdb[4];
+ struct mhi_interrupt_state evdb[4];
+
+ /* Scheduler work */
+ struct work_struct chdb_ctrl_work;
+ struct mutex mhi_lock;
+ struct mutex mhi_event_lock;
+
+ /* process a ring element */
+ struct workqueue_struct *pending_ring_wq;
+ struct work_struct pending_work;
+
+ struct list_head event_ring_list;
+ struct list_head process_ring_list;
+
+ uint32_t cmd_ring_idx;
+ uint32_t ev_ring_start;
+ uint32_t ch_ring_start;
+
+ /* IPA Handles */
+ u32 ipa_clnt_hndl[4];
+ struct workqueue_struct *ring_init_wq;
+ struct work_struct ring_init_cb_work;
+
+ /* EP PCIe registration */
+ struct ep_pcie_register_event event_reg;
+ u32 ifc_id;
+ struct ep_pcie_hw *phandle;
+
+ atomic_t write_active;
+ atomic_t is_suspended;
+ struct mutex mhi_write_test;
+ u32 mhi_ep_msi_num;
+ u32 mhi_version;
+ void *dma_cache;
+ void *read_handle;
+ void *write_handle;
+ /* Physical scratch buffer for writing control data to the host */
+ dma_addr_t cache_dma_handle;
+ /*
+ * Physical scratch buffer address used when picking host data
+ * from the host used in mhi_read()
+ */
+ dma_addr_t read_dma_handle;
+ /*
+ * Physical scratch buffer address used when writing to the host
+ * region from device used in mhi_write()
+ */
+ dma_addr_t write_dma_handle;
+};
+
+enum mhi_msg_level {
+ MHI_MSG_VERBOSE = 0x0,
+ MHI_MSG_INFO = 0x1,
+ MHI_MSG_DBG = 0x2,
+ MHI_MSG_WARNING = 0x3,
+ MHI_MSG_ERROR = 0x4,
+ MHI_MSG_CRITICAL = 0x5,
+ MHI_MSG_reserved = 0x80000000
+};
+
+extern enum mhi_msg_level mhi_msg_lvl;
+extern enum mhi_msg_level mhi_ipc_msg_lvl;
+extern void *mhi_ipc_log;
+
+#define mhi_log(_msg_lvl, _msg, ...) do { \
+ if (_msg_lvl >= mhi_msg_lvl) { \
+ pr_err("[%s] "_msg, __func__, ##__VA_ARGS__); \
+ } \
+ if (mhi_ipc_log && (_msg_lvl >= mhi_ipc_msg_lvl)) { \
+ ipc_log_string(mhi_ipc_log, \
+ "[%s] " _msg, __func__, ##__VA_ARGS__); \
+ } \
+} while (0)
+
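+/*
+ * Illustrative usage (hypothetical caller, hypothetical values): the message
+ * is printed to the kernel log when the level is at or above mhi_msg_lvl and
+ * mirrored to the IPC log when it is at or above mhi_ipc_msg_lvl.
+ *
+ *	mhi_log(MHI_MSG_INFO, "channel %d moved to state %d\n", ch_id, state);
+ */
+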
+/* SW channel client list */
+enum mhi_client_channel {
+ MHI_CLIENT_LOOPBACK_OUT = 0,
+ MHI_CLIENT_LOOPBACK_IN = 1,
+ MHI_CLIENT_SAHARA_OUT = 2,
+ MHI_CLIENT_SAHARA_IN = 3,
+ MHI_CLIENT_DIAG_OUT = 4,
+ MHI_CLIENT_DIAG_IN = 5,
+ MHI_CLIENT_SSR_OUT = 6,
+ MHI_CLIENT_SSR_IN = 7,
+ MHI_CLIENT_QDSS_OUT = 8,
+ MHI_CLIENT_QDSS_IN = 9,
+ MHI_CLIENT_EFS_OUT = 10,
+ MHI_CLIENT_EFS_IN = 11,
+ MHI_CLIENT_MBIM_OUT = 12,
+ MHI_CLIENT_MBIM_IN = 13,
+ MHI_CLIENT_QMI_OUT = 14,
+ MHI_CLIENT_QMI_IN = 15,
+ MHI_CLIENT_IP_CTRL_0_OUT = 16,
+ MHI_CLIENT_IP_CTRL_0_IN = 17,
+ MHI_CLIENT_IP_CTRL_1_OUT = 18,
+ MHI_CLIENT_IP_CTRL_1_IN = 19,
+ MHI_CLIENT_DCI_OUT = 20,
+ MHI_CLIENT_DCI_IN = 21,
+ MHI_CLIENT_IP_CTRL_3_OUT = 22,
+ MHI_CLIENT_IP_CTRL_3_IN = 23,
+ MHI_CLIENT_IP_CTRL_4_OUT = 24,
+ MHI_CLIENT_IP_CTRL_4_IN = 25,
+ MHI_CLIENT_IP_CTRL_5_OUT = 26,
+ MHI_CLIENT_IP_CTRL_5_IN = 27,
+ MHI_CLIENT_IP_CTRL_6_OUT = 28,
+ MHI_CLIENT_IP_CTRL_6_IN = 29,
+ MHI_CLIENT_IP_CTRL_7_OUT = 30,
+ MHI_CLIENT_IP_CTRL_7_IN = 31,
+ MHI_CLIENT_DUN_OUT = 32,
+ MHI_CLIENT_DUN_IN = 33,
+ MHI_CLIENT_IP_SW_0_OUT = 34,
+ MHI_CLIENT_IP_SW_0_IN = 35,
+ MHI_CLIENT_IP_SW_1_OUT = 36,
+ MHI_CLIENT_IP_SW_1_IN = 37,
+ MHI_CLIENT_IP_SW_2_OUT = 38,
+ MHI_CLIENT_IP_SW_2_IN = 39,
+ MHI_CLIENT_IP_SW_3_OUT = 40,
+ MHI_CLIENT_IP_SW_3_IN = 41,
+ MHI_CLIENT_CSVT_OUT = 42,
+ MHI_CLIENT_CSVT_IN = 43,
+ MHI_CLIENT_SMCT_OUT = 44,
+ MHI_CLIENT_SMCT_IN = 45,
+ MHI_MAX_SOFTWARE_CHANNELS = 46,
+ MHI_CLIENT_TEST_OUT = 60,
+ MHI_CLIENT_TEST_IN = 61,
+ MHI_CLIENT_RESERVED_1_LOWER = 62,
+ MHI_CLIENT_RESERVED_1_UPPER = 99,
+ MHI_CLIENT_IP_HW_0_OUT = 100,
+ MHI_CLIENT_IP_HW_0_IN = 101,
+ MHI_CLIENT_RESERVED_2_LOWER = 102,
+ MHI_CLIENT_RESERVED_2_UPPER = 127,
+ MHI_MAX_CHANNELS = 102,
+};
+
+struct mhi_dev_iov {
+ void *addr;
+ uint32_t buf_size;
+};
+
+/**
+ * mhi_dev_open_channel() - Opens a channel for a given client prior to
+ *		read/write operations.
+ * @chan_id:	Software Channel ID for the assigned client.
+ * @handle_client:	Pointer populated with the client handle on success.
+ * @event_trigger:	Callback issued to notify the client of channel events.
+ */
+int mhi_dev_open_channel(uint32_t chan_id,
+ struct mhi_dev_client **handle_client,
+ void (*event_trigger)(struct mhi_dev_client_cb_reason *cb));
+/**
+ * mhi_dev_close_channel() - Closes the channel for a given client.
+ * @handle_client:	Client handle issued during mhi_dev_open_channel.
+ */
+int mhi_dev_close_channel(struct mhi_dev_client *handle_client);
+
+/**
+ * mhi_dev_read_channel() - Channel read for a given client.
+ * @handle_client:	Client handle issued during mhi_dev_open_channel.
+ * @buf:	Pointer to the buffer used by the MHI core to copy the data
+ *		received from the Host.
+ * @buf_size:	Size of the buffer.
+ * @chain:	Indicates whether the received data is part of a chained packet.
+ */
+int mhi_dev_read_channel(struct mhi_dev_client *handle_client,
+ void *buf, uint32_t buf_size, uint32_t *chain);
+
+/**
+ * mhi_dev_write_channel() - Channel write for a given software client.
+ * @handle_client:	Client handle issued during mhi_dev_open_channel.
+ * @buf:	Pointer to the buffer used by the MHI core to copy the data
+ *		from the device to the host.
+ * @buf_size:	Size of the buffer.
+ */
+int mhi_dev_write_channel(struct mhi_dev_client *handle_client, void *buf,
+ uint32_t buf_size);
+
+/**
+ * mhi_dev_channel_isempty() - Checks if there are any pending TREs to process.
+ * @handle:	Client handle issued during mhi_dev_open_channel.
+ */
+int mhi_dev_channel_isempty(struct mhi_dev_client *handle);
+
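+/*
+ * Illustrative usage sketch, not part of the driver: a hypothetical kernel
+ * client opens a software channel, writes a buffer to the host and closes
+ * the channel. "my_event_cb", "my_buf" and "my_len" are assumed names and
+ * error handling is abbreviated.
+ *
+ *	struct mhi_dev_client *client;
+ *	int rc;
+ *
+ *	rc = mhi_dev_open_channel(MHI_CLIENT_LOOPBACK_IN, &client,
+ *					my_event_cb);
+ *	if (rc)
+ *		return rc;
+ *
+ *	rc = mhi_dev_write_channel(client, my_buf, my_len);
+ *	if (rc < 0)
+ *		pr_err("channel write failed %d\n", rc);
+ *
+ *	mhi_dev_close_channel(client);
+ */
+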
+struct mhi_dev_trace {
+ unsigned timestamp;
+ uint32_t data[TRACE_DATA_MAX];
+};
+
+/* MHI Ring related functions */
+
+/**
+ * mhi_ring_init() - Initializes the Ring id to the default un-initialized
+ * state. Once a start command is received, the respective ring
+ * is then prepared by fetching the context and updating the
+ * offset.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ * @type: Command/Event or Channel transfer ring.
+ * @id:	Index to the ring id. For command it is usually 1; event rings
+ *		may vary from 1 to 128 and channels from 1 to 256.
+ */
+void mhi_ring_init(struct mhi_dev_ring *ring,
+ enum mhi_dev_ring_type type, int id);
+
+/**
+ * mhi_ring_start() - Fetches the respective transfer ring's context from
+ * the host and updates the write offset.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ * @ctx: Transfer ring of type mhi_dev_ring_ctx.
+ * @dev: MHI device structure.
+ */
+int mhi_ring_start(struct mhi_dev_ring *ring,
+ union mhi_dev_ring_ctx *ctx, struct mhi_dev *mhi);
+
+/**
+ * mhi_dev_cache_ring() - Cache the data for the corresponding ring locally.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ * @wr_offset:	Cache the TREs up to the write offset value.
+ */
+int mhi_dev_cache_ring(struct mhi_dev_ring *ring, uint32_t wr_offset);
+
+/**
+ * mhi_dev_update_wr_offset() - Check for any updates in the write offset.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ */
+int mhi_dev_update_wr_offset(struct mhi_dev_ring *ring);
+
+/**
+ * mhi_dev_process_ring() - Update the Write pointer, fetch the ring elements
+ *		and invoke the client's callback.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ */
+int mhi_dev_process_ring(struct mhi_dev_ring *ring);
+
+/**
+ * mhi_dev_process_ring_element() - Fetch the ring elements and invoke the
+ *		client's callback.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ * @offset: Offset index into the respective ring's cache element.
+ */
+int mhi_dev_process_ring_element(struct mhi_dev_ring *ring, uint32_t offset);
+
+/**
+ * mhi_dev_add_element() - Copy the element to the respective transfer ring's
+ *		read pointer and increment the index.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ * @element: Transfer ring element to be copied to the host memory.
+ */
+int mhi_dev_add_element(struct mhi_dev_ring *ring,
+ union mhi_dev_ring_element_type *element);
+
+/**
+ * mhi_transfer_device_to_host() - memcpy equivalent API to transfer data
+ * from device to the host.
+ * @dst_pa: Physical destination address.
+ * @src: Source virtual address.
+ * @len:	Number of bytes to be transferred.
+ * @mhi: MHI dev structure.
+ */
+int mhi_transfer_device_to_host(uint64_t dst_pa, void *src, uint32_t len,
+ struct mhi_dev *mhi);
+
+/**
+ * mhi_transfer_host_to_device() - memcpy equivalent API to transfer data
+ *		from host to the device.
+ * @device:	Destination virtual address on the device.
+ * @src_pa:	Source physical address.
+ * @len:	Number of bytes to be transferred.
+ * @mhi: MHI dev structure.
+ */
+int mhi_transfer_host_to_device(void *device, uint64_t src_pa, uint32_t len,
+ struct mhi_dev *mhi);
+
+/**
+ * mhi_dev_write_to_host() - memcpy equivalent API to transfer data
+ * from device to host.
+ * @host: Host and device address details.
+ * @buf: Data buffer that needs to be written to the host.
+ * @size:	Data buffer size.
+ * @mhi:	MHI device structure.
+ */
+void mhi_dev_write_to_host(struct mhi_addr *host, void *buf, size_t size,
+ struct mhi_dev *mhi);
+
+/**
+ * mhi_dev_read_from_host() - memcpy equivalent API to transfer data
+ * from host to device.
+ * @dst:	Host and device address details.
+ * @buf: Data buffer that needs to be read from the host.
+ * @size: Data buffer size.
+ */
+void mhi_dev_read_from_host(struct mhi_addr *dst, dma_addr_t buf, size_t size);
+
+/**
+ * mhi_ring_set_cb() - Sets the callback invoked for each ring element
+ *		processed on the given ring.
+ * @ring:	Ring for the respective context - Channel/Event/Command.
+ * @ring_cb:	Callback invoked with the MHI device, ring element and context.
+ */
+void mhi_ring_set_cb(struct mhi_dev_ring *ring,
+ void (*ring_cb)(struct mhi_dev *dev,
+ union mhi_dev_ring_element_type *el, void *ctx));
+
+/**
+ * mhi_ring_set_state() - Sets internal state of the ring for tracking whether
+ * a ring is being processed, idle or uninitialized.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ * @state: state of type mhi_dev_ring_state.
+ */
+void mhi_ring_set_state(struct mhi_dev_ring *ring,
+ enum mhi_dev_ring_state state);
+
+/**
+ * mhi_ring_get_state() - Obtains the internal state of the ring.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ */
+enum mhi_dev_ring_state mhi_ring_get_state(struct mhi_dev_ring *ring);
+
+/* MMIO related functions */
+
+/**
+ * mhi_dev_mmio_read() - Generic MHI MMIO register read API.
+ * @dev: MHI device structure.
+ * @offset: MHI address offset from base.
+ * @reg_value:	Pointer to which the register value is stored.
+ */
+int mhi_dev_mmio_read(struct mhi_dev *dev, uint32_t offset,
+ uint32_t *reg_value);
+
+/**
+ * mhi_dev_mmio_write() - Generic MHI MMIO register write API.
+ * @dev: MHI device structure.
+ * @offset: MHI address offset from base.
+ * @val: Value to be written to the register offset.
+ */
+int mhi_dev_mmio_write(struct mhi_dev *dev, uint32_t offset,
+ uint32_t val);
+
+/**
+ * mhi_dev_mmio_masked_write() - Generic MHI MMIO register write masked API.
+ * @dev: MHI device structure.
+ * @offset: MHI address offset from base.
+ * @mask: Register field mask.
+ * @shift: Register field mask shift value.
+ * @val: Value to be written to the register offset.
+ */
+int mhi_dev_mmio_masked_write(struct mhi_dev *dev, uint32_t offset,
+ uint32_t mask, uint32_t shift,
+ uint32_t val);
+/**
+ * mhi_dev_mmio_masked_read() - Generic MHI MMIO register read masked API.
+ * @dev: MHI device structure.
+ * @offset: MHI address offset from base.
+ * @mask: Register field mask.
+ * @shift: Register field mask shift value.
+ * @reg_val:	Pointer to which the register value is stored.
+ */
+int mhi_dev_mmio_masked_read(struct mhi_dev *dev, uint32_t offset,
+ uint32_t mask, uint32_t shift,
+ uint32_t *reg_val);
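+
+/*
+ * Illustrative sketch (not part of the driver): reading the MHISTATE field
+ * from the MHISTATUS register with the masked read helper, using the
+ * mask/shift definitions from mhi_hwio.h. "mhi" is an assumed
+ * struct mhi_dev pointer; "rc" and "state" are local variables.
+ *
+ *	uint32_t state;
+ *	int rc;
+ *
+ *	rc = mhi_dev_mmio_masked_read(mhi, MHISTATUS,
+ *			MHISTATUS_MHISTATE_MASK, MHISTATUS_MHISTATE_SHIFT,
+ *			&state);
+ *	if (rc)
+ *		pr_err("Failed to read MHISTATE\n");
+ */
+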
+/**
+ * mhi_dev_mmio_enable_ctrl_interrupt() - Enable Control interrupt.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_enable_ctrl_interrupt(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_disable_ctrl_interrupt() - Disable Control interrupt.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_disable_ctrl_interrupt(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_read_ctrl_status_interrupt() - Read Control interrupt status.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_read_ctrl_status_interrupt(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_enable_cmdb_interrupt() - Enable Command doorbell interrupt.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_enable_cmdb_interrupt(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_disable_cmdb_interrupt() - Disable Command doorbell interrupt.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_disable_cmdb_interrupt(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_read_cmdb_status_interrupt() - Read Command doorbell status.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_read_cmdb_status_interrupt(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_enable_chdb_a7() - Enable Channel doorbell for a given
+ * channel id.
+ * @dev: MHI device structure.
+ * @chdb_id: Channel id number.
+ */
+int mhi_dev_mmio_enable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id);
+/**
+ * mhi_dev_mmio_disable_chdb_a7() - Disable Channel doorbell for a given
+ * channel id.
+ * @dev: MHI device structure.
+ * @chdb_id: Channel id number.
+ */
+int mhi_dev_mmio_disable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id);
+
+/**
+ * mhi_dev_mmio_enable_erdb_a7() - Enable Event ring doorbell for a given
+ * event ring id.
+ * @dev: MHI device structure.
+ * @erdb_id: Event ring id number.
+ */
+int mhi_dev_mmio_enable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id);
+
+/**
+ * mhi_dev_mmio_disable_erdb_a7() - Disable Event ring doorbell for a given
+ * event ring id.
+ * @dev: MHI device structure.
+ * @erdb_id: Event ring id number.
+ */
+int mhi_dev_mmio_disable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id);
+
+/**
+ * mhi_dev_mmio_enable_chdb_interrupts() - Enable all Channel doorbell
+ * interrupts.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_enable_chdb_interrupts(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_mask_chdb_interrupts() - Mask all Channel doorbell
+ * interrupts.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_mask_chdb_interrupts(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_read_chdb_status_interrupts() - Read all Channel doorbell
+ * interrupts.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_read_chdb_status_interrupts(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_enable_erdb_interrupts() - Enable all Event doorbell
+ * interrupts.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_enable_erdb_interrupts(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_mask_erdb_interrupts() - Mask all Event doorbell
+ * interrupts.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_mask_erdb_interrupts(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_read_erdb_status_interrupts() - Read all Event doorbell
+ * interrupts.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_read_erdb_status_interrupts(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_clear_interrupts() - Clear all doorbell interrupts.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_clear_interrupts(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_get_chc_base() - Fetch the Channel ring context base address.
+ * @dev:	MHI device structure.
+ */
+int mhi_dev_mmio_get_chc_base(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_get_erc_base() - Fetch the Event ring context base address.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_get_erc_base(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_get_crc_base() - Fetch the Command ring context base address.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_get_crc_base(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_get_ch_db() - Fetch the Write offset of the Channel ring ID.
+ * @ring:	Ring for the respective channel.
+ * @wr_offset:	Pointer to store the doorbell write offset.
+ */
+int mhi_dev_mmio_get_ch_db(struct mhi_dev_ring *ring, uint64_t *wr_offset);
+
+/**
+ * mhi_dev_mmio_get_erc_db() - Fetch the Write offset of the Event ring ID.
+ * @ring:	Ring for the respective event ring.
+ * @wr_offset:	Pointer to store the doorbell write offset.
+ */
+int mhi_dev_mmio_get_erc_db(struct mhi_dev_ring *ring, uint64_t *wr_offset);
+
+/**
+ * mhi_dev_mmio_get_cmd_db() - Fetch the Write offset of the Command ring ID.
+ * @ring:	Command ring.
+ * @wr_offset:	Pointer to store the doorbell write offset.
+ */
+int mhi_dev_mmio_get_cmd_db(struct mhi_dev_ring *ring, uint64_t *wr_offset);
+
+/**
+ * mhi_dev_mmio_set_env() - Write the Execution Environment.
+ * @dev:	MHI device structure.
+ * @value:	Value of the Execution Environment.
+ */
+int mhi_dev_mmio_set_env(struct mhi_dev *dev, uint32_t value);
+
+/**
+ * mhi_dev_mmio_reset() - Reset the MMIO done as part of initialization.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_reset(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_get_mhi_addr() - Fetches the Data and Control region from the Host.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_get_mhi_addr(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_mmio_get_mhi_state() - Fetches the MHI state such as M0/M1/M2/M3.
+ * @dev: MHI device structure.
+ * @state: Pointer of type mhi_dev_state
+ */
+int mhi_dev_mmio_get_mhi_state(struct mhi_dev *dev, enum mhi_dev_state *state);
+
+/**
+ * mhi_dev_mmio_init() - Initializes the MMIO and reads the Number of event
+ *		rings, supported number of channels, and offsets to the Channel
+ * and Event doorbell from the host.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_mmio_init(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_update_ner() - Update the number of event rings (NER) programmed by
+ * the host.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_update_ner(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_restore_mmio() - Restores the MMIO when MHI device comes out of M3.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_restore_mmio(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_backup_mmio() - Backup MMIO before a MHI transition to M3.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_backup_mmio(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_dump_mmio() - Memory dump of the MMIO region for debug.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_dump_mmio(struct mhi_dev *dev);
+
+/**
+ * mhi_dev_config_outbound_iatu() - Configure Outbound Address translation
+ * unit between device and host to map the Data and Control
+ * information.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_config_outbound_iatu(struct mhi_dev *mhi);
+
+/**
+ * mhi_dev_send_state_change_event() - Send state change event to the host
+ * such as M0/M1/M2/M3.
+ * @dev: MHI device structure.
+ * @state: MHI state of type mhi_dev_state
+ */
+int mhi_dev_send_state_change_event(struct mhi_dev *mhi,
+ enum mhi_dev_state state);
+/**
+ * mhi_dev_send_ee_event() - Send Execution environment state change
+ *		event to the host.
+ * @dev:	MHI device structure.
+ * @exec_env:	Execution environment of type mhi_dev_execenv.
+ */
+int mhi_dev_send_ee_event(struct mhi_dev *mhi,
+ enum mhi_dev_execenv exec_env);
+/**
+ * mhi_dev_syserr() - System error when unexpected events are received.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_syserr(struct mhi_dev *mhi);
+
+/**
+ * mhi_dev_suspend() - MHI device suspend to stop channel processing at the
+ *		Transfer ring boundary and update the channel state to suspended.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_suspend(struct mhi_dev *mhi);
+
+/**
+ * mhi_dev_resume() - MHI device resume to update the channel state to running.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_resume(struct mhi_dev *mhi);
+
+/**
+ * mhi_dev_trigger_hw_acc_wakeup() - Notify State machine there is HW
+ *		accelerated data to be sent and prevent MHI suspend.
+ * @dev: MHI device structure.
+ */
+int mhi_dev_trigger_hw_acc_wakeup(struct mhi_dev *mhi);
+
+/**
+ * mhi_pcie_config_db_routing() - Configure Doorbell for Event and Channel
+ * context with IPA when performing a MHI resume.
+ * @dev: MHI device structure.
+ */
+int mhi_pcie_config_db_routing(struct mhi_dev *mhi);
+
+/**
+ * mhi_uci_init() - Initializes the User control interface (UCI) which
+ * exposes device nodes for the supported MHI software
+ * channels.
+ */
+int mhi_uci_init(void);
+
+void mhi_dev_notify_a7_event(struct mhi_dev *mhi);
+
+#endif /* __MHI_H */
diff --git a/drivers/platform/msm/mhi_dev/mhi_hwio.h b/drivers/platform/msm/mhi_dev/mhi_hwio.h
new file mode 100644
index 000000000000..bcc4095575b3
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/mhi_hwio.h
@@ -0,0 +1,191 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MHI_HWIO_
+#define _MHI_HWIO_
+
+/* MHI register definition */
+#define MHI_CTRL_INT_STATUS_A7 (0x0004)
+#define MHI_CTRL_INT_STATUS_A7_STATUS_MASK 0xffffffff
+#define MHI_CTRL_INT_STATUS_A7_STATUS_SHIFT 0x0
+
+#define MHI_CHDB_INT_STATUS_A7_n(n) (0x0028 + 0x4 * (n))
+#define MHI_CHDB_INT_STATUS_A7_n_STATUS_MASK 0xffffffff
+#define MHI_CHDB_INT_STATUS_A7_n_STATUS_SHIFT 0x0
+
+#define MHI_ERDB_INT_STATUS_A7_n(n) (0x0038 + 0x4 * (n))
+#define MHI_ERDB_INT_STATUS_A7_n_STATUS_MASK 0xffffffff
+#define MHI_ERDB_INT_STATUS_A7_n_STATUS_SHIFT 0x0
+
+#define MHI_CTRL_INT_CLEAR_A7 (0x004C)
+#define MHI_CTRL_INT_CLEAR_A7_CLEAR_MASK 0xffffffff
+#define MHI_CTRL_INT_CLEAR_A7_CLEAR_SHIFT 0x0
+#define MHI_CTRL_INT_CRDB_CLEAR BIT(1)
+#define MHI_CTRL_INT_CRDB_MHICTRL_CLEAR BIT(0)
+
+#define MHI_CHDB_INT_CLEAR_A7_n(n) (0x0070 + 0x4 * (n))
+#define MHI_CHDB_INT_CLEAR_A7_n_CLEAR_MASK 0xffffffff
+#define MHI_CHDB_INT_CLEAR_A7_n_CLEAR_SHIFT 0x0
+
+#define MHI_ERDB_INT_CLEAR_A7_n(n) (0x0080 + 0x4 * (n))
+#define MHI_ERDB_INT_CLEAR_A7_n_CLEAR_MASK 0xffffffff
+#define MHI_ERDB_INT_CLEAR_A7_n_CLEAR_SHIFT 0x0
+
+#define MHI_CTRL_INT_MASK_A7 (0x0094)
+#define MHI_CTRL_INT_MASK_A7_MASK_MASK 0x3
+#define MHI_CTRL_INT_MASK_A7_MASK_SHIFT 0x0
+#define MHI_CTRL_MHICTRL_MASK BIT(0)
+#define MHI_CTRL_MHICTRL_SHFT 0
+#define MHI_CTRL_CRDB_MASK BIT(1)
+#define MHI_CTRL_CRDB_SHFT 1
+
+#define MHI_CHDB_INT_MASK_A7_n(n) (0x00B8 + 0x4 * (n))
+#define MHI_CHDB_INT_MASK_A7_n_MASK_MASK 0xffffffff
+#define MHI_CHDB_INT_MASK_A7_n_MASK_SHIFT 0x0
+
+#define MHI_ERDB_INT_MASK_A7_n(n) (0x00C8 + 0x4 * (n))
+#define MHI_ERDB_INT_MASK_A7_n_MASK_MASK 0xffffffff
+#define MHI_ERDB_INT_MASK_A7_n_MASK_SHIFT 0x0
+
+#define MHIREGLEN (0x0100)
+#define MHIREGLEN_MHIREGLEN_MASK 0xffffffff
+#define MHIREGLEN_MHIREGLEN_SHIFT 0x0
+
+#define MHIVER (0x0108)
+#define MHIVER_MHIVER_MASK 0xffffffff
+#define MHIVER_MHIVER_SHIFT 0x0
+
+#define MHICFG (0x0110)
+#define MHICFG_RESERVED_BITS31_24_MASK 0xff000000
+#define MHICFG_RESERVED_BITS31_24_SHIFT 0x18
+#define MHICFG_NER_MASK 0xff0000
+#define MHICFG_NER_SHIFT 0x10
+#define MHICFG_RESERVED_BITS15_8_MASK 0xff00
+#define MHICFG_RESERVED_BITS15_8_SHIFT 0x8
+#define MHICFG_NCH_MASK 0xff
+#define MHICFG_NCH_SHIFT 0x0
+
+#define CHDBOFF (0x0118)
+#define CHDBOFF_CHDBOFF_MASK 0xffffffff
+#define CHDBOFF_CHDBOFF_SHIFT 0x0
+
+#define ERDBOFF (0x0120)
+#define ERDBOFF_ERDBOFF_MASK 0xffffffff
+#define ERDBOFF_ERDBOFF_SHIFT 0x0
+
+#define BHIOFF (0x0128)
+#define BHIOFF_BHIOFF_MASK 0xffffffff
+#define BHIOFF_BHIOFF_SHIFT 0x0
+
+#define DEBUGOFF (0x0130)
+#define DEBUGOFF_DEBUGOFF_MASK 0xffffffff
+#define DEBUGOFF_DEBUGOFF_SHIFT 0x0
+
+#define MHICTRL (0x0138)
+#define MHICTRL_MHISTATE_MASK 0x0000FF00
+#define MHICTRL_MHISTATE_SHIFT 0x8
+#define MHICTRL_RESET_MASK 0x2
+#define MHICTRL_RESET_SHIFT 0x1
+
+#define MHISTATUS (0x0148)
+#define MHISTATUS_MHISTATE_MASK 0x0000ff00
+#define MHISTATUS_MHISTATE_SHIFT 0x8
+#define MHISTATUS_SYSERR_MASK 0x4
+#define MHISTATUS_SYSERR_SHIFT 0x2
+#define MHISTATUS_READY_MASK 0x1
+#define MHISTATUS_READY_SHIFT 0x0
+
+#define CCABAP_LOWER (0x0158)
+#define CCABAP_LOWER_CCABAP_LOWER_MASK 0xffffffff
+#define CCABAP_LOWER_CCABAP_LOWER_SHIFT 0x0
+
+#define CCABAP_HIGHER (0x015C)
+#define CCABAP_HIGHER_CCABAP_HIGHER_MASK 0xffffffff
+#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT 0x0
+
+#define ECABAP_LOWER (0x0160)
+#define ECABAP_LOWER_ECABAP_LOWER_MASK 0xffffffff
+#define ECABAP_LOWER_ECABAP_LOWER_SHIFT 0x0
+
+#define ECABAP_HIGHER (0x0164)
+#define ECABAP_HIGHER_ECABAP_HIGHER_MASK 0xffffffff
+#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT 0x0
+
+#define CRCBAP_LOWER (0x0168)
+#define CRCBAP_LOWER_CRCBAP_LOWER_MASK 0xffffffff
+#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT 0x0
+
+#define CRCBAP_HIGHER (0x016C)
+#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK 0xffffffff
+#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT 0x0
+
+#define CRDB_LOWER (0x0170)
+#define CRDB_LOWER_CRDB_LOWER_MASK 0xffffffff
+#define CRDB_LOWER_CRDB_LOWER_SHIFT 0x0
+
+#define CRDB_HIGHER (0x0174)
+#define CRDB_HIGHER_CRDB_HIGHER_MASK 0xffffffff
+#define CRDB_HIGHER_CRDB_HIGHER_SHIFT 0x0
+
+#define MHICTRLBASE_LOWER (0x0180)
+#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK 0xffffffff
+#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT 0x0
+
+#define MHICTRLBASE_HIGHER (0x0184)
+#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK 0xffffffff
+#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT 0x0
+
+#define MHICTRLLIMIT_LOWER (0x0188)
+#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK 0xffffffff
+#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT 0x0
+
+#define MHICTRLLIMIT_HIGHER (0x018C)
+#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK 0xffffffff
+#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT 0x0
+
+#define MHIDATABASE_LOWER (0x0198)
+#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK 0xffffffff
+#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT 0x0
+
+#define MHIDATABASE_HIGHER (0x019C)
+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK 0xffffffff
+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT 0x0
+
+#define MHIDATALIMIT_LOWER (0x01A0)
+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK 0xffffffff
+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT 0x0
+
+#define MHIDATALIMIT_HIGHER (0x01A4)
+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK 0xffffffff
+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT 0x0
+
+#define CHDB_LOWER_n(n) (0x0400 + 0x8 * (n))
+#define CHDB_LOWER_n_CHDB_LOWER_MASK 0xffffffff
+#define CHDB_LOWER_n_CHDB_LOWER_SHIFT 0x0
+
+#define CHDB_HIGHER_n(n) (0x0404 + 0x8 * (n))
+#define CHDB_HIGHER_n_CHDB_HIGHER_MASK 0xffffffff
+#define CHDB_HIGHER_n_CHDB_HIGHER_SHIFT 0x0
+
+#define ERDB_LOWER_n(n) (0x0800 + 0x8 * (n))
+#define ERDB_LOWER_n_ERDB_LOWER_MASK 0xffffffff
+#define ERDB_LOWER_n_ERDB_LOWER_SHIFT 0x0
+
+#define ERDB_HIGHER_n(n) (0x0804 + 0x8 * (n))
+#define ERDB_HIGHER_n_ERDB_HIGHER_MASK 0xffffffff
+#define ERDB_HIGHER_n_ERDB_HIGHER_SHIFT 0x0
+
+#define BHI_EXECENV (0x228)
+#define BHI_EXECENV_MASK 0xFFFFFFFF
+#define BHI_EXECENV_SHIFT 0
+
+#endif
diff --git a/drivers/platform/msm/mhi_dev/mhi_mmio.c b/drivers/platform/msm/mhi_dev/mhi_mmio.c
new file mode 100644
index 000000000000..12e4a0d4851c
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/mhi_mmio.c
@@ -0,0 +1,999 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+
+#include "mhi.h"
+#include "mhi_hwio.h"
+
+int mhi_dev_mmio_read(struct mhi_dev *dev, uint32_t offset,
+ uint32_t *reg_value)
+{
+ void __iomem *addr;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ addr = dev->mmio_base_addr + offset;
+
+ *reg_value = readl_relaxed(addr);
+
+ pr_debug("reg read:0x%x with value 0x%x\n", offset, *reg_value);
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_read);
+
+int mhi_dev_mmio_write(struct mhi_dev *dev, uint32_t offset,
+ uint32_t val)
+{
+ void __iomem *addr;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ addr = dev->mmio_base_addr + offset;
+
+ writel_relaxed(val, addr);
+
+ pr_debug("reg write:0x%x with value 0x%x\n", offset, val);
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_write);
+
+int mhi_dev_mmio_masked_write(struct mhi_dev *dev, uint32_t offset,
+ uint32_t mask, uint32_t shift,
+ uint32_t val)
+{
+ uint32_t reg_val;
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_read(dev, offset, &reg_val);
+ if (rc) {
+		pr_err("Read failed for offset:0x%x\n", offset);
+ return rc;
+ }
+
+ reg_val &= ~mask;
+ reg_val |= ((val << shift) & mask);
+
+ rc = mhi_dev_mmio_write(dev, offset, reg_val);
+ if (rc) {
+		pr_err("Write failed for offset:0x%x\n", offset);
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_masked_write);
+
+int mhi_dev_mmio_masked_read(struct mhi_dev *dev, uint32_t offset,
+ uint32_t mask, uint32_t shift,
+ uint32_t *reg_val)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_read(dev, offset, reg_val);
+ if (rc) {
+		pr_err("Read failed for offset:0x%x\n", offset);
+ return rc;
+ }
+
+ *reg_val &= mask;
+ *reg_val >>= shift;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_masked_read);
+
+static int mhi_dev_mmio_mask_set_chdb_int_a7(struct mhi_dev *dev,
+ uint32_t chdb_id, bool enable)
+{
+ uint32_t chid_mask, chid_idx, chid_shft, val = 0;
+ int rc = 0;
+
+ chid_shft = chdb_id%32;
+ chid_mask = (1 << chid_shft);
+ chid_idx = chdb_id/32;
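+	/* e.g. channel 37 maps to MHI_CHDB_INT_MASK_A7_n(1), bit 5 */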
+
+ if (enable)
+ val = 1;
+
+ rc = mhi_dev_mmio_masked_write(dev, MHI_CHDB_INT_MASK_A7_n(chid_idx),
+ chid_mask, chid_shft, val);
+ if (rc) {
+ pr_err("Write on channel db interrupt failed\n");
+ return rc;
+ }
+
+ return rc;
+}
+
+int mhi_dev_mmio_enable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_mask_set_chdb_int_a7(dev, chdb_id, true);
+ if (rc) {
+ pr_err("Setting channel DB failed for ch_id:%d\n", chdb_id);
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_enable_chdb_a7);
+
+int mhi_dev_mmio_disable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_mask_set_chdb_int_a7(dev, chdb_id, false);
+ if (rc) {
+ pr_err("Disabling channel DB failed for ch_id:%d\n", chdb_id);
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_disable_chdb_a7);
+
+static int mhi_dev_mmio_set_erdb_int_a7(struct mhi_dev *dev,
+ uint32_t erdb_ch_id, bool enable)
+{
+ uint32_t erdb_id_shft, erdb_id_mask, erdb_id_idx, val = 0;
+ int rc = 0;
+
+ erdb_id_shft = erdb_ch_id%32;
+ erdb_id_mask = (1 << erdb_id_shft);
+ erdb_id_idx = erdb_ch_id/32;
+
+ if (enable)
+ val = 1;
+
+ rc = mhi_dev_mmio_masked_write(dev,
+ MHI_ERDB_INT_MASK_A7_n(erdb_id_idx),
+ erdb_id_mask, erdb_id_shft, val);
+ if (rc) {
+ pr_err("Error setting event ring db for %d\n", erdb_ch_id);
+ return rc;
+ }
+
+ return rc;
+}
+
+int mhi_dev_mmio_enable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_set_erdb_int_a7(dev, erdb_id, true);
+ if (rc) {
+ pr_err("Error setting event ring db for %d\n", erdb_id);
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_enable_erdb_a7);
+
+int mhi_dev_mmio_disable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_set_erdb_int_a7(dev, erdb_id, false);
+ if (rc) {
+ pr_err("Error disabling event ring db for %d\n", erdb_id);
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_disable_erdb_a7);
+
+int mhi_dev_mmio_get_mhi_state(struct mhi_dev *dev, enum mhi_dev_state *state)
+{
+ uint32_t reg_value = 0;
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_masked_read(dev, MHICTRL,
+ MHISTATUS_MHISTATE_MASK, MHISTATUS_MHISTATE_SHIFT, state);
+ if (rc)
+ return rc;
+
+ rc = mhi_dev_mmio_read(dev, MHICTRL, &reg_value);
+ if (rc)
+ return rc;
+
+ pr_debug("MHICTRL is 0x%x\n", reg_value);
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_get_mhi_state);
+
+static int mhi_dev_mmio_set_chdb_interrupts(struct mhi_dev *dev, bool enable)
+{
+ uint32_t mask = 0, i = 0;
+ int rc = 0;
+
+ if (enable)
+ mask = MHI_CHDB_INT_MASK_A7_n_MASK_MASK;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
+ rc = mhi_dev_mmio_write(dev,
+ MHI_CHDB_INT_MASK_A7_n(i), mask);
+ if (rc) {
+ pr_err("Set channel db on row:%d failed\n", i);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+int mhi_dev_mmio_enable_chdb_interrupts(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_set_chdb_interrupts(dev, true);
+ if (rc) {
+ pr_err("Error setting channel db interrupts\n");
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_enable_chdb_interrupts);
+
+int mhi_dev_mmio_mask_chdb_interrupts(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_set_chdb_interrupts(dev, false);
+ if (rc) {
+ pr_err("Error masking channel db interrupts\n");
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_mask_chdb_interrupts);
+
+int mhi_dev_mmio_read_chdb_status_interrupts(struct mhi_dev *dev)
+{
+ uint32_t i;
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
+ rc = mhi_dev_mmio_read(dev,
+ MHI_CHDB_INT_STATUS_A7_n(i), &dev->chdb[i].status);
+ if (rc) {
+ pr_err("Error reading chdb status for row:%d\n", i);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_read_chdb_status_interrupts);
+
+static int mhi_dev_mmio_set_erdb_interrupts(struct mhi_dev *dev, bool enable)
+{
+ uint32_t mask = 0, i;
+ int rc = 0;
+
+ if (enable)
+ mask = MHI_ERDB_INT_MASK_A7_n_MASK_MASK;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
+ rc = mhi_dev_mmio_write(dev,
+ MHI_ERDB_INT_MASK_A7_n(i), mask);
+ if (rc) {
+			pr_err("Error setting erdb mask for row:%d\n", i);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+int mhi_dev_mmio_enable_erdb_interrupts(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_set_erdb_interrupts(dev, true);
+ if (rc) {
+ pr_err("Error enabling all erdb interrupts\n");
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_enable_erdb_interrupts);
+
+int mhi_dev_mmio_mask_erdb_interrupts(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_set_erdb_interrupts(dev, false);
+ if (rc) {
+ pr_err("Error masking all event db interrupt\n");
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_mask_erdb_interrupts);
+
+int mhi_dev_mmio_read_erdb_status_interrupts(struct mhi_dev *dev)
+{
+ uint32_t i;
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
+ rc = mhi_dev_mmio_read(dev, MHI_ERDB_INT_STATUS_A7_n(i),
+ &dev->evdb[i].status);
+ if (rc) {
+			pr_err("Error reading erdb status for row:%d\n", i);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_read_erdb_status_interrupts);
+
+int mhi_dev_mmio_enable_ctrl_interrupt(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_masked_write(dev, MHI_CTRL_INT_MASK_A7,
+ MHI_CTRL_MHICTRL_MASK, MHI_CTRL_MHICTRL_SHFT, 1);
+ if (rc) {
+ pr_err("Error enabling control interrupt\n");
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_enable_ctrl_interrupt);
+
+int mhi_dev_mmio_disable_ctrl_interrupt(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_masked_write(dev, MHI_CTRL_INT_MASK_A7,
+ MHI_CTRL_MHICTRL_MASK, MHI_CTRL_MHICTRL_SHFT, 0);
+ if (rc) {
+ pr_err("Error disabling control interrupt\n");
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_disable_ctrl_interrupt);
+
+int mhi_dev_mmio_read_ctrl_status_interrupt(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_read(dev, MHI_CTRL_INT_STATUS_A7, &dev->ctrl_int);
+ if (rc) {
+ pr_err("Error reading control status interrupt\n");
+ return rc;
+ }
+
+ dev->ctrl_int &= 0x1;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_read_ctrl_status_interrupt);
+
+int mhi_dev_mmio_read_cmdb_status_interrupt(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_read(dev, MHI_CTRL_INT_STATUS_A7, &dev->cmd_int);
+ if (rc) {
+ pr_err("Error reading cmd status register\n");
+ return rc;
+ }
+
+ dev->cmd_int &= 0x10;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_read_cmdb_status_interrupt);
+
+int mhi_dev_mmio_enable_cmdb_interrupt(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_masked_write(dev, MHI_CTRL_INT_MASK_A7,
+ MHI_CTRL_CRDB_MASK, MHI_CTRL_CRDB_SHFT, 1);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_enable_cmdb_interrupt);
+
+int mhi_dev_mmio_disable_cmdb_interrupt(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_masked_write(dev, MHI_CTRL_INT_MASK_A7,
+ MHI_CTRL_CRDB_MASK, MHI_CTRL_CRDB_SHFT, 0);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_disable_cmdb_interrupt);
+
+static void mhi_dev_mmio_mask_interrupts(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ rc = mhi_dev_mmio_disable_ctrl_interrupt(dev);
+ if (rc) {
+ pr_err("Error disabling control interrupt\n");
+ return;
+ }
+
+ rc = mhi_dev_mmio_disable_cmdb_interrupt(dev);
+ if (rc) {
+ pr_err("Error disabling command db interrupt\n");
+ return;
+ }
+
+ rc = mhi_dev_mmio_mask_chdb_interrupts(dev);
+ if (rc) {
+ pr_err("Error masking all channel db interrupts\n");
+ return;
+ }
+
+ rc = mhi_dev_mmio_mask_erdb_interrupts(dev);
+ if (rc) {
+ pr_err("Error masking all erdb interrupts\n");
+ return;
+ }
+}
+
+int mhi_dev_mmio_clear_interrupts(struct mhi_dev *dev)
+{
+ uint32_t i = 0;
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
+ rc = mhi_dev_mmio_write(dev, MHI_CHDB_INT_CLEAR_A7_n(i),
+ MHI_CHDB_INT_CLEAR_A7_n_CLEAR_MASK);
+ if (rc)
+ return rc;
+ }
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
+ rc = mhi_dev_mmio_write(dev, MHI_ERDB_INT_CLEAR_A7_n(i),
+ MHI_ERDB_INT_CLEAR_A7_n_CLEAR_MASK);
+ if (rc)
+ return rc;
+ }
+
+ rc = mhi_dev_mmio_write(dev, MHI_CTRL_INT_CLEAR_A7,
+ MHI_CTRL_INT_CRDB_CLEAR);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_clear_interrupts);
+
+int mhi_dev_mmio_get_chc_base(struct mhi_dev *dev)
+{
+ uint32_t ccabap_value = 0, offset = 0;
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_read(dev, CCABAP_HIGHER, &ccabap_value);
+ if (rc)
+ return rc;
+
+ dev->ch_ctx_shadow.host_pa = ccabap_value;
+ dev->ch_ctx_shadow.host_pa <<= 32;
+
+ rc = mhi_dev_mmio_read(dev, CCABAP_LOWER, &ccabap_value);
+ if (rc)
+ return rc;
+
+ dev->ch_ctx_shadow.host_pa |= ccabap_value;
+
+ offset = (uint32_t)(dev->ch_ctx_shadow.host_pa -
+ dev->ctrl_base.host_pa);
+
+ dev->ch_ctx_shadow.device_pa = dev->ctrl_base.device_pa + offset;
+ dev->ch_ctx_shadow.device_va = dev->ctrl_base.device_va + offset;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_get_chc_base);
+
+int mhi_dev_mmio_get_erc_base(struct mhi_dev *dev)
+{
+ uint32_t ecabap_value = 0, offset = 0;
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_read(dev, ECABAP_HIGHER, &ecabap_value);
+ if (rc)
+ return rc;
+
+ dev->ev_ctx_shadow.host_pa = ecabap_value;
+ dev->ev_ctx_shadow.host_pa <<= 32;
+
+ rc = mhi_dev_mmio_read(dev, ECABAP_LOWER, &ecabap_value);
+ if (rc)
+ return rc;
+
+ dev->ev_ctx_shadow.host_pa |= ecabap_value;
+
+ offset = (uint32_t)(dev->ev_ctx_shadow.host_pa -
+ dev->ctrl_base.host_pa);
+
+ dev->ev_ctx_shadow.device_pa = dev->ctrl_base.device_pa + offset;
+ dev->ev_ctx_shadow.device_va = dev->ctrl_base.device_va + offset;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_get_erc_base);
+
+int mhi_dev_mmio_get_crc_base(struct mhi_dev *dev)
+{
+ uint32_t crcbap_value = 0, offset = 0;
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_read(dev, CRCBAP_HIGHER, &crcbap_value);
+ if (rc)
+ return rc;
+
+ dev->cmd_ctx_shadow.host_pa = crcbap_value;
+ dev->cmd_ctx_shadow.host_pa <<= 32;
+
+ rc = mhi_dev_mmio_read(dev, CRCBAP_LOWER, &crcbap_value);
+ if (rc)
+ return rc;
+
+ dev->cmd_ctx_shadow.host_pa |= crcbap_value;
+
+ offset = (uint32_t)(dev->cmd_ctx_shadow.host_pa -
+ dev->ctrl_base.host_pa);
+
+ dev->cmd_ctx_shadow.device_pa = dev->ctrl_base.device_pa + offset;
+ dev->cmd_ctx_shadow.device_va = dev->ctrl_base.device_va + offset;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_get_crc_base);
+
+int mhi_dev_mmio_get_ch_db(struct mhi_dev_ring *ring, uint64_t *wr_offset)
+{
+ uint32_t value = 0, ch_start_idx = 0;
+ int rc = 0;
+
+ if (!ring) {
+ pr_err("Invalid ring context\n");
+ return -EINVAL;
+ }
+
+ ch_start_idx = ring->mhi_dev->ch_ring_start;
+
+ rc = mhi_dev_mmio_read(ring->mhi_dev,
+ CHDB_HIGHER_n(ring->id-ch_start_idx), &value);
+ if (rc)
+ return rc;
+
+ *wr_offset = value;
+ *wr_offset <<= 32;
+
+ rc = mhi_dev_mmio_read(ring->mhi_dev,
+ CHDB_LOWER_n(ring->id-ch_start_idx), &value);
+ if (rc)
+ return rc;
+
+ *wr_offset |= value;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_get_ch_db);
+
+int mhi_dev_mmio_get_erc_db(struct mhi_dev_ring *ring, uint64_t *wr_offset)
+{
+ uint32_t value = 0, ev_idx_start = 0;
+ int rc = 0;
+
+ if (!ring) {
+ pr_err("Invalid ring context\n");
+ return -EINVAL;
+ }
+
+ ev_idx_start = ring->mhi_dev->ev_ring_start;
+ rc = mhi_dev_mmio_read(ring->mhi_dev,
+ ERDB_HIGHER_n(ring->id - ev_idx_start), &value);
+ if (rc)
+ return rc;
+
+ *wr_offset = value;
+ *wr_offset <<= 32;
+
+ rc = mhi_dev_mmio_read(ring->mhi_dev,
+ ERDB_LOWER_n(ring->id - ev_idx_start), &value);
+ if (rc)
+ return rc;
+
+ *wr_offset |= value;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_get_erc_db);
+
+int mhi_dev_mmio_get_cmd_db(struct mhi_dev_ring *ring, uint64_t *wr_offset)
+{
+ uint32_t value = 0;
+ int rc = 0;
+
+ if (!ring) {
+ pr_err("Invalid ring context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_read(ring->mhi_dev, CRDB_HIGHER, &value);
+ if (rc)
+ return rc;
+
+ *wr_offset = value;
+ *wr_offset <<= 32;
+
+ rc = mhi_dev_mmio_read(ring->mhi_dev, CRDB_LOWER, &value);
+ if (rc)
+ return rc;
+
+ *wr_offset |= value;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_get_cmd_db);
+
+int mhi_dev_mmio_set_env(struct mhi_dev *dev, uint32_t value)
+{
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ mhi_dev_mmio_write(dev, BHI_EXECENV, value);
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_set_env);
+
+int mhi_dev_mmio_reset(struct mhi_dev *dev)
+{
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ mhi_dev_mmio_write(dev, MHICTRL, 0);
+ mhi_dev_mmio_write(dev, MHISTATUS, 0);
+ mhi_dev_mmio_clear_interrupts(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_reset);
+
+int mhi_dev_restore_mmio(struct mhi_dev *dev)
+{
+ uint32_t i, reg_cntl_value;
+ void *reg_cntl_addr;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ mhi_dev_mmio_mask_interrupts(dev);
+
+ for (i = 0; i < (MHI_DEV_MMIO_RANGE/4); i++) {
+ reg_cntl_addr = dev->mmio_base_addr + (i * 4);
+ reg_cntl_value = dev->mmio_backup[i];
+ writel_relaxed(reg_cntl_value, reg_cntl_addr);
+ }
+
+ mhi_dev_mmio_clear_interrupts(dev);
+ mhi_dev_mmio_enable_ctrl_interrupt(dev);
+
+	/* Ensure the restored MMIO writes are committed before proceeding */
+ mb();
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_restore_mmio);
+
+int mhi_dev_backup_mmio(struct mhi_dev *dev)
+{
+ uint32_t i = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MHI_DEV_MMIO_RANGE/4; i++)
+ dev->mmio_backup[i] =
+ readl_relaxed(dev->mmio_base_addr + (i * 4));
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_backup_mmio);
+
+int mhi_dev_get_mhi_addr(struct mhi_dev *dev)
+{
+ uint32_t data_value = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ mhi_dev_mmio_read(dev, MHICTRLBASE_LOWER, &data_value);
+ dev->host_addr.ctrl_base_lsb = data_value;
+
+ mhi_dev_mmio_read(dev, MHICTRLBASE_HIGHER, &data_value);
+ dev->host_addr.ctrl_base_msb = data_value;
+
+ mhi_dev_mmio_read(dev, MHICTRLLIMIT_LOWER, &data_value);
+ dev->host_addr.ctrl_limit_lsb = data_value;
+
+ mhi_dev_mmio_read(dev, MHICTRLLIMIT_HIGHER, &data_value);
+ dev->host_addr.ctrl_limit_msb = data_value;
+
+ mhi_dev_mmio_read(dev, MHIDATABASE_LOWER, &data_value);
+ dev->host_addr.data_base_lsb = data_value;
+
+ mhi_dev_mmio_read(dev, MHIDATABASE_HIGHER, &data_value);
+ dev->host_addr.data_base_msb = data_value;
+
+ mhi_dev_mmio_read(dev, MHIDATALIMIT_LOWER, &data_value);
+ dev->host_addr.data_limit_lsb = data_value;
+
+ mhi_dev_mmio_read(dev, MHIDATALIMIT_HIGHER, &data_value);
+ dev->host_addr.data_limit_msb = data_value;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_get_mhi_addr);
+
+int mhi_dev_mmio_init(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ if (!dev) {
+ pr_err("Invalid MHI dev context\n");
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_mmio_read(dev, MHIREGLEN, &dev->cfg.mhi_reg_len);
+ if (rc)
+ return rc;
+
+ rc = mhi_dev_mmio_masked_read(dev, MHICFG, MHICFG_NER_MASK,
+ MHICFG_NER_SHIFT, &dev->cfg.event_rings);
+ if (rc)
+ return rc;
+
+ rc = mhi_dev_mmio_read(dev, CHDBOFF, &dev->cfg.chdb_offset);
+ if (rc)
+ return rc;
+
+ rc = mhi_dev_mmio_read(dev, ERDBOFF, &dev->cfg.erdb_offset);
+ if (rc)
+ return rc;
+
+ dev->cfg.channels = NUM_CHANNELS;
+
+ if (!dev->mmio_initialized) {
+ rc = mhi_dev_mmio_reset(dev);
+ if (rc) {
+ pr_err("Error resetting MMIO\n");
+ return rc;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_mmio_init);
+
+int mhi_dev_update_ner(struct mhi_dev *dev)
+{
+ int rc = 0;
+
+ rc = mhi_dev_mmio_masked_read(dev, MHICFG, MHICFG_NER_MASK,
+ MHICFG_NER_SHIFT, &dev->cfg.event_rings);
+ if (rc) {
+		pr_err("Error updating NER\n");
+ return rc;
+ }
+
+ pr_debug("NER in HW :%d\n", dev->cfg.event_rings);
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_update_ner);
+
+int mhi_dev_dump_mmio(struct mhi_dev *dev)
+{
+ uint32_t r1, r2, r3, r4, i, offset = 0;
+ int rc = 0;
+
+ for (i = 0; i < MHI_DEV_MMIO_RANGE/4; i += 4) {
+ rc = mhi_dev_mmio_read(dev, offset, &r1);
+ if (rc)
+ return rc;
+
+ rc = mhi_dev_mmio_read(dev, offset+4, &r2);
+ if (rc)
+ return rc;
+
+ rc = mhi_dev_mmio_read(dev, offset+8, &r3);
+ if (rc)
+ return rc;
+
+ rc = mhi_dev_mmio_read(dev, offset+0xC, &r4);
+ if (rc)
+ return rc;
+
+		pr_debug("0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			offset, r1, r2, r3, r4);
+		offset += 0x10;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(mhi_dev_dump_mmio);
diff --git a/drivers/platform/msm/mhi_dev/mhi_ring.c b/drivers/platform/msm/mhi_dev/mhi_ring.c
new file mode 100644
index 000000000000..b7eab1eb8b64
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/mhi_ring.c
@@ -0,0 +1,438 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+
+#include "mhi.h"
+
+static uint32_t mhi_dev_ring_addr2ofst(struct mhi_dev_ring *ring, uint64_t p)
+{
+ uint64_t rbase;
+
+ rbase = ring->ring_ctx->generic.rbase;
+
+ return (p - rbase)/sizeof(union mhi_dev_ring_element_type);
+}
+
+static uint32_t mhi_dev_ring_num_elems(struct mhi_dev_ring *ring)
+{
+ return ring->ring_ctx->generic.rlen/
+ sizeof(union mhi_dev_ring_element_type);
+}
+
+/* fetch ring elements from start to end, handling the wrap-around case */
+int mhi_dev_fetch_ring_elements(struct mhi_dev_ring *ring,
+ uint32_t start, uint32_t end)
+{
+ struct mhi_addr host_addr;
+
+ host_addr.device_pa = ring->ring_shadow.device_pa
+ + sizeof(union mhi_dev_ring_element_type) * start;
+ host_addr.device_va = ring->ring_shadow.device_va
+ + sizeof(union mhi_dev_ring_element_type) * start;
+ host_addr.host_pa = ring->ring_shadow.host_pa
+ + sizeof(union mhi_dev_ring_element_type) * start;
+ if (start < end) {
+ mhi_dev_read_from_host(&host_addr,
+ (ring->ring_cache_dma_handle +
+ sizeof(union mhi_dev_ring_element_type) * start),
+ (end-start) *
+ sizeof(union mhi_dev_ring_element_type));
+ } else if (start > end) {
+ /* copy from 'start' to ring end, then ring start to 'end'*/
+ mhi_dev_read_from_host(&host_addr,
+ (ring->ring_cache_dma_handle +
+ sizeof(union mhi_dev_ring_element_type) * start),
+ (ring->ring_size-start) *
+ sizeof(union mhi_dev_ring_element_type));
+ if (end) {
+ /* wrapped around */
+ host_addr.device_pa = ring->ring_shadow.device_pa;
+ host_addr.device_va = ring->ring_shadow.device_va;
+ host_addr.host_pa = ring->ring_shadow.host_pa;
+ mhi_dev_read_from_host(&host_addr,
+			ring->ring_cache_dma_handle,
+ end * sizeof(union mhi_dev_ring_element_type));
+ }
+ }
+
+ return 0;
+}
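+
+/*
+ * Worked example of the wrap-around handling above (illustrative numbers):
+ * with ring_size = 10, start = 8 and end = 3, the first read copies host
+ * elements 8..9 into the local cache at offset 8, and the second read copies
+ * elements 0..2 into the beginning of the cache.
+ */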
+
+int mhi_dev_cache_ring(struct mhi_dev_ring *ring, uint32_t wr_offset)
+{
+ uint32_t old_offset = 0;
+ struct mhi_dev *mhi_ctx;
+
+ if (!ring) {
+ pr_err("%s: Invalid ring context\n", __func__);
+ return -EINVAL;
+ }
+
+ mhi_ctx = ring->mhi_dev;
+
+ if (ring->wr_offset == wr_offset) {
+ mhi_log(MHI_MSG_INFO,
+			"nothing to cache for ring %d, local wr_offset %d\n",
+ ring->id, ring->wr_offset);
+ mhi_log(MHI_MSG_INFO,
+ "new wr_offset %d\n", wr_offset);
+ return 0;
+ }
+
+ old_offset = ring->wr_offset;
+
+ mhi_log(MHI_MSG_ERROR,
+		"caching - ring size: %d, local offset: %d, new offset: %d\n",
+		(uint32_t) ring->ring_size, old_offset,
+		wr_offset);
+
+	/*
+	 * Copy the elements starting from old_offset to wr_offset, taking
+	 * the wrap-around case into account. Event rings are not cached,
+	 * so skip them.
+	 */
+ if (ring->id >= mhi_ctx->ev_ring_start &&
+ ring->id < (mhi_ctx->ev_ring_start +
+ mhi_ctx->cfg.event_rings)) {
+ mhi_log(MHI_MSG_ERROR,
+ "not caching event ring %d\n", ring->id);
+ return 0;
+ }
+
+ mhi_log(MHI_MSG_ERROR, "caching ring %d, start %d, end %d\n",
+ ring->id, old_offset, wr_offset);
+
+ if (mhi_dev_fetch_ring_elements(ring, old_offset, wr_offset)) {
+ mhi_log(MHI_MSG_ERROR,
+ "failed to fetch elements for ring %d, start %d, end %d\n",
+ ring->id, old_offset, wr_offset);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_cache_ring);
+
+int mhi_dev_update_wr_offset(struct mhi_dev_ring *ring)
+{
+ uint64_t wr_offset = 0;
+ uint32_t new_wr_offset = 0;
+ int32_t rc = 0;
+
+ if (!ring) {
+ pr_err("%s: Invalid ring context\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (ring->type) {
+ case RING_TYPE_CMD:
+ rc = mhi_dev_mmio_get_cmd_db(ring, &wr_offset);
+ if (rc) {
+ pr_err("%s: CMD DB read failed\n", __func__);
+ return rc;
+ }
+ mhi_log(MHI_MSG_ERROR,
+ "ring %d wr_offset from db 0x%x\n",
+ ring->id, (uint32_t) wr_offset);
+ break;
+ case RING_TYPE_ER:
+ rc = mhi_dev_mmio_get_erc_db(ring, &wr_offset);
+ if (rc) {
+ pr_err("%s: EVT DB read failed\n", __func__);
+ return rc;
+ }
+ break;
+ case RING_TYPE_CH:
+ rc = mhi_dev_mmio_get_ch_db(ring, &wr_offset);
+ if (rc) {
+ pr_err("%s: CH DB read failed\n", __func__);
+ return rc;
+ }
+ mhi_log(MHI_MSG_ERROR,
+ "ring %d wr_offset from db 0x%x\n",
+ ring->id, (uint32_t) wr_offset);
+ break;
+ default:
+ mhi_log(MHI_MSG_ERROR, "invalid ring type\n");
+ return -EINVAL;
+ }
+
+ new_wr_offset = mhi_dev_ring_addr2ofst(ring, wr_offset);
+
+ mhi_dev_cache_ring(ring, new_wr_offset);
+
+ ring->wr_offset = new_wr_offset;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_update_wr_offset);
+
+int mhi_dev_process_ring_element(struct mhi_dev_ring *ring, uint32_t offset)
+{
+ union mhi_dev_ring_element_type *el;
+
+ if (!ring) {
+ pr_err("%s: Invalid ring context\n", __func__);
+ return -EINVAL;
+ }
+
+ /* get the element and invoke the respective callback */
+ el = &ring->ring_cache[offset];
+
+ if (ring->ring_cb)
+ ring->ring_cb(ring->mhi_dev, el, (void *)ring);
+ else
+ mhi_log(MHI_MSG_INFO, "No callback registered for ring %d\n",
+ ring->id);
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_process_ring_element);
+
+int mhi_dev_process_ring(struct mhi_dev_ring *ring)
+{
+ int rc = 0;
+
+ if (!ring) {
+ pr_err("%s: Invalid ring context\n", __func__);
+ return -EINVAL;
+ }
+
+ rc = mhi_dev_update_wr_offset(ring);
+ if (rc) {
+ mhi_log(MHI_MSG_ERROR,
+ "Error updating write-offset for ring %d\n",
+ ring->id);
+ return rc;
+ }
+
+ if (ring->type == RING_TYPE_CH) {
+ /* notify the clients that there are elements in the ring */
+ rc = mhi_dev_process_ring_element(ring, ring->rd_offset);
+ if (rc)
+ pr_err("Error fetching elements\n");
+ return rc;
+ }
+
+ while (ring->rd_offset != ring->wr_offset) {
+ rc = mhi_dev_process_ring_element(ring, ring->rd_offset);
+ if (rc) {
+ mhi_log(MHI_MSG_ERROR,
+ "Error processing ring (%d) element (%d)\n",
+ ring->id, ring->rd_offset);
+ return rc;
+ }
+
+ mhi_log(MHI_MSG_ERROR,
+ "Processing ring (%d) rd_offset:%d, wr_offset:%d\n",
+ ring->id, ring->rd_offset, ring->wr_offset);
+
+ mhi_dev_ring_inc_index(ring, ring->rd_offset);
+ }
+
+	if (ring->rd_offset != ring->wr_offset) {
+ mhi_log(MHI_MSG_ERROR,
+ "Error with the rd offset/wr offset\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_process_ring);
+
+int mhi_dev_add_element(struct mhi_dev_ring *ring,
+ union mhi_dev_ring_element_type *element)
+{
+ uint32_t old_offset = 0;
+ struct mhi_addr host_addr;
+
+ if (!ring || !element) {
+ pr_err("%s: Invalid context\n", __func__);
+ return -EINVAL;
+ }
+
+ mhi_dev_update_wr_offset(ring);
+
+ if ((ring->rd_offset + 1) % ring->ring_size == ring->wr_offset) {
+		mhi_log(MHI_MSG_INFO, "ring is full, cannot insert element\n");
+ return -EINVAL;
+ }
+
+ old_offset = ring->rd_offset;
+
+ mhi_dev_ring_inc_index(ring, ring->rd_offset);
+
+ ring->ring_ctx->generic.rp = (ring->rd_offset *
+ sizeof(union mhi_dev_ring_element_type)) +
+ ring->ring_ctx->generic.rbase;
+	/*
+	 * Write the element to the host ring; ring_base must be the
+	 * iomapped ring base address for the memcpy.
+	 */
+ host_addr.host_pa = ring->ring_shadow.host_pa +
+ sizeof(union mhi_dev_ring_element_type) * old_offset;
+ host_addr.device_va = ring->ring_shadow.device_va +
+ sizeof(union mhi_dev_ring_element_type) * old_offset;
+
+ mhi_log(MHI_MSG_ERROR, "adding element to ring (%d)\n", ring->id);
+	mhi_log(MHI_MSG_ERROR, "rd_offset %d\n", ring->rd_offset);
+ mhi_log(MHI_MSG_ERROR, "type %d\n", element->generic.type);
+
+ mhi_dev_write_to_host(&host_addr, element,
+ sizeof(union mhi_dev_ring_element_type), ring->mhi_dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_add_element);
+
+int mhi_ring_start(struct mhi_dev_ring *ring, union mhi_dev_ring_ctx *ctx,
+ struct mhi_dev *mhi)
+{
+ int rc = 0;
+ uint32_t wr_offset = 0;
+ uint32_t offset = 0;
+
+ if (!ring || !ctx || !mhi) {
+ pr_err("%s: Invalid context\n", __func__);
+ return -EINVAL;
+ }
+
+ ring->ring_ctx = ctx;
+ ring->ring_size = mhi_dev_ring_num_elems(ring);
+ ring->rd_offset = mhi_dev_ring_addr2ofst(ring,
+ ring->ring_ctx->generic.rp);
+ ring->wr_offset = mhi_dev_ring_addr2ofst(ring,
+ ring->ring_ctx->generic.rp);
+ ring->mhi_dev = mhi;
+
+ mhi_ring_set_state(ring, RING_STATE_IDLE);
+
+ wr_offset = mhi_dev_ring_addr2ofst(ring,
+ ring->ring_ctx->generic.wp);
+
+ ring->ring_cache = dma_alloc_coherent(mhi->dev,
+ ring->ring_size *
+ sizeof(union mhi_dev_ring_element_type),
+ &ring->ring_cache_dma_handle,
+ GFP_KERNEL);
+ if (!ring->ring_cache)
+ return -ENOMEM;
+
+ offset = (uint32_t)(ring->ring_ctx->generic.rbase -
+ mhi->ctrl_base.host_pa);
+
+ ring->ring_shadow.device_pa = mhi->ctrl_base.device_pa + offset;
+ ring->ring_shadow.device_va = mhi->ctrl_base.device_va + offset;
+ ring->ring_shadow.host_pa = mhi->ctrl_base.host_pa + offset;
+
+ if (ring->type == RING_TYPE_ER)
+ ring->ring_ctx_shadow =
+ (union mhi_dev_ring_ctx *) (mhi->ev_ctx_shadow.device_va +
+ (ring->id - mhi->ev_ring_start) *
+ sizeof(union mhi_dev_ring_ctx));
+ else if (ring->type == RING_TYPE_CMD)
+ ring->ring_ctx_shadow =
+ (union mhi_dev_ring_ctx *) mhi->cmd_ctx_shadow.device_va;
+ else if (ring->type == RING_TYPE_CH)
+ ring->ring_ctx_shadow =
+ (union mhi_dev_ring_ctx *) (mhi->ch_ctx_shadow.device_va +
+ (ring->id - mhi->ch_ring_start)*sizeof(union mhi_dev_ring_ctx));
+
+	/* For now the shadow context simply aliases the local ring context */
+	ring->ring_ctx_shadow = ring->ring_ctx;
+
+ if (ring->type != RING_TYPE_ER) {
+ rc = mhi_dev_cache_ring(ring, wr_offset);
+ if (rc)
+ return rc;
+ }
+
+ mhi_log(MHI_MSG_ERROR, "ctx ring_base:0x%x, rp:0x%x, wp:0x%x\n",
+ (uint32_t)ring->ring_ctx->generic.rbase,
+ (uint32_t)ring->ring_ctx->generic.rp,
+ (uint32_t)ring->ring_ctx->generic.wp);
+ ring->wr_offset = wr_offset;
+
+ return rc;
+}
+EXPORT_SYMBOL(mhi_ring_start);
+
+void mhi_ring_init(struct mhi_dev_ring *ring, enum mhi_dev_ring_type type,
+ int id)
+{
+ if (!ring) {
+ pr_err("%s: Invalid ring context\n", __func__);
+ return;
+ }
+
+ ring->id = id;
+ ring->state = RING_STATE_UINT;
+ ring->ring_cb = NULL;
+ ring->type = type;
+}
+EXPORT_SYMBOL(mhi_ring_init);
+
+void mhi_ring_set_cb(struct mhi_dev_ring *ring,
+ void (*ring_cb)(struct mhi_dev *dev,
+ union mhi_dev_ring_element_type *el, void *ctx))
+{
+ if (!ring || !ring_cb) {
+ pr_err("%s: Invalid context\n", __func__);
+ return;
+ }
+
+ ring->ring_cb = ring_cb;
+}
+EXPORT_SYMBOL(mhi_ring_set_cb);
+
+void mhi_ring_set_state(struct mhi_dev_ring *ring,
+ enum mhi_dev_ring_state state)
+{
+ if (!ring) {
+ pr_err("%s: Invalid ring context\n", __func__);
+ return;
+ }
+
+ if (state > RING_STATE_PENDING) {
+ pr_err("%s: Invalid ring state\n", __func__);
+ return;
+ }
+
+ ring->state = state;
+}
+EXPORT_SYMBOL(mhi_ring_set_state);
+
+enum mhi_dev_ring_state mhi_ring_get_state(struct mhi_dev_ring *ring)
+{
+ if (!ring) {
+ pr_err("%s: Invalid ring context\n", __func__);
+ return -EINVAL;
+ }
+
+ return ring->state;
+}
+EXPORT_SYMBOL(mhi_ring_get_state);
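+
+/*
+ * Minimal usage sketch of the ring API above (illustrative only, not wired
+ * into the driver): a caller initializes a ring, registers a callback and
+ * starts the ring with the host-provided context before processing it. The
+ * names example_ring_cb and example_ring_bringup are hypothetical.
+ */
+static void example_ring_cb(struct mhi_dev *dev,
+		union mhi_dev_ring_element_type *el, void *ctx)
+{
+	mhi_log(MHI_MSG_INFO, "element type %d\n", el->generic.type);
+}
+
+static int __maybe_unused example_ring_bringup(struct mhi_dev *mhi,
+		union mhi_dev_ring_ctx *ctx, int ring_id)
+{
+	struct mhi_dev_ring ring;
+	int rc;
+
+	mhi_ring_init(&ring, RING_TYPE_CMD, ring_id);
+	mhi_ring_set_cb(&ring, example_ring_cb);
+
+	rc = mhi_ring_start(&ring, ctx, mhi);
+	if (rc)
+		return rc;
+
+	/* Read the doorbell, cache new elements and invoke the callback */
+	return mhi_dev_process_ring(&ring);
+}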
diff --git a/drivers/platform/msm/mhi_dev/mhi_sm.c b/drivers/platform/msm/mhi_dev/mhi_sm.c
new file mode 100644
index 000000000000..12a4fb229922
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/mhi_sm.c
@@ -0,0 +1,1319 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/debugfs.h>
+#include <linux/ipa.h>
+#include "mhi_hwio.h"
+#include "mhi_sm.h"
+
+#define MHI_SM_DBG(fmt, args...) \
+ mhi_log(MHI_MSG_DBG, fmt, ##args)
+
+#define MHI_SM_ERR(fmt, args...) \
+ mhi_log(MHI_MSG_ERROR, fmt, ##args)
+
+#define MHI_SM_FUNC_ENTRY() MHI_SM_DBG("ENTRY\n")
+#define MHI_SM_FUNC_EXIT() MHI_SM_DBG("EXIT\n")
+
+
+static inline const char *mhi_sm_dev_event_str(enum mhi_dev_event state)
+{
+ const char *str;
+
+ switch (state) {
+ case MHI_DEV_EVENT_CTRL_TRIG:
+ str = "MHI_DEV_EVENT_CTRL_TRIG";
+ break;
+ case MHI_DEV_EVENT_M0_STATE:
+ str = "MHI_DEV_EVENT_M0_STATE";
+ break;
+ case MHI_DEV_EVENT_M1_STATE:
+ str = "MHI_DEV_EVENT_M1_STATE";
+ break;
+ case MHI_DEV_EVENT_M2_STATE:
+ str = "MHI_DEV_EVENT_M2_STATE";
+ break;
+ case MHI_DEV_EVENT_M3_STATE:
+ str = "MHI_DEV_EVENT_M3_STATE";
+ break;
+ case MHI_DEV_EVENT_HW_ACC_WAKEUP:
+ str = "MHI_DEV_EVENT_HW_ACC_WAKEUP";
+ break;
+ case MHI_DEV_EVENT_CORE_WAKEUP:
+ str = "MHI_DEV_EVENT_CORE_WAKEUP";
+ break;
+ default:
+ str = "INVALID MHI_DEV_EVENT";
+ }
+
+ return str;
+}
+
+static inline const char *mhi_sm_mstate_str(enum mhi_dev_state state)
+{
+ const char *str;
+
+ switch (state) {
+ case MHI_DEV_RESET_STATE:
+ str = "RESET";
+ break;
+ case MHI_DEV_READY_STATE:
+ str = "READY";
+ break;
+ case MHI_DEV_M0_STATE:
+ str = "M0";
+ break;
+ case MHI_DEV_M1_STATE:
+ str = "M1";
+ break;
+ case MHI_DEV_M2_STATE:
+ str = "M2";
+ break;
+ case MHI_DEV_M3_STATE:
+ str = "M3";
+ break;
+ case MHI_DEV_SYSERR_STATE:
+ str = "SYSTEM ERROR";
+ break;
+ default:
+ str = "INVALID";
+ break;
+ }
+
+ return str;
+}
+
+enum mhi_sm_ep_pcie_state {
+ MHI_SM_EP_PCIE_LINK_DISABLE,
+ MHI_SM_EP_PCIE_D0_STATE,
+ MHI_SM_EP_PCIE_D3_HOT_STATE,
+ MHI_SM_EP_PCIE_D3_COLD_STATE,
+};
+
+static inline const char *mhi_sm_dstate_str(enum mhi_sm_ep_pcie_state state)
+{
+ const char *str;
+
+ switch (state) {
+ case MHI_SM_EP_PCIE_LINK_DISABLE:
+ str = "EP_PCIE_LINK_DISABLE";
+ break;
+ case MHI_SM_EP_PCIE_D0_STATE:
+ str = "D0_STATE";
+ break;
+ case MHI_SM_EP_PCIE_D3_HOT_STATE:
+ str = "D3_HOT_STATE";
+ break;
+ case MHI_SM_EP_PCIE_D3_COLD_STATE:
+ str = "D3_COLD_STATE";
+ break;
+ default:
+ str = "INVALID D-STATE";
+ break;
+ }
+
+ return str;
+}
+
+static inline const char *mhi_sm_pcie_event_str(enum ep_pcie_event event)
+{
+ const char *str;
+
+ switch (event) {
+ case EP_PCIE_EVENT_LINKDOWN:
+ str = "EP_PCIE_LINKDOWN_EVENT";
+ break;
+ case EP_PCIE_EVENT_LINKUP:
+ str = "EP_PCIE_LINKUP_EVENT";
+ break;
+ case EP_PCIE_EVENT_PM_D3_HOT:
+ str = "EP_PCIE_PM_D3_HOT_EVENT";
+ break;
+ case EP_PCIE_EVENT_PM_D3_COLD:
+ str = "EP_PCIE_PM_D3_COLD_EVENT";
+ break;
+ case EP_PCIE_EVENT_PM_RST_DEAST:
+ str = "EP_PCIE_PM_RST_DEAST_EVENT";
+ break;
+ case EP_PCIE_EVENT_PM_D0:
+ str = "EP_PCIE_PM_D0_EVENT";
+ break;
+ case EP_PCIE_EVENT_MHI_A7:
+ str = "EP_PCIE_MHI_A7";
+ break;
+ default:
+ str = "INVALID_PCIE_EVENT";
+ break;
+ }
+
+ return str;
+}
+
+/**
+ * struct mhi_sm_device_event - mhi-core event work
+ * @event: mhi core state change event
+ * @work: work struct
+ *
+ * used to add work for mhi state change event to mhi_sm_wq
+ */
+struct mhi_sm_device_event {
+ enum mhi_dev_event event;
+ struct work_struct work;
+};
+
+/**
+ * struct mhi_sm_ep_pcie_event - ep-pcie event work
+ * @event: ep-pcie link state change event
+ * @work: work struct
+ *
+ * used to add work for ep-pcie link state change event to mhi_sm_wq
+ */
+struct mhi_sm_ep_pcie_event {
+ enum ep_pcie_event event;
+ struct work_struct work;
+};
+
+/**
+ * struct mhi_sm_stats - MHI state machine statistics, viewable using debugfs
+ * @m0_event_cnt: total number of MHI_DEV_EVENT_M0_STATE events
+ * @m3_event_cnt: total number of MHI_DEV_EVENT_M3_STATE events
+ * @hw_acc_wakeup_event_cnt: total number of MHI_DEV_EVENT_HW_ACC_WAKEUP events
+ * @mhi_core_wakeup_event_cnt: total number of MHI_DEV_EVENT_CORE_WAKEUP events
+ * @linkup_event_cnt: total number of EP_PCIE_EVENT_LINKUP events
+ * @rst_deast_event_cnt: total number of EP_PCIE_EVENT_PM_RST_DEAST events
+ * @d3_hot_event_cnt: total number of EP_PCIE_EVENT_PM_D3_HOT events
+ * @d3_cold_event_cnt: total number of EP_PCIE_EVENT_PM_D3_COLD events
+ * @d0_event_cnt: total number of EP_PCIE_EVENT_PM_D0 events
+ * @linkdown_event_cnt: total number of EP_PCIE_EVENT_LINKDOWN events
+ */
+struct mhi_sm_stats {
+ int m0_event_cnt;
+ int m3_event_cnt;
+ int hw_acc_wakeup_event_cnt;
+ int mhi_core_wakeup_event_cnt;
+ int linkup_event_cnt;
+ int rst_deast_event_cnt;
+ int d3_hot_event_cnt;
+ int d3_cold_event_cnt;
+ int d0_event_cnt;
+ int linkdown_event_cnt;
+};
+
+/**
+ * struct mhi_sm_dev - MHI state manager context information
+ * @mhi_state: MHI M state of the MHI device
+ * @d_state: EP-PCIe D state of the MHI device
+ * @mhi_dev: MHI device struct pointer
+ * @mhi_state_lock: mutex for mhi_state
+ * @syserr_occurred: flag to indicate if a syserr condition has occurred.
+ * @mhi_sm_wq: workqueue for state change events
+ * @pending_device_events: number of pending mhi state change events in sm_wq
+ * @pending_pcie_events: number of pending ep-pcie state change events in sm_wq
+ * @stats: stats on the handled and pending events
+ */
+struct mhi_sm_dev {
+ enum mhi_dev_state mhi_state;
+ enum mhi_sm_ep_pcie_state d_state;
+ struct mhi_dev *mhi_dev;
+ struct mutex mhi_state_lock;
+ bool syserr_occurred;
+ struct workqueue_struct *mhi_sm_wq;
+ atomic_t pending_device_events;
+ atomic_t pending_pcie_events;
+ struct mhi_sm_stats stats;
+};
+static struct mhi_sm_dev *mhi_sm_ctx;
+
+
+#ifdef CONFIG_DEBUG_FS
+#define MHI_SM_MAX_MSG_LEN 1024
+static char dbg_buff[MHI_SM_MAX_MSG_LEN];
+static struct dentry *dent;
+static struct dentry *dfile_stats;
+
+static ssize_t mhi_sm_debugfs_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos);
+static ssize_t mhi_sm_debugfs_write(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos);
+
+const struct file_operations mhi_sm_stats_ops = {
+ .read = mhi_sm_debugfs_read,
+ .write = mhi_sm_debugfs_write,
+};
+
+static void mhi_sm_debugfs_init(void)
+{
+ const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
+ S_IWUSR | S_IWGRP | S_IWOTH;
+
+ dent = debugfs_create_dir("mhi_sm", 0);
+ if (IS_ERR(dent)) {
+		MHI_SM_ERR("failed to create mhi_sm debugfs directory\n");
+ return;
+ }
+
+ dfile_stats =
+ debugfs_create_file("stats", read_write_mode, dent,
+ 0, &mhi_sm_stats_ops);
+ if (!dfile_stats || IS_ERR(dfile_stats)) {
+		MHI_SM_ERR("failed to create stats debugfs file\n");
+ goto fail;
+ }
+ return;
+fail:
+ debugfs_remove_recursive(dent);
+}
+
+static void mhi_sm_debugfs_destroy(void)
+{
+ debugfs_remove_recursive(dent);
+}
+#else
+static inline void mhi_sm_debugfs_init(void) {}
+static inline void mhi_sm_debugfs_destroy(void) {}
+#endif /*CONFIG_DEBUG_FS*/
+
+
+static void mhi_sm_mmio_set_mhistatus(enum mhi_dev_state state)
+{
+ struct mhi_dev *dev = mhi_sm_ctx->mhi_dev;
+
+ MHI_SM_FUNC_ENTRY();
+
+ switch (state) {
+ case MHI_DEV_READY_STATE:
+ MHI_SM_DBG("set MHISTATUS to READY mode\n");
+ mhi_dev_mmio_masked_write(dev, MHISTATUS,
+ MHISTATUS_READY_MASK,
+ MHISTATUS_READY_SHIFT, 1);
+
+ mhi_dev_mmio_masked_write(dev, MHISTATUS,
+ MHISTATUS_MHISTATE_MASK,
+ MHISTATUS_MHISTATE_SHIFT, state);
+ break;
+ case MHI_DEV_SYSERR_STATE:
+ MHI_SM_DBG("set MHISTATUS to SYSTEM ERROR mode\n");
+ mhi_dev_mmio_masked_write(dev, MHISTATUS,
+ MHISTATUS_SYSERR_MASK,
+ MHISTATUS_SYSERR_SHIFT, 1);
+
+ mhi_dev_mmio_masked_write(dev, MHISTATUS,
+ MHISTATUS_MHISTATE_MASK,
+ MHISTATUS_MHISTATE_SHIFT, state);
+ break;
+ case MHI_DEV_M1_STATE:
+ case MHI_DEV_M2_STATE:
+ MHI_SM_ERR("Not supported state, can't set MHISTATUS to %s\n",
+ mhi_sm_mstate_str(state));
+ goto exit;
+ case MHI_DEV_M0_STATE:
+ case MHI_DEV_M3_STATE:
+ MHI_SM_DBG("set MHISTATUS.MHISTATE to %s state\n",
+ mhi_sm_mstate_str(state));
+ mhi_dev_mmio_masked_write(dev, MHISTATUS,
+ MHISTATUS_MHISTATE_MASK,
+ MHISTATUS_MHISTATE_SHIFT, state);
+ break;
+ default:
+		MHI_SM_ERR("Invalid mhi state: 0x%x\n", state);
+ goto exit;
+ }
+
+ mhi_sm_ctx->mhi_state = state;
+
+exit:
+ MHI_SM_FUNC_EXIT();
+}
+
+/**
+ * mhi_sm_is_legal_event_on_state() - Determine if MHI state transition is valid
+ * @curr_state: current MHI state
+ * @event: MHI state change event
+ *
+ * Determine according to MHI state management if the state change event
+ * is valid on the current mhi state.
+ * Note: The decision doesn't take into account M1 and M2 states.
+ *
+ * Return: true: transition is valid
+ * false: transition is not valid
+ */
+static bool mhi_sm_is_legal_event_on_state(enum mhi_dev_state curr_state,
+ enum mhi_dev_event event)
+{
+ bool res;
+
+ switch (event) {
+ case MHI_DEV_EVENT_M0_STATE:
+ res = (mhi_sm_ctx->d_state == MHI_SM_EP_PCIE_D0_STATE &&
+ curr_state != MHI_DEV_RESET_STATE);
+ break;
+ case MHI_DEV_EVENT_M3_STATE:
+ case MHI_DEV_EVENT_HW_ACC_WAKEUP:
+ case MHI_DEV_EVENT_CORE_WAKEUP:
+ res = (curr_state == MHI_DEV_M3_STATE ||
+ curr_state == MHI_DEV_M0_STATE);
+ break;
+ default:
+ MHI_SM_ERR("Received invalid event: %s\n",
+ mhi_sm_dev_event_str(event));
+ res = false;
+ break;
+ }
+
+ return res;
+}
+
+/**
+ * mhi_sm_is_legal_pcie_event_on_state() - Determine if EP-PCIe link state
+ * transition is valid on the current system state.
+ * @curr_mstate: current MHI state
+ * @curr_dstate: current EP-PCIe link D-state
+ * @event: ep-pcie link state change event
+ *
+ * Return: true: transition is valid
+ * false: transition is not valid
+ */
+static bool mhi_sm_is_legal_pcie_event_on_state(enum mhi_dev_state curr_mstate,
+ enum mhi_sm_ep_pcie_state curr_dstate, enum ep_pcie_event event)
+{
+ bool res;
+
+ switch (event) {
+ case EP_PCIE_EVENT_LINKUP:
+ case EP_PCIE_EVENT_LINKDOWN:
+ res = true;
+ break;
+ case EP_PCIE_EVENT_PM_D3_HOT:
+ res = (curr_mstate == MHI_DEV_M3_STATE &&
+ curr_dstate != MHI_SM_EP_PCIE_LINK_DISABLE);
+ break;
+ case EP_PCIE_EVENT_PM_D3_COLD:
+ res = (curr_dstate == MHI_SM_EP_PCIE_D3_HOT_STATE ||
+ curr_dstate == MHI_SM_EP_PCIE_D3_COLD_STATE);
+ break;
+ case EP_PCIE_EVENT_PM_RST_DEAST:
+ res = (curr_dstate == MHI_SM_EP_PCIE_D0_STATE ||
+ curr_dstate == MHI_SM_EP_PCIE_D3_COLD_STATE);
+ break;
+ case EP_PCIE_EVENT_PM_D0:
+ res = (curr_dstate == MHI_SM_EP_PCIE_D0_STATE ||
+ curr_dstate == MHI_SM_EP_PCIE_D3_HOT_STATE);
+ break;
+ case EP_PCIE_EVENT_MHI_A7:
+ res = true;
+ break;
+ default:
+ MHI_SM_ERR("Invalid ep_pcie event, received: %s\n",
+ mhi_sm_pcie_event_str(event));
+ res = false;
+ break;
+ }
+
+ return res;
+}
+
+/**
+ * mhi_sm_change_to_M0() - switch to M0 state.
+ *
+ * Switch MHI-device state to M0, if possible according to MHI state machine.
+ * Notify the MHI host of the transition and, if MHI was suspended, resume it.
+ *
+ * Return: 0: success
+ * negative: failure
+ */
+static int mhi_sm_change_to_M0(void)
+{
+ enum mhi_dev_state old_state;
+ struct ep_pcie_msi_config cfg;
+ int res;
+
+ MHI_SM_FUNC_ENTRY();
+
+ old_state = mhi_sm_ctx->mhi_state;
+
+ if (old_state == MHI_DEV_M0_STATE) {
+ MHI_SM_DBG("Nothing to do, already in M0 state\n");
+ res = 0;
+ goto exit;
+ } else if (old_state == MHI_DEV_M3_STATE ||
+ old_state == MHI_DEV_READY_STATE) {
+		/* Configure the outbound iATU, MSI and doorbell routing */
+ res = mhi_dev_config_outbound_iatu(mhi_sm_ctx->mhi_dev);
+ if (res) {
+ MHI_SM_ERR("Fail to configure iATU, returned %d\n",
+ res);
+ goto exit;
+ }
+ res = ep_pcie_get_msi_config(mhi_sm_ctx->mhi_dev->phandle,
+ &cfg);
+ if (res) {
+ MHI_SM_ERR("Error retrieving pcie msi logic\n");
+ goto exit;
+ }
+ res = mhi_pcie_config_db_routing(mhi_sm_ctx->mhi_dev);
+ if (res) {
+ MHI_SM_ERR("Error configuring db routing\n");
+ goto exit;
+
+ }
+ } else {
+ MHI_SM_ERR("unexpected old_state: %s\n",
+ mhi_sm_mstate_str(old_state));
+		res = -EINVAL;
+		goto exit;
+ }
+ mhi_sm_mmio_set_mhistatus(MHI_DEV_M0_STATE);
+
+	/* Tell the host that the device has moved to M0 */
+ res = mhi_dev_send_state_change_event(mhi_sm_ctx->mhi_dev,
+ MHI_DEV_M0_STATE);
+ if (res) {
+ MHI_SM_ERR("Failed to send event %s to host, returned %d\n",
+ mhi_sm_dev_event_str(MHI_DEV_EVENT_M0_STATE), res);
+ goto exit;
+ }
+
+ if (old_state == MHI_DEV_READY_STATE) {
+		/* Report the current execution environment (EE) to the host */
+ res = mhi_dev_send_ee_event(mhi_sm_ctx->mhi_dev, 2);
+ if (res) {
+ MHI_SM_ERR("failed sending EE event to host\n");
+ goto exit;
+ }
+ } else if (old_state == MHI_DEV_M3_STATE) {
+		/* Resume MHI operation */
+ res = mhi_dev_resume(mhi_sm_ctx->mhi_dev);
+ if (res) {
+ MHI_SM_ERR("Failed resuming mhi core, returned %d",
+ res);
+ goto exit;
+ }
+ res = ipa_mhi_resume();
+ if (res) {
+ MHI_SM_ERR("Failed resuming ipa_mhi, returned %d",
+ res);
+ goto exit;
+ }
+ }
+ res = 0;
+
+exit:
+ MHI_SM_FUNC_EXIT();
+ return res;
+}
+
+/**
+ * mhi_sm_change_to_M3() - switch to M3 state
+ *
+ * Switch MHI-device state to M3, if possible according to MHI state machine.
+ * Suspend MHI traffic and notify the host on the transition.
+ *
+ * Return: 0: success
+ * negative: failure
+ */
+static int mhi_sm_change_to_M3(void)
+{
+ enum mhi_dev_state old_state;
+ int res = 0;
+
+ MHI_SM_FUNC_ENTRY();
+
+ old_state = mhi_sm_ctx->mhi_state;
+ if (old_state == MHI_DEV_M3_STATE) {
+ MHI_SM_DBG("Nothing to do, already in M3 state\n");
+ res = 0;
+ goto exit;
+ }
+	/* Suspend MHI operation */
+ res = mhi_dev_suspend(mhi_sm_ctx->mhi_dev);
+ if (res) {
+ MHI_SM_ERR("Failed to suspend mhi_core, returned %d\n", res);
+ goto exit;
+ }
+ res = ipa_mhi_suspend(true);
+ if (res) {
+ MHI_SM_ERR("Failed to suspend ipa_mhi, returned %d\n", res);
+ goto exit;
+ }
+ mhi_sm_mmio_set_mhistatus(MHI_DEV_M3_STATE);
+
+	/* Tell the host that the device has moved to M3 */
+ res = mhi_dev_send_state_change_event(mhi_sm_ctx->mhi_dev,
+ MHI_DEV_M3_STATE);
+ if (res) {
+		MHI_SM_ERR("Failed sending event: %s to mhi_host\n",
+ mhi_sm_dev_event_str(MHI_DEV_EVENT_M3_STATE));
+ goto exit;
+ }
+
+exit:
+ MHI_SM_FUNC_EXIT();
+ return res;
+}
+
+/**
+ * mhi_sm_wakeup_host() - wake up the MHI host
+ * @event: MHI state change event
+ *
+ * Sends a wakeup event to the MHI host via EP-PCIe if MHI is in M3 state.
+ *
+ * Return: 0:success
+ * negative: failure
+ */
+static int mhi_sm_wakeup_host(enum mhi_dev_event event)
+{
+ int res = 0;
+
+ MHI_SM_FUNC_ENTRY();
+
+ if (mhi_sm_ctx->mhi_state == MHI_DEV_M3_STATE) {
+ /*
+ * ep_pcie driver is responsible to send the right wakeup
+ * event, assert WAKE#, according to Link state
+ */
+ res = ep_pcie_wakeup_host(mhi_sm_ctx->mhi_dev->phandle);
+ if (res) {
+ MHI_SM_ERR("Failed to wakeup MHI host, returned %d\n",
+ res);
+ goto exit;
+ }
+ } else {
+ MHI_SM_DBG("Nothing to do, Host is already awake\n");
+ }
+
+exit:
+ MHI_SM_FUNC_EXIT();
+ return res;
+}
+
+/**
+ * mhi_sm_handle_syserr() - switch to system error state.
+ *
+ * Called on system error condition.
+ * Switch MHI to SYSERR state, notify MHI-host and ASSERT on the device.
+ * Synchronous function.
+ *
+ * Return: 0: success
+ * negative: failure
+ */
+static int mhi_sm_handle_syserr(void)
+{
+ int res;
+ enum ep_pcie_link_status link_status;
+ bool link_enabled = false;
+
+ MHI_SM_FUNC_ENTRY();
+
+ MHI_SM_ERR("Start handling SYSERR, MHI state: %s and %s",
+ mhi_sm_mstate_str(mhi_sm_ctx->mhi_state),
+ mhi_sm_dstate_str(mhi_sm_ctx->d_state));
+
+ if (mhi_sm_ctx->mhi_state == MHI_DEV_SYSERR_STATE) {
+ MHI_SM_DBG("Nothing to do, already in SYSERR state\n");
+ return 0;
+ }
+
+ mhi_sm_ctx->syserr_occurred = true;
+ link_status = ep_pcie_get_linkstatus(mhi_sm_ctx->mhi_dev->phandle);
+ if (link_status == EP_PCIE_LINK_DISABLED) {
+		/* try to power on ep-pcie, restore mmio, and wake up host */
+ res = ep_pcie_enable_endpoint(mhi_sm_ctx->mhi_dev->phandle,
+ EP_PCIE_OPT_POWER_ON);
+ if (res) {
+ MHI_SM_ERR("Failed to power on ep-pcie, returned %d\n",
+ res);
+ goto exit;
+ }
+ mhi_dev_restore_mmio(mhi_sm_ctx->mhi_dev);
+ res = ep_pcie_enable_endpoint(mhi_sm_ctx->mhi_dev->phandle,
+ EP_PCIE_OPT_AST_WAKE | EP_PCIE_OPT_ENUM);
+ if (res) {
+			MHI_SM_ERR("Failed to wake up host and enable ep-pcie\n");
+ goto exit;
+ }
+ }
+
+ link_enabled = true;
+ mhi_sm_mmio_set_mhistatus(MHI_DEV_SYSERR_STATE);
+
+	/* Tell the host that the device has moved to the SYSERR state */
+ res = mhi_dev_send_state_change_event(mhi_sm_ctx->mhi_dev,
+ MHI_DEV_SYSERR_STATE);
+ if (res) {
+ MHI_SM_ERR("Failed to send %s state change event to host\n",
+ mhi_sm_mstate_str(MHI_DEV_SYSERR_STATE));
+ goto exit;
+ }
+
+exit:
+ if (!link_enabled)
+		MHI_SM_ERR("EP-PCIe link is disabled, cannot set MMIO to %s\n",
+			mhi_sm_mstate_str(MHI_DEV_SYSERR_STATE));
+
+	MHI_SM_ERR("\n\n\nASSERT ON DEVICE !!!!\n\n\n");
+	WARN_ON(1);
+
+ MHI_SM_FUNC_EXIT();
+ return res;
+}
+
+/**
+ * mhi_sm_dev_event_manager() - performs MHI state change
+ * @work: work_struct used by the work queue
+ *
+ * This function is called from mhi_sm_wq, and performs mhi state change
+ * if possible according to MHI state machine
+ */
+static void mhi_sm_dev_event_manager(struct work_struct *work)
+{
+ int res;
+ struct mhi_sm_device_event *chg_event = container_of(work,
+ struct mhi_sm_device_event, work);
+
+ MHI_SM_FUNC_ENTRY();
+
+ mutex_lock(&mhi_sm_ctx->mhi_state_lock);
+ MHI_SM_DBG("Start handling %s event, current states: %s & %s\n",
+ mhi_sm_dev_event_str(chg_event->event),
+ mhi_sm_mstate_str(mhi_sm_ctx->mhi_state),
+ mhi_sm_dstate_str(mhi_sm_ctx->d_state));
+
+ if (mhi_sm_ctx->syserr_occurred) {
+ MHI_SM_DBG("syserr occurred, Ignoring %s\n",
+ mhi_sm_dev_event_str(chg_event->event));
+ goto unlock_and_exit;
+ }
+
+ if (!mhi_sm_is_legal_event_on_state(mhi_sm_ctx->mhi_state,
+ chg_event->event)) {
+ MHI_SM_ERR("%s: illegal in current MHI state: %s and %s\n",
+ mhi_sm_dev_event_str(chg_event->event),
+ mhi_sm_mstate_str(mhi_sm_ctx->mhi_state),
+ mhi_sm_dstate_str(mhi_sm_ctx->d_state));
+ res = mhi_sm_handle_syserr();
+ if (res)
+ MHI_SM_ERR("Failed switching to SYSERR state\n");
+ goto unlock_and_exit;
+ }
+
+ switch (chg_event->event) {
+ case MHI_DEV_EVENT_M0_STATE:
+ res = mhi_sm_change_to_M0();
+ if (res)
+ MHI_SM_ERR("Failed switching to M0 state\n");
+ break;
+ case MHI_DEV_EVENT_M3_STATE:
+ res = mhi_sm_change_to_M3();
+ if (res)
+ MHI_SM_ERR("Failed switching to M3 state\n");
+ break;
+ case MHI_DEV_EVENT_HW_ACC_WAKEUP:
+ case MHI_DEV_EVENT_CORE_WAKEUP:
+ res = mhi_sm_wakeup_host(chg_event->event);
+ if (res)
+ MHI_SM_ERR("Failed to wakeup MHI host\n");
+ break;
+ case MHI_DEV_EVENT_CTRL_TRIG:
+ case MHI_DEV_EVENT_M1_STATE:
+ case MHI_DEV_EVENT_M2_STATE:
+ MHI_SM_ERR("Error: %s event is not supported\n",
+ mhi_sm_dev_event_str(chg_event->event));
+ break;
+ default:
+ MHI_SM_ERR("Error: Invalid event, 0x%x", chg_event->event);
+ break;
+ }
+unlock_and_exit:
+ mutex_unlock(&mhi_sm_ctx->mhi_state_lock);
+ atomic_dec(&mhi_sm_ctx->pending_device_events);
+ kfree(chg_event);
+
+ MHI_SM_FUNC_EXIT();
+}
+
+/**
+ * mhi_sm_pcie_event_manager() - performs EP-PCIe link state change
+ * @work: work_struct used by the work queue
+ *
+ * This function is called from mhi_sm_wq, and performs ep-pcie link state
+ * change if possible according to current system state and MHI state machine
+ */
+static void mhi_sm_pcie_event_manager(struct work_struct *work)
+{
+ int res;
+ enum mhi_sm_ep_pcie_state old_dstate;
+ struct mhi_sm_ep_pcie_event *chg_event = container_of(work,
+ struct mhi_sm_ep_pcie_event, work);
+ enum ep_pcie_event pcie_event = chg_event->event;
+
+ MHI_SM_FUNC_ENTRY();
+
+ mutex_lock(&mhi_sm_ctx->mhi_state_lock);
+ old_dstate = mhi_sm_ctx->d_state;
+
+ MHI_SM_DBG("Start handling %s event, current MHI state %s and %s\n",
+ mhi_sm_pcie_event_str(chg_event->event),
+ mhi_sm_mstate_str(mhi_sm_ctx->mhi_state),
+ mhi_sm_dstate_str(old_dstate));
+
+ if (mhi_sm_ctx->syserr_occurred &&
+ pcie_event != EP_PCIE_EVENT_LINKDOWN) {
+ MHI_SM_DBG("SYSERR occurred. Ignoring %s",
+ mhi_sm_pcie_event_str(pcie_event));
+ goto unlock_and_exit;
+ }
+
+ if (!mhi_sm_is_legal_pcie_event_on_state(mhi_sm_ctx->mhi_state,
+ old_dstate, pcie_event)) {
+ MHI_SM_ERR("%s: illegal in current MHI state: %s and %s\n",
+ mhi_sm_pcie_event_str(pcie_event),
+ mhi_sm_mstate_str(mhi_sm_ctx->mhi_state),
+ mhi_sm_dstate_str(old_dstate));
+ res = mhi_sm_handle_syserr();
+ if (res)
+ MHI_SM_ERR("Failed switching to SYSERR state\n");
+ goto unlock_and_exit;
+ }
+
+ switch (pcie_event) {
+ case EP_PCIE_EVENT_LINKUP:
+ if (mhi_sm_ctx->d_state == MHI_SM_EP_PCIE_LINK_DISABLE)
+ mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D0_STATE;
+ break;
+ case EP_PCIE_EVENT_LINKDOWN:
+ res = mhi_sm_handle_syserr();
+ if (res)
+ MHI_SM_ERR("Failed switching to SYSERR state\n");
+ goto unlock_and_exit;
+ case EP_PCIE_EVENT_PM_D3_HOT:
+ if (old_dstate == MHI_SM_EP_PCIE_D3_HOT_STATE) {
+			MHI_SM_DBG("Nothing to do, already in D3_HOT state\n");
+ break;
+ }
+		/* MMIO backup was already done in the ep-pcie event callback */
+ mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D3_HOT_STATE;
+ break;
+ case EP_PCIE_EVENT_PM_D3_COLD:
+ if (old_dstate == MHI_SM_EP_PCIE_D3_COLD_STATE) {
+ MHI_SM_DBG("Nothing to do, already in D3_COLD state\n");
+ break;
+ }
+ ep_pcie_disable_endpoint(mhi_sm_ctx->mhi_dev->phandle);
+ mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D3_COLD_STATE;
+ break;
+ case EP_PCIE_EVENT_PM_RST_DEAST:
+ if (old_dstate == MHI_SM_EP_PCIE_D0_STATE) {
+ MHI_SM_DBG("Nothing to do, already in D0 state\n");
+ break;
+ }
+ res = ep_pcie_enable_endpoint(mhi_sm_ctx->mhi_dev->phandle,
+ EP_PCIE_OPT_POWER_ON);
+ if (res) {
+ MHI_SM_ERR("Failed to power on ep_pcie, returned %d\n",
+ res);
+ goto unlock_and_exit;
+ }
+
+ mhi_dev_restore_mmio(mhi_sm_ctx->mhi_dev);
+
+ res = ep_pcie_enable_endpoint(mhi_sm_ctx->mhi_dev->phandle,
+ EP_PCIE_OPT_ENUM);
+ if (res) {
+ MHI_SM_ERR("ep-pcie failed to link train, return %d\n",
+ res);
+ goto unlock_and_exit;
+ }
+ mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D0_STATE;
+ break;
+ case EP_PCIE_EVENT_PM_D0:
+ if (old_dstate == MHI_SM_EP_PCIE_D0_STATE) {
+ MHI_SM_DBG("Nothing to do, already in D0 state\n");
+ break;
+ }
+ mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D0_STATE;
+ break;
+ default:
+ MHI_SM_ERR("Invalid EP_PCIE event, received 0x%x\n",
+ pcie_event);
+ break;
+ }
+
+unlock_and_exit:
+ mutex_unlock(&mhi_sm_ctx->mhi_state_lock);
+ atomic_dec(&mhi_sm_ctx->pending_pcie_events);
+ kfree(chg_event);
+
+ MHI_SM_FUNC_EXIT();
+}
+
+/**
+ * mhi_dev_sm_init() - Initialize MHI state machine.
+ * @mhi_dev: pointer to mhi device instance
+ *
+ * Assuming MHISTATUS register is in RESET state.
+ *
+ * Return: 0 success
+ * -EINVAL: invalid param
+ * -ENOMEM: allocating memory error
+ */
+int mhi_dev_sm_init(struct mhi_dev *mhi_dev)
+{
+ int res;
+ enum ep_pcie_link_status link_state;
+
+ MHI_SM_FUNC_ENTRY();
+
+ if (!mhi_dev) {
+ MHI_SM_ERR("Fail: Null argument\n");
+ return -EINVAL;
+ }
+
+ mhi_sm_ctx = devm_kzalloc(mhi_dev->dev, sizeof(*mhi_sm_ctx),
+ GFP_KERNEL);
+ if (!mhi_sm_ctx) {
+ MHI_SM_ERR("devm_kzalloc err: mhi_sm_ctx\n");
+ return -ENOMEM;
+ }
+
+ /*init debugfs*/
+ mhi_sm_debugfs_init();
+ mhi_sm_ctx->mhi_sm_wq = create_singlethread_workqueue("mhi_sm_wq");
+ if (!mhi_sm_ctx->mhi_sm_wq) {
+ MHI_SM_ERR("Failed to create singlethread_workqueue: sm_wq\n");
+ res = -ENOMEM;
+ goto fail_init_wq;
+ }
+
+ mutex_init(&mhi_sm_ctx->mhi_state_lock);
+ mhi_sm_ctx->mhi_dev = mhi_dev;
+ mhi_sm_ctx->mhi_state = MHI_DEV_RESET_STATE;
+ mhi_sm_ctx->syserr_occurred = false;
+ atomic_set(&mhi_sm_ctx->pending_device_events, 0);
+ atomic_set(&mhi_sm_ctx->pending_pcie_events, 0);
+
+ link_state = ep_pcie_get_linkstatus(mhi_sm_ctx->mhi_dev->phandle);
+ if (link_state == EP_PCIE_LINK_ENABLED)
+ mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D0_STATE;
+ else
+ mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_LINK_DISABLE;
+
+ MHI_SM_FUNC_EXIT();
+ return 0;
+
+fail_init_wq:
+ mhi_sm_ctx = NULL;
+ mhi_sm_debugfs_destroy();
+ return res;
+}
+EXPORT_SYMBOL(mhi_dev_sm_init);
+
+/**
+ * mhi_dev_sm_get_mhi_state() - Get current MHI state.
+ * @state: return param
+ *
+ * Returns the current MHI state of the state machine.
+ *
+ * Return: 0 success
+ * -EINVAL: invalid param
+ * -EFAULT: state machine isn't initialized
+ */
+int mhi_dev_sm_get_mhi_state(enum mhi_dev_state *state)
+{
+ MHI_SM_FUNC_ENTRY();
+
+ if (!state) {
+ MHI_SM_ERR("Fail: Null argument\n");
+ return -EINVAL;
+ }
+ if (!mhi_sm_ctx) {
+ MHI_SM_ERR("Fail: MHI SM is not initialized\n");
+ return -EFAULT;
+ }
+ *state = mhi_sm_ctx->mhi_state;
+ MHI_SM_DBG("state machine states are: %s and %s\n",
+ mhi_sm_mstate_str(*state),
+ mhi_sm_dstate_str(mhi_sm_ctx->d_state));
+
+ MHI_SM_FUNC_EXIT();
+ return 0;
+}
+EXPORT_SYMBOL(mhi_dev_sm_get_mhi_state);
+
+/**
+ * mhi_dev_sm_set_ready() - Set MHI state to ready.
+ *
+ * Set MHISTATUS register in mmio to READY.
+ * Synchronous function.
+ *
+ * Return:	0: success
+ *		-EINVAL: mhi state manager is not initialized
+ *		-EPERM: operation not permitted, EP-PCIe link is disabled
+ *		-EFAULT: MHI state is not RESET
+ *		negative: other failure
+ */
+int mhi_dev_sm_set_ready(void)
+{
+ int res;
+ int is_ready;
+ enum mhi_dev_state state;
+
+ MHI_SM_FUNC_ENTRY();
+
+ if (!mhi_sm_ctx) {
+ MHI_SM_ERR("Failed, MHI SM isn't initialized\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&mhi_sm_ctx->mhi_state_lock);
+ if (mhi_sm_ctx->mhi_state != MHI_DEV_RESET_STATE) {
+		MHI_SM_ERR("Cannot switch to READY state from %s state\n",
+ mhi_sm_mstate_str(mhi_sm_ctx->mhi_state));
+ res = -EFAULT;
+ goto unlock_and_exit;
+ }
+
+ if (mhi_sm_ctx->d_state != MHI_SM_EP_PCIE_D0_STATE) {
+ if (ep_pcie_get_linkstatus(mhi_sm_ctx->mhi_dev->phandle) ==
+ EP_PCIE_LINK_ENABLED) {
+ mhi_sm_ctx->d_state = MHI_SM_EP_PCIE_D0_STATE;
+ } else {
+ MHI_SM_ERR("ERROR: ep-pcie link is not enabled\n");
+ res = -EPERM;
+ goto unlock_and_exit;
+ }
+ }
+
+ /* verify that MHISTATUS is configured to RESET*/
+ mhi_dev_mmio_masked_read(mhi_sm_ctx->mhi_dev,
+ MHISTATUS, MHISTATUS_MHISTATE_MASK,
+ MHISTATUS_MHISTATE_SHIFT, &state);
+
+ mhi_dev_mmio_masked_read(mhi_sm_ctx->mhi_dev, MHISTATUS,
+ MHISTATUS_READY_MASK,
+ MHISTATUS_READY_SHIFT, &is_ready);
+
+ if (state != MHI_DEV_RESET_STATE || is_ready) {
+ MHI_SM_ERR("Cannot switch to READY, MHI is not in RESET state");
+ MHI_SM_ERR("-MHISTATE: %s, READY bit: 0x%x\n",
+ mhi_sm_mstate_str(state), is_ready);
+ res = -EFAULT;
+ goto unlock_and_exit;
+ }
+	mhi_sm_mmio_set_mhistatus(MHI_DEV_READY_STATE);
+	res = 0;
+
+unlock_and_exit:
+ mutex_unlock(&mhi_sm_ctx->mhi_state_lock);
+ MHI_SM_FUNC_EXIT();
+ return res;
+}
+EXPORT_SYMBOL(mhi_dev_sm_set_ready);
+
+/**
+ * mhi_dev_notify_sm_event() - MHI core notifies the SM that an event occurred
+ * @event: the MHI state change event that was triggered
+ *
+ * Asynchronous function.
+ * No notification is sent back once the event has been handled.
+ *
+ * Return:	0: success
+ *	-EFAULT: SM isn't initialized or event isn't supported
+ *	-ENOMEM: allocating memory error
+ *	-EINVAL: invalid event
+ */
+int mhi_dev_notify_sm_event(enum mhi_dev_event event)
+{
+ struct mhi_sm_device_event *state_change_event;
+ int res;
+
+ MHI_SM_FUNC_ENTRY();
+
+ if (!mhi_sm_ctx) {
+ MHI_SM_ERR("Failed, MHI SM is not initialized\n");
+ return -EFAULT;
+ }
+
+ MHI_SM_DBG("received: %s\n",
+ mhi_sm_dev_event_str(event));
+
+ switch (event) {
+ case MHI_DEV_EVENT_M0_STATE:
+ mhi_sm_ctx->stats.m0_event_cnt++;
+ break;
+ case MHI_DEV_EVENT_M3_STATE:
+ mhi_sm_ctx->stats.m3_event_cnt++;
+ break;
+ case MHI_DEV_EVENT_HW_ACC_WAKEUP:
+ mhi_sm_ctx->stats.hw_acc_wakeup_event_cnt++;
+ break;
+ case MHI_DEV_EVENT_CORE_WAKEUP:
+ mhi_sm_ctx->stats.mhi_core_wakeup_event_cnt++;
+ break;
+ case MHI_DEV_EVENT_CTRL_TRIG:
+ case MHI_DEV_EVENT_M1_STATE:
+ case MHI_DEV_EVENT_M2_STATE:
+ MHI_SM_ERR("Not supported event: %s\n",
+ mhi_sm_dev_event_str(event));
+ res = -EFAULT;
+ goto exit;
+ default:
+ MHI_SM_ERR("Invalid event, received: 0x%x event\n", event);
+ res = -EINVAL;
+ goto exit;
+ }
+
+ /*init work and push to queue*/
+ state_change_event = kzalloc(sizeof(*state_change_event), GFP_ATOMIC);
+ if (!state_change_event) {
+ MHI_SM_ERR("kzalloc error\n");
+ res = -ENOMEM;
+ goto exit;
+ }
+
+ state_change_event->event = event;
+ INIT_WORK(&state_change_event->work, mhi_sm_dev_event_manager);
+ atomic_inc(&mhi_sm_ctx->pending_device_events);
+ queue_work(mhi_sm_ctx->mhi_sm_wq, &state_change_event->work);
+ res = 0;
+
+exit:
+ MHI_SM_FUNC_EXIT();
+ return res;
+}
+EXPORT_SYMBOL(mhi_dev_notify_sm_event);
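+
+/*
+ * Minimal bring-up sketch (illustrative only, not part of the driver): the
+ * MHI core is expected to initialize the state machine, move MHISTATUS to
+ * READY once the PCIe link is up, and then feed M-state events into the SM.
+ * The function name example_sm_bringup is hypothetical.
+ */
+static int __maybe_unused example_sm_bringup(struct mhi_dev *mhi)
+{
+	int rc;
+
+	rc = mhi_dev_sm_init(mhi);
+	if (rc)
+		return rc;
+
+	rc = mhi_dev_sm_set_ready();
+	if (rc)
+		return rc;
+
+	/* The host then requests M0; the core forwards the event to the SM */
+	return mhi_dev_notify_sm_event(MHI_DEV_EVENT_M0_STATE);
+}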
+
+/**
+ * mhi_dev_sm_pcie_handler() - handler of ep_pcie events
+ * @notify: pointer to a structure containing the ep_pcie event
+ *
+ * Callback, invoked by the ep_pcie driver to notify of a PCIe link state
+ * change. Asynchronous function.
+ */
+void mhi_dev_sm_pcie_handler(struct ep_pcie_notify *notify)
+{
+ struct mhi_sm_ep_pcie_event *dstate_change_evt;
+ enum ep_pcie_event event;
+
+ MHI_SM_FUNC_ENTRY();
+
+ if (!notify) {
+ MHI_SM_ERR("Null argument - notify\n");
+ return;
+ }
+
+ if (!mhi_sm_ctx) {
+ MHI_SM_ERR("Failed, MHI SM is not initialized\n");
+ return;
+ }
+
+ event = notify->event;
+ MHI_SM_DBG("received: %s\n",
+ mhi_sm_pcie_event_str(event));
+
+ dstate_change_evt = kzalloc(sizeof(*dstate_change_evt), GFP_ATOMIC);
+ if (!dstate_change_evt) {
+ MHI_SM_ERR("kzalloc error\n");
+ goto exit;
+ }
+
+ switch (event) {
+ case EP_PCIE_EVENT_LINKUP:
+ mhi_sm_ctx->stats.linkup_event_cnt++;
+ break;
+ case EP_PCIE_EVENT_PM_D3_COLD:
+ mhi_sm_ctx->stats.d3_cold_event_cnt++;
+ break;
+ case EP_PCIE_EVENT_PM_D3_HOT:
+ mhi_sm_ctx->stats.d3_hot_event_cnt++;
+ mhi_dev_backup_mmio(mhi_sm_ctx->mhi_dev);
+ break;
+ case EP_PCIE_EVENT_PM_RST_DEAST:
+ mhi_sm_ctx->stats.rst_deast_event_cnt++;
+ break;
+ case EP_PCIE_EVENT_PM_D0:
+ mhi_sm_ctx->stats.d0_event_cnt++;
+ break;
+ case EP_PCIE_EVENT_LINKDOWN:
+ mhi_sm_ctx->stats.linkdown_event_cnt++;
+ mhi_sm_ctx->syserr_occurred = true;
+ MHI_SM_ERR("got %s, ERROR occurred\n",
+ mhi_sm_pcie_event_str(event));
+ break;
+ case EP_PCIE_EVENT_MHI_A7:
+ ep_pcie_mask_irq_event(mhi_sm_ctx->mhi_dev->phandle,
+ EP_PCIE_INT_EVT_MHI_A7, false);
+ mhi_dev_notify_a7_event(mhi_sm_ctx->mhi_dev);
+ goto exit;
+ default:
+ MHI_SM_ERR("Invalid ep_pcie event, received 0x%x event\n",
+ event);
+ kfree(dstate_change_evt);
+ goto exit;
+ }
+
+ dstate_change_evt->event = event;
+ INIT_WORK(&dstate_change_evt->work, mhi_sm_pcie_event_manager);
+ queue_work(mhi_sm_ctx->mhi_sm_wq, &dstate_change_evt->work);
+ atomic_inc(&mhi_sm_ctx->pending_pcie_events);
+
+exit:
+ MHI_SM_FUNC_EXIT();
+}
+EXPORT_SYMBOL(mhi_dev_sm_pcie_handler);
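+
+/*
+ * Illustrative sketch (not part of the driver): the handler above is meant
+ * to be registered with the ep_pcie driver as its event callback; a
+ * notification would then be delivered roughly as below. The function name
+ * example_deliver_d3_hot is hypothetical.
+ */
+static void __maybe_unused example_deliver_d3_hot(void)
+{
+	struct ep_pcie_notify notify = { .event = EP_PCIE_EVENT_PM_D3_HOT };
+
+	mhi_dev_sm_pcie_handler(&notify);
+}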
+
+/**
+ * mhi_dev_sm_syserr() - switch to system error state.
+ *
+ * Called on system error condition.
+ * Switch MHI to SYSERR state, notify MHI-host and ASSERT on the device.
+ * Synchronous function.
+ *
+ * Return: 0: success
+ * negative: failure
+ */
+int mhi_dev_sm_syserr(void)
+{
+ int res;
+
+ MHI_SM_FUNC_ENTRY();
+
+ if (!mhi_sm_ctx) {
+ MHI_SM_ERR("Failed, MHI SM is not initialized\n");
+ return -EFAULT;
+ }
+
+ mutex_lock(&mhi_sm_ctx->mhi_state_lock);
+ res = mhi_sm_handle_syserr();
+ if (res)
+ MHI_SM_ERR("mhi_sm_handle_syserr failed %d\n", res);
+ mutex_unlock(&mhi_sm_ctx->mhi_state_lock);
+
+ MHI_SM_FUNC_EXIT();
+ return res;
+}
+EXPORT_SYMBOL(mhi_dev_sm_syserr);
+
+static ssize_t mhi_sm_debugfs_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int nbytes = 0;
+
+ if (!mhi_sm_ctx) {
+ nbytes = scnprintf(dbg_buff, MHI_SM_MAX_MSG_LEN,
+ "Not initialized\n");
+ } else {
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "*************** MHI State machine status ***************\n");
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "D state: %s\n",
+ mhi_sm_dstate_str(mhi_sm_ctx->d_state));
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "M state: %s\n",
+ mhi_sm_mstate_str(mhi_sm_ctx->mhi_state));
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "pending device events: %d\n",
+ atomic_read(&mhi_sm_ctx->pending_device_events));
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "pending pcie events: %d\n",
+ atomic_read(&mhi_sm_ctx->pending_pcie_events));
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "*************** Statistics ***************\n");
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "M0 events: %d\n", mhi_sm_ctx->stats.m0_event_cnt);
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "M3 events: %d\n", mhi_sm_ctx->stats.m3_event_cnt);
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "HW_ACC wakeup events: %d\n",
+ mhi_sm_ctx->stats.hw_acc_wakeup_event_cnt);
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "CORE wakeup events: %d\n",
+ mhi_sm_ctx->stats.mhi_core_wakeup_event_cnt);
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "Linkup events: %d\n",
+ mhi_sm_ctx->stats.linkup_event_cnt);
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "De-assert PERST events: %d\n",
+ mhi_sm_ctx->stats.rst_deast_event_cnt);
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "D0 events: %d\n",
+ mhi_sm_ctx->stats.d0_event_cnt);
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "D3_HOT events: %d\n",
+ mhi_sm_ctx->stats.d3_hot_event_cnt);
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "D3_COLD events:%d\n",
+ mhi_sm_ctx->stats.d3_cold_event_cnt);
+ nbytes += scnprintf(dbg_buff + nbytes,
+ MHI_SM_MAX_MSG_LEN - nbytes,
+ "Linkdown events: %d\n",
+ mhi_sm_ctx->stats.linkdown_event_cnt);
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t mhi_sm_debugfs_write(struct file *file,
+ const char __user *ubuf,
+ size_t count,
+ loff_t *ppos)
+{
+ unsigned long missing;
+ s8 in_num = 0;
+
+ if (!mhi_sm_ctx) {
+ MHI_SM_ERR("Not initialized\n");
+ return -EFAULT;
+ }
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, ubuf, count);
+ if (missing)
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+ if (kstrtos8(dbg_buff, 0, &in_num))
+ return -EFAULT;
+
+ switch (in_num) {
+ case 0:
+ if (atomic_read(&mhi_sm_ctx->pending_device_events) ||
+ atomic_read(&mhi_sm_ctx->pending_pcie_events))
+ MHI_SM_DBG("Note, there are pending events in sm_wq\n");
+
+ memset(&mhi_sm_ctx->stats, 0, sizeof(struct mhi_sm_stats));
+ break;
+ default:
+ MHI_SM_ERR("invalid argument: To reset statistics echo 0\n");
+ break;
+ }
+
+ return count;
+}
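+
+/*
+ * Usage sketch for the debugfs interface above (assuming debugfs is mounted
+ * at /sys/kernel/debug):
+ *   cat /sys/kernel/debug/mhi_sm/stats       - dump SM states and counters
+ *   echo 0 > /sys/kernel/debug/mhi_sm/stats  - reset the statistics
+ */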
diff --git a/drivers/platform/msm/mhi_dev/mhi_sm.h b/drivers/platform/msm/mhi_dev/mhi_sm.h
new file mode 100644
index 000000000000..ebf465e1cc43
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/mhi_sm.h
@@ -0,0 +1,51 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MHI_SM_H
+#define MHI_SM_H
+
+#include "mhi.h"
+#include <linux/slab.h>
+#include <linux/msm_ep_pcie.h>
+
+
+/**
+ * enum mhi_dev_event - MHI state change events
+ * @MHI_DEV_EVENT_CTRL_TRIG: CTRL register change event.
+ *		Not supported, for future use
+ * @MHI_DEV_EVENT_M0_STATE: M0 state change event
+ * @MHI_DEV_EVENT_M1_STATE: M1 state change event. Not supported, for future use
+ * @MHI_DEV_EVENT_M2_STATE: M2 state change event. Not supported, for future use
+ * @MHI_DEV_EVENT_M3_STATE: M3 state change event
+ * @MHI_DEV_EVENT_HW_ACC_WAKEUP: pending data on IPA, initiate host wakeup
+ * @MHI_DEV_EVENT_CORE_WAKEUP: MHI core initiated host wakeup
+ */
+enum mhi_dev_event {
+ MHI_DEV_EVENT_CTRL_TRIG,
+ MHI_DEV_EVENT_M0_STATE,
+ MHI_DEV_EVENT_M1_STATE,
+ MHI_DEV_EVENT_M2_STATE,
+ MHI_DEV_EVENT_M3_STATE,
+ MHI_DEV_EVENT_HW_ACC_WAKEUP,
+ MHI_DEV_EVENT_CORE_WAKEUP,
+ MHI_DEV_EVENT_MAX
+};
+
+int mhi_dev_sm_init(struct mhi_dev *dev);
+int mhi_dev_sm_set_ready(void);
+int mhi_dev_notify_sm_event(enum mhi_dev_event event);
+int mhi_dev_sm_get_mhi_state(enum mhi_dev_state *state);
+int mhi_dev_sm_syserr(void);
+void mhi_dev_sm_pcie_handler(struct ep_pcie_notify *notify);
+
+#endif /* MHI_SM_H */
+
diff --git a/drivers/platform/msm/mhi_dev/mhi_uci.c b/drivers/platform/msm/mhi_dev/mhi_uci.c
new file mode 100644
index 000000000000..64b5e7a73ef5
--- /dev/null
+++ b/drivers/platform/msm/mhi_dev/mhi_uci.c
@@ -0,0 +1,835 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/wait.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/tty.h>
+#include <linux/delay.h>
+#include <linux/ipc_logging.h>
+#include <linux/dma-mapping.h>
+#include <linux/msm_ipa.h>
+#include <linux/ipa.h>
+#include <uapi/linux/mhi.h>
+#include "mhi.h"
+
+#define MHI_DEV_NODE_NAME_LEN 13
+#define MHI_MAX_NR_OF_CLIENTS 23
+#define MHI_SOFTWARE_CLIENT_START 0
+#define MHI_SOFTWARE_CLIENT_LIMIT (MHI_MAX_SOFTWARE_CHANNELS/2)
+#define MHI_UCI_IPC_LOG_PAGES (100)
+
+#define MAX_NR_TRBS_PER_CHAN 1
+#define MHI_QTI_IFACE_ID 4
+#define DEVICE_NAME "mhi"
+
+enum uci_dbg_level {
+ UCI_DBG_VERBOSE = 0x0,
+ UCI_DBG_INFO = 0x1,
+ UCI_DBG_DBG = 0x2,
+ UCI_DBG_WARNING = 0x3,
+ UCI_DBG_ERROR = 0x4,
+ UCI_DBG_CRITICAL = 0x5,
+ UCI_DBG_reserved = 0x80000000
+};
+
+static enum uci_dbg_level mhi_uci_msg_lvl = UCI_DBG_CRITICAL;
+static enum uci_dbg_level mhi_uci_ipc_log_lvl = UCI_DBG_INFO;
+static void *mhi_uci_ipc_log;
+
+
+enum mhi_chan_dir {
+ MHI_DIR_INVALID = 0x0,
+ MHI_DIR_OUT = 0x1,
+ MHI_DIR_IN = 0x2,
+ MHI_DIR__reserved = 0x80000000
+};
+
+struct chan_attr {
+ /* SW maintained channel id */
+ enum mhi_client_channel chan_id;
+ /* maximum buffer size for this channel */
+ size_t max_packet_size;
+ /* number of buffers supported in this channel */
+ u32 nr_trbs;
+ /* direction of the channel, see enum mhi_chan_dir */
+ enum mhi_chan_dir dir;
+ u32 uci_ownership;
+};
+
+struct uci_client {
+ u32 client_index;
+ /* write channel - always odd*/
+ u32 out_chan;
+ /* read channel - always even */
+ u32 in_chan;
+ struct mhi_dev_client *out_handle;
+ struct mhi_dev_client *in_handle;
+ wait_queue_head_t read_wq;
+ wait_queue_head_t write_wq;
+ atomic_t read_data_ready;
+ struct device *dev;
+ atomic_t ref_count;
+ int mhi_status;
+ void *pkt_loc;
+ size_t pkt_size;
+ struct mhi_dev_iov *in_buf_list;
+ atomic_t write_data_ready;
+ atomic_t mhi_chans_open;
+ struct mhi_uci_ctxt_t *uci_ctxt;
+ struct mutex in_chan_lock;
+ struct mutex out_chan_lock;
+};
+
+struct mhi_uci_ctxt_t {
+ struct chan_attr chan_attrib[MHI_MAX_SOFTWARE_CHANNELS];
+ struct uci_client client_handles[MHI_SOFTWARE_CLIENT_LIMIT];
+ void (*event_notifier)(struct mhi_dev_client_cb_reason *cb);
+ dev_t start_ctrl_nr;
+ struct cdev cdev[MHI_MAX_SOFTWARE_CHANNELS];
+ struct class *mhi_uci_class;
+ atomic_t mhi_disabled;
+ atomic_t mhi_enable_notif_wq_active;
+};
+
+#define CHAN_TO_CLIENT(_CHAN_NR) ((_CHAN_NR) / 2)
+
+#define uci_log(_msg_lvl, _msg, ...) do { \
+ if (_msg_lvl >= mhi_uci_msg_lvl) { \
+ pr_err("[%s] "_msg, __func__, ##__VA_ARGS__); \
+ } \
+ if (mhi_uci_ipc_log && (_msg_lvl >= mhi_uci_ipc_log_lvl)) { \
+ ipc_log_string(mhi_uci_ipc_log, \
+ "[%s] " _msg, __func__, ##__VA_ARGS__); \
+ } \
+} while (0)
+
+
+module_param(mhi_uci_msg_lvl, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(mhi_uci_msg_lvl, "uci dbg lvl");
+
+module_param(mhi_uci_ipc_log_lvl, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(mhi_uci_ipc_log_lvl, "ipc dbg lvl");
+
+static ssize_t mhi_uci_client_read(struct file *file, char __user *buf,
+ size_t count, loff_t *offp);
+static ssize_t mhi_uci_client_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *offp);
+static int mhi_uci_client_open(struct inode *mhi_inode, struct file*);
+static int mhi_uci_client_release(struct inode *mhi_inode,
+ struct file *file_handle);
+static unsigned int mhi_uci_client_poll(struct file *file, poll_table *wait);
+static struct mhi_uci_ctxt_t uci_ctxt;
+
+static int mhi_init_read_chan(struct uci_client *client_handle,
+ enum mhi_client_channel chan)
+{
+ int rc = 0;
+ u32 i, j;
+ struct chan_attr *chan_attributes;
+ size_t buf_size;
+ void *data_loc;
+
+ if (client_handle == NULL) {
+ uci_log(UCI_DBG_ERROR, "Bad Input data, quitting\n");
+ return -EINVAL;
+ }
+ if (chan >= MHI_MAX_SOFTWARE_CHANNELS) {
+ uci_log(UCI_DBG_ERROR, "Incorrect channel number %d\n", chan);
+ return -EINVAL;
+ }
+
+ chan_attributes = &uci_ctxt.chan_attrib[chan];
+ buf_size = chan_attributes->max_packet_size;
+
+ for (i = 0; i < (chan_attributes->nr_trbs); i++) {
+ data_loc = kmalloc(buf_size, GFP_KERNEL);
+ if (!data_loc) {
+ rc = -ENOMEM;
+ goto free_memory;
+ }
+ client_handle->in_buf_list[i].addr = data_loc;
+ client_handle->in_buf_list[i].buf_size = buf_size;
+ }
+
+ return rc;
+
+free_memory:
+ for (j = 0; j < i; j++)
+ kfree(client_handle->in_buf_list[j].addr);
+
+ return rc;
+}
+
+static int mhi_uci_send_packet(struct mhi_dev_client **client_handle, void *buf,
+ u32 size, u32 is_uspace_buf)
+{
+ void *data_loc = NULL;
+ uintptr_t memcpy_result = 0;
+ u32 data_inserted_so_far = 0;
+ struct uci_client *uci_handle;
+
+	if (!client_handle || !buf || !size)
+		return -EINVAL;
+
+	uci_handle = container_of(client_handle, struct uci_client,
+					out_handle);
+
+ if (is_uspace_buf) {
+ data_loc = kmalloc(size, GFP_KERNEL);
+ if (!data_loc) {
+ uci_log(UCI_DBG_ERROR,
+ "Failed to allocate memory 0x%x\n",
+ size);
+ return -ENOMEM;
+ }
+ memcpy_result = copy_from_user(data_loc, buf, size);
+ if (memcpy_result)
+ goto error_memcpy;
+ } else {
+ data_loc = buf;
+ }
+
+ data_inserted_so_far = mhi_dev_write_channel(*client_handle, data_loc,
+ size);
+
+error_memcpy:
+ kfree(data_loc);
+ return data_inserted_so_far;
+}
+
+static unsigned int mhi_uci_client_poll(struct file *file, poll_table *wait)
+{
+ unsigned int mask = 0;
+ struct uci_client *uci_handle;
+
+ uci_handle = file->private_data;
+
+ if (!uci_handle)
+ return -ENODEV;
+
+ poll_wait(file, &uci_handle->read_wq, wait);
+ poll_wait(file, &uci_handle->write_wq, wait);
+ if (!atomic_read(&uci_ctxt.mhi_disabled) &&
+ !mhi_dev_channel_isempty(uci_handle->in_handle)) {
+ uci_log(UCI_DBG_VERBOSE,
+ "Client can read chan %d\n", uci_handle->in_chan);
+ mask |= POLLIN | POLLRDNORM;
+ }
+ if (!atomic_read(&uci_ctxt.mhi_disabled) &&
+ !mhi_dev_channel_isempty(uci_handle->out_handle)) {
+ uci_log(UCI_DBG_VERBOSE,
+ "Client can write chan %d\n", uci_handle->out_chan);
+ mask |= POLLOUT | POLLWRNORM;
+ }
+
+ uci_log(UCI_DBG_VERBOSE,
+ "Client attempted to poll chan %d, returning mask 0x%x\n",
+ uci_handle->in_chan, mask);
+ return mask;
+}
+
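+/*
+ * open_client_mhi_channels() - allocate the read buffers and open the
+ * outbound and inbound MHI channels for a client, holding both channel
+ * locks for the duration of the setup.
+ */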
+static int open_client_mhi_channels(struct uci_client *uci_client)
+{
+ int rc = 0;
+
+ uci_log(UCI_DBG_DBG,
+ "Starting channels %d %d.\n",
+ uci_client->out_chan,
+ uci_client->in_chan);
+ mutex_lock(&uci_client->out_chan_lock);
+ mutex_lock(&uci_client->in_chan_lock);
+ uci_log(UCI_DBG_DBG,
+ "Initializing inbound chan %d.\n",
+ uci_client->in_chan);
+
+	rc = mhi_init_read_chan(uci_client, uci_client->in_chan);
+	if (rc < 0) {
+		uci_log(UCI_DBG_ERROR,
+			"Failed to init inbound chan %d, ret %d\n",
+			uci_client->in_chan, rc);
+		goto handle_not_rdy_err;
+	}
+
+ rc = mhi_dev_open_channel(uci_client->out_chan,
+ &uci_client->out_handle,
+ uci_ctxt.event_notifier);
+ if (rc < 0)
+ goto handle_not_rdy_err;
+
+ rc = mhi_dev_open_channel(uci_client->in_chan,
+ &uci_client->in_handle,
+ uci_ctxt.event_notifier);
+
+ if (rc < 0) {
+ uci_log(UCI_DBG_ERROR,
+ "Failed to open chan %d, ret 0x%x\n",
+ uci_client->out_chan, rc);
+ goto handle_in_err;
+ }
+ atomic_set(&uci_client->mhi_chans_open, 1);
+ mutex_unlock(&uci_client->in_chan_lock);
+ mutex_unlock(&uci_client->out_chan_lock);
+
+ return 0;
+
+handle_in_err:
+ mhi_dev_close_channel(uci_client->out_handle);
+handle_not_rdy_err:
+ mutex_unlock(&uci_client->in_chan_lock);
+ mutex_unlock(&uci_client->out_chan_lock);
+ return rc;
+}
+
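+/*
+ * mhi_uci_client_open() - open handler for the UCI device nodes.
+ * The client context is selected by the device minor number; the MHI
+ * channels are opened only when the first reference is taken.
+ */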
+static int mhi_uci_client_open(struct inode *mhi_inode,
+ struct file *file_handle)
+{
+ struct uci_client *uci_handle;
+ int rc = 0;
+
+ uci_handle =
+ &uci_ctxt.client_handles[iminor(mhi_inode)];
+
+ uci_log(UCI_DBG_DBG,
+ "Client opened struct device node 0x%x, ref count 0x%x\n",
+ iminor(mhi_inode), atomic_read(&uci_handle->ref_count));
+	if (atomic_add_return(1, &uci_handle->ref_count) == 1) {
+		uci_handle->uci_ctxt = &uci_ctxt;
+		if (!atomic_read(&uci_handle->mhi_chans_open)) {
+			uci_log(UCI_DBG_INFO,
+				"Opening channels client %d\n",
+				iminor(mhi_inode));
+			rc = open_client_mhi_channels(uci_handle);
+			if (rc) {
+				uci_log(UCI_DBG_INFO,
+					"Failed to open channels ret %d\n", rc);
+				atomic_dec(&uci_handle->ref_count);
+				return rc;
+			}
+ }
+ }
+ file_handle->private_data = uci_handle;
+
+ return 0;
+
+}
+
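+/*
+ * mhi_uci_client_release() - release handler for the UCI device nodes.
+ * The MHI channels are closed and any sleeping readers or writers are
+ * woken once the last reference to the client is dropped.
+ */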
+static int mhi_uci_client_release(struct inode *mhi_inode,
+ struct file *file_handle)
+{
+	struct uci_client *uci_handle = file_handle->private_data;
+	struct mhi_uci_ctxt_t *uci_ctxt;
+	u32 nr_in_bufs = 0;
+	int rc = 0;
+	int in_chan = 0;
+	u32 buf_size = 0;
+
+	if (!uci_handle)
+		return -EINVAL;
+
+	uci_ctxt = uci_handle->uci_ctxt;
+	in_chan = iminor(mhi_inode) + 1;
+	nr_in_bufs = uci_ctxt->chan_attrib[in_chan].nr_trbs;
+	buf_size = uci_ctxt->chan_attrib[in_chan].max_packet_size;
+
+ if (atomic_sub_return(1, &uci_handle->ref_count) == 0) {
+ uci_log(UCI_DBG_DBG,
+ "Last client left, closing channel 0x%x\n",
+ iminor(mhi_inode));
+ if (atomic_read(&uci_handle->mhi_chans_open)) {
+ atomic_set(&uci_handle->mhi_chans_open, 0);
+
+ mutex_lock(&uci_handle->out_chan_lock);
+ rc = mhi_dev_close_channel(uci_handle->out_handle);
+ wake_up(&uci_handle->write_wq);
+ mutex_unlock(&uci_handle->out_chan_lock);
+
+ mutex_lock(&uci_handle->in_chan_lock);
+ rc = mhi_dev_close_channel(uci_handle->in_handle);
+ wake_up(&uci_handle->read_wq);
+ mutex_unlock(&uci_handle->in_chan_lock);
+
+ }
+ atomic_set(&uci_handle->read_data_ready, 0);
+ atomic_set(&uci_handle->write_data_ready, 0);
+ file_handle->private_data = NULL;
+ } else {
+ uci_log(UCI_DBG_DBG,
+ "Client close chan %d, ref count 0x%x\n",
+ iminor(mhi_inode),
+ atomic_read(&uci_handle->ref_count));
+ }
+ return rc;
+}
+
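+/*
+ * mhi_uci_client_read() - read handler for the UCI device nodes.
+ * Fetches a packet from the inbound channel into the pre-allocated local
+ * buffer, blocking until data arrives, and copies it to user space across
+ * one or more read() calls; *bytes_pending tracks what is left of the
+ * current packet.
+ */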
+static ssize_t mhi_uci_client_read(struct file *file, char __user *buf,
+ size_t uspace_buf_size, loff_t *bytes_pending)
+{
+ struct uci_client *uci_handle = NULL;
+ struct mhi_dev_client *client_handle = NULL;
+ int bytes_avail = 0;
+ int ret_val = 0;
+ struct mutex *mutex;
+ u32 chan = 0;
+ ssize_t bytes_copied = 0;
+ u32 addr_offset = 0;
+ uint32_t buf_size;
+ uint32_t chained = 0;
+ void *local_buf = NULL;
+
+ if (!file || !buf || !uspace_buf_size ||
+ !file->private_data)
+ return -EINVAL;
+
+ uci_handle = file->private_data;
+ client_handle = uci_handle->in_handle;
+ mutex = &uci_handle->in_chan_lock;
+ chan = uci_handle->in_chan;
+
+ mutex_lock(mutex);
+
+ local_buf = uci_handle->in_buf_list[0].addr;
+ buf_size = uci_handle->in_buf_list[0].buf_size;
+
+ uci_log(UCI_DBG_VERBOSE, "Client attempted read on chan %d\n", chan);
+ do {
+ if (!uci_handle->pkt_loc &&
+ !atomic_read(&uci_ctxt.mhi_disabled)) {
+
+ bytes_avail = mhi_dev_read_channel(client_handle,
+ local_buf, buf_size, &chained);
+
+ uci_log(UCI_DBG_VERBOSE,
+ "reading from mhi_core local_buf = %p,buf_size = 0x%x bytes_read = 0x%x\n",
+ local_buf, buf_size, bytes_avail);
+
+ if (bytes_avail < 0) {
+ uci_log(UCI_DBG_ERROR,
+ "Failed to read channel ret %d\n",
+ bytes_avail);
+ ret_val = -EIO;
+ goto error;
+ }
+
+ if (bytes_avail > 0) {
+ uci_handle->pkt_loc = (void *)local_buf;
+ uci_handle->pkt_size = bytes_avail;
+
+ *bytes_pending = (loff_t)uci_handle->pkt_size;
+ uci_log(UCI_DBG_VERBOSE,
+ "Got pkt of size 0x%x at addr %p, chan %d\n",
+ uci_handle->pkt_size, local_buf, chan);
+ } else {
+ uci_handle->pkt_loc = 0;
+ uci_handle->pkt_size = 0;
+ }
+ }
+ if (bytes_avail == 0) {
+
+ /* If nothing was copied yet, wait for data */
+ uci_log(UCI_DBG_VERBOSE,
+ "No data read_data_ready %d, chan %d\n",
+ atomic_read(&uci_handle->read_data_ready),
+ chan);
+
+ ret_val = wait_event_interruptible(uci_handle->read_wq,
+ (!mhi_dev_channel_isempty(client_handle)));
+
+ if (ret_val == -ERESTARTSYS) {
+ uci_log(UCI_DBG_ERROR, "Exit signal caught\n");
+ goto error;
+ }
+ uci_log(UCI_DBG_VERBOSE,
+ "Thread woke up. Got data on chan %d read_data_ready %d\n",
+ chan,
+ atomic_read(&uci_handle->read_data_ready));
+
+ /* A valid packet was returned from MHI */
+ } else if (bytes_avail > 0) {
+ uci_log(UCI_DBG_VERBOSE,
+ "Got packet: avail pkts %d phy_adr %p, chan %d\n",
+ atomic_read(&uci_handle->read_data_ready),
+ local_buf,
+ chan);
+ break;
+ /*
+ * MHI did not return a valid packet, but we have one
+ * which we did not finish returning to user
+ */
+		} else {
+			uci_log(UCI_DBG_CRITICAL,
+				"chan %d err: avail pkts %d phy_adr %p\n",
+				chan,
+				atomic_read(&uci_handle->read_data_ready),
+				local_buf);
+			ret_val = -EIO;
+			goto error;
+		}
+ } while (!uci_handle->pkt_loc);
+
+ if (uspace_buf_size >= *bytes_pending) {
+ addr_offset = uci_handle->pkt_size - *bytes_pending;
+ if (copy_to_user(buf, uci_handle->pkt_loc + addr_offset,
+ *bytes_pending)) {
+ ret_val = -EIO;
+ goto error;
+ }
+
+ bytes_copied = *bytes_pending;
+ *bytes_pending = 0;
+ uci_log(UCI_DBG_VERBOSE, "Copied 0x%x of 0x%x, chan %d\n",
+ bytes_copied, (u32)*bytes_pending, chan);
+ } else {
+ addr_offset = uci_handle->pkt_size - *bytes_pending;
+ if (copy_to_user(buf, (void *) (uintptr_t)uci_handle->pkt_loc +
+ addr_offset, uspace_buf_size)) {
+ ret_val = -EIO;
+ goto error;
+ }
+ bytes_copied = uspace_buf_size;
+ *bytes_pending -= uspace_buf_size;
+ uci_log(UCI_DBG_VERBOSE, "Copied 0x%x of 0x%x,chan %d\n",
+ bytes_copied,
+ (u32)*bytes_pending,
+ chan);
+ }
+ /* We finished with this buffer, map it back */
+ if (*bytes_pending == 0) {
+ uci_log(UCI_DBG_VERBOSE,
+ "All data consumed. Pkt loc %p ,chan %d\n",
+ uci_handle->pkt_loc, chan);
+ uci_handle->pkt_loc = 0;
+ uci_handle->pkt_size = 0;
+ }
+ uci_log(UCI_DBG_VERBOSE,
+ "Returning 0x%x bytes, 0x%x bytes left\n",
+ bytes_copied, (u32)*bytes_pending);
+ mutex_unlock(mutex);
+ return bytes_copied;
+error:
+ mutex_unlock(mutex);
+ uci_log(UCI_DBG_ERROR, "Returning %d\n", ret_val);
+ return ret_val;
+}
+
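+/*
+ * mhi_uci_client_write() - write handler for the UCI device nodes.
+ * Sends the user buffer on the outbound channel, blocking until
+ * descriptors become available when the channel is full.
+ */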
+static ssize_t mhi_uci_client_write(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *offp)
+{
+ struct uci_client *uci_handle = NULL;
+ int ret_val = 0;
+ u32 chan = 0xFFFFFFFF;
+
+ if (file == NULL || buf == NULL ||
+ !count || file->private_data == NULL)
+ return -EINVAL;
+
+ uci_handle = file->private_data;
+
+ if (atomic_read(&uci_ctxt.mhi_disabled)) {
+ uci_log(UCI_DBG_ERROR,
+ "Client %d attempted to write while MHI is disabled\n",
+ uci_handle->out_chan);
+ return -EIO;
+ }
+ chan = uci_handle->out_chan;
+ mutex_lock(&uci_handle->out_chan_lock);
+ while (!ret_val) {
+ ret_val = mhi_uci_send_packet(&uci_handle->out_handle,
+ (void *)buf, count, 1);
+ if (ret_val < 0) {
+			uci_log(UCI_DBG_ERROR,
+				"Error while writing data to MHI, chan %d, buf %p, size %zu\n",
+				chan, (void *)buf, count);
+ ret_val = -EIO;
+ break;
+ }
+ if (!ret_val) {
+ uci_log(UCI_DBG_VERBOSE,
+ "No descriptors available, did we poll, chan %d?\n",
+ chan);
+ mutex_unlock(&uci_handle->out_chan_lock);
+ ret_val = wait_event_interruptible(uci_handle->write_wq,
+ !mhi_dev_channel_isempty(
+ uci_handle->out_handle));
+
+ mutex_lock(&uci_handle->out_chan_lock);
+ if (-ERESTARTSYS == ret_val) {
+ uci_log(UCI_DBG_WARNING,
+ "Waitqueue cancelled by system\n");
+ break;
+ }
+ }
+ }
+ mutex_unlock(&uci_handle->out_chan_lock);
+ return ret_val;
+}
+
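+/*
+ * uci_init_client_attributes() - mark the software channels owned by UCI,
+ * set their direction and packet size, and allocate the inbound buffer
+ * descriptor list for each owned client.
+ */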
+static int uci_init_client_attributes(struct mhi_uci_ctxt_t *uci_ctxt)
+{
+ u32 i = 0;
+ u32 data_size = TRB_MAX_DATA_SIZE;
+ u32 index = 0;
+ struct uci_client *client;
+ struct chan_attr *chan_attrib = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(uci_ctxt->chan_attrib); i++) {
+ chan_attrib = &uci_ctxt->chan_attrib[i];
+ switch (i) {
+ case MHI_CLIENT_LOOPBACK_OUT:
+ case MHI_CLIENT_LOOPBACK_IN:
+ case MHI_CLIENT_SAHARA_OUT:
+ case MHI_CLIENT_SAHARA_IN:
+ case MHI_CLIENT_EFS_OUT:
+ case MHI_CLIENT_EFS_IN:
+ case MHI_CLIENT_QMI_OUT:
+ case MHI_CLIENT_QMI_IN:
+ case MHI_CLIENT_IP_CTRL_0_OUT:
+ case MHI_CLIENT_IP_CTRL_0_IN:
+ case MHI_CLIENT_IP_CTRL_1_OUT:
+ case MHI_CLIENT_IP_CTRL_1_IN:
+ case MHI_CLIENT_DUN_OUT:
+ case MHI_CLIENT_DUN_IN:
+ chan_attrib->uci_ownership = 1;
+ break;
+ default:
+ chan_attrib->uci_ownership = 0;
+ break;
+ }
+ if (chan_attrib->uci_ownership) {
+ chan_attrib->chan_id = i;
+ chan_attrib->max_packet_size = data_size;
+ index = CHAN_TO_CLIENT(i);
+ client = &uci_ctxt->client_handles[index];
+ chan_attrib->nr_trbs = 9;
+ client->in_buf_list =
+ kmalloc(sizeof(struct mhi_dev_iov) *
+ chan_attrib->nr_trbs,
+ GFP_KERNEL);
+ if (client->in_buf_list == NULL)
+ return -ENOMEM;
+ }
+ if (i % 2 == 0)
+ chan_attrib->dir = MHI_DIR_OUT;
+ else
+ chan_attrib->dir = MHI_DIR_IN;
+ }
+ return 0;
+}
+
+
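+/*
+ * uci_event_notifier() - MHI core callback; on a TRE available event the
+ * matching client is flagged and its reader or writer wait queue is woken,
+ * depending on the channel direction.
+ */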
+static void uci_event_notifier(struct mhi_dev_client_cb_reason *reason)
+{
+ int client_index = 0;
+ struct uci_client *uci_handle = NULL;
+
+ if (reason->reason == MHI_DEV_TRE_AVAILABLE) {
+ client_index = reason->ch_id / 2;
+ uci_handle = &uci_ctxt.client_handles[client_index];
+ uci_log(UCI_DBG_DBG,
+			"received TRE available event for chan %d\n",
+ uci_handle->in_chan);
+
+ if (reason->ch_id % 2) {
+ atomic_set(&uci_handle->write_data_ready, 1);
+ wake_up(&uci_handle->write_wq);
+ } else {
+ atomic_set(&uci_handle->read_data_ready, 1);
+ wake_up(&uci_handle->read_wq);
+ }
+ }
+}
+
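+/*
+ * mhi_register_client() - initialise the wait queues, channel locks and
+ * channel numbers (even = inbound, odd = outbound) for one UCI client.
+ */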
+static int mhi_register_client(struct uci_client *mhi_client, int index)
+{
+ init_waitqueue_head(&mhi_client->read_wq);
+ init_waitqueue_head(&mhi_client->write_wq);
+ mhi_client->out_chan = index * 2 + 1;
+ mhi_client->in_chan = index * 2;
+ mhi_client->client_index = index;
+
+ mutex_init(&mhi_client->in_chan_lock);
+ mutex_init(&mhi_client->out_chan_lock);
+
+ uci_log(UCI_DBG_DBG, "Registering chan %d.\n", mhi_client->out_chan);
+ return 0;
+}
+
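+/*
+ * mhi_uci_client_ioctl() - ioctl handler; MHI_UCI_EP_LOOKUP returns the
+ * PCIe endpoint type, peripheral interface id and the IPA producer and
+ * consumer pipe numbers to user space.
+ */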
+static long mhi_uci_client_ioctl(struct file *file, unsigned cmd,
+ unsigned long arg)
+{
+ struct uci_client *uci_handle = NULL;
+ int rc = 0;
+ struct ep_info epinfo;
+
+ if (file == NULL || file->private_data == NULL)
+ return -EINVAL;
+
+ uci_handle = file->private_data;
+
+ uci_log(UCI_DBG_DBG, "Received command %d for client:%d\n",
+ cmd, uci_handle->client_index);
+
+ if (cmd == MHI_UCI_EP_LOOKUP) {
+ uci_log(UCI_DBG_DBG, "EP_LOOKUP for client:%d\n",
+ uci_handle->client_index);
+ epinfo.ph_ep_info.ep_type = DATA_EP_TYPE_PCIE;
+ epinfo.ph_ep_info.peripheral_iface_id = MHI_QTI_IFACE_ID;
+ epinfo.ipa_ep_pair.cons_pipe_num =
+ ipa_get_ep_mapping(IPA_CLIENT_MHI_PROD);
+ epinfo.ipa_ep_pair.prod_pipe_num =
+ ipa_get_ep_mapping(IPA_CLIENT_MHI_CONS);
+
+ uci_log(UCI_DBG_DBG, "client:%d ep_type:%d intf:%d\n",
+ uci_handle->client_index,
+ epinfo.ph_ep_info.ep_type,
+ epinfo.ph_ep_info.peripheral_iface_id);
+
+ uci_log(UCI_DBG_DBG, "ipa_cons_idx:%d ipa_prod_idx:%d\n",
+ epinfo.ipa_ep_pair.cons_pipe_num,
+ epinfo.ipa_ep_pair.prod_pipe_num);
+
+ rc = copy_to_user((void __user *)arg, &epinfo,
+ sizeof(epinfo));
+ if (rc)
+ uci_log(UCI_DBG_ERROR, "copying to user space failed");
+ } else {
+ uci_log(UCI_DBG_ERROR, "wrong parameter:%d\n", cmd);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static const struct file_operations mhi_uci_client_fops = {
+ .read = mhi_uci_client_read,
+ .write = mhi_uci_client_write,
+ .open = mhi_uci_client_open,
+ .release = mhi_uci_client_release,
+ .poll = mhi_uci_client_poll,
+ .unlocked_ioctl = mhi_uci_client_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mhi_uci_client_ioctl,
+#endif
+};
+
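+/*
+ * mhi_uci_init() - set up IPC logging, channel attributes and client
+ * contexts, then allocate the char device region and create one device
+ * node per UCI-owned channel pair.
+ */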
+int mhi_uci_init(void)
+{
+	int i = 0;
+ int ret_val = 0;
+ struct uci_client *mhi_client = NULL;
+ s32 r = 0;
+
+ mhi_uci_ipc_log = ipc_log_context_create(MHI_UCI_IPC_LOG_PAGES,
+ "mhi-uci", 0);
+ if (mhi_uci_ipc_log == NULL) {
+ uci_log(UCI_DBG_WARNING,
+ "Failed to create IPC logging context\n");
+ }
+ uci_ctxt.event_notifier = uci_event_notifier;
+
+ uci_log(UCI_DBG_DBG, "Setting up channel attributes.\n");
+
+ ret_val = uci_init_client_attributes(&uci_ctxt);
+ if (ret_val < 0) {
+ uci_log(UCI_DBG_ERROR,
+ "Failed to init client attributes\n");
+ return -EIO;
+ }
+
+ uci_log(UCI_DBG_DBG, "Initializing clients\n");
+ uci_log(UCI_DBG_INFO, "Registering for MHI events.\n");
+
+ for (i = 0; i < MHI_SOFTWARE_CLIENT_LIMIT; i++) {
+ if (uci_ctxt.chan_attrib[i * 2].uci_ownership) {
+ mhi_client = &uci_ctxt.client_handles[i];
+
+ r = mhi_register_client(mhi_client, i);
+
+ if (r) {
+				uci_log(UCI_DBG_CRITICAL,
+					"Failed to reg client %d ret %d\n",
+					i, r);
+ }
+ }
+ }
+ uci_log(UCI_DBG_INFO, "Allocating char devices.\n");
+ r = alloc_chrdev_region(&uci_ctxt.start_ctrl_nr,
+ 0, MHI_MAX_SOFTWARE_CHANNELS,
+ DEVICE_NAME);
+
+ if (IS_ERR_VALUE(r)) {
+ uci_log(UCI_DBG_ERROR,
+ "Failed to alloc char devs, ret 0x%x\n", r);
+ goto failed_char_alloc;
+ }
+ uci_log(UCI_DBG_INFO, "Creating class\n");
+ uci_ctxt.mhi_uci_class = class_create(THIS_MODULE,
+ DEVICE_NAME);
+	if (IS_ERR(uci_ctxt.mhi_uci_class)) {
+		r = PTR_ERR(uci_ctxt.mhi_uci_class);
+		uci_log(UCI_DBG_ERROR,
+			"Failed to instantiate class, ret %d\n", r);
+		goto failed_class_add;
+	}
+
+ uci_log(UCI_DBG_INFO, "Setting up device nodes.\n");
+ for (i = 0; i < MHI_SOFTWARE_CLIENT_LIMIT; i++) {
+ if (uci_ctxt.chan_attrib[i*2].uci_ownership) {
+ cdev_init(&uci_ctxt.cdev[i], &mhi_uci_client_fops);
+ uci_ctxt.cdev[i].owner = THIS_MODULE;
+ r = cdev_add(&uci_ctxt.cdev[i],
+ uci_ctxt.start_ctrl_nr + i, 1);
+ if (IS_ERR_VALUE(r)) {
+ uci_log(UCI_DBG_ERROR,
+ "Failed to add cdev %d, ret 0x%x\n",
+ i, r);
+ goto failed_char_add;
+ }
+ uci_ctxt.client_handles[i].dev =
+ device_create(uci_ctxt.mhi_uci_class, NULL,
+ uci_ctxt.start_ctrl_nr + i,
+ NULL, DEVICE_NAME "_pipe_%d",
+ i * 2);
+
+ if (IS_ERR(uci_ctxt.client_handles[i].dev)) {
+ uci_log(UCI_DBG_ERROR,
+ "Failed to add cdev %d\n", i);
+ cdev_del(&uci_ctxt.cdev[i]);
+ goto failed_device_create;
+ }
+ }
+ }
+ return 0;
+
+failed_char_add:
+failed_device_create:
+	/* Unwind only the nodes that were actually created above */
+	while (--i >= 0) {
+		if (!uci_ctxt.chan_attrib[i * 2].uci_ownership)
+			continue;
+		cdev_del(&uci_ctxt.cdev[i]);
+		device_destroy(uci_ctxt.mhi_uci_class,
+				uci_ctxt.start_ctrl_nr + i);
+	}
+ class_destroy(uci_ctxt.mhi_uci_class);
+failed_class_add:
+ unregister_chrdev_region(MAJOR(uci_ctxt.start_ctrl_nr),
+ MHI_MAX_SOFTWARE_CHANNELS);
+failed_char_alloc:
+ return r;
+}