diff options
| author | Andrei Danaila <adanaila@codeaurora.org> | 2014-02-15 20:52:26 -0800 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 21:10:50 -0700 |
| commit | 6f0be3c7a14fbd9a2786990482ab825f327c8ed8 (patch) | |
| tree | e92df5c1d58e2ad63e964b757e31cdc229036f5e /drivers/platform | |
| parent | fb5e04006943d5daab83e4b381149b39e9ef39c1 (diff) | |
msm: mhi: Add MHI core driver
Enable the MHI core driver for communication
between host and device, using PCIe as the interconnect
and MHI as the communication protocol.
The driver exposes several kernel space APIs
for use by other kernel entities to interface to
the PCIe device over MHI.
The driver provides APIs for read, write,
and other notifications.
Support for full power management and device reset
is also included.
CRs-Fixed: 689329
Change-Id: Ibc2fd7c2d5689001485f71b1133ada2c4ca236a9
Signed-off-by: Andrei Danaila <adanaila@codeaurora.org>
Diffstat (limited to 'drivers/platform')
| -rw-r--r-- | drivers/platform/msm/Kconfig | 9 | ||||
| -rw-r--r-- | drivers/platform/msm/Makefile | 1 | ||||
| -rw-r--r-- | drivers/platform/msm/mhi/Makefile | 12 | ||||
| -rw-r--r-- | drivers/platform/msm/mhi/mhi.h | 587 | ||||
| -rw-r--r-- | drivers/platform/msm/mhi/mhi_bhi.c | 202 | ||||
| -rw-r--r-- | drivers/platform/msm/mhi/mhi_bhi.h | 55 | ||||
| -rw-r--r-- | drivers/platform/msm/mhi/mhi_hwio.h | 170 | ||||
| -rw-r--r-- | drivers/platform/msm/mhi/mhi_iface.c | 297 | ||||
| -rw-r--r-- | drivers/platform/msm/mhi/mhi_init.c | 695 | ||||
| -rw-r--r-- | drivers/platform/msm/mhi/mhi_isr.c | 244 | ||||
| -rw-r--r-- | drivers/platform/msm/mhi/mhi_macros.h | 341 | ||||
| -rw-r--r-- | drivers/platform/msm/mhi/mhi_main.c | 1368 | ||||
| -rw-r--r-- | drivers/platform/msm/mhi/mhi_mmio_ops.c | 211 | ||||
| -rw-r--r-- | drivers/platform/msm/mhi/mhi_pm.c | 272 | ||||
| -rw-r--r-- | drivers/platform/msm/mhi/mhi_ring_ops.c | 186 | ||||
| -rw-r--r-- | drivers/platform/msm/mhi/mhi_ssr.c | 248 | ||||
| -rw-r--r-- | drivers/platform/msm/mhi/mhi_states.c | 1063 | ||||
| -rw-r--r-- | drivers/platform/msm/mhi/mhi_sys.c | 365 | ||||
| -rw-r--r-- | drivers/platform/msm/mhi/mhi_sys.h | 79 |
19 files changed, 6405 insertions, 0 deletions
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig index e233846ddb31..8b2f734bd132 100644 --- a/drivers/platform/msm/Kconfig +++ b/drivers/platform/msm/Kconfig @@ -133,6 +133,15 @@ config GPIO_USB_DETECT USB driver of VBUS presence/disconnection using the power_supply framework. +config MSM_MHI + tristate "Modem Host Interface Driver" + help + This kernel module is used to interact with PCIe endpoints + supporting MHI protocol. MHI is a data transmission protocol + involving communication between a host and a device over shared + memory. The MHI driver manages the shared memory by use of logical + unidirectional channels. + config MSM_MHI_UCI tristate "MHI Usperspace Control Interface Driver" depends on MSM_MHI diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile index 00414887b05e..a6ae434f3caf 100644 --- a/drivers/platform/msm/Makefile +++ b/drivers/platform/msm/Makefile @@ -3,6 +3,7 @@ # obj-$(CONFIG_QPNP_REVID) += qpnp-revid.o obj-$(CONFIG_QPNP_COINCELL) += qpnp-coincell.o +obj-$(CONFIG_MSM_MHI) += mhi/ obj-$(CONFIG_MSM_MHI_UCI) += mhi_uci/ obj-$(CONFIG_SPS) += sps/ obj-$(CONFIG_GSI) += gsi/ diff --git a/drivers/platform/msm/mhi/Makefile b/drivers/platform/msm/mhi/Makefile new file mode 100644 index 000000000000..4470a26defcb --- /dev/null +++ b/drivers/platform/msm/mhi/Makefile @@ -0,0 +1,12 @@ +# Makefile for MHI driver +obj-y += mhi_main.o +obj-y += mhi_iface.o +obj-y += mhi_init.o +obj-y += mhi_isr.o +obj-y += mhi_mmio_ops.o +obj-y += mhi_ring_ops.o +obj-y += mhi_states.o +obj-y += mhi_sys.o +obj-y += mhi_bhi.o +obj-y += mhi_pm.o +obj-y += mhi_ssr.o diff --git a/drivers/platform/msm/mhi/mhi.h b/drivers/platform/msm/mhi/mhi.h new file mode 100644 index 000000000000..8c7b54c148f5 --- /dev/null +++ b/drivers/platform/msm/mhi/mhi.h @@ -0,0 +1,587 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _H_MHI +#define _H_MHI + +#include "mhi_macros.h" +#include <linux/msm_mhi.h> +#include <linux/types.h> +#include <linux/pm.h> +#include <linux/completion.h> +#include <linux/atomic.h> +#include <linux/spinlock.h> +#include <linux/sched.h> +#include <linux/cdev.h> +#include <mach/msm_pcie.h> + +extern struct mhi_pcie_devices mhi_devices; + +enum MHI_DEBUG_CLASS { + MHI_DBG_DATA = 0x1000, + MHI_DBG_POWER = 0x2000, + MHI_DBG_reserved = 0x80000000 +}; + +enum MHI_DEBUG_LEVEL { + MHI_MSG_VERBOSE = 0x1, + MHI_MSG_INFO = 0x2, + MHI_MSG_DBG = 0x4, + MHI_MSG_WARNING = 0x8, + MHI_MSG_ERROR = 0x10, + MHI_MSG_CRITICAL = 0x20, + MHI_MSG_reserved = 0x80000000 +}; + +struct pcie_core_info { + u32 dev_id; + u32 manufact_id; + u32 mhi_ver; + void __iomem *bar0_base; + void __iomem *bar0_end; + void __iomem *bar2_base; + void __iomem *bar2_end; + u32 device_wake_gpio; + u32 irq_base; + u32 max_nr_msis; + struct pci_saved_state *pcie_state; +}; + +struct bhi_ctxt_t { + void __iomem *bhi_base; + void *image_loc; + dma_addr_t phy_image_loc; + size_t image_size; + void *unaligned_image_loc; + dev_t bhi_dev; + struct cdev cdev; + struct class *bhi_class; + struct device *dev; +}; + +enum MHI_CHAN_TYPE { + MHI_INVALID = 0x0, + MHI_OUT = 0x1, + MHI_IN = 0x2, + MHI_CHAN_TYPE_reserved = 0x80000000 +}; + +enum MHI_CHAN_STATE { + MHI_CHAN_STATE_DISABLED = 0x0, + MHI_CHAN_STATE_ENABLED = 0x1, + MHI_CHAN_STATE_RUNNING = 0x2, + MHI_CHAN_STATE_SUSPENDED = 0x3, + MHI_CHAN_STATE_STOP = 0x4, + MHI_CHAN_STATE_ERROR = 0x5, + 
MHI_CHAN_STATE_LIMIT = 0x6, + MHI_CHAN_STATE_reserved = 0x80000000 +}; + +enum MHI_RING_TYPE { + MHI_RING_TYPE_CMD_RING = 0x0, + MHI_RING_TYPE_XFER_RING = 0x1, + MHI_RING_TYPE_EVENT_RING = 0x2, + MHI_RING_TYPE_MAX = 0x4, + MHI_RING_reserved = 0x80000000 +}; + +enum MHI_CHAIN { + MHI_TRE_CHAIN_OFF = 0x0, + MHI_TRE_CHAIN_ON = 0x1, + MHI_TRE_CHAIN_LIMIT = 0x2, + MHI_TRE_CHAIN_reserved = 0x80000000 +}; + +enum MHI_EVENT_RING_STATE { + MHI_EVENT_RING_UINIT = 0x0, + MHI_EVENT_RING_INIT = 0x1, + MHI_EVENT_RING_reserved = 0x80000000 +}; + +enum MHI_STATE { + MHI_STATE_RESET = 0x0, + MHI_STATE_READY = 0x1, + MHI_STATE_M0 = 0x2, + MHI_STATE_M1 = 0x3, + MHI_STATE_M2 = 0x4, + MHI_STATE_M3 = 0x5, + MHI_STATE_BHI = 0x7, + MHI_STATE_LIMIT = 0x8, + MHI_STATE_reserved = 0x80000000 +}; + +struct __packed mhi_event_ctxt { + u32 mhi_intmodt; + u32 mhi_event_er_type; + u32 mhi_msi_vector; + u64 mhi_event_ring_base_addr; + u64 mhi_event_ring_len; + u64 mhi_event_read_ptr; + u64 mhi_event_write_ptr; +}; + +struct __packed mhi_chan_ctxt { + enum MHI_CHAN_STATE mhi_chan_state; + enum MHI_CHAN_TYPE mhi_chan_type; + u32 mhi_event_ring_index; + u64 mhi_trb_ring_base_addr; + u64 mhi_trb_ring_len; + u64 mhi_trb_read_ptr; + u64 mhi_trb_write_ptr; +}; + +struct __packed mhi_cmd_ctxt { + u32 mhi_cmd_ctxt_reserved1; + u32 mhi_cmd_ctxt_reserved2; + u32 mhi_cmd_ctxt_reserved3; + u64 mhi_cmd_ring_base_addr; + u64 mhi_cmd_ring_len; + u64 mhi_cmd_ring_read_ptr; + u64 mhi_cmd_ring_write_ptr; +}; + +enum MHI_COMMAND { + MHI_COMMAND_NOOP = 0x0, + MHI_COMMAND_RESET_CHAN = 0x1, + MHI_COMMAND_STOP_CHAN = 0x2, + MHI_COMMAND_START_CHAN = 0x3, + MHI_COMMAND_RESUME_CHAN = 0x4, + MHI_COMMAND_MAX_NR = 0x5, + MHI_COMMAND_reserved = 0x80000000 +}; + +enum MHI_PKT_TYPE { + MHI_PKT_TYPE_RESERVED = 0x0, + MHI_PKT_TYPE_NOOP_CMD = 0x1, + MHI_PKT_TYPE_TRANSFER = 0x2, + MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10, + MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11, + MHI_PKT_TYPE_START_CHAN_CMD = 0x12, + MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20, + 
MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21, + MHI_PKT_TYPE_TX_EVENT = 0x22, + MHI_PKT_TYPE_EE_EVENT = 0x40, +}; + +struct __packed mhi_tx_pkt { + u64 buffer_ptr; + u32 buf_len; + u32 info; +}; + +struct __packed mhi_noop_tx_pkt { + u64 reserved1; + u32 reserved2; + u32 info; +}; + +struct __packed mhi_noop_cmd_pkt { + u64 reserved1; + u32 reserved2; + u32 info; +}; + +struct __packed mhi_reset_chan_cmd_pkt { + u32 reserved1; + u32 reserved2; + u32 reserved3; + u32 info; +}; + +struct __packed mhi_stop_chan_cmd_pkt { + u32 reserved1; + u32 reserved2; + u32 reserved3; + u32 info; +}; + +struct __packed mhi_ee_state_change_event { + u64 reserved1; + u32 exec_env; + u32 info; +}; + +struct __packed mhi_xfer_event_pkt { + u64 xfer_ptr; + u32 xfer_details; + u32 info; +}; + +struct __packed mhi_cmd_complete_event_pkt { + u64 ptr; + u32 code; + u32 info; +}; + +struct __packed mhi_state_change_event_pkt { + u64 reserved1; + u32 state; + u32 info; +}; + +union __packed mhi_xfer_pkt { + struct mhi_tx_pkt data_tx_pkt; + struct mhi_noop_tx_pkt noop_tx_pkt; + struct mhi_tx_pkt type; +}; + +union __packed mhi_cmd_pkt { + struct mhi_stop_chan_cmd_pkt stop_cmd_pkt; + struct mhi_reset_chan_cmd_pkt reset_cmd_pkt; + struct mhi_noop_cmd_pkt noop_cmd_pkt; + struct mhi_noop_cmd_pkt type; +}; + +union __packed mhi_event_pkt { + struct mhi_xfer_event_pkt xfer_event_pkt; + struct mhi_cmd_complete_event_pkt cmd_complete_event_pkt; + struct mhi_state_change_event_pkt state_change_event_pkt; + struct mhi_ee_state_change_event ee_event_pkt; + struct mhi_xfer_event_pkt type; +}; + +enum MHI_EVENT_CCS { + MHI_EVENT_CC_INVALID = 0x0, + MHI_EVENT_CC_SUCCESS = 0x1, + MHI_EVENT_CC_EOT = 0x2, + MHI_EVENT_CC_OVERFLOW = 0x3, + MHI_EVENT_CC_EOB = 0x4, + MHI_EVENT_CC_OOB = 0x5, + MHI_EVENT_CC_DB_MODE = 0x6, + MHI_EVENT_CC_UNDEFINED_ERR = 0x10, + MHI_EVENT_CC_RING_EL_ERR = 0x11, +}; + +struct mhi_ring { + void *base; + void *wp; + void *rp; + void *ack_rp; + uintptr_t len; + uintptr_t el_size; + u32 
overwrite_en; +}; + +enum MHI_CMD_STATUS { + MHI_CMD_NOT_PENDING = 0x0, + MHI_CMD_PENDING = 0x1, + MHI_CMD_RESET_PENDING = 0x2, + MHI_CMD_RESERVED = 0x80000000 +}; + +enum MHI_EVENT_RING_TYPE { + MHI_EVENT_RING_TYPE_INVALID = 0x0, + MHI_EVENT_RING_TYPE_VALID = 0x1, + MHI_EVENT_RING_TYPE_reserved = 0x80000000 +}; + +enum MHI_INIT_ERROR_STAGE { + MHI_INIT_ERROR_STAGE_UNWIND_ALL = 0x1, + MHI_INIT_ERROR_STAGE_DEVICE_CTRL = 0x2, + MHI_INIT_ERROR_STAGE_THREADS = 0x3, + MHI_INIT_ERROR_STAGE_EVENTS = 0x4, + MHI_INIT_ERROR_STAGE_MEM_ZONES = 0x5, + MHI_INIT_ERROR_STAGE_SYNC = 0x6, + MHI_INIT_ERROR_STAGE_THREAD_QUEUES = 0x7, + MHI_INIT_ERROR_TIMERS = 0x8, + MHI_INIT_ERROR_STAGE_RESERVED = 0x80000000 +}; + +enum STATE_TRANSITION { + STATE_TRANSITION_RESET = 0x0, + STATE_TRANSITION_READY = 0x1, + STATE_TRANSITION_M0 = 0x2, + STATE_TRANSITION_M1 = 0x3, + STATE_TRANSITION_M2 = 0x4, + STATE_TRANSITION_M3 = 0x5, + STATE_TRANSITION_BHI = 0x6, + STATE_TRANSITION_SBL = 0x7, + STATE_TRANSITION_AMSS = 0x8, + STATE_TRANSITION_LINK_DOWN = 0x9, + STATE_TRANSITION_WAKE = 0xA, + STATE_TRANSITION_SYS_ERR = 0xFF, + STATE_TRANSITION_reserved = 0x80000000 +}; + +enum MHI_EXEC_ENV { + MHI_EXEC_ENV_PBL = 0x0, + MHI_EXEC_ENV_SBL = 0x1, + MHI_EXEC_ENV_AMSS = 0x2, + MHI_EXEC_ENV_reserved = 0x80000000 +}; + +struct mhi_client_handle { + struct mhi_device_ctxt *mhi_dev_ctxt; + struct mhi_client_info_t client_info; + struct completion chan_reset_complete; + struct completion chan_open_complete; + void *user_data; + u32 chan; + struct mhi_result result; + u32 device_index; + u32 event_ring_index; + u32 msi_vec; + u32 cb_mod; + u32 intmod_t; + u32 pkt_count; + int magic; + int chan_status; +}; + +enum MHI_EVENT_POLLING { + MHI_EVENT_POLLING_DISABLED = 0x0, + MHI_EVENT_POLLING_ENABLED = 0x1, + MHI_EVENT_POLLING_reserved = 0x80000000 +}; + +struct mhi_state_work_queue { + spinlock_t *q_lock; + struct mhi_ring q_info; + u32 queue_full_cntr; + enum STATE_TRANSITION buf[MHI_WORK_Q_MAX_SIZE]; +}; + +struct 
mhi_control_seg { + union mhi_xfer_pkt *xfer_trb_list[MHI_MAX_CHANNELS]; + union mhi_event_pkt *ev_trb_list[EVENT_RINGS_ALLOCATED]; + union mhi_cmd_pkt cmd_trb_list[NR_OF_CMD_RINGS][CMD_EL_PER_RING + 1]; + struct mhi_cmd_ctxt mhi_cmd_ctxt_list[NR_OF_CMD_RINGS]; + struct mhi_chan_ctxt mhi_cc_list[MHI_MAX_CHANNELS]; + struct mhi_event_ctxt mhi_ec_list[MHI_MAX_CHANNELS]; + u32 padding; +}; + +struct mhi_chan_counters { + u32 empty_ring_removal; + u32 pkts_xferd; + u32 ev_processed; +}; + +struct mhi_counters { + u32 m0_m1; + u32 m1_m0; + u32 m1_m2; + u32 m2_m0; + u32 m0_m3; + u32 m3_m0; + u32 m1_m3; + u32 mhi_reset_cntr; + u32 mhi_ready_cntr; + u32 m3_event_timeouts; + u32 m0_event_timeouts; + u32 msi_disable_cntr; + u32 msi_enable_cntr; + u32 nr_irq_migrations; + atomic_t outbound_acks; +}; + +struct mhi_flags { + u32 mhi_initialized; + u32 pending_M3; + u32 pending_M0; + u32 link_up; + u32 kill_threads; + atomic_t mhi_link_off; + atomic_t data_pending; + atomic_t events_pending; + atomic_t m0_work_enabled; + atomic_t m3_work_enabled; + atomic_t pending_resume; + atomic_t pending_ssr; + atomic_t pending_powerup; + int stop_threads; + u32 ssr; +}; + +struct mhi_device_ctxt { + struct mhi_pcie_dev_info *dev_info; + struct pcie_core_info *dev_props; + u64 channel_db_addr; + u64 event_db_addr; + u64 cmd_db_addr; + struct mhi_control_seg *mhi_ctrl_seg; + struct mhi_meminfo *mhi_ctrl_seg_info; + u64 nr_of_cc; + u64 nr_of_ec; + u64 nr_of_cmdc; + enum MHI_STATE mhi_state; + enum MHI_EXEC_ENV dev_exec_env; + void __iomem *mmio_addr; + u64 mmio_len; + struct mhi_ring mhi_local_chan_ctxt[MHI_MAX_CHANNELS]; + struct mhi_ring mhi_local_event_ctxt[MHI_MAX_CHANNELS]; + struct mhi_ring mhi_local_cmd_ctxt[NR_OF_CMD_RINGS]; + struct mutex *mhi_chan_mutex; + struct mutex mhi_link_state; + spinlock_t *mhi_ev_spinlock_list; + struct mutex *mhi_cmd_mutex_list; + struct mhi_client_handle *client_handle_list[MHI_MAX_CHANNELS]; + struct task_struct *event_thread_handle; + struct task_struct 
*st_thread_handle; + u32 ev_thread_stopped; + u32 st_thread_stopped; + wait_queue_head_t *event_handle; + wait_queue_head_t *state_change_event_handle; + wait_queue_head_t *M0_event; + wait_queue_head_t *M3_event; + wait_queue_head_t *bhi_event; + wait_queue_head_t *chan_start_complete; + + u32 mhi_chan_db_order[MHI_MAX_CHANNELS]; + u32 mhi_ev_db_order[MHI_MAX_CHANNELS]; + spinlock_t *db_write_lock; + + struct platform_device *mhi_uci_dev; + struct platform_device *mhi_rmnet_dev; + atomic_t link_ops_flag; + + struct mhi_state_work_queue state_change_work_item_list; + enum MHI_CMD_STATUS mhi_chan_pend_cmd_ack[MHI_MAX_CHANNELS]; + + atomic_t start_cmd_pending_ack; + u32 cmd_ring_order; + u32 alloced_ev_rings[EVENT_RINGS_ALLOCATED]; + u32 ev_ring_props[EVENT_RINGS_ALLOCATED]; + u32 msi_counter[EVENT_RINGS_ALLOCATED]; + u32 db_mode[MHI_MAX_CHANNELS]; + u32 uldl_enabled; + u32 hw_intmod_rate; + u32 outbound_evmod_rate; + struct mhi_counters counters; + struct mhi_flags flags; + + rwlock_t xfer_lock; + struct hrtimer m1_timer; + ktime_t m1_timeout; + struct delayed_work m3_work; + struct work_struct m0_work; + + struct workqueue_struct *work_queue; + struct mhi_chan_counters mhi_chan_cntr[MHI_MAX_CHANNELS]; + u32 ev_counter[MHI_MAX_CHANNELS]; + u32 bus_client; + struct esoc_desc *esoc_handle; + void *esoc_ssr_handle; + struct msm_bus_scale_pdata *bus_scale_table; + struct notifier_block mhi_cpu_notifier; + + unsigned long esoc_notif; + enum STATE_TRANSITION base_state; + atomic_t outbound_acks; + struct mutex pm_lock; + struct wakeup_source w_lock; + int enable_lpm; + char *chan_info; + struct dentry *mhi_parent_folder; +}; + +struct mhi_pcie_dev_info { + struct pcie_core_info core; + atomic_t ref_count; + struct mhi_device_ctxt mhi_ctxt; + struct msm_pcie_register_event mhi_pci_link_event; + struct pci_dev *pcie_device; + struct pci_driver *mhi_pcie_driver; + struct bhi_ctxt_t bhi_ctxt; + struct platform_device *plat_dev; + u32 link_down_cntr; + u32 link_up_cntr; +}; + 
+struct mhi_pcie_devices { + struct mhi_pcie_dev_info device_list[MHI_MAX_SUPPORTED_DEVICES]; + s32 nr_of_devices; +}; + +enum MHI_STATUS mhi_reset_all_thread_queues( + struct mhi_device_ctxt *mhi_dev_ctxt); +enum MHI_STATUS mhi_add_elements_to_event_rings( + struct mhi_device_ctxt *mhi_dev_ctxt, + enum STATE_TRANSITION new_state); +int get_nr_avail_ring_elements(struct mhi_ring *ring); +enum MHI_STATUS get_nr_enclosed_el(struct mhi_ring *ring, void *loc_1, + void *loc_2, u32 *nr_el); +enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt); +enum MHI_STATUS mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info, + struct mhi_device_ctxt *mhi_dev_ctxt); +enum MHI_STATUS mhi_init_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt, + u32 nr_ev_el, u32 event_ring_index); +/*Mhi Initialization functions */ +enum MHI_STATUS mhi_clean_init_stage(struct mhi_device_ctxt *mhi_dev_ctxt, + enum MHI_INIT_ERROR_STAGE cleanup_stage); +enum MHI_STATUS mhi_send_cmd(struct mhi_device_ctxt *dest_device, + enum MHI_COMMAND which_cmd, u32 chan); +enum MHI_STATUS mhi_queue_tx_pkt(struct mhi_device_ctxt *mhi_dev_ctxt, + enum MHI_CLIENT_CHANNEL chan, + void *payload, + size_t payload_size); +enum MHI_STATUS mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list, + uintptr_t trb_list_phy, + uintptr_t trb_list_virt, + u64 el_per_ring, + enum MHI_CHAN_TYPE chan_type, + u32 event_ring, + struct mhi_ring *ring); +enum MHI_STATUS delete_element(struct mhi_ring *ring, void **rp, + void **wp, void **assigned_addr); +enum MHI_STATUS ctxt_add_element(struct mhi_ring *ring, void **assigned_addr); +enum MHI_STATUS ctxt_del_element(struct mhi_ring *ring, void **assigned_addr); +enum MHI_STATUS get_element_index(struct mhi_ring *ring, void *address, + uintptr_t *index); +enum MHI_STATUS recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt, + struct mhi_ring *ring, enum MHI_RING_TYPE ring_type, u32 ring_index); +enum MHI_STATUS parse_xfer_event(struct mhi_device_ctxt *ctxt, + union 
mhi_event_pkt *event); +enum MHI_STATUS parse_cmd_event(struct mhi_device_ctxt *ctxt, + union mhi_event_pkt *event); +int parse_event_thread(void *ctxt); +enum MHI_STATUS mhi_test_for_device_ready( + struct mhi_device_ctxt *mhi_dev_ctxt); +enum MHI_STATUS validate_ring_el_addr(struct mhi_ring *ring, uintptr_t addr); +enum MHI_STATUS validate_ev_el_addr(struct mhi_ring *ring, uintptr_t addr); +int mhi_state_change_thread(void *ctxt); +enum MHI_STATUS mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt, + enum STATE_TRANSITION new_state); +enum MHI_STATUS mhi_wait_for_mdm(struct mhi_device_ctxt *mhi_dev_ctxt); +enum hrtimer_restart mhi_initiate_m1(struct hrtimer *timer); +int mhi_pci_suspend(struct pci_dev *dev, pm_message_t state); +int mhi_pci_resume(struct pci_dev *dev); +int mhi_init_pcie_device(struct mhi_pcie_dev_info *mhi_pcie_dev); +int mhi_init_gpios(struct mhi_pcie_dev_info *mhi_pcie_dev); +int mhi_init_pm_sysfs(struct device *dev); +void mhi_rem_pm_sysfs(struct device *dev); +void mhi_pci_remove(struct pci_dev *mhi_device); +int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev); +int mhi_get_chan_max_buffers(u32 chan); +int mhi_esoc_register(struct mhi_device_ctxt *mhi_dev_ctxt); +void mhi_link_state_cb(struct msm_pcie_notify *notify); +void mhi_notify_clients(struct mhi_device_ctxt *mhi_dev_ctxt, + enum MHI_CB_REASON reason); +void mhi_notify_client(struct mhi_client_handle *client_handle, + enum MHI_CB_REASON reason); +int mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt); +int mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt); +enum MHI_STATUS mhi_reg_notifiers(struct mhi_device_ctxt *mhi_dev_ctxt); +int mhi_cpu_notifier_cb(struct notifier_block *nfb, unsigned long action, + void *hcpu); +enum MHI_STATUS init_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt); +enum MHI_STATUS mhi_turn_off_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt); +enum MHI_STATUS mhi_turn_on_pcie_link(struct mhi_device_ctxt 
*mhi_dev_ctxt); +void delayed_m3(struct work_struct *work); +void m0_work(struct work_struct *work); +int mhi_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt); +int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt); +int mhi_set_bus_request(struct mhi_device_ctxt *mhi_dev_ctxt, + int index); +enum MHI_STATUS start_chan_sync(struct mhi_client_handle *client_handle); + +#endif diff --git a/drivers/platform/msm/mhi/mhi_bhi.c b/drivers/platform/msm/mhi/mhi_bhi.c new file mode 100644 index 000000000000..ecfd2fdd02f3 --- /dev/null +++ b/drivers/platform/msm/mhi/mhi_bhi.c @@ -0,0 +1,202 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/fs.h> +#include <linux/uaccess.h> +#include <linux/slab.h> + +#include "mhi_sys.h" +#include "mhi.h" +#include "mhi_macros.h" +#include "mhi_hwio.h" +#include "mhi_bhi.h" + +static int bhi_open(struct inode *mhi_inode, struct file *file_handle) +{ + file_handle->private_data = &mhi_devices.device_list[0]; + return 0; +} + +static ssize_t bhi_write(struct file *file, + const char __user *buf, + size_t count, loff_t *offp) +{ + enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; + u32 pcie_word_val = 0; + u32 i = 0; + struct bhi_ctxt_t *bhi_ctxt = + &(((struct mhi_pcie_dev_info *)file->private_data)->bhi_ctxt); + struct mhi_device_ctxt *mhi_dev_ctxt = + &((struct mhi_pcie_dev_info *)file->private_data)->mhi_ctxt; + size_t amount_copied = 0; + uintptr_t align_len = 0x1000; + u32 tx_db_val = 0; + + if (buf == NULL || 0 == count) + return -EIO; + + if (count > BHI_MAX_IMAGE_SIZE) + return -ENOMEM; + + wait_event_interruptible(*mhi_dev_ctxt->bhi_event, + mhi_dev_ctxt->mhi_state == MHI_STATE_BHI); + + mhi_log(MHI_MSG_INFO, "Entered. 
User Image size 0x%x\n", count); + + bhi_ctxt->unaligned_image_loc = kmalloc(count + (align_len - 1), + GFP_KERNEL); + if (bhi_ctxt->unaligned_image_loc == NULL) + return -ENOMEM; + + mhi_log(MHI_MSG_INFO, "Unaligned Img Loc: %p\n", + bhi_ctxt->unaligned_image_loc); + bhi_ctxt->image_loc = + (void *)((uintptr_t)bhi_ctxt->unaligned_image_loc + + (align_len - (((uintptr_t)bhi_ctxt->unaligned_image_loc) % + align_len))); + + mhi_log(MHI_MSG_INFO, "Aligned Img Loc: %p\n", bhi_ctxt->image_loc); + + bhi_ctxt->image_size = count; + + if (0 != copy_from_user(bhi_ctxt->image_loc, buf, count)) { + ret_val = -ENOMEM; + goto bhi_copy_error; + } + amount_copied = count; + mhi_log(MHI_MSG_INFO, + "Copied image from user at addr: %p\n", bhi_ctxt->image_loc); + bhi_ctxt->phy_image_loc = dma_map_single(NULL, + bhi_ctxt->image_loc, + bhi_ctxt->image_size, + DMA_TO_DEVICE); + + if (dma_mapping_error(NULL, bhi_ctxt->phy_image_loc)) { + ret_val = -EIO; + goto bhi_copy_error; + } + mhi_log(MHI_MSG_INFO, + "Mapped image to DMA addr 0x%lx:\n", + (uintptr_t)bhi_ctxt->phy_image_loc); + + bhi_ctxt->image_size = count; + + /* Write the image size */ + pcie_word_val = HIGH_WORD(bhi_ctxt->phy_image_loc); + MHI_REG_WRITE_FIELD(bhi_ctxt->bhi_base, + BHI_IMGADDR_HIGH, + 0xFFFFFFFF, + 0, + pcie_word_val); + + pcie_word_val = LOW_WORD(bhi_ctxt->phy_image_loc); + + MHI_REG_WRITE_FIELD(bhi_ctxt->bhi_base, + BHI_IMGADDR_LOW, + 0xFFFFFFFF, + 0, + pcie_word_val); + + pcie_word_val = bhi_ctxt->image_size; + MHI_REG_WRITE_FIELD(bhi_ctxt->bhi_base, BHI_IMGSIZE, + 0xFFFFFFFF, 0, pcie_word_val); + + MHI_REG_READ(bhi_ctxt->bhi_base, BHI_IMGTXDB, pcie_word_val); + MHI_REG_WRITE_FIELD(bhi_ctxt->bhi_base, + BHI_IMGTXDB, 0xFFFFFFFF, 0, ++pcie_word_val); + + for (i = 0; i < BHI_POLL_NR_RETRIES; ++i) { + MHI_REG_READ(bhi_ctxt->bhi_base, BHI_STATUS, tx_db_val); + if ((0x80000000 | tx_db_val) == pcie_word_val) + break; + else + mhi_log(MHI_MSG_CRITICAL, + "BHI STATUS 0x%x\n", pcie_word_val); + usleep_range(20000, 
25000); + } + dma_unmap_single(NULL, bhi_ctxt->phy_image_loc, + bhi_ctxt->image_size, DMA_TO_DEVICE); + + kfree(bhi_ctxt->unaligned_image_loc); + + ret_val = mhi_init_state_transition(mhi_dev_ctxt, + STATE_TRANSITION_RESET); + if (MHI_STATUS_SUCCESS != ret_val) { + mhi_log(MHI_MSG_CRITICAL, + "Failed to start state change event\n"); + } + return amount_copied; + +bhi_copy_error: + kfree(bhi_ctxt->unaligned_image_loc); + return amount_copied; +} + +static const struct file_operations bhi_fops = { + .write = bhi_write, + .open = bhi_open, +}; + +int bhi_probe(struct mhi_pcie_dev_info *mhi_pcie_device) +{ + struct bhi_ctxt_t *bhi_ctxt = &mhi_pcie_device->bhi_ctxt; + enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; + u32 pcie_word_val = 0; + int r; + + if (NULL == mhi_pcie_device || 0 == mhi_pcie_device->core.bar0_base + || 0 == mhi_pcie_device->core.bar0_end) + return -EIO; + + ret_val = alloc_chrdev_region(&bhi_ctxt->bhi_dev, 0, 1, "bhi"); + if (IS_ERR_VALUE(ret_val)) { + mhi_log(MHI_MSG_CRITICAL, + "Failed to alloc char device %d\n", + ret_val); + return -EIO; + } + bhi_ctxt->bhi_class = class_create(THIS_MODULE, "bhi"); + if (IS_ERR(bhi_ctxt->bhi_class)) { + mhi_log(MHI_MSG_CRITICAL, + "Failed to instantiate class %d\n", + ret_val); + r = (int)bhi_ctxt->bhi_class; + goto err_class_create; + } + cdev_init(&bhi_ctxt->cdev, &bhi_fops); + bhi_ctxt->cdev.owner = THIS_MODULE; + ret_val = cdev_add(&bhi_ctxt->cdev, bhi_ctxt->bhi_dev, 1); + bhi_ctxt->dev = device_create(bhi_ctxt->bhi_class, NULL, + bhi_ctxt->bhi_dev, NULL, + "bhi"); + if (IS_ERR(bhi_ctxt->dev)) { + mhi_log(MHI_MSG_CRITICAL, + "Failed to add bhi cdev\n"); + r = (int)bhi_ctxt->dev; + goto err_dev_create; + } + + bhi_ctxt->bhi_base = mhi_pcie_device->core.bar0_base; + MHI_REG_READ(bhi_ctxt->bhi_base, BHIOFF, pcie_word_val); + bhi_ctxt->bhi_base += pcie_word_val; + + mhi_log(MHI_MSG_INFO, + "Successfully registered char dev. 
bhi base is: 0x%p.\n", + bhi_ctxt->bhi_base); + return 0; +err_dev_create: + cdev_del(&bhi_ctxt->cdev); + class_destroy(bhi_ctxt->bhi_class); +err_class_create: + unregister_chrdev_region(MAJOR(bhi_ctxt->bhi_dev), 1); + return r; +} diff --git a/drivers/platform/msm/mhi/mhi_bhi.h b/drivers/platform/msm/mhi/mhi_bhi.h new file mode 100644 index 000000000000..cb5510f73e14 --- /dev/null +++ b/drivers/platform/msm/mhi/mhi_bhi.h @@ -0,0 +1,55 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef _MHI_BHI_H +#define _MHI_BHI_H +#include "mhi.h" + +/* BHI Offsets */ +#define BHI_BHIVERSION_MINOR (0x00) +#define BHI_BHIVERSION_MAJOR (0x04) +#define BHI_IMGADDR_LOW (0x08) +#define BHI_IMGADDR_HIGH (0x0C) +#define BHI_IMGSIZE (0x10) +#define BHI_RSVD1 (0x14) +#define BHI_IMGTXDB (0x18) +#define BHI_RSVD2 (0x1C) +#define BHI_INTVEC (0x20) +#define BHI_RSVD3 (0x24) +#define BHI_EXECENV (0x28) +#define BHI_STATUS (0x2C) +#define BHI_ERRCODE (0x30) +#define BHI_ERRDBG1 (0x34) +#define BHI_ERRDBG2 (0x38) +#define BHI_ERRDBG3 (0x3C) +#define BHI_SERIALNUM (0x40) +#define BHI_SBLANTIROLLVER (0x44) +#define BHI_NUMSEG (0x48) +#define BHI_MSMHWID(n) (0x4C + 0x4 * (n)) +#define BHI_OEMPKHASH(n) (0x64 + 0x4 * (n)) +#define BHI_RSVD5 (0xC4) + +#define BHI_MAJOR_VERSION 0x0 +#define BHI_MINOR_VERSION 0x1 + +#define MSMHWID_NUMDWORDS 6 /* Number of dwords that make the MSMHWID */ +#define OEMPKHASH_NUMDWORDS 24 /* Number of dwords that make the OEM PK HASH */ + +#define BHI_READBUF_SIZE 
sizeof(bhi_info_type) + +#define BHI_MAX_IMAGE_SIZE (256 * 1024) + +#define BHI_POLL_SLEEP_TIME 1000 +#define BHI_POLL_NR_RETRIES 1 + +int bhi_probe(struct mhi_pcie_dev_info *mhi_pcie_device); + +#endif diff --git a/drivers/platform/msm/mhi/mhi_hwio.h b/drivers/platform/msm/mhi/mhi_hwio.h new file mode 100644 index 000000000000..370ee0ebab7e --- /dev/null +++ b/drivers/platform/msm/mhi/mhi_hwio.h @@ -0,0 +1,170 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _MHI_HWIO_ +#define _MHI_HWIO_ +#define MHIREGLEN (0x0) +#define MHIREGLEN_MHIREGLEN_MASK 0xffffffff +#define MHIREGLEN_MHIREGLEN_SHIFT 0x0 + + +#define MHIVER (0x8) +#define MHIVER_MHIVER_MASK 0xffffffff +#define MHIVER_MHIVER_SHIFT 0x0 + + +#define MHICFG (0x10) +#define MHICFG_RESERVED_BITS31_24_MASK 0xff000000 +#define MHICFG_RESERVED_BITS31_24_SHIFT 0x18 +#define MHICFG_NER_MASK 0xff0000 +#define MHICFG_NER_SHIFT 0x10 +#define MHICFG_RESERVED_BITS15_8_MASK 0xff00 +#define MHICFG_RESERVED_BITS15_8_SHIFT 0x8 +#define MHICFG_NCH_MASK 0xff +#define MHICFG_NCH_SHIFT 0x0 + + +#define CHDBOFF (0x18) +#define CHDBOFF_CHDBOFF_MASK 0xffffffff +#define CHDBOFF_CHDBOFF_SHIFT 0x0 + + +#define ERDBOFF (0x20) +#define ERDBOFF_ERDBOFF_MASK 0xffffffff +#define ERDBOFF_ERDBOFF_SHIFT 0x0 + + +#define BHIOFF (0x28) +#define BHIOFF_BHIOFF_MASK 0xffffffff +#define BHIOFF_BHIOFF_SHIFT 0x0 + + +#define DEBUGOFF (0x30) +#define DEBUGOFF_DEBUGOFF_MASK 0xffffffff +#define DEBUGOFF_DEBUGOFF_SHIFT 0x0 + + +#define MHICTRL (0x38) +#define 
MHICTRL_MHISTATE_MASK 0x0000FF00 +#define MHICTRL_MHISTATE_SHIFT 0x8 +#define MHICTRL_RESET_MASK 0x2 +#define MHICTRL_RESET_SHIFT 0x1 + + +#define MHISTATUS (0x48) +#define MHISTATUS_MHISTATE_MASK 0x0000ff00 +#define MHISTATUS_MHISTATE_SHIFT 0x8 +#define MHISTATUS_SYSERR_MASK 0x4 +#define MHISTATUS_SYSERR_SHIFT 0x2 +#define MHISTATUS_READY_MASK 0x1 +#define MHISTATUS_READY_SHIFT 0x0 + + +#define CCABAP_LOWER (0x58) +#define CCABAP_LOWER_CCABAP_LOWER_MASK 0xffffffff +#define CCABAP_LOWER_CCABAP_LOWER_SHIFT 0x0 + + +#define CCABAP_HIGHER (0x5c) +#define CCABAP_HIGHER_CCABAP_HIGHER_MASK 0xffffffff +#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT 0x0 + + +#define ECABAP_LOWER (0x60) +#define ECABAP_LOWER_ECABAP_LOWER_MASK 0xffffffff +#define ECABAP_LOWER_ECABAP_LOWER_SHIFT 0x0 + + +#define ECABAP_HIGHER (0x64) +#define ECABAP_HIGHER_ECABAP_HIGHER_MASK 0xffffffff +#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT 0x0 + + +#define CRCBAP_LOWER (0x68) +#define CRCBAP_LOWER_CRCBAP_LOWER_MASK 0xffffffff +#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT 0x0 + + +#define CRCBAP_HIGHER (0x6c) +#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK 0xffffffff +#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT 0x0 + + +#define CRDB_LOWER (0x70) +#define CRDB_LOWER_CRDB_LOWER_MASK 0xffffffff +#define CRDB_LOWER_CRDB_LOWER_SHIFT 0x0 + + +#define CRDB_HIGHER (0x74) +#define CRDB_HIGHER_CRDB_HIGHER_MASK 0xffffffff +#define CRDB_HIGHER_CRDB_HIGHER_SHIFT 0x0 + + +#define MHICTRLBASE_LOWER (0x80) +#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK 0xffffffff +#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT 0x0 + + +#define MHICTRLBASE_HIGHER (0x84) +#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK 0xffffffff +#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT 0x0 + + +#define MHICTRLLIMIT_LOWER (0x88) +#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK 0xffffffff +#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT 0x0 + + +#define MHICTRLLIMIT_HIGHER (0x8c) +#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK 0xffffffff 
+#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT 0x0 + + +#define MHIDATABASE_LOWER (0x98) +#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK 0xffffffff +#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT 0x0 + + +#define MHIDATABASE_HIGHER (0x9c) +#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK 0xffffffff +#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT 0x0 + + +#define MHIDATALIMIT_LOWER (0xa0) +#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK 0xffffffff +#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT 0x0 + + +#define MHIDATALIMIT_HIGHER (0xa4) +#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK 0xffffffff +#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT 0x0 + +#define CHDB_LOWER_n(n) (0x0400 + 0x8 * (n)) +#define CHDB_LOWER_n_CHDB_LOWER_MASK 0xffffffff +#define CHDB_LOWER_n_CHDB_LOWER_SHIFT 0x0 + + +#define CHDB_HIGHER_n(n) (0x0404 + 0x8 * (n)) +#define CHDB_HIGHER_n_CHDB_HIGHER_MASK 0xffffffff +#define CHDB_HIGHER_n_CHDB_HIGHER_SHIFT 0x0 + + +#define ERDB_LOWER_n(n) (0x0800 + 0x8 * (n)) +#define ERDB_LOWER_n_ERDB_LOWER_MASK 0xffffffff +#define ERDB_LOWER_n_ERDB_LOWER_SHIFT 0x0 + + +#define ERDB_HIGHER_n(n) (0x0804 + 0x8 * (n)) +#define ERDB_HIGHER_n_ERDB_HIGHER_MASK 0xffffffff +#define ERDB_HIGHER_n_ERDB_HIGHER_SHIFT 0x0 + +#endif diff --git a/drivers/platform/msm/mhi/mhi_iface.c b/drivers/platform/msm/mhi/mhi_iface.c new file mode 100644 index 000000000000..6b064ba26a00 --- /dev/null +++ b/drivers/platform/msm/mhi/mhi_iface.c @@ -0,0 +1,297 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#include <linux/pci.h> +#include <linux/gpio.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/msm-bus.h> +#include <linux/delay.h> +#include <linux/debugfs.h> + +#include "mhi_sys.h" +#include "mhi.h" +#include "mhi_macros.h" +#include "mhi_hwio.h" +#include "mhi_bhi.h" + +struct mhi_pcie_devices mhi_devices; + +static int mhi_pci_probe(struct pci_dev *pcie_device, + const struct pci_device_id *mhi_device_id); +static int __exit mhi_plat_remove(struct platform_device *pdev); +void *mhi_ipc_log; + +static DEFINE_PCI_DEVICE_TABLE(mhi_pcie_device_id) = { + { MHI_PCIE_VENDOR_ID, MHI_PCIE_DEVICE_ID, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + { 0, }, +}; + +static const struct of_device_id mhi_plat_match[] = { + { + .compatible = "qcom,mhi", + }, + {}, +}; + +static void mhi_msm_fixup(struct pci_dev *pcie_device) +{ + if (pcie_device->class == PCI_CLASS_NOT_DEFINED) { + mhi_log(MHI_MSG_INFO, "Setting msm pcie class\n"); + pcie_device->class = PCI_CLASS_STORAGE_SCSI; + } +} + +int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev) +{ + int ret_val = 0; + u32 i = 0; + u32 retry_count = 0; + struct pci_dev *pcie_device = NULL; + + if (NULL == mhi_pcie_dev) + return -EINVAL; + pcie_device = mhi_pcie_dev->pcie_device; + + ret_val = mhi_init_pcie_device(mhi_pcie_dev); + if (0 != ret_val) { + mhi_log(MHI_MSG_CRITICAL, + "Failed to initialize pcie device, ret %d\n", + ret_val); + return -ENODEV; + } + ret_val = mhi_init_device_ctxt(mhi_pcie_dev, + &mhi_pcie_dev->mhi_ctxt); + if (MHI_STATUS_SUCCESS != ret_val) { + mhi_log(MHI_MSG_CRITICAL, + "Failed to initialize main MHI ctxt ret %d\n", + ret_val); + goto msi_config_err; + } + ret_val = mhi_esoc_register(&mhi_pcie_dev->mhi_ctxt); + if (ret_val) { + mhi_log(MHI_MSG_ERROR, + "Failed to register with esoc ret %d.\n", + ret_val); + } + mhi_pcie_dev->mhi_ctxt.bus_scale_table = + msm_bus_cl_get_pdata(mhi_pcie_dev->plat_dev); + 
mhi_pcie_dev->mhi_ctxt.bus_client = + msm_bus_scale_register_client( + mhi_pcie_dev->mhi_ctxt.bus_scale_table); + if (!mhi_pcie_dev->mhi_ctxt.bus_client) { + mhi_log(MHI_MSG_CRITICAL, + "Could not register for bus control ret: %d.\n", + mhi_pcie_dev->mhi_ctxt.bus_client); + } else { + ret_val = mhi_set_bus_request(&mhi_pcie_dev->mhi_ctxt, 1); + if (ret_val) + mhi_log(MHI_MSG_CRITICAL, + "Could not set bus frequency ret: %d\n", + ret_val); + } + + device_disable_async_suspend(&pcie_device->dev); + ret_val = pci_enable_msi_block(pcie_device, MAX_NR_MSI + 1); + if (0 != ret_val) { + mhi_log(MHI_MSG_ERROR, + "Failed to enable MSIs for pcie dev ret_val %d.\n", + ret_val); + goto msi_config_err; + } + for (i = 0; i < MAX_NR_MSI; ++i) { + ret_val = request_irq(pcie_device->irq + i, + mhi_msi_handlr, + IRQF_NO_SUSPEND, + "mhi_drv", + (void *)&pcie_device->dev); + if (ret_val) { + mhi_log(MHI_MSG_ERROR, + "Failed to register handler for MSI.\n"); + goto msi_config_err; + } + } + mhi_pcie_dev->core.irq_base = pcie_device->irq; + mhi_log(MHI_MSG_VERBOSE, + "Setting IRQ Base to 0x%x\n", mhi_pcie_dev->core.irq_base); + mhi_pcie_dev->core.max_nr_msis = MAX_NR_MSI; + do { + ret_val = mhi_init_gpios(mhi_pcie_dev); + switch (ret_val) { + case -EPROBE_DEFER: + mhi_log(MHI_MSG_VERBOSE, + "DT requested probe defer, wait and retry\n"); + break; + case 0: + break; + default: + mhi_log(MHI_MSG_CRITICAL, + "Could not get gpio from struct device tree!\n"); + goto msi_config_err; + } + retry_count++; + } while ((retry_count < DT_WAIT_RETRIES) && (ret_val == -EPROBE_DEFER)); + ret_val = mhi_init_pm_sysfs(&pcie_device->dev); + if (ret_val != 0) { + mhi_log(MHI_MSG_ERROR, "Failed to setup sysfs.\n"); + goto sysfs_config_err; + } + if (!mhi_init_debugfs(&mhi_pcie_dev->mhi_ctxt)) + mhi_log(MHI_MSG_ERROR, "Failed to init debugfs.\n"); + + mhi_pcie_dev->mhi_ctxt.mmio_addr = mhi_pcie_dev->core.bar0_base; + pcie_device->dev.platform_data = &mhi_pcie_dev->mhi_ctxt; + if 
(mhi_pcie_dev->mhi_ctxt.base_state == STATE_TRANSITION_BHI) { + ret_val = bhi_probe(mhi_pcie_dev); + if (ret_val) { + mhi_log(MHI_MSG_ERROR, "Failed to initialize BHI.\n"); + goto mhi_state_transition_error; + } + } + if (MHI_STATUS_SUCCESS != mhi_reg_notifiers(&mhi_pcie_dev->mhi_ctxt)) { + mhi_log(MHI_MSG_ERROR, "Failed to register for notifiers\n"); + return MHI_STATUS_ERROR; + } + mhi_log(MHI_MSG_INFO, + "Finished all driver probing returning ret_val %d.\n", + ret_val); + return ret_val; + +mhi_state_transition_error: + if (MHI_STATUS_SUCCESS != mhi_clean_init_stage(&mhi_pcie_dev->mhi_ctxt, + MHI_INIT_ERROR_STAGE_UNWIND_ALL)) + mhi_log(MHI_MSG_ERROR, "Could not clean up context\n"); + mhi_rem_pm_sysfs(&pcie_device->dev); +sysfs_config_err: + gpio_free(mhi_pcie_dev->core.device_wake_gpio); + for (; i >= 0; --i) + free_irq(pcie_device->irq + i, &pcie_device->dev); + debugfs_remove_recursive(mhi_pcie_dev->mhi_ctxt.mhi_parent_folder); +msi_config_err: + pci_disable_msi(pcie_device); + pci_disable_device(pcie_device); + return ret_val; +} + +static struct pci_driver mhi_pcie_driver = { + .name = "mhi_pcie_drv", + .id_table = mhi_pcie_device_id, + .probe = mhi_pci_probe, + .suspend = mhi_pci_suspend, + .resume = mhi_pci_resume, +}; + +static int mhi_pci_probe(struct pci_dev *pcie_device, + const struct pci_device_id *mhi_device_id) +{ + int ret_val = 0; + struct mhi_pcie_dev_info *mhi_pcie_dev = NULL; + struct platform_device *plat_dev; + u32 nr_dev = mhi_devices.nr_of_devices; + + mhi_log(MHI_MSG_INFO, "Entering.\n"); + mhi_pcie_dev = &mhi_devices.device_list[mhi_devices.nr_of_devices]; + if (mhi_devices.nr_of_devices + 1 > MHI_MAX_SUPPORTED_DEVICES) { + mhi_log(MHI_MSG_ERROR, "Error: Too many devices\n"); + return -ENOMEM; + } + + mhi_devices.nr_of_devices++; + plat_dev = mhi_devices.device_list[nr_dev].plat_dev; + pcie_device->dev.of_node = plat_dev->dev.of_node; + mhi_pcie_dev->pcie_device = pcie_device; + mhi_pcie_dev->mhi_pcie_driver = &mhi_pcie_driver; + 
mhi_pcie_dev->mhi_pci_link_event.events = + (MSM_PCIE_EVENT_LINKDOWN | MSM_PCIE_EVENT_LINKUP | + MSM_PCIE_EVENT_WAKEUP); + mhi_pcie_dev->mhi_pci_link_event.user = pcie_device; + mhi_pcie_dev->mhi_pci_link_event.callback = mhi_link_state_cb; + mhi_pcie_dev->mhi_pci_link_event.notify.data = mhi_pcie_dev; + ret_val = msm_pcie_register_event(&mhi_pcie_dev->mhi_pci_link_event); + if (ret_val) + mhi_log(MHI_MSG_ERROR, + "Failed to register for link notifications %d.\n", + ret_val); + return ret_val; +} + +static int mhi_plat_probe(struct platform_device *pdev) +{ + u32 nr_dev = mhi_devices.nr_of_devices; + mhi_log(MHI_MSG_INFO, "Entered\n"); + mhi_devices.device_list[nr_dev].plat_dev = pdev; + mhi_log(MHI_MSG_INFO, "Exited\n"); + return 0; +} + +static struct platform_driver mhi_plat_driver = { + .probe = mhi_plat_probe, + .remove = mhi_plat_remove, + .driver = { + .name = "mhi", + .owner = THIS_MODULE, + .of_match_table = mhi_plat_match, + }, +}; + +static void __exit mhi_exit(void) +{ + ipc_log_context_destroy(mhi_ipc_log); + pci_unregister_driver(&mhi_pcie_driver); + platform_driver_unregister(&mhi_plat_driver); +} + +static int __exit mhi_plat_remove(struct platform_device *pdev) +{ + platform_driver_unregister(&mhi_plat_driver); + return 0; +} + +static int __init mhi_init(void) +{ + int r; + + mhi_log(MHI_MSG_INFO, "Entered\n"); + r = platform_driver_register(&mhi_plat_driver); + if (r) { + mhi_log(MHI_MSG_INFO, "Failed to probe platform ret %d\n", r); + return r; + } + r = pci_register_driver(&mhi_pcie_driver); + if (r) { + mhi_log(MHI_MSG_INFO, + "Failed to register pcie drv ret %d\n", r); + goto error; + } + mhi_ipc_log = ipc_log_context_create(MHI_IPC_LOG_PAGES, "mhi", 0); + if (!mhi_ipc_log) { + mhi_log(MHI_MSG_ERROR, + "Failed to create IPC logging context\n"); + } + mhi_log(MHI_MSG_INFO, "Exited\n"); + return 0; +error: + pci_unregister_driver(&mhi_pcie_driver); + return r; +} + +DECLARE_PCI_FIXUP_HEADER(MHI_PCIE_VENDOR_ID, + MHI_PCIE_DEVICE_ID, + 
DECLARE_PCI_FIXUP_HEADER(MHI_PCIE_VENDOR_ID,
			MHI_PCIE_DEVICE_ID,
			mhi_msm_fixup);

module_exit(mhi_exit);
module_init(mhi_init);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("MHI_CORE");
MODULE_DESCRIPTION("MHI Host Driver");

/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "mhi_sys.h"
#include "mhi.h"
#include "mhi_hwio.h"

#include <linux/hrtimer.h>
#include <linux/cpu.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

/*
 * mhi_create_ctxt - seed the device context with static configuration:
 * initial state, ring counts, the event-ring allocation map, and
 * per-ring polling/MSI properties.  No memory is allocated here.
 */
static enum MHI_STATUS mhi_create_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	int i;
	if (NULL == mhi_dev_ctxt)
		return MHI_STATUS_ALLOC_ERROR;
	mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
	mhi_dev_ctxt->nr_of_cc = MHI_MAX_CHANNELS;
	mhi_dev_ctxt->nr_of_ec = EVENT_RINGS_ALLOCATED;
	mhi_dev_ctxt->nr_of_cmdc = NR_OF_CMD_RINGS;

	/* Map logical event rings to their ring indices. */
	mhi_dev_ctxt->alloced_ev_rings[PRIMARY_EVENT_RING] = 0;
	mhi_dev_ctxt->alloced_ev_rings[SOFTWARE_EV_RING] =
		SOFTWARE_EV_RING;
	mhi_dev_ctxt->alloced_ev_rings[IPA_OUT_EV_RING] =
		MHI_CLIENT_IP_HW_0_OUT;
	mhi_dev_ctxt->alloced_ev_rings[IPA_IN_EV_RING] =
		MHI_CLIENT_IP_HW_0_IN;
	/* All rings polled except IPA_IN, which is interrupt driven
	 * (see MSI case 3 in mhi_msi_handlr) - note polling DISABLED. */
	MHI_SET_EVENT_RING_INFO(EVENT_RING_POLLING,
			mhi_dev_ctxt->ev_ring_props[PRIMARY_EVENT_RING],
			MHI_EVENT_POLLING_ENABLED);
	MHI_SET_EVENT_RING_INFO(EVENT_RING_POLLING,
			mhi_dev_ctxt->ev_ring_props[SOFTWARE_EV_RING],
			MHI_EVENT_POLLING_ENABLED);
	MHI_SET_EVENT_RING_INFO(EVENT_RING_POLLING,
			mhi_dev_ctxt->ev_ring_props[IPA_OUT_EV_RING],
			MHI_EVENT_POLLING_ENABLED);
	MHI_SET_EVENT_RING_INFO(EVENT_RING_POLLING,
			mhi_dev_ctxt->ev_ring_props[IPA_IN_EV_RING],
			MHI_EVENT_POLLING_DISABLED);

	/* One MSI vector per event ring, identity-mapped. */
	for (i = 0; i < MAX_NR_MSI; ++i) {
		MHI_SET_EVENT_RING_INFO(EVENT_RING_MSI_VEC,
					mhi_dev_ctxt->ev_ring_props[i],
					i);
	}
	return MHI_STATUS_SUCCESS;
}

/*
 * mhi_clean_init_stage - unwind a partially completed mhi_init_device_ctxt.
 *
 * The switch is a deliberate cleanup cascade: entering at a later stage
 * falls through all earlier stages, so each case releases only what its
 * stage allocated.  Every missing "break" below is intentional.
 */
enum MHI_STATUS mhi_clean_init_stage(struct mhi_device_ctxt *mhi_dev_ctxt,
		enum MHI_INIT_ERROR_STAGE cleanup_stage)
{
	enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
	switch (cleanup_stage) {
	case MHI_INIT_ERROR_STAGE_UNWIND_ALL:
	case MHI_INIT_ERROR_TIMERS:
	case MHI_INIT_ERROR_STAGE_DEVICE_CTRL:
		mhi_freememregion(mhi_dev_ctxt->mhi_ctrl_seg_info);
		/* fallthrough */
	case MHI_INIT_ERROR_STAGE_THREAD_QUEUES:
	case MHI_INIT_ERROR_STAGE_THREADS:
		kfree(mhi_dev_ctxt->event_handle);
		kfree(mhi_dev_ctxt->state_change_event_handle);
		kfree(mhi_dev_ctxt->M0_event);
		/* fallthrough */
	case MHI_INIT_ERROR_STAGE_EVENTS:
		kfree(mhi_dev_ctxt->mhi_ctrl_seg_info);
		/* fallthrough */
	case MHI_INIT_ERROR_STAGE_MEM_ZONES:
		kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
		kfree(mhi_dev_ctxt->mhi_chan_mutex);
		kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
		/* fallthrough */
	case MHI_INIT_ERROR_STAGE_SYNC:
		kfree(mhi_dev_ctxt);
		break;
	default:
		ret_val = MHI_STATUS_ERROR;
		break;
	}
	return ret_val;
}
/*
 * mhi_init_sync - allocate and initialize the per-channel mutexes and
 * spinlocks, the per-command-ring mutexes, and the global locks.
 *
 * NOTE: the goto labels are named after the allocation that FAILED,
 * and each label frees the allocations that preceded it (so
 * db_write_lock_free frees mhi_cmd_mutex_list, and ev_mutex_free frees
 * nothing).
 */
static enum MHI_STATUS mhi_init_sync(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	u32 i = 0;

	mhi_dev_ctxt->mhi_ev_spinlock_list = kmalloc(sizeof(spinlock_t) *
							MHI_MAX_CHANNELS,
							GFP_KERNEL);
	if (NULL == mhi_dev_ctxt->mhi_ev_spinlock_list)
		goto ev_mutex_free;
	mhi_dev_ctxt->mhi_chan_mutex = kmalloc(sizeof(struct mutex) *
					MHI_MAX_CHANNELS, GFP_KERNEL);
	if (NULL == mhi_dev_ctxt->mhi_chan_mutex)
		goto chan_mutex_free;
	mhi_dev_ctxt->mhi_cmd_mutex_list = kmalloc(sizeof(struct mutex) *
					NR_OF_CMD_RINGS, GFP_KERNEL);
	if (NULL == mhi_dev_ctxt->mhi_cmd_mutex_list)
		goto cmd_mutex_free;

	mhi_dev_ctxt->db_write_lock = kmalloc(sizeof(spinlock_t) *
					MHI_MAX_CHANNELS, GFP_KERNEL);
	if (NULL == mhi_dev_ctxt->db_write_lock)
		goto db_write_lock_free;
	/* nr_of_cc/nr_of_cmdc were set to the same bounds in
	 * mhi_create_ctxt; the loops below are equivalent ranges. */
	for (i = 0; i < mhi_dev_ctxt->nr_of_cc; ++i)
		mutex_init(&mhi_dev_ctxt->mhi_chan_mutex[i]);
	for (i = 0; i < MHI_MAX_CHANNELS; ++i)
		spin_lock_init(&mhi_dev_ctxt->mhi_ev_spinlock_list[i]);
	for (i = 0; i < mhi_dev_ctxt->nr_of_cmdc; ++i)
		mutex_init(&mhi_dev_ctxt->mhi_cmd_mutex_list[i]);
	for (i = 0; i < MHI_MAX_CHANNELS; ++i)
		spin_lock_init(&mhi_dev_ctxt->db_write_lock[i]);
	rwlock_init(&mhi_dev_ctxt->xfer_lock);
	mutex_init(&mhi_dev_ctxt->mhi_link_state);
	mutex_init(&mhi_dev_ctxt->pm_lock);
	return MHI_STATUS_SUCCESS;

db_write_lock_free:
	kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
cmd_mutex_free:
	kfree(mhi_dev_ctxt->mhi_chan_mutex);
chan_mutex_free:
	kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
ev_mutex_free:
	return MHI_STATUS_ALLOC_ERROR;
}

/*
 * mhi_init_ctrl_zone - allocate the bookkeeping struct for the control
 * segment and bind it to the PCIe device used for DMA mapping.
 */
static enum MHI_STATUS mhi_init_ctrl_zone(struct mhi_pcie_dev_info *dev_info,
					struct mhi_device_ctxt *mhi_dev_ctxt)
{
	mhi_dev_ctxt->mhi_ctrl_seg_info = kmalloc(sizeof(struct mhi_meminfo),
						GFP_KERNEL);
	if (NULL == mhi_dev_ctxt->mhi_ctrl_seg_info)
		return MHI_STATUS_ALLOC_ERROR;
	mhi_dev_ctxt->mhi_ctrl_seg_info->dev = &dev_info->pcie_device->dev;
	return MHI_STATUS_SUCCESS;
}

/*
 * mhi_init_events - allocate and initialize the wait queues used to
 * signal the worker threads and state transitions.  On failure the
 * labels unwind in reverse allocation order.
 */
static enum MHI_STATUS mhi_init_events(struct mhi_device_ctxt *mhi_dev_ctxt)
{

	mhi_dev_ctxt->event_handle = kmalloc(sizeof(wait_queue_head_t),
					GFP_KERNEL);
	if (NULL == mhi_dev_ctxt->event_handle) {
		mhi_log(MHI_MSG_ERROR, "Failed to init event");
		return MHI_STATUS_ERROR;
	}
	mhi_dev_ctxt->state_change_event_handle =
		kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
	if (NULL == mhi_dev_ctxt->state_change_event_handle) {
		mhi_log(MHI_MSG_ERROR, "Failed to init event");
		goto error_event_handle_alloc;
	}
	/* Event which signals entry into M0 */
	mhi_dev_ctxt->M0_event = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
	if (NULL == mhi_dev_ctxt->M0_event) {
		mhi_log(MHI_MSG_ERROR, "Failed to init event");
		goto error_state_change_event_handle;
	}
	/* Event which signals entry into M3 */
	mhi_dev_ctxt->M3_event = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
	if (NULL == mhi_dev_ctxt->M3_event) {
		mhi_log(MHI_MSG_ERROR, "Failed to init event");
		goto error_M0_event;
	}
	/* Event which signals BHI completion */
	mhi_dev_ctxt->bhi_event = kmalloc(sizeof(wait_queue_head_t),
					GFP_KERNEL);
	if (NULL == mhi_dev_ctxt->bhi_event) {
		mhi_log(MHI_MSG_ERROR, "Failed to init event");
		goto error_bhi_event;
	}
	mhi_dev_ctxt->chan_start_complete =
		kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
	if (NULL == mhi_dev_ctxt->chan_start_complete) {
		mhi_log(MHI_MSG_ERROR, "Failed to init event");
		goto error_chan_complete;
	}
	/* Initialize the event which starts the event parsing thread */
	init_waitqueue_head(mhi_dev_ctxt->event_handle);
	/* Initialize the event which starts the state change thread */
	init_waitqueue_head(mhi_dev_ctxt->state_change_event_handle);
	/* Initialize the event which triggers clients waiting to send */
	init_waitqueue_head(mhi_dev_ctxt->M0_event);
	/* Initialize the event which triggers D3hot */
	init_waitqueue_head(mhi_dev_ctxt->M3_event);
	init_waitqueue_head(mhi_dev_ctxt->bhi_event);
	init_waitqueue_head(mhi_dev_ctxt->chan_start_complete);

	return MHI_STATUS_SUCCESS;
error_chan_complete:
	kfree(mhi_dev_ctxt->bhi_event);
error_bhi_event:
	kfree(mhi_dev_ctxt->M3_event);
error_M0_event:
	kfree(mhi_dev_ctxt->M0_event);
error_state_change_event_handle:
	kfree(mhi_dev_ctxt->state_change_event_handle);
error_event_handle_alloc:
	kfree(mhi_dev_ctxt->event_handle);
	return MHI_STATUS_ERROR;
}
MHI_STATUS_ALLOC_ERROR; + spin_lock_init(q->q_lock); + } else { + spin_lock_irqsave(q->q_lock, flags); + lock_acquired = 1; + } + q->queue_full_cntr = 0; + q->q_info.base = q->buf; + q->q_info.rp = q->buf; + q->q_info.wp = q->buf; + q->q_info.len = MHI_WORK_Q_MAX_SIZE * sizeof(enum STATE_TRANSITION); + q->q_info.el_size = sizeof(enum STATE_TRANSITION); + q->q_info.overwrite_en = 0; + if (lock_acquired) + spin_unlock_irqrestore(q->q_lock, flags); + + return MHI_STATUS_SUCCESS; +} + +static enum MHI_STATUS mhi_init_device_ctrl(struct mhi_device_ctxt + *mhi_dev_ctxt) +{ + size_t ctrl_seg_size = 0; + size_t ctrl_seg_offset = 0; + u32 i = 0; + u32 align_len = sizeof(u64)*2; + enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; + + mhi_dev_ctxt->enable_lpm = 1; + if (NULL == mhi_dev_ctxt || NULL == mhi_dev_ctxt->mhi_ctrl_seg_info || + NULL == mhi_dev_ctxt->mhi_ctrl_seg_info->dev) + return MHI_STATUS_ERROR; + + mhi_log(MHI_MSG_INFO, "Allocating control segment.\n"); + ctrl_seg_size += sizeof(struct mhi_control_seg); + /* Calculate the size of the control segment needed */ + ctrl_seg_size += align_len - (ctrl_seg_size % align_len); + for (i = 0; i < MHI_MAX_CHANNELS; ++i) { + if (IS_HARDWARE_CHANNEL(i)) + ctrl_seg_size += sizeof(union mhi_xfer_pkt) * + (MAX_NR_TRBS_PER_HARD_CHAN + ELEMENT_GAP); + else if (IS_SOFTWARE_CHANNEL(i)) + ctrl_seg_size += sizeof(union mhi_xfer_pkt) * + (MAX_NR_TRBS_PER_SOFT_CHAN + ELEMENT_GAP); + } + ctrl_seg_size += align_len - (ctrl_seg_size % align_len); + + for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) + ctrl_seg_size += sizeof(union mhi_event_pkt)* + (EV_EL_PER_RING + ELEMENT_GAP); + + ctrl_seg_size += align_len - (ctrl_seg_size % align_len); + ret_val = mhi_mallocmemregion(mhi_dev_ctxt->mhi_ctrl_seg_info, + ctrl_seg_size); + if (MHI_STATUS_SUCCESS != ret_val) + return MHI_STATUS_ERROR; + (mhi_dev_ctxt->mhi_ctrl_seg = + mhi_get_virt_addr(mhi_dev_ctxt->mhi_ctrl_seg_info)); + + if (0 == mhi_dev_ctxt->mhi_ctrl_seg) + return MHI_STATUS_ALLOC_ERROR; + + 
/* Set the channel contexts, event contexts and cmd context */ + ctrl_seg_offset = (uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg + + sizeof(struct mhi_control_seg); + ctrl_seg_offset += align_len - (ctrl_seg_offset % align_len); + /* Set the TRB lists */ + for (i = 0; i < MHI_MAX_CHANNELS; ++i) { + if (IS_HARDWARE_CHANNEL(i)) { + mhi_dev_ctxt->mhi_ctrl_seg->xfer_trb_list[i] = + (union mhi_xfer_pkt *)ctrl_seg_offset; + ctrl_seg_offset += sizeof(union mhi_xfer_pkt) * + (MAX_NR_TRBS_PER_HARD_CHAN + ELEMENT_GAP); + + } else if (IS_SOFTWARE_CHANNEL(i)) { + mhi_dev_ctxt->mhi_ctrl_seg->xfer_trb_list[i] = + (union mhi_xfer_pkt *)ctrl_seg_offset; + ctrl_seg_offset += sizeof(union mhi_xfer_pkt) * + (MAX_NR_TRBS_PER_SOFT_CHAN + ELEMENT_GAP); + } + } + + ctrl_seg_offset += align_len - (ctrl_seg_offset % align_len); + for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) { + mhi_dev_ctxt->mhi_ctrl_seg->ev_trb_list[i] = + (union mhi_event_pkt *)ctrl_seg_offset; + ctrl_seg_offset += sizeof(union mhi_event_pkt) * + (EV_EL_PER_RING + ELEMENT_GAP); + } + return MHI_STATUS_SUCCESS; +} +/** + * mhi_event_ring_init - Initialize an event ring + * + * @ev_list: Event ring context to initialize + * @trb_list_phy_addr: Pointer to phy mem to the tre list for event ring + * @trb_list_virt_addr: Pointer to virt mem to the tre list for event ring + * @el_per_ring: Number of event ring elements in this ring + * @ring: Pointer to the shadow context of this event ring + * + * @Return MHI_STATUS + */ +static enum MHI_STATUS mhi_event_ring_init(struct mhi_event_ctxt *ev_list, + uintptr_t trb_list_phy_addr, uintptr_t trb_list_virt_addr, + size_t el_per_ring, struct mhi_ring *ring, + u32 intmodt_val, u32 msi_vec) +{ + ev_list->mhi_event_er_type = MHI_EVENT_RING_TYPE_VALID; + ev_list->mhi_msi_vector = msi_vec; + ev_list->mhi_event_ring_base_addr = trb_list_phy_addr; + ev_list->mhi_event_ring_len = el_per_ring*sizeof(union mhi_event_pkt); + ev_list->mhi_event_read_ptr = trb_list_phy_addr; + ev_list->mhi_event_write_ptr 
= trb_list_phy_addr; + MHI_SET_EV_CTXT(EVENT_CTXT_INTMODT, ev_list, intmodt_val); + ring->wp = (void *)(uintptr_t)trb_list_virt_addr; + ring->rp = (void *)(uintptr_t)trb_list_virt_addr; + ring->base = (void *)(uintptr_t)(trb_list_virt_addr); + ring->len = ((size_t)(el_per_ring)*sizeof(union mhi_event_pkt)); + ring->el_size = sizeof(union mhi_event_pkt); + ring->overwrite_en = 0; + return MHI_STATUS_SUCCESS; +} +/** + * mhi_cmd_ring_init- Initialization of the command ring + * + * @cmd_ctxt: command ring context to initialize + * @trb_list_phy_addr: Pointer to the pysical address of the tre ring + * @trb_list_virt_addr: Pointer to the virtual address of the tre ring + * @el_per_ring: Number of elements in this command ring + * @ring: Pointer to the shadow command context + * + * @Return MHI_STATUS + */ +static enum MHI_STATUS mhi_cmd_ring_init(struct mhi_cmd_ctxt *cmd_ctxt, + uintptr_t trb_list_phy_addr, + uintptr_t trb_list_virt_addr, + size_t el_per_ring, struct mhi_ring *ring) +{ + cmd_ctxt->mhi_cmd_ring_base_addr = trb_list_phy_addr; + cmd_ctxt->mhi_cmd_ring_read_ptr = trb_list_phy_addr; + cmd_ctxt->mhi_cmd_ring_write_ptr = trb_list_phy_addr; + cmd_ctxt->mhi_cmd_ring_len = + (size_t)el_per_ring*sizeof(union mhi_cmd_pkt); + ring[PRIMARY_CMD_RING].wp = (void *)trb_list_virt_addr; + ring[PRIMARY_CMD_RING].rp = (void *)trb_list_virt_addr; + ring[PRIMARY_CMD_RING].base = (void *)trb_list_virt_addr; + ring[PRIMARY_CMD_RING].len = + (size_t)el_per_ring*sizeof(union mhi_cmd_pkt); + ring[PRIMARY_CMD_RING].el_size = sizeof(union mhi_cmd_pkt); + ring[PRIMARY_CMD_RING].overwrite_en = 0; + return MHI_STATUS_SUCCESS; +} + +static enum MHI_STATUS mhi_init_timers(struct mhi_device_ctxt *mhi_dev_ctxt) +{ + hrtimer_init(&mhi_dev_ctxt->m1_timer, + CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + mhi_dev_ctxt->m1_timeout = + ktime_set(0, MHI_M1_ENTRY_DELAY_MS * 1E6L); + mhi_dev_ctxt->m1_timer.function = mhi_initiate_m1; + mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER, + "Starting M1 timer\n"); + 
return MHI_STATUS_SUCCESS; +} + +static enum MHI_STATUS mhi_init_wakelock(struct mhi_device_ctxt *mhi_dev_ctxt) +{ + wakeup_source_init(&mhi_dev_ctxt->w_lock, "mhi_wakeup_source"); + return MHI_STATUS_SUCCESS; +} + +static enum MHI_STATUS mhi_init_contexts(struct mhi_device_ctxt *mhi_dev_ctxt) +{ + u32 i = 0; + struct mhi_control_seg *mhi_ctrl = mhi_dev_ctxt->mhi_ctrl_seg; + struct mhi_event_ctxt *event_ctxt = NULL; + u32 event_ring_index = 0; + union mhi_xfer_pkt *trb_list = NULL; + struct mhi_chan_ctxt *chan_ctxt = NULL; + struct mhi_ring *local_event_ctxt = NULL; + u32 msi_vec = 0; + u32 intmod_t = 0; + uintptr_t ev_ring_addr; + + for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) { + MHI_GET_EVENT_RING_INFO(EVENT_RING_MSI_VEC, + mhi_dev_ctxt->ev_ring_props[i], + msi_vec); + switch (i) { + case IPA_OUT_EV_RING: + intmod_t = 10; + break; + case IPA_IN_EV_RING: + intmod_t = 6; + break; + } + event_ring_index = mhi_dev_ctxt->alloced_ev_rings[i]; + event_ctxt = &mhi_ctrl->mhi_ec_list[event_ring_index]; + local_event_ctxt = + &mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index]; + + ev_ring_addr = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, + (uintptr_t)mhi_ctrl->ev_trb_list[i]); + mhi_log(MHI_MSG_VERBOSE, + "Setting msi_vec 0x%x, for ev ring ctxt 0x%x\n", + msi_vec, event_ring_index); + mhi_event_ring_init(event_ctxt, ev_ring_addr, + (uintptr_t)mhi_ctrl->ev_trb_list[i], + EV_EL_PER_RING, local_event_ctxt, + intmod_t, msi_vec); + } + + /* Init Command Ring */ + mhi_cmd_ring_init(&mhi_ctrl->mhi_cmd_ctxt_list[PRIMARY_CMD_RING], + mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, + (uintptr_t)mhi_ctrl->cmd_trb_list[PRIMARY_CMD_RING]), + (uintptr_t)mhi_ctrl->cmd_trb_list[PRIMARY_CMD_RING], + CMD_EL_PER_RING, + &mhi_dev_ctxt->mhi_local_cmd_ctxt[PRIMARY_CMD_RING]); + + mhi_log(MHI_MSG_INFO, "Initializing contexts\n"); + /* Initialize Channel Contexts */ + for (i = 0; i < MHI_MAX_CHANNELS; ++i) { + trb_list = mhi_dev_ctxt->mhi_ctrl_seg->xfer_trb_list[i]; + chan_ctxt = 
&mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[i]; + if (IS_SOFTWARE_CHANNEL(i)) { + mhi_init_chan_ctxt(chan_ctxt, + mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, + (uintptr_t)trb_list), + (uintptr_t)trb_list, + MAX_NR_TRBS_PER_SOFT_CHAN, + (i % 2) ? MHI_IN : MHI_OUT, + 0, + &mhi_dev_ctxt->mhi_local_chan_ctxt[i]); + } else if (IS_HARDWARE_CHANNEL(i)) { + mhi_init_chan_ctxt(chan_ctxt, + mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, + (uintptr_t)trb_list), + (uintptr_t)trb_list, + MAX_NR_TRBS_PER_HARD_CHAN, + (i % 2) ? MHI_IN : MHI_OUT, + i, + &mhi_dev_ctxt->mhi_local_chan_ctxt[i]); + } + } + mhi_dev_ctxt->mhi_state = MHI_STATE_RESET; + + return MHI_STATUS_SUCCESS; +} + +static enum MHI_STATUS mhi_init_work_queues( + struct mhi_device_ctxt *mhi_dev_ctxt) +{ + mhi_dev_ctxt->work_queue = create_singlethread_workqueue("mhi"); + if (NULL == mhi_dev_ctxt->work_queue) { + mhi_log(MHI_MSG_CRITICAL | MHI_DBG_POWER, + "Failed to create MHI work queue.\n"); + return MHI_STATUS_ERROR; + } + INIT_DELAYED_WORK(&mhi_dev_ctxt->m3_work, delayed_m3); + INIT_WORK(&mhi_dev_ctxt->m0_work, m0_work); + return MHI_STATUS_SUCCESS; +} + +/** + * @brief Spawn all the MHI threads + * + * @param mhi_dev_ctxt mhi mhi_dev_ctxt context + * + * @return MHI_STATUS + * + */ +static enum MHI_STATUS mhi_spawn_threads(struct mhi_device_ctxt *mhi_dev_ctxt) +{ + mhi_dev_ctxt->event_thread_handle = kthread_run(parse_event_thread, + mhi_dev_ctxt, + "mhi_ev_thrd"); + if (-ENOMEM == (int)mhi_dev_ctxt->event_thread_handle) + return MHI_STATUS_ERROR; + mhi_dev_ctxt->st_thread_handle = kthread_run(mhi_state_change_thread, + mhi_dev_ctxt, + "mhi_st_thrd"); + if (-ENOMEM == (int)mhi_dev_ctxt->event_thread_handle) + return MHI_STATUS_ERROR; + return MHI_STATUS_SUCCESS; +} + +/** + * @brief Main initialization function for a mhi struct device context + * All threads, events mutexes, mhi specific data structures + * are initialized here + * + * @param dev_info [IN ] pcie struct device information structure to + which this 
/**
 * @brief Main initialization function for a mhi struct device context
 *	  All threads, events mutexes, mhi specific data structures
 *	  are initialized here
 *
 * @param dev_info [IN ] pcie struct device information structure to
 *			 which this mhi context belongs
 * @param mhi_struct device [IN/OUT] reference to a mhi context to be populated
 *
 * @return MHI_STATUS
 *
 * Stages run in a fixed order; on failure each stage unwinds via
 * mhi_clean_init_stage() with the stage that was reached.
 * NOTE(review): the mhi_spawn_threads() failure path returns without
 * calling mhi_clean_init_stage(), unlike every sibling path - confirm
 * whether this is intentional (threads may already reference the ctxt).
 */
enum MHI_STATUS mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info,
		struct mhi_device_ctxt *mhi_dev_ctxt)
{
	if (NULL == dev_info || NULL == mhi_dev_ctxt)
		return MHI_STATUS_ERROR;
	mhi_log(MHI_MSG_VERBOSE, "mhi_init_device_ctxt>Init MHI dev ctxt\n");

	if (MHI_STATUS_SUCCESS != mhi_create_ctxt(mhi_dev_ctxt)) {
		mhi_log(MHI_MSG_ERROR, "Failed to initialize mhi dev ctxt\n");
		return MHI_STATUS_ERROR;
	}
	if (MHI_STATUS_SUCCESS != mhi_init_sync(mhi_dev_ctxt)) {
		mhi_log(MHI_MSG_ERROR, "Failed to initialize mhi sync\n");
		mhi_clean_init_stage(mhi_dev_ctxt, MHI_INIT_ERROR_STAGE_SYNC);
		return MHI_STATUS_ERROR;
	}
	if (MHI_STATUS_SUCCESS != mhi_init_ctrl_zone(dev_info, mhi_dev_ctxt)) {
		mhi_log(MHI_MSG_ERROR, "Failed to initialize memory zones\n");
		mhi_clean_init_stage(mhi_dev_ctxt,
					MHI_INIT_ERROR_STAGE_MEM_ZONES);
		return MHI_STATUS_ERROR;
	}
	if (MHI_STATUS_SUCCESS != mhi_init_events(mhi_dev_ctxt)) {
		mhi_log(MHI_MSG_ERROR, "Failed to initialize mhi events\n");
		mhi_clean_init_stage(mhi_dev_ctxt, MHI_INIT_ERROR_STAGE_EVENTS);
		return MHI_STATUS_ERROR;
	}
	if (MHI_STATUS_SUCCESS != mhi_reset_all_thread_queues(mhi_dev_ctxt)) {
		mhi_log(MHI_MSG_ERROR, "Failed to initialize work queues\n");
		mhi_clean_init_stage(mhi_dev_ctxt,
					MHI_INIT_ERROR_STAGE_THREAD_QUEUES);
		return MHI_STATUS_ERROR;
	}
	if (MHI_STATUS_SUCCESS != mhi_init_device_ctrl(mhi_dev_ctxt)) {
		mhi_log(MHI_MSG_ERROR, "Failed to initialize ctrl seg\n");
		mhi_clean_init_stage(mhi_dev_ctxt,
					MHI_INIT_ERROR_STAGE_THREAD_QUEUES);
		return MHI_STATUS_ERROR;
	}
	if (MHI_STATUS_SUCCESS != mhi_init_contexts(mhi_dev_ctxt)) {
		mhi_log(MHI_MSG_ERROR, "Failed initializing contexts\n");
		mhi_clean_init_stage(mhi_dev_ctxt,
					MHI_INIT_ERROR_STAGE_DEVICE_CTRL);
		return MHI_STATUS_ERROR;
	}
	if (MHI_STATUS_SUCCESS != mhi_spawn_threads(mhi_dev_ctxt)) {
		mhi_log(MHI_MSG_ERROR, "Failed to spawn threads\n");
		return MHI_STATUS_ERROR;
	}
	if (MHI_STATUS_SUCCESS != mhi_init_timers(mhi_dev_ctxt)) {
		mhi_log(MHI_MSG_ERROR, "Failed initializing timers\n");
		mhi_clean_init_stage(mhi_dev_ctxt,
					MHI_INIT_ERROR_STAGE_DEVICE_CTRL);
		return MHI_STATUS_ERROR;
	}
	if (MHI_STATUS_SUCCESS != mhi_init_wakelock(mhi_dev_ctxt)) {
		mhi_log(MHI_MSG_ERROR, "Failed to initialize wakelock\n");
		mhi_clean_init_stage(mhi_dev_ctxt,
					MHI_INIT_ERROR_STAGE_DEVICE_CTRL);
		return MHI_STATUS_ERROR;
	}
	if (MHI_STATUS_SUCCESS != mhi_init_work_queues(mhi_dev_ctxt)) {
		mhi_log(MHI_MSG_ERROR,
			"Failed initializing work queues\n");
		mhi_clean_init_stage(mhi_dev_ctxt,
					MHI_INIT_ERROR_STAGE_DEVICE_CTRL);
		return MHI_STATUS_ERROR;
	}
	mhi_dev_ctxt->dev_info = dev_info;
	mhi_dev_ctxt->dev_props = &dev_info->core;

	return MHI_STATUS_SUCCESS;
}
spin_unlock_irqrestore(lock, flags); + return ret_val; +} + +/** + * @brief Initialize the channel context and shadow context + * + * @cc_list: Context to initialize + * @trb_list_phy: Physical base address for the TRE ring + * @trb_list_virt: Virtual base address for the TRE ring + * @el_per_ring: Number of TREs this ring will contain + * @chan_type: Type of channel IN/OUT + * @event_ring: Event ring to be mapped to this channel context + * @ring: Shadow context to be initialized alongside + * + * @Return MHI_STATUS + */ +enum MHI_STATUS mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list, + uintptr_t trb_list_phy, uintptr_t trb_list_virt, + u64 el_per_ring, enum MHI_CHAN_TYPE chan_type, + u32 event_ring, struct mhi_ring *ring) +{ + cc_list->mhi_chan_state = MHI_CHAN_STATE_DISABLED; + cc_list->mhi_chan_type = chan_type; + cc_list->mhi_event_ring_index = event_ring; + cc_list->mhi_trb_ring_base_addr = trb_list_phy; + cc_list->mhi_trb_ring_len = + ((size_t)(el_per_ring)*sizeof(struct mhi_tx_pkt)); + cc_list->mhi_trb_read_ptr = trb_list_phy; + cc_list->mhi_trb_write_ptr = trb_list_phy; + ring->rp = (void *)(trb_list_virt); + ring->ack_rp = ring->rp; + ring->wp = (void *)(trb_list_virt); + ring->base = (void *)(trb_list_virt); + ring->len = ((size_t)(el_per_ring)*sizeof(struct mhi_tx_pkt)); + ring->el_size = sizeof(struct mhi_tx_pkt); + ring->overwrite_en = 0; + return MHI_STATUS_SUCCESS; +} + +enum MHI_STATUS mhi_reset_all_thread_queues( + struct mhi_device_ctxt *mhi_dev_ctxt) +{ + enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; + + mhi_init_state_change_thread_work_queue( + &mhi_dev_ctxt->state_change_work_item_list); + if (MHI_STATUS_SUCCESS != ret_val) { + mhi_log(MHI_MSG_ERROR, "Failed to reset STT work queue\n"); + return ret_val; + } + return ret_val; +} + +enum MHI_STATUS mhi_reg_notifiers(struct mhi_device_ctxt *mhi_dev_ctxt) +{ + u32 ret_val; + if (NULL == mhi_dev_ctxt) + return MHI_STATUS_ERROR; + mhi_dev_ctxt->mhi_cpu_notifier.notifier_call = 
mhi_cpu_notifier_cb; + ret_val = register_cpu_notifier(&mhi_dev_ctxt->mhi_cpu_notifier); + if (ret_val) + return MHI_STATUS_ERROR; + else + return MHI_STATUS_SUCCESS; +} + diff --git a/drivers/platform/msm/mhi/mhi_isr.c b/drivers/platform/msm/mhi/mhi_isr.c new file mode 100644 index 000000000000..90572210ef20 --- /dev/null +++ b/drivers/platform/msm/mhi/mhi_isr.c @@ -0,0 +1,244 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include <linux/interrupt.h> + +#include "mhi_sys.h" + + +irqreturn_t mhi_msi_handlr(int irq_number, void *dev_id) +{ + struct device *mhi_device = dev_id; + u32 client_index; + struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data; + struct mhi_client_handle *client_handle; + struct mhi_client_info_t *client_info; + struct mhi_cb_info cb_info; + + if (NULL == mhi_dev_ctxt) { + mhi_log(MHI_MSG_ERROR, "Failed to get a proper context\n"); + return IRQ_HANDLED; + } + mhi_dev_ctxt->msi_counter[IRQ_TO_MSI(mhi_dev_ctxt, irq_number)]++; + mhi_log(MHI_MSG_VERBOSE, + "Got MSI 0x%x\n", IRQ_TO_MSI(mhi_dev_ctxt, irq_number)); + switch (IRQ_TO_MSI(mhi_dev_ctxt, irq_number)) { + case 0: + case 1: + case 2: + atomic_inc(&mhi_dev_ctxt->flags.events_pending); + wake_up_interruptible(mhi_dev_ctxt->event_handle); + break; + case 3: + client_index = + mhi_dev_ctxt->alloced_ev_rings[IPA_IN_EV_RING]; + client_handle = mhi_dev_ctxt->client_handle_list[client_index]; + client_info = &client_handle->client_info; + + if (likely(NULL != client_handle)) { + 
client_handle->result.user_data = + client_handle->user_data; + if (likely(NULL != &client_info->mhi_client_cb)) { + cb_info.result = &client_handle->result; + cb_info.cb_reason = MHI_CB_XFER; + cb_info.chan = client_handle->chan; + cb_info.result->transaction_status = + MHI_STATUS_SUCCESS; + client_info->mhi_client_cb(&cb_info); + } + } + break; + } + return IRQ_HANDLED; +} + +static enum MHI_STATUS mhi_process_event_ring( + struct mhi_device_ctxt *mhi_dev_ctxt, + u32 ev_index, + u32 event_quota) +{ + union mhi_event_pkt *local_rp = NULL; + union mhi_event_pkt *device_rp = NULL; + union mhi_event_pkt event_to_process; + struct mhi_event_ctxt *ev_ctxt = NULL; + struct mhi_ring *local_ev_ctxt = + &mhi_dev_ctxt->mhi_local_event_ctxt[ev_index]; + + ev_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[ev_index]; + + device_rp = + (union mhi_event_pkt *)mhi_p2v_addr( + mhi_dev_ctxt->mhi_ctrl_seg_info, + ev_ctxt->mhi_event_read_ptr); + local_rp = (union mhi_event_pkt *)local_ev_ctxt->rp; + + + if (unlikely(MHI_STATUS_SUCCESS != validate_ev_el_addr(local_ev_ctxt, + (uintptr_t)device_rp))) + mhi_log(MHI_MSG_ERROR, + "Failed to validate event ring element 0x%p\n", + device_rp); + + while ((local_rp != device_rp) && (event_quota > 0) && + (device_rp != NULL) && (local_rp != NULL)) { + event_to_process = *local_rp; + if (unlikely(MHI_STATUS_SUCCESS != + recycle_trb_and_ring(mhi_dev_ctxt, + local_ev_ctxt, + MHI_RING_TYPE_EVENT_RING, + ev_index))) + mhi_log(MHI_MSG_ERROR, "Failed to recycle ev pkt\n"); + switch (MHI_TRB_READ_INFO(EV_TRB_TYPE, (&event_to_process))) { + case MHI_PKT_TYPE_CMD_COMPLETION_EVENT: + mhi_log(MHI_MSG_INFO, + "MHI CCE received ring 0x%x\n", + ev_index); + __pm_stay_awake(&mhi_dev_ctxt->w_lock); + __pm_relax(&mhi_dev_ctxt->w_lock); + parse_cmd_event(mhi_dev_ctxt, + &event_to_process); + break; + case MHI_PKT_TYPE_TX_EVENT: + __pm_stay_awake(&mhi_dev_ctxt->w_lock); + parse_xfer_event(mhi_dev_ctxt, &event_to_process); + __pm_relax(&mhi_dev_ctxt->w_lock); + 
break; + case MHI_PKT_TYPE_STATE_CHANGE_EVENT: + { + enum STATE_TRANSITION new_state; + new_state = MHI_READ_STATE(&event_to_process); + mhi_log(MHI_MSG_INFO, + "MHI STE received ring 0x%x\n", + ev_index); + mhi_init_state_transition(mhi_dev_ctxt, new_state); + break; + } + case MHI_PKT_TYPE_EE_EVENT: + { + enum STATE_TRANSITION new_state; + mhi_log(MHI_MSG_INFO, + "MHI EEE received ring 0x%x\n", + ev_index); + __pm_stay_awake(&mhi_dev_ctxt->w_lock); + __pm_relax(&mhi_dev_ctxt->w_lock); + switch (MHI_READ_EXEC_ENV(&event_to_process)) { + case MHI_EXEC_ENV_SBL: + new_state = STATE_TRANSITION_SBL; + mhi_init_state_transition(mhi_dev_ctxt, + new_state); + break; + case MHI_EXEC_ENV_AMSS: + new_state = STATE_TRANSITION_AMSS; + mhi_init_state_transition(mhi_dev_ctxt, + new_state); + break; + } + break; + } + default: + mhi_log(MHI_MSG_ERROR, + "Unsupported packet type code 0x%x\n", + MHI_TRB_READ_INFO(EV_TRB_TYPE, + &event_to_process)); + break; + } + local_rp = (union mhi_event_pkt *)local_ev_ctxt->rp; + device_rp = (union mhi_event_pkt *)mhi_p2v_addr( + mhi_dev_ctxt->mhi_ctrl_seg_info, + (u64)ev_ctxt->mhi_event_read_ptr); + --event_quota; + } + return MHI_STATUS_SUCCESS; +} + +int parse_event_thread(void *ctxt) +{ + struct mhi_device_ctxt *mhi_dev_ctxt = ctxt; + u32 i = 0; + u32 ev_poll_en = 0; + int ret_val = 0; + + /* Go through all event rings */ + for (;;) { + ret_val = + wait_event_interruptible(*mhi_dev_ctxt->event_handle, + ((atomic_read( + &mhi_dev_ctxt->flags.events_pending) > 0) && + !mhi_dev_ctxt->flags.stop_threads) || + mhi_dev_ctxt->flags.kill_threads || + (mhi_dev_ctxt->flags.stop_threads && + !mhi_dev_ctxt->ev_thread_stopped)); + + switch (ret_val) { + case -ERESTARTSYS: + return 0; + break; + default: + if (mhi_dev_ctxt->flags.kill_threads) { + mhi_log(MHI_MSG_INFO, + "Caught exit signal, quitting\n"); + return 0; + } + if (mhi_dev_ctxt->flags.stop_threads) { + mhi_dev_ctxt->ev_thread_stopped = 1; + continue; + } + break; + } + 
mhi_dev_ctxt->ev_thread_stopped = 0; + atomic_dec(&mhi_dev_ctxt->flags.events_pending); + + for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) { + MHI_GET_EVENT_RING_INFO(EVENT_RING_POLLING, + mhi_dev_ctxt->ev_ring_props[i], + ev_poll_en) + if (ev_poll_en) { + mhi_process_event_ring(mhi_dev_ctxt, + mhi_dev_ctxt->alloced_ev_rings[i], + EV_EL_PER_RING); + } + } + } + return 0; +} + +struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle) +{ + enum MHI_STATUS ret_val; + client_handle->result.payload_buf = 0; + client_handle->result.bytes_xferd = 0; + ret_val = mhi_process_event_ring(client_handle->mhi_dev_ctxt, + client_handle->event_ring_index, + 1); + if (MHI_STATUS_SUCCESS != ret_val) + mhi_log(MHI_MSG_INFO, "NAPI failed to process event ring\n"); + return &(client_handle->result); +} + +void mhi_mask_irq(struct mhi_client_handle *client_handle) +{ + disable_irq_nosync(MSI_TO_IRQ(client_handle->mhi_dev_ctxt, + client_handle->msi_vec)); + client_handle->mhi_dev_ctxt->counters.msi_disable_cntr++; + if (client_handle->mhi_dev_ctxt->counters.msi_disable_cntr > + (client_handle->mhi_dev_ctxt->counters.msi_enable_cntr + 1)) + mhi_log(MHI_MSG_INFO, "No nested IRQ disable Allowed\n"); +} + +void mhi_unmask_irq(struct mhi_client_handle *client_handle) +{ + client_handle->mhi_dev_ctxt->counters.msi_enable_cntr++; + enable_irq(MSI_TO_IRQ(client_handle->mhi_dev_ctxt, + client_handle->msi_vec)); + if (client_handle->mhi_dev_ctxt->counters.msi_enable_cntr > + client_handle->mhi_dev_ctxt->counters.msi_disable_cntr) + mhi_log(MHI_MSG_INFO, "No nested IRQ enable Allowed\n"); +} diff --git a/drivers/platform/msm/mhi/mhi_macros.h b/drivers/platform/msm/mhi/mhi_macros.h new file mode 100644 index 000000000000..23ce3feb97c1 --- /dev/null +++ b/drivers/platform/msm/mhi/mhi_macros.h @@ -0,0 +1,341 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef _H_MHI_MACROS +#define _H_MHI_MACROS + +#define MHI_IPC_LOG_PAGES (50) +#define MHI_LOG_SIZE 0x1000 +#define MHI_LINK_STABILITY_WAIT_MS 100 +#define MHI_MAX_LINK_RETRIES 9 +#define DT_WAIT_RETRIES 30 +#define MHI_MAX_SUSPEND_RETRIES 1000 +#define MHI_VERSION 0x01000000 +#define ALIGNMENT_OFFSET 0xFFF +#define NR_OF_CMD_RINGS 1 +#define EV_EL_PER_RING (256 + 16) +#define CMD_EL_PER_RING 128 +#define ELEMENT_GAP 1 +#define MHI_EPID 4 +#define MHI_MAX_RESUME_TIMEOUT 5000 +#define MHI_MAX_SUSPEND_TIMEOUT 5000 +#define MHI_MAX_CMD_TIMEOUT 500 + +#define MAX_NR_MSI 4 + +#define EVENT_RINGS_ALLOCATED 4 +#define PRIMARY_EVENT_RING 0 +#define SOFTWARE_EV_RING 1 +#define IPA_OUT_EV_RING 2 +#define IPA_IN_EV_RING 3 + +#define PRIMARY_CMD_RING 0 +#define MHI_WORK_Q_MAX_SIZE 128 + +#define MAX_XFER_WORK_ITEMS 100 +#define MHI_MAX_SUPPORTED_DEVICES 1 + +#define MAX_NR_TRBS_PER_SOFT_CHAN 10 +#define MAX_NR_TRBS_PER_HARD_CHAN (128 + 16) +#define MHI_PCIE_VENDOR_ID 0x17CB +#define MHI_PCIE_DEVICE_ID 0x0300 +#define TRB_MAX_DATA_SIZE 0x1000 + + +#define MHI_DATA_SEG_WINDOW_START_ADDR 0x0ULL +#define MHI_DATA_SEG_WINDOW_END_ADDR 0x3E800000ULL + +#define MHI_M1_ENTRY_DELAY_MS 100 +#define MHI_XFER_DB_INTERVAL 8 +#define MHI_EV_DB_INTERVAL 32 + +#define MHI_HANDLE_MAGIC 0x12344321 +/* PCIe Device Info */ + +#define MHI_PCIE_DEVICE_BAR0_OFFSET_LOW (16) +#define MHI_PCIE_DEVICE_BAR0_OFFSET_HIGH (20) +#define MHI_PCIE_DEVICE_MANUFACT_ID_OFFSET (0) +#define MHI_PCIE_DEVICE_ID_OFFSET (2) + +#define 
IS_HARDWARE_CHANNEL(_CHAN_NR) \ + (((enum MHI_CLIENT_CHANNEL)(_CHAN_NR) > \ + MHI_CLIENT_RESERVED_1_UPPER) && \ + ((enum MHI_CLIENT_CHANNEL)(_CHAN_NR) < MHI_CLIENT_RESERVED_2_LOWER)) + +#define IS_SOFTWARE_CHANNEL(_CHAN_NR) \ + (((enum MHI_CLIENT_CHANNEL)(_CHAN_NR) >= 0) && \ + ((enum MHI_CLIENT_CHANNEL)(_CHAN_NR) < MHI_CLIENT_RESERVED_1_LOWER)) + +#define IRQ_TO_MSI(_MHI_DEV_CTXT, _IRQ_NR) \ + ((_IRQ_NR) - (_MHI_DEV_CTXT)->dev_info->core.irq_base) +#define MSI_TO_IRQ(_MHI_DEV_CTXT, _MSI_NR) \ + ((_MHI_DEV_CTXT)->dev_info->core.irq_base + (_MSI_NR)) +#define VALID_CHAN_NR(_CHAN_NR) (IS_HARDWARE_CHANNEL(_CHAN_NR) || \ + IS_SOFTWARE_CHANNEL(_CHAN_NR)) + +#define VALID_BUF(_BUF_ADDR, _BUF_LEN) \ + (((uintptr_t)(_BUF_ADDR) >= MHI_DATA_SEG_WINDOW_START_ADDR) && \ + (((uintptr_t)(_BUF_ADDR) + (uintptr_t)(_BUF_LEN) < \ + MHI_DATA_SEG_WINDOW_END_ADDR))) + +#define MHI_HW_INTMOD_VAL_MS 2 +/* Timeout Values */ +#define MHI_READY_STATUS_TIMEOUT_MS 50 +#define MHI_THREAD_SLEEP_TIMEOUT_MS 20 +#define MHI_RESUME_WAKE_RETRIES 20 + +/* Debugging Capabilities*/ +#define MHI_DBG_MAX_EVENT_HISTORY 10 + +/* MHI Transfer Ring Elements 7.4.1*/ +#define TX_TRB_LEN +#define MHI_TX_TRB_LEN__SHIFT (0) +#define MHI_TX_TRB_LEN__MASK (0xFFFF) + +#define MHI_TX_TRB_SET_LEN(_FIELD, _PKT, _VAL) \ +{ \ + u32 new_val = ((_PKT)->data_tx_pkt).buf_len; \ + new_val &= (~((MHI_##_FIELD ## __MASK) << MHI_##_FIELD ## __SHIFT)); \ + new_val |= (_VAL) << MHI_##_FIELD ## __SHIFT; \ + new_val &= (((MHI_##_FIELD ## __MASK) << MHI_##_FIELD ## __SHIFT)); \ + ((_PKT)->data_tx_pkt).buf_len = new_val; \ +} +#define MHI_TX_TRB_GET_LEN(_FIELD, _PKT) \ + (((_PKT)->data_tx_pkt).buf_len & (((MHI_##_FIELD ## __MASK) << \ + MHI_##_FIELD ## __SHIFT))); \ + +/* MHI Event Ring Elements 7.4.1*/ +#define EV_TRB_CODE +#define MHI_EV_TRB_CODE__MASK (0xFF) +#define MHI_EV_TRB_CODE__SHIFT (24) +#define MHI_EV_READ_CODE(_FIELD, _PKT) (((_PKT->type).xfer_details >> \ + MHI_##_FIELD ## __SHIFT) & \ + MHI_ ##_FIELD ## __MASK) +#define 
EV_LEN +#define MHI_EV_LEN__MASK (0xFFFF) +#define MHI_EV_LEN__SHIFT (0) +#define MHI_EV_READ_LEN(_FIELD, _PKT) (((_PKT->xfer_event_pkt).xfer_details >> \ + MHI_##_FIELD ## __SHIFT) & \ + MHI_ ##_FIELD ## __MASK) + +#define EV_CHID +#define MHI_EV_CHID__MASK (0xFF) +#define MHI_EV_CHID__SHIFT (24) +#define MHI_EV_READ_CHID(_FIELD, _PKT) ((((_PKT)->xfer_event_pkt).info >> \ + MHI_##_FIELD ## __SHIFT) & \ + MHI_ ##_FIELD ## __MASK) + +#define EV_PTR +#define MHI_EV_PTR__MASK (0xFFFFFFFFFFFFFFFFULL) +#define MHI_EV_PTR__SHIFT (0) + +#define MHI_EV_READ_PTR(_FIELD, _PKT) ((((_PKT)->xfer_event_pkt).xfer_ptr >> \ + MHI_##_FIELD ## __SHIFT) & \ + MHI_ ##_FIELD ## __MASK) + +#define EV_STATE +#define MHI_EV_STATE__MASK (0xFF) +#define MHI_EV_STATE__SHIFT (24) +#define MHI_READ_STATE(_PKT) ((((_PKT)->state_change_event_pkt).state >> \ + MHI_EV_STATE__SHIFT) & \ + MHI_EV_STATE__MASK) + +#define EXEC_ENV +#define MHI_EXEC_ENV__MASK (0xFF) +#define MHI_EXEC_ENV__SHIFT (24) +#define MHI_READ_EXEC_ENV(_PKT) ((((_PKT)->ee_event_pkt).exec_env>> \ + MHI_EXEC_ENV__SHIFT) & \ + MHI_EXEC_ENV__MASK) + +/* MacroS for reading common "info" field for TRBs*/ +#define TX_TRB_CHAIN +#define MHI_TX_TRB_CHAIN__SHIFT (0) +#define MHI_TX_TRB_CHAIN__MASK (0x1) +#define TX_TRB_IEOB +#define MHI_TX_TRB_IEOB__MASK (0x1) +#define MHI_TX_TRB_IEOB__SHIFT (8) +#define TX_TRB_IEOT +#define MHI_TX_TRB_IEOT__MASK (0x1) +#define MHI_TX_TRB_IEOT__SHIFT (9) +#define TX_TRB_BEI +#define MHI_TX_TRB_BEI__MASK (0x1) +#define MHI_TX_TRB_BEI__SHIFT (10) +#define TX_TRB_TYPE +#define MHI_TX_TRB_TYPE__MASK (0xFF) +#define MHI_TX_TRB_TYPE__SHIFT (16) + +#define EV_TRB_TYPE +#define MHI_EV_TRB_TYPE__MASK (0xFF) +#define MHI_EV_TRB_TYPE__SHIFT (16) + +#define CMD_TRB_TYPE +#define MHI_CMD_TRB_TYPE__MASK (0xFF) +#define MHI_CMD_TRB_TYPE__SHIFT (16) + +#define CMD_TRB_CHID +#define MHI_CMD_TRB_CHID__MASK (0xFF) +#define MHI_CMD_TRB_CHID__SHIFT (24) + +#define MHI_TRB_SET_INFO(_FIELD, _PKT, _VAL) \ + do { \ + u32 new_val = 
((_PKT)->type).info; \ + new_val &= (~((MHI_##_FIELD ## __MASK) << \ + MHI_##_FIELD ## __SHIFT)); \ + new_val |= _VAL << MHI_##_FIELD ## __SHIFT; \ + (_PKT->type).info = new_val; \ + } while (0) + +#define MHI_TRB_GET_INFO(_FIELD, _PKT, _DEST) \ + do { \ + _DEST = ((_PKT)->type).info; \ + _DEST &= (((MHI_##_FIELD ## __MASK) << \ + MHI_##_FIELD ## __SHIFT)); \ + _DEST >>= MHI_##_FIELD ## __SHIFT; \ + } while (0) + +#define MHI_TRB_READ_INFO(_FIELD, _PKT) \ + ((((_PKT)->type).info >> MHI_##_FIELD ## __SHIFT) & \ + MHI_##_FIELD ## __MASK) + +#define HIGH_WORD(_x) ((u32)((((u64)(_x)) >> 32) & 0xFFFFFFFF)) +#define LOW_WORD(_x) ((u32)(((u64)(_x)) & 0xFFFFFFFF)) + +#define MHI_REG_WRITE(_base, _offset, _val) \ + do { \ + u32 addr; \ + (addr) = (u32)(_base) + (u32)(_offset); \ + *(u32 *)(addr) = (_val); \ + wmb(); \ + mhi_log(MHI_MSG_INFO, "d.s 0x%x %%LONG 0x%x\n", \ + (u32)(_offset),\ + (u32)(_val)); \ + } while (0) + +#define MHI_REG_WRITE_FIELD(_base, _offset, _mask, _shift, _val) \ + do { \ + u32 reg_val; \ + MHI_REG_READ(_base, _offset, reg_val); \ + reg_val &= ~(_mask); \ + reg_val = reg_val | ((u32)(_val) << ((u32)(_shift))); \ + MHI_REG_WRITE(_base, _offset, reg_val); \ + } while (0) + +#define MHI_REG_READ(_base, _offset, _dest) \ + do { \ + u32 addr; \ + (addr) = (u32)(_base) + (u32)(_offset); \ + (_dest) = *(u32 *)(addr); \ + } while (0) + +#define MHI_REG_READ_FIELD(_base, _offset, _mask, _shift, _dest) \ + do { \ + MHI_REG_READ(_base, _offset, (_dest)); \ + (_dest) &= (u32)(_mask); \ + (_dest) >>= (u32)(_shift); \ + } while (0) + +#define MHI_READ_FIELD(_val, _mask, _shift) \ + do { \ + _val &= (u32)(_mask); \ + _val >>= (u32)(_shift); \ + } while (0) + +#define MHI_WRITE_DB(_mhi_dev_ctxt, _addr, _index, _val) \ +{ \ + u32 word; \ + void *offset = (void *)(_index * sizeof(u64)); \ + mhi_log(MHI_MSG_VERBOSE, \ + "db.set addr: 0x%llX offset 0x%x val:0x%llX\n", \ + (u64)_addr, (unsigned int)_index, (u64)_val); \ + if (mhi_dev_ctxt->channel_db_addr == (_addr)) { 
\ + (_mhi_dev_ctxt)->mhi_ctrl_seg->mhi_cc_list[_index]. \ + mhi_trb_write_ptr = (_val); \ + } else if (mhi_dev_ctxt->event_db_addr == (_addr)) { \ + (_mhi_dev_ctxt)->mhi_ctrl_seg->mhi_ec_list[_index]. \ + mhi_event_write_ptr = (_val); \ + } \ + if (_addr == mhi_dev_ctxt->channel_db_addr) { \ + if (!(IS_HARDWARE_CHANNEL(_index) && \ + mhi_dev_ctxt->uldl_enabled && \ + !mhi_dev_ctxt->db_mode[_index])) { \ + wmb(); \ + word = HIGH_WORD((u64)(_val)); \ + writel_relaxed(word, _addr + offset + 4); \ + word = LOW_WORD((u64)(_val)); \ + writel_relaxed(word, _addr + offset); \ + wmb(); \ + mhi_dev_ctxt->db_mode[_index] = 0; \ + } \ + } else if (_addr == mhi_dev_ctxt->event_db_addr) { \ + if (IS_SOFTWARE_CHANNEL(_index) || \ + !mhi_dev_ctxt->uldl_enabled) { \ + wmb(); \ + word = HIGH_WORD((u64)(_val)); \ + writel_relaxed(word, _addr + offset + 4); \ + wmb(); \ + word = LOW_WORD((u64)(_val)); \ + writel_relaxed(word, _addr + offset); \ + wmb(); \ + mhi_dev_ctxt->db_mode[_index] = 0; \ + } \ + } else { \ + wmb(); \ + word = HIGH_WORD((u64)(_val)); \ + writel_relaxed(word, _addr + offset + 4); \ + wmb(); \ + word = LOW_WORD((u64)(_val)); \ + writel_relaxed(word, _addr + offset); \ + wmb(); \ + mhi_dev_ctxt->db_mode[_index] = 0; \ + } \ +} + +#define EVENT_RING_MSI_VEC +#define MHI_EVENT_RING_MSI_VEC__MASK (0xf) +#define MHI_EVENT_RING_MSI_VEC__SHIFT (2) +#define EVENT_RING_POLLING +#define MHI_EVENT_RING_POLLING__MASK (0x1) +#define MHI_EVENT_RING_POLLING__SHIFT (0) +#define EVENT_RING_STATE_FIELD +#define MHI_EVENT_RING_STATE_FIELD__MASK (0x1) +#define MHI_EVENT_RING_STATE_FIELD__SHIFT (1) + +#define MHI_SET_EVENT_RING_INFO(_FIELD, _PKT, _VAL) \ +{ \ + u32 new_val = (_PKT); \ + new_val &= (~((MHI_##_FIELD ## __MASK) << MHI_##_FIELD ## __SHIFT));\ + new_val |= _VAL << MHI_##_FIELD ## __SHIFT; \ + (_PKT) = new_val; \ +}; + +#define MHI_GET_EVENT_RING_INFO(_FIELD, _PKT, _DEST) \ +{ \ + _DEST = (_PKT); \ + _DEST &= (((MHI_##_FIELD ## __MASK) << MHI_##_FIELD ## __SHIFT));\ + _DEST 
>>= MHI_##_FIELD ## __SHIFT; \ +}; + +#define EVENT_CTXT_INTMODT +#define MHI_EVENT_CTXT_INTMODT__MASK (0xFFFF) +#define MHI_EVENT_CTXT_INTMODT__SHIFT (16) +#define MHI_SET_EV_CTXT(_FIELD, _CTXT, _VAL) \ +{ \ + u32 new_val = (_VAL << MHI_##_FIELD ## __SHIFT); \ + new_val &= (MHI_##_FIELD ## __MASK << MHI_##_FIELD ## __SHIFT); \ + (_CTXT)->mhi_intmodt &= (~((MHI_##_FIELD ## __MASK) << \ + MHI_##_FIELD ## __SHIFT)); \ + (_CTXT)->mhi_intmodt |= new_val; \ +}; + +#define MHI_GET_EV_CTXT(_FIELD, _CTXT) \ + (((_CTXT)->mhi_intmodt >> MHI_##_FIELD ## __SHIFT) & \ + MHI_##_FIELD ## __MASK) +#endif diff --git a/drivers/platform/msm/mhi/mhi_main.c b/drivers/platform/msm/mhi/mhi_main.c new file mode 100644 index 000000000000..b3f16a9f1f09 --- /dev/null +++ b/drivers/platform/msm/mhi/mhi_main.c @@ -0,0 +1,1368 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#include <linux/completion.h> +#include <linux/of_device.h> +#include <linux/of_platform.h> +#include <linux/of_gpio.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/msm-bus.h> +#include <linux/cpu.h> +#include <linux/kthread.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/completion.h> + +#include "mhi_sys.h" +#include "mhi.h" +#include "mhi_hwio.h" +#include "mhi_macros.h" + +int mhi_init_pcie_device(struct mhi_pcie_dev_info *mhi_pcie_dev) +{ + int ret_val = 0; + long int sleep_time = 100000; + struct pci_dev *pcie_device = + (struct pci_dev *)mhi_pcie_dev->pcie_device; + do { + ret_val = pci_enable_device(mhi_pcie_dev->pcie_device); + if (0 != ret_val) { + mhi_log(MHI_MSG_ERROR, + "Failed to enable pcie struct device ret_val %d\n", + ret_val); + mhi_log(MHI_MSG_ERROR, + "Sleeping for ~ %li uS, and retrying.\n", + sleep_time); + usleep(sleep_time); + } + } while (ret_val != 0); + + mhi_log(MHI_MSG_INFO, "Successfully enabled pcie device.\n"); + + mhi_pcie_dev->core.bar0_base = + ioremap_nocache(pci_resource_start(pcie_device, 0), + pci_resource_len(pcie_device, 0)); + if (!mhi_pcie_dev->core.bar0_base) { + mhi_log(MHI_MSG_ERROR, + "Failed to map bar 0 addr 0x%x len 0x%x.\n", + pci_resource_start(pcie_device, 0), + pci_resource_len(pcie_device, 0)); + goto mhi_device_list_error; + } + + mhi_pcie_dev->core.bar0_end = mhi_pcie_dev->core.bar0_base + + pci_resource_len(pcie_device, 0); + mhi_pcie_dev->core.bar2_base = + ioremap_nocache(pci_resource_start(pcie_device, 2), + pci_resource_len(pcie_device, 2)); + if (!mhi_pcie_dev->core.bar2_base) { + mhi_log(MHI_MSG_ERROR, + "Failed to map bar 2 addr 0x%x len 0x%x.\n", + pci_resource_start(pcie_device, 2), + pci_resource_len(pcie_device, 2)); + goto io_map_err; + } + + mhi_pcie_dev->core.bar2_end = mhi_pcie_dev->core.bar2_base + + pci_resource_len(pcie_device, 2); + + if (!mhi_pcie_dev->core.bar0_base) { + mhi_log(MHI_MSG_ERROR, + "Failed to register for pcie 
resources\n"); + goto mhi_pcie_read_ep_config_err; + } + + mhi_log(MHI_MSG_INFO, "Device BAR0 address is at 0x%p\n", + mhi_pcie_dev->core.bar0_base); + ret_val = pci_request_region(pcie_device, 0, "mhi"); + if (ret_val) + mhi_log(MHI_MSG_ERROR, "Could not request BAR0 region\n"); + + mhi_pcie_dev->core.manufact_id = pcie_device->vendor; + mhi_pcie_dev->core.dev_id = pcie_device->device; + + if (mhi_pcie_dev->core.manufact_id != MHI_PCIE_VENDOR_ID || + mhi_pcie_dev->core.dev_id != MHI_PCIE_DEVICE_ID) { + mhi_log(MHI_MSG_ERROR, "Incorrect device/manufacturer ID\n"); + goto cfg_err; + } + return 0; +cfg_err: + iounmap((void *)mhi_pcie_dev->core.bar2_base); +io_map_err: + iounmap((void *)mhi_pcie_dev->core.bar0_base); +mhi_device_list_error: + pci_disable_device(pcie_device); +mhi_pcie_read_ep_config_err: + return -EIO; +} + +static void mhi_move_interrupts(struct mhi_device_ctxt *mhi_dev_ctxt, u32 cpu) +{ + u32 irq_to_affin = 0; + + MHI_GET_EVENT_RING_INFO(EVENT_RING_MSI_VEC, + mhi_dev_ctxt->ev_ring_props[IPA_IN_EV_RING], irq_to_affin); + irq_to_affin += mhi_dev_ctxt->dev_props->irq_base; + irq_set_affinity(irq_to_affin, get_cpu_mask(cpu)); + MHI_GET_EVENT_RING_INFO(EVENT_RING_MSI_VEC, + mhi_dev_ctxt->ev_ring_props[IPA_OUT_EV_RING], irq_to_affin); + irq_to_affin += mhi_dev_ctxt->dev_props->irq_base; + irq_set_affinity(irq_to_affin, get_cpu_mask(cpu)); +} + +int mhi_cpu_notifier_cb(struct notifier_block *nfb, unsigned long action, + void *hcpu) +{ + u32 cpu = (u32)hcpu; + struct mhi_device_ctxt *mhi_dev_ctxt = container_of(nfb, + struct mhi_device_ctxt, + mhi_cpu_notifier); + + switch (action) { + case CPU_ONLINE: + if (cpu > 0) + mhi_move_interrupts(mhi_dev_ctxt, cpu); + break; + + case CPU_DEAD: + for_each_online_cpu(cpu) { + if (cpu > 0) { + mhi_move_interrupts(mhi_dev_ctxt, cpu); + break; + } + } + break; + default: + break; + } + return NOTIFY_OK; +} + +int mhi_init_gpios(struct mhi_pcie_dev_info *mhi_pcie_dev) +{ + int ret_val = 0; + struct device *dev = 
&mhi_pcie_dev->pcie_device->dev; + struct device_node *np; + + np = dev->of_node; + mhi_log(MHI_MSG_VERBOSE, + "Attempting to grab DEVICE_WAKE gpio\n"); + ret_val = of_get_named_gpio(np, "mhi-device-wake-gpio", 0); + switch (ret_val) { + case -EPROBE_DEFER: + mhi_log(MHI_MSG_VERBOSE, "DT is not ready\n"); + return ret_val; + case -ENOENT: + mhi_log(MHI_MSG_ERROR, "Failed to find device wake gpio\n"); + return ret_val; + case 0: + mhi_log(MHI_MSG_CRITICAL, + "Could not get gpio from struct device tree!\n"); + return -EIO; + default: + mhi_pcie_dev->core.device_wake_gpio = ret_val; + mhi_log(MHI_MSG_CRITICAL, + "Got DEVICE_WAKE GPIO nr 0x%x from struct device tree\n", + mhi_pcie_dev->core.device_wake_gpio); + break; + } + + ret_val = gpio_request(mhi_pcie_dev->core.device_wake_gpio, "mhi"); + if (ret_val) { + mhi_log(MHI_MSG_CRITICAL, + "Could not obtain struct device WAKE gpio\n"); + return ret_val; + } + mhi_log(MHI_MSG_VERBOSE, + "Attempting to set output direction to DEVICE_WAKE gpio\n"); + /* This GPIO must never sleep as it can be set in timer ctxt */ + gpio_set_value_cansleep(mhi_pcie_dev->core.device_wake_gpio, 0); + + ret_val = gpio_direction_output(mhi_pcie_dev->core.device_wake_gpio, 1); + + if (ret_val) { + mhi_log(MHI_MSG_VERBOSE, + "Failed to set output direction of DEVICE_WAKE gpio\n"); + goto mhi_gpio_dir_err; + } + return 0; + +mhi_gpio_dir_err: + gpio_free(mhi_pcie_dev->core.device_wake_gpio); + return -EIO; +} + +enum MHI_STATUS mhi_open_channel(struct mhi_client_handle *client_handle) +{ + enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; + int chan = 0; + struct mhi_device_ctxt *mhi_dev_ctxt; + struct mhi_control_seg *mhi_ctrl_seg = NULL; + + if (NULL == client_handle || + client_handle->magic != MHI_HANDLE_MAGIC) + return MHI_STATUS_ERROR; + chan = client_handle->chan; + mhi_log(MHI_MSG_INFO, + "Entered: Client opening chan 0x%x\n", chan); + mhi_dev_ctxt = client_handle->mhi_dev_ctxt; + switch (mhi_dev_ctxt->dev_exec_env) { + case MHI_EXEC_ENV_PBL: 
+ mhi_log(MHI_MSG_INFO, + "Chan %d MHI exec_env %d, not ready!\n", + chan, mhi_dev_ctxt->dev_exec_env); + return MHI_STATUS_DEVICE_NOT_READY; + break; + case MHI_EXEC_ENV_SBL: + if (chan != MHI_CLIENT_SAHARA_OUT && + chan != MHI_CLIENT_SAHARA_IN) { + mhi_log(MHI_MSG_INFO, + "Chan %d, MHI exec_env %d, not ready!\n", + chan, mhi_dev_ctxt->dev_exec_env); + return MHI_STATUS_DEVICE_NOT_READY; + } + case MHI_EXEC_ENV_AMSS: + default: + break; + } + mhi_ctrl_seg = client_handle->mhi_dev_ctxt->mhi_ctrl_seg; + + client_handle->event_ring_index = + mhi_ctrl_seg->mhi_cc_list[chan].mhi_event_ring_index; + client_handle->msi_vec = + mhi_ctrl_seg->mhi_ec_list[ + client_handle->event_ring_index].mhi_msi_vector; + ret_val = start_chan_sync(client_handle); + if (MHI_STATUS_SUCCESS != ret_val) + mhi_log(MHI_MSG_ERROR, + "Failed to start chan 0x%x\n", chan); + client_handle->chan_status = 1; + mhi_log(MHI_MSG_INFO, + "Exited chan 0x%x\n", chan); + return ret_val; +} +EXPORT_SYMBOL(mhi_open_channel); + +enum MHI_STATUS mhi_register_channel(struct mhi_client_handle **client_handle, + enum MHI_CLIENT_CHANNEL chan, s32 device_index, + struct mhi_client_info_t *client_info, void *UserData) +{ + enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; + struct mhi_device_ctxt *mhi_dev_ctxt = + &(mhi_devices.device_list[device_index].mhi_ctxt); + + if (!VALID_CHAN_NR(chan)) { + ret_val = MHI_STATUS_INVALID_CHAN_ERR; + goto error_handle; + } + + if (NULL == client_handle || device_index < 0) { + ret_val = MHI_STATUS_ERROR; + goto error_handle; + } + if (NULL != mhi_dev_ctxt->client_handle_list[chan]) + return MHI_STATUS_ALREADY_REGISTERED; + + mhi_log(MHI_MSG_INFO, + "Opened channel 0x%x for client\n", chan); + + *client_handle = kmalloc(sizeof(struct mhi_client_handle), GFP_KERNEL); + if (NULL == *client_handle) { + ret_val = MHI_STATUS_ALLOC_ERROR; + goto error_handle; + } + memset(*client_handle, 0, sizeof(struct mhi_client_handle)); + (*client_handle)->chan = chan; + (*client_handle)->mhi_dev_ctxt 
= + &mhi_devices.device_list[device_index].mhi_ctxt; + + + (*client_handle)->mhi_dev_ctxt->client_handle_list[chan] = + *client_handle; + if (NULL != client_info) + (*client_handle)->client_info = *client_info; + + (*client_handle)->user_data = UserData; + + init_completion(&(*client_handle)->chan_reset_complete); + init_completion(&(*client_handle)->chan_open_complete); + + (*client_handle)->cb_mod = 1; + (*client_handle)->chan_status = 0; + (*client_handle)->magic = MHI_HANDLE_MAGIC; + + if (MHI_CLIENT_IP_HW_0_OUT == chan) + (*client_handle)->intmod_t = 10; + if (MHI_CLIENT_IP_HW_0_IN == chan) + (*client_handle)->intmod_t = 10; + + if (mhi_dev_ctxt->dev_exec_env == MHI_EXEC_ENV_AMSS) { + mhi_log(MHI_MSG_INFO, + "Exec env is AMSS notifing client now chan: 0x%x\n", + chan); + mhi_notify_client(*client_handle, MHI_CB_MHI_ENABLED); + } + + mhi_log(MHI_MSG_VERBOSE, + "Successfuly registered chan 0x%x\n", chan); + return ret_val; + +error_handle: + return ret_val; +} +EXPORT_SYMBOL(mhi_register_channel); + +void mhi_close_channel(struct mhi_client_handle *mhi_handle) +{ + u32 index = 0; + u32 chan = 0; + int r = 0; + enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; + if (NULL == mhi_handle || + mhi_handle->magic != MHI_HANDLE_MAGIC) + return; + chan = mhi_handle->chan; + mhi_log(MHI_MSG_INFO, "Client attempting to close chan 0x%x\n", chan); + index = mhi_handle->device_index; + if (!atomic_read(&mhi_handle->mhi_dev_ctxt->flags.pending_ssr)) { + ret_val = mhi_send_cmd(mhi_handle->mhi_dev_ctxt, + MHI_COMMAND_RESET_CHAN, chan); + if (ret_val != MHI_STATUS_SUCCESS) { + mhi_log(MHI_MSG_ERROR, + "Failed to send reset cmd for chan %d ret %d\n", + chan, ret_val); + } + r = wait_for_completion_interruptible_timeout( + &mhi_handle->chan_reset_complete, + msecs_to_jiffies(MHI_MAX_CMD_TIMEOUT)); + + if (0 == r || -ERESTARTSYS == r) { + mhi_log(MHI_MSG_ERROR, + "Failed to reset chan %d ret %d\n", + chan, r); + } + } else { + /* + * Assumption: Device is not playing with our + * 
buffers after BEFORE_SHUTDOWN + */ + mhi_log(MHI_MSG_INFO, + "Pending SSR local free only chan %d.\n", chan); + } + + mhi_log(MHI_MSG_INFO, "Chan 0x%x confirmed closed.\n", chan); + atomic_dec(&(mhi_devices.device_list[index].ref_count)); + mhi_handle->chan_status = 0; +} +EXPORT_SYMBOL(mhi_close_channel); + +void ring_ev_db(struct mhi_device_ctxt *mhi_dev_ctxt, u32 event_ring_index) +{ + struct mhi_ring *event_ctxt = NULL; + u64 db_value = 0; + event_ctxt = + &mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index]; + db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, + (uintptr_t)event_ctxt->wp); + MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->event_db_addr, + event_ring_index, db_value); +} + +enum MHI_STATUS mhi_add_elements_to_event_rings( + struct mhi_device_ctxt *mhi_dev_ctxt, + enum STATE_TRANSITION new_state) +{ + enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; + enum MHI_EVENT_RING_STATE event_ring_state = MHI_EVENT_RING_UINIT; + switch (new_state) { + case STATE_TRANSITION_READY: + MHI_GET_EVENT_RING_INFO(EVENT_RING_STATE_FIELD, + mhi_dev_ctxt->ev_ring_props[PRIMARY_EVENT_RING], + event_ring_state); + ret_val = mhi_init_event_ring(mhi_dev_ctxt, + EV_EL_PER_RING, + mhi_dev_ctxt-> + alloced_ev_rings[PRIMARY_EVENT_RING]); + if (MHI_STATUS_SUCCESS != ret_val) { + + mhi_log(MHI_MSG_ERROR, + "Failed to add ev el on event ring\n"); + return MHI_STATUS_ERROR; + } + MHI_SET_EVENT_RING_INFO(EVENT_RING_STATE_FIELD, + mhi_dev_ctxt-> + ev_ring_props[PRIMARY_EVENT_RING], + MHI_EVENT_RING_INIT); + mhi_log(MHI_MSG_ERROR, + "Event ring initialized ringing, EV DB to resume\n"); + ring_ev_db(mhi_dev_ctxt, + mhi_dev_ctxt->alloced_ev_rings[PRIMARY_EVENT_RING]); + break; + case STATE_TRANSITION_AMSS: + MHI_GET_EVENT_RING_INFO(EVENT_RING_STATE_FIELD, + mhi_dev_ctxt->ev_ring_props[IPA_OUT_EV_RING], + event_ring_state); + if (MHI_EVENT_RING_UINIT == event_ring_state) { + ret_val = mhi_init_event_ring(mhi_dev_ctxt, + EV_EL_PER_RING, + mhi_dev_ctxt-> + 
alloced_ev_rings[IPA_OUT_EV_RING]); + if (MHI_STATUS_SUCCESS != ret_val) { + mhi_log(MHI_MSG_ERROR, + "Failed to add ev el on event ring\n"); + return MHI_STATUS_ERROR; + } + ret_val = mhi_init_event_ring(mhi_dev_ctxt, + EV_EL_PER_RING, + mhi_dev_ctxt->alloced_ev_rings[IPA_IN_EV_RING]); + if (MHI_STATUS_SUCCESS != ret_val) { + mhi_log(MHI_MSG_ERROR, + "Failed to add ev el on event ring\n"); + return MHI_STATUS_ERROR; + } + MHI_SET_EVENT_RING_INFO(EVENT_RING_STATE_FIELD, + mhi_dev_ctxt->ev_ring_props[IPA_OUT_EV_RING], + MHI_EVENT_RING_INIT); + MHI_SET_EVENT_RING_INFO(EVENT_RING_STATE_FIELD, + mhi_dev_ctxt->ev_ring_props[IPA_IN_EV_RING], + MHI_EVENT_RING_INIT); + } + ring_ev_db(mhi_dev_ctxt, + mhi_dev_ctxt->alloced_ev_rings[SOFTWARE_EV_RING]); + ring_ev_db(mhi_dev_ctxt, + mhi_dev_ctxt->alloced_ev_rings[IPA_OUT_EV_RING]); + ring_ev_db(mhi_dev_ctxt, + mhi_dev_ctxt->alloced_ev_rings[IPA_IN_EV_RING]); + break; + default: + mhi_log(MHI_MSG_ERROR, + "Unrecognized event stage, %d\n", new_state); + ret_val = MHI_STATUS_ERROR; + break; + } + return ret_val; +} + +static enum MHI_STATUS mhi_wake_dev_from_m3( + struct mhi_device_ctxt *mhi_dev_ctxt) +{ + int r = 0; + if (!atomic_cmpxchg(&mhi_dev_ctxt->flags.m0_work_enabled, 0, 1)) { + mhi_log(MHI_MSG_INFO, + "Initiating M0 work...\n"); + if (atomic_read(&mhi_dev_ctxt->flags.pending_resume)) { + mhi_log(MHI_MSG_INFO, + "Resume is pending, quitting ...\n"); + atomic_set(&mhi_dev_ctxt->flags.m0_work_enabled, 0); + __pm_stay_awake(&mhi_dev_ctxt->w_lock); + __pm_relax(&mhi_dev_ctxt->w_lock); + return MHI_STATUS_SUCCESS; + } + r = queue_work(mhi_dev_ctxt->work_queue, + &mhi_dev_ctxt->m0_work); + if (!r) + mhi_log(MHI_MSG_CRITICAL, + "Failed to start M0 work.\n"); + } else { + mhi_log(MHI_MSG_VERBOSE, + "M0 work pending.\n"); + } + return MHI_STATUS_SUCCESS; +} + +static enum MHI_STATUS mhi_notify_device( + struct mhi_device_ctxt *mhi_dev_ctxt, u32 chan) +{ + unsigned long flags = 0; + u64 db_value; + struct mhi_chan_ctxt *chan_ctxt; + 
chan_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan]; + spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan], flags); + if (likely(((MHI_STATE_M0 == mhi_dev_ctxt->mhi_state) || + (MHI_STATE_M1 == mhi_dev_ctxt->mhi_state)) && + (chan_ctxt->mhi_chan_state != MHI_CHAN_STATE_ERROR) && + !mhi_dev_ctxt->flags.pending_M3)) { + + mhi_dev_ctxt->mhi_chan_db_order[chan]++; + db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, + (uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp); + if (IS_HARDWARE_CHANNEL(chan) && (chan % 2)) { + if (unlikely(mhi_xfer_db_interval != 0)) { + if ((mhi_dev_ctxt-> + mhi_chan_cntr[chan].pkts_xferd % + mhi_xfer_db_interval) == 0) { + MHI_WRITE_DB(mhi_dev_ctxt, + mhi_dev_ctxt->channel_db_addr, + chan, db_value); + } + } else { + if ((mhi_dev_ctxt-> + mhi_chan_cntr[chan].pkts_xferd % + MHI_XFER_DB_INTERVAL) == 0) { + MHI_WRITE_DB(mhi_dev_ctxt, + mhi_dev_ctxt->channel_db_addr, + chan, db_value); + } + } + } else { + MHI_WRITE_DB(mhi_dev_ctxt, + mhi_dev_ctxt->channel_db_addr, + chan, db_value); + } + } else { + mhi_log(MHI_MSG_VERBOSE, + "Wakeup, pending data MHI state %d, chan state %d\n", + mhi_dev_ctxt->mhi_state, chan_ctxt->mhi_chan_state); + if (mhi_dev_ctxt->flags.pending_M3 || + mhi_dev_ctxt->mhi_state == MHI_STATE_M3) { + mhi_wake_dev_from_m3(mhi_dev_ctxt); + } + } + spin_unlock_irqrestore(&mhi_dev_ctxt->db_write_lock[chan], flags); + /* + * If there are no clients still sending we can trigger our + * inactivity timer + */ + return MHI_STATUS_SUCCESS; +} + +enum MHI_STATUS mhi_queue_xfer(struct mhi_client_handle *client_handle, + dma_addr_t buf, size_t buf_len, enum MHI_FLAGS mhi_flags) +{ + union mhi_xfer_pkt *pkt_loc; + enum MHI_STATUS ret_val; + enum MHI_CLIENT_CHANNEL chan; + struct mhi_device_ctxt *mhi_dev_ctxt; + unsigned long flags; + uintptr_t trb_index; + + if (NULL == client_handle || !VALID_CHAN_NR(client_handle->chan) || + 0 == buf || 0 == buf_len) { + mhi_log(MHI_MSG_CRITICAL, "Bad input args\n"); + return 
MHI_STATUS_ERROR; + } + MHI_ASSERT(VALID_BUF(buf, buf_len), + "Client buffer is of invalid length\n"); + mhi_dev_ctxt = client_handle->mhi_dev_ctxt; + chan = client_handle->chan; + + /* Bump up the vote for pending data */ + read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags); + + atomic_inc(&mhi_dev_ctxt->flags.data_pending); + mhi_dev_ctxt->counters.m1_m0++; + if (mhi_dev_ctxt->flags.link_up) + mhi_assert_device_wake(mhi_dev_ctxt); + read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags); + + /* Add the TRB to the correct transfer ring */ + ret_val = ctxt_add_element(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan], + (void *)&pkt_loc); + if (unlikely(MHI_STATUS_SUCCESS != ret_val)) { + mhi_log(MHI_MSG_CRITICAL, + "Failed to insert trb in xfer ring\n"); + goto error; + } + + pkt_loc->data_tx_pkt.buffer_ptr = buf; + + get_element_index(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan], + pkt_loc, &trb_index); + + pkt_loc->type.info = mhi_flags; + if (likely(0 != client_handle->intmod_t)) + MHI_TRB_SET_INFO(TX_TRB_BEI, pkt_loc, 1); + else + MHI_TRB_SET_INFO(TX_TRB_BEI, pkt_loc, 0); + + MHI_TRB_SET_INFO(TX_TRB_TYPE, pkt_loc, MHI_PKT_TYPE_TRANSFER); + MHI_TX_TRB_SET_LEN(TX_TRB_LEN, pkt_loc, buf_len); + mhi_log(MHI_MSG_VERBOSE, + "Channel %d Has buf size of %d and buf addr %lx, flags 0x%x\n", + chan, buf_len, (uintptr_t)buf, mhi_flags); + + if (chan % 2 == 0) { + atomic_inc(&mhi_dev_ctxt->counters.outbound_acks); + mhi_log(MHI_MSG_VERBOSE, + "Queued outbound pkt. 
Pending Acks %d\n", + atomic_read(&mhi_dev_ctxt->counters.outbound_acks)); + } + mhi_notify_device(mhi_dev_ctxt, chan); + atomic_dec(&mhi_dev_ctxt->flags.data_pending); + return MHI_STATUS_SUCCESS; +error: + atomic_dec(&mhi_dev_ctxt->flags.data_pending); + return ret_val; +} +EXPORT_SYMBOL(mhi_queue_xfer); + +enum MHI_STATUS mhi_send_cmd(struct mhi_device_ctxt *mhi_dev_ctxt, + enum MHI_COMMAND cmd, u32 chan) +{ + u64 db_value = 0; + union mhi_cmd_pkt *cmd_pkt = NULL; + enum MHI_CHAN_STATE from_state = MHI_CHAN_STATE_DISABLED; + enum MHI_CHAN_STATE to_state = MHI_CHAN_STATE_DISABLED; + enum MHI_PKT_TYPE ring_el_type = MHI_PKT_TYPE_NOOP_CMD; + struct mutex *chan_mutex = NULL; + + mhi_log(MHI_MSG_INFO, + "Entered, MHI state %d dev_exec_env %d chan %d cmd %d\n", + mhi_dev_ctxt->mhi_state, + mhi_dev_ctxt->dev_exec_env, + chan, cmd); + if (chan >= MHI_MAX_CHANNELS || + cmd >= MHI_COMMAND_MAX_NR || NULL == mhi_dev_ctxt) { + mhi_log(MHI_MSG_ERROR, + "Invalid channel id, received id: 0x%x", chan); + goto error_general; + } + mhi_assert_device_wake(mhi_dev_ctxt); + /* + * If there is a cmd pending a struct device confirmation, + * do not send anymore for this channel + */ + if (MHI_CMD_PENDING == mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan]) + return MHI_STATUS_CMD_PENDING; + + from_state = + mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan].mhi_chan_state; + + switch (cmd) { + case MHI_COMMAND_NOOP: + ring_el_type = MHI_PKT_TYPE_NOOP_CMD; + break; + case MHI_COMMAND_RESET_CHAN: + to_state = MHI_CHAN_STATE_DISABLED; + ring_el_type = MHI_PKT_TYPE_RESET_CHAN_CMD; + break; + case MHI_COMMAND_START_CHAN: + switch (from_state) { + case MHI_CHAN_STATE_ENABLED: + case MHI_CHAN_STATE_STOP: + to_state = MHI_CHAN_STATE_RUNNING; + break; + default: + mhi_log(MHI_MSG_ERROR, + "Invalid state transition for " + "cmd 0x%x, from_state 0x%x\n", + cmd, from_state); + goto error_general; + } + ring_el_type = MHI_PKT_TYPE_START_CHAN_CMD; + break; + case MHI_COMMAND_STOP_CHAN: + switch (from_state) { + 
case MHI_CHAN_STATE_RUNNING: + case MHI_CHAN_STATE_SUSPENDED: + to_state = MHI_CHAN_STATE_STOP; + break; + default: + mhi_log(MHI_MSG_ERROR, + "Invalid state transition for " + "cmd 0x%x, from_state 0x%x\n", + cmd, from_state); + goto error_invalid; + } + ring_el_type = MHI_PKT_TYPE_STOP_CHAN_CMD; + break; + default: + mhi_log(MHI_MSG_ERROR, "Bad command received\n"); + } + + mutex_lock(&mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING]); + + if (MHI_STATUS_SUCCESS != + ctxt_add_element(mhi_dev_ctxt->mhi_local_cmd_ctxt, + (void *)&cmd_pkt)) { + mhi_log(MHI_MSG_ERROR, "Failed to insert element\n"); + goto error_general; + } + chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan]; + if (MHI_COMMAND_NOOP != cmd) { + mutex_lock(chan_mutex); + MHI_TRB_SET_INFO(CMD_TRB_TYPE, cmd_pkt, ring_el_type); + MHI_TRB_SET_INFO(CMD_TRB_CHID, cmd_pkt, chan); + mutex_unlock(chan_mutex); + } + db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, + (uintptr_t)mhi_dev_ctxt->mhi_local_cmd_ctxt->wp); + mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] = MHI_CMD_PENDING; + + if (MHI_STATE_M0 == mhi_dev_ctxt->mhi_state || + MHI_STATE_M1 == mhi_dev_ctxt->mhi_state) { + mhi_dev_ctxt->cmd_ring_order++; + MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->cmd_db_addr, 0, + db_value); + } else { + mhi_log(MHI_MSG_INFO, + "Waking dev from M3 for cmd %d on chan %d\n", + cmd, chan); + mhi_wake_dev_from_m3(mhi_dev_ctxt); + } + + mhi_log(MHI_MSG_VERBOSE, "Sent command 0x%x for chan %d\n", + cmd, chan); + mutex_unlock(&mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING]); + + mhi_log(MHI_MSG_INFO, "Exited.\n"); + return MHI_STATUS_SUCCESS; + +error_general: + mutex_unlock(&mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING]); +error_invalid: + mhi_log(MHI_MSG_INFO, "Exited due to error.\n"); + return MHI_STATUS_ERROR; +} + +static enum MHI_STATUS parse_outbound(struct mhi_device_ctxt *mhi_dev_ctxt, + u32 chan, union mhi_xfer_pkt *local_ev_trb_loc, u16 xfer_len) +{ + struct mhi_result *result = NULL; + enum MHI_STATUS 
ret_val = 0; + struct mhi_client_handle *client_handle = NULL; + struct mhi_ring *local_chan_ctxt = NULL; + struct mhi_cb_info cb_info; + local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan]; + client_handle = mhi_dev_ctxt->client_handle_list[chan]; + + /* If ring is empty */ + if (mhi_dev_ctxt->mhi_local_chan_ctxt[chan].rp == + mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp) { + mhi_dev_ctxt->mhi_chan_cntr[chan].empty_ring_removal++; + mhi_wait_for_mdm(mhi_dev_ctxt); + return mhi_send_cmd(mhi_dev_ctxt, + MHI_COMMAND_RESET_CHAN, + chan); + } + + if (NULL != client_handle) { + result = &mhi_dev_ctxt->client_handle_list[chan]->result; + + if (NULL != (&client_handle->client_info.mhi_client_cb) && + (0 == (client_handle->pkt_count % client_handle->cb_mod))) { + cb_info.cb_reason = MHI_CB_XFER; + cb_info.result = &client_handle->result; + cb_info.result->transaction_status = + MHI_STATUS_SUCCESS; + cb_info.chan = chan; + client_handle->client_info.mhi_client_cb(&cb_info); + } + } + ret_val = ctxt_del_element(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan], + NULL); + atomic_dec(&mhi_dev_ctxt->counters.outbound_acks); + mhi_log(MHI_MSG_VERBOSE, + "Processed outbound ack chan %d Pending acks %d.\n", + chan, atomic_read(&mhi_dev_ctxt->counters.outbound_acks)); + return MHI_STATUS_SUCCESS; +} + +static enum MHI_STATUS parse_inbound(struct mhi_device_ctxt *mhi_dev_ctxt, + u32 chan, union mhi_xfer_pkt *local_ev_trb_loc, u16 xfer_len) +{ + struct mhi_client_handle *client_handle; + struct mhi_ring *local_chan_ctxt; + struct mhi_result *result; + struct mhi_cb_info cb_info; + + client_handle = mhi_dev_ctxt->client_handle_list[chan]; + local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan]; + + if (unlikely(mhi_dev_ctxt->mhi_local_chan_ctxt[chan].rp == + mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp)) { + mhi_dev_ctxt->mhi_chan_cntr[chan].empty_ring_removal++; + mhi_wait_for_mdm(mhi_dev_ctxt); + return mhi_send_cmd(mhi_dev_ctxt, + MHI_COMMAND_RESET_CHAN, + chan); + } + + if (NULL 
!= mhi_dev_ctxt->client_handle_list[chan]) + result = &mhi_dev_ctxt->client_handle_list[chan]->result; + + /* If a client is registered */ + if (unlikely(IS_SOFTWARE_CHANNEL(chan))) { + MHI_TX_TRB_SET_LEN(TX_TRB_LEN, + local_ev_trb_loc, + xfer_len); + ctxt_del_element(local_chan_ctxt, NULL); + if (NULL != client_handle->client_info.mhi_client_cb && + (0 == (client_handle->pkt_count % client_handle->cb_mod))) { + cb_info.cb_reason = MHI_CB_XFER; + cb_info.result = &client_handle->result; + cb_info.result->transaction_status = + MHI_STATUS_SUCCESS; + cb_info.chan = chan; + client_handle->client_info.mhi_client_cb(&cb_info); + } + } else { + /* IN Hardware channel with no client + * registered, we are done with this TRB*/ + if (likely(NULL != client_handle)) { + ctxt_del_element(local_chan_ctxt, NULL); + /* A client is not registred for this IN channel */ + } else {/* Hardware Channel, no client registerered, + drop data */ + recycle_trb_and_ring(mhi_dev_ctxt, + &mhi_dev_ctxt->mhi_local_chan_ctxt[chan], + MHI_RING_TYPE_XFER_RING, + chan); + } + } + return MHI_STATUS_SUCCESS; +} + +static enum MHI_STATUS validate_xfer_el_addr(struct mhi_chan_ctxt *ring, + uintptr_t addr) +{ + return (addr < (ring->mhi_trb_ring_base_addr) || + addr > (ring->mhi_trb_ring_base_addr) + + (ring->mhi_trb_ring_len - 1)) ? 
+        MHI_STATUS_ERROR : MHI_STATUS_SUCCESS;
+}
+
+/*
+ * Dispatch a transfer-completion event: walk the TRBs the event
+ * acknowledges and hand them to the inbound/outbound parsers.
+ */
+enum MHI_STATUS parse_xfer_event(struct mhi_device_ctxt *ctxt,
+        union mhi_event_pkt *event)
+{
+    struct mhi_device_ctxt *mhi_dev_ctxt = (struct mhi_device_ctxt *)ctxt;
+    struct mhi_result *result;
+    u32 chan = MHI_MAX_CHANNELS;
+    u16 xfer_len;
+    uintptr_t phy_ev_trb_loc;
+    union mhi_xfer_pkt *local_ev_trb_loc;
+    struct mhi_client_handle *client_handle;
+    union mhi_xfer_pkt *local_trb_loc;
+    struct mhi_chan_ctxt *chan_ctxt;
+    u32 nr_trb_to_parse;
+    /*
+     * Counter for the EOT parse loop; must start at 0.  It was
+     * previously left uninitialized, making the loop bound
+     * "i <= nr_trb_to_parse" undefined behavior.
+     */
+    u32 i = 0;
+
+    switch (MHI_EV_READ_CODE(EV_TRB_CODE, event)) {
+    case MHI_EVENT_CC_EOB:
+        chan = MHI_EV_READ_CHID(EV_CHID, event);
+        phy_ev_trb_loc = MHI_EV_READ_PTR(EV_PTR, event);
+        mhi_log(MHI_MSG_VERBOSE, "IEOB condition detected\n");
+        /* fallthrough: EOB is processed like EOT below */
+    case MHI_EVENT_CC_OVERFLOW:
+        chan = MHI_EV_READ_CHID(EV_CHID, event);
+        phy_ev_trb_loc = MHI_EV_READ_PTR(EV_PTR, event);
+        mhi_log(MHI_MSG_VERBOSE,
+            "Overflow condition detected chan %d, ptr 0x%lx\n",
+            chan, phy_ev_trb_loc);
+        /* fallthrough: overflow is processed like EOT below */
+    case MHI_EVENT_CC_EOT:
+    {
+        dma_addr_t trb_data_loc;
+        u32 ieot_flag;
+        enum MHI_STATUS ret_val;
+        struct mhi_ring *local_chan_ctxt;
+
+        chan = MHI_EV_READ_CHID(EV_CHID, event);
+        /* Validate chan BEFORE using it as an array index */
+        if (unlikely(!VALID_CHAN_NR(chan))) {
+            mhi_log(MHI_MSG_ERROR, "Bad ring id.\n");
+            break;
+        }
+        local_chan_ctxt =
+            &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+        phy_ev_trb_loc = MHI_EV_READ_PTR(EV_PTR, event);
+
+        chan_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan];
+        ret_val = validate_xfer_el_addr(chan_ctxt,
+                        phy_ev_trb_loc);
+
+        if (unlikely(MHI_STATUS_SUCCESS != ret_val)) {
+            mhi_log(MHI_MSG_ERROR, "Bad event trb ptr.\n");
+            break;
+        }
+
+        /* Get the TRB this event points to */
+        local_ev_trb_loc =
+            (union mhi_xfer_pkt *)mhi_p2v_addr(
+                    mhi_dev_ctxt->mhi_ctrl_seg_info,
+                            phy_ev_trb_loc);
+        local_trb_loc = (union mhi_xfer_pkt *)local_chan_ctxt->rp;
+
+        ret_val = get_nr_enclosed_el(local_chan_ctxt,
+                    local_trb_loc,
+                    local_ev_trb_loc,
+                    &nr_trb_to_parse);
+        if (unlikely(MHI_STATUS_SUCCESS != ret_val)) {
+ 
mhi_log(MHI_MSG_CRITICAL, + "Failed to get nr available trbs ret: %d.\n", + ret_val); + return MHI_STATUS_ERROR; + } + do { + u64 phy_buf_loc; + MHI_TRB_GET_INFO(TX_TRB_IEOT, local_trb_loc, ieot_flag); + phy_buf_loc = local_trb_loc->data_tx_pkt.buffer_ptr; + trb_data_loc = (dma_addr_t)phy_buf_loc; + if (chan % 2) + xfer_len = MHI_EV_READ_LEN(EV_LEN, event); + else + xfer_len = MHI_TX_TRB_GET_LEN(TX_TRB_LEN, + local_trb_loc); + + if (!VALID_BUF(trb_data_loc, xfer_len)) { + mhi_log(MHI_MSG_CRITICAL, + "Bad buffer ptr: %lx.\n", + (uintptr_t)trb_data_loc); + return MHI_STATUS_ERROR; + } + + client_handle = mhi_dev_ctxt->client_handle_list[chan]; + if (NULL != client_handle) { + client_handle->pkt_count++; + result = &client_handle->result; + result->payload_buf = trb_data_loc; + result->bytes_xferd = xfer_len; + result->user_data = client_handle->user_data; + } + if (chan % 2) { + parse_inbound(mhi_dev_ctxt, chan, + local_ev_trb_loc, xfer_len); + } else { + parse_outbound(mhi_dev_ctxt, chan, + local_ev_trb_loc, xfer_len); + } + mhi_dev_ctxt->mhi_chan_cntr[chan].pkts_xferd++; + if (local_trb_loc == + (union mhi_xfer_pkt *)local_chan_ctxt->rp) { + mhi_log(MHI_MSG_CRITICAL, + "Done. 
Processed until: %lx.\n", + (uintptr_t)trb_data_loc); + break; + } else { + local_trb_loc = + (union mhi_xfer_pkt *)local_chan_ctxt-> + rp; + } + i++; + } while (i <= nr_trb_to_parse); + break; + } /* CC_EOT */ + case MHI_EVENT_CC_OOB: + case MHI_EVENT_CC_DB_MODE: + { + struct mhi_ring *chan_ctxt = NULL; + u64 db_value = 0; + mhi_dev_ctxt->uldl_enabled = 1; + chan = MHI_EV_READ_CHID(EV_CHID, event); + mhi_dev_ctxt->db_mode[chan] = 1; + chan_ctxt = + &mhi_dev_ctxt->mhi_local_chan_ctxt[chan]; + mhi_log(MHI_MSG_INFO, "DB_MODE/OOB Detected chan %d.\n", chan); + if (chan_ctxt->wp != chan_ctxt->rp) { + db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, + (uintptr_t)chan_ctxt->wp); + MHI_WRITE_DB(mhi_dev_ctxt, + mhi_dev_ctxt->channel_db_addr, chan, + db_value); + } + client_handle = mhi_dev_ctxt->client_handle_list[chan]; + if (NULL != client_handle) { + result->transaction_status = + MHI_STATUS_DEVICE_NOT_READY; + } + break; + } + default: + mhi_log(MHI_MSG_ERROR, + "Unknown TX completion.\n"); + break; + } /*switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */ + return 0; +} + +enum MHI_STATUS recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt, + struct mhi_ring *ring, + enum MHI_RING_TYPE ring_type, + u32 ring_index) +{ + enum MHI_STATUS ret_val = MHI_STATUS_ERROR; + u64 db_value = 0; + void *removed_element = NULL; + void *added_element = NULL; + + if (NULL == mhi_dev_ctxt || NULL == ring || + ring_type > (MHI_RING_TYPE_MAX - 1) || + ring_index > (MHI_MAX_CHANNELS - 1)) { + mhi_log(MHI_MSG_ERROR, "Bad input params\n"); + return ret_val; + } + ret_val = ctxt_del_element(ring, &removed_element); + if (MHI_STATUS_SUCCESS != ret_val) { + mhi_log(MHI_MSG_ERROR, "Could not remove element from ring\n"); + return MHI_STATUS_ERROR; + } + ret_val = ctxt_add_element(ring, &added_element); + if (MHI_STATUS_SUCCESS != ret_val) + mhi_log(MHI_MSG_ERROR, "Could not add element to ring\n"); + db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, + (uintptr_t)ring->wp); + if 
(MHI_STATUS_SUCCESS != ret_val) + return ret_val; + if (MHI_RING_TYPE_XFER_RING == ring_type) { + union mhi_xfer_pkt *removed_xfer_pkt = + (union mhi_xfer_pkt *)removed_element; + union mhi_xfer_pkt *added_xfer_pkt = + (union mhi_xfer_pkt *)added_element; + added_xfer_pkt->data_tx_pkt = + *(struct mhi_tx_pkt *)removed_xfer_pkt; + } + atomic_inc(&mhi_dev_ctxt->flags.data_pending); + /* Asserting Device Wake here, will imediately wake mdm */ + if ((MHI_STATE_M0 == mhi_dev_ctxt->mhi_state || + MHI_STATE_M1 == mhi_dev_ctxt->mhi_state) && + mhi_dev_ctxt->flags.link_up) { + switch (ring_type) { + case MHI_RING_TYPE_CMD_RING: + { + struct mutex *cmd_mutex = NULL; + cmd_mutex = + &mhi_dev_ctxt-> + mhi_cmd_mutex_list[PRIMARY_CMD_RING]; + mutex_lock(cmd_mutex); + mhi_dev_ctxt->cmd_ring_order = 1; + MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->cmd_db_addr, + ring_index, db_value); + mutex_unlock(cmd_mutex); + break; + } + case MHI_RING_TYPE_EVENT_RING: + { + spinlock_t *lock = NULL; + unsigned long flags = 0; + lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index]; + spin_lock_irqsave(lock, flags); + mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1; + if ((mhi_dev_ctxt->ev_counter[ring_index] % + MHI_EV_DB_INTERVAL) == 0) { + MHI_WRITE_DB(mhi_dev_ctxt, + mhi_dev_ctxt->event_db_addr, + ring_index, db_value); + } + mhi_dev_ctxt->ev_counter[ring_index]++; + spin_unlock_irqrestore(lock, flags); + break; + } + case MHI_RING_TYPE_XFER_RING: + { + unsigned long flags = 0; + spin_lock_irqsave( + &mhi_dev_ctxt->db_write_lock[ring_index], + flags); + mhi_dev_ctxt->mhi_chan_db_order[ring_index] = 1; + MHI_WRITE_DB(mhi_dev_ctxt, + mhi_dev_ctxt->channel_db_addr, + ring_index, db_value); + spin_unlock_irqrestore( + &mhi_dev_ctxt->db_write_lock[ring_index], + flags); + break; + } + default: + mhi_log(MHI_MSG_ERROR, "Bad ring type\n"); + } + } + atomic_dec(&mhi_dev_ctxt->flags.data_pending); + return ret_val; +} + +enum MHI_STATUS mhi_change_chan_state(struct mhi_device_ctxt *mhi_dev_ctxt, + u32 
chan_id, enum MHI_CHAN_STATE new_state)
+{
+    struct mutex *chan_mutex;
+
+    /* Validate before dereferencing mhi_dev_ctxt or indexing by chan_id */
+    if (chan_id > (MHI_MAX_CHANNELS - 1) || NULL == mhi_dev_ctxt ||
+        new_state > MHI_CHAN_STATE_LIMIT) {
+        mhi_log(MHI_MSG_ERROR, "Bad input parameters\n");
+        return MHI_STATUS_ERROR;
+    }
+    chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan_id];
+
+    mutex_lock(chan_mutex);
+
+    /*
+     * Set the new state of the channel context.  The old code
+     * hard-coded MHI_CHAN_STATE_ENABLED here, silently ignoring
+     * the caller's requested new_state.
+     */
+    mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan_id].mhi_chan_state =
+                            new_state;
+    mutex_unlock(chan_mutex);
+    return MHI_STATUS_SUCCESS;
+}
+
+/*
+ * Completion handler for a channel RESET command: rewind both the
+ * local and shared channel contexts to ring base and wake any client
+ * waiting on the reset.
+ */
+static enum MHI_STATUS reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
+                union mhi_cmd_pkt *cmd_pkt)
+{
+    u32 chan = 0;
+    enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
+    struct mhi_ring *local_chan_ctxt;
+    struct mhi_chan_ctxt *chan_ctxt;
+    struct mhi_client_handle *client_handle = NULL;
+    struct mutex *chan_mutex;
+
+    MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
+
+    if (!VALID_CHAN_NR(chan)) {
+        mhi_log(MHI_MSG_ERROR,
+            "Bad channel number for CCE\n");
+        return MHI_STATUS_ERROR;
+    }
+
+    chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan];
+    mutex_lock(chan_mutex);
+    client_handle = mhi_dev_ctxt->client_handle_list[chan];
+    local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+    chan_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan];
+    mhi_log(MHI_MSG_INFO, "Processed cmd reset event\n");
+
+    /* Reset the local channel context */
+    local_chan_ctxt->rp = local_chan_ctxt->base;
+    local_chan_ctxt->wp = local_chan_ctxt->base;
+    local_chan_ctxt->ack_rp = local_chan_ctxt->base;
+
+    /* Reset the mhi channel context */
+    chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_ENABLED;
+    chan_ctxt->mhi_trb_read_ptr = chan_ctxt->mhi_trb_ring_base_addr;
+    chan_ctxt->mhi_trb_write_ptr = chan_ctxt->mhi_trb_ring_base_addr;
+
+    mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] = MHI_CMD_NOT_PENDING;
+    mutex_unlock(chan_mutex);
+    mhi_log(MHI_MSG_INFO, "Reset complete.\n");
+    if (NULL != client_handle)
+        complete(&client_handle->chan_reset_complete);
+    return 
ret_val;
+}
+
+/*
+ * Completion handler for a channel START command: clear the pending
+ * ack and wake the client waiting on channel open.  Returns an error
+ * without touching any per-channel array if the channel id is bad.
+ */
+static enum MHI_STATUS start_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
+                union mhi_cmd_pkt *cmd_pkt)
+{
+    u32 chan;
+    MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
+    if (!VALID_CHAN_NR(chan)) {
+        mhi_log(MHI_MSG_ERROR, "Bad chan: 0x%x\n", chan);
+        /*
+         * Must bail here: the old code fell through and indexed
+         * mhi_chan_pend_cmd_ack[] / client_handle_list[] with the
+         * invalid channel number (out-of-bounds access).
+         */
+        return MHI_STATUS_ERROR;
+    }
+    mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] =
+                MHI_CMD_NOT_PENDING;
+    mhi_log(MHI_MSG_INFO, "Processed START CMD chan %d\n", chan);
+    if (NULL != mhi_dev_ctxt->client_handle_list[chan])
+        complete(
+        &mhi_dev_ctxt->client_handle_list[chan]->chan_open_complete);
+    return MHI_STATUS_SUCCESS;
+}
+
+/*
+ * Dispatch a command-completion event from the device to the
+ * per-command handlers and retire the command ring element.
+ */
+enum MHI_STATUS parse_cmd_event(struct mhi_device_ctxt *mhi_dev_ctxt,
+                union mhi_event_pkt *ev_pkt)
+{
+    enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
+    union mhi_cmd_pkt *cmd_pkt = NULL;
+    uintptr_t phy_trb_loc = 0;
+    if (NULL != ev_pkt)
+        phy_trb_loc = (uintptr_t)MHI_EV_READ_PTR(EV_PTR,
+                        ev_pkt);
+    else
+        return MHI_STATUS_ERROR;
+
+    cmd_pkt = (union mhi_cmd_pkt *)mhi_p2v_addr(
+            mhi_dev_ctxt->mhi_ctrl_seg_info, phy_trb_loc);
+
+    switch (MHI_EV_READ_CODE(EV_TRB_CODE, ev_pkt)) {
+    case MHI_EVENT_CC_SUCCESS:
+    {
+        u32 chan;
+        MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
+        /*
+         * Log before dispatching.  This statement previously sat
+         * between "switch (...) {" and the first case label and
+         * was therefore unreachable dead code.
+         */
+        mhi_log(MHI_MSG_INFO, "CCE chan %d cmd %d\n",
+            chan,
+            MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt));
+        switch (MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt)) {
+        case MHI_PKT_TYPE_RESET_CHAN_CMD:
+            if (MHI_STATUS_SUCCESS != reset_chan_cmd(mhi_dev_ctxt,
+                                cmd_pkt))
+                mhi_log(MHI_MSG_INFO,
+                    "Failed to process reset cmd\n");
+            break;
+        case MHI_PKT_TYPE_STOP_CHAN_CMD:
+            if (MHI_STATUS_SUCCESS != ret_val) {
+                mhi_log(MHI_MSG_INFO,
+                    "Failed to set chan state\n");
+                return MHI_STATUS_ERROR;
+            }
+            break;
+        case MHI_PKT_TYPE_START_CHAN_CMD:
+            if (MHI_STATUS_SUCCESS != start_chan_cmd(mhi_dev_ctxt,
+                                cmd_pkt))
+                mhi_log(MHI_MSG_INFO,
+                    "Failed to process reset cmd\n");
+            atomic_dec(&mhi_dev_ctxt->start_cmd_pending_ack);
+            wake_up_interruptible(
+                mhi_dev_ctxt->chan_start_complete);
+            break;
+        default:
+            mhi_log(MHI_MSG_INFO,
+                "Bad cmd type 0x%x\n",
+ 
MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt)); + break; + } + mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] = MHI_CMD_NOT_PENDING; + break; + } + default: + mhi_log(MHI_MSG_INFO, "Unhandled mhi completion code\n"); + break; + } + ctxt_del_element(mhi_dev_ctxt->mhi_local_cmd_ctxt, NULL); + return MHI_STATUS_SUCCESS; +} + +int mhi_poll_inbound(struct mhi_client_handle *client_handle, + struct mhi_result *result) +{ + struct mhi_tx_pkt *pending_trb = 0; + struct mhi_device_ctxt *mhi_dev_ctxt = NULL; + u32 chan = 0; + struct mhi_ring *local_chan_ctxt; + struct mutex *chan_mutex = NULL; + int ret_val = 0; + + if (NULL == client_handle || NULL == result || + NULL == client_handle->mhi_dev_ctxt) + return -EINVAL; + mhi_dev_ctxt = client_handle->mhi_dev_ctxt; + chan = client_handle->chan; + local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan]; + chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan]; + mutex_lock(chan_mutex); + if ((local_chan_ctxt->rp != local_chan_ctxt->ack_rp)) { + pending_trb = (struct mhi_tx_pkt *)(local_chan_ctxt->ack_rp); + result->payload_buf = pending_trb->buffer_ptr; + result->bytes_xferd = MHI_TX_TRB_GET_LEN(TX_TRB_LEN, + (union mhi_xfer_pkt *)pending_trb); + result->flags = pending_trb->info; + result->transaction_status = MHI_STATUS_SUCCESS; + } else { + result->payload_buf = 0; + result->bytes_xferd = 0; + result->transaction_status = MHI_STATUS_SUCCESS; + } + ret_val = delete_element(local_chan_ctxt, &local_chan_ctxt->ack_rp, + &local_chan_ctxt->rp, NULL); + if (ret_val != MHI_STATUS_SUCCESS) { + mhi_log(MHI_MSG_ERROR, + "Failed to remove from inbound ring ret %d chan %d\n", + ret_val, chan); + result->payload_buf = 0; + result->bytes_xferd = 0; + result->transaction_status = MHI_STATUS_SUCCESS; + } + mutex_unlock(chan_mutex); + return ret_val; +} +EXPORT_SYMBOL(mhi_poll_inbound); + + +enum MHI_STATUS validate_ev_el_addr(struct mhi_ring *ring, uintptr_t addr) +{ + return (addr < (uintptr_t)(ring->base) || + addr > ((uintptr_t)(ring->base) + + 
(ring->len - 1))) ? + MHI_STATUS_ERROR : MHI_STATUS_SUCCESS; +} + +enum MHI_STATUS validate_ring_el_addr(struct mhi_ring *ring, uintptr_t addr) +{ + return (addr < (uintptr_t)(ring->base) || + addr > ((uintptr_t)(ring->base) + + (ring->len - 1))) ? + MHI_STATUS_ERROR : MHI_STATUS_SUCCESS; +} + +enum MHI_STATUS mhi_wait_for_mdm(struct mhi_device_ctxt *mhi_dev_ctxt) +{ + u32 j = 0; + while (readl_relaxed((void *)(mhi_dev_ctxt->mmio_addr + + MHIREGLEN)) == 0xFFFFFFFF + && j <= MHI_MAX_LINK_RETRIES) { + mhi_log(MHI_MSG_CRITICAL, + "Could not access MDM retry %d\n", j); + msleep(MHI_LINK_STABILITY_WAIT_MS); + if (MHI_MAX_LINK_RETRIES == j) { + mhi_log(MHI_MSG_CRITICAL, + "Could not access MDM, FAILING!\n"); + return MHI_STATUS_ERROR; + } + j++; + } + return MHI_STATUS_SUCCESS; +} + +int mhi_get_chan_max_buffers(u32 chan) +{ + if (IS_SOFTWARE_CHANNEL(chan)) + return MAX_NR_TRBS_PER_SOFT_CHAN - 1; + else + return MAX_NR_TRBS_PER_HARD_CHAN - 1; +} + +int mhi_get_max_desc(struct mhi_client_handle *client_handle) +{ + return mhi_get_chan_max_buffers(client_handle->chan); +} +EXPORT_SYMBOL(mhi_get_max_desc); + +int mhi_get_epid(struct mhi_client_handle *client_handle) +{ + return MHI_EPID; +} + +int mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt) +{ + mhi_log(MHI_MSG_VERBOSE, "GPIO %d\n", + mhi_dev_ctxt->dev_props->device_wake_gpio); + gpio_direction_output(mhi_dev_ctxt->dev_props->device_wake_gpio, 1); + return 0; +} + +inline int mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt) +{ + mhi_log(MHI_MSG_VERBOSE, "GPIO %d\n", + mhi_dev_ctxt->dev_props->device_wake_gpio); + if (mhi_dev_ctxt->enable_lpm) + gpio_direction_output( + mhi_dev_ctxt->dev_props->device_wake_gpio, 0); + else + mhi_log(MHI_MSG_VERBOSE, "LPM Enabled\n"); + return 0; +} + +int mhi_set_lpm(struct mhi_client_handle *client_handle, int enable_lpm) +{ + mhi_log(MHI_MSG_VERBOSE, "LPM Set %d\n", enable_lpm); + client_handle->mhi_dev_ctxt->enable_lpm = enable_lpm ? 
1 : 0; + return 0; +} + +int mhi_set_bus_request(struct mhi_device_ctxt *mhi_dev_ctxt, + int index) +{ + mhi_log(MHI_MSG_INFO, "Setting bus request to index %d\n", index); + return msm_bus_scale_client_update_request(mhi_dev_ctxt->bus_client, + index); +} + +enum MHI_STATUS mhi_deregister_channel(struct mhi_client_handle + *client_handle) { + if (NULL == client_handle || + client_handle->magic != MHI_HANDLE_MAGIC) + return MHI_STATUS_ERROR; + client_handle->mhi_dev_ctxt->client_handle_list[client_handle->chan] = + NULL; + kfree(client_handle); + return MHI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(mhi_deregister_channel); diff --git a/drivers/platform/msm/mhi/mhi_mmio_ops.c b/drivers/platform/msm/mhi/mhi_mmio_ops.c new file mode 100644 index 000000000000..dd700717642e --- /dev/null +++ b/drivers/platform/msm/mhi/mhi_mmio_ops.c @@ -0,0 +1,211 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+#include "mhi_sys.h"
+#include "mhi_hwio.h"
+#include "mhi.h"
+
+/*
+ * Poll MHISTATUS until the device reports READY (bounded retries).
+ * Returns LINK_DOWN on an all-ones read, DEVICE_NOT_READY on timeout.
+ */
+enum MHI_STATUS mhi_test_for_device_ready(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+    u32 pcie_word_val = 0;
+    u32 expiry_counter;
+    mhi_log(MHI_MSG_INFO, "Waiting for MMIO Ready bit to be set\n");
+
+    /* Read MMIO and poll for READY bit to be set */
+    pcie_word_val = readl_relaxed((void *)(mhi_dev_ctxt->mmio_addr +
+                        MHISTATUS));
+    MHI_READ_FIELD(pcie_word_val,
+            MHISTATUS_READY_MASK,
+            MHISTATUS_READY_SHIFT);
+
+    if (pcie_word_val == 0xFFFFFFFF)
+        return MHI_STATUS_LINK_DOWN;
+    expiry_counter = 0;
+    while (MHI_STATE_READY != pcie_word_val && expiry_counter < 50) {
+        expiry_counter++;
+        mhi_log(MHI_MSG_ERROR,
+            "Device is not ready, sleeping and retrying.\n");
+        msleep(MHI_READY_STATUS_TIMEOUT_MS);
+        pcie_word_val = readl_relaxed((void *)(mhi_dev_ctxt->mmio_addr +
+                        MHISTATUS));
+        MHI_READ_FIELD(pcie_word_val,
+            MHISTATUS_READY_MASK, MHISTATUS_READY_SHIFT);
+    }
+
+    if (pcie_word_val != MHI_STATE_READY)
+        return MHI_STATUS_DEVICE_NOT_READY;
+    return MHI_STATUS_SUCCESS;
+}
+
+/*
+ * Program the device MMIO space: context base addresses, control and
+ * data segment windows, and doorbell offsets.
+ */
+enum MHI_STATUS mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+    u64 pcie_dword_val = 0;
+    u32 pcie_word_val = 0;
+    u32 i = 0;
+    /*
+     * Must be initialized: it is only assigned on the 0xFFFFFFFF
+     * (link-down) path below but tested unconditionally afterwards,
+     * which previously read an uninitialized value.
+     */
+    enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
+
+    mhi_log(MHI_MSG_INFO, "~~~ Initializing MMIO ~~~\n");
+    mhi_dev_ctxt->mmio_addr = mhi_dev_ctxt->dev_props->bar0_base;
+
+    mhi_log(MHI_MSG_INFO, "Bar 0 address is at: 0x%p\n",
+            mhi_dev_ctxt->mmio_addr);
+
+    mhi_dev_ctxt->mmio_len = readl_relaxed((void *)(mhi_dev_ctxt->mmio_addr +
+                    MHIREGLEN));
+
+    if (0 == mhi_dev_ctxt->mmio_len) {
+        mhi_log(MHI_MSG_ERROR, "Received mmio length as zero\n");
+        return MHI_STATUS_ERROR;
+    }
+
+    mhi_log(MHI_MSG_INFO, "Testing MHI Ver\n");
+    mhi_dev_ctxt->dev_props->mhi_ver = readl_relaxed(
+            (void *)(mhi_dev_ctxt->mmio_addr + MHIVER));
+    if (MHI_VERSION != mhi_dev_ctxt->dev_props->mhi_ver) {
+        mhi_log(MHI_MSG_CRITICAL, "Bad MMIO version, 0x%x\n",
+            mhi_dev_ctxt->dev_props->mhi_ver);
+
+        if (mhi_dev_ctxt->dev_props->mhi_ver == 
0xFFFFFFFF) + ret_val = mhi_wait_for_mdm(mhi_dev_ctxt); + if (ret_val) + return MHI_STATUS_ERROR; + } + /* Enable the channels */ + for (i = 0; i < MHI_MAX_CHANNELS; ++i) { + struct mhi_chan_ctxt *chan_ctxt = + &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[i]; + if (VALID_CHAN_NR(i)) + chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_ENABLED; + else + chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_DISABLED; + } + mhi_log(MHI_MSG_INFO, + "Read back MMIO Ready bit successfully. Moving on..\n"); + mhi_log(MHI_MSG_INFO, "Reading channel doorbell offset\n"); + + MHI_REG_READ_FIELD(mhi_dev_ctxt->mmio_addr, + CHDBOFF, CHDBOFF_CHDBOFF_MASK, + CHDBOFF_CHDBOFF_SHIFT, mhi_dev_ctxt->channel_db_addr); + + mhi_log(MHI_MSG_INFO, "Reading event doorbell offset\n"); + MHI_REG_READ_FIELD(mhi_dev_ctxt->mmio_addr, + ERDBOFF, ERDBOFF_ERDBOFF_MASK, + ERDBOFF_ERDBOFF_SHIFT, mhi_dev_ctxt->event_db_addr); + + mhi_dev_ctxt->channel_db_addr += (uintptr_t)mhi_dev_ctxt->mmio_addr; + mhi_dev_ctxt->event_db_addr += (uintptr_t)mhi_dev_ctxt->mmio_addr; + + mhi_log(MHI_MSG_INFO, "Setting all MMIO values.\n"); + + pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, + (uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list); + pcie_word_val = HIGH_WORD(pcie_dword_val); + MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, CCABAP_HIGHER, + CCABAP_HIGHER_CCABAP_HIGHER_MASK, + CCABAP_HIGHER_CCABAP_HIGHER_SHIFT, pcie_word_val); + pcie_word_val = LOW_WORD(pcie_dword_val); + + MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, CCABAP_LOWER, + CCABAP_LOWER_CCABAP_LOWER_MASK, + CCABAP_LOWER_CCABAP_LOWER_SHIFT, + pcie_word_val); + + /* Write the Event Context Base Address Register High and Low parts */ + pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, + (uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list); + pcie_word_val = HIGH_WORD(pcie_dword_val); + MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, ECABAP_HIGHER, + ECABAP_HIGHER_ECABAP_HIGHER_MASK, + ECABAP_HIGHER_ECABAP_HIGHER_SHIFT, pcie_word_val); + pcie_word_val = 
LOW_WORD(pcie_dword_val); + + MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, ECABAP_LOWER, + ECABAP_LOWER_ECABAP_LOWER_MASK, + ECABAP_LOWER_ECABAP_LOWER_SHIFT, pcie_word_val); + + + /* Write the Command Ring Control Register High and Low parts */ + pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, + (uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg->mhi_cmd_ctxt_list); + pcie_word_val = HIGH_WORD(pcie_dword_val); + MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, CRCBAP_HIGHER, + CRCBAP_HIGHER_CRCBAP_HIGHER_MASK, + CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT, + pcie_word_val); + pcie_word_val = LOW_WORD(pcie_dword_val); + MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, CRCBAP_LOWER, + CRCBAP_LOWER_CRCBAP_LOWER_MASK, + CRCBAP_LOWER_CRCBAP_LOWER_SHIFT, + pcie_word_val); + + mhi_dev_ctxt->cmd_db_addr = (uintptr_t)mhi_dev_ctxt->mmio_addr + + CRDB_LOWER; + /* Set the control segment in the MMIO */ + pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, + (uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg); + pcie_word_val = HIGH_WORD(pcie_dword_val); + MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, MHICTRLBASE_HIGHER, + MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK, + MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT, + pcie_word_val); + + pcie_word_val = LOW_WORD(pcie_dword_val); + MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, MHICTRLBASE_LOWER, + MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK, + MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT, + pcie_word_val); + + pcie_dword_val = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, + (uintptr_t)mhi_dev_ctxt->mhi_ctrl_seg) + + mhi_get_memregion_len(mhi_dev_ctxt->mhi_ctrl_seg_info) - 1; + + pcie_word_val = HIGH_WORD(pcie_dword_val); + MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, MHICTRLLIMIT_HIGHER, + MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK, + MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT, + pcie_word_val); + pcie_word_val = LOW_WORD(pcie_dword_val); + MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, MHICTRLLIMIT_LOWER, + MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK, + 
MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT, + pcie_word_val); + + /* Set the data segment in the MMIO */ + pcie_dword_val = MHI_DATA_SEG_WINDOW_START_ADDR; + pcie_word_val = HIGH_WORD(pcie_dword_val); + MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, MHIDATABASE_HIGHER, + MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK, + MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT, + pcie_word_val); + + pcie_word_val = LOW_WORD(pcie_dword_val); + MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, MHIDATABASE_LOWER, + MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK, + MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT, + pcie_word_val); + + pcie_dword_val = MHI_DATA_SEG_WINDOW_END_ADDR; + + pcie_word_val = HIGH_WORD(pcie_dword_val); + MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, MHIDATALIMIT_HIGHER, + MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK, + MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT, + (pcie_word_val)); + pcie_word_val = LOW_WORD(pcie_dword_val); + MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, MHIDATALIMIT_LOWER, + MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK, + MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT, + (pcie_word_val)); + + mhi_log(MHI_MSG_INFO, "Done..\n"); + return MHI_STATUS_SUCCESS; +} + diff --git a/drivers/platform/msm/mhi/mhi_pm.c b/drivers/platform/msm/mhi/mhi_pm.c new file mode 100644 index 000000000000..17f3a473b264 --- /dev/null +++ b/drivers/platform/msm/mhi/mhi_pm.c @@ -0,0 +1,272 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+
+#include <linux/msm_mhi.h>
+#include <linux/workqueue.h>
+#include <linux/pm.h>
+#include <linux/fs.h>
+#include <linux/hrtimer.h>
+
+#include "mhi_sys.h"
+#include "mhi.h"
+#include "mhi_hwio.h"
+
+/* Write only sysfs attributes */
+static DEVICE_ATTR(MHI_M3, S_IWUSR, NULL, sysfs_init_m3);
+static DEVICE_ATTR(MHI_M0, S_IWUSR, NULL, sysfs_init_m0);
+
+/* Read only sysfs attributes */
+
+static struct attribute *mhi_attributes[] = {
+	&dev_attr_MHI_M3.attr,
+	&dev_attr_MHI_M0.attr,
+	NULL,
+};
+
+static struct attribute_group mhi_attribute_group = {
+	.attrs = mhi_attributes,
+};
+
+/* PCI suspend hook: cancel pending M0 work and request an M3 transition. */
+int mhi_pci_suspend(struct pci_dev *pcie_dev, pm_message_t state)
+{
+	int r = 0;
+	struct mhi_device_ctxt *mhi_dev_ctxt = pcie_dev->dev.platform_data;
+
+	if (NULL == mhi_dev_ctxt)
+		return -EINVAL;
+	mhi_log(MHI_MSG_INFO, "Entered, sys state %d, MHI state %d\n",
+			state.event, mhi_dev_ctxt->mhi_state);
+	atomic_set(&mhi_dev_ctxt->flags.pending_resume, 1);
+	/*
+	 * NOTE(review): cancel_work_sync() returns true when pending work was
+	 * cancelled, so this branch runs when nothing was cancelled — confirm
+	 * the intended polarity against the m0_work_enabled flag semantics.
+	 */
+	r = cancel_work_sync(&mhi_dev_ctxt->m0_work);
+	if (!r) {
+		atomic_set(&mhi_dev_ctxt->flags.m0_work_enabled, 0);
+		mhi_log(MHI_MSG_INFO, "M0 work cancelled\n");
+	}
+
+	r = mhi_initiate_m3(mhi_dev_ctxt);
+
+	if (!r)
+		return r;
+
+	/* M3 entry failed: drop the pending-resume marker set above. */
+	atomic_set(&mhi_dev_ctxt->flags.pending_resume, 0);
+	mhi_log(MHI_MSG_ERROR, "Failing suspend sequence ret: %d\n",
+			r);
+	return r;
+}
+
+/* PCI resume hook: request M0 and wait for the device to reach M0/M1. */
+int mhi_pci_resume(struct pci_dev *pcie_dev)
+{
+	int r = 0;
+	struct mhi_device_ctxt *mhi_dev_ctxt = pcie_dev->dev.platform_data;
+
+	/* Guard added for parity with mhi_pci_suspend(): platform_data
+	 * may be NULL before the MHI context is initialized. */
+	if (NULL == mhi_dev_ctxt)
+		return -EINVAL;
+	r = mhi_initiate_m0(mhi_dev_ctxt);
+	if (r)
+		goto exit;
+	r = wait_event_interruptible_timeout(*mhi_dev_ctxt->M0_event,
+			mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
+			mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
+			msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
+	switch (r) {
+	case 0:
+		mhi_log(MHI_MSG_CRITICAL,
+			"Timeout: No M0 event after %d ms\n",
+			MHI_MAX_SUSPEND_TIMEOUT);
+		mhi_dev_ctxt->counters.m0_event_timeouts++;
+		r = -ETIME;
+		break;
+	case -ERESTARTSYS:
+		mhi_log(MHI_MSG_CRITICAL,
+			"Going Down...\n");
+		break;
+	default:
+ mhi_log(MHI_MSG_INFO, + "Wait complete state: %d\n", mhi_dev_ctxt->mhi_state); + r = 0; + } +exit: + atomic_set(&mhi_dev_ctxt->flags.pending_resume, 0); + return r; +} + +enum hrtimer_restart mhi_initiate_m1(struct hrtimer *timer) +{ + int ret_val = 0; + unsigned long flags; + ktime_t curr_time, timer_inc; + struct mhi_device_ctxt *mhi_dev_ctxt = container_of(timer, + struct mhi_device_ctxt, + m1_timer); + write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags); + + /* + * We will allow M1 if no data is pending, the current + * state is M0 and no M3 transition is pending + */ + if ((0 == atomic_read(&mhi_dev_ctxt->flags.data_pending)) && + (MHI_STATE_M1 == mhi_dev_ctxt->mhi_state || + MHI_STATE_M0 == mhi_dev_ctxt->mhi_state) && + (0 == mhi_dev_ctxt->flags.pending_M3) && + mhi_dev_ctxt->flags.mhi_initialized && + (0 == atomic_read( + &mhi_dev_ctxt->counters.outbound_acks))) { + mhi_dev_ctxt->mhi_state = MHI_STATE_M1; + ret_val = mhi_deassert_device_wake(mhi_dev_ctxt); + mhi_dev_ctxt->counters.m0_m1++; + if (ret_val) + mhi_log(MHI_MSG_ERROR, + "Could not set DEVICE WAKE GPIO LOW\n"); + } + write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags); + if (mhi_dev_ctxt->mhi_state == MHI_STATE_M0 || + mhi_dev_ctxt->mhi_state == MHI_STATE_M1 || + mhi_dev_ctxt->mhi_state == MHI_STATE_READY) { + curr_time = ktime_get(); + timer_inc = ktime_set(0, MHI_M1_ENTRY_DELAY_MS * 1E6L); + hrtimer_forward(timer, curr_time, timer_inc); + return HRTIMER_RESTART; + } + return HRTIMER_NORESTART; +} + +int mhi_init_pm_sysfs(struct device *dev) +{ + return sysfs_create_group(&dev->kobj, &mhi_attribute_group); +} + +void mhi_rem_pm_sysfs(struct device *dev) +{ + return sysfs_remove_group(&dev->kobj, &mhi_attribute_group); +} + +ssize_t sysfs_init_m3(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int r = 0; + struct mhi_device_ctxt *mhi_dev_ctxt = + &mhi_devices.device_list[0].mhi_ctxt; + r = mhi_initiate_m3(mhi_dev_ctxt); + if (r) { + 
mhi_log(MHI_MSG_CRITICAL,
+			"Failed to suspend %d\n", r);
+		return r;
+	}
+	if (MHI_STATUS_SUCCESS != mhi_turn_off_pcie_link(mhi_dev_ctxt))
+		mhi_log(MHI_MSG_CRITICAL,
+			"Failed to turn off link\n");
+
+	return count;
+}
+
+/* sysfs write handler: force M0 by re-enabling the link and requesting M0. */
+ssize_t sysfs_init_m0(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct mhi_device_ctxt *mhi_dev_ctxt =
+		&mhi_devices.device_list[0].mhi_ctxt;
+	if (MHI_STATUS_SUCCESS != mhi_turn_on_pcie_link(mhi_dev_ctxt)) {
+		mhi_log(MHI_MSG_CRITICAL,
+			"Failed to resume link\n");
+		return count;
+	}
+	mhi_initiate_m0(mhi_dev_ctxt);
+	mhi_log(MHI_MSG_CRITICAL,
+		"Current mhi_state = 0x%x\n",
+		mhi_dev_ctxt->mhi_state);
+
+	return count;
+}
+
+/*
+ * Power down the PCIe link: disable config-space shadowing, enter D3hot,
+ * then suspend the bus. Serialized against link-up via mhi_link_state.
+ * Returns MHI_STATUS_ERROR if the D3hot transition fails.
+ */
+enum MHI_STATUS mhi_turn_off_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+	int r;
+	struct pci_dev *pcie_dev;
+	enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
+
+	mhi_log(MHI_MSG_INFO, "Entered...\n");
+	pcie_dev = mhi_dev_ctxt->dev_info->pcie_device;
+	mutex_lock(&mhi_dev_ctxt->mhi_link_state);
+	if (0 == mhi_dev_ctxt->flags.link_up) {
+		mhi_log(MHI_MSG_CRITICAL,
+			"Link already marked as down, nothing to do\n");
+		goto exit;
+	}
+	/* Disable shadow to avoid restoring D3 hot struct device */
+	r = msm_pcie_shadow_control(mhi_dev_ctxt->dev_info->pcie_device, 0);
+	if (r)
+		mhi_log(MHI_MSG_CRITICAL,
+			"Failed to stop shadow config space: %d\n", r);
+
+	r = pci_set_power_state(mhi_dev_ctxt->dev_info->pcie_device, PCI_D3hot);
+	if (r) {
+		mhi_log(MHI_MSG_CRITICAL,
+			"Failed to set pcie power state to D3 hot ret: %x\n", r);
+		ret_val = MHI_STATUS_ERROR;
+		goto exit;
+	}
+	r = msm_pcie_pm_control(MSM_PCIE_SUSPEND,
+			mhi_dev_ctxt->dev_info->pcie_device->bus->number,
+			mhi_dev_ctxt->dev_info->pcie_device,
+			NULL,
+			0);
+	if (r)
+		mhi_log(MHI_MSG_CRITICAL,
+			"Failed to suspend pcie bus ret 0x%x\n", r);
+	mhi_dev_ctxt->flags.link_up = 0;
+exit:
+	mutex_unlock(&mhi_dev_ctxt->mhi_link_state);
+	mhi_log(MHI_MSG_INFO, "Exited...\n");
+	/* Bug fix: previously returned MHI_STATUS_SUCCESS unconditionally,
+	 * hiding the D3hot failure recorded in ret_val from callers. */
+	return ret_val;
+}
+
+enum MHI_STATUS 
mhi_turn_on_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt) +{ + int r = 0; + struct pci_dev *pcie_dev; + enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; + pcie_dev = mhi_dev_ctxt->dev_info->pcie_device; + + mutex_lock(&mhi_dev_ctxt->mhi_link_state); + mhi_log(MHI_MSG_INFO, "Entered...\n"); + if (mhi_dev_ctxt->flags.link_up) + goto exit; + r = msm_pcie_pm_control(MSM_PCIE_RESUME, + mhi_dev_ctxt->dev_info->pcie_device->bus->number, + mhi_dev_ctxt->dev_info->pcie_device, + NULL, 0); + if (r) { + mhi_log(MHI_MSG_CRITICAL, + "Failed to resume pcie bus ret %d\n", r); + ret_val = MHI_STATUS_ERROR; + goto exit; + } + + atomic_dec(&mhi_dev_ctxt->flags.mhi_link_off); + + r = pci_set_power_state(mhi_dev_ctxt->dev_info->pcie_device, + PCI_D0); + if (r) { + mhi_log(MHI_MSG_CRITICAL, + "Failed to load stored state %d\n", r); + ret_val = MHI_STATUS_ERROR; + goto exit; + } + r = msm_pcie_recover_config(mhi_dev_ctxt->dev_info->pcie_device); + if (r) { + mhi_log(MHI_MSG_CRITICAL, + "Failed to Recover config space ret: %d\n", r); + ret_val = MHI_STATUS_ERROR; + goto exit; + } + mhi_dev_ctxt->flags.link_up = 1; +exit: + mutex_unlock(&mhi_dev_ctxt->mhi_link_state); + mhi_log(MHI_MSG_INFO, "Exited...\n"); + return ret_val; +} + diff --git a/drivers/platform/msm/mhi/mhi_ring_ops.c b/drivers/platform/msm/mhi/mhi_ring_ops.c new file mode 100644 index 000000000000..4faff3a6df22 --- /dev/null +++ b/drivers/platform/msm/mhi/mhi_ring_ops.c @@ -0,0 +1,186 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+
+#include "mhi_sys.h"
+#include "mhi.h"
+
+/*
+ * Reserve the next write slot in a ring. On success *wp advances by one
+ * element (modulo ring size) and *assigned_addr (if non-NULL) receives the
+ * slot just claimed. Full rings either overwrite the oldest element
+ * (overwrite_en) or fail with MHI_STATUS_RING_FULL.
+ */
+static enum MHI_STATUS add_element(struct mhi_ring *ring, void **rp,
+				void **wp, void **assigned_addr)
+{
+	uintptr_t d_wp = 0, d_rp = 0, ring_size = 0;
+
+	/* Bug fix: test ring for NULL before dereferencing its members. */
+	if (NULL == ring || 0 == ring->el_size
+		|| NULL == ring->base || 0 == ring->len) {
+		mhi_log(MHI_MSG_ERROR, "Bad input parameters, quitting.\n");
+		return MHI_STATUS_ERROR;
+	}
+
+	if (MHI_STATUS_SUCCESS != get_element_index(ring, *rp, &d_rp)) {
+		mhi_log(MHI_MSG_CRITICAL, "Bad element index.\n");
+		return MHI_STATUS_ERROR;
+	}
+	if (MHI_STATUS_SUCCESS != get_element_index(ring, *wp, &d_wp)) {
+		mhi_log(MHI_MSG_CRITICAL, "Bad element index.\n");
+		return MHI_STATUS_ERROR;
+	}
+	ring_size = ring->len / ring->el_size;
+
+	if ((d_wp + 1) % ring_size == d_rp) {
+		if (ring->overwrite_en) {
+			ctxt_del_element(ring, NULL);
+		} else {
+			mhi_log(MHI_MSG_INFO, "Ring 0x%lX is full\n",
+					(uintptr_t)ring->base);
+			return MHI_STATUS_RING_FULL;
+		}
+	}
+	if (NULL != assigned_addr)
+		*assigned_addr = (char *)ring->wp;
+	*wp = (void *)(((d_wp + 1) % ring_size) * ring->el_size +
+						(uintptr_t)ring->base);
+	return MHI_STATUS_SUCCESS;
+}
+
+inline enum MHI_STATUS ctxt_add_element(struct mhi_ring *ring,
+				void **assigned_addr)
+{
+	return add_element(ring, &ring->rp, &ring->wp, assigned_addr);
+}
+inline enum MHI_STATUS ctxt_del_element(struct mhi_ring *ring,
+				void **assigned_addr)
+{
+	return delete_element(ring, &ring->rp, &ring->wp, assigned_addr);
+}
+
+/**
+ * delete_element - Moves the read pointer of the transfer ring to
+ *		the next element of the transfer ring,
+ *
+ * ring	location of local ring data structure
+ * @rp	ring read pointer
+ * @wp	ring write pointer
+ * @assigned_addr	location of the element just deleted
+ */
+enum MHI_STATUS delete_element(struct mhi_ring *ring, void **rp,
+				void **wp, void **assigned_addr)
+{
+	uintptr_t d_wp = 0, d_rp = 0, ring_size = 0;
+
+	/* Bug fix: test ring for NULL before dereferencing its members. */
+	if (NULL == ring || 0 == ring->el_size ||
+		NULL == ring->base || 0 == ring->len) {
+
mhi_log(MHI_MSG_ERROR, "Bad input parameters, quitting.\n");
+		return MHI_STATUS_ERROR;
+	}
+	ring_size = ring->len / ring->el_size;
+
+	if (MHI_STATUS_SUCCESS != get_element_index(ring, *rp, &d_rp)) {
+		mhi_log(MHI_MSG_CRITICAL, "Bad element index.\n");
+		return MHI_STATUS_ERROR;
+	}
+	if (MHI_STATUS_SUCCESS != get_element_index(ring, *wp, &d_wp)) {
+		mhi_log(MHI_MSG_CRITICAL, "Bad element index.\n");
+		return MHI_STATUS_ERROR;
+	}
+	if (d_wp == d_rp) {
+		mhi_log(MHI_MSG_VERBOSE, "Ring 0x%lX is empty\n",
+				(uintptr_t)ring->base);
+		if (NULL != assigned_addr)
+			*assigned_addr = NULL;
+		return MHI_STATUS_RING_EMPTY;
+	}
+
+	if (NULL != assigned_addr)
+		*assigned_addr = (void *)ring->rp;
+
+	*rp = (void *)(((d_rp + 1) % ring_size) * ring->el_size +
+					(uintptr_t)ring->base);
+
+	return MHI_STATUS_SUCCESS;
+}
+
+/*
+ * Return the number of free descriptors on the client's channel,
+ * or -EINVAL for a NULL/unmagic'd handle.
+ */
+int mhi_get_free_desc(struct mhi_client_handle *client_handle)
+{
+	u32 chan;
+	struct mhi_device_ctxt *ctxt;
+
+	/* Bug fix: validate the handle BEFORE dereferencing it; the
+	 * original read chan/mhi_dev_ctxt ahead of the NULL check. */
+	if (NULL == client_handle || MHI_HANDLE_MAGIC != client_handle->magic)
+		return -EINVAL;
+	chan = client_handle->chan;
+	ctxt = client_handle->mhi_dev_ctxt;
+	return get_nr_avail_ring_elements(&ctxt->mhi_local_chan_ctxt[chan]);
+}
+EXPORT_SYMBOL(mhi_get_free_desc);
+
+int get_nr_avail_ring_elements(struct mhi_ring *ring)
+{
+	u32 nr_el = 0;
+	uintptr_t ring_size = 0;
+	enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
+	ring_size = ring->len / ring->el_size;
+	ret_val = get_nr_enclosed_el(ring, ring->rp, ring->wp, &nr_el);
+	if (ret_val != MHI_STATUS_SUCCESS) {
+		mhi_log(MHI_MSG_ERROR,
+			"Failed to get enclosed el ret %d.\n", ret_val);
+		return 0;
+	}
+	return ring_size - nr_el - 1;
+}
+
+enum MHI_STATUS get_nr_enclosed_el(struct mhi_ring *ring, void *rp,
+						void *wp, u32 *nr_el)
+{
+	uintptr_t index_rp = 0;
+	uintptr_t index_wp = 0;
+	uintptr_t ring_size = 0;
+
+	/* Bug fix: test ring for NULL before dereferencing its members. */
+	if (NULL == ring || 0 == ring->el_size ||
+		NULL == ring->base || 0 == ring->len) {
+		mhi_log(MHI_MSG_ERROR, "Bad input parameters, quitting.\n");
+		return MHI_STATUS_ERROR;
+	}
+	if (MHI_STATUS_SUCCESS !=
get_element_index(ring, rp, &index_rp)) { + mhi_log(MHI_MSG_CRITICAL, "Bad element index rp 0x%p.\n", rp); + return MHI_STATUS_ERROR; + } + + if (MHI_STATUS_SUCCESS != get_element_index(ring, wp, &index_wp)) { + mhi_log(MHI_MSG_CRITICAL, "Bad element index wp 0x%p.\n", wp); + return MHI_STATUS_ERROR; + } + ring_size = ring->len / ring->el_size; + + if (index_rp < index_wp) + *nr_el = index_wp - index_rp; + else if (index_rp > index_wp) + *nr_el = ring_size - (index_rp - index_wp); + else + *nr_el = 0; + return MHI_STATUS_SUCCESS; +} + +enum MHI_STATUS get_element_index(struct mhi_ring *ring, + void *address, uintptr_t *index) +{ + if (MHI_STATUS_SUCCESS != validate_ring_el_addr(ring, + (uintptr_t)address)) + return MHI_STATUS_ERROR; + *index = ((uintptr_t)address - (uintptr_t)ring->base) / ring->el_size; + return MHI_STATUS_SUCCESS; +} + +enum MHI_STATUS get_element_addr(struct mhi_ring *ring, + uintptr_t index, void **address) +{ + uintptr_t ring_size = 0; + if (NULL == ring || NULL == address) + return MHI_STATUS_ERROR; + ring_size = ring->len / ring->el_size; + *address = (void *)((uintptr_t)ring->base + + (index % ring_size) * ring->el_size); + return MHI_STATUS_SUCCESS; +} diff --git a/drivers/platform/msm/mhi/mhi_ssr.c b/drivers/platform/msm/mhi/mhi_ssr.c new file mode 100644 index 000000000000..b72500899558 --- /dev/null +++ b/drivers/platform/msm/mhi/mhi_ssr.c @@ -0,0 +1,248 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <mhi_sys.h> +#include <mhi.h> + +#include <soc/qcom/subsystem_restart.h> +#include <soc/qcom/subsystem_notif.h> + +#include <linux/esoc_client.h> + +static int mhi_ssr_notify_cb(struct notifier_block *nb, + unsigned long action, void *data) +{ + int ret_val = 0; + struct mhi_device_ctxt *mhi_dev_ctxt = + &mhi_devices.device_list[0].mhi_ctxt; + struct mhi_pcie_dev_info *mhi_pcie_dev = NULL; + mhi_pcie_dev = &mhi_devices.device_list[mhi_devices.nr_of_devices]; + if (NULL != mhi_dev_ctxt) + mhi_dev_ctxt->esoc_notif = action; + switch (action) { + case SUBSYS_BEFORE_POWERUP: + mhi_log(MHI_MSG_INFO, + "Received Subsystem event BEFORE_POWERUP\n"); + atomic_set(&mhi_dev_ctxt->flags.pending_powerup, 1); + ret_val = init_mhi_base_state(mhi_dev_ctxt); + if (MHI_STATUS_SUCCESS != ret_val) + mhi_log(MHI_MSG_CRITICAL, + "Failed to transition to base state %d.\n", + ret_val); + break; + case SUBSYS_AFTER_POWERUP: + mhi_log(MHI_MSG_INFO, + "Received Subsystem event AFTER_POWERUP\n"); + break; + case SUBSYS_POWERUP_FAILURE: + mhi_log(MHI_MSG_INFO, + "Received Subsystem event POWERUP_FAILURE\n"); + break; + case SUBSYS_BEFORE_SHUTDOWN: + mhi_log(MHI_MSG_INFO, + "Received Subsystem event BEFORE_SHUTDOWN\n"); + atomic_set(&mhi_dev_ctxt->flags.pending_ssr, 1); + mhi_notify_clients(mhi_dev_ctxt, MHI_CB_MHI_DISABLED); + break; + case SUBSYS_AFTER_SHUTDOWN: + mhi_log(MHI_MSG_INFO, + "Received Subsystem event AFTER_SHUTDOWN\n"); + ret_val = mhi_init_state_transition(mhi_dev_ctxt, + STATE_TRANSITION_LINK_DOWN); + if (MHI_STATUS_SUCCESS != ret_val) { + mhi_log(MHI_MSG_CRITICAL, + "Failed to init state transition, to %d\n", + STATE_TRANSITION_LINK_DOWN); + } + break; + case SUBSYS_RAMDUMP_NOTIFICATION: + mhi_log(MHI_MSG_INFO, + "Received Subsystem event RAMDUMP\n"); + ret_val = init_mhi_base_state(mhi_dev_ctxt); + if (MHI_STATUS_SUCCESS != ret_val) + mhi_log(MHI_MSG_CRITICAL, + "Failed to transition to base state %d.\n", + ret_val); + break; + default: + 
mhi_log(MHI_MSG_INFO, + "Received ESOC notifcation %d, NOT handling\n", + (int)action); + break; + } + return NOTIFY_OK; +} + +static struct notifier_block mhi_ssr_nb = { + .notifier_call = mhi_ssr_notify_cb, +}; + +static void esoc_parse_link_type(struct mhi_device_ctxt *mhi_dev_ctxt) +{ + int ret_val; + ret_val = strcmp(mhi_dev_ctxt->esoc_handle->link, "HSIC+PCIe"); + mhi_log(MHI_MSG_VERBOSE, "Link type is %s as indicated by ESOC\n", + mhi_dev_ctxt->esoc_handle->link); + if (ret_val) + mhi_dev_ctxt->base_state = STATE_TRANSITION_BHI; + else + mhi_dev_ctxt->base_state = STATE_TRANSITION_RESET; +} + +int mhi_esoc_register(struct mhi_device_ctxt *mhi_dev_ctxt) +{ + int ret_val = 0; + struct device_node *np; + struct pci_driver *mhi_driver; + struct device *dev = &mhi_dev_ctxt->dev_info->pcie_device->dev; + + mhi_driver = mhi_dev_ctxt->dev_info->mhi_pcie_driver; + np = dev->of_node; + mhi_dev_ctxt->esoc_handle = devm_register_esoc_client(dev, "mdm"); + mhi_log(MHI_MSG_VERBOSE, + "Of table of pcie struct device property is dev->of_node %p\n", + np); + if (IS_ERR_OR_NULL(mhi_dev_ctxt->esoc_handle)) { + mhi_log(MHI_MSG_CRITICAL, + "Failed to register for SSR, ret %lx\n", + (uintptr_t)mhi_dev_ctxt->esoc_handle); + return -EIO; + } + + esoc_parse_link_type(mhi_dev_ctxt); + + mhi_dev_ctxt->esoc_ssr_handle = subsys_notif_register_notifier( + mhi_dev_ctxt->esoc_handle->name, + &mhi_ssr_nb); + if (IS_ERR_OR_NULL(mhi_dev_ctxt->esoc_ssr_handle)) { + ret_val = PTR_RET(mhi_dev_ctxt->esoc_ssr_handle); + mhi_log(MHI_MSG_CRITICAL, + "Can't find esoc desc ret 0x%lx\n", + (uintptr_t)mhi_dev_ctxt->esoc_ssr_handle); + } + + return ret_val; +} + +void mhi_notify_client(struct mhi_client_handle *client_handle, + enum MHI_CB_REASON reason) +{ + struct mhi_cb_info cb_info = {0}; + struct mhi_result result = {0}; + + cb_info.result = NULL; + cb_info.cb_reason = reason; + + if (NULL != client_handle && + NULL != client_handle->client_info.mhi_client_cb) { + result.user_data = 
client_handle->user_data;
+		cb_info.chan = client_handle->chan;
+		cb_info.result = &result;
+		mhi_log(MHI_MSG_INFO, "Calling back for chan %d, reason %d\n",
+				client_handle->chan, reason);
+		client_handle->client_info.mhi_client_cb(&cb_info);
+	}
+}
+
+/* Broadcast a callback reason to every registered client channel. */
+void mhi_notify_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
+			enum MHI_CB_REASON reason)
+{
+	int i;
+	struct mhi_client_handle *client_handle = NULL;
+
+	for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
+		if (VALID_CHAN_NR(i)) {
+			client_handle = mhi_dev_ctxt->client_handle_list[i];
+			mhi_notify_client(client_handle, reason);
+		}
+	}
+}
+
+/* msm_pcie link event callback: handles link up/down and wake events. */
+void mhi_link_state_cb(struct msm_pcie_notify *notify)
+{
+	enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
+	struct mhi_pcie_dev_info *mhi_pcie_dev = NULL;
+	struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
+
+	/* Bug fix: the original read notify->data in the initializer,
+	 * before this NULL check, defeating the guard. */
+	if (NULL == notify || NULL == notify->data) {
+		mhi_log(MHI_MSG_CRITICAL,
+				"Incomplete handle received\n");
+		return;
+	}
+	mhi_pcie_dev = notify->data;
+	mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt;
+	switch (notify->event) {
+	case MSM_PCIE_EVENT_LINKDOWN:
+		mhi_log(MHI_MSG_INFO, "Received MSM_PCIE_EVENT_LINKDOWN\n");
+		break;
+	case MSM_PCIE_EVENT_LINKUP:
+		mhi_log(MHI_MSG_INFO,
+			"Received MSM_PCIE_EVENT_LINKUP\n");
+		if (0 == mhi_pcie_dev->link_up_cntr) {
+			mhi_log(MHI_MSG_INFO,
+				"Initializing MHI for the first time\n");
+			mhi_ctxt_init(mhi_pcie_dev);
+			mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt;
+			mhi_pcie_dev->mhi_ctxt.flags.link_up = 1;
+			pci_set_master(mhi_pcie_dev->pcie_device);
+			init_mhi_base_state(mhi_dev_ctxt);
+		} else {
+			mhi_log(MHI_MSG_INFO,
+				"Received Link Up Callback\n");
+		}
+		mhi_pcie_dev->link_up_cntr++;
+		break;
+	case MSM_PCIE_EVENT_WAKEUP:
+		mhi_log(MHI_MSG_INFO,
+			"Received MSM_PCIE_EVENT_WAKE\n");
+		__pm_stay_awake(&mhi_dev_ctxt->w_lock);
+		__pm_relax(&mhi_dev_ctxt->w_lock);
+		if (atomic_read(&mhi_dev_ctxt->flags.pending_resume)) {
+			mhi_log(MHI_MSG_INFO,
+				"There is a pending resume, doing nothing.\n");
+			return;
+		}
+		ret_val = mhi_init_state_transition(mhi_dev_ctxt,
+
STATE_TRANSITION_WAKE); + if (MHI_STATUS_SUCCESS != ret_val) { + mhi_log(MHI_MSG_CRITICAL, + "Failed to init state transition, to %d\n", + STATE_TRANSITION_WAKE); + } + break; + default: + mhi_log(MHI_MSG_INFO, + "Received bad link event\n"); + return; + break; + } +} + +enum MHI_STATUS init_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt) +{ + int r = 0; + enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; + + mhi_assert_device_wake(mhi_dev_ctxt); + mhi_dev_ctxt->flags.link_up = 1; + r = + mhi_set_bus_request(mhi_dev_ctxt, 1); + if (r) + mhi_log(MHI_MSG_INFO, + "Failed to scale bus request to active set.\n"); + ret_val = mhi_init_state_transition(mhi_dev_ctxt, + mhi_dev_ctxt->base_state); + if (MHI_STATUS_SUCCESS != ret_val) { + mhi_log(MHI_MSG_CRITICAL, + "Failed to start state change event, to %d\n", + mhi_dev_ctxt->base_state); + } + return ret_val; +} diff --git a/drivers/platform/msm/mhi/mhi_states.c b/drivers/platform/msm/mhi/mhi_states.c new file mode 100644 index 000000000000..309d0ee223ea --- /dev/null +++ b/drivers/platform/msm/mhi/mhi_states.c @@ -0,0 +1,1063 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+
+#include "mhi_sys.h"
+#include "mhi_hwio.h"
+
+/*
+ * Ring the doorbell for one channel unless another writer beat us to it
+ * (mhi_chan_db_order acts as an ordering marker under db_write_lock).
+ */
+static void conditional_chan_db_write(
+				struct mhi_device_ctxt *mhi_dev_ctxt, u32 chan)
+{
+	u64 db_value;
+	unsigned long flags;
+
+	mhi_dev_ctxt->mhi_chan_db_order[chan] = 0;
+	spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan], flags);
+	if (0 == mhi_dev_ctxt->mhi_chan_db_order[chan]) {
+		db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
+		(uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp);
+		MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->channel_db_addr,
+				chan, db_value);
+	}
+	mhi_dev_ctxt->mhi_chan_db_order[chan] = 0;
+	spin_unlock_irqrestore(&mhi_dev_ctxt->db_write_lock[chan], flags);
+}
+
+/* Ring doorbells for every valid channel that has outstanding elements. */
+static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+	u32 i = 0;
+	struct mhi_ring *local_ctxt = NULL;
+
+	mhi_log(MHI_MSG_VERBOSE, "Ringing chan dbs\n");
+	for (i = 0; i < MHI_MAX_CHANNELS; ++i)
+		if (VALID_CHAN_NR(i)) {
+			local_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[i];
+			if (IS_HARDWARE_CHANNEL(i))
+				mhi_dev_ctxt->db_mode[i] = 1;
+			/* Simplified: the original condition
+			 * (wp != rp) || ((wp != rp) && (i % 2))
+			 * reduces exactly to (wp != rp). */
+			if (local_ctxt->wp != local_ctxt->rp)
+				conditional_chan_db_write(mhi_dev_ctxt, i);
+		}
+}
+
+/* Ring the primary command-ring doorbell if its rp and wp differ. */
+static void ring_all_cmd_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+	struct mutex *cmd_mutex = NULL;
+	u64 db_value;
+	u64 rp = 0;
+	struct mhi_ring *local_ctxt = NULL;
+
+	mhi_log(MHI_MSG_VERBOSE, "Ringing chan dbs\n");
+	cmd_mutex = &mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING];
+	mhi_dev_ctxt->cmd_ring_order = 0;
+	mutex_lock(cmd_mutex);
+	local_ctxt = &mhi_dev_ctxt->mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
+	rp = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
+				(uintptr_t)local_ctxt->rp);
+	db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info,
+			(uintptr_t)mhi_dev_ctxt->mhi_local_cmd_ctxt[0].wp);
+	if (0 == mhi_dev_ctxt->cmd_ring_order && rp != db_value)
+		MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->cmd_db_addr,
+				0, db_value);
+	mhi_dev_ctxt->cmd_ring_order = 0;
+	mutex_unlock(cmd_mutex);
+}
+static 
void ring_all_ev_dbs(struct mhi_device_ctxt *mhi_dev_ctxt) +{ + u32 i; + u64 db_value = 0; + u32 event_ring_index; + struct mhi_event_ctxt *event_ctxt = NULL; + struct mhi_control_seg *mhi_ctrl = NULL; + spinlock_t *lock = NULL; + unsigned long flags; + mhi_ctrl = mhi_dev_ctxt->mhi_ctrl_seg; + + for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) { + event_ring_index = mhi_dev_ctxt->alloced_ev_rings[i]; + lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[event_ring_index]; + mhi_dev_ctxt->mhi_ev_db_order[event_ring_index] = 0; + + + spin_lock_irqsave(lock, flags); + event_ctxt = &mhi_ctrl->mhi_ec_list[event_ring_index]; + db_value = mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, + (uintptr_t)mhi_dev_ctxt-> + mhi_local_event_ctxt[event_ring_index].wp); + + if (0 == mhi_dev_ctxt->mhi_ev_db_order[event_ring_index]) { + MHI_WRITE_DB(mhi_dev_ctxt, mhi_dev_ctxt->event_db_addr, + event_ring_index, db_value); + } + mhi_dev_ctxt->mhi_ev_db_order[event_ring_index] = 0; + spin_unlock_irqrestore(lock, flags); + } +} + +static enum MHI_STATUS process_m0_transition( + struct mhi_device_ctxt *mhi_dev_ctxt, + enum STATE_TRANSITION cur_work_item) +{ + unsigned long flags; + int ret_val; + mhi_log(MHI_MSG_INFO, "Entered\n"); + ret_val = cancel_delayed_work(&mhi_dev_ctxt->m3_work); + if (ret_val) { + atomic_set(&mhi_dev_ctxt->flags.m3_work_enabled, 0); + mhi_log(MHI_MSG_INFO, "M3 work was cancelled\n"); + } else { + mhi_log(MHI_MSG_INFO, + "M3 work NOT cancelled, either running or never started\n"); + } + if (mhi_dev_ctxt->mhi_state == MHI_STATE_M2) { + mhi_dev_ctxt->counters.m2_m0++; + } else if (mhi_dev_ctxt->mhi_state == MHI_STATE_M3) { + mhi_dev_ctxt->counters.m3_m0++; + } else if (mhi_dev_ctxt->mhi_state == MHI_STATE_READY) { + mhi_log(MHI_MSG_INFO, + "Transitioning from READY.\n"); + } else { + mhi_log(MHI_MSG_INFO, + "MHI State %d link state %d. 
Quitting\n", + mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.link_up); + goto exit; + } + + read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags); + mhi_dev_ctxt->mhi_state = MHI_STATE_M0; + atomic_inc(&mhi_dev_ctxt->flags.data_pending); + mhi_assert_device_wake(mhi_dev_ctxt); + read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags); + + if (mhi_dev_ctxt->flags.mhi_initialized) { + ring_all_ev_dbs(mhi_dev_ctxt); + ring_all_chan_dbs(mhi_dev_ctxt); + ring_all_cmd_dbs(mhi_dev_ctxt); + } + atomic_dec(&mhi_dev_ctxt->flags.data_pending); + ret_val = mhi_set_bus_request(mhi_dev_ctxt, 1); + if (ret_val) + mhi_log(MHI_MSG_CRITICAL, + "Could not set bus frequency ret: %d\n", + ret_val); + mhi_dev_ctxt->flags.pending_M0 = 0; + wake_up_interruptible(mhi_dev_ctxt->M0_event); + if (ret_val == -ERESTARTSYS) + mhi_log(MHI_MSG_CRITICAL, + "Pending restart detected\n"); + + ret_val = hrtimer_start(&mhi_dev_ctxt->m1_timer, + mhi_dev_ctxt->m1_timeout, + HRTIMER_MODE_REL); + if (atomic_read(&mhi_dev_ctxt->flags.pending_powerup)) { + atomic_set(&mhi_dev_ctxt->flags.pending_ssr, 0); + atomic_set(&mhi_dev_ctxt->flags.pending_powerup, 0); + } + mhi_log(MHI_MSG_VERBOSE, "Starting M1 timer, ret %d\n", ret_val); +exit: + mhi_log(MHI_MSG_INFO, "Exited\n"); + return MHI_STATUS_SUCCESS; +} + +static enum MHI_STATUS process_m1_transition( + struct mhi_device_ctxt *mhi_dev_ctxt, + enum STATE_TRANSITION cur_work_item) +{ + unsigned long flags = 0; + int ret_val = 0; + mhi_log(MHI_MSG_INFO, + "Processing M1 state transition from state %d\n", + mhi_dev_ctxt->mhi_state); + + mhi_dev_ctxt->counters.m0_m1++; + mhi_log(MHI_MSG_VERBOSE, + "Cancelling Inactivity timer\n"); + switch (hrtimer_try_to_cancel(&mhi_dev_ctxt->m1_timer)) { + case 0: + mhi_log(MHI_MSG_VERBOSE, + "Timer was not active\n"); + break; + case 1: + mhi_log(MHI_MSG_VERBOSE, + "Timer was active\n"); + break; + case -1: + mhi_log(MHI_MSG_VERBOSE, + "Timer executing and can't stop\n"); + break; + } + write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, 
flags);
	/* Only drop to M2 if no M3 (suspend) request is racing with us. */
	if (!mhi_dev_ctxt->flags.pending_M3) {
		mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
		mhi_log(MHI_MSG_INFO, "Allowing transition to M2\n");
		MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, MHICTRL,
					MHICTRL_MHISTATE_MASK,
					MHICTRL_MHISTATE_SHIFT,
					MHI_STATE_M2);
		mhi_dev_ctxt->counters.m1_m2++;
	}
	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
	/* Link is going idle: drop the bus bandwidth vote. */
	ret_val =
	mhi_set_bus_request(mhi_dev_ctxt,
					0);
	if (ret_val)
		mhi_log(MHI_MSG_INFO, "Failed to update bus request\n");
	/* cmpxchg guarantees only one M3 work item is ever queued. */
	if (!atomic_cmpxchg(&mhi_dev_ctxt->flags.m3_work_enabled, 0, 1)) {
		mhi_log(MHI_MSG_INFO, "Starting M3 deferred work\n");
		ret_val = queue_delayed_work(mhi_dev_ctxt->work_queue,
					&mhi_dev_ctxt->m3_work,
					msecs_to_jiffies(m3_timer_val_ms));
		if (ret_val == 0)
			mhi_log(MHI_MSG_CRITICAL,
				"Failed to start M3 delayed work.\n");
	}
	return MHI_STATUS_SUCCESS;
}

/*
 * process_m3_transition - Complete entry into M3 (device suspended).
 *
 * Cancels the M1 inactivity timer, records the M3 state under the
 * transfer lock, clears pending_M3 and wakes any thread blocked in
 * mhi_initiate_m3() waiting on M3_event.
 *
 * NOTE(review): counters.m0_m3 is incremented regardless of which state
 * we came from, so M1->M3 entries are also counted as m0_m3 — confirm
 * whether that is intended.
 */
static enum MHI_STATUS process_m3_transition(
		struct mhi_device_ctxt *mhi_dev_ctxt,
		enum STATE_TRANSITION cur_work_item)
{
	unsigned long flags;
	mhi_log(MHI_MSG_INFO,
			"Processing M3 state transition\n");
	switch (hrtimer_try_to_cancel(&mhi_dev_ctxt->m1_timer)) {
	case 0:
		mhi_log(MHI_MSG_VERBOSE,
			"Timer was not active\n");
		break;
	case 1:
		mhi_log(MHI_MSG_VERBOSE,
			"Timer was active\n");
		break;
	case -1:
		mhi_log(MHI_MSG_VERBOSE,
			"Timer executing and can't stop\n");
	}
	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	mhi_dev_ctxt->mhi_state = MHI_STATE_M3;
	mhi_dev_ctxt->flags.pending_M3 = 0;
	wake_up_interruptible(mhi_dev_ctxt->M3_event);
	write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
	mhi_dev_ctxt->counters.m0_m3++;
	return MHI_STATUS_SUCCESS;
}

/*
 * mhi_process_link_down - Tear down MHI state when the PCIe link is lost:
 * mark MHI uninitialized/RESET, deassert device wake, cancel the M0/M3
 * work items, stop the worker threads, then power down the link and drop
 * the bus vote.
 */
static enum MHI_STATUS mhi_process_link_down(
		struct mhi_device_ctxt *mhi_dev_ctxt)
{
	unsigned long flags;
	int r;
	mhi_log(MHI_MSG_INFO, "Entered.\n");
	if (NULL == mhi_dev_ctxt)
		return MHI_STATUS_ERROR;

	write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
	mhi_dev_ctxt->flags.mhi_initialized = 0;
+ mhi_dev_ctxt->mhi_state = MHI_STATE_RESET; + mhi_deassert_device_wake(mhi_dev_ctxt); + write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags); + + r = cancel_delayed_work_sync(&mhi_dev_ctxt->m3_work); + if (r) { + atomic_set(&mhi_dev_ctxt->flags.m3_work_enabled, 0); + mhi_log(MHI_MSG_INFO, "M3 work cancelled\n"); + } + + r = cancel_work_sync(&mhi_dev_ctxt->m0_work); + if (r) { + atomic_set(&mhi_dev_ctxt->flags.m0_work_enabled, 0); + mhi_log(MHI_MSG_INFO, "M0 work cancelled\n"); + } + mhi_dev_ctxt->flags.stop_threads = 1; + + while (!mhi_dev_ctxt->ev_thread_stopped) { + wake_up_interruptible(mhi_dev_ctxt->event_handle); + mhi_log(MHI_MSG_INFO, + "Waiting for threads to SUSPEND EVT: %d, STT: %d\n", + mhi_dev_ctxt->st_thread_stopped, + mhi_dev_ctxt->ev_thread_stopped); + msleep(20); + } + + switch (hrtimer_try_to_cancel(&mhi_dev_ctxt->m1_timer)) { + case 0: + mhi_log(MHI_MSG_CRITICAL, + "Timer was not active\n"); + break; + case 1: + mhi_log(MHI_MSG_CRITICAL, + "Timer was active\n"); + break; + case -1: + mhi_log(MHI_MSG_CRITICAL, + "Timer executing and can't stop\n"); + } + r = mhi_set_bus_request(mhi_dev_ctxt, 0); + if (r) + mhi_log(MHI_MSG_INFO, + "Failed to scale bus request to sleep set.\n"); + mhi_turn_off_pcie_link(mhi_dev_ctxt); + mhi_dev_ctxt->dev_info->link_down_cntr++; + atomic_set(&mhi_dev_ctxt->flags.data_pending, 0); + mhi_log(MHI_MSG_INFO, "Exited.\n"); + + return MHI_STATUS_SUCCESS; +} + +static enum MHI_STATUS process_link_down_transition( + struct mhi_device_ctxt *mhi_dev_ctxt, + enum STATE_TRANSITION cur_work_item) +{ + mhi_log(MHI_MSG_INFO, "Entered\n"); + if (MHI_STATUS_SUCCESS != + mhi_process_link_down(mhi_dev_ctxt)) { + mhi_log(MHI_MSG_CRITICAL, + "Failed to process link down\n"); + } + mhi_log(MHI_MSG_INFO, "Exited.\n"); + return MHI_STATUS_SUCCESS; +} + +static enum MHI_STATUS process_wake_transition( + struct mhi_device_ctxt *mhi_dev_ctxt, + enum STATE_TRANSITION cur_work_item) +{ + enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; + 
mhi_log(MHI_MSG_INFO, "Entered\n"); + __pm_stay_awake(&mhi_dev_ctxt->w_lock); + + if (atomic_read(&mhi_dev_ctxt->flags.pending_ssr)) { + mhi_log(MHI_MSG_CRITICAL, + "Pending SSR, Ignoring.\n"); + goto exit; + } + ret_val = mhi_turn_on_pcie_link(mhi_dev_ctxt); + + if (MHI_STATUS_SUCCESS != ret_val) { + mhi_log(MHI_MSG_CRITICAL, + "Failed to turn on PCIe link.\n"); + goto exit; + } + if (mhi_dev_ctxt->flags.mhi_initialized && + mhi_dev_ctxt->flags.link_up) { + mhi_log(MHI_MSG_VERBOSE, + "MHI is initialized, transitioning to M0.\n"); + mhi_initiate_m0(mhi_dev_ctxt); + } + if (!mhi_dev_ctxt->flags.mhi_initialized) { + mhi_log(MHI_MSG_INFO, + "MHI is not initialized transitioning to base.\n"); + ret_val = init_mhi_base_state(mhi_dev_ctxt); + if (MHI_STATUS_SUCCESS != ret_val) + mhi_log(MHI_MSG_CRITICAL, + "Failed to transition to base state %d.\n", + ret_val); + } + +exit: + __pm_relax(&mhi_dev_ctxt->w_lock); + mhi_log(MHI_MSG_INFO, "Exited.\n"); + return ret_val; + +} + +static enum MHI_STATUS process_bhi_transition( + struct mhi_device_ctxt *mhi_dev_ctxt, + enum STATE_TRANSITION cur_work_item) +{ + mhi_turn_on_pcie_link(mhi_dev_ctxt); + mhi_log(MHI_MSG_INFO, "Entered\n"); + mhi_dev_ctxt->mhi_state = MHI_STATE_BHI; + wake_up_interruptible(mhi_dev_ctxt->bhi_event); + mhi_log(MHI_MSG_INFO, "Exited\n"); + return MHI_STATUS_SUCCESS; +} + +static enum MHI_STATUS process_ready_transition( + struct mhi_device_ctxt *mhi_dev_ctxt, + enum STATE_TRANSITION cur_work_item) +{ + enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; + mhi_log(MHI_MSG_INFO, "Processing READY state transition\n"); + mhi_dev_ctxt->mhi_state = MHI_STATE_READY; + + ret_val = mhi_reset_all_thread_queues(mhi_dev_ctxt); + + if (MHI_STATUS_SUCCESS != ret_val) + mhi_log(MHI_MSG_ERROR, + "Failed to reset thread queues\n"); + + /* Initialize MMIO */ + if (MHI_STATUS_SUCCESS != mhi_init_mmio(mhi_dev_ctxt)) { + mhi_log(MHI_MSG_ERROR, + "Failure during MMIO initialization\n"); + return MHI_STATUS_ERROR; + } + ret_val = 
mhi_add_elements_to_event_rings(mhi_dev_ctxt, + cur_work_item); + + if (MHI_STATUS_SUCCESS != ret_val) { + mhi_log(MHI_MSG_ERROR, + "Failure during event ring init\n"); + return MHI_STATUS_ERROR; + } + + mhi_dev_ctxt->flags.stop_threads = 0; + MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, MHICTRL, + MHICTRL_MHISTATE_MASK, + MHICTRL_MHISTATE_SHIFT, + MHI_STATE_M0); + return MHI_STATUS_SUCCESS; +} + +static void mhi_reset_chan_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt, + int chan) +{ + struct mhi_chan_ctxt *chan_ctxt = + &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan]; + struct mhi_ring *local_chan_ctxt = + &mhi_dev_ctxt->mhi_local_chan_ctxt[chan]; + chan_ctxt->mhi_trb_read_ptr = chan_ctxt->mhi_trb_ring_base_addr; + chan_ctxt->mhi_trb_write_ptr = chan_ctxt->mhi_trb_ring_base_addr; + local_chan_ctxt->rp = local_chan_ctxt->base; + local_chan_ctxt->wp = local_chan_ctxt->base; + local_chan_ctxt->ack_rp = local_chan_ctxt->base; +} + +static void mhi_reset_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt, + int index) +{ + struct mhi_event_ctxt *ev_ctxt; + struct mhi_ring *local_ev_ctxt; + mhi_log(MHI_MSG_VERBOSE, "Resetting event index %d\n", index); + ev_ctxt = + &mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[index]; + local_ev_ctxt = + &mhi_dev_ctxt->mhi_local_event_ctxt[index]; + ev_ctxt->mhi_event_read_ptr = ev_ctxt->mhi_event_ring_base_addr; + ev_ctxt->mhi_event_write_ptr = ev_ctxt->mhi_event_ring_base_addr; + local_ev_ctxt->rp = local_ev_ctxt->base; + local_ev_ctxt->wp = local_ev_ctxt->base; +} + +static enum MHI_STATUS process_reset_transition( + struct mhi_device_ctxt *mhi_dev_ctxt, + enum STATE_TRANSITION cur_work_item) +{ + u32 i = 0; + u32 ev_ring_index; + enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; + mhi_log(MHI_MSG_INFO, "Processing RESET state transition\n"); + mhi_dev_ctxt->counters.mhi_reset_cntr++; + mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_PBL; + ret_val = mhi_test_for_device_ready(mhi_dev_ctxt); + switch (ret_val) { + case MHI_STATUS_SUCCESS: + break; + case 
MHI_STATUS_LINK_DOWN: + mhi_log(MHI_MSG_CRITICAL, "Link down detected\n"); + break; + case MHI_STATUS_DEVICE_NOT_READY: + ret_val = mhi_init_state_transition(mhi_dev_ctxt, + STATE_TRANSITION_RESET); + if (MHI_STATUS_SUCCESS != ret_val) + mhi_log(MHI_MSG_CRITICAL, + "Failed to initiate 0x%x state trans\n", + STATE_TRANSITION_RESET); + break; + default: + mhi_log(MHI_MSG_CRITICAL, + "Unexpected ret code detected for\n"); + break; + } + for (i = 0; i < NR_OF_CMD_RINGS; ++i) { + mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp = + mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base; + mhi_dev_ctxt->mhi_local_cmd_ctxt[i].wp = + mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base; + mhi_dev_ctxt->mhi_ctrl_seg->mhi_cmd_ctxt_list[i]. + mhi_cmd_ring_read_ptr = + mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, + (uintptr_t)mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp); + } + for (i = 0; i < EVENT_RINGS_ALLOCATED; ++i) { + ev_ring_index = mhi_dev_ctxt->alloced_ev_rings[i]; + mhi_reset_ev_ctxt(mhi_dev_ctxt, ev_ring_index); + } + for (i = 0; i < MHI_MAX_CHANNELS; ++i) { + if (VALID_CHAN_NR(i)) + mhi_reset_chan_ctxt(mhi_dev_ctxt, i); + } + ret_val = mhi_init_state_transition(mhi_dev_ctxt, + STATE_TRANSITION_READY); + if (MHI_STATUS_SUCCESS != ret_val) + mhi_log(MHI_MSG_CRITICAL, + "Failed to initiate 0x%x state trans\n", + STATE_TRANSITION_READY); + return ret_val; +} + +static enum MHI_STATUS process_syserr_transition( + struct mhi_device_ctxt *mhi_dev_ctxt, + enum STATE_TRANSITION cur_work_item) +{ + enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; + mhi_log(MHI_MSG_CRITICAL, "Received SYS ERROR. 
Resetting MHI\n"); + if (MHI_STATUS_SUCCESS != ret_val) { + mhi_log(MHI_MSG_CRITICAL, "Failed to reset mhi\n"); + return MHI_STATUS_ERROR; + } + mhi_dev_ctxt->mhi_state = MHI_STATE_RESET; + if (MHI_STATUS_SUCCESS != mhi_init_state_transition(mhi_dev_ctxt, + STATE_TRANSITION_RESET)) + mhi_log(MHI_MSG_ERROR, + "Failed to init state transition to RESET.\n"); + return ret_val; +} + +enum MHI_STATUS start_chan_sync(struct mhi_client_handle *client_handle) +{ + enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; + int r = 0; + ret_val = mhi_send_cmd(client_handle->mhi_dev_ctxt, + MHI_COMMAND_START_CHAN, + client_handle->chan); + if (ret_val != MHI_STATUS_SUCCESS) { + mhi_log(MHI_MSG_ERROR, + "Failed to send start command for chan %d ret %d\n", + MHI_CLIENT_SAHARA_OUT, ret_val); + return ret_val; + } + r = wait_for_completion_interruptible_timeout( + &client_handle->chan_open_complete, + msecs_to_jiffies(MHI_MAX_CMD_TIMEOUT)); + if (0 == r || -ERESTARTSYS == r) { + mhi_log(MHI_MSG_ERROR, + "Failed to start chan %d ret %d\n", + client_handle->chan, r); + ret_val = MHI_STATUS_ERROR; + } + return ret_val; +} + +static void enable_clients(struct mhi_device_ctxt *mhi_dev_ctxt, + enum MHI_EXEC_ENV exec_env) +{ + struct mhi_client_handle *client_handle = NULL; + struct mhi_cb_info cb_info; + int i; + + cb_info.cb_reason = MHI_CB_MHI_ENABLED; + switch (exec_env) { + case MHI_EXEC_ENV_SBL: + mhi_log(MHI_MSG_INFO, "Enabling SBL clients.\n"); + + client_handle = + mhi_dev_ctxt->client_handle_list[MHI_CLIENT_SAHARA_OUT]; + + mhi_notify_client(client_handle, MHI_CB_MHI_ENABLED); + + client_handle = + mhi_dev_ctxt->client_handle_list[MHI_CLIENT_SAHARA_IN]; + + mhi_notify_client(client_handle, MHI_CB_MHI_ENABLED); + break; + case MHI_EXEC_ENV_AMSS: + mhi_log(MHI_MSG_INFO, "Enabling AMSS clients\n"); + for (i = 0; i < MHI_MAX_CHANNELS; ++i) { + if (VALID_CHAN_NR(i) && + i != MHI_CLIENT_SAHARA_OUT && + i != MHI_CLIENT_SAHARA_IN) { + client_handle = + mhi_dev_ctxt->client_handle_list[i]; + 
mhi_notify_client(client_handle, + MHI_CB_MHI_ENABLED); + } + } + break; + default: + mhi_log(MHI_MSG_ERROR, + "Unrecognized exec_env %d\n", exec_env); + break; + } + mhi_log(MHI_MSG_INFO, "Done.\n"); +} + +static enum MHI_STATUS process_sbl_transition( + struct mhi_device_ctxt *mhi_dev_ctxt, + enum STATE_TRANSITION cur_work_item) +{ + mhi_log(MHI_MSG_INFO, "Processing SBL state transition\n"); + mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_SBL; + wmb(); + enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env); + return MHI_STATUS_SUCCESS; +} + +static enum MHI_STATUS process_amss_transition( + struct mhi_device_ctxt *mhi_dev_ctxt, + enum STATE_TRANSITION cur_work_item) +{ + enum MHI_STATUS ret_val; + mhi_log(MHI_MSG_INFO, "Processing AMSS state transition\n"); + mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_AMSS; + atomic_inc(&mhi_dev_ctxt->flags.data_pending); + mhi_assert_device_wake(mhi_dev_ctxt); + if (0 == mhi_dev_ctxt->flags.mhi_initialized) { + ret_val = mhi_add_elements_to_event_rings(mhi_dev_ctxt, + cur_work_item); + if (MHI_STATUS_SUCCESS != ret_val) + return MHI_STATUS_ERROR; + mhi_dev_ctxt->flags.mhi_initialized = 1; + if (MHI_STATUS_SUCCESS != ret_val) + mhi_log(MHI_MSG_CRITICAL, + "Failed to set local chan state\n"); + ring_all_chan_dbs(mhi_dev_ctxt); + mhi_log(MHI_MSG_INFO, + "Notifying clients that MHI is enabled\n"); + if (ret_val != MHI_STATUS_SUCCESS) + mhi_log(MHI_MSG_CRITICAL, + "Failed to probe MHI CORE clients, ret 0x%x\n", + ret_val); + } + enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env); + atomic_dec(&mhi_dev_ctxt->flags.data_pending); + mhi_log(MHI_MSG_INFO, "Exited\n"); + return MHI_STATUS_SUCCESS; +} + +static void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt, + enum MHI_STATE new_state) +{ + MHI_REG_WRITE_FIELD(mhi_dev_ctxt->mmio_addr, MHICTRL, + MHICTRL_MHISTATE_MASK, + MHICTRL_MHISTATE_SHIFT, + new_state); +} + +static enum MHI_STATUS process_stt_work_item( + struct mhi_device_ctxt *mhi_dev_ctxt, + enum STATE_TRANSITION 
cur_work_item) +{ + enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; + + mhi_log(MHI_MSG_INFO, "Transitioning to %d\n", + (int)cur_work_item); + switch (cur_work_item) { + case STATE_TRANSITION_BHI: + ret_val = process_bhi_transition(mhi_dev_ctxt, cur_work_item); + break; + case STATE_TRANSITION_RESET: + ret_val = process_reset_transition(mhi_dev_ctxt, cur_work_item); + break; + case STATE_TRANSITION_READY: + ret_val = process_ready_transition(mhi_dev_ctxt, cur_work_item); + break; + case STATE_TRANSITION_SBL: + ret_val = process_sbl_transition(mhi_dev_ctxt, cur_work_item); + break; + case STATE_TRANSITION_AMSS: + ret_val = process_amss_transition(mhi_dev_ctxt, cur_work_item); + break; + case STATE_TRANSITION_M0: + ret_val = process_m0_transition(mhi_dev_ctxt, cur_work_item); + break; + case STATE_TRANSITION_M1: + ret_val = process_m1_transition(mhi_dev_ctxt, cur_work_item); + break; + case STATE_TRANSITION_M3: + ret_val = process_m3_transition(mhi_dev_ctxt, cur_work_item); + break; + case STATE_TRANSITION_SYS_ERR: + ret_val = process_syserr_transition(mhi_dev_ctxt, + cur_work_item); + break; + case STATE_TRANSITION_LINK_DOWN: + ret_val = process_link_down_transition(mhi_dev_ctxt, + cur_work_item); + break; + case STATE_TRANSITION_WAKE: + ret_val = process_wake_transition(mhi_dev_ctxt, cur_work_item); + break; + default: + mhi_log(MHI_MSG_ERROR, + "Unrecongized state: %d\n", cur_work_item); + break; + } + return ret_val; +} + +int mhi_state_change_thread(void *ctxt) +{ + int r = 0; + unsigned long flags = 0; + struct mhi_device_ctxt *mhi_dev_ctxt = (struct mhi_device_ctxt *)ctxt; + enum STATE_TRANSITION cur_work_item; + enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; + struct mhi_state_work_queue *work_q = + &mhi_dev_ctxt->state_change_work_item_list; + struct mhi_ring *state_change_q = &work_q->q_info; + + if (NULL == mhi_dev_ctxt) { + mhi_log(MHI_MSG_ERROR, "Got bad context, quitting\n"); + return -EIO; + } + for (;;) { + r = wait_event_interruptible( + 
*mhi_dev_ctxt->state_change_event_handle, + ((work_q->q_info.rp != work_q->q_info.wp) && + !mhi_dev_ctxt->st_thread_stopped)); + if (r) { + mhi_log(MHI_MSG_INFO, + "Caught signal %d, quitting\n", r); + return 0; + } + + if (mhi_dev_ctxt->flags.kill_threads) { + mhi_log(MHI_MSG_INFO, + "Caught exit signal, quitting\n"); + return 0; + } + mhi_dev_ctxt->st_thread_stopped = 0; + spin_lock_irqsave(work_q->q_lock, flags); + cur_work_item = *(enum STATE_TRANSITION *)(state_change_q->rp); + ret_val = ctxt_del_element(&work_q->q_info, NULL); + MHI_ASSERT(ret_val == MHI_STATUS_SUCCESS, + "Failed to delete element from STT workqueue\n"); + spin_unlock_irqrestore(work_q->q_lock, flags); + ret_val = process_stt_work_item(mhi_dev_ctxt, cur_work_item); + } + return 0; +} + +/** + * mhi_reset_channel - Reset for a single MHI channel + * + * @client_handle device context + * + */ +enum MHI_STATUS mhi_reset_channel(struct mhi_client_handle *client_handle) +{ + enum MHI_STATUS ret_val; + struct mhi_chan_ctxt *cur_ctxt = NULL; + struct mhi_device_ctxt *mhi_dev_ctxt = NULL; + u32 chan_id = 0; + struct mhi_ring *cur_ring = NULL; + + chan_id = client_handle->chan; + mhi_dev_ctxt = client_handle->mhi_dev_ctxt; + + if (chan_id > (MHI_MAX_CHANNELS - 1) || NULL == mhi_dev_ctxt) { + mhi_log(MHI_MSG_ERROR, "Bad input parameters\n"); + return MHI_STATUS_ERROR; + } + + mutex_lock(&mhi_dev_ctxt->mhi_chan_mutex[chan_id]); + + /* We need to reset the channel completley, we will assume that our + * base is correct*/ + cur_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[chan_id]; + cur_ring = &mhi_dev_ctxt->mhi_local_event_ctxt[chan_id]; + memset(cur_ring->base, 0, sizeof(char)*cur_ring->len); + + if (IS_HARDWARE_CHANNEL(chan_id)) { + ret_val = mhi_init_chan_ctxt(cur_ctxt, + mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, + (uintptr_t)cur_ring->base), + (uintptr_t)cur_ring->base, + MAX_NR_TRBS_PER_HARD_CHAN, + (chan_id % 2) ? MHI_IN : MHI_OUT, + (chan_id % 2) ? 
IPA_IN_EV_RING : IPA_OUT_EV_RING, + cur_ring); + } else { + ret_val = mhi_init_chan_ctxt(cur_ctxt, + mhi_v2p_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, + (uintptr_t)cur_ring->base), + (uintptr_t)cur_ring->base, + MAX_NR_TRBS_PER_SOFT_CHAN, + (chan_id % 2) ? MHI_IN : MHI_OUT, + SOFTWARE_EV_RING, + cur_ring); + } + + if (MHI_STATUS_SUCCESS != ret_val) + mhi_log(MHI_MSG_ERROR, "Failed to reset chan ctxt\n"); + + + mutex_unlock(&mhi_dev_ctxt->mhi_chan_mutex[chan_id]); + return ret_val; +} + +/** + * mhi_init_state_transition - Add a new state transition work item to + * the state transition thread work item list. + * + * @mhi_dev_ctxt The mhi_dev_ctxt context + * @new_state The state we wish to transition to + * + */ +enum MHI_STATUS mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt, + enum STATE_TRANSITION new_state) +{ + unsigned long flags = 0; + enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS; + enum STATE_TRANSITION *cur_work_item = NULL; + s32 nr_avail_work_items = 0; + struct mhi_ring *stt_ring = + &mhi_dev_ctxt->state_change_work_item_list.q_info; + struct mhi_state_work_queue *work_q = + &mhi_dev_ctxt->state_change_work_item_list; + + spin_lock_irqsave(work_q->q_lock, flags); + nr_avail_work_items = get_nr_avail_ring_elements(stt_ring); + + if (0 >= nr_avail_work_items) { + mhi_log(MHI_MSG_CRITICAL, "No Room left on STT work queue\n"); + return MHI_STATUS_ERROR; + } + mhi_log(MHI_MSG_VERBOSE, + "Processing state transition %x\n", + new_state); + *(enum STATE_TRANSITION *)stt_ring->wp = new_state; + ret_val = ctxt_add_element(stt_ring, (void **)&cur_work_item); + wmb(); + MHI_ASSERT(MHI_STATUS_SUCCESS == ret_val, + "Failed to add selement to STT workqueue\n"); + spin_unlock_irqrestore(work_q->q_lock, flags); + wake_up_interruptible(mhi_dev_ctxt->state_change_event_handle); + return ret_val; +} + +void delayed_m3(struct work_struct *work) +{ + int r; + struct delayed_work *del_work = to_delayed_work(work); + struct mhi_device_ctxt *mhi_dev_ctxt = 
container_of(del_work, + struct mhi_device_ctxt, m3_work); + r = mhi_initiate_m3(mhi_dev_ctxt); + if (r) + mhi_log(MHI_MSG_INFO, "Failed to initiate M3 ret: %d\n", r); + +} + +void m0_work(struct work_struct *work) +{ + struct mhi_device_ctxt *mhi_dev_ctxt = + container_of(work, struct mhi_device_ctxt, m0_work); + if (!atomic_read(&mhi_dev_ctxt->flags.pending_resume)) { + mhi_log(MHI_MSG_INFO, "No pending resume, initiating M0.\n"); + mhi_initiate_m0(mhi_dev_ctxt); + } else { + mhi_log(MHI_MSG_INFO, "Pending resume, quitting.\n"); + } +} + +int mhi_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt) +{ + int r = 0; + unsigned long flags; + + mhi_log(MHI_MSG_INFO, + "Entered MHI state %d, Pending M0 %d Pending M3 %d\n", + mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.pending_M0, + mhi_dev_ctxt->flags.pending_M3); + mutex_lock(&mhi_dev_ctxt->pm_lock); + mhi_log(MHI_MSG_INFO, + "Waiting for M0 M1 or M3. Currently %d...\n", + mhi_dev_ctxt->mhi_state); + + r = wait_event_interruptible_timeout(*mhi_dev_ctxt->M3_event, + mhi_dev_ctxt->mhi_state == MHI_STATE_M3 || + mhi_dev_ctxt->mhi_state == MHI_STATE_M0 || + mhi_dev_ctxt->mhi_state == MHI_STATE_M1, + msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT)); + switch (r) { + case 0: + mhi_log(MHI_MSG_CRITICAL, + "Timeout: State %d after %d ms\n", + mhi_dev_ctxt->mhi_state, + MHI_MAX_SUSPEND_TIMEOUT); + mhi_dev_ctxt->counters.m0_event_timeouts++; + r = -ETIME; + goto exit; + break; + case -ERESTARTSYS: + mhi_log(MHI_MSG_CRITICAL, + "Going Down...\n"); + goto exit; + break; + default: + mhi_log(MHI_MSG_INFO, + "Wait complete state: %d\n", mhi_dev_ctxt->mhi_state); + r = 0; + break; + } + if (mhi_dev_ctxt->mhi_state == MHI_STATE_M0 || + mhi_dev_ctxt->mhi_state == MHI_STATE_M1) { + mhi_assert_device_wake(mhi_dev_ctxt); + mhi_log(MHI_MSG_INFO, + "MHI state %d, done\n", + mhi_dev_ctxt->mhi_state); + goto exit; + } else { + if (MHI_STATUS_SUCCESS != mhi_turn_on_pcie_link(mhi_dev_ctxt)) { + mhi_log(MHI_MSG_CRITICAL, + "Failed to resume link\n"); 
+ r = -EIO; + goto exit; + } + + write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags); + mhi_log(MHI_MSG_VERBOSE, "Setting M0 ...\n"); + if (mhi_dev_ctxt->flags.pending_M3) { + mhi_log(MHI_MSG_INFO, + "Pending M3 detected, aborting M0 procedure\n"); + write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, + flags); + r = -EPERM; + goto exit; + } + if (mhi_dev_ctxt->flags.link_up) { + mhi_dev_ctxt->flags.pending_M0 = 1; + mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M0); + } + write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags); + } +exit: + atomic_set(&mhi_dev_ctxt->flags.m0_work_enabled, 0); + mutex_unlock(&mhi_dev_ctxt->pm_lock); + mhi_log(MHI_MSG_INFO, "Exited...\n"); + return r; +} + +int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt) +{ + + unsigned long flags; + int r; + + mhi_log(MHI_MSG_INFO, + "Entered MHI state %d, Pending M0 %d Pending M3 %d\n", + mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.pending_M0, + mhi_dev_ctxt->flags.pending_M3); + mutex_lock(&mhi_dev_ctxt->pm_lock); + switch (mhi_dev_ctxt->mhi_state) { + case MHI_STATE_RESET: + mhi_log(MHI_MSG_INFO, + "MHI in RESET turning link off and quitting\n"); + mhi_turn_off_pcie_link(mhi_dev_ctxt); + r = mhi_set_bus_request(mhi_dev_ctxt, 0); + if (r) + mhi_log(MHI_MSG_INFO, + "Failed to set bus freq ret %d\n", r); + goto exit; + break; + case MHI_STATE_M1: + case MHI_STATE_M2: + mhi_log(MHI_MSG_INFO, + "Triggering wake out of M2\n"); + write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags); + mhi_dev_ctxt->flags.pending_M3 = 1; + mhi_assert_device_wake(mhi_dev_ctxt); + write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags); + r = wait_event_interruptible_timeout(*mhi_dev_ctxt->M0_event, + mhi_dev_ctxt->mhi_state == MHI_STATE_M0 || + mhi_dev_ctxt->mhi_state == MHI_STATE_M1, + msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT)); + if (0 == r || -ERESTARTSYS == r) { + mhi_log(MHI_MSG_INFO, + "MDM failed to come out of M2.\n"); + goto exit; + } + break; + case MHI_STATE_M3: + mhi_log(MHI_MSG_INFO, + "MHI state %d, link 
state %d.\n", + mhi_dev_ctxt->mhi_state, + mhi_dev_ctxt->flags.link_up); + if (mhi_dev_ctxt->flags.link_up) + r = -EPERM; + else + r = 0; + goto exit; + default: + mhi_log(MHI_MSG_INFO, + "MHI state %d, link state %d.\n", + mhi_dev_ctxt->mhi_state, + mhi_dev_ctxt->flags.link_up); + break; + } + while (atomic_read(&mhi_dev_ctxt->counters.outbound_acks)) { + mhi_log(MHI_MSG_INFO, + "There are still %d acks pending from device\n", + atomic_read(&mhi_dev_ctxt->counters.outbound_acks)); + __pm_stay_awake(&mhi_dev_ctxt->w_lock); + __pm_relax(&mhi_dev_ctxt->w_lock); + goto exit; + } + + if (atomic_read(&mhi_dev_ctxt->flags.data_pending)) + goto exit; + r = hrtimer_cancel(&mhi_dev_ctxt->m1_timer); + if (r) + mhi_log(MHI_MSG_INFO, "Cancelled M1 timer, timer was active\n"); + else + mhi_log(MHI_MSG_INFO, + "Cancelled M1 timer, timer was not active\n"); + write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags); + if (mhi_dev_ctxt->flags.pending_M0) { + write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags); + mhi_log(MHI_MSG_INFO, + "Pending M0 detected, aborting M3 procedure\n"); + r = -EPERM; + goto exit; + } + mhi_dev_ctxt->flags.pending_M3 = 1; + + mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3); + write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags); + + mhi_log(MHI_MSG_INFO, + "Waiting for M3 completion.\n"); + r = wait_event_interruptible_timeout(*mhi_dev_ctxt->M3_event, + mhi_dev_ctxt->mhi_state == MHI_STATE_M3, + msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT)); + switch (r) { + case 0: + mhi_log(MHI_MSG_CRITICAL, + "MDM failed to suspend after %d ms\n", + MHI_MAX_SUSPEND_TIMEOUT); + mhi_dev_ctxt->counters.m3_event_timeouts++; + mhi_dev_ctxt->flags.pending_M3 = 0; + goto exit; + break; + case -ERESTARTSYS: + mhi_log(MHI_MSG_CRITICAL, + "Going Down...\n"); + goto exit; + break; + default: + mhi_log(MHI_MSG_INFO, + "M3 completion received\n"); + break; + } + mhi_deassert_device_wake(mhi_dev_ctxt); + mhi_turn_off_pcie_link(mhi_dev_ctxt); + r = mhi_set_bus_request(mhi_dev_ctxt, 
0); + if (r) + mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r); +exit: + atomic_set(&mhi_dev_ctxt->flags.m3_work_enabled, 0); + mhi_dev_ctxt->flags.pending_M3 = 0; + mutex_unlock(&mhi_dev_ctxt->pm_lock); + return r; +} diff --git a/drivers/platform/msm/mhi/mhi_sys.c b/drivers/platform/msm/mhi/mhi_sys.c new file mode 100644 index 000000000000..07cd5f8b03c6 --- /dev/null +++ b/drivers/platform/msm/mhi/mhi_sys.c @@ -0,0 +1,365 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/uaccess.h> +#include <linux/debugfs.h> +#include <linux/slab.h> + +#include "mhi_sys.h" + +enum MHI_DEBUG_LEVEL mhi_msg_lvl = MHI_MSG_CRITICAL; +enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_INFO; +enum MHI_DEBUG_CLASS mhi_msg_class = MHI_DBG_DATA | MHI_DBG_POWER; + +enum MHI_DEBUG_LEVEL mhi_xfer_db_interval; +module_param(mhi_xfer_db_interval, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(mhi_xfer_db_interval, "mhi xfer doorbell interval"); +enum MHI_DEBUG_LEVEL tx_mhi_intmodt = 10; +module_param(tx_mhi_intmodt, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(tx_mhi_intmodt, "xfer interrupt modulation"); +enum MHI_DEBUG_LEVEL rx_mhi_intmodt = 6; +module_param(rx_mhi_intmodt, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(rx_mhi_intmodt, "rcver interrupt modulation"); + +module_param(mhi_msg_lvl , uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(mhi_msg_lvl, "dbg lvl"); +module_param(mhi_ipc_log_lvl, uint, S_IRUGO | S_IWUSR); 
+MODULE_PARM_DESC(mhi_ipc_log_lvl, "dbg lvl"); + +module_param(mhi_msg_class , uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(mhi_msg_class, "dbg class"); +u32 m3_timer_val_ms = 1000; +module_param(m3_timer_val_ms, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(m3_timer_val_ms, "timer val"); + +static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf, + size_t count, loff_t *offp) +{ + int amnt_copied = 0; + struct mhi_chan_ctxt *chan_ctxt; + struct mhi_device_ctxt *mhi_dev_ctxt = + &mhi_devices.device_list[0].mhi_ctxt; + uintptr_t v_wp_index; + uintptr_t v_rp_index; + if (NULL == mhi_dev_ctxt) + return -EIO; + *offp = (u32)(*offp) % MHI_MAX_CHANNELS; + if (*offp == (MHI_MAX_CHANNELS - 1)) + msleep(1000); + while (!VALID_CHAN_NR(*offp)) { + *offp += 1; + *offp = (u32)(*offp) % MHI_MAX_CHANNELS; + } + + get_element_index(&mhi_dev_ctxt->mhi_local_chan_ctxt[*offp], + mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].rp, + &v_rp_index); + get_element_index(&mhi_dev_ctxt->mhi_local_chan_ctxt[*offp], + mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].wp, + &v_wp_index); + chan_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_cc_list[*offp]; + amnt_copied = + scnprintf(mhi_dev_ctxt->chan_info, + MHI_LOG_SIZE, + "%s0x%x %s %d %s 0x%x %s 0x%llx %s %p %s %p %s %lu %s %p %s %lu %s %d %s %d\n", + "chan:", + (unsigned int)*offp, + "pkts from dev:", + mhi_dev_ctxt->mhi_chan_cntr[*offp].pkts_xferd, + "state:", + chan_ctxt->mhi_chan_state, + "p_base:", + chan_ctxt->mhi_trb_ring_base_addr, + "v_base:", + mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].base, + "v_wp:", + mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].wp, + "index:", + v_wp_index, + "v_rp:", + mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].rp, + "index:", + v_rp_index, + "pkts_queued", + get_nr_avail_ring_elements( + &mhi_dev_ctxt->mhi_local_chan_ctxt[*offp]), + "/", + mhi_get_chan_max_buffers(*offp)); + + *offp += 1; + + if (amnt_copied < count) + return amnt_copied - + copy_to_user(buf, mhi_dev_ctxt->chan_info, amnt_copied); + else + return -ENOMEM; +} 
+ +static const struct file_operations mhi_dbgfs_chan_fops = { + .read = mhi_dbgfs_chan_read, + .write = NULL, +}; + +static ssize_t mhi_dbgfs_ev_read(struct file *fp, char __user *buf, + size_t count, loff_t *offp) +{ + int amnt_copied = 0; + int event_ring_index = 0; + struct mhi_event_ctxt *ev_ctxt; + uintptr_t v_wp_index; + uintptr_t v_rp_index; + uintptr_t device_p_rp_index; + + struct mhi_device_ctxt *mhi_dev_ctxt = + &mhi_devices.device_list[0].mhi_ctxt; + if (NULL == mhi_dev_ctxt) + return -EIO; + *offp = (u32)(*offp) % EVENT_RINGS_ALLOCATED; + event_ring_index = mhi_dev_ctxt->alloced_ev_rings[*offp]; + ev_ctxt = &mhi_dev_ctxt->mhi_ctrl_seg->mhi_ec_list[event_ring_index]; + if (*offp == (EVENT_RINGS_ALLOCATED - 1)) + msleep(1000); + + get_element_index(&mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index], + mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index].rp, + &v_rp_index); + get_element_index(&mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index], + mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index].wp, + &v_wp_index); + get_element_index(&mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index], + mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index].wp, + &v_wp_index); + get_element_index(&mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index], + (void *)mhi_p2v_addr(mhi_dev_ctxt->mhi_ctrl_seg_info, + ev_ctxt->mhi_event_read_ptr), + &device_p_rp_index); + + amnt_copied = + scnprintf(mhi_dev_ctxt->chan_info, + MHI_LOG_SIZE, + "%s 0x%08x %s %02x %s 0x%08x %s 0x%08x %s 0x%llx %s %llx %s %lu %s %p %s %p %s %lu %s %p %s %lu\n", + "Event Context ", + (unsigned int)event_ring_index, + "Intmod_T", + MHI_GET_EV_CTXT(EVENT_CTXT_INTMODT, ev_ctxt), + "MSI Vector", + ev_ctxt->mhi_msi_vector, + "MSI RX Count", + mhi_dev_ctxt->msi_counter[*offp], + "p_base:", + ev_ctxt->mhi_event_ring_base_addr, + "p_rp:", + ev_ctxt->mhi_event_read_ptr, + "index:", + device_p_rp_index, + "v_base:", + mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index].base, + "v_wp:", + 
mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index].wp, + "index:", + v_wp_index, + "v_rp:", + mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index].rp, + "index:", + v_rp_index); + + *offp += 1; + if (amnt_copied < count) + return amnt_copied - + copy_to_user(buf, mhi_dev_ctxt->chan_info, amnt_copied); + else + return -ENOMEM; +} + +static const struct file_operations mhi_dbgfs_ev_fops = { + .read = mhi_dbgfs_ev_read, + .write = NULL, +}; + +static ssize_t mhi_dbgfs_trigger_msi(struct file *fp, const char __user *buf, + size_t count, loff_t *offp) +{ + u32 msi_nr = 0; + void *irq_ctxt = &((mhi_devices.device_list[0]).pcie_device->dev); + if (copy_from_user(&msi_nr, buf, sizeof(msi_nr))) + return -ENOMEM; + mhi_msi_handlr(msi_nr, irq_ctxt); + return 0; +} + +static const struct file_operations mhi_dbgfs_trigger_msi_fops = { + .read = NULL, + .write = mhi_dbgfs_trigger_msi, +}; + +static ssize_t mhi_dbgfs_state_read(struct file *fp, char __user *buf, + size_t count, loff_t *offp) +{ + int amnt_copied = 0; + struct mhi_device_ctxt *mhi_dev_ctxt = + &mhi_devices.device_list[0].mhi_ctxt; + if (NULL == mhi_dev_ctxt) + return -EIO; + msleep(100); + amnt_copied = + scnprintf(mhi_dev_ctxt->chan_info, + MHI_LOG_SIZE, + "%s %u %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d, %s, %d, %s %d\n", + "Our State:", + mhi_dev_ctxt->mhi_state, + "M0->M1:", + mhi_dev_ctxt->counters.m0_m1, + "M0<-M1:", + mhi_dev_ctxt->counters.m1_m0, + "M1->M2:", + mhi_dev_ctxt->counters.m1_m2, + "M0<-M2:", + mhi_dev_ctxt->counters.m2_m0, + "M0->M3:", + mhi_dev_ctxt->counters.m0_m3, + "M0<-M3:", + mhi_dev_ctxt->counters.m3_m0, + "M3_ev_TO:", + mhi_dev_ctxt->counters.m3_event_timeouts, + "M0_ev_TO:", + mhi_dev_ctxt->counters.m0_event_timeouts, + "MSI_d:", + mhi_dev_ctxt->counters.msi_disable_cntr, + "MSI_e:", + mhi_dev_ctxt->counters.msi_enable_cntr, + "outstanding_acks:", + atomic_read(&mhi_dev_ctxt->counters.outbound_acks), + "LPM:", + mhi_dev_ctxt->enable_lpm); + if (amnt_copied < count) 
+ return amnt_copied - copy_to_user(buf, + mhi_dev_ctxt->chan_info, amnt_copied); + else + return -ENOMEM; +} + +static const struct file_operations mhi_dbgfs_state_fops = { + .read = mhi_dbgfs_state_read, + .write = NULL, +}; + +uintptr_t mhi_p2v_addr(struct mhi_meminfo *meminfo, phys_addr_t pa) +{ + return meminfo->va_aligned + (pa - meminfo->pa_aligned); +} + +phys_addr_t mhi_v2p_addr(struct mhi_meminfo *meminfo, uintptr_t va) +{ + return meminfo->pa_aligned + (va - meminfo->va_aligned); +} + +inline void *mhi_get_virt_addr(struct mhi_meminfo *meminfo) +{ + return (void *)meminfo->va_aligned; +} + +inline u64 mhi_get_memregion_len(struct mhi_meminfo *meminfo) +{ + return meminfo->size; +} + +enum MHI_STATUS mhi_mallocmemregion(struct mhi_meminfo *meminfo, size_t size) +{ + meminfo->va_unaligned = (uintptr_t)dma_alloc_coherent(NULL, + size, + (dma_addr_t *)&(meminfo->pa_unaligned), + GFP_KERNEL); + if (!meminfo->va_unaligned) + return MHI_STATUS_ERROR; + meminfo->va_aligned = meminfo->va_unaligned; + meminfo->pa_aligned = meminfo->pa_unaligned; + meminfo->size = size; + if ((meminfo->pa_unaligned + size) >= MHI_DATA_SEG_WINDOW_END_ADDR) + return MHI_STATUS_ERROR; + + if (0 == meminfo->va_unaligned) + return MHI_STATUS_ERROR; + mb(); + return MHI_STATUS_SUCCESS; +} + +void mhi_freememregion(struct mhi_meminfo *meminfo) +{ + mb(); + dma_free_coherent(meminfo->dev, + meminfo->size, + (dma_addr_t *)&meminfo->pa_unaligned, + GFP_KERNEL); + meminfo->va_aligned = 0; + meminfo->pa_aligned = 0; + meminfo->va_unaligned = 0; + meminfo->pa_unaligned = 0; +} + +int mhi_init_debugfs(struct mhi_device_ctxt *mhi_dev_ctxt) +{ + struct dentry *mhi_chan_stats; + struct dentry *mhi_state_stats; + struct dentry *mhi_msi_trigger; + struct dentry *mhi_ev_stats; + mhi_dev_ctxt->mhi_parent_folder = + debugfs_create_dir("mhi", NULL); + if (mhi_dev_ctxt->mhi_parent_folder == NULL) { + mhi_log(MHI_MSG_INFO, "Failed to create debugfs parent dir.\n"); + return -EIO; + } + mhi_chan_stats = 
debugfs_create_file("mhi_chan_stats", + 0444, + mhi_dev_ctxt->mhi_parent_folder, + mhi_dev_ctxt, + &mhi_dbgfs_chan_fops); + if (mhi_chan_stats == NULL) + return -ENOMEM; + mhi_ev_stats = debugfs_create_file("mhi_ev_stats", + 0444, + mhi_dev_ctxt->mhi_parent_folder, + mhi_dev_ctxt, + &mhi_dbgfs_ev_fops); + if (mhi_ev_stats == NULL) + goto clean_chan; + mhi_state_stats = debugfs_create_file("mhi_state_stats", + 0444, + mhi_dev_ctxt->mhi_parent_folder, + mhi_dev_ctxt, + &mhi_dbgfs_state_fops); + if (mhi_state_stats == NULL) + goto clean_ev_stats; + mhi_msi_trigger = debugfs_create_file("mhi_msi_trigger", + 0444, + mhi_dev_ctxt->mhi_parent_folder, + mhi_dev_ctxt, + &mhi_dbgfs_trigger_msi_fops); + if (mhi_msi_trigger == NULL) + goto clean_state; + + mhi_dev_ctxt->chan_info = kmalloc(MHI_LOG_SIZE, GFP_KERNEL); + if (mhi_dev_ctxt->chan_info == NULL) + goto clean_all; + return 0; +clean_all: + debugfs_remove(mhi_msi_trigger); +clean_state: + debugfs_remove(mhi_state_stats); +clean_ev_stats: + debugfs_remove(mhi_ev_stats); +clean_chan: + debugfs_remove(mhi_chan_stats); + debugfs_remove(mhi_dev_ctxt->mhi_parent_folder); + return -ENOMEM; +} diff --git a/drivers/platform/msm/mhi/mhi_sys.h b/drivers/platform/msm/mhi/mhi_sys.h new file mode 100644 index 000000000000..9a9ad5df9f08 --- /dev/null +++ b/drivers/platform/msm/mhi/mhi_sys.h @@ -0,0 +1,79 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
 */

#ifndef _H_MHI_SYS_
#define _H_MHI_SYS_

#include <linux/mutex.h>
#include <linux/ipc_logging.h>
#include <linux/sysfs.h>
#include <linux/delay.h>

#include "mhi.h"

/* Run-time debug/tuning knobs, defined and exposed as module parameters
 * in mhi_sys.c */
extern enum MHI_DEBUG_LEVEL mhi_msg_lvl;
extern enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl;
extern enum MHI_DEBUG_CLASS mhi_msg_class;
extern u32 m3_timer_val_ms;

/* NOTE(review): these three read like plain u32 tunables (doorbell
 * interval, TX/RX interrupt moderation), not debug levels — confirm the
 * MHI_DEBUG_LEVEL typing is intentional. */
extern enum MHI_DEBUG_LEVEL mhi_xfer_db_interval;
extern enum MHI_DEBUG_LEVEL tx_mhi_intmodt;
extern enum MHI_DEBUG_LEVEL rx_mhi_intmodt;
/* ipc_logging context handle; may be NULL if ipc logging is unavailable */
extern void *mhi_ipc_log;

/* Hard assertion: if the condition fails, log the message with the source
 * location and panic the system.  For unrecoverable invariants only. */
#define MHI_ASSERT(_x, _msg)\
	do {\
		if (!(_x)) {\
			pr_err("ASSERT- %s : Failure in %s:%d/%s()!\n",\
				_msg, __FILE__, __LINE__, __func__); \
			panic("ASSERT"); \
		} \
	} while (0)

/* Dual-sink logging: messages at or above mhi_msg_lvl go to the kernel
 * log, and at or above mhi_ipc_log_lvl to the ipc_logging buffer. */
#define mhi_log(_msg_lvl, _msg, ...) do { \
	if ((_msg_lvl) >= mhi_msg_lvl) \
		pr_alert("[%s] " _msg, __func__, ##__VA_ARGS__);\
	if (mhi_ipc_log && ((_msg_lvl) >= mhi_ipc_log_lvl)) \
		ipc_log_string(mhi_ipc_log, \
			"[%s] " _msg, __func__, ##__VA_ARGS__); \
} while (0)

irqreturn_t mhi_msi_handlr(int msi_number, void *dev_id);

/* Bookkeeping for one DMA-coherent memory region.  The *_unaligned fields
 * record the raw allocation; the *_aligned fields are the usable base
 * (identical to the unaligned ones in the current allocator). */
struct mhi_meminfo {
	struct device *dev;
	uintptr_t pa_aligned;
	uintptr_t pa_unaligned;
	uintptr_t va_aligned;
	uintptr_t va_unaligned;
	uintptr_t size;
};

enum MHI_STATUS mhi_mallocmemregion(struct mhi_meminfo *meminfo, size_t size);

uintptr_t mhi_get_phy_addr(struct mhi_meminfo *meminfo);
void *mhi_get_virt_addr(struct mhi_meminfo *meminfo);
/* Physical <-> virtual translation within a region's fixed-offset mapping */
uintptr_t mhi_p2v_addr(struct mhi_meminfo *meminfo, phys_addr_t pa);
phys_addr_t mhi_v2p_addr(struct mhi_meminfo *meminfo, uintptr_t va);
u64 mhi_get_memregion_len(struct mhi_meminfo *meminfo);
void mhi_freememregion(struct mhi_meminfo *meminfo);

void print_ring(struct mhi_ring *local_chan_ctxt, u32 ring_id);
int mhi_init_debugfs(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_probe(struct pci_dev *mhi_device,
		const struct pci_device_id *mhi_device_id);
/* sysfs hooks forcing M3 (suspend) / M0 (active) state transitions */
ssize_t sysfs_init_m3(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t sysfs_init_m0(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);

#endif
