summaryrefslogtreecommitdiff
path: root/include/linux
diff options
context:
space:
mode:
authorSkylar Chang <chiaweic@codeaurora.org>2016-03-01 17:16:17 -0800
committerDavid Keitel <dkeitel@codeaurora.org>2016-03-23 20:09:44 -0700
commit14bf6793b8e8b7200666502688e5ca3d8032ce3e (patch)
treea050ddf0720aca932741c4fadd8396d6a723203b /include/linux
parentd9ca088b7e70cf731191ed7d85e7ff9d96d9feee (diff)
msm: ipa3: support IPA driver
Signed-off-by: Skylar Chang <chiaweic@codeaurora.org>
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/ecm_ipa.h95
-rw-r--r--include/linux/ipa.h2598
-rw-r--r--include/linux/msm_gsi.h1178
-rw-r--r--include/linux/rndis_ipa.h102
4 files changed, 3973 insertions, 0 deletions
diff --git a/include/linux/ecm_ipa.h b/include/linux/ecm_ipa.h
new file mode 100644
index 000000000000..5784e0383eab
--- /dev/null
+++ b/include/linux/ecm_ipa.h
@@ -0,0 +1,95 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ECM_IPA_H_
+#define _ECM_IPA_H_
+
+#include <linux/ipa.h>
+
+/*
+ * ecm_ipa_callback - data-path event callback registered with the IPA driver
+ * @priv: private data given upon ipa_connect
+ * @evt: event enum; IPA_WRITE_DONE on the tx path, IPA_RECEIVE on the
+ *	rx path (the same callback type serves both directions, see
+ *	struct ecm_ipa_params)
+ * @data: for tx path the data field is the sent socket buffer.
+ */
+typedef void (*ecm_ipa_callback)(void *priv,
+		enum ipa_dp_evt_type evt,
+		unsigned long data);
+
+/*
+ * struct ecm_ipa_params - parameters for ecm_ipa initialization API
+ *
+ * @device_ready_notify: callback supplied by USB core driver.
+ *	This callback shall be called by the Netdev once the device
+ *	is ready to receive data from tethered PC.
+ * @ecm_ipa_rx_dp_notify: ecm_ipa will set this callback (out parameter).
+ *	this callback shall be supplied for ipa_connect upon pipe
+ *	connection (USB->IPA), once IPA driver receive data packets
+ *	from USB pipe destined for Apps this callback will be called.
+ * @ecm_ipa_tx_dp_notify: ecm_ipa will set this callback (out parameter).
+ *	this callback shall be supplied for ipa_connect upon pipe
+ *	connection (IPA->USB), once IPA driver send packets destined
+ *	for USB, IPA BAM will notify for Tx-complete.
+ * @host_ethaddr: host Ethernet address in network order
+ * @device_ethaddr: device Ethernet address in network order
+ * @private: ecm_ipa will set this pointer (out parameter).
+ *	This pointer will hold the network device for later interaction
+ *	with ecm_ipa APIs
+ * @skip_ep_cfg: boolean field that determines if Apps-processor
+ *	should or should not configure this end-point.
+ */
+struct ecm_ipa_params {
+	void (*device_ready_notify)(void);
+	ecm_ipa_callback ecm_ipa_rx_dp_notify;
+	ecm_ipa_callback ecm_ipa_tx_dp_notify;
+	u8 host_ethaddr[ETH_ALEN];
+	u8 device_ethaddr[ETH_ALEN];
+	void *private;
+	bool skip_ep_cfg;
+};
+
+
+#ifdef CONFIG_ECM_IPA
+
+int ecm_ipa_init(struct ecm_ipa_params *params);
+
+int ecm_ipa_connect(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl,
+ void *priv);
+
+int ecm_ipa_disconnect(void *priv);
+
+void ecm_ipa_cleanup(void *priv);
+
+#else /* CONFIG_ECM_IPA*/
+
+/* Stub for builds without CONFIG_ECM_IPA. Must be static inline (like the
+ * sibling stubs below): a plain external definition in a header would be
+ * emitted in every translation unit that includes it, causing
+ * multiple-definition link errors.
+ */
+static inline int ecm_ipa_init(struct ecm_ipa_params *params)
+{
+	return 0;
+}
+
+static inline int ecm_ipa_connect(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl,
+ void *priv)
+{
+ return 0;
+}
+
+static inline int ecm_ipa_disconnect(void *priv)
+{
+ return 0;
+}
+
+static inline void ecm_ipa_cleanup(void *priv)
+{
+
+}
+#endif /* CONFIG_ECM_IPA*/
+
+#endif /* _ECM_IPA_H_ */
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
new file mode 100644
index 000000000000..0c618d31d4ce
--- /dev/null
+++ b/include/linux/ipa.h
@@ -0,0 +1,2598 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_H_
+#define _IPA_H_
+
+#include <linux/msm_ipa.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/msm-sps.h>
+#include <linux/if_ether.h>
+#include "linux/msm_gsi.h"
+
+#define IPA_APPS_MAX_BW_IN_MBPS 700
+/**
+ * enum ipa_transport_type
+ * transport type: either GSI or SPS
+ */
+enum ipa_transport_type {
+ IPA_TRANSPORT_TYPE_SPS,
+ IPA_TRANSPORT_TYPE_GSI
+};
+
+/**
+ * enum ipa_nat_en_type - NAT setting type in IPA end-point
+ */
+enum ipa_nat_en_type {
+ IPA_BYPASS_NAT,
+ IPA_SRC_NAT,
+ IPA_DST_NAT,
+};
+
+/**
+ * enum ipa_mode_type - mode setting type in IPA end-point
+ * @BASIC: basic mode
+ * @ENABLE_FRAMING_HDLC: not currently supported
+ * @ENABLE_DEFRAMING_HDLC: not currently supported
+ * @DMA: all data arriving IPA will not go through IPA logic blocks, this
+ * allows IPA to work as DMA for specific pipes.
+ */
+enum ipa_mode_type {
+ IPA_BASIC,
+ IPA_ENABLE_FRAMING_HDLC,
+ IPA_ENABLE_DEFRAMING_HDLC,
+ IPA_DMA,
+};
+
+/**
+ * enum ipa_aggr_en_type - aggregation setting type in IPA
+ * end-point
+ */
+enum ipa_aggr_en_type {
+ IPA_BYPASS_AGGR,
+ IPA_ENABLE_AGGR,
+ IPA_ENABLE_DEAGGR,
+};
+
+/**
+ * enum ipa_aggr_type - type of aggregation in IPA end-point
+ */
+enum ipa_aggr_type {
+ IPA_MBIM_16 = 0,
+ IPA_HDLC = 1,
+ IPA_TLP = 2,
+ IPA_RNDIS = 3,
+ IPA_GENERIC = 4,
+ IPA_QCMAP = 6,
+};
+
+/**
+ * enum ipa_aggr_mode - global aggregation mode
+ */
+enum ipa_aggr_mode {
+ IPA_MBIM,
+ IPA_QCNCM,
+};
+
+/**
+ * enum ipa_dp_evt_type - type of event client callback is
+ * invoked for on data path
+ * @IPA_RECEIVE: data is struct sk_buff
+ * @IPA_WRITE_DONE: data is struct sk_buff
+ */
+enum ipa_dp_evt_type {
+ IPA_RECEIVE,
+ IPA_WRITE_DONE,
+};
+
+/**
+ * enum hdr_total_len_or_pad_type - type of value held by TOTAL_LEN_OR_PAD
+ * field in header configuration register.
+ * @IPA_HDR_PAD: field is used as padding length
+ * @IPA_HDR_TOTAL_LEN: field is used as total length
+ */
+enum hdr_total_len_or_pad_type {
+	IPA_HDR_PAD = 0,
+	IPA_HDR_TOTAL_LEN = 1,
+};
+
+/**
+ * struct ipa_ep_cfg_nat - NAT configuration in IPA end-point
+ * @nat_en: This defines the default NAT mode for the pipe: in case of
+ * filter miss - the default NAT mode defines the NATing operation
+ * on the packet. Valid for Input Pipes only (IPA consumer)
+ */
+struct ipa_ep_cfg_nat {
+ enum ipa_nat_en_type nat_en;
+};
+
+/**
+ * struct ipa_ep_cfg_hdr - header configuration in IPA end-point
+ *
+ * @hdr_len:Header length in bytes to be added/removed. Assuming
+ * header len is constant per endpoint. Valid for
+ * both Input and Output Pipes
+ * @hdr_ofst_metadata_valid: 0: Metadata_Ofst value is invalid, i.e., no
+ * metadata within header.
+ * 1: Metadata_Ofst value is valid, i.e., metadata
+ * within header is in offset Metadata_Ofst Valid
+ * for Input Pipes only (IPA Consumer) (for output
+ * pipes, metadata already set within the header)
+ * @hdr_ofst_metadata: Offset within header in which metadata resides
+ * Size of metadata - 4bytes
+ * Example - Stream ID/SSID/mux ID.
+ * Valid for Input Pipes only (IPA Consumer) (for output
+ * pipes, metadata already set within the header)
+ * @hdr_additional_const_len: Defines the constant length that should be added
+ * to the payload length in order for IPA to update
+ * correctly the length field within the header
+ * (valid only in case Hdr_Ofst_Pkt_Size_Valid=1)
+ * Valid for Output Pipes (IPA Producer)
+ * @hdr_ofst_pkt_size_valid: 0: Hdr_Ofst_Pkt_Size value is invalid, i.e., no
+ * length field within the inserted header
+ * 1: Hdr_Ofst_Pkt_Size value is valid, i.e., a
+ * packet length field resides within the header
+ * Valid for Output Pipes (IPA Producer)
+ * @hdr_ofst_pkt_size: Offset within header in which packet size reside. Upon
+ * Header Insertion, IPA will update this field within the
+ * header with the packet length . Assumption is that
+ * header length field size is constant and is 2Bytes
+ * Valid for Output Pipes (IPA Producer)
+ * @hdr_a5_mux: Determines whether A5 Mux header should be added to the packet.
+ * This bit is valid only when Hdr_En=01(Header Insertion)
+ * SW should set this bit for IPA-to-A5 pipes.
+ * 0: Do not insert A5 Mux Header
+ * 1: Insert A5 Mux Header
+ * Valid for Output Pipes (IPA Producer)
+ * @hdr_remove_additional: bool switch, remove more of the header
+ * based on the aggregation configuration (register
+ * HDR_LEN_INC_DEAGG_HDR)
+ * @hdr_metadata_reg_valid: bool switch, metadata from
+ * register INIT_HDR_METADATA_n is valid.
+ * (relevant only for IPA Consumer pipes)
+ */
+struct ipa_ep_cfg_hdr {
+ u32 hdr_len;
+ u32 hdr_ofst_metadata_valid;
+ u32 hdr_ofst_metadata;
+ u32 hdr_additional_const_len;
+ u32 hdr_ofst_pkt_size_valid;
+ u32 hdr_ofst_pkt_size;
+ u32 hdr_a5_mux;
+ u32 hdr_remove_additional;
+ u32 hdr_metadata_reg_valid;
+};
+
+/**
+ * struct ipa_ep_cfg_hdr_ext - extended header configuration in IPA end-point
+ * @hdr_pad_to_alignment: Pad packet to specified alignment
+ *	(2^pad to alignment value), i.e. value of 3 means pad to 2^3 = 8 bytes
+ *	alignment. Alignment is to 0,2 up to 32 bytes (IPAv2 does not support 64
+ *	byte alignment). Valid for Output Pipes only (IPA Producer).
+ * @hdr_total_len_or_pad_offset: Offset to length field containing either
+ *	total length or pad length, per hdr_total_len_or_pad config
+ * @hdr_payload_len_inc_padding: 0-IPA_ENDP_INIT_HDR_n's HDR_OFST_PKT_SIZE
+ *	does not include padding bytes size, payload_len = packet length,
+ *	1-IPA_ENDP_INIT_HDR_n's HDR_OFST_PKT_SIZE includes
+ *	padding bytes size, payload_len = packet length + padding
+ * @hdr_total_len_or_pad: field is used as PAD length or as Total length
+ *	(header + packet + padding)
+ * @hdr_total_len_or_pad_valid: 0-Ignore TOTAL_LEN_OR_PAD field, 1-Process
+ *	TOTAL_LEN_OR_PAD field
+ * @hdr_little_endian: 0-Big Endian, 1-Little Endian
+ */
+struct ipa_ep_cfg_hdr_ext {
+	u32 hdr_pad_to_alignment;
+	u32 hdr_total_len_or_pad_offset;
+	bool hdr_payload_len_inc_padding;
+	enum hdr_total_len_or_pad_type hdr_total_len_or_pad;
+	bool hdr_total_len_or_pad_valid;
+	bool hdr_little_endian;
+};
+
+/**
+ * struct ipa_ep_cfg_mode - mode configuration in IPA end-point
+ * @mode: Valid for Input Pipes only (IPA Consumer)
+ * @dst: This parameter specifies the output pipe to which the packets
+ * will be routed to.
+ * This parameter is valid for Mode=DMA and not valid for
+ * Mode=Basic
+ * Valid for Input Pipes only (IPA Consumer)
+ */
+struct ipa_ep_cfg_mode {
+ enum ipa_mode_type mode;
+ enum ipa_client_type dst;
+};
+
+/**
+ * struct ipa_ep_cfg_aggr - aggregation configuration in IPA end-point
+ *
+ * @aggr_en: Valid for both Input and Output Pipes
+ * @aggr: aggregation type (Valid for both Input and Output Pipes)
+ * @aggr_byte_limit: Limit of aggregated packet size in KB (<=32KB) When set
+ * to 0, there is no size limitation on the aggregation.
+ * When both, Aggr_Byte_Limit and Aggr_Time_Limit are set
+ * to 0, there is no aggregation, every packet is sent
+ * independently according to the aggregation structure
+ * Valid for Output Pipes only (IPA Producer )
+ * @aggr_time_limit: Timer to close aggregated packet (<=32ms) When set to 0,
+ * there is no time limitation on the aggregation. When
+ * both, Aggr_Byte_Limit and Aggr_Time_Limit are set to 0,
+ * there is no aggregation, every packet is sent
+ * independently according to the aggregation structure
+ * Valid for Output Pipes only (IPA Producer)
+ * @aggr_pkt_limit: Defines if EOF close aggregation or not. if set to false
+ * HW closes aggregation (sends EOT) only based on its
+ * aggregation config (byte/time limit, etc). if set to
+ * true EOF closes aggregation in addition to HW based
+ * aggregation closure. Valid for Output Pipes only (IPA
+ * Producer). EOF affects only Pipes configured for
+ * generic aggregation.
+ */
+struct ipa_ep_cfg_aggr {
+ enum ipa_aggr_en_type aggr_en;
+ enum ipa_aggr_type aggr;
+ u32 aggr_byte_limit;
+ u32 aggr_time_limit;
+ u32 aggr_pkt_limit;
+};
+
+/**
+ * struct ipa_ep_cfg_route - route configuration in IPA end-point
+ * @rt_tbl_hdl: Defines the default routing table index to be used in case there
+ * is no filter rule matching, valid for Input Pipes only (IPA
+ * Consumer). Clients should set this to 0 which will cause default
+ * v4 and v6 routes setup internally by IPA driver to be used for
+ * this end-point
+ */
+struct ipa_ep_cfg_route {
+ u32 rt_tbl_hdl;
+};
+
+/**
+ * struct ipa_ep_cfg_holb - head of line blocking configuration in IPA end-point
+ * @en: enable(1 => ok to drop pkt)/disable(0 => never drop pkt)
+ * @tmr_val: duration in units of 128 IPA clk clock cyles [0,511], 1 clk=1.28us
+ * IPAv2.5 support 32 bit HOLB timeout value, previous versions
+ * supports 16 bit
+ */
+struct ipa_ep_cfg_holb {
+ u16 en;
+ u32 tmr_val;
+};
+
+/**
+ * struct ipa_ep_cfg_deaggr - deaggregation configuration in IPA end-point
+ * @deaggr_hdr_len: Deaggregation Header length in bytes. Valid only for Input
+ *	Pipes, which are configured for 'Generic' deaggregation.
+ * @packet_offset_valid: - 0: PACKET_OFFSET is not used, 1: PACKET_OFFSET is
+ *	used.
+ * @packet_offset_location: Location of packet offset field, which specifies
+ *	the offset to the packet from the start of the packet offset field.
+ * @max_packet_len: DEAGGR Max Packet Length in Bytes. A Packet with higher
+ *	size will be treated as an error. 0 - Packet Length is not Bound,
+ *	IPA should not check for a Max Packet Length.
+ */
+struct ipa_ep_cfg_deaggr {
+	u32 deaggr_hdr_len;
+	bool packet_offset_valid;
+	u32 packet_offset_location;
+	u32 max_packet_len;
+};
+
+/**
+ * enum ipa_cs_offload - checksum offload setting
+ */
+enum ipa_cs_offload {
+ IPA_DISABLE_CS_OFFLOAD,
+ IPA_ENABLE_CS_OFFLOAD_UL,
+ IPA_ENABLE_CS_OFFLOAD_DL,
+ IPA_CS_RSVD
+};
+
+/**
+ * struct ipa_ep_cfg_cfg - IPA ENDP_INIT Configuration register
+ * @frag_offload_en: - 0 - IP packet fragment handling is disabled. IP packet
+ * fragments should be sent to SW. SW is responsible for
+ * configuring filter rules, and IP packet filter exception should be
+ * used to send all fragments to SW. 1 - IP packet fragment
+ * handling is enabled. IPA checks for fragments and uses frag
+ * rules table for processing fragments. Valid only for Input Pipes
+ * (IPA Consumer)
+ * @cs_offload_en: Checksum offload enable: 00: Disable checksum offload, 01:
+ * Enable checksum calculation offload (UL) - For output pipe
+ * (IPA producer) specifies that checksum trailer is to be added.
+ * For input pipe (IPA consumer) specifies presence of checksum
+ * header and IPA checksum calculation accordingly. 10: Enable
+ * checksum calculation offload (DL) - For output pipe (IPA
+ * producer) specifies that checksum trailer is to be added. For
+ * input pipe (IPA consumer) specifies IPA checksum calculation.
+ * 11: Reserved
+ * @cs_metadata_hdr_offset: Offset in Words (4 bytes) within header in which
+ * checksum meta info header (4 bytes) starts (UL). Values are 0-15, which
+ * mean 0 - 60 byte checksum header offset. Valid for input
+ * pipes only (IPA consumer)
+ */
+struct ipa_ep_cfg_cfg {
+ bool frag_offload_en;
+ enum ipa_cs_offload cs_offload_en;
+ u8 cs_metadata_hdr_offset;
+};
+
+/**
+ * struct ipa_ep_cfg_metadata_mask - Endpoint initialization hdr metadata mask
+ * @metadata_mask: Mask specifying which metadata bits to write to
+ * IPA_ENDP_INIT_HDR_n.s HDR_OFST_METADATA. Only
+ * masked metadata bits (set to 1) will be written. Valid for Output
+ * Pipes only (IPA Producer)
+ */
+struct ipa_ep_cfg_metadata_mask {
+ u32 metadata_mask;
+};
+
+/**
+ * struct ipa_ep_cfg_metadata - Meta Data configuration in IPA end-point
+ * @qmap_id: qmap id
+ *
+ * The meta data value itself comes from the tx data descriptor; only the
+ * qmap id is configured through this struct (there is no @md field).
+ */
+struct ipa_ep_cfg_metadata {
+	u32 qmap_id;
+};
+
+/**
+ * struct ipa_ep_cfg - configuration of IPA end-point
+ * @nat: NAT parmeters
+ * @hdr: Header parameters
+ * @hdr_ext: Extended header parameters
+ * @mode: Mode parameters
+ * @aggr: Aggregation parameters
+ * @deaggr: Deaggregation params
+ * @route: Routing parameters
+ * @cfg: Configuration register data
+ * @metadata_mask: Hdr metadata mask
+ * @meta: Meta Data
+ */
+struct ipa_ep_cfg {
+ struct ipa_ep_cfg_nat nat;
+ struct ipa_ep_cfg_hdr hdr;
+ struct ipa_ep_cfg_hdr_ext hdr_ext;
+ struct ipa_ep_cfg_mode mode;
+ struct ipa_ep_cfg_aggr aggr;
+ struct ipa_ep_cfg_deaggr deaggr;
+ struct ipa_ep_cfg_route route;
+ struct ipa_ep_cfg_cfg cfg;
+ struct ipa_ep_cfg_metadata_mask metadata_mask;
+ struct ipa_ep_cfg_metadata meta;
+};
+
+/**
+ * struct ipa_ep_cfg_ctrl - Control configuration in IPA end-point
+ * @ipa_ep_suspend: 0 - ENDP is enabled, 1 - ENDP is suspended (disabled).
+ *	Valid for PROD Endpoints
+ * @ipa_ep_delay: 0 - ENDP is free-running, 1 - ENDP is delayed.
+ *	SW controls the data flow of an endpoint using this bit.
+ *	Valid for CONS Endpoints
+ */
+struct ipa_ep_cfg_ctrl {
+	bool ipa_ep_suspend;
+	bool ipa_ep_delay;
+};
+
+/**
+ * IPA_NUM_OF_FIFO_DESC() - number of sps_iovec descriptors that fit in a
+ * FIFO of the given size.
+ * @x: FIFO size in bytes
+ *
+ * The argument is parenthesized so that expressions such as
+ * IPA_NUM_OF_FIFO_DESC(a + b) divide the full sum, not just b.
+ */
+#define IPA_NUM_OF_FIFO_DESC(x) ((x) / sizeof(struct sps_iovec))
+typedef void (*ipa_notify_cb)(void *priv, enum ipa_dp_evt_type evt,
+		unsigned long data);
+
+/**
+ * struct ipa_connect_params - low-level client connect input parameters. Either
+ * client allocates the data and desc FIFO and specifies that in data+desc OR
+ * specifies sizes and pipe_mem pref and IPA does the allocation.
+ *
+ * @ipa_ep_cfg: IPA EP configuration
+ * @client: type of "client"
+ * @client_bam_hdl: client SPS handle
+ * @client_ep_idx: client PER EP index
+ * @priv: callback cookie
+ * @notify: callback
+ * priv - callback cookie evt - type of event data - data relevant
+ * to event. May not be valid. See event_type enum for valid
+ * cases.
+ * @desc_fifo_sz: size of desc FIFO
+ * @data_fifo_sz: size of data FIFO
+ * @pipe_mem_preferred: if true, try to alloc the FIFOs in pipe mem, fallback
+ * to sys mem if pipe mem alloc fails
+ * @desc: desc FIFO meta-data when client has allocated it
+ * @data: data FIFO meta-data when client has allocated it
+ * @skip_ep_cfg: boolean field that determines if EP should be configured
+ * by IPA driver
+ * @keep_ipa_awake: when true, IPA will not be clock gated
+ */
+struct ipa_connect_params {
+ struct ipa_ep_cfg ipa_ep_cfg;
+ enum ipa_client_type client;
+ unsigned long client_bam_hdl;
+ u32 client_ep_idx;
+ void *priv;
+ ipa_notify_cb notify;
+ u32 desc_fifo_sz;
+ u32 data_fifo_sz;
+ bool pipe_mem_preferred;
+ struct sps_mem_buffer desc;
+ struct sps_mem_buffer data;
+ bool skip_ep_cfg;
+ bool keep_ipa_awake;
+};
+
+/**
+ * struct ipa_sps_params - SPS related output parameters resulting from
+ * low/high level client connect
+ * @ipa_bam_hdl: IPA SPS handle
+ * @ipa_ep_idx: IPA PER EP index
+ * @desc: desc FIFO meta-data
+ * @data: data FIFO meta-data
+ */
+struct ipa_sps_params {
+ unsigned long ipa_bam_hdl;
+ u32 ipa_ep_idx;
+ struct sps_mem_buffer desc;
+ struct sps_mem_buffer data;
+};
+
+/**
+ * struct ipa_tx_intf - interface tx properties
+ * @num_props: number of tx properties
+ * @prop: the tx properties array
+ */
+struct ipa_tx_intf {
+ u32 num_props;
+ struct ipa_ioc_tx_intf_prop *prop;
+};
+
+/**
+ * struct ipa_rx_intf - interface rx properties
+ * @num_props: number of rx properties
+ * @prop: the rx properties array
+ */
+struct ipa_rx_intf {
+ u32 num_props;
+ struct ipa_ioc_rx_intf_prop *prop;
+};
+
+/**
+ * struct ipa_ext_intf - interface ext properties
+ * @excp_pipe_valid: is next field valid?
+ * @excp_pipe: exception packets should be routed to this pipe
+ * @num_props: number of ext properties
+ * @prop: the ext properties array
+ */
+struct ipa_ext_intf {
+ bool excp_pipe_valid;
+ enum ipa_client_type excp_pipe;
+ u32 num_props;
+ struct ipa_ioc_ext_intf_prop *prop;
+};
+
+/**
+ * struct ipa_sys_connect_params - information needed to setup an IPA end-point
+ * in system-BAM mode
+ * @ipa_ep_cfg: IPA EP configuration
+ * @client: the type of client who "owns" the EP
+ * @desc_fifo_sz: size of desc FIFO
+ * @priv: callback cookie
+ * @notify: callback
+ * priv - callback cookie
+ * evt - type of event
+ * data - data relevant to event. May not be valid. See event_type
+ * enum for valid cases.
+ * @skip_ep_cfg: boolean field that determines if EP should be configured
+ * by IPA driver
+ * @keep_ipa_awake: when true, IPA will not be clock gated
+ */
+struct ipa_sys_connect_params {
+ struct ipa_ep_cfg ipa_ep_cfg;
+ enum ipa_client_type client;
+ u32 desc_fifo_sz;
+ void *priv;
+ ipa_notify_cb notify;
+ bool skip_ep_cfg;
+ bool keep_ipa_awake;
+};
+
+/**
+ * struct ipa_tx_meta - meta-data for the TX packet
+ * @pkt_init_dst_ep: destination endpoint for packet init
+ *	(NOTE(review): inferred from the field name - confirm against the
+ *	IPA driver code)
+ * @pkt_init_dst_ep_valid: is @pkt_init_dst_ep valid?
+ * @pkt_init_dst_ep_remote: is the packet-init destination endpoint remote?
+ *	(NOTE(review): inferred from the field name - confirm)
+ * @dma_address: dma mapped address of TX packet
+ * @dma_address_valid: is above field valid?
+ */
+struct ipa_tx_meta {
+	u8 pkt_init_dst_ep;
+	bool pkt_init_dst_ep_valid;
+	bool pkt_init_dst_ep_remote;
+	dma_addr_t dma_address;
+	bool dma_address_valid;
+};
+
+/**
+ * typedef ipa_msg_free_fn - callback function
+ * @param buff - [in] the message payload to free
+ * @param len - [in] size of message payload
+ * @param type - [in] the message type
+ *
+ * Message callback registered by kernel client with IPA driver to
+ * free message payload after IPA driver processing is complete
+ *
+ * No return value
+ */
+typedef void (*ipa_msg_free_fn)(void *buff, u32 len, u32 type);
+
+/**
+ * typedef ipa_msg_pull_fn - callback function
+ * @param buff - [in] where to copy message payload
+ * @param len - [in] size of buffer to copy payload into
+ * @param type - [in] the message type
+ *
+ * Message callback registered by kernel client with IPA driver for
+ * IPA driver to pull messages from the kernel client upon demand from
+ * user-space
+ *
+ * Returns how many bytes were copied into the buffer.
+ */
+typedef int (*ipa_msg_pull_fn)(void *buff, u32 len, u32 type);
+
+/**
+ * enum ipa_voltage_level - IPA Voltage levels
+ */
+enum ipa_voltage_level {
+ IPA_VOLTAGE_UNSPECIFIED,
+ IPA_VOLTAGE_SVS = IPA_VOLTAGE_UNSPECIFIED,
+ IPA_VOLTAGE_NOMINAL,
+ IPA_VOLTAGE_TURBO,
+ IPA_VOLTAGE_MAX,
+};
+
+/**
+ * enum ipa_rm_event - IPA RM events
+ *
+ * Indicate the resource state change
+ */
+enum ipa_rm_event {
+ IPA_RM_RESOURCE_GRANTED,
+ IPA_RM_RESOURCE_RELEASED
+};
+
+typedef void (*ipa_rm_notify_cb)(void *user_data,
+ enum ipa_rm_event event,
+ unsigned long data);
+/**
+ * struct ipa_rm_register_params - information needed to
+ * register IPA RM client with IPA RM
+ *
+ * @user_data: IPA RM client provided information
+ * to be passed to notify_cb callback below
+ * @notify_cb: callback which is called by resource
+ * to notify the IPA RM client about its state
+ * change IPA RM client is expected to perform non
+ * blocking operations only in notify_cb and
+ * release notification context as soon as
+ * possible.
+ */
+struct ipa_rm_register_params {
+ void *user_data;
+ ipa_rm_notify_cb notify_cb;
+};
+
+/**
+ * struct ipa_rm_create_params - information needed to initialize
+ * the resource
+ * @name: resource name
+ * @floor_voltage: floor voltage needed for client to operate in maximum
+ * bandwidth.
+ * @reg_params: register parameters, contains are ignored
+ * for consumer resource NULL should be provided
+ * for consumer resource
+ * @request_resource: function which should be called to request resource,
+ * NULL should be provided for producer resource
+ * @release_resource: function which should be called to release resource,
+ * NULL should be provided for producer resource
+ *
+ * IPA RM client is expected to perform non blocking operations only
+ * in request_resource and release_resource functions and
+ * release notification context as soon as possible.
+ */
+struct ipa_rm_create_params {
+ enum ipa_rm_resource_name name;
+ enum ipa_voltage_level floor_voltage;
+ struct ipa_rm_register_params reg_params;
+ int (*request_resource)(void);
+ int (*release_resource)(void);
+};
+
+/**
+ * struct ipa_rm_perf_profile - information regarding IPA RM client performance
+ * profile
+ *
+ * @max_supported_bandwidth_mbps: maximum bandwidth need of the client in Mbps
+ */
+struct ipa_rm_perf_profile {
+	u32 max_supported_bandwidth_mbps;
+};
+
+#define A2_MUX_HDR_NAME_V4_PREF "dmux_hdr_v4_"
+#define A2_MUX_HDR_NAME_V6_PREF "dmux_hdr_v6_"
+
+/**
+ * enum teth_tethering_mode - Tethering mode (Rmnet / MBIM)
+ */
+enum teth_tethering_mode {
+ TETH_TETHERING_MODE_RMNET,
+ TETH_TETHERING_MODE_MBIM,
+ TETH_TETHERING_MODE_MAX,
+};
+
+/**
+ * teth_bridge_init_params - Parameters used for in/out USB API
+ * @usb_notify_cb: Callback function which should be used by the caller.
+ * Output parameter.
+ * @private_data: Data for the callback function. Should be used by the
+ * caller. Output parameter.
+ * @client: IPA client type (NOTE(review): undocumented in original -
+ * confirm intended semantics against callers)
+ * @skip_ep_cfg: boolean field that determines if Apps-processor
+ * should or should not configure this end-point.
+ */
+struct teth_bridge_init_params {
+	ipa_notify_cb usb_notify_cb;
+	void *private_data;
+	enum ipa_client_type client;
+	bool skip_ep_cfg;
+};
+
+/**
+ * struct teth_bridge_connect_params - Parameters used in teth_bridge_connect()
+ * @ipa_usb_pipe_hdl: IPA to USB pipe handle, returned from ipa_connect()
+ * @usb_ipa_pipe_hdl: USB to IPA pipe handle, returned from ipa_connect()
+ * @tethering_mode: Rmnet or MBIM
+ * @ipa_client_type: IPA "client" name (IPA_CLIENT_USB#_PROD)
+ */
+struct teth_bridge_connect_params {
+ u32 ipa_usb_pipe_hdl;
+ u32 usb_ipa_pipe_hdl;
+ enum teth_tethering_mode tethering_mode;
+ enum ipa_client_type client_type;
+};
+
+/**
+ * struct ipa_tx_data_desc - information needed
+ * to send data packet to HW link: link to data descriptors
+ * priv: client specific private data
+ * @pyld_buffer: pointer to the data buffer that holds frame
+ * @pyld_len: length of the data packet
+ */
+struct ipa_tx_data_desc {
+ struct list_head link;
+ void *priv;
+ void *pyld_buffer;
+ u16 pyld_len;
+};
+
+/**
+ * struct ipa_rx_data - information needed
+ * to send to wlan driver on receiving data from ipa hw
+ * @skb: skb
+ * @dma_addr: DMA address of this Rx packet
+ */
+struct ipa_rx_data {
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+};
+
+/**
+ * enum ipa_irq_type - IPA Interrupt Type
+ * Used to register handlers for IPA interrupts
+ *
+ * Below enum is a logical mapping and not the actual interrupt bit in HW
+ */
+enum ipa_irq_type {
+ IPA_BAD_SNOC_ACCESS_IRQ,
+ IPA_EOT_COAL_IRQ,
+ IPA_UC_IRQ_0,
+ IPA_UC_IRQ_1,
+ IPA_UC_IRQ_2,
+ IPA_UC_IRQ_3,
+ IPA_UC_IN_Q_NOT_EMPTY_IRQ,
+ IPA_UC_RX_CMD_Q_NOT_FULL_IRQ,
+ IPA_UC_TX_CMD_Q_NOT_FULL_IRQ,
+ IPA_UC_TO_PROC_ACK_Q_NOT_FULL_IRQ,
+ IPA_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ,
+ IPA_RX_ERR_IRQ,
+ IPA_DEAGGR_ERR_IRQ,
+ IPA_TX_ERR_IRQ,
+ IPA_STEP_MODE_IRQ,
+ IPA_PROC_ERR_IRQ,
+ IPA_TX_SUSPEND_IRQ,
+ IPA_TX_HOLB_DROP_IRQ,
+ IPA_BAM_IDLE_IRQ,
+ IPA_IRQ_MAX
+};
+
+/**
+ * struct ipa_tx_suspend_irq_data - interrupt data for IPA_TX_SUSPEND_IRQ
+ * @endpoints: bitmask of endpoints which caused the IPA_TX_SUSPEND_IRQ
+ *	interrupt
+ */
+struct ipa_tx_suspend_irq_data {
+	u32 endpoints;
+};
+
+
+/**
+ * typedef ipa_irq_handler_t - irq handler/callback type
+ * @param ipa_irq_type - [in] interrupt type
+ * @param private_data - [in, out] the client private data
+ * @param interrupt_data - [out] interrupt information data
+ *
+ * callback registered by ipa_add_interrupt_handler function to
+ * handle a specific interrupt type
+ *
+ * No return value
+ */
+typedef void (*ipa_irq_handler_t)(enum ipa_irq_type interrupt,
+ void *private_data,
+ void *interrupt_data);
+
+/**
+ * struct IpaHwBamStats_t - Structure holding the BAM statistics
+ *
+ * @bamFifoFull : Number of times Bam Fifo got full - For In Ch: Good,
+ * For Out Ch: Bad
+ * @bamFifoEmpty : Number of times Bam Fifo got empty - For In Ch: Bad,
+ * For Out Ch: Good
+ * @bamFifoUsageHigh : Number of times Bam fifo usage went above 75% -
+ * For In Ch: Good, For Out Ch: Bad
+ * @bamFifoUsageLow : Number of times Bam fifo usage went below 25% -
+ * For In Ch: Bad, For Out Ch: Good
+ * @bamUtilCount : BAM utilization counter (NOTE(review): undocumented in
+ * original - confirm exact semantics in uC firmware interface)
+*/
+struct IpaHwBamStats_t {
+	u32 bamFifoFull;
+	u32 bamFifoEmpty;
+	u32 bamFifoUsageHigh;
+	u32 bamFifoUsageLow;
+	u32 bamUtilCount;
+} __packed;
+
+/**
+ * struct IpaHwRingStats_t - Structure holding the Ring statistics
+ *
+ * @ringFull : Number of times Transfer Ring got full - For In Ch: Good,
+ * For Out Ch: Bad
+ * @ringEmpty : Number of times Transfer Ring got empty - For In Ch: Bad,
+ * For Out Ch: Good
+ * @ringUsageHigh : Number of times Transfer Ring usage went above 75% -
+ * For In Ch: Good, For Out Ch: Bad
+ * @ringUsageLow : Number of times Transfer Ring usage went below 25% -
+ * For In Ch: Bad, For Out Ch: Good
+ * @RingUtilCount : Ring utilization counter (NOTE(review): undocumented in
+ * original - confirm exact semantics in uC firmware interface)
+*/
+struct IpaHwRingStats_t {
+	u32 ringFull;
+	u32 ringEmpty;
+	u32 ringUsageHigh;
+	u32 ringUsageLow;
+	u32 RingUtilCount;
+} __packed;
+
+/**
+ * struct IpaHwStatsWDIRxInfoData_t - Structure holding the WDI Rx channel
+ * structures
+ *
+ * @max_outstanding_pkts : Number of outstanding packets in Rx Ring
+ * @num_pkts_processed : Number of packets processed - cumulative
+ * @rx_ring_rp_value : Read pointer last advertized to the WLAN FW
+ * @rx_ind_ring_stats : Ring info
+ * @bam_stats : BAM info
+ * @num_bam_int_handled : Number of Bam Interrupts handled by FW
+ * @num_db : Number of times the doorbell was rung
+ * @num_unexpected_db : Number of unexpected doorbells
+ * @num_pkts_in_dis_uninit_state : number of completions we
+ * received in disabled or uninitialized state
+ * @num_ic_inj_vdev_change : Number of times the Imm Cmd is
+ * injected due to vdev_id change
+ * @num_ic_inj_fw_desc_change : Number of times the Imm Cmd is
+ * injected due to fw_desc change
+*/
+struct IpaHwStatsWDIRxInfoData_t {
+ u32 max_outstanding_pkts;
+ u32 num_pkts_processed;
+ u32 rx_ring_rp_value;
+ struct IpaHwRingStats_t rx_ind_ring_stats;
+ struct IpaHwBamStats_t bam_stats;
+ u32 num_bam_int_handled;
+ u32 num_db;
+ u32 num_unexpected_db;
+ u32 num_pkts_in_dis_uninit_state;
+ u32 num_ic_inj_vdev_change;
+ u32 num_ic_inj_fw_desc_change;
+ u32 reserved1;
+ u32 reserved2;
+} __packed;
+
+/**
+ * struct IpaHwStatsWDITxInfoData_t - Structure holding the WDI Tx channel
+ * structures
+ *
+ * @num_pkts_processed : Number of packets processed - cumulative
+ * @copy_engine_doorbell_value : latest value of doorbell written to copy engine
+ * @num_db_fired : Number of DB from uC FW to Copy engine
+ * @tx_comp_ring_stats : ring info
+ * @bam_stats : BAM info
+ * @num_db : Number of times the doorbell was rung
+ * @num_unexpected_db : Number of unexpected doorbells
+ * @num_bam_int_handled : Number of Bam Interrupts handled by FW
+ * @num_bam_int_in_non_running_state : Number of Bam interrupts while not in
+ * Running state
+ * @num_qmb_int_handled : Number of QMB interrupts handled
+*/
+struct IpaHwStatsWDITxInfoData_t {
+ u32 num_pkts_processed;
+ u32 copy_engine_doorbell_value;
+ u32 num_db_fired;
+ struct IpaHwRingStats_t tx_comp_ring_stats;
+ struct IpaHwBamStats_t bam_stats;
+ u32 num_db;
+ u32 num_unexpected_db;
+ u32 num_bam_int_handled;
+ u32 num_bam_int_in_non_runnning_state;
+ u32 num_qmb_int_handled;
+ u32 num_bam_int_handled_while_wait_for_bam;
+} __packed;
+
+/**
+ * struct IpaHwStatsWDIInfoData_t - Structure holding the WDI channel structures
+ *
+ * @rx_ch_stats : RX stats
+ * @tx_ch_stats : TX stats
+*/
+struct IpaHwStatsWDIInfoData_t {
+ struct IpaHwStatsWDIRxInfoData_t rx_ch_stats;
+ struct IpaHwStatsWDITxInfoData_t tx_ch_stats;
+} __packed;
+
+
+/**
+ * struct ipa_wdi_ul_params - WDI_RX configuration
+ * @rdy_ring_base_pa: physical address of the base of the Rx ring (containing
+ * Rx buffers)
+ * @rdy_ring_size: size of the Rx ring in bytes
+ * @rdy_ring_rp_pa: physical address of the location through which IPA uc is
+ * expected to communicate about the Read pointer into the Rx Ring
+ */
+struct ipa_wdi_ul_params {
+ phys_addr_t rdy_ring_base_pa;
+ u32 rdy_ring_size;
+ phys_addr_t rdy_ring_rp_pa;
+};
+
+/**
+ * struct ipa_wdi_ul_params_smmu - WDI_RX configuration (with WLAN SMMU)
+ * @rdy_ring: SG table describing the Rx ring (containing Rx buffers)
+ * @rdy_ring_size: size of the Rx ring in bytes
+ * @rdy_ring_rp_pa: physical address of the location through which IPA uc is
+ * expected to communicate about the Read pointer into the Rx Ring
+ */
+struct ipa_wdi_ul_params_smmu {
+ struct sg_table rdy_ring;
+ u32 rdy_ring_size;
+ phys_addr_t rdy_ring_rp_pa;
+};
+
+/**
+ * struct ipa_wdi_dl_params - WDI_TX configuration
+ * @comp_ring_base_pa: physical address of the base of the Tx completion ring
+ * @comp_ring_size: size of the Tx completion ring in bytes
+ * @ce_ring_base_pa: physical address of the base of the Copy Engine Source
+ * Ring
+ * @ce_door_bell_pa: physical address of the doorbell that the IPA uC has to
+ * write into to trigger the copy engine
+ * @ce_ring_size: Copy Engine Ring size in bytes
+ * @num_tx_buffers: Number of pkt buffers allocated
+ */
+struct ipa_wdi_dl_params {
+ phys_addr_t comp_ring_base_pa;
+ u32 comp_ring_size;
+ phys_addr_t ce_ring_base_pa;
+ phys_addr_t ce_door_bell_pa;
+ u32 ce_ring_size;
+ u32 num_tx_buffers;
+};
+
+/**
+ * struct ipa_wdi_dl_params_smmu - WDI_TX configuration (with WLAN SMMU)
+ * @comp_ring: SG table describing the Tx completion ring
+ * @comp_ring_size: size of the Tx completion ring in bytes
+ * @ce_ring: SG table describing the Copy Engine Source Ring
+ * @ce_door_bell_pa: physical address of the doorbell that the IPA uC has to
+ * write into to trigger the copy engine
+ * @ce_ring_size: Copy Engine Ring size in bytes
+ * @num_tx_buffers: Number of pkt buffers allocated
+ */
+struct ipa_wdi_dl_params_smmu {
+ struct sg_table comp_ring;
+ u32 comp_ring_size;
+ struct sg_table ce_ring;
+ phys_addr_t ce_door_bell_pa;
+ u32 ce_ring_size;
+ u32 num_tx_buffers;
+};
+
+/**
+ * struct ipa_wdi_in_params - information provided by WDI client
+ * @sys: IPA EP configuration info
+ * @ul: WDI_RX configuration info
+ * @dl: WDI_TX configuration info
+ * @ul_smmu: WDI_RX configuration info when WLAN uses SMMU
+ * @dl_smmu: WDI_TX configuration info when WLAN uses SMMU
+ * @smmu_enabled: true if WLAN uses SMMU
+ */
+struct ipa_wdi_in_params {
+ struct ipa_sys_connect_params sys;
+ union {
+ struct ipa_wdi_ul_params ul;
+ struct ipa_wdi_dl_params dl;
+ struct ipa_wdi_ul_params_smmu ul_smmu;
+ struct ipa_wdi_dl_params_smmu dl_smmu;
+ } u;
+ bool smmu_enabled;
+};
+
+/**
+ * struct ipa_wdi_out_params - information provided to WDI client
+ * @uc_door_bell_pa: physical address of IPA uc doorbell
+ * @clnt_hdl: opaque handle assigned to client
+ */
+struct ipa_wdi_out_params {
+ phys_addr_t uc_door_bell_pa;
+ u32 clnt_hdl;
+};
+
+/**
+ * struct ipa_wdi_db_params - information provided to retrieve
+ * physical address of uC doorbell
+ * @client: type of "client" (IPA_CLIENT_WLAN#_PROD/CONS)
+ * @uc_door_bell_pa: physical address of IPA uc doorbell
+ */
+struct ipa_wdi_db_params {
+ enum ipa_client_type client;
+ phys_addr_t uc_door_bell_pa;
+};
+
+/**
+ * struct ipa_wdi_uc_ready_params - uC ready CB parameters
+ * @is_uC_ready: uC loaded or not
+ * @priv : callback cookie
+ * @notify: callback
+ */
+typedef void (*ipa_uc_ready_cb)(void *priv);
+struct ipa_wdi_uc_ready_params {
+ bool is_uC_ready;
+ void *priv;
+ ipa_uc_ready_cb notify;
+};
+
+/**
+ * struct ipa_wdi_buffer_info - address info of a WLAN allocated buffer
+ * @pa: physical address of the buffer
+ * @iova: IOVA of the buffer as embedded inside the WDI descriptors
+ * @size: size in bytes of the buffer
+ * @result: result of map or unmap operations (out param)
+ *
+ * IPA driver will create/release IOMMU mapping in IPA SMMU from iova->pa
+ */
+struct ipa_wdi_buffer_info {
+ phys_addr_t pa;
+ unsigned long iova;
+ size_t size;
+ int result;
+};
+
+/**
+ * struct odu_bridge_params - parameters for odu bridge initialization API
+ *
+ * @netdev_name: network interface name
+ * @priv: private data that will be supplied to client's callback
+ * @tx_dp_notify: callback for handling SKB. the following event are supported:
+ * IPA_WRITE_DONE: will be called after client called to odu_bridge_tx_dp()
+ * Client is expected to free the skb.
+ * IPA_RECEIVE: will be called for delivering skb to APPS.
+ * Client is expected to deliver the skb to network stack.
+ * @send_dl_skb: callback for sending skb on downlink direction to adapter.
+ * Client is expected to free the skb.
+ * @device_ethaddr: device Ethernet address in network order.
+ * @ipa_desc_size: IPA Sys Pipe Desc Size
+ */
+struct odu_bridge_params {
+ const char *netdev_name;
+ void *priv;
+ ipa_notify_cb tx_dp_notify;
+ int (*send_dl_skb)(void *priv, struct sk_buff *skb);
+ u8 device_ethaddr[ETH_ALEN];
+ u32 ipa_desc_size;
+};
+
+/**
+ * enum ipa_mhi_event_type - event type for mhi callback
+ *
+ * @IPA_MHI_EVENT_READY: IPA MHI is ready and IPA uC is loaded. After getting
+ * this event MHI client is expected to call to ipa_mhi_start() API
+ * @IPA_MHI_EVENT_DATA_AVAILABLE: downlink data available on MHI channel
+ */
+enum ipa_mhi_event_type {
+ IPA_MHI_EVENT_READY,
+ IPA_MHI_EVENT_DATA_AVAILABLE,
+ IPA_MHI_EVENT_MAX,
+};
+
+typedef void (*mhi_client_cb)(void *priv, enum ipa_mhi_event_type event,
+ unsigned long data);
+
+/**
+ * struct ipa_mhi_msi_info - parameters for MSI (Message Signaled Interrupts)
+ * @addr_low: MSI lower base physical address
+ * @addr_hi: MSI higher base physical address
+ * @data: Data Pattern to use when generating the MSI
+ * @mask: Mask indicating number of messages assigned by the host to device
+ *
+ * msi value is written according to this formula:
+ * ((data & ~mask) | (mmio.msiVec & mask))
+ */
+struct ipa_mhi_msi_info {
+ u32 addr_low;
+ u32 addr_hi;
+ u32 data;
+ u32 mask;
+};
+
+/**
+ * struct ipa_mhi_init_params - parameters for IPA MHI initialization API
+ *
+ * @msi: MSI (Message Signaled Interrupts) parameters
+ * @mmio_addr: MHI MMIO physical address
+ * @first_ch_idx: First channel ID for hardware accelerated channels.
+ * @first_er_idx: First event ring ID for hardware accelerated channels.
+ * @assert_bit40: should assert bit 40 in order to access host space.
+ * if PCIe iATU is configured then there is no need to assert bit 40
+ * @notify: client callback
+ * @priv: client private data to be provided in client callback
+ * @test_mode: flag to indicate if IPA MHI is in unit test mode
+ */
+struct ipa_mhi_init_params {
+ struct ipa_mhi_msi_info msi;
+ u32 mmio_addr;
+ u32 first_ch_idx;
+ u32 first_er_idx;
+ bool assert_bit40;
+ mhi_client_cb notify;
+ void *priv;
+ bool test_mode;
+};
+
+/**
+ * struct ipa_mhi_start_params - parameters for IPA MHI start API
+ *
+ * @host_ctrl_addr: Base address of MHI control data structures
+ * @host_data_addr: Base address of MHI data buffers
+ * @channel_context_array_addr: channel context array address in host
+ * address space
+ * @event_context_array_addr: event context array address in host
+ * address space
+ */
+struct ipa_mhi_start_params {
+ u32 host_ctrl_addr;
+ u32 host_data_addr;
+ u64 channel_context_array_addr;
+ u64 event_context_array_addr;
+};
+
+/**
+ * struct ipa_mhi_connect_params - parameters for IPA MHI channel connect API
+ *
+ * @sys: IPA EP configuration info
+ * @channel_id: MHI channel id
+ */
+struct ipa_mhi_connect_params {
+ struct ipa_sys_connect_params sys;
+ u8 channel_id;
+};
+
+/* bit #40 in address should be asserted for MHI transfers over pcie */
+#define IPA_MHI_HOST_ADDR(addr) ((addr) | BIT_ULL(40))
+
+/**
+ * struct ipa_gsi_ep_config - IPA GSI endpoint configurations
+ *
+ * @ipa_ep_num: IPA EP pipe number
+ * @ipa_gsi_chan_num: GSI channel number
+ * @ipa_if_tlv: number of IPA_IF TLV
+ * @ipa_if_aos: number of IPA_IF AOS
+ * @ee: Execution environment
+ */
+struct ipa_gsi_ep_config {
+ int ipa_ep_num;
+ int ipa_gsi_chan_num;
+ int ipa_if_tlv;
+ int ipa_if_aos;
+ int ee;
+};
+
+enum ipa_usb_teth_prot {
+ IPA_USB_RNDIS = 0,
+ IPA_USB_ECM = 1,
+ IPA_USB_RMNET = 2,
+ IPA_USB_MBIM = 3,
+ IPA_USB_DIAG = 4,
+ IPA_USB_MAX_TETH_PROT_SIZE
+};
+
+/**
+ * ipa_usb_teth_params - parameters for RNDIS/ECM initialization API
+ *
+ * @host_ethaddr: host Ethernet address in network order
+ * @device_ethaddr: device Ethernet address in network order
+ */
+struct ipa_usb_teth_params {
+ u8 host_ethaddr[ETH_ALEN];
+ u8 device_ethaddr[ETH_ALEN];
+};
+
+enum ipa_usb_notify_event {
+ IPA_USB_DEVICE_READY,
+ IPA_USB_REMOTE_WAKEUP,
+ IPA_USB_SUSPEND_COMPLETED
+};
+
+enum ipa_usb_max_usb_packet_size {
+ IPA_USB_HIGH_SPEED_512B = 512,
+ IPA_USB_SUPER_SPEED_1024B = 1024
+};
+
+/**
+ * ipa_usb_xdci_chan_scratch - xDCI protocol SW config area of
+ * channel scratch
+ *
+ * @last_trb_addr: Address (LSB - based on alignment restrictions) of
+ * last TRB in queue. Used to identify roll over case
+ * @const_buffer_size: TRB buffer size in KB (similar to IPA aggregation
+ * configuration). Must be aligned to max USB Packet Size.
+ * Should be 1 <= const_buffer_size <= 31.
+ * @depcmd_low_addr: Used to generate "Update Transfer" command
+ * @depcmd_hi_addr: Used to generate "Update Transfer" command.
+ */
+struct ipa_usb_xdci_chan_scratch {
+ u16 last_trb_addr;
+ u8 const_buffer_size;
+ u32 depcmd_low_addr;
+ u8 depcmd_hi_addr;
+};
+
+/**
+ * ipa_usb_xdci_chan_params - xDCI channel related properties
+ *
+ * @client: type of "client"
+ * @ipa_ep_cfg: IPA EP configuration
+ * @keep_ipa_awake: when true, IPA will not be clock gated
+ * @teth_prot: tethering protocol for which the channel is created
+ * @gevntcount_low_addr: GEVNCOUNT low address for event scratch
+ * @gevntcount_hi_addr: GEVNCOUNT high address for event scratch
+ * @dir: channel direction
+ * @xfer_ring_len: length of transfer ring in bytes (must be integral
+ * multiple of transfer element size - 16B for xDCI)
+ * @xfer_ring_base_addr: physical base address of transfer ring. Address must be
+ * aligned to xfer_ring_len rounded to power of two
+ * @xfer_scratch: parameters for xDCI channel scratch
+ *
+ */
+struct ipa_usb_xdci_chan_params {
+ /* IPA EP params */
+ enum ipa_client_type client;
+ struct ipa_ep_cfg ipa_ep_cfg;
+ bool keep_ipa_awake;
+ enum ipa_usb_teth_prot teth_prot;
+ /* event ring params */
+ u32 gevntcount_low_addr;
+ u8 gevntcount_hi_addr;
+ /* transfer ring params */
+ enum gsi_chan_dir dir;
+ u16 xfer_ring_len;
+ u64 xfer_ring_base_addr;
+ struct ipa_usb_xdci_chan_scratch xfer_scratch;
+};
+
+/**
+ * ipa_req_chan_out_params - out parameters for channel request
+ *
+ * @clnt_hdl: opaque client handle assigned by IPA to client
+ * @db_reg_phs_addr_lsb: Physical address of doorbell register where the 32
+ * LSBs of the doorbell value should be written
+ * @db_reg_phs_addr_msb: Physical address of doorbell register where the 32
+ * MSBs of the doorbell value should be written
+ *
+ */
+struct ipa_req_chan_out_params {
+ u32 clnt_hdl;
+ u32 db_reg_phs_addr_lsb;
+ u32 db_reg_phs_addr_msb;
+};
+
+/**
+ * ipa_usb_teth_prot_params - parameters for connecting RNDIS
+ *
+ * @max_xfer_size_bytes_to_dev: max size of UL packets in bytes
+ * @max_packet_number_to_dev: max number of UL aggregated packets
+ * @max_xfer_size_bytes_to_host: max size of DL packets in bytes
+ *
+ */
+struct ipa_usb_teth_prot_params {
+ u32 max_xfer_size_bytes_to_dev;
+ u32 max_packet_number_to_dev;
+ u32 max_xfer_size_bytes_to_host;
+};
+
+/**
+ * ipa_usb_xdci_connect_params - parameters required to start IN, OUT
+ * channels, and connect RNDIS/ECM/teth_bridge
+ *
+ * @max_pkt_size: high speed or full speed
+ * @ipa_to_usb_xferrscidx: Transfer Resource Index (XferRscIdx) for IN channel.
+ * The hardware-assigned transfer resource index for the
+ * transfer, which was returned in response to the
+ * Start Transfer command. This field is used for
+ * "Update Transfer" command.
+ * Should be 0 <= ipa_to_usb_xferrscidx <= 127.
+ * @ipa_to_usb_xferrscidx_valid: true if xferRscIdx should be updated for IN
+ * channel
+ * @usb_to_ipa_xferrscidx: Transfer Resource Index (XferRscIdx) for OUT channel
+ * Should be 0 <= usb_to_ipa_xferrscidx <= 127.
+ * @usb_to_ipa_xferrscidx_valid: true if xferRscIdx should be updated for OUT
+ * channel
+ * @teth_prot: tethering protocol
+ * @teth_prot_params: parameters for connecting the tethering protocol.
+ * @max_supported_bandwidth_mbps: maximum bandwidth need of the client in Mbps
+ */
+struct ipa_usb_xdci_connect_params {
+ enum ipa_usb_max_usb_packet_size max_pkt_size;
+ u8 ipa_to_usb_xferrscidx;
+ bool ipa_to_usb_xferrscidx_valid;
+ u8 usb_to_ipa_xferrscidx;
+ bool usb_to_ipa_xferrscidx_valid;
+ enum ipa_usb_teth_prot teth_prot;
+ struct ipa_usb_teth_prot_params teth_prot_params;
+ u32 max_supported_bandwidth_mbps;
+};
+
+#if defined CONFIG_IPA || defined CONFIG_IPA3
+
+/*
+ * Connect / Disconnect
+ */
+int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps,
+ u32 *clnt_hdl);
+int ipa_disconnect(u32 clnt_hdl);
+
+/*
+ * Resume / Suspend
+ */
+int ipa_reset_endpoint(u32 clnt_hdl);
+
+/*
+ * Remove ep delay
+ */
+int ipa_clear_endpoint_delay(u32 clnt_hdl);
+
+/*
+ * Configuration
+ */
+int ipa_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg);
+
+int ipa_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg);
+
+int ipa_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg);
+
+int ipa_cfg_ep_hdr_ext(u32 clnt_hdl,
+ const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg);
+
+int ipa_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg);
+
+int ipa_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ipa_ep_cfg);
+
+int ipa_cfg_ep_deaggr(u32 clnt_hdl,
+ const struct ipa_ep_cfg_deaggr *ipa_ep_cfg);
+
+int ipa_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg);
+
+int ipa_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ipa_ep_cfg);
+
+int ipa_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *ipa_ep_cfg);
+
+int ipa_cfg_ep_metadata_mask(u32 clnt_hdl, const struct ipa_ep_cfg_metadata_mask
+ *ipa_ep_cfg);
+
+int ipa_cfg_ep_holb_by_client(enum ipa_client_type client,
+ const struct ipa_ep_cfg_holb *ipa_ep_cfg);
+
+int ipa_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl);
+
+/*
+ * Header removal / addition
+ */
+int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs);
+
+int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls);
+
+int ipa_commit_hdr(void);
+
+int ipa_reset_hdr(void);
+
+int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup);
+
+int ipa_put_hdr(u32 hdr_hdl);
+
+int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy);
+
+/*
+ * Header Processing Context
+ */
+int ipa_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs);
+
+int ipa_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls);
+
+/*
+ * Routing
+ */
+int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
+
+int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls);
+
+int ipa_commit_rt(enum ipa_ip_type ip);
+
+int ipa_reset_rt(enum ipa_ip_type ip);
+
+int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup);
+
+int ipa_put_rt_tbl(u32 rt_tbl_hdl);
+
+int ipa_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in);
+
+int ipa_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules);
+
+/*
+ * Filtering
+ */
+int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules);
+
+int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls);
+
+int ipa_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules);
+
+int ipa_commit_flt(enum ipa_ip_type ip);
+
+int ipa_reset_flt(enum ipa_ip_type ip);
+
+/*
+ * NAT
+ */
+int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem);
+
+int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init);
+
+int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma);
+
+int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del);
+
+/*
+ * Messaging
+ */
+int ipa_send_msg(struct ipa_msg_meta *meta, void *buff,
+ ipa_msg_free_fn callback);
+int ipa_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback);
+int ipa_deregister_pull_msg(struct ipa_msg_meta *meta);
+
+/*
+ * Interface
+ */
+int ipa_register_intf(const char *name, const struct ipa_tx_intf *tx,
+ const struct ipa_rx_intf *rx);
+int ipa_register_intf_ext(const char *name, const struct ipa_tx_intf *tx,
+ const struct ipa_rx_intf *rx,
+ const struct ipa_ext_intf *ext);
+int ipa_deregister_intf(const char *name);
+
+/*
+ * Aggregation
+ */
+int ipa_set_aggr_mode(enum ipa_aggr_mode mode);
+
+int ipa_set_qcncm_ndp_sig(char sig[3]);
+
+int ipa_set_single_ndp_per_mbim(bool enable);
+
+/*
+ * Data path
+ */
+int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+ struct ipa_tx_meta *metadata);
+
+/*
+ * To transfer multiple data packets
+ * While passing the data descriptor list, the anchor node
+ * should be of type struct ipa_tx_data_desc not list_head
+*/
+int ipa_tx_dp_mul(enum ipa_client_type dst,
+ struct ipa_tx_data_desc *data_desc);
+
+void ipa_free_skb(struct ipa_rx_data *);
+
+/*
+ * System pipes
+ */
+int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl);
+
+int ipa_teardown_sys_pipe(u32 clnt_hdl);
+
+int ipa_connect_wdi_pipe(struct ipa_wdi_in_params *in,
+ struct ipa_wdi_out_params *out);
+int ipa_disconnect_wdi_pipe(u32 clnt_hdl);
+int ipa_enable_wdi_pipe(u32 clnt_hdl);
+int ipa_disable_wdi_pipe(u32 clnt_hdl);
+int ipa_resume_wdi_pipe(u32 clnt_hdl);
+int ipa_suspend_wdi_pipe(u32 clnt_hdl);
+int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
+u16 ipa_get_smem_restr_bytes(void);
+/*
+ * To retrieve doorbell physical address of
+ * wlan pipes
+ */
+int ipa_uc_wdi_get_dbpa(struct ipa_wdi_db_params *out);
+
+/*
+ * To register uC ready callback if uC not ready
+ * and also check uC readiness
+ * if uC not ready only, register callback
+ */
+int ipa_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *param);
+
+int ipa_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
+int ipa_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
+
+/*
+ * Resource manager
+ */
+int ipa_rm_create_resource(struct ipa_rm_create_params *create_params);
+
+int ipa_rm_delete_resource(enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_register(enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_register_params *reg_params);
+
+int ipa_rm_deregister(enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_register_params *reg_params);
+
+int ipa_rm_set_perf_profile(enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_perf_profile *profile);
+
+int ipa_rm_add_dependency(enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name);
+
+int ipa_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name);
+
+int ipa_rm_delete_dependency(enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name);
+
+int ipa_rm_request_resource(enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_release_resource(enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_notify_completion(enum ipa_rm_event event,
+ enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name,
+ unsigned long msecs);
+
+int ipa_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_inactivity_timer_request_resource(
+ enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_inactivity_timer_release_resource(
+ enum ipa_rm_resource_name resource_name);
+
+/*
+ * Tethering bridge (Rmnet / MBIM)
+ */
+int teth_bridge_init(struct teth_bridge_init_params *params);
+
+int teth_bridge_disconnect(enum ipa_client_type client);
+
+int teth_bridge_connect(struct teth_bridge_connect_params *connect_params);
+
+/*
+ * Tethering client info
+ */
+void ipa_set_client(int index, enum ipacm_client_enum client, bool uplink);
+
+enum ipacm_client_enum ipa_get_client(int pipe_idx);
+
+bool ipa_get_client_uplink(int pipe_idx);
+
+/*
+ * ODU bridge
+ */
+
+int odu_bridge_init(struct odu_bridge_params *params);
+
+int odu_bridge_connect(void);
+
+int odu_bridge_disconnect(void);
+
+int odu_bridge_tx_dp(struct sk_buff *skb, struct ipa_tx_meta *metadata);
+
+int odu_bridge_cleanup(void);
+
+/*
+ * IPADMA
+ */
+int ipa_dma_init(void);
+
+int ipa_dma_enable(void);
+
+int ipa_dma_disable(void);
+
+int ipa_dma_sync_memcpy(u64 dest, u64 src, int len);
+
+int ipa_dma_async_memcpy(u64 dest, u64 src, int len,
+ void (*user_cb)(void *user1), void *user_param);
+
+int ipa_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
+
+void ipa_dma_destroy(void);
+
+/*
+ * MHI
+ */
+int ipa_mhi_init(struct ipa_mhi_init_params *params);
+
+int ipa_mhi_start(struct ipa_mhi_start_params *params);
+
+int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl);
+
+int ipa_mhi_disconnect_pipe(u32 clnt_hdl);
+
+int ipa_mhi_suspend(bool force);
+
+int ipa_mhi_resume(void);
+
+void ipa_mhi_destroy(void);
+
+/*
+ * IPA_USB
+ */
+
+/**
+ * ipa_usb_init_teth_prot - Peripheral should call this function to initialize
+ * RNDIS/ECM/teth_bridge, prior to calling ipa_usb_xdci_connect()
+ *
+ * @usb_teth_type: tethering protocol type
+ * @teth_params: pointer to tethering protocol parameters.
+ * Should be struct ipa_usb_teth_params for RNDIS/ECM,
+ * or NULL for teth_bridge
+ * @ipa_usb_notify_cb: will be called to notify USB driver on certain events
+ * @user_data: cookie used for ipa_usb_notify_cb
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
+ struct ipa_usb_teth_params *teth_params,
+ int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event,
+ void *),
+ void *user_data);
+
+/**
+ * ipa_usb_xdci_connect - Peripheral should call this function to start IN &
+ * OUT xDCI channels, and connect RNDIS/ECM/MBIM/RMNET.
+ * For DIAG, only starts IN channel.
+ *
+ * @ul_chan_params: parameters for allocating UL xDCI channel. containing
+ * required info on event and transfer rings, and IPA EP
+ * configuration
+ * @ul_out_params: [out] opaque client handle assigned by IPA to client & DB
+ * registers physical address for UL channel
+ * @dl_chan_params: parameters for allocating DL xDCI channel. containing
+ * required info on event and transfer rings, and IPA EP
+ * configuration
+ * @dl_out_params: [out] opaque client handle assigned by IPA to client & DB
+ * registers physical address for DL channel
+ * @connect_params: handles and scratch params of the required channels,
+ * tethering protocol and the tethering protocol parameters.
+ *
+ * Note: Should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params,
+ struct ipa_usb_xdci_chan_params *dl_chan_params,
+ struct ipa_req_chan_out_params *ul_out_params,
+ struct ipa_req_chan_out_params *dl_out_params,
+ struct ipa_usb_xdci_connect_params *connect_params);
+
+/**
+ * ipa_usb_xdci_disconnect - Peripheral should call this function to stop
+ * IN & OUT xDCI channels
+ * For DIAG, only stops IN channel.
+ *
+ * @ul_clnt_hdl: client handle received from ipa_usb_xdci_connect()
+ * for OUT channel
+ * @dl_clnt_hdl: client handle received from ipa_usb_xdci_connect()
+ * for IN channel
+ * @teth_prot: tethering protocol
+ *
+ * Note: Should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+ enum ipa_usb_teth_prot teth_prot);
+
+/**
+ * ipa_usb_deinit_teth_prot - Peripheral should call this function to deinit
+ * RNDIS/ECM/MBIM/RMNET
+ *
+ * @teth_prot: tethering protocol
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot);
+
+/**
+ * ipa_usb_xdci_suspend - Peripheral should call this function to suspend
+ * IN & OUT xDCI channels
+ *
+ * @ul_clnt_hdl: client handle previously obtained from
+ * ipa_usb_xdci_connect() for OUT channel
+ * @dl_clnt_hdl: client handle previously obtained from
+ * ipa_usb_xdci_connect() for IN channel
+ * @teth_prot: tethering protocol
+ *
+ * Note: Should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+ enum ipa_usb_teth_prot teth_prot);
+
+/**
+ * ipa_usb_xdci_resume - Peripheral should call this function to resume
+ * IN & OUT xDCI channels
+ *
+ * @ul_clnt_hdl: client handle received from ipa_usb_xdci_connect()
+ * for OUT channel
+ * @dl_clnt_hdl: client handle received from ipa_usb_xdci_connect()
+ * for IN channel
+ *
+ * Note: Should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl);
+
+/*
+ * mux id
+ */
+int ipa_write_qmap_id(struct ipa_ioc_write_qmapid *param_in);
+
+/*
+ * interrupts
+ */
+int ipa_add_interrupt_handler(enum ipa_irq_type interrupt,
+ ipa_irq_handler_t handler,
+ bool deferred_flag,
+ void *private_data);
+
+int ipa_remove_interrupt_handler(enum ipa_irq_type interrupt);
+
+int ipa_restore_suspend_handler(void);
+
+/*
+ * Miscellaneous
+ */
+void ipa_bam_reg_dump(void);
+
+int ipa_get_ep_mapping(enum ipa_client_type client);
+
+bool ipa_is_ready(void);
+
+void ipa_proxy_clk_vote(void);
+void ipa_proxy_clk_unvote(void);
+
+enum ipa_hw_type ipa_get_hw_type(void);
+
+bool ipa_is_client_handle_valid(u32 clnt_hdl);
+
+enum ipa_client_type ipa_get_client_mapping(int pipe_idx);
+
+enum ipa_rm_resource_name ipa_get_rm_resource_from_ep(int pipe_idx);
+
+bool ipa_get_modem_cfg_emb_pipe_flt(void);
+
+enum ipa_transport_type ipa_get_transport_type(void);
+
+struct device *ipa_get_dma_dev(void);
+struct iommu_domain *ipa_get_smmu_domain(void);
+
+int ipa_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count);
+
+struct ipa_gsi_ep_config *ipa_get_gsi_ep_info(int ipa_ep_idx);
+
+int ipa_stop_gsi_channel(u32 clnt_hdl);
+
+typedef void (*ipa_ready_cb)(void *user_data);
+
+/**
+* ipa_register_ipa_ready_cb() - register a callback to be invoked
+* when IPA core driver initialization is complete.
+*
+* @ipa_ready_cb: CB to be triggered.
+* @user_data: Data to be sent to the originator of the CB.
+*
+* Note: This function is expected to be utilized when ipa_is_ready
+* function returns false.
+* An IPA client may also use this function directly rather than
+* calling ipa_is_ready beforehand, as if this API returns -EEXIST,
+* this means IPA initialization is complete (and no callback will
+* be triggered).
+* When the callback is triggered, the client MUST perform its
+* operations in a different context.
+*
+* The function will return 0 on success, -ENOMEM on memory issues and
+* -EEXIST if IPA initialization is complete already.
+*/
+int ipa_register_ipa_ready_cb(void (*ipa_ready_cb)(void *user_data),
+ void *user_data);
+
+#else /* (CONFIG_IPA || CONFIG_IPA3) */
+
+/*
+ * Connect / Disconnect
+ * (IPA support disabled: the stub implementations below return -EPERM)
+ */
+static inline int ipa_connect(const struct ipa_connect_params *in,
+ struct ipa_sps_params *sps, u32 *clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_disconnect(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+/*
+ * Resume / Suspend
+ */
+static inline int ipa_reset_endpoint(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+/*
+ * Remove ep delay
+ */
+static inline int ipa_clear_endpoint_delay(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+/*
+ * Configuration
+ */
+static inline int ipa_cfg_ep(u32 clnt_hdl,
+ const struct ipa_ep_cfg *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+static inline int ipa_cfg_ep_nat(u32 clnt_hdl,
+ const struct ipa_ep_cfg_nat *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+static inline int ipa_cfg_ep_hdr(u32 clnt_hdl,
+ const struct ipa_ep_cfg_hdr *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+static inline int ipa_cfg_ep_hdr_ext(u32 clnt_hdl,
+ const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+static inline int ipa_cfg_ep_mode(u32 clnt_hdl,
+ const struct ipa_ep_cfg_mode *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+static inline int ipa_cfg_ep_aggr(u32 clnt_hdl,
+ const struct ipa_ep_cfg_aggr *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+static inline int ipa_cfg_ep_deaggr(u32 clnt_hdl,
+ const struct ipa_ep_cfg_deaggr *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+static inline int ipa_cfg_ep_route(u32 clnt_hdl,
+ const struct ipa_ep_cfg_route *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+static inline int ipa_cfg_ep_holb(u32 clnt_hdl,
+ const struct ipa_ep_cfg_holb *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+static inline int ipa_cfg_ep_cfg(u32 clnt_hdl,
+ const struct ipa_ep_cfg_cfg *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+static inline int ipa_cfg_ep_metadata_mask(u32 clnt_hdl,
+ const struct ipa_ep_cfg_metadata_mask *ipa_ep_cfg)
+{
+ return -EPERM;
+}
+
+static inline int ipa_cfg_ep_ctrl(u32 clnt_hdl,
+ const struct ipa_ep_cfg_ctrl *ep_ctrl)
+{
+ return -EPERM;
+}
+
+/*
+ * Header removal / addition
+ */
+static inline int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs)
+{
+ return -EPERM;
+}
+
+static inline int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls)
+{
+ return -EPERM;
+}
+
+static inline int ipa_commit_hdr(void)
+{
+ return -EPERM;
+}
+
+static inline int ipa_reset_hdr(void)
+{
+ return -EPERM;
+}
+
+static inline int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup)
+{
+ return -EPERM;
+}
+
+static inline int ipa_put_hdr(u32 hdr_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy)
+{
+ return -EPERM;
+}
+
+/*
+ * Header Processing Context
+ */
+static inline int ipa_add_hdr_proc_ctx(
+ struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
+{
+ return -EPERM;
+}
+
+static inline int ipa_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls)
+{
+ return -EPERM;
+}
+/*
+ * Routing
+ */
+static inline int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
+{
+ return -EPERM;
+}
+
+static inline int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
+{
+ return -EPERM;
+}
+
+static inline int ipa_commit_rt(enum ipa_ip_type ip)
+{
+ return -EPERM;
+}
+
+static inline int ipa_reset_rt(enum ipa_ip_type ip)
+{
+ return -EPERM;
+}
+
+static inline int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
+{
+ return -EPERM;
+}
+
+static inline int ipa_put_rt_tbl(u32 rt_tbl_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in)
+{
+ return -EPERM;
+}
+
+static inline int ipa_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules)
+{
+ return -EPERM;
+}
+
+/*
+ * Filtering
+ */
+static inline int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
+{
+ return -EPERM;
+}
+
+static inline int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
+{
+ return -EPERM;
+}
+
+static inline int ipa_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules)
+{
+ return -EPERM;
+}
+
+static inline int ipa_commit_flt(enum ipa_ip_type ip)
+{
+ return -EPERM;
+}
+
+static inline int ipa_reset_flt(enum ipa_ip_type ip)
+{
+ return -EPERM;
+}
+
+/*
+ * NAT
+ */
+static inline int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
+{
+ return -EPERM;
+}
+
+
+static inline int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
+{
+ return -EPERM;
+}
+
+/*
+ * Messaging
+ */
+static inline int ipa_send_msg(struct ipa_msg_meta *meta, void *buff,
+ ipa_msg_free_fn callback)
+{
+ return -EPERM;
+}
+
+static inline int ipa_register_pull_msg(struct ipa_msg_meta *meta,
+ ipa_msg_pull_fn callback)
+{
+ return -EPERM;
+}
+
+static inline int ipa_deregister_pull_msg(struct ipa_msg_meta *meta)
+{
+ return -EPERM;
+}
+
+/*
+ * Interface
+ */
+static inline int ipa_register_intf(const char *name,
+ const struct ipa_tx_intf *tx,
+ const struct ipa_rx_intf *rx)
+{
+ return -EPERM;
+}
+
+static inline int ipa_register_intf_ext(const char *name,
+ const struct ipa_tx_intf *tx,
+ const struct ipa_rx_intf *rx,
+ const struct ipa_ext_intf *ext)
+{
+ return -EPERM;
+}
+
+static inline int ipa_deregister_intf(const char *name)
+{
+ return -EPERM;
+}
+
+/*
+ * Aggregation
+ */
+static inline int ipa_set_aggr_mode(enum ipa_aggr_mode mode)
+{
+ return -EPERM;
+}
+
+static inline int ipa_set_qcncm_ndp_sig(char sig[3])
+{
+ return -EPERM;
+}
+
+static inline int ipa_set_single_ndp_per_mbim(bool enable)
+{
+ return -EPERM;
+}
+
+/*
+ * Data path
+ */
+static inline int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+ struct ipa_tx_meta *metadata)
+{
+ return -EPERM;
+}
+
+/*
+ * To transfer multiple data packets
+ */
+static inline int ipa_tx_dp_mul(
+ enum ipa_client_type dst,
+ struct ipa_tx_data_desc *data_desc)
+{
+ return -EPERM;
+}
+
+static inline void ipa_free_skb(struct ipa_rx_data *rx_in)
+{
+ return;
+}
+
+/*
+ * System pipes
+ */
+/* Stub for CONFIG-off builds: no IPA means no SMEM restricted bytes. */
+static inline u16 ipa_get_smem_restr_bytes(void)
+{
+ /* u16 cannot carry -EPERM (would truncate to 0xFFF3); 0 is the
+  * correct "nothing reserved" answer for the disabled stub.
+  */
+ return 0;
+}
+
+static inline int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in,
+ u32 *clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_teardown_sys_pipe(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_connect_wdi_pipe(struct ipa_wdi_in_params *in,
+ struct ipa_wdi_out_params *out)
+{
+ return -EPERM;
+}
+
+static inline int ipa_disconnect_wdi_pipe(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_enable_wdi_pipe(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_disable_wdi_pipe(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_resume_wdi_pipe(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_suspend_wdi_pipe(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_uc_wdi_get_dbpa(
+ struct ipa_wdi_db_params *out)
+{
+ return -EPERM;
+}
+
+static inline int ipa_uc_reg_rdyCB(
+ struct ipa_wdi_uc_ready_params *param)
+{
+ return -EPERM;
+}
+
+
+/*
+ * Resource manager
+ */
+static inline int ipa_rm_create_resource(
+ struct ipa_rm_create_params *create_params)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_delete_resource(
+ enum ipa_rm_resource_name resource_name)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_register(enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_register_params *reg_params)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_set_perf_profile(
+ enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_perf_profile *profile)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_deregister(enum ipa_rm_resource_name resource_name,
+ struct ipa_rm_register_params *reg_params)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_add_dependency(
+ enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_add_dependency_sync(
+ enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_delete_dependency(
+ enum ipa_rm_resource_name resource_name,
+ enum ipa_rm_resource_name depends_on_name)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_request_resource(
+ enum ipa_rm_resource_name resource_name)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_release_resource(
+ enum ipa_rm_resource_name resource_name)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_notify_completion(enum ipa_rm_event event,
+ enum ipa_rm_resource_name resource_name)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_inactivity_timer_init(
+ enum ipa_rm_resource_name resource_name,
+ unsigned long msecs)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_inactivity_timer_destroy(
+ enum ipa_rm_resource_name resource_name)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_inactivity_timer_request_resource(
+ enum ipa_rm_resource_name resource_name)
+{
+ return -EPERM;
+}
+
+static inline int ipa_rm_inactivity_timer_release_resource(
+ enum ipa_rm_resource_name resource_name)
+{
+ return -EPERM;
+}
+
+/*
+ * Tethering bridge (Rmnet / MBIM)
+ */
+static inline int teth_bridge_init(struct teth_bridge_init_params *params)
+{
+ return -EPERM;
+}
+
+static inline int teth_bridge_disconnect(enum ipa_client_type client)
+{
+ return -EPERM;
+}
+
+static inline int teth_bridge_connect(struct teth_bridge_connect_params
+ *connect_params)
+{
+ return -EPERM;
+}
+
+/*
+ * Tethering client info
+ */
+static inline void ipa_set_client(int index, enum ipacm_client_enum client,
+ bool uplink)
+{
+ return;
+}
+
+static inline enum ipacm_client_enum ipa_get_client(int pipe_idx)
+{
+ return -EPERM;
+}
+
+/* Stub for CONFIG-off builds: no tethering client info is tracked. */
+static inline bool ipa_get_client_uplink(int pipe_idx)
+{
+ /* Returning -EPERM from a bool function converts to "true"; the
+  * disabled stub must report "not uplink" explicitly.
+  */
+ return false;
+}
+
+
+/*
+ * ODU bridge
+ */
+static inline int odu_bridge_init(struct odu_bridge_params *params)
+{
+ return -EPERM;
+}
+
+static inline int odu_bridge_disconnect(void)
+{
+ return -EPERM;
+}
+
+static inline int odu_bridge_connect(void)
+{
+ return -EPERM;
+}
+
+static inline int odu_bridge_tx_dp(struct sk_buff *skb,
+ struct ipa_tx_meta *metadata)
+{
+ return -EPERM;
+}
+
+static inline int odu_bridge_cleanup(void)
+{
+ return -EPERM;
+}
+
+/*
+ * IPADMA
+ */
+static inline int ipa_dma_init(void)
+{
+ return -EPERM;
+}
+
+static inline int ipa_dma_enable(void)
+{
+ return -EPERM;
+}
+
+static inline int ipa_dma_disable(void)
+{
+ return -EPERM;
+}
+
+static inline int ipa_dma_sync_memcpy(phys_addr_t dest, phys_addr_t src
+ , int len)
+{
+ return -EPERM;
+}
+
+static inline int ipa_dma_async_memcpy(phys_addr_t dest, phys_addr_t src
+ , int len, void (*user_cb)(void *user1),
+ void *user_param)
+{
+ return -EPERM;
+}
+
+static inline int ipa_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len)
+{
+ return -EPERM;
+}
+
+static inline void ipa_dma_destroy(void)
+{
+ return;
+}
+
+/*
+ * MHI
+ */
+static inline int ipa_mhi_init(struct ipa_mhi_init_params *params)
+{
+ return -EPERM;
+}
+
+static inline int ipa_mhi_start(struct ipa_mhi_start_params *params)
+{
+ return -EPERM;
+}
+
+static inline int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in,
+ u32 *clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_mhi_disconnect_pipe(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_mhi_suspend(bool force)
+{
+ return -EPERM;
+}
+
+static inline int ipa_mhi_resume(void)
+{
+ return -EPERM;
+}
+
+static inline void ipa_mhi_destroy(void)
+{
+ return;
+}
+
+/*
+ * IPA_USB
+ */
+
+static inline int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
+ struct ipa_usb_teth_params *teth_params,
+ int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event,
+ void *),
+ void *user_data)
+{
+ return -EPERM;
+}
+
+static inline int ipa_usb_xdci_connect(
+ struct ipa_usb_xdci_chan_params *ul_chan_params,
+ struct ipa_usb_xdci_chan_params *dl_chan_params,
+ struct ipa_req_chan_out_params *ul_out_params,
+ struct ipa_req_chan_out_params *dl_out_params,
+ struct ipa_usb_xdci_connect_params *connect_params)
+{
+ return -EPERM;
+}
+
+static inline int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+ enum ipa_usb_teth_prot teth_prot)
+{
+ return -EPERM;
+}
+
+static inline int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot)
+{
+ return -EPERM;
+}
+
+static inline int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+ enum ipa_usb_teth_prot teth_prot)
+{
+ return -EPERM;
+}
+
+static inline int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl)
+{
+ return -EPERM;
+}
+
+/*
+ * mux id
+ */
+static inline int ipa_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
+{
+ return -EPERM;
+}
+
+/*
+ * interrupts
+ */
+static inline int ipa_add_interrupt_handler(enum ipa_irq_type interrupt,
+ ipa_irq_handler_t handler,
+ bool deferred_flag,
+ void *private_data)
+{
+ return -EPERM;
+}
+
+static inline int ipa_remove_interrupt_handler(enum ipa_irq_type interrupt)
+{
+ return -EPERM;
+}
+
+static inline int ipa_restore_suspend_handler(void)
+{
+ return -EPERM;
+}
+
+/*
+ * Miscellaneous
+ */
+static inline void ipa_bam_reg_dump(void)
+{
+ return;
+}
+
+static inline int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
+{
+ return -EPERM;
+}
+
+static inline int ipa_get_ep_mapping(enum ipa_client_type client)
+{
+ return -EPERM;
+}
+
+static inline bool ipa_is_ready(void)
+{
+ return false;
+}
+
+static inline void ipa_proxy_clk_vote(void)
+{
+}
+
+static inline void ipa_proxy_clk_unvote(void)
+{
+}
+
+static inline enum ipa_hw_type ipa_get_hw_type(void)
+{
+ return IPA_HW_None;
+}
+
+/* Stub for CONFIG-off builds: no client handles exist. */
+static inline bool ipa_is_client_handle_valid(u32 clnt_hdl)
+{
+ /* -EINVAL implicitly converts to "true" for a bool, which would
+  * claim every handle is valid; report invalid instead.
+  */
+ return false;
+}
+
+static inline enum ipa_client_type ipa_get_client_mapping(int pipe_idx)
+{
+ return -EINVAL;
+}
+
+static inline enum ipa_rm_resource_name ipa_get_rm_resource_from_ep(
+ int pipe_idx)
+{
+ return -EFAULT;
+}
+
+/* Stub for CONFIG-off builds: modem embedded-pipe filtering is off. */
+static inline bool ipa_get_modem_cfg_emb_pipe_flt(void)
+{
+ /* A bool stub must not return -EINVAL (nonzero == true); the
+  * feature is unavailable, so report false.
+  */
+ return false;
+}
+
+static inline enum ipa_transport_type ipa_get_transport_type(void)
+{
+ return -EFAULT;
+}
+
+static inline struct device *ipa_get_dma_dev(void)
+{
+ return NULL;
+}
+
+static inline struct iommu_domain *ipa_get_smmu_domain(void)
+{
+ return NULL;
+}
+
+static inline int ipa_create_wdi_mapping(u32 num_buffers,
+ struct ipa_wdi_buffer_info *info)
+{
+ return -EINVAL;
+}
+
+static inline int ipa_release_wdi_mapping(u32 num_buffers,
+ struct ipa_wdi_buffer_info *info)
+{
+ return -EINVAL;
+}
+
+static inline int ipa_disable_apps_wan_cons_deaggr(void)
+{
+ return -EINVAL;
+}
+
+static inline struct ipa_gsi_ep_config *ipa_get_gsi_ep_info(int ipa_ep_idx)
+{
+ return NULL;
+}
+
+static inline int ipa_stop_gsi_channel(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_register_ipa_ready_cb(
+ void (*ipa_ready_cb)(void *user_data),
+ void *user_data)
+{
+ return -EPERM;
+}
+
+#endif /* (CONFIG_IPA || CONFIG_IPA3) */
+
+#endif /* _IPA_H_ */
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h
new file mode 100644
index 000000000000..4aedca7d6d7f
--- /dev/null
+++ b/include/linux/msm_gsi.h
@@ -0,0 +1,1178 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef MSM_GSI_H
+#define MSM_GSI_H
+#include <linux/types.h>
+
+enum gsi_status {
+ GSI_STATUS_SUCCESS = 0,
+ GSI_STATUS_ERROR = 1,
+ GSI_STATUS_RING_INSUFFICIENT_SPACE = 2,
+ GSI_STATUS_RING_EMPTY = 3,
+ GSI_STATUS_RES_ALLOC_FAILURE = 4,
+ GSI_STATUS_BAD_STATE = 5,
+ GSI_STATUS_INVALID_PARAMS = 6,
+ GSI_STATUS_UNSUPPORTED_OP = 7,
+ GSI_STATUS_NODEV = 8,
+ GSI_STATUS_POLL_EMPTY = 9,
+ GSI_STATUS_EVT_RING_INCOMPATIBLE = 10,
+ GSI_STATUS_TIMED_OUT = 11,
+ GSI_STATUS_AGAIN = 12,
+};
+
+enum gsi_per_evt {
+ GSI_PER_EVT_GLOB_ERROR,
+ GSI_PER_EVT_GLOB_GP1,
+ GSI_PER_EVT_GLOB_GP2,
+ GSI_PER_EVT_GLOB_GP3,
+ GSI_PER_EVT_GENERAL_BREAK_POINT,
+ GSI_PER_EVT_GENERAL_BUS_ERROR,
+ GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW,
+ GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW,
+};
+
+/**
+ * gsi_per_notify - Peripheral callback info
+ *
+ * @user_data: cookie supplied in gsi_register_device
+ * @evt_id: type of notification
+ * @err_desc: error related information
+ *
+ */
+struct gsi_per_notify {
+ void *user_data;
+ enum gsi_per_evt evt_id;
+ union {
+ uint16_t err_desc;
+ } data;
+};
+
+enum gsi_intr_type {
+ GSI_INTR_MSI = 0x0,
+ GSI_INTR_IRQ = 0x1
+};
+
+
+/**
+ * gsi_per_props - Peripheral related properties
+ *
+ * @ee: EE where this driver and peripheral driver runs
+ * @intr: control interrupt type
+ * @intvec: write data for MSI write
+ * @msi_addr: MSI address
+ * @irq: IRQ number
+ * @phys_addr: physical address of GSI block
+ * @size: register size of GSI block
+ * @notify_cb: general notification callback
+ * @req_clk_cb: callback to request peripheral clock
+ * granted should be set to true if request is completed
+ * synchronously, false otherwise (peripheral needs
+ * to call gsi_complete_clk_grant later when request is
+ * completed)
+ * if this callback is not provided, then GSI will assume
+ * peripheral is clocked at all times
+ * @rel_clk_cb: callback to release peripheral clock
+ * @user_data: cookie used for notifications
+ *
+ * All the callbacks are in interrupt context
+ *
+ */
+struct gsi_per_props {
+ unsigned int ee;
+ enum gsi_intr_type intr;
+ uint32_t intvec;
+ uint64_t msi_addr;
+ unsigned int irq;
+ phys_addr_t phys_addr;
+ unsigned long size;
+ void (*notify_cb)(struct gsi_per_notify *notify);
+ void (*req_clk_cb)(void *user_data, bool *granted);
+ int (*rel_clk_cb)(void *user_data);
+ void *user_data;
+};
+
+enum gsi_evt_err {
+ GSI_EVT_OUT_OF_BUFFERS_ERR = 0x0,
+ GSI_EVT_OUT_OF_RESOURCES_ERR = 0x1,
+ GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR = 0x2,
+ GSI_EVT_EVT_RING_EMPTY_ERR = 0x3,
+};
+
+/**
+ * gsi_evt_err_notify - event ring error callback info
+ *
+ * @user_data: cookie supplied in gsi_alloc_evt_ring
+ * @evt_id: type of error
+ * @err_desc: more info about the error
+ *
+ */
+struct gsi_evt_err_notify {
+ void *user_data;
+ enum gsi_evt_err evt_id;
+ uint16_t err_desc;
+};
+
+enum gsi_evt_chtype {
+ GSI_EVT_CHTYPE_MHI_EV = 0x0,
+ GSI_EVT_CHTYPE_XHCI_EV = 0x1,
+ GSI_EVT_CHTYPE_GPI_EV = 0x2,
+ GSI_EVT_CHTYPE_XDCI_EV = 0x3
+};
+
+enum gsi_evt_ring_elem_size {
+ GSI_EVT_RING_RE_SIZE_4B = 4,
+ GSI_EVT_RING_RE_SIZE_16B = 16,
+};
+
+/**
+ * gsi_evt_ring_props - Event ring related properties
+ *
+ * @intf: interface type (of the associated channel)
+ * @intr: interrupt type
+ * @re_size: size of event ring element
+ * @ring_len: length of ring in bytes (must be integral multiple of
+ * re_size)
+ * @ring_base_addr: physical base address of ring. Address must be aligned to
+ * ring_len rounded to power of two
+ * @ring_base_vaddr: virtual base address of ring (set to NULL when not
+ * applicable)
+ * @int_modt: cycles base interrupt moderation (32KHz clock)
+ * @int_modc: interrupt moderation packet counter
+ * @intvec: write data for MSI write
+ * @msi_addr: MSI address
+ * @rp_update_addr: physical address to which event read pointer should be
+ * written on every event generation. must be set to 0 when
+ *			no update is desired
+ * @exclusive: if true, only one GSI channel can be associated with this
+ * event ring. if false, the event ring can be shared among
+ * multiple GSI channels but in that case no polling
+ * (GSI_CHAN_MODE_POLL) is supported on any of those channels
+ * @err_cb: error notification callback
+ * @user_data: cookie used for error notifications
+ * @evchid_valid: is evchid valid?
+ * @evchid: the event ID that is being specifically requested (this is
+ * relevant for MHI where doorbell routing requires ERs to be
+ * physically contiguous)
+ */
+struct gsi_evt_ring_props {
+ enum gsi_evt_chtype intf;
+ enum gsi_intr_type intr;
+ enum gsi_evt_ring_elem_size re_size;
+ uint16_t ring_len;
+ uint64_t ring_base_addr;
+ void *ring_base_vaddr;
+ uint16_t int_modt;
+ uint8_t int_modc;
+ uint32_t intvec;
+ uint64_t msi_addr;
+ uint64_t rp_update_addr;
+ bool exclusive;
+ void (*err_cb)(struct gsi_evt_err_notify *notify);
+ void *user_data;
+ bool evchid_valid;
+ uint8_t evchid;
+};
+
+enum gsi_chan_mode {
+ GSI_CHAN_MODE_CALLBACK = 0x0,
+ GSI_CHAN_MODE_POLL = 0x1,
+};
+
+enum gsi_chan_prot {
+ GSI_CHAN_PROT_MHI = 0x0,
+ GSI_CHAN_PROT_XHCI = 0x1,
+ GSI_CHAN_PROT_GPI = 0x2,
+ GSI_CHAN_PROT_XDCI = 0x3
+};
+
+enum gsi_chan_dir {
+ GSI_CHAN_DIR_FROM_GSI = 0x0,
+ GSI_CHAN_DIR_TO_GSI = 0x1
+};
+
+enum gsi_max_prefetch {
+ GSI_ONE_PREFETCH_SEG = 0x0,
+ GSI_TWO_PREFETCH_SEG = 0x1
+};
+
+enum gsi_chan_evt {
+ GSI_CHAN_EVT_INVALID = 0x0,
+ GSI_CHAN_EVT_SUCCESS = 0x1,
+ GSI_CHAN_EVT_EOT = 0x2,
+ GSI_CHAN_EVT_OVERFLOW = 0x3,
+ GSI_CHAN_EVT_EOB = 0x4,
+ GSI_CHAN_EVT_OOB = 0x5,
+ GSI_CHAN_EVT_DB_MODE = 0x6,
+ GSI_CHAN_EVT_UNDEFINED = 0x10,
+ GSI_CHAN_EVT_RE_ERROR = 0x11,
+};
+
+/**
+ * gsi_chan_xfer_notify - Channel callback info
+ *
+ * @chan_user_data: cookie supplied in gsi_alloc_channel
+ * @xfer_user_data: cookie of the gsi_xfer_elem that caused the
+ * event to be generated
+ * @evt_id: type of event triggered by the associated TRE
+ * (corresponding to xfer_user_data)
+ * @bytes_xfered: number of bytes transferred by the associated TRE
+ * (corresponding to xfer_user_data)
+ *
+ */
+struct gsi_chan_xfer_notify {
+ void *chan_user_data;
+ void *xfer_user_data;
+ enum gsi_chan_evt evt_id;
+ uint16_t bytes_xfered;
+};
+
+enum gsi_chan_err {
+ GSI_CHAN_INVALID_TRE_ERR = 0x0,
+ GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR = 0x1,
+ GSI_CHAN_OUT_OF_BUFFERS_ERR = 0x2,
+ GSI_CHAN_OUT_OF_RESOURCES_ERR = 0x3,
+ GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR = 0x4,
+ GSI_CHAN_HWO_1_ERR = 0x5
+};
+
+/**
+ * gsi_chan_err_notify - Channel general callback info
+ *
+ * @chan_user_data: cookie supplied in gsi_alloc_channel
+ * @evt_id: type of error
+ * @err_desc: more info about the error
+ *
+ */
+struct gsi_chan_err_notify {
+ void *chan_user_data;
+ enum gsi_chan_err evt_id;
+ uint16_t err_desc;
+};
+
+enum gsi_chan_ring_elem_size {
+ GSI_CHAN_RE_SIZE_4B = 4,
+ GSI_CHAN_RE_SIZE_16B = 16,
+ GSI_CHAN_RE_SIZE_32B = 32,
+};
+
+enum gsi_chan_use_db_eng {
+ GSI_CHAN_DIRECT_MODE = 0x0,
+ GSI_CHAN_DB_MODE = 0x1,
+};
+
+/**
+ * gsi_chan_props - Channel related properties
+ *
+ * @prot: interface type
+ * @dir: channel direction
+ * @ch_id: virtual channel ID
+ * @evt_ring_hdl: handle of associated event ring. set to ~0 if no
+ * event ring associated
+ * @re_size: size of channel ring element
+ * @ring_len: length of ring in bytes (must be integral multiple of
+ * re_size)
+ * @ring_base_addr: physical base address of ring. Address must be aligned to
+ * ring_len rounded to power of two
+ * @ring_base_vaddr: virtual base address of ring (set to NULL when not
+ * applicable)
+ * @use_db_eng: 0 => direct mode (doorbells are written directly to RE
+ * engine)
+ * 1 => DB mode (doorbells are written to DB engine)
+ * @max_prefetch: limit number of pre-fetch segments for channel
+ * @low_weight: low channel weight (priority of channel for RE engine
+ * round robin algorithm); must be >= 1
+ * @xfer_cb: transfer notification callback, this callback happens
+ * on event boundaries
+ *
+ * e.g. 1
+ *
+ * out TD with 3 REs
+ *
+ * RE1: EOT=0, EOB=0, CHAIN=1;
+ * RE2: EOT=0, EOB=0, CHAIN=1;
+ * RE3: EOT=1, EOB=0, CHAIN=0;
+ *
+ * the callback will be triggered for RE3 using the
+ * xfer_user_data of that RE
+ *
+ * e.g. 2
+ *
+ * in REs
+ *
+ * RE1: EOT=1, EOB=0, CHAIN=0;
+ * RE2: EOT=1, EOB=0, CHAIN=0;
+ * RE3: EOT=1, EOB=0, CHAIN=0;
+ *
+ * received packet consumes all of RE1, RE2 and part of RE3
+ * for EOT condition. there will be three callbacks in below
+ * order
+ *
+ * callback for RE1 using GSI_CHAN_EVT_OVERFLOW
+ * callback for RE2 using GSI_CHAN_EVT_OVERFLOW
+ * callback for RE3 using GSI_CHAN_EVT_EOT
+ *
+ * @err_cb: error notification callback
+ * @chan_user_data: cookie used for notifications
+ *
+ * All the callbacks are in interrupt context
+ *
+ */
+struct gsi_chan_props {
+ enum gsi_chan_prot prot;
+ enum gsi_chan_dir dir;
+ uint8_t ch_id;
+ unsigned long evt_ring_hdl;
+ enum gsi_chan_ring_elem_size re_size;
+ uint16_t ring_len;
+ uint64_t ring_base_addr;
+ void *ring_base_vaddr;
+ enum gsi_chan_use_db_eng use_db_eng;
+ enum gsi_max_prefetch max_prefetch;
+ uint8_t low_weight;
+ void (*xfer_cb)(struct gsi_chan_xfer_notify *notify);
+ void (*err_cb)(struct gsi_chan_err_notify *notify);
+ void *chan_user_data;
+};
+
+enum gsi_xfer_flag {
+ GSI_XFER_FLAG_CHAIN = 0x1,
+ GSI_XFER_FLAG_EOB = 0x100,
+ GSI_XFER_FLAG_EOT = 0x200,
+ GSI_XFER_FLAG_BEI = 0x400
+};
+
+enum gsi_xfer_elem_type {
+ GSI_XFER_ELEM_DATA,
+ GSI_XFER_ELEM_IMME_CMD,
+};
+
+/**
+ * gsi_xfer_elem - Metadata about a single transfer
+ *
+ * @addr: physical address of buffer
+ * @len: size of buffer for GSI_XFER_ELEM_DATA:
+ * for outbound transfers this is the number of bytes to
+ * transfer.
+ * for inbound transfers, this is the maximum number of
+ * bytes the host expects from device in this transfer
+ *
+ * immediate command opcode for GSI_XFER_ELEM_IMME_CMD
+ * @flags: transfer flags, OR of all the applicable flags
+ *
+ * GSI_XFER_FLAG_BEI: Block event interrupt
+ * 1: Event generated by this ring element must not assert
+ * an interrupt to the host
+ * 0: Event generated by this ring element must assert an
+ * interrupt to the host
+ *
+ * GSI_XFER_FLAG_EOT: Interrupt on end of transfer
+ * 1: If an EOT condition is encountered when processing
+ * this ring element, an event is generated by the device
+ * with its completion code set to EOT.
+ * 0: If an EOT condition is encountered for this ring
+ * element, a completion event is not be generated by the
+ * device, unless IEOB is 1
+ *
+ * GSI_XFER_FLAG_EOB: Interrupt on end of block
+ * 1: Device notifies host after processing this ring element
+ * by sending a completion event
+ * 0: Completion event is not required after processing this
+ * ring element
+ *
+ * GSI_XFER_FLAG_CHAIN: Chain bit that identifies the ring
+ * elements in a TD
+ *
+ * @type: transfer type
+ *
+ * GSI_XFER_ELEM_DATA: for all data transfers
+ * GSI_XFER_ELEM_IMME_CMD: for IPA immediate commands
+ *
+ * @xfer_user_data: cookie used in xfer_cb
+ *
+ */
+struct gsi_xfer_elem {
+ uint64_t addr;
+ uint16_t len;
+ uint16_t flags;
+ enum gsi_xfer_elem_type type;
+ void *xfer_user_data;
+};
+
+/**
+ * gsi_mhi_channel_scratch - MHI protocol SW config area of
+ * channel scratch
+ *
+ * @mhi_host_wp_addr: Valid only when UL/DL Sync En is asserted. Defines
+ * address in host from which channel write pointer
+ * should be read in polling mode
+ * @max_outstanding_tre: Used for the prefetch management sequence by the
+ * sequencer. Defines the maximum number of allowed
+ * outstanding TREs in IPA/GSI (in Bytes). RE engine
+ * prefetch will be limited by this configuration. It
+ * is suggested to configure this value with the IPA_IF
+ * channel AOS queue size. To disable the feature in
+ * doorbell mode (DB Mode=1) Maximum outstanding TREs
+ * should be set to 64KB (or any value larger or equal
+ * to ring length . RLEN)
+ * @assert_bit40: 1: bit #41 in address should be asserted upon
+ * IPA_IF.ProcessDescriptor routine (for MHI over PCIe
+ * transfers)
+ * 0: bit #41 in address should be deasserted upon
+ * IPA_IF.ProcessDescriptor routine (for non-MHI over
+ * PCIe transfers)
+ * @ul_dl_sync_en: When asserted, UL/DL synchronization feature is
+ * enabled for the channel. Supported only for predefined
+ * UL/DL endpoint pair
+ * @outstanding_threshold: Used for the prefetch management sequence by the
+ * sequencer. Defines the threshold (in Bytes) as to when
+ * to update the channel doorbell. Should be smaller than
+ *			Maximum outstanding TREs value.
+ */
+struct __packed gsi_mhi_channel_scratch {
+ uint64_t mhi_host_wp_addr;
+ uint32_t ul_dl_sync_en:1;
+ uint32_t assert_bit40:1;
+ uint32_t resvd1:14;
+ uint32_t max_outstanding_tre:16;
+ uint32_t resvd2:16;
+ uint32_t outstanding_threshold:16;
+};
+
+/**
+ * gsi_xdci_channel_scratch - xDCI protocol SW config area of
+ * channel scratch
+ *
+ * @const_buffer_size:  TRB buffer size in KB (similar to IPA aggregation
+ * configuration). Must be aligned to Max USB Packet Size
+ * @xferrscidx: Transfer Resource Index (XferRscIdx). The hardware-assigned
+ * transfer resource index for the transfer, which was
+ * returned in response to the Start Transfer command.
+ * This field is used for "Update Transfer" command
+ * @last_trb_addr: Address (LSB - based on alignment restrictions) of
+ * last TRB in queue. Used to identify rollover case
+ * @depcmd_low_addr: Used to generate "Update Transfer" command
+ * @max_outstanding_tre: Used for the prefetch management sequence by the
+ * sequencer. Defines the maximum number of allowed
+ * outstanding TREs in IPA/GSI (in Bytes). RE engine
+ * prefetch will be limited by this configuration. It
+ * is suggested to configure this value with the IPA_IF
+ * channel AOS queue size. To disable the feature in
+ * doorbell mode (DB Mode=1) Maximum outstanding TREs
+ * should be set to 64KB (or any value larger or equal
+ * to ring length . RLEN)
+ * @depcmd_hi_addr: Used to generate "Update Transfer" command
+ * @outstanding_threshold: Used for the prefetch management sequence by the
+ * sequencer. Defines the threshold (in Bytes) as to when
+ * to update the channel doorbell. Should be smaller than
+ *			Maximum outstanding TREs value.
+ */
+struct __packed gsi_xdci_channel_scratch {
+ uint32_t last_trb_addr:16;
+ uint32_t resvd1:4;
+ uint32_t xferrscidx:7;
+ uint32_t const_buffer_size:5;
+ uint32_t depcmd_low_addr;
+ uint32_t depcmd_hi_addr:8;
+ uint32_t resvd2:8;
+ uint32_t max_outstanding_tre:16;
+ uint32_t resvd3:16;
+ uint32_t outstanding_threshold:16;
+};
+
+/**
+ * gsi_channel_scratch - channel scratch SW config area
+ *
+ */
+union __packed gsi_channel_scratch {
+ struct __packed gsi_mhi_channel_scratch mhi;
+ struct __packed gsi_xdci_channel_scratch xdci;
+ struct __packed {
+ uint32_t word1;
+ uint32_t word2;
+ uint32_t word3;
+ uint32_t word4;
+ } data;
+};
+
+/**
+ * gsi_mhi_evt_scratch - MHI protocol SW config area of
+ * event scratch
+ *
+ * @ul_dl_sync_en: When asserted, UL/DL synchronization feature is enabled for
+ * the channel. Supported only for predefined UL/DL endpoint
+ * pair
+ */
+struct __packed gsi_mhi_evt_scratch {
+ uint32_t resvd1:31;
+ uint32_t ul_dl_sync_en:1;
+ uint32_t resvd2;
+};
+
+/**
+ * gsi_xdci_evt_scratch - xDCI protocol SW config area of
+ * event scratch
+ *
+ */
+struct __packed gsi_xdci_evt_scratch {
+ uint32_t gevntcount_low_addr;
+ uint32_t gevntcount_hi_addr:8;
+ uint32_t resvd1:24;
+};
+
+/**
+ * gsi_evt_scratch - event scratch SW config area
+ *
+ */
+union __packed gsi_evt_scratch {
+ struct __packed gsi_mhi_evt_scratch mhi;
+ struct __packed gsi_xdci_evt_scratch xdci;
+ struct __packed {
+ uint32_t word1;
+ uint32_t word2;
+ } data;
+};
+
+/**
+ * gsi_device_scratch - EE scratch config parameters
+ *
+ * @mhi_base_chan_idx_valid: is mhi_base_chan_idx valid?
+ * @mhi_base_chan_idx: base index of IPA MHI channel indexes.
+ * IPA MHI channel index = GSI channel ID +
+ * MHI base channel index
+ * @max_usb_pkt_size_valid: is max_usb_pkt_size valid?
+ * @max_usb_pkt_size: max USB packet size in bytes (valid values are
+ * 512 and 1024)
+ */
+struct gsi_device_scratch {
+ bool mhi_base_chan_idx_valid;
+ uint8_t mhi_base_chan_idx;
+ bool max_usb_pkt_size_valid;
+ uint16_t max_usb_pkt_size;
+};
+
+/**
+ * gsi_chan_info - information about channel occupancy
+ *
+ * @wp: channel write pointer (physical address)
+ * @rp: channel read pointer (physical address)
+ * @evt_valid: is evt* info valid?
+ * @evt_wp: event ring write pointer (physical address)
+ * @evt_rp: event ring read pointer (physical address)
+ */
+struct gsi_chan_info {
+ uint64_t wp;
+ uint64_t rp;
+ bool evt_valid;
+ uint64_t evt_wp;
+ uint64_t evt_rp;
+};
+
+#ifdef CONFIG_GSI
+/**
+ * gsi_register_device - Peripheral should call this function to
+ * register itself with GSI before invoking any other APIs
+ *
+ * @props: Peripheral properties
+ * @dev_hdl: Handle populated by GSI, opaque to client
+ *
+ * @Return -GSI_STATUS_AGAIN if request should be re-tried later
+ * other error codes for failure
+ */
+int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl);
+
+/**
+ * gsi_complete_clk_grant - Peripheral should call this function to
+ * grant the clock resource requested by GSI previously that could not
+ * be granted synchronously. GSI will release the clock resource using
+ * the rel_clk_cb when appropriate
+ *
+ * @dev_hdl: Client handle previously obtained from
+ * gsi_register_device
+ *
+ * @Return gsi_status
+ */
+int gsi_complete_clk_grant(unsigned long dev_hdl);
+
+/**
+ * gsi_write_device_scratch - Peripheral should call this function to
+ * write to the EE scratch area
+ *
+ * @dev_hdl: Client handle previously obtained from
+ * gsi_register_device
+ * @val: Value to write
+ *
+ * @Return gsi_status
+ */
+int gsi_write_device_scratch(unsigned long dev_hdl,
+ struct gsi_device_scratch *val);
+
+/**
+ * gsi_deregister_device - Peripheral should call this function to
+ * de-register itself with GSI
+ *
+ * @dev_hdl: Client handle previously obtained from
+ * gsi_register_device
+ * @force: When set to true, cleanup is performed even if there
+ * are in use resources like channels, event rings, etc.
+ * this would be used after GSI reset to recover from some
+ * fatal error
+ * When set to false, there must not exist any allocated
+ * channels and event rings.
+ *
+ * @Return gsi_status
+ */
+int gsi_deregister_device(unsigned long dev_hdl, bool force);
+
+/**
+ * gsi_alloc_evt_ring - Peripheral should call this function to
+ * allocate an event ring
+ *
+ * @props: Event ring properties
+ * @dev_hdl: Client handle previously obtained from
+ * gsi_register_device
+ * @evt_ring_hdl: Handle populated by GSI, opaque to client
+ *
+ * This function can sleep
+ *
+ * @Return gsi_status
+ */
+int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
+ unsigned long *evt_ring_hdl);
+
+/**
+ * gsi_write_evt_ring_scratch - Peripheral should call this function to
+ * write to the scratch area of the event ring context
+ *
+ * @evt_ring_hdl: Client handle previously obtained from
+ * gsi_alloc_evt_ring
+ * @val: Value to write
+ *
+ * @Return gsi_status
+ */
+int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
+ union __packed gsi_evt_scratch val);
+
+/**
+ * gsi_dealloc_evt_ring - Peripheral should call this function to
+ * de-allocate an event ring. There should not exist any active
+ * channels using this event ring
+ *
+ * @evt_ring_hdl: Client handle previously obtained from
+ * gsi_alloc_evt_ring
+ *
+ * This function can sleep
+ *
+ * @Return gsi_status
+ */
+int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl);
+
+/**
+ * gsi_query_evt_ring_db_addr - Peripheral should call this function to
+ * query the physical addresses of the event ring doorbell registers
+ *
+ * @evt_ring_hdl: Client handle previously obtained from
+ * gsi_alloc_evt_ring
+ * @db_addr_wp_lsb: Physical address of doorbell register where the 32
+ * LSBs of the doorbell value should be written
+ * @db_addr_wp_msb: Physical address of doorbell register where the 32
+ * MSBs of the doorbell value should be written
+ *
+ * @Return gsi_status
+ */
+int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
+ uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb);
+
+/**
+ * gsi_reset_evt_ring - Peripheral should call this function to
+ * reset an event ring to recover from error state
+ *
+ * @evt_ring_hdl: Client handle previously obtained from
+ * gsi_alloc_evt_ring
+ *
+ * This function can sleep
+ *
+ * @Return gsi_status
+ */
+int gsi_reset_evt_ring(unsigned long evt_ring_hdl);
+
+/**
+ * gsi_get_evt_ring_cfg - This function returns the current config
+ * of the specified event ring
+ *
+ * @evt_ring_hdl: Client handle previously obtained from
+ * gsi_alloc_evt_ring
+ * @props: where to copy properties to
+ * @scr: where to copy scratch info to
+ *
+ * @Return gsi_status
+ */
+int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
+ struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr);
+
+/**
+ * gsi_set_evt_ring_cfg - This function applies the supplied config
+ * to the specified event ring.
+ *
+ * exclusive property of the event ring cannot be changed after
+ * gsi_alloc_evt_ring
+ *
+ * @evt_ring_hdl: Client handle previously obtained from
+ * gsi_alloc_evt_ring
+ * @props: the properties to apply
+ * @scr: the scratch info to apply
+ *
+ * @Return gsi_status
+ */
+int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
+ struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr);
+
+/**
+ * gsi_alloc_channel - Peripheral should call this function to
+ * allocate a channel
+ *
+ * @props: Channel properties
+ * @dev_hdl: Client handle previously obtained from
+ * gsi_register_device
+ * @chan_hdl: Handle populated by GSI, opaque to client
+ *
+ * This function can sleep
+ *
+ * @Return gsi_status
+ */
+int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
+ unsigned long *chan_hdl);
+
+/**
+ * gsi_write_channel_scratch - Peripheral should call this function to
+ * write to the scratch area of the channel context
+ *
+ * @chan_hdl: Client handle previously obtained from
+ * gsi_alloc_channel
+ * @val: Value to write
+ *
+ * @Return gsi_status
+ */
+int gsi_write_channel_scratch(unsigned long chan_hdl,
+ union __packed gsi_channel_scratch val);
+
+/**
+ * gsi_start_channel - Peripheral should call this function to
+ * start a channel i.e put into running state
+ *
+ * @chan_hdl: Client handle previously obtained from
+ * gsi_alloc_channel
+ *
+ * This function can sleep
+ *
+ * @Return gsi_status
+ */
+int gsi_start_channel(unsigned long chan_hdl);
+
+/**
+ * gsi_stop_channel - Peripheral should call this function to
+ * stop a channel. Stop will happen on a packet boundary
+ *
+ * @chan_hdl: Client handle previously obtained from
+ * gsi_alloc_channel
+ *
+ * This function can sleep
+ *
+ * @Return -GSI_STATUS_AGAIN if client should call stop/stop_db again
+ * other error codes for failure
+ */
+int gsi_stop_channel(unsigned long chan_hdl);
+
+/**
+ * gsi_reset_channel - Peripheral should call this function to
+ * reset a channel to recover from error state
+ *
+ * @chan_hdl: Client handle previously obtained from
+ * gsi_alloc_channel
+ *
+ * This function can sleep
+ *
+ * @Return gsi_status
+ */
+int gsi_reset_channel(unsigned long chan_hdl);
+
+/**
+ * gsi_dealloc_channel - Peripheral should call this function to
+ * de-allocate a channel
+ *
+ * @chan_hdl: Client handle previously obtained from
+ * gsi_alloc_channel
+ *
+ * This function can sleep
+ *
+ * @Return gsi_status
+ */
+int gsi_dealloc_channel(unsigned long chan_hdl);
+
+/**
+ * gsi_stop_db_channel - Peripheral should call this function to
+ * stop a channel when all transfer elements till the doorbell
+ * have been processed
+ *
+ * @chan_hdl: Client handle previously obtained from
+ * gsi_alloc_channel
+ *
+ * This function can sleep
+ *
+ * @Return -GSI_STATUS_AGAIN if client should call stop/stop_db again
+ * other error codes for failure
+ */
+int gsi_stop_db_channel(unsigned long chan_hdl);
+
+/**
+ * gsi_query_channel_db_addr - Peripheral should call this function to
+ * query the physical addresses of the channel doorbell registers
+ *
+ * @chan_hdl: Client handle previously obtained from
+ * gsi_alloc_channel
+ * @db_addr_wp_lsb: Physical address of doorbell register where the 32
+ * LSBs of the doorbell value should be written
+ * @db_addr_wp_msb: Physical address of doorbell register where the 32
+ * MSBs of the doorbell value should be written
+ *
+ * @Return gsi_status
+ */
+int gsi_query_channel_db_addr(unsigned long chan_hdl,
+ uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb);
+
+/**
+ * gsi_query_channel_info - Peripheral can call this function to query the
+ * channel and associated event ring (if any) status.
+ *
+ * @chan_hdl: Client handle previously obtained from
+ * gsi_alloc_channel
+ * @info: Where to read the values into
+ *
+ * @Return gsi_status
+ */
+int gsi_query_channel_info(unsigned long chan_hdl,
+ struct gsi_chan_info *info);
+
+/**
+ * gsi_is_channel_empty - Peripheral can call this function to query if
+ * the channel is empty. This is only applicable to GPI. "Empty" means
+ * GSI has consumed all descriptors for a TO_GSI channel and SW has
+ * processed all completed descriptors for a FROM_GSI channel.
+ *
+ * @chan_hdl: Client handle previously obtained from gsi_alloc_channel
+ * @is_empty: set by GSI based on channel emptiness
+ *
+ * @Return gsi_status
+ */
+int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty);
+
+/**
+ * gsi_get_channel_cfg - This function returns the current config
+ * of the specified channel
+ *
+ * @chan_hdl: Client handle previously obtained from
+ * gsi_alloc_channel
+ * @props: where to copy properties to
+ * @scr: where to copy scratch info to
+ *
+ * @Return gsi_status
+ */
+int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
+ union gsi_channel_scratch *scr);
+
+/**
+ * gsi_set_channel_cfg - This function applies the supplied config
+ * to the specified channel
+ *
+ * ch_id and evt_ring_hdl of the channel cannot be changed after
+ * gsi_alloc_channel
+ *
+ * @chan_hdl: Client handle previously obtained from
+ * gsi_alloc_channel
+ * @props: the properties to apply
+ * @scr: the scratch info to apply
+ *
+ * @Return gsi_status
+ */
+int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
+ union gsi_channel_scratch *scr);
+
+/**
+ * gsi_poll_channel - Peripheral should call this function to query for
+ * completed transfer descriptors.
+ *
+ * @chan_hdl: Client handle previously obtained from
+ * gsi_alloc_channel
+ * @notify: Information about the completed transfer if any
+ *
+ * @Return gsi_status (GSI_STATUS_POLL_EMPTY is returned if no transfers
+ * completed)
+ */
+int gsi_poll_channel(unsigned long chan_hdl,
+ struct gsi_chan_xfer_notify *notify);
+
+/**
+ * gsi_config_channel_mode - Peripheral should call this function
+ * to configure the channel mode.
+ *
+ * @chan_hdl: Client handle previously obtained from
+ * gsi_alloc_channel
+ * @mode: Mode to move the channel into
+ *
+ * @Return gsi_status
+ */
+int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode);
+
+/**
+ * gsi_queue_xfer - Peripheral should call this function
+ * to queue transfers on the given channel
+ *
+ * @chan_hdl: Client handle previously obtained from
+ * gsi_alloc_channel
+ * @num_xfers: Number of transfer in the array @ xfer
+ * @xfer: Array of num_xfers transfer descriptors
+ * @ring_db: If true, tell HW about these queued xfers
+ * If false, do not notify HW at this time
+ *
+ * @Return gsi_status
+ */
+int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
+ struct gsi_xfer_elem *xfer, bool ring_db);
+
+/**
+ * gsi_start_xfer - Peripheral should call this function to
+ * inform HW about queued xfers
+ *
+ * @chan_hdl: Client handle previously obtained from
+ * gsi_alloc_channel
+ *
+ * @Return gsi_status
+ */
+int gsi_start_xfer(unsigned long chan_hdl);
+
+/**
+ * gsi_configure_regs - Peripheral should call this function
+ * to configure the GSI registers before/after the FW is
+ * loaded but before it is enabled.
+ *
+ * @gsi_base_addr: Base address of GSI register space
+ * @gsi_size: Mapping size of the GSI register space
+ * @per_base_addr: Base address of the peripheral using GSI
+ *
+ * @Return gsi_status
+ */
+int gsi_configure_regs(phys_addr_t gsi_base_addr, u32 gsi_size,
+ phys_addr_t per_base_addr);
+
+/**
+ * gsi_enable_fw - Peripheral should call this function
+ * to enable the GSI FW after the FW has been loaded to the SRAM.
+ *
+ * @gsi_base_addr: Base address of GSI register space
+ * @gsi_size: Mapping size of the GSI register space
+ *
+ * @Return gsi_status
+ */
+int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size);
+
+/*
+ * Here is a typical sequence of calls
+ *
+ * gsi_register_device
+ *
+ * gsi_write_device_scratch (if the protocol needs this)
+ *
+ * gsi_alloc_evt_ring (for as many event rings as needed)
+ * gsi_write_evt_ring_scratch
+ *
+ * gsi_alloc_channel (for as many channels as needed; channels can have
+ * no event ring, an exclusive event ring or a shared event ring)
+ * gsi_write_channel_scratch
+ * gsi_start_channel
+ * gsi_queue_xfer/gsi_start_xfer
+ * gsi_config_channel_mode/gsi_poll_channel (if clients wants to poll on
+ * xfer completions)
+ * gsi_stop_db_channel/gsi_stop_channel
+ *
+ * gsi_dealloc_channel
+ *
+ * gsi_dealloc_evt_ring
+ *
+ * gsi_deregister_device
+ *
+ */
+#else
+/*
+ * GSI support compiled out: every API below is a static inline stub
+ * that fails with GSI_STATUS_UNSUPPORTED_OP, so callers still compile
+ * and can detect at runtime that GSI is unavailable.
+ */
+static inline int gsi_register_device(struct gsi_per_props *props,
+		unsigned long *dev_hdl)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_complete_clk_grant(unsigned long dev_hdl)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_write_device_scratch(unsigned long dev_hdl,
+ struct gsi_device_scratch *val)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_deregister_device(unsigned long dev_hdl, bool force)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props,
+ unsigned long dev_hdl,
+ unsigned long *evt_ring_hdl)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
+ union __packed gsi_evt_scratch val)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
+ uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_reset_evt_ring(unsigned long evt_ring_hdl)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_alloc_channel(struct gsi_chan_props *props,
+ unsigned long dev_hdl,
+ unsigned long *chan_hdl)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_write_channel_scratch(unsigned long chan_hdl,
+ union __packed gsi_channel_scratch val)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_start_channel(unsigned long chan_hdl)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_stop_channel(unsigned long chan_hdl)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_reset_channel(unsigned long chan_hdl)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_dealloc_channel(unsigned long chan_hdl)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_stop_db_channel(unsigned long chan_hdl)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_query_channel_db_addr(unsigned long chan_hdl,
+ uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_query_channel_info(unsigned long chan_hdl,
+ struct gsi_chan_info *info)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_poll_channel(unsigned long chan_hdl,
+ struct gsi_chan_xfer_notify *notify)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_config_channel_mode(unsigned long chan_hdl,
+ enum gsi_chan_mode mode)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
+ struct gsi_xfer_elem *xfer, bool ring_db)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_start_xfer(unsigned long chan_hdl)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_get_channel_cfg(unsigned long chan_hdl,
+ struct gsi_chan_props *props,
+ union gsi_channel_scratch *scr)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_set_channel_cfg(unsigned long chan_hdl,
+ struct gsi_chan_props *props,
+ union gsi_channel_scratch *scr)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
+ struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
+ struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_configure_regs(phys_addr_t gsi_base_addr, u32 gsi_size,
+ phys_addr_t per_base_addr)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+static inline int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
+#endif
+#endif
diff --git a/include/linux/rndis_ipa.h b/include/linux/rndis_ipa.h
new file mode 100644
index 000000000000..14cce939d485
--- /dev/null
+++ b/include/linux/rndis_ipa.h
@@ -0,0 +1,102 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _RNDIS_IPA_H_
+#define _RNDIS_IPA_H_
+
+#include <linux/ipa.h>
+
+/*
+ * @priv: private data given upon ipa_connect
+ * @evt: event enum, should be IPA_WRITE_DONE
+ * @data: for tx path the data field is the sent socket buffer.
+ */
+typedef void (*ipa_callback)(void *priv,
+ enum ipa_dp_evt_type evt,
+ unsigned long data);
+
+/*
+ * struct ipa_usb_init_params - parameters for driver initialization API
+ *
+ * @device_ready_notify: callback supplied by USB core driver
+ * This callback shall be called by the Netdev once the device
+ * is ready to receive data from tethered PC.
+ * @ipa_rx_notify: The network driver will set this callback (out parameter).
+ * this callback shall be supplied for ipa_connect upon pipe
+ * connection (USB->IPA), once IPA driver receive data packets
+ * from USB pipe destined for Apps this callback will be called.
+ * @ipa_tx_notify: The network driver will set this callback (out parameter).
+ * this callback shall be supplied for ipa_connect upon pipe
+ * connection (IPA->USB), once IPA driver send packets destined
+ * for USB, IPA BAM will notify for Tx-complete.
+ * @host_ethaddr: host Ethernet address in network order
+ * @device_ethaddr: device Ethernet address in network order
+ * @private: The network driver will set this pointer (out parameter).
+ * This pointer will hold the network device for later interaction
+ * with between USB driver and the network driver.
+ * @skip_ep_cfg: boolean field that determines if Apps-processor
+ * should or should not configure this end-point.
+ */
+struct ipa_usb_init_params {
+	void (*device_ready_notify)(void);
+	ipa_callback ipa_rx_notify;
+	ipa_callback ipa_tx_notify;
+	u8 host_ethaddr[ETH_ALEN];
+	u8 device_ethaddr[ETH_ALEN];
+	void *private;
+	bool skip_ep_cfg;
+};
+
+#ifdef CONFIG_RNDIS_IPA
+
+/* Initialize the RNDIS-over-IPA network interface; fills in the out
+ * members of @params (callbacks and @private handle) documented on
+ * struct ipa_usb_init_params above.
+ */
+int rndis_ipa_init(struct ipa_usb_init_params *params);
+
+/* Notify the network driver that the USB<->IPA pipes are connected.
+ * NOTE(review): @private is presumably the out pointer set by
+ * rndis_ipa_init() — confirm against the driver implementation.
+ */
+int rndis_ipa_pipe_connect_notify(u32 usb_to_ipa_hdl,
+		u32 ipa_to_usb_hdl,
+		u32 max_xfer_size_bytes_to_dev,
+		u32 max_packet_number_to_dev,
+		u32 max_xfer_size_bytes_to_host,
+		void *private);
+
+/* Notify the network driver that the pipes were disconnected. */
+int rndis_ipa_pipe_disconnect_notify(void *private);
+
+/* Release resources associated with the network interface held in
+ * @private.
+ */
+void rndis_ipa_cleanup(void *private);
+
+#else /* CONFIG_RNDIS_IPA*/
+
/*
 * Stub for rndis_ipa_init() when CONFIG_RNDIS_IPA is not set.
 *
 * Must be "static inline": a plain function definition in a header is
 * emitted as a global symbol in every translation unit that includes
 * it, causing multiple-definition link errors.  The sibling stubs
 * (rndis_ipa_pipe_disconnect_notify/rndis_ipa_cleanup) already follow
 * this convention.
 *
 * @params: ignored
 * Return: always 0 (success, no-op)
 */
static inline int rndis_ipa_init(struct ipa_usb_init_params *params)
{
	return 0;
}
+
+int rndis_ipa_pipe_connect_notify(u32 usb_to_ipa_hdl,
+ u32 ipa_to_usb_hdl,
+ u32 max_xfer_size_bytes_to_dev,
+ u32 max_packet_number_to_dev,
+ u32 max_xfer_size_bytes_to_host,
+ void *private)
+{
+ return 0;
+}
+
+/* Stub: nothing to do when CONFIG_RNDIS_IPA is compiled out. */
+static inline int rndis_ipa_pipe_disconnect_notify(void *private)
+{
+	return 0;
+}
+
+/* Stub: no resources to release when CONFIG_RNDIS_IPA is compiled out. */
+static inline void rndis_ipa_cleanup(void *private)
+{
+
+}
+#endif /* CONFIG_RNDIS_IPA */
+
+#endif /* _RNDIS_IPA_H_ */