 -rw-r--r--  CORE/CLD_TXRX/HTT/htt.c                            | 117
 -rw-r--r--  CORE/CLD_TXRX/HTT/htt.h                            | 304
 -rw-r--r--  CORE/CLD_TXRX/HTT/htt_h2t.c                        | 169
 -rw-r--r--  CORE/CLD_TXRX/HTT/htt_internal.h                   |  18
 -rw-r--r--  CORE/CLD_TXRX/HTT/htt_rx.c                         |  69
 -rw-r--r--  CORE/CLD_TXRX/HTT/htt_t2h.c                        |  12
 -rw-r--r--  CORE/CLD_TXRX/HTT/htt_tx.c                         | 177
 -rw-r--r--  CORE/CLD_TXRX/HTT/htt_types.h                      |  47
 -rw-r--r--  CORE/CLD_TXRX/TLSHIM/tl_shim.c                     | 254
 -rw-r--r--  CORE/CLD_TXRX/TLSHIM/tl_shim.h                     |   7
 -rw-r--r--  CORE/CLD_TXRX/TXRX/ol_cfg.c                        |  41
 -rw-r--r--  CORE/CLD_TXRX/TXRX/ol_txrx.c                       |  93
 -rw-r--r--  CORE/CLD_TXRX/TXRX/ol_txrx_types.h                 |   9
 -rw-r--r--  CORE/SERVICES/COMMON/adf/adf_nbuf.c                |   2
 -rw-r--r--  CORE/SERVICES/COMMON/adf/adf_os_types.h            |   1
 -rw-r--r--  CORE/SERVICES/COMMON/adf/linux/adf_os_types_pvt.h  |   1
 -rw-r--r--  CORE/SERVICES/COMMON/hif.h                         |  12
 -rw-r--r--  CORE/SERVICES/COMMON/htc_api.h                     |   6
 -rw-r--r--  CORE/SERVICES/COMMON/htc_services.h                |   8
 -rw-r--r--  CORE/SERVICES/COMMON/ol_cfg.h                      |  58
 -rw-r--r--  CORE/SERVICES/COMMON/ol_htt_api.h                  |  84
 -rw-r--r--  CORE/SERVICES/COMMON/ol_txrx_ctrl_api.h            | 136
 -rw-r--r--  CORE/SERVICES/HTC/htc.c                            |  18
 -rw-r--r--  CORE/TL/inc/wlan_qct_tl.h                          | 108
 24 files changed, 1738 insertions(+), 13 deletions(-)
diff --git a/CORE/CLD_TXRX/HTT/htt.c b/CORE/CLD_TXRX/HTT/htt.c
index 0552e668b39b..7ce284a19bae 100644
--- a/CORE/CLD_TXRX/HTT/htt.c
+++ b/CORE/CLD_TXRX/HTT/htt.c
@@ -58,6 +58,23 @@ htt_h2t_rx_ring_cfg_msg_hl(struct htt_pdev_t *pdev);
A_STATUS (*htt_h2t_rx_ring_cfg_msg)(
struct htt_pdev_t *pdev);
+#ifdef IPA_UC_OFFLOAD
+A_STATUS
+htt_ipa_config(htt_pdev_handle pdev, A_STATUS status)
+{
+ if ((A_OK == status) &&
+ ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev)) {
+ status = htt_h2t_ipa_uc_rsc_cfg_msg(pdev);
+ }
+ return status;
+}
+
+#define HTT_IPA_CONFIG htt_ipa_config
+#else
+#define HTT_IPA_CONFIG(pdev, status) status /* no-op */
+#endif /* IPA_UC_OFFLOAD */
+
+
struct htt_htc_pkt *
htt_htc_pkt_alloc(struct htt_pdev_t *pdev)
{
@@ -335,7 +352,10 @@ htt_attach_target(htt_pdev_handle pdev)
* handshaking.
*/
- return htt_h2t_rx_ring_cfg_msg(pdev);
+ status = htt_h2t_rx_ring_cfg_msg(pdev);
+ status = HTT_IPA_CONFIG(pdev, status);
+
+ return status;
}
void
@@ -480,3 +500,98 @@ void htt_htc_disable_aspm(void)
{
htc_disable_aspm();
}
+
+#ifdef IPA_UC_OFFLOAD
+/*
+ * Attach resource for micro controller data path
+ */
+int
+htt_ipa_uc_attach(struct htt_pdev_t *pdev)
+{
+ int error;
+
+ /* TX resource attach */
+ error = htt_tx_ipa_uc_attach(pdev,
+ ol_cfg_ipa_uc_tx_buf_size(pdev->ctrl_pdev),
+ ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev),
+ ol_cfg_ipa_uc_tx_partition_base(pdev->ctrl_pdev));
+ if (error) {
+ adf_os_print("HTT IPA UC TX attach fail code %d\n", error);
+ HTT_ASSERT0(0);
+ return error;
+ }
+
+ /* RX resource attach */
+ error = htt_rx_ipa_uc_attach(pdev,
+ ol_cfg_ipa_uc_rx_ind_ring_size(pdev->ctrl_pdev));
+ if (error) {
+ adf_os_print("HTT IPA UC RX attach fail code %d\n", error);
+ htt_tx_ipa_uc_detach(pdev);
+ HTT_ASSERT0(0);
+ return error;
+ }
+
+ return 0; /* success */
+}
+
+void
+htt_ipa_uc_detach(struct htt_pdev_t *pdev)
+{
+ /* TX IPA micro controller detach */
+ htt_tx_ipa_uc_detach(pdev);
+
+ /* RX IPA micro controller detach */
+ htt_rx_ipa_uc_detach(pdev);
+}
+
+/*
+ * Distribute micro controller resource to control module
+ */
+int
+htt_ipa_uc_get_resource(htt_pdev_handle pdev,
+ u_int32_t *ce_sr_base_paddr,
+ u_int32_t *ce_sr_ring_size,
+ u_int32_t *ce_reg_paddr,
+ u_int32_t *tx_comp_ring_base_paddr,
+ u_int32_t *tx_comp_ring_size,
+ u_int32_t *tx_num_alloc_buffer,
+ u_int32_t *rx_rdy_ring_base_paddr,
+ u_int32_t *rx_rdy_ring_size,
+ u_int32_t *rx_proc_done_idx_paddr)
+{
+ /* Release allocated resource to client */
+ *tx_comp_ring_base_paddr =
+ (u_int32_t)pdev->ipa_uc_tx_rsc.tx_comp_base.paddr;
+ *tx_comp_ring_size =
+ (u_int32_t)ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev);
+ *tx_num_alloc_buffer =
+ (u_int32_t)pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt;
+ *rx_rdy_ring_base_paddr =
+ (u_int32_t)pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr;
+ *rx_rdy_ring_size =
+ (u_int32_t)pdev->ipa_uc_rx_rsc.rx_ind_ring_size;
+ *rx_proc_done_idx_paddr =
+ (u_int32_t)pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr;
+
+ /* Get copy engine, bus resource */
+ HTCIpaGetCEResource(pdev->htc_pdev,
+ ce_sr_base_paddr, ce_sr_ring_size, ce_reg_paddr);
+
+
+ return 0;
+}
+
+/*
+ * Distribute micro controller doorbell register to firmware
+ */
+int
+htt_ipa_uc_set_doorbell_paddr(htt_pdev_handle pdev,
+ u_int32_t ipa_uc_tx_doorbell_paddr,
+ u_int32_t ipa_uc_rx_doorbell_paddr)
+{
+ pdev->ipa_uc_tx_rsc.tx_comp_idx_paddr = ipa_uc_tx_doorbell_paddr;
+ pdev->ipa_uc_rx_rsc.rx_rdy_idx_paddr = ipa_uc_rx_doorbell_paddr;
+ return 0;
+}
+#endif /* IPA_UC_OFFLOAD */
+
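The HTT_IPA_CONFIG wrapper added above is a conditional-compilation pass-through: with IPA_UC_OFFLOAD disabled it reduces to the status expression, so htt_attach_target can chain it unconditionally after the rx-ring config. A minimal standalone sketch of the same pattern (the names below are illustrative, not driver code):

#include <stdio.h>

typedef int A_STATUS;
#define A_OK 0

/* Feature-specific extra configuration step (illustrative only). */
static A_STATUS example_feature_config(void *pdev, A_STATUS status)
{
    (void)pdev;
    if (status == A_OK) {
        /* ... send the additional configuration message here ... */
    }
    return status;
}

#ifdef EXAMPLE_FEATURE
#define EXAMPLE_FEATURE_CONFIG example_feature_config
#else
/* Compiles down to the status expression when the feature is off. */
#define EXAMPLE_FEATURE_CONFIG(pdev, status) (status)
#endif

int main(void)
{
    A_STATUS status = A_OK;                    /* e.g. rx-ring config result */
    status = EXAMPLE_FEATURE_CONFIG(NULL, status);
    printf("final status: %d\n", status);
    return 0;
}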
diff --git a/CORE/CLD_TXRX/HTT/htt.h b/CORE/CLD_TXRX/HTT/htt.h
index fbd81695beb5..55da8c9880d2 100644
--- a/CORE/CLD_TXRX/HTT/htt.h
+++ b/CORE/CLD_TXRX/HTT/htt.h
@@ -66,9 +66,11 @@
*----
* 3.0 Remove HTT_H2T_MSG_TYPE_MGMT_TX messages
* 3.1 Added HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND message
+ * 3.2 Added HTT_H2T_MSG_TYPE_WDI_IPA_CFG,
+ * HTT_H2T_MSG_TYPE_WDI_IPA_OP_REQUEST messages
*/
#define HTT_CURRENT_VERSION_MAJOR 3
-#define HTT_CURRENT_VERSION_MINOR 1
+#define HTT_CURRENT_VERSION_MINOR 2
#define HTT_NUM_TX_FRAG_DESC 1024
@@ -110,8 +112,9 @@ enum htt_h2t_msg_type {
HTT_H2T_MSG_TYPE_AGGR_CFG = 0x5,
HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 0x6,
DEPRECATED_HTT_H2T_MSG_TYPE_MGMT_TX = 0x7, /* no longer used */
-
- /* keep this last */
+ HTT_H2T_MSG_TYPE_WDI_IPA_CFG = 0x8,
+ HTT_H2T_MSG_TYPE_WDI_IPA_OP_REQ = 0x9,
+ /* keep this last */
HTT_H2T_NUM_MSGS
};
@@ -1177,6 +1180,253 @@ PREPACK struct htt_mgmt_tx_compl_ind {
((_var) |= ((_val) << HTT_AGGR_CFG_MAX_NUM_AMSDU_SUBFRM_S)); \
} while (0)
+/**
+ * @brief HTT WDI_IPA Config Message
+ *
+ * @details
+ * The HTT WDI_IPA config message is created/sent by host at driver
+ * init time. It contains information about data structures used on
+ * WDI_IPA TX and RX path.
+ * |31 24|23 16|15 8|7 0|
+ * |----------------+----------------+----------------+----------------|
+ * | tx pkt pool size | Rsvd | msg_type |
+ * |-------------------------------------------------------------------|
+ * | tx comp ring base |
+ * |-------------------------------------------------------------------|
+ * | tx comp ring size |
+ * |-------------------------------------------------------------------|
+ * | tx comp WR_IDX physical address |
+ * |-------------------------------------------------------------------|
+ * | tx CE WR_IDX physical address |
+ * |-------------------------------------------------------------------|
+ * | rx indication ring base |
+ * |-------------------------------------------------------------------|
+ * | rx indication ring size |
+ * |-------------------------------------------------------------------|
+ * | rx ind RD_IDX physical address |
+ * |-------------------------------------------------------------------|
+ * | rx ind WR_IDX physical address |
+ * |-------------------------------------------------------------------|
+ *
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: Identifies this as WDI_IPA config message
+ * value: = 0x8
+ * - TX_PKT_POOL_SIZE
+ * Bits 15:0
+ * Purpose: Total number of TX packet buffer pool allocated by Host for
+ * WDI_IPA TX path
+ * - TX_COMP_RING_BASE_ADDR
+ * Bits 31:0
+ * Purpose: TX Completion Ring base address in DDR
+ * - TX_COMP_RING_SIZE
+ * Bits 31:0
+ * Purpose: TX Completion Ring size (must be power of 2)
+ * - TX_COMP_WR_IDX_ADDR
+ * Bits 31:0
+ * Purpose: IPA doorbell register address OR DDR address where WIFI FW
+ * updates the Write Index for WDI_IPA TX completion ring
+ * - TX_CE_WR_IDX_ADDR
+ * Bits 31:0
+ * Purpose: DDR address where IPA uC
+ * updates the WR Index for TX CE ring
+ * (needed for fusion platforms)
+ * - RX_IND_RING_BASE_ADDR
+ * Bits 31:0
+ * Purpose: RX Indication Ring base address in DDR
+ * - RX_IND_RING_SIZE
+ * Bits 31:0
+ * Purpose: RX Indication Ring size
+ * - RX_IND_RD_IDX_ADDR
+ * Bits 31:0
+ * Purpose: DDR address where IPA uC updates the Read Index for WDI_IPA
+ * RX indication ring
+ * - RX_IND_WR_IDX_ADDR
+ * Bits 31:0
+ * Purpose: IPA doorbell register address OR DDR address where WIFI FW
+ * updates the Write Index for WDI_IPA RX indication ring
+ */
+
+#define HTT_WDI_IPA_CFG_SZ 36 /* bytes */
+
+#define HTT_WDI_IPA_CFG_TX_PKT_POOL_SIZE_M 0xffff0000
+#define HTT_WDI_IPA_CFG_TX_PKT_POOL_SIZE_S 16
+
+#define HTT_WDI_IPA_CFG_TX_COMP_RING_BASE_ADDR_M 0xffffffff
+#define HTT_WDI_IPA_CFG_TX_COMP_RING_BASE_ADDR_S 0
+
+#define HTT_WDI_IPA_CFG_TX_COMP_RING_SIZE_M 0xffffffff
+#define HTT_WDI_IPA_CFG_TX_COMP_RING_SIZE_S 0
+
+#define HTT_WDI_IPA_CFG_TX_COMP_WR_IDX_ADDR_M 0xffffffff
+#define HTT_WDI_IPA_CFG_TX_COMP_WR_IDX_ADDR_S 0
+
+#define HTT_WDI_IPA_CFG_TX_CE_WR_IDX_ADDR_M 0xffffffff
+#define HTT_WDI_IPA_CFG_TX_CE_WR_IDX_ADDR_S 0
+
+#define HTT_WDI_IPA_CFG_RX_IND_RING_BASE_ADDR_M 0xffffffff
+#define HTT_WDI_IPA_CFG_RX_IND_RING_BASE_ADDR_S 0
+
+#define HTT_WDI_IPA_CFG_RX_IND_RING_SIZE_M 0xffffffff
+#define HTT_WDI_IPA_CFG_RX_IND_RING_SIZE_S 0
+
+#define HTT_WDI_IPA_CFG_RX_IND_RD_IDX_ADDR_M 0xffffffff
+#define HTT_WDI_IPA_CFG_RX_IND_RD_IDX_ADDR_S 0
+
+#define HTT_WDI_IPA_CFG_RX_IND_WR_IDX_ADDR_M 0xffffffff
+#define HTT_WDI_IPA_CFG_RX_IND_WR_IDX_ADDR_S 0
+
+#define HTT_WDI_IPA_CFG_TX_PKT_POOL_SIZE_GET(_var) \
+ (((_var) & HTT_WDI_IPA_CFG_TX_PKT_POOL_SIZE_M) >> HTT_WDI_IPA_CFG_TX_PKT_POOL_SIZE_S)
+#define HTT_WDI_IPA_CFG_TX_PKT_POOL_SIZE_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_WDI_IPA_CFG_TX_PKT_POOL_SIZE, _val); \
+ ((_var) |= ((_val) << HTT_WDI_IPA_CFG_TX_PKT_POOL_SIZE_S)); \
+ } while (0)
+
+#define HTT_WDI_IPA_CFG_TX_COMP_RING_BASE_ADDR_GET(_var) \
+ (((_var) & HTT_WDI_IPA_CFG_TX_COMP_RING_BASE_ADDR_M) >> HTT_WDI_IPA_CFG_TX_COMP_RING_BASE_ADDR_S)
+#define HTT_WDI_IPA_CFG_TX_COMP_RING_BASE_ADDR_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_WDI_IPA_CFG_TX_COMP_RING_BASE_ADDR, _val); \
+ ((_var) |= ((_val) << HTT_WDI_IPA_CFG_TX_COMP_RING_BASE_ADDR_S)); \
+ } while (0)
+
+#define HTT_WDI_IPA_CFG_TX_COMP_RING_SIZE_GET(_var) \
+ (((_var) & HTT_WDI_IPA_CFG_TX_COMP_RING_SIZE_M) >> HTT_WDI_IPA_CFG_TX_COMP_RING_SIZE_S)
+#define HTT_WDI_IPA_CFG_TX_COMP_RING_SIZE_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_WDI_IPA_CFG_TX_COMP_RING_SIZE, _val); \
+ ((_var) |= ((_val) << HTT_WDI_IPA_CFG_TX_COMP_RING_SIZE_S)); \
+ } while (0)
+
+#define HTT_WDI_IPA_CFG_TX_COMP_WR_IDX_ADDR_GET(_var) \
+ (((_var) & HTT_WDI_IPA_CFG_TX_COMP_WR_IDX_ADDR_M) >> HTT_WDI_IPA_CFG_TX_COMP_WR_IDX_ADDR_S)
+#define HTT_WDI_IPA_CFG_TX_COMP_WR_IDX_ADDR_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_WDI_IPA_CFG_TX_COMP_WR_IDX_ADDR, _val); \
+ ((_var) |= ((_val) << HTT_WDI_IPA_CFG_TX_COMP_WR_IDX_ADDR_S)); \
+ } while (0)
+
+#define HTT_WDI_IPA_CFG_TX_CE_WR_IDX_ADDR_GET(_var) \
+ (((_var) & HTT_WDI_IPA_CFG_TX_CE_WR_IDX_ADDR_M) >> HTT_WDI_IPA_CFG_TX_CE_WR_IDX_ADDR_S)
+#define HTT_WDI_IPA_CFG_TX_CE_WR_IDX_ADDR_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_WDI_IPA_CFG_TX_CE_WR_IDX_ADDR, _val); \
+ ((_var) |= ((_val) << HTT_WDI_IPA_CFG_TX_CE_WR_IDX_ADDR_S)); \
+ } while (0)
+
+#define HTT_WDI_IPA_CFG_RX_IND_RING_BASE_ADDR_GET(_var) \
+ (((_var) & HTT_WDI_IPA_CFG_RX_IND_RING_BASE_ADDR_M) >> HTT_WDI_IPA_CFG_RX_IND_RING_BASE_ADDR_S)
+#define HTT_WDI_IPA_CFG_RX_IND_RING_BASE_ADDR_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_WDI_IPA_CFG_RX_IND_RING_BASE_ADDR, _val); \
+ ((_var) |= ((_val) << HTT_WDI_IPA_CFG_RX_IND_RING_BASE_ADDR_S)); \
+ } while (0)
+
+#define HTT_WDI_IPA_CFG_RX_IND_RING_SIZE_GET(_var) \
+ (((_var) & HTT_WDI_IPA_CFG_RX_IND_RING_SIZE_M) >> HTT_WDI_IPA_CFG_RX_IND_RING_SIZE_S)
+#define HTT_WDI_IPA_CFG_RX_IND_RING_SIZE_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_WDI_IPA_CFG_RX_IND_RING_SIZE, _val); \
+ ((_var) |= ((_val) << HTT_WDI_IPA_CFG_RX_IND_RING_SIZE_S)); \
+ } while (0)
+
+#define HTT_WDI_IPA_CFG_RX_IND_RD_IDX_ADDR_GET(_var) \
+ (((_var) & HTT_WDI_IPA_CFG_RX_IND_RD_IDX_ADDR_M) >> HTT_WDI_IPA_CFG_RX_IND_RD_IDX_ADDR_S)
+#define HTT_WDI_IPA_CFG_RX_IND_RD_IDX_ADDR_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_WDI_IPA_CFG_RX_IND_RD_IDX_ADDR, _val); \
+ ((_var) |= ((_val) << HTT_WDI_IPA_CFG_RX_IND_RD_IDX_ADDR_S)); \
+ } while (0)
+
+#define HTT_WDI_IPA_CFG_RX_IND_WR_IDX_ADDR_GET(_var) \
+ (((_var) & HTT_WDI_IPA_CFG_RX_IND_WR_IDX_ADDR_M) >> HTT_WDI_IPA_CFG_RX_IND_WR_IDX_ADDR_S)
+#define HTT_WDI_IPA_CFG_RX_IND_WR_IDX_ADDR_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_WDI_IPA_CFG_RX_IND_WR_IDX_ADDR, _val); \
+ ((_var) |= ((_val) << HTT_WDI_IPA_CFG_RX_IND_WR_IDX_ADDR_S)); \
+ } while (0)
+
+PREPACK struct htt_wdi_ipa_cfg_t
+{
+ /* DWORD 0: flags and meta-data */
+ A_UINT32
+ msg_type: 8, /* HTT_H2T_MSG_TYPE_WDI_IPA_CFG */
+ reserved: 8,
+ tx_pkt_pool_size: 16;
+ /* DWORD 1 */
+ A_UINT32 tx_comp_ring_base_addr;
+ /* DWORD 2 */
+ A_UINT32 tx_comp_ring_size;
+ /* DWORD 3 */
+ A_UINT32 tx_comp_wr_idx_addr;
+ /* DWORD 4*/
+ A_UINT32 tx_ce_wr_idx_addr;
+ /* DWORD 5 */
+ A_UINT32 rx_ind_ring_base_addr;
+ /* DWORD 6 */
+ A_UINT32 rx_ind_ring_size;
+ /* DWORD 7 */
+ A_UINT32 rx_ind_rd_idx_addr;
+ /* DWORD 8 */
+ A_UINT32 rx_ind_wr_idx_addr;
+} POSTPACK;
+
+enum htt_wdi_ipa_op_code {
+ HTT_WDI_IPA_OPCODE_TX_SUSPEND = 0,
+ HTT_WDI_IPA_OPCODE_TX_RESUME = 1,
+ HTT_WDI_IPA_OPCODE_RX_SUSPEND = 2,
+ HTT_WDI_IPA_OPCODE_RX_RESUME = 3,
+ /* keep this last */
+ HTT_WDI_IPA_OPCODE_MAX
+};
+
+/**
+ * @brief HTT WDI_IPA Operation Request Message
+ *
+ * @details
+ * HTT WDI_IPA Operation Request message is sent by host
+ * to either suspend or resume WDI_IPA TX or RX path.
+ * |31 24|23 16|15 8|7 0|
+ * |----------------+----------------+----------------+----------------|
+ * | op_code | Rsvd | msg_type |
+ * |-------------------------------------------------------------------|
+ *
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: Identifies this as WDI_IPA Operation Request message
+ * value: = 0x9
+ * - OP_CODE
+ * Bits 31:16
+ * Purpose: Identifies operation host is requesting (e.g. TX suspend)
+ * value: = enum htt_wdi_ipa_op_code
+ */
+
+PREPACK struct htt_wdi_ipa_op_request_t
+{
+ /* DWORD 0: flags and meta-data */
+ A_UINT32
+ msg_type: 8, /* HTT_H2T_MSG_TYPE_WDI_IPA_OP_REQUEST */
+ reserved: 8,
+ op_code: 16;
+} POSTPACK;
+
+#define HTT_WDI_IPA_OP_REQUEST_SZ 4 /* bytes */
+
+#define HTT_WDI_IPA_OP_REQUEST_OP_CODE_M 0xffff0000
+#define HTT_WDI_IPA_OP_REQUEST_OP_CODE_S 16
+
+#define HTT_WDI_IPA_OP_REQUEST_OP_CODE_GET(_var) \
+ (((_var) & HTT_WDI_IPA_OP_REQUEST_OP_CODE_M) >> HTT_WDI_IPA_OP_REQUEST_OP_CODE_S)
+#define HTT_WDI_IPA_OP_REQUEST_OP_CODE_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_WDI_IPA_OP_REQUEST_OP_CODE, _val); \
+ ((_var) |= ((_val) << HTT_WDI_IPA_OP_REQUEST_OP_CODE_S)); \
+ } while (0)
/*=== target -> host messages ===============================================*/
@@ -1201,7 +1451,9 @@ enum htt_t2h_msg_type {
HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
HTT_T2H_MSG_TYPE_RX_PN_IND = 0x10,
HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
- HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
+ HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
+ /* 0x13 is reserved for RX_RING_LOW_IND (RX Full reordering related) */
+ HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14,
HTT_T2H_MSG_TYPE_TEST,
/* keep this last */
HTT_T2H_NUM_MSGS
@@ -3768,4 +4020,48 @@ PREPACK struct htt_tx_frag_desc_bank_cfg_t {
#define DEBUG_DMA_DONE
+/**
+ * @brief HTT WDI_IPA Operation Response Message
+ *
+ * @details
+ * HTT WDI_IPA Operation Response message is sent by target
+ * to host confirming suspend or resume operation.
+ * |31 24|23 16|15 8|7 0|
+ * |----------------+----------------+----------------+----------------|
+ * | op_code | Rsvd | msg_type |
+ * |-------------------------------------------------------------------|
+ *
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: Identifies this as WDI_IPA Operation Response message
+ *     value: = 0x14
+ * - OP_CODE
+ * Bits 31:16
+ * Purpose: Identifies the operation target is responding to (e.g. TX suspend)
+ * value: = enum htt_wdi_ipa_op_code
+ */
+
+PREPACK struct htt_wdi_ipa_op_response_t
+{
+ /* DWORD 0: flags and meta-data */
+ A_UINT32
+ msg_type: 8, /* HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE */
+ reserved: 8,
+ op_code: 16;
+} POSTPACK;
+
+#define HTT_WDI_IPA_OP_RESPONSE_SZ 4 /* bytes */
+
+#define HTT_WDI_IPA_OP_RESPONSE_OP_CODE_M 0xffff0000
+#define HTT_WDI_IPA_OP_RESPONSE_OP_CODE_S 16
+
+#define HTT_WDI_IPA_OP_RESPONSE_OP_CODE_GET(_var) \
+ (((_var) & HTT_WDI_IPA_OP_RESPONSE_OP_CODE_M) >> HTT_WDI_IPA_OP_RESPONSE_OP_CODE_S)
+#define HTT_WDI_IPA_OP_RESPONSE_OP_CODE_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_WDI_IPA_OP_RESPONSE_OP_CODE, _val); \
+ ((_var) |= ((_val) << HTT_WDI_IPA_OP_RESPONSE_OP_CODE_S)); \
+ } while (0)
+
#endif
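For reference, the single-DWORD op request/response layout documented above can be exercised with plain mask/shift arithmetic; the constants below copy the values defined in this patch (op_code in bits 31:16, msg type in bits 7:0), while the helper names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define MSG_TYPE_WDI_IPA_OP_REQ  0x9          /* HTT_H2T_MSG_TYPE_WDI_IPA_OP_REQ */
#define OP_CODE_M                0xffff0000u  /* HTT_WDI_IPA_OP_REQUEST_OP_CODE_M */
#define OP_CODE_S                16           /* HTT_WDI_IPA_OP_REQUEST_OP_CODE_S */

static uint32_t pack_op_request(uint16_t op_code)
{
    uint32_t word = 0;
    word |= (uint32_t)op_code << OP_CODE_S;    /* op_code,  bits 31:16 */
    word |= MSG_TYPE_WDI_IPA_OP_REQ;           /* msg_type, bits 7:0   */
    return word;
}

static uint16_t unpack_op_code(uint32_t word)
{
    return (uint16_t)((word & OP_CODE_M) >> OP_CODE_S);
}

int main(void)
{
    uint32_t word = pack_op_request(1);        /* 1 == HTT_WDI_IPA_OPCODE_TX_RESUME */
    printf("dword 0x%08x, op_code %u\n",
           (unsigned)word, (unsigned)unpack_op_code(word));
    return 0;
}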
diff --git a/CORE/CLD_TXRX/HTT/htt_h2t.c b/CORE/CLD_TXRX/HTT/htt_h2t.c
index a88512815888..a2c61bff2c67 100644
--- a/CORE/CLD_TXRX/HTT/htt_h2t.c
+++ b/CORE/CLD_TXRX/HTT/htt_h2t.c
@@ -685,3 +685,172 @@ htt_h2t_aggr_cfg_msg(struct htt_pdev_t *pdev,
#endif
return 0;
}
+
+#ifdef IPA_UC_OFFLOAD
+int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
+{
+ struct htt_htc_pkt *pkt;
+ adf_nbuf_t msg;
+ u_int32_t *msg_word;
+
+ pkt = htt_htc_pkt_alloc(pdev);
+ if (!pkt) {
+ return A_NO_MEMORY;
+ }
+
+ /* show that this is not a tx frame download (not required, but helpful) */
+ pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
+ pkt->pdev_ctxt = NULL; /* not used during send-done callback */
+
+ msg = adf_nbuf_alloc(
+ pdev->osdev,
+ HTT_MSG_BUF_SIZE(HTT_WDI_IPA_CFG_SZ),
+ /* reserve room for HTC header */
+ HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, FALSE);
+ if (!msg) {
+ htt_htc_pkt_free(pdev, pkt);
+ return A_NO_MEMORY;
+ }
+ /* set the length of the message */
+ adf_nbuf_put_tail(msg, HTT_WDI_IPA_CFG_SZ);
+
+ /* fill in the message contents */
+ msg_word = (u_int32_t *) adf_nbuf_data(msg);
+
+ /* rewind beyond alignment pad to get to the HTC header reserved area */
+ adf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+
+ *msg_word = 0;
+ HTT_WDI_IPA_CFG_TX_PKT_POOL_SIZE_SET(*msg_word,
+ pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt);
+ HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_WDI_IPA_CFG);
+
+ msg_word++;
+ *msg_word = 0;
+ HTT_WDI_IPA_CFG_TX_COMP_RING_BASE_ADDR_SET(*msg_word,
+ (unsigned int)pdev->ipa_uc_tx_rsc.tx_comp_base.paddr);
+
+ msg_word++;
+ *msg_word = 0;
+ HTT_WDI_IPA_CFG_TX_COMP_RING_SIZE_SET(*msg_word,
+ (unsigned int)ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev));
+
+ msg_word++;
+ *msg_word = 0;
+ HTT_WDI_IPA_CFG_TX_COMP_WR_IDX_ADDR_SET(*msg_word,
+ (unsigned int)pdev->ipa_uc_tx_rsc.tx_comp_idx_paddr);
+
+ msg_word++;
+ *msg_word = 0;
+ HTT_WDI_IPA_CFG_TX_CE_WR_IDX_ADDR_SET(*msg_word,
+ (unsigned int)pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr);
+
+ msg_word++;
+ *msg_word = 0;
+ HTT_WDI_IPA_CFG_RX_IND_RING_BASE_ADDR_SET(*msg_word,
+ (unsigned int)pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr);
+
+ msg_word++;
+ *msg_word = 0;
+ HTT_WDI_IPA_CFG_RX_IND_RING_SIZE_SET(*msg_word,
+ (unsigned int)ol_cfg_ipa_uc_rx_ind_ring_size(pdev->ctrl_pdev));
+
+ msg_word++;
+ *msg_word = 0;
+ HTT_WDI_IPA_CFG_RX_IND_RD_IDX_ADDR_SET(*msg_word,
+ (unsigned int)pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr);
+
+ msg_word++;
+ *msg_word = 0;
+ HTT_WDI_IPA_CFG_RX_IND_WR_IDX_ADDR_SET(*msg_word,
+ (unsigned int)pdev->ipa_uc_rx_rsc.rx_rdy_idx_paddr);
+
+ SET_HTC_PACKET_INFO_TX(
+ &pkt->htc_pkt,
+ htt_h2t_send_complete_free_netbuf,
+ adf_nbuf_data(msg),
+ adf_nbuf_len(msg),
+ pdev->htc_endpoint,
+ 1); /* tag - not relevant here */
+
+ SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
+
+ HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt);
+
+ return A_OK;
+}
+
+
+int htt_h2t_ipa_uc_set_active(struct htt_pdev_t *pdev,
+ a_bool_t uc_active,
+ a_bool_t is_tx)
+{
+ struct htt_htc_pkt *pkt;
+ adf_nbuf_t msg;
+ u_int32_t *msg_word;
+ u_int8_t active_target = 0;
+
+ pkt = htt_htc_pkt_alloc(pdev);
+ if (!pkt) {
+ return A_NO_MEMORY;
+ }
+
+ /* show that this is not a tx frame download (not required, but helpful) */
+ pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
+ pkt->pdev_ctxt = NULL; /* not used during send-done callback */
+
+ msg = adf_nbuf_alloc(
+ pdev->osdev,
+ HTT_MSG_BUF_SIZE(HTT_WDI_IPA_OP_REQUEST_SZ),
+ /* reserve room for HTC header */
+ HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, FALSE);
+ if (!msg) {
+ htt_htc_pkt_free(pdev, pkt);
+ return A_NO_MEMORY;
+ }
+ /* set the length of the message */
+ adf_nbuf_put_tail(msg, HTT_WDI_IPA_OP_REQUEST_SZ);
+
+ /* fill in the message contents */
+ msg_word = (u_int32_t *) adf_nbuf_data(msg);
+
+ /* rewind beyond alignment pad to get to the HTC header reserved area */
+ adf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+
+ *msg_word = 0;
+ if (uc_active && is_tx)
+ {
+ active_target = HTT_WDI_IPA_OPCODE_TX_RESUME;
+ }
+ else if (!uc_active && is_tx)
+ {
+ active_target = HTT_WDI_IPA_OPCODE_TX_SUSPEND;
+ }
+ else if (uc_active && !is_tx)
+ {
+ active_target = HTT_WDI_IPA_OPCODE_RX_RESUME;
+ }
+ else if (!uc_active && !is_tx)
+ {
+ active_target = HTT_WDI_IPA_OPCODE_RX_SUSPEND;
+ }
+ HTT_WDI_IPA_OP_REQUEST_OP_CODE_SET(*msg_word,
+ active_target);
+ HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_WDI_IPA_OP_REQ);
+
+ SET_HTC_PACKET_INFO_TX(
+ &pkt->htc_pkt,
+ htt_h2t_send_complete_free_netbuf,
+ adf_nbuf_data(msg),
+ adf_nbuf_len(msg),
+ pdev->htc_endpoint,
+ 1); /* tag - not relevant here */
+
+ SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
+
+ HTCSendPkt(pdev->htc_pdev, &pkt->htc_pkt);
+
+ return A_OK;
+}
+#endif /* IPA_UC_OFFLOAD */
+
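The if/else chain in htt_h2t_ipa_uc_set_active maps the (uc_active, is_tx) pair onto the four opcodes from htt.h. A compact standalone sketch of the same mapping (enum values copied from this patch; the function name is illustrative):

#include <stdbool.h>
#include <stdio.h>

enum htt_wdi_ipa_op_code {
    HTT_WDI_IPA_OPCODE_TX_SUSPEND = 0,
    HTT_WDI_IPA_OPCODE_TX_RESUME  = 1,
    HTT_WDI_IPA_OPCODE_RX_SUSPEND = 2,
    HTT_WDI_IPA_OPCODE_RX_RESUME  = 3,
};

/* Same selection the if/else chain above performs. */
static enum htt_wdi_ipa_op_code select_op_code(bool uc_active, bool is_tx)
{
    if (is_tx)
        return uc_active ? HTT_WDI_IPA_OPCODE_TX_RESUME
                         : HTT_WDI_IPA_OPCODE_TX_SUSPEND;
    return uc_active ? HTT_WDI_IPA_OPCODE_RX_RESUME
                     : HTT_WDI_IPA_OPCODE_RX_SUSPEND;
}

int main(void)
{
    printf("%d %d %d %d\n",
           select_op_code(true,  true),    /* 1: TX_RESUME  */
           select_op_code(false, true),    /* 0: TX_SUSPEND */
           select_op_code(true,  false),   /* 3: RX_RESUME  */
           select_op_code(false, false));  /* 2: RX_SUSPEND */
    return 0;
}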
diff --git a/CORE/CLD_TXRX/HTT/htt_internal.h b/CORE/CLD_TXRX/HTT/htt_internal.h
index 7225d312e3cc..7ec53bfffc6a 100644
--- a/CORE/CLD_TXRX/HTT/htt_internal.h
+++ b/CORE/CLD_TXRX/HTT/htt_internal.h
@@ -401,4 +401,22 @@ htt_rx_hash_list_insert(struct htt_pdev_t *pdev, u_int32_t paddr,
adf_nbuf_t
htt_rx_hash_list_lookup(struct htt_pdev_t *pdev, u_int32_t paddr);
+#ifdef IPA_UC_OFFLOAD
+int
+htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
+ unsigned int uc_tx_buf_sz,
+ unsigned int uc_tx_buf_cnt,
+ unsigned int uc_tx_partition_base);
+
+int
+htt_rx_ipa_uc_attach(struct htt_pdev_t *pdev,
+ unsigned int rx_ind_ring_size);
+
+int
+htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev);
+
+int
+htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev);
+#endif /* IPA_UC_OFFLOAD */
+
#endif /* _HTT_INTERNAL__H_ */
diff --git a/CORE/CLD_TXRX/HTT/htt_rx.c b/CORE/CLD_TXRX/HTT/htt_rx.c
index 635e7c98ea47..101010c3e8f2 100644
--- a/CORE/CLD_TXRX/HTT/htt_rx.c
+++ b/CORE/CLD_TXRX/HTT/htt_rx.c
@@ -2464,3 +2464,72 @@ fail1:
return 1; /* failure */
}
+#ifdef IPA_UC_OFFLOAD
+int htt_rx_ipa_uc_attach(struct htt_pdev_t *pdev,
+ unsigned int rx_ind_ring_elements)
+{
+ /* Allocate RX indication ring */
+ /* RX IND ring element
+ * 4bytes: pointer
+ * 2bytes: VDEV ID
+ * 2bytes: length */
+ pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr =
+ adf_os_mem_alloc_consistent(pdev->osdev,
+ rx_ind_ring_elements * sizeof(struct ipa_uc_rx_ring_elem_t),
+ &pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr,
+ adf_os_get_dma_mem_context(
+ (&pdev->ipa_uc_rx_rsc.rx_ind_ring_base), memctx));
+ if (!pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr) {
+ adf_os_print("%s: RX IND RING alloc fail", __func__);
+ return -1;
+ }
+
+ /* RX indication ring size, by bytes */
+ pdev->ipa_uc_rx_rsc.rx_ind_ring_size = rx_ind_ring_elements *
+ sizeof(struct ipa_uc_rx_ring_elem_t);
+
+ /* Allocate RX process done index */
+ pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr =
+ adf_os_mem_alloc_consistent(pdev->osdev,
+ 4,
+ &pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr,
+ adf_os_get_dma_mem_context(
+ (&pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx), memctx));
+ if (!pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr) {
+ adf_os_print("%s: RX PROC DONE IND alloc fail", __func__);
+ adf_os_mem_free_consistent(pdev->osdev,
+ pdev->ipa_uc_rx_rsc.rx_ind_ring_size,
+ pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
+ pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr,
+ adf_os_get_dma_mem_context(
+ (&pdev->ipa_uc_rx_rsc.rx_ind_ring_base), memctx));
+ return -2;
+ }
+
+ return 0;
+}
+
+int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev)
+{
+ if (pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr) {
+ adf_os_mem_free_consistent(pdev->osdev,
+ pdev->ipa_uc_rx_rsc.rx_ind_ring_size,
+ pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
+ pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr,
+ adf_os_get_dma_mem_context(
+ (&pdev->ipa_uc_rx_rsc.rx_ind_ring_base), memctx));
+ }
+
+ if (pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr) {
+ adf_os_mem_free_consistent(pdev->osdev,
+ 4,
+ pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr,
+ pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr,
+ adf_os_get_dma_mem_context(
+ (&pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx), memctx));
+ }
+
+ return 0;
+}
+#endif /* IPA_UC_OFFLOAD */
+
diff --git a/CORE/CLD_TXRX/HTT/htt_t2h.c b/CORE/CLD_TXRX/HTT/htt_t2h.c
index 3e317d80b1ff..fadb895b92c9 100644
--- a/CORE/CLD_TXRX/HTT/htt_t2h.c
+++ b/CORE/CLD_TXRX/HTT/htt_t2h.c
@@ -551,6 +551,18 @@ if (adf_os_unlikely(pdev->rx_ring.rx_reset)) {
ol_rx_in_order_indication_handler(pdev->txrx_pdev, htt_t2h_msg,
peer_id, tid, offload_ind);
}
+
+#ifdef IPA_UC_OFFLOAD
+ case HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE:
+ {
+ u_int8_t op_code;
+
+ op_code = HTT_WDI_IPA_OP_RESPONSE_OP_CODE_GET(*msg_word);
+ ol_txrx_ipa_uc_op_response(pdev->txrx_pdev, op_code);
+ break;
+ }
+#endif /* IPA_UC_OFFLOAD */
+
default:
htt_t2h_lp_msg_handler(context, htt_t2h_msg);
return ;
diff --git a/CORE/CLD_TXRX/HTT/htt_tx.c b/CORE/CLD_TXRX/HTT/htt_tx.c
index 89ca4753195d..286f8a0d499e 100644
--- a/CORE/CLD_TXRX/HTT/htt_tx.c
+++ b/CORE/CLD_TXRX/HTT/htt_tx.c
@@ -50,6 +50,23 @@
#include <ol_txrx_htt_api.h> /* ol_tx_msdu_id_storage */
#include <htt_internal.h>
+#ifdef IPA_UC_OFFLOAD
+/* IPA Micro controller TX data packet HTT Header Preset */
+/* 31 | 30 29 | 28 | 27 | 26 22 | 21 16 | 15 13 | 12 8 | 7 0
+ *------------------------------------------------------------------------------
+ * R | CS OL | R | PP | ext TID | vdev ID | pkt type | pkt subtype | msg type
+ * 0 | 0 | 0 | | 0x1F | 0 | 2 | 0 | 0x01
+ *------------------------------------------------------------------------------
+ * pkt ID | pkt length
+ *------------------------------------------------------------------------------
+ * frag_desc_ptr
+ *------------------------------------------------------------------------------
+ * peer_id
+ *------------------------------------------------------------------------------
+ */
+#define HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT 0x07C04001
+#endif /* IPA_UC_OFFLOAD */
+
/*--- setup / tear-down functions -------------------------------------------*/
#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
@@ -559,3 +576,163 @@ htt_tx_desc_display(void *tx_desc)
}
}
#endif
+
+#ifdef IPA_UC_OFFLOAD
+int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
+ unsigned int uc_tx_buf_sz,
+ unsigned int uc_tx_buf_cnt,
+ unsigned int uc_tx_partition_base)
+{
+ unsigned int tx_buffer_count;
+ adf_nbuf_t buffer_vaddr;
+ u_int32_t buffer_paddr;
+ u_int32_t *header_ptr;
+ u_int32_t *ring_vaddr;
+ int return_code = 0;
+
+ /* Allocate CE Write Index WORD */
+ pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr =
+ adf_os_mem_alloc_consistent(pdev->osdev,
+ 4,
+ &pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
+ adf_os_get_dma_mem_context(
+ (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx));
+ if (!pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
+ adf_os_print("%s: CE Write Index WORD alloc fail", __func__);
+ return -1;
+ }
+
+ /* Allocate TX COMP Ring */
+ pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr =
+ adf_os_mem_alloc_consistent(pdev->osdev,
+ uc_tx_buf_cnt * 4,
+ &pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
+ adf_os_get_dma_mem_context(
+ (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx));
+ if (!pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
+ adf_os_print("%s: TX COMP ring alloc fail", __func__);
+ return_code = -2;
+ goto free_tx_ce_idx;
+ }
+
+ adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, uc_tx_buf_cnt * 4);
+
+ /* Allocate TX BUF vAddress Storage */
+ pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg =
+ (adf_nbuf_t *)adf_os_mem_alloc(pdev->osdev,
+ uc_tx_buf_cnt * sizeof(adf_nbuf_t));
+ if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) {
+ adf_os_print("%s: TX BUF POOL vaddr storage alloc fail",
+ __func__);
+ return_code = -3;
+ goto free_tx_comp_base;
+ }
+ adf_os_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg,
+ uc_tx_buf_cnt * sizeof(adf_nbuf_t));
+
+ ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr;
+ /* Allocate TX buffers as many as possible */
+ for (tx_buffer_count = 0;
+ tx_buffer_count < (uc_tx_buf_cnt - 1);
+ tx_buffer_count++) {
+ buffer_vaddr = adf_nbuf_alloc(pdev->osdev,
+ uc_tx_buf_sz, 0, 4, FALSE);
+ if (!buffer_vaddr)
+ {
+ adf_os_print("%s: TX BUF alloc fail, allocated buffer count %d",
+ __func__, tx_buffer_count);
+ return 0;
+ }
+
+ /* Init buffer */
+ adf_os_mem_zero(adf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
+ header_ptr = (u_int32_t *)adf_nbuf_data(buffer_vaddr);
+
+ *header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
+ header_ptr++;
+ *header_ptr |= ((u_int16_t)uc_tx_partition_base + tx_buffer_count) << 16;
+
+ adf_nbuf_map(pdev->osdev, buffer_vaddr, ADF_OS_DMA_BIDIRECTIONAL);
+ buffer_paddr = adf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0);
+ header_ptr++;
+ *header_ptr = (u_int32_t)(buffer_paddr + 16);
+
+ header_ptr++;
+ *header_ptr = 0xFFFFFFFF;
+
+ /* FRAG Header */
+ header_ptr++;
+ *header_ptr = buffer_paddr + 32;
+
+ *ring_vaddr = buffer_paddr;
+ printk("TX RING vADD %lx BF pADDR %x buffer_paddr %x buffer_vaddr %lx\n",
+ (unsigned long)ring_vaddr, (unsigned int)(*ring_vaddr),
+ (unsigned int)buffer_paddr, (unsigned long)adf_nbuf_data(buffer_vaddr));
+ pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] =
+ buffer_vaddr;
+ /* Memory barrier to ensure actual value updated */
+
+ ring_vaddr++;
+ }
+
+ adf_os_print("%s: Allocated TX buffer count is %d\n",
+ __func__, tx_buffer_count);
+ pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;
+
+ return 0;
+
+free_tx_comp_base:
+ adf_os_mem_free_consistent(pdev->osdev,
+ ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4,
+ pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
+ pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
+ adf_os_get_dma_mem_context(
+ (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx));
+free_tx_ce_idx:
+ adf_os_mem_free_consistent(pdev->osdev,
+ 4,
+ pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
+ pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
+ adf_os_get_dma_mem_context(
+ (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx));
+ return return_code;
+}
+
+int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev)
+{
+ u_int16_t idx;
+
+ if (pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
+ adf_os_mem_free_consistent(pdev->osdev,
+ 4,
+ pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
+ pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
+ adf_os_get_dma_mem_context(
+ (&pdev->ipa_uc_tx_rsc.tx_ce_idx), memctx));
+ }
+
+ if (pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
+ adf_os_mem_free_consistent(pdev->osdev,
+ ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4,
+ pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
+ pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
+ adf_os_get_dma_mem_context(
+ (&pdev->ipa_uc_tx_rsc.tx_comp_base), memctx));
+ }
+
+ /* Free each single buffer */
+ for(idx = 0; idx < pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
+ if (pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]) {
+ adf_nbuf_unmap(pdev->osdev,
+ pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx],
+ ADF_OS_DMA_FROM_DEVICE);
+ adf_nbuf_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]);
+ }
+ }
+
+ /* Free storage */
+ adf_os_mem_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg);
+
+ return 0;
+}
+#endif /* IPA_UC_OFFLOAD */
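The preset header value 0x07C04001 defined near the top of this file can be reproduced from the bit layout in its comment: extended TID 0x1F in bits 26:22, pkt type 2 in bits 15:13, and msg type 0x01 in bits 7:0, with the remaining fields zero. A quick sanity-check sketch:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT 0x07C04001

int main(void)
{
    uint32_t hdr = 0;
    hdr |= 0x01u;           /* msg type,     bits 7:0   */
    hdr |= 2u    << 13;     /* pkt type,     bits 15:13 */
    hdr |= 0x1Fu << 22;     /* extended TID, bits 26:22 */
    /* vdev ID, pkt subtype, PP, CS OL, and reserved bits stay 0 */

    assert(hdr == HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT);
    printf("0x%08X\n", (unsigned)hdr);    /* prints 0x07C04001 */
    return 0;
}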
diff --git a/CORE/CLD_TXRX/HTT/htt_types.h b/CORE/CLD_TXRX/HTT/htt_types.h
index 1a9ef94b76bf..bdab0b533763 100644
--- a/CORE/CLD_TXRX/HTT/htt_types.h
+++ b/CORE/CLD_TXRX/HTT/htt_types.h
@@ -118,6 +118,48 @@ struct htt_rx_hash_bucket {
#endif
};
+#ifdef IPA_UC_OFFLOAD
+
+/* Shared memory structure used by the IPA micro controller,
+   the WLAN host driver, and the firmware */
+struct uc_shared_mem_t
+{
+ u_int32_t *vaddr;
+ adf_os_dma_addr_t paddr;
+ adf_os_dma_mem_context(memctx);
+};
+
+/* Micro controller datapath offload
+ * WLAN TX resources */
+struct htt_ipa_uc_tx_resource_t
+{
+ struct uc_shared_mem_t tx_ce_idx;
+ struct uc_shared_mem_t tx_comp_base;
+
+ u_int32_t tx_comp_idx_paddr;
+ adf_nbuf_t *tx_buf_pool_vaddr_strg;
+ u_int32_t alloc_tx_buf_cnt;
+};
+
+/* Micro controller datapath offload
+ * WLAN RX resources */
+struct htt_ipa_uc_rx_resource_t
+{
+ adf_os_dma_addr_t rx_rdy_idx_paddr;
+ struct uc_shared_mem_t rx_ind_ring_base;
+ struct uc_shared_mem_t rx_ipa_prc_done_idx;
+ u_int32_t rx_ind_ring_size;
+};
+
+struct ipa_uc_rx_ring_elem_t
+{
+ u_int32_t rx_packet_paddr;
+ u_int16_t vdev_id;
+ u_int16_t rx_packet_leng;
+};
+#endif /* IPA_UC_OFFLOAD */
+
struct htt_pdev_t {
ol_pdev_handle ctrl_pdev;
ol_txrx_pdev_handle txrx_pdev;
@@ -266,6 +308,11 @@ struct htt_pdev_t {
int cur_seq_num_hl;
struct htt_tx_mgmt_desc_ctxt tx_mgmt_desc_ctxt;
struct targetdef_s *targetdef;
+
+#ifdef IPA_UC_OFFLOAD
+ struct htt_ipa_uc_tx_resource_t ipa_uc_tx_rsc;
+ struct htt_ipa_uc_rx_resource_t ipa_uc_rx_rsc;
+#endif /* IPA_UC_OFFLOAD */
};
#endif /* _HTT_TYPES__H_ */
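htt_rx_ipa_uc_attach (earlier in this patch) sizes the RX indication ring as element_count * sizeof(struct ipa_uc_rx_ring_elem_t); with the layout declared above (4-byte packet physical address, 2-byte vdev ID, 2-byte length) each slot is expected to be 8 bytes. A small check of that assumption, with an illustrative ring depth:

#include <stdint.h>
#include <stdio.h>

struct ipa_uc_rx_ring_elem_t {
    uint32_t rx_packet_paddr;   /* packet physical address */
    uint16_t vdev_id;           /* virtual device ID       */
    uint16_t rx_packet_leng;    /* packet length           */
};

int main(void)
{
    unsigned int ring_elements = 1024;   /* illustrative ring depth */
    size_t ring_bytes = ring_elements * sizeof(struct ipa_uc_rx_ring_elem_t);

    /* Expect 8 bytes per element and 8192 bytes for 1024 elements. */
    printf("elem %zu bytes, ring %zu bytes\n",
           sizeof(struct ipa_uc_rx_ring_elem_t), ring_bytes);
    return 0;
}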
diff --git a/CORE/CLD_TXRX/TLSHIM/tl_shim.c b/CORE/CLD_TXRX/TLSHIM/tl_shim.c
index de3d16017b0d..9f675607fe08 100644
--- a/CORE/CLD_TXRX/TLSHIM/tl_shim.c
+++ b/CORE/CLD_TXRX/TLSHIM/tl_shim.c
@@ -817,7 +817,10 @@ static void tlshim_data_rx_handler(void *context, u_int16_t staid,
adf_nbuf_t rx_buf_list)
{
struct txrx_tl_shim_ctx *tl_shim;
-#if defined(IPA_OFFLOAD) || \
+	/* The firmware data path activation response is delivered via the shim
+	 * RX thread; the T2H message runs in SIRQ context, and IPA kernel module
+	 * APIs must not be called from SIRQ context. */
+#if (defined(IPA_OFFLOAD) && !defined(IPA_UC_OFFLOAD))|| \
(defined(FEATURE_WLAN_ESE) && !defined(FEATURE_WLAN_ESE_UPLOAD))
void *vos_ctx = vos_get_global_context(VOS_MODULE_ID_TL, context);
#endif
@@ -870,7 +873,7 @@ static void tlshim_data_rx_handler(void *context, u_int16_t staid,
* there is no cached frames have any significant impact on
* performance.
*/
-#ifdef IPA_OFFLOAD
+#if defined (IPA_OFFLOAD) && !defined(IPA_UC_OFFLOAD)
VOS_STATUS ret;
adf_os_spin_lock_bh(&tl_shim->bufq_lock);
sta_info->suspend_flush = 1;
@@ -934,7 +937,7 @@ static void tlshim_data_rx_handler(void *context, u_int16_t staid,
#else /* QCA_CONFIG_SMP */
tlshim_data_rx_cb(tl_shim, rx_buf_list, staid);
#endif /* QCA_CONFIG_SMP */
-#ifdef IPA_OFFLOAD
+#if defined(IPA_OFFLOAD) && !defined(IPA_UC_OFFLOAD)
}
#endif
}
@@ -2247,3 +2250,248 @@ void tl_shim_set_peer_authorized_event(void *vos_ctx, v_U8_t session_id)
vos_event_set(&tl_shim->peer_authorized_events[session_id]);
}
#endif
+
+#ifdef IPA_UC_OFFLOAD
+/*=============================================================================
+ FUNCTION WLANTL_GetIpaUcResource
+
+ DESCRIPTION
+ This function will be called by TL client.
+ Data path resource will be used by FW should be allocated within lower layer.
+ Shared resource information should be propagated to IPA.
+ To propagate resource information, client will use this API
+
+ PARAMETERS
+ IN
+ vos_ctx : Global OS context
+ ce_sr_base_paddr : Copy Engine Source Ring base address
+ ce_sr_ring_size : Copy Engine Source Ring size
+ ce_reg_paddr : Copy engine register address
+ tx_comp_ring_base_paddr : TX COMP ring base address
+ tx_comp_ring_size : TX COMP ring size
+ tx_num_alloc_buffer : Number of TX allocated buffer
+ rx_rdy_ring_base_paddr : RX ready ring base address
+ rx_rdy_ring_size : RX ready ring size
+ rx_proc_done_idx_paddr : RX process done index physical address
+
+ RETURN VALUE
+ NONE
+
+ SIDE EFFECTS
+
+==============================================================================*/
+void WLANTL_GetIpaUcResource(void *vos_ctx,
+ v_U32_t *ce_sr_base_paddr,
+ v_U32_t *ce_sr_ring_size,
+ v_U32_t *ce_reg_paddr,
+ v_U32_t *tx_comp_ring_base_paddr,
+ v_U32_t *tx_comp_ring_size,
+ v_U32_t *tx_num_alloc_buffer,
+ v_U32_t *rx_rdy_ring_base_paddr,
+ v_U32_t *rx_rdy_ring_size,
+ v_U32_t *rx_proc_done_idx_paddr)
+{
+ if (!vos_ctx || !((pVosContextType)vos_ctx)->pdev_txrx_ctx) {
+ TLSHIM_LOGE("%s: Invalid context", __func__);
+ return;
+ }
+
+ wdi_in_ipa_uc_get_resource(((pVosContextType)vos_ctx)->pdev_txrx_ctx,
+ ce_sr_base_paddr,
+ ce_sr_ring_size,
+ ce_reg_paddr,
+ tx_comp_ring_base_paddr,
+ tx_comp_ring_size,
+ tx_num_alloc_buffer,
+ rx_rdy_ring_base_paddr,
+ rx_rdy_ring_size,
+ rx_proc_done_idx_paddr);
+}
+
+/*=============================================================================
+ FUNCTION WLANTL_SetUcDoorbellPaddr
+
+ DESCRIPTION
+ This function will be called by TL client.
+ The IPA micro controller provides its doorbell register addresses;
+ the TL client calls this API to pass those addresses to firmware
+
+ PARAMETERS
+ IN
+ vos_ctx : Global OS context
+ ipa_tx_uc_doorbell_paddr : Micro Controller WLAN TX COMP doorbell register
+ ipa_rx_uc_doorbell_paddr : Micro Controller WLAN RX READY doorbell register
+
+ RETURN VALUE
+ NONE
+
+ SIDE EFFECTS
+
+==============================================================================*/
+void WLANTL_SetUcDoorbellPaddr(void *vos_ctx,
+ v_U32_t ipa_tx_uc_doorbell_paddr,
+ v_U32_t ipa_rx_uc_doorbell_paddr)
+{
+ if (!vos_ctx || !((pVosContextType)vos_ctx)->pdev_txrx_ctx) {
+ TLSHIM_LOGE("%s: Invalid context", __func__);
+ return;
+ }
+
+ wdi_in_ipa_uc_set_doorbell_paddr(((pVosContextType)vos_ctx)->pdev_txrx_ctx,
+ ipa_tx_uc_doorbell_paddr,
+ ipa_rx_uc_doorbell_paddr);
+}
+
+/*=============================================================================
+ FUNCTION WLANTL_SetUcActive
+
+ DESCRIPTION
+ This function will be called by TL client.
+ Send Micro controller data path active or inactive notification to firmware
+
+ PARAMETERS
+ IN
+ vos_ctx : Global OS context
+ uc_active : Micro Controller data path is active or not
+ is_tx : Micro Controller WLAN TX data path is active or not
+ is_rx : Micro Controller WLAN RX data path is active or not
+
+ RETURN VALUE
+ NONE
+
+ SIDE EFFECTS
+
+==============================================================================*/
+void WLANTL_SetUcActive(void *vos_ctx,
+ v_BOOL_t uc_active,
+ v_BOOL_t is_tx
+)
+{
+ if (!vos_ctx || !((pVosContextType)vos_ctx)->pdev_txrx_ctx) {
+ TLSHIM_LOGE("%s: Invalid context", __func__);
+ return;
+ }
+
+ TLSHIM_LOGD("%s, ACTIVE %d, TX %d",
+ __func__, uc_active, is_tx);
+ wdi_in_ipa_uc_set_active(((pVosContextType)vos_ctx)->pdev_txrx_ctx,
+ uc_active, is_tx);
+}
+
+/*=============================================================================
+ FUNCTION WLANTL_IpaUcFwOpEventHandler
+
+ DESCRIPTION
+ This function will be called by TL client.
+ Firmware data path activation response handler.
+ Firmware response will be routed to upper layer
+
+ PARAMETERS
+ IN
+ context : pre-registered shim context
+ rxpkt : message pointer from firmware
+ staid : STA ID, not used
+
+ RETURN VALUE
+ NONE
+
+ SIDE EFFECTS
+
+==============================================================================*/
+void WLANTL_IpaUcFwOpEventHandler(void *context,
+ void *rxpkt,
+ u_int16_t staid)
+{
+ v_U8_t op_code;
+ struct txrx_tl_shim_ctx *tl_shim = (struct txrx_tl_shim_ctx *)context;
+
+ if (!tl_shim) {
+ TLSHIM_LOGE("%s: Invalid context", __func__);
+ return;
+ }
+
+ vos_mem_copy(&op_code, rxpkt, 1);
+ TLSHIM_LOGD("%s, opcode %d", __func__, op_code);
+ if (tl_shim->fw_op_cb) {
+ tl_shim->fw_op_cb(op_code);
+ }
+}
+
+/*=============================================================================
+ FUNCTION WLANTL_IpaUcOpEventHandler
+
+ DESCRIPTION
+ This function will be called by TL client.
+ This API is registered with the OL layer; if firmware sends any
+ activity related notification, the OL layer will call this function.
+ The firmware indication will be serialized within the TLSHIM RX thread
+
+ PARAMETERS
+ IN
+ op_code : OP Code from firmware
+ shim_ctxt : shim context pointer
+
+ RETURN VALUE
+ NONE
+
+ SIDE EFFECTS
+
+==============================================================================*/
+void WLANTL_IpaUcOpEventHandler(v_U8_t op_code, void *shim_ctxt)
+{
+ pVosSchedContext sched_ctx = get_vos_sched_ctxt();
+ struct VosTlshimPkt *pkt;
+ v_U8_t *op_code_pkt;
+
+ if (unlikely(!sched_ctx))
+ return;
+
+ pkt = vos_alloc_tlshim_pkt(sched_ctx);
+ if (!pkt) {
+ TLSHIM_LOGW("No available Rx message buffer");
+ return;
+ }
+
+ op_code_pkt = (v_U8_t *)vos_mem_malloc(4);
+ vos_mem_copy(op_code_pkt, &op_code, 1);
+ pkt->callback = (vos_tlshim_cb) WLANTL_IpaUcFwOpEventHandler;
+ pkt->context = shim_ctxt;
+ pkt->Rxpkt = (void *) op_code_pkt;
+ pkt->staId = 0;
+ vos_indicate_rxpkt(sched_ctx, pkt);
+}
+
+/*=============================================================================
+ FUNCTION WLANTL_RegisterOPCbFnc
+
+ DESCRIPTION
+ This function will be called by TL client.
+
+ PARAMETERS
+ IN
+ vos_ctx : Global OS context
+ func : callback function pointer
+
+ RETURN VALUE
+ NONE
+
+ SIDE EFFECTS
+
+==============================================================================*/
+void WLANTL_RegisterOPCbFnc(void *vos_ctx,
+ void (*func)(v_U8_t op_code))
+{
+ struct txrx_tl_shim_ctx *tl_shim;
+
+ if (!vos_ctx) {
+ TLSHIM_LOGE("%s: Invalid context", __func__);
+ return;
+ }
+
+ tl_shim = vos_get_context(VOS_MODULE_ID_TL, vos_ctx);
+ tl_shim->fw_op_cb = func;
+ wdi_in_ipa_uc_register_op_cb(((pVosContextType)vos_ctx)->pdev_txrx_ctx,
+ WLANTL_IpaUcOpEventHandler, (void *)tl_shim);
+}
+#endif /* IPA_UC_OFFLOAD */
+
diff --git a/CORE/CLD_TXRX/TLSHIM/tl_shim.h b/CORE/CLD_TXRX/TLSHIM/tl_shim.h
index 4d3070f5ec6f..3eedfe3a31f7 100644
--- a/CORE/CLD_TXRX/TLSHIM/tl_shim.h
+++ b/CORE/CLD_TXRX/TLSHIM/tl_shim.h
@@ -70,6 +70,10 @@ struct tlshim_session_flow_Control {
};
#endif /* QCA_LL_TX_FLOW_CT */
+#ifdef IPA_UC_OFFLOAD
+typedef void(*ipa_uc_fw_op_cb)(v_U8_t op_code);
+#endif /* IPA_UC_OFFLOAD */
+
struct txrx_tl_shim_ctx {
void *cfg_ctx;
ol_txrx_tx_fp tx;
@@ -98,6 +102,9 @@ struct deferred_iapp_work iapp_work;
#ifdef QCA_SUPPORT_TXRX_VDEV_PAUSE_LL
vos_event_t *peer_authorized_events;
#endif
+#ifdef IPA_UC_OFFLOAD
+ ipa_uc_fw_op_cb fw_op_cb;
+#endif /* IPA_UC_OFFLOAD */
};
/*
diff --git a/CORE/CLD_TXRX/TXRX/ol_cfg.c b/CORE/CLD_TXRX/TXRX/ol_cfg.c
index cbaa6494d612..ba111262162c 100644
--- a/CORE/CLD_TXRX/TXRX/ol_cfg.c
+++ b/CORE/CLD_TXRX/TXRX/ol_cfg.c
@@ -79,7 +79,13 @@ ol_pdev_handle ol_pdev_cfg_attach(adf_os_device_t osdev,
cfg_ctx->rx_fwd_disabled = 0;
cfg_ctx->is_packet_log_enabled = 0;
cfg_ctx->is_full_reorder_offload = cfg_param.is_full_reorder_offload;
-
+#ifdef IPA_UC_OFFLOAD
+ cfg_ctx->ipa_uc_rsc.uc_offload_enabled = cfg_param.is_uc_offload_enabled;
+ cfg_ctx->ipa_uc_rsc.tx_max_buf_cnt = cfg_param.uc_tx_buffer_count;
+ cfg_ctx->ipa_uc_rsc.tx_buf_size = cfg_param.uc_tx_buffer_size;
+ cfg_ctx->ipa_uc_rsc.rx_ind_ring_size = cfg_param.uc_rx_indication_ring_count;
+ cfg_ctx->ipa_uc_rsc.tx_partition_base = cfg_param.uc_tx_partition_base;
+#endif /* IPA_UC_OFFLOAD */
return (ol_pdev_handle) cfg_ctx;
}
@@ -213,3 +219,36 @@ int ol_cfg_is_full_reorder_offload(ol_pdev_handle pdev)
struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
return cfg->is_full_reorder_offload;
}
+
+#ifdef IPA_UC_OFFLOAD
+unsigned int ol_cfg_ipa_uc_offload_enabled(ol_pdev_handle pdev)
+{
+ struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
+ return (unsigned int)cfg->ipa_uc_rsc.uc_offload_enabled;
+}
+
+unsigned int ol_cfg_ipa_uc_tx_buf_size(ol_pdev_handle pdev)
+{
+ struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
+ return cfg->ipa_uc_rsc.tx_buf_size;
+}
+
+unsigned int ol_cfg_ipa_uc_tx_max_buf_cnt(ol_pdev_handle pdev)
+{
+ struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
+ return cfg->ipa_uc_rsc.tx_max_buf_cnt;
+}
+
+unsigned int ol_cfg_ipa_uc_rx_ind_ring_size(ol_pdev_handle pdev)
+{
+ struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
+ return cfg->ipa_uc_rsc.rx_ind_ring_size;
+}
+
+unsigned int ol_cfg_ipa_uc_tx_partition_base(ol_pdev_handle pdev)
+{
+ struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
+ return cfg->ipa_uc_rsc.tx_partition_base;
+}
+#endif /* IPA_UC_OFFLOAD */
+
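The accessors above only hand back values that ol_pdev_cfg_attach copied from struct txrx_pdev_cfg_param_t (defined in ol_txrx_ctrl_api.h later in this patch). A standalone sketch of that copy-then-read flow; the structs mirror the fields added by this patch, but the numeric values are illustrative only, not driver defaults:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the IPA uC fields of struct txrx_pdev_cfg_param_t. */
struct example_cfg_param {
    uint8_t  is_uc_offload_enabled;
    uint32_t uc_tx_buffer_count;
    uint32_t uc_tx_buffer_size;
    uint32_t uc_rx_indication_ring_count;
    uint32_t uc_tx_partition_base;
};

/* Mirrors struct wlan_ipa_uc_rsc_t held inside the pdev config. */
struct example_ipa_uc_rsc {
    uint8_t  uc_offload_enabled;
    uint32_t tx_max_buf_cnt;
    uint32_t tx_buf_size;
    uint32_t rx_ind_ring_size;
    uint32_t tx_partition_base;
};

int main(void)
{
    struct example_cfg_param param = {
        .is_uc_offload_enabled = 1,
        .uc_tx_buffer_count = 1024,          /* illustrative values */
        .uc_tx_buffer_size = 2048,           /* 2K default noted in ol_cfg.h */
        .uc_rx_indication_ring_count = 1024,
        .uc_tx_partition_base = 3000,
    };
    struct example_ipa_uc_rsc rsc;

    /* Attach-time copy, as ol_pdev_cfg_attach() does for these fields. */
    rsc.uc_offload_enabled = param.is_uc_offload_enabled;
    rsc.tx_max_buf_cnt     = param.uc_tx_buffer_count;
    rsc.tx_buf_size        = param.uc_tx_buffer_size;
    rsc.rx_ind_ring_size   = param.uc_rx_indication_ring_count;
    rsc.tx_partition_base  = param.uc_tx_partition_base;

    /* Later reads go through the ol_cfg_ipa_uc_*() accessors. */
    printf("uc offload %u, %u tx bufs x %u bytes\n",
           (unsigned)rsc.uc_offload_enabled,
           (unsigned)rsc.tx_max_buf_cnt,
           (unsigned)rsc.tx_buf_size);
    return 0;
}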
diff --git a/CORE/CLD_TXRX/TXRX/ol_txrx.c b/CORE/CLD_TXRX/TXRX/ol_txrx.c
index 608dd6f5198b..655073fabce1 100644
--- a/CORE/CLD_TXRX/TXRX/ol_txrx.c
+++ b/CORE/CLD_TXRX/TXRX/ol_txrx.c
@@ -324,6 +324,15 @@ ol_txrx_pdev_attach(
goto fail2;
}
+#ifdef IPA_UC_OFFLOAD
+ /* Attach micro controller data path offload resource */
+ if (ol_cfg_ipa_uc_offload_enabled(ctrl_pdev)) {
+ if (htt_ipa_uc_attach(pdev->htt_pdev)) {
+ goto fail3;
+ }
+ }
+#endif /* IPA_UC_OFFLOAD */
+
pdev->tx_desc.array = adf_os_mem_alloc(
osdev, desc_pool_size * sizeof(union ol_tx_desc_list_elem_t));
if (!pdev->tx_desc.array) {
@@ -663,6 +672,11 @@ fail5:
fail4:
adf_os_mem_free(pdev->tx_desc.array);
+#ifdef IPA_UC_OFFLOAD
+ if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev)) {
+ htt_ipa_uc_detach(pdev->htt_pdev);
+ }
+#endif /* IPA_UC_OFFLOAD */
fail3:
htt_detach(pdev->htt_pdev);
@@ -748,6 +762,13 @@ ol_txrx_pdev_detach(ol_txrx_pdev_handle pdev, int force)
adf_os_mem_free(pdev->tx_desc.array);
+#ifdef IPA_UC_OFFLOAD
+ /* Detach micro controller data path offload resource */
+ if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev)) {
+ htt_ipa_uc_detach(pdev->htt_pdev);
+ }
+#endif /* IPA_UC_OFFLOAD */
+
htt_detach(pdev->htt_pdev);
ol_txrx_peer_find_detach(pdev);
@@ -2026,3 +2047,75 @@ ol_txrx_ll_set_tx_pause_q_depth(
return;
}
#endif /* QCA_LL_TX_FLOW_CT */
+
+#ifdef IPA_UC_OFFLOAD
+void
+ol_txrx_ipa_uc_get_resource(
+ ol_txrx_pdev_handle pdev,
+ u_int32_t *ce_sr_base_paddr,
+ u_int32_t *ce_sr_ring_size,
+ u_int32_t *ce_reg_paddr,
+ u_int32_t *tx_comp_ring_base_paddr,
+ u_int32_t *tx_comp_ring_size,
+ u_int32_t *tx_num_alloc_buffer,
+ u_int32_t *rx_rdy_ring_base_paddr,
+ u_int32_t *rx_rdy_ring_size,
+ u_int32_t *rx_proc_done_idx_paddr
+)
+{
+ htt_ipa_uc_get_resource(pdev->htt_pdev,
+ ce_sr_base_paddr,
+ ce_sr_ring_size,
+ ce_reg_paddr,
+ tx_comp_ring_base_paddr,
+ tx_comp_ring_size,
+ tx_num_alloc_buffer,
+ rx_rdy_ring_base_paddr,
+ rx_rdy_ring_size,
+ rx_proc_done_idx_paddr);
+}
+
+void
+ol_txrx_ipa_uc_set_doorbell_paddr(
+ ol_txrx_pdev_handle pdev,
+ u_int32_t ipa_tx_uc_doorbell_paddr,
+ u_int32_t ipa_rx_uc_doorbell_paddr
+)
+{
+ htt_ipa_uc_set_doorbell_paddr(pdev->htt_pdev,
+ ipa_tx_uc_doorbell_paddr,
+ ipa_rx_uc_doorbell_paddr);
+}
+
+void
+ol_txrx_ipa_uc_set_active(
+ ol_txrx_pdev_handle pdev,
+ a_bool_t uc_active,
+ a_bool_t is_tx
+)
+{
+ htt_h2t_ipa_uc_set_active(pdev->htt_pdev,
+ uc_active,
+ is_tx);
+}
+
+void
+ol_txrx_ipa_uc_op_response(
+ ol_txrx_pdev_handle pdev,
+ u_int8_t op_code
+)
+{
+ pdev->ipa_uc_op_cb(op_code, pdev->osif_dev);
+}
+
+void ol_txrx_ipa_uc_register_op_cb(
+ ol_txrx_pdev_handle pdev,
+ ipa_uc_op_cb_type op_cb,
+ void *osif_dev)
+{
+ pdev->ipa_uc_op_cb = op_cb;
+ pdev->osif_dev = osif_dev;
+}
+
+#endif /* IPA_UC_OFFLOAD */
+
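ol_txrx_ipa_uc_register_op_cb and ol_txrx_ipa_uc_op_response above implement a simple callback registration: the OSIF layer registers a handler and context, and the firmware's WDI_IPA_OP_RESPONSE (routed through htt_t2h.c) invokes it. A standalone sketch of that pattern; the struct and function names below are stand-ins, not driver symbols:

#include <stdint.h>
#include <stdio.h>

/* Mirrors ipa_uc_op_cb_type from ol_txrx_types.h in this patch. */
typedef void (*ipa_uc_op_cb_type)(uint8_t op_code, void *osif_ctxt);

/* Stand-ins for the pdev fields this patch adds. */
struct example_pdev {
    ipa_uc_op_cb_type ipa_uc_op_cb;
    void *osif_dev;
};

/* Registration, as in ol_txrx_ipa_uc_register_op_cb(). */
static void register_op_cb(struct example_pdev *pdev,
                           ipa_uc_op_cb_type cb, void *osif_dev)
{
    pdev->ipa_uc_op_cb = cb;
    pdev->osif_dev = osif_dev;
}

/* Firmware response path, as in ol_txrx_ipa_uc_op_response(),
 * with an added NULL check for the sketch. */
static void op_response(struct example_pdev *pdev, uint8_t op_code)
{
    if (pdev->ipa_uc_op_cb)
        pdev->ipa_uc_op_cb(op_code, pdev->osif_dev);
}

static void osif_handler(uint8_t op_code, void *ctxt)
{
    (void)ctxt;
    printf("fw op response, opcode %u\n", (unsigned)op_code);
}

int main(void)
{
    struct example_pdev pdev = { 0 };
    register_op_cb(&pdev, osif_handler, NULL);
    op_response(&pdev, 1);    /* 1 == HTT_WDI_IPA_OPCODE_TX_RESUME */
    return 0;
}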
diff --git a/CORE/CLD_TXRX/TXRX/ol_txrx_types.h b/CORE/CLD_TXRX/TXRX/ol_txrx_types.h
index debbdd36970e..f430cb7facd9 100644
--- a/CORE/CLD_TXRX/TXRX/ol_txrx_types.h
+++ b/CORE/CLD_TXRX/TXRX/ol_txrx_types.h
@@ -318,6 +318,10 @@ typedef enum _throttle_phase {
#define THROTTLE_TX_THRESHOLD (100)
+#ifdef IPA_UC_OFFLOAD
+typedef void (*ipa_uc_op_cb_type)(u_int8_t op_code, void *osif_ctxt);
+#endif /* IPA_UC_OFFLOAD */
+
/*
* As depicted in the diagram below, the pdev contains an array of
* NUM_EXT_TID ol_tx_active_queues_in_tid_t elements.
@@ -678,6 +682,11 @@ struct ol_txrx_pdev_t {
/* mark as true if traffic is paused due to thermal throttling */
a_bool_t is_paused;
} tx_throttle;
+
+#ifdef IPA_UC_OFFLOAD
+ ipa_uc_op_cb_type ipa_uc_op_cb;
+ void *osif_dev;
+#endif /* IPA_UC_OFFLOAD */
};
struct ol_txrx_vdev_t {
diff --git a/CORE/SERVICES/COMMON/adf/adf_nbuf.c b/CORE/SERVICES/COMMON/adf/adf_nbuf.c
index b007bc1dc1c0..7dcbbbc200f6 100644
--- a/CORE/SERVICES/COMMON/adf/adf_nbuf.c
+++ b/CORE/SERVICES/COMMON/adf/adf_nbuf.c
@@ -103,7 +103,7 @@ __adf_nbuf_alloc(adf_os_device_t osdev, size_t size, int reserve, int align, int
void
__adf_nbuf_free(struct sk_buff *skb)
{
-#ifdef IPA_OFFLOAD
+#if defined(IPA_OFFLOAD) && !defined(IPA_UC_OFFLOAD)
if( (NBUF_OWNER_ID(skb) == IPA_NBUF_OWNER_ID) && NBUF_CALLBACK_FN(skb) )
NBUF_CALLBACK_FN_EXEC(skb);
else
diff --git a/CORE/SERVICES/COMMON/adf/adf_os_types.h b/CORE/SERVICES/COMMON/adf/adf_os_types.h
index 3d02bd037562..61d4e5aed87e 100644
--- a/CORE/SERVICES/COMMON/adf/adf_os_types.h
+++ b/CORE/SERVICES/COMMON/adf/adf_os_types.h
@@ -265,6 +265,7 @@ typedef enum {
* ADF_OS_DMA_FROM_DEVICE (data going from memory to device)
*/
typedef enum {
+ ADF_OS_DMA_BIDIRECTIONAL = __ADF_OS_DMA_BIDIRECTIONAL,
ADF_OS_DMA_TO_DEVICE = __ADF_OS_DMA_TO_DEVICE,
ADF_OS_DMA_FROM_DEVICE = __ADF_OS_DMA_FROM_DEVICE,
} adf_os_dma_dir_t;
diff --git a/CORE/SERVICES/COMMON/adf/linux/adf_os_types_pvt.h b/CORE/SERVICES/COMMON/adf/linux/adf_os_types_pvt.h
index 8cb4ba51f2f5..1953c3010548 100644
--- a/CORE/SERVICES/COMMON/adf/linux/adf_os_types_pvt.h
+++ b/CORE/SERVICES/COMMON/adf/linux/adf_os_types_pvt.h
@@ -179,6 +179,7 @@ enum __adf_net_wireless_evcode{
#define __adf_os_snprint snprintf
#define __adf_os_vsnprint vsnprintf
+#define __ADF_OS_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL
#define __ADF_OS_DMA_TO_DEVICE DMA_TO_DEVICE
#define __ADF_OS_DMA_FROM_DEVICE DMA_FROM_DEVICE
#define __adf_os_inline inline
diff --git a/CORE/SERVICES/COMMON/hif.h b/CORE/SERVICES/COMMON/hif.h
index b31cea757e27..fe1438f17bd2 100644
--- a/CORE/SERVICES/COMMON/hif.h
+++ b/CORE/SERVICES/COMMON/hif.h
@@ -790,6 +790,18 @@ void sim_target_register_write(struct ol_softc *scn, u_int32_t addr, u_int32_t v
#endif
+#ifdef IPA_UC_OFFLOAD
+/*
+ * IPA micro controller data path offload feature enabled,
+ * HIF should release copy engine related resource information to IPA UC
+ * IPA UC will access hardware resource with released information
+ */
+void HIFIpaGetCEResource(HIF_DEVICE *hif_device,
+ A_UINT32 *ce_sr_base_paddr,
+ A_UINT32 *ce_sr_ring_size,
+ A_UINT32 *ce_reg_paddr);
+#endif /* IPA_UC_OFFLOAD */
+
#ifdef __cplusplus
}
#endif
diff --git a/CORE/SERVICES/COMMON/htc_api.h b/CORE/SERVICES/COMMON/htc_api.h
index 93e3dfa72881..d71c2074d408 100644
--- a/CORE/SERVICES/COMMON/htc_api.h
+++ b/CORE/SERVICES/COMMON/htc_api.h
@@ -692,4 +692,10 @@ void HTCCancelDeferredTargetSleep(void *context);
/* Disable ASPM : Disable PCIe low power */
void htc_disable_aspm(void);
+#ifdef IPA_UC_OFFLOAD
+void HTCIpaGetCEResource(HTC_HANDLE htc_handle,
+ a_uint32_t *ce_sr_base_paddr,
+ a_uint32_t *ce_sr_ring_size,
+ a_uint32_t *ce_reg_paddr);
+#endif/* IPA_UC_OFFLOAD */
#endif /* _HTC_API_H_ */
diff --git a/CORE/SERVICES/COMMON/htc_services.h b/CORE/SERVICES/COMMON/htc_services.h
index 05dc54a67c58..73062f3fdcbd 100644
--- a/CORE/SERVICES/COMMON/htc_services.h
+++ b/CORE/SERVICES/COMMON/htc_services.h
@@ -45,7 +45,9 @@ typedef enum {
NMI_SERVICE_GROUP = 2,
HTT_SERVICE_GROUP = 3,
CFG_NV_SERVICE_GROUP = 4,
-
+#ifdef IPA_UC_OFFLOAD
+ WDI_IPA_SERVICE_GROUP = 5,
+#endif /* IPA_UC_OFFLOAD */
HTC_TEST_GROUP = 254,
HTC_SERVICE_GROUP_LAST = 255
}HTC_SERVICE_GROUP_IDS;
@@ -72,7 +74,9 @@ typedef enum {
#define HTC_RAW_STREAMS_SVC MAKE_SERVICE_ID(HTC_TEST_GROUP,0)
#define CFG_NV_SVC MAKE_SERVICE_ID(CFG_NV_SERVICE_GROUP,0)
-
+#ifdef IPA_UC_OFFLOAD
+#define WDI_IPA_TX_SVC MAKE_SERVICE_ID(WDI_IPA_SERVICE_GROUP,0)
+#endif /* IPA_UC_OFFLOAD */
/*
* Directions for interconnect pipe configuration.
* These definitions may be used during configuration and are shared
diff --git a/CORE/SERVICES/COMMON/ol_cfg.h b/CORE/SERVICES/COMMON/ol_cfg.h
index d980cec23227..be15f4bfc5aa 100644
--- a/CORE/SERVICES/COMMON/ol_cfg.h
+++ b/CORE/SERVICES/COMMON/ol_cfg.h
@@ -49,6 +49,16 @@ enum wlan_frm_fmt {
wlan_frm_fmt_802_3,
};
+#ifdef IPA_UC_OFFLOAD
+struct wlan_ipa_uc_rsc_t {
+ u8 uc_offload_enabled;
+ u32 tx_max_buf_cnt;
+ u32 tx_buf_size;
+ u32 rx_ind_ring_size;
+ u32 tx_partition_base;
+};
+#endif /* IPA_UC_OFFLOAD */
+
/* Config parameters for txrx_pdev */
struct txrx_pdev_cfg_t {
u8 is_high_latency;
@@ -70,6 +80,9 @@ struct txrx_pdev_cfg_t {
u8 rx_fwd_disabled;
u8 is_packet_log_enabled;
u8 is_full_reorder_offload;
+#ifdef IPA_UC_OFFLOAD
+ struct wlan_ipa_uc_rsc_t ipa_uc_rsc;
+#endif /* IPA_UC_OFFLOAD */
};
/**
@@ -454,4 +467,49 @@ void ol_set_cfg_packet_log_enabled(ol_pdev_handle pdev, u_int8_t val);
*/
u_int8_t ol_cfg_is_packet_log_enabled(ol_pdev_handle pdev);
+#ifdef IPA_UC_OFFLOAD
+/**
+ * @brief IPA micro controller data path offload enabled or not
+ * @details
+ *  This function returns whether the IPA micro controller data path
+ *  offload feature is enabled
+ *
+ * @param pdev - handle to the physical device
+ */
+unsigned int ol_cfg_ipa_uc_offload_enabled(ol_pdev_handle pdev);
+/**
+ * @brief IPA micro controller data path TX buffer size
+ * @details
+ *  This function returns the IPA micro controller data path offload
+ *  TX buffer size which should be pre-allocated by the driver.
+ * Default buffer size is 2K
+ *
+ * @param pdev - handle to the physical device
+ */
+unsigned int ol_cfg_ipa_uc_tx_buf_size(ol_pdev_handle pdev);
+/**
+ * @brief IPA micro controller data path TX buffer count
+ * @details
+ *  This function returns the IPA micro controller data path offload
+ *  TX buffer count which should be pre-allocated by the driver.
+ *
+ * @param pdev - handle to the physical device
+ */
+unsigned int ol_cfg_ipa_uc_tx_max_buf_cnt(ol_pdev_handle pdev);
+/**
+ * @brief IPA micro controller data path RX indication ring size
+ * @details
+ *  This function returns the IPA micro controller data path offload
+ *  RX indication ring size, which will be notified by the WLAN FW to
+ *  the IPA micro controller
+ *
+ * @param pdev - handle to the physical device
+ */
+unsigned int ol_cfg_ipa_uc_rx_ind_ring_size(ol_pdev_handle pdev);
+/**
+ * @brief IPA micro controller data path TX partition base
+ * @param pdev - handle to the physical device
+ */
+unsigned int ol_cfg_ipa_uc_tx_partition_base(ol_pdev_handle pdev);
+#endif /* IPA_UC_OFFLOAD */
#endif /* _OL_CFG__H_ */
diff --git a/CORE/SERVICES/COMMON/ol_htt_api.h b/CORE/SERVICES/COMMON/ol_htt_api.h
index 7d5989e1e0f6..670ac387e1d7 100644
--- a/CORE/SERVICES/COMMON/ol_htt_api.h
+++ b/CORE/SERVICES/COMMON/ol_htt_api.h
@@ -267,5 +267,89 @@ htt_rx_reorder_log_print(struct htt_pdev_t *pdev);
#define htt_rx_reorder_log_print(pdev)
#endif
+#ifdef IPA_UC_OFFLOAD
+/**
+ * @brief send IPA UC resource config message to firmware with HTT message
+ * @details
+ * send IPA UC resource config message to firmware with HTT message
+ *
+ * @param pdev - handle to the HTT instance
+ */
+int
+htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev);
+
+/**
+ * @brief Client request resource information
+ * @details
+ *  The OL client will request IPA UC related resource information.
+ *  Resource information will be distributed to the IPA module.
+ * All of the required resources should be pre-allocated
+ *
+ * @param pdev - handle to the HTT instance
+ * @param ce_sr_base_paddr - copy engine source ring base physical address
+ * @param ce_sr_ring_size - copy engine source ring size
+ * @param ce_reg_paddr - copy engine register physical address
+ * @param tx_comp_ring_base_paddr - tx comp ring base physical address
+ * @param tx_comp_ring_size - tx comp ring size
+ * @param tx_num_alloc_buffer - number of allocated tx buffer
+ * @param rx_rdy_ring_base_paddr - rx ready ring base physical address
+ * @param rx_rdy_ring_size - rx ready ring size
+ * @param rx_proc_done_idx_paddr - rx process done index physical address
+ */
+int
+htt_ipa_uc_get_resource(htt_pdev_handle pdev,
+ a_uint32_t *ce_sr_base_paddr,
+ a_uint32_t *ce_sr_ring_size,
+ a_uint32_t *ce_reg_paddr,
+ a_uint32_t *tx_comp_ring_base_paddr,
+ a_uint32_t *tx_comp_ring_size,
+ a_uint32_t *tx_num_alloc_buffer,
+ a_uint32_t *rx_rdy_ring_base_paddr,
+ a_uint32_t *rx_rdy_ring_size,
+ a_uint32_t *rx_proc_done_idx_paddr);
+
+/**
+ * @brief Client set IPA UC doorbell register
+ * @details
+ *  The IPA UC provides its doorbell register physical addresses;
+ *  WLAN firmware will use these physical addresses to notify the IPA UC
+ *
+ * @param pdev - handle to the HTT instance
+ * @param ipa_uc_tx_doorbell_paddr - tx comp doorbell physical address
+ * @param ipa_uc_rx_doorbell_paddr - rx ready doorbell physical address
+ */
+int
+htt_ipa_uc_set_doorbell_paddr(htt_pdev_handle pdev,
+ a_uint32_t ipa_uc_tx_doorbell_paddr,
+ a_uint32_t ipa_uc_rx_doorbell_paddr);
+
+/**
+ * @brief Client notify IPA UC data path active or not
+ *
+ * @param pdev - handle to the HTT instance
+ * @param uc_active - UC data path is active or not
+ * @param is_tx - UC TX is active or not
+ */
+int
+htt_h2t_ipa_uc_set_active(struct htt_pdev_t *pdev,
+ a_bool_t uc_active,
+ a_bool_t is_tx);
+
+/**
+ * @brief Attach IPA UC data path
+ *
+ * @param pdev - handle to the HTT instance
+ */
+int
+htt_ipa_uc_attach(struct htt_pdev_t *pdev);
+
+/**
+ * @brief detach IPA UC data path
+ *
+ * @param pdev - handle to the HTT instance
+ */
+void
+htt_ipa_uc_detach(struct htt_pdev_t *pdev);
+#endif /* IPA_UC_OFFLOAD */
#endif /* _OL_HTT_API__H_ */
diff --git a/CORE/SERVICES/COMMON/ol_txrx_ctrl_api.h b/CORE/SERVICES/COMMON/ol_txrx_ctrl_api.h
index 23913524be7e..340033c70c4f 100644
--- a/CORE/SERVICES/COMMON/ol_txrx_ctrl_api.h
+++ b/CORE/SERVICES/COMMON/ol_txrx_ctrl_api.h
@@ -872,8 +872,19 @@ ol_txrx_peer_stats_copy(
/* Config parameters for txrx_pdev */
struct txrx_pdev_cfg_param_t {
- u_int8_t is_full_reorder_offload;
+ u_int8_t is_full_reorder_offload;
+ /* IPA Micro controller data path offload enable flag */
+ u_int8_t is_uc_offload_enabled;
+ /* IPA Micro controller data path offload TX buffer count */
+ u_int32_t uc_tx_buffer_count;
+ /* IPA Micro controller data path offload TX buffer size */
+ u_int32_t uc_tx_buffer_size;
+ /* IPA Micro controller data path offload RX indication ring count */
+ u_int32_t uc_rx_indication_ring_count;
+ /* IPA Micro controller data path offload TX partition base */
+ u_int32_t uc_tx_partition_base;
};
+
/**
* @brief Setup configuration parameters
* @details
@@ -1122,6 +1133,129 @@ ol_txrx_ll_set_tx_pause_q_depth(
);
#endif /* QCA_LL_TX_FLOW_CT */
+#ifdef IPA_UC_OFFLOAD
+/**
+ * @brief Client request resource information
+ * @details
+ * OL client will request IPA UC related resource information.
+ * Resource information will be distributed to the IPA module.
+ * All of the required resources should be pre-allocated.
+ *
+ * @param pdev - handle to the txrx pdev instance
+ * @param ce_sr_base_paddr - copy engine source ring base physical address
+ * @param ce_sr_ring_size - copy engine source ring size
+ * @param ce_reg_paddr - copy engine register physical address
+ * @param tx_comp_ring_base_paddr - tx comp ring base physical address
+ * @param tx_comp_ring_size - tx comp ring size
+ * @param tx_num_alloc_buffer - number of allocated tx buffer
+ * @param rx_rdy_ring_base_paddr - rx ready ring base physical address
+ * @param rx_rdy_ring_size - rx ready ring size
+ * @param rx_proc_done_idx_paddr - rx process done index physical address
+ */
+void
+ol_txrx_ipa_uc_get_resource(
+ ol_txrx_pdev_handle pdev,
+ u_int32_t *ce_sr_base_paddr,
+ u_int32_t *ce_sr_ring_size,
+ u_int32_t *ce_reg_paddr,
+ u_int32_t *tx_comp_ring_base_paddr,
+ u_int32_t *tx_comp_ring_size,
+ u_int32_t *tx_num_alloc_buffer,
+ u_int32_t *rx_rdy_ring_base_paddr,
+ u_int32_t *rx_rdy_ring_size,
+ u_int32_t *rx_proc_done_idx_paddr
+);
+
+/**
+ * @brief Client set IPA UC doorbell register
+ * @details
+ * The IPA UC provides its doorbell register physical addresses;
+ * WLAN firmware will use these physical addresses to notify the IPA UC
+ *
+ * @param pdev - handle to the txrx pdev instance
+ * @param ipa_tx_uc_doorbell_paddr - tx comp doorbell physical address
+ * @param ipa_rx_uc_doorbell_paddr - rx ready doorbell physical address
+ */
+void
+ol_txrx_ipa_uc_set_doorbell_paddr(
+ ol_txrx_pdev_handle pdev,
+ u_int32_t ipa_tx_uc_doorbell_paddr,
+ u_int32_t ipa_rx_uc_doorbell_paddr
+);
+/**
+ * @brief Notify firmware whether the IPA UC data path is active
+ *
+ * @param pdev - handle to the txrx pdev instance
+ * @param uc_active - UC data path is active or not
+ * @param is_tx - UC TX is active or not
+ */
+void
+ol_txrx_ipa_uc_set_active(
+ ol_txrx_pdev_handle pdev,
+ a_bool_t uc_active,
+ a_bool_t is_tx
+);
+
+/**
+ * @brief Offload data path activation notification
+ * @details
+ * Firmware notification handler for offload datapath activity
+ *
+ * @param pdev - handle to the txrx pdev instance
+ * @param op_code - indicates whether the tx or rx data path was activated
+ */
+void
+ol_txrx_ipa_uc_op_response(
+ ol_txrx_pdev_handle pdev,
+ u_int8_t op_code);
+
+/**
+ * @brief callback function registration
+ * @details
+ * API for the OSIF layer to register a callback for firmware offload
+ * datapath activity notifications
+ *
+ * @param pdev - handle to the txrx pdev instance
+ * @param ipa_uc_op_cb_type - callback function pointer to be registered
+ * @param osif_dev - osif instance pointer
+ */
+void ol_txrx_ipa_uc_register_op_cb(
+ ol_txrx_pdev_handle pdev,
+ void (*ipa_uc_op_cb_type)(u_int8_t op_code, void *osif_ctxt),
+ void *osif_dev);
+#else
+#define ol_txrx_ipa_uc_get_resource( \
+ pdev, \
+ ce_sr_base_paddr, \
+ ce_sr_ring_size, \
+ ce_reg_paddr, \
+ tx_comp_ring_base_paddr, \
+ tx_comp_ring_size, \
+ tx_num_alloc_buffer, \
+ rx_rdy_ring_base_paddr, \
+ rx_rdy_ring_size, \
+ rx_proc_done_idx_paddr) /* NO-OP */
+
+#define ol_txrx_ipa_uc_set_doorbell_paddr( \
+ pdev, \
+ ipa_tx_uc_doorbell_paddr, \
+ ipa_rx_uc_doorbell_paddr) /* NO-OP */
+
+#define ol_txrx_ipa_uc_set_active( \
+ pdev, \
+ uc_active, \
+ is_tx) /* NO-OP */
+
+#define ol_txrx_ipa_uc_op_response( \
+ pdev, \
+ op_code) /* NO-OP */
+
+#define ol_txrx_ipa_uc_register_op_cb( \
+ pdev, \
+ ipa_uc_op_cb_type, \
+ osif_dev) /* NO-OP */
+#endif /* IPA_UC_OFFLOAD */
#endif /* _OL_TXRX_CTRL_API__H_ */
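
The ol_txrx wrappers above mirror the HTT calls one layer up and add the op-response
callback hook. A minimal sketch of how an OSIF-layer consumer might use the
registration API follows; the callback body, the meaning of the op codes and the
context structure are illustrative assumptions rather than part of this change.

#include "ol_txrx_ctrl_api.h"

struct example_osif_ipa_ctx {
    ol_txrx_pdev_handle txrx_pdev;
};

/* Invoked by the txrx layer when the firmware reports offload data path
 * activity; what each op_code means is defined by the firmware interface
 * and is not spelled out in this patch. */
static void example_ipa_uc_op_event(u_int8_t op_code, void *osif_ctxt)
{
    struct example_osif_ipa_ctx *ctx = osif_ctxt;

    adf_os_print("IPA uC op event %d on pdev %p\n", op_code, ctx->txrx_pdev);
}

static void example_osif_ipa_register(struct example_osif_ipa_ctx *ctx)
{
    ol_txrx_ipa_uc_register_op_cb(ctx->txrx_pdev,
                                  example_ipa_uc_op_event, ctx);

    /* Later, once the IPA pipes are connected, enable the uC RX path; the
     * firmware acknowledgement is delivered back through the registered
     * callback by ol_txrx_ipa_uc_op_response(). */
    ol_txrx_ipa_uc_set_active(ctx->txrx_pdev, A_TRUE, A_FALSE);
}
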
diff --git a/CORE/SERVICES/HTC/htc.c b/CORE/SERVICES/HTC/htc.c
index efe0167ace07..60389f4a8f55 100644
--- a/CORE/SERVICES/HTC/htc.c
+++ b/CORE/SERVICES/HTC/htc.c
@@ -798,3 +798,21 @@ void HTCCancelDeferredTargetSleep(void *context)
#endif
#endif
}
+
+#ifdef IPA_UC_OFFLOAD
+void HTCIpaGetCEResource(HTC_HANDLE htc_handle,
+ a_uint32_t *ce_sr_base_paddr,
+ a_uint32_t *ce_sr_ring_size,
+ a_uint32_t *ce_reg_paddr)
+{
+ HTC_TARGET *target = GET_HTC_TARGET_FROM_HANDLE(htc_handle);
+
+ if (target->hif_dev != NULL) {
+ HIFIpaGetCEResource(target->hif_dev,
+ ce_sr_base_paddr,
+ ce_sr_ring_size,
+ ce_reg_paddr);
+ }
+}
+#endif /* IPA_UC_OFFLOAD */
+
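
HTCIpaGetCEResource is only the HTC-level relay: it asks HIF for the copy-engine
source ring and register addresses, presumably so the IPA uC can drive the target's
TX copy engine directly. A hypothetical caller, shown only to illustrate the
out-parameters (the surrounding function name is not from this patch), would be:

#include "htc_api.h"

static void example_fetch_ce_resource(HTC_HANDLE htc_handle)
{
    a_uint32_t ce_sr_base_paddr = 0;
    a_uint32_t ce_sr_ring_size = 0;
    a_uint32_t ce_reg_paddr = 0;

    /* Physical base and size of the CE source ring plus the CE register
     * block; all three are then reported upward to the IPA driver. */
    HTCIpaGetCEResource(htc_handle,
                        &ce_sr_base_paddr,
                        &ce_sr_ring_size,
                        &ce_reg_paddr);
}
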
diff --git a/CORE/TL/inc/wlan_qct_tl.h b/CORE/TL/inc/wlan_qct_tl.h
index c75c8bd5bf68..8ba4b584cf8f 100644
--- a/CORE/TL/inc/wlan_qct_tl.h
+++ b/CORE/TL/inc/wlan_qct_tl.h
@@ -3168,4 +3168,112 @@ void WLANTL_SetAdapterMaxQDepth
#endif /* QCA_LL_TX_FLOW_CT */
#endif /* QCA_WIFI_2_0 */
+#ifdef IPA_UC_OFFLOAD
+/*=============================================================================
+ FUNCTION WLANTL_GetIpaUcResource
+
+ DESCRIPTION
+ This function will be called by TL client.
+   Data path resources used by the FW should be allocated within the lower layer.
+   Shared resource information should be propagated to IPA.
+   The client uses this API to propagate that resource information.
+
+ PARAMETERS
+ IN
+   vos_ctx : Global OS context
+ ce_sr_base_paddr : Copy Engine Source Ring base address
+ ce_sr_ring_size : Copy Engine Source Ring size
+ ce_reg_paddr : Copy engine register address
+ tx_comp_ring_base_paddr : TX COMP ring base address
+ tx_comp_ring_size : TX COMP ring size
+ tx_num_alloc_buffer : Number of TX allocated buffer
+ rx_rdy_ring_base_paddr : RX ready ring base address
+ rx_rdy_ring_size : RX ready ring size
+ rx_proc_done_idx_paddr : RX process done index physical address
+
+ RETURN VALUE
+ NONE
+
+ SIDE EFFECTS
+
+==============================================================================*/
+void WLANTL_GetIpaUcResource(void *vos_ctx,
+ v_U32_t *ce_sr_base_paddr,
+ v_U32_t *ce_sr_ring_size,
+ v_U32_t *ce_reg_paddr,
+ v_U32_t *tx_comp_ring_base_paddr,
+ v_U32_t *tx_comp_ring_size,
+ v_U32_t *tx_num_alloc_buffer,
+ v_U32_t *rx_rdy_ring_base_paddr,
+ v_U32_t *rx_rdy_ring_size,
+ v_U32_t *rx_proc_done_idx_paddr);
+
+/*=============================================================================
+ FUNCTION WLANTL_SetUcDoorbellPaddr
+
+ DESCRIPTION
+ This function will be called by TL client.
+   The UC controller's doorbell register addresses must be provided to firmware.
+   The TL client calls this API to pass the doorbell register addresses to firmware.
+
+ PARAMETERS
+ IN
+   vos_ctx : Global OS context
+   ipa_tx_uc_doorbell_paddr : Micro Controller WLAN TX COMP doorbell register
+   ipa_rx_uc_doorbell_paddr : Micro Controller WLAN RX READY doorbell register
+
+ RETURN VALUE
+ NONE
+
+ SIDE EFFECTS
+
+==============================================================================*/
+void WLANTL_SetUcDoorbellPaddr(void *vos_ctx,
+ v_U32_t ipa_tx_uc_doorbell_paddr,
+ v_U32_t ipa_rx_uc_doorbell_paddr);
+
+/*=============================================================================
+ FUNCTION WLANTL_SetUcActive
+
+ DESCRIPTION
+ This function will be called by TL client.
+ Send Micro controller data path active or inactive notification to firmware
+
+ PARAMETERS
+ IN
+   vos_ctx : Global OS context
+ uc_active : Micro Controller data path is active or not
+ is_tx : Micro Controller WLAN TX data path is active or not
+
+ RETURN VALUE
+ NONE
+
+ SIDE EFFECTS
+
+==============================================================================*/
+void WLANTL_SetUcActive(void *vos_ctx,
+ v_BOOL_t uc_active,
+ v_BOOL_t is_tx
+);
+
+/*=============================================================================
+ FUNCTION WLANTL_RegisterOPCbFnc
+
+ DESCRIPTION
+   This function will be called by TL client to register a callback that is
+   invoked on firmware offload data path activity notifications.
+
+ PARAMETERS
+ IN
+   vos_ctx : Global OS context
+ func : callback function pointer
+
+ RETURN VALUE
+ NONE
+
+ SIDE EFFECTS
+
+==============================================================================*/
+void WLANTL_RegisterOPCbFnc(void *vos_ctx,
+ void (*func)(v_U8_t op_code));
+#endif /* IPA_UC_OFFLOAD */
#endif /* #ifndef WLAN_QCT_WLANTL_H */
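
At the top of the data path stack, the TL exports above give the OS interface layer
everything it needs to wire WLAN and IPA together. The sketch below shows one
plausible call order; the function names, the doorbell hand-off and the boolean
choices are illustrative assumptions rather than part of this change.

#include "wlan_qct_tl.h"

/* Hypothetical notification handler registered with TL. */
static void example_uc_op_event(v_U8_t op_code)
{
    /* Firmware reported offload data path activity for op_code. */
}

static void example_ipa_uc_setup(void *vos_ctx)
{
    v_U32_t ce_sr_base_paddr, ce_sr_ring_size, ce_reg_paddr;
    v_U32_t tx_comp_ring_base_paddr, tx_comp_ring_size;
    v_U32_t tx_num_alloc_buffer;
    v_U32_t rx_rdy_ring_base_paddr, rx_rdy_ring_size;
    v_U32_t rx_proc_done_idx_paddr;
    v_U32_t tx_doorbell_paddr = 0, rx_doorbell_paddr = 0;

    /* Learn where the lower layers placed the shared rings and CE registers. */
    WLANTL_GetIpaUcResource(vos_ctx,
                            &ce_sr_base_paddr, &ce_sr_ring_size,
                            &ce_reg_paddr,
                            &tx_comp_ring_base_paddr, &tx_comp_ring_size,
                            &tx_num_alloc_buffer,
                            &rx_rdy_ring_base_paddr, &rx_rdy_ring_size,
                            &rx_proc_done_idx_paddr);

    /* Register for firmware activity notifications. */
    WLANTL_RegisterOPCbFnc(vos_ctx, example_uc_op_event);

    /* (Assumed) exchange the ring addresses with the IPA driver, which
     * returns its doorbell registers; pass those down to the firmware. */
    WLANTL_SetUcDoorbellPaddr(vos_ctx, tx_doorbell_paddr, rx_doorbell_paddr);

    /* Finally mark the uC TX data path active. */
    WLANTL_SetUcActive(vos_ctx, VOS_TRUE, VOS_TRUE);
}
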