summaryrefslogtreecommitdiff
path: root/dp
diff options
context:
space:
mode:
authorPrakash Dhavali <pdhavali@codeaurora.org>2016-03-02 00:54:45 -0800
committerPrakash Dhavali <pdhavali@codeaurora.org>2016-03-03 01:02:02 -0800
commit142cee4bf22a1d052da15f3d8d050002121e4e77 (patch)
tree367efbdeac65122d4759c59068890d55cdfa0754 /dp
parentabcec8c47e8ff57bf9ef22d56be4eec50406b6b9 (diff)
Initial host-common file folder cleanup and moves
Initial host-common file folder cleanup and moves on top of baseline reference of MCL WLAN driver SU#5.0.0.160. Move dp, ht comm, hif, wmi and qdf folders one level up Change-Id: I2120898024b1eafd5d651c48768dbf48bf05995d
Diffstat (limited to 'dp')
-rw-r--r--dp/htt/htt.c611
-rw-r--r--dp/htt/htt_fw_stats.c1155
-rw-r--r--dp/htt/htt_h2t.c1025
-rw-r--r--dp/htt/htt_internal.h628
-rw-r--r--dp/htt/htt_rx.c2489
-rw-r--r--dp/htt/htt_t2h.c948
-rw-r--r--dp/htt/htt_tx.c1166
-rw-r--r--dp/htt/htt_types.h390
-rw-r--r--dp/htt/rx_desc.h533
-rw-r--r--dp/ol/inc/ol_cfg.h543
-rw-r--r--dp/ol/inc/ol_ctrl_addba_api.h43
-rw-r--r--dp/ol/inc/ol_ctrl_api.h44
-rw-r--r--dp/ol/inc/ol_defines.h47
-rw-r--r--dp/ol/inc/ol_htt_api.h366
-rw-r--r--dp/ol/inc/ol_htt_rx_api.h863
-rw-r--r--dp/ol/inc/ol_htt_tx_api.h979
-rw-r--r--dp/ol/inc/ol_osif_api.h42
-rw-r--r--dp/ol/inc/ol_params.h103
-rw-r--r--dp/ol/inc/ol_txrx_api.h113
-rw-r--r--dp/ol/inc/ol_txrx_ctrl_api.h1332
-rw-r--r--dp/ol/inc/ol_txrx_dbg.h204
-rw-r--r--dp/ol/inc/ol_txrx_htt_api.h579
-rw-r--r--dp/ol/inc/ol_txrx_osif_api.h284
-rw-r--r--dp/ol/inc/ol_txrx_stats.h133
-rw-r--r--dp/ol/inc/ol_vowext_dbg_defs.h66
-rw-r--r--dp/txrx/ipv6_defs.h107
-rw-r--r--dp/txrx/ol_cfg.c335
-rw-r--r--dp/txrx/ol_ctrl_txrx_api.h190
-rw-r--r--dp/txrx/ol_osif_txrx_api.h51
-rw-r--r--dp/txrx/ol_rx.c1493
-rw-r--r--dp/txrx/ol_rx.h67
-rw-r--r--dp/txrx/ol_rx_defrag.c1063
-rw-r--r--dp/txrx/ol_rx_defrag.h197
-rw-r--r--dp/txrx/ol_rx_fwd.c232
-rw-r--r--dp/txrx/ol_rx_fwd.h75
-rw-r--r--dp/txrx/ol_rx_pn.c350
-rw-r--r--dp/txrx/ol_rx_pn.h105
-rw-r--r--dp/txrx/ol_rx_reorder.c827
-rw-r--r--dp/txrx/ol_rx_reorder.h93
-rw-r--r--dp/txrx/ol_rx_reorder_timeout.c219
-rw-r--r--dp/txrx/ol_rx_reorder_timeout.h66
-rw-r--r--dp/txrx/ol_tx.c1368
-rw-r--r--dp/txrx/ol_tx.h92
-rw-r--r--dp/txrx/ol_tx_desc.c581
-rw-r--r--dp/txrx/ol_tx_desc.h253
-rw-r--r--dp/txrx/ol_tx_queue.c429
-rw-r--r--dp/txrx/ol_tx_queue.h92
-rw-r--r--dp/txrx/ol_tx_send.c968
-rw-r--r--dp/txrx/ol_tx_send.h86
-rw-r--r--dp/txrx/ol_txrx.c3398
-rw-r--r--dp/txrx/ol_txrx.h66
-rw-r--r--dp/txrx/ol_txrx_encap.c593
-rw-r--r--dp/txrx/ol_txrx_encap.h120
-rw-r--r--dp/txrx/ol_txrx_event.c228
-rw-r--r--dp/txrx/ol_txrx_flow_control.c695
-rw-r--r--dp/txrx/ol_txrx_internal.h737
-rw-r--r--dp/txrx/ol_txrx_peer_find.c492
-rw-r--r--dp/txrx/ol_txrx_peer_find.h116
-rw-r--r--dp/txrx/ol_txrx_types.h1011
-rw-r--r--dp/txrx/txrx.h235
-rw-r--r--dp/txrx/wdi_event.h92
-rw-r--r--dp/txrx/wdi_event_api.h95
62 files changed, 31903 insertions, 0 deletions
diff --git a/dp/htt/htt.c b/dp/htt/htt.c
new file mode 100644
index 000000000000..686882b6aead
--- /dev/null
+++ b/dp/htt/htt.c
@@ -0,0 +1,611 @@
+/*
+ * Copyright (c) 2011, 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file htt.c
+ * @brief Provide functions to create+init and destroy a HTT instance.
+ * @details
+ * This file contains functions for creating a HTT instance; initializing
+ * the HTT instance, e.g. by allocating a pool of HTT tx descriptors and
+ * connecting the HTT service with HTC; and deleting a HTT instance.
+ */
+
+#include <cdf_memory.h> /* cdf_mem_malloc */
+#include <cdf_types.h> /* cdf_device_t, cdf_print */
+
+#include <htt.h> /* htt_tx_msdu_desc_t */
+#include <ol_cfg.h>
#include <ol_txrx_htt_api.h>  /* ol_tx_download_done_ll, etc. */
+#include <ol_htt_api.h>
+
+#include <htt_internal.h>
+#include <ol_htt_tx_api.h>
+#include "hif.h"
+
+#define HTT_HTC_PKT_POOL_INIT_SIZE 100 /* enough for a large A-MPDU */
+
+A_STATUS(*htt_h2t_rx_ring_cfg_msg)(struct htt_pdev_t *pdev);
+
+#ifdef IPA_OFFLOAD
+A_STATUS htt_ipa_config(htt_pdev_handle pdev, A_STATUS status)
+{
+ if ((A_OK == status) &&
+ ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
+ status = htt_h2t_ipa_uc_rsc_cfg_msg(pdev);
+ return status;
+}
+
+#define HTT_IPA_CONFIG htt_ipa_config
+#else
+#define HTT_IPA_CONFIG(pdev, status) status /* no-op */
+#endif /* IPA_OFFLOAD */
+
+struct htt_htc_pkt *htt_htc_pkt_alloc(struct htt_pdev_t *pdev)
+{
+ struct htt_htc_pkt_union *pkt = NULL;
+
+ HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
+ if (pdev->htt_htc_pkt_freelist) {
+ pkt = pdev->htt_htc_pkt_freelist;
+ pdev->htt_htc_pkt_freelist = pdev->htt_htc_pkt_freelist->u.next;
+ }
+ HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
+
+ if (pkt == NULL)
+ pkt = cdf_mem_malloc(sizeof(*pkt));
+
+ return &pkt->u.pkt; /* not actually a dereference */
+}
+
+void htt_htc_pkt_free(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt)
+{
+ struct htt_htc_pkt_union *u_pkt = (struct htt_htc_pkt_union *)pkt;
+
+ HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
+ u_pkt->u.next = pdev->htt_htc_pkt_freelist;
+ pdev->htt_htc_pkt_freelist = u_pkt;
+ HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
+}
+
+void htt_htc_pkt_pool_free(struct htt_pdev_t *pdev)
+{
+ struct htt_htc_pkt_union *pkt, *next;
+ pkt = pdev->htt_htc_pkt_freelist;
+ while (pkt) {
+ next = pkt->u.next;
+ cdf_mem_free(pkt);
+ pkt = next;
+ }
+ pdev->htt_htc_pkt_freelist = NULL;
+}
+
+#ifdef ATH_11AC_TXCOMPACT
+void htt_htc_misc_pkt_list_add(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt)
+{
+ struct htt_htc_pkt_union *u_pkt = (struct htt_htc_pkt_union *)pkt;
+
+ HTT_TX_MUTEX_ACQUIRE(&pdev->htt_tx_mutex);
+ if (pdev->htt_htc_pkt_misclist) {
+ u_pkt->u.next = pdev->htt_htc_pkt_misclist;
+ pdev->htt_htc_pkt_misclist = u_pkt;
+ } else {
+ pdev->htt_htc_pkt_misclist = u_pkt;
+ }
+ HTT_TX_MUTEX_RELEASE(&pdev->htt_tx_mutex);
+}
+
+void htt_htc_misc_pkt_pool_free(struct htt_pdev_t *pdev)
+{
+ struct htt_htc_pkt_union *pkt, *next;
+ cdf_nbuf_t netbuf;
+ pkt = pdev->htt_htc_pkt_misclist;
+
+ while (pkt) {
+ next = pkt->u.next;
+ netbuf = (cdf_nbuf_t) (pkt->u.pkt.htc_pkt.pNetBufContext);
+ cdf_nbuf_unmap(pdev->osdev, netbuf, CDF_DMA_TO_DEVICE);
+ cdf_nbuf_free(netbuf);
+ cdf_mem_free(pkt);
+ pkt = next;
+ }
+ pdev->htt_htc_pkt_misclist = NULL;
+}
+#endif
+
+/**
+ * htt_pdev_alloc() - allocate HTT pdev
+ * @txrx_pdev: txrx pdev
+ * @ctrl_pdev: cfg pdev
+ * @htc_pdev: HTC pdev
+ * @osdev: os device
+ *
+ * Return: HTT pdev handle
+ */
+htt_pdev_handle
+htt_pdev_alloc(ol_txrx_pdev_handle txrx_pdev,
+ ol_pdev_handle ctrl_pdev,
+ HTC_HANDLE htc_pdev, cdf_device_t osdev)
+{
+ struct htt_pdev_t *pdev;
+
+ pdev = cdf_mem_malloc(sizeof(*pdev));
+ if (!pdev)
+ goto fail1;
+
+ pdev->osdev = osdev;
+ pdev->ctrl_pdev = ctrl_pdev;
+ pdev->txrx_pdev = txrx_pdev;
+ pdev->htc_pdev = htc_pdev;
+
+ cdf_mem_set(&pdev->stats, sizeof(pdev->stats), 0);
+ pdev->htt_htc_pkt_freelist = NULL;
+#ifdef ATH_11AC_TXCOMPACT
+ pdev->htt_htc_pkt_misclist = NULL;
+#endif
+ pdev->cfg.default_tx_comp_req =
+ !ol_cfg_tx_free_at_download(pdev->ctrl_pdev);
+
+ pdev->cfg.is_full_reorder_offload =
+ ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev);
+ cdf_print("is_full_reorder_offloaded? %d\n",
+ (int)pdev->cfg.is_full_reorder_offload);
+
+ pdev->cfg.ce_classify_enabled =
+ ol_cfg_is_ce_classify_enabled(ctrl_pdev);
+ cdf_print("ce_classify_enabled %d\n",
+ pdev->cfg.ce_classify_enabled);
+
+ pdev->targetdef = htc_get_targetdef(htc_pdev);
+#if defined(HELIUMPLUS_PADDR64)
+ /* TODO: OKA: Remove hard-coding */
+ HTT_SET_WIFI_IP(pdev, 2, 0);
+#endif /* defined(HELIUMPLUS_PADDR64) */
+
+ /*
+ * Connect to HTC service.
+ * This has to be done before calling htt_rx_attach,
+ * since htt_rx_attach involves sending a rx ring configure
+ * message to the target.
+ */
+/* AR6004 don't need HTT layer. */
+#ifndef AR6004_HW
+ if (htt_htc_attach(pdev))
+ goto fail2;
+#endif
+
+ return pdev;
+
+fail2:
+ cdf_mem_free(pdev);
+
+fail1:
+ return NULL;
+
+}
+
+/**
+ * htt_attach() - Allocate and setup HTT TX/RX descriptors
+ * @pdev: pdev ptr
+ * @desc_pool_size: size of tx descriptors
+ *
+ * Return: 0 for success or error code.
+ */
+int
+htt_attach(struct htt_pdev_t *pdev, int desc_pool_size)
+{
+ int i;
+ enum wlan_frm_fmt frm_type;
+ int ret = 0;
+
+ ret = htt_tx_attach(pdev, desc_pool_size);
+ if (ret)
+ goto fail1;
+
+ ret = htt_rx_attach(pdev);
+ if (ret)
+ goto fail2;
+
+ HTT_TX_MUTEX_INIT(&pdev->htt_tx_mutex);
+ HTT_TX_NBUF_QUEUE_MUTEX_INIT(pdev);
+
+ /* pre-allocate some HTC_PACKET objects */
+ for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
+ struct htt_htc_pkt_union *pkt;
+ pkt = cdf_mem_malloc(sizeof(*pkt));
+ if (!pkt)
+ break;
+ htt_htc_pkt_free(pdev, &pkt->u.pkt);
+ }
+
+ /*
+ * LL - download just the initial portion of the frame.
+ * Download enough to cover the encapsulation headers checked
+ * by the target's tx classification descriptor engine.
+ */
+
+ /* account for the 802.3 or 802.11 header */
+ frm_type = ol_cfg_frame_type(pdev->ctrl_pdev);
+ if (frm_type == wlan_frm_fmt_native_wifi) {
+ pdev->download_len = HTT_TX_HDR_SIZE_NATIVE_WIFI;
+ } else if (frm_type == wlan_frm_fmt_802_3) {
+ pdev->download_len = HTT_TX_HDR_SIZE_ETHERNET;
+ } else {
+ cdf_print("Unexpected frame type spec: %d\n", frm_type);
+ HTT_ASSERT0(0);
+ }
+ /*
+ * Account for the optional L2 / ethernet header fields:
+ * 802.1Q, LLC/SNAP
+ */
+ pdev->download_len +=
+ HTT_TX_HDR_SIZE_802_1Q + HTT_TX_HDR_SIZE_LLC_SNAP;
+
+ /*
+ * Account for the portion of the L3 (IP) payload that the
+ * target needs for its tx classification.
+ */
+ pdev->download_len += ol_cfg_tx_download_size(pdev->ctrl_pdev);
+
+ /*
+ * Account for the HTT tx descriptor, including the
+ * HTC header + alignment padding.
+ */
+ pdev->download_len += sizeof(struct htt_host_tx_desc_t);
+
+ /*
+ * The TXCOMPACT htt_tx_sched function uses pdev->download_len
+ * to apply for all requeued tx frames. Thus,
+ * pdev->download_len has to be the largest download length of
+ * any tx frame that will be downloaded.
+ * This maximum download length is for management tx frames,
+ * which have an 802.11 header.
+ */
+#ifdef ATH_11AC_TXCOMPACT
+ pdev->download_len = sizeof(struct htt_host_tx_desc_t)
+ + HTT_TX_HDR_SIZE_OUTER_HDR_MAX /* worst case */
+ + HTT_TX_HDR_SIZE_802_1Q
+ + HTT_TX_HDR_SIZE_LLC_SNAP
+ + ol_cfg_tx_download_size(pdev->ctrl_pdev);
+#endif
+ pdev->tx_send_complete_part2 = ol_tx_download_done_ll;
+
+ /*
+ * For LL, the FW rx desc is alongside the HW rx desc fields in
+ * the htt_host_rx_desc_base struct/.
+ */
+ pdev->rx_fw_desc_offset = RX_STD_DESC_FW_MSDU_OFFSET;
+
+ htt_h2t_rx_ring_cfg_msg = htt_h2t_rx_ring_cfg_msg_ll;
+
+ return 0;
+
+fail2:
+ htt_tx_detach(pdev);
+
+fail1:
+ return ret;
+}
+
+A_STATUS htt_attach_target(htt_pdev_handle pdev)
+{
+ A_STATUS status;
+
+ status = htt_h2t_ver_req_msg(pdev);
+ if (status != A_OK)
+ return status;
+
+#if defined(HELIUMPLUS_PADDR64)
+ /*
+ * Send the frag_desc info to target.
+ */
+ htt_h2t_frag_desc_bank_cfg_msg(pdev);
+#endif /* defined(HELIUMPLUS_PADDR64) */
+
+
+ /*
+ * If applicable, send the rx ring config message to the target.
+ * The host could wait for the HTT version number confirmation message
+ * from the target before sending any further HTT messages, but it's
+ * reasonable to assume that the host and target HTT version numbers
+ * match, and proceed immediately with the remaining configuration
+ * handshaking.
+ */
+
+ status = htt_h2t_rx_ring_cfg_msg(pdev);
+ status = HTT_IPA_CONFIG(pdev, status);
+
+ return status;
+}
+
+void htt_detach(htt_pdev_handle pdev)
+{
+ htt_rx_detach(pdev);
+ htt_tx_detach(pdev);
+ htt_htc_pkt_pool_free(pdev);
+#ifdef ATH_11AC_TXCOMPACT
+ htt_htc_misc_pkt_pool_free(pdev);
+#endif
+ HTT_TX_MUTEX_DESTROY(&pdev->htt_tx_mutex);
+ HTT_TX_NBUF_QUEUE_MUTEX_DESTROY(pdev);
+ htt_rx_dbg_rxbuf_deinit(pdev);
+}
+
+/**
+ * htt_pdev_free() - Free HTT pdev
+ * @pdev: htt pdev
+ *
+ * Return: none
+ */
+void htt_pdev_free(htt_pdev_handle pdev)
+{
+ cdf_mem_free(pdev);
+}
+
+void htt_detach_target(htt_pdev_handle pdev)
+{
+}
+
+#ifdef WLAN_FEATURE_FASTPATH
+/**
 * htt_pkt_dl_len_get() - HTT packet download length for fastpath case
+ *
+ * @htt_dev: pointer to htt device.
+ *
+ * As fragment one already downloaded HTT/HTC header, download length is
+ * remaining bytes.
+ *
+ * Return: download length
+ */
+int htt_pkt_dl_len_get(struct htt_pdev_t *htt_dev)
+{
+ return htt_dev->download_len - sizeof(struct htt_host_tx_desc_t);
+}
+#else
+int htt_pkt_dl_len_get(struct htt_pdev_t *htt_dev)
+{
+ return 0;
+}
+#endif
+
+int htt_htc_attach(struct htt_pdev_t *pdev)
+{
+ HTC_SERVICE_CONNECT_REQ connect;
+ HTC_SERVICE_CONNECT_RESP response;
+ A_STATUS status;
+
+ cdf_mem_set(&connect, sizeof(connect), 0);
+ cdf_mem_set(&response, sizeof(response), 0);
+
+ connect.pMetaData = NULL;
+ connect.MetaDataLength = 0;
+ connect.EpCallbacks.pContext = pdev;
+ connect.EpCallbacks.EpTxComplete = htt_h2t_send_complete;
+ connect.EpCallbacks.EpTxCompleteMultiple = NULL;
+ connect.EpCallbacks.EpRecv = htt_t2h_msg_handler;
+ connect.EpCallbacks.ep_resume_tx_queue = htt_tx_resume_handler;
+
+ /* rx buffers currently are provided by HIF, not by EpRecvRefill */
+ connect.EpCallbacks.EpRecvRefill = NULL;
+ connect.EpCallbacks.RecvRefillWaterMark = 1;
+ /* N/A, fill is done by HIF */
+
+ connect.EpCallbacks.EpSendFull = htt_h2t_full;
+ /*
+ * Specify how deep to let a queue get before htc_send_pkt will
+ * call the EpSendFull function due to excessive send queue depth.
+ */
+ connect.MaxSendQueueDepth = HTT_MAX_SEND_QUEUE_DEPTH;
+
+ /* disable flow control for HTT data message service */
+#ifndef HIF_SDIO
+ connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+#endif
+
+ /* connect to control service */
+ connect.service_id = HTT_DATA_MSG_SVC;
+
+ status = htc_connect_service(pdev->htc_pdev, &connect, &response);
+
+ if (status != A_OK)
+ return -EIO; /* failure */
+
+ pdev->htc_endpoint = response.Endpoint;
+#if defined(HIF_PCI)
+ hif_save_htc_htt_config_endpoint(pdev->htc_endpoint);
+#endif
+
+ return 0; /* success */
+}
+
+#if HTT_DEBUG_LEVEL > 5
+void htt_display(htt_pdev_handle pdev, int indent)
+{
+ cdf_print("%*s%s:\n", indent, " ", "HTT");
+ cdf_print("%*stx desc pool: %d elems of %d bytes, %d allocated\n",
+ indent + 4, " ",
+ pdev->tx_descs.pool_elems,
+ pdev->tx_descs.size, pdev->tx_descs.alloc_cnt);
+ cdf_print("%*srx ring: space for %d elems, filled with %d buffers\n",
+ indent + 4, " ",
+ pdev->rx_ring.size, pdev->rx_ring.fill_level);
+ cdf_print("%*sat %p (%#x paddr)\n", indent + 8, " ",
+ pdev->rx_ring.buf.paddrs_ring, pdev->rx_ring.base_paddr);
+ cdf_print("%*snetbuf ring @ %p\n", indent + 8, " ",
+ pdev->rx_ring.buf.netbufs_ring);
+ cdf_print("%*sFW_IDX shadow register: vaddr = %p, paddr = %#x\n",
+ indent + 8, " ",
+ pdev->rx_ring.alloc_idx.vaddr, pdev->rx_ring.alloc_idx.paddr);
+ cdf_print("%*sSW enqueue idx= %d, SW dequeue idx: desc= %d, buf= %d\n",
+ indent + 8, " ", *pdev->rx_ring.alloc_idx.vaddr,
+ pdev->rx_ring.sw_rd_idx.msdu_desc,
+ pdev->rx_ring.sw_rd_idx.msdu_payld);
+}
+#endif
+
+/* Disable ASPM : Disable PCIe low power */
+void htt_htc_disable_aspm(void)
+{
+ htc_disable_aspm();
+}
+
+#ifdef IPA_OFFLOAD
+/**
+ * htt_ipa_uc_attach() - Allocate UC data path resources
+ * @pdev: handle to the HTT instance
+ *
+ * Return: 0 success
 *         non-zero: fail
+ */
+int htt_ipa_uc_attach(struct htt_pdev_t *pdev)
+{
+ int error;
+
+ /* TX resource attach */
+ error = htt_tx_ipa_uc_attach(
+ pdev,
+ ol_cfg_ipa_uc_tx_buf_size(pdev->ctrl_pdev),
+ ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev),
+ ol_cfg_ipa_uc_tx_partition_base(pdev->ctrl_pdev));
+ if (error) {
+ cdf_print("HTT IPA UC TX attach fail code %d\n", error);
+ HTT_ASSERT0(0);
+ return error;
+ }
+
+ /* RX resource attach */
+ error = htt_rx_ipa_uc_attach(
+ pdev,
+ ol_cfg_ipa_uc_rx_ind_ring_size(pdev->ctrl_pdev));
+ if (error) {
+ cdf_print("HTT IPA UC RX attach fail code %d\n", error);
+ htt_tx_ipa_uc_detach(pdev);
+ HTT_ASSERT0(0);
+ return error;
+ }
+
+ return 0; /* success */
+}
+
+/**
 * htt_ipa_uc_detach() - Remove UC data path resources
+ * @pdev: handle to the HTT instance
+ *
+ * Return: None
+ */
+void htt_ipa_uc_detach(struct htt_pdev_t *pdev)
+{
+ /* TX IPA micro controller detach */
+ htt_tx_ipa_uc_detach(pdev);
+
+ /* RX IPA micro controller detach */
+ htt_rx_ipa_uc_detach(pdev);
+}
+
+/**
+ * htt_ipa_uc_get_resource() - Get uc resource from htt and lower layer
+ * @pdev: handle to the HTT instance
+ * @ce_sr_base_paddr: copy engine source ring base physical address
+ * @ce_sr_ring_size: copy engine source ring size
+ * @ce_reg_paddr: copy engine register physical address
+ * @tx_comp_ring_base_paddr: tx comp ring base physical address
+ * @tx_comp_ring_size: tx comp ring size
+ * @tx_num_alloc_buffer: number of allocated tx buffer
+ * @rx_rdy_ring_base_paddr: rx ready ring base physical address
+ * @rx_rdy_ring_size: rx ready ring size
+ * @rx_proc_done_idx_paddr: rx process done index physical address
+ * @rx_proc_done_idx_vaddr: rx process done index virtual address
+ * @rx2_rdy_ring_base_paddr: rx done ring base physical address
+ * @rx2_rdy_ring_size: rx done ring size
+ * @rx2_proc_done_idx_paddr: rx done index physical address
+ * @rx2_proc_done_idx_vaddr: rx done index virtual address
+ *
+ * Return: 0 success
+ */
+int
+htt_ipa_uc_get_resource(htt_pdev_handle pdev,
+ cdf_dma_addr_t *ce_sr_base_paddr,
+ uint32_t *ce_sr_ring_size,
+ cdf_dma_addr_t *ce_reg_paddr,
+ cdf_dma_addr_t *tx_comp_ring_base_paddr,
+ uint32_t *tx_comp_ring_size,
+ uint32_t *tx_num_alloc_buffer,
+ cdf_dma_addr_t *rx_rdy_ring_base_paddr,
+ uint32_t *rx_rdy_ring_size,
+ cdf_dma_addr_t *rx_proc_done_idx_paddr,
+ void **rx_proc_done_idx_vaddr,
+ cdf_dma_addr_t *rx2_rdy_ring_base_paddr,
+ uint32_t *rx2_rdy_ring_size,
+ cdf_dma_addr_t *rx2_proc_done_idx_paddr,
+ void **rx2_proc_done_idx_vaddr)
+{
+ /* Release allocated resource to client */
+ *tx_comp_ring_base_paddr =
+ pdev->ipa_uc_tx_rsc.tx_comp_base.paddr;
+ *tx_comp_ring_size =
+ (uint32_t) ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev);
+ *tx_num_alloc_buffer = (uint32_t) pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt;
+ *rx_rdy_ring_base_paddr =
+ pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr;
+ *rx_rdy_ring_size = (uint32_t) pdev->ipa_uc_rx_rsc.rx_ind_ring_size;
+ *rx_proc_done_idx_paddr =
+ pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr;
+ *rx_proc_done_idx_vaddr =
+ (void *)pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr;
+ *rx2_rdy_ring_base_paddr =
+ pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr;
+ *rx2_rdy_ring_size = (uint32_t) pdev->ipa_uc_rx_rsc.rx2_ind_ring_size;
+ *rx2_proc_done_idx_paddr =
+ pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr;
+ *rx2_proc_done_idx_vaddr =
+ (void *)pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr;
+
+ /* Get copy engine, bus resource */
+ htc_ipa_get_ce_resource(pdev->htc_pdev,
+ ce_sr_base_paddr,
+ ce_sr_ring_size, ce_reg_paddr);
+
+ return 0;
+}
+
+/**
+ * htt_ipa_uc_set_doorbell_paddr() - Propagate IPA doorbell address
+ * @pdev: handle to the HTT instance
+ * @ipa_uc_tx_doorbell_paddr: TX doorbell base physical address
+ * @ipa_uc_rx_doorbell_paddr: RX doorbell base physical address
+ *
+ * Return: 0 success
+ */
+int
+htt_ipa_uc_set_doorbell_paddr(htt_pdev_handle pdev,
+ cdf_dma_addr_t ipa_uc_tx_doorbell_paddr,
+ cdf_dma_addr_t ipa_uc_rx_doorbell_paddr)
+{
+ pdev->ipa_uc_tx_rsc.tx_comp_idx_paddr = ipa_uc_tx_doorbell_paddr;
+ pdev->ipa_uc_rx_rsc.rx_rdy_idx_paddr = ipa_uc_rx_doorbell_paddr;
+ return 0;
+}
+#endif /* IPA_OFFLOAD */
diff --git a/dp/htt/htt_fw_stats.c b/dp/htt/htt_fw_stats.c
new file mode 100644
index 000000000000..aee627d0e617
--- /dev/null
+++ b/dp/htt/htt_fw_stats.c
@@ -0,0 +1,1155 @@
+/*
+ * Copyright (c) 2012-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file htt_fw_stats.c
+ * @brief Provide functions to process FW status retrieved from FW.
+ */
+
+#include <htc_api.h> /* HTC_PACKET */
+#include <htt.h> /* HTT_T2H_MSG_TYPE, etc. */
+#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <cdf_memory.h> /* cdf_mem_set */
+#include <ol_fw_tx_dbg.h> /* ol_fw_tx_dbg_ppdu_base */
+
+#include <ol_htt_rx_api.h>
+#include <ol_txrx_htt_api.h> /* htt_tx_status */
+
+#include <htt_internal.h>
+
+#include <wlan_defs.h>
+
+#define ROUND_UP_TO_4(val) (((val) + 3) & ~0x3)
+
+
+static char *bw_str_arr[] = {"20MHz", "40MHz", "80MHz", "160MHz"};
+
+/*
+ * Defined the macro tx_rate_stats_print_cmn()
+ * so that this could be used in both
+ * htt_t2h_stats_tx_rate_stats_print() &
+ * htt_t2h_stats_tx_rate_stats_print_v2().
+ * Each of these functions take a different structure as argument,
+ * but with common fields in the structures--so using a macro
+ * to bypass the strong type-checking of a function seems a simple
+ * trick to use to avoid the code duplication.
+ */
+#define tx_rate_stats_print_cmn(_tx_rate_info, _concise) \
+{ \
+ int i; \
+ \
+ cdf_print("TX Rate Info:\n"); \
+ \
+ /* MCS */ \
+ cdf_print("MCS counts (0..9): "); \
+ cdf_print("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d\n",\
+ _tx_rate_info->mcs[0], \
+ _tx_rate_info->mcs[1], \
+ _tx_rate_info->mcs[2], \
+ _tx_rate_info->mcs[3], \
+ _tx_rate_info->mcs[4], \
+ _tx_rate_info->mcs[5], \
+ _tx_rate_info->mcs[6], \
+ _tx_rate_info->mcs[7], \
+ _tx_rate_info->mcs[8], \
+ _tx_rate_info->mcs[9]); \
+ \
+ /* SGI */ \
+ cdf_print("SGI counts (0..9): "); \
+ cdf_print("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d\n",\
+ _tx_rate_info->sgi[0], \
+ _tx_rate_info->sgi[1], \
+ _tx_rate_info->sgi[2], \
+ _tx_rate_info->sgi[3], \
+ _tx_rate_info->sgi[4], \
+ _tx_rate_info->sgi[5], \
+ _tx_rate_info->sgi[6], \
+ _tx_rate_info->sgi[7], \
+ _tx_rate_info->sgi[8], \
+ _tx_rate_info->sgi[9]); \
+ \
+ /* NSS */ \
+ cdf_print("NSS counts: "); \
+ cdf_print("1x1 %d, 2x2 %d, 3x3 %d\n", \
+ _tx_rate_info->nss[0], \
+ _tx_rate_info->nss[1], _tx_rate_info->nss[2]);\
+ \
+ /* BW */ \
+ cdf_print("BW counts: "); \
+ \
+ for (i = 0; \
+ i < sizeof(_tx_rate_info->bw) / sizeof(_tx_rate_info->bw[0]);\
+ i++) { \
+ cdf_print("%s %d ", bw_str_arr[i], _tx_rate_info->bw[i]);\
+ } \
+ cdf_print("\n"); \
+ \
+ /* Preamble */ \
+ cdf_print("Preamble (O C H V) counts: "); \
+ cdf_print("%d, %d, %d, %d\n", \
+ _tx_rate_info->pream[0], \
+ _tx_rate_info->pream[1], \
+ _tx_rate_info->pream[2], \
+ _tx_rate_info->pream[3]); \
+ \
+ /* STBC rate counts */ \
+ cdf_print("STBC rate counts (0..9): "); \
+ cdf_print("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d\n",\
+ _tx_rate_info->stbc[0], \
+ _tx_rate_info->stbc[1], \
+ _tx_rate_info->stbc[2], \
+ _tx_rate_info->stbc[3], \
+ _tx_rate_info->stbc[4], \
+ _tx_rate_info->stbc[5], \
+ _tx_rate_info->stbc[6], \
+ _tx_rate_info->stbc[7], \
+ _tx_rate_info->stbc[8], \
+ _tx_rate_info->stbc[9]); \
+ \
+ /* LDPC and TxBF counts */ \
+ cdf_print("LDPC Counts: "); \
+ cdf_print("%d\n", _tx_rate_info->ldpc); \
+ cdf_print("RTS Counts: "); \
+ cdf_print("%d\n", _tx_rate_info->rts_cnt); \
+ /* RSSI Values for last ack frames */ \
+ cdf_print("Ack RSSI: %d\n", _tx_rate_info->ack_rssi);\
+}
+
+static void htt_t2h_stats_tx_rate_stats_print(wlan_dbg_tx_rate_info_t *
+ tx_rate_info, int concise)
+{
+ tx_rate_stats_print_cmn(tx_rate_info, concise);
+}
+
+static void htt_t2h_stats_tx_rate_stats_print_v2(wlan_dbg_tx_rate_info_v2_t *
+ tx_rate_info, int concise)
+{
+ tx_rate_stats_print_cmn(tx_rate_info, concise);
+}
+
+/*
+ * Defined the macro rx_rate_stats_print_cmn()
+ * so that this could be used in both
+ * htt_t2h_stats_rx_rate_stats_print() &
+ * htt_t2h_stats_rx_rate_stats_print_v2().
+ * Each of these functions take a different structure as argument,
+ * but with common fields in the structures -- so using a macro
+ * to bypass the strong type-checking of a function seems a simple
+ * trick to use to avoid the code duplication.
+ */
+#define rx_rate_stats_print_cmn(_rx_phy_info, _concise) \
+{ \
+ int i; \
+ \
+ cdf_print("RX Rate Info:\n"); \
+ \
+ /* MCS */ \
+ cdf_print("MCS counts (0..9): "); \
+ cdf_print("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d\n",\
+ _rx_phy_info->mcs[0], \
+ _rx_phy_info->mcs[1], \
+ _rx_phy_info->mcs[2], \
+ _rx_phy_info->mcs[3], \
+ _rx_phy_info->mcs[4], \
+ _rx_phy_info->mcs[5], \
+ _rx_phy_info->mcs[6], \
+ _rx_phy_info->mcs[7], \
+ _rx_phy_info->mcs[8], \
+ _rx_phy_info->mcs[9]); \
+ \
+ /* SGI */ \
+ cdf_print("SGI counts (0..9): "); \
+ cdf_print("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d\n",\
+ _rx_phy_info->sgi[0], \
+ _rx_phy_info->sgi[1], \
+ _rx_phy_info->sgi[2], \
+ _rx_phy_info->sgi[3], \
+ _rx_phy_info->sgi[4], \
+ _rx_phy_info->sgi[5], \
+ _rx_phy_info->sgi[6], \
+ _rx_phy_info->sgi[7], \
+ _rx_phy_info->sgi[8], \
+ _rx_phy_info->sgi[9]); \
+ \
+ /* NSS */ \
+ cdf_print("NSS counts: "); \
+ /* nss[0] just holds the count of non-stbc frames that were sent at 1x1 \
+ * rates and nsts holds the count of frames sent with stbc. \
+ * It was decided to not include PPDUs sent w/ STBC in nss[0]\
+ * since it would be easier to change the value that needs to be\
+ * printed (from "stbc+non-stbc count to only non-stbc count")\
+ * if needed in the future. Hence the addition in the host code\
+ * at this line. */ \
+ cdf_print("1x1 %d, 2x2 %d, 3x3 %d, 4x4 %d\n", \
+ _rx_phy_info->nss[0] + _rx_phy_info->nsts, \
+ _rx_phy_info->nss[1], \
+ _rx_phy_info->nss[2], \
+ _rx_phy_info->nss[3]); \
+ \
+ /* NSTS */ \
+ cdf_print("NSTS count: "); \
+ cdf_print("%d\n", _rx_phy_info->nsts); \
+ \
+ /* BW */ \
+ cdf_print("BW counts: "); \
+ for (i = 0; \
+ i < sizeof(_rx_phy_info->bw) / sizeof(_rx_phy_info->bw[0]); \
+ i++) { \
+ cdf_print("%s %d ", bw_str_arr[i], _rx_phy_info->bw[i]);\
+ } \
+ cdf_print("\n"); \
+ \
+ /* Preamble */ \
+ cdf_print("Preamble counts: "); \
+ cdf_print("%d, %d, %d, %d, %d, %d\n", \
+ _rx_phy_info->pream[0], \
+ _rx_phy_info->pream[1], \
+ _rx_phy_info->pream[2], \
+ _rx_phy_info->pream[3], \
+ _rx_phy_info->pream[4], \
+ _rx_phy_info->pream[5]); \
+ \
+ /* STBC rate counts */ \
+ cdf_print("STBC rate counts (0..9): "); \
+ cdf_print("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d\n",\
+ _rx_phy_info->stbc[0], \
+ _rx_phy_info->stbc[1], \
+ _rx_phy_info->stbc[2], \
+ _rx_phy_info->stbc[3], \
+ _rx_phy_info->stbc[4], \
+ _rx_phy_info->stbc[5], \
+ _rx_phy_info->stbc[6], \
+ _rx_phy_info->stbc[7], \
+ _rx_phy_info->stbc[8], \
+ _rx_phy_info->stbc[9]); \
+ \
+ /* LDPC and TxBF counts */ \
+ cdf_print("LDPC TXBF Counts: "); \
+ cdf_print("%d, %d\n", _rx_phy_info->ldpc, _rx_phy_info->txbf);\
+ /* RSSI Values for last received frames */ \
+ cdf_print("RSSI (data, mgmt): %d, %d\n", _rx_phy_info->data_rssi,\
+ _rx_phy_info->mgmt_rssi); \
+ \
+ cdf_print("RSSI Chain 0 (0x%02x 0x%02x 0x%02x 0x%02x)\n",\
+ ((_rx_phy_info->rssi_chain0 >> 24) & 0xff), \
+ ((_rx_phy_info->rssi_chain0 >> 16) & 0xff), \
+ ((_rx_phy_info->rssi_chain0 >> 8) & 0xff), \
+ ((_rx_phy_info->rssi_chain0 >> 0) & 0xff)); \
+ \
+ cdf_print("RSSI Chain 1 (0x%02x 0x%02x 0x%02x 0x%02x)\n",\
+ ((_rx_phy_info->rssi_chain1 >> 24) & 0xff), \
+ ((_rx_phy_info->rssi_chain1 >> 16) & 0xff), \
+ ((_rx_phy_info->rssi_chain1 >> 8) & 0xff), \
+ ((_rx_phy_info->rssi_chain1 >> 0) & 0xff)); \
+ \
+ cdf_print("RSSI Chain 2 (0x%02x 0x%02x 0x%02x 0x%02x)\n",\
+ ((_rx_phy_info->rssi_chain2 >> 24) & 0xff), \
+ ((_rx_phy_info->rssi_chain2 >> 16) & 0xff), \
+ ((_rx_phy_info->rssi_chain2 >> 8) & 0xff), \
+ ((_rx_phy_info->rssi_chain2 >> 0) & 0xff)); \
+}
+
+static void htt_t2h_stats_rx_rate_stats_print(wlan_dbg_rx_rate_info_t *
+ rx_phy_info, int concise)
+{
+ rx_rate_stats_print_cmn(rx_phy_info, concise);
+}
+
+static void htt_t2h_stats_rx_rate_stats_print_v2(wlan_dbg_rx_rate_info_v2_t *
+ rx_phy_info, int concise)
+{
+ rx_rate_stats_print_cmn(rx_phy_info, concise);
+}
+
+static void
+htt_t2h_stats_pdev_stats_print(struct wlan_dbg_stats *wlan_pdev_stats,
+ int concise)
+{
+ struct wlan_dbg_tx_stats *tx = &wlan_pdev_stats->tx;
+ struct wlan_dbg_rx_stats *rx = &wlan_pdev_stats->rx;
+
+ cdf_print("WAL Pdev stats:\n");
+ cdf_print("\n### Tx ###\n");
+
+ /* Num HTT cookies queued to dispatch list */
+ cdf_print("comp_queued :\t%d\n", tx->comp_queued);
+ /* Num HTT cookies dispatched */
+ cdf_print("comp_delivered :\t%d\n", tx->comp_delivered);
+ /* Num MSDU queued to WAL */
+ cdf_print("msdu_enqued :\t%d\n", tx->msdu_enqued);
+ /* Num MPDU queued to WAL */
+ cdf_print("mpdu_enqued :\t%d\n", tx->mpdu_enqued);
+ /* Num MSDUs dropped by WMM limit */
+ cdf_print("wmm_drop :\t%d\n", tx->wmm_drop);
+ /* Num Local frames queued */
+ cdf_print("local_enqued :\t%d\n", tx->local_enqued);
+ /* Num Local frames done */
+ cdf_print("local_freed :\t%d\n", tx->local_freed);
+ /* Num queued to HW */
+ cdf_print("hw_queued :\t%d\n", tx->hw_queued);
+ /* Num PPDU reaped from HW */
+ cdf_print("hw_reaped :\t%d\n", tx->hw_reaped);
+ /* Num underruns */
+ cdf_print("mac underrun :\t%d\n", tx->underrun);
+ /* Num underruns */
+ cdf_print("phy underrun :\t%d\n", tx->phy_underrun);
+ /* Num PPDUs cleaned up in TX abort */
+ cdf_print("tx_abort :\t%d\n", tx->tx_abort);
+ /* Num MPDUs requed by SW */
+ cdf_print("mpdus_requed :\t%d\n", tx->mpdus_requed);
+ /* Excessive retries */
+ cdf_print("excess retries :\t%d\n", tx->tx_ko);
+ /* last data rate */
+ cdf_print("last rc :\t%d\n", tx->data_rc);
+ /* scheduler self triggers */
+ cdf_print("sched self trig :\t%d\n", tx->self_triggers);
+ /* SW retry failures */
+ cdf_print("ampdu retry failed:\t%d\n", tx->sw_retry_failure);
+ /* ilegal phy rate errirs */
+ cdf_print("illegal rate errs :\t%d\n", tx->illgl_rate_phy_err);
+ /* pdev continous excessive retries */
+ cdf_print("pdev cont xretry :\t%d\n", tx->pdev_cont_xretry);
+ /* pdev continous excessive retries */
+ cdf_print("pdev tx timeout :\t%d\n", tx->pdev_tx_timeout);
+ /* pdev resets */
+ cdf_print("pdev resets :\t%d\n", tx->pdev_resets);
+ /* PPDU > txop duration */
+ cdf_print("ppdu txop ovf :\t%d\n", tx->txop_ovf);
+
+ cdf_print("\n### Rx ###\n");
+ /* Cnts any change in ring routing mid-ppdu */
+ cdf_print("ppdu_route_change :\t%d\n", rx->mid_ppdu_route_change);
+ /* Total number of statuses processed */
+ cdf_print("status_rcvd :\t%d\n", rx->status_rcvd);
+ /* Extra frags on rings 0-3 */
+ cdf_print("r0_frags :\t%d\n", rx->r0_frags);
+ cdf_print("r1_frags :\t%d\n", rx->r1_frags);
+ cdf_print("r2_frags :\t%d\n", rx->r2_frags);
+ cdf_print("r3_frags :\t%d\n", rx->r3_frags);
+ /* MSDUs / MPDUs delivered to HTT */
+ cdf_print("htt_msdus :\t%d\n", rx->htt_msdus);
+ cdf_print("htt_mpdus :\t%d\n", rx->htt_mpdus);
+ /* MSDUs / MPDUs delivered to local stack */
+ cdf_print("loc_msdus :\t%d\n", rx->loc_msdus);
+ cdf_print("loc_mpdus :\t%d\n", rx->loc_mpdus);
+ /* AMSDUs that have more MSDUs than the status ring size */
+ cdf_print("oversize_amsdu :\t%d\n", rx->oversize_amsdu);
+ /* Number of PHY errors */
+ cdf_print("phy_errs :\t%d\n", rx->phy_errs);
+ /* Number of PHY errors dropped */
+ cdf_print("phy_errs dropped :\t%d\n", rx->phy_err_drop);
+ /* Number of mpdu errors - FCS, MIC, ENC etc. */
+ cdf_print("mpdu_errs :\t%d\n", rx->mpdu_errs);
+
+}
+
+static void
+htt_t2h_stats_rx_reorder_stats_print(struct rx_reorder_stats *stats_ptr,
+ int concise)
+{
+ cdf_print("Rx reorder statistics:\n");
+ cdf_print(" %u non-QoS frames received\n", stats_ptr->deliver_non_qos);
+ cdf_print(" %u frames received in-order\n",
+ stats_ptr->deliver_in_order);
+ cdf_print(" %u frames flushed due to timeout\n",
+ stats_ptr->deliver_flush_timeout);
+ cdf_print(" %u frames flushed due to moving out of window\n",
+ stats_ptr->deliver_flush_oow);
+ cdf_print(" %u frames flushed due to receiving DELBA\n",
+ stats_ptr->deliver_flush_delba);
+ cdf_print(" %u frames discarded due to FCS error\n",
+ stats_ptr->fcs_error);
+ cdf_print(" %u frames discarded due to invalid peer\n",
+ stats_ptr->invalid_peer);
+ cdf_print
+ (" %u frames discarded due to duplication (non aggregation)\n",
+ stats_ptr->dup_non_aggr);
+ cdf_print(" %u frames discarded due to duplication in reorder queue\n",
+ stats_ptr->dup_in_reorder);
+ cdf_print(" %u frames discarded due to processed before\n",
+ stats_ptr->dup_past);
+ cdf_print(" %u times reorder timeout happened\n",
+ stats_ptr->reorder_timeout);
+ cdf_print(" %u times incorrect bar received\n",
+ stats_ptr->invalid_bar_ssn);
+ cdf_print(" %u times bar ssn reset happened\n",
+ stats_ptr->ssn_reset);
+ cdf_print(" %u times flushed due to peer delete\n",
+ stats_ptr->deliver_flush_delpeer);
+ cdf_print(" %u times flushed due to offload\n",
+ stats_ptr->deliver_flush_offload);
+ cdf_print(" %u times flushed due to ouf of buffer\n",
+ stats_ptr->deliver_flush_oob);
+ cdf_print(" %u MPDU's dropped due to PN check fail\n",
+ stats_ptr->pn_fail);
+ cdf_print(" %u MPDU's dropped due to lack of memory\n",
+ stats_ptr->store_fail);
+ cdf_print(" %u times tid pool alloc succeeded\n",
+ stats_ptr->tid_pool_alloc_succ);
+ cdf_print(" %u times MPDU pool alloc succeeded\n",
+ stats_ptr->mpdu_pool_alloc_succ);
+ cdf_print(" %u times MSDU pool alloc succeeded\n",
+ stats_ptr->msdu_pool_alloc_succ);
+ cdf_print(" %u times tid pool alloc failed\n",
+ stats_ptr->tid_pool_alloc_fail);
+ cdf_print(" %u times MPDU pool alloc failed\n",
+ stats_ptr->mpdu_pool_alloc_fail);
+ cdf_print(" %u times MSDU pool alloc failed\n",
+ stats_ptr->msdu_pool_alloc_fail);
+ cdf_print(" %u times tid pool freed\n",
+ stats_ptr->tid_pool_free);
+ cdf_print(" %u times MPDU pool freed\n",
+ stats_ptr->mpdu_pool_free);
+ cdf_print(" %u times MSDU pool freed\n",
+ stats_ptr->msdu_pool_free);
+ cdf_print(" %u MSDUs undelivered to HTT, queued to Rx MSDU free list\n",
+ stats_ptr->msdu_queued);
+ cdf_print(" %u MSDUs released from Rx MSDU list to MAC ring\n",
+ stats_ptr->msdu_recycled);
+ cdf_print(" %u MPDUs with invalid peer but A2 found in AST\n",
+ stats_ptr->invalid_peer_a2_in_ast);
+ cdf_print(" %u MPDUs with invalid peer but A3 found in AST\n",
+ stats_ptr->invalid_peer_a3_in_ast);
+ cdf_print(" %u MPDUs with invalid peer, Broadcast or Mulitcast frame\n",
+ stats_ptr->invalid_peer_bmc_mpdus);
+ cdf_print(" %u MSDUs with err attention word\n",
+ stats_ptr->rxdesc_err_att);
+ cdf_print(" %u MSDUs with flag of peer_idx_invalid\n",
+ stats_ptr->rxdesc_err_peer_idx_inv);
+ cdf_print(" %u MSDUs with flag of peer_idx_timeout\n",
+ stats_ptr->rxdesc_err_peer_idx_to);
+ cdf_print(" %u MSDUs with flag of overflow\n",
+ stats_ptr->rxdesc_err_ov);
+ cdf_print(" %u MSDUs with flag of msdu_length_err\n",
+ stats_ptr->rxdesc_err_msdu_len);
+ cdf_print(" %u MSDUs with flag of mpdu_length_err\n",
+ stats_ptr->rxdesc_err_mpdu_len);
+ cdf_print(" %u MSDUs with flag of tkip_mic_err\n",
+ stats_ptr->rxdesc_err_tkip_mic);
+ cdf_print(" %u MSDUs with flag of decrypt_err\n",
+ stats_ptr->rxdesc_err_decrypt);
+ cdf_print(" %u MSDUs with flag of fcs_err\n",
+ stats_ptr->rxdesc_err_fcs);
+ cdf_print(" %u Unicast frames with invalid peer handler\n",
+ stats_ptr->rxdesc_uc_msdus_inv_peer);
+ cdf_print(" %u unicast frame directly to DUT with invalid peer handler\n",
+ stats_ptr->rxdesc_direct_msdus_inv_peer);
+ cdf_print(" %u Broadcast/Multicast frames with invalid peer handler\n",
+ stats_ptr->rxdesc_bmc_msdus_inv_peer);
+ cdf_print(" %u MSDUs dropped due to no first MSDU flag\n",
+ stats_ptr->rxdesc_no_1st_msdu);
+ cdf_print(" %u MSDUs dropped due to ring overflow\n",
+ stats_ptr->msdu_drop_ring_ov);
+ cdf_print(" %u MSDUs dropped due to FC mismatch\n",
+ stats_ptr->msdu_drop_fc_mismatch);
+ cdf_print(" %u MSDUs dropped due to mgt frame in Remote ring\n",
+ stats_ptr->msdu_drop_mgmt_remote_ring);
+ cdf_print(" %u MSDUs dropped due to misc non error\n",
+ stats_ptr->msdu_drop_misc);
+ cdf_print(" %u MSDUs go to offload before reorder\n",
+ stats_ptr->offload_msdu_wal);
+ cdf_print(" %u data frame dropped by offload after reorder\n",
+ stats_ptr->offload_msdu_reorder);
+ cdf_print(" %u MPDUs with SN in the past & within BA window\n",
+ stats_ptr->dup_past_within_window);
+ cdf_print(" %u MPDUs with SN in the past & outside BA window\n",
+ stats_ptr->dup_past_outside_window);
+}
+
+static void
+htt_t2h_stats_rx_rem_buf_stats_print(
+ struct rx_remote_buffer_mgmt_stats *stats_ptr, int concise)
+{
+ cdf_print("Rx Remote Buffer Statistics:\n");
+ cdf_print(" %u MSDU's reaped for Rx processing\n",
+ stats_ptr->remote_reaped);
+ cdf_print(" %u MSDU's recycled within firmware\n",
+ stats_ptr->remote_recycled);
+ cdf_print(" %u MSDU's stored by Data Rx\n",
+ stats_ptr->data_rx_msdus_stored);
+ cdf_print(" %u HTT indications from WAL Rx MSDU\n",
+ stats_ptr->wal_rx_ind);
+ cdf_print(" %u HTT indications unconsumed from WAL Rx MSDU\n",
+ stats_ptr->wal_rx_ind_unconsumed);
+ cdf_print(" %u HTT indications from Data Rx MSDU\n",
+ stats_ptr->data_rx_ind);
+ cdf_print(" %u HTT indications unconsumed from Data Rx MSDU\n",
+ stats_ptr->data_rx_ind_unconsumed);
+ cdf_print(" %u HTT indications from ATHBUF\n",
+ stats_ptr->athbuf_rx_ind);
+ cdf_print(" %u Remote buffers requested for refill\n",
+ stats_ptr->refill_buf_req);
+ cdf_print(" %u Remote buffers filled by host\n",
+ stats_ptr->refill_buf_rsp);
+ cdf_print(" %u times MAC has no buffers\n",
+ stats_ptr->mac_no_bufs);
+ cdf_print(" %u times f/w write & read indices on MAC ring are equal\n",
+ stats_ptr->fw_indices_equal);
+ cdf_print(" %u times f/w has no remote buffers to post to MAC\n",
+ stats_ptr->host_no_bufs);
+}
+
+static void
+htt_t2h_stats_txbf_info_buf_stats_print(
+ struct wlan_dbg_txbf_data_stats *stats_ptr)
+{
+ cdf_print("TXBF data Statistics:\n");
+ cdf_print("tx_txbf_vht (0..9): ");
+ cdf_print("%u, %u, %u, %u, %u, %u, %u, %u, %u, %d\n",
+ stats_ptr->tx_txbf_vht[0],
+ stats_ptr->tx_txbf_vht[1],
+ stats_ptr->tx_txbf_vht[2],
+ stats_ptr->tx_txbf_vht[3],
+ stats_ptr->tx_txbf_vht[4],
+ stats_ptr->tx_txbf_vht[5],
+ stats_ptr->tx_txbf_vht[6],
+ stats_ptr->tx_txbf_vht[7],
+ stats_ptr->tx_txbf_vht[8],
+ stats_ptr->tx_txbf_vht[9]);
+ cdf_print("rx_txbf_vht (0..9): ");
+ cdf_print("%u, %u, %u, %u, %u, %u, %u, %u, %u, %u\n",
+ stats_ptr->rx_txbf_vht[0],
+ stats_ptr->rx_txbf_vht[1],
+ stats_ptr->rx_txbf_vht[2],
+ stats_ptr->rx_txbf_vht[3],
+ stats_ptr->rx_txbf_vht[4],
+ stats_ptr->rx_txbf_vht[5],
+ stats_ptr->rx_txbf_vht[6],
+ stats_ptr->rx_txbf_vht[7],
+ stats_ptr->rx_txbf_vht[8],
+ stats_ptr->rx_txbf_vht[9]);
+ cdf_print("tx_txbf_ht (0..7): ");
+ cdf_print("%u, %u, %u, %u, %u, %u, %u, %u\n",
+ stats_ptr->tx_txbf_ht[0],
+ stats_ptr->tx_txbf_ht[1],
+ stats_ptr->tx_txbf_ht[2],
+ stats_ptr->tx_txbf_ht[3],
+ stats_ptr->tx_txbf_ht[4],
+ stats_ptr->tx_txbf_ht[5],
+ stats_ptr->tx_txbf_ht[6],
+ stats_ptr->tx_txbf_ht[7]);
+ cdf_print("tx_txbf_ofdm (0..7): ");
+ cdf_print("%u, %u, %u, %u, %u, %u, %u, %u\n",
+ stats_ptr->tx_txbf_ofdm[0],
+ stats_ptr->tx_txbf_ofdm[1],
+ stats_ptr->tx_txbf_ofdm[2],
+ stats_ptr->tx_txbf_ofdm[3],
+ stats_ptr->tx_txbf_ofdm[4],
+ stats_ptr->tx_txbf_ofdm[5],
+ stats_ptr->tx_txbf_ofdm[6],
+ stats_ptr->tx_txbf_ofdm[7]);
+ cdf_print("tx_txbf_cck (0..6): ");
+ cdf_print("%u, %u, %u, %u, %u, %u, %u\n",
+ stats_ptr->tx_txbf_cck[0],
+ stats_ptr->tx_txbf_cck[1],
+ stats_ptr->tx_txbf_cck[2],
+ stats_ptr->tx_txbf_cck[3],
+ stats_ptr->tx_txbf_cck[4],
+ stats_ptr->tx_txbf_cck[5],
+ stats_ptr->tx_txbf_cck[6]);
+}
+
+static void
+htt_t2h_stats_txbf_snd_buf_stats_print(
+ struct wlan_dbg_txbf_snd_stats *stats_ptr)
+{
+ cdf_print("TXBF snd Buffer Statistics:\n");
+ cdf_print("cbf_20: ");
+ cdf_print("%u, %u, %u, %u\n",
+ stats_ptr->cbf_20[0],
+ stats_ptr->cbf_20[1],
+ stats_ptr->cbf_20[2],
+ stats_ptr->cbf_20[3]);
+ cdf_print("cbf_40: ");
+ cdf_print("%u, %u, %u, %u\n",
+ stats_ptr->cbf_40[0],
+ stats_ptr->cbf_40[1],
+ stats_ptr->cbf_40[2],
+ stats_ptr->cbf_40[3]);
+ cdf_print("cbf_80: ");
+ cdf_print("%u, %u, %u, %u\n",
+ stats_ptr->cbf_80[0],
+ stats_ptr->cbf_80[1],
+ stats_ptr->cbf_80[2],
+ stats_ptr->cbf_80[3]);
+ cdf_print("sounding: ");
+ cdf_print("%u, %u, %u, %u, %u, %u, %u, %u, %u\n",
+ stats_ptr->sounding[0],
+ stats_ptr->sounding[1],
+ stats_ptr->sounding[2],
+ stats_ptr->sounding[3],
+ stats_ptr->sounding[4],
+ stats_ptr->sounding[5],
+ stats_ptr->sounding[6],
+ stats_ptr->sounding[7],
+ stats_ptr->sounding[8]);
+}
+
/**
 * htt_t2h_stats_tx_selfgen_buf_stats_print() - dump tx self-generated frame
 * statistics
 * @stats_ptr: target-supplied tx selfgen statistics block
 *
 * Prints counters for self-generated sounding/feedback frames (NDPA, NDP,
 * BR-POLL, BAR, CTS burst) and their error variants.
 *
 * NOTE(review): "mu_ndp" is printed twice below and both lines read
 * stats_ptr->mu_ndp; the first occurrence looks like it was meant to be a
 * different counter (su_ndp?) -- confirm against the
 * wlan_dbg_tx_selfgen_stats definition before changing.
 */
static void
htt_t2h_stats_tx_selfgen_buf_stats_print(
        struct wlan_dbg_tx_selfgen_stats *stats_ptr)
{
        cdf_print("Tx selfgen Buffer Statistics:\n");
        cdf_print("  %u su_ndpa\n",
                  stats_ptr->su_ndpa);
        /* see NOTE(review) above: duplicate of the mu_ndp line below */
        cdf_print("  %u mu_ndp\n",
                  stats_ptr->mu_ndp);
        cdf_print("  %u mu_ndpa\n",
                  stats_ptr->mu_ndpa);
        cdf_print("  %u mu_ndp\n",
                  stats_ptr->mu_ndp);
        cdf_print("  %u mu_brpoll_1\n",
                  stats_ptr->mu_brpoll_1);
        cdf_print("  %u mu_brpoll_2\n",
                  stats_ptr->mu_brpoll_2);
        cdf_print("  %u mu_bar_1\n",
                  stats_ptr->mu_bar_1);
        cdf_print("  %u mu_bar_2\n",
                  stats_ptr->mu_bar_2);
        cdf_print("  %u cts_burst\n",
                  stats_ptr->cts_burst);
        cdf_print("  %u su_ndp_err\n",
                  stats_ptr->su_ndp_err);
        cdf_print("  %u su_ndpa_err\n",
                  stats_ptr->su_ndpa_err);
        cdf_print("  %u mu_ndp_err\n",
                  stats_ptr->mu_ndp_err);
        cdf_print("  %u mu_brp1_err\n",
                  stats_ptr->mu_brp1_err);
        cdf_print("  %u mu_brp2_err\n",
                  stats_ptr->mu_brp2_err);
}
+
+static void
+htt_t2h_stats_wifi2_error_stats_print(
+ struct wlan_dbg_wifi2_error_stats *stats_ptr)
+{
+ int i;
+
+ cdf_print("Scheduler error Statistics:\n");
+ cdf_print("urrn_stats: ");
+ cdf_print("%d, %d, %d\n",
+ stats_ptr->urrn_stats[0],
+ stats_ptr->urrn_stats[1],
+ stats_ptr->urrn_stats[2]);
+ cdf_print("flush_errs (0..%d): ",
+ WHAL_DBG_FLUSH_REASON_MAXCNT);
+ for (i = 0; i < WHAL_DBG_FLUSH_REASON_MAXCNT; i++)
+ cdf_print(" %u", stats_ptr->flush_errs[i]);
+ cdf_print("\n");
+ cdf_print("schd_stall_errs (0..3): ");
+ cdf_print("%d, %d, %d, %d\n",
+ stats_ptr->schd_stall_errs[0],
+ stats_ptr->schd_stall_errs[1],
+ stats_ptr->schd_stall_errs[2],
+ stats_ptr->schd_stall_errs[3]);
+ cdf_print("schd_cmd_result (0..%d): ",
+ WHAL_DBG_CMD_RESULT_MAXCNT);
+ for (i = 0; i < WHAL_DBG_CMD_RESULT_MAXCNT; i++)
+ cdf_print(" %u", stats_ptr->schd_cmd_result[i]);
+ cdf_print("\n");
+ cdf_print("sifs_status (0..%d): ",
+ WHAL_DBG_SIFS_STATUS_MAXCNT);
+ for (i = 0; i < WHAL_DBG_SIFS_STATUS_MAXCNT; i++)
+ cdf_print(" %u", stats_ptr->sifs_status[i]);
+ cdf_print("\n");
+ cdf_print("phy_errs (0..%d): ",
+ WHAL_DBG_PHY_ERR_MAXCNT);
+ for (i = 0; i < WHAL_DBG_PHY_ERR_MAXCNT; i++)
+ cdf_print(" %u", stats_ptr->phy_errs[i]);
+ cdf_print("\n");
+ cdf_print(" %u rx_rate_inval\n",
+ stats_ptr->rx_rate_inval);
+}
+
+static void
+htt_t2h_rx_musu_ndpa_pkts_stats_print(
+ struct rx_txbf_musu_ndpa_pkts_stats *stats_ptr)
+{
+ cdf_print("Rx TXBF MU/SU Packets and NDPA Statistics:\n");
+ cdf_print(" %u Number of TXBF MU packets received\n",
+ stats_ptr->number_mu_pkts);
+ cdf_print(" %u Number of TXBF SU packets received\n",
+ stats_ptr->number_su_pkts);
+ cdf_print(" %u Number of TXBF directed NDPA\n",
+ stats_ptr->txbf_directed_ndpa_count);
+ cdf_print(" %u Number of TXBF retried NDPA\n",
+ stats_ptr->txbf_ndpa_retry_count);
+ cdf_print(" %u Total number of TXBF NDPA\n",
+ stats_ptr->txbf_total_ndpa_count);
+}
+
/*
 * HTT_TICK_TO_USEC() - convert firmware scheduler ticks to microseconds.
 * Both arguments are parenthesized in the expansion so that expression
 * arguments (e.g. "a + b") multiply with the intended precedence.
 */
#define HTT_TICK_TO_USEC(ticks, microsec_per_tick) \
        ((ticks) * (microsec_per_tick))
/*
 * htt_rate_flags_to_mhz() - map WHAL rate-control bandwidth flags to MHz.
 * Flags are checked narrowest-first, matching the original precedence;
 * if no bandwidth flag is set, 20 MHz is assumed.
 */
static inline int htt_rate_flags_to_mhz(uint8_t rate_flags)
{
        static const struct {
                uint8_t flag;
                int mhz;
        } bw_map[] = {
                { 0x20, 40 },   /* WHAL_RC_FLAG_40MHZ */
                { 0x40, 80 },   /* WHAL_RC_FLAG_80MHZ */
                { 0x80, 160 },  /* WHAL_RC_FLAG_160MHZ */
        };
        size_t i;

        for (i = 0; i < sizeof(bw_map) / sizeof(bw_map[0]); i++) {
                if (rate_flags & bw_map[i].flag)
                        return bw_map[i].mhz;
        }
        return 20;
}
+
#define HTT_FW_STATS_MAX_BLOCK_ACK_WINDOW 64

/*
 * htt_t2h_tx_ppdu_bitmaps_pr() - print the 64-bit enqueued/acked bitmaps.
 * Renders two aligned character rows: '1' marks an enqueued MPDU ('0'
 * otherwise), and for each enqueued MPDU 'y'/'N' marks acked/unacked
 * ('-' where nothing was enqueued).
 */
static void
htt_t2h_tx_ppdu_bitmaps_pr(uint32_t *queued_ptr, uint32_t *acked_ptr)
{
        char queued_str[HTT_FW_STATS_MAX_BLOCK_ACK_WINDOW + 1];
        char acked_str[HTT_FW_STATS_MAX_BLOCK_ACK_WINDOW + 1];
        int bit;

        cdf_mem_set(queued_str, HTT_FW_STATS_MAX_BLOCK_ACK_WINDOW, '0');
        cdf_mem_set(acked_str, HTT_FW_STATS_MAX_BLOCK_ACK_WINDOW, '-');
        for (bit = 0; bit < HTT_FW_STATS_MAX_BLOCK_ACK_WINDOW; bit++) {
                uint32_t queued = queued_ptr[bit >> 5];
                uint32_t acked = acked_ptr[bit >> 5];
                uint32_t mask = 1u << (bit & 31);

                if (queued & mask) {
                        queued_str[bit] = '1';
                        acked_str[bit] = (acked & mask) ? 'y' : 'N';
                }
        }
        queued_str[HTT_FW_STATS_MAX_BLOCK_ACK_WINDOW] = '\0';
        acked_str[HTT_FW_STATS_MAX_BLOCK_ACK_WINDOW] = '\0';
        cdf_print("%s\n", queued_str);
        cdf_print("%s\n", acked_str);
}
+
/**
 * htt_msg_read16() - read a uint16_t field from an uploaded HTT message
 * @p16: pointer to the (2-byte aligned) field within the message buffer
 *
 * Return: the field value, corrected for the HIF upload byte order.
 */
static inline uint16_t htt_msg_read16(uint16_t *p16)
{
#ifdef BIG_ENDIAN_HOST
        /*
         * During upload, the bytes within each uint32_t word were
         * swapped by the HIF HW. This results in the lower and upper bytes
         * of each uint16_t to be in the correct big-endian order with
         * respect to each other, but for each even-index uint16_t to
         * have its position switched with its successor neighbor uint16_t.
         * Undo this uint16_t position swapping.
         * (bit 1 of the address selects which half of the 4-byte word
         * we are in, i.e. whether to read the neighbor before or after.)
         */
        return (((size_t) p16) & 0x2) ? *(p16 - 1) : *(p16 + 1);
#else
        /* little-endian host: buffer already matches host byte order */
        return *p16;
#endif
}
+
/**
 * htt_msg_read8() - read a uint8_t field from an uploaded HTT message
 * @p8: pointer to the byte within the message buffer
 *
 * Return: the byte value, corrected for the HIF upload byte order.
 */
static inline uint8_t htt_msg_read8(uint8_t *p8)
{
#ifdef BIG_ENDIAN_HOST
        /*
         * During upload the HIF HW byte-swapped each uint32_t word, so
         * the byte that belongs at offset k within a word now sits at
         * offset 3-k. Index from the word-aligned base to undo the swap.
         */
        size_t ofs = ((size_t) p8) & 0x3;
        uint8_t *word_base = p8 - ofs;

        return word_base[3 - ofs];
#else
        /* little-endian host: buffer already matches host byte order */
        return *p8;
#endif
}
+
/**
 * htt_make_u8_list_str() - format uploaded uint8_t values as a CSV string
 * @aligned_data: word-aligned start of the uint8_t array in the message
 * @buffer: output buffer for the comma-separated decimal list
 * @space: size of @buffer in bytes
 * @max_elems: maximum number of elements to format
 *
 * Stops early at the first zero element (end-of-data marker) or when the
 * output buffer is exhausted; the trailing comma is replaced with NUL.
 */
void htt_make_u8_list_str(uint32_t *aligned_data,
                          char *buffer, int space, int max_elems)
{
        uint8_t *cursor = (uint8_t *) aligned_data;
        char *out = buffer;
        int elem;

        for (elem = 0; elem < max_elems; elem++) {
                int written;
                uint8_t val = htt_msg_read8(cursor);

                /* a zero element marks the end of the valid data */
                if (val == 0)
                        break;

                written = cdf_snprint(out, space, "%d,", val);
                space -= written;
                if (space <= 0)
                        break; /* print buffer exhausted */
                out += written;
                cursor++;
        }
        if (out == buffer)
                *out = '\0';    /* nothing was written */
        else
                *(out - 1) = '\0';      /* erase the final comma */
}
+
/**
 * htt_make_u16_list_str() - format uploaded uint16_t values as a CSV string
 * @aligned_data: word-aligned start of the uint16_t array in the message
 * @buffer: output buffer for the comma-separated decimal list
 * @space: size of @buffer in bytes
 * @max_elems: maximum number of elements to format
 *
 * Stops early at the first zero element (end-of-data marker) or when the
 * output buffer is exhausted; the trailing comma is replaced with NUL.
 */
void htt_make_u16_list_str(uint32_t *aligned_data,
                           char *buffer, int space, int max_elems)
{
        uint16_t *cursor = (uint16_t *) aligned_data;
        char *out = buffer;
        int elem;

        for (elem = 0; elem < max_elems; elem++) {
                int written;
                uint16_t val = htt_msg_read16(cursor);

                /* a zero element marks the end of the valid data */
                if (val == 0)
                        break;

                written = cdf_snprint(out, space, "%d,", val);
                space -= written;
                if (space <= 0)
                        break; /* print buffer exhausted */
                out += written;
                cursor++;
        }
        if (out == buffer)
                *out = '\0';    /* nothing was written */
        else
                *(out - 1) = '\0';      /* erase the final comma */
}
+
/**
 * htt_t2h_tx_ppdu_log_print() - print the firmware tx PPDU log records
 * @hdr: log header; carries microsec_per_tick and the per-record lengths
 *       of the trailing MPDU-bytes / MPDU-MSDUs / MSDU-bytes arrays
 * @record: first log record (records are laid out back to back)
 * @length: total length in bytes of header plus records
 * @concise: non-zero for a short per-record summary, zero for full detail
 *
 * Each record is a fixed ol_fw_tx_dbg_ppdu_base followed by three
 * variable-length tail arrays whose element counts come from @hdr; fields
 * are extracted from the raw 32-bit words via the OL_FW_TX_DBG_PPDU_*
 * offset/mask/shift macros.
 */
void
htt_t2h_tx_ppdu_log_print(struct ol_fw_tx_dbg_ppdu_msg_hdr *hdr,
                          struct ol_fw_tx_dbg_ppdu_base *record,
                          int length, int concise)
{
        int i;
        int record_size;
        int num_records;

        /* per-record size = fixed base + the three tail arrays */
        record_size =
                sizeof(*record) +
                hdr->mpdu_bytes_array_len * sizeof(uint16_t) +
                hdr->mpdu_msdus_array_len * sizeof(uint8_t) +
                hdr->msdu_bytes_array_len * sizeof(uint16_t);
        num_records = (length - sizeof(*hdr)) / record_size;
        cdf_print("Tx PPDU log elements:\n");

        for (i = 0; i < num_records; i++) {
                uint16_t start_seq_num;
                uint16_t start_pn_lsbs;
                uint8_t num_mpdus;
                uint16_t peer_id;
                uint8_t ext_tid;
                uint8_t rate_code;
                uint8_t rate_flags;
                uint8_t tries;
                uint8_t complete;
                uint32_t time_enqueue_us;
                uint32_t time_completion_us;
                uint32_t *msg_word = (uint32_t *) record;

                /* fields used for both concise and complete printouts */
                start_seq_num =
                        ((*(msg_word + OL_FW_TX_DBG_PPDU_START_SEQ_NUM_16)) &
                         OL_FW_TX_DBG_PPDU_START_SEQ_NUM_M) >>
                        OL_FW_TX_DBG_PPDU_START_SEQ_NUM_S;
                complete =
                        ((*(msg_word + OL_FW_TX_DBG_PPDU_COMPLETE_16)) &
                         OL_FW_TX_DBG_PPDU_COMPLETE_M) >>
                        OL_FW_TX_DBG_PPDU_COMPLETE_S;

                /* fields used only for complete printouts */
                if (!concise) {
#define BUF_SIZE 80
                        char buf[BUF_SIZE];
                        uint8_t *p8;
                        time_enqueue_us =
                                HTT_TICK_TO_USEC(record->timestamp_enqueue,
                                                 hdr->microsec_per_tick);
                        time_completion_us =
                                HTT_TICK_TO_USEC(record->timestamp_completion,
                                                 hdr->microsec_per_tick);

                        start_pn_lsbs =
                                ((*
                                  (msg_word +
                                   OL_FW_TX_DBG_PPDU_START_PN_LSBS_16)) &
                                 OL_FW_TX_DBG_PPDU_START_PN_LSBS_M) >>
                                OL_FW_TX_DBG_PPDU_START_PN_LSBS_S;
                        num_mpdus =
                                ((*(msg_word + OL_FW_TX_DBG_PPDU_NUM_MPDUS_16))&
                                 OL_FW_TX_DBG_PPDU_NUM_MPDUS_M) >>
                                OL_FW_TX_DBG_PPDU_NUM_MPDUS_S;
                        peer_id =
                                ((*(msg_word + OL_FW_TX_DBG_PPDU_PEER_ID_16)) &
                                 OL_FW_TX_DBG_PPDU_PEER_ID_M) >>
                                OL_FW_TX_DBG_PPDU_PEER_ID_S;
                        ext_tid =
                                ((*(msg_word + OL_FW_TX_DBG_PPDU_EXT_TID_16)) &
                                 OL_FW_TX_DBG_PPDU_EXT_TID_M) >>
                                OL_FW_TX_DBG_PPDU_EXT_TID_S;
                        rate_code =
                                ((*(msg_word + OL_FW_TX_DBG_PPDU_RATE_CODE_16))&
                                 OL_FW_TX_DBG_PPDU_RATE_CODE_M) >>
                                OL_FW_TX_DBG_PPDU_RATE_CODE_S;
                        rate_flags =
                                ((*(msg_word + OL_FW_TX_DBG_PPDU_RATEFLAGS_16))&
                                 OL_FW_TX_DBG_PPDU_RATE_FLAGS_M) >>
                                OL_FW_TX_DBG_PPDU_RATE_FLAGS_S;
                        tries =
                                ((*(msg_word + OL_FW_TX_DBG_PPDU_TRIES_16)) &
                                 OL_FW_TX_DBG_PPDU_TRIES_M) >>
                                OL_FW_TX_DBG_PPDU_TRIES_S;

                        cdf_print(" - PPDU tx to peer %d, TID %d\n", peer_id,
                                  ext_tid);
                        cdf_print
                                ("   start seq num= %u, start PN LSBs= %#04x\n",
                                start_seq_num, start_pn_lsbs);
                        cdf_print
                                ("   PPDU: %d MPDUs, (?) MSDUs, %d bytes\n",
                                num_mpdus,
                                /* num_msdus - not yet computed in target */
                                record->num_bytes);
                        if (complete) {
                                cdf_print
                                      ("   enqueued: %u, completed: %u usec)\n",
                                      time_enqueue_us, time_completion_us);
                                cdf_print
                                        ("   %d tries, last tx used rate %d ",
                                        tries, rate_code);
                                cdf_print("on %d MHz chan (flags = %#x)\n",
                                          htt_rate_flags_to_mhz
                                                  (rate_flags), rate_flags);
                                cdf_print
                                      ("   enqueued and acked MPDU bitmaps:\n");
                                htt_t2h_tx_ppdu_bitmaps_pr(msg_word +
                                        OL_FW_TX_DBG_PPDU_ENQUEUED_LSBS_16,
                                                           msg_word +
                                        OL_FW_TX_DBG_PPDU_BLOCK_ACK_LSBS_16);
                        } else {
                                cdf_print
                                      ("  enqueued: %d us, not yet completed\n",
                                        time_enqueue_us);
                        }
                        /* skip the regular msg fields to reach the tail area */
                        p8 = (uint8_t *) record;
                        p8 += sizeof(struct ol_fw_tx_dbg_ppdu_base);
                        if (hdr->mpdu_bytes_array_len) {
                                htt_make_u16_list_str((uint32_t *) p8, buf,
                                                      BUF_SIZE,
                                                      hdr->
                                                      mpdu_bytes_array_len);
                                cdf_print("   MPDU bytes: %s\n", buf);
                        }
                        p8 += hdr->mpdu_bytes_array_len * sizeof(uint16_t);
                        if (hdr->mpdu_msdus_array_len) {
                                htt_make_u8_list_str((uint32_t *) p8, buf,
                                                     BUF_SIZE,
                                                     hdr->mpdu_msdus_array_len);
                                cdf_print("   MPDU MSDUs: %s\n", buf);
                        }
                        p8 += hdr->mpdu_msdus_array_len * sizeof(uint8_t);
                        if (hdr->msdu_bytes_array_len) {
                                htt_make_u16_list_str((uint32_t *) p8, buf,
                                                      BUF_SIZE,
                                                      hdr->
                                                      msdu_bytes_array_len);
                                cdf_print("   MSDU bytes: %s\n", buf);
                        }
                } else {
                        /* concise */
                        cdf_print("start seq num = %u ", start_seq_num);
                        cdf_print("enqueued and acked MPDU bitmaps:\n");
                        if (complete) {
                                htt_t2h_tx_ppdu_bitmaps_pr(msg_word +
                                        OL_FW_TX_DBG_PPDU_ENQUEUED_LSBS_16,
                                                           msg_word +
                                        OL_FW_TX_DBG_PPDU_BLOCK_ACK_LSBS_16);
                        } else {
                                cdf_print("(not completed)\n");
                        }
                }
                /* advance past this record's fixed base plus tail arrays */
                record = (struct ol_fw_tx_dbg_ppdu_base *)
                         (((uint8_t *) record) + record_size);
        }
}
+
/**
 * htt_t2h_stats_print() - parse and print one target stats-conf TLV
 * @stats_data: start of the TLV (32-bit header word followed by payload)
 * @concise: non-zero for abbreviated output where the handler supports it
 *
 * Decodes the type/status/length header word, screens out terminal or
 * failed status codes, then dispatches the payload (starting at
 * msg_word + 1) to the matching htt_t2h_stats_*_print() handler.
 */
void htt_t2h_stats_print(uint8_t *stats_data, int concise)
{
        uint32_t *msg_word = (uint32_t *) stats_data;
        enum htt_dbg_stats_type type;
        enum htt_dbg_stats_status status;
        int length;

        type = HTT_T2H_STATS_CONF_TLV_TYPE_GET(*msg_word);
        status = HTT_T2H_STATS_CONF_TLV_STATUS_GET(*msg_word);
        length = HTT_T2H_STATS_CONF_TLV_LENGTH_GET(*msg_word);

        /* check that we've been given a valid stats type */
        if (status == HTT_DBG_STATS_STATUS_SERIES_DONE) {
                /* end-of-series marker: nothing to print */
                return;
        } else if (status == HTT_DBG_STATS_STATUS_INVALID) {
                cdf_print("Target doesn't support stats type %d\n", type);
                return;
        } else if (status == HTT_DBG_STATS_STATUS_ERROR) {
                cdf_print("Target couldn't upload stats type %d (no mem?)\n",
                          type);
                return;
        }
        /* got valid (though perhaps partial) stats - process them */
        switch (type) {
        case HTT_DBG_STATS_WAL_PDEV_TXRX:
        {
                struct wlan_dbg_stats *wlan_dbg_stats_ptr;

                wlan_dbg_stats_ptr =
                        (struct wlan_dbg_stats *)(msg_word + 1);
                htt_t2h_stats_pdev_stats_print(wlan_dbg_stats_ptr,
                                               concise);
                break;
        }
        case HTT_DBG_STATS_RX_REORDER:
        {
                struct rx_reorder_stats *rx_reorder_stats_ptr;

                rx_reorder_stats_ptr =
                        (struct rx_reorder_stats *)(msg_word + 1);
                htt_t2h_stats_rx_reorder_stats_print
                        (rx_reorder_stats_ptr, concise);
                break;
        }

        case HTT_DBG_STATS_RX_RATE_INFO:
        {
                wlan_dbg_rx_rate_info_t *rx_phy_info;
                rx_phy_info =
                        (wlan_dbg_rx_rate_info_t *) (msg_word + 1);

                htt_t2h_stats_rx_rate_stats_print(rx_phy_info, concise);

                break;
        }
        case HTT_DBG_STATS_RX_RATE_INFO_V2:
        {
                wlan_dbg_rx_rate_info_v2_t *rx_phy_info;
                rx_phy_info =
                        (wlan_dbg_rx_rate_info_v2_t *) (msg_word + 1);
                htt_t2h_stats_rx_rate_stats_print_v2(rx_phy_info, concise);
                break;
        }
        case HTT_DBG_STATS_TX_PPDU_LOG:
        {
                struct ol_fw_tx_dbg_ppdu_msg_hdr *hdr;
                struct ol_fw_tx_dbg_ppdu_base *record;

                /* a partial upload with no payload carries nothing to log */
                if (status == HTT_DBG_STATS_STATUS_PARTIAL
                    && length == 0) {
                        cdf_print
                                ("HTT_DBG_STATS_TX_PPDU_LOG -- length = 0!\n");
                        break;
                }
                hdr =
                        (struct ol_fw_tx_dbg_ppdu_msg_hdr *)(msg_word + 1);
                record = (struct ol_fw_tx_dbg_ppdu_base *)(hdr + 1);
                htt_t2h_tx_ppdu_log_print(hdr, record, length, concise);
        }
        break;
        case HTT_DBG_STATS_TX_RATE_INFO:
        {
                wlan_dbg_tx_rate_info_t *tx_rate_info;
                tx_rate_info =
                        (wlan_dbg_tx_rate_info_t *) (msg_word + 1);

                htt_t2h_stats_tx_rate_stats_print(tx_rate_info, concise);

                break;
        }
        case HTT_DBG_STATS_TX_RATE_INFO_V2:
        {
                wlan_dbg_tx_rate_info_v2_t *tx_rate_info;
                tx_rate_info =
                        (wlan_dbg_tx_rate_info_v2_t *) (msg_word + 1);
                htt_t2h_stats_tx_rate_stats_print_v2(tx_rate_info, concise);
                break;
        }
        case HTT_DBG_STATS_RX_REMOTE_RING_BUFFER_INFO:
        {
                struct rx_remote_buffer_mgmt_stats *rx_rem_buf;

                rx_rem_buf = (struct rx_remote_buffer_mgmt_stats *)(msg_word + 1);
                htt_t2h_stats_rx_rem_buf_stats_print(rx_rem_buf, concise);
                break;
        }
        case HTT_DBG_STATS_TXBF_INFO:
        {
                struct wlan_dbg_txbf_data_stats *txbf_info_buf;

                txbf_info_buf =
                        (struct wlan_dbg_txbf_data_stats *)(msg_word + 1);
                htt_t2h_stats_txbf_info_buf_stats_print(txbf_info_buf);
                break;
        }
        case HTT_DBG_STATS_SND_INFO:
        {
                struct wlan_dbg_txbf_snd_stats *txbf_snd_buf;

                txbf_snd_buf =
                        (struct wlan_dbg_txbf_snd_stats *)(msg_word + 1);
                htt_t2h_stats_txbf_snd_buf_stats_print(txbf_snd_buf);
                break;
        }
        case HTT_DBG_STATS_TX_SELFGEN_INFO:
        {
                struct wlan_dbg_tx_selfgen_stats *tx_selfgen_buf;

                tx_selfgen_buf =
                        (struct wlan_dbg_tx_selfgen_stats *)(msg_word + 1);
                htt_t2h_stats_tx_selfgen_buf_stats_print(tx_selfgen_buf);
                break;
        }
        case HTT_DBG_STATS_ERROR_INFO:
        {
                struct wlan_dbg_wifi2_error_stats *wifi2_error_buf;

                wifi2_error_buf =
                        (struct wlan_dbg_wifi2_error_stats *)(msg_word + 1);
                htt_t2h_stats_wifi2_error_stats_print(wifi2_error_buf);
                break;
        }
        case HTT_DBG_STATS_TXBF_MUSU_NDPA_PKT:
        {
                struct rx_txbf_musu_ndpa_pkts_stats *rx_musu_ndpa_stats;

                rx_musu_ndpa_stats = (struct rx_txbf_musu_ndpa_pkts_stats *)
                                                        (msg_word + 1);
                htt_t2h_rx_musu_ndpa_pkts_stats_print(rx_musu_ndpa_stats);
                break;
        }
        default:
                /* unknown stats type: silently ignore */
                break;
        }
}
diff --git a/dp/htt/htt_h2t.c b/dp/htt/htt_h2t.c
new file mode 100644
index 000000000000..cbfa4a7523d0
--- /dev/null
+++ b/dp/htt/htt_h2t.c
@@ -0,0 +1,1025 @@
+/*
+ * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file htt_h2t.c
+ * @brief Provide functions to send host->target HTT messages.
+ * @details
+ * This file contains functions related to host->target HTT messages.
+ * There are a couple aspects of this host->target messaging:
+ * 1. This file contains the function that is called by HTC when
+ * a host->target send completes.
+ * This send-completion callback is primarily relevant to HL,
+ * to invoke the download scheduler to set up a new download,
+ * and optionally free the tx frame whose download is completed.
+ * For both HL and LL, this completion callback frees up the
+ * HTC_PACKET object used to specify the download.
+ * 2. This file contains functions for creating messages to send
+ * from the host to the target.
+ */
+
+#include <cdf_memory.h> /* cdf_mem_copy */
+#include <cdf_nbuf.h> /* cdf_nbuf_map_single */
+#include <htc_api.h> /* HTC_PACKET */
+#include <htc.h> /* HTC_HDR_ALIGNMENT_PADDING */
+#include <htt.h> /* HTT host->target msg defs */
+#include <ol_txrx_htt_api.h> /* ol_tx_completion_handler, htt_tx_status */
+#include <ol_htt_tx_api.h>
+
+#include <htt_internal.h>
+
/*
 * Total nbuf size needed for an HTT message of msg_bytes payload:
 * the payload plus room for the HTC header and its alignment padding.
 */
#define HTT_MSG_BUF_SIZE(msg_bytes) \
        ((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)

/*
 * Fallback container_of: recover the enclosing struct from a member
 * pointer via the classic offsetof-style null-pointer arithmetic.
 * Only defined if the platform headers have not already provided one.
 */
#ifndef container_of
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - (char *)(&((type *)0)->member)))
#endif
+
/**
 * htt_h2t_send_complete_free_netbuf() - send-done callback that frees the
 * network buffer which held a host->target HTT message
 * @pdev: unused
 * @status: unused
 * @netbuf: the buffer to release
 * @msdu_id: unused
 *
 * Installed as the part-2 completion handler for messages whose buffer is
 * no longer needed once the download finishes.
 */
static void
htt_h2t_send_complete_free_netbuf(void *pdev, A_STATUS status,
                                  cdf_nbuf_t netbuf, uint16_t msdu_id)
{
        cdf_nbuf_free(netbuf);
}
+
+void htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
+{
+ void (*send_complete_part2)(void *pdev, A_STATUS status,
+ cdf_nbuf_t msdu, uint16_t msdu_id);
+ struct htt_pdev_t *pdev = (struct htt_pdev_t *)context;
+ struct htt_htc_pkt *htt_pkt;
+ cdf_nbuf_t netbuf;
+
+ send_complete_part2 = htc_pkt->pPktContext;
+
+ htt_pkt = container_of(htc_pkt, struct htt_htc_pkt, htc_pkt);
+
+ /* process (free or keep) the netbuf that held the message */
+ netbuf = (cdf_nbuf_t) htc_pkt->pNetBufContext;
+ if (send_complete_part2 != NULL) {
+ send_complete_part2(htt_pkt->pdev_ctxt, htc_pkt->Status, netbuf,
+ htt_pkt->msdu_id);
+ }
+ /* free the htt_htc_pkt / HTC_PACKET object */
+ htt_htc_pkt_free(pdev, htt_pkt);
+}
+
/**
 * htt_h2t_full() - HTC send-queue-full callback for the HTT endpoint
 * @context: unused
 * @pkt: the packet that could not be queued
 *
 * Return: HTC_SEND_FULL_KEEP to keep the packet queued unconditionally.
 * This is a placeholder (see FIX THIS) - no backpressure handling yet.
 */
HTC_SEND_FULL_ACTION htt_h2t_full(void *context, HTC_PACKET *pkt)
{
/* FIX THIS */
        return HTC_SEND_FULL_KEEP;
}
+
+#if defined(HELIUMPLUS_PADDR64)
/**
 * htt_h2t_frag_desc_bank_cfg_msg() - send the tx fragment-descriptor bank
 * configuration message to the target
 * @pdev: HTT pdev whose frag descriptor pool is being advertised
 *
 * Builds an HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG message describing one
 * descriptor bank (base address, descriptor size, index range) and hands
 * it to HTC. The netbuf is freed by the send-done callback.
 *
 * Return: result of htc_send_pkt(), or A_ERROR on allocation failure.
 */
A_STATUS htt_h2t_frag_desc_bank_cfg_msg(struct htt_pdev_t *pdev)
{
        A_STATUS rc = A_OK;

        struct htt_htc_pkt *pkt;
        cdf_nbuf_t msg;
        u_int32_t *msg_word;
        struct htt_tx_frag_desc_bank_cfg_t *bank_cfg;

        pkt = htt_htc_pkt_alloc(pdev);
        if (!pkt)
                return A_ERROR; /* failure */

        /* show that this is not a tx frame download
         * (not required, but helpful)
         */
        pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
        pkt->pdev_ctxt = NULL;  /* not used during send-done callback */

        msg = cdf_nbuf_alloc(
                pdev->osdev,
                HTT_MSG_BUF_SIZE(sizeof(struct htt_tx_frag_desc_bank_cfg_t)),
                /* reserve room for the HTC header */
                HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, true);
        if (!msg) {
                htt_htc_pkt_free(pdev, pkt);
                return A_ERROR; /* failure */
        }

        /*
         * Set the length of the message.
         * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
         * separately during the below call to cdf_nbuf_push_head.
         * The contribution from the HTC header is added separately inside HTC.
         */
        cdf_nbuf_put_tail(msg, sizeof(struct htt_tx_frag_desc_bank_cfg_t));

        /* fill in the message contents */
        msg_word = (u_int32_t *) cdf_nbuf_data(msg);

        memset(msg_word, 0 , sizeof(struct htt_tx_frag_desc_bank_cfg_t));
        /* rewind beyond alignment pad to get to the HTC header reserved area */
        cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);

        *msg_word = 0;
        HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG);

        bank_cfg = (struct htt_tx_frag_desc_bank_cfg_t *)msg_word;

        /** @note @todo Hard coded to 0 Assuming just one pdev for now.*/
        HTT_H2T_FRAG_DESC_BANK_PDEVID_SET(*msg_word, 0);
        /** @note Hard coded to 1.*/
        HTT_H2T_FRAG_DESC_BANK_NUM_BANKS_SET(*msg_word, 1);
        HTT_H2T_FRAG_DESC_BANK_DESC_SIZE_SET(*msg_word, pdev->frag_descs.size);
        HTT_H2T_FRAG_DESC_BANK_SWAP_SET(*msg_word, 0);

        /** Bank specific data structure.*/
#if HTT_PADDR64
        /* 64-bit target physical address: split into lo/hi words */
        bank_cfg->bank_base_address[0].lo =
                pdev->frag_descs.desc_pages.dma_pages->page_p_addr;
        bank_cfg->bank_base_address[0].hi = 0;
#else /* ! HTT_PADDR64 */
        bank_cfg->bank_base_address[0] =
                pdev->frag_descs.desc_pages.dma_pages->page_p_addr;
#endif /* HTT_PADDR64 */
        /* Logical Min index */
        HTT_H2T_FRAG_DESC_BANK_MIN_IDX_SET(bank_cfg->bank_info[0], 0);
        /* Logical Max index */
        HTT_H2T_FRAG_DESC_BANK_MAX_IDX_SET(bank_cfg->bank_info[0],
                                           pdev->frag_descs.pool_elems-1);

        SET_HTC_PACKET_INFO_TX(
                &pkt->htc_pkt,
                htt_h2t_send_complete_free_netbuf,
                cdf_nbuf_data(msg),
                cdf_nbuf_len(msg),
                pdev->htc_endpoint,
                1); /* tag - not relevant here */

        SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

        rc = htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);

        return rc;
}
+
+#endif /* defined(HELIUMPLUS_PADDR64) */
+
/**
 * htt_h2t_ver_req_msg() - send an HTT version request to the target
 * @pdev: HTT pdev to send on
 *
 * Builds a minimal HTT_H2T_MSG_TYPE_VERSION_REQ message and queues it via
 * HTC; the target replies with its HTT interface version. The netbuf is
 * freed by the send-done callback.
 *
 * Return: A_OK if the message was handed to HTC, A_ERROR on allocation
 * failure. (The htc_send_pkt() result itself is not propagated.)
 */
A_STATUS htt_h2t_ver_req_msg(struct htt_pdev_t *pdev)
{
        struct htt_htc_pkt *pkt;
        cdf_nbuf_t msg;
        uint32_t *msg_word;

        pkt = htt_htc_pkt_alloc(pdev);
        if (!pkt)
                return A_ERROR; /* failure */

        /* show that this is not a tx frame download
         * (not required, but helpful)
         */
        pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
        pkt->pdev_ctxt = NULL;  /* not used during send-done callback */

        /* reserve room for the HTC header */
        msg = cdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
                             HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
                             true);
        if (!msg) {
                htt_htc_pkt_free(pdev, pkt);
                return A_ERROR; /* failure */
        }

        /*
         * Set the length of the message.
         * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
         * separately during the below call to cdf_nbuf_push_head.
         * The contribution from the HTC header is added separately inside HTC.
         */
        cdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES);

        /* fill in the message contents */
        msg_word = (uint32_t *) cdf_nbuf_data(msg);

        /* rewind beyond alignment pad to get to the HTC header reserved area */
        cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);

        *msg_word = 0;
        HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);

        SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
                               htt_h2t_send_complete_free_netbuf,
                               cdf_nbuf_data(msg), cdf_nbuf_len(msg),
                               pdev->htc_endpoint,
                               1); /* tag - not relevant here */

        SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);

#ifdef ATH_11AC_TXCOMPACT
        /* track the packet so it can be reclaimed if HTC accepts it */
        if (htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK)
                htt_htc_misc_pkt_list_add(pdev, pkt);
#else
        htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
#endif

        return A_OK;
}
+
+/**
+ * htt_h2t_rx_ring_cfg_msg_ll() - send the rx ring configuration message
+ * @pdev: handle to the HTT instance
+ *
+ * Builds an HTT_H2T_MSG_TYPE_RX_RING_CFG message describing one rx ring:
+ * the shadow index register address, the ring base address, the ring
+ * length and buffer size, which rx descriptor fields the target should
+ * supply, and the dword offsets of those fields within the host rx
+ * descriptor.  When packet logging is enabled (and REMOVE_PKT_LOG is not
+ * defined), the ctrl/mgmt/null/phy data paths plus the 802.11 header and
+ * ppdu start/end fields are enabled as well, and ASPM is disabled.
+ *
+ * Return: A_OK if the message was handed to HTC, A_ERROR on allocation
+ *	failure
+ */
+A_STATUS htt_h2t_rx_ring_cfg_msg_ll(struct htt_pdev_t *pdev)
+{
+	struct htt_htc_pkt *pkt;
+	cdf_nbuf_t msg;
+	uint32_t *msg_word;
+	int enable_ctrl_data, enable_mgmt_data,
+	    enable_null_data, enable_phy_data, enable_hdr,
+	    enable_ppdu_start, enable_ppdu_end;
+
+	pkt = htt_htc_pkt_alloc(pdev);
+	if (!pkt)
+		return A_ERROR; /* failure */
+
+	/* show that this is not a tx frame download
+	   (not required, but helpful)
+	*/
+	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
+	pkt->pdev_ctxt = NULL; /* not used during send-done callback */
+
+	/* reserve room for the HTC header */
+	msg = cdf_nbuf_alloc(pdev->osdev,
+			     HTT_MSG_BUF_SIZE(HTT_RX_RING_CFG_BYTES(1)),
+			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
+			     true);
+	if (!msg) {
+		htt_htc_pkt_free(pdev, pkt);
+		return A_ERROR; /* failure */
+	}
+	/*
+	 * Set the length of the message.
+	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
+	 * separately during the below call to cdf_nbuf_push_head.
+	 * The contribution from the HTC header is added separately inside HTC.
+	 */
+	cdf_nbuf_put_tail(msg, HTT_RX_RING_CFG_BYTES(1));
+
+	/* fill in the message contents */
+	msg_word = (uint32_t *) cdf_nbuf_data(msg);
+
+	/* rewind beyond alignment pad to get to the HTC header reserved area */
+	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+
+	*msg_word = 0;
+	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RX_RING_CFG);
+	HTT_RX_RING_CFG_NUM_RINGS_SET(*msg_word, 1);
+
+	/* shadow index register address: one word for 32-bit physical
+	 * addresses, a LO/HI pair (HI written as 0) for 64-bit
+	 */
+	msg_word++;
+	*msg_word = 0;
+#if HTT_PADDR64
+	HTT_RX_RING_CFG_IDX_SHADOW_REG_PADDR_LO_SET(*msg_word,
+						    pdev->rx_ring.alloc_idx.paddr);
+	msg_word++;
+	HTT_RX_RING_CFG_IDX_SHADOW_REG_PADDR_HI_SET(*msg_word, 0);
+#else /* ! HTT_PADDR64 */
+	HTT_RX_RING_CFG_IDX_SHADOW_REG_PADDR_SET(*msg_word,
+						 pdev->rx_ring.alloc_idx.paddr);
+#endif /* HTT_PADDR64 */
+
+	msg_word++;
+	*msg_word = 0;
+#if HTT_PADDR64
+	HTT_RX_RING_CFG_BASE_PADDR_LO_SET(*msg_word,
+					  pdev->rx_ring.base_paddr);
+	msg_word++;
+	HTT_RX_RING_CFG_BASE_PADDR_HI_SET(*msg_word, 0);
+#else /* ! HTT_PADDR64 */
+	HTT_RX_RING_CFG_BASE_PADDR_SET(*msg_word, pdev->rx_ring.base_paddr);
+#endif /* HTT_PADDR64 */
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_RX_RING_CFG_LEN_SET(*msg_word, pdev->rx_ring.size);
+	HTT_RX_RING_CFG_BUF_SZ_SET(*msg_word, HTT_RX_BUF_SIZE);
+
+/* FIX THIS: if the FW creates a complete translated rx descriptor,
+ * then the MAC DMA of the HW rx descriptor should be disabled.
+ */
+	msg_word++;
+	*msg_word = 0;
+#ifndef REMOVE_PKT_LOG
+	if (ol_cfg_is_packet_log_enabled(pdev->ctrl_pdev)) {
+		enable_ctrl_data = 1;
+		enable_mgmt_data = 1;
+		enable_null_data = 1;
+		enable_phy_data = 1;
+		enable_hdr = 1;
+		enable_ppdu_start = 1;
+		enable_ppdu_end = 1;
+		/* Disable ASPM when pkt log is enabled */
+		cdf_print("Pkt log is enabled\n");
+		htt_htc_disable_aspm();
+	} else {
+		cdf_print("Pkt log is disabled\n");
+		enable_ctrl_data = 0;
+		enable_mgmt_data = 0;
+		enable_null_data = 0;
+		enable_phy_data = 0;
+		enable_hdr = 0;
+		enable_ppdu_start = 0;
+		enable_ppdu_end = 0;
+	}
+#else
+	enable_ctrl_data = 0;
+	enable_mgmt_data = 0;
+	enable_null_data = 0;
+	enable_phy_data = 0;
+	enable_hdr = 0;
+	enable_ppdu_start = 0;
+	enable_ppdu_end = 0;
+#endif
+	HTT_RX_RING_CFG_ENABLED_802_11_HDR_SET(*msg_word, enable_hdr);
+	HTT_RX_RING_CFG_ENABLED_MSDU_PAYLD_SET(*msg_word, 1);
+	HTT_RX_RING_CFG_ENABLED_PPDU_START_SET(*msg_word, enable_ppdu_start);
+	HTT_RX_RING_CFG_ENABLED_PPDU_END_SET(*msg_word, enable_ppdu_end);
+	HTT_RX_RING_CFG_ENABLED_MPDU_START_SET(*msg_word, 1);
+	HTT_RX_RING_CFG_ENABLED_MPDU_END_SET(*msg_word, 1);
+	HTT_RX_RING_CFG_ENABLED_MSDU_START_SET(*msg_word, 1);
+	HTT_RX_RING_CFG_ENABLED_MSDU_END_SET(*msg_word, 1);
+	HTT_RX_RING_CFG_ENABLED_RX_ATTN_SET(*msg_word, 1);
+	/* always present? */
+	HTT_RX_RING_CFG_ENABLED_FRAG_INFO_SET(*msg_word, 1);
+	HTT_RX_RING_CFG_ENABLED_UCAST_SET(*msg_word, 1);
+	HTT_RX_RING_CFG_ENABLED_MCAST_SET(*msg_word, 1);
+	/* Must change to dynamic enable at run time
+	 * rather than at compile time
+	 */
+	HTT_RX_RING_CFG_ENABLED_CTRL_SET(*msg_word, enable_ctrl_data);
+	HTT_RX_RING_CFG_ENABLED_MGMT_SET(*msg_word, enable_mgmt_data);
+	HTT_RX_RING_CFG_ENABLED_NULL_SET(*msg_word, enable_null_data);
+	HTT_RX_RING_CFG_ENABLED_PHY_SET(*msg_word, enable_phy_data);
+	HTT_RX_RING_CFG_IDX_INIT_VAL_SET(*msg_word,
+					 *pdev->rx_ring.alloc_idx.vaddr);
+
+	/* the remaining words carry the 32-bit-word offsets of each rx
+	 * descriptor field within the host rx descriptor layout
+	 */
+	msg_word++;
+	*msg_word = 0;
+	HTT_RX_RING_CFG_OFFSET_802_11_HDR_SET(*msg_word,
+					      RX_DESC_HDR_STATUS_OFFSET32);
+	HTT_RX_RING_CFG_OFFSET_MSDU_PAYLD_SET(*msg_word,
+					      HTT_RX_DESC_RESERVATION32);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_RX_RING_CFG_OFFSET_PPDU_START_SET(*msg_word,
+					      RX_DESC_PPDU_START_OFFSET32);
+	HTT_RX_RING_CFG_OFFSET_PPDU_END_SET(*msg_word,
+					    RX_DESC_PPDU_END_OFFSET32);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_RX_RING_CFG_OFFSET_MPDU_START_SET(*msg_word,
+					      RX_DESC_MPDU_START_OFFSET32);
+	HTT_RX_RING_CFG_OFFSET_MPDU_END_SET(*msg_word,
+					    RX_DESC_MPDU_END_OFFSET32);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_RX_RING_CFG_OFFSET_MSDU_START_SET(*msg_word,
+					      RX_DESC_MSDU_START_OFFSET32);
+	HTT_RX_RING_CFG_OFFSET_MSDU_END_SET(*msg_word,
+					    RX_DESC_MSDU_END_OFFSET32);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_RX_RING_CFG_OFFSET_RX_ATTN_SET(*msg_word,
+					   RX_DESC_ATTN_OFFSET32);
+	HTT_RX_RING_CFG_OFFSET_FRAG_INFO_SET(*msg_word,
+					     RX_DESC_FRAG_INFO_OFFSET32);
+
+	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
+			       htt_h2t_send_complete_free_netbuf,
+			       cdf_nbuf_data(msg),
+			       cdf_nbuf_len(msg),
+			       pdev->htc_endpoint,
+			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
+
+	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
+
+#ifdef ATH_11AC_TXCOMPACT
+	if (htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK)
+		htt_htc_misc_pkt_list_add(pdev, pkt);
+#else
+	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
+#endif
+	return A_OK;
+}
+
+/**
+ * htt_h2t_dbg_stats_get() - send an HTT stats request message to the target
+ * @pdev: handle to the HTT instance
+ * @stats_type_upload_mask: bitmask of stats types the target should upload
+ * @stats_type_reset_mask: bitmask of stats types the target should reset
+ * @cfg_stat_type: stats type configuration field
+ * @cfg_val: stats configuration value
+ * @cookie: 64-bit cookie the target echoes back with the stats
+ *
+ * Builds an HTT_H2T_MSG_TYPE_STATS_REQ message carrying the upload/reset
+ * masks, the configuration fields, and the caller's cookie, then hands it
+ * to HTC.  When any reset bits are set, the RUNTIME_PUT tag is used.
+ *
+ * Return: 0 if the message was handed to HTC, -EINVAL on invalid masks or
+ *	allocation failure
+ */
+int
+htt_h2t_dbg_stats_get(struct htt_pdev_t *pdev,
+		      uint32_t stats_type_upload_mask,
+		      uint32_t stats_type_reset_mask,
+		      uint8_t cfg_stat_type, uint32_t cfg_val, uint64_t cookie)
+{
+	struct htt_htc_pkt *pkt;
+	cdf_nbuf_t msg;
+	uint32_t *msg_word;
+	uint16_t htc_tag = 1;
+
+	/*
+	 * Validate the request masks before allocating the HTC packet so
+	 * there is nothing to unwind on this error path.  (Previously the
+	 * packet was allocated first and leaked when the masks were
+	 * rejected.)
+	 */
+	if (stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS ||
+	    stats_type_reset_mask >= 1 << HTT_DBG_NUM_STATS) {
+		/* FIX THIS - add more details? */
+		cdf_print("%#x %#x stats not supported\n",
+			  stats_type_upload_mask, stats_type_reset_mask);
+		return -EINVAL; /* failure */
+	}
+
+	pkt = htt_htc_pkt_alloc(pdev);
+	if (!pkt)
+		return -EINVAL; /* failure */
+
+	if (stats_type_reset_mask)
+		htc_tag = HTC_TX_PACKET_TAG_RUNTIME_PUT;
+
+	/* show that this is not a tx frame download
+	 * (not required, but helpful)
+	 */
+	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
+	pkt->pdev_ctxt = NULL; /* not used during send-done callback */
+
+
+	msg = cdf_nbuf_alloc(pdev->osdev,
+			     HTT_MSG_BUF_SIZE(HTT_H2T_STATS_REQ_MSG_SZ),
+			     /* reserve room for HTC header */
+			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
+			     false);
+	if (!msg) {
+		htt_htc_pkt_free(pdev, pkt);
+		return -EINVAL; /* failure */
+	}
+	/* set the length of the message */
+	cdf_nbuf_put_tail(msg, HTT_H2T_STATS_REQ_MSG_SZ);
+
+	/* fill in the message contents */
+	msg_word = (uint32_t *) cdf_nbuf_data(msg);
+
+	/* rewind beyond alignment pad to get to the HTC header reserved area */
+	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+
+	*msg_word = 0;
+	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_STATS_REQ);
+	HTT_H2T_STATS_REQ_UPLOAD_TYPES_SET(*msg_word, stats_type_upload_mask);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_H2T_STATS_REQ_RESET_TYPES_SET(*msg_word, stats_type_reset_mask);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_H2T_STATS_REQ_CFG_VAL_SET(*msg_word, cfg_val);
+	HTT_H2T_STATS_REQ_CFG_STAT_TYPE_SET(*msg_word, cfg_stat_type);
+
+	/* cookie LSBs */
+	msg_word++;
+	*msg_word = cookie & 0xffffffff;
+
+	/* cookie MSBs */
+	msg_word++;
+	*msg_word = cookie >> 32;
+
+	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
+			       htt_h2t_send_complete_free_netbuf,
+			       cdf_nbuf_data(msg),
+			       cdf_nbuf_len(msg),
+			       pdev->htc_endpoint,
+			       htc_tag); /* RUNTIME_PUT when resetting stats */
+
+	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
+
+#ifdef ATH_11AC_TXCOMPACT
+	if (htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK)
+		htt_htc_misc_pkt_list_add(pdev, pkt);
+#else
+	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
+#endif
+
+	return 0;
+}
+
+/**
+ * htt_h2t_sync_msg() - send an HTT sync message to the target
+ * @pdev: handle to the HTT instance
+ * @sync_cnt: sync count value placed in the message
+ *
+ * Return: A_OK if the message was handed to HTC, A_NO_MEMORY on
+ *	allocation failure
+ */
+A_STATUS htt_h2t_sync_msg(struct htt_pdev_t *pdev, uint8_t sync_cnt)
+{
+	struct htt_htc_pkt *pkt;
+	cdf_nbuf_t msg;
+	uint32_t *msg_word;
+
+	pkt = htt_htc_pkt_alloc(pdev);
+	if (!pkt)
+		return A_NO_MEMORY;
+
+	/* show that this is not a tx frame download
+	   (not required, but helpful)
+	*/
+	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
+	pkt->pdev_ctxt = NULL; /* not used during send-done callback */
+
+	/* reserve room for HTC header */
+	msg = cdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_H2T_SYNC_MSG_SZ),
+			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
+			     false);
+	if (!msg) {
+		htt_htc_pkt_free(pdev, pkt);
+		return A_NO_MEMORY;
+	}
+	/* set the length of the message */
+	cdf_nbuf_put_tail(msg, HTT_H2T_SYNC_MSG_SZ);
+
+	/* fill in the message contents */
+	msg_word = (uint32_t *) cdf_nbuf_data(msg);
+
+	/* rewind beyond alignment pad to get to the HTC header reserved area */
+	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+
+	*msg_word = 0;
+	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SYNC);
+	HTT_H2T_SYNC_COUNT_SET(*msg_word, sync_cnt);
+
+	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
+			       htt_h2t_send_complete_free_netbuf,
+			       cdf_nbuf_data(msg),
+			       cdf_nbuf_len(msg),
+			       pdev->htc_endpoint,
+			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
+
+	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
+
+#ifdef ATH_11AC_TXCOMPACT
+	if (htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK)
+		htt_htc_misc_pkt_list_add(pdev, pkt);
+#else
+	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
+#endif
+
+	return A_OK;
+}
+
+/**
+ * htt_h2t_aggr_cfg_msg() - send an HTT aggregation-limit config message
+ * @pdev: handle to the HTT instance
+ * @max_subfrms_ampdu: max number of subframes per A-MPDU; applied only
+ *	when in the range 1..64, silently ignored otherwise
+ * @max_subfrms_amsdu: max number of subframes per A-MSDU; applied only
+ *	when in the range 1..31, silently ignored otherwise
+ *
+ * Return: 0 if the message was handed to HTC, -EINVAL on allocation
+ *	failure
+ */
+int
+htt_h2t_aggr_cfg_msg(struct htt_pdev_t *pdev,
+		     int max_subfrms_ampdu, int max_subfrms_amsdu)
+{
+	struct htt_htc_pkt *pkt;
+	cdf_nbuf_t msg;
+	uint32_t *msg_word;
+
+	pkt = htt_htc_pkt_alloc(pdev);
+	if (!pkt)
+		return -EINVAL; /* failure */
+
+	/* show that this is not a tx frame download
+	 * (not required, but helpful)
+	 */
+	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
+	pkt->pdev_ctxt = NULL; /* not used during send-done callback */
+
+	/* reserve room for HTC header */
+	msg = cdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_AGGR_CFG_MSG_SZ),
+			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
+			     false);
+	if (!msg) {
+		htt_htc_pkt_free(pdev, pkt);
+		return -EINVAL; /* failure */
+	}
+	/* set the length of the message */
+	cdf_nbuf_put_tail(msg, HTT_AGGR_CFG_MSG_SZ);
+
+	/* fill in the message contents */
+	msg_word = (uint32_t *) cdf_nbuf_data(msg);
+
+	/* rewind beyond alignment pad to get to the HTC header reserved area */
+	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+
+	*msg_word = 0;
+	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_AGGR_CFG);
+
+	/* NOTE(review): the bounds differ (ampdu <= 64, amsdu < 32) -
+	 * presumably per-spec limits; confirm against the HTT interface
+	 * definition
+	 */
+	if (max_subfrms_ampdu && (max_subfrms_ampdu <= 64)) {
+		HTT_AGGR_CFG_MAX_NUM_AMPDU_SUBFRM_SET(*msg_word,
+						      max_subfrms_ampdu);
+	}
+
+	if (max_subfrms_amsdu && (max_subfrms_amsdu < 32)) {
+		HTT_AGGR_CFG_MAX_NUM_AMSDU_SUBFRM_SET(*msg_word,
+						      max_subfrms_amsdu);
+	}
+
+	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
+			       htt_h2t_send_complete_free_netbuf,
+			       cdf_nbuf_data(msg),
+			       cdf_nbuf_len(msg),
+			       pdev->htc_endpoint,
+			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
+
+	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
+
+#ifdef ATH_11AC_TXCOMPACT
+	if (htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt) == A_OK)
+		htt_htc_misc_pkt_list_add(pdev, pkt);
+#else
+	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
+#endif
+
+	return 0;
+}
+
+#ifdef IPA_OFFLOAD
+/**
+ * htt_h2t_ipa_uc_rsc_cfg_msg() - Send WDI IPA config message to firmware
+ * @pdev: handle to the HTT instance
+ *
+ * Return: 0 success
+ * A_NO_MEMORY No memory fail
+ */
+#ifdef QCA_WIFI_2_0
+/* Rome Support only WDI 1.0 */
+/* WDI 1.0 variant: each ring/index address is a single 32-bit word */
+int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
+{
+	struct htt_htc_pkt *pkt;
+	cdf_nbuf_t msg;
+	uint32_t *msg_word;
+
+	pkt = htt_htc_pkt_alloc(pdev);
+	if (!pkt)
+		return A_NO_MEMORY;
+
+	/* show that this is not a tx frame download
+	 * (not required, but helpful)
+	 */
+	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
+	pkt->pdev_ctxt = NULL; /* not used during send-done callback */
+
+	/* reserve room for HTC header */
+	msg = cdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_WDI_IPA_CFG_SZ),
+			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
+			     false);
+	if (!msg) {
+		htt_htc_pkt_free(pdev, pkt);
+		return A_NO_MEMORY;
+	}
+	/* set the length of the message */
+	cdf_nbuf_put_tail(msg, HTT_WDI_IPA_CFG_SZ);
+
+	/* fill in the message contents */
+	msg_word = (uint32_t *) cdf_nbuf_data(msg);
+
+	/* rewind beyond alignment pad to get to the HTC header reserved area */
+	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_TX_PKT_POOL_SIZE_SET(*msg_word,
+		pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt);
+	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_WDI_IPA_CFG);
+
+	/* NOTE(review): the (unsigned int) casts below truncate the
+	 * physical addresses to 32 bits - assumes a 32-bit DMA address
+	 * space on this target; confirm
+	 */
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_TX_COMP_RING_BASE_ADDR_SET(*msg_word,
+		(unsigned int)pdev->ipa_uc_tx_rsc.tx_comp_base.paddr);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_TX_COMP_RING_SIZE_SET(*msg_word,
+		(unsigned int)ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev));
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_TX_COMP_WR_IDX_ADDR_SET(*msg_word,
+		(unsigned int)pdev->ipa_uc_tx_rsc.tx_comp_idx_paddr);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_TX_CE_WR_IDX_ADDR_SET(*msg_word,
+		(unsigned int)pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_RX_IND_RING_BASE_ADDR_SET(*msg_word,
+		(unsigned int)pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_RX_IND_RING_SIZE_SET(*msg_word,
+		(unsigned int)ol_cfg_ipa_uc_rx_ind_ring_size(pdev->ctrl_pdev));
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_RX_IND_RD_IDX_ADDR_SET(*msg_word,
+		(unsigned int)pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_RX_IND_WR_IDX_ADDR_SET(*msg_word,
+		(unsigned int)pdev->ipa_uc_rx_rsc.rx_rdy_idx_paddr);
+
+	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
+			       htt_h2t_send_complete_free_netbuf,
+			       cdf_nbuf_data(msg),
+			       cdf_nbuf_len(msg),
+			       pdev->htc_endpoint,
+			       HTC_TX_PACKET_TAG_RUNTIME_PUT);
+
+	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
+
+	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
+
+	return A_OK;
+}
+#else
+/* WDI 2.0 variant: ring/index addresses are LO/HI word pairs; the HI
+ * words are written as 0 (32-bit physical addresses assumed)
+ */
+int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
+{
+	struct htt_htc_pkt *pkt;
+	cdf_nbuf_t msg;
+	uint32_t *msg_word;
+
+	pkt = htt_htc_pkt_alloc(pdev);
+	if (!pkt)
+		return -A_NO_MEMORY;
+
+	/* show that this is not a tx frame download
+	 * (not required, but helpful)
+	 */
+	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
+	pkt->pdev_ctxt = NULL; /* not used during send-done callback */
+
+	/* reserve room for HTC header */
+	msg = cdf_nbuf_alloc(pdev->osdev, HTT_MSG_BUF_SIZE(HTT_WDI_IPA_CFG_SZ),
+			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
+			     false);
+	if (!msg) {
+		htt_htc_pkt_free(pdev, pkt);
+		return -A_NO_MEMORY;
+	}
+	/* set the length of the message */
+	cdf_nbuf_put_tail(msg, HTT_WDI_IPA_CFG_SZ);
+
+	/* fill in the message contents */
+	msg_word = (uint32_t *) cdf_nbuf_data(msg);
+
+	/* rewind beyond alignment pad to get to the HTC header reserved area */
+	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_TX_PKT_POOL_SIZE_SET(*msg_word,
+		pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt);
+	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_WDI_IPA_CFG);
+
+	msg_word++;
+	*msg_word = 0;
+	/* TX COMP RING BASE LO */
+	HTT_WDI_IPA_CFG_TX_COMP_RING_BASE_ADDR_LO_SET(*msg_word,
+		(unsigned int)pdev->ipa_uc_tx_rsc.tx_comp_base.paddr);
+	msg_word++;
+	*msg_word = 0;
+	/* TX COMP RING BASE HI, NONE */
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_TX_COMP_RING_SIZE_SET(*msg_word,
+		(unsigned int)ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev));
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_TX_COMP_WR_IDX_ADDR_LO_SET(*msg_word,
+		(unsigned int)pdev->ipa_uc_tx_rsc.tx_comp_idx_paddr);
+	msg_word++;
+	*msg_word = 0;	/* TX COMP WR IDX ADDR HI left as 0 */
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_TX_CE_WR_IDX_ADDR_LO_SET(*msg_word,
+		(unsigned int)pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr);
+	msg_word++;
+	*msg_word = 0;	/* TX CE WR IDX ADDR HI left as 0 */
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_RX_IND_RING_BASE_ADDR_LO_SET(*msg_word,
+		(unsigned int)pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr);
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_RX_IND_RING_BASE_ADDR_HI_SET(*msg_word,
+		0);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_RX_IND_RING_SIZE_SET(*msg_word,
+		(unsigned int)ol_cfg_ipa_uc_rx_ind_ring_size(pdev->ctrl_pdev));
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_RX_IND_RD_IDX_ADDR_LO_SET(*msg_word,
+		(unsigned int)pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr);
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_RX_IND_RD_IDX_ADDR_HI_SET(*msg_word,
+		0);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_RX_IND_WR_IDX_ADDR_LO_SET(*msg_word,
+		(unsigned int)pdev->ipa_uc_rx_rsc.rx_rdy_idx_paddr);
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_RX_IND_WR_IDX_ADDR_HI_SET(*msg_word,
+		0);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_RX_RING2_BASE_ADDR_LO_SET(*msg_word,
+		(unsigned int)pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr);
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_RX_RING2_BASE_ADDR_HI_SET(*msg_word,
+		0);
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_RX_RING2_SIZE_SET(*msg_word,
+		(unsigned int)ol_cfg_ipa_uc_rx_ind_ring_size(pdev->ctrl_pdev));
+
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_RX_RING2_RD_IDX_ADDR_LO_SET(*msg_word,
+		(unsigned int)pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr);
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_RX_RING2_RD_IDX_ADDR_HI_SET(*msg_word,
+		0);
+
+	msg_word++;
+	*msg_word = 0;
+	/* NOTE(review): the RING2 WR index uses the same address as the
+	 * RING2 RD index (rx2_ipa_prc_done_idx.paddr) - looks like a
+	 * copy-paste; confirm whether a dedicated write-index address
+	 * was intended
+	 */
+	HTT_WDI_IPA_CFG_RX_RING2_WR_IDX_ADDR_LO_SET(*msg_word,
+		(unsigned int)pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr);
+	msg_word++;
+	*msg_word = 0;
+	HTT_WDI_IPA_CFG_RX_RING2_WR_IDX_ADDR_HI_SET(*msg_word,
+		0);
+
+	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
+			       htt_h2t_send_complete_free_netbuf,
+			       cdf_nbuf_data(msg),
+			       cdf_nbuf_len(msg),
+			       pdev->htc_endpoint,
+			       1); /* tag - not relevant here */
+
+	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
+
+	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
+
+	return A_OK;
+}
+#endif
+
+/**
+ * htt_h2t_ipa_uc_set_active() - Propagate WDI path enable/disable to firmware
+ * @pdev: handle to the HTT instance
+ * @uc_active: WDI UC path enable or not
+ * @is_tx: TX path or RX path
+ *
+ * Maps (uc_active, is_tx) to one of the four TX/RX RESUME/SUSPEND opcodes
+ * and sends it in an HTT_H2T_MSG_TYPE_WDI_IPA_OP_REQ message.
+ *
+ * Return: A_OK (0) if the message was handed to HTC,
+ *	-A_NO_MEMORY on allocation failure
+ */
+int htt_h2t_ipa_uc_set_active(struct htt_pdev_t *pdev,
+			      bool uc_active, bool is_tx)
+{
+	struct htt_htc_pkt *pkt;
+	cdf_nbuf_t msg;
+	uint32_t *msg_word;
+	uint8_t active_target = 0;
+
+	pkt = htt_htc_pkt_alloc(pdev);
+	if (!pkt)
+		return -A_NO_MEMORY;
+
+	/* show that this is not a tx frame download
+	 * (not required, but helpful)
+	 */
+	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
+	pkt->pdev_ctxt = NULL; /* not used during send-done callback */
+
+	/* reserve room for HTC header */
+	msg = cdf_nbuf_alloc(pdev->osdev,
+			     HTT_MSG_BUF_SIZE(HTT_WDI_IPA_OP_REQUEST_SZ),
+			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
+			     false);
+	if (!msg) {
+		htt_htc_pkt_free(pdev, pkt);
+		return -A_NO_MEMORY;
+	}
+	/* set the length of the message */
+	cdf_nbuf_put_tail(msg, HTT_WDI_IPA_OP_REQUEST_SZ);
+
+	/* fill in the message contents */
+	msg_word = (uint32_t *) cdf_nbuf_data(msg);
+
+	/* rewind beyond alignment pad to get to the HTC header reserved area */
+	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+
+	*msg_word = 0;
+	/* select the opcode from the (uc_active, is_tx) combination */
+	if (uc_active && is_tx)
+		active_target = HTT_WDI_IPA_OPCODE_TX_RESUME;
+	else if (!uc_active && is_tx)
+		active_target = HTT_WDI_IPA_OPCODE_TX_SUSPEND;
+	else if (uc_active && !is_tx)
+		active_target = HTT_WDI_IPA_OPCODE_RX_RESUME;
+	else if (!uc_active && !is_tx)
+		active_target = HTT_WDI_IPA_OPCODE_RX_SUSPEND;
+
+	HTT_WDI_IPA_OP_REQUEST_OP_CODE_SET(*msg_word, active_target);
+	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_WDI_IPA_OP_REQ);
+
+	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
+			       htt_h2t_send_complete_free_netbuf,
+			       cdf_nbuf_data(msg),
+			       cdf_nbuf_len(msg),
+			       pdev->htc_endpoint,
+			       1); /* tag - not relevant here */
+
+	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
+
+	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
+
+	return A_OK;
+}
+
+/**
+ * htt_h2t_ipa_uc_get_stats() - WDI UC state query request to firmware
+ * @pdev: handle to the HTT instance
+ *
+ * Sends an HTT_H2T_MSG_TYPE_WDI_IPA_OP_REQ message carrying the
+ * HTT_WDI_IPA_OPCODE_DBG_STATS opcode.
+ *
+ * Return: A_OK (0) if the message was handed to HTC,
+ *	-A_NO_MEMORY on allocation failure
+ */
+int htt_h2t_ipa_uc_get_stats(struct htt_pdev_t *pdev)
+{
+	struct htt_htc_pkt *pkt;
+	cdf_nbuf_t msg;
+	uint32_t *msg_word;
+
+	pkt = htt_htc_pkt_alloc(pdev);
+	if (!pkt)
+		return -A_NO_MEMORY;
+
+	/* show that this is not a tx frame download
+	 * (not required, but helpful)
+	 */
+	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
+	pkt->pdev_ctxt = NULL; /* not used during send-done callback */
+
+	/* reserve room for HTC header */
+	msg = cdf_nbuf_alloc(pdev->osdev,
+			     HTT_MSG_BUF_SIZE(HTT_WDI_IPA_OP_REQUEST_SZ),
+			     HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4,
+			     false);
+	if (!msg) {
+		htt_htc_pkt_free(pdev, pkt);
+		return -A_NO_MEMORY;
+	}
+	/* set the length of the message */
+	cdf_nbuf_put_tail(msg, HTT_WDI_IPA_OP_REQUEST_SZ);
+
+	/* fill in the message contents */
+	msg_word = (uint32_t *) cdf_nbuf_data(msg);
+
+	/* rewind beyond alignment pad to get to the HTC header reserved area */
+	cdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+
+	*msg_word = 0;
+	HTT_WDI_IPA_OP_REQUEST_OP_CODE_SET(*msg_word,
+					   HTT_WDI_IPA_OPCODE_DBG_STATS);
+	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_WDI_IPA_OP_REQ);
+
+	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
+			       htt_h2t_send_complete_free_netbuf,
+			       cdf_nbuf_data(msg),
+			       cdf_nbuf_len(msg),
+			       pdev->htc_endpoint,
+			       1); /* tag - not relevant here */
+
+	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
+
+	htc_send_pkt(pdev->htc_pdev, &pkt->htc_pkt);
+
+	return A_OK;
+}
+#endif /* IPA_OFFLOAD */
diff --git a/dp/htt/htt_internal.h b/dp/htt/htt_internal.h
new file mode 100644
index 000000000000..28b1e08b3da6
--- /dev/null
+++ b/dp/htt/htt_internal.h
@@ -0,0 +1,628 @@
+/*
+ * Copyright (c) 2011, 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _HTT_INTERNAL__H_
+#define _HTT_INTERNAL__H_
+
+#include <athdefs.h> /* A_STATUS */
+#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <cdf_util.h> /* cdf_assert */
+#include <htc_api.h> /* HTC_PACKET */
+
+#include <htt_types.h>
+
+#ifndef offsetof
+#define offsetof(type, field) ((size_t)(&((type *)0)->field))
+#endif
+
+#undef MS
+#define MS(_v, _f) (((_v) & _f ## _MASK) >> _f ## _LSB)
+#undef SM
+#define SM(_v, _f) (((_v) << _f ## _LSB) & _f ## _MASK)
+#undef WO
+#define WO(_f) ((_f ## _OFFSET) >> 2)
+
+#define GET_FIELD(_addr, _f) MS(*((A_UINT32 *)(_addr) + WO(_f)), _f)
+
+#include <rx_desc.h>
+#include <wal_rx_desc.h> /* struct rx_attention, etc */
+
+/*
+ * FW rx descriptor placed at the front of the host rx descriptor;
+ * the union pads it out to a full 32-bit word.
+ */
+struct htt_host_fw_desc_base {
+	union {
+		struct fw_rx_desc_base val;
+		A_UINT32 dummy_pad;     /* make sure it is DWORD aligned */
+	} u;
+};
+
+/*
+ * This struct defines the basic descriptor information used by host,
+ * which is written either by the 11ac HW MAC into the host Rx data
+ * buffer ring directly or generated by FW and copied from Rx indication
+ */
+#define RX_HTT_HDR_STATUS_LEN 64
+struct htt_host_rx_desc_base {
+	struct htt_host_fw_desc_base fw_desc;	/* FW-written descriptor */
+	struct rx_attention attention;
+	struct rx_frag_info frag_info;
+	struct rx_mpdu_start mpdu_start;
+	struct rx_msdu_start msdu_start;
+	struct rx_msdu_end msdu_end;
+	struct rx_mpdu_end mpdu_end;
+	struct rx_ppdu_start ppdu_start;
+	struct rx_ppdu_end ppdu_end;
+	/* area programmed as the 802.11 header offset in the rx ring cfg */
+	char rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
+};
+
+#define RX_STD_DESC_ATTN_OFFSET \
+ (offsetof(struct htt_host_rx_desc_base, attention))
+#define RX_STD_DESC_FRAG_INFO_OFFSET \
+ (offsetof(struct htt_host_rx_desc_base, frag_info))
+#define RX_STD_DESC_MPDU_START_OFFSET \
+ (offsetof(struct htt_host_rx_desc_base, mpdu_start))
+#define RX_STD_DESC_MSDU_START_OFFSET \
+ (offsetof(struct htt_host_rx_desc_base, msdu_start))
+#define RX_STD_DESC_MSDU_END_OFFSET \
+ (offsetof(struct htt_host_rx_desc_base, msdu_end))
+#define RX_STD_DESC_MPDU_END_OFFSET \
+ (offsetof(struct htt_host_rx_desc_base, mpdu_end))
+#define RX_STD_DESC_PPDU_START_OFFSET \
+ (offsetof(struct htt_host_rx_desc_base, ppdu_start))
+#define RX_STD_DESC_PPDU_END_OFFSET \
+ (offsetof(struct htt_host_rx_desc_base, ppdu_end))
+#define RX_STD_DESC_HDR_STATUS_OFFSET \
+ (offsetof(struct htt_host_rx_desc_base, rx_hdr_status))
+
+#define RX_STD_DESC_FW_MSDU_OFFSET \
+ (offsetof(struct htt_host_rx_desc_base, fw_desc))
+
+#define RX_STD_DESC_SIZE (sizeof(struct htt_host_rx_desc_base))
+
+#define RX_DESC_ATTN_OFFSET32 (RX_STD_DESC_ATTN_OFFSET >> 2)
+#define RX_DESC_FRAG_INFO_OFFSET32 (RX_STD_DESC_FRAG_INFO_OFFSET >> 2)
+#define RX_DESC_MPDU_START_OFFSET32 (RX_STD_DESC_MPDU_START_OFFSET >> 2)
+#define RX_DESC_MSDU_START_OFFSET32 (RX_STD_DESC_MSDU_START_OFFSET >> 2)
+#define RX_DESC_MSDU_END_OFFSET32 (RX_STD_DESC_MSDU_END_OFFSET >> 2)
+#define RX_DESC_MPDU_END_OFFSET32 (RX_STD_DESC_MPDU_END_OFFSET >> 2)
+#define RX_DESC_PPDU_START_OFFSET32 (RX_STD_DESC_PPDU_START_OFFSET >> 2)
+#define RX_DESC_PPDU_END_OFFSET32 (RX_STD_DESC_PPDU_END_OFFSET >> 2)
+#define RX_DESC_HDR_STATUS_OFFSET32 (RX_STD_DESC_HDR_STATUS_OFFSET >> 2)
+
+#define RX_STD_DESC_SIZE_DWORD (RX_STD_DESC_SIZE >> 2)
+
+/*
+ * Make sure there is a minimum headroom provided in the rx netbufs
+ * for use by the OS shim and OS and rx data consumers.
+ */
+#define HTT_RX_BUF_OS_MIN_HEADROOM 32
+#define HTT_RX_STD_DESC_RESERVATION \
+ ((HTT_RX_BUF_OS_MIN_HEADROOM > RX_STD_DESC_SIZE) ? \
+ HTT_RX_BUF_OS_MIN_HEADROOM : RX_STD_DESC_SIZE)
+#define HTT_RX_DESC_RESERVATION32 \
+ (HTT_RX_STD_DESC_RESERVATION >> 2)
+
+#define HTT_RX_DESC_ALIGN_MASK 7 /* 8-byte alignment */
+#ifdef DEBUG_RX_RING_BUFFER
+#define HTT_RX_RING_BUFF_DBG_LIST 1024
+struct rx_buf_debug {
+ uint32_t paddr;
+ void *vaddr;
+ bool in_use;
+};
+#endif
+/**
+ * htt_rx_desc() - get the HTT rx descriptor stored in an msdu's headroom
+ * @msdu: rx network buffer
+ *
+ * The descriptor lives at the start of the netbuf head, rounded up to the
+ * 8-byte alignment given by HTT_RX_DESC_ALIGN_MASK.
+ *
+ * Return: pointer to the htt_host_rx_desc_base within the msdu headroom
+ */
+static inline struct htt_host_rx_desc_base *htt_rx_desc(cdf_nbuf_t msdu)
+{
+	return (struct htt_host_rx_desc_base *)
+	       (((size_t) (cdf_nbuf_head(msdu) + HTT_RX_DESC_ALIGN_MASK)) &
+		~HTT_RX_DESC_ALIGN_MASK);
+}
+
+#if defined(FEATURE_LRO)
+/**
+ * htt_print_rx_desc_lro() - print LRO information in the rx
+ * descriptor
+ * @rx_desc: HTT rx descriptor
+ *
+ * Prints the LRO related fields in the HTT rx descriptor
+ *
+ * Return: none
+ */
+/* Debug helper: dumps each LRO-related field via cdf_print; no side
+ * effects on the descriptor.
+ */
+static inline void htt_print_rx_desc_lro(struct htt_host_rx_desc_base *rx_desc)
+{
+	cdf_print
+		("----------------------RX DESC LRO----------------------\n");
+	cdf_print("msdu_end.lro_eligible:0x%x\n",
+		 rx_desc->msdu_end.lro_eligible);
+	cdf_print("msdu_start.tcp_only_ack:0x%x\n",
+		 rx_desc->msdu_start.tcp_only_ack);
+	cdf_print("msdu_end.tcp_udp_chksum:0x%x\n",
+		 rx_desc->msdu_end.tcp_udp_chksum);
+	cdf_print("msdu_end.tcp_seq_number:0x%x\n",
+		 rx_desc->msdu_end.tcp_seq_number);
+	cdf_print("msdu_end.tcp_ack_number:0x%x\n",
+		 rx_desc->msdu_end.tcp_ack_number);
+	cdf_print("msdu_start.tcp_proto:0x%x\n",
+		 rx_desc->msdu_start.tcp_proto);
+	cdf_print("msdu_start.ipv6_proto:0x%x\n",
+		 rx_desc->msdu_start.ipv6_proto);
+	cdf_print("msdu_start.ipv4_proto:0x%x\n",
+		 rx_desc->msdu_start.ipv4_proto);
+	cdf_print("msdu_start.l3_offset:0x%x\n",
+		 rx_desc->msdu_start.l3_offset);
+	cdf_print("msdu_start.l4_offset:0x%x\n",
+		 rx_desc->msdu_start.l4_offset);
+	cdf_print("msdu_start.flow_id_toeplitz:0x%x\n",
+		 rx_desc->msdu_start.flow_id_toeplitz);
+	cdf_print
+		("---------------------------------------------------------\n");
+}
+
+/**
+ * htt_rx_extract_lro_info() - extract LRO information from the rx
+ * descriptor
+ * @msdu: network buffer
+ * @rx_desc: HTT rx descriptor
+ *
+ * Extracts the LRO related fields from the HTT rx descriptor
+ * and stores them in the network buffer's control block.  The TCP/IP
+ * fields are copied only when the msdu is marked LRO-eligible.
+ *
+ * Return: none
+ */
+static inline void htt_rx_extract_lro_info(cdf_nbuf_t msdu,
+	struct htt_host_rx_desc_base *rx_desc)
+{
+	NBUF_LRO_ELIGIBLE(msdu) = rx_desc->msdu_end.lro_eligible;
+	if (rx_desc->msdu_end.lro_eligible) {
+		NBUF_TCP_PURE_ACK(msdu) = rx_desc->msdu_start.tcp_only_ack;
+		NBUF_TCP_CHKSUM(msdu) = rx_desc->msdu_end.tcp_udp_chksum;
+		NBUF_TCP_SEQ_NUM(msdu) = rx_desc->msdu_end.tcp_seq_number;
+		NBUF_TCP_ACK_NUM(msdu) = rx_desc->msdu_end.tcp_ack_number;
+		NBUF_TCP_WIN(msdu) = rx_desc->msdu_end.window_size;
+		NBUF_TCP_PROTO(msdu) = rx_desc->msdu_start.tcp_proto;
+		NBUF_IPV6_PROTO(msdu) = rx_desc->msdu_start.ipv6_proto;
+		NBUF_IP_OFFSET(msdu) = rx_desc->msdu_start.l3_offset;
+		NBUF_TCP_OFFSET(msdu) = rx_desc->msdu_start.l4_offset;
+		NBUF_FLOW_ID_TOEPLITZ(msdu) =
+			rx_desc->msdu_start.flow_id_toeplitz;
+	}
+}
+#else
+/* FEATURE_LRO disabled: no-op stubs */
+static inline void htt_print_rx_desc_lro(struct htt_host_rx_desc_base *rx_desc)
+{}
+static inline void htt_rx_extract_lro_info(cdf_nbuf_t msdu,
+	struct htt_host_rx_desc_base *rx_desc) {}
+#endif /* FEATURE_LRO */
+
+/**
+ * htt_print_rx_desc() - debug dump of an rx descriptor as raw 32-bit words
+ * @rx_desc: host rx descriptor to print
+ *
+ * Prints each sub-struct of the HW rx descriptor (attention, frag_info,
+ * mpdu_start, msdu_start, msdu_end, mpdu_end, ppdu_start, ppdu_end) by
+ * reinterpreting it as an array of uint32_t.
+ * NOTE(review): word counts here (3/3/5/10/22) are hard-coded -- assumed to
+ * match the struct layouts in rx_desc.h; confirm if those structs change.
+ */
+static inline void htt_print_rx_desc(struct htt_host_rx_desc_base *rx_desc)
+{
+ cdf_print
+ ("----------------------RX DESC----------------------------\n");
+ cdf_print("attention: %#010x\n",
+ (unsigned int)(*(uint32_t *) &rx_desc->attention));
+ cdf_print("frag_info: %#010x\n",
+ (unsigned int)(*(uint32_t *) &rx_desc->frag_info));
+ cdf_print("mpdu_start: %#010x %#010x %#010x\n",
+ (unsigned int)(((uint32_t *) &rx_desc->mpdu_start)[0]),
+ (unsigned int)(((uint32_t *) &rx_desc->mpdu_start)[1]),
+ (unsigned int)(((uint32_t *) &rx_desc->mpdu_start)[2]));
+ cdf_print("msdu_start: %#010x %#010x %#010x\n",
+ (unsigned int)(((uint32_t *) &rx_desc->msdu_start)[0]),
+ (unsigned int)(((uint32_t *) &rx_desc->msdu_start)[1]),
+ (unsigned int)(((uint32_t *) &rx_desc->msdu_start)[2]));
+ cdf_print("msdu_end: %#010x %#010x %#010x %#010x %#010x\n",
+ (unsigned int)(((uint32_t *) &rx_desc->msdu_end)[0]),
+ (unsigned int)(((uint32_t *) &rx_desc->msdu_end)[1]),
+ (unsigned int)(((uint32_t *) &rx_desc->msdu_end)[2]),
+ (unsigned int)(((uint32_t *) &rx_desc->msdu_end)[3]),
+ (unsigned int)(((uint32_t *) &rx_desc->msdu_end)[4]));
+ cdf_print("mpdu_end: %#010x\n",
+ (unsigned int)(*(uint32_t *) &rx_desc->mpdu_end));
+ cdf_print("ppdu_start: " "%#010x %#010x %#010x %#010x %#010x\n"
+ "%#010x %#010x %#010x %#010x %#010x\n",
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_start)[0]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_start)[1]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_start)[2]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_start)[3]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_start)[4]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_start)[5]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_start)[6]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_start)[7]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_start)[8]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_start)[9]));
+ cdf_print("ppdu_end:" "%#010x %#010x %#010x %#010x %#010x\n"
+ "%#010x %#010x %#010x %#010x %#010x\n"
+ "%#010x,%#010x %#010x %#010x %#010x\n"
+ "%#010x %#010x %#010x %#010x %#010x\n" "%#010x %#010x\n",
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[0]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[1]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[2]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[3]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[4]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[5]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[6]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[7]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[8]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[9]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[10]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[11]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[12]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[13]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[14]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[15]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[16]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[17]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[18]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[19]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[20]),
+ (unsigned int)(((uint32_t *) &rx_desc->ppdu_end)[21]));
+ cdf_print
+ ("---------------------------------------------------------\n");
+}
+
+#ifndef HTT_ASSERT_LEVEL
+#define HTT_ASSERT_LEVEL 3
+#endif
+
+#define HTT_ASSERT_ALWAYS(condition) cdf_assert_always((condition))
+
+#define HTT_ASSERT0(condition) cdf_assert((condition))
+#if HTT_ASSERT_LEVEL > 0
+#define HTT_ASSERT1(condition) cdf_assert((condition))
+#else
+#define HTT_ASSERT1(condition)
+#endif
+
+#if HTT_ASSERT_LEVEL > 1
+#define HTT_ASSERT2(condition) cdf_assert((condition))
+#else
+#define HTT_ASSERT2(condition)
+#endif
+
+#if HTT_ASSERT_LEVEL > 2
+#define HTT_ASSERT3(condition) cdf_assert((condition))
+#else
+#define HTT_ASSERT3(condition)
+#endif
+
+#define HTT_MAC_ADDR_LEN 6
+
+/*
+ * HTT_MAX_SEND_QUEUE_DEPTH -
+ * How many packets HTC should allow to accumulate in a send queue
+ * before calling the EpSendFull callback to see whether to retain
+ * or drop packets.
+ * This is not relevant for LL, where tx descriptors should be immediately
+ * downloaded to the target.
+ * This is not very relevant for HL either, since it is anticipated that
+ * the HL tx download scheduler will not work this far in advance - rather,
+ * it will make its decisions just-in-time, so it can be responsive to
+ * changing conditions.
+ * Hence, this queue depth threshold spec is mostly just a formality.
+ */
+#define HTT_MAX_SEND_QUEUE_DEPTH 64
+
+#define IS_PWR2(value) (((value) ^ ((value)-1)) == ((value) << 1) - 1)
+
+/* FIX THIS
+ * Should be: sizeof(struct htt_host_rx_desc) + max rx MSDU size,
+ * rounded up to a cache line size.
+ */
+#define HTT_RX_BUF_SIZE 1920
+/*
+ * DMA_MAP expects the buffer to be an integral number of cache lines.
+ * Rather than checking the actual cache line size, this code makes a
+ * conservative estimate of what the cache line size could be.
+ */
+#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
+#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
+
+#ifdef BIG_ENDIAN_HOST
+/*
+ * big-endian: bytes within a 4-byte "word" are swapped:
+ * pre-swap post-swap
+ * index index
+ * 0 3
+ * 1 2
+ * 2 1
+ * 3 0
+ * 4 7
+ * 5 6
+ * etc.
+ * To compute the post-swap index from the pre-swap index, compute
+ * the byte offset for the start of the word (index & ~0x3) and add
+ * the swapped byte offset within the word (3 - (index & 0x3)).
+ */
+#define HTT_ENDIAN_BYTE_IDX_SWAP(idx) (((idx) & ~0x3) + (3 - ((idx) & 0x3)))
+#else
+/* little-endian: no adjustment needed */
+#define HTT_ENDIAN_BYTE_IDX_SWAP(idx) idx
+#endif
+
+#define HTT_TX_MUTEX_INIT(_mutex) \
+ cdf_spinlock_init(_mutex)
+
+#define HTT_TX_MUTEX_ACQUIRE(_mutex) \
+ cdf_spin_lock_bh(_mutex)
+
+#define HTT_TX_MUTEX_RELEASE(_mutex) \
+ cdf_spin_unlock_bh(_mutex)
+
+#define HTT_TX_MUTEX_DESTROY(_mutex) \
+ cdf_spinlock_destroy(_mutex)
+
+#define HTT_TX_DESC_PADDR(_pdev, _tx_desc_vaddr) \
+ ((_pdev)->tx_descs.pool_paddr + (uint32_t) \
+ ((char *)(_tx_desc_vaddr) - \
+ (char *)((_pdev)->tx_descs.pool_vaddr)))
+
+#ifdef ATH_11AC_TXCOMPACT
+
+#define HTT_TX_NBUF_QUEUE_MUTEX_INIT(_pdev) \
+ cdf_spinlock_init(&_pdev->txnbufq_mutex)
+
+#define HTT_TX_NBUF_QUEUE_MUTEX_DESTROY(_pdev) \
+ HTT_TX_MUTEX_DESTROY(&_pdev->txnbufq_mutex)
+
+#define HTT_TX_NBUF_QUEUE_REMOVE(_pdev, _msdu) do { \
+ HTT_TX_MUTEX_ACQUIRE(&_pdev->txnbufq_mutex); \
+ _msdu = cdf_nbuf_queue_remove(&_pdev->txnbufq);\
+ HTT_TX_MUTEX_RELEASE(&_pdev->txnbufq_mutex); \
+ } while (0)
+
+#define HTT_TX_NBUF_QUEUE_ADD(_pdev, _msdu) do { \
+ HTT_TX_MUTEX_ACQUIRE(&_pdev->txnbufq_mutex); \
+ cdf_nbuf_queue_add(&_pdev->txnbufq, _msdu); \
+ HTT_TX_MUTEX_RELEASE(&_pdev->txnbufq_mutex); \
+ } while (0)
+
+#define HTT_TX_NBUF_QUEUE_INSERT_HEAD(_pdev, _msdu) do { \
+ HTT_TX_MUTEX_ACQUIRE(&_pdev->txnbufq_mutex); \
+ cdf_nbuf_queue_insert_head(&_pdev->txnbufq, _msdu);\
+ HTT_TX_MUTEX_RELEASE(&_pdev->txnbufq_mutex); \
+ } while (0)
+#else
+
+#define HTT_TX_NBUF_QUEUE_MUTEX_INIT(_pdev)
+#define HTT_TX_NBUF_QUEUE_REMOVE(_pdev, _msdu)
+#define HTT_TX_NBUF_QUEUE_ADD(_pdev, _msdu)
+#define HTT_TX_NBUF_QUEUE_INSERT_HEAD(_pdev, _msdu)
+#define HTT_TX_NBUF_QUEUE_MUTEX_DESTROY(_pdev)
+
+#endif
+
+void htt_tx_resume_handler(void *);
+#ifdef ATH_11AC_TXCOMPACT
+#define HTT_TX_SCHED htt_tx_sched
+#else
+#define HTT_TX_SCHED(pdev) /* no-op */
+#endif
+
+int htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems);
+
+void htt_tx_detach(struct htt_pdev_t *pdev);
+
+int htt_rx_attach(struct htt_pdev_t *pdev);
+
+void htt_rx_detach(struct htt_pdev_t *pdev);
+
+int htt_htc_attach(struct htt_pdev_t *pdev);
+
+void htt_t2h_msg_handler(void *context, HTC_PACKET *pkt);
+
+void htt_h2t_send_complete(void *context, HTC_PACKET *pkt);
+
+A_STATUS htt_h2t_ver_req_msg(struct htt_pdev_t *pdev);
+
+#if defined(HELIUMPLUS_PADDR64)
+A_STATUS
+htt_h2t_frag_desc_bank_cfg_msg(struct htt_pdev_t *pdev);
+#endif /* defined(HELIUMPLUS_PADDR64) */
+
+extern A_STATUS htt_h2t_rx_ring_cfg_msg_ll(struct htt_pdev_t *pdev);
+extern A_STATUS (*htt_h2t_rx_ring_cfg_msg)(struct htt_pdev_t *pdev);
+
+HTC_SEND_FULL_ACTION htt_h2t_full(void *context, HTC_PACKET *pkt);
+
+struct htt_htc_pkt *htt_htc_pkt_alloc(struct htt_pdev_t *pdev);
+
+void htt_htc_pkt_free(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt);
+
+void htt_htc_pkt_pool_free(struct htt_pdev_t *pdev);
+
+#ifdef ATH_11AC_TXCOMPACT
+void
+htt_htc_misc_pkt_list_add(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt);
+
+void htt_htc_misc_pkt_pool_free(struct htt_pdev_t *pdev);
+#endif
+
+void htt_htc_disable_aspm(void);
+
+int
+htt_rx_hash_list_insert(struct htt_pdev_t *pdev, uint32_t paddr,
+ cdf_nbuf_t netbuf);
+
+cdf_nbuf_t htt_rx_hash_list_lookup(struct htt_pdev_t *pdev, uint32_t paddr);
+
+#ifdef IPA_OFFLOAD
+int
+htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
+ unsigned int uc_tx_buf_sz,
+ unsigned int uc_tx_buf_cnt,
+ unsigned int uc_tx_partition_base);
+
+int
+htt_rx_ipa_uc_attach(struct htt_pdev_t *pdev, unsigned int rx_ind_ring_size);
+
+int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev);
+
+int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev);
+#else
+/**
+ * htt_tx_ipa_uc_attach() - attach htt ipa uc tx resource
+ * @pdev: htt context
+ * @uc_tx_buf_sz: single tx buffer size
+ * @uc_tx_buf_cnt: total tx buffer count
+ * @uc_tx_partition_base: tx buffer partition start
+ *
+ * Return: 0 success
+ */
+static inline int
+htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
+ unsigned int uc_tx_buf_sz,
+ unsigned int uc_tx_buf_cnt,
+ unsigned int uc_tx_partition_base)
+{
+ return 0;
+}
+
+/**
+ * htt_rx_ipa_uc_attach() - attach htt ipa uc rx resource
+ * @pdev: htt context
+ * @rx_ind_ring_size: rx ring size
+ *
+ * Return: 0 success
+ */
+static inline int
+htt_rx_ipa_uc_attach(struct htt_pdev_t *pdev, unsigned int rx_ind_ring_size)
+{
+ return 0;
+}
+
+/* htt_tx_ipa_uc_detach() - no-op when IPA_OFFLOAD is disabled; returns 0 */
+static inline int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev)
+{
+ return 0;
+}
+
+/* htt_rx_ipa_uc_detach() - no-op when IPA_OFFLOAD is disabled; returns 0 */
+static inline int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev)
+{
+ return 0;
+}
+#endif /* IPA_OFFLOAD */
+#ifdef DEBUG_RX_RING_BUFFER
+/**
+ * htt_rx_dbg_rxbuf_init() - init debug rx buff list
+ * @pdev: pdev handle
+ *
+ * Return: none
+ */
+static inline
+void htt_rx_dbg_rxbuf_init(struct htt_pdev_t *pdev)
+{
+ /*
+ * NOTE(review): the list entries are not explicitly zeroed here;
+ * in_use/paddr/vaddr are assumed cleared by cdf_mem_malloc --
+ * confirm that it zero-fills allocations.
+ */
+ pdev->rx_buff_list = cdf_mem_malloc(
+ HTT_RX_RING_BUFF_DBG_LIST *
+ sizeof(struct rx_buf_debug));
+ /* on failure, assert in debug builds; later calls guard against NULL */
+ if (!pdev->rx_buff_list) {
+ cdf_print("HTT: debug RX buffer allocation failed\n");
+ CDF_ASSERT(0);
+ }
+}
+/**
+ * htt_rx_dbg_rxbuf_set() - set element of rx buff list
+ * @pdev: pdev handle
+ * @paddr: physical address of netbuf
+ * @rx_netbuf: received netbuf
+ *
+ * Return: none
+ */
+static inline
+void htt_rx_dbg_rxbuf_set(struct htt_pdev_t *pdev,
+ uint32_t paddr,
+ cdf_nbuf_t rx_netbuf)
+{
+ if (pdev->rx_buff_list) {
+ pdev->rx_buff_list[pdev->rx_buff_index].paddr =
+ paddr;
+ pdev->rx_buff_list[pdev->rx_buff_index].in_use =
+ true;
+ pdev->rx_buff_list[pdev->rx_buff_index].vaddr =
+ rx_netbuf;
+ /* stash the slot index in the nbuf so rxbuf_reset can find it */
+ NBUF_MAP_ID(rx_netbuf) = pdev->rx_buff_index;
+ /* circular list: wrap the write index at the list size */
+ if (++pdev->rx_buff_index ==
+ HTT_RX_RING_BUFF_DBG_LIST)
+ pdev->rx_buff_index = 0;
+ }
+}
+/**
+ * htt_rx_dbg_rxbuf_reset() - reset element of rx buff list
+ * @pdev: pdev handle
+ * @netbuf: rx sk_buff
+ * Return: none
+ */
+static inline
+void htt_rx_dbg_rxbuf_reset(struct htt_pdev_t *pdev,
+ cdf_nbuf_t netbuf)
+{
+ uint32_t index;
+
+ if (pdev->rx_buff_list) {
+ /* slot index was stashed in the nbuf by htt_rx_dbg_rxbuf_set */
+ index = NBUF_MAP_ID(netbuf);
+ if (index < HTT_RX_RING_BUFF_DBG_LIST) {
+ pdev->rx_buff_list[index].in_use =
+ false;
+ pdev->rx_buff_list[index].paddr = 0;
+ pdev->rx_buff_list[index].vaddr = NULL;
+ }
+ }
+}
+/**
+ * htt_rx_dbg_rxbuf_deinit() - deinit debug rx buff list
+ * @pdev: pdev handle
+ *
+ * Frees the debug list allocated by htt_rx_dbg_rxbuf_init() and clears
+ * the pointer, so a repeated deinit cannot double-free and a late
+ * htt_rx_dbg_rxbuf_set()/reset() call cannot touch freed memory
+ * (those helpers already NULL-check rx_buff_list).
+ *
+ * Return: none
+ */
+static inline
+void htt_rx_dbg_rxbuf_deinit(struct htt_pdev_t *pdev)
+{
+ if (pdev->rx_buff_list)
+ cdf_mem_free(pdev->rx_buff_list);
+ /* store NULL immediately after free (CERT MEM01-C) */
+ pdev->rx_buff_list = NULL;
+}
+#else
+/* DEBUG_RX_RING_BUFFER disabled: no-op stubs for the rx buff debug list */
+static inline
+void htt_rx_dbg_rxbuf_init(struct htt_pdev_t *pdev)
+{
+ return;
+}
+static inline
+void htt_rx_dbg_rxbuf_set(struct htt_pdev_t *pdev,
+ uint32_t paddr,
+ cdf_nbuf_t rx_netbuf)
+{
+ return;
+}
+static inline
+void htt_rx_dbg_rxbuf_reset(struct htt_pdev_t *pdev,
+ cdf_nbuf_t netbuf)
+{
+ return;
+}
+static inline
+void htt_rx_dbg_rxbuf_deinit(struct htt_pdev_t *pdev)
+{
+ return;
+}
+#endif
+#endif /* _HTT_INTERNAL__H_ */
diff --git a/dp/htt/htt_rx.c b/dp/htt/htt_rx.c
new file mode 100644
index 000000000000..2eb0d68cfaca
--- /dev/null
+++ b/dp/htt/htt_rx.c
@@ -0,0 +1,2489 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file htt_rx.c
+ * @brief Implement receive aspects of HTT.
+ * @details
+ * This file contains three categories of HTT rx code:
+ * 1. An abstraction of the rx descriptor, to hide the
+ * differences between the HL vs. LL rx descriptor.
+ * 2. Functions for providing access to the (series of)
+ * rx descriptor(s) and rx frame(s) associated with
+ * an rx indication message.
+ * 3. Functions for setting up and using the MAC DMA
+ * rx ring (applies to LL only).
+ */
+
+#include <cdf_memory.h> /* cdf_mem_malloc,free, etc. */
+#include <cdf_types.h> /* cdf_print, bool */
+#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+#include <cdf_softirq_timer.h> /* cdf_softirq_timer_free */
+
+#include <htt.h> /* HTT_HL_RX_DESC_SIZE */
+#include <ol_cfg.h>
+#include <ol_rx.h>
+#include <ol_htt_rx_api.h>
+#include <htt_internal.h> /* HTT_ASSERT, htt_pdev_t, HTT_RX_BUF_SIZE */
+#include "regtable.h"
+
+#include <cds_ieee80211_common.h> /* ieee80211_frame, ieee80211_qoscntl */
+#include <cds_ieee80211_defines.h> /* ieee80211_rx_status */
+
+#ifdef DEBUG_DMA_DONE
+#include <asm/barrier.h>
+#include <wma_api.h>
+#endif
+
+/* AR9888v1 WORKAROUND for EV#112367 */
+/* FIX THIS - remove this WAR when the bug is fixed */
+#define PEREGRINE_1_0_ZERO_LEN_PHY_ERR_WAR
+
+/*--- setup / tear-down functions -------------------------------------------*/
+
+#ifndef HTT_RX_RING_SIZE_MIN
+#define HTT_RX_RING_SIZE_MIN 128 /* slightly > than one large A-MPDU */
+#endif
+
+#ifndef HTT_RX_RING_SIZE_MAX
+#define HTT_RX_RING_SIZE_MAX 2048 /* ~20 ms @ 1 Gbps of 1500B MSDUs */
+#endif
+
+#ifndef HTT_RX_AVG_FRM_BYTES
+#define HTT_RX_AVG_FRM_BYTES 1000
+#endif
+
+#ifndef HTT_RX_HOST_LATENCY_MAX_MS
+#define HTT_RX_HOST_LATENCY_MAX_MS 20 /* ms */ /* very conservative */
+#endif
+
+#ifndef HTT_RX_HOST_LATENCY_WORST_LIKELY_MS
+#define HTT_RX_HOST_LATENCY_WORST_LIKELY_MS 10 /* ms */ /* conservative */
+#endif
+
+#ifndef HTT_RX_RING_REFILL_RETRY_TIME_MS
+#define HTT_RX_RING_REFILL_RETRY_TIME_MS 50
+#endif
+
+/*--- RX In Order Definitions ------------------------------------------------*/
+
+/* Number of buckets in the hash table */
+#define RX_NUM_HASH_BUCKETS 1024 /* This should always be a power of 2 */
+#define RX_NUM_HASH_BUCKETS_MASK (RX_NUM_HASH_BUCKETS - 1)
+
+/* Number of hash entries allocated per bucket */
+#define RX_ENTRIES_SIZE 10
+
+#define RX_HASH_FUNCTION(a) (((a >> 14) ^ (a >> 4)) & RX_NUM_HASH_BUCKETS_MASK)
+
+#ifdef RX_HASH_DEBUG_LOG
+#define RX_HASH_LOG(x) x
+#else
+#define RX_HASH_LOG(x) /* no-op */
+#endif
+
+/* De-initialization function of the rx buffer hash table. This function will
+ free up the hash table, which includes freeing all the pending rx buffers
+ still parked in it. */
+void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
+{
+
+ uint32_t i;
+ struct htt_rx_hash_entry *hash_entry;
+ struct htt_list_node *list_iter = NULL;
+
+ if (NULL == pdev->rx_ring.hash_table)
+ return;
+ for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
+ /* Free the hash entries in hash bucket i */
+ list_iter = pdev->rx_ring.hash_table[i].listhead.next;
+ while (list_iter != &pdev->rx_ring.hash_table[i].listhead) {
+ /* recover the entry from its embedded list node */
+ hash_entry =
+ (struct htt_rx_hash_entry *)((char *)list_iter -
+ pdev->rx_ring.
+ listnode_offset);
+ if (hash_entry->netbuf) {
+#ifdef DEBUG_DMA_DONE
+ cdf_nbuf_unmap(pdev->osdev, hash_entry->netbuf,
+ CDF_DMA_BIDIRECTIONAL);
+#else
+ cdf_nbuf_unmap(pdev->osdev, hash_entry->netbuf,
+ CDF_DMA_FROM_DEVICE);
+#endif
+ cdf_nbuf_free(hash_entry->netbuf);
+ hash_entry->paddr = 0;
+ }
+ /* advance BEFORE freeing: hash_entry owns this node */
+ list_iter = list_iter->next;
+
+ /* entries carved from the per-bucket pool are freed below */
+ if (!hash_entry->fromlist)
+ cdf_mem_free(hash_entry);
+ }
+
+ cdf_mem_free(pdev->rx_ring.hash_table[i].entries);
+
+ }
+ cdf_mem_free(pdev->rx_ring.hash_table);
+ pdev->rx_ring.hash_table = NULL;
+}
+
+/* returns true if this MSDU is the first MSDU of its MPDU (LL rx desc:
+ * first_msdu bit in word 4 of msdu_end, read via mask+shift) */
+static bool
+htt_rx_msdu_first_msdu_flag_ll(htt_pdev_handle pdev, void *msdu_desc)
+{
+ struct htt_host_rx_desc_base *rx_desc =
+ (struct htt_host_rx_desc_base *)msdu_desc;
+ return (bool)
+ (((*(((uint32_t *) &rx_desc->msdu_end) + 4)) &
+ RX_MSDU_END_4_FIRST_MSDU_MASK) >>
+ RX_MSDU_END_4_FIRST_MSDU_LSB);
+}
+
+/* compute the rx ring size from configured max throughput and worst-case
+ * host latency, clamped to [HTT_RX_RING_SIZE_MIN, HTT_RX_RING_SIZE_MAX]
+ * and rounded to a power of 2 (the ring index math relies on size_mask) */
+static int htt_rx_ring_size(struct htt_pdev_t *pdev)
+{
+ int size;
+
+ /*
+ * It is expected that the host CPU will typically be able to service
+ * the rx indication from one A-MPDU before the rx indication from
+ * the subsequent A-MPDU happens, roughly 1-2 ms later.
+ * However, the rx ring should be sized very conservatively, to
+ * accommodate the worst reasonable delay before the host CPU services
+ * a rx indication interrupt.
+ * The rx ring need not be kept full of empty buffers. In theory,
+ * the htt host SW can dynamically track the low-water mark in the
+ * rx ring, and dynamically adjust the level to which the rx ring
+ * is filled with empty buffers, to dynamically meet the desired
+ * low-water mark.
+ * In contrast, it's difficult to resize the rx ring itself, once
+ * it's in use.
+ * Thus, the ring itself should be sized very conservatively, while
+ * the degree to which the ring is filled with empty buffers should
+ * be sized moderately conservatively.
+ */
+ size =
+ ol_cfg_max_thruput_mbps(pdev->ctrl_pdev) *
+ 1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ /
+ (8 * HTT_RX_AVG_FRM_BYTES) * HTT_RX_HOST_LATENCY_MAX_MS;
+
+ if (size < HTT_RX_RING_SIZE_MIN)
+ size = HTT_RX_RING_SIZE_MIN;
+ else if (size > HTT_RX_RING_SIZE_MAX)
+ size = HTT_RX_RING_SIZE_MAX;
+
+ size = cdf_get_pwr2(size);
+ return size;
+}
+
+/**
+ * htt_rx_ring_fill_level() - target number of filled rx ring slots
+ * @pdev: HTT pdev handle
+ *
+ * Same sizing formula as htt_rx_ring_size(), but using the worst-LIKELY
+ * host latency rather than the absolute worst case.
+ *
+ * Return: desired fill level, capped at ring size - 1
+ */
+static int htt_rx_ring_fill_level(struct htt_pdev_t *pdev)
+{
+ int size;
+
+ /*
+ * BUG FIX: the divisor (8 * HTT_RX_AVG_FRM_BYTES) must be
+ * parenthesized. Without parentheses, precedence turns
+ * "/ 8 * HTT_RX_AVG_FRM_BYTES" into divide-by-8 then MULTIPLY by
+ * the average frame size, inflating the result by a factor of
+ * HTT_RX_AVG_FRM_BYTES^2 so the clamp below always yields
+ * size - 1. htt_rx_ring_size() uses the parenthesized form.
+ */
+ size = ol_cfg_max_thruput_mbps(pdev->ctrl_pdev) *
+ 1000 /* 1e6 bps/mbps / 1e3 ms per sec = 1000 */ /
+ (8 * HTT_RX_AVG_FRM_BYTES) *
+ HTT_RX_HOST_LATENCY_WORST_LIKELY_MS;
+ /*
+ * Make sure the fill level is at least 1 less than the ring size.
+ * Leaving 1 element empty allows the SW to easily distinguish
+ * between a full ring vs. an empty ring.
+ */
+ if (size >= pdev->rx_ring.size)
+ size = pdev->rx_ring.size - 1;
+
+ return size;
+}
+
+/* timer callback: retry replenishing the rx ring after an earlier
+ * nbuf allocation failure in htt_rx_ring_fill_n() */
+static void htt_rx_ring_refill_retry(void *arg)
+{
+ htt_pdev_handle pdev = (htt_pdev_handle) arg;
+ htt_rx_msdu_buff_replenish(pdev);
+}
+
+/**
+ * htt_rx_ring_fill_n() - post up to @num empty rx buffers to the MAC DMA ring
+ * @pdev: HTT pdev handle
+ * @num: number of buffers to allocate, map and post
+ *
+ * For each buffer: allocate an nbuf, clear the attention word of its rx
+ * descriptor, DMA-map it, and publish its physical address in paddrs_ring.
+ * In full-reorder-offload mode buffers are tracked in the paddr hash table;
+ * otherwise in netbufs_ring. On allocation failure a retry timer is armed
+ * and the function bails out early; alloc_idx.vaddr is always updated with
+ * however many slots were successfully filled (shared with the target).
+ */
+void htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num)
+{
+ int idx;
+ CDF_STATUS status;
+ struct htt_host_rx_desc_base *rx_desc;
+
+ idx = *(pdev->rx_ring.alloc_idx.vaddr);
+ while (num > 0) {
+ uint32_t paddr;
+ cdf_nbuf_t rx_netbuf;
+ int headroom;
+
+ rx_netbuf =
+ cdf_nbuf_alloc(pdev->osdev, HTT_RX_BUF_SIZE,
+ 0, 4, false);
+ if (!rx_netbuf) {
+ cdf_softirq_timer_cancel(&pdev->rx_ring.
+ refill_retry_timer);
+ /*
+ * Failed to fill it to the desired level -
+ * we'll start a timer and try again next time.
+ * As long as enough buffers are left in the ring for
+ * another A-MPDU rx, no special recovery is needed.
+ */
+#ifdef DEBUG_DMA_DONE
+ pdev->rx_ring.dbg_refill_cnt++;
+#endif
+ cdf_softirq_timer_start(
+ &pdev->rx_ring.refill_retry_timer,
+ HTT_RX_RING_REFILL_RETRY_TIME_MS);
+ goto fail;
+ }
+
+ /* Clear rx_desc attention word before posting to Rx ring */
+ rx_desc = htt_rx_desc(rx_netbuf);
+ *(uint32_t *) &rx_desc->attention = 0;
+
+#ifdef DEBUG_DMA_DONE
+ *(uint32_t *) &rx_desc->msdu_end = 1;
+
+#define MAGIC_PATTERN 0xDEADBEEF
+ *(uint32_t *) &rx_desc->msdu_start = MAGIC_PATTERN;
+
+ /* To ensure that attention bit is reset and msdu_end is set
+ before calling dma_map */
+ smp_mb();
+#endif
+ /*
+ * Adjust cdf_nbuf_data to point to the location in the buffer
+ * where the rx descriptor will be filled in.
+ */
+ headroom = cdf_nbuf_data(rx_netbuf) - (uint8_t *) rx_desc;
+ cdf_nbuf_push_head(rx_netbuf, headroom);
+
+#ifdef DEBUG_DMA_DONE
+ status =
+ cdf_nbuf_map(pdev->osdev, rx_netbuf,
+ CDF_DMA_BIDIRECTIONAL);
+#else
+ status =
+ cdf_nbuf_map(pdev->osdev, rx_netbuf,
+ CDF_DMA_FROM_DEVICE);
+#endif
+ if (status != CDF_STATUS_SUCCESS) {
+ cdf_nbuf_free(rx_netbuf);
+ goto fail;
+ }
+ paddr = cdf_nbuf_get_frag_paddr_lo(rx_netbuf, 0);
+ if (pdev->cfg.is_full_reorder_offload) {
+ /* track the buffer by paddr for in-order delivery */
+ if (cdf_unlikely
+ (htt_rx_hash_list_insert(pdev, paddr,
+ rx_netbuf))) {
+ cdf_print("%s: hash insert failed!\n",
+ __func__);
+#ifdef DEBUG_DMA_DONE
+ cdf_nbuf_unmap(pdev->osdev, rx_netbuf,
+ CDF_DMA_BIDIRECTIONAL);
+#else
+ cdf_nbuf_unmap(pdev->osdev, rx_netbuf,
+ CDF_DMA_FROM_DEVICE);
+#endif
+ cdf_nbuf_free(rx_netbuf);
+ goto fail;
+ }
+ htt_rx_dbg_rxbuf_set(pdev, paddr, rx_netbuf);
+ } else {
+ pdev->rx_ring.buf.netbufs_ring[idx] = rx_netbuf;
+ }
+#if HTT_PADDR64
+ /* 64-bit ring entry: clear the slot, then store the low 32 bits */
+ pdev->rx_ring.buf.paddrs_ring[idx] = 0;
+ pdev->rx_ring.buf.paddrs_ring[idx] = (uint32_t)paddr;
+#else
+ pdev->rx_ring.buf.paddrs_ring[idx] = paddr;
+#endif /* HTT_PADDR64 */
+ pdev->rx_ring.fill_cnt++;
+
+ num--;
+ idx++;
+ idx &= pdev->rx_ring.size_mask;
+ }
+
+fail:
+ /* publish the new alloc index even on partial fill */
+ *(pdev->rx_ring.alloc_idx.vaddr) = idx;
+ return;
+}
+
+/* number of filled (unconsumed) entries in the rx ring:
+ * distance from SW read index to the shared alloc index, modulo ring size */
+unsigned htt_rx_ring_elems(struct htt_pdev_t *pdev)
+{
+ return
+ (*pdev->rx_ring.alloc_idx.vaddr -
+ pdev->rx_ring.sw_rd_idx.msdu_payld) & pdev->rx_ring.size_mask;
+}
+
+/* in-order (full reorder offload) variant: distance from the
+ * target-consumed index to the alloc index, modulo ring size */
+unsigned int htt_rx_in_order_ring_elems(struct htt_pdev_t *pdev)
+{
+ return
+ (*pdev->rx_ring.alloc_idx.vaddr -
+ *pdev->rx_ring.target_idx.vaddr) &
+ pdev->rx_ring.size_mask;
+}
+
+/**
+ * htt_rx_detach() - tear down the rx ring and release all rx resources
+ * @pdev: HTT pdev handle
+ *
+ * Stops the refill retry timer, then frees the remaining buffers: via the
+ * paddr hash table in full-reorder-offload mode, or by walking
+ * netbufs_ring from the SW read index to the alloc index otherwise.
+ * Finally frees the shared alloc index and the paddrs ring itself.
+ */
+void htt_rx_detach(struct htt_pdev_t *pdev)
+{
+ cdf_softirq_timer_cancel(&pdev->rx_ring.refill_retry_timer);
+ cdf_softirq_timer_free(&pdev->rx_ring.refill_retry_timer);
+
+ if (pdev->cfg.is_full_reorder_offload) {
+ cdf_os_mem_free_consistent(pdev->osdev,
+ sizeof(uint32_t),
+ pdev->rx_ring.target_idx.vaddr,
+ pdev->rx_ring.target_idx.paddr,
+ cdf_get_dma_mem_context((&pdev->
+ rx_ring.
+ target_idx),
+ memctx));
+ htt_rx_hash_deinit(pdev);
+ } else {
+ int sw_rd_idx = pdev->rx_ring.sw_rd_idx.msdu_payld;
+
+ /* unmap and free every buffer still posted in the ring */
+ while (sw_rd_idx != *(pdev->rx_ring.alloc_idx.vaddr)) {
+#ifdef DEBUG_DMA_DONE
+ cdf_nbuf_unmap(pdev->osdev,
+ pdev->rx_ring.buf.
+ netbufs_ring[sw_rd_idx],
+ CDF_DMA_BIDIRECTIONAL);
+#else
+ cdf_nbuf_unmap(pdev->osdev,
+ pdev->rx_ring.buf.
+ netbufs_ring[sw_rd_idx],
+ CDF_DMA_FROM_DEVICE);
+#endif
+ cdf_nbuf_free(pdev->rx_ring.buf.
+ netbufs_ring[sw_rd_idx]);
+ sw_rd_idx++;
+ sw_rd_idx &= pdev->rx_ring.size_mask;
+ }
+ cdf_mem_free(pdev->rx_ring.buf.netbufs_ring);
+ }
+
+ cdf_os_mem_free_consistent(pdev->osdev,
+ sizeof(uint32_t),
+ pdev->rx_ring.alloc_idx.vaddr,
+ pdev->rx_ring.alloc_idx.paddr,
+ cdf_get_dma_mem_context((&pdev->rx_ring.
+ alloc_idx),
+ memctx));
+
+ cdf_os_mem_free_consistent(pdev->osdev,
+ pdev->rx_ring.size * sizeof(uint32_t),
+ pdev->rx_ring.buf.paddrs_ring,
+ pdev->rx_ring.base_paddr,
+ cdf_get_dma_mem_context((&pdev->rx_ring.buf),
+ memctx));
+}
+
+/*--- rx descriptor field access functions ----------------------------------*/
+/*
+ * These functions need to use bit masks and shifts to extract fields
+ * from the rx descriptors, rather than directly using the bitfields.
+ * For example, use
+ * (desc & FIELD_MASK) >> FIELD_LSB
+ * rather than
+ * desc.field
+ * This allows the functions to work correctly on either little-endian
+ * machines (no endianness conversion needed) or big-endian machines
+ * (endianness conversion provided automatically by the HW DMA's
+ * byte-swizzling).
+ */
+/* FIX THIS: APPLIES TO LL ONLY */
+
+/**
+ * htt_rx_mpdu_desc_retry_ll() - Returns the retry bit from the Rx descriptor
+ * for the Low Latency driver
+ * @pdev: Handle (pointer) to HTT pdev.
+ * @mpdu_desc: Void pointer to the Rx descriptor for MPDU
+ * before the beginning of the payload.
+ *
+ * This function returns the retry bit of the 802.11 header for the
+ * provided rx MPDU descriptor.
+ *
+ * Return: boolean -- true if retry is set, false otherwise
+ */
+bool
+htt_rx_mpdu_desc_retry_ll(htt_pdev_handle pdev, void *mpdu_desc)
+{
+ struct htt_host_rx_desc_base *rx_desc =
+ (struct htt_host_rx_desc_base *) mpdu_desc;
+
+ /* mask+shift on the raw word (not the bitfield) keeps this endian-safe */
+ return
+ (bool)(((*((uint32_t *) &rx_desc->mpdu_start)) &
+ RX_MPDU_START_0_RETRY_MASK) >>
+ RX_MPDU_START_0_RETRY_LSB);
+}
+
+/* returns the 802.11 sequence number of the MPDU, extracted from word 0
+ * of mpdu_start via mask+shift (endian-safe; LL rx descriptor only) */
+uint16_t htt_rx_mpdu_desc_seq_num_ll(htt_pdev_handle pdev, void *mpdu_desc)
+{
+ struct htt_host_rx_desc_base *rx_desc =
+ (struct htt_host_rx_desc_base *)mpdu_desc;
+
+ return
+ (uint16_t) (((*((uint32_t *) &rx_desc->mpdu_start)) &
+ RX_MPDU_START_0_SEQ_NUM_MASK) >>
+ RX_MPDU_START_0_SEQ_NUM_LSB);
+}
+
+/* FIX THIS: APPLIES TO LL ONLY */
+/**
+ * htt_rx_mpdu_desc_pn_ll() - extract the security packet number (PN)
+ * @pdev: HTT pdev handle (unused here)
+ * @mpdu_desc: LL rx descriptor of the MPDU
+ * @pn: output union, filled according to @pn_len_bits
+ * @pn_len_bits: PN width -- 24 (TKIP/CCMP-style), 48, or 128 (WAPI);
+ * any other value logs an error and leaves @pn unwritten
+ */
+void
+htt_rx_mpdu_desc_pn_ll(htt_pdev_handle pdev,
+ void *mpdu_desc, union htt_rx_pn_t *pn, int pn_len_bits)
+{
+ struct htt_host_rx_desc_base *rx_desc =
+ (struct htt_host_rx_desc_base *)mpdu_desc;
+
+ switch (pn_len_bits) {
+ case 24:
+ /* bits 23:0 */
+ pn->pn24 = rx_desc->mpdu_start.pn_31_0 & 0xffffff;
+ break;
+ case 48:
+ /* bits 31:0 */
+ pn->pn48 = rx_desc->mpdu_start.pn_31_0;
+ /* bits 47:32 */
+ pn->pn48 |= ((uint64_t)
+ ((*(((uint32_t *) &rx_desc->mpdu_start) + 2))
+ & RX_MPDU_START_2_PN_47_32_MASK))
+ << (32 - RX_MPDU_START_2_PN_47_32_LSB);
+ break;
+ case 128:
+ /* bits 31:0 */
+ pn->pn128[0] = rx_desc->mpdu_start.pn_31_0;
+ /* bits 47:32 */
+ pn->pn128[0] |=
+ ((uint64_t) ((*(((uint32_t *)&rx_desc->mpdu_start) + 2))
+ & RX_MPDU_START_2_PN_47_32_MASK))
+ << (32 - RX_MPDU_START_2_PN_47_32_LSB);
+ /* bits 63:48 */
+ pn->pn128[0] |=
+ ((uint64_t) ((*(((uint32_t *) &rx_desc->msdu_end) + 2))
+ & RX_MSDU_END_1_EXT_WAPI_PN_63_48_MASK))
+ << (48 - RX_MSDU_END_1_EXT_WAPI_PN_63_48_LSB);
+ /* bits 95:64 */
+ pn->pn128[1] = rx_desc->msdu_end.ext_wapi_pn_95_64;
+ /* bits 127:96 */
+ pn->pn128[1] |=
+ ((uint64_t) rx_desc->msdu_end.ext_wapi_pn_127_96) << 32;
+ break;
+ default:
+ cdf_print("Error: invalid length spec (%d bits) for PN\n",
+ pn_len_bits);
+ };
+}
+
+/**
+ * htt_rx_mpdu_desc_tid_ll() - Returns the TID value from the Rx descriptor
+ * for Low Latency driver
+ * @pdev: Handle (pointer) to HTT pdev.
+ * @mpdu_desc: Void pointer to the Rx descriptor for the MPDU
+ * before the beginning of the payload.
+ *
+ * This function returns the TID set in the 802.11 QoS Control for the MPDU
+ * in the packet header, by looking at the mpdu_start of the Rx descriptor.
+ * Rx descriptor gets a copy of the TID from the MAC.
+ *
+ * Return: Actual TID set in the packet header.
+ */
+uint8_t
+htt_rx_mpdu_desc_tid_ll(htt_pdev_handle pdev, void *mpdu_desc)
+{
+ struct htt_host_rx_desc_base *rx_desc =
+ (struct htt_host_rx_desc_base *) mpdu_desc;
+
+ /* TID lives in the third word (index 2) of mpdu_start */
+ return
+ (uint8_t)(((*(((uint32_t *) &rx_desc->mpdu_start) + 2)) &
+ RX_MPDU_START_2_TID_MASK) >>
+ RX_MPDU_START_2_TID_LSB);
+}
+
+/* 32-bit rx timestamp (TSF): not implemented -- always returns 0 */
+uint32_t htt_rx_mpdu_desc_tsf32(htt_pdev_handle pdev, void *mpdu_desc)
+{
+/* FIX THIS */
+ return 0;
+}
+
+/* FIX THIS: APPLIES TO LL ONLY */
+/* returns a pointer to the raw 802.11 header bytes that the HW copied
+ * into the rx descriptor (rx_hdr_status area) */
+char *htt_rx_mpdu_wifi_hdr_retrieve(htt_pdev_handle pdev, void *mpdu_desc)
+{
+ struct htt_host_rx_desc_base *rx_desc =
+ (struct htt_host_rx_desc_base *)mpdu_desc;
+ return rx_desc->rx_hdr_status;
+}
+
+/* FIX THIS: APPLIES TO LL ONLY */
+/* true if this MSDU is the last MSDU of its MPDU (last_msdu bit,
+ * word 4 of msdu_end) */
+bool htt_rx_msdu_desc_completes_mpdu_ll(htt_pdev_handle pdev, void *msdu_desc)
+{
+ struct htt_host_rx_desc_base *rx_desc =
+ (struct htt_host_rx_desc_base *)msdu_desc;
+ return (bool)
+ (((*(((uint32_t *) &rx_desc->msdu_end) + 4)) &
+ RX_MSDU_END_4_LAST_MSDU_MASK) >> RX_MSDU_END_4_LAST_MSDU_LSB);
+}
+
+/* FIX THIS: APPLIES TO LL ONLY */
+/* non-zero if the mcast_bcast flag is valid for this MSDU, i.e. if this
+ * is the first MSDU of the MPDU (first_msdu bit, word 4 of msdu_end) */
+int htt_rx_msdu_has_wlan_mcast_flag_ll(htt_pdev_handle pdev, void *msdu_desc)
+{
+ struct htt_host_rx_desc_base *rx_desc =
+ (struct htt_host_rx_desc_base *)msdu_desc;
+ /* HW rx desc: the mcast_bcast flag is only valid
+ if first_msdu is set */
+ return
+ ((*(((uint32_t *) &rx_desc->msdu_end) + 4)) &
+ RX_MSDU_END_4_FIRST_MSDU_MASK) >> RX_MSDU_END_4_FIRST_MSDU_LSB;
+}
+
+/* FIX THIS: APPLIES TO LL ONLY */
+/* non-zero if the frame's receiver address was multicast/broadcast
+ * (mcast_bcast bit in the attention word); only meaningful when
+ * htt_rx_msdu_has_wlan_mcast_flag_ll() is true */
+bool htt_rx_msdu_is_wlan_mcast_ll(htt_pdev_handle pdev, void *msdu_desc)
+{
+ struct htt_host_rx_desc_base *rx_desc =
+ (struct htt_host_rx_desc_base *)msdu_desc;
+ return
+ ((*((uint32_t *) &rx_desc->attention)) &
+ RX_ATTENTION_0_MCAST_BCAST_MASK)
+ >> RX_ATTENTION_0_MCAST_BCAST_LSB;
+}
+
+/* FIX THIS: APPLIES TO LL ONLY */
+/* non-zero if this MSDU is an 802.11 fragment (fragment bit in the
+ * attention word of the rx descriptor) */
+int htt_rx_msdu_is_frag_ll(htt_pdev_handle pdev, void *msdu_desc)
+{
+ struct htt_host_rx_desc_base *rx_desc =
+ (struct htt_host_rx_desc_base *)msdu_desc;
+ return
+ ((*((uint32_t *) &rx_desc->attention)) &
+ RX_ATTENTION_0_FRAGMENT_MASK) >> RX_ATTENTION_0_FRAGMENT_LSB;
+}
+
+/* fetch the one-byte FW rx descriptor, using the per-pdev offset that
+ * abstracts the HL vs. LL placement difference described below */
+static inline
+uint8_t htt_rx_msdu_fw_desc_get(htt_pdev_handle pdev, void *msdu_desc)
+{
+ /*
+ * HL and LL use the same format for FW rx desc, but have the FW rx desc
+ * in different locations.
+ * In LL, the FW rx descriptor has been copied into the same
+ * htt_host_rx_desc_base struct that holds the HW rx desc.
+ * In HL, the FW rx descriptor, along with the MSDU payload,
+ * is in the same buffer as the rx indication message.
+ *
+ * Use the FW rx desc offset configured during startup to account for
+ * this difference between HL vs. LL.
+ *
+ * An optimization would be to define the LL and HL msdu_desc pointer
+ * in such a way that they both use the same offset to the FW rx desc.
+ * Then the following functions could be converted to macros, without
+ * needing to expose the htt_pdev_t definition outside HTT.
+ */
+ return *(((uint8_t *) msdu_desc) + pdev->rx_fw_desc_offset);
+}
+
+/* non-zero if the FW marked this MSDU to be discarded */
+int htt_rx_msdu_discard(htt_pdev_handle pdev, void *msdu_desc)
+{
+ return htt_rx_msdu_fw_desc_get(pdev, msdu_desc) & FW_RX_DESC_DISCARD_M;
+}
+
+/* non-zero if the FW marked this MSDU to be forwarded (e.g. intra-BSS) */
+int htt_rx_msdu_forward(htt_pdev_handle pdev, void *msdu_desc)
+{
+ return htt_rx_msdu_fw_desc_get(pdev, msdu_desc) & FW_RX_DESC_FORWARD_M;
+}
+
+/* non-zero if the FW marked this MSDU for host inspection */
+int htt_rx_msdu_inspect(htt_pdev_handle pdev, void *msdu_desc)
+{
+ return htt_rx_msdu_fw_desc_get(pdev, msdu_desc) & FW_RX_DESC_INSPECT_M;
+}
+
+/* read the FW rx desc once and report all three action flags
+ * (discard / forward / inspect) through the output parameters */
+void
+htt_rx_msdu_actions(htt_pdev_handle pdev,
+ void *msdu_desc, int *discard, int *forward, int *inspect)
+{
+ uint8_t rx_msdu_fw_desc = htt_rx_msdu_fw_desc_get(pdev, msdu_desc);
+#ifdef HTT_DEBUG_DATA
+ HTT_PRINT("act:0x%x ", rx_msdu_fw_desc);
+#endif
+ *discard = rx_msdu_fw_desc & FW_RX_DESC_DISCARD_M;
+ *forward = rx_msdu_fw_desc & FW_RX_DESC_FORWARD_M;
+ *inspect = rx_msdu_fw_desc & FW_RX_DESC_INSPECT_M;
+}
+
+/* pop the next filled netbuf from the rx ring at the SW read index;
+ * caller must ensure the ring is non-empty (HTT_ASSERT1 checks in
+ * debug builds only) */
+static inline cdf_nbuf_t htt_rx_netbuf_pop(htt_pdev_handle pdev)
+{
+ int idx;
+ cdf_nbuf_t msdu;
+
+ HTT_ASSERT1(htt_rx_ring_elems(pdev) != 0);
+
+#ifdef DEBUG_DMA_DONE
+ pdev->rx_ring.dbg_ring_idx++;
+ pdev->rx_ring.dbg_ring_idx &= pdev->rx_ring.size_mask;
+#endif
+
+ idx = pdev->rx_ring.sw_rd_idx.msdu_payld;
+ msdu = pdev->rx_ring.buf.netbufs_ring[idx];
+ /* advance the SW read index with wrap-around */
+ idx++;
+ idx &= pdev->rx_ring.size_mask;
+ pdev->rx_ring.sw_rd_idx.msdu_payld = idx;
+ pdev->rx_ring.fill_cnt--;
+ return msdu;
+}
+
+/* in-order (full reorder offload) pop: look the netbuf up by the physical
+ * address reported by the target, rather than by ring index */
+static inline cdf_nbuf_t
+htt_rx_in_order_netbuf_pop(htt_pdev_handle pdev, uint32_t paddr)
+{
+ HTT_ASSERT1(htt_rx_in_order_ring_elems(pdev) != 0);
+ pdev->rx_ring.fill_cnt--;
+ return htt_rx_hash_list_lookup(pdev, paddr);
+}
+
+/* FIX ME: this function applies only to LL rx descs.
+ An equivalent for HL rx descs is needed. */
+#ifdef CHECKSUM_OFFLOAD
+/**
+ * htt_set_checksum_result_ll() - translate HW checksum info to nbuf fields
+ * @pdev: HTT pdev handle (unused here)
+ * @msdu: nbuf to annotate
+ * @rx_desc: LL rx descriptor for the MSDU
+ *
+ * Maps (ip_frag, tcp/udp proto, ipv4/ipv6) to an L4 checksum type via a
+ * lookup table (fragments get no offload), then marks the checksum as
+ * verified unless the HW flagged a tcp_udp_chksum_fail in the attention
+ * word.
+ */
+static inline
+void
+htt_set_checksum_result_ll(htt_pdev_handle pdev, cdf_nbuf_t msdu,
+ struct htt_host_rx_desc_base *rx_desc)
+{
+#define MAX_IP_VER 2
+#define MAX_PROTO_VAL 4
+ struct rx_msdu_start *rx_msdu = &rx_desc->msdu_start;
+ /* proto index: 0 = neither, 1 = TCP, 2 = UDP (3 unused) */
+ unsigned int proto = (rx_msdu->tcp_proto) | (rx_msdu->udp_proto << 1);
+
+ /*
+ * HW supports TCP & UDP checksum offload for ipv4 and ipv6
+ */
+ static const cdf_nbuf_l4_rx_cksum_type_t
+ cksum_table[][MAX_PROTO_VAL][MAX_IP_VER] = {
+ {
+ /* non-fragmented IP packet */
+ /* non TCP/UDP packet */
+ {CDF_NBUF_RX_CKSUM_NONE, CDF_NBUF_RX_CKSUM_NONE},
+ /* TCP packet */
+ {CDF_NBUF_RX_CKSUM_TCP, CDF_NBUF_RX_CKSUM_TCPIPV6},
+ /* UDP packet */
+ {CDF_NBUF_RX_CKSUM_UDP, CDF_NBUF_RX_CKSUM_UDPIPV6},
+ /* invalid packet type */
+ {CDF_NBUF_RX_CKSUM_NONE, CDF_NBUF_RX_CKSUM_NONE},
+ },
+ {
+ /* fragmented IP packet */
+ {CDF_NBUF_RX_CKSUM_NONE, CDF_NBUF_RX_CKSUM_NONE},
+ {CDF_NBUF_RX_CKSUM_NONE, CDF_NBUF_RX_CKSUM_NONE},
+ {CDF_NBUF_RX_CKSUM_NONE, CDF_NBUF_RX_CKSUM_NONE},
+ {CDF_NBUF_RX_CKSUM_NONE, CDF_NBUF_RX_CKSUM_NONE},
+ }
+ };
+
+ cdf_nbuf_rx_cksum_t cksum = {
+ cksum_table[rx_msdu->ip_frag][proto][rx_msdu->ipv6_proto],
+ CDF_NBUF_RX_CKSUM_NONE,
+ 0
+ };
+
+ if (cksum.l4_type !=
+ (cdf_nbuf_l4_rx_cksum_type_t) CDF_NBUF_RX_CKSUM_NONE) {
+ cksum.l4_result =
+ ((*(uint32_t *) &rx_desc->attention) &
+ RX_ATTENTION_0_TCP_UDP_CHKSUM_FAIL_MASK) ?
+ CDF_NBUF_RX_CKSUM_NONE :
+ CDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY;
+ }
+ cdf_nbuf_set_rx_cksum(msdu, &cksum);
+#undef MAX_IP_VER
+#undef MAX_PROTO_VAL
+}
+#else
+#define htt_set_checksum_result_ll(pdev, msdu, rx_desc) /* no-op */
+#endif
+
#ifdef DEBUG_DMA_DONE
/**
 * htt_rx_print_rx_indication() - dump rx ring state and an RX_IND message
 * @rx_ind_msg: the HTT T2H RX_IND message to decode
 * @pdev: HTT pdev handle whose rx ring state is printed
 *
 * Debug helper: prints the rx ring indices and fill counters, the RX_IND
 * header fields (peer id, flush/release validity and sequence numbers,
 * FW rx descriptor byte count), the current per-MSDU FW descriptor, and
 * the status of every MPDU range in the message.
 */
void htt_rx_print_rx_indication(cdf_nbuf_t rx_ind_msg, htt_pdev_handle pdev)
{
	uint32_t *msg_word;
	int byte_offset;
	int mpdu_range, num_mpdu_range;

	msg_word = (uint32_t *) cdf_nbuf_data(rx_ind_msg);

	cdf_print
		("------------------HTT RX IND-----------------------------\n");
	cdf_print("alloc idx paddr %x (*vaddr) %d\n",
		  pdev->rx_ring.alloc_idx.paddr,
		  *pdev->rx_ring.alloc_idx.vaddr);

	cdf_print("sw_rd_idx msdu_payld %d msdu_desc %d\n",
		  pdev->rx_ring.sw_rd_idx.msdu_payld,
		  pdev->rx_ring.sw_rd_idx.msdu_desc);

	cdf_print("dbg_ring_idx %d\n", pdev->rx_ring.dbg_ring_idx);

	cdf_print("fill_level %d fill_cnt %d\n", pdev->rx_ring.fill_level,
		  pdev->rx_ring.fill_cnt);

	cdf_print("initial msdu_payld %d curr mpdu range %d curr mpdu cnt %d\n",
		  pdev->rx_ring.dbg_initial_msdu_payld,
		  pdev->rx_ring.dbg_mpdu_range, pdev->rx_ring.dbg_mpdu_count);

	/* Print the RX_IND contents */

	cdf_print("peer id %x RV %x FV %x ext_tid %x msg_type %x\n",
		  HTT_RX_IND_PEER_ID_GET(*msg_word),
		  HTT_RX_IND_REL_VALID_GET(*msg_word),
		  HTT_RX_IND_FLUSH_VALID_GET(*msg_word),
		  HTT_RX_IND_EXT_TID_GET(*msg_word),
		  HTT_T2H_MSG_TYPE_GET(*msg_word));

	cdf_print("num_mpdu_ranges %x rel_seq_num_end %x rel_seq_num_start %x\n"
		  " flush_seq_num_end %x flush_seq_num_start %x\n",
		  HTT_RX_IND_NUM_MPDU_RANGES_GET(*(msg_word + 1)),
		  HTT_RX_IND_REL_SEQ_NUM_END_GET(*(msg_word + 1)),
		  HTT_RX_IND_REL_SEQ_NUM_START_GET(*(msg_word + 1)),
		  HTT_RX_IND_FLUSH_SEQ_NUM_END_GET(*(msg_word + 1)),
		  HTT_RX_IND_FLUSH_SEQ_NUM_START_GET(*(msg_word + 1)));

	cdf_print("fw_rx_desc_bytes %x\n",
		  HTT_RX_IND_FW_RX_DESC_BYTES_GET(*
						  (msg_word + 2 +
						   HTT_RX_PPDU_DESC_SIZE32)));

	/* receive MSDU desc for current frame */
	/* NOTE: byte_offset is computed for reference; the print below
	 * re-reads the descriptor bytes field rather than using it. */
	byte_offset =
		HTT_ENDIAN_BYTE_IDX_SWAP(HTT_RX_IND_FW_RX_DESC_BYTE_OFFSET +
					 pdev->rx_ind_msdu_byte_idx);

	cdf_print("msdu byte idx %x msdu desc %x\n", pdev->rx_ind_msdu_byte_idx,
		  HTT_RX_IND_FW_RX_DESC_BYTES_GET(*
						  (msg_word + 2 +
						   HTT_RX_PPDU_DESC_SIZE32)));

	num_mpdu_range = HTT_RX_IND_NUM_MPDU_RANGES_GET(*(msg_word + 1));

	/* walk every MPDU range in the indication and print its status */
	for (mpdu_range = 0; mpdu_range < num_mpdu_range; mpdu_range++) {
		enum htt_rx_status status;
		int num_mpdus;

		htt_rx_ind_mpdu_range_info(pdev, rx_ind_msg, mpdu_range,
					   &status, &num_mpdus);

		cdf_print("mpdu_range %x status %x num_mpdus %x\n",
			  pdev->rx_ind_msdu_byte_idx, status, num_mpdus);
	}
	cdf_print
		("---------------------------------------------------------\n");
}
#endif
+
#ifdef DEBUG_DMA_DONE
/* how many 1 ms retries to wait for a late MSDU-done bit before giving up */
#define MAX_DONE_BIT_CHECK_ITER 5
#endif

/**
 * htt_rx_amsdu_pop_ll() - pop one MPDU's MSDU chain off the LL rx ring
 * @pdev: HTT pdev handle
 * @rx_ind_msg: the HTT T2H RX_IND (or RX_FRAG_IND) message being processed
 * @head_msdu: output - first netbuf of the popped MSDU chain
 * @tail_msdu: output - last netbuf of the popped MSDU chain
 *
 * Pops rx netbufs from the ring until an MSDU whose HW rx descriptor has
 * the last_msdu flag set, linking them into a NULL-terminated chain.
 * For each MSDU: the buffer is unmapped, the data pointer is advanced past
 * the HW rx descriptor, the per-MSDU FW rx descriptor byte is copied from
 * the rx indication message into the HW descriptor's fw_desc field, the
 * checksum-offload result is recorded, and the buffer is trimmed to the
 * HW-reported MSDU length.  MSDUs spanning multiple rx buffers
 * (msdu_chained) cause extra buffers to be popped and chained on.
 *
 * The ring is deliberately not refilled here; see the comment at the end.
 *
 * Return: 1 if any MSDU spanned multiple rx buffers (chaining occurred),
 *	   else 0.
 */
int
htt_rx_amsdu_pop_ll(htt_pdev_handle pdev,
		    cdf_nbuf_t rx_ind_msg,
		    cdf_nbuf_t *head_msdu, cdf_nbuf_t *tail_msdu)
{
	int msdu_len, msdu_chaining = 0;
	cdf_nbuf_t msdu;
	struct htt_host_rx_desc_base *rx_desc;
	uint8_t *rx_ind_data;
	uint32_t *msg_word, num_msdu_bytes;
	enum htt_t2h_msg_type msg_type;
	uint8_t pad_bytes = 0;

	HTT_ASSERT1(htt_rx_ring_elems(pdev) != 0);
	rx_ind_data = cdf_nbuf_data(rx_ind_msg);
	msg_word = (uint32_t *) rx_ind_data;

	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);

	/* The FW rx descriptor byte count lives at a different offset in
	 * RX_FRAG_IND vs. RX_IND messages. */
	if (cdf_unlikely(HTT_T2H_MSG_TYPE_RX_FRAG_IND == msg_type)) {
		num_msdu_bytes = HTT_RX_FRAG_IND_FW_RX_DESC_BYTES_GET(
			*(msg_word + HTT_RX_FRAG_IND_HDR_PREFIX_SIZE32));
	} else {
		num_msdu_bytes = HTT_RX_IND_FW_RX_DESC_BYTES_GET(
			*(msg_word
			  + HTT_RX_IND_HDR_PREFIX_SIZE32
			  + HTT_RX_PPDU_DESC_SIZE32));
	}
	msdu = *head_msdu = htt_rx_netbuf_pop(pdev);
	while (1) {
		int last_msdu, msdu_len_invalid, msdu_chained;
		int byte_offset;

		/*
		 * Set the netbuf length to be the entire buffer length
		 * initially, so the unmap will unmap the entire buffer.
		 */
		cdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
#ifdef DEBUG_DMA_DONE
		cdf_nbuf_unmap(pdev->osdev, msdu, CDF_DMA_BIDIRECTIONAL);
#else
		cdf_nbuf_unmap(pdev->osdev, msdu, CDF_DMA_FROM_DEVICE);
#endif

		/* cache consistency has been taken care of by cdf_nbuf_unmap */

		/*
		 * Now read the rx descriptor.
		 * Set the length to the appropriate value.
		 * Check if this MSDU completes a MPDU.
		 */
		rx_desc = htt_rx_desc(msdu);
#if defined(HELIUMPLUS_PADDR64)
		/* newer HW may pad the L3 header for alignment */
		if (HTT_WIFI_IP(pdev, 2, 0))
			pad_bytes = rx_desc->msdu_end.l3_header_padding;
#endif /* defined(HELIUMPLUS_PADDR64) */
		/*
		 * Make the netbuf's data pointer point to the payload rather
		 * than the descriptor.
		 */

		cdf_nbuf_pull_head(msdu,
				   HTT_RX_STD_DESC_RESERVATION + pad_bytes);

		/*
		 * Sanity check - confirm the HW is finished filling in
		 * the rx data.
		 * If the HW and SW are working correctly, then it's guaranteed
		 * that the HW's MAC DMA is done before this point in the SW.
		 * To prevent the case that we handle a stale Rx descriptor,
		 * just assert for now until we have a way to recover.
		 */

#ifdef DEBUG_DMA_DONE
		if (cdf_unlikely(!((*(uint32_t *) &rx_desc->attention)
				   & RX_ATTENTION_0_MSDU_DONE_MASK))) {

			int dbg_iter = MAX_DONE_BIT_CHECK_ITER;

			cdf_print("malformed frame\n");

			/* re-check a few times, re-invalidating the cached
			 * descriptor each iteration, in case the DMA is
			 * merely late rather than lost */
			while (dbg_iter &&
			       (!((*(uint32_t *) &rx_desc->attention) &
				  RX_ATTENTION_0_MSDU_DONE_MASK))) {
				cdf_mdelay(1);

				cdf_invalidate_range((void *)rx_desc,
						     (void *)((char *)rx_desc +
						HTT_RX_STD_DESC_RESERVATION));

				cdf_print("debug iter %d success %d\n",
					  dbg_iter,
					  pdev->rx_ring.dbg_sync_success);

				dbg_iter--;
			}

			if (cdf_unlikely(!((*(uint32_t *) &rx_desc->attention)
					   & RX_ATTENTION_0_MSDU_DONE_MASK))) {

#ifdef HTT_RX_RESTORE
				/* flag the ring for recovery instead of
				 * crashing the host */
				cdf_print("RX done bit error detected!\n");
				cdf_nbuf_set_next(msdu, NULL);
				*tail_msdu = msdu;
				pdev->rx_ring.rx_reset = 1;
				return msdu_chaining;
#else
				wma_cli_set_command(0, GEN_PARAM_CRASH_INJECT,
						    0, GEN_CMD);
				HTT_ASSERT_ALWAYS(0);
#endif
			}
			pdev->rx_ring.dbg_sync_success++;
			cdf_print("debug iter %d success %d\n", dbg_iter,
				  pdev->rx_ring.dbg_sync_success);
		}
#else
		HTT_ASSERT_ALWAYS((*(uint32_t *) &rx_desc->attention) &
				  RX_ATTENTION_0_MSDU_DONE_MASK);
#endif
		/*
		 * Copy the FW rx descriptor for this MSDU from the rx
		 * indication message into the MSDU's netbuf.
		 * HL uses the same rx indication message definition as LL, and
		 * simply appends new info (fields from the HW rx desc, and the
		 * MSDU payload itself).
		 * So, the offset into the rx indication message only has to
		 * account for the standard offset of the per-MSDU FW rx
		 * desc info within the message, and how many bytes of the
		 * per-MSDU FW rx desc info have already been consumed.
		 * (And the endianness of the host,
		 * since for a big-endian host, the rx ind message contents,
		 * including the per-MSDU rx desc bytes, were byteswapped during
		 * upload.)
		 */
		if (pdev->rx_ind_msdu_byte_idx < num_msdu_bytes) {
			if (cdf_unlikely
				    (HTT_T2H_MSG_TYPE_RX_FRAG_IND == msg_type))
				byte_offset =
					HTT_ENDIAN_BYTE_IDX_SWAP
					(HTT_RX_FRAG_IND_FW_DESC_BYTE_OFFSET);
			else
				byte_offset =
					HTT_ENDIAN_BYTE_IDX_SWAP
					(HTT_RX_IND_FW_RX_DESC_BYTE_OFFSET +
						pdev->rx_ind_msdu_byte_idx);

			*((uint8_t *) &rx_desc->fw_desc.u.val) =
				rx_ind_data[byte_offset];
			/*
			 * The target is expected to only provide the basic
			 * per-MSDU rx descriptors. Just to be sure,
			 * verify that the target has not attached
			 * extension data (e.g. LRO flow ID).
			 */
			/*
			 * The assertion below currently doesn't work for
			 * RX_FRAG_IND messages, since their format differs
			 * from the RX_IND format (no FW rx PPDU desc in
			 * the current RX_FRAG_IND message).
			 * If the RX_FRAG_IND message format is updated to match
			 * the RX_IND message format, then the following
			 * assertion can be restored.
			 */
			/* cdf_assert((rx_ind_data[byte_offset] &
			   FW_RX_DESC_EXT_M) == 0); */
			pdev->rx_ind_msdu_byte_idx += 1;
			/* or more, if there's ext data */
		} else {
			/*
			 * When an oversized AMSDU happened, FW will lost some
			 * of MSDU status - in this case, the FW descriptors
			 * provided will be less than the actual MSDUs
			 * inside this MPDU.
			 * Mark the FW descriptors so that it will still
			 * deliver to upper stack, if no CRC error for the MPDU.
			 *
			 * FIX THIS - the FW descriptors are actually for MSDUs
			 * in the end of this A-MSDU instead of the beginning.
			 */
			*((uint8_t *) &rx_desc->fw_desc.u.val) = 0;
		}

		/*
		 * TCP/UDP checksum offload support
		 */
		htt_set_checksum_result_ll(pdev, msdu, rx_desc);

		msdu_len_invalid = (*(uint32_t *) &rx_desc->attention) &
				   RX_ATTENTION_0_MPDU_LENGTH_ERR_MASK;
		/* number of additional rx buffers this MSDU spills into */
		msdu_chained = (((*(uint32_t *) &rx_desc->frag_info) &
				 RX_FRAG_INFO_0_RING2_MORE_COUNT_MASK) >>
				RX_FRAG_INFO_0_RING2_MORE_COUNT_LSB);
		msdu_len =
			((*((uint32_t *) &rx_desc->msdu_start)) &
			 RX_MSDU_START_0_MSDU_LENGTH_MASK) >>
			RX_MSDU_START_0_MSDU_LENGTH_LSB;

		/* trim single-buffer MSDUs to the HW-reported length;
		 * the do/while(0) exists so the WAR below can 'break' out */
		do {
			if (!msdu_len_invalid && !msdu_chained) {
#if defined(PEREGRINE_1_0_ZERO_LEN_PHY_ERR_WAR)
				if (msdu_len > 0x3000)
					break;
#endif
				cdf_nbuf_trim_tail(msdu,
						   HTT_RX_BUF_SIZE -
						   (RX_STD_DESC_SIZE +
						    msdu_len));
			}
		} while (0);

		/* pop and chain the spill-over buffers of a jumbo MSDU */
		while (msdu_chained--) {
			cdf_nbuf_t next = htt_rx_netbuf_pop(pdev);
			cdf_nbuf_set_pktlen(next, HTT_RX_BUF_SIZE);
			msdu_len -= HTT_RX_BUF_SIZE;
			cdf_nbuf_set_next(msdu, next);
			msdu = next;
			msdu_chaining = 1;

			if (msdu_chained == 0) {
				/* Trim the last one to the correct size -
				 * accounting for inconsistent HW lengths
				 * causing length overflows and underflows
				 */
				if (((unsigned)msdu_len) >
				    ((unsigned)
				     (HTT_RX_BUF_SIZE - RX_STD_DESC_SIZE))) {
					msdu_len =
						(HTT_RX_BUF_SIZE -
						 RX_STD_DESC_SIZE);
				}

				cdf_nbuf_trim_tail(next,
						   HTT_RX_BUF_SIZE -
						   (RX_STD_DESC_SIZE +
						    msdu_len));
			}
		}

		last_msdu =
			((*(((uint32_t *) &rx_desc->msdu_end) + 4)) &
			 RX_MSDU_END_4_LAST_MSDU_MASK) >>
			RX_MSDU_END_4_LAST_MSDU_LSB;

		if (last_msdu) {
			cdf_nbuf_set_next(msdu, NULL);
			break;
		} else {
			cdf_nbuf_t next = htt_rx_netbuf_pop(pdev);
			cdf_nbuf_set_next(msdu, next);
			msdu = next;
		}
	}
	*tail_msdu = msdu;

	/*
	 * Don't refill the ring yet.
	 * First, the elements popped here are still in use - it is
	 * not safe to overwrite them until the matching call to
	 * mpdu_desc_list_next.
	 * Second, for efficiency it is preferable to refill the rx ring
	 * with 1 PPDU's worth of rx buffers (something like 32 x 3 buffers),
	 * rather than one MPDU's worth of rx buffers (sth like 3 buffers).
	 * Consequently, we'll rely on the txrx SW to tell us when it is done
	 * pulling all the PPDU's rx buffers out of the rx ring, and then
	 * refill it just once.
	 */
	return msdu_chaining;
}
+
+int
+htt_rx_offload_msdu_pop_ll(htt_pdev_handle pdev,
+ cdf_nbuf_t offload_deliver_msg,
+ int *vdev_id,
+ int *peer_id,
+ int *tid,
+ uint8_t *fw_desc,
+ cdf_nbuf_t *head_buf, cdf_nbuf_t *tail_buf)
+{
+ cdf_nbuf_t buf;
+ uint32_t *msdu_hdr, msdu_len;
+
+ *head_buf = *tail_buf = buf = htt_rx_netbuf_pop(pdev);
+ /* Fake read mpdu_desc to keep desc ptr in sync */
+ htt_rx_mpdu_desc_list_next(pdev, NULL);
+ cdf_nbuf_set_pktlen(buf, HTT_RX_BUF_SIZE);
+#ifdef DEBUG_DMA_DONE
+ cdf_nbuf_unmap(pdev->osdev, buf, CDF_DMA_BIDIRECTIONAL);
+#else
+ cdf_nbuf_unmap(pdev->osdev, buf, CDF_DMA_FROM_DEVICE);
+#endif
+ msdu_hdr = (uint32_t *) cdf_nbuf_data(buf);
+
+ /* First dword */
+ msdu_len = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_LEN_GET(*msdu_hdr);
+ *peer_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_PEER_ID_GET(*msdu_hdr);
+
+ /* Second dword */
+ msdu_hdr++;
+ *vdev_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_VDEV_ID_GET(*msdu_hdr);
+ *tid = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_TID_GET(*msdu_hdr);
+ *fw_desc = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_DESC_GET(*msdu_hdr);
+
+ cdf_nbuf_pull_head(buf, HTT_RX_OFFLOAD_DELIVER_IND_MSDU_HDR_BYTES);
+ cdf_nbuf_set_pktlen(buf, msdu_len);
+ return 0;
+}
+
+int
+htt_rx_offload_paddr_msdu_pop_ll(htt_pdev_handle pdev,
+ uint32_t *msg_word,
+ int msdu_iter,
+ int *vdev_id,
+ int *peer_id,
+ int *tid,
+ uint8_t *fw_desc,
+ cdf_nbuf_t *head_buf, cdf_nbuf_t *tail_buf)
+{
+ cdf_nbuf_t buf;
+ uint32_t *msdu_hdr, msdu_len;
+ uint32_t *curr_msdu;
+ uint32_t paddr;
+
+ curr_msdu =
+ msg_word + (msdu_iter * HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS);
+ paddr = HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*curr_msdu);
+ *head_buf = *tail_buf = buf = htt_rx_in_order_netbuf_pop(pdev, paddr);
+
+ if (cdf_unlikely(NULL == buf)) {
+ cdf_print("%s: netbuf pop failed!\n", __func__);
+ return 0;
+ }
+ cdf_nbuf_set_pktlen(buf, HTT_RX_BUF_SIZE);
+#ifdef DEBUG_DMA_DONE
+ cdf_nbuf_unmap(pdev->osdev, buf, CDF_DMA_BIDIRECTIONAL);
+#else
+ cdf_nbuf_unmap(pdev->osdev, buf, CDF_DMA_FROM_DEVICE);
+#endif
+ msdu_hdr = (uint32_t *) cdf_nbuf_data(buf);
+
+ /* First dword */
+ msdu_len = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_LEN_GET(*msdu_hdr);
+ *peer_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_PEER_ID_GET(*msdu_hdr);
+
+ /* Second dword */
+ msdu_hdr++;
+ *vdev_id = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_VDEV_ID_GET(*msdu_hdr);
+ *tid = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_TID_GET(*msdu_hdr);
+ *fw_desc = HTT_RX_OFFLOAD_DELIVER_IND_MSDU_DESC_GET(*msdu_hdr);
+
+ cdf_nbuf_pull_head(buf, HTT_RX_OFFLOAD_DELIVER_IND_MSDU_HDR_BYTES);
+ cdf_nbuf_set_pktlen(buf, msdu_len);
+ return 0;
+}
+
+extern void
+dump_pkt(cdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len);
+
+#ifdef RX_HASH_DEBUG
+#define HTT_RX_CHECK_MSDU_COUNT(msdu_count) HTT_ASSERT_ALWAYS(msdu_count)
+#else
+#define HTT_RX_CHECK_MSDU_COUNT(msdu_count) /* no-op */
+#endif
+
/* Return values: 1 - success, 0 - failure */
/**
 * htt_rx_amsdu_rx_in_order_pop_ll() - pop MSDUs named by an IN_ORD_PADDR_IND
 * @pdev: HTT pdev handle
 * @rx_ind_msg: the HTT T2H RX_IN_ORD_PADDR_IND message
 * @head_msdu: output - first netbuf of the popped chain (NULL on failure)
 * @tail_msdu: output - last netbuf of the popped chain (NULL on failure)
 *
 * Looks up each rx buffer by the physical address carried in the message,
 * unmaps it, advances its data pointer past the HW rx descriptor, trims it
 * to the message-reported MSDU length, copies the per-MSDU FW descriptor
 * into the HW descriptor, and links the buffers into a chain.  Offload
 * indications are diverted to ol_rx_offload_paddr_deliver_ind_handler.
 * MSDUs whose FW descriptor carries the MIC-error bit are reported via
 * ol_rx_mic_error_handler and unlinked from the chain.
 *
 * Return: 1 on success, 0 on failure (netbuf lookup failed, offload-only
 * message, or all MSDUs dropped for MIC errors).
 */
int
htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
				cdf_nbuf_t rx_ind_msg,
				cdf_nbuf_t *head_msdu, cdf_nbuf_t *tail_msdu)
{
	cdf_nbuf_t msdu, next, prev = NULL;
	uint8_t *rx_ind_data;
	uint32_t *msg_word;
	unsigned int msdu_count = 0;
	uint8_t offload_ind;
	struct htt_host_rx_desc_base *rx_desc;

	HTT_ASSERT1(htt_rx_in_order_ring_elems(pdev) != 0);

	rx_ind_data = cdf_nbuf_data(rx_ind_msg);
	msg_word = (uint32_t *) rx_ind_data;

	offload_ind = HTT_RX_IN_ORD_PADDR_IND_OFFLOAD_GET(*msg_word);

	/* Get the total number of MSDUs */
	msdu_count = HTT_RX_IN_ORD_PADDR_IND_MSDU_CNT_GET(*(msg_word + 1));
	HTT_RX_CHECK_MSDU_COUNT(msdu_count);

	/* advance past the message header to the per-MSDU records */
	msg_word =
		(uint32_t *) (rx_ind_data + HTT_RX_IN_ORD_PADDR_IND_HDR_BYTES);
	if (offload_ind) {
		ol_rx_offload_paddr_deliver_ind_handler(pdev, msdu_count,
							msg_word);
		*head_msdu = *tail_msdu = NULL;
		return 0;
	}

	(*head_msdu) = msdu = htt_rx_in_order_netbuf_pop(
		pdev,
		HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*msg_word));

	if (cdf_unlikely(NULL == msdu)) {
		cdf_print("%s: netbuf pop failed!\n", __func__);
		*tail_msdu = NULL;
		return 0;
	}

	while (msdu_count > 0) {

		/*
		 * Set the netbuf length to be the entire buffer length
		 * initially, so the unmap will unmap the entire buffer.
		 */
		cdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
#ifdef DEBUG_DMA_DONE
		cdf_nbuf_unmap(pdev->osdev, msdu, CDF_DMA_BIDIRECTIONAL);
#else
		cdf_nbuf_unmap(pdev->osdev, msdu, CDF_DMA_FROM_DEVICE);
#endif

		/* cache consistency has been taken care of by cdf_nbuf_unmap */
		rx_desc = htt_rx_desc(msdu);

		htt_rx_extract_lro_info(msdu, rx_desc);

		/*
		 * Make the netbuf's data pointer point to the payload rather
		 * than the descriptor.
		 */
		cdf_nbuf_pull_head(msdu, HTT_RX_STD_DESC_RESERVATION);
		/* dword offset of the len/fw_desc field within a per-MSDU
		 * record depends on the physical-address width */
#if HTT_PADDR64
#define NEXT_FIELD_OFFSET_IN32 2
#else /* ! HTT_PADDR64 */
#define NEXT_FIELD_OFFSET_IN32 1
#endif /* HTT_PADDR64 */
#
		cdf_nbuf_trim_tail(msdu,
				   HTT_RX_BUF_SIZE -
				   (RX_STD_DESC_SIZE +
				    HTT_RX_IN_ORD_PADDR_IND_MSDU_LEN_GET(
				    *(msg_word + NEXT_FIELD_OFFSET_IN32))));
#if defined(HELIUMPLUS_DEBUG)
		dump_pkt(msdu, 0, 64);
#endif
		*((uint8_t *) &rx_desc->fw_desc.u.val) =
			HTT_RX_IN_ORD_PADDR_IND_FW_DESC_GET(*(msg_word + NEXT_FIELD_OFFSET_IN32));
#undef NEXT_FIELD_OFFSET_IN32

		msdu_count--;

		/* MIC failure: report the frame, free it, and splice it
		 * out of the chain being built */
		if (cdf_unlikely((*((u_int8_t *) &rx_desc->fw_desc.u.val)) &
				 FW_RX_DESC_MIC_ERR_M)) {
			u_int8_t tid =
				HTT_RX_IN_ORD_PADDR_IND_EXT_TID_GET(
					*(u_int32_t *)rx_ind_data);
			u_int16_t peer_id =
				HTT_RX_IN_ORD_PADDR_IND_PEER_ID_GET(
					*(u_int32_t *)rx_ind_data);
			ol_rx_mic_error_handler(pdev->txrx_pdev, tid, peer_id,
						rx_desc, msdu);

			htt_rx_desc_frame_free(pdev, msdu);
			/* if this is the last msdu */
			if (!msdu_count) {
				/* if this is the only msdu */
				if (!prev) {
					*head_msdu = *tail_msdu = NULL;
					return 0;
				} else {
					*tail_msdu = prev;
					cdf_nbuf_set_next(prev, NULL);
					return 1;
				}
			} else { /* if this is not the last msdu */
				/* get the next msdu */
				msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
				next = htt_rx_in_order_netbuf_pop(
					pdev,
					HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(
						*msg_word));
				if (cdf_unlikely(NULL == next)) {
					cdf_print("%s: netbuf pop failed!\n",
						  __func__);
					*tail_msdu = NULL;
					return 0;
				}

				/* if this is not the first msdu, update the
				 * next pointer of the preceding msdu
				 */
				if (prev) {
					cdf_nbuf_set_next(prev, next);
				} else {
					/* if this is the first msdu, update the
					 * head pointer
					 */
					*head_msdu = next;
				}
				msdu = next;
				continue;
			}
		}

		/* Update checksum result */
		htt_set_checksum_result_ll(pdev, msdu, rx_desc);

		/* check if this is the last msdu */
		if (msdu_count) {
			msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
			next = htt_rx_in_order_netbuf_pop(
				pdev,
				HTT_RX_IN_ORD_PADDR_IND_PADDR_GET(*msg_word));
			if (cdf_unlikely(NULL == next)) {
				cdf_print("%s: netbuf pop failed!\n",
					  __func__);
				*tail_msdu = NULL;
				return 0;
			}
			cdf_nbuf_set_next(msdu, next);
			prev = msdu;
			msdu = next;
		} else {
			*tail_msdu = msdu;
			cdf_nbuf_set_next(msdu, NULL);
		}
	}

	return 1;
}
+
/* Util fake function with the same prototype as cdf_nbuf_clone; it just
 * returns the same nbuf, for use when the caller does not need a real clone.
 */
cdf_nbuf_t htt_rx_cdf_noclone_buf(cdf_nbuf_t buf)
{
	return buf;
}
+
/* FIXME: This is a HW definition not provided by HW, where does it go ? */
/* Decap-format values reported by the HW in the rx descriptor. */
enum {
	HW_RX_DECAP_FORMAT_RAW = 0,     /* undecapped 802.11 frame */
	HW_RX_DECAP_FORMAT_NWIFI,       /* native-wifi header */
	HW_RX_DECAP_FORMAT_8023,        /* 802.3 header */
	HW_RX_DECAP_FORMAT_ETH2,        /* Ethernet II header */
};
+
/* length of the frame check sequence trailing a raw 802.11 frame */
#define HTT_FCS_LEN (4)

/**
 * htt_rx_parse_ppdu_start_status() - fill rx_status from the ppdu_start desc
 * @rx_desc: HW rx descriptor whose ppdu_start fields are parsed
 * @rs: rx status to populate (rs_rssi and rs_ratephy)
 *
 * Copies the combined RSSI and packs the PHY rate information into
 * rs_ratephy according to the bit layout documented below.
 */
static void
htt_rx_parse_ppdu_start_status(struct htt_host_rx_desc_base *rx_desc,
			       struct ieee80211_rx_status *rs)
{

	struct rx_ppdu_start *ppdu_start = &rx_desc->ppdu_start;

	/* RSSI */
	rs->rs_rssi = ppdu_start->rssi_comb;

	/* PHY rate */
	/* rs_ratephy coding
	   [b3 - b0]
	   0 -> OFDM
	   1 -> CCK
	   2 -> HT
	   3 -> VHT
	   OFDM / CCK
	   [b7 - b4 ] => LSIG rate
	   [b23 - b8 ] => service field
	   (b'12 static/dynamic,
	   b'14..b'13 BW for VHT)
	   [b31 - b24 ] => Reserved
	   HT / VHT
	   [b15 - b4 ] => SIG A_2 12 LSBs
	   [b31 - b16] => SIG A_1 16 LSBs

	 */
	/* NOTE(review): 0x4 is presumably the legacy (L-SIG only) preamble
	 * type - confirm against the HW preamble_type encoding. */
	if (ppdu_start->preamble_type == 0x4) {
		rs->rs_ratephy = ppdu_start->l_sig_rate_select;
		rs->rs_ratephy |= ppdu_start->l_sig_rate << 4;
		rs->rs_ratephy |= ppdu_start->service << 8;
	} else {
		/* per the coding above: 3 = VHT, 2 = HT */
		rs->rs_ratephy = (ppdu_start->preamble_type & 0x4) ? 3 : 2;
#ifdef HELIUMPLUS
		rs->rs_ratephy |=
			(ppdu_start->ht_sig_vht_sig_ah_sig_a_2 & 0xFFF) << 4;
		rs->rs_ratephy |=
			(ppdu_start->ht_sig_vht_sig_ah_sig_a_1 & 0xFFFF) << 16;
#else
		rs->rs_ratephy |= (ppdu_start->ht_sig_vht_sig_a_2 & 0xFFF) << 4;
		rs->rs_ratephy |=
			(ppdu_start->ht_sig_vht_sig_a_1 & 0xFFFF) << 16;
#endif
	}

	return;
}
+
/* This function is used by monitor mode code to restitch an MSDU list
 * corresponding to an MPDU back into an MPDU by linking up the skbs.
 */
/**
 * htt_rx_restitch_mpdu_from_msdus() - rebuild one MPDU from its MSDU chain
 * @pdev: HTT pdev handle (used for nbuf allocation)
 * @head_msdu: first netbuf of the decapped MSDU chain
 * @rx_status: rx status to fill from the PPDU start/end descriptor fields
 * @clone_not_reqd: non-zero to reuse the original nbufs instead of cloning
 *
 * For RAW decap, the MSDUs are chained as an ext (frag) list with the FCS
 * trimmed from the last buffer.  For decapped formats, a fresh header
 * buffer is allocated, the 802.11 header and per-MSDU LLC headers are
 * copied back out of the rx descriptor's rx_hdr_status area, and the
 * payload buffers are attached as an ext list with A-MSDU subframe padding
 * re-inserted.
 *
 * Return: the restitched MPDU netbuf, or NULL on failure (all buffers this
 * function allocated/cloned - and, in the reuse case, the originals - are
 * freed on the failure path).
 */
cdf_nbuf_t
htt_rx_restitch_mpdu_from_msdus(htt_pdev_handle pdev,
				cdf_nbuf_t head_msdu,
				struct ieee80211_rx_status *rx_status,
				unsigned clone_not_reqd)
{

	cdf_nbuf_t msdu, mpdu_buf, prev_buf, msdu_orig, head_frag_list_cloned;
	cdf_nbuf_t (*clone_nbuf_fn)(cdf_nbuf_t buf);
	unsigned decap_format, wifi_hdr_len, sec_hdr_len, msdu_llc_len,
		 mpdu_buf_len, decap_hdr_pull_bytes, frag_list_sum_len, dir,
		 is_amsdu, is_first_frag, amsdu_pad, msdu_len;
	struct htt_host_rx_desc_base *rx_desc;
	char *hdr_desc;
	unsigned char *dest;
	struct ieee80211_frame *wh;
	struct ieee80211_qoscntl *qos;

	/* If this packet does not go up the normal stack path we dont need to
	 * waste cycles cloning the packets
	 */
	clone_nbuf_fn =
		clone_not_reqd ? htt_rx_cdf_noclone_buf : cdf_nbuf_clone;

	/* The nbuf has been pulled just beyond the status and points to the
	 * payload
	 */
	msdu_orig = head_msdu;
	rx_desc = htt_rx_desc(msdu_orig);

	/* Fill out the rx_status from the PPDU start and end fields */
	if (rx_desc->attention.first_mpdu) {
		htt_rx_parse_ppdu_start_status(rx_desc, rx_status);

		/* The timestamp is no longer valid - It will be valid only for
		 * the last MPDU
		 */
		rx_status->rs_tstamp.tsf = ~0;
	}

	decap_format =
		GET_FIELD(&rx_desc->msdu_start, RX_MSDU_START_2_DECAP_FORMAT);

	head_frag_list_cloned = NULL;

	/* Easy case - The MSDU status indicates that this is a non-decapped
	 * packet in RAW mode.
	 * return
	 */
	if (decap_format == HW_RX_DECAP_FORMAT_RAW) {
		/* Note that this path might suffer from headroom unavailabilty,
		 * but the RX status is usually enough
		 */
		/* NOTE(review): unlike the loop below, this clone result is
		 * not NULL-checked before cdf_nbuf_len() - confirm whether
		 * clone failure is possible here. */
		mpdu_buf = clone_nbuf_fn(head_msdu);

		prev_buf = mpdu_buf;

		frag_list_sum_len = 0;
		is_first_frag = 1;
		msdu_len = cdf_nbuf_len(mpdu_buf);

		/* Drop the zero-length msdu */
		if (!msdu_len)
			goto mpdu_stitch_fail;

		msdu_orig = cdf_nbuf_next(head_msdu);

		while (msdu_orig) {

			/* TODO: intra AMSDU padding - do we need it ??? */
			msdu = clone_nbuf_fn(msdu_orig);
			if (!msdu)
				goto mpdu_stitch_fail;

			if (is_first_frag) {
				is_first_frag = 0;
				head_frag_list_cloned = msdu;
			}

			msdu_len = cdf_nbuf_len(msdu);
			/* Drop the zero-length msdu */
			if (!msdu_len)
				goto mpdu_stitch_fail;

			frag_list_sum_len += msdu_len;

			/* Maintain the linking of the cloned MSDUS */
			cdf_nbuf_set_next_ext(prev_buf, msdu);

			/* Move to the next */
			prev_buf = msdu;
			msdu_orig = cdf_nbuf_next(msdu_orig);
		}

		/* The last msdu length need be larger than HTT_FCS_LEN */
		if (msdu_len < HTT_FCS_LEN)
			goto mpdu_stitch_fail;

		cdf_nbuf_trim_tail(prev_buf, HTT_FCS_LEN);

		/* If there were more fragments to this RAW frame */
		if (head_frag_list_cloned) {
			cdf_nbuf_append_ext_list(mpdu_buf,
						 head_frag_list_cloned,
						 frag_list_sum_len);
		}

		goto mpdu_stitch_done;
	}

	/* Decap mode:
	 * Calculate the amount of header in decapped packet to knock off based
	 * on the decap type and the corresponding number of raw bytes to copy
	 * status header
	 */

	hdr_desc = &rx_desc->rx_hdr_status[0];

	/* Base size */
	wifi_hdr_len = sizeof(struct ieee80211_frame);
	wh = (struct ieee80211_frame *)hdr_desc;

	/* 4-address (WDS) frames carry an extra 6-byte address */
	dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
	if (dir == IEEE80211_FC1_DIR_DSTODS)
		wifi_hdr_len += 6;

	is_amsdu = 0;
	if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
		qos = (struct ieee80211_qoscntl *)
		      (hdr_desc + wifi_hdr_len);
		wifi_hdr_len += 2;

		is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU);
	}

	/* TODO: Any security headers associated with MPDU */
	sec_hdr_len = 0;

	/* MSDU related stuff LLC - AMSDU subframe header etc */
	msdu_llc_len = is_amsdu ? (14 + 8) : 8;

	mpdu_buf_len = wifi_hdr_len + sec_hdr_len + msdu_llc_len;

	/* "Decap" header to remove from MSDU buffer */
	decap_hdr_pull_bytes = 14;

	/* Allocate a new nbuf for holding the 802.11 header retrieved from the
	 * status of the now decapped first msdu. Leave enough headroom for
	 * accomodating any radio-tap /prism like PHY header
	 */
#define HTT_MAX_MONITOR_HEADER (512)
	mpdu_buf = cdf_nbuf_alloc(pdev->osdev,
				  HTT_MAX_MONITOR_HEADER + mpdu_buf_len,
				  HTT_MAX_MONITOR_HEADER, 4, false);

	if (!mpdu_buf)
		goto mpdu_stitch_fail;

	/* Copy the MPDU related header and enc headers into the first buffer
	 * - Note that there can be a 2 byte pad between heaader and enc header
	 */

	prev_buf = mpdu_buf;
	dest = cdf_nbuf_put_tail(prev_buf, wifi_hdr_len);
	if (!dest)
		goto mpdu_stitch_fail;
	cdf_mem_copy(dest, hdr_desc, wifi_hdr_len);
	hdr_desc += wifi_hdr_len;

	/* NOTE - This padding is present only in the RAW header status - not
	 * when the MSDU data payload is in RAW format.
	 */
	/* Skip the "IV pad" */
	if (wifi_hdr_len & 0x3)
		hdr_desc += 2;

	/* The first LLC len is copied into the MPDU buffer */
	frag_list_sum_len = 0;
	frag_list_sum_len -= msdu_llc_len;

	msdu_orig = head_msdu;
	is_first_frag = 1;
	amsdu_pad = 0;

	while (msdu_orig) {

		/* TODO: intra AMSDU padding - do we need it ??? */

		msdu = clone_nbuf_fn(msdu_orig);
		if (!msdu)
			goto mpdu_stitch_fail;

		if (is_first_frag) {
			is_first_frag = 0;
			head_frag_list_cloned = msdu;
		} else {

			/* Maintain the linking of the cloned MSDUS */
			cdf_nbuf_set_next_ext(prev_buf, msdu);

			/* Reload the hdr ptr only on non-first MSDUs */
			rx_desc = htt_rx_desc(msdu_orig);
			hdr_desc = &rx_desc->rx_hdr_status[0];

		}

		/* Copy this buffers MSDU related status into the prev buffer */
		/* NOTE(review): this put_tail result is not NULL-checked
		 * before use, unlike the wifi header copy above - confirm
		 * tailroom is guaranteed here. */
		dest = cdf_nbuf_put_tail(prev_buf, msdu_llc_len + amsdu_pad);
		dest += amsdu_pad;
		cdf_mem_copy(dest, hdr_desc, msdu_llc_len);

		/* Push the MSDU buffer beyond the decap header */
		cdf_nbuf_pull_head(msdu, decap_hdr_pull_bytes);
		frag_list_sum_len +=
			msdu_llc_len + cdf_nbuf_len(msdu) + amsdu_pad;

		/* Set up intra-AMSDU pad to be added to start of next buffer -
		 * AMSDU pad is 4 byte pad on AMSDU subframe */
		amsdu_pad = (msdu_llc_len + cdf_nbuf_len(msdu)) & 0x3;
		amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;

		/* TODO FIXME How do we handle MSDUs that have fraglist - Should
		 * probably iterate all the frags cloning them along the way and
		 * and also updating the prev_buf pointer
		 */

		/* Move to the next */
		prev_buf = msdu;
		msdu_orig = cdf_nbuf_next(msdu_orig);

	}

	/* TODO: Convert this to suitable cdf routines */
	cdf_nbuf_append_ext_list(mpdu_buf, head_frag_list_cloned,
				 frag_list_sum_len);

mpdu_stitch_done:
	/* Check if this buffer contains the PPDU end status for TSF */
	if (rx_desc->attention.last_mpdu)
#ifdef HELIUMPLUS
		rx_status->rs_tstamp.tsf =
			rx_desc->ppdu_end.rx_pkt_end.phy_timestamp_1_lower_32;
#else
		rx_status->rs_tstamp.tsf = rx_desc->ppdu_end.tsf_timestamp;
#endif
	/* All the nbufs have been linked into the ext list and
	   then unlink the nbuf list */
	if (clone_not_reqd) {
		msdu = head_msdu;
		while (msdu) {
			msdu_orig = msdu;
			msdu = cdf_nbuf_next(msdu);
			cdf_nbuf_set_next(msdu_orig, NULL);
		}
	}

	return mpdu_buf;

mpdu_stitch_fail:
	/* Free these alloced buffers and the orig buffers in non-clone case */
	if (!clone_not_reqd) {
		/* Free the head buffer */
		if (mpdu_buf)
			cdf_nbuf_free(mpdu_buf);

		/* Free the partial list */
		while (head_frag_list_cloned) {
			msdu = head_frag_list_cloned;
			head_frag_list_cloned =
				cdf_nbuf_next_ext(head_frag_list_cloned);
			cdf_nbuf_free(msdu);
		}
	} else {
		/* Free the alloced head buffer */
		if (decap_format != HW_RX_DECAP_FORMAT_RAW)
			if (mpdu_buf)
				cdf_nbuf_free(mpdu_buf);

		/* Free the orig buffers */
		msdu = head_msdu;
		while (msdu) {
			msdu_orig = msdu;
			msdu = cdf_nbuf_next(msdu);
			cdf_nbuf_free(msdu_orig);
		}
	}

	return NULL;
}
+
/* Per-MPDU RSSI accessor; always reports "invalid" for LL rx descriptors. */
int16_t htt_rx_mpdu_desc_rssi_dbm(htt_pdev_handle pdev, void *mpdu_desc)
{
	/*
	 * Currently the RSSI is provided only as a field in the
	 * HTT_T2H_RX_IND message, rather than in each rx descriptor.
	 */
	return HTT_RSSI_INVALID;
}
+
+/*
+ * htt_rx_amsdu_pop -
+ * global function pointer that is programmed during attach to point
+ * to either htt_rx_amsdu_pop_ll or htt_rx_amsdu_rx_in_order_pop_ll.
+ */
+int (*htt_rx_amsdu_pop)(htt_pdev_handle pdev,
+ cdf_nbuf_t rx_ind_msg,
+ cdf_nbuf_t *head_msdu, cdf_nbuf_t *tail_msdu);
+
+/*
+ * htt_rx_frag_pop -
+ * global function pointer that is programmed during attach to point
+ * to either htt_rx_amsdu_pop_ll
+ */
+int (*htt_rx_frag_pop)(htt_pdev_handle pdev,
+ cdf_nbuf_t rx_ind_msg,
+ cdf_nbuf_t *head_msdu, cdf_nbuf_t *tail_msdu);
+
+int
+(*htt_rx_offload_msdu_pop)(htt_pdev_handle pdev,
+ cdf_nbuf_t offload_deliver_msg,
+ int *vdev_id,
+ int *peer_id,
+ int *tid,
+ uint8_t *fw_desc,
+ cdf_nbuf_t *head_buf, cdf_nbuf_t *tail_buf);
+
+void * (*htt_rx_mpdu_desc_list_next)(htt_pdev_handle pdev,
+ cdf_nbuf_t rx_ind_msg);
+
+bool (*htt_rx_mpdu_desc_retry)(
+ htt_pdev_handle pdev, void *mpdu_desc);
+
+uint16_t (*htt_rx_mpdu_desc_seq_num)(htt_pdev_handle pdev, void *mpdu_desc);
+
+void (*htt_rx_mpdu_desc_pn)(htt_pdev_handle pdev,
+ void *mpdu_desc,
+ union htt_rx_pn_t *pn, int pn_len_bits);
+
+uint8_t (*htt_rx_mpdu_desc_tid)(
+ htt_pdev_handle pdev, void *mpdu_desc);
+
+bool (*htt_rx_msdu_desc_completes_mpdu)(htt_pdev_handle pdev, void *msdu_desc);
+
+bool (*htt_rx_msdu_first_msdu_flag)(htt_pdev_handle pdev, void *msdu_desc);
+
+int (*htt_rx_msdu_has_wlan_mcast_flag)(htt_pdev_handle pdev, void *msdu_desc);
+
+bool (*htt_rx_msdu_is_wlan_mcast)(htt_pdev_handle pdev, void *msdu_desc);
+
+int (*htt_rx_msdu_is_frag)(htt_pdev_handle pdev, void *msdu_desc);
+
+void * (*htt_rx_msdu_desc_retrieve)(htt_pdev_handle pdev, cdf_nbuf_t msdu);
+
+bool (*htt_rx_mpdu_is_encrypted)(htt_pdev_handle pdev, void *mpdu_desc);
+
+bool (*htt_rx_msdu_desc_key_id)(htt_pdev_handle pdev,
+ void *mpdu_desc, uint8_t *key_id);
+
/* Return the rx descriptor of the netbuf at the current msdu_desc read
 * index, then advance msdu_desc to catch up with msdu_payld (which the
 * pop routines advance as buffers are consumed).
 */
void *htt_rx_mpdu_desc_list_next_ll(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg)
{
	int idx = pdev->rx_ring.sw_rd_idx.msdu_desc;
	cdf_nbuf_t netbuf = pdev->rx_ring.buf.netbufs_ring[idx];
	pdev->rx_ring.sw_rd_idx.msdu_desc = pdev->rx_ring.sw_rd_idx.msdu_payld;
	return (void *)htt_rx_desc(netbuf);
}
+
+bool (*htt_rx_msdu_chan_info_present)(
+ htt_pdev_handle pdev,
+ void *mpdu_desc);
+
+bool (*htt_rx_msdu_center_freq)(
+ htt_pdev_handle pdev,
+ struct ol_txrx_peer_t *peer,
+ void *mpdu_desc,
+ uint16_t *primary_chan_center_freq_mhz,
+ uint16_t *contig_chan1_center_freq_mhz,
+ uint16_t *contig_chan2_center_freq_mhz,
+ uint8_t *phy_mode);
+
/* In-order variant: each netbuf directly carries its own rx descriptor,
 * so no ring-index bookkeeping is needed.
 */
void *htt_rx_in_ord_mpdu_desc_list_next_ll(htt_pdev_handle pdev,
					   cdf_nbuf_t netbuf)
{
	return (void *)htt_rx_desc(netbuf);
}
+
/* LL accessor: the MSDU's rx descriptor sits in front of its payload. */
void *htt_rx_msdu_desc_retrieve_ll(htt_pdev_handle pdev, cdf_nbuf_t msdu)
{
	return htt_rx_desc(msdu);
}
+
+bool htt_rx_mpdu_is_encrypted_ll(htt_pdev_handle pdev, void *mpdu_desc)
+{
+ struct htt_host_rx_desc_base *rx_desc =
+ (struct htt_host_rx_desc_base *)mpdu_desc;
+
+ return (((*((uint32_t *) &rx_desc->mpdu_start)) &
+ RX_MPDU_START_0_ENCRYPTED_MASK) >>
+ RX_MPDU_START_0_ENCRYPTED_LSB) ? true : false;
+}
+
/* LL rx descriptors never carry per-MSDU channel info. */
bool htt_rx_msdu_chan_info_present_ll(htt_pdev_handle pdev, void *mpdu_desc)
{
	return false;
}
+
+bool htt_rx_msdu_center_freq_ll(htt_pdev_handle pdev,
+ struct ol_txrx_peer_t *peer,
+ void *mpdu_desc,
+ uint16_t *primary_chan_center_freq_mhz,
+ uint16_t *contig_chan1_center_freq_mhz,
+ uint16_t *contig_chan2_center_freq_mhz,
+ uint8_t *phy_mode)
+{
+ if (primary_chan_center_freq_mhz)
+ *primary_chan_center_freq_mhz = 0;
+ if (contig_chan1_center_freq_mhz)
+ *contig_chan1_center_freq_mhz = 0;
+ if (contig_chan2_center_freq_mhz)
+ *contig_chan2_center_freq_mhz = 0;
+ if (phy_mode)
+ *phy_mode = 0;
+ return false;
+}
+
/**
 * htt_rx_msdu_desc_key_id_ll() - fetch the key-id octet of an MPDU's first MSDU
 * @pdev: HTT pdev handle
 * @mpdu_desc: HW rx descriptor of the MSDU
 * @key_id: output - key id octet from msdu_end word 1
 *
 * Return: true and fills @key_id if this is the first MSDU of the MPDU,
 * else false (@key_id untouched).
 */
bool
htt_rx_msdu_desc_key_id_ll(htt_pdev_handle pdev, void *mpdu_desc,
			   uint8_t *key_id)
{
	struct htt_host_rx_desc_base *rx_desc = (struct htt_host_rx_desc_base *)
						mpdu_desc;

	if (!htt_rx_msdu_first_msdu_flag_ll(pdev, mpdu_desc))
		return false;

	/* NOTE(review): this masks with (MASK >> LSB) instead of the
	 * (word & MASK) >> LSB extraction idiom used elsewhere in this file;
	 * the two agree only if RX_MSDU_END_1_KEY_ID_OCT_LSB == 0 - verify
	 * against the rx_desc definition. */
	*key_id = ((*(((uint32_t *) &rx_desc->msdu_end) + 1)) &
		   (RX_MSDU_END_1_KEY_ID_OCT_MASK >>
		    RX_MSDU_END_1_KEY_ID_OCT_LSB));

	return true;
}
+
/* Free an rx frame; the rx descriptor lives in the same buffer, so a
 * single nbuf free releases both.
 */
void htt_rx_desc_frame_free(htt_pdev_handle htt_pdev, cdf_nbuf_t msdu)
{
	cdf_nbuf_free(msdu);
}
+
/* Intentionally a no-op for LL; see the comment below. */
void htt_rx_msdu_desc_free(htt_pdev_handle htt_pdev, cdf_nbuf_t msdu)
{
	/*
	 * The rx descriptor is in the same buffer as the rx MSDU payload,
	 * and does not need to be freed separately.
	 */
}
+
+/*
+ * Top up the rx ring to its configured fill level.
+ *
+ * refill_ref_cnt acts as a single-entry guard: it idles at 1 (see the
+ * init + inc in htt_rx_attach), so only the caller whose decrement
+ * brings it to 0 performs the refill; a concurrent caller just
+ * re-increments and leaves without touching the ring.
+ */
+void htt_rx_msdu_buff_replenish(htt_pdev_handle pdev)
+{
+	if (cdf_atomic_dec_and_test(&pdev->rx_ring.refill_ref_cnt)) {
+		int num_to_fill;
+		/* may be <= 0 if the ring is already at/above fill level */
+		num_to_fill = pdev->rx_ring.fill_level -
+			pdev->rx_ring.fill_cnt;
+
+		htt_rx_ring_fill_n(pdev,
+				   num_to_fill /* okay if <= 0 */);
+	}
+	cdf_atomic_inc(&pdev->rx_ring.refill_ref_cnt);
+}
+
+/* Pack a HW rate code: preamble type in bits [7:6], NSS in bits [5:4],
+ * rate index in bits [3:0].
+ */
+#define AR600P_ASSEMBLE_HW_RATECODE(_rate, _nss, _pream) \
+	(((_pream) << 6) | ((_nss) << 4) | (_rate))
+
+/* Preamble type values for the _pream field above (0..3). */
+enum AR600P_HW_RATECODE_PREAM_TYPE {
+	AR600P_HW_RATECODE_PREAM_OFDM,
+	AR600P_HW_RATECODE_PREAM_CCK,
+	AR600P_HW_RATECODE_PREAM_HT,
+	AR600P_HW_RATECODE_PREAM_VHT,
+};
+
+/*--- RX In Order Hash Code --------------------------------------------------*/
+
+/* Make the head node point at itself, i.e. an empty circular
+ * doubly-linked list.
+ */
+static inline void htt_list_init(struct htt_list_node *head)
+{
+	head->next = head;
+	head->prev = head;
+}
+
+/* Append 'node' just before the head sentinel, i.e. at the list tail. */
+static inline void htt_list_add_tail(struct htt_list_node *head,
+				     struct htt_list_node *node)
+{
+	struct htt_list_node *tail = head->prev;
+
+	tail->next = node;
+	node->prev = tail;
+	node->next = head;
+	head->prev = node;
+}
+
+/* Unlink 'node' from whatever list it is on; the node's own pointers
+ * are left dangling and must not be followed afterwards.
+ */
+static inline void htt_list_remove(struct htt_list_node *node)
+{
+	struct htt_list_node *prev = node->prev;
+	struct htt_list_node *next = node->next;
+
+	prev->next = next;
+	next->prev = prev;
+}
+
+/* Helper macro to iterate through the linked list; visits every node
+ * except the head sentinel itself.  'iter' must be an lvalue of type
+ * struct htt_list_node *.
+ */
+#define HTT_LIST_ITER_FWD(iter, head) for (iter = (head)->next;	\
+					   (iter) != (head);	\
+					   (iter) = (iter)->next) \
+
+#ifdef RX_HASH_DEBUG
+/* Hash cookie related macros: a known pattern stamped into each hash
+ * entry so corruption or stale entries are caught at lookup time.
+ */
+#define HTT_RX_HASH_COOKIE 0xDEED
+
+#define HTT_RX_HASH_COOKIE_SET(hash_element) \
+	((hash_element)->cookie = HTT_RX_HASH_COOKIE)
+
+#define HTT_RX_HASH_COOKIE_CHECK(hash_element) \
+	HTT_ASSERT_ALWAYS((hash_element)->cookie == HTT_RX_HASH_COOKIE)
+
+/* Hash count related macros: per-bucket occupancy bookkeeping used only
+ * for debug logging.
+ */
+#define HTT_RX_HASH_COUNT_INCR(hash_bucket) \
+	((hash_bucket).count++)
+
+#define HTT_RX_HASH_COUNT_DECR(hash_bucket) \
+	((hash_bucket).count--)
+
+#define HTT_RX_HASH_COUNT_RESET(hash_bucket) ((hash_bucket).count = 0)
+
+#define HTT_RX_HASH_COUNT_PRINT(hash_bucket) \
+	RX_HASH_LOG(cdf_print(" count %d\n", (hash_bucket).count))
+#else /* RX_HASH_DEBUG */
+/* Hash cookie related macros */
+#define HTT_RX_HASH_COOKIE_SET(hash_element)	/* no-op */
+#define HTT_RX_HASH_COOKIE_CHECK(hash_element)	/* no-op */
+/* Hash count related macros */
+#define HTT_RX_HASH_COUNT_INCR(hash_bucket)	/* no-op */
+#define HTT_RX_HASH_COUNT_DECR(hash_bucket)	/* no-op */
+#define HTT_RX_HASH_COUNT_PRINT(hash_bucket)	/* no-op */
+#define HTT_RX_HASH_COUNT_RESET(hash_bucket)	/* no-op */
+#endif /* RX_HASH_DEBUG */
+
+/* Inserts the given "physical address - network buffer" pair into the
+   hash table for the given pdev. This function will do the following:
+   1. Determine which bucket to insert the pair into
+   2. First try to allocate the hash entry for this pair from the pre-allocated
+      entries list
+   3. If there are no more entries in the pre-allocated entries list, allocate
+      the hash entry from the hash memory pool
+   Note: this function is not thread-safe
+   Returns 0 - success, 1 - failure */
+int
+htt_rx_hash_list_insert(struct htt_pdev_t *pdev, uint32_t paddr,
+			cdf_nbuf_t netbuf)
+{
+	int i;
+	struct htt_rx_hash_entry *hash_element = NULL;
+
+	i = RX_HASH_FUNCTION(paddr);
+
+	/* Check if there are any entries in the pre-allocated free list */
+	if (pdev->rx_ring.hash_table[i].freepool.next !=
+	    &pdev->rx_ring.hash_table[i].freepool) {
+
+		/* recover the entry that embeds this freepool list node */
+		hash_element =
+			(struct htt_rx_hash_entry *)(
+				(char *)
+				pdev->rx_ring.hash_table[i].freepool.next -
+				pdev->rx_ring.listnode_offset);
+		/* NOTE(review): hash_element is a non-NULL list pointer
+		 * minus a fixed offset, so it cannot be NULL here; this
+		 * check appears to be dead code - confirm before removing.
+		 */
+		if (cdf_unlikely(NULL == hash_element)) {
+			HTT_ASSERT_ALWAYS(0);
+			return 1;
+		}
+
+		htt_list_remove(pdev->rx_ring.hash_table[i].freepool.next);
+	} else {
+		/* free pool exhausted - fall back to a heap allocation */
+		hash_element = cdf_mem_malloc(sizeof(struct htt_rx_hash_entry));
+		if (cdf_unlikely(NULL == hash_element)) {
+			HTT_ASSERT_ALWAYS(0);
+			return 1;
+		}
+		/* mark as heap-allocated so lookup frees rather than
+		 * recycles it */
+		hash_element->fromlist = 0;
+	}
+
+	hash_element->netbuf = netbuf;
+	hash_element->paddr = paddr;
+	HTT_RX_HASH_COOKIE_SET(hash_element);
+
+	htt_list_add_tail(&pdev->rx_ring.hash_table[i].listhead,
+			  &hash_element->listnode);
+
+	RX_HASH_LOG(cdf_print("rx hash: %s: paddr 0x%x netbuf %p bucket %d\n",
+			      __func__, paddr, netbuf, (int)i));
+
+	HTT_RX_HASH_COUNT_INCR(pdev->rx_ring.hash_table[i]);
+	HTT_RX_HASH_COUNT_PRINT(pdev->rx_ring.hash_table[i]);
+
+	return 0;
+}
+
+/* Given a physical address this function will find the corresponding network
+   buffer from the hash table, remove its entry, and return the buffer.
+   A pre-allocated entry is recycled to the bucket's free pool; a
+   heap-allocated one is freed.  Asserts if no entry matches paddr.
+   Note: this function is not thread-safe */
+cdf_nbuf_t htt_rx_hash_list_lookup(struct htt_pdev_t *pdev, uint32_t paddr)
+{
+	uint32_t i;
+	struct htt_list_node *list_iter = NULL;
+	cdf_nbuf_t netbuf = NULL;
+	struct htt_rx_hash_entry *hash_entry;
+
+	i = RX_HASH_FUNCTION(paddr);
+
+	HTT_LIST_ITER_FWD(list_iter, &pdev->rx_ring.hash_table[i].listhead) {
+		/* recover the entry that embeds this list node */
+		hash_entry = (struct htt_rx_hash_entry *)
+			     ((char *)list_iter -
+			      pdev->rx_ring.listnode_offset);
+
+		HTT_RX_HASH_COOKIE_CHECK(hash_entry);
+
+		if (hash_entry->paddr == paddr) {
+			/* Found the entry corresponding to paddr */
+			netbuf = hash_entry->netbuf;
+			htt_list_remove(&hash_entry->listnode);
+			HTT_RX_HASH_COUNT_DECR(pdev->rx_ring.hash_table[i]);
+			/* if the rx entry is from the pre-allocated list,
+			   return it */
+			if (hash_entry->fromlist)
+				htt_list_add_tail(&pdev->rx_ring.hash_table[i].
+						  freepool,
+						  &hash_entry->listnode);
+			else
+				cdf_mem_free(hash_entry);
+
+			htt_rx_dbg_rxbuf_reset(pdev, netbuf);
+			break;
+		}
+	}
+
+	RX_HASH_LOG(cdf_print("rx hash: %s: paddr 0x%x, netbuf %p, bucket %d\n",
+			      __func__, paddr, netbuf, (int)i));
+	HTT_RX_HASH_COUNT_PRINT(pdev->rx_ring.hash_table[i]);
+
+	/* a miss means the target handed back a paddr the host never
+	 * posted - treat as fatal */
+	if (netbuf == NULL) {
+		cdf_print("rx hash: %s: no entry found for 0x%x!!!\n",
+			  __func__, paddr);
+		HTT_ASSERT_ALWAYS(0);
+	}
+
+	return netbuf;
+}
+
+/* Initialization function of the rx buffer hash table. This function will
+   allocate a hash table of a certain pre-determined size and initialize all
+   the elements.  Each bucket gets RX_ENTRIES_SIZE pre-allocated entries
+   chained onto its free pool.  Returns 0 on success, 1 on allocation
+   failure (all partial allocations are released). */
+int htt_rx_hash_init(struct htt_pdev_t *pdev)
+{
+	int i, j;
+
+	/* RX_HASH_FUNCTION relies on the bucket count being a power of 2 */
+	HTT_ASSERT2(CDF_IS_PWR2(RX_NUM_HASH_BUCKETS));
+
+	pdev->rx_ring.hash_table =
+		cdf_mem_malloc(RX_NUM_HASH_BUCKETS *
+			       sizeof(struct htt_rx_hash_bucket));
+
+	if (NULL == pdev->rx_ring.hash_table) {
+		cdf_print("rx hash table allocation failed!\n");
+		return 1;
+	}
+
+	for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
+		HTT_RX_HASH_COUNT_RESET(pdev->rx_ring.hash_table[i]);
+
+		/* initialize the hash table buckets */
+		htt_list_init(&pdev->rx_ring.hash_table[i].listhead);
+
+		/* initialize the hash table free pool per bucket */
+		htt_list_init(&pdev->rx_ring.hash_table[i].freepool);
+
+		/* pre-allocate a pool of entries for this bucket */
+		pdev->rx_ring.hash_table[i].entries =
+			cdf_mem_malloc(RX_ENTRIES_SIZE *
+				       sizeof(struct htt_rx_hash_entry));
+
+		if (NULL == pdev->rx_ring.hash_table[i].entries) {
+			cdf_print("rx hash bucket %d entries alloc failed\n",
+				  (int)i);
+			/* unwind: free the entry pools of all buckets
+			 * initialized so far, then the table itself */
+			while (i) {
+				i--;
+				cdf_mem_free(pdev->rx_ring.hash_table[i].
+					     entries);
+			}
+			cdf_mem_free(pdev->rx_ring.hash_table);
+			pdev->rx_ring.hash_table = NULL;
+			return 1;
+		}
+
+		/* initialize the free list with pre-allocated entries */
+		for (j = 0; j < RX_ENTRIES_SIZE; j++) {
+			pdev->rx_ring.hash_table[i].entries[j].fromlist = 1;
+			htt_list_add_tail(&pdev->rx_ring.hash_table[i].freepool,
+					  &pdev->rx_ring.hash_table[i].
+					  entries[j].listnode);
+		}
+	}
+
+	/* cached offsetof(entry, listnode), used to recover an entry from
+	 * its embedded list node in insert/lookup */
+	pdev->rx_ring.listnode_offset =
+		cdf_offsetof(struct htt_rx_hash_entry, listnode);
+
+	return 0;
+}
+
+/* Debug helper: print every (netbuf, paddr) pair currently held in the
+ * rx buffer hash table, bucket by bucket.
+ */
+void htt_rx_hash_dump_table(struct htt_pdev_t *pdev)
+{
+	uint32_t bucket;
+	struct htt_list_node *node;
+	struct htt_rx_hash_entry *entry;
+
+	for (bucket = 0; bucket < RX_NUM_HASH_BUCKETS; bucket++) {
+		struct htt_list_node *head =
+			&pdev->rx_ring.hash_table[bucket].listhead;
+
+		for (node = head->next; node != head; node = node->next) {
+			entry = (struct htt_rx_hash_entry *)
+				((char *)node -
+				 pdev->rx_ring.listnode_offset);
+			cdf_print("hash_table[%d]: netbuf %p paddr 0x%x\n",
+				  bucket, entry->netbuf, entry->paddr);
+		}
+	}
+}
+
+/*--- RX In Order Hash Code --------------------------------------------------*/
+
+/* move the function to the end of file
+ * to omit ll/hl pre-declaration
+ */
+/**
+ * htt_rx_attach() - allocate and initialize the HTT rx ring resources
+ * @pdev: htt pdev to attach rx resources to
+ *
+ * Sets up either the full-reorder-offload resources (rx buffer hash
+ * table + target index) or the legacy netbufs ring, allocates the
+ * shared physical-address ring and alloc index, arms the refill guard
+ * and retry timer, performs the initial ring fill, and installs the
+ * LL variants of the rx function pointers.
+ *
+ * Return: 0 on success, 1 on failure (partial allocations are freed)
+ */
+int htt_rx_attach(struct htt_pdev_t *pdev)
+{
+	cdf_dma_addr_t paddr;
+#if HTT_PADDR64
+	uint32_t ring_elem_size = sizeof(uint64_t);
+#else
+	uint32_t ring_elem_size = sizeof(uint32_t);
+#endif /* HTT_PADDR64 */
+	pdev->rx_ring.size = htt_rx_ring_size(pdev);
+	HTT_ASSERT2(CDF_IS_PWR2(pdev->rx_ring.size));
+	pdev->rx_ring.size_mask = pdev->rx_ring.size - 1;
+
+	/*
+	 * Set the initial value for the level to which the rx ring
+	 * should be filled, based on the max throughput and the worst
+	 * likely latency for the host to fill the rx ring.
+	 * In theory, this fill level can be dynamically adjusted from
+	 * the initial value set here to reflect the actual host latency
+	 * rather than a conservative assumption.
+	 */
+	pdev->rx_ring.fill_level = htt_rx_ring_fill_level(pdev);
+
+	if (pdev->cfg.is_full_reorder_offload) {
+		if (htt_rx_hash_init(pdev))
+			goto fail1;
+
+		/* allocate the target index */
+		pdev->rx_ring.target_idx.vaddr =
+			cdf_os_mem_alloc_consistent(pdev->osdev,
+						    sizeof(uint32_t),
+						    &paddr,
+						    cdf_get_dma_mem_context(
+							(&pdev->rx_ring.target_idx),
+							memctx));
+
+		if (!pdev->rx_ring.target_idx.vaddr) {
+			/* fail1 does not undo the hash table init, so
+			 * release it here to avoid leaking it */
+			htt_rx_hash_deinit(pdev);
+			goto fail1;
+		}
+
+		pdev->rx_ring.target_idx.paddr = paddr;
+		*pdev->rx_ring.target_idx.vaddr = 0;
+	} else {
+		pdev->rx_ring.buf.netbufs_ring =
+			cdf_mem_malloc(pdev->rx_ring.size * sizeof(cdf_nbuf_t));
+		if (!pdev->rx_ring.buf.netbufs_ring)
+			goto fail1;
+
+		pdev->rx_ring.sw_rd_idx.msdu_payld = 0;
+		pdev->rx_ring.sw_rd_idx.msdu_desc = 0;
+	}
+
+	pdev->rx_ring.buf.paddrs_ring =
+		cdf_os_mem_alloc_consistent(
+			pdev->osdev,
+			pdev->rx_ring.size * ring_elem_size,
+			&paddr,
+			cdf_get_dma_mem_context(
+				(&pdev->rx_ring.buf),
+				memctx));
+	if (!pdev->rx_ring.buf.paddrs_ring)
+		goto fail2;
+
+	pdev->rx_ring.base_paddr = paddr;
+	pdev->rx_ring.alloc_idx.vaddr =
+		cdf_os_mem_alloc_consistent(
+			pdev->osdev,
+			sizeof(uint32_t),
+			&paddr,
+			cdf_get_dma_mem_context(
+				(&pdev->rx_ring.alloc_idx),
+				memctx));
+
+	if (!pdev->rx_ring.alloc_idx.vaddr)
+		goto fail3;
+
+	pdev->rx_ring.alloc_idx.paddr = paddr;
+	*pdev->rx_ring.alloc_idx.vaddr = 0;
+
+	/*
+	 * Initialize the Rx refill reference counter to be one so that
+	 * only one thread is allowed to refill the Rx ring.
+	 */
+	cdf_atomic_init(&pdev->rx_ring.refill_ref_cnt);
+	cdf_atomic_inc(&pdev->rx_ring.refill_ref_cnt);
+
+	/* Initialize the Rx refill retry timer */
+	cdf_softirq_timer_init(pdev->osdev,
+			       &pdev->rx_ring.refill_retry_timer,
+			       htt_rx_ring_refill_retry, (void *)pdev,
+			       CDF_TIMER_TYPE_SW);
+
+	pdev->rx_ring.fill_cnt = 0;
+#ifdef DEBUG_DMA_DONE
+	pdev->rx_ring.dbg_ring_idx = 0;
+	pdev->rx_ring.dbg_refill_cnt = 0;
+	pdev->rx_ring.dbg_sync_success = 0;
+#endif
+#ifdef HTT_RX_RESTORE
+	pdev->rx_ring.rx_reset = 0;
+	pdev->rx_ring.htt_rx_restore = 0;
+#endif
+	htt_rx_dbg_rxbuf_init(pdev);
+	htt_rx_ring_fill_n(pdev, pdev->rx_ring.fill_level);
+
+	/* install the function-pointer variants matching the rx mode */
+	if (pdev->cfg.is_full_reorder_offload) {
+		cdf_print("HTT: full reorder offload enabled\n");
+		htt_rx_amsdu_pop = htt_rx_amsdu_rx_in_order_pop_ll;
+		htt_rx_frag_pop = htt_rx_amsdu_rx_in_order_pop_ll;
+		htt_rx_mpdu_desc_list_next =
+			htt_rx_in_ord_mpdu_desc_list_next_ll;
+	} else {
+		htt_rx_amsdu_pop = htt_rx_amsdu_pop_ll;
+		htt_rx_frag_pop = htt_rx_amsdu_pop_ll;
+		htt_rx_mpdu_desc_list_next = htt_rx_mpdu_desc_list_next_ll;
+	}
+
+	htt_rx_offload_msdu_pop = htt_rx_offload_msdu_pop_ll;
+	htt_rx_mpdu_desc_retry = htt_rx_mpdu_desc_retry_ll;
+	htt_rx_mpdu_desc_seq_num = htt_rx_mpdu_desc_seq_num_ll;
+	htt_rx_mpdu_desc_pn = htt_rx_mpdu_desc_pn_ll;
+	htt_rx_mpdu_desc_tid = htt_rx_mpdu_desc_tid_ll;
+	htt_rx_msdu_desc_completes_mpdu = htt_rx_msdu_desc_completes_mpdu_ll;
+	htt_rx_msdu_first_msdu_flag = htt_rx_msdu_first_msdu_flag_ll;
+	htt_rx_msdu_has_wlan_mcast_flag = htt_rx_msdu_has_wlan_mcast_flag_ll;
+	htt_rx_msdu_is_wlan_mcast = htt_rx_msdu_is_wlan_mcast_ll;
+	htt_rx_msdu_is_frag = htt_rx_msdu_is_frag_ll;
+	htt_rx_msdu_desc_retrieve = htt_rx_msdu_desc_retrieve_ll;
+	htt_rx_mpdu_is_encrypted = htt_rx_mpdu_is_encrypted_ll;
+	htt_rx_msdu_desc_key_id = htt_rx_msdu_desc_key_id_ll;
+	htt_rx_msdu_chan_info_present = htt_rx_msdu_chan_info_present_ll;
+	htt_rx_msdu_center_freq = htt_rx_msdu_center_freq_ll;
+
+	return 0;		/* success */
+
+fail3:
+	/* free with the same size the ring was allocated with:
+	 * ring_elem_size (sizeof(uint64_t) when HTT_PADDR64), not a
+	 * hard-coded sizeof(uint32_t) */
+	cdf_os_mem_free_consistent(pdev->osdev,
+				   pdev->rx_ring.size * ring_elem_size,
+				   pdev->rx_ring.buf.paddrs_ring,
+				   pdev->rx_ring.base_paddr,
+				   cdf_get_dma_mem_context((&pdev->rx_ring.buf),
+							   memctx));
+
+fail2:
+	if (pdev->cfg.is_full_reorder_offload) {
+		cdf_os_mem_free_consistent(pdev->osdev,
+					   sizeof(uint32_t),
+					   pdev->rx_ring.target_idx.vaddr,
+					   pdev->rx_ring.target_idx.paddr,
+					   cdf_get_dma_mem_context((&pdev->
+								    rx_ring.
+								    target_idx),
+								   memctx));
+		htt_rx_hash_deinit(pdev);
+	} else {
+		cdf_mem_free(pdev->rx_ring.buf.netbufs_ring);
+	}
+
+fail1:
+	return 1;		/* failure */
+}
+
+#ifdef IPA_OFFLOAD
+#ifdef QCA_WIFI_3_0
+/**
+ * htt_rx_ipa_uc_alloc_wdi2_rsc() - Allocate WDI2.0 resources
+ * @pdev: htt context
+ * @rx_ind_ring_elements: rx ring elements
+ *
+ * Return: 0 success
+ */
+int htt_rx_ipa_uc_alloc_wdi2_rsc(struct htt_pdev_t *pdev,
+				 unsigned int rx_ind_ring_elements)
+{
+	/* Allocate RX2 indication ring */
+	/* RX2 IND ring element
+	 * 4bytes: pointer
+	 * 2bytes: VDEV ID
+	 * 2bytes: length */
+	pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr =
+		cdf_os_mem_alloc_consistent(
+			pdev->osdev,
+			rx_ind_ring_elements *
+			sizeof(struct ipa_uc_rx_ring_elem_t),
+			&pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr,
+			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
+						 rx2_ind_ring_base),
+						memctx));
+	if (!pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr) {
+		cdf_print("%s: RX IND RING alloc fail", __func__);
+		return -ENOBUFS;
+	}
+
+	/* RX indication ring size, by bytes */
+	pdev->ipa_uc_rx_rsc.rx2_ind_ring_size =
+		rx_ind_ring_elements * sizeof(struct ipa_uc_rx_ring_elem_t);
+	cdf_mem_zero(pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
+		     pdev->ipa_uc_rx_rsc.rx2_ind_ring_size);
+
+	/* Allocate RX process done index */
+	/* Fix: the DMA mem context must belong to the RX2 done index that
+	 * is being allocated here, not the WDI 1.0 rx_ipa_prc_done_idx. */
+	pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr =
+		cdf_os_mem_alloc_consistent(
+			pdev->osdev,
+			4,
+			&pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr,
+			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
+						 rx2_ipa_prc_done_idx),
+						memctx));
+	if (!pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr) {
+		cdf_print("%s: RX PROC DONE IND alloc fail", __func__);
+		/* roll back the RX2 indication ring allocated above */
+		cdf_os_mem_free_consistent(
+			pdev->osdev,
+			pdev->ipa_uc_rx_rsc.rx2_ind_ring_size,
+			pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
+			pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr,
+			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
+						 rx2_ind_ring_base),
+						memctx));
+		return -ENOBUFS;
+	}
+	cdf_mem_zero(pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr, 4);
+	return 0;
+}
+#else
+/* WDI 2.0 is only present on QCA_WIFI_3_0 targets; nothing to allocate
+ * here, so report success.
+ */
+int htt_rx_ipa_uc_alloc_wdi2_rsc(struct htt_pdev_t *pdev,
+				 unsigned int rx_ind_ring_elements)
+{
+	return 0;
+}
+#endif
+
+/**
+ * htt_rx_ipa_uc_attach() - attach htt ipa uc rx resource
+ * @pdev: htt context
+ * @rx_ind_ring_size: rx ring size
+ *
+ * Return: 0 success
+ */
+int htt_rx_ipa_uc_attach(struct htt_pdev_t *pdev,
+			 unsigned int rx_ind_ring_elements)
+{
+	int ret = 0;
+	/* Allocate RX indication ring */
+	/* RX IND ring element
+	 * 4bytes: pointer
+	 * 2bytes: VDEV ID
+	 * 2bytes: length */
+	pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr =
+		cdf_os_mem_alloc_consistent(
+			pdev->osdev,
+			rx_ind_ring_elements *
+			sizeof(struct ipa_uc_rx_ring_elem_t),
+			&pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr,
+			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
+						 rx_ind_ring_base),
+						memctx));
+	if (!pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr) {
+		cdf_print("%s: RX IND RING alloc fail", __func__);
+		return -ENOBUFS;
+	}
+
+	/* RX indication ring size, by bytes */
+	pdev->ipa_uc_rx_rsc.rx_ind_ring_size =
+		rx_ind_ring_elements * sizeof(struct ipa_uc_rx_ring_elem_t);
+	cdf_mem_zero(pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
+		     pdev->ipa_uc_rx_rsc.rx_ind_ring_size);
+
+	/* Allocate RX process done index (4 bytes, target-updated) */
+	pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr =
+		cdf_os_mem_alloc_consistent(
+			pdev->osdev,
+			4,
+			&pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr,
+			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
+						 rx_ipa_prc_done_idx),
+						memctx));
+	if (!pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr) {
+		cdf_print("%s: RX PROC DONE IND alloc fail", __func__);
+		/* roll back the indication ring allocated above */
+		cdf_os_mem_free_consistent(
+			pdev->osdev,
+			pdev->ipa_uc_rx_rsc.rx_ind_ring_size,
+			pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
+			pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr,
+			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
+						 rx_ind_ring_base),
+						memctx));
+		return -ENOBUFS;
+	}
+	cdf_mem_zero(pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr, 4);
+
+	/* WDI 2.0 (RX2) resources; no-op stub on non QCA_WIFI_3_0 builds */
+	ret = htt_rx_ipa_uc_alloc_wdi2_rsc(pdev, rx_ind_ring_elements);
+	return ret;
+}
+
+#ifdef QCA_WIFI_3_0
+/**
+ * htt_rx_ipa_uc_free_wdi2_rsc() - Free WDI2.0 resources
+ * @pdev: htt context
+ *
+ * Return: None
+ */
+void htt_rx_ipa_uc_free_wdi2_rsc(struct htt_pdev_t *pdev)
+{
+	if (pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr) {
+		cdf_os_mem_free_consistent(
+			pdev->osdev,
+			pdev->ipa_uc_rx_rsc.rx2_ind_ring_size,
+			pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.vaddr,
+			pdev->ipa_uc_rx_rsc.rx2_ind_ring_base.paddr,
+			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
+						 rx2_ind_ring_base),
+						memctx));
+	}
+
+	if (pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.vaddr) {
+		/* Fix: free the RX2 done index consistently - vaddr,
+		 * paddr and DMA mem context must all reference
+		 * rx2_ipa_prc_done_idx (the original mixed in the WDI 1.0
+		 * rx_ipa_prc_done_idx vaddr/context). */
+		cdf_os_mem_free_consistent(
+			pdev->osdev,
+			4,
+			pdev->ipa_uc_rx_rsc.
+			rx2_ipa_prc_done_idx.vaddr,
+			pdev->ipa_uc_rx_rsc.rx2_ipa_prc_done_idx.paddr,
+			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
+						 rx2_ipa_prc_done_idx),
+						memctx));
+	}
+}
+#else
+/* No WDI 2.0 resources exist on non QCA_WIFI_3_0 targets; nothing to
+ * free.
+ */
+void htt_rx_ipa_uc_free_wdi2_rsc(struct htt_pdev_t *pdev)
+{
+}
+#endif
+
+/**
+ * htt_rx_ipa_uc_detach() - release the IPA uC rx resources allocated by
+ * htt_rx_ipa_uc_attach() (indication ring, done index, WDI 2.0 extras)
+ * @pdev: htt context
+ *
+ * Return: always 0
+ */
+int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev)
+{
+	if (pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr) {
+		cdf_os_mem_free_consistent(
+			pdev->osdev,
+			pdev->ipa_uc_rx_rsc.rx_ind_ring_size,
+			pdev->ipa_uc_rx_rsc.rx_ind_ring_base.vaddr,
+			pdev->ipa_uc_rx_rsc.rx_ind_ring_base.paddr,
+			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
+						 rx_ind_ring_base),
+						memctx));
+	}
+
+	if (pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.vaddr) {
+		/* Fix: the DMA mem context must reference the same
+		 * rx_ipa_prc_done_idx being freed here, not the RX2
+		 * (WDI 2.0) rx2_ipa_prc_done_idx. */
+		cdf_os_mem_free_consistent(
+			pdev->osdev,
+			4,
+			pdev->ipa_uc_rx_rsc.
+			rx_ipa_prc_done_idx.vaddr,
+			pdev->ipa_uc_rx_rsc.rx_ipa_prc_done_idx.paddr,
+			cdf_get_dma_mem_context((&pdev->ipa_uc_rx_rsc.
+						 rx_ipa_prc_done_idx),
+						memctx));
+	}
+
+	htt_rx_ipa_uc_free_wdi2_rsc(pdev);
+	return 0;
+}
+#endif /* IPA_OFFLOAD */
diff --git a/dp/htt/htt_t2h.c b/dp/htt/htt_t2h.c
new file mode 100644
index 000000000000..639c34506c8a
--- /dev/null
+++ b/dp/htt/htt_t2h.c
@@ -0,0 +1,948 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file htt_t2h.c
+ * @brief Provide functions to process target->host HTT messages.
+ * @details
+ * This file contains functions related to target->host HTT messages.
+ * There are two categories of functions:
+ * 1. A function that receives a HTT message from HTC, and dispatches it
+ * based on the HTT message type.
+ * 2. functions that provide the info elements from specific HTT messages.
+ */
+
+#include <htc_api.h> /* HTC_PACKET */
+#include <htt.h> /* HTT_T2H_MSG_TYPE, etc. */
+#include <cdf_nbuf.h> /* cdf_nbuf_t */
+
+#include <ol_htt_rx_api.h>
+#include <ol_htt_tx_api.h>
+#include <ol_txrx_htt_api.h> /* htt_tx_status */
+
+#include <htt_internal.h> /* HTT_TX_SCHED, etc. */
+#include <pktlog_ac_fmt.h>
+#include <wdi_event.h>
+#include <ol_htt_tx_api.h>
+#include <ol_txrx_types.h>
+/*--- target->host HTT message dispatch function ----------------------------*/
+
+#ifndef DEBUG_CREDIT
+#define DEBUG_CREDIT 0
+#endif
+
+/*
+ * Return a pointer to the MAC address in host byte order: either the
+ * in-message bytes directly (same endianness) or the 6 bytes copied,
+ * un-swizzled, into the caller-supplied buffer (big-endian host).
+ * 'buffer' must hold at least 6 bytes.
+ */
+static uint8_t *htt_t2h_mac_addr_deswizzle(uint8_t *tgt_mac_addr,
+					   uint8_t *buffer)
+{
+#ifdef BIG_ENDIAN_HOST
+	/*
+	 * The host endianness is opposite of the target endianness.
+	 * To make uint32_t elements come out correctly, the target->host
+	 * upload has swizzled the bytes in each uint32_t element of the
+	 * message.
+	 * For byte-array message fields like the MAC address, this
+	 * upload swizzling puts the bytes in the wrong order, and needs
+	 * to be undone.
+	 */
+	/* the 6 MAC bytes straddle two 32-bit words: bytes 0-3 come from
+	 * the first word, bytes 4-5 from the second */
+	buffer[0] = tgt_mac_addr[3];
+	buffer[1] = tgt_mac_addr[2];
+	buffer[2] = tgt_mac_addr[1];
+	buffer[3] = tgt_mac_addr[0];
+	buffer[4] = tgt_mac_addr[7];
+	buffer[5] = tgt_mac_addr[6];
+	return buffer;
+#else
+	/*
+	 * The host endianness matches the target endianness -
+	 * we can use the mac addr directly from the message buffer.
+	 */
+	return tgt_mac_addr;
+#endif
+}
+
+/*
+ * Force the last_msdu flag in the rx descriptor of the first buffer of
+ * an rx fragment indication, and record the MPDU-range offset /
+ * fw-descriptor byte for this message.  The buffer is unmapped before
+ * the CPU writes to its descriptor and re-mapped afterwards so the
+ * write is visible to the device.
+ */
+static void htt_rx_frag_set_last_msdu(struct htt_pdev_t *pdev, cdf_nbuf_t msg)
+{
+	uint32_t *msg_word;
+	unsigned num_msdu_bytes;
+	cdf_nbuf_t msdu;
+	struct htt_host_rx_desc_base *rx_desc;
+	int start_idx;
+	uint8_t *p_fw_msdu_rx_desc = 0;
+
+	msg_word = (uint32_t *) cdf_nbuf_data(msg);
+	num_msdu_bytes = HTT_RX_FRAG_IND_FW_RX_DESC_BYTES_GET(
+		*(msg_word + HTT_RX_FRAG_IND_HDR_PREFIX_SIZE32));
+	/*
+	 * 1 word for the message header,
+	 * 1 word to specify the number of MSDU bytes,
+	 * 1 word for every 4 MSDU bytes (round up),
+	 * 1 word for the MPDU range header
+	 */
+	pdev->rx_mpdu_range_offset_words = 3 + ((num_msdu_bytes + 3) >> 2);
+	pdev->rx_ind_msdu_byte_idx = 0;
+
+	p_fw_msdu_rx_desc = ((uint8_t *) (msg_word) +
+			     HTT_ENDIAN_BYTE_IDX_SWAP
+				     (HTT_RX_FRAG_IND_FW_DESC_BYTE_OFFSET));
+
+	/*
+	 * Fix for EV126710, in which BSOD occurs due to last_msdu bit
+	 * not set while the next pointer is deliberately set to NULL
+	 * before calling ol_rx_pn_check_base()
+	 *
+	 * For fragment frames, the HW may not have set the last_msdu bit
+	 * in the rx descriptor, but the SW expects this flag to be set,
+	 * since each fragment is in a separate MPDU. Thus, set the flag here,
+	 * just in case the HW didn't.
+	 */
+	start_idx = pdev->rx_ring.sw_rd_idx.msdu_payld;
+	msdu = pdev->rx_ring.buf.netbufs_ring[start_idx];
+	cdf_nbuf_set_pktlen(msdu, HTT_RX_BUF_SIZE);
+	/* give the CPU ownership of the buffer before writing to it */
+	cdf_nbuf_unmap(pdev->osdev, msdu, CDF_DMA_FROM_DEVICE);
+	rx_desc = htt_rx_desc(msdu);
+	*((uint8_t *) &rx_desc->fw_desc.u.val) = *p_fw_msdu_rx_desc;
+	rx_desc->msdu_end.last_msdu = 1;
+	/* hand the buffer back to the device */
+	cdf_nbuf_map(pdev->osdev, msdu, CDF_DMA_FROM_DEVICE);
+}
+
+/*
+ * Target to host Msg/event handler for low priority messages.
+ * Dispatches on the HTT message type and hands the payload to the
+ * matching ol_rx/ol_tx/ol_txrx handler; the indication buffer is
+ * always freed before returning.
+ */
+void htt_t2h_lp_msg_handler(void *context, cdf_nbuf_t htt_t2h_msg)
+{
+	struct htt_pdev_t *pdev = (struct htt_pdev_t *)context;
+	uint32_t *msg_word;
+	enum htt_t2h_msg_type msg_type;
+
+	msg_word = (uint32_t *) cdf_nbuf_data(htt_t2h_msg);
+	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
+	switch (msg_type) {
+	case HTT_T2H_MSG_TYPE_VERSION_CONF:
+	{
+		/* NOTE(review): presumably pairs with a runtime-pm get
+		 * taken when the version request was sent - confirm
+		 * against the h2t path */
+		cdf_runtime_pm_put();
+		pdev->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
+		pdev->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
+		cdf_print
+			("target uses HTT version %d.%d; host uses %d.%d\n",
+			pdev->tgt_ver.major, pdev->tgt_ver.minor,
+			HTT_CURRENT_VERSION_MAJOR,
+			HTT_CURRENT_VERSION_MINOR);
+		if (pdev->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR)
+			cdf_print
+				("*** Incompatible host/target HTT versions!\n");
+		/* abort if the target is incompatible with the host */
+		cdf_assert(pdev->tgt_ver.major ==
+			   HTT_CURRENT_VERSION_MAJOR);
+		if (pdev->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
+			cdf_print("*** Warning: host/target HTT versions are ");
+			cdf_print(" different, though compatible!\n");
+		}
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_RX_FLUSH:
+	{
+		uint16_t peer_id;
+		uint8_t tid;
+		int seq_num_start, seq_num_end;
+		enum htt_rx_flush_action action;
+
+		peer_id = HTT_RX_FLUSH_PEER_ID_GET(*msg_word);
+		tid = HTT_RX_FLUSH_TID_GET(*msg_word);
+		seq_num_start =
+			HTT_RX_FLUSH_SEQ_NUM_START_GET(*(msg_word + 1));
+		seq_num_end =
+			HTT_RX_FLUSH_SEQ_NUM_END_GET(*(msg_word + 1));
+		/* MPDU status 1 => deliver flushed MPDUs, else discard */
+		action =
+			HTT_RX_FLUSH_MPDU_STATUS_GET(*(msg_word + 1)) ==
+			1 ? htt_rx_flush_release : htt_rx_flush_discard;
+		ol_rx_flush_handler(pdev->txrx_pdev, peer_id, tid,
+				    seq_num_start, seq_num_end, action);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND:
+	{
+		int msdu_cnt;
+		msdu_cnt =
+			HTT_RX_OFFLOAD_DELIVER_IND_MSDU_CNT_GET(*msg_word);
+		ol_rx_offload_deliver_ind_handler(pdev->txrx_pdev,
+						  htt_t2h_msg,
+						  msdu_cnt);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_RX_FRAG_IND:
+	{
+		uint16_t peer_id;
+		uint8_t tid;
+
+		peer_id = HTT_RX_FRAG_IND_PEER_ID_GET(*msg_word);
+		tid = HTT_RX_FRAG_IND_EXT_TID_GET(*msg_word);
+		/* ensure the fragment's descriptor has last_msdu set
+		 * before the reassembly/PN code sees it */
+		htt_rx_frag_set_last_msdu(pdev, htt_t2h_msg);
+
+		ol_rx_frag_indication_handler(pdev->txrx_pdev,
+					      htt_t2h_msg,
+					      peer_id, tid);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_RX_ADDBA:
+	{
+		uint16_t peer_id;
+		uint8_t tid;
+		uint8_t win_sz;
+		uint16_t start_seq_num;
+
+		/*
+		 * FOR NOW, the host doesn't need to know the initial
+		 * sequence number for rx aggregation.
+		 * Thus, any value will do - specify 0.
+		 */
+		start_seq_num = 0;
+		peer_id = HTT_RX_ADDBA_PEER_ID_GET(*msg_word);
+		tid = HTT_RX_ADDBA_TID_GET(*msg_word);
+		win_sz = HTT_RX_ADDBA_WIN_SIZE_GET(*msg_word);
+		ol_rx_addba_handler(pdev->txrx_pdev, peer_id, tid,
+				    win_sz, start_seq_num,
+				    0 /* success */);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_RX_DELBA:
+	{
+		uint16_t peer_id;
+		uint8_t tid;
+
+		peer_id = HTT_RX_DELBA_PEER_ID_GET(*msg_word);
+		tid = HTT_RX_DELBA_TID_GET(*msg_word);
+		ol_rx_delba_handler(pdev->txrx_pdev, peer_id, tid);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_PEER_MAP:
+	{
+		uint8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
+		uint8_t *peer_mac_addr;
+		uint16_t peer_id;
+		uint8_t vdev_id;
+
+		peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
+		vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
+		/* MAC address starts at the second word of the message */
+		peer_mac_addr = htt_t2h_mac_addr_deswizzle(
+			(uint8_t *) (msg_word + 1),
+			&mac_addr_deswizzle_buf[0]);
+
+		ol_rx_peer_map_handler(pdev->txrx_pdev, peer_id,
+				       vdev_id, peer_mac_addr,
+				       1 /*can tx */);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
+	{
+		uint16_t peer_id;
+		peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
+
+		ol_rx_peer_unmap_handler(pdev->txrx_pdev, peer_id);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_SEC_IND:
+	{
+		uint16_t peer_id;
+		enum htt_sec_type sec_type;
+		int is_unicast;
+
+		peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
+		sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
+		is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
+		msg_word++;	/* point to the first part of the Michael key */
+		ol_rx_sec_ind_handler(pdev->txrx_pdev, peer_id,
+				      sec_type, is_unicast, msg_word,
+				      msg_word + 2);
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_MGMT_TX_COMPL_IND:
+	{
+		struct htt_mgmt_tx_compl_ind *compl_msg;
+
+		compl_msg =
+			(struct htt_mgmt_tx_compl_ind *)(msg_word + 1);
+
+		if (!ol_tx_get_is_mgmt_over_wmi_enabled()) {
+			ol_tx_single_completion_handler(pdev->txrx_pdev,
+							compl_msg->status,
+							compl_msg->desc_id);
+			cdf_runtime_pm_put();
+			HTT_TX_SCHED(pdev);
+		} else {
+			cdf_print("Ignoring HTT_T2H_MSG_TYPE_MGMT_TX_COMPL_IND indication\n");
+		}
+		break;
+	}
+	case HTT_T2H_MSG_TYPE_STATS_CONF:
+	{
+		uint64_t cookie;
+		uint8_t *stats_info_list;
+
+		/* 64-bit cookie is split across words 1 (low) and 2 (high) */
+		cookie = *(msg_word + 1);
+		cookie |= ((uint64_t) (*(msg_word + 2))) << 32;
+
+		stats_info_list = (uint8_t *) (msg_word + 3);
+		cdf_runtime_pm_put();
+		ol_txrx_fw_stats_handler(pdev->txrx_pdev, cookie,
+					 stats_info_list);
+		break;
+	}
+#ifndef REMOVE_PKT_LOG
+	case HTT_T2H_MSG_TYPE_PKTLOG:
+	{
+		uint32_t *pl_hdr;
+		uint32_t log_type;
+		pl_hdr = (msg_word + 1);
+		log_type =
+			(*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >>
+			ATH_PKTLOG_HDR_LOG_TYPE_SHIFT;
+		/* route the pktlog record to the matching WDI event */
+		if ((log_type == PKTLOG_TYPE_TX_CTRL)
+		    || (log_type == PKTLOG_TYPE_TX_STAT)
+		    || (log_type == PKTLOG_TYPE_TX_MSDU_ID)
+		    || (log_type == PKTLOG_TYPE_TX_FRM_HDR)
+		    || (log_type == PKTLOG_TYPE_TX_VIRT_ADDR))
+			wdi_event_handler(WDI_EVENT_TX_STATUS,
+					  pdev->txrx_pdev, pl_hdr);
+		else if (log_type == PKTLOG_TYPE_RC_FIND)
+			wdi_event_handler(WDI_EVENT_RATE_FIND,
+					  pdev->txrx_pdev, pl_hdr);
+		else if (log_type == PKTLOG_TYPE_RC_UPDATE)
+			wdi_event_handler(WDI_EVENT_RATE_UPDATE,
+					  pdev->txrx_pdev, pl_hdr);
+		else if (log_type == PKTLOG_TYPE_RX_STAT)
+			wdi_event_handler(WDI_EVENT_RX_DESC,
+					  pdev->txrx_pdev, pl_hdr);
+
+		break;
+	}
+#endif
+	case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
+	{
+		uint32_t htt_credit_delta_abs;
+		int32_t htt_credit_delta;
+		int sign;
+
+		/* credit delta arrives as magnitude + sign bit */
+		htt_credit_delta_abs =
+			HTT_TX_CREDIT_DELTA_ABS_GET(*msg_word);
+		sign = HTT_TX_CREDIT_SIGN_BIT_GET(*msg_word) ? -1 : 1;
+		htt_credit_delta = sign * htt_credit_delta_abs;
+		ol_tx_credit_completion_handler(pdev->txrx_pdev,
+						htt_credit_delta);
+		break;
+	}
+
+	case HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE:
+	{
+		uint8_t op_code;
+		uint16_t len;
+		uint8_t *op_msg_buffer;
+		uint8_t *msg_start_ptr;
+
+		cdf_runtime_pm_put();
+		msg_start_ptr = (uint8_t *) msg_word;
+		op_code =
+			HTT_WDI_IPA_OP_RESPONSE_OP_CODE_GET(*msg_word);
+		msg_word++;
+		len = HTT_WDI_IPA_OP_RESPONSE_RSP_LEN_GET(*msg_word);
+
+		/* copy out header + variable payload; the handler owns
+		 * (and must free) op_msg_buffer */
+		op_msg_buffer =
+			cdf_mem_malloc(sizeof
+				       (struct htt_wdi_ipa_op_response_t) +
+				       len);
+		if (!op_msg_buffer) {
+			cdf_print("OPCODE messsage buffer alloc fail");
+			break;
+		}
+		cdf_mem_copy(op_msg_buffer,
+			     msg_start_ptr,
+			     sizeof(struct htt_wdi_ipa_op_response_t) +
+			     len);
+		ol_txrx_ipa_uc_op_response(pdev->txrx_pdev,
+					   op_msg_buffer);
+		break;
+	}
+
+	case HTT_T2H_MSG_TYPE_FLOW_POOL_MAP:
+	{
+		uint8_t num_flows;
+		struct htt_flow_pool_map_payload_t *pool_map_payoad;
+
+		num_flows = HTT_FLOW_POOL_MAP_NUM_FLOWS_GET(*msg_word);
+
+		msg_word++;
+		/* one fixed-size payload record per flow */
+		while (num_flows) {
+			pool_map_payoad = (struct htt_flow_pool_map_payload_t *)
+								msg_word;
+			ol_tx_flow_pool_map_handler(pool_map_payoad->flow_id,
+					pool_map_payoad->flow_type,
+					pool_map_payoad->flow_pool_id,
+					pool_map_payoad->flow_pool_size);
+
+			msg_word += (HTT_FLOW_POOL_MAP_PAYLOAD_SZ /
+						HTT_FLOW_POOL_MAP_HEADER_SZ);
+			num_flows--;
+		}
+		break;
+	}
+
+	case HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP:
+	{
+		struct htt_flow_pool_unmap_t *pool_numap_payload;
+
+		pool_numap_payload = (struct htt_flow_pool_unmap_t *)msg_word;
+		ol_tx_flow_pool_unmap_handler(pool_numap_payload->flow_id,
+					pool_numap_payload->flow_type,
+					pool_numap_payload->flow_pool_id);
+		break;
+	}
+
+	default:
+		break;
+	};
+	/* Free the indication buffer */
+	cdf_nbuf_free(htt_t2h_msg);
+}
+
+/* Generic target-to-host msg/event handler. Low priority messages
+   are handled in a separate handler called from this function,
+   so that the most likely success paths, like rx and
+   tx completion, have a small code footprint.
+ */
+void htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
+{
+ struct htt_pdev_t *pdev = (struct htt_pdev_t *)context;
+ cdf_nbuf_t htt_t2h_msg = (cdf_nbuf_t) pkt->pPktContext;
+ uint32_t *msg_word;
+ enum htt_t2h_msg_type msg_type;
+
+ /* check for successful message reception */
+ if (pkt->Status != A_OK) {
+ if (pkt->Status != A_ECANCELED)
+ pdev->stats.htc_err_cnt++;
+ cdf_nbuf_free(htt_t2h_msg);
+ return;
+ }
+#ifdef HTT_RX_RESTORE
+ if (cdf_unlikely(pdev->rx_ring.rx_reset)) {
+ cdf_print("rx restore ..\n");
+ cdf_nbuf_free(htt_t2h_msg);
+ return;
+ }
+#endif
+
+ /* confirm alignment */
+ HTT_ASSERT3((((unsigned long)cdf_nbuf_data(htt_t2h_msg)) & 0x3) == 0);
+
+ msg_word = (uint32_t *) cdf_nbuf_data(htt_t2h_msg);
+ msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
+
+#if defined(HELIUMPLUS_DEBUG)
+ cdf_print("%s %d: msg_word 0x%x msg_type %d\n",
+ __func__, __LINE__, *msg_word, msg_type);
+#endif
+
+ /*
+ * Fast-path message types (rx indication, tx completion, etc.) are
+ * handled inline below; everything else is deferred to the
+ * low-priority handler in the default case.
+ */
+ switch (msg_type) {
+ case HTT_T2H_MSG_TYPE_RX_IND:
+ {
+ unsigned num_mpdu_ranges;
+ unsigned num_msdu_bytes;
+ uint16_t peer_id;
+ uint8_t tid;
+
+ if (cdf_unlikely(pdev->cfg.is_full_reorder_offload)) {
+ cdf_print("HTT_T2H_MSG_TYPE_RX_IND not supported ");
+ cdf_print("with full reorder offload\n");
+ break;
+ }
+ peer_id = HTT_RX_IND_PEER_ID_GET(*msg_word);
+ tid = HTT_RX_IND_EXT_TID_GET(*msg_word);
+
+ if (tid >= OL_TXRX_NUM_EXT_TIDS) {
+ cdf_print("HTT_T2H_MSG_TYPE_RX_IND, invalid tid %d\n",
+ tid);
+ break;
+ }
+ num_msdu_bytes =
+ HTT_RX_IND_FW_RX_DESC_BYTES_GET(
+ *(msg_word + 2 + HTT_RX_PPDU_DESC_SIZE32));
+ /*
+ * 1 word for the message header,
+ * HTT_RX_PPDU_DESC_SIZE32 words for the FW rx PPDU desc
+ * 1 word to specify the number of MSDU bytes,
+ * 1 word for every 4 MSDU bytes (round up),
+ * 1 word for the MPDU range header
+ */
+ pdev->rx_mpdu_range_offset_words =
+ (HTT_RX_IND_HDR_BYTES + num_msdu_bytes + 3) >> 2;
+ num_mpdu_ranges =
+ HTT_RX_IND_NUM_MPDU_RANGES_GET(*(msg_word + 1));
+ pdev->rx_ind_msdu_byte_idx = 0;
+
+ ol_rx_indication_handler(pdev->txrx_pdev,
+ htt_t2h_msg, peer_id,
+ tid, num_mpdu_ranges);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
+ {
+ int num_msdus;
+ enum htt_tx_status status;
+
+ /* status - no enum translation needed */
+ status = HTT_TX_COMPL_IND_STATUS_GET(*msg_word);
+ num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
+ if (num_msdus & 0x1) {
+ struct htt_tx_compl_ind_base *compl =
+ (void *)msg_word;
+
+ /*
+ * Host CPU endianness can be different from FW CPU.
+ * This can result in even and odd MSDU IDs being
+ * switched. If this happens, copy the switched final
+ * odd MSDU ID from location payload[size], to
+ * location payload[size-1], where the message
+ * handler function expects to find it
+ */
+ if (compl->payload[num_msdus] !=
+ HTT_TX_COMPL_INV_MSDU_ID) {
+ compl->payload[num_msdus - 1] =
+ compl->payload[num_msdus];
+ }
+ }
+ ol_tx_completion_handler(pdev->txrx_pdev, num_msdus,
+ status, msg_word + 1);
+ HTT_TX_SCHED(pdev);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_RX_PN_IND:
+ {
+ uint16_t peer_id;
+ uint8_t tid, pn_ie_cnt, *pn_ie = NULL;
+ int seq_num_start, seq_num_end;
+
+ /*First dword */
+ peer_id = HTT_RX_PN_IND_PEER_ID_GET(*msg_word);
+ tid = HTT_RX_PN_IND_EXT_TID_GET(*msg_word);
+
+ msg_word++;
+ /*Second dword */
+ seq_num_start =
+ HTT_RX_PN_IND_SEQ_NUM_START_GET(*msg_word);
+ seq_num_end = HTT_RX_PN_IND_SEQ_NUM_END_GET(*msg_word);
+ pn_ie_cnt = HTT_RX_PN_IND_PN_IE_CNT_GET(*msg_word);
+
+ msg_word++;
+ /*Third dword */
+ if (pn_ie_cnt)
+ pn_ie = (uint8_t *) msg_word;
+
+ ol_rx_pn_ind_handler(pdev->txrx_pdev, peer_id, tid,
+ seq_num_start, seq_num_end,
+ pn_ie_cnt, pn_ie);
+
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
+ {
+ int num_msdus;
+
+ num_msdus = HTT_TX_COMPL_IND_NUM_GET(*msg_word);
+ if (num_msdus & 0x1) {
+ struct htt_tx_compl_ind_base *compl =
+ (void *)msg_word;
+
+ /*
+ * Host CPU endianness can be different from FW CPU.
+ * This can result in even and odd MSDU IDs being
+ * switched. If this happens, copy the switched final
+ * odd MSDU ID from location payload[size], to
+ * location payload[size-1], where the message handler
+ * function expects to find it
+ */
+ if (compl->payload[num_msdus] !=
+ HTT_TX_COMPL_INV_MSDU_ID) {
+ compl->payload[num_msdus - 1] =
+ compl->payload[num_msdus];
+ }
+ }
+ ol_tx_inspect_handler(pdev->txrx_pdev, num_msdus,
+ msg_word + 1);
+ HTT_TX_SCHED(pdev);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND:
+ {
+ uint16_t peer_id;
+ uint8_t tid;
+ uint8_t offload_ind, frag_ind;
+
+ if (cdf_unlikely(!pdev->cfg.is_full_reorder_offload)) {
+ cdf_print("HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND not ");
+ cdf_print("supported when full reorder offload is ");
+ cdf_print("disabled in the configuration.\n");
+ break;
+ }
+
+ peer_id = HTT_RX_IN_ORD_PADDR_IND_PEER_ID_GET(*msg_word);
+ tid = HTT_RX_IN_ORD_PADDR_IND_EXT_TID_GET(*msg_word);
+ offload_ind = HTT_RX_IN_ORD_PADDR_IND_OFFLOAD_GET(*msg_word);
+ frag_ind = HTT_RX_IN_ORD_PADDR_IND_FRAG_GET(*msg_word);
+
+#if defined(HELIUMPLUS_DEBUG)
+ cdf_print("%s %d: peerid %d tid %d offloadind %d fragind %d\n",
+ __func__, __LINE__, peer_id, tid, offload_ind,
+ frag_ind);
+#endif
+ if (cdf_unlikely(frag_ind)) {
+ ol_rx_frag_indication_handler(pdev->txrx_pdev,
+ htt_t2h_msg,
+ peer_id, tid);
+ break;
+ }
+
+ ol_rx_in_order_indication_handler(pdev->txrx_pdev,
+ htt_t2h_msg, peer_id,
+ tid, offload_ind);
+ break;
+ }
+
+ default:
+ /*
+ * Low-frequency message: parse it off the fast path. The LP
+ * handler takes ownership of (and frees) the buffer, hence
+ * return rather than break.
+ */
+ htt_t2h_lp_msg_handler(context, htt_t2h_msg);
+ return;
+
+ };
+
+ /* Free the indication buffer */
+ cdf_nbuf_free(htt_t2h_msg);
+}
+
+/*--- target->host HTT message Info Element access methods ------------------*/
+
+/*--- tx completion message ---*/
+
+/**
+ * htt_tx_compl_desc_id() - get the MSDU descriptor ID at a given position
+ * within a tx completion message's packed ID array
+ * @iterator: pointer to the packed array of 16-bit MSDU IDs
+ * @num: index of the ID to retrieve
+ *
+ * Return: the 16-bit MSDU descriptor ID at index @num
+ */
+uint16_t htt_tx_compl_desc_id(void *iterator, int num)
+{
+ /*
+ * The MSDU IDs are packed, 2 per 32-bit word.
+ * Iterate on them as an array of 16-bit elements.
+ * This will work fine if the host endianness matches
+ * the target endianness.
+ * If the host endianness is opposite of the target's,
+ * this iterator will produce descriptor IDs in a different
+ * order than the target inserted them into the message -
+ * if the target puts in [0, 1, 2, 3, ...] the host will
+ * put out [1, 0, 3, 2, ...].
+ * This is fine, except for the last ID if there are an
+ * odd number of IDs. But the TX_COMPL_IND handling code
+ * in the htt_t2h_msg_handler already added a duplicate
+ * of the final ID, if there were an odd number of IDs,
+ * so this function can safely treat the IDs as an array
+ * of 16-bit elements.
+ */
+ return *(((uint16_t *) iterator) + num);
+}
+
+/*--- rx indication message ---*/
+
+/**
+ * htt_rx_ind_flush() - check whether an rx indication message carries a
+ * valid rx reorder flush directive
+ * @pdev: the HTT instance the rx data was received on
+ * @rx_ind_msg: the netbuf containing the rx indication message
+ *
+ * Return: non-zero if the message's flush fields are valid
+ */
+int htt_rx_ind_flush(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg)
+{
+ uint32_t *msg_word;
+
+ msg_word = (uint32_t *) cdf_nbuf_data(rx_ind_msg);
+ return HTT_RX_IND_FLUSH_VALID_GET(*msg_word);
+}
+
+/**
+ * htt_rx_ind_flush_seq_num_range() - extract the flush sequence-number range
+ * from an rx indication message
+ * @pdev: the HTT instance the rx data was received on
+ * @rx_ind_msg: the netbuf containing the rx indication message
+ * @seq_num_start: (output) start of the flush range
+ * @seq_num_end: (output) end of the flush range
+ *
+ * The range fields live in the second word of the message.
+ */
+void
+htt_rx_ind_flush_seq_num_range(htt_pdev_handle pdev,
+ cdf_nbuf_t rx_ind_msg,
+ unsigned *seq_num_start, unsigned *seq_num_end)
+{
+ uint32_t *msg_word;
+
+ msg_word = (uint32_t *) cdf_nbuf_data(rx_ind_msg);
+ msg_word++;
+ *seq_num_start = HTT_RX_IND_FLUSH_SEQ_NUM_START_GET(*msg_word);
+ *seq_num_end = HTT_RX_IND_FLUSH_SEQ_NUM_END_GET(*msg_word);
+}
+
+/**
+ * htt_rx_ind_release() - check whether an rx indication message carries a
+ * valid rx reorder release directive
+ * @pdev: the HTT instance the rx data was received on
+ * @rx_ind_msg: the netbuf containing the rx indication message
+ *
+ * Return: non-zero if the message's release fields are valid
+ */
+int htt_rx_ind_release(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg)
+{
+ uint32_t *msg_word;
+
+ msg_word = (uint32_t *) cdf_nbuf_data(rx_ind_msg);
+ return HTT_RX_IND_REL_VALID_GET(*msg_word);
+}
+
+/**
+ * htt_rx_ind_release_seq_num_range() - extract the release sequence-number
+ * range from an rx indication message
+ * @pdev: the HTT instance the rx data was received on
+ * @rx_ind_msg: the netbuf containing the rx indication message
+ * @seq_num_start: (output) start of the release range
+ * @seq_num_end: (output) end of the release range
+ *
+ * The range fields live in the second word of the message.
+ */
+void
+htt_rx_ind_release_seq_num_range(htt_pdev_handle pdev,
+ cdf_nbuf_t rx_ind_msg,
+ unsigned *seq_num_start, unsigned *seq_num_end)
+{
+ uint32_t *msg_word;
+
+ msg_word = (uint32_t *) cdf_nbuf_data(rx_ind_msg);
+ msg_word++;
+ *seq_num_start = HTT_RX_IND_REL_SEQ_NUM_START_GET(*msg_word);
+ *seq_num_end = HTT_RX_IND_REL_SEQ_NUM_END_GET(*msg_word);
+}
+
+/**
+ * htt_rx_ind_mpdu_range_info() - extract one MPDU range's status and count
+ * from an rx indication message
+ * @pdev: the HTT instance the rx data was received on
+ * @rx_ind_msg: the netbuf containing the rx indication message
+ * @mpdu_range_num: which MPDU range within the message to read
+ * @status: (output) the rx status shared by the MPDUs in the range
+ * @mpdu_count: (output) the number of MPDUs in the range
+ *
+ * Relies on pdev->rx_mpdu_range_offset_words, which the RX_IND case of
+ * htt_t2h_msg_handler computes from the message's variable-length header.
+ */
+void
+htt_rx_ind_mpdu_range_info(struct htt_pdev_t *pdev,
+ cdf_nbuf_t rx_ind_msg,
+ int mpdu_range_num,
+ enum htt_rx_status *status, int *mpdu_count)
+{
+ uint32_t *msg_word;
+
+ msg_word = (uint32_t *) cdf_nbuf_data(rx_ind_msg);
+ msg_word += pdev->rx_mpdu_range_offset_words + mpdu_range_num;
+ *status = HTT_RX_IND_MPDU_STATUS_GET(*msg_word);
+ *mpdu_count = HTT_RX_IND_MPDU_COUNT_GET(*msg_word);
+}
+
+/**
+ * htt_rx_ind_rssi_dbm() - Return the RSSI provided in a rx indication message.
+ *
+ * @pdev: the HTT instance the rx data was received on
+ * @rx_ind_msg: the netbuf containing the rx indication message
+ *
+ * Return the RSSI from an rx indication message, in dBm units.
+ *
+ * Return: RSSI in dBm, or HTT_INVALID_RSSI
+ */
+int16_t htt_rx_ind_rssi_dbm(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg)
+{
+ uint32_t *ppdu_start;
+ int8_t tgt_rssi;
+
+ /* The FW rx PPDU descriptor follows the rx_ind message header. */
+ ppdu_start = (uint32_t *)
+ (cdf_nbuf_data(rx_ind_msg) +
+ HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
+
+ /* Bail out unless the message carries valid rx PPDU start info. */
+ if (!HTT_RX_IND_START_VALID_GET(*ppdu_start))
+ return HTT_RSSI_INVALID;
+
+ /* Translate the target's invalid-RSSI marker to the host's. */
+ tgt_rssi = HTT_RX_IND_RSSI_CMB_GET(*ppdu_start);
+ if (tgt_rssi == HTT_TGT_RSSI_INVALID)
+ return HTT_RSSI_INVALID;
+ return tgt_rssi;
+}
+
+/**
+ * htt_rx_ind_rssi_dbm_chain() - Return the RSSI for a chain provided in a rx
+ * indication message.
+ * @pdev: the HTT instance the rx data was received on
+ * @rx_ind_msg: the netbuf containing the rx indication message
+ * @chain: the index of the chain (0-4)
+ *
+ * Return the RSSI for a chain from an rx indication message, in dBm units.
+ *
+ * Return: RSSI, or HTT_INVALID_RSSI
+ */
+int16_t
+htt_rx_ind_rssi_dbm_chain(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg,
+ int8_t chain)
+{
+ uint32_t *ppdu_start;
+ int8_t tgt_rssi;
+
+ /* Only chains 0..3 are reported. */
+ if (chain < 0 || chain > 3)
+ return HTT_RSSI_INVALID;
+
+ /* The FW rx PPDU descriptor follows the rx_ind message header. */
+ ppdu_start = (uint32_t *)
+ (cdf_nbuf_data(rx_ind_msg) +
+ HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
+
+ /* Bail out unless the message carries valid rx PPDU start info. */
+ if (!HTT_RX_IND_START_VALID_GET(*ppdu_start))
+ return HTT_RSSI_INVALID;
+
+ /* Per-chain RSSI words follow the first PPDU descriptor word. */
+ tgt_rssi = HTT_RX_IND_RSSI_PRI20_GET(*(ppdu_start + 1 + chain));
+ if (tgt_rssi == HTT_TGT_RSSI_INVALID)
+ return HTT_RSSI_INVALID;
+ return tgt_rssi;
+}
+
+/**
+ * htt_rx_ind_legacy_rate() - Return the data rate
+ * @pdev: the HTT instance the rx data was received on
+ * @rx_ind_msg: the netbuf containing the rx indication message
+ * @legacy_rate: (output) the data rate
+ * The legacy_rate parameter's value depends on the
+ * legacy_rate_sel value.
+ * If legacy_rate_sel is 0:
+ * 0x8: OFDM 48 Mbps
+ * 0x9: OFDM 24 Mbps
+ * 0xA: OFDM 12 Mbps
+ * 0xB: OFDM 6 Mbps
+ * 0xC: OFDM 54 Mbps
+ * 0xD: OFDM 36 Mbps
+ * 0xE: OFDM 18 Mbps
+ * 0xF: OFDM 9 Mbps
+ * If legacy_rate_sel is 1:
+ * 0x8: CCK 11 Mbps long preamble
+ * 0x9: CCK 5.5 Mbps long preamble
+ * 0xA: CCK 2 Mbps long preamble
+ * 0xB: CCK 1 Mbps long preamble
+ * 0xC: CCK 11 Mbps short preamble
+ * 0xD: CCK 5.5 Mbps short preamble
+ * 0xE: CCK 2 Mbps short preamble
+ * -1 on error.
+ * @legacy_rate_sel: (output) 0 to indicate OFDM, 1 to indicate CCK.
+ * -1 on error.
+ *
+ * Return the data rate provided in a rx indication message.
+ */
+void
+htt_rx_ind_legacy_rate(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg,
+ uint8_t *legacy_rate, uint8_t *legacy_rate_sel)
+{
+ uint32_t *msg_word;
+
+ msg_word = (uint32_t *)
+ (cdf_nbuf_data(rx_ind_msg) +
+ HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
+
+ /* check if the RX_IND message contains valid rx PPDU start info */
+ if (!HTT_RX_IND_START_VALID_GET(*msg_word)) {
+ /* -1 wraps to 0xff in the uint8_t outputs; this is the
+ * documented error value (see function header above). */
+ *legacy_rate = -1;
+ *legacy_rate_sel = -1;
+ return;
+ }
+
+ *legacy_rate = HTT_RX_IND_LEGACY_RATE_GET(*msg_word);
+ *legacy_rate_sel = HTT_RX_IND_LEGACY_RATE_SEL_GET(*msg_word);
+}
+
+/**
+ * htt_rx_ind_timestamp() - Return the timestamp
+ * @pdev: the HTT instance the rx data was received on
+ * @rx_ind_msg: the netbuf containing the rx indication message
+ * @timestamp_microsec: (output) the timestamp to microsecond resolution.
+ * -1 on error.
+ * @timestamp_submicrosec: the submicrosecond portion of the
+ * timestamp. -1 on error.
+ *
+ * Return the timestamp provided in a rx indication message.
+ */
+void
+htt_rx_ind_timestamp(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg,
+ uint32_t *timestamp_microsec,
+ uint8_t *timestamp_submicrosec)
+{
+ uint32_t *msg_word;
+
+ msg_word = (uint32_t *)
+ (cdf_nbuf_data(rx_ind_msg) +
+ HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
+
+ /* the timestamp is only present if the rx PPDU end info is valid */
+ if (!HTT_RX_IND_END_VALID_GET(*msg_word)) {
+ *timestamp_microsec = -1;
+ *timestamp_submicrosec = -1;
+ return;
+ }
+
+ /* the microsecond timestamp is the 7th word of the PPDU descriptor */
+ *timestamp_microsec = *(msg_word + 6);
+ *timestamp_submicrosec =
+ HTT_RX_IND_TIMESTAMP_SUBMICROSEC_GET(*msg_word);
+}
+
+#define INVALID_TSF -1
+/**
+ * htt_rx_ind_tsf32() - Return the TSF timestamp
+ * @pdev: the HTT instance the rx data was received on
+ * @rx_ind_msg: the netbuf containing the rx indication message
+ *
+ * Return the TSF timestamp provided in a rx indication message.
+ *
+ * Return: TSF timestamp
+ */
+uint32_t
+htt_rx_ind_tsf32(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg)
+{
+ uint32_t *ppdu_start;
+
+ /* The FW rx PPDU descriptor follows the rx_ind message header. */
+ ppdu_start = (uint32_t *)
+ (cdf_nbuf_data(rx_ind_msg) +
+ HTT_RX_IND_FW_RX_PPDU_DESC_BYTE_OFFSET);
+
+ /* The TSF is only present when the rx PPDU end info is valid. */
+ if (!HTT_RX_IND_END_VALID_GET(*ppdu_start))
+ return INVALID_TSF;
+
+ /* The TSF lives in the 6th word of the PPDU descriptor. */
+ return ppdu_start[5];
+}
+
+/**
+ * htt_rx_ind_ext_tid() - Return the extended traffic ID provided in a rx indication message.
+ * @pdev: the HTT instance the rx data was received on
+ * @rx_ind_msg: the netbuf containing the rx indication message
+ *
+ * Return the extended traffic ID in a rx indication message.
+ *
+ * Return: Extended TID
+ */
+uint8_t
+htt_rx_ind_ext_tid(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg)
+{
+ /* The extended TID field sits in the first word of the message. */
+ uint32_t first_word = *((uint32_t *) cdf_nbuf_data(rx_ind_msg));
+
+ return HTT_RX_IND_EXT_TID_GET(first_word);
+}
+
+/*--- stats confirmation message ---*/
+
+/**
+ * htt_t2h_dbg_stats_hdr_parse() - parse one TLV header within a stats
+ * confirmation message
+ * @stats_info_list: pointer to the current TLV within the stats message
+ * @type: (output) the stats type of this TLV
+ * @status: (output) the completion status of this TLV
+ * @length: (output) total TLV length (header + data), in bytes
+ * @stats_data: (output) pointer to the TLV's data, just past its header
+ */
+void
+htt_t2h_dbg_stats_hdr_parse(uint8_t *stats_info_list,
+ enum htt_dbg_stats_type *type,
+ enum htt_dbg_stats_status *status,
+ int *length, uint8_t **stats_data)
+{
+ uint32_t *msg_word = (uint32_t *) stats_info_list;
+ *type = HTT_T2H_STATS_CONF_TLV_TYPE_GET(*msg_word);
+ *status = HTT_T2H_STATS_CONF_TLV_STATUS_GET(*msg_word);
+ *length = HTT_T2H_STATS_CONF_TLV_HDR_SIZE + /* header length */
+ HTT_T2H_STATS_CONF_TLV_LENGTH_GET(*msg_word); /* data len */
+ *stats_data = stats_info_list + HTT_T2H_STATS_CONF_TLV_HDR_SIZE;
+}
+
+/**
+ * htt_rx_frag_ind_flush_seq_num_range() - extract the flush sequence-number
+ * range from an rx fragment indication message
+ * @pdev: the HTT instance the rx data was received on
+ * @rx_frag_ind_msg: the netbuf containing the rx fragment indication
+ * @seq_num_start: (output) start of the flush range
+ * @seq_num_end: (output) end of the flush range
+ *
+ * The range fields live in the second word of the message.
+ */
+void
+htt_rx_frag_ind_flush_seq_num_range(htt_pdev_handle pdev,
+ cdf_nbuf_t rx_frag_ind_msg,
+ int *seq_num_start, int *seq_num_end)
+{
+ uint32_t *msg_word;
+
+ msg_word = (uint32_t *) cdf_nbuf_data(rx_frag_ind_msg);
+ msg_word++;
+ *seq_num_start = HTT_RX_FRAG_IND_FLUSH_SEQ_NUM_START_GET(*msg_word);
+ *seq_num_end = HTT_RX_FRAG_IND_FLUSH_SEQ_NUM_END_GET(*msg_word);
+}
diff --git a/dp/htt/htt_tx.c b/dp/htt/htt_tx.c
new file mode 100644
index 000000000000..1aa28f9ce202
--- /dev/null
+++ b/dp/htt/htt_tx.c
@@ -0,0 +1,1166 @@
+/*
+ * Copyright (c) 2011, 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file htt_tx.c
+ * @brief Implement transmit aspects of HTT.
+ * @details
+ * This file contains three categories of HTT tx code:
+ * 1. An abstraction of the tx descriptor, to hide the
+ * differences between the HL vs. LL tx descriptor.
+ * 2. Functions for allocating and freeing HTT tx descriptors.
+ * 3. The function that accepts a tx frame from txrx and sends the
+ * tx frame to HTC.
+ */
+#include <osdep.h> /* uint32_t, offsetof, etc. */
+#include <cdf_types.h> /* cdf_dma_addr_t */
+#include <cdf_memory.h> /* cdf_os_mem_alloc_consistent et al */
+#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+#include <cdf_time.h> /* cdf_mdelay */
+
+#include <htt.h> /* htt_tx_msdu_desc_t */
+#include <htc.h> /* HTC_HDR_LENGTH */
+#include <htc_api.h> /* htc_flush_surprise_remove */
+#include <ol_cfg.h> /* ol_cfg_netbuf_frags_max, etc. */
+#include <ol_htt_tx_api.h> /* HTT_TX_DESC_VADDR_OFFSET */
+#include <ol_txrx_htt_api.h> /* ol_tx_msdu_id_storage */
+#include <ol_txrx_internal.h>
+#include <htt_internal.h>
+
+/* IPA microcontroller TX data packet HTT header preset */
+/* 31 | 30 29 | 28 | 27 | 26 22 | 21 16 | 15 13 | 12 8 | 7 0
+ *----------------------------------------------------------------------------
+ * R | CS OL | R | PP | ext TID | vdev ID | pkt type | pkt subtyp | msg type
+ * 0 | 0 | 0 | | 0x1F | 0 | 2 | 0 | 0x01
+ ***----------------------------------------------------------------------------
+ * pkt ID | pkt length
+ ***----------------------------------------------------------------------------
+ * frag_desc_ptr
+ ***----------------------------------------------------------------------------
+ * peer_id
+ ***----------------------------------------------------------------------------
+ */
+#define HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT 0x07C04001
+
+#if HTT_PADDR64
+/*
+ * On 64-bit-paddr targets, zero out the upper 32 bits of the fragment
+ * descriptor address field, which immediately follows the lower 32 bits.
+ * (Macro parameter renamed from the misspelled "frag_filed_ptr".)
+ */
+#define HTT_TX_DESC_FRAG_FIELD_HI_UPDATE(frag_field_ptr) \
+do { \
+ frag_field_ptr++; \
+ /* frags_desc_ptr.hi */ \
+ *frag_field_ptr = 0; \
+} while (0)
+#else
+/* 32-bit paddr: there is no high word to update. */
+#define HTT_TX_DESC_FRAG_FIELD_HI_UPDATE(frag_field_ptr) {}
+#endif
+
+/*--- setup / tear-down functions -------------------------------------------*/
+
+#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
+uint32_t *g_dbg_htt_desc_end_addr, *g_dbg_htt_desc_start_addr;
+#endif
+
+static cdf_dma_addr_t htt_tx_get_paddr(htt_pdev_handle pdev,
+ char *target_vaddr);
+
+#ifdef HELIUMPLUS
+/**
+ * htt_tx_desc_get_size() - get tx descriptor size
+ * @pdev: htt device instance pointer
+ *
+ * This function will get HTT TX descriptor size and fragment descriptor size
+ *
+ * Return: None
+ */
+static void htt_tx_desc_get_size(struct htt_pdev_t *pdev)
+{
+ pdev->tx_descs.size = sizeof(struct htt_host_tx_desc_t);
+ if (HTT_WIFI_IP_VERSION(pdev->wifi_ip_ver.major, 0x2)) {
+ /*
+ * sizeof MSDU_EXT/Fragmentation descriptor.
+ */
+ pdev->frag_descs.size = sizeof(struct msdu_ext_desc_t);
+ } else {
+ /*
+ * Add the fragmentation descriptor elements.
+ * Add the most that the OS may deliver, plus one more
+ * in case the txrx code adds a prefix fragment (for
+ * TSO or audio interworking SNAP header)
+ */
+ pdev->frag_descs.size =
+ (ol_cfg_netbuf_frags_max(pdev->ctrl_pdev)+1) * 8
+ + 4;
+ }
+}
+
+/**
+ * htt_tx_frag_desc_field_update() - Update fragment descriptor field
+ * @pdev: htt device instance pointer
+ * @fptr: Fragment descriptor field pointer
+ * @index: Descriptor index to find page and offset
+ * @desc_v_ptr: descriptor virtual pointer (unused in this variant)
+ *
+ * This function will update the fragment descriptor field with the
+ * physical address of the fragment descriptor at @index in the pool.
+ *
+ * Return: None
+ */
+static void htt_tx_frag_desc_field_update(struct htt_pdev_t *pdev,
+ uint32_t *fptr, unsigned int index,
+ struct htt_tx_msdu_desc_t *desc_v_ptr)
+{
+ unsigned int target_page;
+ unsigned int offset;
+ struct cdf_mem_dma_page_t *dma_page;
+
+ /* locate the DMA page holding descriptor @index, then its offset */
+ target_page = index / pdev->frag_descs.desc_pages.num_element_per_page;
+ offset = index % pdev->frag_descs.desc_pages.num_element_per_page;
+ dma_page = &pdev->frag_descs.desc_pages.dma_pages[target_page];
+ *fptr = (uint32_t)(dma_page->page_p_addr +
+ offset * pdev->frag_descs.size);
+ HTT_TX_DESC_FRAG_FIELD_HI_UPDATE(fptr);
+ return;
+}
+
+/**
+ * htt_tx_frag_desc_attach() - Attach fragment descriptor pool
+ * @pdev: htt device instance pointer
+ * @desc_pool_elems: Number of fragment descriptors
+ *
+ * This function will allocate the fragment descriptor pool as a set of
+ * DMA-coherent multi-pages.
+ *
+ * Return: 0 on success, -ENOBUFS on allocation failure
+ */
+static int htt_tx_frag_desc_attach(struct htt_pdev_t *pdev,
+ uint16_t desc_pool_elems)
+{
+ pdev->frag_descs.pool_elems = desc_pool_elems;
+ cdf_mem_multi_pages_alloc(pdev->osdev, &pdev->frag_descs.desc_pages,
+ pdev->frag_descs.size, desc_pool_elems,
+ cdf_get_dma_mem_context((&pdev->frag_descs), memctx), false);
+ if ((0 == pdev->frag_descs.desc_pages.num_pages) ||
+ (NULL == pdev->frag_descs.desc_pages.dma_pages)) {
+ TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+ "FRAG descriptor alloc fail");
+ return -ENOBUFS;
+ }
+ return 0;
+}
+
+/**
+ * htt_tx_frag_desc_detach() - Detach fragment descriptor pool
+ * @pdev: htt device instance pointer
+ *
+ * This function will free the fragment descriptor pool allocated by
+ * htt_tx_frag_desc_attach().
+ *
+ * Return: None
+ */
+static void htt_tx_frag_desc_detach(struct htt_pdev_t *pdev)
+{
+ cdf_mem_multi_pages_free(pdev->osdev, &pdev->frag_descs.desc_pages,
+ cdf_get_dma_mem_context((&pdev->frag_descs), memctx), false);
+}
+
+/**
+ * htt_tx_frag_alloc() - Look up a single fragment descriptor in the pool
+ * @pdev: htt device instance pointer
+ * @index: Descriptor index
+ * @frag_paddr_lo: (output) fragment descriptor physical address (low word)
+ * @frag_ptr: (output) fragment descriptor virtual address
+ *
+ * Computes the virtual and physical addresses of the fragment descriptor
+ * at @index within the fragment descriptor pool.
+ *
+ * Return: 0 on success, 1 on an invalid index
+ */
+int htt_tx_frag_alloc(htt_pdev_handle pdev,
+ u_int16_t index, u_int32_t *frag_paddr_lo, void **frag_ptr)
+{
+ uint16_t frag_page_index;
+ uint16_t frag_elem_index;
+ struct cdf_mem_dma_page_t *dma_page;
+
+ /** Index should never be 0, since it's used by the hardware
+ to terminate the link. */
+ /*
+ * Bounds-check against the frag descriptor pool (was tx_descs;
+ * both pools are attached with the same element count, but the
+ * frag pool is the one being indexed here).
+ */
+ if (index >= pdev->frag_descs.pool_elems) {
+ *frag_ptr = NULL;
+ return 1;
+ }
+
+ frag_page_index = index /
+ pdev->frag_descs.desc_pages.num_element_per_page;
+ frag_elem_index = index %
+ pdev->frag_descs.desc_pages.num_element_per_page;
+ dma_page = &pdev->frag_descs.desc_pages.dma_pages[frag_page_index];
+
+ *frag_ptr = dma_page->page_v_addr_start +
+ frag_elem_index * pdev->frag_descs.size;
+ if (((char *)(*frag_ptr) < dma_page->page_v_addr_start) ||
+ ((char *)(*frag_ptr) > dma_page->page_v_addr_end)) {
+ *frag_ptr = NULL;
+ return 1;
+ }
+
+ *frag_paddr_lo = dma_page->page_p_addr +
+ frag_elem_index * pdev->frag_descs.size;
+ return 0;
+}
+#else
+/**
+ * htt_tx_desc_get_size() - get tx descriptor size
+ * @pdev: htt device instance pointer
+ *
+ * This function will get the HTT TX descriptor size; in this (non-Helium)
+ * variant the fragment list is embedded in the tx descriptor itself.
+ *
+ * Return: None
+ */
+static inline void htt_tx_desc_get_size(struct htt_pdev_t *pdev)
+{
+ /*
+ * Start with the size of the base struct
+ * that actually gets downloaded.
+ *
+ * Add the fragmentation descriptor elements.
+ * Add the most that the OS may deliver, plus one more
+ * in case the txrx code adds a prefix fragment (for
+ * TSO or audio interworking SNAP header)
+ */
+ pdev->tx_descs.size =
+ sizeof(struct htt_host_tx_desc_t)
+ + (ol_cfg_netbuf_frags_max(pdev->ctrl_pdev) + 1) * 8
+ /* 2x uint32_t */
+ + 4; /* uint32_t fragmentation list terminator */
+}
+
+/**
+ * htt_tx_frag_desc_field_update() - Update fragment descriptor field
+ * @pdev: htt device instance pointer
+ * @fptr: Fragment descriptor field pointer
+ * @index: Descriptor index (unused in this variant)
+ * @desc_v_ptr: descriptor virtual pointer used to compute the offset
+ *
+ * This function will point the fragment descriptor field at the fragment
+ * list embedded just past the tx descriptor itself.
+ *
+ * Return: None
+ */
+static void htt_tx_frag_desc_field_update(struct htt_pdev_t *pdev,
+ uint32_t *fptr, unsigned int index,
+ struct htt_tx_msdu_desc_t *desc_v_ptr)
+{
+ *fptr = (uint32_t)htt_tx_get_paddr(pdev, (char *)desc_v_ptr) +
+ HTT_TX_DESC_LEN;
+}
+
+/**
+ * htt_tx_frag_desc_attach() - Attach fragment descriptor pool
+ * @pdev: htt device instance pointer
+ * @desc_pool_elems: Number of fragment descriptors
+ *
+ * No-op in this variant: the fragment list lives inside the tx descriptor,
+ * so no separate pool is needed.
+ *
+ * Return: 0 (always succeeds)
+ */
+static inline int htt_tx_frag_desc_attach(struct htt_pdev_t *pdev,
+ int desc_pool_elems)
+{
+ return 0;
+}
+
+/**
+ * htt_tx_frag_desc_detach() - Detach fragment descriptor pool
+ * @pdev: htt device instance pointer
+ *
+ * No-op in this variant: nothing was allocated by the matching attach.
+ *
+ * Return: None
+ */
+static void htt_tx_frag_desc_detach(struct htt_pdev_t *pdev) {}
+#endif /* HELIUMPLUS */
+
+/**
+ * htt_tx_attach() - Attach HTT device instance
+ * @pdev: htt device instance pointer
+ * @desc_pool_elems: Number of TX descriptors
+ *
+ * Allocates the HTT TX descriptor pool as DMA multi-pages, threads the
+ * descriptors into a freelist (each free descriptor's first word points
+ * to the next free descriptor), and attaches the fragment descriptor pool.
+ *
+ * Return: 0 on success, -ENOBUFS on allocation failure
+ */
+int htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems)
+{
+ int i, i_int;
+ uint32_t **p;
+ struct cdf_mem_dma_page_t *page_info;
+ uint32_t num_link = 0;
+ uint16_t num_page, num_desc_per_page;
+
+ htt_tx_desc_get_size(pdev);
+
+ /*
+ * Make sure tx_descs.size is a multiple of 4-bytes.
+ * It should be, but round up just to be sure.
+ */
+ pdev->tx_descs.size = (pdev->tx_descs.size + 3) & (~0x3);
+
+ pdev->tx_descs.pool_elems = desc_pool_elems;
+ pdev->tx_descs.alloc_cnt = 0;
+ /* (removed unused local pool_size; the page allocator takes the
+ * element size and count directly) */
+ cdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_descs.desc_pages,
+ pdev->tx_descs.size, pdev->tx_descs.pool_elems,
+ cdf_get_dma_mem_context((&pdev->tx_descs), memctx), false);
+ if ((0 == pdev->tx_descs.desc_pages.num_pages) ||
+ (NULL == pdev->tx_descs.desc_pages.dma_pages)) {
+ TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+ "HTT desc alloc fail");
+ goto out_fail;
+ }
+ num_page = pdev->tx_descs.desc_pages.num_pages;
+ num_desc_per_page = pdev->tx_descs.desc_pages.num_element_per_page;
+
+ /* link tx descriptors into a freelist */
+ page_info = pdev->tx_descs.desc_pages.dma_pages;
+ pdev->tx_descs.freelist = (uint32_t *)page_info->page_v_addr_start;
+ p = (uint32_t **) pdev->tx_descs.freelist;
+ for (i = 0; i < num_page; i++) {
+ for (i_int = 0; i_int < num_desc_per_page; i_int++) {
+ if (i_int == (num_desc_per_page - 1)) {
+ /*
+ * Last element on this page,
+ * should point to the next page */
+ if (!page_info->page_v_addr_start) {
+ TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+ "over flow num link %d\n",
+ num_link);
+ goto free_htt_desc;
+ }
+ page_info++;
+ *p = (uint32_t *)page_info->page_v_addr_start;
+ } else {
+ *p = (uint32_t *)
+ (((char *) p) + pdev->tx_descs.size);
+ }
+ num_link++;
+ p = (uint32_t **) *p;
+ /* Last link established exit */
+ if (num_link == (pdev->tx_descs.pool_elems - 1))
+ break;
+ }
+ }
+ /* terminate the freelist */
+ *p = NULL;
+
+ if (htt_tx_frag_desc_attach(pdev, desc_pool_elems)) {
+ TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+ "HTT Frag descriptor alloc fail");
+ goto free_htt_desc;
+ }
+
+ /* success */
+ return 0;
+
+free_htt_desc:
+ cdf_mem_multi_pages_free(pdev->osdev, &pdev->tx_descs.desc_pages,
+ cdf_get_dma_mem_context((&pdev->tx_descs), memctx), false);
+out_fail:
+ return -ENOBUFS;
+}
+
+/**
+ * htt_tx_detach() - Detach HTT device instance
+ * @pdev: htt device instance pointer (may be NULL; logged and ignored)
+ *
+ * Frees the fragment descriptor pool and the TX descriptor pool
+ * allocated by htt_tx_attach().
+ */
+void htt_tx_detach(struct htt_pdev_t *pdev)
+{
+ if (!pdev) {
+ cdf_print("htt tx detach invalid instance");
+ return;
+ }
+
+ htt_tx_frag_desc_detach(pdev);
+ cdf_mem_multi_pages_free(pdev->osdev, &pdev->tx_descs.desc_pages,
+ cdf_get_dma_mem_context((&pdev->tx_descs), memctx), false);
+}
+
+/**
+ * htt_tx_get_paddr() - get physical address for htt desc
+ *
+ * Get HTT descriptor physical address from virtual address:
+ * find the containing DMA page first, then apply the offset.
+ *
+ * Return: Physical address of descriptor, or 0 on lookup failure
+ */
+static cdf_dma_addr_t htt_tx_get_paddr(htt_pdev_handle pdev,
+ char *target_vaddr)
+{
+ uint16_t i;
+ struct cdf_mem_dma_page_t *page_info = NULL;
+ uint64_t offset;
+
+ for (i = 0; i < pdev->tx_descs.desc_pages.num_pages; i++) {
+ page_info = pdev->tx_descs.desc_pages.dma_pages + i;
+ if (!page_info->page_v_addr_start) {
+ cdf_assert(0);
+ return 0;
+ }
+ if ((target_vaddr >= page_info->page_v_addr_start) &&
+ (target_vaddr <= page_info->page_v_addr_end))
+ break;
+ }
+
+ /* NOTE(review): this only catches num_pages == 0. If target_vaddr
+ * matches no page, the loop leaves page_info at the last page and
+ * the offset below is computed against it — confirm callers only
+ * pass addresses that came from this pool.
+ */
+ if (!page_info) {
+ TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "invalid page_info");
+ return 0;
+ }
+
+ offset = (uint64_t)(target_vaddr - page_info->page_v_addr_start);
+ return page_info->page_p_addr + offset;
+}
+
+/*--- descriptor allocation functions ---------------------------------------*/
+
+/**
+ * htt_tx_desc_alloc() - pop one HTT tx descriptor off the freelist
+ * @pdev: the HTT instance the descriptor pool belongs to
+ * @paddr_lo: (output) physical address of the descriptor, including the
+ * HTC header headroom
+ * @index: descriptor index, used to locate the matching fragment desc
+ *
+ * Return: virtual address of the HTT tx descriptor (past the HTC header
+ * headroom), or NULL if the pool is exhausted
+ */
+void *htt_tx_desc_alloc(htt_pdev_handle pdev, uint32_t *paddr_lo,
+ uint16_t index)
+{
+ struct htt_host_tx_desc_t *htt_host_tx_desc; /* includes HTC hdr */
+ struct htt_tx_msdu_desc_t *htt_tx_desc; /* doesn't include HTC hdr */
+ uint32_t *fragmentation_descr_field_ptr;
+
+ htt_host_tx_desc = (struct htt_host_tx_desc_t *)pdev->tx_descs.freelist;
+ if (!htt_host_tx_desc)
+ return NULL; /* pool is exhausted */
+
+ htt_tx_desc = &htt_host_tx_desc->align32.tx_desc;
+
+ /* advance the freelist head to the next free descriptor
+ * (NOTE(review): this non-NULL re-check is redundant — the NULL
+ * case already returned above) */
+ if (pdev->tx_descs.freelist) {
+ pdev->tx_descs.freelist =
+ *((uint32_t **) pdev->tx_descs.freelist);
+ pdev->tx_descs.alloc_cnt++;
+ }
+ /*
+ * For LL, set up the fragmentation descriptor address.
+ * Currently, this HTT tx desc allocation is performed once up front.
+ * If this is changed to have the allocation done during tx, then it
+ * would be helpful to have separate htt_tx_desc_alloc functions for
+ * HL vs. LL, to remove the below conditional branch.
+ */
+ fragmentation_descr_field_ptr = (uint32_t *)
+ ((uint32_t *) htt_tx_desc) +
+ HTT_TX_DESC_FRAGS_DESC_PADDR_OFFSET_DWORD;
+ /*
+ * The fragmentation descriptor is allocated from consistent
+ * memory. Therefore, we can use the address directly rather
+ * than having to map it from a virtual/CPU address to a
+ * physical/bus address.
+ */
+ htt_tx_frag_desc_field_update(pdev, fragmentation_descr_field_ptr,
+ index, htt_tx_desc);
+
+ /*
+ * Include the headroom for the HTC frame header when specifying the
+ * physical address for the HTT tx descriptor.
+ */
+ *paddr_lo = (uint32_t)htt_tx_get_paddr(pdev, (char *)htt_host_tx_desc);
+ /*
+ * The allocated tx descriptor space includes headroom for a
+ * HTC frame header. Hide this headroom, so that we don't have
+ * to jump past the headroom each time we program a field within
+ * the tx desc, but only once when we download the tx desc (and
+ * the headroom) to the target via HTC.
+ * Skip past the headroom and return the address of the HTT tx desc.
+ */
+ return (void *)htt_tx_desc;
+}
+
+/**
+ * htt_tx_desc_free() - return an HTT tx descriptor to the freelist
+ * @pdev: the HTT instance the descriptor pool belongs to
+ * @tx_desc: descriptor pointer as returned by htt_tx_desc_alloc()
+ *
+ * The freed descriptor becomes the new freelist head; its first word is
+ * overwritten with a pointer to the previous head.
+ */
+void htt_tx_desc_free(htt_pdev_handle pdev, void *tx_desc)
+{
+ char *htt_host_tx_desc = tx_desc;
+ /* rewind over the HTC frame header space */
+ htt_host_tx_desc -=
+ offsetof(struct htt_host_tx_desc_t, align32.tx_desc);
+ *((uint32_t **) htt_host_tx_desc) = pdev->tx_descs.freelist;
+ pdev->tx_descs.freelist = (uint32_t *) htt_host_tx_desc;
+ pdev->tx_descs.alloc_cnt--;
+}
+
+/*--- descriptor field access methods ---------------------------------------*/
+
+/**
+ * htt_tx_desc_frags_table_set() - program the fragment-table address field
+ * of an HTT tx descriptor
+ * @pdev: the HTT instance the descriptor belongs to
+ * @htt_tx_desc: the tx descriptor to modify
+ * @paddr: physical address of the fragment table to install
+ * @frag_desc_paddr_lo: default frag desc paddr (HELIUMPLUS_PADDR64 reset)
+ * @reset: non-zero to restore the descriptor's default fragment table
+ */
+void htt_tx_desc_frags_table_set(htt_pdev_handle pdev,
+ void *htt_tx_desc,
+ uint32_t paddr,
+ uint32_t frag_desc_paddr_lo,
+ int reset)
+{
+ uint32_t *fragmentation_descr_field_ptr;
+
+ fragmentation_descr_field_ptr = (uint32_t *)
+ ((uint32_t *) htt_tx_desc) +
+ HTT_TX_DESC_FRAGS_DESC_PADDR_OFFSET_DWORD;
+ if (reset) {
+#if defined(HELIUMPLUS_PADDR64)
+ *fragmentation_descr_field_ptr = frag_desc_paddr_lo;
+#else
+ /* default: the frag list embedded just past the tx desc */
+ *fragmentation_descr_field_ptr =
+ htt_tx_get_paddr(pdev, htt_tx_desc) + HTT_TX_DESC_LEN;
+#endif
+ } else {
+ *fragmentation_descr_field_ptr = paddr;
+ }
+}
+
+/* PUT THESE AS INLINE IN ol_htt_tx_api.h */
+
+/* Intentionally a no-op in this build; see note above about inlining. */
+void htt_tx_desc_flag_postponed(htt_pdev_handle pdev, void *desc)
+{
+}
+
+/* Discard pending tx packets by flushing the HTC endpoint queues. */
+void htt_tx_pending_discard(htt_pdev_handle pdev)
+{
+ htc_flush_surprise_remove(pdev->htc_pdev);
+}
+
+/* Intentionally a no-op in this build; see note above about inlining. */
+void htt_tx_desc_flag_batch_more(htt_pdev_handle pdev, void *desc)
+{
+}
+
+/*--- tx send function ------------------------------------------------------*/
+
+#ifdef ATH_11AC_TXCOMPACT
+
+/* Schedule (re-send) the packets queued in HTT that could not be sent
+ * out earlier because no CE descriptor was available. Stops and
+ * re-queues the current packet at the head as soon as HTC rejects one.
+ */
+void htt_tx_sched(htt_pdev_handle pdev)
+{
+ cdf_nbuf_t msdu;
+ int download_len = pdev->download_len;
+ int packet_len;
+
+ HTT_TX_NBUF_QUEUE_REMOVE(pdev, msdu);
+ while (msdu != NULL) {
+ int not_accepted;
+ /* packet length includes HTT tx desc frag added above */
+ packet_len = cdf_nbuf_len(msdu);
+ if (packet_len < download_len) {
+ /*
+ * This case of packet length being less than the
+ * nominal download length can happen for a couple
+ * of reasons:
+ * In HL, the nominal download length is a large
+ * artificial value.
+ * In LL, the frame may not have the optional header
+ * fields accounted for in the nominal download size
+ * (LLC/SNAP header, IPv4 or IPv6 header).
+ */
+ download_len = packet_len;
+ }
+
+ not_accepted =
+ htc_send_data_pkt(pdev->htc_pdev, msdu,
+ pdev->htc_endpoint,
+ download_len);
+ if (not_accepted) {
+ /* still no resources: put it back and try later */
+ HTT_TX_NBUF_QUEUE_INSERT_HEAD(pdev, msdu);
+ return;
+ }
+ HTT_TX_NBUF_QUEUE_REMOVE(pdev, msdu);
+ }
+}
+
+/**
+ * htt_tx_send_std() - download a standard tx frame to the target via HTC
+ * @pdev: the HTT instance
+ * @msdu: the frame to send (HTT tx descriptor already attached)
+ * @msdu_id: msdu id (not referenced in this path)
+ *
+ * If earlier frames are still queued for lack of resources, the frame is
+ * queued behind them and the queue is scheduled; otherwise it is handed
+ * to HTC directly, and queued on rejection.
+ *
+ * Return: 0 (always)
+ */
+int htt_tx_send_std(htt_pdev_handle pdev, cdf_nbuf_t msdu, uint16_t msdu_id)
+{
+
+ int download_len = pdev->download_len;
+
+ int packet_len;
+
+ /* packet length includes HTT tx desc frag added above */
+ packet_len = cdf_nbuf_len(msdu);
+ if (packet_len < download_len) {
+ /*
+ * This case of packet length being less than the nominal
+ * download length can happen for a couple of reasons:
+ * In HL, the nominal download length is a large artificial
+ * value.
+ * In LL, the frame may not have the optional header fields
+ * accounted for in the nominal download size (LLC/SNAP header,
+ * IPv4 or IPv6 header).
+ */
+ download_len = packet_len;
+ }
+
+ NBUF_UPDATE_TX_PKT_COUNT(msdu, NBUF_TX_PKT_HTT);
+ /* NOTE(review): sizeof(cdf_nbuf_data(msdu)) is the size of the data
+ * pointer, not of the packet — confirm this is the intended trace
+ * record length. */
+ DPTRACE(cdf_dp_trace(msdu, CDF_DP_TRACE_HTT_PACKET_PTR_RECORD,
+ (uint8_t *)(cdf_nbuf_data(msdu)),
+ sizeof(cdf_nbuf_data(msdu))));
+ if (cdf_nbuf_queue_len(&pdev->txnbufq) > 0) {
+ HTT_TX_NBUF_QUEUE_ADD(pdev, msdu);
+ htt_tx_sched(pdev);
+ return 0;
+ }
+
+ cdf_nbuf_trace_update(msdu, "HT:T:");
+ if (htc_send_data_pkt
+ (pdev->htc_pdev, msdu, pdev->htc_endpoint, download_len)) {
+ /* HTC rejected the frame; queue it for a later retry */
+ HTT_TX_NBUF_QUEUE_ADD(pdev, msdu);
+ }
+
+ return 0; /* success */
+
+}
+
+#ifdef FEATURE_RUNTIME_PM
+/**
+ * htt_tx_resume_handler() - resume callback for the htt endpoint
+ * @context: a pointer to the htt context
+ *
+ * Kicks htt_tx_sched() so frames queued while the bus was suspended
+ * get drained.
+ */
+void htt_tx_resume_handler(void *context)
+{
+	struct htt_pdev_t *pdev = (struct htt_pdev_t *) context;
+
+	htt_tx_sched(pdev);
+}
+#else
+/* Without runtime PM there is nothing to drain on resume. */
+void
+htt_tx_resume_handler(void *context) { }
+#endif
+
+/**
+ * htt_tx_send_batch() - batch download is an HL-only feature
+ * @pdev: HTT pdev handle
+ * @head_msdu: linked list of frames to send
+ * @num_msdus: number of frames in the list
+ *
+ * Not supported on the ATH_11AC_TXCOMPACT (LL) path: logs, asserts,
+ * and returns the whole list as rejected.
+ *
+ * Return: @head_msdu (every frame rejected)
+ */
+cdf_nbuf_t
+htt_tx_send_batch(htt_pdev_handle pdev, cdf_nbuf_t head_msdu, int num_msdus)
+{
+	/* fixed typo in the diagnostic: "curently" -> "currently" */
+	cdf_print("*** %s currently only applies for HL systems\n", __func__);
+	cdf_assert(0);
+	return head_msdu;
+
+}
+
+/**
+ * htt_tx_send_nonstd() - send a frame with a non-standard L2 header
+ * @pdev: HTT pdev handle
+ * @msdu: frame to send
+ * @msdu_id: tx descriptor id for the msdu
+ * @pkt_type: L2 header type carried by the frame (not inspected here)
+ *
+ * Computes a worst-case download length and only sanity-checks it
+ * against the configured length; the actual send still uses
+ * pdev->download_len via htt_tx_send_std().
+ *
+ * Return: 0 (see htt_tx_send_std)
+ */
+int
+htt_tx_send_nonstd(htt_pdev_handle pdev,
+		   cdf_nbuf_t msdu,
+		   uint16_t msdu_id, enum htt_pkt_type pkt_type)
+{
+	int download_len;
+
+	/*
+	 * The pkt_type could be checked to see what L2 header type is present,
+	 * and then the L2 header could be examined to determine its length.
+	 * But for simplicity, just use the maximum possible header size,
+	 * rather than computing the actual header size.
+	 */
+	download_len = sizeof(struct htt_host_tx_desc_t)
+		+ HTT_TX_HDR_SIZE_OUTER_HDR_MAX /* worst case */
+		+ HTT_TX_HDR_SIZE_802_1Q
+		+ HTT_TX_HDR_SIZE_LLC_SNAP
+		+ ol_cfg_tx_download_size(pdev->ctrl_pdev);
+	/* the worst case must still fit within the nominal download length */
+	cdf_assert(download_len <= pdev->download_len);
+	return htt_tx_send_std(pdev, msdu, msdu_id);
+}
+
+#else /*ATH_11AC_TXCOMPACT */
+
+#ifdef QCA_TX_HTT2_SUPPORT
+/*
+ * htt_tx_htt2_get_ep_id() - choose the HTC endpoint for a tx frame.
+ * Small frames flagged for parallel download go out on the dedicated
+ * TX_HTT2 endpoint (when one was allocated); everything else uses the
+ * regular HTT endpoint.
+ */
+static inline HTC_ENDPOINT_ID
+htt_tx_htt2_get_ep_id(htt_pdev_handle pdev, cdf_nbuf_t msdu)
+{
+	/*
+	 * TX HTT2 service mainly for small sized frame and check if
+	 * this candidate frame allow or not.
+	 */
+	if ((pdev->htc_tx_htt2_endpoint != ENDPOINT_UNUSED) &&
+	    cdf_nbuf_get_tx_parallel_dnload_frm(msdu) &&
+	    (cdf_nbuf_len(msdu) < pdev->htc_tx_htt2_max_size))
+		return pdev->htc_tx_htt2_endpoint;
+	else
+		return pdev->htc_endpoint;
+}
+#else
+/* Without TX_HTT2 support every frame uses the regular HTT endpoint. */
+#define htt_tx_htt2_get_ep_id(pdev, msdu) (pdev->htc_endpoint)
+#endif /* QCA_TX_HTT2_SUPPORT */
+
+/**
+ * htt_tx_send_base() - hand one tx frame to HTC
+ * @pdev: HTT pdev handle
+ * @msdu: frame to send; fragment 0 is the HTT tx descriptor
+ * @msdu_id: tx descriptor id, saved in the HTC packet for completion
+ * @download_len: nominal number of bytes to download to the target
+ * @more_data: nonzero when more frames of the same batch will follow
+ *
+ * Return: 0 on success, -ENOBUFS if no HTC packet wrapper is available
+ */
+static inline int
+htt_tx_send_base(htt_pdev_handle pdev,
+		 cdf_nbuf_t msdu,
+		 uint16_t msdu_id, int download_len, uint8_t more_data)
+{
+	struct htt_host_tx_desc_t *htt_host_tx_desc;
+	struct htt_htc_pkt *pkt;
+	int packet_len;
+	HTC_ENDPOINT_ID ep_id;
+
+	/*
+	 * The HTT tx descriptor was attached as the prefix fragment to the
+	 * msdu netbuf during the call to htt_tx_desc_init.
+	 * Retrieve it so we can provide its HTC header space to HTC.
+	 */
+	htt_host_tx_desc = (struct htt_host_tx_desc_t *)
+		cdf_nbuf_get_frag_vaddr(msdu, 0);
+
+	pkt = htt_htc_pkt_alloc(pdev);
+	if (!pkt)
+		return -ENOBUFS;       /* failure */
+
+	pkt->msdu_id = msdu_id;
+	pkt->pdev_ctxt = pdev->txrx_pdev;
+
+	/* packet length includes HTT tx desc frag added above */
+	packet_len = cdf_nbuf_len(msdu);
+	if (packet_len < download_len) {
+		/*
+		 * This case of packet length being less than the nominal
+		 * download length can happen for a couple reasons:
+		 * In HL, the nominal download length is a large artificial
+		 * value.
+		 * In LL, the frame may not have the optional header fields
+		 * accounted for in the nominal download size (LLC/SNAP header,
+		 * IPv4 or IPv6 header).
+		 */
+		download_len = packet_len;
+	}
+
+	ep_id = htt_tx_htt2_get_ep_id(pdev, msdu);
+
+	/* the download excludes the HTC header, which HTC writes itself */
+	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
+			       pdev->tx_send_complete_part2,
+			       (unsigned char *)htt_host_tx_desc,
+			       download_len - HTC_HDR_LENGTH,
+			       ep_id,
+			       1); /* tag - not relevant here */
+
+	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msdu);
+
+	cdf_nbuf_trace_update(msdu, "HT:T:");
+	NBUF_UPDATE_TX_PKT_COUNT(msdu, NBUF_TX_PKT_HTT);
+	DPTRACE(cdf_dp_trace(msdu, CDF_DP_TRACE_HTT_PACKET_PTR_RECORD,
+			     (uint8_t *)(cdf_nbuf_data(msdu)),
+			     sizeof(cdf_nbuf_data(msdu))));
+	htc_send_data_pkt(pdev->htc_pdev, &pkt->htc_pkt, more_data);
+
+	return 0;               /* success */
+}
+
+/**
+ * htt_tx_send_batch() - send a linked list of frames, one at a time
+ * @pdev: HTT pdev handle
+ * @head_msdu: head of the linked msdu list
+ * @num_msdus: number of msdus in the list
+ *
+ * Return: linked list of the frames HTC did not accept (NULL if all
+ * were accepted); the rejected list is in reverse submission order.
+ */
+cdf_nbuf_t
+htt_tx_send_batch(htt_pdev_handle pdev, cdf_nbuf_t head_msdu, int num_msdus)
+{
+	cdf_nbuf_t rejected = NULL;
+	uint16_t *msdu_id_storage;
+	uint16_t msdu_id;
+	cdf_nbuf_t msdu;
+	/*
+	 * FOR NOW, iterate through the batch, sending the frames singly.
+	 * Eventually HTC and HIF should be able to accept a batch of
+	 * data frames rather than singles.
+	 */
+	msdu = head_msdu;
+	while (num_msdus--) {
+		cdf_nbuf_t next_msdu = cdf_nbuf_next(msdu);
+		msdu_id_storage = ol_tx_msdu_id_storage(msdu);
+		msdu_id = *msdu_id_storage;
+
+		/*
+		 * htt_tx_send_base() returns 0 on success and a negative
+		 * errno (-ENOBUFS) on failure; collect failed frames.
+		 */
+		if (htt_tx_send_base(pdev, msdu, msdu_id, pdev->download_len,
+				     num_msdus)) {
+			cdf_nbuf_set_next(msdu, rejected);
+			rejected = msdu;
+		}
+		msdu = next_msdu;
+	}
+	return rejected;
+}
+
+/**
+ * htt_tx_send_nonstd() - send a frame with a non-standard L2 header
+ * @pdev: HTT pdev handle
+ * @msdu: frame to send
+ * @msdu_id: tx descriptor id for the msdu
+ * @pkt_type: L2 header type carried by the frame (not inspected here)
+ *
+ * Return: 0 on success, -ENOBUFS on failure (see htt_tx_send_base)
+ */
+int
+htt_tx_send_nonstd(htt_pdev_handle pdev,
+		   cdf_nbuf_t msdu,
+		   uint16_t msdu_id, enum htt_pkt_type pkt_type)
+{
+	int download_len;
+
+	/*
+	 * The pkt_type could be checked to see what L2 header type is present,
+	 * and then the L2 header could be examined to determine its length.
+	 * But for simplicity, just use the maximum possible header size,
+	 * rather than computing the actual header size.
+	 */
+	download_len = sizeof(struct htt_host_tx_desc_t)
+		+ HTT_TX_HDR_SIZE_OUTER_HDR_MAX /* worst case */
+		+ HTT_TX_HDR_SIZE_802_1Q
+		+ HTT_TX_HDR_SIZE_LLC_SNAP
+		+ ol_cfg_tx_download_size(pdev->ctrl_pdev);
+	return htt_tx_send_base(pdev, msdu, msdu_id, download_len, 0);
+}
+
+/*
+ * htt_tx_send_std() - send one frame using the pdev's nominal download
+ * length; thin wrapper around htt_tx_send_base().
+ */
+int htt_tx_send_std(htt_pdev_handle pdev, cdf_nbuf_t msdu, uint16_t msdu_id)
+{
+	return htt_tx_send_base(pdev, msdu, msdu_id, pdev->download_len, 0);
+}
+
+#endif /*ATH_11AC_TXCOMPACT */
+#ifdef HTT_DBG
+/**
+ * htt_tx_desc_display() - debug dump of an HTT tx msdu descriptor
+ * @tx_desc: pointer to the descriptor to print
+ *
+ * Prints every descriptor field via cdf_print; layout differs between
+ * 64-bit (HTT_PADDR64) and 32-bit physical-address targets.
+ */
+void htt_tx_desc_display(void *tx_desc)
+{
+	struct htt_tx_msdu_desc_t *htt_tx_desc;
+
+	htt_tx_desc = (struct htt_tx_msdu_desc_t *)tx_desc;
+
+	/* only works for little-endian */
+	cdf_print("HTT tx desc (@ %p):\n", htt_tx_desc);
+	cdf_print("  msg type = %d\n", htt_tx_desc->msg_type);
+	cdf_print("  pkt subtype = %d\n", htt_tx_desc->pkt_subtype);
+	cdf_print("  pkt type = %d\n", htt_tx_desc->pkt_type);
+	cdf_print("  vdev ID = %d\n", htt_tx_desc->vdev_id);
+	cdf_print("  ext TID = %d\n", htt_tx_desc->ext_tid);
+	cdf_print("  postponed = %d\n", htt_tx_desc->postponed);
+#if HTT_PADDR64
+	cdf_print("  reserved_dword0_bits28 = %d\n", htt_tx_desc->reserved_dword0_bits28);
+	cdf_print("  cksum_offload = %d\n", htt_tx_desc->cksum_offload);
+	cdf_print("  tx_compl_req= %d\n", htt_tx_desc->tx_compl_req);
+#else /* !HTT_PADDR64 */
+	cdf_print("  batch more = %d\n", htt_tx_desc->more_in_batch);
+#endif /* HTT_PADDR64 */
+	cdf_print("  length = %d\n", htt_tx_desc->len);
+	cdf_print("  id = %d\n", htt_tx_desc->id);
+#if HTT_PADDR64
+	cdf_print("  frag desc addr.lo = %#x\n",
+		  htt_tx_desc->frags_desc_ptr.lo);
+	cdf_print("  frag desc addr.hi = %#x\n",
+		  htt_tx_desc->frags_desc_ptr.hi);
+	cdf_print("  peerid = %d\n", htt_tx_desc->peerid);
+	cdf_print("  chanfreq = %d\n", htt_tx_desc->chanfreq);
+#else /* ! HTT_PADDR64 */
+	cdf_print("  frag desc addr = %#x\n", htt_tx_desc->frags_desc_ptr);
+#endif /* HTT_PADDR64 */
+}
+#endif
+
+#ifdef IPA_OFFLOAD
+#ifdef QCA_WIFI_2_0
+/**
+ * htt_tx_ipa_uc_wdi_tx_buf_alloc() - Alloc WDI TX buffers
+ * @pdev: htt context
+ * @uc_tx_buf_sz: TX buffer size
+ * @uc_tx_buf_cnt: TX Buffer count
+ * @uc_tx_partition_base: IPA UC TX partition base value
+ *
+ * Allocate WDI TX buffers. Also note Rome supports only WDI 1.0.
+ *
+ * Return: 0 success
+ */
+int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
+				   unsigned int uc_tx_buf_sz,
+				   unsigned int uc_tx_buf_cnt,
+				   unsigned int uc_tx_partition_base)
+{
+	unsigned int tx_buffer_count;
+	cdf_nbuf_t buffer_vaddr;
+	cdf_dma_addr_t buffer_paddr;
+	uint32_t *header_ptr;
+	uint32_t *ring_vaddr;
+/* byte offsets, within each tx buffer, of the frag desc and frag header */
+#define IPA_UC_TX_BUF_FRAG_DESC_OFFSET 16
+#define IPA_UC_TX_BUF_FRAG_HDR_OFFSET 32
+
+	ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr;
+	/* Allocate TX buffers as many as possible */
+	/* NOTE(review): only (cnt - 1) buffers are allocated — presumably
+	 * one ring slot is kept unused; confirm against the ring design. */
+	for (tx_buffer_count = 0;
+	     tx_buffer_count < (uc_tx_buf_cnt - 1); tx_buffer_count++) {
+		buffer_vaddr = cdf_nbuf_alloc(pdev->osdev,
+					      uc_tx_buf_sz, 0, 4, false);
+		if (!buffer_vaddr) {
+			/* partial success: report how many were allocated */
+			cdf_print("%s: TX BUF alloc fail, loop index: %d",
+				  __func__, tx_buffer_count);
+			return tx_buffer_count;
+		}
+
+		/* Init buffer */
+		cdf_mem_zero(cdf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
+		header_ptr = (uint32_t *) cdf_nbuf_data(buffer_vaddr);
+
+		/* HTT control header */
+		*header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
+		header_ptr++;
+
+		/* PKT ID */
+		*header_ptr |= ((uint16_t) uc_tx_partition_base +
+				tx_buffer_count) << 16;
+
+		/* NOTE(review): map result is not checked — verify a map
+		 * failure is impossible here, or add handling. */
+		cdf_nbuf_map(pdev->osdev, buffer_vaddr, CDF_DMA_BIDIRECTIONAL);
+		buffer_paddr = cdf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0);
+		header_ptr++;
+		*header_ptr = (uint32_t) (buffer_paddr +
+					  IPA_UC_TX_BUF_FRAG_DESC_OFFSET);
+		header_ptr++;
+		*header_ptr = 0xFFFFFFFF;
+
+		/* FRAG Header */
+		header_ptr++;
+		*header_ptr = buffer_paddr + IPA_UC_TX_BUF_FRAG_HDR_OFFSET;
+
+		*ring_vaddr = buffer_paddr;
+		pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] =
+			buffer_vaddr;
+		/* Memory barrier to ensure actual value updated */
+		/* NOTE(review): no explicit barrier call follows the comment
+		 * above — confirm whether one is required here. */
+
+		ring_vaddr++;
+	}
+	return tx_buffer_count;
+}
+#else
+/**
+ * htt_tx_ipa_uc_wdi_tx_buf_alloc() - Alloc WDI TX buffers (non-Rome layout)
+ * @pdev: htt context
+ * @uc_tx_buf_sz: TX buffer size
+ * @uc_tx_buf_cnt: TX buffer count
+ * @uc_tx_partition_base: IPA UC TX partition base value
+ *
+ * Variant with a 64-bit fragment-descriptor pointer and a 6-word TSO
+ * header in each buffer; consumes two completion-ring entries per buffer.
+ *
+ * Return: number of buffers actually allocated (may be fewer than
+ * requested if an allocation fails)
+ */
+int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
+				   unsigned int uc_tx_buf_sz,
+				   unsigned int uc_tx_buf_cnt,
+				   unsigned int uc_tx_partition_base)
+{
+	unsigned int tx_buffer_count;
+	cdf_nbuf_t buffer_vaddr;
+	uint32_t buffer_paddr;
+	uint32_t *header_ptr;
+	uint32_t *ring_vaddr;
+/* byte offsets, within each tx buffer, of the frag desc and frag header */
+#define IPA_UC_TX_BUF_FRAG_DESC_OFFSET 20
+#define IPA_UC_TX_BUF_FRAG_HDR_OFFSET 64
+#define IPA_UC_TX_BUF_TSO_HDR_SIZE 6
+
+	ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr;
+	/* Allocate TX buffers as many as possible */
+	for (tx_buffer_count = 0;
+	     tx_buffer_count < (uc_tx_buf_cnt - 1); tx_buffer_count++) {
+		buffer_vaddr = cdf_nbuf_alloc(pdev->osdev,
+					      uc_tx_buf_sz, 0, 4, false);
+		if (!buffer_vaddr) {
+			/* partial success: report how many were allocated */
+			cdf_print("%s: TX BUF alloc fail, loop index: %d",
+				  __func__, tx_buffer_count);
+			return tx_buffer_count;
+		}
+
+		/* Init buffer */
+		cdf_mem_zero(cdf_nbuf_data(buffer_vaddr), uc_tx_buf_sz);
+		header_ptr = (uint32_t *) cdf_nbuf_data(buffer_vaddr);
+
+		/* HTT control header */
+		*header_ptr = HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT;
+		header_ptr++;
+
+		/* PKT ID */
+		*header_ptr |= ((uint16_t) uc_tx_partition_base +
+				tx_buffer_count) << 16;
+
+		/* NOTE(review): map result is not checked — verify a map
+		 * failure is impossible here, or add handling. */
+		cdf_nbuf_map(pdev->osdev, buffer_vaddr, CDF_DMA_BIDIRECTIONAL);
+		buffer_paddr = cdf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0);
+		header_ptr++;
+
+		/* Frag Desc Pointer */
+		/* 64bits descriptor, Low 32bits */
+		*header_ptr = (uint32_t) (buffer_paddr +
+					  IPA_UC_TX_BUF_FRAG_DESC_OFFSET);
+		header_ptr++;
+
+		/* 64bits descriptor, high 32bits */
+		*header_ptr = 0;
+		header_ptr++;
+
+		/* chanreq, peerid */
+		*header_ptr = 0xFFFFFFFF;
+		header_ptr++;
+
+		/* FRAG Header */
+		/* 6 words TSO header */
+		header_ptr += IPA_UC_TX_BUF_TSO_HDR_SIZE;
+		*header_ptr = buffer_paddr + IPA_UC_TX_BUF_FRAG_HDR_OFFSET;
+
+		*ring_vaddr = buffer_paddr;
+		pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[tx_buffer_count] =
+			buffer_vaddr;
+		/* Memory barrier to ensure actual value updated */
+		/* NOTE(review): no explicit barrier call follows the comment
+		 * above — confirm whether one is required here. */
+
+		/* this layout consumes two completion-ring words per buffer */
+		ring_vaddr += 2;
+	}
+	return tx_buffer_count;
+}
+#endif
+
+/**
+ * htt_tx_ipa_uc_attach() - attach htt ipa uc tx resource
+ * @pdev: htt context
+ * @uc_tx_buf_sz: single tx buffer size
+ * @uc_tx_buf_cnt: total tx buffer count
+ * @uc_tx_partition_base: tx buffer partition start
+ *
+ * Return: 0 success
+ * ENOBUFS No memory fail
+ */
+int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
+			 unsigned int uc_tx_buf_sz,
+			 unsigned int uc_tx_buf_cnt,
+			 unsigned int uc_tx_partition_base)
+{
+	int return_code = 0;
+	unsigned int tx_comp_ring_size;
+
+	/* Allocate CE Write Index WORD */
+	pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr =
+		cdf_os_mem_alloc_consistent(
+			pdev->osdev,
+			4,
+			&pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
+			cdf_get_dma_mem_context(
+				(&pdev->ipa_uc_tx_rsc.tx_ce_idx),
+				memctx));
+	if (!pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
+		cdf_print("%s: CE Write Index WORD alloc fail", __func__);
+		return -ENOBUFS;
+	}
+
+	/* Allocate TX COMP Ring */
+	tx_comp_ring_size = uc_tx_buf_cnt * sizeof(cdf_nbuf_t);
+	pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr =
+		cdf_os_mem_alloc_consistent(
+			pdev->osdev,
+			tx_comp_ring_size,
+			&pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
+			cdf_get_dma_mem_context((&pdev->ipa_uc_tx_rsc.
+						 tx_comp_base),
+						memctx));
+	if (!pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
+		cdf_print("%s: TX COMP ring alloc fail", __func__);
+		return_code = -ENOBUFS;
+		goto free_tx_ce_idx;
+	}
+
+	cdf_mem_zero(pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr, tx_comp_ring_size);
+
+	/* Allocate TX BUF vAddress Storage */
+	pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg =
+		(cdf_nbuf_t *) cdf_mem_malloc(uc_tx_buf_cnt *
+					      sizeof(cdf_nbuf_t));
+	if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg) {
+		cdf_print("%s: TX BUF POOL vaddr storage alloc fail", __func__);
+		return_code = -ENOBUFS;
+		goto free_tx_comp_base;
+	}
+	cdf_mem_zero(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg,
+		     uc_tx_buf_cnt * sizeof(cdf_nbuf_t));
+
+	/* a partial buffer allocation is tolerated; the achieved count is
+	 * recorded and the function still reports overall success */
+	pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt = htt_tx_ipa_uc_wdi_tx_buf_alloc(
+		pdev, uc_tx_buf_sz, uc_tx_buf_cnt, uc_tx_partition_base);
+
+
+	return 0;
+
+free_tx_comp_base:
+	/* NOTE(review): frees with ol_cfg_ipa_uc_tx_max_buf_cnt() * 4 while
+	 * the ring was allocated with uc_tx_buf_cnt * sizeof(cdf_nbuf_t) —
+	 * these sizes should match; verify against the allocator's contract. */
+	cdf_os_mem_free_consistent(pdev->osdev,
+				   ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->
+								ctrl_pdev) * 4,
+				   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
+				   pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
+				   cdf_get_dma_mem_context((&pdev->
+							    ipa_uc_tx_rsc.
+							    tx_comp_base),
+							   memctx));
+free_tx_ce_idx:
+	cdf_os_mem_free_consistent(pdev->osdev,
+				   4,
+				   pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
+				   pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
+				   cdf_get_dma_mem_context((&pdev->
+							    ipa_uc_tx_rsc.
+							    tx_ce_idx),
+							   memctx));
+	return return_code;
+}
+
+/**
+ * htt_tx_ipa_uc_detach() - release all IPA uC tx offload resources
+ * @pdev: htt context
+ *
+ * Frees the CE write-index word, the tx completion ring, every mapped
+ * tx buffer, and the buffer-pointer storage array, in that order.
+ *
+ * Return: 0 always
+ */
+int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev)
+{
+	uint16_t idx;
+
+	if (pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr) {
+		cdf_os_mem_free_consistent(
+			pdev->osdev,
+			4,
+			pdev->ipa_uc_tx_rsc.tx_ce_idx.vaddr,
+			pdev->ipa_uc_tx_rsc.tx_ce_idx.paddr,
+			cdf_get_dma_mem_context(
+				(&pdev->ipa_uc_tx_rsc.tx_ce_idx),
+				memctx));
+	}
+
+	if (pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
+		/* NOTE(review): free size (max_buf_cnt * 4) differs from the
+		 * attach-time allocation (uc_tx_buf_cnt * sizeof(cdf_nbuf_t));
+		 * verify the sizes agree. */
+		cdf_os_mem_free_consistent(
+			pdev->osdev,
+			ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4,
+			pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
+			pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
+			cdf_get_dma_mem_context((&pdev->ipa_uc_tx_rsc.
+						 tx_comp_base),
+						memctx));
+	}
+
+	/* Free each single buffer */
+	for (idx = 0; idx < pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
+		if (pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg[idx]) {
+			/* NOTE(review): buffers were mapped BIDIRECTIONAL at
+			 * alloc time but are unmapped FROM_DEVICE here —
+			 * confirm the direction mismatch is intentional. */
+			cdf_nbuf_unmap(pdev->osdev,
+				       pdev->ipa_uc_tx_rsc.
+				       tx_buf_pool_vaddr_strg[idx],
+				       CDF_DMA_FROM_DEVICE);
+			cdf_nbuf_free(pdev->ipa_uc_tx_rsc.
+				      tx_buf_pool_vaddr_strg[idx]);
+		}
+	}
+
+	/* Free storage */
+	cdf_mem_free(pdev->ipa_uc_tx_rsc.tx_buf_pool_vaddr_strg);
+
+	return 0;
+}
+#endif /* IPA_OFFLOAD */
+
+#if defined(FEATURE_TSO)
+/**
+ * htt_tx_desc_fill_tso_info() - fill the MSDU extension descriptor for TSO
+ * @pdev: HTT pdev handle (unused here)
+ * @desc: the MSDU extension descriptor to fill
+ * @tso_info: TSO state; only the current segment (curr_seg) is consumed
+ *
+ * Copies the segment's TSO flags into the first 6 words of the
+ * descriptor, then writes one (paddr, len) pair per fragment.  If the
+ * segment has fewer than FRAG_NUM_MAX fragments, the next pair is
+ * zeroed to terminate the fragment list.
+ */
+void
+htt_tx_desc_fill_tso_info(htt_pdev_handle pdev, void *desc,
+			  struct cdf_tso_info_t *tso_info)
+{
+	u_int32_t *word;
+	int i;
+	struct cdf_tso_seg_elem_t *tso_seg = tso_info->curr_seg;
+	struct msdu_ext_desc_t *msdu_ext_desc = (struct msdu_ext_desc_t *)desc;
+
+	word = (u_int32_t *)(desc);
+
+	/* Initialize the TSO flags per MSDU */
+	((struct msdu_ext_desc_t *)msdu_ext_desc)->tso_flags =
+		tso_seg->seg.tso_flags;
+
+	/* First 24 bytes (6*4) contain the TSO flags */
+	word += 6;
+
+	for (i = 0; i < tso_seg->seg.num_frags; i++) {
+		/* [31:0] first 32 bits of the buffer pointer */
+		*word = tso_seg->seg.tso_frags[i].paddr_low_32;
+		word++;
+		/* [15:0] the upper 16 bits of the first buffer pointer */
+		/* [31:16] length of the first buffer */
+		*word = (tso_seg->seg.tso_frags[i].length << 16);
+		word++;
+	}
+
+	/* zero one trailing (paddr, len) pair to terminate a short list */
+	if (tso_seg->seg.num_frags < FRAG_NUM_MAX) {
+		*word = 0;
+		word++;
+		*word = 0;
+	}
+}
+#endif /* FEATURE_TSO */
diff --git a/dp/htt/htt_types.h b/dp/htt/htt_types.h
new file mode 100644
index 000000000000..b30f722e7d2d
--- /dev/null
+++ b/dp/htt/htt_types.h
@@ -0,0 +1,390 @@
+/*
+ * Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _HTT_TYPES__H_
+#define _HTT_TYPES__H_
+
+#include <osdep.h> /* uint16_t, dma_addr_t */
+#include <cdf_types.h> /* cdf_device_t */
+#include <cdf_lock.h> /* cdf_spinlock_t */
+#include <cdf_softirq_timer.h> /* cdf_softirq_timer_t */
+#include <cdf_atomic.h> /* cdf_atomic_inc */
+#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <htc_api.h> /* HTC_PACKET */
+
+#include <ol_ctrl_api.h> /* ol_pdev_handle */
+#include <ol_txrx_api.h> /* ol_txrx_pdev_handle */
+
+#define DEBUG_DMA_DONE
+
+#define HTT_TX_MUTEX_TYPE cdf_spinlock_t
+
+#ifdef QCA_TX_HTT2_SUPPORT
+#ifndef HTC_TX_HTT2_MAX_SIZE
+/* Should sync to the target's implementation. */
+#define HTC_TX_HTT2_MAX_SIZE (120)
+#endif
+#endif /* QCA_TX_HTT2_SUPPORT */
+
+
+/**
+ * struct htt_htc_pkt - HTT metadata wrapped around an HTC packet
+ * @pdev_ctxt: txrx pdev context the packet belongs to
+ * @nbuf_paddr: DMA address of the associated network buffer
+ * @htc_pkt: embedded HTC packet descriptor handed to HTC
+ * @msdu_id: tx descriptor id of the msdu this packet carries
+ */
+struct htt_htc_pkt {
+	void *pdev_ctxt;
+	dma_addr_t nbuf_paddr;
+	HTC_PACKET htc_pkt;
+	uint16_t msdu_id;
+};
+
+/* free-list element: either a live packet or a link to the next free one */
+struct htt_htc_pkt_union {
+	union {
+		struct htt_htc_pkt pkt;
+		struct htt_htc_pkt_union *next;
+	} u;
+};
+
+/*
+ * HTT host descriptor:
+ * Include the htt_tx_msdu_desc that gets downloaded to the target,
+ * but also include the HTC_FRAME_HDR and alignment padding that
+ * precede the htt_tx_msdu_desc.
+ * htc_send_data_pkt expects this header space at the front of the
+ * initial fragment (i.e. tx descriptor) that is downloaded.
+ */
+struct htt_host_tx_desc_t {
+	uint8_t htc_header[HTC_HEADER_LEN];	/* reserved for the HTC frame header */
+	/* force the tx_desc field to begin on a 4-byte boundary */
+	union {
+		uint32_t dummy_force_align;
+		struct htt_tx_msdu_desc_t tx_desc;
+	} align32;
+};
+
+/* one slot of the tx mgmt descriptor pool */
+struct htt_tx_mgmt_desc_buf {
+	cdf_nbuf_t msg_buf;	/* buffer holding the HTT mgmt-tx message */
+	A_BOOL is_inuse;	/* slot currently holds an in-flight frame */
+	cdf_nbuf_t mgmt_frm;	/* the management frame being transmitted */
+};
+
+/* tx mgmt descriptor pool plus its in-flight count */
+struct htt_tx_mgmt_desc_ctxt {
+	struct htt_tx_mgmt_desc_buf *pool;
+	A_UINT32 pending_cnt;
+};
+
+/* minimal doubly-linked list node used by the rx hash table */
+struct htt_list_node {
+	struct htt_list_node *prev;
+	struct htt_list_node *next;
+};
+
+/* one paddr -> netbuf mapping in the rx hash table */
+struct htt_rx_hash_entry {
+	A_UINT32 paddr;		/* physical address used as the hash key */
+	cdf_nbuf_t netbuf;	/* network buffer living at that address */
+	A_UINT8 fromlist;	/* entry came from the bucket's pre-allocated pool */
+	struct htt_list_node listnode;
+#ifdef RX_HASH_DEBUG
+	A_UINT32 cookie;	/* debug sanity-check value */
+#endif
+};
+
+/* one bucket: active entries plus a free pool of pre-allocated entries */
+struct htt_rx_hash_bucket {
+	struct htt_list_node listhead;
+	struct htt_rx_hash_entry *entries;	/* pre-allocated entry array */
+	struct htt_list_node freepool;		/* unused entries from @entries */
+#ifdef RX_HASH_DEBUG
+	A_UINT32 count;		/* debug: number of entries in the bucket */
+#endif
+};
+
+/*
+ * Shared-memory block used between the IPA micro controller, the WLAN
+ * host driver, and the firmware.
+ */
+struct uc_shared_mem_t {
+	uint32_t *vaddr;	/* host virtual address */
+	cdf_dma_addr_t paddr;	/* device ("physical") address */
+	cdf_dma_mem_context(memctx);
+};
+
+/* Micro controller datapath offload
+ * WLAN TX resources */
+struct htt_ipa_uc_tx_resource_t {
+	struct uc_shared_mem_t tx_ce_idx;	/* CE write index word */
+	struct uc_shared_mem_t tx_comp_base;	/* tx completion ring */
+
+	uint32_t tx_comp_idx_paddr;
+	cdf_nbuf_t *tx_buf_pool_vaddr_strg;	/* per-buffer netbuf pointers */
+	uint32_t alloc_tx_buf_cnt;		/* buffers actually allocated */
+};
+
+/**
+ * struct htt_ipa_uc_rx_resource_t
+ * @rx_rdy_idx_paddr: rx ready index physical address
+ * @rx_ind_ring_base: rx indication ring base memory info
+ * @rx_ipa_prc_done_idx: rx process done index memory info
+ * @rx_ind_ring_size: rx indication ring size
+ * @rx2_rdy_idx_paddr: second rx ring's ready index physical address
+ * @rx2_ind_ring_base: second rx indication ring base memory info
+ * @rx2_ipa_prc_done_idx: second rx ring's process done index memory info
+ * @rx2_ind_ring_size: second rx indication ring size
+ */
+struct htt_ipa_uc_rx_resource_t {
+	cdf_dma_addr_t rx_rdy_idx_paddr;
+	struct uc_shared_mem_t rx_ind_ring_base;
+	struct uc_shared_mem_t rx_ipa_prc_done_idx;
+	uint32_t rx_ind_ring_size;
+
+	/* 2nd RX ring */
+	cdf_dma_addr_t rx2_rdy_idx_paddr;
+	struct uc_shared_mem_t rx2_ind_ring_base;
+	struct uc_shared_mem_t rx2_ipa_prc_done_idx;
+	uint32_t rx2_ind_ring_size;
+};
+
+/**
+ * struct ipa_uc_rx_ring_elem_t
+ * @rx_packet_paddr: rx packet physical address
+ * @vdev_id: virtual interface id
+ * @rx_packet_leng: packet length
+ */
+struct ipa_uc_rx_ring_elem_t {
+	cdf_dma_addr_t rx_packet_paddr;
+	uint32_t vdev_id;
+	uint32_t rx_packet_leng;
+};
+
+#if defined(HELIUMPLUS_PADDR64)
+/*
+ * MSDU extension descriptor: 6 words of TSO flags followed by up to 6
+ * (fragment pointer, fragment length) pairs.  Filled by
+ * htt_tx_desc_fill_tso_info() when FEATURE_TSO is enabled.
+ */
+struct msdu_ext_desc_t {
+#if defined(FEATURE_TSO)
+	struct cdf_tso_flags_t tso_flags;
+#else
+	u_int32_t tso_flag0;
+	u_int32_t tso_flag1;
+	u_int32_t tso_flag2;
+	u_int32_t tso_flag3;
+	u_int32_t tso_flag4;
+	u_int32_t tso_flag5;
+#endif
+	u_int32_t frag_ptr0;
+	u_int32_t frag_len0;
+	u_int32_t frag_ptr1;
+	u_int32_t frag_len1;
+	u_int32_t frag_ptr2;
+	u_int32_t frag_len2;
+	u_int32_t frag_ptr3;
+	u_int32_t frag_len3;
+	u_int32_t frag_ptr4;
+	u_int32_t frag_len4;
+	u_int32_t frag_ptr5;
+	u_int32_t frag_len5;
+};
+#endif /* defined(HELIUMPLUS_PADDR64) */
+
+/**
+ * struct htt_pdev_t - per-device HTT state shared by the tx and rx paths
+ *
+ * One instance per ol_txrx pdev.  Bundles the HTC endpoint(s), the
+ * firmware-negotiated configuration, the rx ring bookkeeping, the tx
+ * descriptor pools, and the IPA micro-controller offload resources.
+ */
+struct htt_pdev_t {
+	ol_pdev_handle ctrl_pdev;	/* control/config pdev */
+	ol_txrx_pdev_handle txrx_pdev;	/* data-path pdev */
+	HTC_HANDLE htc_pdev;
+	cdf_device_t osdev;
+
+	HTC_ENDPOINT_ID htc_endpoint;
+
+#ifdef QCA_TX_HTT2_SUPPORT
+	/* dedicated endpoint for small parallel-download tx frames */
+	HTC_ENDPOINT_ID htc_tx_htt2_endpoint;
+	uint16_t htc_tx_htt2_max_size;
+#endif /* QCA_TX_HTT2_SUPPORT */
+
+#ifdef ATH_11AC_TXCOMPACT
+	/* queue of frames waiting for CE descriptors (see htt_tx_sched) */
+	HTT_TX_MUTEX_TYPE txnbufq_mutex;
+	cdf_nbuf_queue_t txnbufq;
+	struct htt_htc_pkt_union *htt_htc_pkt_misclist;
+#endif
+
+	struct htt_htc_pkt_union *htt_htc_pkt_freelist;
+	struct {
+		int is_full_reorder_offload;
+		int default_tx_comp_req;
+		int ce_classify_enabled;
+	} cfg;
+	struct {
+		uint8_t major;
+		uint8_t minor;
+	} tgt_ver;
+#if defined(HELIUMPLUS_PADDR64)
+	struct {
+		u_int8_t major;
+		u_int8_t minor;
+	} wifi_ip_ver;
+#endif /* defined(HELIUMPLUS_PADDR64) */
+	struct {
+		struct {
+			/*
+			 * Ring of network buffer objects -
+			 * This ring is used exclusively by the host SW.
+			 * This ring mirrors the dev_addrs_ring that is shared
+			 * between the host SW and the MAC HW.
+			 * The host SW uses this netbufs ring to locate the nw
+			 * buffer objects whose data buffers the HW has filled.
+			 */
+			cdf_nbuf_t *netbufs_ring;
+			/*
+			 * Ring of buffer addresses -
+			 * This ring holds the "physical" device address of the
+			 * rx buffers the host SW provides for MAC HW to fill.
+			 */
+#if HTT_PADDR64
+			uint64_t *paddrs_ring;
+#else /* ! HTT_PADDR64 */
+			uint32_t *paddrs_ring;
+#endif
+			cdf_dma_mem_context(memctx);
+		} buf;
+		/*
+		 * Base address of ring, as a "physical" device address rather
+		 * than a CPU address.
+		 */
+		uint32_t base_paddr;
+		int size;	/* how many elems in the ring (power of 2) */
+		unsigned size_mask;	/* size - 1 */
+
+		int fill_level;	/* how many rx buffers to keep in the ring */
+		int fill_cnt;	/* # of rx buffers (full+empty) in the ring */
+
+		/*
+		 * target_idx -
+		 * Without reorder offload:
+		 * not used
+		 * With reorder offload:
+		 * points to the location in the rx ring from which rx buffers
+		 * are available to copy into the MAC DMA ring
+		 */
+		struct {
+			uint32_t *vaddr;
+			uint32_t paddr;
+			cdf_dma_mem_context(memctx);
+		} target_idx;
+
+		/*
+		 * alloc_idx/host_idx -
+		 * Without reorder offload:
+		 * where HTT SW has deposited empty buffers
+		 * This is allocated in consistent mem, so that the FW can read
+		 * this variable, and program the HW's FW_IDX reg with the value
+		 * of this shadow register
+		 * With reorder offload:
+		 * points to the end of the available free rx buffers
+		 */
+		struct {
+			uint32_t *vaddr;
+			uint32_t paddr;
+			cdf_dma_mem_context(memctx);
+		} alloc_idx;
+
+		/* sw_rd_idx -
+		 * where HTT SW has processed bufs filled by rx MAC DMA */
+		struct {
+			unsigned msdu_desc;
+			unsigned msdu_payld;
+		} sw_rd_idx;
+
+		/*
+		 * refill_retry_timer - timer triggered when the ring is not
+		 * refilled to the level expected
+		 */
+		cdf_softirq_timer_t refill_retry_timer;
+
+		/*
+		 * refill_ref_cnt - ref cnt for Rx buffer replenishment - this
+		 * variable is used to guarantee that only one thread tries
+		 * to replenish Rx ring.
+		 */
+		cdf_atomic_t refill_ref_cnt;
+#ifdef DEBUG_DMA_DONE
+		uint32_t dbg_initial_msdu_payld;
+		uint32_t dbg_mpdu_range;
+		uint32_t dbg_mpdu_count;
+		uint32_t dbg_ring_idx;
+		uint32_t dbg_refill_cnt;
+		uint32_t dbg_sync_success;
+#endif
+#ifdef HTT_RX_RESTORE
+		int rx_reset;
+		uint8_t htt_rx_restore;
+#endif
+		/* paddr -> netbuf hash table (full reorder offload) */
+		struct htt_rx_hash_bucket *hash_table;
+		uint32_t listnode_offset;
+	} rx_ring;
+	long rx_fw_desc_offset;
+	int rx_mpdu_range_offset_words;
+	int rx_ind_msdu_byte_idx;
+
+	/* pool of HTT tx descriptors, carved from multi-page allocations */
+	struct {
+		int size;	/* of each HTT tx desc */
+		uint16_t pool_elems;
+		uint16_t alloc_cnt;
+		struct cdf_mem_multi_page_t desc_pages;
+		uint32_t *freelist;
+		cdf_dma_mem_context(memctx);
+	} tx_descs;
+#if defined(HELIUMPLUS_PADDR64)
+	struct {
+		int size;	/* of each Fragment/MSDU-Ext descriptor */
+		int pool_elems;
+		struct cdf_mem_multi_page_t desc_pages;
+		cdf_dma_mem_context(memctx);
+	} frag_descs;
+#endif /* defined(HELIUMPLUS_PADDR64) */
+
+	int download_len;	/* nominal per-frame download length */
+	void (*tx_send_complete_part2)(void *pdev, A_STATUS status,
+				       cdf_nbuf_t msdu, uint16_t msdu_id);
+
+	HTT_TX_MUTEX_TYPE htt_tx_mutex;
+
+	struct {
+		int htc_err_cnt;
+	} stats;
+
+	struct htt_tx_mgmt_desc_ctxt tx_mgmt_desc_ctxt;
+	struct targetdef_s *targetdef;
+	struct ce_reg_def *target_ce_def;
+
+	struct htt_ipa_uc_tx_resource_t ipa_uc_tx_rsc;
+	struct htt_ipa_uc_rx_resource_t ipa_uc_rx_rsc;
+#ifdef DEBUG_RX_RING_BUFFER
+	struct rx_buf_debug *rx_buff_list;
+	int rx_buff_index;
+#endif
+};
+
+#define HTT_EPID_GET(_htt_pdev_hdl) \
+ (((struct htt_pdev_t *)(_htt_pdev_hdl))->htc_endpoint)
+
+#if defined(HELIUMPLUS_PADDR64)
+#define HTT_WIFI_IP(pdev, x, y) (((pdev)->wifi_ip_ver.major == (x)) && \
+ ((pdev)->wifi_ip_ver.minor == (y)))
+
+#define HTT_SET_WIFI_IP(pdev, x, y) (((pdev)->wifi_ip_ver.major = (x)) && \
+ ((pdev)->wifi_ip_ver.minor = (y)))
+#endif /* defined(HELIUMPLUS_PADDR64) */
+
+#endif /* _HTT_TYPES__H_ */
diff --git a/dp/htt/rx_desc.h b/dp/htt/rx_desc.h
new file mode 100644
index 000000000000..66963d1e2b30
--- /dev/null
+++ b/dp/htt/rx_desc.h
@@ -0,0 +1,533 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _RX_DESC_H_
+#define _RX_DESC_H_
+
+/*
+ * NOTE: this is a copy of the rx_desc-related structures, exported here in
+ * the hope that they remain identical between Peregrine and Rome in future.
+ */
+struct rx_attention {
+ volatile
+ uint32_t first_mpdu:1, /* [0] */
+ last_mpdu:1, /* [1] */
+ mcast_bcast:1, /* [2] */
+ peer_idx_invalid:1, /* [3] */
+ peer_idx_timeout:1, /* [4] */
+ power_mgmt:1, /* [5] */
+ non_qos:1, /* [6] */
+ null_data:1, /* [7] */
+ mgmt_type:1, /* [8] */
+ ctrl_type:1, /* [9] */
+ more_data:1, /* [10] */
+ eosp:1, /* [11] */
+ u_apsd_trigger:1, /* [12] */
+ fragment:1, /* [13] */
+ order:1, /* [14] */
+ classification:1, /* [15] */
+ overflow_err:1, /* [16] */
+ msdu_length_err:1, /* [17] */
+ tcp_udp_chksum_fail:1, /* [18] */
+ ip_chksum_fail:1, /* [19] */
+ sa_idx_invalid:1, /* [20] */
+ da_idx_invalid:1, /* [21] */
+ sa_idx_timeout:1, /* [22] */
+ da_idx_timeout:1, /* [23] */
+ encrypt_required:1, /* [24] */
+ directed:1, /* [25] */
+ buffer_fragment:1, /* [26] */
+ mpdu_length_err:1, /* [27] */
+ tkip_mic_err:1, /* [28] */
+ decrypt_err:1, /* [29] */
+ fcs_err:1, /* [30] */
+ msdu_done:1; /* [31] */
+};
+
+struct rx_frag_info {
+ volatile
+ uint32_t ring0_more_count:8, /* [7:0] */
+ ring1_more_count:8, /* [15:8] */
+ ring2_more_count:8, /* [23:16] */
+ ring3_more_count:8; /* [31:24] */
+ volatile
+ uint32_t ring4_more_count:8, /* [7:0] */
+ ring5_more_count:8, /* [15:8] */
+ ring6_more_count:8, /* [23:16] */
+ ring7_more_count:8; /* [31:24] */
+};
+
+struct rx_msdu_start {
+ volatile
+ uint32_t msdu_length:14, /* [13:0] */
+#if defined(HELIUMPLUS)
+ l3_offset:7, /* [20:14] */
+ ipsec_ah:1, /* [21] */
+ reserved_0a:2, /* [23:22] */
+ l4_offset:7, /* [30:24] */
+ ipsec_esp:1; /* [31] */
+#else
+ ip_offset:6, /* [19:14] */
+ ring_mask:4, /* [23:20] */
+ tcp_udp_offset:7, /* [30:24] */
+ reserved_0c:1; /* [31] */
+#endif /* defined(HELIUMPLUS) */
+#if defined(HELIUMPLUS)
+ volatile uint32_t flow_id_toeplitz:32; /* [31:0] */
+#else
+ volatile uint32_t flow_id_crc:32; /* [31:0] */
+#endif /* defined(HELIUMPLUS) */
+ volatile
+ uint32_t msdu_number:8, /* [7:0] */
+ decap_format:2, /* [9:8] */
+ ipv4_proto:1, /* [10] */
+ ipv6_proto:1, /* [11] */
+ tcp_proto:1, /* [12] */
+ udp_proto:1, /* [13] */
+ ip_frag:1, /* [14] */
+ tcp_only_ack:1, /* [15] */
+ sa_idx:11, /* [26:16] */
+ reserved_2b:5; /* [31:27] */
+#if defined(HELIUMPLUS_PADDR64)
+ volatile
+ uint32_t da_idx:11, /* [10:0] */
+ da_is_bcast_mcast:1, /* [11] */
+ reserved_3a:4, /* [15:12] */
+ ip4_protocol_ip6_next_header:8, /* [23:16] */
+ ring_mask:8; /* [31:24] */
+ volatile uint32_t toeplitz_hash_2_or_4:32; /* [31:0] */
+#endif /* defined(HELIUMPLUS_PADDR64) */
+};
+
+struct rx_msdu_end {
+ volatile
+ uint32_t ip_hdr_chksum:16, /* [15:0] */
+ tcp_udp_chksum:16; /* [31:16] */
+ volatile
+ uint32_t key_id_octet:8, /* [7:0] */
+#if defined(HELIUMPLUS)
+ classification_rule:6, /* [13:8] */
+ classify_not_done_truncate:1, /* [14] */
+ classify_not_done_cce_dis:1, /* [15] */
+#else
+ classification_filter:8, /* [15:8] */
+#endif /* defined(HELIUMPLUS) */
+ ext_wapi_pn_63_48:16; /* [31:16] */
+ volatile uint32_t ext_wapi_pn_95_64:32; /* [31:0] */
+ volatile uint32_t ext_wapi_pn_127_96:32; /* [31:0] */
+ volatile
+ uint32_t reported_mpdu_length:14, /* [13:0] */
+ first_msdu:1, /* [14] */
+ last_msdu:1, /* [15] */
+#if defined(HELIUMPLUS)
+ sa_idx_timeout:1, /* [16] */
+ da_idx_timeout:1, /* [17] */
+ msdu_limit_error:1, /* [18] */
+ classify_ring_mask:8, /* [26:19] */
+#endif /* defined(HELIUMPLUS) */
+ reserved_3a:3, /* [29:27] */
+ pre_delim_err:1, /* [30] */
+ reserved_3b:1; /* [31] */
+#if defined(HELIUMPLUS_PADDR64)
+ volatile uint32_t ipv6_options_crc:32;
+ volatile uint32_t tcp_seq_number:32;
+ volatile uint32_t tcp_ack_number:32;
+ volatile
+ uint32_t tcp_flag:9, /* [8:0] */
+ lro_eligible:1, /* [9] */
+ l3_header_padding:3, /* [12:10] */
+ reserved_8a:3, /* [15:13] */
+ window_size:16; /* [31:16] */
+ volatile
+ uint32_t da_offset:6, /* [5:0] */
+ sa_offset:6, /* [11:6] */
+ da_offset_valid:1, /* [12] */
+ sa_offset_valid:1, /* [13] */
+ type_offset:7, /* [20:14] */
+ reserved_9a:11; /* [31:21] */
+ volatile uint32_t rule_indication_31_0:32;
+ volatile uint32_t rule_indication_63_32:32;
+ volatile uint32_t rule_indication_95_64:32;
+ volatile uint32_t rule_indication_127_96:32;
+#endif /* defined(HELIUMPLUS_PADDR64) */
+};
+
+struct rx_mpdu_end {
+ volatile
+ uint32_t reserved_0:13, /* [12:0] */
+ overflow_err:1, /* [13] */
+ last_mpdu:1, /* [14] */
+ post_delim_err:1, /* [15] */
+ post_delim_cnt:12, /* [27:16] */
+ mpdu_length_err:1, /* [28] */
+ tkip_mic_err:1, /* [29] */
+ decrypt_err:1, /* [30] */
+ fcs_err:1; /* [31] */
+};
+
+
+#if defined(HELIUMPLUS)
+
+struct rx_mpdu_start {
+ volatile
+ uint32_t peer_idx:11, /* [10:0] */
+ fr_ds:1, /* [11] */
+ to_ds:1, /* [12] */
+ encrypted:1, /* [13] */
+ retry:1, /* [14] */
+ reserved:1, /* [15] */
+ seq_num:12, /* [27:16] */
+ encrypt_type:4; /* [31:28] */
+ volatile uint32_t pn_31_0:32; /* [31:0] */
+ volatile
+ uint32_t pn_47_32:16, /* [15:0] */
+ toeplitz_hash:2, /* [17:16] */
+ reserved_2:10, /* [27:18] */
+ tid:4; /* [31:28] */
+};
+
+
+struct rx_ppdu_start {
+ volatile
+ uint32_t rssi_pri_chain0:8, /* [7:0] */
+ rssi_sec20_chain0:8, /* [15:8] */
+ rssi_sec40_chain0:8, /* [23:16] */
+ rssi_sec80_chain0:8; /* [31:24] */
+ volatile
+ uint32_t rssi_pri_chain1:8, /* [7:0] */
+ rssi_sec20_chain1:8, /* [15:8] */
+ rssi_sec40_chain1:8, /* [23:16] */
+ rssi_sec80_chain1:8; /* [31:24] */
+ volatile
+ uint32_t rssi_pri_chain2:8, /* [7:0] */
+ rssi_sec20_chain2:8, /* [15:8] */
+ rssi_sec40_chain2:8, /* [23:16] */
+ rssi_sec80_chain2:8; /* [31:24] */
+ volatile
+ uint32_t rssi_pri_chain3:8, /* [7:0] */
+ rssi_sec20_chain3:8, /* [15:8] */
+ rssi_sec40_chain3:8, /* [23:16] */
+ rssi_sec80_chain3:8; /* [31:24] */
+ volatile
+ uint32_t rssi_comb:8, /* [7:0] */
+ bandwidth:3, /* [10:8] */
+ reserved_4a:5, /* [15:11] */
+ rssi_comb_ht:8, /* [23:16] */
+ reserved_4b:8; /* [31:24] */
+ volatile
+ uint32_t l_sig_rate:4, /*[3:0] */
+ l_sig_rate_select:1, /* [4] */
+ l_sig_length:12, /* [16:5] */
+ l_sig_parity:1, /* [17] */
+ l_sig_tail:6, /* [23:18] */
+ preamble_type:8; /* [31:24] */
+ volatile
+ uint32_t ht_sig_vht_sig_ah_sig_a_1:24, /* [23:0] */
+ captured_implicit_sounding:1, /* [24] */
+ reserved_6:7; /* [31:25] */
+ volatile
+ uint32_t ht_sig_vht_sig_ah_sig_a_2:24, /* [23:0] */
+ reserved_7:8; /* [31:24] */
+ volatile uint32_t vht_sig_b:32; /* [31:0] */
+ volatile
+ uint32_t service:16, /* [15:0] */
+ reserved_9:16; /* [31:16] */
+};
+struct rx_location_info {
+ volatile
+ uint32_t rtt_fac_legacy:14, /* [13:0] */
+ rtt_fac_legacy_status:1, /* [14] */
+ rtt_fac_vht:14, /* [28:15] */
+ rtt_fac_vht_status:1, /* [29] */
+ rtt_cfr_status:1, /* [30] */
+ rtt_cir_status:1; /* [31] */
+ volatile
+ uint32_t rtt_fac_sifs:10, /* [9:0] */
+ rtt_fac_sifs_status:2, /* [11:10] */
+ rtt_channel_dump_size:11, /* [22:12] */
+ rtt_mac_phy_phase:2, /* [24:23] */
+ rtt_hw_ifft_mode:1, /* [25] */
+ rtt_btcf_status:1, /* [26] */
+ rtt_preamble_type:2, /* [28:27] */
+ rtt_pkt_bw:2, /* [30:29] */
+ rtt_gi_type:1; /* [31] */
+ volatile
+ uint32_t rtt_mcs_rate:4, /* [3:0] */
+ rtt_strongest_chain:2, /* [5:4] */
+ rtt_phase_jump:7, /* [12:6] */
+ rtt_rx_chain_mask:4, /* [16:13] */
+ rtt_tx_data_start_x_phase:1, /* [17] */
+ reserved_2:13, /* [30:18] */
+ rx_location_info_valid:1; /* [31] */
+};
+
+struct rx_pkt_end {
+ volatile
+ uint32_t rx_success:1, /* [0] */
+ reserved_0a:2, /* [2:1] */
+ error_tx_interrupt_rx:1, /* [3] */
+ error_ofdm_power_drop:1, /* [4] */
+ error_ofdm_restart:1, /* [5] */
+ error_cck_power_drop:1, /* [6] */
+ error_cck_restart:1, /* [7] */
+ reserved_0b:24; /* [31:8] */
+ volatile uint32_t phy_timestamp_1_lower_32:32; /* [31:0] */
+ volatile uint32_t phy_timestamp_1_upper_32:32; /* [31:0] */
+ volatile uint32_t phy_timestamp_2_lower_32:32; /* [31:0] */
+ volatile uint32_t phy_timestamp_2_upper_32:32; /* [31:0] */
+ struct rx_location_info rx_location_info;
+};
+
+struct rx_phy_ppdu_end {
+ volatile
+ uint32_t reserved_0a:2, /* [1:0] */
+ error_radar:1, /* [2] */
+ error_rx_abort:1, /* [3] */
+ error_rx_nap:1, /* [4] */
+ error_ofdm_timing:1, /* [5] */
+ error_ofdm_signal_parity:1, /* [6] */
+ error_ofdm_rate_illegal:1, /* [7] */
+ error_ofdm_length_illegal:1, /* [8] */
+ error_ppdu_ofdm_restart:1, /* [9] */
+ error_ofdm_service:1, /* [10] */
+ error_ppdu_ofdm_power_drop:1, /* [11] */
+ error_cck_blocker:1, /* [12] */
+ error_cck_timing:1, /* [13] */
+ error_cck_header_crc:1, /* [14] */
+ error_cck_rate_illegal:1, /* [15] */
+ error_cck_length_illegal:1, /* [16] */
+ error_ppdu_cck_restart:1, /* [17] */
+ error_cck_service:1, /* [18] */
+ error_ppdu_cck_power_drop:1, /* [19] */
+ error_ht_crc_err:1, /* [20] */
+ error_ht_length_illegal:1, /* [21] */
+ error_ht_rate_illegal:1, /* [22] */
+ error_ht_zlf:1, /* [23] */
+ error_false_radar_ext:1, /* [24] */
+ error_green_field:1, /* [25] */
+ error_spectral_scan:1, /* [26] */
+ error_rx_bw_gt_dyn_bw:1, /* [27] */
+ error_leg_ht_mismatch:1, /* [28] */
+ error_vht_crc_error:1, /* [29] */
+ error_vht_siga_unsupported:1, /* [30] */
+ error_vht_lsig_len_invalid:1; /* [31] */
+ volatile
+ uint32_t error_vht_ndp_or_zlf:1, /* [0] */
+ error_vht_nsym_lt_zero:1, /* [1] */
+ error_vht_rx_extra_symbol_mismatch:1, /* [2] */
+ error_vht_rx_skip_group_id0:1, /* [3] */
+ error_vht_rx_skip_group_id1to62:1, /* [4] */
+ error_vht_rx_skip_group_id63:1, /* [5] */
+ error_ofdm_ldpc_decoder_disabled:1, /* [6] */
+ error_defer_nap:1, /* [7] */
+ error_fdomain_timeout:1, /* [8] */
+ error_lsig_rel_check:1, /* [9] */
+ error_bt_collision:1, /* [10] */
+ error_unsupported_mu_feedback:1, /* [11] */
+ error_ppdu_tx_interrupt_rx:1, /* [12] */
+ error_rx_unsupported_cbf:1, /* [13] */
+ reserved_1:18; /* [31:14] */
+};
+
+struct rx_timing_offset {
+ volatile
+ uint32_t timing_offset:12, /* [11:0] */
+ reserved:20; /* [31:12] */
+};
+
+struct rx_ppdu_end {
+ volatile uint32_t evm_p0:32;
+ volatile uint32_t evm_p1:32;
+ volatile uint32_t evm_p2:32;
+ volatile uint32_t evm_p3:32;
+ volatile uint32_t evm_p4:32;
+ volatile uint32_t evm_p5:32;
+ volatile uint32_t evm_p6:32;
+ volatile uint32_t evm_p7:32;
+ volatile uint32_t evm_p8:32;
+ volatile uint32_t evm_p9:32;
+ volatile uint32_t evm_p10:32;
+ volatile uint32_t evm_p11:32;
+ volatile uint32_t evm_p12:32;
+ volatile uint32_t evm_p13:32;
+ volatile uint32_t evm_p14:32;
+ volatile uint32_t evm_p15:32;
+ volatile uint32_t reserved_16:32;
+ volatile uint32_t reserved_17:32;
+ volatile uint32_t wb_timestamp_lower_32:32;
+ volatile uint32_t wb_timestamp_upper_32:32;
+ struct rx_pkt_end rx_pkt_end;
+ struct rx_phy_ppdu_end rx_phy_ppdu_end;
+ struct rx_timing_offset rx_timing_offset;
+ volatile
+ uint32_t rx_antenna:24, /* [23:0] */
+ tx_ht_vht_ack:1, /* [24] */
+ rx_pkt_end_valid:1, /* [25] */
+ rx_phy_ppdu_end_valid:1, /* [26] */
+ rx_timing_offset_valid:1, /* [27] */
+ bb_captured_channel:1, /* [28] */
+ unsupported_mu_nc:1, /* [29] */
+ otp_txbf_disable:1, /* [30] */
+ reserved_31:1; /* [31] */
+ volatile
+ uint32_t coex_bt_tx_from_start_of_rx:1, /* [0] */
+ coex_bt_tx_after_start_of_rx:1, /* [1] */
+ coex_wan_tx_from_start_of_rx:1, /* [2] */
+ coex_wan_tx_after_start_of_rx:1, /* [3] */
+ coex_wlan_tx_from_start_of_rx:1, /* [4] */
+ coex_wlan_tx_after_start_of_rx:1, /* [5] */
+ mpdu_delimiter_errors_seen:1, /* [6] */
+ ftm:1, /* [7] */
+ ftm_dialog_token:8, /* [15:8] */
+ ftm_follow_up_dialog_token:8, /* [23:16] */
+ reserved_32:8; /* [31:24] */
+ volatile
+ uint32_t before_mpdu_cnt_passing_fcs:8, /* [7:0] */
+ before_mpdu_cnt_failing_fcs:8, /* [15:8] */
+ after_mpdu_cnt_passing_fcs:8, /* [23:16] */
+ after_mpdu_cnt_failing_fcs:8; /* [31:24] */
+ volatile uint32_t phy_timestamp_tx_lower_32:32; /* [31:0] */
+ volatile uint32_t phy_timestamp_tx_upper_32:32; /* [31:0] */
+ volatile
+ uint32_t bb_length:16, /* [15:0] */
+ bb_data:1, /* [16] */
+ peer_idx_valid:1, /* [17] */
+ peer_idx:11, /* [28:18] */
+ reserved_26:2, /* [30:29] */
+ ppdu_done:1; /* [31] */
+};
+#else
+struct rx_ppdu_start {
+ volatile
+ uint32_t rssi_chain0_pri20:8, /* [7:0] */
+ rssi_chain0_sec20:8, /* [15:8] */
+ rssi_chain0_sec40:8, /* [23:16] */
+ rssi_chain0_sec80:8; /* [31:24] */
+ volatile
+ uint32_t rssi_chain1_pri20:8, /* [7:0] */
+ rssi_chain1_sec20:8, /* [15:8] */
+ rssi_chain1_sec40:8, /* [23:16] */
+ rssi_chain1_sec80:8; /* [31:24] */
+ volatile
+ uint32_t rssi_chain2_pri20:8, /* [7:0] */
+ rssi_chain2_sec20:8, /* [15:8] */
+ rssi_chain2_sec40:8, /* [23:16] */
+ rssi_chain2_sec80:8; /* [31:24] */
+ volatile
+ uint32_t rssi_chain3_pri20:8, /* [7:0] */
+ rssi_chain3_sec20:8, /* [15:8] */
+ rssi_chain3_sec40:8, /* [23:16] */
+ rssi_chain3_sec80:8; /* [31:24] */
+ volatile
+ uint32_t rssi_comb:8, /* [7:0] */
+ reserved_4a:16, /* [23:8] */
+ is_greenfield:1, /* [24] */
+ reserved_4b:7; /* [31:25] */
+ volatile
+ uint32_t l_sig_rate:4, /* [3:0] */
+ l_sig_rate_select:1, /* [4] */
+ l_sig_length:12, /* [16:5] */
+ l_sig_parity:1, /* [17] */
+ l_sig_tail:6, /* [23:18] */
+ preamble_type:8; /* [31:24] */
+ volatile
+ uint32_t ht_sig_vht_sig_a_1:24, /* [23:0] */
+ reserved_6:8; /* [31:24] */
+ volatile
+ uint32_t ht_sig_vht_sig_a_2:24, /* [23:0] */
+ txbf_h_info:1, /* [24] */
+ reserved_7:7; /* [31:25] */
+ volatile
+ uint32_t vht_sig_b:29, /* [28:0] */
+ reserved_8:3; /* [31:29] */
+ volatile
+ uint32_t service:16, /* [15:0] */
+ reserved_9:16; /* [31:16] */
+};
+
+
+struct rx_mpdu_start {
+ volatile
+ uint32_t peer_idx:11, /* [10:0] */
+ fr_ds:1, /* [11] */
+ to_ds:1, /* [12] */
+ encrypted:1, /* [13] */
+ retry:1, /* [14] */
+ txbf_h_info:1, /* [15] */
+ seq_num:12, /* [27:16] */
+ encrypt_type:4; /* [31:28] */
+ volatile uint32_t pn_31_0:32; /* [31:0] */
+ volatile
+ uint32_t pn_47_32:16, /* [15:0] */
+ directed:1, /* [16] */
+ reserved_2:11, /* [27:17] */
+ tid:4; /* [31:28] */
+};
+
+struct rx_ppdu_end {
+ volatile uint32_t evm_p0:32; /* [31:0] */
+ volatile uint32_t evm_p1:32; /* [31:0] */
+ volatile uint32_t evm_p2:32; /* [31:0] */
+ volatile uint32_t evm_p3:32; /* [31:0] */
+ volatile uint32_t evm_p4:32; /* [31:0] */
+ volatile uint32_t evm_p5:32; /* [31:0] */
+ volatile uint32_t evm_p6:32; /* [31:0] */
+ volatile uint32_t evm_p7:32; /* [31:0] */
+ volatile uint32_t evm_p8:32; /* [31:0] */
+ volatile uint32_t evm_p9:32; /* [31:0] */
+ volatile uint32_t evm_p10:32; /* [31:0] */
+ volatile uint32_t evm_p11:32; /* [31:0] */
+ volatile uint32_t evm_p12:32; /* [31:0] */
+ volatile uint32_t evm_p13:32; /* [31:0] */
+ volatile uint32_t evm_p14:32; /* [31:0] */
+ volatile uint32_t evm_p15:32; /* [31:0] */
+ volatile uint32_t tsf_timestamp:32; /* [31:0] */
+ volatile uint32_t wb_timestamp:32; /* [31:0] */
+ volatile
+ uint32_t locationing_timestamp:8, /* [7:0] */
+ phy_err_code:8, /* [15:8] */
+ phy_err:1, /* [16] */
+ rx_location:1, /* [17] */
+ txbf_h_info:1, /* [18] */
+ reserved_18:13; /* [31:19] */
+ volatile
+ uint32_t rx_antenna:24, /* [23:0] */
+ tx_ht_vht_ack:1, /* [24] */
+ bb_captured_channel:1, /* [25] */
+ reserved_19:6; /* [31:26] */
+ volatile
+ uint32_t rtt_correction_value:24, /* [23:0] */
+ reserved_20:7, /* [30:24] */
+ rtt_normal_mode:1; /* [31] */
+ volatile
+ uint32_t bb_length:16, /* [15:0] */
+ reserved_21:15, /* [30:16] */
+ ppdu_done:1; /* [31] */
+};
+#endif /* defined(HELIUMPLUS) */
+
+#endif /*_RX_DESC_H_*/
diff --git a/dp/ol/inc/ol_cfg.h b/dp/ol/inc/ol_cfg.h
new file mode 100644
index 000000000000..4e59ea397d30
--- /dev/null
+++ b/dp/ol/inc/ol_cfg.h
@@ -0,0 +1,543 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _OL_CFG__H_
+#define _OL_CFG__H_
+
+#include <cdf_types.h> /* uint32_t */
+#include <ol_ctrl_api.h> /* ol_pdev_handle */
+#include <cds_ieee80211_common.h> /* ieee80211_qosframe_htc_addr4 */
+#include <enet.h> /* LLC_SNAP_HDR_LEN */
+#include "wlan_tgt_def_config.h"
+
+/**
+ * @brief format of data frames delivered to/from the WLAN driver by/to the OS
+ */
+enum wlan_frm_fmt {
+ wlan_frm_fmt_unknown,
+ wlan_frm_fmt_raw,
+ wlan_frm_fmt_native_wifi,
+ wlan_frm_fmt_802_3,
+};
+
+struct wlan_ipa_uc_rsc_t {
+ u8 uc_offload_enabled;
+ u32 tx_max_buf_cnt;
+ u32 tx_buf_size;
+ u32 rx_ind_ring_size;
+ u32 tx_partition_base;
+};
+
+/* Config parameters for txrx_pdev */
+struct txrx_pdev_cfg_t {
+ u8 is_high_latency;
+ u8 defrag_timeout_check;
+ u8 rx_pn_check;
+ u8 pn_rx_fwd_check;
+ u8 host_addba;
+ u8 tx_free_at_download;
+ u8 rx_fwd_inter_bss;
+ u32 max_thruput_mbps;
+ u32 target_tx_credit;
+ u32 vow_config;
+ u32 tx_download_size;
+ u32 max_peer_id;
+ u32 max_vdev;
+ u32 max_nbuf_frags;
+ u32 throttle_period_ms;
+ enum wlan_frm_fmt frame_type;
+ u8 rx_fwd_disabled;
+ u8 is_packet_log_enabled;
+ u8 is_full_reorder_offload;
+ struct wlan_ipa_uc_rsc_t ipa_uc_rsc;
+ bool ip_tcp_udp_checksum_offload;
+ bool enable_rxthread;
+ bool ce_classify_enabled;
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+ uint32_t tx_flow_stop_queue_th;
+ uint32_t tx_flow_start_queue_offset;
+#endif
+};
+
+/**
+ * @brief Specify whether the system is high-latency or low-latency.
+ * @details
+ * Indicate whether the system is operating in high-latency (message
+ * based, e.g. USB) mode or low-latency (memory-mapped, e.g. PCIe) mode.
+ * Some chips support just one type of host / target interface.
+ * Other chips support both LL and HL interfaces (e.g. PCIe and USB),
+ * so the selection will be made based on which bus HW is present, or
+ * which is preferred if both are present.
+ *
+ * @param pdev - handle to the physical device
+ * @return 1 -> high-latency -OR- 0 -> low-latency
+ */
+int ol_cfg_is_high_latency(ol_pdev_handle pdev);
+
+/**
+ * @brief Specify the range of peer IDs.
+ * @details
+ * Specify the maximum peer ID. This is the maximum number of peers,
+ * minus one.
+ * This is used by the host to determine the size of arrays indexed by
+ * peer ID.
+ *
+ * @param pdev - handle to the physical device
+ * @return maximum peer ID
+ */
+int ol_cfg_max_peer_id(ol_pdev_handle pdev);
+
+/**
+ * @brief Specify the max number of virtual devices within a physical device.
+ * @details
+ * Specify how many virtual devices may exist within a physical device.
+ *
+ * @param pdev - handle to the physical device
+ * @return maximum number of virtual devices
+ */
+int ol_cfg_max_vdevs(ol_pdev_handle pdev);
+
+/**
+ * @brief Check whether host-side rx PN check is enabled or disabled.
+ * @details
+ * Choose whether to allocate rx PN state information and perform
+ * rx PN checks (if applicable, based on security type) on the host.
+ * If the rx PN check is specified to be done on the host, the host SW
+ * will determine which peers are using a security type (e.g. CCMP) that
+ * requires a PN check.
+ *
+ * @param pdev - handle to the physical device
+ * @return 1 -> host performs rx PN check -OR- 0 -> no host-side rx PN check
+ */
+int ol_cfg_rx_pn_check(ol_pdev_handle pdev);
+
+/**
+ * @brief Check whether host-side rx forwarding is enabled or disabled.
+ * @details
+ * Choose whether to check whether to forward rx frames to tx on the host.
+ * For LL systems, this rx -> tx host-side forwarding check is typically
+ * enabled.
+ * For HL systems, the rx -> tx forwarding check is typically done on the
+ * target. However, even in HL systems, the host-side rx -> tx forwarding
+ * will typically be enabled, as a second-tier safety net in case the
+ * target doesn't have enough memory to store all rx -> tx forwarded frames.
+ *
+ * @param pdev - handle to the physical device
+ * @return 1 -> host does rx->tx forward -OR- 0 -> no host-side rx->tx forward
+ */
+int ol_cfg_rx_fwd_check(ol_pdev_handle pdev);
+
+/**
+ * @brief set rx fwd disable/enable.
+ * @details
+ * Choose whether to forward rx frames to tx (where applicable) within the
+ * WLAN driver, or to leave all forwarding up to the operating system.
+ * currently only intra-bss fwd is supported.
+ *
+ * @param pdev - handle to the physical device
+ * @param disable_rx_fwd - 1 -> no rx->tx forward, 0 -> rx->tx forward
+ */
+void ol_set_cfg_rx_fwd_disabled(ol_pdev_handle pdev, uint8_t disable_rx_fwd);
+
+/**
+ * @brief Check whether rx forwarding is enabled or disabled.
+ * @details
+ * Choose whether to forward rx frames to tx (where applicable) within the
+ * WLAN driver, or to leave all forwarding up to the operating system.
+ *
+ * @param pdev - handle to the physical device
+ * @return 1 -> no rx->tx forward -OR- 0 -> rx->tx forward (in host or target)
+ */
+int ol_cfg_rx_fwd_disabled(ol_pdev_handle pdev);
+
+/**
+ * @brief Check whether to perform inter-BSS or intra-BSS rx->tx forwarding.
+ * @details
+ * Check whether data received by an AP on one virtual device destined
+ * to a STA associated with a different virtual device within the same
+ * physical device should be forwarded within the driver, or whether
+ * forwarding should only be done within a virtual device.
+ *
+ * @param pdev - handle to the physical device
+ * @return
+ * 1 -> forward both within and between vdevs
+ * -OR-
+ * 0 -> forward only within a vdev
+ */
+int ol_cfg_rx_fwd_inter_bss(ol_pdev_handle pdev);
+
+/**
+ * @brief Specify data frame format used by the OS.
+ * @details
+ * Specify what type of frame (802.3 or native WiFi) the host data SW
+ * should expect from and provide to the OS shim.
+ *
+ * @param pdev - handle to the physical device
+ * @return enumerated data frame format
+ */
+enum wlan_frm_fmt ol_cfg_frame_type(ol_pdev_handle pdev);
+
+/**
+ * @brief Specify the peak throughput.
+ * @details
+ * Specify the peak throughput that a system is expected to support.
+ * The data SW uses this configuration to help choose the size for its
+ * tx descriptor pool and rx buffer ring.
+ * The data SW assumes that the peak throughput applies to either rx or tx,
+ * rather than having separate specs of the rx max throughput vs. the tx
+ * max throughput.
+ *
+ * @param pdev - handle to the physical device
+ * @return maximum supported throughput in Mbps (not MBps)
+ */
+int ol_cfg_max_thruput_mbps(ol_pdev_handle pdev);
+
+/**
+ * @brief Specify the maximum number of fragments per tx network buffer.
+ * @details
+ * Specify the maximum number of fragments that a tx frame provided to
+ * the WLAN driver by the OS may contain.
+ * In LL systems, the host data SW uses this maximum fragment count to
+ * determine how many elements to allocate in the fragmentation descriptor
+ * it creates to specify to the tx MAC DMA where to locate the tx frame's
+ * data.
+ * This maximum fragments count is only for regular frames, not TSO frames,
+ * since TSO frames are sent in segments with a limited number of fragments
+ * per segment.
+ *
+ * @param pdev - handle to the physical device
+ * @return maximum number of fragments that can occur in a regular tx frame
+ */
+int ol_cfg_netbuf_frags_max(ol_pdev_handle pdev);
+
+/**
+ * @brief For HL systems, specify when to free tx frames.
+ * @details
+ * In LL systems, the host's tx frame is referenced by the MAC DMA, and
+ * thus cannot be freed until the target indicates that it is finished
+ * transmitting the frame.
+ * In HL systems, the entire tx frame is downloaded to the target.
+ * Consequently, the target has its own copy of the tx frame, and the
+ * host can free the tx frame as soon as the download completes.
+ * Alternatively, the HL host can keep the frame allocated until the
+ * target explicitly tells the HL host it is done transmitting the frame.
+ * This gives the target the option of discarding its copy of the tx
+ * frame, and then later getting a new copy from the host.
+ * This function tells the host whether it should retain its copy of the
+ * transmit frames until the target explicitly indicates it is finished
+ * transmitting them, or if it should free its copy as soon as the
+ * tx frame is downloaded to the target.
+ *
+ * @param pdev - handle to the physical device
+ * @return
+ * 0 -> retain the tx frame until the target indicates it is done
+ * transmitting the frame
+ * -OR-
+ * 1 -> free the tx frame as soon as the download completes
+ */
+int ol_cfg_tx_free_at_download(ol_pdev_handle pdev);
+
+/**
+ * @brief Low water mark for target tx credit.
+ * Tx completion handler is invoked to reap the buffers when the target tx
+ * credit goes below Low Water Mark.
+ */
+#define OL_CFG_NUM_MSDU_REAP 512
+#define ol_cfg_tx_credit_lwm(pdev) \
+ ((CFG_TGT_NUM_MSDU_DESC > OL_CFG_NUM_MSDU_REAP) ? \
+ (CFG_TGT_NUM_MSDU_DESC - OL_CFG_NUM_MSDU_REAP) : 0)
+
+/**
+ * @brief In a HL system, specify the target initial credit count.
+ * @details
+ * The HL host tx data SW includes a module for determining which tx frames
+ * to download to the target at a given time.
+ * To make this judgement, the HL tx download scheduler has to know
+ * how many buffers the HL target has available to hold tx frames.
+ * Due to the possibility that a single target buffer pool can be shared
+ * between rx and tx frames, the host may not be able to obtain a precise
+ * specification of the tx buffer space available in the target, but it
+ * uses the best estimate, as provided by this configuration function,
+ * to determine how best to schedule the tx frame downloads.
+ *
+ * @param pdev - handle to the physical device
+ * @return the number of tx buffers available in a HL target
+ */
+uint16_t ol_cfg_target_tx_credit(ol_pdev_handle pdev);
+
+/**
+ * @brief Specify the LL tx MSDU header download size.
+ * @details
+ * In LL systems, determine how many bytes from a tx frame to download,
+ * in order to provide the target FW's Descriptor Engine with enough of
+ * the packet's payload to interpret what kind of traffic this is,
+ * and who it is for.
+ * This download size specification does not include the 802.3 / 802.11
+ * frame encapsulation headers; it starts with the encapsulated IP packet
+ * (or whatever ethertype is carried within the ethernet-ish frame).
+ * The LL host data SW will determine how many bytes of the MSDU header to
+ * download by adding this download size specification to the size of the
+ * frame header format specified by the ol_cfg_frame_type configuration
+ * function.
+ *
+ * @param pdev - handle to the physical device
+ * @return the number of bytes beyond the 802.3 or native WiFi header to
+ * download to the target for tx classification
+ */
+int ol_cfg_tx_download_size(ol_pdev_handle pdev);
+
+/**
+ * @brief Specify where defrag timeout and duplicate detection is handled
+ * @details
+ * non-aggregate duplicate detection and timing out stale fragments
+ * requires additional target memory. To reach max client
+ * configurations (128+), non-aggregate duplicate detection and the
+ * logic to time out stale fragments is moved to the host.
+ *
+ * @param pdev - handle to the physical device
+ * @return
+ * 0 -> target is responsible for non-aggregate duplicate detection and
+ * for timing out stale fragments.
+ *
+ * 1 -> host is responsible for non-aggregate duplicate detection and
+ * for timing out stale fragments.
+ */
+int ol_cfg_rx_host_defrag_timeout_duplicate_check(ol_pdev_handle pdev);
+
+/**
+ * @brief Query for the period in ms used for throttling for
+ * thermal mitigation
+ * @details
+ * In LL systems, transmit data throttling is used for thermal
+ * mitigation where data is paused and resumed during the
+ * throttle period i.e. the throttle period consists of an
+ * "on" phase when transmit is allowed and an "off" phase when
+ * transmit is suspended. This function returns the total
+ * period used for throttling.
+ *
+ * @param pdev - handle to the physical device
+ * @return the total throttle period in ms
+ */
+int ol_cfg_throttle_period_ms(ol_pdev_handle pdev);
+
+/**
+ * @brief Check whether full reorder offload is
+ * enabled/disable by the host
+ * @details
+ * If the host does not support receive reorder (i.e. the
+ * target performs full receive re-ordering) this will return
+ * "enabled"
+ *
+ * @param pdev - handle to the physical device
+ * @return 1 - enable, 0 - disable
+ */
+int ol_cfg_is_full_reorder_offload(ol_pdev_handle pdev);
+
+int ol_cfg_is_rx_thread_enabled(ol_pdev_handle pdev);
+
+/**
+ * ol_cfg_is_ip_tcp_udp_checksum_offload_enabled() - return
+ * ip_tcp_udp_checksum_offload is enable/disable
+ * @pdev : handle to the physical device
+ *
+ * Return: 1 - enable, 0 - disable
+ */
+static inline
+int ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(ol_pdev_handle pdev)
+{
+ struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
+ return cfg->ip_tcp_udp_checksum_offload;
+}
+
+
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+int ol_cfg_get_tx_flow_stop_queue_th(ol_pdev_handle pdev);
+
+int ol_cfg_get_tx_flow_start_queue_offset(ol_pdev_handle pdev);
+#endif
+
+bool ol_cfg_is_ce_classify_enabled(ol_pdev_handle pdev);
+
+enum wlan_target_fmt_translation_caps {
+ wlan_frm_tran_cap_raw = 0x01,
+ wlan_frm_tran_cap_native_wifi = 0x02,
+ wlan_frm_tran_cap_8023 = 0x04,
+};
+
+/**
+ * @brief Specify the maximum header size added by SW tx encapsulation
+ * @details
+ * This function returns the maximum size of the new L2 header, not the
+ * difference between the new and old L2 headers.
+ * Thus, this function returns the maximum 802.11 header size that the
+ * tx SW may need to add to tx data frames.
+ *
+ * @param pdev - handle to the physical device
+ */
+static inline int ol_cfg_sw_encap_hdr_max_size(ol_pdev_handle pdev)
+{
+ /*
+ * 24 byte basic 802.11 header
+ * + 6 byte 4th addr
+ * + 2 byte QoS control
+ * + 4 byte HT control
+ * + 8 byte LLC/SNAP
+ */
+ return sizeof(struct ieee80211_qosframe_htc_addr4) + LLC_SNAP_HDR_LEN;
+}
+
+static inline uint8_t ol_cfg_tx_encap(ol_pdev_handle pdev)
+{
+ /* tx encap done in HW */
+ return 0;
+}
+
+static inline int ol_cfg_host_addba(ol_pdev_handle pdev)
+{
+ /*
+ * ADDBA negotiation is handled by the target FW for Peregrine + Rome.
+ */
+ return 0;
+}
+
+/**
+ * @brief If the host SW's ADDBA negotiation fails, should it be retried?
+ *
+ * @param pdev - handle to the physical device
+ */
+static inline int ol_cfg_addba_retry(ol_pdev_handle pdev)
+{
+ return 0; /* disabled for now */
+}
+
+/**
+ * @brief How many frames to hold in a paused vdev's tx queue in LL systems
+ */
+static inline int ol_tx_cfg_max_tx_queue_depth_ll(ol_pdev_handle pdev)
+{
+ /*
+ * Store up to 1500 frames for a paused vdev.
+ * For example, if the vdev is sending 300 Mbps of traffic, and the
+ * PHY is capable of 600 Mbps, then it will take 56 ms for the PHY to
+ * drain both the 700 frames that are queued initially, plus the next
+ * 700 frames that come in while the PHY is catching up.
+ * So in this example scenario, the PHY will remain fully utilized
+ * in a MCC system that has a channel-switching period of 56 ms or less.
+ * 700 frames calculation was correct when FW drain packet without
+ * any overhead. Actual situation drain overhead will slowdown drain
+ * speed. And channel period is less than 56 msec
+ * Worst scenario, 1500 frames should be stored in host.
+ */
+ return 1500;
+}
+
+/**
+ * @brief Set packet log config in HTT config based on CFG ini configuration
+ */
+void ol_set_cfg_packet_log_enabled(ol_pdev_handle pdev, uint8_t val);
+
+/**
+ * @brief Get packet log config from HTT config
+ */
+uint8_t ol_cfg_is_packet_log_enabled(ol_pdev_handle pdev);
+
+#ifdef IPA_OFFLOAD
+/**
+ * @brief IPA micro controller data path offload enable or not
+ * @details
+ * This function returns IPA micro controller data path offload
+ * feature enabled or not
+ *
+ * @param pdev - handle to the physical device
+ */
+unsigned int ol_cfg_ipa_uc_offload_enabled(ol_pdev_handle pdev);
+/**
+ * @brief IPA micro controller data path TX buffer size
+ * @details
+ * This function returns IPA micro controller data path offload
+ * TX buffer size which should be pre-allocated by driver.
+ * Default buffer size is 2K
+ *
+ * @param pdev - handle to the physical device
+ */
+unsigned int ol_cfg_ipa_uc_tx_buf_size(ol_pdev_handle pdev);
+/**
+ * @brief IPA micro controller data path TX buffer count
+ * @details
+ * This function returns IPA micro controller data path offload
+ * TX buffer count which should be pre-allocated by driver.
+ *
+ * @param pdev - handle to the physical device
+ */
+unsigned int ol_cfg_ipa_uc_tx_max_buf_cnt(ol_pdev_handle pdev);
+/**
+ * @brief IPA micro controller data path RX indication ring size
+ * @details
+ * This function returns IPA micro controller data path offload
+ * RX indication ring size which will notified by WLAN FW to IPA
+ * micro controller
+ *
+ * @param pdev - handle to the physical device
+ */
+unsigned int ol_cfg_ipa_uc_rx_ind_ring_size(ol_pdev_handle pdev);
+/**
+ * @brief IPA micro controller data path TX partition base
+ * @param pdev - handle to the physical device
+ */
+unsigned int ol_cfg_ipa_uc_tx_partition_base(ol_pdev_handle pdev);
+#else
+static inline unsigned int ol_cfg_ipa_uc_offload_enabled(
+ ol_pdev_handle pdev)
+{
+ return 0;
+}
+
+static inline unsigned int ol_cfg_ipa_uc_tx_buf_size(
+ ol_pdev_handle pdev)
+{
+ return 0;
+}
+
+static inline unsigned int ol_cfg_ipa_uc_tx_max_buf_cnt(
+ ol_pdev_handle pdev)
+{
+ return 0;
+}
+
+static inline unsigned int ol_cfg_ipa_uc_rx_ind_ring_size(
+ ol_pdev_handle pdev)
+{
+ return 0;
+}
+
+static inline unsigned int ol_cfg_ipa_uc_tx_partition_base(
+ ol_pdev_handle pdev)
+{
+ return 0;
+}
+#endif /* IPA_OFFLOAD */
+#endif /* _OL_CFG__H_ */
diff --git a/dp/ol/inc/ol_ctrl_addba_api.h b/dp/ol/inc/ol_ctrl_addba_api.h
new file mode 100644
index 000000000000..31854e61086d
--- /dev/null
+++ b/dp/ol/inc/ol_ctrl_addba_api.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _OL_CTRL_ADDBA_API_H_
+#define _OL_CTRL_ADDBA_API_H_
+#define ol_ctrl_addba_attach(a, b, c, d, e) 0
+#define ol_ctrl_addba_detach(a) 0
+#define ol_ctrl_addba_init(a, b, c, d, e) 0
+#define ol_ctrl_addba_cleanup(a) 0
+#define ol_ctrl_addba_request_setup(a, b, c, d, e, f) 0
+#define ol_ctrl_addba_response_setup(a, b, c, d, e, f) 0
+#define ol_ctrl_addba_request_process(a, b, c, d, e) 0
+#define ol_ctrl_addba_response_process(a, b, c, d) 0
+#define ol_ctrl_addba_clear(a) 0
+#define ol_ctrl_delba_process(a, b, c) 0
+#define ol_ctrl_addba_get_status(a, b) 0
+#define ol_ctrl_addba_set_response(a, b, c) 0
+#define ol_ctrl_addba_clear_response(a) 0
+#endif
diff --git a/dp/ol/inc/ol_ctrl_api.h b/dp/ol/inc/ol_ctrl_api.h
new file mode 100644
index 000000000000..c3e6eb347480
--- /dev/null
+++ b/dp/ol/inc/ol_ctrl_api.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2011, 2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_ctrl_api.h
+ * @brief Definitions used in multiple external interfaces to the control SW.
+ */
+#ifndef _OL_CTRL_API__H_
+#define _OL_CTRL_API__H_
+
+struct ol_pdev_t;
+typedef struct ol_pdev_t *ol_pdev_handle;
+
+struct ol_vdev_t;
+typedef struct ol_vdev_t *ol_vdev_handle;
+
+struct ol_peer_t;
+typedef struct ol_peer_t *ol_peer_handle;
+
+#endif /* _OL_CTRL_API__H_ */
diff --git a/dp/ol/inc/ol_defines.h b/dp/ol/inc/ol_defines.h
new file mode 100644
index 000000000000..8b83adb85f91
--- /dev/null
+++ b/dp/ol/inc/ol_defines.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2013-2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/*
+ * Offload specific Opaque Data types.
+ */
+#ifndef _DEV_OL_DEFINES_H
+#define _DEV_OL_DEFINES_H
+
+/**
+ * @brief Opaque handle of wmi structure
+ */
+struct wmi_unified;
+typedef struct wmi_unified *wmi_unified_t;
+
+typedef void *ol_scn_t;
+/**
+ * @wmi_event_handler function prototype
+ */
+typedef int (*wmi_unified_event_handler)(ol_scn_t scn_handle,
+ uint8_t *event_buf, uint32_t len);
+
+#endif /* _DEV_OL_DEFINES_H */
diff --git a/dp/ol/inc/ol_htt_api.h b/dp/ol/inc/ol_htt_api.h
new file mode 100644
index 000000000000..d8d2272f4643
--- /dev/null
+++ b/dp/ol/inc/ol_htt_api.h
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_htt_api.h
+ * @brief Specify the general HTT API functions called by the host data SW.
+ * @details
+ * This file declares the HTT API functions that are not specific to
+ * either tx nor rx.
+ */
+#ifndef _OL_HTT_API__H_
+#define _OL_HTT_API__H_
+
+#include <cdf_types.h> /* cdf_device_t */
+#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <athdefs.h> /* A_STATUS */
+#include <htc_api.h> /* HTC_HANDLE */
+#include <ol_ctrl_api.h> /* ol_pdev_handle */
+#include <ol_txrx_api.h> /* ol_txrx_pdev_handle */
+#include "htt.h" /* htt_dbg_stats_type, etc. */
+
+/* TID */
+#define OL_HTT_TID_NON_QOS_UNICAST 16
+#define OL_HTT_TID_NON_QOS_MCAST_BCAST 18
+
+struct htt_pdev_t;
+typedef struct htt_pdev_t *htt_pdev_handle;
+
+htt_pdev_handle
+htt_pdev_alloc(ol_txrx_pdev_handle txrx_pdev,
+ ol_pdev_handle ctrl_pdev,
+ HTC_HANDLE htc_pdev, cdf_device_t osdev);
+
+/**
+ * @brief Allocate and initialize a HTT instance.
+ * @details
+ * This function allocates and initializes an HTT instance.
+ * This involves allocating a pool of HTT tx descriptors in
+ * consistent memory, allocating and filling a rx ring (LL only),
+ * and connecting the HTC's HTT_DATA_MSG service.
+ * The HTC service connect call will block, so this function
+ * needs to be called in passive context.
+ * Because HTC setup has not been completed at the time this function
+ * is called, this function cannot send any HTC messages to the target.
+ * Messages to configure the target are instead sent in the
+ * htt_attach_target function.
+ *
+ * @param pdev - data SW's physical device handle
+ * (used as context pointer during HTT -> txrx calls)
+ * @param desc_pool_size - number of HTT descriptors to (pre)allocate
+ * @return 0 on success; nonzero on failure
+ */
+int
+htt_attach(struct htt_pdev_t *pdev, int desc_pool_size);
+
+/**
+ * @brief Send HTT configuration messages to the target.
+ * @details
+ * For LL only, this function sends a rx ring configuration message to the
+ * target. For HL, this function is a no-op.
+ *
+ * @param htt_pdev - handle to the HTT instance being initialized
+ */
+A_STATUS htt_attach_target(htt_pdev_handle htt_pdev);
+
+/**
+ * enum htt_op_mode - Virtual device operation mode
+ *
+ * @htt_op_mode_unknown: Unknown mode
+ * @htt_op_mode_ap: AP mode
+ * @htt_op_mode_ibss: IBSS mode
+ * @htt_op_mode_sta: STA (client) mode
+ * @htt_op_mode_monitor: Monitor mode
+ * @htt_op_mode_ocb: OCB mode
+ */
+enum htt_op_mode {
+ htt_op_mode_unknown,
+ htt_op_mode_ap,
+ htt_op_mode_ibss,
+ htt_op_mode_sta,
+ htt_op_mode_monitor,
+ htt_op_mode_ocb,
+};
+
+/* no-ops */
+#define htt_vdev_attach(htt_pdev, vdev_id, op_mode)
+#define htt_vdev_detach(htt_pdev, vdev_id)
+#define htt_peer_qos_update(htt_pdev, peer_id, qos_capable)
+#define htt_peer_uapsdmask_update(htt_pdev, peer_id, uapsd_mask)
+
+void htt_pdev_free(htt_pdev_handle pdev);
+
+/**
+ * @brief Deallocate a HTT instance.
+ *
+ * @param htt_pdev - handle to the HTT instance being torn down
+ */
+void htt_detach(htt_pdev_handle htt_pdev);
+
+/**
+ * @brief Stop the communication between HTT and target
+ * @details
+ * For ISOC solution, this function stop the communication between HTT and
+ * target.
+ * For Peregrine/Rome, it's already stopped by ol_ath_disconnect_htc
+ * before ol_txrx_pdev_detach called in ol_ath_detach. So this function is
+ * a no-op.
+ * Peregrine/Rome HTT layer is on top of HTC while ISOC solution HTT layer is
+ * on top of DXE layer.
+ *
+ * @param htt_pdev - handle to the HTT instance being initialized
+ */
+void htt_detach_target(htt_pdev_handle htt_pdev);
+
+/*
+ * @brief Tell the target side of HTT to suspend H2T processing until synced
+ * @param htt_pdev - the host HTT object
+ * @param sync_cnt - what sync count value the target HTT FW should wait for
+ * before resuming H2T processing
+ */
+A_STATUS htt_h2t_sync_msg(htt_pdev_handle htt_pdev, uint8_t sync_cnt);
+
+int
+htt_h2t_aggr_cfg_msg(htt_pdev_handle htt_pdev,
+ int max_subfrms_ampdu, int max_subfrms_amsdu);
+
+/**
+ * @brief Get the FW status
+ * @details
+ * Trigger FW HTT to retrieve FW status.
+ * A separate HTT message will come back with the statistics we want.
+ *
+ * @param pdev - handle to the HTT instance
+ * @param stats_type_upload_mask - bitmask identifying which stats to upload
+ * @param stats_type_reset_mask - bitmask identifying which stats to reset
+ * @param cookie - unique value to distinguish and identify stats requests
+ * @return 0 - succeed to send the request to FW; otherwise, failed to do so.
+ */
+int
+htt_h2t_dbg_stats_get(struct htt_pdev_t *pdev,
+ uint32_t stats_type_upload_mask,
+ uint32_t stats_type_reset_mask,
+ uint8_t cfg_stats_type,
+ uint32_t cfg_val, uint64_t cookie);
+
+/**
+ * @brief Get the fields from HTT T2H stats upload message's stats info header
+ * @details
+ * Parse an HTT T2H message's stats info tag-length-value header,
+ * to obtain the stats type, status, data length, and data address.
+ *
+ * @param stats_info_list - address of stats record's header
+ * @param[out] type - which type of FW stats are contained in the record
+ * @param[out] status - whether the stats are (fully) present in the record
+ * @param[out] length - how large the data portion of the stats record is
+ * @param[out] stats_data - where the data portion of the stats record is
+ */
+void
+htt_t2h_dbg_stats_hdr_parse(uint8_t *stats_info_list,
+ enum htt_dbg_stats_type *type,
+ enum htt_dbg_stats_status *status,
+ int *length, uint8_t **stats_data);
+
+/**
+ * @brief Display a stats record from the HTT T2H STATS_CONF message.
+ * @details
+ * Parse the stats type and status, and invoke a type-specified printout
+ * to display the stats values.
+ *
+ * @param stats_data - buffer holding the stats record from the STATS_CONF msg
+ * @param concise - whether to do a verbose or concise printout
+ */
+void htt_t2h_stats_print(uint8_t *stats_data, int concise);
+
+#ifndef HTT_DEBUG_LEVEL
+#if defined(DEBUG)
+#define HTT_DEBUG_LEVEL 10
+#else
+#define HTT_DEBUG_LEVEL 0
+#endif
+#endif
+
+#if HTT_DEBUG_LEVEL > 5
+void htt_display(htt_pdev_handle pdev, int indent);
+#else
+#define htt_display(pdev, indent)
+#endif
+
+#define HTT_DXE_RX_LOG 0
+#define htt_rx_reorder_log_print(pdev)
+
+#ifdef IPA_OFFLOAD
+int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev);
+
+int
+htt_ipa_uc_get_resource(htt_pdev_handle pdev,
+ cdf_dma_addr_t *ce_sr_base_paddr,
+ uint32_t *ce_sr_ring_size,
+ cdf_dma_addr_t *ce_reg_paddr,
+ cdf_dma_addr_t *tx_comp_ring_base_paddr,
+ uint32_t *tx_comp_ring_size,
+ uint32_t *tx_num_alloc_buffer,
+ cdf_dma_addr_t *rx_rdy_ring_base_paddr,
+ uint32_t *rx_rdy_ring_size,
+ cdf_dma_addr_t *rx_proc_done_idx_paddr,
+ void **rx_proc_done_idx_vaddr,
+ cdf_dma_addr_t *rx2_rdy_ring_base_paddr,
+ uint32_t *rx2_rdy_ring_size,
+ cdf_dma_addr_t *rx2_proc_done_idx_paddr,
+ void **rx2_proc_done_idx_vaddr);
+
+int
+htt_ipa_uc_set_doorbell_paddr(htt_pdev_handle pdev,
+ cdf_dma_addr_t ipa_uc_tx_doorbell_paddr,
+ cdf_dma_addr_t ipa_uc_rx_doorbell_paddr);
+
+int
+htt_h2t_ipa_uc_set_active(struct htt_pdev_t *pdev, bool uc_active, bool is_tx);
+
+int htt_h2t_ipa_uc_get_stats(struct htt_pdev_t *pdev);
+
+int htt_ipa_uc_attach(struct htt_pdev_t *pdev);
+
+void htt_ipa_uc_detach(struct htt_pdev_t *pdev);
+#else
+/**
+ * htt_h2t_ipa_uc_rsc_cfg_msg() - Send WDI IPA config message to firmware
+ * @pdev: handle to the HTT instance
+ *
+ * Return: 0 success
+ */
+static inline int htt_h2t_ipa_uc_rsc_cfg_msg(struct htt_pdev_t *pdev)
+{
+ return 0;
+}
+
+/**
+ * htt_ipa_uc_get_resource() - Get uc resource from htt and lower layer
+ * @pdev: handle to the HTT instance
+ * @ce_sr_base_paddr: copy engine source ring base physical address
+ * @ce_sr_ring_size: copy engine source ring size
+ * @ce_reg_paddr: copy engine register physical address
+ * @tx_comp_ring_base_paddr: tx comp ring base physical address
+ * @tx_comp_ring_size: tx comp ring size
+ * @tx_num_alloc_buffer: number of allocated tx buffer
+ * @rx_rdy_ring_base_paddr: rx ready ring base physical address
+ * @rx_rdy_ring_size: rx ready ring size
+ * @rx_proc_done_idx_paddr: rx process done index physical address
+ * @rx_proc_done_idx_vaddr: rx process done index virtual address
+ * @rx2_rdy_ring_base_paddr: rx done ring base physical address
+ * @rx2_rdy_ring_size: rx done ring size
+ * @rx2_proc_done_idx_paddr: rx done index physical address
+ * @rx2_proc_done_idx_vaddr: rx done index virtual address
+ *
+ * Return: 0 success
+ */
+static inline int
+htt_ipa_uc_get_resource(htt_pdev_handle pdev,
+ cdf_dma_addr_t *ce_sr_base_paddr,
+ uint32_t *ce_sr_ring_size,
+ cdf_dma_addr_t *ce_reg_paddr,
+ cdf_dma_addr_t *tx_comp_ring_base_paddr,
+ uint32_t *tx_comp_ring_size,
+ uint32_t *tx_num_alloc_buffer,
+ cdf_dma_addr_t *rx_rdy_ring_base_paddr,
+ uint32_t *rx_rdy_ring_size,
+ cdf_dma_addr_t *rx_proc_done_idx_paddr,
+ void **rx_proc_done_idx_vaddr,
+ cdf_dma_addr_t *rx2_rdy_ring_base_paddr,
+ uint32_t *rx2_rdy_ring_size,
+ cdf_dma_addr_t *rx2_proc_done_idx_paddr,
+ void **rx2_proc_done_idx_vaddr)
+{
+ return 0;
+}
+
+/**
+ * htt_ipa_uc_set_doorbell_paddr() - Propagate IPA doorbell address
+ * @pdev: handle to the HTT instance
+ * @ipa_uc_tx_doorbell_paddr: TX doorbell base physical address
+ * @ipa_uc_rx_doorbell_paddr: RX doorbell base physical address
+ *
+ * Return: 0 success
+ */
+static inline int
+htt_ipa_uc_set_doorbell_paddr(htt_pdev_handle pdev,
+			      cdf_dma_addr_t ipa_uc_tx_doorbell_paddr,
+			      cdf_dma_addr_t ipa_uc_rx_doorbell_paddr)
+{
+	return 0;
+}
+
+/**
+ * htt_h2t_ipa_uc_set_active() - Propagate WDI path enable/disable to firmware
+ * @pdev: handle to the HTT instance
+ * @uc_active: WDI UC path enable or not
+ * @is_tx: TX path or RX path
+ *
+ * Return: 0 success
+ */
+static inline int
+htt_h2t_ipa_uc_set_active(struct htt_pdev_t *pdev, bool uc_active,
+ bool is_tx)
+{
+ return 0;
+}
+
+/**
+ * htt_h2t_ipa_uc_get_stats() - WDI UC state query request to firmware
+ * @pdev: handle to the HTT instance
+ *
+ * Return: 0 success
+ */
+static inline int htt_h2t_ipa_uc_get_stats(struct htt_pdev_t *pdev)
+{
+ return 0;
+}
+
+/**
+ * htt_ipa_uc_attach() - Allocate UC data path resources
+ * @pdev: handle to the HTT instance
+ *
+ * Return: 0 success
+ */
+static inline int htt_ipa_uc_attach(struct htt_pdev_t *pdev)
+{
+ return 0;
+}
+
+/**
+ * htt_ipa_uc_detach() - Remove UC data path resources
+ * @pdev: handle to the HTT instance
+ *
+ * Return: 0 success
+ */
+static inline void htt_ipa_uc_detach(struct htt_pdev_t *pdev)
+{
+ return;
+}
+#endif /* IPA_OFFLOAD */
+
+#endif /* _OL_HTT_API__H_ */
diff --git a/dp/ol/inc/ol_htt_rx_api.h b/dp/ol/inc/ol_htt_rx_api.h
new file mode 100644
index 000000000000..d94c707da432
--- /dev/null
+++ b/dp/ol/inc/ol_htt_rx_api.h
@@ -0,0 +1,863 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_htt_rx_api.h
+ * @brief Specify the rx HTT API functions called by the host data SW.
+ * @details
+ * This file declares the HTT API functions that are specifically
+ * related to receive processing.
+ * In particular, this file specifies methods of the abstract HTT rx
+ * descriptor, and functions to iterate though a series of rx descriptors
+ * and rx MSDU buffers.
+ */
+#ifndef _OL_HTT_RX_API__H_
+#define _OL_HTT_RX_API__H_
+
+/* #include <osapi_linux.h> / * uint16_t, etc. * / */
+#include <osdep.h> /* uint16_t, etc. */
+#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <cdf_types.h> /* bool */
+
+#include <htt.h> /* HTT_RX_IND_MPDU_STATUS */
+#include <ol_htt_api.h> /* htt_pdev_handle */
+
+#include <cds_ieee80211_defines.h> /* ieee80211_rx_status */
+#include <ol_vowext_dbg_defs.h>
+
+/*================ constants and types used in the rx API ===================*/
+
+#define HTT_RSSI_INVALID 0x7fff
+
+/**
+ * struct ocb_rx_stats_hdr_t - RX stats header
+ * @version: The version must be 1.
+ * @length: The length of this structure
+ * @channel_freq: The center frequency for the packet
+ * @rssi_cmb: combined RSSI from all chains
+ * @rssi: rssi for chains 0 through 3 (for 20 MHz bandwidth)
+ * @tsf32: timestamp in TSF units
+ * @timestamp_microsec: timestamp in microseconds
+ * @datarate: MCS index
+ * @timestamp_submicrosec: submicrosecond portion of the timestamp
+ * @ext_tid: Extended TID
+ * @reserved: Ensure the size of the structure is a multiple of 4.
+ * Must be 0.
+ *
+ * When receiving an OCB packet, the RX stats is sent to the user application
+ * so that the user application can do processing based on the RX stats.
+ * This structure will be preceded by an ethernet header with
+ * the proto field set to 0x8152. This struct includes various RX
+ * parameters including RSSI, data rate, and center frequency.
+ */
+PREPACK struct ocb_rx_stats_hdr_t {
+ uint16_t version;
+ uint16_t length;
+ uint16_t channel_freq;
+ int16_t rssi_cmb;
+ int16_t rssi[4];
+ uint32_t tsf32;
+ uint32_t timestamp_microsec;
+ uint8_t datarate;
+ uint8_t timestamp_submicrosec;
+ uint8_t ext_tid;
+ uint8_t reserved;
+};
+
+/*================ rx indication message field access methods ===============*/
+
+/**
+ * @brief Check if a rx indication message has a rx reorder flush command.
+ * @details
+ * Space is reserved in each rx indication message for a rx reorder flush
+ * command, to release specified MPDUs from the rx reorder holding array
+ * before processing the new MPDUs referenced by the rx indication message.
+ * This rx reorder flush command contains a flag to show whether the command
+ * is valid within a given rx indication message.
+ * This function checks the validity flag from the rx indication
+ * flush command IE within the rx indication message.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param rx_ind_msg - the netbuf containing the rx indication message
+ * @return
+ * 1 - the message's rx flush command is valid and should be processed
+ * before processing new rx MPDUs,
+ * -OR-
+ * 0 - the message's rx flush command is invalid and should be ignored
+ */
+int htt_rx_ind_flush(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg);
+
+/**
+ * @brief Return the sequence number starting the range of MPDUs to flush.
+ * @details
+ * Read the fields of the rx indication message that identify the start
+ * and end of the range of MPDUs to flush from the rx reorder holding array
+ * and send on to subsequent stages of rx processing.
+ * These sequence numbers are the 6 LSBs of the 12-bit 802.11 sequence
+ * number. These sequence numbers are masked with the block ack window size,
+ * rounded up to a power of two (minus one, to create a bitmask) to obtain
+ * the corresponding index into the rx reorder holding array.
+ * The series of MPDUs to flush includes the one specified by the start
+ * sequence number.
+ * The series of MPDUs to flush excludes the one specified by the end
+ * sequence number; the MPDUs up to but not including the end sequence number
+ * are to be flushed.
+ * These start and end seq num fields are only valid if the "flush valid"
+ * flag is set.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param rx_ind_msg - the netbuf containing the rx indication message
+ * @param seq_num_start - (call-by-reference output) sequence number
+ * for the start of the range of MPDUs to flush
+ * @param seq_num_end - (call-by-reference output) sequence number
+ * for the end of the range of MPDUs to flush
+ */
+void
+htt_rx_ind_flush_seq_num_range(htt_pdev_handle pdev,
+ cdf_nbuf_t rx_ind_msg,
+ unsigned *seq_num_start, unsigned *seq_num_end);
+
+/**
+ * @brief Check if a rx indication message has a rx reorder release command.
+ * @details
+ * Space is reserved in each rx indication message for a rx reorder release
+ * command, to release specified MPDUs from the rx reorder holding array
+ * after processing the new MPDUs referenced by the rx indication message.
+ * This rx reorder release command contains a flag to show whether the command
+ * is valid within a given rx indication message.
+ * This function checks the validity flag from the rx indication
+ * release command IE within the rx indication message.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param rx_ind_msg - the netbuf containing the rx indication message
+ * @return
+ * 1 - the message's rx release command is valid and should be processed
+ * after processing new rx MPDUs,
+ * -OR-
+ * 0 - the message's rx release command is invalid and should be ignored
+ */
+int htt_rx_ind_release(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg);
+
+/**
+ * @brief Return the sequence number starting the range of MPDUs to release.
+ * @details
+ * Read the fields of the rx indication message that identify the start
+ * and end of the range of MPDUs to release from the rx reorder holding
+ * array and send on to subsequent stages of rx processing.
+ * These sequence numbers are the 6 LSBs of the 12-bit 802.11 sequence
+ * number. These sequence numbers are masked with the block ack window size,
+ * rounded up to a power of two (minus one, to create a bitmask) to obtain
+ * the corresponding index into the rx reorder holding array.
+ * The series of MPDUs to release includes the one specified by the start
+ * sequence number.
+ * The series of MPDUs to release excludes the one specified by the end
+ * sequence number; the MPDUs up to but not including the end sequence number
+ * are to be released.
+ * These start and end seq num fields are only valid if the "release valid"
+ * flag is set.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param rx_ind_msg - the netbuf containing the rx indication message
+ * @param seq_num_start - (call-by-reference output) sequence number
+ * for the start of the range of MPDUs to release
+ * @param seq_num_end - (call-by-reference output) sequence number
+ * for the end of the range of MPDUs to release
+ */
+void
+htt_rx_ind_release_seq_num_range(htt_pdev_handle pdev,
+ cdf_nbuf_t rx_ind_msg,
+ unsigned *seq_num_start,
+ unsigned *seq_num_end);
+
+/*
+ * For now, the host HTT -> host data rx status enum
+ * exactly matches the target HTT -> host HTT rx status enum;
+ * no translation is required.
+ * However, the host data SW should only use the htt_rx_status,
+ * so that in the future a translation from target HTT rx status
+ * to host HTT rx status can be added, if the need ever arises.
+ */
+enum htt_rx_status {
+ htt_rx_status_unknown = HTT_RX_IND_MPDU_STATUS_UNKNOWN,
+ htt_rx_status_ok = HTT_RX_IND_MPDU_STATUS_OK,
+ htt_rx_status_err_fcs = HTT_RX_IND_MPDU_STATUS_ERR_FCS,
+ htt_rx_status_err_dup = HTT_RX_IND_MPDU_STATUS_ERR_DUP,
+ htt_rx_status_err_replay = HTT_RX_IND_MPDU_STATUS_ERR_REPLAY,
+ htt_rx_status_err_inv_peer = HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER,
+ htt_rx_status_ctrl_mgmt_null = HTT_RX_IND_MPDU_STATUS_MGMT_CTRL,
+ htt_rx_status_tkip_mic_err = HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR,
+
+ htt_rx_status_err_misc = HTT_RX_IND_MPDU_STATUS_ERR_MISC
+};
+
+/**
+ * @brief Check the status MPDU range referenced by a rx indication message.
+ * @details
+ * Check the status of a range of MPDUs referenced by a rx indication message.
+ * This status determines whether the MPDUs should be processed or discarded.
+ * If the status is OK, then the MPDUs within the range should be processed
+ * as usual.
+ * Otherwise (FCS error, duplicate error, replay error, unknown sender error,
+ * etc.) the MPDUs within the range should be discarded.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param rx_ind_msg - the netbuf containing the rx indication message
+ * @param mpdu_range_num - which MPDU range within the rx ind msg to check,
+ * starting from 0
+ * @param status - (call-by-reference output) MPDU status
+ * @param mpdu_count - (call-by-reference output) count of MPDUs comprising
+ * the specified MPDU range
+ */
+void
+htt_rx_ind_mpdu_range_info(htt_pdev_handle pdev,
+ cdf_nbuf_t rx_ind_msg,
+ int mpdu_range_num,
+ enum htt_rx_status *status, int *mpdu_count);
+
+/**
+ * @brief Return the RSSI provided in a rx indication message.
+ * @details
+ * Return the RSSI from an rx indication message, converted to dBm units.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param rx_ind_msg - the netbuf containing the rx indication message
+ * @return RSSI in dBm, or HTT_INVALID_RSSI
+ */
+int16_t
+htt_rx_ind_rssi_dbm(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg);
+
+int16_t
+htt_rx_ind_rssi_dbm_chain(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg,
+ int8_t chain);
+
+void
+htt_rx_ind_legacy_rate(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg,
+ uint8_t *legacy_rate, uint8_t *legacy_rate_sel);
+
+
+void
+htt_rx_ind_timestamp(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg,
+ uint32_t *timestamp_microsec,
+ uint8_t *timestamp_submicrosec);
+
+uint32_t
+htt_rx_ind_tsf32(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg);
+
+uint8_t
+htt_rx_ind_ext_tid(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg);
+
+
+/*==================== rx MPDU descriptor access methods ====================*/
+
+/**
+ * @brief Check if the retry bit is set in Rx-descriptor
+ * @details
+ * This function returns the retry bit of the 802.11 header for the
+ * provided rx MPDU descriptor.
+ *
+ * @param pdev - the handle of the physical device the rx data was received on
+ * @param mpdu_desc - the abstract descriptor for the MPDU in question
+ * @return boolean -- true if retry is set, false otherwise
+ */
+extern
+bool (*htt_rx_mpdu_desc_retry)(
+ htt_pdev_handle pdev, void *mpdu_desc);
+
+/**
+ * @brief Return a rx MPDU's sequence number.
+ * @details
+ * This function returns the LSBs of the 802.11 sequence number for the
+ * provided rx MPDU descriptor.
+ * Depending on the system, 6-12 LSBs from the 802.11 sequence number are
+ * returned. (Typically, either the 8 or 12 LSBs are returned.)
+ * This sequence number is masked with the block ack window size,
+ * rounded up to a power of two (minus one, to create a bitmask) to obtain
+ * the corresponding index into the rx reorder holding array.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param mpdu_desc - the abstract descriptor for the MPDU in question
+ * @return the LSBs of the sequence number for the MPDU
+ */
+extern uint16_t
+(*htt_rx_mpdu_desc_seq_num)(htt_pdev_handle pdev, void *mpdu_desc);
+
+/**
+ * @brief Return a rx MPDU's rx reorder array index, based on sequence number.
+ * @details
+ * This function returns a sequence-number based index into the rx
+ * reorder array for the specified MPDU.
+ * In some systems, this rx reorder array is simply the LSBs of the
+ * sequence number, or possibly even the full sequence number.
+ * To support such systems, the returned index has to be masked with
+ * the power-of-two array size before using the value to index the
+ * rx reorder array.
+ * In other systems, this rx reorder array index is
+ * (sequence number) % (block ack window size)
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param mpdu_desc - the abstract descriptor for the MPDU in question
+ * @return the rx reorder array index the MPDU goes into
+ */
+/* use sequence number (or LSBs thereof) as rx reorder array index */
+#define htt_rx_mpdu_desc_reorder_idx htt_rx_mpdu_desc_seq_num
+
+union htt_rx_pn_t {
+ /* WEP: 24-bit PN */
+ uint32_t pn24;
+
+ /* TKIP or CCMP: 48-bit PN */
+ uint64_t pn48;
+
+ /* WAPI: 128-bit PN */
+ uint64_t pn128[2];
+};
+
+/**
+ * @brief Find the packet number (PN) for a MPDU.
+ * @details
+ * This function only applies when the rx PN check is configured to be
+ * performed in the host rather than the target, and on peers using a
+ * security type for which a PN check applies.
+ * The pn_len_bits argument is used to determine which element of the
+ * htt_rx_pn_t union to deposit the PN value read from the MPDU descriptor
+ * into.
+ * A 24-bit PN is deposited into pn->pn24.
+ * A 48-bit PN is deposited into pn->pn48.
+ * A 128-bit PN is deposited in little-endian order into pn->pn128.
+ * Specifically, bits 63:0 of the PN are copied into pn->pn128[0], while
+ * bits 127:64 of the PN are copied into pn->pn128[1].
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param mpdu_desc - the abstract descriptor for the MPDU in question
+ * @param pn - the location to copy the packet number into
+ * @param pn_len_bits - the PN size, in bits
+ */
+extern void (*htt_rx_mpdu_desc_pn)(htt_pdev_handle pdev,
+ void *mpdu_desc,
+ union htt_rx_pn_t *pn, int pn_len_bits);
+
+/**
+ * @brief This function Returns the TID value from the Rx descriptor
+ * for Low Latency driver
+ * @details
+ * This function returns the TID set in the 802.11 QoS Control for the MPDU
+ * in the packet header, by looking at the mpdu_start of the Rx descriptor.
+ * Rx descriptor gets a copy of the TID from the MAC.
+ * @pdev: Handle (pointer) to HTT pdev.
+ * @param mpdu_desc - the abstract descriptor for the MPDU in question
+ * @return: Actual TID set in the packet header.
+ */
+extern
+uint8_t (*htt_rx_mpdu_desc_tid)(
+ htt_pdev_handle pdev, void *mpdu_desc);
+
+/**
+ * @brief Return the TSF timestamp indicating when a MPDU was received.
+ * @details
+ * This function provides the timestamp indicating when the PPDU that
+ * the specified MPDU belongs to was received.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param mpdu_desc - the abstract descriptor for the MPDU in question
+ * @return 32 LSBs of TSF time at which the MPDU's PPDU was received
+ */
+uint32_t htt_rx_mpdu_desc_tsf32(htt_pdev_handle pdev, void *mpdu_desc);
+
+/**
+ * @brief Return the 802.11 header of the MPDU
+ * @details
+ * This function provides a pointer to the start of the 802.11 header
+ * of the Rx MPDU
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param mpdu_desc - the abstract descriptor for the MPDU in question
+ * @return pointer to 802.11 header of the received MPDU
+ */
+char *htt_rx_mpdu_wifi_hdr_retrieve(htt_pdev_handle pdev, void *mpdu_desc);
+
+/**
+ * @brief Return the RSSI provided in a rx descriptor.
+ * @details
+ * Return the RSSI from a rx descriptor, converted to dBm units.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param mpdu_desc - the abstract descriptor for the MPDU in question
+ * @return RSSI in dBm, or HTT_INVALID_RSSI
+ */
+int16_t htt_rx_mpdu_desc_rssi_dbm(htt_pdev_handle pdev, void *mpdu_desc);
+
+/*==================== rx MSDU descriptor access methods ====================*/
+
+/**
+ * @brief Check if a MSDU completes a MPDU.
+ * @details
+ * When A-MSDU aggregation is used, a single MPDU will consist of
+ * multiple MSDUs. This function checks a MSDU's rx descriptor to
+ * see whether the MSDU is the final MSDU within a MPDU.
+ *
+ * @param pdev - the handle of the physical device the rx data was received on
+ * @param msdu_desc - the abstract descriptor for the MSDU in question
+ * @return
+ * 0 - there are subsequent MSDUs within the A-MSDU / MPDU
+ * -OR-
+ * 1 - this is the last MSDU within its MPDU
+ */
+extern bool (*htt_rx_msdu_desc_completes_mpdu)(htt_pdev_handle pdev,
+ void *msdu_desc);
+
+/**
+ * @brief Check if a MSDU is first msdu of MPDU.
+ * @details
+ * When A-MSDU aggregation is used, a single MPDU will consist of
+ * multiple MSDUs. This function checks a MSDU's rx descriptor to
+ * see whether the MSDU is the first MSDU within a MPDU.
+ *
+ * @param pdev - the handle of the physical device the rx data was received on
+ * @param msdu_desc - the abstract descriptor for the MSDU in question
+ * @return
+ * 0 - this is interior MSDU in the A-MSDU / MPDU
+ * -OR-
+ * 1 - this is the first MSDU within its MPDU
+ */
+extern bool (*htt_rx_msdu_first_msdu_flag)(htt_pdev_handle pdev,
+ void *msdu_desc);
+
+/**
+ * @brief Retrieve encrypt bit from a mpdu desc.
+ * @details
+ * Fw will pass all the frame to the host whether encrypted or not, and will
+ * indicate the encrypt flag in the desc, this function is to get the info
+ * and used to make a judge whether should make pn check, because
+ * non-encrypted frames always get the same pn number 0.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param mpdu_desc - the abstract descriptor for the MPDU in question
+ * @return 0 - the frame was not encrypted
+ * 1 - the frame was encrypted
+ */
+extern bool (*htt_rx_mpdu_is_encrypted)(htt_pdev_handle pdev, void *mpdu_desc);
+
+/**
+ * @brief Indicate whether a rx desc has a WLAN unicast vs. mcast/bcast flag.
+ * @details
+ * A flag indicating whether a MPDU was delivered over WLAN as unicast or
+ * multicast/broadcast may be only valid once per MPDU (LL), or within each
+ * rx descriptor for the MSDUs within the MPDU (HL). (In practice, it is
+ * unlikely that A-MSDU aggregation will be used in HL, so typically HL will
+ * only have one MSDU per MPDU anyway.)
+ * This function indicates whether the specified rx descriptor contains
+ * a WLAN ucast vs. mcast/bcast flag.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param msdu_desc - the abstract descriptor for the MSDU in question
+ * @return
+ * 0 - The rx descriptor does not contain a WLAN ucast vs. mcast flag.
+ * -OR-
+ * 1 - The rx descriptor has a valid WLAN ucast vs. mcast flag.
+ */
+extern int (*htt_rx_msdu_has_wlan_mcast_flag)(htt_pdev_handle pdev,
+ void *msdu_desc);
+
+/**
+ * @brief Indicate whether a MSDU was received as unicast or mcast/bcast
+ * @details
+ * Indicate whether the MPDU that the specified MSDU belonged to was
+ * delivered over the WLAN as unicast, or as multicast/broadcast.
+ * This query can only be performed on rx descriptors for which
+ * htt_rx_msdu_has_wlan_mcast_flag is true.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param msdu_desc - the abstract descriptor for the MSDU in question
+ * @return
+ * 0 - The MSDU was delivered over the WLAN as unicast.
+ * -OR-
+ * 1 - The MSDU was delivered over the WLAN as broadcast or multicast.
+ */
+extern bool (*htt_rx_msdu_is_wlan_mcast)(htt_pdev_handle pdev, void *msdu_desc);
+
+/**
+ * @brief Indicate whether a MSDU was received as a fragmented frame
+ * @details
+ * This query can only be performed on LL system.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param msdu_desc - the abstract descriptor for the MSDU in question
+ * @return
+ * 0 - The MSDU was a non-fragmented frame.
+ * -OR-
+ * 1 - The MSDU was fragmented frame.
+ */
+extern int (*htt_rx_msdu_is_frag)(htt_pdev_handle pdev, void *msdu_desc);
+
+/**
+ * @brief Indicate if a MSDU should be delivered to the OS shim or discarded.
+ * @details
+ * Indicate whether a MSDU should be discarded or delivered to the OS shim.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param msdu_desc - the abstract descriptor for the MSDU in question
+ * @return
+ * 0 - The MSDU should be delivered to the OS
+ * -OR-
+ * non-zero - The MSDU should not be delivered to the OS.
+ * If the "forward" flag is set, it should be forwarded to tx.
+ * Else, it should be discarded.
+ */
+int htt_rx_msdu_discard(htt_pdev_handle pdev, void *msdu_desc);
+
+/**
+ * @brief Indicate whether a MSDU should be forwarded to tx.
+ * @details
+ * Indicate whether a MSDU should be forwarded to tx, e.g. for intra-BSS
+ * STA-to-STA forwarding in an AP, or for multicast echo in an AP.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param msdu_desc - the abstract descriptor for the MSDU in question
+ * @return
+ * 0 - The MSDU should not be forwarded
+ * -OR-
+ * non-zero - The MSDU should be forwarded.
+ * If the "discard" flag is set, then the original MSDU can be
+ * directly forwarded into the tx path.
+ * Else, a copy (clone?) of the rx MSDU needs to be created to
+ * send to the tx path.
+ */
+int htt_rx_msdu_forward(htt_pdev_handle pdev, void *msdu_desc);
+
+/**
+ * @brief Indicate whether a MSDU's contents need to be inspected.
+ * @details
+ * Indicate whether the host data SW needs to examine the contents of the
+ * received MSDU, and based on the packet type infer what special handling
+ * to provide for the MSDU.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param msdu_desc - the abstract descriptor for the MSDU in question
+ * @return
+ * 0 - No inspection + special handling is required.
+ * -OR-
+ * non-zero - Inspect the MSDU contents to infer what special handling
+ * to apply to the MSDU.
+ */
+int htt_rx_msdu_inspect(htt_pdev_handle pdev, void *msdu_desc);
+
+/**
+ * @brief Provide all action specifications for a rx MSDU
+ * @details
+ * Provide all action specifications together. This provides the same
+ * information in a single function call as would be provided by calling
+ * the functions htt_rx_msdu_discard, htt_rx_msdu_forward, and
+ * htt_rx_msdu_inspect.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param msdu_desc - the abstract descriptor for the MSDU in question
+ * @param[out] discard - 1: discard the MSDU, 0: deliver the MSDU to the OS
+ * @param[out] forward - 1: forward the rx MSDU to tx, 0: no rx->tx forward
+ * @param[out] inspect - 1: process according to MSDU contents, 0: no inspect
+ */
+void
+htt_rx_msdu_actions(htt_pdev_handle pdev,
+ void *msdu_desc, int *discard, int *forward, int *inspect);
+
+/**
+ * @brief Get the key id sent in IV of the frame
+ * @details
+ * Provide the key index octet which is taken from IV.
+ * This is valid only for the first MSDU.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param msdu_desc - the abstract descriptor for the MSDU in question
+ * @param key_id - Key id octet
+ * @return indication of whether key id access is successful
+ * true - Success
+ * false - if this is not first msdu
+ */
+extern bool
+(*htt_rx_msdu_desc_key_id)(htt_pdev_handle pdev,
+ void *mpdu_desc, uint8_t *key_id);
+
+extern bool
+(*htt_rx_msdu_chan_info_present)(
+ htt_pdev_handle pdev,
+ void *mpdu_desc);
+
+extern bool
+(*htt_rx_msdu_center_freq)(
+ htt_pdev_handle pdev,
+ struct ol_txrx_peer_t *peer,
+ void *mpdu_desc,
+ uint16_t *primary_chan_center_freq_mhz,
+ uint16_t *contig_chan1_center_freq_mhz,
+ uint16_t *contig_chan2_center_freq_mhz,
+ uint8_t *phy_mode);
+
+/*====================== rx MSDU + descriptor delivery ======================*/
+
+/**
+ * @brief Return a linked-list of network buffer holding the next rx A-MSDU.
+ * @details
+ * In some systems, the rx MSDUs are uploaded along with the rx
+ * indication message, while in other systems the rx MSDUs are uploaded
+ * out of band, via MAC DMA.
+ * This function provides an abstract way to obtain a linked-list of the
+ * next MSDUs, regardless of whether the MSDU was delivered in-band with
+ * the rx indication message, or out of band through MAC DMA.
+ * In a LL system, this function returns a linked list of the one or more
+ * MSDUs that together comprise an A-MSDU.
+ * In a HL system, this function returns a degenerate linked list consisting
+ * of a single MSDU (head_msdu == tail_msdu).
+ * This function also makes sure each MSDU's rx descriptor can be found
+ * through the MSDU's network buffer.
+ * In most systems, this is trivial - a single network buffer stores both
+ * the MSDU rx descriptor and the MSDU payload.
+ * In systems where the rx descriptor is in a separate buffer from the
+ * network buffer holding the MSDU payload, a pointer to the rx descriptor
+ * has to be stored in the network buffer.
+ * After this function call, the descriptor for a given MSDU can be
+ * obtained via the htt_rx_msdu_desc_retrieve function.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param rx_ind_msg - the netbuf containing the rx indication message
+ * @param head_msdu - call-by-reference network buffer handle, which gets set
+ * in this function to point to the head MSDU of the A-MSDU
+ * @param tail_msdu - call-by-reference network buffer handle, which gets set
+ * in this function to point to the tail MSDU of the A-MSDU, or the
+ * same MSDU that the head_msdu points to if only a single MSDU is
+ * delivered at a time.
+ * @return indication of whether any MSDUs in the AMSDU use chaining:
+ * 0 - no buffer chaining
+ * 1 - buffers are chained
+ */
+extern int
+(*htt_rx_amsdu_pop)(htt_pdev_handle pdev,
+ cdf_nbuf_t rx_ind_msg,
+ cdf_nbuf_t *head_msdu, cdf_nbuf_t *tail_msdu);
+
+extern int
+(*htt_rx_frag_pop)(htt_pdev_handle pdev,
+ cdf_nbuf_t rx_ind_msg,
+ cdf_nbuf_t *head_msdu, cdf_nbuf_t *tail_msdu);
+
+/**
+ * @brief Return a linked list of buffers holding one MSDU
+ * In some systems the buffers are delivered along with offload delivery
+ * indication message itself, while in other systems the buffers are uploaded
+ * out of band, via MAC DMA.
+ * @details
+ * This function provides an abstract way to obtain a linked-list of the
+ * buffers corresponding to an msdu, regardless of whether the MSDU was
+ * delivered in-band with the rx indication message, or out of band through
+ * MAC DMA.
+ * In a LL system, this function returns a linked list of one or more
+ * buffers corresponding to an MSDU
+ * In a HL system , TODO
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param offload_deliver_msg - the netbuf containing the offload deliver message
+ * @param head_msdu - call-by-reference network buffer handle, which gets set in this
+ * function to the head buffer of this MSDU
+ * @param tail_msdu - call-by-reference network buffer handle, which gets set in this
+ * function to the tail buffer of this MSDU
+ */
+extern int
+(*htt_rx_offload_msdu_pop)(htt_pdev_handle pdev,
+ cdf_nbuf_t offload_deliver_msg,
+ int *vdev_id,
+ int *peer_id,
+ int *tid,
+ uint8_t *fw_desc,
+ cdf_nbuf_t *head_buf, cdf_nbuf_t *tail_buf);
+
+/**
+ * @brief Return the rx descriptor for the next rx MPDU.
+ * @details
+ * The rx MSDU descriptors may be uploaded as part of the rx indication
+ * message, or delivered separately out of band.
+ * This function provides an abstract way to obtain the next MPDU descriptor,
+ * regardless of whether the MPDU descriptors are delivered in-band with
+ * the rx indication message, or out of band.
+ * This is used to iterate through the series of MPDU descriptors referenced
+ * by a rx indication message.
+ * The htt_rx_amsdu_pop function should be called before this function
+ * (or at least before using the returned rx descriptor handle), so that
+ * the cache location for the rx descriptor will be flushed before the
+ * rx descriptor gets used.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param rx_ind_msg - the netbuf containing the rx indication message
+ * @return next abstract rx descriptor from the series of MPDUs referenced
+ * by an rx ind msg
+ */
+extern void *
+(*htt_rx_mpdu_desc_list_next)(htt_pdev_handle pdev, cdf_nbuf_t rx_ind_msg);
+
+/**
+ * @brief Retrieve a previously-stored rx descriptor from a MSDU buffer.
+ * @details
+ * The data SW will call the htt_rx_msdu_desc_link macro/function to
+ * link a MSDU's rx descriptor with the buffer holding the MSDU payload.
+ * This function retrieves the rx MSDU descriptor.
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param msdu - the buffer containing the MSDU payload
+ * @return the corresponding abstract rx MSDU descriptor
+ */
+extern void *
+(*htt_rx_msdu_desc_retrieve)(htt_pdev_handle pdev, cdf_nbuf_t msdu);
+
+/**
+ * @brief Free both an rx MSDU descriptor and the associated MSDU buffer.
+ * @details
+ * Usually the WLAN driver does not free rx MSDU buffers, but needs to
+ * do so when an invalid frame (e.g. FCS error) was deposited into the
+ * queue of rx buffers.
+ * This function frees both the rx descriptor and the rx frame.
+ * On some systems, the rx descriptor and rx frame are stored in the
+ * same buffer, and thus one free suffices for both objects.
+ * On other systems, the rx descriptor and rx frame are stored
+ * separately, so distinct frees are internally needed.
+ * However, in either case, the rx descriptor has been associated with
+ * the MSDU buffer, and can be retrieved by htt_rx_msdu_desc_retrieve.
+ * Hence, it is only necessary to provide the MSDU buffer; the HTT SW
+ * internally finds the corresponding MSDU rx descriptor.
+ *
+ * @param htt_pdev - the HTT instance the rx data was received on
+ * @param rx_msdu_desc - rx descriptor for the MSDU being freed
+ * @param msdu - rx frame buffer for the MSDU being freed
+ */
+void htt_rx_desc_frame_free(htt_pdev_handle htt_pdev, cdf_nbuf_t msdu);
+
+/**
+ * @brief Look up and free the rx descriptor for a MSDU.
+ * @details
+ * When the driver delivers rx frames to the OS, it first needs
+ * to free the associated rx descriptors.
+ * In some systems the rx descriptors are allocated in the same
+ * buffer as the rx frames, so this operation is a no-op.
+ * In other systems, the rx descriptors are stored separately
+ * from the rx frames, so the rx descriptor has to be freed.
+ * The descriptor is located from the MSDU buffer with the
+ * htt_rx_desc_frame_free macro/function.
+ *
+ * @param htt_pdev - the HTT instance the rx data was received on
+ * @param msdu - rx frame buffer for the rx MSDU descriptor being freed
+ */
+void htt_rx_msdu_desc_free(htt_pdev_handle htt_pdev, cdf_nbuf_t msdu);
+
+/**
+ * @brief Add new MSDU buffers for the target to fill.
+ * @details
+ * In some systems, the underlying upload mechanism (HIF) allocates new rx
+ * buffers itself. In other systems, the underlying upload mechanism
+ * (MAC DMA) needs to be provided with new rx buffers.
+ * This function is used as an abstract method to indicate to the underlying
+ * data upload mechanism when it is an appropriate time to allocate new rx
+ * buffers.
+ * If the allocation is automatically handled, a la HIF, then this function
+ * call is ignored.
+ * If the allocation has to be done explicitly, a la MAC DMA, then this
+ * function provides the context and timing for such replenishment
+ * allocations.
+ *
+ * @param pdev - the HTT instance the rx data will be received on
+ */
+void htt_rx_msdu_buff_replenish(htt_pdev_handle pdev);
+
+/**
+ * @brief Link a list of MSDUs into a single MPDU. Updates RX stats
+ * @details
+ * When HW MSDU splitting is turned on each MSDU in an AMSDU MPDU occupies
+ * a separate wbuf for delivery to the network stack. For delivery to the
+ * monitor mode interface they need to be restitched into an MPDU. This
+ * function does this. Also updates the RX status if the MPDU starts
+ * a new PPDU
+ *
+ * @param pdev - the HTT instance the rx data was received on
+ * @param head_msdu - network buffer handle, which points to the first MSDU
+ * in the list. This is a NULL terminated list
+ * @param rx_status - pointer to the status associated with this MPDU.
+ * Updated only if there is a new PPDU and new status associated with it
+ * @param clone_not_reqd - If set the MPDU linking destroys the passed in
+ * list, else operates on a cloned nbuf
+ * @return network buffer handle to the MPDU
+ */
+cdf_nbuf_t
+htt_rx_restitch_mpdu_from_msdus(htt_pdev_handle pdev,
+ cdf_nbuf_t head_msdu,
+ struct ieee80211_rx_status *rx_status,
+ unsigned clone_not_reqd);
+
+/**
+ * @brief Return the sequence number of MPDUs to flush.
+ * @param pdev - the HTT instance the rx data was received on
+ * @param rx_frag_ind_msg - the netbuf containing the rx fragment indication message
+ * @param seq_num_start - (call-by-reference output) sequence number
+ * for the start of the range of MPDUs to flush
+ * @param seq_num_end - (call-by-reference output) sequence number
+ * for the end of the range of MPDUs to flush
+ */
+void
+htt_rx_frag_ind_flush_seq_num_range(htt_pdev_handle pdev,
+ cdf_nbuf_t rx_frag_ind_msg,
+ int *seq_num_start, int *seq_num_end);
+/**
+ * @brief Return the HL rx desc size
+ * @param pdev - the HTT instance the rx data was received on
+ * @param msdu_desc - the hl rx desc pointer
+ *
+ */
+uint16_t htt_rx_msdu_rx_desc_size_hl(htt_pdev_handle pdev, void *msdu_desc);
+
+/**
+ * @brief populates vowext stats by processing RX desc.
+ * @param msdu - network buffer handle
+ * @param vowstats - handle to vow ext stats.
+ */
+void htt_rx_get_vowext_stats(cdf_nbuf_t msdu, struct vow_extstats *vowstats);
+
+/**
+ * @brief parses the offload message passed by the target.
+ * @param pdev - pdev handle
+ * @param paddr - physical address of the rx buffer
+ * @param vdev_id - reference to vdev id to be filled
+ * @param peer_id - reference to the peer id to be filled
+ * @param tid - reference to the tid to be filled
+ * @param fw_desc - reference to the fw descriptor to be filled
+ * @param msg_word - pointer to the rx message words to parse
+ * @param head_buf - reference to the head buffer
+ * @param tail_buf - reference to the tail buffer
+ */
+int
+htt_rx_offload_paddr_msdu_pop_ll(htt_pdev_handle pdev,
+ uint32_t *msg_word,
+ int msdu_iter,
+ int *vdev_id,
+ int *peer_id,
+ int *tid,
+ uint8_t *fw_desc,
+ cdf_nbuf_t *head_buf, cdf_nbuf_t *tail_buf);
+#endif /* _OL_HTT_RX_API__H_ */
diff --git a/dp/ol/inc/ol_htt_tx_api.h b/dp/ol/inc/ol_htt_tx_api.h
new file mode 100644
index 000000000000..baacfdb5848f
--- /dev/null
+++ b/dp/ol/inc/ol_htt_tx_api.h
@@ -0,0 +1,979 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_htt_tx_api.h
+ * @brief Specify the tx HTT API functions called by the host data SW.
+ * @details
+ * This file declares the HTT API functions that are specifically
+ * related to transmit processing.
+ * In particular, the methods of the abstract HTT tx descriptor are
+ * specified.
+ */
+#ifndef _OL_HTT_TX_API__H_
+#define _OL_HTT_TX_API__H_
+
+/* #include <osapi_linux.h> / * uint16_t, etc. * / */
+#include <osdep.h> /* uint16_t, etc. */
+#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <ol_cfg.h> /* wlan_frm_fmt */
+
+#include <htt.h> /* needed by inline functions */
+#include <cdf_net_types.h>
+#include <ol_htt_api.h> /* htt_pdev_handle */
+#include <htt_types.h>
+#include <cdf_trace.h>
+
+/*
+ * Remove these macros when they get added to htt.h.
+ * They access the "extension" flag, bit 28 of dword 0 of the HTT tx
+ * descriptor (NOTE(review): presumably this flag indicates that a
+ * tx-control extension such as struct ocb_tx_ctrl_hdr_t follows -
+ * confirm against htt.h).
+ */
+#ifndef HTT_TX_DESC_EXTENSION_GET
+#define HTT_TX_DESC_EXTENSION_OFFSET_BYTES 0
+#define HTT_TX_DESC_EXTENSION_OFFSET_DWORD 0
+#define HTT_TX_DESC_EXTENSION_M 0x10000000
+#define HTT_TX_DESC_EXTENSION_S 28
+
+#define HTT_TX_DESC_EXTENSION_GET(_var) \
+ (((_var) & HTT_TX_DESC_EXTENSION_M) >> HTT_TX_DESC_EXTENSION_S)
+#define HTT_TX_DESC_EXTENSION_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_TX_DESC_EXTENSION, _val); \
+ ((_var) |= ((_val) << HTT_TX_DESC_EXTENSION_S)); \
+ } while (0)
+#endif
+
+/*================ meta-info about tx MSDUs =================================*/
+
+/*
+ * For simplicity, use the IEEE 802.11 frame type values
+ * (the 2-bit Type field of the 802.11 Frame Control field).
+ */
+enum htt_frm_type {
+ htt_frm_type_mgmt = 0, /* management */
+ htt_frm_type_ctrl = 1, /* control */
+ htt_frm_type_data = 2 /* data */
+};
+
+/*
+ * For simplicity, use the IEEE 802.11 frame sub-type values
+ * (the 4-bit Subtype field of the 802.11 Frame Control field).
+ * Gaps in the numbering (e.g. mgmt subtype 7, QoS-data subtype 13)
+ * correspond to values IEEE 802.11 leaves reserved.
+ */
+enum htt_frm_subtype {
+ htt_frm_subtype_mgmt_assoc_req = 0,
+ htt_frm_subtype_mgmt_assoc_resp = 1,
+ htt_frm_subtype_mgmt_reassoc_req = 2,
+ htt_frm_subtype_mgmt_reassoc_resp = 3,
+ htt_frm_subtype_mgmt_probe_req = 4,
+ htt_frm_subtype_mgmt_probe_resp = 5,
+ htt_frm_subtype_mgmt_timing_adv = 6,
+ htt_frm_subtype_mgmt_beacon = 8,
+ htt_frm_subtype_mgmt_atim = 9,
+ htt_frm_subtype_mgmt_disassoc = 10,
+ htt_frm_subtype_mgmt_auth = 11,
+ htt_frm_subtype_mgmt_deauth = 12,
+ htt_frm_subtype_mgmt_action = 13,
+ htt_frm_subtype_mgmt_action_no_ack = 14,
+
+ htt_frm_subtype_data_data = 0,
+ htt_frm_subtype_data_data_cf_ack = 1,
+ htt_frm_subtype_data_data_cf_poll = 2,
+ htt_frm_subtype_data_data_cf_ack_cf_poll = 3,
+ htt_frm_subtype_data_null = 4,
+ htt_frm_subtype_data_cf_ack = 5,
+ htt_frm_subtype_data_cf_poll = 6,
+ htt_frm_subtype_data_cf_ack_cf_poll = 7,
+ htt_frm_subtype_data_QoS_data = 8,
+ htt_frm_subtype_data_QoS_data_cf_ack = 9,
+ htt_frm_subtype_data_QoS_data_cf_poll = 10,
+ htt_frm_subtype_data_QoS_data_cf_ack_cf_poll = 11,
+ htt_frm_subtype_data_QoS_null = 12,
+ htt_frm_subtype_data_QoS_cf_poll = 14,
+ htt_frm_subtype_data_QoS_cf_ack_cf_poll = 15,
+};
+
+/* Legacy OFDM (802.11a/g) rate codes, as carried in the datarate field
+ * of struct ocb_tx_ctrl_hdr_t below. */
+enum htt_ofdm_datarate { /* Value MBPS Modulation Coding*/
+ htt_ofdm_datarate_6_mbps = 0, /* 0 6 BPSK 1/2 */
+ htt_ofdm_datarate_9_mbps = 1, /* 1 9 BPSK 3/4 */
+ htt_ofdm_datarate_12_mbps = 2, /* 2 12 QPSK 1/2 */
+ htt_ofdm_datarate_18_mbps = 3, /* 3 18 QPSK 3/4 */
+ htt_ofdm_datarate_24_mbps = 4, /* 4 24 16-QAM 1/2 */
+ htt_ofdm_datarate_36_mbps = 5, /* 5 36 16-QAM 3/4 */
+ htt_ofdm_datarate_48_mbps = 6, /* 6 48 64-QAM 1/2 */
+ htt_ofdm_datarate_54_mbps = 7, /* 7 54 64-QAM 3/4 */
+ htt_ofdm_datarate_max = 7,
+};
+
+/**
+ * struct ocb_tx_ctrl_hdr_t - TX control header
+ * @version: must be 1
+ * @length: length of this structure
+ * @channel_freq: channel on which to transmit the packet
+ * @valid_pwr: bit 0: if set, tx pwr spec is valid
+ * @valid_datarate: bit 1: if set, tx MCS mask spec is valid
+ * @valid_retries: bit 2: if set, tx retries spec is valid
+ * @valid_chain_mask: bit 3: if set, chain mask is valid
+ * @valid_expire_tsf: bit 4: if set, tx expire TSF spec is valid
+ * @valid_tid: bit 5: if set, TID is valid
+ * @reserved0_15_6: bits 15:6 - unused, set to 0x0
+ * @all_flags: union of all the flags
+ * @expire_tsf_lo: TX expiry time (TSF) LSBs
+ * @expire_tsf_hi: TX expiry time (TSF) MSBs
+ * @pwr: Specify what power the tx frame needs to be transmitted
+ * at. The power a signed (two's complement) value is in
+ * units of 0.5 dBm. The value needs to be appropriately
+ * sign-extended when extracting the value from the message
+ * and storing it in a variable that is larger than A_INT8.
+ * If the transmission uses multiple tx chains, this power
+ * spec is the total transmit power, assuming incoherent
+ * combination of per-chain power to produce the total
+ * power.
+ * @datarate: The desired modulation and coding scheme.
+ * VALUE DATA RATE MODULATION CODING RATE
+ * @ 20 MHz
+ * (MBPS)
+ * 0 6 BPSK 1/2
+ * 1 9 BPSK 3/4
+ * 2 12 QPSK 1/2
+ * 3 18 QPSK 3/4
+ * 4 24 16-QAM 1/2
+ * 5 36 16-QAM 3/4
+ * 6 48 64-QAM 1/2
+ * 7 54 64-QAM 3/4
+ * @retry_limit: Specify the maximum number of transmissions, including
+ * the initial transmission, to attempt before giving up if
+ * no ack is received.
+ * If the tx rate is specified, then all retries shall use
+ * the same rate as the initial transmission.
+ * If no tx rate is specified, the target can choose
+ * whether to retain the original rate during the
+ * retransmissions, or to fall back to a more robust rate.
+ * @chain_mask: specify which chains to transmit from
+ * @ext_tid: Extended Traffic ID (0-15)
+ * @reserved: Ensure that the size of the structure is a multiple of
+ * 4. Must be 0.
+ *
+ * When sending an OCB packet, the user application has
+ * the option of including the following struct following an ethernet header
+ * with the proto field set to 0x8151. This struct includes various TX
+ * parameters including the TX power and MCS.
+ */
+/* Packed wire-format structure - see the kernel-doc above for full
+ * field semantics. Layout must not change. */
+PREPACK struct ocb_tx_ctrl_hdr_t {
+ uint16_t version; /* must be 1 */
+ uint16_t length; /* length of this structure */
+ uint16_t channel_freq; /* channel on which to transmit */
+
+ union {
+ struct {
+ uint16_t
+ valid_pwr:1, /* tx pwr spec is valid */
+ valid_datarate:1, /* tx MCS spec is valid */
+ valid_retries:1, /* tx retries spec is valid */
+ valid_chain_mask:1, /* chain mask is valid */
+ valid_expire_tsf:1, /* expire TSF spec is valid */
+ valid_tid:1, /* TID is valid */
+ reserved0_15_6:10; /* unused, set to 0x0 */
+ };
+ uint16_t all_flags; /* all valid-flag bits at once */
+ };
+
+ uint32_t expire_tsf_lo; /* TX expiry time (TSF) LSBs */
+ uint32_t expire_tsf_hi; /* TX expiry time (TSF) MSBs */
+ int8_t pwr; /* tx power, signed, units of 0.5 dBm */
+ uint8_t datarate; /* see enum htt_ofdm_datarate */
+ uint8_t retry_limit; /* max transmissions incl. the first */
+ uint8_t chain_mask; /* which chains to transmit from */
+ uint8_t ext_tid; /* Extended Traffic ID (0-15) */
+ uint8_t reserved[3]; /* pad to multiple of 4 bytes; must be 0 */
+} POSTPACK;
+
+/**
+ * @brief tx MSDU meta-data that HTT may use to program the FW/HW tx descriptor
+ */
+struct htt_msdu_info_t {
+ /* the info sub-struct specifies the characteristics of the MSDU */
+ struct {
+ /* EtherType of the MSDU payload */
+ uint16_t ethertype;
+#define HTT_INVALID_PEER_ID 0xffff
+ /* destination peer ID; HTT_INVALID_PEER_ID if not known */
+ uint16_t peer_id;
+ /* ID of the virtual device transmitting this MSDU */
+ uint8_t vdev_id;
+ /* extended TID (traffic ID) of the MSDU */
+ uint8_t ext_tid;
+ /*
+ * l2_hdr_type - L2 format (802.3, native WiFi 802.11,
+ * or raw 802.11)
+ * Based on attach-time configuration, the tx frames provided
+ * by the OS to the tx data SW are expected to be either
+ * 802.3 format or the "native WiFi" variant of 802.11 format.
+ * Internally, the driver may also inject tx frames into the tx
+ * datapath, and these frames may be either 802.3 format or
+ * 802.11 "raw" format, with no further 802.11 encapsulation
+ * needed.
+ * The tx frames are tagged with their frame format, so target
+ * FW/HW will know how to interpret the packet's encapsulation
+ * headers when doing tx classification, and what form of 802.11
+ * header encapsulation is needed, if any.
+ */
+ uint8_t l2_hdr_type; /* enum htt_pkt_type */
+ /*
+ * frame_type - is the tx frame management or data?
+ * Just to avoid confusion, the enum values for this frame type
+ * field use the 802.11 frame type values, although it is
+ * unexpected for control frames to be sent through the host
+ * data path.
+ */
+ uint8_t frame_type; /* enum htt_frm_type */
+ /*
+ * frame subtype - this field specifies the sub-type of
+ * management frames
+ * Just to avoid confusion, the enum values for this frame
+ * subtype field use the 802.11 management frame subtype values.
+ */
+ uint8_t frame_subtype; /* enum htt_frm_subtype */
+ /* flag: whether the WLAN destination is unicast
+ * (NOTE(review): 0/1 encoding presumed - confirm) */
+ uint8_t is_unicast;
+
+ /* dest_addr is not currently used.
+ * It could be used as an input to a Tx BD (Riva tx descriptor)
+ * signature computation.
+ uint8_t *dest_addr;
+ */
+
+ uint8_t l3_hdr_offset; /* wrt cdf_nbuf_data(msdu), in bytes */
+
+ /* l4_hdr_offset is not currently used.
+ * It could be used to specify to a TCP/UDP checksum computation
+ * engine where the TCP/UDP header starts.
+ */
+ /* uint8_t l4_hdr_offset; - wrt cdf_nbuf_data(msdu), in bytes */
+ } info;
+ /* the action sub-struct specifies how to process the MSDU */
+ struct {
+ uint8_t use_6mbps; /* mgmt frames: option to force
+ 6 Mbps rate */
+ /* flag: request encryption of this frame */
+ uint8_t do_encrypt;
+ /* flag: request a tx completion for this frame */
+ uint8_t do_tx_complete;
+ /* flag: tx completion requested (NOTE(review): relation
+ * to do_tx_complete not evident from this header - confirm
+ * against the HTT h2t implementation) */
+ uint8_t tx_comp_req;
+
+ /*
+ * cksum_offload - Specify whether checksum offload is
+ * enabled or not
+ * Target FW uses this flag to turn on HW checksumming
+ * 0x0 - No checksum offload
+ * 0x1 - L3 header checksum only
+ * 0x2 - L4 checksum only
+ * 0x3 - L3 header checksum + L4 checksum
+ */
+ cdf_nbuf_tx_cksum_t cksum_offload;
+ } action;
+};
+
+/**
+ * @brief Trace-log every field of a HTT tx MSDU meta-data object.
+ * @details
+ * Emits one CDF_TRACE line per field at INFO_LOW verbosity.
+ * Purely diagnostic; the object is not modified.
+ *
+ * @param msdu_info - the tx MSDU meta-data object to dump
+ */
+static inline void htt_msdu_info_dump(struct htt_msdu_info_t *msdu_info)
+{
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+ "HTT MSDU info object (%p)\n", msdu_info);
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+ " ethertype: %#x\n", msdu_info->info.ethertype);
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+ " peer_id: %d\n", msdu_info->info.peer_id);
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+ " vdev_id: %d\n", msdu_info->info.vdev_id);
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+ " ext_tid: %d\n", msdu_info->info.ext_tid);
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+ " l2_hdr_type: %d\n", msdu_info->info.l2_hdr_type);
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+ " frame_type: %d\n", msdu_info->info.frame_type);
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+ " frame_subtype: %d\n", msdu_info->info.frame_subtype);
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+ " is_unicast: %u\n", msdu_info->info.is_unicast);
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+ " l3_hdr_offset: %u\n", msdu_info->info.l3_hdr_offset);
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+ " use 6 Mbps: %d\n", msdu_info->action.use_6mbps);
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+ " do_encrypt: %d\n", msdu_info->action.do_encrypt);
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+ " do_tx_complete: %d\n", msdu_info->action.do_tx_complete);
+ /* fix: the original repeated the "is_unicast" line twice here
+ * (copy-paste error) and never logged the remaining action
+ * fields; log tx_comp_req and cksum_offload instead */
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+ " tx_comp_req: %d\n", msdu_info->action.tx_comp_req);
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+ " cksum_offload: %d\n", (int)msdu_info->action.cksum_offload);
+}
+
+/*================ tx completion message field access methods ===============*/
+
+/**
+ * @brief Look up the descriptor ID of the nth MSDU from a tx completion msg.
+ * @details
+ * A tx completion message tells the host that the target is done
+ * transmitting a series of MSDUs. The message uses a descriptor ID
+ * to identify each such MSDU. This function/macro is used to
+ * find the ID of one such MSDU referenced by the tx completion message.
+ *
+ * @param iterator - tx completion message context provided by HTT to the
+ * tx completion message handler. This abstract reference to the
+ * HTT tx completion message's payload allows the data SW's tx
+ * completion handler to not care about the format of the HTT
+ * tx completion message.
+ * @param num - (zero-based) index to specify a single MSDU within the
+ * series of MSDUs referenced by the tx completion message
+ * @return descriptor ID for the specified MSDU
+ */
+uint16_t htt_tx_compl_desc_id(void *iterator, int num);
+
+/*========================= tx descriptor operations ========================*/
+
+/**
+ * @brief Allocate a HTT abstract tx descriptor.
+ * @details
+ * Allocate a HTT abstract tx descriptor from a pool within "consistent"
+ * memory, which is accessible by HIF and/or MAC DMA as well as by the
+ * host CPU.
+ * It is expected that the tx datapath will allocate HTT tx descriptors
+ * and link them with datapath SW tx descriptors up front as the driver
+ * is loaded. Thereafter, the link from datapath SW tx descriptor to
+ * HTT tx descriptor will be maintained until the driver is unloaded.
+ *
+ * @param htt_pdev - handle to the HTT instance making the allocation
+ * @param[OUT] paddr_lo - physical address of the HTT descriptor
+ * @return success -> descriptor handle, -OR- failure -> NULL
+ */
+void *htt_tx_desc_alloc(htt_pdev_handle pdev, uint32_t *paddr_lo,
+ uint16_t index);
+
+/**
+ * @brief Free a HTT abstract tx descriptor.
+ *
+ * @param htt_pdev - handle to the HTT instance that made the allocation
+ * @param htt_tx_desc - the descriptor to free
+ */
+void htt_tx_desc_free(htt_pdev_handle htt_pdev, void *htt_tx_desc);
+
+#if defined(HELIUMPLUS_PADDR64)
+/**
+ * @brief Allocate TX frag descriptor
+ * @details
+ * Allocate TX frag descriptor
+ *
+ * @param pdev - handle to the HTT instance that made the allocation
+ * @param index - tx descriptor index
+ * @param frag_paddr_lo - fragment descriptor physical address lower 32bits
+ * @param frag_ptr - fragment descriptor hlos pointer
+ * @return success 0
+ */
+int htt_tx_frag_alloc(htt_pdev_handle pdev,
+	u_int16_t index, u_int32_t *frag_paddr_lo, void **frag_ptr);
+#else
+/* Stub for non-HELIUMPLUS_PADDR64 builds: no separate frag descriptor
+ * exists, so report "no fragment pointer" and succeed unconditionally.
+ */
+static inline int htt_tx_frag_alloc(htt_pdev_handle pdev,
+	u_int16_t index, u_int32_t *frag_paddr_lo, void **frag_ptr)
+{
+	*frag_ptr = NULL;
+	return 0;
+}
+#endif /* defined(HELIUMPLUS_PADDR64) */
+/**
+ * @brief Discard all tx frames in the process of being downloaded.
+ * @details
+ * This function discards any tx frames queued in HTT or the layers
+ * under HTT.
+ * The download completion callback is invoked on these frames.
+ *
+ * @param pdev - handle to the HTT instance
+ */
+void htt_tx_pending_discard(htt_pdev_handle pdev);
+
+/**
+ * @brief Download a MSDU descriptor and (a portion of) the MSDU payload.
+ * @details
+ * This function is used within LL systems to download a tx descriptor and
+ * the initial portion of the tx MSDU payload, and within HL systems to
+ * download the tx descriptor and the entire tx MSDU payload.
+ * The HTT layer determines internally how much of the tx descriptor
+ * actually needs to be downloaded. In particular, the HTT layer does not
+ * download the fragmentation descriptor, and only for the LL case downloads
+ * the physical address of the fragmentation descriptor.
+ * In HL systems, the tx descriptor and the entire frame are downloaded.
+ * In LL systems, only the tx descriptor and the header of the frame are
+ * downloaded. To determine how much of the tx frame to download, this
+ * function assumes the tx frame is the default frame type, as specified
+ * by ol_cfg_frame_type. "Raw" frames need to be transmitted through the
+ * alternate htt_tx_send_nonstd function.
+ * The tx descriptor has already been attached to the cdf_nbuf object during
+ * a preceding call to htt_tx_desc_init.
+ *
+ * @param htt_pdev - the handle of the physical device sending the tx data
+ * @param msdu - the frame being transmitted
+ * @param msdu_id - unique ID for the frame being transmitted
+ * @return 0 -> success, -OR- 1 -> failure
+ */
+int
+htt_tx_send_std(htt_pdev_handle htt_pdev, cdf_nbuf_t msdu, uint16_t msdu_id);
+
+/**
+ * @brief Download a Batch Of Tx MSDUs
+ * @details
+ * Each MSDU already has the MSDU ID stored in the headroom of the
+ * netbuf data buffer, and has the HTT tx descriptor already attached
+ * as a prefix fragment to the netbuf.
+ *
+ * @param htt_pdev - the handle of the physical device sending the tx data
+ * @param head_msdu - the MSDU Head for Tx batch being transmitted
+ * @param num_msdus - The total Number of MSDU's provided for batch tx
+ * @return null-terminated linked-list of unaccepted frames
+ */
+cdf_nbuf_t
+htt_tx_send_batch(htt_pdev_handle htt_pdev,
+ cdf_nbuf_t head_msdu, int num_msdus);
+
+/* The htt scheduler for queued packets in htt
+ * htt when unable to send to HTC because of lack of resource
+ * forms a nbuf queue which is flushed when a tx completion event from
+ * the target is received
+ */
+
+void htt_tx_sched(htt_pdev_handle pdev);
+
+/**
+ * @brief Same as htt_tx_send_std, but can handle raw frames.
+ */
+int
+htt_tx_send_nonstd(htt_pdev_handle htt_pdev,
+ cdf_nbuf_t msdu,
+ uint16_t msdu_id, enum htt_pkt_type pkt_type);
+
+/**
+ * htt_pkt_dl_len_get() Gets the HTT PKT download length.
+ * @pdev: pointer to struct htt_pdev_t
+ *
+ * Return: size of HTT packet download length.
+ */
+int
+htt_pkt_dl_len_get(struct htt_pdev_t *pdev);
+
+#define HTT_TX_CLASSIFY_BIT_S 4 /* Used to set
+ * classify bit in HTT desc.*/
+
+/**
+ * enum htt_ce_tx_pkt_type - enum of packet types to be set in CE
+ * descriptor
+ * @tx_pkt_type_raw: Value set for RAW frames
+ * @tx_pkt_type_native_wifi: Value set for NATIVE WIFI frames
+ * @tx_pkt_type_eth2: Value set for Ethernet II frames (mostly default)
+ * @tx_pkt_type_802_3: Value set for 802.3 / original ethernet frames
+ * @tx_pkt_type_mgmt: Value set for MGMT frames over HTT
+ *
+ */
+enum htt_ce_tx_pkt_type {
+ tx_pkt_type_raw = 0,
+ tx_pkt_type_native_wifi = 1,
+ tx_pkt_type_eth2 = 2,
+ tx_pkt_type_802_3 = 3,
+ tx_pkt_type_mgmt = 4
+};
+
+
+extern const uint32_t htt_to_ce_pkt_type[];
+
+/**
+ * Provide a constant to specify the offset of the HTT portion of the
+ * HTT tx descriptor, to avoid having to export the descriptor definition.
+ * The htt module checks internally that this exported offset is consistent
+ * with the private tx descriptor definition.
+ *
+ * Similarly, export a definition of the HTT tx descriptor size, and then
+ * check internally that this exported constant matches the private tx
+ * descriptor definition.
+ */
+#define HTT_TX_DESC_VADDR_OFFSET 8
+
+/**
+ * htt_tx_desc_init() - Initialize the per packet HTT Tx descriptor
+ * @pdev: The handle of the physical device sending the
+ * tx data
+ * @htt_tx_desc: Abstract handle to the tx descriptor
+ * @htt_tx_desc_paddr_lo: Physical address of the HTT tx descriptor
+ * @msdu_id: ID to tag the descriptor with.
+ * The FW sends this ID back to host as a cookie
+ * during Tx completion, which the host uses to
+ * identify the MSDU.
+ * This ID is an index into the OL Tx desc. array.
+ * @msdu: The MSDU that is being prepared for transmission
+ * @msdu_info: Tx MSDU meta-data
+ * @tso_info: Storage for TSO meta-data
+ *
+ * This function initializes the HTT tx descriptor.
+ * HTT Tx descriptor is a host-f/w interface structure, and meta-data
+ * accompanying every packet downloaded to f/w via the HTT interface.
+ */
+static inline
+void
+htt_tx_desc_init(htt_pdev_handle pdev,
+		 void *htt_tx_desc,
+		 uint32_t htt_tx_desc_paddr_lo,
+		 uint16_t msdu_id,
+		 cdf_nbuf_t msdu, struct htt_msdu_info_t *msdu_info,
+		 struct cdf_tso_info_t *tso_info,
+		 struct ocb_tx_ctrl_hdr_t *tx_ctrl,
+		 uint8_t is_dsrc)
+{
+	uint8_t pkt_type, pkt_subtype = 0;
+	/*
+	 * Fix: ce_pkt_type must be 32 bits wide. It is compared against
+	 * the 0xffffffff "invalid" marker from htt_to_ce_pkt_type[]; as a
+	 * uint8_t the comparison could never be true, so invalid packet
+	 * types were silently passed through.
+	 */
+	uint32_t ce_pkt_type = 0;
+	uint32_t hw_classify = 0, data_attr = 0;
+	uint32_t *word0, *word1, local_word3;
+#if HTT_PADDR64
+	uint32_t *word4;
+#else /* ! HTT_PADDR64 */
+	uint32_t *word3;
+#endif /* HTT_PADDR64 */
+	uint32_t local_word0, local_word1;
+	/* recover the host tx descriptor that prefixes the HTT tx desc */
+	struct htt_host_tx_desc_t *htt_host_tx_desc =
+		(struct htt_host_tx_desc_t *)
+		(((char *)htt_tx_desc) - HTT_TX_DESC_VADDR_OFFSET);
+	bool desc_ext_required = (tx_ctrl && tx_ctrl->all_flags != 0);
+
+	word0 = (uint32_t *) htt_tx_desc;
+	word1 = word0 + 1;
+	/*
+	 * word2 is frag desc pointer
+	 * word3 or 4 is peer_id
+	 */
+#if HTT_PADDR64
+	word4 = word0 + 4;      /* Dword 3 */
+#else /* ! HTT_PADDR64 */
+	word3 = word0 + 3;      /* Dword 3 */
+#endif /* HTT_PADDR64 */
+
+	/* NOTE(review): msdu_info is dereferenced here, before the
+	 * "if (msdu_info)" guard below - callers must pass a valid pointer.
+	 */
+	pkt_type = msdu_info->info.l2_hdr_type;
+
+	if (cdf_likely(pdev->cfg.ce_classify_enabled)) {
+		if (cdf_likely(pkt_type == htt_pkt_type_eth2 ||
+			       pkt_type == htt_pkt_type_ethernet))
+			cdf_nbuf_tx_info_get(msdu, pkt_type, pkt_subtype,
+					     hw_classify);
+
+		ce_pkt_type = htt_to_ce_pkt_type[pkt_type];
+		if (0xffffffff == ce_pkt_type) {
+			CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_DEBUG,
+				  "Invalid HTT pkt type %d\n", pkt_type);
+			return;
+		}
+	}
+
+	/*
+	 * HTT Tx Desc is in uncached memory. Used cached writes per word, to
+	 * reduce unnecessary memory access.
+	 */
+
+	local_word0 = 0;
+	if (msdu_info) {
+		HTT_H2T_MSG_TYPE_SET(local_word0, HTT_H2T_MSG_TYPE_TX_FRM);
+		HTT_TX_DESC_PKT_TYPE_SET(local_word0, pkt_type);
+		HTT_TX_DESC_PKT_SUBTYPE_SET(local_word0, pkt_subtype);
+		HTT_TX_DESC_VDEV_ID_SET(local_word0, msdu_info->info.vdev_id);
+		/* tx_ctrl's tid (when valid) overrides the MSDU info tid */
+		if (tx_ctrl && tx_ctrl->valid_tid)
+			HTT_TX_DESC_EXT_TID_SET(local_word0, tx_ctrl->ext_tid);
+		else
+			HTT_TX_DESC_EXT_TID_SET(local_word0,
+						msdu_info->info.ext_tid);
+		HTT_TX_DESC_EXTENSION_SET(local_word0, desc_ext_required);
+		/*
+		 * Fix: removed a second, unconditional
+		 * HTT_TX_DESC_EXT_TID_SET(local_word0, msdu_info->info.ext_tid)
+		 * that clobbered the tx_ctrl tid selected just above.
+		 */
+		HTT_TX_DESC_CKSUM_OFFLOAD_SET(local_word0,
+					      msdu_info->action.cksum_offload);
+		HTT_TX_DESC_NO_ENCRYPT_SET(local_word0,
+					   msdu_info->action.do_encrypt ?
+					   0 : 1);
+	}
+
+	*word0 = local_word0;
+
+	local_word1 = 0;
+
+#if defined(FEATURE_TSO)
+	if (tso_info->is_tso)
+		HTT_TX_DESC_FRM_LEN_SET(local_word1, tso_info->total_len);
+	else
+#endif
+		HTT_TX_DESC_FRM_LEN_SET(local_word1, cdf_nbuf_len(msdu));
+
+	HTT_TX_DESC_FRM_ID_SET(local_word1, msdu_id);
+	*word1 = local_word1;
+
+	/* Initialize peer_id to INVALID_PEER because
+	   this is NOT Reinjection path */
+	local_word3 = HTT_INVALID_PEER;
+	if (tx_ctrl && tx_ctrl->channel_freq)
+		HTT_TX_DESC_CHAN_FREQ_SET(local_word3, tx_ctrl->channel_freq);
+#if HTT_PADDR64
+	*word4 = local_word3;
+#else /* ! HTT_PADDR64 */
+	*word3 = local_word3;
+#endif /* HTT_PADDR64 */
+
+	/*
+	 * If any of the tx control flags are set, then we need the extended
+	 * HTT header.
+	 */
+	if (desc_ext_required) {
+		struct htt_tx_msdu_desc_ext_t local_desc_ext = {0};
+
+		/*
+		 * Copy the info that was read from TX control header from the
+		 * user application to the extended HTT header.
+		 * First copy everything
+		 * to a local temp structure, and then copy everything to the
+		 * actual uncached structure in one go to save memory writes.
+		 */
+		local_desc_ext.valid_pwr = tx_ctrl->valid_pwr;
+		local_desc_ext.valid_mcs_mask = tx_ctrl->valid_datarate;
+		local_desc_ext.valid_retries = tx_ctrl->valid_retries;
+		local_desc_ext.valid_expire_tsf = tx_ctrl->valid_expire_tsf;
+		local_desc_ext.valid_chainmask = tx_ctrl->valid_chain_mask;
+
+		local_desc_ext.pwr = tx_ctrl->pwr;
+		if (tx_ctrl->valid_datarate &&
+		    tx_ctrl->datarate <= htt_ofdm_datarate_max)
+			local_desc_ext.mcs_mask =
+				(1 << (tx_ctrl->datarate + 4));
+		local_desc_ext.retry_limit = tx_ctrl->retry_limit;
+		local_desc_ext.expire_tsf_lo = tx_ctrl->expire_tsf_lo;
+		local_desc_ext.expire_tsf_hi = tx_ctrl->expire_tsf_hi;
+		local_desc_ext.chain_mask = tx_ctrl->chain_mask;
+
+		local_desc_ext.is_dsrc = (is_dsrc != 0);
+
+		cdf_nbuf_push_head(msdu, sizeof(local_desc_ext));
+		cdf_mem_copy(cdf_nbuf_data(msdu), &local_desc_ext,
+			     sizeof(local_desc_ext));
+	}
+
+	/*
+	 * Specify that the data provided by the OS is a bytestream,
+	 * and thus should not be byte-swapped during the HIF download
+	 * even if the host is big-endian.
+	 * There could be extra fragments added before the OS's fragments,
+	 * e.g. for TSO, so it's incorrect to clear the frag 0 wordstream flag.
+	 * Instead, clear the wordstream flag for the final fragment, which
+	 * is certain to be (one of the) fragment(s) provided by the OS.
+	 * Setting the flag for this final fragment suffices for specifying
+	 * all fragments provided by the OS rather than added by the driver.
+	 */
+	cdf_nbuf_set_frag_is_wordstream(msdu, cdf_nbuf_get_num_frags(msdu) - 1,
+					0);
+
+	/* store a link to the HTT tx descriptor within the netbuf */
+	cdf_nbuf_frag_push_head(msdu, sizeof(struct htt_host_tx_desc_t),
+				(char *)htt_host_tx_desc, /* virtual addr */
+				htt_tx_desc_paddr_lo,
+				0 /* phys addr MSBs - n/a */);
+
+	/*
+	 * Indicate that the HTT header (and HTC header) is a meta-data
+	 * "wordstream", i.e. series of uint32_t, rather than a data
+	 * bytestream.
+	 * This allows the HIF download to byteswap the HTT + HTC headers if
+	 * the host is big-endian, to convert to the target's little-endian
+	 * format.
+	 */
+	cdf_nbuf_set_frag_is_wordstream(msdu, 0, 1);
+
+	if (cdf_likely(pdev->cfg.ce_classify_enabled &&
+		       (msdu_info->info.l2_hdr_type != htt_pkt_type_mgmt))) {
+		uint32_t pkt_offset = cdf_nbuf_get_frag_len(msdu, 0);
+		data_attr = hw_classify << CDF_CE_TX_CLASSIFY_BIT_S;
+		data_attr |= ce_pkt_type << CDF_CE_TX_PKT_TYPE_BIT_S;
+		data_attr |= pkt_offset << CDF_CE_TX_PKT_OFFSET_BIT_S;
+	}
+
+	cdf_nbuf_data_attr_set(msdu, data_attr);
+}
+
+/**
+ * @brief Set a flag to indicate that the MSDU in question was postponed.
+ * @details
+ * In systems in which the host retains its tx frame until the target sends
+ * a tx completion, the target has the option of discarding its copy of
+ * the tx descriptor (and frame, for HL) and sending a "postpone" message
+ * to the host, to inform the host that it must eventually download the
+ * tx descriptor (and frame, for HL).
+ * Before the host downloads the postponed tx desc/frame again, it will use
+ * this function to set a flag in the HTT tx descriptor indicating that this
+ * is a re-send of a postponed frame, rather than a new frame. The target
+ * uses this flag to keep the correct order between re-sent and new tx frames.
+ * This function is relevant for LL systems.
+ *
+ * @param pdev - the handle of the physical device sending the tx data
+ * @param desc - abstract handle to the tx descriptor
+ */
+void htt_tx_desc_flag_postponed(htt_pdev_handle pdev, void *desc);
+
+/**
+ * @brief Set a flag to tell the target that more tx downloads are en route.
+ * @details
+ * At times, particularly in response to a U-APSD trigger in a HL system, the
+ * host will download multiple tx descriptors (+ frames, in HL) in a batch.
+ * The host will use this function to set a "more" flag in the initial
+ * and interior frames of the batch, to tell the target that more tx frame
+ * downloads within the batch are imminent.
+ *
+ * @param pdev - the handle of the physical device sending the tx data
+ * @param desc - abstract handle to the tx descriptor
+ */
+void htt_tx_desc_flag_batch_more(htt_pdev_handle pdev, void *desc);
+
+/**
+ * @brief Specify the number of fragments in the fragmentation descriptor.
+ * @details
+ * Specify the number of fragments within the MSDU, i.e. the number of
+ * elements within the fragmentation descriptor.
+ * For LL, this is used to terminate the list of fragments used by the
+ * HW's tx MAC DMA.
+ * For HL, this is used to terminate the list of fragments provided to
+ * HTC for download.
+ *
+ * @param pdev - the handle of the physical device sending the tx data
+ * @param desc - abstract handle to the tx descriptor
+ * @param num_frags - the number of fragments comprising the MSDU
+ */
+static inline
+void
+htt_tx_desc_num_frags(htt_pdev_handle pdev, void *desc, uint32_t num_frags)
+{
+	/*
+	 * Set the element after the valid frag elems to 0x0,
+	 * to terminate the list of fragments.
+	 */
+#if defined(HELIUMPLUS_PADDR64)
+	if (HTT_WIFI_IP(pdev, 2, 0)) {
+		/** Skip TSO related 4 dwords WIFI2.0*/
+		desc = (void *)&(((struct msdu_ext_desc_t *)desc)->frag_ptr0);
+		/* Frag ptr is 48 bit wide so clear the next dword as well */
+		/* each frag element is 8 bytes, hence (num_frags << 3) */
+		*((uint32_t *)(((char *)desc) + (num_frags << 3))) = 0;
+		*((uint32_t *)
+		  (((char *)desc) + (num_frags << 3) + sizeof(uint32_t))) = 0;
+		/* TODO: OKA: remove the magic constants */
+	} else {
+		/* XXXOKA -- Looks like a bug, called with htt_frag_desc */
+		/* frag list starts HTT_TX_DESC_LEN bytes into the desc;
+		 * 8 bytes (addr + len dwords) per fragment element */
+		*((u_int32_t *)
+		  (((char *) desc) + HTT_TX_DESC_LEN + num_frags * 8)) = 0;
+	}
+#else /* ! HELIUMPLUS_PADDR64 */
+	/* zero the addr dword of the element past the last valid fragment */
+	*((uint32_t *)
+	  (((char *)desc) + HTT_TX_DESC_LEN + num_frags * 8)) = 0;
+#endif /* HELIUMPLUS_PADDR64 */
+}
+
+/* checksum offload flags for hw */
+#define IPV4_CSUM_EN 0x00010000
+#define UDP_IPV4_CSUM_EN 0x00020000
+#define UDP_IPV6_CSUM_EN 0x00040000
+#define TCP_IPV4_CSUM_EN 0x00080000
+#define TCP_IPV6_CSUM_EN 0x00100000
+#define PARTIAL_CSUM_EN 0x00200000
+
+/**
+ * @brief Specify the location and size of a fragment of a tx MSDU.
+ * @details
+ * In LL systems, the tx MAC DMA needs to know how the MSDU is constructed
+ * from fragments.
+ * In LL and HL systems, the HIF's download DMA to the target (LL: tx desc
+ * + header of tx payload; HL: tx desc + entire tx payload) needs to know
+ * where to find the fragments to download.
+ * The tx data SW uses this function to specify the location and size of
+ * each of the MSDU's fragments.
+ *
+ * @param pdev - the handle of the physical device sending the tx data
+ * @param desc - abstract handle to the HTT tx descriptor
+ * @param frag_num - which fragment is being specified (zero-based indexing)
+ * @param frag_phys_addr - DMA/physical address of the fragment
+ * @param frag_len - number of bytes within the fragment
+ */
+static inline
+void
+htt_tx_desc_frag(htt_pdev_handle pdev,
+		 void *desc,
+		 int frag_num, uint32_t frag_phys_addr, uint16_t frag_len)
+{
+	u_int32_t *word;
+
+#if defined(HELIUMPLUS_PADDR64)
+	if (HTT_WIFI_IP(pdev, 2, 0)) {
+		word = (u_int32_t *)(desc);
+		/* Initialize top 6 words of TSO flags per packet */
+		*word++ = 0;
+		*word++ = 0;
+		*word++ = 0;
+		/* word 3 carries the HW checksum-enable flags, set only
+		 * when the ctrl pdev config requests checksum offload */
+		if (((struct txrx_pdev_cfg_t *)(pdev->ctrl_pdev))
+		    ->ip_tcp_udp_checksum_offload)
+			*word |= (IPV4_CSUM_EN | TCP_IPV4_CSUM_EN |
+				  TCP_IPV6_CSUM_EN | UDP_IPV4_CSUM_EN |
+				  UDP_IPV6_CSUM_EN);
+		else
+			*word = 0;
+		word++;
+		*word++ = 0;
+		*word++ = 0;
+
+		/* after the 6 TSO dwords, word must line up with the
+		 * first fragment pointer of the extension descriptor */
+		cdf_assert_always(word == &(((struct msdu_ext_desc_t *)
+					     desc)->frag_ptr0));
+
+		/* Each fragment consumes 2 DWORDS */
+		word += (frag_num << 1);
+		*word = frag_phys_addr;
+
+		word++;
+		/* length occupies the upper 16 bits of the second dword */
+		*word = (frag_len<<16);
+
+	} else {
+		/* For Helium+, this block cannot exist */
+		CDF_ASSERT(0);
+	}
+#else /* !defined(HELIUMPLUS_PADDR64) */
+	/* legacy layout: 8-byte {addr, len} pairs start HTT_TX_DESC_LEN
+	 * bytes into the descriptor */
+	word = (uint32_t *) (((char *)desc) + HTT_TX_DESC_LEN + frag_num * 8);
+	*word = frag_phys_addr;
+	word++;
+	*word = frag_len;
+#endif /* defined(HELIUMPLUS_PADDR64) */
+}
+
+void htt_tx_desc_frags_table_set(htt_pdev_handle pdev,
+ void *desc,
+ uint32_t paddr,
+ uint32_t frag_desc_paddr_lo,
+ int reset);
+
+/**
+ * @brief Specify the type and subtype of a tx frame.
+ *
+ * @param pdev - the handle of the physical device sending the tx data
+ * @param type - format of the MSDU (802.3, native WiFi, raw, or mgmt)
+ * @param sub_type - sub_type (relevant for raw frames)
+ */
+static inline
+void
+htt_tx_desc_type(htt_pdev_handle pdev,
+		 void *htt_tx_desc, enum wlan_frm_fmt type, uint8_t sub_type)
+{
+	/* Read-modify-write the first descriptor dword: wipe the stale
+	 * pkt type/subtype fields, encode the new values, write back. */
+	uint32_t desc_word = *((uint32_t *) htt_tx_desc);
+
+	desc_word &= ~(HTT_TX_DESC_PKT_TYPE_M | HTT_TX_DESC_PKT_SUBTYPE_M);
+	HTT_TX_DESC_PKT_TYPE_SET(desc_word, type);
+	HTT_TX_DESC_PKT_SUBTYPE_SET(desc_word, sub_type);
+
+	*((uint32_t *) htt_tx_desc) = desc_word;
+}
+
+/***** TX MGMT DESC management APIs ****/
+
+/* Number of mgmt descriptors in the pool */
+#define HTT_MAX_NUM_MGMT_DESCS 32
+
+/** htt_tx_mgmt_desc_pool_alloc
+ * @description - allocates the memory for mgmt frame descriptors
+ * @param - htt pdev object
+ * @param - num of descriptors to be allocated in the pool
+ */
+void htt_tx_mgmt_desc_pool_alloc(struct htt_pdev_t *pdev, A_UINT32 num_elems);
+
+/** htt_tx_mgmt_desc_alloc
+ * @description - reserves a mgmt descriptor from the pool
+ * @param - htt pdev object
+ * @param - pointer to variable to hold the allocated desc id
+ * @param - pointer to the management frame from UMAC
+ * @return - pointer to the allocated mgmt descriptor
+ */
+cdf_nbuf_t
+htt_tx_mgmt_desc_alloc(struct htt_pdev_t *pdev, A_UINT32 *desc_id,
+ cdf_nbuf_t mgmt_frm);
+
+/** htt_tx_mgmt_desc_free
+ * @description - releases the management descriptor back to the pool
+ * @param - htt pdev object
+ * @param - descriptor ID
+ */
+void
+htt_tx_mgmt_desc_free(struct htt_pdev_t *pdev, A_UINT8 desc_id,
+ A_UINT32 status);
+
+/** htt_tx_mgmt_desc_pool_free
+ * @description - releases all the resources allocated for mgmt desc pool
+ * @param - htt pdev object
+ */
+void htt_tx_mgmt_desc_pool_free(struct htt_pdev_t *pdev);
+
+/**
+ * @brief Provide a buffer to store a 802.11 header added by SW tx encap
+ *
+ * @param htt_tx_desc - which frame the 802.11 header is being added to
+ * @param new_l2_hdr_size - how large the buffer needs to be
+ */
+#define htt_tx_desc_mpdu_header(htt_tx_desc, new_l2_hdr_size) /*NULL*/
+/**
+ * @brief How many tx credits would be consumed by the specified tx frame.
+ *
+ * @param msdu - the tx frame in question
+ * @return number of credits used for this tx frame
+ */
+#define htt_tx_msdu_credit(msdu) 1 /* 1 credit per buffer */
+#ifdef HTT_DBG
+void htt_tx_desc_display(void *tx_desc);
+#else
+#define htt_tx_desc_display(tx_desc)
+#endif
+
+/* Store the peer id into the HTT tx descriptor at its fixed byte offset.
+ * Fix: the byte offset is now applied to a char * base - arithmetic on a
+ * void * pointer is a GCC extension and invalid in standard C.
+ */
+static inline void htt_tx_desc_set_peer_id(void *htt_tx_desc, uint16_t peer_id)
+{
+	uint16_t *peer_id_field_ptr;
+
+	peer_id_field_ptr = (uint16_t *)
+		((char *)htt_tx_desc +
+		 HTT_TX_DESC_PEERID_DESC_PADDR_OFFSET_BYTES);
+
+	*peer_id_field_ptr = peer_id;
+}
+
+/* Store the channel frequency into the HTT tx descriptor, immediately
+ * after the 16-bit peer id field.
+ * Fix: the byte offset is now applied to a char * base - arithmetic on a
+ * void * pointer is a GCC extension and invalid in standard C.
+ */
+static inline
+void htt_tx_desc_set_chanfreq(void *htt_tx_desc, uint16_t chanfreq)
+{
+	uint16_t *chanfreq_field_ptr;
+
+	/* The reason we dont use CHAN_FREQ_OFFSET_BYTES is because
+	   it uses DWORD as unit */
+	/* The reason we dont use the SET macro in htt.h is because
+	   htt_tx_desc is incomplete type */
+	chanfreq_field_ptr = (uint16_t *)
+		((char *)htt_tx_desc +
+		 HTT_TX_DESC_PEERID_DESC_PADDR_OFFSET_BYTES
+		 + sizeof(A_UINT16));
+
+	*chanfreq_field_ptr = chanfreq;
+}
+
+#if defined(FEATURE_TSO)
+void
+htt_tx_desc_fill_tso_info(htt_pdev_handle pdev, void *desc,
+ struct cdf_tso_info_t *tso_info);
+#else
+#define htt_tx_desc_fill_tso_info(pdev, desc, tso_info)
+#endif
+#endif /* _OL_HTT_TX_API__H_ */
diff --git a/dp/ol/inc/ol_osif_api.h b/dp/ol/inc/ol_osif_api.h
new file mode 100644
index 000000000000..9344a5fa5fea
--- /dev/null
+++ b/dp/ol/inc/ol_osif_api.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2012, 2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_osif_api.h
+ * @brief Definitions used in multiple external interfaces to the txrx SW.
+ */
+#ifndef _OL_OSIF_API__H_
+#define _OL_OSIF_API__H_
+
+/**
+ * @typedef ol_osif_vdev_handle
+ * @brief opaque handle for OS shim virtual device object
+ */
+struct ol_osif_vdev_t;
+typedef struct ol_osif_vdev_t *ol_osif_vdev_handle;
+
+#endif /* _OL_OSIF_API__H_ */
diff --git a/dp/ol/inc/ol_params.h b/dp/ol/inc/ol_params.h
new file mode 100644
index 000000000000..89eff22faad1
--- /dev/null
+++ b/dp/ol/inc/ol_params.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/*
+ * Definitions for the Atheros Wireless LAN controller driver.
+ */
+#ifndef _DEV_OL_PARAMS_H
+#define _DEV_OL_PARAMS_H
+#include "ol_txrx_stats.h"
+#include "wlan_defs.h" /* for wlan statst definitions */
+/*
+** Enumeration of PDEV Configuration parameter
+*/
+
+enum ol_ath_param_t {
+ OL_ATH_PARAM_TXCHAINMASK = 0,
+ OL_ATH_PARAM_RXCHAINMASK,
+ OL_ATH_PARAM_TXCHAINMASKLEGACY,
+ OL_ATH_PARAM_RXCHAINMASKLEGACY,
+ OL_ATH_PARAM_CHAINMASK_SEL,
+ OL_ATH_PARAM_AMPDU,
+ OL_ATH_PARAM_AMPDU_LIMIT,
+ OL_ATH_PARAM_AMPDU_SUBFRAMES,
+ OL_ATH_PARAM_LDPC,
+ OL_ATH_PARAM_NON_AGG_SW_RETRY_TH,
+ OL_ATH_PARAM_AGG_SW_RETRY_TH,
+ OL_ATH_PARAM_STA_KICKOUT_TH,
+ OL_ATH_PARAM_WLAN_PROF_ENABLE,
+ OL_ATH_PARAM_LTR_ENABLE,
+ OL_ATH_PARAM_LTR_AC_LATENCY_BE,
+ OL_ATH_PARAM_LTR_AC_LATENCY_BK,
+ OL_ATH_PARAM_LTR_AC_LATENCY_VI,
+ OL_ATH_PARAM_LTR_AC_LATENCY_VO,
+ OL_ATH_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ OL_ATH_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ OL_ATH_PARAM_LTR_SLEEP_OVERRIDE,
+ OL_ATH_PARAM_LTR_RX_OVERRIDE,
+ OL_ATH_PARAM_L1SS_ENABLE,
+ OL_ATH_PARAM_DSLEEP_ENABLE,
+ OL_ATH_PARAM_PCIELP_TXBUF_FLUSH,
+ OL_ATH_PARAM_PCIELP_TXBUF_WATERMARK,
+ OL_ATH_PARAM_PCIELP_TXBUF_TMO_EN,
+ OL_ATH_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ OL_ATH_PARAM_BCN_BURST,
+ OL_ATH_PARAM_ARP_AC_OVERRIDE,
+ OL_ATH_PARAM_TXPOWER_LIMIT2G,
+ OL_ATH_PARAM_TXPOWER_LIMIT5G,
+ OL_ATH_PARAM_TXPOWER_SCALE,
+ OL_ATH_PARAM_DCS,
+ OL_ATH_PARAM_ANI_ENABLE,
+ OL_ATH_PARAM_ANI_POLL_PERIOD,
+ OL_ATH_PARAM_ANI_LISTEN_PERIOD,
+ OL_ATH_PARAM_ANI_OFDM_LEVEL,
+ OL_ATH_PARAM_ANI_CCK_LEVEL,
+ OL_ATH_PARAM_PROXYSTA,
+ OL_ATH_PARAM_DYN_TX_CHAINMASK,
+ OL_ATH_PARAM_VOW_EXT_STATS,
+ OL_ATH_PARAM_PWR_GATING_ENABLE,
+ OL_ATH_PARAM_CHATTER,
+};
+
+/*
+** Enumeration of HAL Configuration parameters
+*/
+
+enum ol_hal_param_t {
+ OL_HAL_CONFIG_DMA_BEACON_RESPONSE_TIME = 0
+};
+
+/*
+** structure to hold all stats information
+** for offload device interface
+*/
+struct ol_stats {
+ int txrx_stats_level;
+ struct ol_txrx_stats txrx_stats;
+ struct wlan_dbg_stats stats;
+};
+#endif /* _DEV_OL_PARAMS_H */
diff --git a/dp/ol/inc/ol_txrx_api.h b/dp/ol/inc/ol_txrx_api.h
new file mode 100644
index 000000000000..289d49e32341
--- /dev/null
+++ b/dp/ol/inc/ol_txrx_api.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2011-2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_txrx_api.h
+ * @brief Definitions used in multiple external interfaces to the txrx SW.
+ */
+#ifndef _OL_TXRX_API__H_
+#define _OL_TXRX_API__H_
+
+/**
+ * @typedef ol_txrx_pdev_handle
+ * @brief opaque handle for txrx physical device object
+ */
+struct ol_txrx_pdev_t;
+typedef struct ol_txrx_pdev_t *ol_txrx_pdev_handle;
+
+/**
+ * @typedef ol_txrx_vdev_handle
+ * @brief opaque handle for txrx virtual device object
+ */
+struct ol_txrx_vdev_t;
+typedef struct ol_txrx_vdev_t *ol_txrx_vdev_handle;
+
+/**
+ * @typedef ol_txrx_peer_handle
+ * @brief opaque handle for txrx peer object
+ */
+struct ol_txrx_peer_t;
+typedef struct ol_txrx_peer_t *ol_txrx_peer_handle;
+
+/**
+ * @brief ADDBA negotiation status, used both during requests and confirmations
+ */
+enum ol_addba_status {
+ /* status: negotiation started or completed successfully */
+ ol_addba_success,
+
+ /* reject: aggregation is not applicable - don't try again */
+ ol_addba_reject,
+
+ /* busy: ADDBA negotiation couldn't be performed - try again later */
+ ol_addba_busy,
+};
+
+enum ol_sec_type {
+ ol_sec_type_none,
+ ol_sec_type_wep128,
+ ol_sec_type_wep104,
+ ol_sec_type_wep40,
+ ol_sec_type_tkip,
+ ol_sec_type_tkip_nomic,
+ ol_sec_type_aes_ccmp,
+ ol_sec_type_wapi,
+
+ /* keep this last! */
+ ol_sec_type_types
+};
+
+/**
+ * @enum ol_tx_spec
+ * @brief indicate what non-standard transmission actions to apply
+ * @details
+ * Indicate one or more of the following:
+ * - The tx frame already has a complete 802.11 header.
+ * Thus, skip 802.3/native-WiFi to 802.11 header encapsulation and
+ * A-MSDU aggregation.
+ * - The tx frame should not be aggregated (A-MPDU or A-MSDU)
+ * - The tx frame is already encrypted - don't attempt encryption.
+ * - The tx frame is a segment of a TCP jumbo frame.
+ * - This tx frame should not be unmapped and freed by the txrx layer
+ * after transmission, but instead given to a registered tx completion
+ * callback.
+ * More than one of these specification can apply, though typically
+ * only a single specification is applied to a tx frame.
+ * A compound specification can be created, as a bit-OR of these
+ * specifications.
+ */
+enum ol_tx_spec {
+ ol_tx_spec_std = 0x0, /* do regular processing */
+ ol_tx_spec_raw = 0x1, /* skip encap + A-MSDU aggr */
+ ol_tx_spec_no_aggr = 0x2, /* skip encap + all aggr */
+ ol_tx_spec_no_encrypt = 0x4, /* skip encap + encrypt */
+ ol_tx_spec_tso = 0x8, /* TCP segmented */
+ ol_tx_spec_nwifi_no_encrypt = 0x10, /* skip encrypt for nwifi */
+ ol_tx_spec_no_free = 0x20, /* give to cb rather than free */
+};
+
+#endif /* _OL_TXRX_API__H_ */
diff --git a/dp/ol/inc/ol_txrx_ctrl_api.h b/dp/ol/inc/ol_txrx_ctrl_api.h
new file mode 100644
index 000000000000..700dcf4a0626
--- /dev/null
+++ b/dp/ol/inc/ol_txrx_ctrl_api.h
@@ -0,0 +1,1332 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_txrx_ctrl_api.h
+ * @brief Define the host data API functions called by the host control SW.
+ */
+#ifndef _OL_TXRX_CTRL_API__H_
+#define _OL_TXRX_CTRL_API__H_
+
+#include <athdefs.h> /* A_STATUS */
+#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <cdf_types.h> /* cdf_device_t */
+#include <htc_api.h> /* HTC_HANDLE */
+
+#include <ol_osif_api.h> /* ol_osif_vdev_handle */
+#include <ol_txrx_api.h> /* ol_txrx_pdev_handle, etc. */
+#include <ol_ctrl_api.h> /* ol_pdev_handle, ol_vdev_handle */
+
+#include <wlan_defs.h> /* MAX_SPATIAL_STREAM */
+
+#define OL_ATH_TX_DRAIN_WAIT_DELAY 50
+
+/* Maximum number of station supported by data path, including BC. */
+#define WLAN_MAX_STA_COUNT (HAL_NUM_STA)
+
+/* The symbolic station ID returned to HDD to specify the packet is bc/mc */
+#define WLAN_RX_BCMC_STA_ID (WLAN_MAX_STA_COUNT + 1)
+
+/* The symbolic station ID returned to HDD to specify the packet is
+ to soft-AP itself */
+#define WLAN_RX_SAP_SELF_STA_ID (WLAN_MAX_STA_COUNT + 2)
+
+/**
+ * enum wlan_op_mode - Virtual device operation mode
+ *
+ * @wlan_op_mode_unknown: Unknown mode
+ * @wlan_op_mode_ap: AP mode
+ * @wlan_op_mode_ibss: IBSS mode
+ * @wlan_op_mode_sta: STA (client) mode
+ * @wlan_op_mode_monitor: Monitor mode
+ * @wlan_op_mode_ocb: OCB mode
+ */
+enum wlan_op_mode {
+ wlan_op_mode_unknown,
+ wlan_op_mode_ap,
+ wlan_op_mode_ibss,
+ wlan_op_mode_sta,
+ wlan_op_mode_monitor,
+ wlan_op_mode_ocb,
+};
+
+#define OL_TXQ_PAUSE_REASON_FW (1 << 0)
+#define OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED (1 << 1)
+#define OL_TXQ_PAUSE_REASON_TX_ABORT (1 << 2)
+#define OL_TXQ_PAUSE_REASON_VDEV_STOP (1 << 3)
+#define OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION (1 << 4)
+
+
+/**
+ * enum netif_action_type - Type of actions on netif queues
+ * @WLAN_STOP_ALL_NETIF_QUEUE: stop all netif queues
+ * @WLAN_START_ALL_NETIF_QUEUE: start all netif queues
+ * @WLAN_WAKE_ALL_NETIF_QUEUE: wake all netif queues
+ * @WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER: stop all queues and off carrier
+ * @WLAN_START_ALL_NETIF_QUEUE_N_CARRIER: start all queues and on carrier
+ * @WLAN_NETIF_TX_DISABLE: disable tx
+ * @WLAN_NETIF_TX_DISABLE_N_CARRIER: disable tx and off carrier
+ * @WLAN_NETIF_CARRIER_ON: on carrier
+ * @WLAN_NETIF_CARRIER_OFF: off carrier
+ */
+enum netif_action_type {
+ WLAN_STOP_ALL_NETIF_QUEUE,
+ WLAN_START_ALL_NETIF_QUEUE,
+ WLAN_WAKE_ALL_NETIF_QUEUE,
+ WLAN_STOP_ALL_NETIF_QUEUE_N_CARRIER,
+ WLAN_START_ALL_NETIF_QUEUE_N_CARRIER,
+ WLAN_NETIF_TX_DISABLE,
+ WLAN_NETIF_TX_DISABLE_N_CARRIER,
+ WLAN_NETIF_CARRIER_ON,
+ WLAN_NETIF_CARRIER_OFF,
+ WLAN_NETIF_ACTION_TYPE_MAX,
+};
+
+/**
+ * enum netif_reason_type - reason for netif queue action
+ * @WLAN_CONTROL_PATH: action from control path
+ * @WLAN_DATA_FLOW_CONTROL: because of flow control
+ * @WLAN_FW_PAUSE: because of firmware pause
+ * @WLAN_TX_ABORT: because of tx abort
+ * @WLAN_VDEV_STOP: because of vdev stop
+ * @WLAN_PEER_UNAUTHORISED: because of peer is unauthorised
+ * @WLAN_THERMAL_MITIGATION: because of thermal mitigation
+ */
+enum netif_reason_type {
+ WLAN_CONTROL_PATH,
+ WLAN_DATA_FLOW_CONTROL,
+ WLAN_FW_PAUSE,
+ WLAN_TX_ABORT,
+ WLAN_VDEV_STOP,
+ WLAN_PEER_UNAUTHORISED,
+ WLAN_THERMAL_MITIGATION,
+ WLAN_REASON_TYPE_MAX,
+};
+
+
+/* command options for dumpStats*/
+#define WLAN_HDD_STATS 0
+#define WLAN_TXRX_STATS 1
+#define WLAN_TXRX_HIST_STATS 2
+#define WLAN_TXRX_TSO_STATS 3
+#define WLAN_HDD_NETIF_OPER_HISTORY 4
+#define WLAN_DUMP_TX_FLOW_POOL_INFO 5
+#define WLAN_TXRX_DESC_STATS 6
+
+ol_txrx_pdev_handle
+ol_txrx_pdev_alloc(ol_pdev_handle ctrl_pdev,
+ HTC_HANDLE htc_pdev, cdf_device_t osdev);
+
+/**
+ * @brief Set up the data SW subsystem.
+ * @details
+ * As part of the WLAN device attach, the data SW subsystem has
+ * to be attached as a component within the WLAN device.
+ * This attach allocates and initializes the physical device object
+ * used by the data SW.
+ * The data SW subsystem attach needs to happen after the target has
+ * been started, and host / target parameter negotiation has completed,
+ * since the host data SW uses some of these host/target negotiated
+ * parameters (e.g. peer ID range) during the initializations within
+ * its attach function.
+ * However, the host data SW is not allowed to send HTC messages to the
+ * target within this pdev_attach function call, since the HTC setup
+ * has not completed at this stage of initializations. Any messaging
+ * to the target has to be done in the separate pdev_attach_target call
+ * that is invoked after HTC setup is complete.
+ *
+ * @param pdev - txrx_pdev handle
+ * @return 0 for success or error code
+ */
+int
+ol_txrx_pdev_attach(ol_txrx_pdev_handle pdev);
+
+/**
+ * @brief Do final steps of data SW setup that send messages to the target.
+ * @details
+ * The majority of the data SW setup are done by the pdev_attach function,
+ * but this function completes the data SW setup by sending datapath
+ * configuration messages to the target.
+ *
+ * @param data_pdev - the physical device being initialized
+ */
+A_STATUS ol_txrx_pdev_attach_target(ol_txrx_pdev_handle data_pdev);
+
+/**
+ * @brief Allocate and initialize the data object for a new virtual device.
+ * @param data_pdev - the physical device the virtual device belongs to
+ * @param vdev_mac_addr - the MAC address of the virtual device
+ * @param vdev_id - the ID used to identify the virtual device to the target
+ * @param op_mode - whether this virtual device is operating as an AP,
+ * an IBSS, or a STA
+ * @return
+ * success: handle to new data vdev object, -OR-
+ * failure: NULL
+ */
+ol_txrx_vdev_handle
+ol_txrx_vdev_attach(ol_txrx_pdev_handle data_pdev,
+ uint8_t *vdev_mac_addr,
+ uint8_t vdev_id, enum wlan_op_mode op_mode);
+
+/**
+ * @brief Allocate and set up references for a data peer object.
+ * @details
+ * When an association with a peer starts, the host's control SW
+ * uses this function to inform the host data SW.
+ * The host data SW allocates its own peer object, and stores a
+ * reference to the control peer object within the data peer object.
+ * The host data SW also stores a reference to the virtual device
+ * that the peer is associated with. This virtual device handle is
+ * used when the data SW delivers rx data frames to the OS shim layer.
+ * The host data SW returns a handle to the new peer data object,
+ * so a reference within the control peer object can be set to the
+ * data peer object.
+ *
+ * @param data_pdev - data physical device object that will indirectly
+ * own the data_peer object
+ * @param data_vdev - data virtual device object that will directly
+ * own the data_peer object
+ * @param peer_mac_addr - MAC address of the new peer
+ * @return handle to new data peer object, or NULL if the attach fails
+ */
+ol_txrx_peer_handle
+ol_txrx_peer_attach(ol_txrx_pdev_handle data_pdev,
+ ol_txrx_vdev_handle data_vdev, uint8_t *peer_mac_addr);
+
+/**
+ * @brief Parameter type to be input to ol_txrx_peer_update
+ * @details
+ * This struct is a union, used to specify various information to update
+ * txrx peer object.
+ */
+union ol_txrx_peer_update_param_t {
+ uint8_t qos_capable;
+ uint8_t uapsd_mask;
+ enum ol_sec_type sec_type;
+};
+
+/**
+ * @brief Parameter type to be input to ol_txrx_peer_update
+ * @details
+ * This enum is used to specify what exact information in
+ * ol_txrx_peer_update_param_t
+ * is used to update the txrx peer object.
+ */
+enum ol_txrx_peer_update_select_t {
+ ol_txrx_peer_update_qos_capable = 1,
+ ol_txrx_peer_update_uapsdMask,
+ ol_txrx_peer_update_peer_security,
+};
+
+/**
+ * @brief Update the data peer object as some information changed in node.
+ * @details
+ * Only a single parameter can be changed for each call to this func.
+ *
+ * @param peer - pointer to the node's object
+ * @param param - new param to be updated in peer object.
+ * @param select - specify which parameter needs to be updated
+ */
+void
+ol_txrx_peer_update(ol_txrx_vdev_handle data_vdev, uint8_t *peer_mac,
+ union ol_txrx_peer_update_param_t *param,
+ enum ol_txrx_peer_update_select_t select);
+
+enum {
+ OL_TX_WMM_AC_BE,
+ OL_TX_WMM_AC_BK,
+ OL_TX_WMM_AC_VI,
+ OL_TX_WMM_AC_VO,
+
+ OL_TX_NUM_WMM_AC
+};
+
+/**
+ * @brief Parameter type to pass WMM setting to ol_txrx_set_wmm_param
+ * @details
+ * The struct is used to specify information to update TX WMM scheduler.
+ */
+struct ol_tx_ac_param_t {
+ uint32_t aifs;
+ uint32_t cwmin;
+ uint32_t cwmax;
+};
+
+struct ol_tx_wmm_param_t {
+ struct ol_tx_ac_param_t ac[OL_TX_NUM_WMM_AC];
+};
+
+/**
+ * @brief Set parameters of WMM scheduler per-AC settings.
+ * @details
+ * This function applies only to HL systems.
+ *
+ * @param data_pdev - the physical device being paused
+ * @param wmm_param - the wmm parameters
+ */
+#define ol_txrx_set_wmm_param(data_pdev, wmm_param) /* no-op */
+
+/**
+ * @brief notify tx data SW that a peer's transmissions are suspended.
+ * @details
+ * This function applies only to HL systems - in LL systems, tx flow control
+ * is handled entirely within the target FW.
+ * The HL host tx data SW is doing tx classification and tx download
+ * scheduling, and therefore also needs to actively participate in tx
+ * flow control. Specifically, the HL tx data SW needs to check whether a
+ * given peer is available to transmit to, or is paused.
+ * This function is used to tell the HL tx data SW when a peer is paused,
+ * so the host tx data SW can hold the tx frames for that peer.
+ *
+ * @param data_peer - which peer is being paused
+ */
+#define ol_txrx_peer_pause(data_peer) /* no-op */
+
+/**
+ * @brief notify tx data SW that a peer-TID is ready to transmit to.
+ * @details
+ * This function applies only to HL systems - in LL systems, tx flow control
+ * is handled entirely within the target FW.
+ * If a peer-TID has tx paused, then the tx datapath will end up queuing
+ * any tx frames that arrive from the OS shim for that peer-TID.
+ * In a HL system, the host tx data SW itself will classify the tx frame,
+ * and determine that it needs to be queued rather than downloaded to the
+ * target for transmission.
+ * Once the peer-TID is ready to accept data, the host control SW will call
+ * this function to notify the host data SW that the queued frames can be
+ * enabled for transmission, or specifically to download the tx frames
+ * to the target to transmit.
+ * The TID parameter is an extended version of the QoS TID. Values 0-15
+ * indicate a regular QoS TID, and the value 16 indicates either non-QoS
+ * data, multicast data, or broadcast data.
+ *
+ * @param data_peer - which peer is being unpaused
+ * @param tid - which TID within the peer is being unpaused, or -1 as a
+ * wildcard to unpause all TIDs within the peer
+ */
+#define ol_txrx_peer_tid_unpause(data_peer, tid) /* no-op */
+
+/**
+ * @brief Tell a paused peer to release a specified number of tx frames.
+ * @details
+ * This function applies only to HL systems - in LL systems, tx flow control
+ * is handled entirely within the target FW.
+ * Download up to a specified maximum number of tx frames from the tx
+ * queues of the specified TIDs within the specified paused peer, usually
+ * in response to a U-APSD trigger from the peer.
+ * It is up to the host data SW to determine how to choose frames from the
+ * tx queues of the specified TIDs. However, the host data SW does need to
+ * provide long-term fairness across the U-APSD enabled TIDs.
+ * The host data SW will notify the target data FW when it is done downloading
+ * the batch of U-APSD triggered tx frames, so the target data FW can
+ * differentiate between an in-progress download versus a case when there are
+ * fewer tx frames available than the specified limit.
+ * This function is relevant primarily to HL U-APSD, where the frames are
+ * held in the host.
+ *
+ * @param peer - which peer sent the U-APSD trigger
+ * @param tid_mask - bitmask of U-APSD enabled TIDs from whose tx queues
+ * tx frames can be released
+ * @param max_frms - limit on the number of tx frames to release from the
+ * specified TID's queues within the specified peer
+ */
+#define ol_txrx_tx_release(peer, tid_mask, max_frms) /* no-op */
+
+/**
+ * @brief Suspend all tx data for the specified virtual device.
+ * @details
+ * This function applies primarily to HL systems, but also applies to
+ * LL systems that use per-vdev tx queues for MCC or thermal throttling.
+ * As an example, this function could be used when a single-channel physical
+ * device supports multiple channels by jumping back and forth between the
+ * channels in a time-shared manner. As the device is switched from channel
+ * A to channel B, the virtual devices that operate on channel A will be
+ * paused.
+ *
+ * @param data_vdev - the virtual device being paused
+ * @param reason - the reason for which vdev queue is getting paused
+ */
+#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || defined(QCA_LL_TX_FLOW_CONTROL_V2)
+void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason);
+#else
+static inline
+void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
+{
+ return;
+}
+#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
+
+/**
+ * @brief Drop all tx data for the specified virtual device.
+ * @details
+ * This function applies primarily to HL systems, but also applies to
+ * LL systems that use per-vdev tx queues for MCC or thermal throttling.
+ * This function would typically be used by the ctrl SW after it parks
+ * a STA vdev and then resumes it, but to a new AP. In this case, though
+ * the same vdev can be used, any old tx frames queued inside it would be
+ * stale, and would need to be discarded.
+ *
+ * @param data_vdev - the virtual device being flushed
+ */
+#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
+void ol_txrx_vdev_flush(ol_txrx_vdev_handle data_vdev);
+#else
+#define ol_txrx_vdev_flush(data_vdev) /* no-op */
+#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
+
+/**
+ * @brief Resume tx for the specified virtual device.
+ * @details
+ * This function applies primarily to HL systems, but also applies to
+ * LL systems that use per-vdev tx queues for MCC or thermal throttling.
+ *
+ * @param data_vdev - the virtual device being unpaused
+ * @param reason - the reason for which vdev queue is getting unpaused
+ */
+#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || defined(QCA_LL_TX_FLOW_CONTROL_V2)
+void ol_txrx_vdev_unpause(ol_txrx_vdev_handle data_vdev, uint32_t reason);
+#else
+static inline
+void ol_txrx_vdev_unpause(ol_txrx_vdev_handle data_vdev, uint32_t reason)
+{
+ return;
+}
+#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
+
+/**
+ * @brief Suspend all tx data per thermal event/timer for the
+ * specified physical device
+ * @details
+ * This function applies only to HL systems, and it makes pause and
+ * unpause operations happen in pairs.
+ */
+#define ol_txrx_throttle_pause(data_pdev) /* no-op */
+
+/**
+ * @brief Resume all tx data per thermal event/timer for the
+ * specified physical device
+ * @details
+ * This function applies only to HL systems, and it makes pause and
+ * unpause operations happen in pairs.
+ */
+#define ol_txrx_throttle_unpause(data_pdev) /* no-op */
+
+/**
+ * @brief Suspend all tx data for the specified physical device.
+ * @details
+ * This function applies only to HL systems - in LL systems, tx flow control
+ * is handled entirely within the target FW.
+ * In some systems it is necessary to be able to temporarily
+ * suspend all WLAN traffic, e.g. to allow another device such as bluetooth
+ * to temporarily have exclusive access to shared RF chain resources.
+ * This function suspends tx traffic within the specified physical device.
+ *
+ * @param data_pdev - the physical device being paused
+ */
+#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || defined(QCA_LL_TX_FLOW_CONTROL_V2)
+void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *data_pdev, uint32_t reason);
+#else
+static inline
+void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *data_pdev, uint32_t reason)
+{
+ return;
+}
+#endif
+
+/**
+ * @brief Resume tx for the specified physical device.
+ * @details
+ * This function applies only to HL systems - in LL systems, tx flow control
+ * is handled entirely within the target FW.
+ *
+ * @param data_pdev - the physical device being unpaused
+ */
+#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || defined(QCA_LL_TX_FLOW_CONTROL_V2)
+void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason);
+#else
+static inline
+void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
+{
+ return;
+}
+#endif
+
+/**
+ * @brief Synchronize the data-path tx with a control-path target download
+ * @details
+ * @param data_pdev - the data-path physical device object
+ * @param sync_cnt - after the host data-path SW downloads this sync request
+ * to the target data-path FW, the target tx data-path will hold itself
+ * in suspension until it is given an out-of-band sync counter value that
+ * is equal to or greater than this counter value
+ */
+void ol_txrx_tx_sync(ol_txrx_pdev_handle data_pdev, uint8_t sync_cnt);
+
+/**
+ * @brief Delete a peer's data object.
+ * @details
+ * When the host's control SW disassociates a peer, it calls this
+ * function to delete the peer's data object.
+ * The reference stored in the control peer object to the data peer
+ * object (set up by a call to ol_peer_store()) is provided.
+ *
+ * @param data_peer - the object to delete
+ */
+void ol_txrx_peer_detach(ol_txrx_peer_handle data_peer);
+
+typedef void (*ol_txrx_vdev_delete_cb)(void *context);
+
+/**
+ * @brief Deallocate the specified data virtual device object.
+ * @details
+ * All peers associated with the virtual device need to be deleted
+ * (ol_txrx_peer_detach) before the virtual device itself is deleted.
+ * However, for the peers to be fully deleted, the peer deletion has to
+ * percolate through the target data FW and back up to the host data SW.
+ * Thus, even though the host control SW may have issued a peer_detach
+ * call for each of the vdev's peers, the peer objects may still be
+ * allocated, pending removal of all references to them by the target FW.
+ * In this case, though the vdev_detach function call will still return
+ * immediately, the vdev itself won't actually be deleted, until the
+ * deletions of all its peers complete.
+ * The caller can provide a callback function pointer to be notified when
+ * the vdev deletion actually happens - whether it's directly within the
+ * vdev_detach call, or if it's deferred until all in-progress peer
+ * deletions have completed.
+ *
+ * @param data_vdev - data object for the virtual device in question
+ * @param callback - function to call (if non-NULL) once the vdev has
+ * been wholly deleted
+ * @param callback_context - context to provide in the callback
+ */
+void
+ol_txrx_vdev_detach(ol_txrx_vdev_handle data_vdev,
+ ol_txrx_vdev_delete_cb callback, void *callback_context);
+
+/**
+ * @brief Delete the data SW state.
+ * @details
+ * This function is used when the WLAN driver is being removed to
+ * remove the host data component within the driver.
+ * All virtual devices within the physical device need to be deleted
+ * (ol_txrx_vdev_detach) before the physical device itself is deleted.
+ *
+ * @param data_pdev - the data physical device object being removed
+ * @param force - delete the pdev (and its vdevs and peers) even if there
+ * are outstanding references by the target to the vdevs and peers
+ * within the pdev
+ */
+void ol_txrx_pdev_detach(ol_txrx_pdev_handle data_pdev, int force);
+
+typedef void
+(*ol_txrx_data_tx_cb)(void *ctxt, cdf_nbuf_t tx_frm, int had_error);
+
+/**
+ * @brief Store a delivery notification callback for specific data frames.
+ * @details
+ * Through a non-std tx function, the txrx SW can be given tx data frames
+ * that are specially marked to not be unmapped and freed by the tx SW
+ * when transmission completes. Rather, these specially-marked frames
+ * are provided to the callback registered with this function.
+ *
+ * @param data_vdev - which vdev the callback is being registered with
+ * (Currently the callback is stored in the pdev rather than the vdev.)
+ * @param callback - the function to call when tx frames marked as "no free"
+ * are done being transmitted
+ * @param ctxt - the context argument provided to the callback function
+ */
+void
+ol_txrx_data_tx_cb_set(ol_txrx_vdev_handle data_vdev,
+ ol_txrx_data_tx_cb callback, void *ctxt);
+
+/**
+ * @brief Allow the control-path SW to send data frames.
+ * @details
+ * Generally, all tx data frames come from the OS shim into the txrx layer.
+ * However, there are rare cases such as TDLS messaging where the UMAC
+ * control-path SW creates tx data frames.
+ * This UMAC SW can call this function to provide the tx data frames to
+ * the txrx layer.
+ * The UMAC SW can request a callback for these data frames after their
+ * transmission completes, by using the ol_txrx_data_tx_cb_set function
+ * to register a tx completion callback, and by specifying
+ * ol_tx_spec_no_free as the tx_spec arg when giving the frames to
+ * ol_tx_non_std.
+ * The MSDUs need to have the appropriate L2 header type (802.3 vs. 802.11),
+ * as specified by ol_cfg_frame_type().
+ *
+ * @param data_vdev - which vdev should transmit the tx data frames
+ * @param tx_spec - what non-standard handling to apply to the tx data frames
+ * @param msdu_list - NULL-terminated list of tx MSDUs
+ */
+cdf_nbuf_t
+ol_tx_non_std(ol_txrx_vdev_handle data_vdev,
+ enum ol_tx_spec tx_spec, cdf_nbuf_t msdu_list);
+
+typedef void
+(*ol_txrx_mgmt_tx_cb)(void *ctxt, cdf_nbuf_t tx_mgmt_frm, int had_error);
+
+/**
+ * @brief Store a callback for delivery notifications for management frames.
+ * @details
+ * When the txrx SW receives notifications from the target that a tx frame
+ * has been delivered to its recipient, it will check if the tx frame
+ * is a management frame. If so, the txrx SW will check the management
+ * frame type specified when the frame was submitted for transmission.
+ * If there is a callback function registered for the type of management
+ * frame in question, the txrx code will invoke the callback to inform
+ * the management + control SW that the mgmt frame was delivered.
+ * This function is used by the control SW to store a callback pointer
+ * for a given type of management frame.
+ *
+ * @param pdev - the data physical device object
+ * @param type - the type of mgmt frame the callback is used for
+ * @param download_cb - the callback for notification of delivery to the target
+ * @param ota_ack_cb - the callback for notification of delivery to the peer
+ * @param ctxt - context to use with the callback
+ */
+void
+ol_txrx_mgmt_tx_cb_set(ol_txrx_pdev_handle pdev,
+ uint8_t type,
+ ol_txrx_mgmt_tx_cb download_cb,
+ ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt);
+
+/**
+ * @brief Transmit a management frame.
+ * @details
+ * Send the specified management frame from the specified virtual device.
+ * The type is used for determining whether to invoke a callback to inform
+ * the sender that the tx mgmt frame was delivered, and if so, which
+ * callback to use.
+ *
+ * @param vdev - virtual device transmitting the frame
+ * @param tx_mgmt_frm - management frame to transmit
+ * @param type - the type of management frame (determines what callback to use)
+ * @param use_6mbps - specify whether management frame to transmit should use 6 Mbps
+ * rather than 1 Mbps min rate(for 5GHz band or P2P)
+ * @return
+ * 0 -> the frame is accepted for transmission, -OR-
+ * 1 -> the frame was not accepted
+ */
+int
+ol_txrx_mgmt_send(ol_txrx_vdev_handle vdev,
+ cdf_nbuf_t tx_mgmt_frm,
+ uint8_t type, uint8_t use_6mbps, uint16_t chanfreq);
+
+/**
+ * @brief Setup the monitor mode vap (vdev) for this pdev
+ * @details
+ * When a non-NULL vdev handle is registered as the monitor mode vdev, all
+ * packets received by the system are delivered to the OS stack on this
+ * interface in 802.11 MPDU format. Only a single monitor mode interface
+ * can be up at any time. When the vdev handle is set to NULL the monitor
+ * mode delivery is stopped. This handle may either be a unique vdev
+ * object that only receives monitor mode packets OR a pointer to a vdev
+ * object that also receives non-monitor traffic. In the second case the
+ * OS stack is responsible for delivering the two streams using appropriate
+ * OS APIs
+ *
+ * @param pdev - the data physical device object
+ * @param vdev - the data virtual device object to deliver monitor mode
+ * packets on
+ * @return
+ * 0 -> the monitor mode vap was successfully setup
+ * -1 -> Unable to setup monitor mode
+ */
+int
+ol_txrx_set_monitor_mode_vap(ol_txrx_pdev_handle pdev,
+ ol_txrx_vdev_handle vdev);
+
+/**
+ * @brief Setup the current operating channel of the device
+ * @details
+ * Mainly used when populating monitor mode status that requires the
+ * current operating channel
+ *
+ * @param pdev - the data physical device object
+ * @param chan_mhz - the channel frequency (mhz)
+ * packets on
+ * @return - void
+ */
+void ol_txrx_set_curchan(ol_txrx_pdev_handle pdev, uint32_t chan_mhz);
+
+CDF_STATUS ol_txrx_bus_suspend(void);
+CDF_STATUS ol_txrx_bus_resume(void);
+CDF_STATUS ol_txrx_wait_for_pending_tx(int timeout);
+
+/**
+ * @brief Get the number of pending transmit frames that are awaiting completion.
+ * @details
+ * Mainly used in clean up path to make sure all buffers have been free'ed
+ *
+ * @param pdev - the data physical device object
+ * @return - count of pending frames
+ */
+int ol_txrx_get_tx_pending(ol_txrx_pdev_handle pdev);
+
+/**
+ * @brief Discard all tx frames that are pending in txrx.
+ * @details
+ * Mainly used in clean up path to make sure all pending tx packets
+ * held by txrx are returned back to OS shim immediately.
+ *
+ * @param pdev - the data physical device object
+ * @return - void
+ */
+void ol_txrx_discard_tx_pending(ol_txrx_pdev_handle pdev);
+
+/**
+ * @brief set the safemode of the device
+ * @details
+ * This flag is used to bypass the encrypt and decrypt processes when send and
+ * receive packets. It works like open AUTH mode, HW will treat all packets
+ * as non-encrypt frames because no key installed. For rx fragmented frames,
+ * it bypasses all the rx defragmentation.
+ *
+ * @param vdev - the data virtual device object
+ * @param val - the safemode state
+ * @return - void
+ */
+void ol_txrx_set_safemode(ol_txrx_vdev_handle vdev, uint32_t val);
+
+/**
+ * @brief set the privacy filter
+ * @details
+ * Rx related. Set the privacy filters. When rx packets, check
+ * the ether type, filter type and packet type
+ * to decide whether discard these packets.
+ *
+ * @param vdev - the data virtual device object
+ * @param filter - filters to be set
+ * @param num - the number of filters
+ * @return - void
+ */
+void
+ol_txrx_set_privacy_filters(ol_txrx_vdev_handle vdev,
+ void *filter, uint32_t num);
+
+/**
+ * @brief configure the drop unencrypted frame flag
+ * @details
+ * Rx related. When set this flag, all the unencrypted frames
+ * received over a secure connection will be discarded
+ *
+ * @param vdev - the data virtual device object
+ * @param val - flag
+ * @return - void
+ */
+void ol_txrx_set_drop_unenc(ol_txrx_vdev_handle vdev, uint32_t val);
+
+enum ol_txrx_peer_state {
+ ol_txrx_peer_state_invalid,
+ ol_txrx_peer_state_disc, /* initial state */
+ ol_txrx_peer_state_conn, /* authentication in progress */
+ ol_txrx_peer_state_auth, /* authentication successful */
+};
+
+/**
+ * @brief specify the peer's authentication state
+ * @details
+ * Specify the peer's authentication state (none, connected, authenticated)
+ * to allow the data SW to determine whether to filter out invalid data frames.
+ * (In the "connected" state, where security is enabled, but authentication
+ * has not completed, tx and rx data frames other than EAPOL or WAPI should
+ * be discarded.)
+ * This function is only relevant for systems in which the tx and rx filtering
+ * are done in the host rather than in the target.
+ *
+ * @param data_peer - which peer has changed its state
+ * @param state - the new state of the peer
+ *
+ * Return: CDF Status
+ */
+CDF_STATUS
+ol_txrx_peer_state_update(ol_txrx_pdev_handle pdev, uint8_t *peer_addr,
+ enum ol_txrx_peer_state state);
+
+void
+ol_txrx_peer_keyinstalled_state_update(ol_txrx_peer_handle data_peer,
+ uint8_t val);
+
+#define ol_tx_addba_conf(data_peer, tid, status) /* no-op */
+
+/**
+ * @brief Find a txrx peer handle from the peer's MAC address
+ * @details
+ * The control SW typically uses the txrx peer handle to refer to the peer.
+ * In unusual circumstances, if it is infeasible for the control SW to maintain
+ * the txrx peer handle but it can maintain the peer's MAC address,
+ * this function allows the peer handle to be retrieved, based on the peer's
+ * MAC address.
+ * In cases where there are multiple peer objects with the same MAC address,
+ * it is undefined which such object is returned.
+ * This function does not increment the peer's reference count. Thus, it is
+ * only suitable for use as long as the control SW has assurance that it has
+ * not deleted the peer object, by calling ol_txrx_peer_detach.
+ *
+ * @param pdev - the data physical device object
+ * @param peer_mac_addr - MAC address of the peer in question
+ * @return handle to the txrx peer object
+ */
+ol_txrx_peer_handle
+ol_txrx_peer_find_by_addr(ol_txrx_pdev_handle pdev, uint8_t *peer_mac_addr);
+
+/**
+ * @brief Find a txrx peer handle from a peer's local ID
+ * @details
+ * The control SW typically uses the txrx peer handle to refer to the peer.
+ * In unusual circumstances, if it is infeasible for the control SW to maintain
+ * the txrx peer handle but it can maintain a small integer local peer ID,
+ * this function allows the peer handle to be retrieved, based on the local
+ * peer ID.
+ *
+ * @param pdev - the data physical device object
+ * @param local_peer_id - the ID txrx assigned locally to the peer in question
+ * @return handle to the txrx peer object
+ */
+#if QCA_SUPPORT_TXRX_LOCAL_PEER_ID
+ol_txrx_peer_handle
+ol_txrx_peer_find_by_local_id(ol_txrx_pdev_handle pdev, uint8_t local_peer_id);
+#else
+#define ol_txrx_peer_find_by_local_id(pdev, local_peer_id) NULL
+#endif
+
+struct ol_txrx_peer_stats_t {
+ struct {
+ struct {
+ uint32_t ucast;
+ uint32_t mcast;
+ uint32_t bcast;
+ } frms;
+ struct {
+ uint32_t ucast;
+ uint32_t mcast;
+ uint32_t bcast;
+ } bytes;
+ } tx;
+ struct {
+ struct {
+ uint32_t ucast;
+ uint32_t mcast;
+ uint32_t bcast;
+ } frms;
+ struct {
+ uint32_t ucast;
+ uint32_t mcast;
+ uint32_t bcast;
+ } bytes;
+ } rx;
+};
+
+/**
+ * @brief Provide a snapshot of the txrx counters for the specified peer
+ * @details
+ * The txrx layer optionally maintains per-peer stats counters.
+ * This function provides the caller with a consistent snapshot of the
+ * txrx stats counters for the specified peer.
+ *
+ * @param pdev - the data physical device object
+ * @param peer - which peer's stats counters are requested
+ * @param stats - buffer for holding the stats counters snapshot
+ * @return success / failure status
+ */
+#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
+A_STATUS
+ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
+ ol_txrx_peer_handle peer, ol_txrx_peer_stats_t *stats);
+#else
+#define ol_txrx_peer_stats_copy(pdev, peer, stats) A_ERROR /* failure */
+#endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
+
+/* Config parameters for txrx_pdev */
+struct txrx_pdev_cfg_param_t {
+ uint8_t is_full_reorder_offload;
+ /* IPA Micro controller data path offload enable flag */
+ uint8_t is_uc_offload_enabled;
+ /* IPA Micro controller data path offload TX buffer count */
+ uint32_t uc_tx_buffer_count;
+ /* IPA Micro controller data path offload TX buffer size */
+ uint32_t uc_tx_buffer_size;
+ /* IPA Micro controller data path offload RX indication ring count */
+ uint32_t uc_rx_indication_ring_count;
+ /* IPA Micro controller data path offload TX partition base */
+ uint32_t uc_tx_partition_base;
+ /* IP, TCP and UDP checksum offload */
+ bool ip_tcp_udp_checksum_offload;
+ /* Rx processing in thread from TXRX */
+ bool enable_rxthread;
+ /* CE classification enabled through INI */
+ bool ce_classify_enabled;
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+ /* Threshold to stop queue in percentage */
+ uint32_t tx_flow_stop_queue_th;
+ /* Start queue offset in percentage */
+ uint32_t tx_flow_start_queue_offset;
+#endif
+};
+
+/**
+ * @brief Setup configuration parameters
+ * @details
+ * Allocation configuration context that will be used across data path
+ *
+ * @param osdev - OS handle needed as an argument for some OS primitives
+ * @return the control device object
+ */
+ol_pdev_handle ol_pdev_cfg_attach(cdf_device_t osdev,
+ struct txrx_pdev_cfg_param_t cfg_param);
+
+CDF_STATUS ol_txrx_get_vdevid(struct ol_txrx_peer_t *peer, uint8_t *vdev_id);
+void *ol_txrx_get_vdev_by_sta_id(uint8_t sta_id);
+
+
+#define OL_TXRX_INVALID_LOCAL_PEER_ID 0xffff
+#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
+uint16_t ol_txrx_local_peer_id(ol_txrx_peer_handle peer);
+ol_txrx_peer_handle ol_txrx_find_peer_by_addr(ol_txrx_pdev_handle pdev,
+ uint8_t *peer_addr,
+ uint8_t *peer_id);
+ol_txrx_peer_handle
+ol_txrx_find_peer_by_addr_and_vdev(ol_txrx_pdev_handle pdev,
+ ol_txrx_vdev_handle vdev,
+ uint8_t *peer_addr, uint8_t *peer_id);
+#else
+#define ol_txrx_local_peer_id(peer) OL_TXRX_INVALID_LOCAL_PEER_ID
+#define ol_txrx_find_peer_by_addr(pdev, peer_addr, peer_id) NULL
+#define ol_txrx_find_peer_by_addr_and_vdev(pdev, vdev, peer_addr, peer_id) NULL
+#endif
+
+#define OL_TXRX_RSSI_INVALID 0xffff
+/**
+ * @brief Provide the current RSSI average from data frames sent by a peer.
+ * @details
+ * If a peer has sent data frames, the data SW will optionally keep
+ * a running average of the RSSI observed for those data frames.
+ * This function returns that time-average RSSI if it is available,
+ * or OL_TXRX_RSSI_INVALID if either RSSI tracking is disabled or if
+ * no data frame indications with valid RSSI meta-data have been received.
+ * The RSSI is in approximate dBm units, and is normalized with respect
+ * to a 20 MHz channel. For example, if a data frame is received on a
+ * 40 MHz channel, wherein both the primary 20 MHz channel and the
+ * secondary 20 MHz channel have an RSSI of -77 dBm, the reported RSSI
+ * will be -77 dBm, rather than the actual -74 dBm RSSI from the
+ * combination of the primary + extension 20 MHz channels.
+ * Alternatively, the RSSI may be evaluated only on the primary 20 MHz
+ * channel.
+ *
+ * @param peer - which peer's RSSI is desired
+ * @return RSSI evaluated from frames sent by the specified peer
+ */
+#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
+int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer);
+#else
+#define ol_txrx_peer_rssi(peer) OL_TXRX_RSSI_INVALID
+#endif /* QCA_SUPPORT_PEER_DATA_RX_RSSI */
+
/*
 * NOTE(review): OL_TXRX_INVALID_LOCAL_PEER_ID and the ol_txrx_local_peer_id()
 * declaration are duplicated earlier in this header. The duplicates are
 * benign (identical redefinitions), but one copy should eventually be
 * removed. The guard below uses #ifdef (not #if) for consistency with the
 * earlier QCA_SUPPORT_TXRX_LOCAL_PEER_ID guard, so behavior does not depend
 * on whether the flag is defined empty or to a value.
 */
#define OL_TXRX_INVALID_LOCAL_PEER_ID 0xffff
#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
uint16_t ol_txrx_local_peer_id(ol_txrx_peer_handle peer);
#else
#define ol_txrx_local_peer_id(peer) OL_TXRX_INVALID_LOCAL_PEER_ID
#endif
+
+#ifdef QCA_COMPUTE_TX_DELAY
+/**
+ * @brief updates the compute interval period for TSM stats.
+ * @details
+ * @param interval - interval for stats computation
+ */
+void ol_tx_set_compute_interval(ol_txrx_pdev_handle pdev, uint32_t interval);
+
+/**
+ * @brief Return the uplink (transmitted) packet count and loss count.
+ * @details
+ * This function will be called for getting the uplink packet count and
+ * loss count for a given stream (access category) at a regular interval.
+ * This also resets the counters; hence, the value returned is the packet
+ * count over the last 5 (default) second interval. These counters are
+ * incremented per access category in ol_tx_completion_handler()
+ *
+ * @param category - access category of interest
+ * @param out_packet_count - number of packets transmitted
+ * @param out_packet_loss_count - number of packets lost
+ */
+void
+ol_tx_packet_count(ol_txrx_pdev_handle pdev,
+ uint16_t *out_packet_count,
+ uint16_t *out_packet_loss_count, int category);
+#endif
+
+/**
+ * @brief Return the average delays for tx frames.
+ * @details
+ * Return the average of the total time tx frames spend within the driver
+ * and the average time tx frames take to be transmitted.
+ * These averages are computed over a 5 second time interval.
+ * These averages are computed separately for separate access categories,
+ * if the QCA_COMPUTE_TX_DELAY_PER_AC flag is set.
+ *
+ * @param pdev - the data physical device instance
+ * @param queue_delay_microsec - average time tx frms spend in the WLAN driver
+ * @param tx_delay_microsec - average time for frames to be transmitted
+ * @param category - category (TID) of interest
+ */
+#ifdef QCA_COMPUTE_TX_DELAY
+void
+ol_tx_delay(ol_txrx_pdev_handle pdev,
+ uint32_t *queue_delay_microsec,
+ uint32_t *tx_delay_microsec, int category);
+#else
+static inline void
+ol_tx_delay(ol_txrx_pdev_handle pdev,
+ uint32_t *queue_delay_microsec,
+ uint32_t *tx_delay_microsec, int category)
+{
+ /* no-op version if QCA_COMPUTE_TX_DELAY is not set */
+ *queue_delay_microsec = *tx_delay_microsec = 0;
+}
+#endif
+
+/*
+ * Bins used for reporting delay histogram:
+ * bin 0: 0 - 10 ms delay
+ * bin 1: 10 - 20 ms delay
+ * bin 2: 20 - 40 ms delay
+ * bin 3: 40 - 80 ms delay
+ * bin 4: 80 - 160 ms delay
+ * bin 5: > 160 ms delay
+ */
+#define QCA_TX_DELAY_HIST_REPORT_BINS 6
+/**
+ * @brief Provide a histogram of tx queuing delays.
+ * @details
+ * Return a histogram showing the number of tx frames of the specified
+ * category for each of the delay levels in the histogram bin spacings
+ * listed above.
+ * These histograms are computed over a 5 second time interval.
+ * These histograms are computed separately for separate access categories,
+ * if the QCA_COMPUTE_TX_DELAY_PER_AC flag is set.
+ *
+ * @param pdev - the data physical device instance
+ * @param bin_values - an array of QCA_TX_DELAY_HIST_REPORT_BINS elements
+ * This array gets filled in with the histogram bin counts.
+ * @param category - category (TID) of interest
+ */
+#ifdef QCA_COMPUTE_TX_DELAY
+void
+ol_tx_delay_hist(ol_txrx_pdev_handle pdev, uint16_t *bin_values, int category);
+#else
+static inline void
+ol_tx_delay_hist(ol_txrx_pdev_handle pdev, uint16_t *bin_values, int category)
+{
+ /* no-op version if QCA_COMPUTE_TX_DELAY is not set */
+ cdf_assert(bin_values);
+ cdf_mem_zero(bin_values,
+ QCA_TX_DELAY_HIST_REPORT_BINS * sizeof(*bin_values));
+}
+#endif
+
+#if defined(QCA_SUPPORT_TX_THROTTLE)
+/**
+ * @brief Set the thermal mitigation throttling level.
+ * @details
+ * This function applies only to LL systems. This function is used to set
+ * the tx throttle level used for thermal mitigation.
+ *
+ * @param pdev - the physical device being throttled
+ * @param level - the throttle level to apply
+ */
+void ol_tx_throttle_set_level(struct ol_txrx_pdev_t *pdev, int level);
+#else
/* Stub: tx throttling (QCA_SUPPORT_TX_THROTTLE) is not compiled in. */
static inline void ol_tx_throttle_set_level(struct ol_txrx_pdev_t *pdev,
					    int level)
{
	/* nothing to do */
}
+#endif /* QCA_SUPPORT_TX_THROTTLE */
+
+#if defined(QCA_SUPPORT_TX_THROTTLE)
+/**
+ * @brief Configure the thermal mitigation throttling period.
+ * @details
+ * This function applies only to LL systems. This function is used to set
+ * the period over which data will be throttled.
+ *
+ * @param pdev - the physical device being throttled
+ * @param period - the throttling period
+ */
+void ol_tx_throttle_init_period(struct ol_txrx_pdev_t *pdev, int period);
+#else
/* Stub: tx throttling (QCA_SUPPORT_TX_THROTTLE) is not compiled in. */
static inline void ol_tx_throttle_init_period(struct ol_txrx_pdev_t *pdev,
					      int period)
{
	/* nothing to do */
}
+#endif /* QCA_SUPPORT_TX_THROTTLE */
+
+void ol_vdev_rx_set_intrabss_fwd(ol_txrx_vdev_handle vdev, bool val);
+
+
+#ifdef IPA_OFFLOAD
+void
+ol_txrx_ipa_uc_get_resource(ol_txrx_pdev_handle pdev,
+ cdf_dma_addr_t *ce_sr_base_paddr,
+ uint32_t *ce_sr_ring_size,
+ cdf_dma_addr_t *ce_reg_paddr,
+ cdf_dma_addr_t *tx_comp_ring_base_paddr,
+ uint32_t *tx_comp_ring_size,
+ uint32_t *tx_num_alloc_buffer,
+ cdf_dma_addr_t *rx_rdy_ring_base_paddr,
+ uint32_t *rx_rdy_ring_size,
+ cdf_dma_addr_t *rx_proc_done_idx_paddr,
+ void **rx_proc_done_idx_vaddr,
+ cdf_dma_addr_t *rx2_rdy_ring_base_paddr,
+ uint32_t *rx2_rdy_ring_size,
+ cdf_dma_addr_t *rx2_proc_done_idx_paddr,
+ void **rx2_proc_done_idx_vaddr);
+
+
+void
+ol_txrx_ipa_uc_set_doorbell_paddr(ol_txrx_pdev_handle pdev,
+ cdf_dma_addr_t ipa_tx_uc_doorbell_paddr,
+ cdf_dma_addr_t ipa_rx_uc_doorbell_paddr);
+
+void
+ol_txrx_ipa_uc_set_active(ol_txrx_pdev_handle pdev, bool uc_active, bool is_tx);
+
+void ol_txrx_ipa_uc_op_response(ol_txrx_pdev_handle pdev, uint8_t *op_msg);
+
+void ol_txrx_ipa_uc_register_op_cb(ol_txrx_pdev_handle pdev,
+ void (*ipa_uc_op_cb_type)(uint8_t *op_msg,
+ void *osif_ctxt),
+ void *osif_dev);
+
+void ol_txrx_ipa_uc_get_stat(ol_txrx_pdev_handle pdev);
+#else
+/**
+ * ol_txrx_ipa_uc_get_resource() - Client request resource information
+ * @pdev: handle to the HTT instance
+ * @ce_sr_base_paddr: copy engine source ring base physical address
+ * @ce_sr_ring_size: copy engine source ring size
+ * @ce_reg_paddr: copy engine register physical address
+ * @tx_comp_ring_base_paddr: tx comp ring base physical address
+ * @tx_comp_ring_size: tx comp ring size
+ * @tx_num_alloc_buffer: number of allocated tx buffer
+ * @rx_rdy_ring_base_paddr: rx ready ring base physical address
+ * @rx_rdy_ring_size: rx ready ring size
+ * @rx_proc_done_idx_paddr: rx process done index physical address
+ * @rx_proc_done_idx_vaddr: rx process done index virtual address
+ * @rx2_rdy_ring_base_paddr: rx done ring base physical address
+ * @rx2_rdy_ring_size: rx done ring size
+ * @rx2_proc_done_idx_paddr: rx done index physical address
+ * @rx2_proc_done_idx_vaddr: rx done index virtual address
+ *
+ * OL client will request IPA UC related resource information
+ * Resource information will be distributed to the IPA module
+ * All of the required resources should be pre-allocated
+ *
+ * Return: none
+ */
+static inline void
+ol_txrx_ipa_uc_get_resource(ol_txrx_pdev_handle pdev,
+ cdf_dma_addr_t *ce_sr_base_paddr,
+ uint32_t *ce_sr_ring_size,
+ cdf_dma_addr_t *ce_reg_paddr,
+ cdf_dma_addr_t *tx_comp_ring_base_paddr,
+ uint32_t *tx_comp_ring_size,
+ uint32_t *tx_num_alloc_buffer,
+ cdf_dma_addr_t *rx_rdy_ring_base_paddr,
+ uint32_t *rx_rdy_ring_size,
+ cdf_dma_addr_t *rx_proc_done_idx_paddr,
+ void **rx_proc_done_idx_vaddr,
+ cdf_dma_addr_t *rx2_rdy_ring_base_paddr,
+ uint32_t *rx2_rdy_ring_size,
+ cdf_dma_addr_t *rx2_proc_done_idx_paddr,
+ void **rx2_proc_done_idx_vaddr)
+{
+ return;
+}
+
+/**
+ * ol_txrx_ipa_uc_set_doorbell_paddr() - Client set IPA UC doorbell register
+ * @pdev: handle to the HTT instance
+ * @ipa_uc_tx_doorbell_paddr: tx comp doorbell physical address
+ * @ipa_uc_rx_doorbell_paddr: rx ready doorbell physical address
+ *
+ * IPA UC let know doorbell register physical address
+ * WLAN firmware will use this physical address to notify IPA UC
+ *
+ * Return: none
+ */
+static inline void
+ol_txrx_ipa_uc_set_doorbell_paddr(ol_txrx_pdev_handle pdev,
+ cdf_dma_addr_t ipa_tx_uc_doorbell_paddr,
+ cdf_dma_addr_t ipa_rx_uc_doorbell_paddr)
+{
+ return;
+}
+
+/**
+ * ol_txrx_ipa_uc_set_active() - Client notify IPA UC data path active or not
+ * @pdev: handle to the HTT instance
+ * @uc_active: whether the IPA UC data path is active
+ * @is_tx: true for the tx data path, false for the rx data path
+ *
+ * Client notifies whether the IPA UC data path is active or not
+ *
+ * Return: none
+ */
+static inline void
+ol_txrx_ipa_uc_set_active(ol_txrx_pdev_handle pdev,
+ bool uc_active, bool is_tx)
+{
+ return;
+}
+
+/**
+ * ol_txrx_ipa_uc_op_response() - Handle OP command response from firmware
+ * @pdev: handle to the HTT instance
+ * @op_msg: op response message from firmware
+ *
+ * Return: none
+ */
+static inline void
+ol_txrx_ipa_uc_op_response(ol_txrx_pdev_handle pdev, uint8_t *op_msg)
+{
+ return;
+}
+
+/**
+ * ol_txrx_ipa_uc_register_op_cb() - Register OP handler function
+ * @pdev: handle to the HTT instance
+ * @op_cb: handler function pointer
+ * @osif_dev: register client context
+ *
+ * Return: none
+ */
+static inline void
+ol_txrx_ipa_uc_register_op_cb(ol_txrx_pdev_handle pdev,
+ void (*ipa_uc_op_cb_type)(uint8_t *op_msg,
+ void *osif_ctxt),
+ void *osif_dev)
+{
+ return;
+}
+
+/**
+ * ol_txrx_ipa_uc_get_stat() - Get firmware wdi status
+ * @pdev: handle to the HTT instance
+ *
+ * Return: none
+ */
+static inline void ol_txrx_ipa_uc_get_stat(ol_txrx_pdev_handle pdev)
+{
+ return;
+}
+#endif /* IPA_OFFLOAD */
+
+void ol_txrx_display_stats(uint16_t bitmap);
+void ol_txrx_clear_stats(uint16_t bitmap);
+int ol_txrx_stats(uint8_t vdev_id, char *buffer, unsigned buf_len);
+
+CDF_STATUS ol_txrx_register_ocb_peer(void *cds_ctx, uint8_t *mac_addr,
+ uint8_t *peer_id);
+
+void ol_txrx_set_ocb_peer(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_peer_t *peer);
+
+bool ol_txrx_get_ocb_peer(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_peer_t **peer);
+
+void ol_tx_set_is_mgmt_over_wmi_enabled(uint8_t value);
+uint8_t ol_tx_get_is_mgmt_over_wmi_enabled(void);
+
+/* TX FLOW Control related functions */
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+#define TX_FLOW_MGMT_POOL_ID 0xEF
+
+#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
+#define TX_FLOW_MGMT_POOL_SIZE 32
+#else
+#define TX_FLOW_MGMT_POOL_SIZE 0
+#endif
+
+void ol_tx_register_flow_control(struct ol_txrx_pdev_t *pdev);
+void ol_tx_deregister_flow_control(struct ol_txrx_pdev_t *pdev);
+void ol_tx_dump_flow_pool_info(void);
+void ol_tx_clear_flow_pool_stats(void);
+void ol_tx_flow_pool_map_handler(uint8_t flow_id, uint8_t flow_type,
+ uint8_t flow_pool_id, uint16_t flow_pool_size);
+void ol_tx_flow_pool_unmap_handler(uint8_t flow_id, uint8_t flow_type,
+ uint8_t flow_pool_id);
+struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(uint8_t flow_pool_id,
+ uint16_t flow_pool_size);
+int ol_tx_delete_flow_pool(struct ol_tx_flow_pool_t *pool);
+void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc);
+#else
+
/* Stub: tx flow control (QCA_LL_TX_FLOW_CONTROL_V2) is not compiled in. */
static inline void ol_tx_register_flow_control(struct ol_txrx_pdev_t *pdev)
{
}
/* Stub: tx flow control (QCA_LL_TX_FLOW_CONTROL_V2) is not compiled in. */
static inline void ol_tx_deregister_flow_control(struct ol_txrx_pdev_t *pdev)
{
}
/* Stub: tx flow control (QCA_LL_TX_FLOW_CONTROL_V2) is not compiled in. */
static inline void ol_tx_dump_flow_pool_info(void)
{
}
/* Stub: tx flow control (QCA_LL_TX_FLOW_CONTROL_V2) is not compiled in. */
static inline void ol_tx_clear_flow_pool_stats(void)
{
}
/* Stub: tx flow control (QCA_LL_TX_FLOW_CONTROL_V2) is not compiled in. */
static inline void ol_tx_flow_pool_map_handler(uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id, uint16_t flow_pool_size)
{
}
/* Stub: tx flow control (QCA_LL_TX_FLOW_CONTROL_V2) is not compiled in. */
static inline void ol_tx_flow_pool_unmap_handler(uint8_t flow_id,
	uint8_t flow_type, uint8_t flow_pool_id)
{
}
+static inline struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(
+ uint8_t flow_pool_id, uint16_t flow_pool_size)
+{
+ return NULL;
+}
/*
 * Stub: tx flow control (QCA_LL_TX_FLOW_CONTROL_V2) is not compiled in;
 * deleting a pool always reports success (0).
 */
static inline int ol_tx_delete_flow_pool(struct ol_tx_flow_pool_t *pool)
{
	return 0;
}
/* Stub: tx flow control (QCA_LL_TX_FLOW_CONTROL_V2) is not compiled in. */
static inline void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc)
{
}
+#endif
+
+#endif /* _OL_TXRX_CTRL_API__H_ */
diff --git a/dp/ol/inc/ol_txrx_dbg.h b/dp/ol/inc/ol_txrx_dbg.h
new file mode 100644
index 000000000000..138e1ed0df21
--- /dev/null
+++ b/dp/ol/inc/ol_txrx_dbg.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2011, 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_txrx_dbg.h
+ * @brief Functions provided for visibility and debugging.
+ */
+#ifndef _OL_TXRX_DBG__H_
+#define _OL_TXRX_DBG__H_
+
+#include <athdefs.h> /* A_STATUS, uint64_t */
+#include <cdf_lock.h> /* cdf_semaphore_t */
+#include <htt.h> /* htt_dbg_stats_type */
+#include <ol_txrx_stats.h> /* ol_txrx_stats */
+
+typedef void (*ol_txrx_stats_callback)(void *ctxt,
+ enum htt_dbg_stats_type type,
+ uint8_t *buf, int bytes);
+
+struct ol_txrx_stats_req {
+ uint32_t stats_type_upload_mask; /* which stats to upload */
+ uint32_t stats_type_reset_mask; /* which stats to reset */
+
+ /* stats will be printed if either print element is set */
+ struct {
+ int verbose; /* verbose stats printout */
+ int concise; /* concise stats printout (takes precedence) */
+ } print; /* print uploaded stats */
+
+ /* stats notify callback will be invoked if fp is non-NULL */
+ struct {
+ ol_txrx_stats_callback fp;
+ void *ctxt;
+ } callback;
+
+ /* stats will be copied into the specified buffer if buf is non-NULL */
+ struct {
+ uint8_t *buf;
+ int byte_limit; /* don't copy more than this */
+ } copy;
+
+ /*
+ * If blocking is true, the caller will take the specified semaphore
+ * to wait for the stats to be uploaded, and the driver will release
+ * the semaphore when the stats are done being uploaded.
+ */
+ struct {
+ int blocking;
+ cdf_semaphore_t *sem_ptr;
+ } wait;
+};
+
+#ifndef TXRX_DEBUG_LEVEL
+#define TXRX_DEBUG_LEVEL 0 /* no debug info */
+#endif
+
+#ifndef ATH_PERF_PWR_OFFLOAD /*---------------------------------------------*/
+
+#define ol_txrx_debug(vdev, debug_specs) 0
+#define ol_txrx_fw_stats_cfg(vdev, type, val) 0
+#define ol_txrx_fw_stats_get(vdev, req, response_expected) 0
+#define ol_txrx_aggr_cfg(vdev, max_subfrms_ampdu, max_subfrms_amsdu) 0
+
+#else /*---------------------------------------------------------------------*/
+
+#include <ol_txrx_api.h> /* ol_txrx_pdev_handle, etc. */
+
+int ol_txrx_debug(ol_txrx_vdev_handle vdev, int debug_specs);
+
+void ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
+ uint8_t cfg_stats_type, uint32_t cfg_val);
+
+int ol_txrx_fw_stats_get(ol_txrx_vdev_handle vdev,
+ struct ol_txrx_stats_req *req,
+ bool response_expected);
+
+int ol_txrx_aggr_cfg(ol_txrx_vdev_handle vdev,
+ int max_subfrms_ampdu, int max_subfrms_amsdu);
+
+enum {
+ TXRX_DBG_MASK_OBJS = 0x01,
+ TXRX_DBG_MASK_STATS = 0x02,
+ TXRX_DBG_MASK_PROT_ANALYZE = 0x04,
+ TXRX_DBG_MASK_RX_REORDER_TRACE = 0x08,
+ TXRX_DBG_MASK_RX_PN_TRACE = 0x10
+};
+
+/*--- txrx printouts ---*/
+
+/*
+ * Uncomment this to enable txrx printouts with dynamically adjustable
+ * verbosity. These printouts should not impact performance.
+ */
+#define TXRX_PRINT_ENABLE 1
+/* uncomment this for verbose txrx printouts (may impact performance) */
+/* #define TXRX_PRINT_VERBOSE_ENABLE 1 */
+
+void ol_txrx_print_level_set(unsigned level);
+
+/*--- txrx object (pdev, vdev, peer) display debug functions ---*/
+
+#if TXRX_DEBUG_LEVEL > 5
+void ol_txrx_pdev_display(ol_txrx_pdev_handle pdev, int indent);
+void ol_txrx_vdev_display(ol_txrx_vdev_handle vdev, int indent);
+void ol_txrx_peer_display(ol_txrx_peer_handle peer, int indent);
+#else
+#define ol_txrx_pdev_display(pdev, indent)
+#define ol_txrx_vdev_display(vdev, indent)
+#define ol_txrx_peer_display(peer, indent)
+#endif
+
+/*--- txrx stats display debug functions ---*/
+
+
+void ol_txrx_stats_display(ol_txrx_pdev_handle pdev);
+
+void ol_txrx_stats_clear(ol_txrx_pdev_handle pdev);
+
+
+/*--- txrx protocol analyzer debug feature ---*/
+
+/* uncomment this to enable the protocol analyzer feature */
+/* #define ENABLE_TXRX_PROT_ANALYZE 1 */
+
+#if defined(ENABLE_TXRX_PROT_ANALYZE)
+
+void ol_txrx_prot_ans_display(ol_txrx_pdev_handle pdev);
+
+#else
+
+#define ol_txrx_prot_ans_display(pdev)
+
+#endif /* ENABLE_TXRX_PROT_ANALYZE */
+
+/*--- txrx sequence number trace debug feature ---*/
+
+/* uncomment this to enable the rx reorder trace feature */
+/* #define ENABLE_RX_REORDER_TRACE 1 */
+
+#define ol_txrx_seq_num_trace_display(pdev) \
+ ol_rx_reorder_trace_display(pdev, 0, 0)
+
+#if defined(ENABLE_RX_REORDER_TRACE)
+
+void
+ol_rx_reorder_trace_display(ol_txrx_pdev_handle pdev, int just_once, int limit);
+
+#else
+
+#define ol_rx_reorder_trace_display(pdev, just_once, limit)
+
+#endif /* ENABLE_RX_REORDER_TRACE */
+
+/*--- txrx packet number trace debug feature ---*/
+
+/* uncomment this to enable the rx PN trace feature */
+/* #define ENABLE_RX_PN_TRACE 1 */
+
+#define ol_txrx_pn_trace_display(pdev) ol_rx_pn_trace_display(pdev, 0)
+
+#if defined(ENABLE_RX_PN_TRACE)
+
+void ol_rx_pn_trace_display(ol_txrx_pdev_handle pdev, int just_once);
+
+#else
+
+#define ol_rx_pn_trace_display(pdev, just_once)
+
+#endif /* ENABLE_RX_PN_TRACE */
+
+/*--- tx queue log debug feature ---*/
+/* uncomment this to enable the tx queue log feature */
+/* #define ENABLE_TX_QUEUE_LOG 1 */
+
+#define ol_tx_queue_log_display(pdev)
+
+#endif /* ATH_PERF_PWR_OFFLOAD */
+/*----------------------------------------*/
+
+#endif /* _OL_TXRX_DBG__H_ */
diff --git a/dp/ol/inc/ol_txrx_htt_api.h b/dp/ol/inc/ol_txrx_htt_api.h
new file mode 100644
index 000000000000..09041cadf3ce
--- /dev/null
+++ b/dp/ol/inc/ol_txrx_htt_api.h
@@ -0,0 +1,579 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_txrx_htt_api.h
+ * @brief Define the host data API functions called by the host HTT SW.
+ */
+#ifndef _OL_TXRX_HTT_API__H_
+#define _OL_TXRX_HTT_API__H_
+
+#include <htt.h> /* HTT_TX_COMPL_IND_STAT */
+#include <athdefs.h> /* A_STATUS */
+#include <cdf_nbuf.h> /* cdf_nbuf_t */
+
+#include <ol_txrx_api.h> /* ol_txrx_pdev_handle */
+
+static inline uint16_t *ol_tx_msdu_id_storage(cdf_nbuf_t msdu)
+{
+ cdf_assert(cdf_nbuf_headroom(msdu) >= (sizeof(uint16_t) * 2 - 1));
+ return (uint16_t *) (((cdf_size_t) (cdf_nbuf_head(msdu) + 1)) & ~0x1);
+}
+
+/**
+ * @brief Tx MSDU download completion for a LL system
+ * @details
+ * Release the reference to the downloaded tx descriptor.
+ * In the unlikely event that the reference count is zero, free
+ * the tx descriptor and tx frame.
+ *
+ * @param pdev - (abstract) pointer to the txrx physical device
+ * @param status - indication of whether the download succeeded
+ * @param msdu - the downloaded tx frame
+ * @param msdu_id - the txrx ID of the tx frame - this is used for
+ * locating the frame's tx descriptor
+ */
+void
+ol_tx_download_done_ll(void *pdev,
+ A_STATUS status, cdf_nbuf_t msdu, uint16_t msdu_id);
+
+/**
+ * @brief Tx MSDU download completion for HL system without tx completion msgs
+ * @details
+ * Free the tx descriptor and tx frame.
+ * Invoke the HL tx download scheduler.
+ *
+ * @param pdev - (abstract) pointer to the txrx physical device
+ * @param status - indication of whether the download succeeded
+ * @param msdu - the downloaded tx frame
+ * @param msdu_id - the txrx ID of the tx frame - this is used for
+ * locating the frame's tx descriptor
+ */
+void
+ol_tx_download_done_hl_free(void *pdev,
+ A_STATUS status, cdf_nbuf_t msdu, uint16_t msdu_id);
+
+/**
+ * @brief Tx MSDU download completion for HL system with tx completion msgs
+ * @details
+ * Release the reference to the downloaded tx descriptor.
+ * In the unlikely event that the reference count is zero, free
+ * the tx descriptor and tx frame.
+ * Optionally, invoke the HL tx download scheduler. (It is probable that
+ * the HL tx download scheduler would operate in response to tx completion
+ * messages rather than download completion events.)
+ *
+ * @param pdev - (abstract) pointer to the txrx physical device
+ * @param status - indication of whether the download succeeded
+ * @param msdu - the downloaded tx frame
+ * @param msdu_id - the txrx ID of the tx frame - this is used for
+ * locating the frame's tx descriptor
+ */
+void
+ol_tx_download_done_hl_retain(void *pdev,
+ A_STATUS status,
+ cdf_nbuf_t msdu, uint16_t msdu_id);
+
+/*
+ * For now, make the host HTT -> host txrx tx completion status
+ * match the target HTT -> host HTT tx completion status, so no
+ * translation is needed.
+ */
+/*
+ * host-only statuses use a different part of the number space
+ * than host-target statuses
+ */
+#define HTT_HOST_ONLY_STATUS_CODE_START 128
+enum htt_tx_status {
+ /* ok - successfully sent + acked */
+ htt_tx_status_ok = HTT_TX_COMPL_IND_STAT_OK,
+
+ /* discard - not sent (congestion control) */
+ htt_tx_status_discard = HTT_TX_COMPL_IND_STAT_DISCARD,
+
+ /* no_ack - sent, but no ack */
+ htt_tx_status_no_ack = HTT_TX_COMPL_IND_STAT_NO_ACK,
+
+ /* download_fail - host could not deliver the tx frame to target */
+ htt_tx_status_download_fail = HTT_HOST_ONLY_STATUS_CODE_START,
+};
+
+/**
+ * @brief Process a tx completion message sent by the target.
+ * @details
+ * When the target is done transmitting a tx frame (either because
+ * the frame was sent + acknowledged, or because the target gave up)
+ * it sends a tx completion message to the host.
+ * This notification function is used regardless of whether the
+ * transmission succeeded or not; the status argument indicates whether
+ * the transmission succeeded.
+ * This tx completion message indicates via the descriptor ID which
+ * tx frames were completed, and indicates via the status whether the
+ * frames were transmitted successfully.
+ * The host frees the completed descriptors / frames (updating stats
+ * in the process).
+ *
+ * @param pdev - the data physical device that sent the tx frames
+ * (registered with HTT as a context pointer during attach time)
+ * @param num_msdus - how many MSDUs are referenced by the tx completion
+ * message
+ * @param status - whether transmission was successful
+ * @param tx_msdu_id_iterator - abstract method of finding the IDs for the
+ * individual MSDUs referenced by the tx completion message, via the
+ * htt_tx_compl_desc_id API function
+ */
+void
+ol_tx_completion_handler(ol_txrx_pdev_handle pdev,
+ int num_msdus,
+ enum htt_tx_status status, void *tx_msdu_id_iterator);
+
+void ol_tx_credit_completion_handler(ol_txrx_pdev_handle pdev, int credits);
+
+/**
+ * @brief Init the total amount of target credit.
+ * @details
+ *
+ * @param pdev - the data physical device that sent the tx frames
+ * @param credit_delta - how much to increment the target's tx credit by
+ */
+void ol_tx_target_credit_init(struct ol_txrx_pdev_t *pdev, int credit_delta);
+
+/**
+ * @brief Process a tx completion message for a single MSDU.
+ * @details
+ * The ol_tx_single_completion_handler function performs the same tx
+ * completion processing as the ol_tx_completion_handler, but for a
+ * single frame.
+ * ol_tx_completion_handler is optimized to handle batch completions
+ * as efficiently as possible; in contrast ol_tx_single_completion_handler
+ * handles single frames as simply and generally as possible.
+ * Thus, this ol_tx_single_completion_handler function is suitable for
+ * intermittent usage, such as for tx mgmt frames.
+ *
+ * @param pdev - the data physical device that sent the tx frames
+ * @param status - whether transmission was successful
+ * @param tx_msdu_id - ID of the frame which completed transmission
+ */
+void
+ol_tx_single_completion_handler(ol_txrx_pdev_handle pdev,
+ enum htt_tx_status status, uint16_t tx_desc_id);
+
+/**
+ * @brief Update the amount of target credit.
+ * @details
+ * When the target finishes with an old transmit frame, it can use the
+ * space that was occupied by the old tx frame to store a new tx frame.
+ * This function is used to inform the txrx layer, where the HL tx download
+ * scheduler resides, about such updates to the target's tx credit.
+ * This credit update is done explicitly, rather than having the txrx layer
+ * update the credit count itself inside the ol_tx_completion handler
+ * function. This provides HTT with the flexibility to limit the rate of
+ * downloads from the TXRX layer's download scheduler, by controlling how
+ * much credit the download scheduler gets, and also provides the flexibility
+ * to account for a change in the tx memory pool size within the target.
+ * This function is only used for HL systems; in LL systems, each tx frame
+ * is assumed to use exactly one credit (for its target-side tx descriptor),
+ * and any rate limiting is managed within the target.
+ *
+ * @param pdev - the data physical device that sent the tx frames
+ * @param credit_delta - how much to increment the target's tx credit by
+ */
+void ol_tx_target_credit_update(struct ol_txrx_pdev_t *pdev, int credit_delta);
+
+/**
+ * @brief Process an rx indication message sent by the target.
+ * @details
+ * The target sends a rx indication message to the host as a
+ * notification that there are new rx frames available for the
+ * host to process.
+ * The HTT host layer locates the rx descriptors and rx frames
+ * associated with the indication, and calls this function to
+ * invoke the rx data processing on the new frames.
+ * (For LL, the rx descriptors and frames are delivered directly
+ * to the host via MAC DMA, while for HL the rx descriptor and
+ * frame for individual frames are combined with the rx indication
+ * message.)
+ * All MPDUs referenced by a rx indication message belong to the
+ * same peer-TID.
+ *
+ * @param pdev - the data physical device that received the frames
+ * (registered with HTT as a context pointer during attach time)
+ * @param rx_ind_msg - the network buffer holding the rx indication message
+ * (For HL, this netbuf also holds the rx desc and rx payload, but
+ * the data SW is agnostic to whether the desc and payload are
+ * piggybacked with the rx indication message.)
+ * @param peer_id - which peer sent this rx data
+ * @param tid - what (extended) traffic type the rx data is
+ * @param num_mpdu_ranges - how many ranges of MPDUs does the message describe.
+ * Each MPDU within the range has the same rx status.
+ */
+void
+ol_rx_indication_handler(ol_txrx_pdev_handle pdev,
+ cdf_nbuf_t rx_ind_msg,
+ uint16_t peer_id, uint8_t tid, int num_mpdu_ranges);
+
+/**
+ * @brief Process an rx fragment indication message sent by the target.
+ * @details
+ * The target sends a rx fragment indication message to the host as a
+ * notification that there are new rx fragments available for the
+ * host to process.
+ * The HTT host layer locates the rx descriptors and rx fragments
+ * associated with the indication, and calls this function to
+ * invoke the rx fragment data processing on the new fragments.
+ *
+ * @param pdev - the data physical device that received the frames
+ * (registered with HTT as a context pointer during attach time)
+ * @param rx_frag_ind_msg - the network buffer holding the rx fragment indication message
+ * @param peer_id - which peer sent this rx data
+ * @param tid - what (extended) traffic type the rx data is
+ */
+void ol_rx_frag_indication_handler(ol_txrx_pdev_handle pdev,
+ cdf_nbuf_t rx_frag_ind_msg,
+ uint16_t peer_id, uint8_t tid);
+
+/**
+ * @brief Process rx offload deliver indication message sent by the target.
+ * @details
+ * When the target exits offload mode, target delivers packets that it has
+ * held in its memory to the host using this message.
+ * Low latency case:
+ * The message contains the number of MSDUs that are being delivered by the
+ * target to the host. The packet itself resides in host ring along with some
+ * metadata describing the peer id, vdev id, tid, FW desc and length of
+ * the packet being delivered.
+ * High latency case:
+ * The message itself contains the payload of the MSDU being delivered by
+ * the target to the host. The message also contains meta data describing
+ * the packet such as peer id, vdev id, tid, FW desc and length of the packet
+ * being delivered. Refer to htt.h for the exact structure of the message.
+ * @param pdev - the data physical device that received the frame.
+ * @param msg - offload deliver indication message
+ * @param msdu_cnt - number of MSDUs being delivered.
+ */
+void
+ol_rx_offload_deliver_ind_handler(ol_txrx_pdev_handle pdev,
+ cdf_nbuf_t msg, int msdu_cnt);
+
+/**
+ * @brief Process a peer map message sent by the target.
+ * @details
+ * Each time the target allocates a new peer ID, it will inform the
+ * host via the "peer map" message. This function processes that
+ * message. The host data SW looks for a peer object whose MAC address
+ * matches the MAC address specified in the peer map message, and then
+ * sets up a mapping between the peer ID specified in the message and
+ * the peer object that was found.
+ *
+ * @param pdev - data physical device handle
+ * (registered with HTT as a context pointer during attach time)
+ * @param peer_id - ID generated by the target to refer to the peer in question
+ * The target may create multiple IDs for a single peer.
+ * @param vdev_id - Reference to the virtual device the peer is associated with
+ * @param peer_mac_addr - MAC address of the peer in question
+ * @param tx_ready - whether transmits to this peer can be done already, or
+ * need to wait for a call to peer_tx_ready (only applies to HL systems)
+ */
+void
+ol_rx_peer_map_handler(ol_txrx_pdev_handle pdev,
+ uint16_t peer_id,
+ uint8_t vdev_id, uint8_t *peer_mac_addr, int tx_ready);
+
+/**
+ * @brief notify the host that the target is ready to transmit to a new peer.
+ * @details
+ * Some targets can immediately accept tx frames for a new peer, as soon as
+ * the peer's association completes. Other target need a short setup time
+ * before they are ready to accept tx frames for the new peer.
+ * If the target needs time for setup, it will provide a peer_tx_ready
+ * message when it is done with the setup. This function forwards this
+ * notification from the target to the host's tx queue manager.
+ * This function only applies for HL systems, in which the host determines
+ * which peer a given tx frame is for, and stores the tx frames in queues.
+ *
+ * @param pdev - data physical device handle
+ * (registered with HTT as a context pointer during attach time)
+ * @param peer_id - ID for the new peer which can now accept tx frames
+ */
+void ol_txrx_peer_tx_ready_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id);
+
+/**
+ * @brief Process a peer unmap message sent by the target.
+ * @details
+ * Each time the target frees a peer ID, it will inform the host via the
+ * "peer unmap" message. This function processes that message.
+ * The host data SW uses the peer ID from the message to find the peer
+ * object from peer_map[peer_id], then invalidates peer_map[peer_id]
+ * (by setting it to NULL), and checks whether there are any remaining
+ * references to the peer object. If not, the function deletes the
+ * peer object.
+ *
+ * @param pdev - data physical device handle
+ * (registered with HTT as a context pointer during attach time)
+ * @param peer_id - ID that is being freed.
+ * The target may create multiple IDs for a single peer.
+ */
+void ol_rx_peer_unmap_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id);
+
+/**
+ * @brief Process a security indication message sent by the target.
+ * @details
+ * When a key is assigned to a peer, the target will inform the host
+ * with a security indication message.
+ * The host remembers the security type, and infers whether a rx PN
+ * check is needed.
+ *
+ * @param pdev - data physical device handle
+ * @param peer_id - which peer the security info is for
+ * @param sec_type - which type of security / key the peer is using
+ * @param is_unicast - whether security spec is for a unicast or multicast key
+ * @param michael_key - key used for TKIP MIC (if sec_type == TKIP)
+ * @param rx_pn - RSC used for WAPI PN replay check (if sec_type == WAPI)
+ */
+void
+ol_rx_sec_ind_handler(ol_txrx_pdev_handle pdev,
+ uint16_t peer_id,
+ enum htt_sec_type sec_type,
+ int is_unicast, uint32_t *michael_key, uint32_t *rx_pn);
+
+/**
+ * @brief Process an ADDBA message sent by the target.
+ * @details
+ * When the target notifies the host of an ADDBA event for a specified
+ * peer-TID, the host will set up the rx reordering state for the peer-TID.
+ * Specifically, the host will create a rx reordering array whose length
+ * is based on the window size specified in the ADDBA.
+ *
+ * @param pdev - data physical device handle
+ * (registered with HTT as a context pointer during attach time)
+ * @param peer_id - which peer the ADDBA event is for
+ * @param tid - which traffic ID within the peer the ADDBA event is for
+ * @param win_sz - how many sequence numbers are in the ARQ block ack window
+ * set up by the ADDBA event
+ * @param start_seq_num - the initial value of the sequence number during the
+ * block ack agreement, as specified by the ADDBA request.
+ * @param failed - indicate whether the target's ADDBA setup succeeded:
+ * 0 -> success, 1 -> fail
+ */
+void
+ol_rx_addba_handler(ol_txrx_pdev_handle pdev,
+ uint16_t peer_id,
+ uint8_t tid,
+ uint8_t win_sz, uint16_t start_seq_num, uint8_t failed);
+
+/**
+ * @brief Process a DELBA message sent by the target.
+ * @details
+ * When the target notifies the host of a DELBA event for a specified
+ * peer-TID, the host will clean up the rx reordering state for the peer-TID.
+ * Specifically, the host will remove the rx reordering array, and will
+ * set the reorder window size to be 1 (stop and go ARQ).
+ *
+ * @param pdev - data physical device handle
+ * (registered with HTT as a context pointer during attach time)
+ * @param peer_id - which peer the DELBA event is for
+ * @param tid - which traffic ID within the peer the DELBA event is for
+ */
+void
+ol_rx_delba_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id, uint8_t tid);
+
+enum htt_rx_flush_action {
+ htt_rx_flush_release,
+ htt_rx_flush_discard,
+};
+
+/**
+ * @brief Process a rx reorder flush message sent by the target.
+ * @details
+ * The target's rx reorder logic can send a flush indication to the
+ * host's rx reorder buffering either as a flush IE within a rx
+ * indication message, or as a standalone rx reorder flush message.
+ * This ol_rx_flush_handler function processes the standalone rx
+ * reorder flush message from the target.
+ * The flush message specifies a range of sequence numbers whose
+ * rx frames are flushed.
+ * Some sequence numbers within the specified range may not have
+ * rx frames; the host needs to check for each sequence number in
+ * the specified range whether there are rx frames held for that
+ * sequence number.
+ *
+ * @param pdev - data physical device handle
+ * (registered with HTT as a context pointer during attach time)
+ * @param peer_id - which peer's rx data is being flushed
+ * @param tid - which traffic ID within the peer has the rx data being flushed
+ * @param seq_num_start - Which sequence number within the rx reordering
+ * buffer the flushing should start with.
+ * This is the LSBs of the 802.11 sequence number.
+ * This sequence number is masked with the rounded-to-power-of-two
+ * window size to generate a reorder buffer index.
+ * The flush includes this initial sequence number.
+ * @param seq_num_end - Which sequence number within the rx reordering
+ * buffer the flushing should stop at.
+ * This is the LSBs of the 802.11 sequence number.
+ * This sequence number is masked with the rounded-to-power-of-two
+ * window size to generate a reorder buffer index.
+ * The flush excludes this final sequence number.
+ * @param action - whether to release or discard the rx frames
+ */
+void
+ol_rx_flush_handler(ol_txrx_pdev_handle pdev,
+ uint16_t peer_id,
+ uint8_t tid,
+ uint16_t seq_num_start,
+ uint16_t seq_num_end, enum htt_rx_flush_action action);
+
+/**
+ * @brief Process a rx pn indication message
+ * @details
+ * When the peer is configured to get PN checking done in target,
+ * the target instead of sending reorder flush/release messages
+ * sends PN indication messages which contain the start and end
+ * sequence numbers to be flushed/released along with the sequence
+ * numbers of MPDUs that failed the PN check in target.
+ *
+ * @param pdev - data physical device handle
+ * (registered with HTT as a context pointer during attach time)
+ * @param peer_id - which peer's rx data is being flushed
+ * @param tid - which traffic ID within the peer
+ * @param seq_num_start - Which sequence number within the rx reordering
+ * buffer to start with.
+ * This is the LSBs of the 802.11 sequence number.
+ * This sequence number is masked with the rounded-to-power-of-two
+ * window size to generate a reorder buffer index.
+ * This is the initial sequence number.
+ * @param seq_num_end - Which sequence number within the rx reordering
+ * buffer to stop at.
+ * This is the LSBs of the 802.11 sequence number.
+ * This sequence number is masked with the rounded-to-power-of-two
+ * window size to generate a reorder buffer index.
+ * The processing stops right before this sequence number
+ * @param pn_ie_cnt - Indicates the number of PN information elements.
+ * @param pn_ie - Pointer to the array of PN information elements. Each
+ * PN information element contains the LSBs of the 802.11 sequence number
+ * of the MPDU that failed the PN checking in target.
+ */
+void
+ol_rx_pn_ind_handler(ol_txrx_pdev_handle pdev,
+ uint16_t peer_id,
+ uint8_t tid,
+ int seq_num_start,
+ int seq_num_end, uint8_t pn_ie_cnt, uint8_t *pn_ie);
+
+/**
+ * @brief Process a stats message sent by the target.
+ * @details
+ * The host can request target for stats.
+ * The target sends the stats to the host via a confirmation message.
+ * This ol_txrx_fw_stats_handler function processes the confirmation message.
+ * Currently, this processing consists of copying the stats from the message
+ * buffer into the txrx pdev object, and waking the sleeping host context
+ * that requested the stats.
+ *
+ * @param pdev - data physical device handle
+ * (registered with HTT as a context pointer during attach time)
+ * @param cookie - Value echoed from the cookie in the stats request
+ * message. This allows the host SW to find the stats request object.
+ * (Currently, this cookie is unused.)
+ * @param stats_info_list - stats confirmation message contents, containing
+ * a list of the stats requested from the target
+ */
+void
+ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
+ uint64_t cookie, uint8_t *stats_info_list);
+
+/**
+ * @brief Process a tx inspect message sent by the target.
+ * @details:
+ * TODO: update
+ * This tx inspect message indicates via the descriptor ID
+ * which tx frames are to be inspected by host. The host
+ * re-injects the packet back to the host for a number of
+ * cases.
+ *
+ * @param pdev - the data physical device that sent the tx frames
+ * (registered with HTT as a context pointer during attach time)
+ * @param num_msdus - how many MSDUs are referenced by the tx completion
+ * message
+ * @param tx_desc_id_iterator - abstract method of finding the IDs for the
+ * individual MSDUs referenced by the tx completion message, via the
+ * htt_tx_compl_desc_id API function
+ */
+void
+ol_tx_inspect_handler(ol_txrx_pdev_handle pdev,
+ int num_msdus, void *tx_desc_id_iterator);
+
+/**
+ * @brief Get the UAPSD mask.
+ * @details
+ * This function will return the UAPSD TID mask.
+ *
+ * @param txrx_pdev - pointer to the txrx pdev object
+ * @param peer_id - PeerID.
+ * @return uapsd mask value
+ */
+uint8_t
+ol_txrx_peer_uapsdmask_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id);
+
+/**
+ * @brief Get the Qos Capable.
+ * @details
+ * This function will return the txrx_peer qos_capable.
+ *
+ * @param txrx_pdev - pointer to the txrx pdev object
+ * @param peer_id - PeerID.
+ * @return qos_capable value
+ */
+uint8_t
+ol_txrx_peer_qoscapable_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id);
+
+/**
+ * @brief Process an rx indication message sent by the target.
+ * @details
+ * The target sends a rx indication message to the host as a
+ * notification that there are new rx frames available for the
+ * host to process.
+ * The HTT host layer locates the rx descriptors and rx frames
+ * associated with the indication, and calls this function to
+ * invoke the rx data processing on the new frames.
+ * All MPDUs referenced by a rx indication message belong to the
+ * same peer-TID. The frames indicated have been re-ordered by
+ * the target.
+ *
+ * @param pdev - the data physical device that received the frames
+ * (registered with HTT as a context pointer during attach time)
+ * @param rx_ind_msg - the network buffer holding the rx indication message
+ * @param peer_id - which peer sent this rx data
+ * @param tid - what (extended) traffic type the rx data is
+ * @param is_offload - is this an offload indication?
+ */
+void
+ol_rx_in_order_indication_handler(ol_txrx_pdev_handle pdev,
+ cdf_nbuf_t rx_ind_msg,
+ uint16_t peer_id,
+ uint8_t tid, uint8_t is_offload);
+
+#endif /* _OL_TXRX_HTT_API__H_ */
diff --git a/dp/ol/inc/ol_txrx_osif_api.h b/dp/ol/inc/ol_txrx_osif_api.h
new file mode 100644
index 000000000000..1bc73d28a550
--- /dev/null
+++ b/dp/ol/inc/ol_txrx_osif_api.h
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2012, 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_txrx_osif_api.h
+ * @brief Define the host data API functions called by the host OS shim SW.
+ */
+#ifndef _OL_TXRX_OSIF_API__H_
+#define _OL_TXRX_OSIF_API__H_
+
+#include <cdf_nbuf.h> /* cdf_nbuf_t */
+
+#include <ol_osif_api.h> /* ol_osif_vdev_handle */
+#include <ol_txrx_api.h> /* ol_txrx_pdev_handle, etc. */
+#include <ol_txrx_ctrl_api.h>
+#include "cds_sched.h"
+
+/**
+ * struct ol_rx_cached_buf - rx cached buffer
+ * @list: linked list
+ * @buf: skb buffer
+ */
+struct ol_rx_cached_buf {
+ struct list_head list;
+ cdf_nbuf_t buf;
+};
+
+/**
+ * @typedef ol_txrx_rx_fp
+ * @brief receive function to hand batches of data frames from txrx to OS shim
+ */
+typedef void (*ol_txrx_rx_fp)(void *osif_dev, cdf_nbuf_t msdus);
+
+/**
+ * @typedef ol_txrx_tx_fp
+ * @brief top-level transmit function
+ */
+typedef cdf_nbuf_t (*ol_txrx_tx_fp)(ol_txrx_vdev_handle data_vdev,
+ cdf_nbuf_t msdu_list);
+
+/**
+ * @typedef ol_txrx_tx_non_std_fp
+ * @brief top-level transmit function for non-standard tx frames
+ * @details
+ * This function pointer provides an alternative to the ol_txrx_tx_fp
+ * to support non-standard transmits. In particular, this function
+ * supports transmission of:
+ * 1. "Raw" frames
+ * These raw frames already have an 802.11 header; the usual
+ * 802.11 header encapsulation by the driver does not apply.
+ * 2. TSO segments
+ * During tx completion, the txrx layer needs to reclaim the buffer
+ * that holds the ethernet/IP/TCP header created for the TSO segment.
+ * Thus, these tx frames need to be marked as TSO, to show that they
+ * need this special handling during tx completion.
+ *
+ * @param data_vdev - which virtual device should transmit the frame
+ * @param tx_spec - what non-standard operations to apply to the tx frame
+ * @param msdu_list - tx frame(s), in a null-terminated list
+ */
+typedef cdf_nbuf_t (*ol_txrx_tx_non_std_fp)(ol_txrx_vdev_handle data_vdev,
+ enum ol_tx_spec tx_spec,
+ cdf_nbuf_t msdu_list);
+
+struct txrx_rx_metainfo;
+
+/**
+ * @typedef ol_txrx_tx_flow_control_fp
+ * @brief tx flow control notification function from txrx to OS shim
+ * @param osif_dev - the virtual device's OS shim object
+ * @param tx_resume - tx os q should be resumed or not
+ */
+typedef void (*ol_txrx_tx_flow_control_fp)(void *osif_dev,
+ bool tx_resume);
+
+/**
+ * struct ol_txrx_desc_type - txrx descriptor type
+ * @sta_id: sta id
+ * @is_qos_enabled: is station qos enabled
+ * @is_wapi_supported: is station wapi supported
+ */
+struct ol_txrx_desc_type {
+ uint8_t sta_id;
+ uint8_t is_qos_enabled;
+ uint8_t is_wapi_supported;
+};
+
+
+typedef CDF_STATUS (*ol_rx_callback_fp)(void *p_cds_gctx,
+ cdf_nbuf_t pDataBuff,
+ uint8_t ucSTAId);
+
+typedef void (*ol_tx_pause_callback_fp)(uint8_t vdev_id,
+ enum netif_action_type action,
+ enum netif_reason_type reason);
+
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+CDF_STATUS ol_txrx_register_pause_cb(ol_tx_pause_callback_fp pause_cb);
+#else
+static inline
+CDF_STATUS ol_txrx_register_pause_cb(ol_tx_pause_callback_fp pause_cb)
+{
+ return CDF_STATUS_SUCCESS;
+
+}
+#endif
+
+#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
+
+int ol_txrx_register_tx_flow_control (uint8_t vdev_id,
+ ol_txrx_tx_flow_control_fp flowControl,
+ void *osif_fc_ctx);
+
+int ol_txrx_deregister_tx_flow_control_cb(uint8_t vdev_id);
+
+void ol_txrx_flow_control_cb(ol_txrx_vdev_handle vdev,
+ bool tx_resume);
+bool
+ol_txrx_get_tx_resource(uint8_t sta_id,
+ unsigned int low_watermark,
+ unsigned int high_watermark_offset);
+
+int
+ol_txrx_ll_set_tx_pause_q_depth(uint8_t vdev_id, int pause_q_depth);
+#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
+
+/**
+ * struct ol_txrx_osif_ops - tx and rx function hooks exchanged between
+ * the txrx SW and the OS shim when a vdev is registered
+ */
+
+struct ol_txrx_osif_ops {
+ /* tx function pointers - specified by txrx, stored by OS shim */
+ struct {
+ ol_txrx_tx_fp std;
+ ol_txrx_tx_non_std_fp non_std;
+ ol_txrx_tx_flow_control_fp flow_control_cb;
+ } tx;
+
+ /* rx function pointers - specified by OS shim, stored by txrx */
+ struct {
+ ol_txrx_rx_fp std;
+ } rx;
+};
+
+/**
+ * @brief Link a vdev's data object with the matching OS shim vdev object.
+ * @details
+ * The data object for a virtual device is created by the function
+ * ol_txrx_vdev_attach. However, rather than fully linking the
+ * data vdev object with the vdev objects from the other subsystems
+ * that the data vdev object interacts with, the txrx_vdev_attach
+ * function focuses primarily on creating the data vdev object.
+ * After the creation of both the data vdev object and the OS shim
+ * vdev object, this txrx_osif_vdev_attach function is used to connect
+ * the two vdev objects, so the data SW can use the OS shim vdev handle
+ * when passing rx data received by a vdev up to the OS shim.
+ *
+ * @param txrx_vdev - the virtual device's data object
+ * @param osif_vdev - the virtual device's OS shim object
+ * @param txrx_ops - (pointers to) the functions used for tx and rx data xfer
+ * There are two portions of these txrx operations.
+ * The rx portion is filled in by OSIF SW before calling
+ * ol_txrx_osif_vdev_register; inside the ol_txrx_osif_vdev_register
+ * the txrx SW stores a copy of these rx function pointers, to use
+ * as it delivers rx data frames to the OSIF SW.
+ * The tx portion is filled in by the txrx SW inside
+ * ol_txrx_osif_vdev_register; when the function call returns,
+ * the OSIF SW stores a copy of these tx functions to use as it
+ * delivers tx data frames to the txrx SW.
+ * The rx function pointer inputs consist of the following:
+ * rx: the OS shim rx function to deliver rx data frames to.
+ * This can have different values for different virtual devices,
+ * e.g. so one virtual device's OS shim directly hands rx frames to
+ * the OS, but another virtual device's OS shim filters out P2P
+ * messages before sending the rx frames to the OS.
+ * The netbufs delivered to the osif_rx function are in the format
+ * specified by the OS to use for tx and rx frames (either 802.3 or
+ * native WiFi).
+ * rx_mon: the OS shim rx monitor function to deliver monitor data to
+ * Though in practice, it is probable that the same function will
+ * be used for delivering rx monitor data for all virtual devices,
+ * in theory each different virtual device can have a different
+ * OS shim function for accepting rx monitor data.
+ * The netbufs delivered to the osif_rx_mon function are in 802.11
+ * format. Each netbuf holds a 802.11 MPDU, not an 802.11 MSDU.
+ * Depending on compile-time configuration, each netbuf may also
+ * have a monitor-mode encapsulation header such as a radiotap
+ * header added before the MPDU contents.
+ * The tx function pointer outputs consist of the following:
+ * tx: the tx function pointer for standard data frames
+ * This function pointer is set by the txrx SW to perform
+ * host-side transmit operations based on whether a HL or LL
+ * host/target interface is in use.
+ * tx_non_std: the tx function pointer for non-standard data frames,
+ * such as TSO frames, explicitly-prioritized frames, or "raw"
+ * frames which skip some of the tx operations, such as 802.11
+ * MAC header encapsulation.
+ */
+void
+ol_txrx_osif_vdev_register(ol_txrx_vdev_handle txrx_vdev,
+ void *osif_vdev, struct ol_txrx_osif_ops *txrx_ops);
+
+/**
+ * @brief Divide a jumbo TCP frame into smaller segments.
+ * @details
+ * For efficiency, the protocol stack above the WLAN driver may operate
+ * on jumbo tx frames, which are larger than the 802.11 MTU.
+ * The OSIF SW uses this txrx API function to divide the jumbo tx TCP frame
+ * into a series of segment frames.
+ * The segments are created as clones of the input jumbo frame.
+ * The txrx SW generates a new encapsulation header (ethernet + IP + TCP)
+ * for each of the output segment frames. The exact format of this header,
+ * e.g. 802.3 vs. Ethernet II, and IPv4 vs. IPv6, is chosen to match the
+ * header format of the input jumbo frame.
+ * The input jumbo frame is not modified.
+ * After the ol_txrx_osif_tso_segment returns, the OSIF SW needs to perform
+ * DMA mapping on each of the segment network buffers.
+ *
+ * @param txrx_vdev - which virtual device will transmit the TSO segments
+ * @param max_seg_payload_bytes - the maximum size for the TCP payload of
+ * each segment frame.
+ * This does not include the ethernet + IP + TCP header sizes.
+ * @param jumbo_tcp_frame - jumbo frame which needs to be cloned+segmented
+ * @return
+ * NULL if the segmentation fails, - OR -
+ * a NULL-terminated list of segment network buffers
+ */
+cdf_nbuf_t ol_txrx_osif_tso_segment(ol_txrx_vdev_handle txrx_vdev,
+ int max_seg_payload_bytes,
+ cdf_nbuf_t jumbo_tcp_frame);
+
+cdf_nbuf_t ol_tx_send_data_frame(uint8_t sta_id, cdf_nbuf_t skb,
+ uint8_t proto_type);
+
+#ifdef IPA_OFFLOAD
+cdf_nbuf_t ol_tx_send_ipa_data_frame(void *vdev,
+ cdf_nbuf_t skb);
+#endif
+
+CDF_STATUS ol_txrx_register_peer(ol_rx_callback_fp rxcb,
+ struct ol_txrx_desc_type *sta_desc);
+
+CDF_STATUS ol_txrx_clear_peer(uint8_t sta_id);
+
+CDF_STATUS ol_txrx_change_peer_state(uint8_t sta_id,
+ enum ol_txrx_peer_state sta_state,
+ bool roam_synch_in_progress);
+
+void ol_rx_data_process(struct ol_txrx_peer_t *peer,
+ cdf_nbuf_t rx_buf_list);
+
+void ol_txrx_flush_rx_frames(struct ol_txrx_peer_t *peer,
+ bool drop);
+
+#if defined(FEATURE_LRO)
+void ol_register_lro_flush_cb(void (handler)(void *), void *data);
+void ol_deregister_lro_flush_cb(void);
+#endif
+#endif /* _OL_TXRX_OSIF_API__H_ */
diff --git a/dp/ol/inc/ol_txrx_stats.h b/dp/ol/inc/ol_txrx_stats.h
new file mode 100644
index 000000000000..44b384d5b245
--- /dev/null
+++ b/dp/ol/inc/ol_txrx_stats.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2012, 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_txrx_stats.h
+ * @brief Functions provided for visibility and debugging.
+ * NOTE: This file is used by both kernel driver SW and userspace SW.
+ * Thus, do not reference or use any kernel header files or defs in this file!
+ */
+#ifndef _OL_TXRX_STATS__H_
+#define _OL_TXRX_STATS__H_
+
+#include <athdefs.h> /* uint64_t */
+
+
+
+struct ol_txrx_stats_elem {
+ uint64_t pkts;
+ uint64_t bytes;
+};
+
+#define NUM_MAX_TSO_SEGS 8
+#define NUM_MAX_TSO_SEGS_MASK (NUM_MAX_TSO_SEGS - 1)
+
+#define NUM_MAX_TSO_MSDUS 128
+#define NUM_MAX_TSO_MSDUS_MASK (NUM_MAX_TSO_MSDUS - 1)
+
+struct ol_txrx_stats_tso_msdu {
+ struct cdf_tso_seg_t tso_segs[NUM_MAX_TSO_SEGS];
+ uint32_t num_seg;
+ uint32_t tso_seg_idx;
+};
+
+struct ol_txrx_stats_tso_info {
+ struct ol_txrx_stats_tso_msdu tso_msdu_info[NUM_MAX_TSO_MSDUS];
+ uint32_t tso_msdu_idx;
+};
+
+/**
+ * @brief data stats published by the host txrx layer
+ */
+struct ol_txrx_stats {
+ struct {
+ /* MSDUs given to the txrx layer by the management stack */
+ struct ol_txrx_stats_elem mgmt;
+ /* MSDUs successfully sent across the WLAN */
+ struct ol_txrx_stats_elem delivered;
+ struct {
+ /* MSDUs that the host did not accept */
+ struct ol_txrx_stats_elem host_reject;
+ /* MSDUs which could not be downloaded to the target */
+ struct ol_txrx_stats_elem download_fail;
+ /* MSDUs which the target discarded
+ (lack of memory or old age) */
+ struct ol_txrx_stats_elem target_discard;
+ /* MSDUs which the target sent but
+ couldn't get an ack for */
+ struct ol_txrx_stats_elem no_ack;
+ } dropped;
+ /* contains information on packets received per tx completion */
+ struct {
+ uint32_t pkts_1;
+ uint32_t pkts_2_10;
+ uint32_t pkts_11_20;
+ uint32_t pkts_21_30;
+ uint32_t pkts_31_40;
+ uint32_t pkts_41_50;
+ uint32_t pkts_51_60;
+ uint32_t pkts_61_plus;
+ } comp_histogram;
+ /* TSO (TCP segmentation offload) information */
+ struct {
+ struct ol_txrx_stats_elem tso_pkts;
+#if defined(FEATURE_TSO)
+ struct ol_txrx_stats_tso_info tso_info;
+#endif
+ } tso;
+ } tx;
+ struct {
+ /* MSDUs given to the OS shim */
+ struct ol_txrx_stats_elem delivered;
+ struct {
+ /* MSDUs forwarded to network stack */
+ u_int32_t packets_stack;
+ /* MSDUs forwarded from the rx path to the tx path */
+ u_int32_t packets_fwd;
+ /* MSDUs forwarded to stack and tx path */
+ u_int32_t packets_stack_n_fwd;
+ } intra_bss_fwd;
+ } rx;
+};
+
+/*
+ * Structure to consolidate host stats
+ */
+struct ieee80211req_ol_ath_host_stats {
+ struct ol_txrx_stats txrx_stats;
+ struct {
+ int pkt_q_fail_count;
+ int pkt_q_empty_count;
+ int send_q_empty_count;
+ } htc;
+ struct {
+ int pipe_no_resrc_count;
+ int ce_ring_delta_fail_count;
+ } hif;
+};
+
+#endif /* _OL_TXRX_STATS__H_ */
diff --git a/dp/ol/inc/ol_vowext_dbg_defs.h b/dp/ol/inc/ol_vowext_dbg_defs.h
new file mode 100644
index 000000000000..3be07aca6368
--- /dev/null
+++ b/dp/ol/inc/ol_vowext_dbg_defs.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2012, 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _VOW_DEFINES__H_
+#define _VOW_DEFINES__H_
+
+#define UDP_CKSUM_OFFSET 40 /* UDP check sum offset in network buffer */
+#define RTP_HDR_OFFSET 42 /* RTP header offset in network buffer */
+#define EXT_HDR_OFFSET 54 /* Extension header offset in network buffer */
+#define UDP_PDU_RTP_EXT 0x90 /* ((2 << 6) | (1 << 4)) RTP V2 + X bit */
+#define IP_VER4_N_NO_EXTRA_HEADERS 0x45
+#define IPERF3_DATA_OFFSET 12 /* iperf3 data offset from EXT_HDR_OFFSET */
+#define HAL_RX_40 0x08 /* 40 MHz */
+#define HAL_RX_GI 0x04 /* full gi */
+
+struct vow_extstats {
+ uint8_t rx_rssi_ctl0; /* control channel chain0 rssi */
+ uint8_t rx_rssi_ctl1; /* control channel chain1 rssi */
+ uint8_t rx_rssi_ctl2; /* control channel chain2 rssi */
+ uint8_t rx_rssi_ext0; /* extension channel chain0 rssi */
+ uint8_t rx_rssi_ext1; /* extension channel chain1 rssi */
+ uint8_t rx_rssi_ext2; /* extension channel chain2 rssi */
+ uint8_t rx_rssi_comb; /* combined RSSI value */
+ uint8_t rx_bw; /* Band width 0-20, 1-40, 2-80 */
+ uint8_t rx_sgi; /* Guard interval, 0-Long GI, 1-Short GI */
+ uint8_t rx_nss; /* Number of spatial streams */
+ uint8_t rx_mcs; /* Rate MCS value */
+ uint8_t rx_ratecode; /* Hardware rate code */
+ uint8_t rx_rs_flags; /* Receive misc flags */
+ uint8_t rx_moreaggr; /* 0 - non aggr frame */
+ uint32_t rx_macTs; /* Time stamp */
+ uint16_t rx_seqno; /* rx sequence number */
+};
+
+/**
+ * @brief populates vow ext stats in given network buffer.
+ * @param msdu - network buffer handle
+ * @param pdev - handle to htt dev.
+ */
+void ol_ath_add_vow_extstats(htt_pdev_handle pdev, cdf_nbuf_t msdu);
+
+#endif /* _VOW_DEFINES__H_ */
diff --git a/dp/txrx/ipv6_defs.h b/dp/txrx/ipv6_defs.h
new file mode 100644
index 000000000000..c1c52babe485
--- /dev/null
+++ b/dp/txrx/ipv6_defs.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2012-2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _IPV6__H_
+#define _IPV6__H_
+
+#if defined(ATH_TARGET)
+#include <osapi.h> /* A_UINT8 */
+#else
+#include <a_types.h> /* A_UINT8 */
+#endif
+
+/* utilities for converting between network byte order and native endianness */
+
+/* reverse the four bytes of a 32-bit value */
+#ifndef BYTESWAP32
+#define BYTESWAP32(x) \
+	((((x) & 0x000000ff) << 24) /* byte 0 -> byte 3 */ | \
+	 (((x) & 0x0000ff00) << 8)  /* byte 1 -> byte 2 */ | \
+	 (((x) & 0x00ff0000) >> 8)  /* byte 2 -> byte 1 */ | \
+	 (((x) & 0xff000000) >> 24) /* byte 3 -> byte 0 */)
+#endif /* BYTESWAP32 */
+
+/*
+ * BE_TO_CPU32: big-endian (network order) to host order.
+ * A no-op on big-endian hosts; a byte swap otherwise.
+ */
+#ifndef BE_TO_CPU32
+#if defined(ATH_TARGET)
+/* assume target is little-endian */
+#define BE_TO_CPU32(x) BYTESWAP32(x)
+#else
+#ifdef BIG_ENDIAN_HOST
+#define BE_TO_CPU32(x) (x)
+#else
+#define BE_TO_CPU32(x) BYTESWAP32(x)
+#endif
+#endif
+#endif /* BE_TO_CPU32 */
+
+/* IPv6 header definition */
+
+/*
+ * An IPv6 address is 128 bits = 16 bytes long (RFC 4291 / RFC 8200).
+ * With 16-byte addresses, struct ipv6_hdr_t below is the full 40-byte
+ * fixed IPv6 header, so IPV6_HDR_LEN and the offsetof()-based field
+ * offsets match the on-wire layout.  (The previous value of 4 made the
+ * struct only 12 bytes, giving wrong offsets for next_hdr consumers.)
+ */
+#define IPV6_ADDR_LEN 16 /* bytes */
+
+/* fixed IPv6 header; all fields in network byte order */
+struct ipv6_hdr_t {
+	A_UINT32 ver_tclass_flowlabel; /* version, traffic class, and flow label */
+	A_UINT8 pyld_len[2];    /* payload length */
+	A_UINT8 next_hdr;       /* next header / upper-layer protocol */
+	A_UINT8 hop_limit;      /* decremented at each forwarding hop */
+	A_UINT8 src_addr[IPV6_ADDR_LEN]; /* source IPv6 address */
+	A_UINT8 dst_addr[IPV6_ADDR_LEN]; /* destination IPv6 address */
+};
+
+#define IPV6_HDR_LEN (sizeof(struct ipv6_hdr_t))
+#define IPV6_HDR_OFFSET_NEXT_HDR (offsetof(struct ipv6_hdr_t, next_hdr))
+#define IPV6_HDR_OFFSET_DST_ADDR (offsetof(struct ipv6_hdr_t, dst_addr[0]))
+
+/* IPv6 header field access macros */
+
+#define IPV6_HDR_VERSION_M 0xF0000000
+#define IPV6_HDR_VERSION_S 28
+
+#define IPV6_HDR_TRAFFIC_CLASS_M 0x0FF00000
+#define IPV6_HDR_TRAFFIC_CLASS_S 20
+
+#define IPV6_HDR_FLOW_LABEL_M 0x000FFFFF
+#define IPV6_HDR_FLOW_LABEL_S 0
+
+/* extract the 4-bit version field from the first word of the header */
+static inline A_UINT8 ipv6_version(struct ipv6_hdr_t *ipv6_hdr)
+{
+	A_UINT32 vtf = BE_TO_CPU32(ipv6_hdr->ver_tclass_flowlabel);
+
+	return (vtf & IPV6_HDR_VERSION_M) >> IPV6_HDR_VERSION_S;
+}
+
+/* extract the 8-bit traffic class field from the first word of the header */
+static inline A_UINT8 ipv6_traffic_class(struct ipv6_hdr_t *ipv6_hdr)
+{
+	A_UINT32 vtf = BE_TO_CPU32(ipv6_hdr->ver_tclass_flowlabel);
+
+	return (A_UINT8)((vtf & IPV6_HDR_TRAFFIC_CLASS_M) >>
+			 IPV6_HDR_TRAFFIC_CLASS_S);
+}
+
+/* extract the 20-bit flow label field from the first word of the header */
+static inline A_UINT32 ipv6_flow_label(struct ipv6_hdr_t *ipv6_hdr)
+{
+	A_UINT32 vtf = BE_TO_CPU32(ipv6_hdr->ver_tclass_flowlabel);
+
+	return (vtf & IPV6_HDR_FLOW_LABEL_M) >> IPV6_HDR_FLOW_LABEL_S;
+}
+
+#endif /* _IPV6__H_ */
diff --git a/dp/txrx/ol_cfg.c b/dp/txrx/ol_cfg.c
new file mode 100644
index 000000000000..c921e2dddaf9
--- /dev/null
+++ b/dp/txrx/ol_cfg.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include <ol_cfg.h>
+#include <ol_if_athvar.h>
+
+unsigned int vow_config = 0;
+
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+/**
+ * ol_tx_set_flow_control_parameters() - set flow control parameters
+ * @cfg_ctx: cfg context
+ * @cfg_param: cfg parameters
+ *
+ * Copies the tx flow-control start-queue offset and stop-queue threshold
+ * from the caller-supplied parameter struct into the pdev config context.
+ *
+ * Return: none
+ */
+static
+void ol_tx_set_flow_control_parameters(struct txrx_pdev_cfg_t *cfg_ctx,
+	struct txrx_pdev_cfg_param_t cfg_param)
+{
+	cfg_ctx->tx_flow_start_queue_offset =
+					cfg_param.tx_flow_start_queue_offset;
+	cfg_ctx->tx_flow_stop_queue_th =
+					cfg_param.tx_flow_stop_queue_th;
+}
+#else
+/* stub: tx flow control v2 is not compiled in, so there is nothing to set */
+static
+void ol_tx_set_flow_control_parameters(struct txrx_pdev_cfg_t *cfg_ctx,
+	struct txrx_pdev_cfg_param_t cfg_param)
+{
+	return;
+}
+#endif
+
+#if CFG_TGT_DEFAULT_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK
+/*
+ * ol_defrag_timeout_check() - compile-time selected flag telling whether
+ * the host must perform the rx defrag-timeout duplicate detection check
+ * (1 when the target build skips it, 0 otherwise).
+ */
+static inline
+uint8_t ol_defrag_timeout_check(void)
+{
+	return 1;
+}
+#else
+static inline
+uint8_t ol_defrag_timeout_check(void)
+{
+	return 0;
+}
+#endif
+
+/* FIX THIS -
+ * For now, all these configuration parameters are hardcoded.
+ * Many of these should actually be determined dynamically instead.
+ */
+
+/**
+ * ol_pdev_cfg_attach() - allocate and populate the txrx pdev config context
+ * @osdev: OS device handle (currently unused; kept for the attach signature)
+ * @cfg_param: externally supplied configuration values
+ *
+ * Allocates a struct txrx_pdev_cfg_t and fills it with a mix of hardcoded
+ * defaults and values copied from @cfg_param.
+ *
+ * Return: the config context as an opaque ol_pdev_handle, or NULL if the
+ * allocation failed
+ */
+ol_pdev_handle ol_pdev_cfg_attach(cdf_device_t osdev,
+				  struct txrx_pdev_cfg_param_t cfg_param)
+{
+	struct txrx_pdev_cfg_t *cfg_ctx;
+
+	cfg_ctx = cdf_mem_malloc(sizeof(*cfg_ctx));
+	if (!cfg_ctx) {
+		printk(KERN_ERR "cfg ctx allocation failed\n");
+		return NULL;
+	}
+
+	/*
+	 * Need to change HTT_LL_TX_HDR_SIZE_IP accordingly.
+	 * Include payload, up to the end of UDP header for IPv4 case
+	 */
+	cfg_ctx->tx_download_size = 16;
+	/*
+	 * PN (replay) check is enabled here.
+	 * NOTE(review): the previous comment claimed the check was
+	 * "temporarily disabled" for Riva/Pronto, but the flag is set
+	 * to 1 -- confirm which is intended.
+	 */
+	cfg_ctx->rx_pn_check = 1;
+	cfg_ctx->defrag_timeout_check = ol_defrag_timeout_check();
+	cfg_ctx->max_peer_id = 511;
+	cfg_ctx->max_vdev = CFG_TGT_NUM_VDEV;
+	cfg_ctx->pn_rx_fwd_check = 1;
+	cfg_ctx->frame_type = wlan_frm_fmt_802_3;
+	cfg_ctx->max_thruput_mbps = 800;
+	cfg_ctx->max_nbuf_frags = 1;
+	cfg_ctx->vow_config = vow_config;
+	cfg_ctx->target_tx_credit = CFG_TGT_NUM_MSDU_DESC;
+	cfg_ctx->throttle_period_ms = 40;
+	cfg_ctx->rx_fwd_disabled = 0;
+	cfg_ctx->is_packet_log_enabled = 0;
+	cfg_ctx->is_full_reorder_offload = cfg_param.is_full_reorder_offload;
+	cfg_ctx->ipa_uc_rsc.uc_offload_enabled =
+		cfg_param.is_uc_offload_enabled;
+	cfg_ctx->ipa_uc_rsc.tx_max_buf_cnt = cfg_param.uc_tx_buffer_count;
+	cfg_ctx->ipa_uc_rsc.tx_buf_size = cfg_param.uc_tx_buffer_size;
+	cfg_ctx->ipa_uc_rsc.rx_ind_ring_size =
+		cfg_param.uc_rx_indication_ring_count;
+	cfg_ctx->ipa_uc_rsc.tx_partition_base = cfg_param.uc_tx_partition_base;
+	cfg_ctx->enable_rxthread = cfg_param.enable_rxthread;
+	cfg_ctx->ip_tcp_udp_checksum_offload =
+		cfg_param.ip_tcp_udp_checksum_offload;
+	cfg_ctx->ce_classify_enabled = cfg_param.ce_classify_enabled;
+
+	ol_tx_set_flow_control_parameters(cfg_ctx, cfg_param);
+	return (ol_pdev_handle) cfg_ctx;
+}
+
+/*
+ * Each ol_cfg_* accessor below casts the opaque ol_pdev_handle back to
+ * its concrete struct txrx_pdev_cfg_t and reads or writes a single field.
+ */
+
+int ol_cfg_is_high_latency(ol_pdev_handle pdev)
+{
+	return ((struct txrx_pdev_cfg_t *)pdev)->is_high_latency;
+}
+
+int ol_cfg_max_peer_id(ol_pdev_handle pdev)
+{
+	/*
+	 * TBDXXX - this value must match the peer table
+	 * size allocated in FW
+	 */
+	return ((struct txrx_pdev_cfg_t *)pdev)->max_peer_id;
+}
+
+int ol_cfg_max_vdevs(ol_pdev_handle pdev)
+{
+	return ((struct txrx_pdev_cfg_t *)pdev)->max_vdev;
+}
+
+int ol_cfg_rx_pn_check(ol_pdev_handle pdev)
+{
+	return ((struct txrx_pdev_cfg_t *)pdev)->rx_pn_check;
+}
+
+int ol_cfg_rx_fwd_check(ol_pdev_handle pdev)
+{
+	return ((struct txrx_pdev_cfg_t *)pdev)->pn_rx_fwd_check;
+}
+
+void ol_set_cfg_rx_fwd_disabled(ol_pdev_handle pdev, uint8_t disable_rx_fwd)
+{
+	((struct txrx_pdev_cfg_t *)pdev)->rx_fwd_disabled = disable_rx_fwd;
+}
+
+void ol_set_cfg_packet_log_enabled(ol_pdev_handle pdev, uint8_t val)
+{
+	((struct txrx_pdev_cfg_t *)pdev)->is_packet_log_enabled = val;
+}
+
+uint8_t ol_cfg_is_packet_log_enabled(ol_pdev_handle pdev)
+{
+	return ((struct txrx_pdev_cfg_t *)pdev)->is_packet_log_enabled;
+}
+
+int ol_cfg_rx_fwd_disabled(ol_pdev_handle pdev)
+{
+#if defined(ATHR_WIN_NWF)
+	/* for Windows, let the OS handle the forwarding */
+	return 1;
+#else
+	return ((struct txrx_pdev_cfg_t *)pdev)->rx_fwd_disabled;
+#endif
+}
+
+int ol_cfg_rx_fwd_inter_bss(ol_pdev_handle pdev)
+{
+	return ((struct txrx_pdev_cfg_t *)pdev)->rx_fwd_inter_bss;
+}
+
+enum wlan_frm_fmt ol_cfg_frame_type(ol_pdev_handle pdev)
+{
+	return ((struct txrx_pdev_cfg_t *)pdev)->frame_type;
+}
+
+int ol_cfg_max_thruput_mbps(ol_pdev_handle pdev)
+{
+	return ((struct txrx_pdev_cfg_t *)pdev)->max_thruput_mbps;
+}
+
+int ol_cfg_netbuf_frags_max(ol_pdev_handle pdev)
+{
+	return ((struct txrx_pdev_cfg_t *)pdev)->max_nbuf_frags;
+}
+
+int ol_cfg_tx_free_at_download(ol_pdev_handle pdev)
+{
+	return ((struct txrx_pdev_cfg_t *)pdev)->tx_free_at_download;
+}
+
+/*
+ * ol_cfg_target_tx_credit() - total target tx credit, i.e. the base
+ * MSDU descriptor count plus the extra descriptors reserved for VoW
+ * (vow_config packs max-stations in the upper 16 bits and per-station
+ * descriptors in the lower 16 bits).
+ */
+uint16_t ol_cfg_target_tx_credit(ol_pdev_handle pdev)
+{
+	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
+	uint16_t vow_max_sta = (cfg->vow_config & 0xffff0000) >> 16;
+	uint16_t vow_max_desc_persta = cfg->vow_config & 0x0000ffff;
+
+	return cfg->target_tx_credit + vow_max_sta * vow_max_desc_persta;
+}
+
+int ol_cfg_tx_download_size(ol_pdev_handle pdev)
+{
+	return ((struct txrx_pdev_cfg_t *)pdev)->tx_download_size;
+}
+
+int ol_cfg_rx_host_defrag_timeout_duplicate_check(ol_pdev_handle pdev)
+{
+	return ((struct txrx_pdev_cfg_t *)pdev)->defrag_timeout_check;
+}
+
+int ol_cfg_throttle_period_ms(ol_pdev_handle pdev)
+{
+	return ((struct txrx_pdev_cfg_t *)pdev)->throttle_period_ms;
+}
+
+int ol_cfg_is_full_reorder_offload(ol_pdev_handle pdev)
+{
+	return ((struct txrx_pdev_cfg_t *)pdev)->is_full_reorder_offload;
+}
+
+/**
+ * ol_cfg_is_rx_thread_enabled() - return rx_thread is enable/disable
+ * @pdev : handle to the physical device
+ *
+ * Return: 1 - enable, 0 - disable
+ */
+int ol_cfg_is_rx_thread_enabled(ol_pdev_handle pdev)
+{
+	return ((struct txrx_pdev_cfg_t *)pdev)->enable_rxthread;
+}
+
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+/**
+ * ol_cfg_get_tx_flow_stop_queue_th() - return stop queue threshold
+ * @pdev : handle to the physical device
+ *
+ * Return: stop queue threshold
+ */
+int ol_cfg_get_tx_flow_stop_queue_th(ol_pdev_handle pdev)
+{
+	return ((struct txrx_pdev_cfg_t *)pdev)->tx_flow_stop_queue_th;
+}
+
+/**
+ * ol_cfg_get_tx_flow_start_queue_offset() - return start queue offset
+ * @pdev : handle to the physical device
+ *
+ * Return: start queue offset
+ */
+int ol_cfg_get_tx_flow_start_queue_offset(ol_pdev_handle pdev)
+{
+	return ((struct txrx_pdev_cfg_t *)pdev)->tx_flow_start_queue_offset;
+}
+#endif
+
+#ifdef IPA_OFFLOAD
+/* IPA micro-controller offload: accessors for the ipa_uc_rsc sub-config */
+
+unsigned int ol_cfg_ipa_uc_offload_enabled(ol_pdev_handle pdev)
+{
+	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)pdev;
+
+	return (unsigned int)cfg->ipa_uc_rsc.uc_offload_enabled;
+}
+
+unsigned int ol_cfg_ipa_uc_tx_buf_size(ol_pdev_handle pdev)
+{
+	return ((struct txrx_pdev_cfg_t *)pdev)->ipa_uc_rsc.tx_buf_size;
+}
+
+unsigned int ol_cfg_ipa_uc_tx_max_buf_cnt(ol_pdev_handle pdev)
+{
+	return ((struct txrx_pdev_cfg_t *)pdev)->ipa_uc_rsc.tx_max_buf_cnt;
+}
+
+unsigned int ol_cfg_ipa_uc_rx_ind_ring_size(ol_pdev_handle pdev)
+{
+	return ((struct txrx_pdev_cfg_t *)pdev)->ipa_uc_rsc.rx_ind_ring_size;
+}
+
+unsigned int ol_cfg_ipa_uc_tx_partition_base(ol_pdev_handle pdev)
+{
+	return ((struct txrx_pdev_cfg_t *)pdev)->ipa_uc_rsc.tx_partition_base;
+}
+#endif /* IPA_OFFLOAD */
+
+/**
+ * ol_cfg_is_ce_classify_enabled() - Return if CE classification is enabled
+ * or disabled
+ * @pdev : handle to the physical device
+ *
+ * Return: 1 - enabled, 0 - disabled
+ */
+bool ol_cfg_is_ce_classify_enabled(ol_pdev_handle pdev)
+{
+	return ((struct txrx_pdev_cfg_t *)pdev)->ce_classify_enabled;
+}
diff --git a/dp/txrx/ol_ctrl_txrx_api.h b/dp/txrx/ol_ctrl_txrx_api.h
new file mode 100644
index 000000000000..49a952e2733f
--- /dev/null
+++ b/dp/txrx/ol_ctrl_txrx_api.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_ctrl_txrx_api.h
+ * @brief Define the host control API functions called by the host data SW.
+ */
+#ifndef _OL_CTRL_TXRX_API__H_
+#define _OL_CTRL_TXRX_API__H_
+
+/* #include <osapi_linux.h> / * uint8_t * / */
+#include <osdep.h> /* uint8_t */
+#include <cdf_nbuf.h> /* cdf_nbuf_t */
+
+#include <ol_ctrl_api.h> /* ol_vdev_handle */
+#include <ol_txrx_api.h> /* ol_txrx_peer_handle, etc. */
+#include <cds_ieee80211_common.h> /* ieee80211_frame */
+
+enum ol_rx_err_type {
+ OL_RX_ERR_DEFRAG_MIC,
+ OL_RX_ERR_PN,
+ OL_RX_ERR_UNKNOWN_PEER,
+ OL_RX_ERR_MALFORMED,
+ OL_RX_ERR_TKIP_MIC,
+ OL_RX_ERR_DECRYPT,
+ OL_RX_ERR_MPDU_LENGTH,
+ OL_RX_ERR_ENCRYPT_REQUIRED,
+ OL_RX_ERR_DUP,
+ OL_RX_ERR_UNKNOWN,
+ OL_RX_ERR_FCS,
+ OL_RX_ERR_PRIVACY,
+ OL_RX_ERR_NONE_FRAG,
+ OL_RX_ERR_NONE = 0xFF
+};
+
+#ifdef SUPPORT_HOST_STATISTICS
+/** * @brief Update tx statistics
+ * @details
+ * Update tx statistics after tx complete.
+ *
+ * @param pdev - ol_pdev_handle instance
+ * @param vdev_id - ID of the virtual device that tx frame
+ * @param had_error - whether there is error when tx
+ */
+void ol_tx_statistics(ol_pdev_handle pdev, uint16_t vdev_id, int had_error);
+#else
+#define ol_tx_statistics(pdev, vdev_id, had_error)
+#endif
+
+/** * @brief Count on received packets for invalid peer case
+ *
+ * @param pdev - txrx pdev handle
+ * @param wh - received frame
+ * @param err_type - what kind of error occurred
+ */
+void ol_rx_err_inv_peer_statistics(ol_pdev_handle pdev,
+ struct ieee80211_frame *wh,
+ enum ol_rx_err_type err_type);
+
+/**
+ * @brief Count on received packets, both success and failed
+ *
+ * @param pdev - ol_pdev_handle handle
+ * @param vdev_id - ID of the virtual device received the erroneous rx frame
+ * @param err_type - what kind of error occurred
+ * @param sec_type - The cipher type the peer is using
+ * @param is_mcast - whether this is one multi cast frame
+ */
+void ol_rx_err_statistics(ol_pdev_handle pdev,
+ uint8_t vdev_id,
+ enum ol_rx_err_type err_type,
+ enum ol_sec_type sec_type, int is_mcast);
+
+/**
+ * @brief Provide notification of failure during host rx processing
+ * @details
+ * Indicate an error during host rx data processing, including what
+ * kind of error happened, when it happened, which peer and TID the
+ * erroneous rx frame is from, and what the erroneous rx frame itself
+ * is.
+ *
+ * @param pdev - handle to the ctrl SW's physical device object
+ * @param vdev_id - ID of the virtual device received the erroneous rx frame
+ * @param peer_mac_addr - MAC address of the peer that sent the erroneous
+ * rx frame
+ * @param tid - which TID within the peer sent the erroneous rx frame
+ * @param tsf32 - the timstamp in TSF units of the erroneous rx frame, or
+ * one of the fragments that when reassembled, constitute the rx frame
+ * @param err_type - what kind of error occurred
+ * @param rx_frame - the rx frame that had an error
+ * @pn - Packet sequence number
+ * @key_id - Key index octet received in IV of the frame
+ */
+void
+ol_rx_err(ol_pdev_handle pdev,
+ uint8_t vdev_id,
+ uint8_t *peer_mac_addr,
+ int tid,
+ uint32_t tsf32,
+ enum ol_rx_err_type err_type,
+ cdf_nbuf_t rx_frame, uint64_t *pn, uint8_t key_id);
+
+enum ol_rx_notify_type {
+ OL_RX_NOTIFY_IPV4_IGMP,
+};
+
+/**
+ * @brief Provide notification of reception of data of special interest.
+ * @details
+ * Indicate when "special" data has been received. The nature of the
+ * data that results in it being considered special is specified in the
+ * notify_type argument.
+ * This function is currently used by the data-path SW to notify the
+ * control path SW when the following types of rx data are received:
+ * + IPv4 IGMP frames
+ * The control SW can use these to learn about multicast group
+ * membership, if it so chooses.
+ *
+ * @param pdev - handle to the ctrl SW's physical device object
+ * @param vdev_id - ID of the virtual device received the special data
+ * @param peer_mac_addr - MAC address of the peer that sent the special data
+ * @param tid - which TID within the peer sent the special data
+ * @param tsf32 - the timstamp in TSF units of the special data
+ * @param notify_type - what kind of special data was received
+ * @param rx_frame - the rx frame containing the special data
+ */
+void
+ol_rx_notify(ol_pdev_handle pdev,
+ uint8_t vdev_id,
+ uint8_t *peer_mac_addr,
+ int tid,
+ uint32_t tsf32,
+ enum ol_rx_notify_type notify_type, cdf_nbuf_t rx_frame);
+
+/**
+ * @brief Indicate when a paused STA has tx data available.
+ * @details
+ * Indicate to the control SW when a paused peer that previously
+ * has all its peer-TID queues empty gets a MSDU to transmit.
+ * Conversely, indicate when a paused peer that had data in one or more of
+ * its peer-TID queues has all queued data removed (e.g. due to a U-APSD
+ * triggered transmission), but is still paused.
+ * It is up to the control SW to determine whether the peer is paused due to
+ * being in power-save sleep, or some other reason, and thus whether it is
+ * necessary to set the TIM in beacons to notify a sleeping STA that it has
+ * data.
+ * The data SW will also issue this ol_tx_paused_peer_data call when an
+ * unpaused peer that currently has tx data in one or more of its
+ * peer-TID queues becomes paused.
+ * The data SW will not issue this ol_tx_paused_peer_data call when a
+ * peer with data in one or more of its peer-TID queues becomes unpaused.
+ *
+ * @param peer - the paused peer
+ * @param has_tx_data -
+ * 1 -> a paused peer that previously had no tx data now does, -OR-
+ * 0 -> a paused peer that previously had tx data now doesnt
+ */
+void ol_tx_paused_peer_data(ol_peer_handle peer, int has_tx_data);
+
+#define ol_ctrl_addba_req(pdev, peer_mac_addr, tid) ol_addba_req_reject
+#define ol_ctrl_rx_addba_complete(pdev, peer_mac_addr, tid, failed) /* no-op */
+
+void ol_txrx_set_peer_authorized_event(struct ol_txrx_vdev_t *vdev);
+
+
+#endif /* _OL_CTRL_TXRX_API__H_ */
diff --git a/dp/txrx/ol_osif_txrx_api.h b/dp/txrx/ol_osif_txrx_api.h
new file mode 100644
index 000000000000..b47a0b6cdbbf
--- /dev/null
+++ b/dp/txrx/ol_osif_txrx_api.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_osif_txrx_api.h
+ * @brief Define the OS specific API functions called by txrx SW.
+ */
+#ifndef _OL_OSIF_TXRX_API_H_
+#define _OL_OSIF_TXRX_API_H_
+
+#include <cdf_nbuf.h> /* cdf_nbuf_t */
+
+/**
+ * @brief Call tx completion handler to release the buffers
+ * @details
+ *
+ * Invoke tx completion handler when the tx credit goes below low water mark.
+ * This eliminate the packet drop in the host driver due to send routine not
+ * yielding the cpu when the amount of traffic pumped from the network layer
+ * is very high.
+ *
+ * @param osdev
+ */
+
+void ol_osif_ath_tasklet(cdf_device_t osdev);
+
+#endif /* _OL_OSIF_TXRX_API_H_ */
diff --git a/dp/txrx/ol_rx.c b/dp/txrx/ol_rx.c
new file mode 100644
index 000000000000..19db13998ccc
--- /dev/null
+++ b/dp/txrx/ol_rx.c
@@ -0,0 +1,1493 @@
+/*
+ * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+#include <cdf_util.h> /* cdf_os_cpu_to_le64 */
+#include <cdf_types.h> /* bool */
+#include <cds_ieee80211_common.h> /* ieee80211_frame */
+
+/* external API header files */
+#include <ol_ctrl_txrx_api.h> /* ol_rx_notify */
+#include <ol_htt_api.h> /* htt_pdev_handle */
+#include <ol_txrx_api.h> /* ol_txrx_pdev_handle */
+#include <ol_txrx_htt_api.h> /* ol_rx_indication_handler */
+#include <ol_htt_rx_api.h> /* htt_rx_peer_id, etc. */
+
+/* internal API header files */
+#include <ol_txrx_types.h> /* ol_txrx_vdev_t, etc. */
+#include <ol_txrx_peer_find.h> /* ol_txrx_peer_find_by_id */
+#include <ol_rx_reorder.h> /* ol_rx_reorder_store, etc. */
+#include <ol_rx_reorder_timeout.h> /* OL_RX_REORDER_TIMEOUT_UPDATE */
+#include <ol_rx_defrag.h> /* ol_rx_defrag_waitlist_flush */
+#include <ol_txrx_internal.h>
+#include <wdi_event.h>
+#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
+#include <ol_txrx_encap.h> /* ol_rx_decap_info_t, etc */
+#endif
+
+/* FIX THIS: txrx should not include private header files of other modules */
+#include <htt_types.h>
+#include <ol_if_athvar.h>
+#include <enet.h> /* ethernet + SNAP/LLC header defs and
+ ethertype values */
+#include <ip_prot.h> /* IP protocol values */
+#include <ipv4.h> /* IPv4 header defs */
+#include <ipv6_defs.h> /* IPv6 header defs */
+#include <ol_vowext_dbg_defs.h>
+#include <ol_txrx_osif_api.h>
+#include <wma.h>
+
+#ifdef HTT_RX_RESTORE
+#if defined(CONFIG_CNSS)
+#include <net/cnss.h>
+#endif
+#endif
+
+void ol_rx_data_process(struct ol_txrx_peer_t *peer,
+ cdf_nbuf_t rx_buf_list);
+
+
+#ifdef HTT_RX_RESTORE
+
+/* work handler: runs in process context and starts full driver
+ * self-recovery after the rx ring was found in a bad state */
+static void ol_rx_restore_handler(struct work_struct *htt_rx)
+{
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO,
+		  "Enter: %s", __func__);
+	cnss_device_self_recovery();
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO,
+		  "Exit: %s", __func__);
+}
+
+static DECLARE_WORK(ol_rx_restore_work, ol_rx_restore_handler);
+
+/**
+ * ol_rx_trigger_restore() - drop pending rx frames and schedule recovery
+ * @htt_pdev: HTT physical device handle
+ * @head_msdu: first MSDU of the list to free
+ * @tail_msdu: last MSDU (unused: the list is walked via next pointers
+ *	until NULL)
+ *
+ * Frees every buffer chained from @head_msdu, then - only once, guarded
+ * by rx_ring.htt_rx_restore - marks recovery in progress and schedules
+ * the self-recovery work item.
+ */
+void ol_rx_trigger_restore(htt_pdev_handle htt_pdev, cdf_nbuf_t head_msdu,
+			   cdf_nbuf_t tail_msdu)
+{
+	cdf_nbuf_t next;
+
+	while (head_msdu) {
+		next = cdf_nbuf_next(head_msdu);
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO,
+			  "freeing %p\n", head_msdu);
+		cdf_nbuf_free(head_msdu);
+		head_msdu = next;
+	}
+
+	if (!htt_pdev->rx_ring.htt_rx_restore) {
+		cds_set_recovery_in_progress(true);
+		htt_pdev->rx_ring.htt_rx_restore = 1;
+		schedule_work(&ol_rx_restore_work);
+	}
+}
+#endif
+
+/**
+ * ol_rx_process_inv_peer() - handle an rx MPDU whose peer is unknown
+ * @pdev: txrx physical device the frame arrived on
+ * @rx_mpdu_desc: HTT rx MPDU descriptor for the frame
+ * @msdu: the received network buffer
+ *
+ * Retrieves the 802.11 header from the rx descriptor, ignores non-data
+ * frames and frames whose receiver address matches no local vdev, and
+ * otherwise raises a WDI_EVENT_RX_PEER_INVALID event (when WDI events
+ * are compiled in) so the control path can react.
+ */
+static void ol_rx_process_inv_peer(ol_txrx_pdev_handle pdev,
+				   void *rx_mpdu_desc, cdf_nbuf_t msdu)
+{
+	uint8_t a1[IEEE80211_ADDR_LEN];
+	htt_pdev_handle htt_pdev = pdev->htt_pdev;
+	struct ol_txrx_vdev_t *vdev = NULL;
+	struct ieee80211_frame *wh;
+	struct wdi_event_rx_peer_invalid_msg msg;
+
+	wh = (struct ieee80211_frame *)
+	     htt_rx_mpdu_wifi_hdr_retrieve(htt_pdev, rx_mpdu_desc);
+	/*
+	 * Klocwork issue #6152
+	 * All targets that send a "INVALID_PEER" rx status provide a
+	 * 802.11 header for each rx MPDU, so it is certain that
+	 * htt_rx_mpdu_wifi_hdr_retrieve will succeed.
+	 * However, both for robustness, e.g. if this function is given a
+	 * MSDU descriptor rather than a MPDU descriptor, and to make it
+	 * clear to static analysis that this code is safe, add an explicit
+	 * check that htt_rx_mpdu_wifi_hdr_retrieve provides a non-NULL value.
+	 */
+	if (wh == NULL || !IEEE80211_IS_DATA(wh))
+		return;
+
+	/* ignore frames for non-existent bssids */
+	cdf_mem_copy(a1, wh->i_addr1, IEEE80211_ADDR_LEN);
+	/* assumes cdf_mem_compare() has memcmp semantics (0 == equal)
+	 * -- NOTE(review): confirm against the CDF API */
+	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
+		if (cdf_mem_compare(a1, vdev->mac_addr.raw, IEEE80211_ADDR_LEN)
+		    == 0) {
+			break;
+		}
+	}
+	if (!vdev)
+		return;
+
+	msg.wh = wh;
+	msg.msdu = msdu;
+	msg.vdev_id = vdev->vdev_id;
+#ifdef WDI_EVENT_ENABLE
+	wdi_event_handler(WDI_EVENT_RX_PEER_INVALID, pdev, &msg);
+#endif
+}
+
+#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
+/*
+ * ol_rx_rssi_avg() - exponentially-weighted moving average of peer RSSI.
+ * The new sample contributes rssi_new_weight out of
+ * (1 << rssi_update_shift); the old average keeps the remainder.
+ * An invalid new sample leaves the average unchanged; an invalid old
+ * average is simply replaced by the new sample.
+ */
+static inline int16_t
+ol_rx_rssi_avg(struct ol_txrx_pdev_t *pdev, int16_t rssi_old, int16_t rssi_new)
+{
+	int rssi_old_weight;
+
+	if (rssi_new == HTT_RSSI_INVALID)
+		return rssi_old;
+	if (rssi_old == HTT_RSSI_INVALID)
+		return rssi_new;
+
+	rssi_old_weight =
+		(1 << pdev->rssi_update_shift) - pdev->rssi_new_weight;
+	return (rssi_new * pdev->rssi_new_weight +
+		rssi_old * rssi_old_weight) >> pdev->rssi_update_shift;
+}
+
+/* fold the rx indication's combined RSSI sample into the peer's
+ * running average (see ol_rx_rssi_avg) */
+static void
+ol_rx_ind_rssi_update(struct ol_txrx_peer_t *peer, cdf_nbuf_t rx_ind_msg)
+{
+	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
+	peer->rssi_dbm = ol_rx_rssi_avg(pdev, peer->rssi_dbm,
+					htt_rx_ind_rssi_dbm(pdev->htt_pdev,
+							    rx_ind_msg));
+}
+
+/*
+ * ol_rx_mpdu_rssi_update() - fold one MPDU's RSSI sample into the peer's
+ * running average.  A NULL peer is tolerated and ignored.
+ *
+ * Fix: the original dereferenced peer->vdev->pdev BEFORE the NULL check,
+ * so the "if (!peer) return;" guard could never help; the check must
+ * come first.
+ */
+static void
+ol_rx_mpdu_rssi_update(struct ol_txrx_peer_t *peer, void *rx_mpdu_desc)
+{
+	struct ol_txrx_pdev_t *pdev;
+
+	if (!peer)
+		return;
+	pdev = peer->vdev->pdev;
+	peer->rssi_dbm = ol_rx_rssi_avg(pdev, peer->rssi_dbm,
+					htt_rx_mpdu_desc_rssi_dbm(
+						pdev->htt_pdev,
+						rx_mpdu_desc));
+}
+
+#else
+#define ol_rx_ind_rssi_update(peer, rx_ind_msg) /* no-op */
+#define ol_rx_mpdu_rssi_update(peer, rx_mpdu_desc) /* no-op */
+#endif /* QCA_SUPPORT_PEER_DATA_RX_RSSI */
+
+/**
+ * discard_msdus() - free the rx descriptor and frame of every MSDU from
+ * @head_msdu through @tail_msdu (inclusive), following the next pointers.
+ */
+void discard_msdus(htt_pdev_handle htt_pdev,
+		   cdf_nbuf_t head_msdu,
+		   cdf_nbuf_t tail_msdu)
+{
+	cdf_nbuf_t next;
+
+	for (;;) {
+		next = cdf_nbuf_next(head_msdu);
+		htt_rx_desc_frame_free(htt_pdev, head_msdu);
+		if (head_msdu == tail_msdu)
+			break;
+		head_msdu = next;
+	}
+}
+
+/*
+ * chain_msdus() - free the rx descriptor and frame of every MSDU from
+ * @head_msdu through @tail_msdu, following the next pointers.
+ * NOTE(review): despite its name this is byte-for-byte the same
+ * operation as discard_msdus() -- it frees rather than chains; confirm
+ * whether a distinct implementation was intended.
+ */
+void chain_msdus(htt_pdev_handle htt_pdev,
+		 cdf_nbuf_t head_msdu,
+		 cdf_nbuf_t tail_msdu)
+{
+	cdf_nbuf_t next;
+
+	for (;;) {
+		next = cdf_nbuf_next(head_msdu);
+		htt_rx_desc_frame_free(htt_pdev, head_msdu);
+		if (head_msdu == tail_msdu)
+			break;
+		head_msdu = next;
+	}
+}
+
+/**
+ * process_reorder() - run host-side sequence-number checks on one rx MPDU
+ * and either store it in the peer/TID reorder array or discard it
+ * @pdev: txrx physical device
+ * @rx_mpdu_desc: HTT rx descriptor of the MPDU
+ * @tid: extended traffic ID the MPDU belongs to
+ * @peer: peer the MPDU was received from
+ * @head_msdu: first MSDU of the MPDU
+ * @tail_msdu: last MSDU of the MPDU
+ * @num_mpdu_ranges: number of MPDU ranges in the rx indication
+ * @num_pdus: number of MPDUs referenced by the rx indication
+ * @rx_ind_release: caller's "release reorder array" intent.
+ *	NOTE(review): passed by value, so clearing it below cannot reach
+ *	the caller -- confirm whether this should be a bool pointer.
+ *
+ * Fix: the discard-path assertion referenced the undeclared identifier
+ * "num_mpdus"; the parameter is named num_pdus, so the assert failed to
+ * compile whenever TXRX_ASSERT2 is enabled.
+ */
+void process_reorder(ol_txrx_pdev_handle pdev,
+		     void *rx_mpdu_desc,
+		     uint8_t tid,
+		     struct ol_txrx_peer_t *peer,
+		     cdf_nbuf_t head_msdu,
+		     cdf_nbuf_t tail_msdu,
+		     int num_mpdu_ranges,
+		     int num_pdus,
+		     bool rx_ind_release
+		     )
+{
+	htt_pdev_handle htt_pdev = pdev->htt_pdev;
+	enum htt_rx_status mpdu_status;
+	int reorder_idx;
+
+	reorder_idx = htt_rx_mpdu_desc_reorder_idx(htt_pdev, rx_mpdu_desc);
+	OL_RX_REORDER_TRACE_ADD(pdev, tid,
+				reorder_idx,
+				htt_rx_mpdu_desc_seq_num(htt_pdev,
+							 rx_mpdu_desc),
+				1);
+	ol_rx_mpdu_rssi_update(peer, rx_mpdu_desc);
+	/*
+	 * In most cases, out-of-bounds and duplicate sequence number detection
+	 * is performed by the target, but in some cases it is done by the host.
+	 * Specifically, the host does rx out-of-bounds sequence number
+	 * detection for:
+	 * 1. Peregrine or Rome target
+	 *    for peer-TIDs that do not have aggregation enabled, if the
+	 *    RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK flag
+	 *    is set during the driver build.
+	 * 2. Riva-family targets, which have rx reorder timeouts handled by
+	 *    the host rather than the target.
+	 *    (The target already does duplicate detection, but the host
+	 *    may have given up waiting for a particular sequence number before
+	 *    it arrives. In this case, the out-of-bounds sequence number
+	 *    of the late frame allows the host to discard it, rather than
+	 *    sending it out of order.)
+	 */
+	mpdu_status = OL_RX_SEQ_NUM_CHECK(pdev, peer, tid, rx_mpdu_desc);
+	if (mpdu_status != htt_rx_status_ok) {
+		/*
+		 * If the sequence number was out of bounds, the MPDU needs
+		 * to be discarded.
+		 */
+		discard_msdus(htt_pdev, head_msdu, tail_msdu);
+		/*
+		 * For Peregrine and Rome,
+		 * OL_RX_REORDER_SEQ_NUM_CHECK should only fail for the case
+		 * of (duplicate) non-aggregates.
+		 *
+		 * For Riva, Pronto and Northstar,
+		 * there should be only one MPDU delivered at a time.
+		 * Thus, there are no further MPDUs that need to be
+		 * processed here.
+		 * Just to be sure this is true, check the assumption
+		 * that this was the only MPDU referenced by the rx
+		 * indication.
+		 */
+		TXRX_ASSERT2(num_mpdu_ranges == 1 && num_pdus == 1);
+
+		/*
+		 * The MPDU was not stored in the rx reorder array, so
+		 * there's nothing to release.
+		 */
+		rx_ind_release = false;
+	} else {
+		ol_rx_reorder_store(pdev, peer, tid,
+				    reorder_idx, head_msdu, tail_msdu);
+		if (peer->tids_rx_reorder[tid].win_sz_mask == 0) {
+			peer->tids_last_seq[tid] = htt_rx_mpdu_desc_seq_num(
+				htt_pdev, rx_mpdu_desc);
+		}
+	}
+} /* process_reorder */
+
/**
 * ol_rx_indication_handler() - process one HTT rx indication message
 * @pdev: txrx physical device that received the indication
 * @rx_ind_msg: the HTT rx indication message (netbuf)
 * @peer_id: ID of the peer the indicated frames came from
 * @tid: extended traffic ID of the frames, or HTT_INVALID_TID
 * @num_mpdu_ranges: number of MPDU ranges described by the indication
 *
 * Applies any reorder flush/release directives carried in the message,
 * then walks each MPDU range: MPDUs marked OK are popped from the HTT
 * rx ring and handed to the reorder logic; bad MPDUs are popped,
 * accounted (TKIP MIC error / invalid peer handling) and freed.
 * Finally the rx buffer ring is replenished for the target.
 */
void
ol_rx_indication_handler(ol_txrx_pdev_handle pdev,
			 cdf_nbuf_t rx_ind_msg,
			 uint16_t peer_id, uint8_t tid, int num_mpdu_ranges)
{
	int mpdu_range, i;
	unsigned seq_num_start = 0, seq_num_end = 0;
	bool rx_ind_release = false;
	struct ol_txrx_vdev_t *vdev = NULL;
	struct ol_txrx_peer_t *peer;
	htt_pdev_handle htt_pdev;
	uint16_t center_freq;
	uint16_t chan1;
	uint16_t chan2;
	uint8_t phymode;
	bool ret;

	htt_pdev = pdev->htt_pdev;
	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
	if (!peer) {
		/*
		 * If we can't find a peer send this packet to OCB interface
		 * using OCB self peer
		 */
		if (!ol_txrx_get_ocb_peer(pdev, &peer))
			peer = NULL;
	}

	if (peer) {
		vdev = peer->vdev;
		ol_rx_ind_rssi_update(peer, rx_ind_msg);

		/*
		 * For OCB mode, cache the per-packet PHY info (rate, RSSI,
		 * timestamps, TSF, TID) on the peer; ol_rx_deliver() later
		 * prepends it to the frame as an rx stats header.
		 */
		if (vdev->opmode == wlan_op_mode_ocb) {
			htt_rx_ind_legacy_rate(pdev->htt_pdev, rx_ind_msg,
					&peer->last_pkt_legacy_rate,
					&peer->last_pkt_legacy_rate_sel);
			peer->last_pkt_rssi_cmb = htt_rx_ind_rssi_dbm(
						pdev->htt_pdev, rx_ind_msg);
			for (i = 0; i < 4; i++)
				peer->last_pkt_rssi[i] =
					htt_rx_ind_rssi_dbm_chain(
						pdev->htt_pdev, rx_ind_msg, i);
			htt_rx_ind_timestamp(pdev->htt_pdev, rx_ind_msg,
					&peer->last_pkt_timestamp_microsec,
					&peer->last_pkt_timestamp_submicrosec);
			peer->last_pkt_tsf = htt_rx_ind_tsf32(pdev->htt_pdev,
							      rx_ind_msg);
			peer->last_pkt_tid = htt_rx_ind_ext_tid(pdev->htt_pdev,
								rx_ind_msg);
		}
	}

	TXRX_STATS_INCR(pdev, priv.rx.normal.ppdus);

	OL_RX_REORDER_TIMEOUT_MUTEX_LOCK(pdev);

	/* Handle a flush directive carried by the indication */
	if (htt_rx_ind_flush(pdev->htt_pdev, rx_ind_msg) && peer) {
		htt_rx_ind_flush_seq_num_range(pdev->htt_pdev, rx_ind_msg,
					       &seq_num_start, &seq_num_end);
		if (tid == HTT_INVALID_TID) {
			/*
			 * host/FW reorder state went out-of sync
			 * for a while because FW ran out of Rx indication
			 * buffer. We have to discard all the buffers in
			 * reorder queue.
			 */
			ol_rx_reorder_peer_cleanup(vdev, peer);
		} else {
			ol_rx_reorder_flush(vdev, peer, tid, seq_num_start,
					    seq_num_end, htt_rx_flush_release);
		}
	}

	if (htt_rx_ind_release(pdev->htt_pdev, rx_ind_msg)) {
		/*
		 * Save the release info here and perform the release at
		 * the end of this function. This matters in the HL case,
		 * where the message and the payload share one cdf_nbuf_t
		 * and the buffer is modified during processing.
		 */
		rx_ind_release = true;
		htt_rx_ind_release_seq_num_range(pdev->htt_pdev, rx_ind_msg,
						 &seq_num_start, &seq_num_end);
	}
#ifdef DEBUG_DMA_DONE
	pdev->htt_pdev->rx_ring.dbg_initial_msdu_payld =
		pdev->htt_pdev->rx_ring.sw_rd_idx.msdu_payld;
#endif

	for (mpdu_range = 0; mpdu_range < num_mpdu_ranges; mpdu_range++) {
		enum htt_rx_status status;
		int i, num_mpdus;
		cdf_nbuf_t head_msdu, tail_msdu, msdu;
		void *rx_mpdu_desc;

#ifdef DEBUG_DMA_DONE
		pdev->htt_pdev->rx_ring.dbg_mpdu_range = mpdu_range;
#endif

		htt_rx_ind_mpdu_range_info(pdev->htt_pdev, rx_ind_msg,
					   mpdu_range, &status, &num_mpdus);
		if ((status == htt_rx_status_ok) && peer) {
			TXRX_STATS_ADD(pdev, priv.rx.normal.mpdus, num_mpdus);
			/* valid frame - deposit it into rx reordering buffer */
			for (i = 0; i < num_mpdus; i++) {
				int msdu_chaining;
				/*
				 * Get a linked list of the MSDUs that comprise
				 * this MPDU.
				 * This also attaches each rx MSDU descriptor to
				 * the corresponding rx MSDU network buffer.
				 * (In some systems, the rx MSDU desc is already
				 * in the same buffer as the MSDU payload; in
				 * other systems they are separate, so a pointer
				 * needs to be set in the netbuf to locate the
				 * corresponding rx descriptor.)
				 *
				 * It is necessary to call htt_rx_amsdu_pop
				 * before htt_rx_mpdu_desc_list_next, because
				 * the (MPDU) rx descriptor has DMA unmapping
				 * done during the htt_rx_amsdu_pop call.
				 * The rx desc should not be accessed until this
				 * DMA unmapping has been done, since the DMA
				 * unmapping involves making sure the cache area
				 * for the mapped buffer is flushed, so the data
				 * written by the MAC DMA into memory will be
				 * fetched, rather than garbage from the cache.
				 */

#ifdef DEBUG_DMA_DONE
				pdev->htt_pdev->rx_ring.dbg_mpdu_count = i;
#endif

				msdu_chaining =
					htt_rx_amsdu_pop(htt_pdev,
							 rx_ind_msg,
							 &head_msdu,
							 &tail_msdu);
#ifdef HTT_RX_RESTORE
				if (htt_pdev->rx_ring.rx_reset) {
					ol_rx_trigger_restore(htt_pdev,
							      head_msdu,
							      tail_msdu);
					return;
				}
#endif
				rx_mpdu_desc =
					htt_rx_mpdu_desc_list_next(htt_pdev,
								   rx_ind_msg);
				ret = htt_rx_msdu_center_freq(htt_pdev, peer,
					rx_mpdu_desc, &center_freq, &chan1,
					&chan2, &phymode);
				if (ret == true) {
					peer->last_pkt_center_freq =
						center_freq;
				} else {
					peer->last_pkt_center_freq = 0;
				}

				/* Pktlog */
#ifdef WDI_EVENT_ENABLE
				wdi_event_handler(WDI_EVENT_RX_DESC_REMOTE,
						  pdev, head_msdu);
#endif

				if (msdu_chaining) {
					/*
					 * TBDXXX - to deliver SDU with
					 * chaining, we need to stitch those
					 * scattered buffers into one single
					 * buffer.
					 * Just discard it now.
					 */
					chain_msdus(htt_pdev,
						    head_msdu,
						    tail_msdu);
				} else {
					process_reorder(pdev, rx_mpdu_desc,
							tid, peer,
							head_msdu, tail_msdu,
							num_mpdu_ranges,
							num_mpdus,
							rx_ind_release);
				}

			}
		} else {
			/* invalid frames - discard them */
			OL_RX_REORDER_TRACE_ADD(pdev, tid,
						TXRX_SEQ_NUM_ERR(status),
						TXRX_SEQ_NUM_ERR(status),
						num_mpdus);
			TXRX_STATS_ADD(pdev, priv.rx.err.mpdu_bad, num_mpdus);
			for (i = 0; i < num_mpdus; i++) {
				/* pull the MPDU's MSDUs off the buffer queue */
				htt_rx_amsdu_pop(htt_pdev, rx_ind_msg, &msdu,
						 &tail_msdu);
#ifdef HTT_RX_RESTORE
				if (htt_pdev->rx_ring.rx_reset) {
					ol_rx_trigger_restore(htt_pdev, msdu,
							      tail_msdu);
					return;
				}
#endif
				/* pull the MPDU desc off the desc queue */
				rx_mpdu_desc =
					htt_rx_mpdu_desc_list_next(htt_pdev,
								   rx_ind_msg);
				OL_RX_ERR_STATISTICS_2(pdev, vdev, peer,
						       rx_mpdu_desc, msdu,
						       status);

				/*
				 * TKIP MIC failures are reported to the
				 * control path (countermeasures) along with
				 * the 48-bit PN and key ID of the bad MPDU.
				 */
				if (status == htt_rx_status_tkip_mic_err &&
				    vdev != NULL && peer != NULL) {
					union htt_rx_pn_t pn;
					uint8_t key_id;
					htt_rx_mpdu_desc_pn(
						pdev->htt_pdev,
						htt_rx_msdu_desc_retrieve(
							pdev->htt_pdev,
							msdu), &pn, 48);
					if (htt_rx_msdu_desc_key_id(
						    pdev->htt_pdev,
						    htt_rx_msdu_desc_retrieve(
							    pdev->htt_pdev,
							    msdu),
						    &key_id) == true) {
						ol_rx_err(pdev->ctrl_pdev,
							  vdev->vdev_id,
							  peer->mac_addr.raw,
							  tid, 0,
							  OL_RX_ERR_TKIP_MIC,
							  msdu, &pn.pn48,
							  key_id);
					}
				}
#ifdef WDI_EVENT_ENABLE
				if (status != htt_rx_status_ctrl_mgmt_null) {
					/* Pktlog */
					wdi_event_handler(
						WDI_EVENT_RX_DESC_REMOTE, pdev,
						msdu);
				}
#endif
				if (status == htt_rx_status_err_inv_peer) {
					/* once per mpdu */
					ol_rx_process_inv_peer(pdev,
							       rx_mpdu_desc,
							       msdu);
				}
				while (1) {
					/* Free the nbuf */
					cdf_nbuf_t next;
					next = cdf_nbuf_next(msdu);
					htt_rx_desc_frame_free(htt_pdev, msdu);
					if (msdu == tail_msdu)
						break;
					msdu = next;
				}
			}
		}
	}
	/*
	 * Now that a whole batch of MSDUs have been pulled out of HTT
	 * and put into the rx reorder array, it is an appropriate time
	 * to request HTT to provide new rx MSDU buffers for the target
	 * to fill.
	 * This could be done after the end of this function, but it's
	 * better to do it now, rather than waiting until after the driver
	 * and OS finish processing the batch of rx MSDUs.
	 */
	htt_rx_msdu_buff_replenish(htt_pdev);

	if ((true == rx_ind_release) && peer && vdev) {
		ol_rx_reorder_release(vdev, peer, tid, seq_num_start,
				      seq_num_end);
	}
	OL_RX_REORDER_TIMEOUT_UPDATE(peer, tid);
	OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);

	if (pdev->rx.flags.defrag_timeout_check)
		ol_rx_defrag_waitlist_flush(pdev);
}
+
/**
 * ol_rx_sec_ind_handler() - record a peer's rx security parameters
 * @pdev: txrx physical device
 * @peer_id: ID of the peer the security spec applies to
 * @sec_type: cipher type (see enum htt_sec_type)
 * @is_unicast: nonzero for the unicast key, zero for multicast
 * @michael_key: TKIP Michael key (copied unconditionally for simplicity)
 * @rx_pn: initial rx packet number (used for WAPI pre-seeding)
 *
 * Invoked from the HTT SEC_IND message. For WAPI, each TID's last-PN
 * is pre-seeded from @rx_pn (converted to little-endian) and marked
 * valid, since WAPI PNs start from a predefined value; for all other
 * ciphers the per-TID PN-valid flags are cleared.
 */
void
ol_rx_sec_ind_handler(ol_txrx_pdev_handle pdev,
		      uint16_t peer_id,
		      enum htt_sec_type sec_type,
		      int is_unicast, uint32_t *michael_key, uint32_t *rx_pn)
{
	struct ol_txrx_peer_t *peer;
	int sec_index, i;

	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
	if (!peer) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "Couldn't find peer from ID %d - skipping security inits\n",
			   peer_id);
		return;
	}
	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
		   "sec spec for peer %p (%02x:%02x:%02x:%02x:%02x:%02x): "
		   "%s key of type %d\n",
		   peer,
		   peer->mac_addr.raw[0], peer->mac_addr.raw[1],
		   peer->mac_addr.raw[2], peer->mac_addr.raw[3],
		   peer->mac_addr.raw[4], peer->mac_addr.raw[5],
		   is_unicast ? "ucast" : "mcast", sec_type);
	sec_index = is_unicast ? txrx_sec_ucast : txrx_sec_mcast;
	peer->security[sec_index].sec_type = sec_type;
	/* michael key only valid for TKIP
	   but for simplicity, copy it anyway */
	cdf_mem_copy(&peer->security[sec_index].michael_key[0],
		     michael_key,
		     sizeof(peer->security[sec_index].michael_key));

	if (sec_type != htt_sec_type_wapi) {
		cdf_mem_set(peer->tids_last_pn_valid,
			    OL_TXRX_NUM_EXT_TIDS, 0x00);
	} else if (sec_index == txrx_sec_mcast || peer->tids_last_pn_valid[0]) {
		/*
		 * NOTE(review): for WAPI unicast keys this path is only
		 * taken if TID 0's PN was already marked valid (i.e. the
		 * mcast key was installed first) - confirm that ordering
		 * assumption against the firmware's SEC_IND sequence.
		 */
		for (i = 0; i < OL_TXRX_NUM_EXT_TIDS; i++) {
			/*
			 * Setting PN valid bit for WAPI sec_type,
			 * since WAPI PN has to be started with predefined value
			 */
			peer->tids_last_pn_valid[i] = 1;
			cdf_mem_copy((uint8_t *) &peer->tids_last_pn[i],
				     (uint8_t *) rx_pn,
				     sizeof(union htt_rx_pn_t));
			peer->tids_last_pn[i].pn128[1] =
				cdf_os_cpu_to_le64(
					peer->tids_last_pn[i].pn128[1]);
			peer->tids_last_pn[i].pn128[0] =
				cdf_os_cpu_to_le64(
					peer->tids_last_pn[i].pn128[0]);
		}
	}
}
+
+#if defined(PERE_IP_HDR_ALIGNMENT_WAR)
+
+#include <cds_ieee80211_common.h>
+
+static void transcap_nwifi_to_8023(cdf_nbuf_t msdu)
+{
+ struct ieee80211_frame *wh;
+ uint32_t hdrsize;
+ struct llc *llchdr;
+ struct ether_header *eth_hdr;
+ uint16_t ether_type = 0;
+ uint8_t a1[IEEE80211_ADDR_LEN];
+ uint8_t a2[IEEE80211_ADDR_LEN];
+ uint8_t a3[IEEE80211_ADDR_LEN];
+ uint8_t fc1;
+
+ wh = (struct ieee80211_frame *)cdf_nbuf_data(msdu);
+ cdf_mem_copy(a1, wh->i_addr1, IEEE80211_ADDR_LEN);
+ cdf_mem_copy(a2, wh->i_addr2, IEEE80211_ADDR_LEN);
+ cdf_mem_copy(a3, wh->i_addr3, IEEE80211_ADDR_LEN);
+ fc1 = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
+ /* Native Wifi header is 80211 non-QoS header */
+ hdrsize = sizeof(struct ieee80211_frame);
+
+ llchdr = (struct llc *)(((uint8_t *) cdf_nbuf_data(msdu)) + hdrsize);
+ ether_type = llchdr->llc_un.type_snap.ether_type;
+
+ /*
+ * Now move the data pointer to the beginning of the mac header :
+ * new-header = old-hdr + (wifhdrsize + llchdrsize - ethhdrsize)
+ */
+ cdf_nbuf_pull_head(msdu,
+ (hdrsize + sizeof(struct llc) -
+ sizeof(struct ether_header)));
+ eth_hdr = (struct ether_header *)(cdf_nbuf_data(msdu));
+ switch (fc1) {
+ case IEEE80211_FC1_DIR_NODS:
+ cdf_mem_copy(eth_hdr->ether_dhost, a1, IEEE80211_ADDR_LEN);
+ cdf_mem_copy(eth_hdr->ether_shost, a2, IEEE80211_ADDR_LEN);
+ break;
+ case IEEE80211_FC1_DIR_TODS:
+ cdf_mem_copy(eth_hdr->ether_dhost, a3, IEEE80211_ADDR_LEN);
+ cdf_mem_copy(eth_hdr->ether_shost, a2, IEEE80211_ADDR_LEN);
+ break;
+ case IEEE80211_FC1_DIR_FROMDS:
+ cdf_mem_copy(eth_hdr->ether_dhost, a1, IEEE80211_ADDR_LEN);
+ cdf_mem_copy(eth_hdr->ether_shost, a3, IEEE80211_ADDR_LEN);
+ break;
+ case IEEE80211_FC1_DIR_DSTODS:
+ break;
+ }
+ eth_hdr->ether_type = ether_type;
+}
+#endif
+
/**
 * ol_rx_notify() - notify the control path about a special rx frame
 *
 * Intentionally a no-op stub in this driver; see the note below.
 */
void ol_rx_notify(ol_pdev_handle pdev,
		  uint8_t vdev_id,
		  uint8_t *peer_mac_addr,
		  int tid,
		  uint32_t tsf32,
		  enum ol_rx_notify_type notify_type, cdf_nbuf_t rx_frame)
{
	/*
	 * NOTE: This is used in qca_main for AP mode to handle IGMP
	 * packets specially. Umac has a corresponding handler for this
	 * not sure if we need to have this for CLD as well.
	 */
}
+
+/**
+ * @brief Look into a rx MSDU to see what kind of special handling it requires
+ * @details
+ * This function is called when the host rx SW sees that the target
+ * rx FW has marked a rx MSDU as needing inspection.
+ * Based on the results of the inspection, the host rx SW will infer
+ * what special handling to perform on the rx frame.
+ * Currently, the only type of frames that require special handling
+ * are IGMP frames. The rx data-path SW checks if the frame is IGMP
+ * (it should be, since the target would not have set the inspect flag
+ * otherwise), and then calls the ol_rx_notify function so the
+ * control-path SW can perform multicast group membership learning
+ * by sniffing the IGMP frame.
+ */
+#define SIZEOF_80211_HDR (sizeof(struct ieee80211_frame))
+void
+ol_rx_inspect(struct ol_txrx_vdev_t *vdev,
+ struct ol_txrx_peer_t *peer,
+ unsigned tid, cdf_nbuf_t msdu, void *rx_desc)
+{
+ ol_txrx_pdev_handle pdev = vdev->pdev;
+ uint8_t *data, *l3_hdr;
+ uint16_t ethertype;
+ int offset;
+
+ data = cdf_nbuf_data(msdu);
+ if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
+ offset = SIZEOF_80211_HDR + LLC_SNAP_HDR_OFFSET_ETHERTYPE;
+ l3_hdr = data + SIZEOF_80211_HDR + LLC_SNAP_HDR_LEN;
+ } else {
+ offset = ETHERNET_ADDR_LEN * 2;
+ l3_hdr = data + ETHERNET_HDR_LEN;
+ }
+ ethertype = (data[offset] << 8) | data[offset + 1];
+ if (ethertype == ETHERTYPE_IPV4) {
+ offset = IPV4_HDR_OFFSET_PROTOCOL;
+ if (l3_hdr[offset] == IP_PROTOCOL_IGMP) {
+ ol_rx_notify(pdev->ctrl_pdev,
+ vdev->vdev_id,
+ peer->mac_addr.raw,
+ tid,
+ htt_rx_mpdu_desc_tsf32(pdev->htt_pdev,
+ rx_desc),
+ OL_RX_NOTIFY_IPV4_IGMP, msdu);
+ }
+ }
+}
+
+void
+ol_rx_offload_deliver_ind_handler(ol_txrx_pdev_handle pdev,
+ cdf_nbuf_t msg, int msdu_cnt)
+{
+ int vdev_id, peer_id, tid;
+ cdf_nbuf_t head_buf, tail_buf, buf;
+ struct ol_txrx_peer_t *peer;
+ uint8_t fw_desc;
+ htt_pdev_handle htt_pdev = pdev->htt_pdev;
+
+ while (msdu_cnt) {
+ htt_rx_offload_msdu_pop(htt_pdev, msg, &vdev_id, &peer_id,
+ &tid, &fw_desc, &head_buf, &tail_buf);
+
+ peer = ol_txrx_peer_find_by_id(pdev, peer_id);
+ if (peer) {
+ ol_rx_data_process(peer, head_buf);
+ } else {
+ buf = head_buf;
+ while (1) {
+ cdf_nbuf_t next;
+ next = cdf_nbuf_next(buf);
+ htt_rx_desc_frame_free(htt_pdev, buf);
+ if (buf == tail_buf)
+ break;
+ buf = next;
+ }
+ }
+ msdu_cnt--;
+ }
+ htt_rx_msdu_buff_replenish(htt_pdev);
+}
+
+void
+ol_rx_mic_error_handler(
+ ol_txrx_pdev_handle pdev,
+ u_int8_t tid,
+ u_int16_t peer_id,
+ void *msdu_desc,
+ cdf_nbuf_t msdu)
+{
+ union htt_rx_pn_t pn = {0};
+ u_int8_t key_id = 0;
+
+ struct ol_txrx_peer_t *peer = NULL;
+ struct ol_txrx_vdev_t *vdev = NULL;
+
+ if (pdev) {
+ peer = ol_txrx_peer_find_by_id(pdev, peer_id);
+ if (peer) {
+ vdev = peer->vdev;
+ if (vdev) {
+ htt_rx_mpdu_desc_pn(vdev->pdev->htt_pdev,
+ msdu_desc, &pn, 48);
+
+ if (htt_rx_msdu_desc_key_id(
+ vdev->pdev->htt_pdev, msdu_desc,
+ &key_id) == true) {
+ ol_rx_err(vdev->pdev->ctrl_pdev,
+ vdev->vdev_id,
+ peer->mac_addr.raw, tid, 0,
+ OL_RX_ERR_TKIP_MIC, msdu,
+ &pn.pn48, key_id);
+ }
+ }
+ }
+ }
+}
+
+/**
+ * @brief Check the first msdu to decide whether the a-msdu should be accepted.
+ */
/*
 * Apply the vdev's privacy filters to an rx frame.
 *
 * Returns FILTER_STATUS_REJECT (1, i.e. "filter it out") or
 * FILTER_STATUS_ACCEPT (0). Note the inverted sense relative to the
 * bool return type: a "true" result means the frame is rejected.
 *
 * Decision order:
 *   1. safemode vdevs accept everything;
 *   2. (WAPI builds) encrypted WAI frames are rejected;
 *   3. the vdev's privacy-exemption list is consulted by ethertype and
 *      packet type (unicast/multicast);
 *   4. otherwise, ExcludeUnencrypted (drop_unenc) and the peer's
 *      security mode decide.
 */
bool
ol_rx_filter(struct ol_txrx_vdev_t *vdev,
	     struct ol_txrx_peer_t *peer, cdf_nbuf_t msdu, void *rx_desc)
{
#define FILTER_STATUS_REJECT 1
#define FILTER_STATUS_ACCEPT 0
	uint8_t *wh;
	uint32_t offset = 0;
	uint16_t ether_type = 0;
	bool is_encrypted = false, is_mcast = false;
	uint8_t i;
	enum privacy_filter_packet_type packet_type =
		PRIVACY_FILTER_PACKET_UNICAST;
	ol_txrx_pdev_handle pdev = vdev->pdev;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;
	int sec_idx;

	/*
	 * Safemode must avoid the PrivacyExemptionList and
	 * ExcludeUnencrypted checking
	 */
	if (vdev->safemode)
		return FILTER_STATUS_ACCEPT;

	is_mcast = htt_rx_msdu_is_wlan_mcast(htt_pdev, rx_desc);
	/* ether_type is only parsed when filters are configured;
	 * otherwise it stays 0 */
	if (vdev->num_filters > 0) {
		if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
			offset = SIZEOF_80211_HDR +
				LLC_SNAP_HDR_OFFSET_ETHERTYPE;
		} else {
			offset = ETHERNET_ADDR_LEN * 2;
		}
		/* get header info from msdu */
		wh = cdf_nbuf_data(msdu);

		/* get ether type */
		ether_type = (wh[offset] << 8) | wh[offset + 1];
		/* get packet type */
		if (true == is_mcast)
			packet_type = PRIVACY_FILTER_PACKET_MULTICAST;
		else
			packet_type = PRIVACY_FILTER_PACKET_UNICAST;
	}
	/* get encrypt info */
	is_encrypted = htt_rx_mpdu_is_encrypted(htt_pdev, rx_desc);
#ifdef ATH_SUPPORT_WAPI
	if ((true == is_encrypted) && (ETHERTYPE_WAI == ether_type)) {
		/* We expect the WAI frames to be always unencrypted when
		   the UMAC gets it.*/
		return FILTER_STATUS_REJECT;
	}
#endif /* ATH_SUPPORT_WAPI */

	/* walk the privacy-exemption list for a matching entry */
	for (i = 0; i < vdev->num_filters; i++) {
		enum privacy_filter filter_type;
		enum privacy_filter_packet_type filter_packet_type;

		/* skip if the ether type does not match */
		if (vdev->privacy_filters[i].ether_type != ether_type)
			continue;

		/* skip if the packet type does not match */
		filter_packet_type = vdev->privacy_filters[i].packet_type;
		if (filter_packet_type != packet_type &&
		    filter_packet_type != PRIVACY_FILTER_PACKET_BOTH) {
			continue;
		}

		filter_type = vdev->privacy_filters[i].filter_type;
		if (filter_type == PRIVACY_FILTER_ALWAYS) {
			/*
			 * In this case, we accept the frame if and only if
			 * it was originally NOT encrypted.
			 */
			if (true == is_encrypted)
				return FILTER_STATUS_REJECT;
			else
				return FILTER_STATUS_ACCEPT;

		} else if (filter_type == PRIVACY_FILTER_KEY_UNAVAILABLE) {
			/*
			 * In this case, we reject the frame if it was
			 * originally NOT encrypted but we have the key mapping
			 * key for this frame.
			 */
			if (!is_encrypted &&
			    !is_mcast &&
			    (peer->security[txrx_sec_ucast].sec_type !=
			     htt_sec_type_none) &&
			    (peer->keyinstalled || !ETHERTYPE_IS_EAPOL_WAPI(
				    ether_type))) {
				return FILTER_STATUS_REJECT;
			} else {
				return FILTER_STATUS_ACCEPT;
			}
		} else {
			/*
			 * The privacy exemption does not apply to this frame.
			 */
			break;
		}
	}

	/*
	 * If the privacy exemption list does not apply to the frame,
	 * check ExcludeUnencrypted.
	 * If ExcludeUnencrypted is not set, or if this was originally
	 * an encrypted frame, it will be accepted.
	 */
	if (!vdev->drop_unenc || (true == is_encrypted))
		return FILTER_STATUS_ACCEPT;

	/*
	 * If this is a open connection, it will be accepted.
	 */
	sec_idx = (true == is_mcast) ? txrx_sec_mcast : txrx_sec_ucast;
	if (peer->security[sec_idx].sec_type == htt_sec_type_none)
		return FILTER_STATUS_ACCEPT;

	/* unencrypted frame on a protected link: count and reject */
	if ((false == is_encrypted) && vdev->drop_unenc) {
		OL_RX_ERR_STATISTICS(pdev, vdev, OL_RX_ERR_PRIVACY,
				     pdev->sec_types[htt_sec_type_none],
				     is_mcast);
	}
	return FILTER_STATUS_REJECT;
}
+
/**
 * ol_rx_deliver() - post-reorder delivery of a chain of rx MSDUs
 * @vdev: virtual device the frames belong to
 * @peer: peer the frames came from
 * @tid: traffic ID of the frames
 * @msdu_list: chain of MSDU netbufs to examine and deliver
 *
 * For each MSDU: runs optional SW decap, checks the target's
 * discard/forward/inspect flags, applies privacy filtering (decided on
 * the first MSDU of each MPDU and applied to the rest), frees the rx
 * descriptor, and either drops the frame or appends it to a delivery
 * list. For OCB vdevs an rx-stats header plus a 0x8152 ethernet header
 * is prepended to delivered frames. The surviving list is handed to
 * ol_rx_data_process().
 */
void
ol_rx_deliver(struct ol_txrx_vdev_t *vdev,
	      struct ol_txrx_peer_t *peer, unsigned tid, cdf_nbuf_t msdu_list)
{
	ol_txrx_pdev_handle pdev = vdev->pdev;
	htt_pdev_handle htt_pdev = pdev->htt_pdev;
	cdf_nbuf_t deliver_list_head = NULL;
	cdf_nbuf_t deliver_list_tail = NULL;
	cdf_nbuf_t msdu;
	bool filter = false;
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
	struct ol_rx_decap_info_t info;
	cdf_mem_set(&info, sizeof(info), 0);
#endif

	msdu = msdu_list;
	/*
	 * Check each MSDU to see whether it requires special handling,
	 * and free each MSDU's rx descriptor
	 */
	while (msdu) {
		void *rx_desc;
		int discard, inspect, dummy_fwd;
		cdf_nbuf_t next = cdf_nbuf_next(msdu);

		rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, msdu);

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
		info.is_msdu_cmpl_mpdu =
			htt_rx_msdu_desc_completes_mpdu(htt_pdev, rx_desc);
		info.is_first_subfrm =
			htt_rx_msdu_first_msdu_flag(htt_pdev, rx_desc);
		/* on decap failure, skip straight to desc free + discard */
		if (OL_RX_DECAP(vdev, peer, msdu, &info) != A_OK) {
			discard = 1;
			TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
				   "decap error %p from peer %p "
				   "(%02x:%02x:%02x:%02x:%02x:%02x) len %d\n",
				   msdu, peer,
				   peer->mac_addr.raw[0], peer->mac_addr.raw[1],
				   peer->mac_addr.raw[2], peer->mac_addr.raw[3],
				   peer->mac_addr.raw[4], peer->mac_addr.raw[5],
				   cdf_nbuf_len(msdu));
			goto DONE;
		}
#endif
		/* read the target's per-MSDU action flags */
		htt_rx_msdu_actions(pdev->htt_pdev, rx_desc, &discard,
				    &dummy_fwd, &inspect);
		if (inspect)
			ol_rx_inspect(vdev, peer, tid, msdu, rx_desc);

		/*
		 * Check the first msdu in the mpdu, if it will be filtered out,
		 * then discard the entire mpdu.
		 */
		if (htt_rx_msdu_first_msdu_flag(htt_pdev, rx_desc))
			filter = ol_rx_filter(vdev, peer, msdu, rx_desc);

#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
DONE:
#endif
		htt_rx_msdu_desc_free(htt_pdev, msdu);
		if (discard || (true == filter)) {
			ol_txrx_frms_dump("rx discarding:",
					  pdev, deliver_list_head,
					  ol_txrx_frm_dump_tcp_seq |
					  ol_txrx_frm_dump_contents,
					  0 /* don't print contents */);
			cdf_nbuf_free(msdu);
			/* If discarding packet is last packet of the delivery
			   list, NULL terminator should be added
			   for delivery list. */
			if (next == NULL && deliver_list_head) {
				/* add NULL terminator */
				cdf_nbuf_set_next(deliver_list_tail, NULL);
			}
		} else {
			/*
			 * If this is for OCB,
			 * then prepend the RX stats header.
			 */
			if (vdev->opmode == wlan_op_mode_ocb) {
				int i;
				struct ol_txrx_ocb_chan_info *chan_info = 0;
				int packet_freq = peer->last_pkt_center_freq;
				/* match the packet's channel to the vdev's
				 * configured OCB channels */
				for (i = 0; i < vdev->ocb_channel_count; i++) {
					if (vdev->ocb_channel_info[i].
						chan_freq == packet_freq) {
						chan_info = &vdev->
							ocb_channel_info[i];
						break;
					}
				}
				if (!chan_info || !chan_info->
					disable_rx_stats_hdr) {
					struct ether_header eth_header = {
						{0} };
					struct ocb_rx_stats_hdr_t rx_header = {
						0};

					/*
					 * Construct the RX stats header and
					 * push that to the frontof the packet.
					 */
					rx_header.version = 1;
					rx_header.length = sizeof(rx_header);
					rx_header.channel_freq =
						peer->last_pkt_center_freq;
					rx_header.rssi_cmb =
						peer->last_pkt_rssi_cmb;
					cdf_mem_copy(rx_header.rssi,
							peer->last_pkt_rssi,
							sizeof(rx_header.rssi));
					/*
					 * Map the legacy OFDM rate code
					 * (0x8..0xF) to the header's datarate
					 * index; anything else is reported
					 * as unknown (0xFF).
					 */
					if (peer->last_pkt_legacy_rate_sel ==
									0) {
						switch (peer->
							last_pkt_legacy_rate) {
						case 0x8:
							rx_header.datarate = 6;
							break;
						case 0x9:
							rx_header.datarate = 4;
							break;
						case 0xA:
							rx_header.datarate = 2;
							break;
						case 0xB:
							rx_header.datarate = 0;
							break;
						case 0xC:
							rx_header.datarate = 7;
							break;
						case 0xD:
							rx_header.datarate = 5;
							break;
						case 0xE:
							rx_header.datarate = 3;
							break;
						case 0xF:
							rx_header.datarate = 1;
							break;
						default:
							rx_header.datarate =
								0xFF;
							break;
						}
					} else {
						rx_header.datarate = 0xFF;
					}

					rx_header.timestamp_microsec = peer->
						last_pkt_timestamp_microsec;
					rx_header.timestamp_submicrosec = peer->
						last_pkt_timestamp_submicrosec;
					rx_header.tsf32 = peer->last_pkt_tsf;
					rx_header.ext_tid = peer->last_pkt_tid;

					cdf_nbuf_push_head(msdu,
							   sizeof(rx_header));
					cdf_mem_copy(cdf_nbuf_data(msdu),
						     &rx_header, sizeof(rx_header));

					/* Construct the ethernet header with
					   type 0x8152 and push that to the
					   front of the packet to indicate the
					   RX stats header. */
					eth_header.ether_type = CDF_SWAP_U16(
						ETHERTYPE_OCB_RX);
					cdf_nbuf_push_head(msdu,
							   sizeof(eth_header));
					cdf_mem_copy(cdf_nbuf_data(msdu),
						     &eth_header,
						     sizeof(eth_header));
				}
			}
			OL_RX_PEER_STATS_UPDATE(peer, msdu);
			OL_RX_ERR_STATISTICS_1(pdev, vdev, peer, rx_desc,
					       OL_RX_ERR_NONE);
			TXRX_STATS_MSDU_INCR(vdev->pdev, rx.delivered, msdu);
			OL_TXRX_LIST_APPEND(deliver_list_head,
					    deliver_list_tail, msdu);
		}
		msdu = next;
	}
	/* sanity check - are there any frames left to give to the OS shim? */
	if (!deliver_list_head)
		return;

#if defined(PERE_IP_HDR_ALIGNMENT_WAR)
	if (pdev->host_80211_enable)
		for (msdu = deliver_list_head; msdu; msdu = cdf_nbuf_next(msdu))
			transcap_nwifi_to_8023(msdu);
#endif

	ol_txrx_frms_dump("rx delivering:",
			  pdev, deliver_list_head,
			  ol_txrx_frm_dump_tcp_seq | ol_txrx_frm_dump_contents,
			  0 /* don't print contents */);

	ol_rx_data_process(peer, deliver_list_head);
}
+
+void
+ol_rx_discard(struct ol_txrx_vdev_t *vdev,
+ struct ol_txrx_peer_t *peer, unsigned tid, cdf_nbuf_t msdu_list)
+{
+ ol_txrx_pdev_handle pdev = vdev->pdev;
+ htt_pdev_handle htt_pdev = pdev->htt_pdev;
+
+ while (msdu_list) {
+ cdf_nbuf_t msdu = msdu_list;
+
+ msdu_list = cdf_nbuf_next(msdu_list);
+ TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
+ "discard rx %p from partly-deleted peer %p "
+ "(%02x:%02x:%02x:%02x:%02x:%02x)\n",
+ msdu, peer,
+ peer->mac_addr.raw[0], peer->mac_addr.raw[1],
+ peer->mac_addr.raw[2], peer->mac_addr.raw[3],
+ peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
+ htt_rx_desc_frame_free(htt_pdev, msdu);
+ }
+}
+
+void ol_rx_peer_init(struct ol_txrx_pdev_t *pdev, struct ol_txrx_peer_t *peer)
+{
+ uint8_t tid;
+ for (tid = 0; tid < OL_TXRX_NUM_EXT_TIDS; tid++) {
+ ol_rx_reorder_init(&peer->tids_rx_reorder[tid], tid);
+
+ /* invalid sequence number */
+ peer->tids_last_seq[tid] = IEEE80211_SEQ_MAX;
+ }
+ /*
+ * Set security defaults: no PN check, no security.
+ * The target may send a HTT SEC_IND message to overwrite
+ * these defaults.
+ */
+ peer->security[txrx_sec_ucast].sec_type =
+ peer->security[txrx_sec_mcast].sec_type = htt_sec_type_none;
+ peer->keyinstalled = 0;
+ cdf_atomic_init(&peer->fw_pn_check);
+}
+
/*
 * Tear down a peer's rx state: mark its key as uninstalled and flush
 * every TID's rx reorder queue.
 */
void
ol_rx_peer_cleanup(struct ol_txrx_vdev_t *vdev, struct ol_txrx_peer_t *peer)
{
	peer->keyinstalled = 0;
	ol_rx_reorder_peer_cleanup(vdev, peer);
}
+
+/*
+ * Free frames including both rx descriptors and buffers
+ */
+void ol_rx_frames_free(htt_pdev_handle htt_pdev, cdf_nbuf_t frames)
+{
+ cdf_nbuf_t next, frag = frames;
+
+ while (frag) {
+ next = cdf_nbuf_next(frag);
+ htt_rx_desc_frame_free(htt_pdev, frag);
+ frag = next;
+ }
+}
+
/**
 * ol_rx_in_order_indication_handler() - handle an in-order HTT rx
 *	indication (frames already ordered by the target)
 * @pdev: txrx physical device
 * @rx_ind_msg: the HTT rx indication message (netbuf)
 * @peer_id: ID of the peer the frames came from
 * @tid: extended traffic ID of the frames
 * @is_offload: nonzero if this is an offload indication
 *
 * Pops the whole MSDU chain from the rx ring, replenishes the ring
 * immediately (before the OS consumes the frames), then delivers the
 * chain via the peer's rx_opt_proc callback; if the peer is unknown
 * the chain is freed.
 */
void
ol_rx_in_order_indication_handler(ol_txrx_pdev_handle pdev,
				  cdf_nbuf_t rx_ind_msg,
				  uint16_t peer_id,
				  uint8_t tid, uint8_t is_offload)
{
	struct ol_txrx_vdev_t *vdev = NULL;
	struct ol_txrx_peer_t *peer = NULL;
	htt_pdev_handle htt_pdev = NULL;
	int status;
	cdf_nbuf_t head_msdu, tail_msdu = NULL;

	if (pdev) {
		peer = ol_txrx_peer_find_by_id(pdev, peer_id);
		htt_pdev = pdev->htt_pdev;
	} else {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "%s: Invalid pdev passed!\n", __func__);
		cdf_assert_always(pdev);
		return;
	}

#if defined(HELIUMPLUS_DEBUG)
	cdf_print("%s %d: rx_ind_msg 0x%p peer_id %d tid %d is_offload %d\n",
		  __func__, __LINE__, rx_ind_msg, peer_id, tid, is_offload);
#endif

	/*
	 * Get a linked list of the MSDUs in the rx in order indication.
	 * This also attaches each rx MSDU descriptor to the
	 * corresponding rx MSDU network buffer.
	 */
	status = htt_rx_amsdu_pop(htt_pdev, rx_ind_msg, &head_msdu, &tail_msdu);
	if (cdf_unlikely(0 == status)) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
			   "%s: Pop status is 0, returning here\n", __func__);
		return;
	}

	/* Replenish the rx buffer ring first to provide buffers to the target
	   rather than waiting for the indeterminate time taken by the OS
	   to consume the rx frames */
	htt_rx_msdu_buff_replenish(htt_pdev);

	/* Send the chain of MSDUs to the OS */
	/* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
	cdf_nbuf_set_next(tail_msdu, NULL);

	/* Pktlog */
#ifdef WDI_EVENT_ENABLE
	wdi_event_handler(WDI_EVENT_RX_DESC_REMOTE, pdev, head_msdu);
#endif

	/* if this is an offload indication, peer id is carried in the
	   rx buffer */
	if (peer) {
		vdev = peer->vdev;
	} else {
		/* unknown peer: drop the whole popped chain */
		TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2,
			   "%s: Couldn't find peer from ID 0x%x\n",
			   __func__, peer_id);
		while (head_msdu) {
			cdf_nbuf_t msdu = head_msdu;
			head_msdu = cdf_nbuf_next(head_msdu);
			htt_rx_desc_frame_free(htt_pdev, msdu);
		}
		return;
	}

	peer->rx_opt_proc(vdev, peer, tid, head_msdu);
}
+
+/* the msdu_list passed here must be NULL terminated */
+void
+ol_rx_in_order_deliver(struct ol_txrx_vdev_t *vdev,
+ struct ol_txrx_peer_t *peer,
+ unsigned tid, cdf_nbuf_t msdu_list)
+{
+ cdf_nbuf_t msdu;
+
+ msdu = msdu_list;
+ /*
+ * Currently, this does not check each MSDU to see whether it requires
+ * special handling. MSDUs that need special handling (example: IGMP
+ * frames) should be sent via a seperate HTT message. Also, this does
+ * not do rx->tx forwarding or filtering.
+ */
+
+ while (msdu) {
+ cdf_nbuf_t next = cdf_nbuf_next(msdu);
+
+ OL_RX_PEER_STATS_UPDATE(peer, msdu);
+ OL_RX_ERR_STATISTICS_1(vdev->pdev, vdev, peer, rx_desc,
+ OL_RX_ERR_NONE);
+ TXRX_STATS_MSDU_INCR(vdev->pdev, rx.delivered, msdu);
+
+ msdu = next;
+ }
+
+ ol_txrx_frms_dump("rx delivering:",
+ pdev, deliver_list_head,
+ ol_txrx_frm_dump_tcp_seq | ol_txrx_frm_dump_contents,
+ 0 /* don't print contents */);
+
+ ol_rx_data_process(peer, msdu_list);
+}
+
+void
+ol_rx_offload_paddr_deliver_ind_handler(htt_pdev_handle htt_pdev,
+ uint32_t msdu_count,
+ uint32_t *msg_word)
+{
+ int vdev_id, peer_id, tid;
+ cdf_nbuf_t head_buf, tail_buf, buf;
+ struct ol_txrx_peer_t *peer;
+ uint8_t fw_desc;
+ int msdu_iter = 0;
+
+ while (msdu_count) {
+ htt_rx_offload_paddr_msdu_pop_ll(htt_pdev, msg_word, msdu_iter,
+ &vdev_id, &peer_id, &tid,
+ &fw_desc, &head_buf,
+ &tail_buf);
+
+ peer = ol_txrx_peer_find_by_id(htt_pdev->txrx_pdev, peer_id);
+ if (peer) {
+ ol_rx_data_process(peer, head_buf);
+ } else {
+ buf = head_buf;
+ while (1) {
+ cdf_nbuf_t next;
+ next = cdf_nbuf_next(buf);
+ htt_rx_desc_frame_free(htt_pdev, buf);
+ if (buf == tail_buf)
+ break;
+ buf = next;
+ }
+ }
+ msdu_iter++;
+ msdu_count--;
+ }
+ htt_rx_msdu_buff_replenish(htt_pdev);
+}
+
+
+#ifdef NEVERDEFINED
+/**
+ * @brief populates vow ext stats in given network buffer.
+ * @param msdu - network buffer handle
+ * @param pdev - handle to htt dev.
+ */
/*
 * Overwrite part of a VoW iperf3 RTP test payload in-place with rx PHY
 * statistics (RSSI, rate info, timestamps, PHY error / channel-busy
 * counters, sequence number). Only IPv4/UDP frames whose payload
 * carries the expected RTP-extension magic (0x12 0x34 0x00 0x08) are
 * modified; the UDP checksum is zeroed so it need not be recomputed.
 * Compiled out (#ifdef NEVERDEFINED).
 */
void ol_ath_add_vow_extstats(htt_pdev_handle pdev, cdf_nbuf_t msdu)
{
	/* FIX THIS:
	 * txrx should not be directly using data types (scn)
	 * that are internal to other modules.
	 */
	struct ol_ath_softc_net80211 *scn =
		(struct ol_ath_softc_net80211 *)pdev->ctrl_pdev;

	if (scn->vow_extstats == 0) {
		return;
	} else {
		uint8_t *data, *l3_hdr, *bp;
		uint16_t ethertype;
		int offset;
		struct vow_extstats vowstats;

		data = cdf_nbuf_data(msdu);

		offset = ETHERNET_ADDR_LEN * 2;
		l3_hdr = data + ETHERNET_HDR_LEN;
		ethertype = (data[offset] << 8) | data[offset + 1];
		if (ethertype == ETHERTYPE_IPV4) {
			offset = IPV4_HDR_OFFSET_PROTOCOL;
			/* only plain IPv4 (no options) carrying UDP */
			if ((l3_hdr[offset] == IP_PROTOCOL_UDP) &&
			    (l3_hdr[0] == IP_VER4_N_NO_EXTRA_HEADERS)) {
				bp = data + EXT_HDR_OFFSET;

				/* match the RTP extension magic bytes */
				if ((data[RTP_HDR_OFFSET] == UDP_PDU_RTP_EXT) &&
				    (bp[0] == 0x12) &&
				    (bp[1] == 0x34) &&
				    (bp[2] == 0x00) && (bp[3] == 0x08)) {
					/*
					 * Clear UDP checksum so we do not have
					 * to recalculate it
					 * after filling in status fields.
					 */
					data[UDP_CKSUM_OFFSET] = 0;
					data[(UDP_CKSUM_OFFSET + 1)] = 0;

					bp += IPERF3_DATA_OFFSET;

					htt_rx_get_vowext_stats(msdu,
								&vowstats);

					/* control channel RSSI */
					*bp++ = vowstats.rx_rssi_ctl0;
					*bp++ = vowstats.rx_rssi_ctl1;
					*bp++ = vowstats.rx_rssi_ctl2;

					/* rx rate info */
					*bp++ = vowstats.rx_bw;
					*bp++ = vowstats.rx_sgi;
					*bp++ = vowstats.rx_nss;

					*bp++ = vowstats.rx_rssi_comb;
					/* rsflags */
					*bp++ = vowstats.rx_rs_flags;

					/* Time stamp Lo (big-endian bytes) */
					*bp++ = (uint8_t)
						((vowstats.
						  rx_macTs & 0x0000ff00) >> 8);
					*bp++ = (uint8_t)
						(vowstats.rx_macTs & 0x0000ff);
					/* rx phy errors */
					*bp++ = (uint8_t)
						((scn->chan_stats.
						  phy_err_cnt >> 8) & 0xff);
					*bp++ =
						(uint8_t) (scn->chan_stats.
							   phy_err_cnt & 0xff);
					/* rx clear count (32 bits, MSB first) */
					*bp++ = (uint8_t)
						((scn->mib_cycle_cnts.
						  rx_clear_count >> 24) & 0xff);
					*bp++ = (uint8_t)
						((scn->mib_cycle_cnts.
						  rx_clear_count >> 16) & 0xff);
					*bp++ = (uint8_t)
						((scn->mib_cycle_cnts.
						  rx_clear_count >> 8) & 0xff);
					*bp++ = (uint8_t)
						(scn->mib_cycle_cnts.
						 rx_clear_count & 0xff);
					/* rx cycle count (32 bits, MSB first) */
					*bp++ = (uint8_t)
						((scn->mib_cycle_cnts.
						  cycle_count >> 24) & 0xff);
					*bp++ = (uint8_t)
						((scn->mib_cycle_cnts.
						  cycle_count >> 16) & 0xff);
					*bp++ = (uint8_t)
						((scn->mib_cycle_cnts.
						  cycle_count >> 8) & 0xff);
					*bp++ = (uint8_t)
						(scn->mib_cycle_cnts.
						 cycle_count & 0xff);

					*bp++ = vowstats.rx_ratecode;
					*bp++ = vowstats.rx_moreaggr;

					/* sequence number */
					*bp++ = (uint8_t)
						((vowstats.rx_seqno >> 8) &
						 0xff);
					*bp++ = (uint8_t)
						(vowstats.rx_seqno & 0xff);
				}
			}
		}
	}
}
+
+#endif
diff --git a/dp/txrx/ol_rx.h b/dp/txrx/ol_rx.h
new file mode 100644
index 000000000000..33d749fccf00
--- /dev/null
+++ b/dp/txrx/ol_rx.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _OL_RX__H_
+#define _OL_RX__H_
+
+#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <ol_txrx_types.h> /* ol_txrx_vdev_t, etc. */
+
+void
+ol_rx_deliver(struct ol_txrx_vdev_t *vdev,
+ struct ol_txrx_peer_t *peer, unsigned tid, cdf_nbuf_t head_msdu);
+
+void
+ol_rx_discard(struct ol_txrx_vdev_t *vdev,
+ struct ol_txrx_peer_t *peer, unsigned tid, cdf_nbuf_t head_msdu);
+
+void ol_rx_frames_free(htt_pdev_handle htt_pdev, cdf_nbuf_t frames);
+
+void ol_rx_peer_init(struct ol_txrx_pdev_t *pdev, struct ol_txrx_peer_t *peer);
+
+void
+ol_rx_peer_cleanup(struct ol_txrx_vdev_t *vdev, struct ol_txrx_peer_t *peer);
+
+void
+ol_rx_in_order_deliver(struct ol_txrx_vdev_t *vdev,
+ struct ol_txrx_peer_t *peer,
+ unsigned tid, cdf_nbuf_t head_msdu);
+
+void
+ol_rx_offload_paddr_deliver_ind_handler(htt_pdev_handle htt_pdev,
+ uint32_t msdu_count,
+ uint32_t *msg_word);
+
+void
+ol_rx_mic_error_handler(
+ ol_txrx_pdev_handle pdev,
+ u_int8_t tid,
+ u_int16_t peer_id,
+ void *msdu_desc,
+ cdf_nbuf_t msdu);
+
+#endif /* _OL_RX__H_ */
diff --git a/dp/txrx/ol_rx_defrag.c b/dp/txrx/ol_rx_defrag.c
new file mode 100644
index 000000000000..33e829ed5ebf
--- /dev/null
+++ b/dp/txrx/ol_rx_defrag.c
@@ -0,0 +1,1063 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/*-
+ * Copyright (c) 2002-2007 Sam Leffler, Errno Consulting
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <ol_htt_api.h>
+#include <ol_txrx_api.h>
+#include <ol_txrx_htt_api.h>
+#include <ol_htt_rx_api.h>
+#include <ol_txrx_types.h>
+#include <ol_rx_reorder.h>
+#include <ol_rx_pn.h>
+#include <ol_rx_fwd.h>
+#include <ol_rx.h>
+#include <ol_txrx_internal.h>
+#include <ol_ctrl_txrx_api.h>
+#include <ol_txrx_peer_find.h>
+#include <cdf_nbuf.h>
+#include <ieee80211.h>
+#include <cdf_util.h>
+#include <athdefs.h>
+#include <cdf_memory.h>
+#include <ol_rx_defrag.h>
+#include <enet.h>
+#include <cdf_time.h> /* cdf_system_time */
+
+#define DEFRAG_IEEE80211_ADDR_EQ(a1, a2) \
+ (cdf_mem_compare(a1, a2, IEEE80211_ADDR_LEN) == 0)
+
+#define DEFRAG_IEEE80211_ADDR_COPY(dst, src) \
+ cdf_mem_copy(dst, src, IEEE80211_ADDR_LEN)
+
+#define DEFRAG_IEEE80211_QOS_HAS_SEQ(wh) \
+ (((wh)->i_fc[0] & \
+ (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_QOS)) == \
+ (IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS))
+
+#define DEFRAG_IEEE80211_QOS_GET_TID(_x) \
+ ((_x)->i_qos[0] & IEEE80211_QOS_TID)
+
+/*
+ * Per-cipher framing parameters used during defragmentation:
+ *   ic_name    - human-readable cipher name
+ *   ic_header  - bytes of cipher header inserted after the 802.11 header
+ *   ic_trailer - bytes of cipher trailer at the end of each fragment
+ *   ic_miclen  - bytes of Michael MIC on the recombined frame (TKIP only)
+ */
+const struct ol_rx_defrag_cipher f_ccmp = {
+	"AES-CCM",
+	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
+	IEEE80211_WEP_MICLEN,
+	0,
+};
+
+const struct ol_rx_defrag_cipher f_tkip = {
+	"TKIP",
+	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
+	IEEE80211_WEP_CRCLEN,
+	IEEE80211_WEP_MICLEN,
+};
+
+const struct ol_rx_defrag_cipher f_wep = {
+	"WEP",
+	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN,
+	IEEE80211_WEP_CRCLEN,
+	0,
+};
+
+/*
+ * ol_rx_frag_get_mac_hdr() - return a pointer to the 802.11 MAC header
+ * of @frag.  The header sits at the start of the fragment's data area
+ * (the htt pdev handle is unused in this build).
+ */
+inline struct ieee80211_frame *ol_rx_frag_get_mac_hdr(
+	htt_pdev_handle htt_pdev,
+	cdf_nbuf_t frag)
+{
+	struct ieee80211_frame *mac_hdr;
+
+	mac_hdr = (struct ieee80211_frame *)cdf_nbuf_data(frag);
+	return mac_hdr;
+}
+#define ol_rx_frag_pull_hdr(pdev, frag, hdrsize) \
+ cdf_nbuf_pull_head(frag, hdrsize);
+#define OL_RX_FRAG_CLONE(frag) NULL /* no-op */
+
+/*
+ * ol_rx_frag_desc_adjust() - report how much rx-descriptor/indication
+ * data precedes the frame within the msdu buffer.
+ *
+ * In this build no rx descriptor is kept in front of the frame data, so
+ * all outputs are NULL/zero and callers treat cdf_nbuf_data() as the
+ * start of the 802.11 header.
+ */
+static inline void
+ol_rx_frag_desc_adjust(ol_txrx_pdev_handle pdev,
+		       cdf_nbuf_t msdu,
+		       void **rx_desc_old_position,
+		       void **ind_old_position, int *rx_desc_len)
+{
+	*rx_desc_old_position = NULL;
+	*ind_old_position = NULL;
+	*rx_desc_len = 0;
+}
+
+/*
+ * Process incoming fragments
+ *
+ * Handle an rx-fragment indication from the target: optionally flush the
+ * reorder slot named in the message, pop the (single) fragment from HTT,
+ * look up its MPDU descriptor and sequence number, and hand it to the
+ * reorder/defrag machinery.  Fragments from an unknown peer are freed.
+ * Finally, HTT is asked to replenish rx buffers for the target.
+ */
+void
+ol_rx_frag_indication_handler(ol_txrx_pdev_handle pdev,
+			      cdf_nbuf_t rx_frag_ind_msg,
+			      uint16_t peer_id, uint8_t tid)
+{
+	uint16_t seq_num;
+	int seq_num_start, seq_num_end;
+	struct ol_txrx_peer_t *peer;
+	htt_pdev_handle htt_pdev;
+	cdf_nbuf_t head_msdu, tail_msdu;
+	void *rx_mpdu_desc;
+
+	htt_pdev = pdev->htt_pdev;
+	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
+
+	if (!ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev) &&
+	    htt_rx_ind_flush(pdev->htt_pdev, rx_frag_ind_msg) && peer) {
+		htt_rx_frag_ind_flush_seq_num_range(pdev->htt_pdev,
+						    rx_frag_ind_msg,
+						    &seq_num_start,
+						    &seq_num_end);
+		/*
+		 * Assuming flush indication for frags sent from target is
+		 * separate from normal frames
+		 */
+		ol_rx_reorder_flush_frag(htt_pdev, peer, tid, seq_num_start);
+	}
+	if (peer) {
+		/* a fragment indication carries exactly one MSDU */
+		htt_rx_frag_pop(htt_pdev, rx_frag_ind_msg, &head_msdu,
+				&tail_msdu);
+		cdf_assert(head_msdu == tail_msdu);
+		/*
+		 * The MPDU descriptor location differs between full
+		 * reorder offload (descriptor follows the msdu) and the
+		 * legacy path (descriptor is in the indication message).
+		 */
+		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
+			rx_mpdu_desc =
+				htt_rx_mpdu_desc_list_next(htt_pdev, head_msdu);
+		} else {
+			rx_mpdu_desc =
+				htt_rx_mpdu_desc_list_next(htt_pdev,
+							   rx_frag_ind_msg);
+		}
+		seq_num = htt_rx_mpdu_desc_seq_num(htt_pdev, rx_mpdu_desc);
+		OL_RX_ERR_STATISTICS_1(pdev, peer->vdev, peer, rx_mpdu_desc,
+				       OL_RX_ERR_NONE_FRAG);
+		ol_rx_reorder_store_frag(pdev, peer, tid, seq_num, head_msdu);
+	} else {
+		/* invalid frame - discard it */
+		htt_rx_frag_pop(htt_pdev, rx_frag_ind_msg, &head_msdu,
+				&tail_msdu);
+		/* still advance the descriptor cursor before freeing */
+		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev))
+			htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu);
+		else
+			htt_rx_mpdu_desc_list_next(htt_pdev, rx_frag_ind_msg);
+
+		htt_rx_desc_frame_free(htt_pdev, head_msdu);
+	}
+	/* request HTT to provide new rx MSDU buffers for the target to fill. */
+	htt_rx_msdu_buff_replenish(htt_pdev);
+}
+
+/*
+ * ol_rx_reorder_flush_frag() - drop any partially collected fragments
+ * held in the reorder slot that @seq_num maps to for this peer/tid.
+ */
+void
+ol_rx_reorder_flush_frag(htt_pdev_handle htt_pdev,
+			 struct ol_txrx_peer_t *peer, unsigned tid, int seq_num)
+{
+	struct ol_rx_reorder_array_elem_t *slot;
+	int idx = seq_num & peer->tids_rx_reorder[tid].win_sz_mask;
+
+	slot = &peer->tids_rx_reorder[tid].array[idx];
+	if (!slot->head)
+		return;
+
+	ol_rx_frames_free(htt_pdev, slot->head);
+	slot->head = NULL;
+	slot->tail = NULL;
+}
+
+/*
+ * Reorder and store fragments
+ *
+ * Collect the fragments of one MPDU in reorder-array slot 0 (fragments
+ * are always stored at sequence index 0 -- asserted below).  A complete,
+ * unfragmented frame is defragmented immediately.  Otherwise the new
+ * fragment is inserted in fragment-number order, and defragmentation
+ * runs once all fragments of the sequence are present.  A fragment whose
+ * sequence number or addresses disagree with the partially collected
+ * MPDU causes the stored fragments to be dropped.
+ */
+void
+ol_rx_reorder_store_frag(ol_txrx_pdev_handle pdev,
+			 struct ol_txrx_peer_t *peer,
+			 unsigned tid, uint16_t seq_num, cdf_nbuf_t frag)
+{
+	struct ieee80211_frame *fmac_hdr, *mac_hdr;
+	uint8_t fragno, more_frag, all_frag_present = 0;
+	struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
+	uint16_t frxseq, rxseq, seq;
+	htt_pdev_handle htt_pdev = pdev->htt_pdev;
+
+	seq = seq_num & peer->tids_rx_reorder[tid].win_sz_mask;
+	cdf_assert(seq == 0);
+	rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[seq];
+
+	/* extract seq number, frag number and more-frag bit from the header */
+	mac_hdr = (struct ieee80211_frame *)
+		ol_rx_frag_get_mac_hdr(htt_pdev, frag);
+	rxseq = cdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) >>
+		IEEE80211_SEQ_SEQ_SHIFT;
+	fragno = cdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) &
+		IEEE80211_SEQ_FRAG_MASK;
+	more_frag = mac_hdr->i_fc[1] & IEEE80211_FC1_MORE_FRAG;
+
+	/* fast path: a complete, unfragmented frame with no pending frags */
+	if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) {
+		rx_reorder_array_elem->head = frag;
+		rx_reorder_array_elem->tail = frag;
+		cdf_nbuf_set_next(frag, NULL);
+		ol_rx_defrag(pdev, peer, tid, rx_reorder_array_elem->head);
+		rx_reorder_array_elem->head = NULL;
+		rx_reorder_array_elem->tail = NULL;
+		return;
+	}
+	/*
+	 * A new fragment must belong to the same MPDU as the fragments
+	 * already stored: same sequence number and same addr1/addr2.
+	 */
+	if (rx_reorder_array_elem->head) {
+		fmac_hdr = (struct ieee80211_frame *)
+			ol_rx_frag_get_mac_hdr(htt_pdev,
+					       rx_reorder_array_elem->head);
+		frxseq = cdf_le16_to_cpu(*(uint16_t *) fmac_hdr->i_seq) >>
+			IEEE80211_SEQ_SEQ_SHIFT;
+		if (rxseq != frxseq
+		    || !DEFRAG_IEEE80211_ADDR_EQ(mac_hdr->i_addr1,
+						 fmac_hdr->i_addr1)
+		    || !DEFRAG_IEEE80211_ADDR_EQ(mac_hdr->i_addr2,
+						 fmac_hdr->i_addr2)) {
+			ol_rx_frames_free(htt_pdev,
+					  rx_reorder_array_elem->head);
+			rx_reorder_array_elem->head = NULL;
+			rx_reorder_array_elem->tail = NULL;
+			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+				   "\n ol_rx_reorder_store: %s mismatch \n",
+				   (rxseq == frxseq)
+				   ? "address"
+				   : "seq number");
+		}
+	}
+
+	ol_rx_fraglist_insert(htt_pdev, &rx_reorder_array_elem->head,
+			      &rx_reorder_array_elem->tail, frag,
+			      &all_frag_present);
+
+	/* take the tid off the timeout waitlist while we update its state */
+	if (pdev->rx.flags.defrag_timeout_check)
+		ol_rx_defrag_waitlist_remove(peer, tid);
+
+	if (all_frag_present) {
+		ol_rx_defrag(pdev, peer, tid, rx_reorder_array_elem->head);
+		rx_reorder_array_elem->head = NULL;
+		rx_reorder_array_elem->tail = NULL;
+		peer->tids_rx_reorder[tid].defrag_timeout_ms = 0;
+		peer->tids_last_seq[tid] = seq_num;
+	} else if (pdev->rx.flags.defrag_timeout_check) {
+		/* arm the defrag timeout and re-queue on the waitlist */
+		uint32_t now_ms = cdf_system_ticks_to_msecs(cdf_system_ticks());
+
+		peer->tids_rx_reorder[tid].defrag_timeout_ms =
+			now_ms + pdev->rx.defrag.timeout_ms;
+		ol_rx_defrag_waitlist_add(peer, tid);
+	}
+}
+
+/*
+ * Insert and store fragments
+ *
+ * Insert @frag into the fragment list in ascending fragment-number
+ * order, dropping duplicates.  After insertion, *all_frag_present is set
+ * to 1 when the list starts at fragment 0, is gap-free, and its last
+ * fragment has the more-fragments bit clear; otherwise 0.
+ */
+void
+ol_rx_fraglist_insert(htt_pdev_handle htt_pdev,
+		      cdf_nbuf_t *head_addr,
+		      cdf_nbuf_t *tail_addr,
+		      cdf_nbuf_t frag, uint8_t *all_frag_present)
+{
+	cdf_nbuf_t next, prev = NULL, cur = *head_addr;
+	struct ieee80211_frame *mac_hdr, *cmac_hdr, *next_hdr, *lmac_hdr;
+	uint8_t fragno, cur_fragno, lfragno, next_fragno;
+	uint8_t last_morefrag = 1, count = 0;
+	cdf_nbuf_t frag_clone;
+
+	cdf_assert(frag);
+	frag_clone = OL_RX_FRAG_CLONE(frag);
+	frag = frag_clone ? frag_clone : frag;
+
+	mac_hdr = (struct ieee80211_frame *)
+		ol_rx_frag_get_mac_hdr(htt_pdev, frag);
+	fragno = cdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) &
+		IEEE80211_SEQ_FRAG_MASK;
+
+	/* empty list: the new fragment becomes head and tail */
+	if (!(*head_addr)) {
+		*head_addr = frag;
+		*tail_addr = frag;
+		cdf_nbuf_set_next(*tail_addr, NULL);
+		return;
+	}
+	/* For efficiency, compare with tail first */
+	lmac_hdr = (struct ieee80211_frame *)
+		ol_rx_frag_get_mac_hdr(htt_pdev, *tail_addr);
+	lfragno = cdf_le16_to_cpu(*(uint16_t *) lmac_hdr->i_seq) &
+		IEEE80211_SEQ_FRAG_MASK;
+	if (fragno > lfragno) {
+		cdf_nbuf_set_next(*tail_addr, frag);
+		*tail_addr = frag;
+		cdf_nbuf_set_next(*tail_addr, NULL);
+	} else {
+		/*
+		 * Walk to the first fragment with number >= fragno.
+		 * Terminates before running off the list because
+		 * fragno <= the tail's fragment number (checked above).
+		 */
+		do {
+			cmac_hdr = (struct ieee80211_frame *)
+				ol_rx_frag_get_mac_hdr(htt_pdev, cur);
+			cur_fragno =
+				cdf_le16_to_cpu(*(uint16_t *) cmac_hdr->i_seq) &
+				IEEE80211_SEQ_FRAG_MASK;
+			prev = cur;
+			cur = cdf_nbuf_next(cur);
+		} while (fragno > cur_fragno);
+
+		if (fragno == cur_fragno) {
+			/* duplicate fragment: free it and report "not done" */
+			htt_rx_desc_frame_free(htt_pdev, frag);
+			*all_frag_present = 0;
+			return;
+		} else {
+			cdf_nbuf_set_next(prev, frag);
+			cdf_nbuf_set_next(frag, cur);
+		}
+	}
+	/* completeness check: last fragment must have more-frag clear ... */
+	next = cdf_nbuf_next(*head_addr);
+	lmac_hdr = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev,
+								    *tail_addr);
+	last_morefrag = lmac_hdr->i_fc[1] & IEEE80211_FC1_MORE_FRAG;
+	if (!last_morefrag) {
+		/* ... and fragment numbers must run 0,1,2,... with no gaps */
+		do {
+			next_hdr =
+				(struct ieee80211_frame *)
+				ol_rx_frag_get_mac_hdr(htt_pdev, next);
+			next_fragno =
+				cdf_le16_to_cpu(*(uint16_t *) next_hdr->i_seq) &
+				IEEE80211_SEQ_FRAG_MASK;
+			count++;
+			if (next_fragno != count)
+				break;
+
+			next = cdf_nbuf_next(next);
+		} while (next);
+
+		if (!next) {
+			*all_frag_present = 1;
+			return;
+		}
+	}
+	*all_frag_present = 0;
+}
+
+/*
+ * add tid to pending fragment wait list
+ *
+ * NOTE(review): no guard here against the tid already being linked on
+ * the waitlist; a double TAILQ_INSERT_TAIL would corrupt the list.  The
+ * caller (ol_rx_reorder_store_frag) removes the tid before re-adding it
+ * when defrag_timeout_check is set -- confirm all call paths do so.
+ */
+void ol_rx_defrag_waitlist_add(struct ol_txrx_peer_t *peer, unsigned tid)
+{
+	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
+	struct ol_rx_reorder_t *rx_reorder = &peer->tids_rx_reorder[tid];
+
+	TAILQ_INSERT_TAIL(&pdev->rx.defrag.waitlist, rx_reorder,
+			  defrag_waitlist_elem);
+}
+
+/*
+ * remove tid from pending fragment wait list
+ *
+ * TAILQ membership must be tested via tqe_prev: while an entry is
+ * linked, tqe_prev is always non-NULL, whereas tqe_next is legitimately
+ * NULL for the tail entry.  The original code tested tqe_next twice,
+ * which (a) skipped removal of a tail entry and (b) made the
+ * inconsistency-diagnostic branch dead code (its condition duplicated
+ * the first).  Test tqe_prev for membership and keep the tqe_next check
+ * as the half-linked-state diagnostic.
+ */
+void ol_rx_defrag_waitlist_remove(struct ol_txrx_peer_t *peer, unsigned tid)
+{
+	struct ol_txrx_pdev_t *pdev = peer->vdev->pdev;
+	struct ol_rx_reorder_t *rx_reorder = &peer->tids_rx_reorder[tid];
+
+	if (rx_reorder->defrag_waitlist_elem.tqe_prev != NULL) {
+
+		TAILQ_REMOVE(&pdev->rx.defrag.waitlist, rx_reorder,
+			     defrag_waitlist_elem);
+
+		rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
+		rx_reorder->defrag_waitlist_elem.tqe_prev = NULL;
+	} else if (rx_reorder->defrag_waitlist_elem.tqe_next != NULL) {
+		/* linked forward but not backward: list is corrupt */
+		TXRX_PRINT(TXRX_PRINT_LEVEL_FATAL_ERR,
+			   "waitlist->tqe_prv = NULL\n");
+		CDF_ASSERT(0);
+		rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
+	}
+}
+
+#ifndef container_of
+#define container_of(ptr, type, member) \
+ ((type *)((char *)(ptr) - (char *)(&((type *)0)->member)))
+#endif
+
+/*
+ * flush stale fragments from the waitlist
+ *
+ * Entries are appended in arming order, so their timeouts are
+ * non-decreasing; the walk stops at the first entry that has not yet
+ * expired.  For each expired entry, the owning peer is recovered by
+ * stepping back from &tids_rx_reorder[tid] to &tids_rx_reorder[0] and
+ * applying container_of.
+ */
+void ol_rx_defrag_waitlist_flush(struct ol_txrx_pdev_t *pdev)
+{
+	struct ol_rx_reorder_t *rx_reorder, *tmp;
+	uint32_t now_ms = cdf_system_ticks_to_msecs(cdf_system_ticks());
+
+	TAILQ_FOREACH_SAFE(rx_reorder, &pdev->rx.defrag.waitlist,
+			   defrag_waitlist_elem, tmp) {
+		struct ol_txrx_peer_t *peer;
+		struct ol_rx_reorder_t *rx_reorder_base;
+		unsigned tid;
+
+		if (rx_reorder->defrag_timeout_ms > now_ms)
+			break;
+
+		tid = rx_reorder->tid;
+		/* get index 0 of the rx_reorder array */
+		rx_reorder_base = rx_reorder - tid;
+		peer =
+			container_of(rx_reorder_base, struct ol_txrx_peer_t,
+				     tids_rx_reorder[0]);
+
+		ol_rx_defrag_waitlist_remove(peer, tid);
+		ol_rx_reorder_flush_frag(pdev->htt_pdev, peer, tid,
+					 0 /* frags always stored at seq 0 */);
+	}
+}
+
+/*
+ * Handling security checking and processing fragments
+ *
+ * Run the PN replay check on every fragment, strip the per-fragment
+ * security headers/trailers (TKIP/CCMP/WEP) according to the peer's
+ * security type, recombine the fragments into one MSDU, verify and
+ * strip the TKIP Michael MIC when applicable, convert the header format
+ * for 802.3 targets, and hand the result to the rx forwarding check.
+ * On any failure, all fragments are freed.
+ */
+void
+ol_rx_defrag(ol_txrx_pdev_handle pdev,
+	     struct ol_txrx_peer_t *peer, unsigned tid, cdf_nbuf_t frag_list)
+{
+	struct ol_txrx_vdev_t *vdev = NULL;
+	cdf_nbuf_t tmp_next, msdu, prev = NULL, cur = frag_list;
+	uint8_t index, tkip_demic = 0;
+	uint16_t hdr_space;
+	void *rx_desc;
+	struct ieee80211_frame *wh;
+	uint8_t key[DEFRAG_IEEE80211_KEY_LEN];
+
+	htt_pdev_handle htt_pdev = pdev->htt_pdev;
+	vdev = peer->vdev;
+
+	/* bypass defrag for safe mode */
+	if (vdev->safemode) {
+		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev))
+			ol_rx_in_order_deliver(vdev, peer, tid, frag_list);
+		else
+			ol_rx_deliver(vdev, peer, tid, frag_list);
+		return;
+	}
+
+	while (cur) {
+		tmp_next = cdf_nbuf_next(cur);
+		cdf_nbuf_set_next(cur, NULL);
+		if (!ol_rx_pn_check_base(vdev, peer, tid, cur)) {
+			/* PN check failed, discard all fragments */
+			if (prev) {
+				cdf_nbuf_set_next(prev, NULL);
+				ol_rx_frames_free(htt_pdev, frag_list);
+			}
+			/*
+			 * Free the fragment that failed the check too --
+			 * it was just unlinked from both sub-lists, so
+			 * the original code leaked it here.
+			 */
+			ol_rx_frames_free(htt_pdev, cur);
+			ol_rx_frames_free(htt_pdev, tmp_next);
+			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+				   "ol_rx_defrag: PN Check failed\n");
+			return;
+		}
+		/* remove FCS from each fragment */
+		cdf_nbuf_trim_tail(cur, DEFRAG_IEEE80211_FCS_LEN);
+		prev = cur;
+		cdf_nbuf_set_next(cur, tmp_next);
+		cur = tmp_next;
+	}
+	cur = frag_list;
+	wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev, cur);
+	hdr_space = ol_rx_frag_hdrsize(wh);
+	/* pick the unicast or multicast security context */
+	rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev, frag_list);
+	cdf_assert(htt_rx_msdu_has_wlan_mcast_flag(htt_pdev, rx_desc));
+	index = htt_rx_msdu_is_wlan_mcast(htt_pdev, rx_desc) ?
+		txrx_sec_mcast : txrx_sec_ucast;
+
+	switch (peer->security[index].sec_type) {
+	case htt_sec_type_tkip:
+		tkip_demic = 1;
+		/* fall-through to rest of tkip ops */
+	case htt_sec_type_tkip_nomic:
+		while (cur) {
+			tmp_next = cdf_nbuf_next(cur);
+			if (!ol_rx_frag_tkip_decap(pdev, cur, hdr_space)) {
+				/* TKIP decap failed, discard frags */
+				ol_rx_frames_free(htt_pdev, frag_list);
+				TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+					   "\n ol_rx_defrag: TKIP decap failed\n");
+				return;
+			}
+			cur = tmp_next;
+		}
+		break;
+
+	case htt_sec_type_aes_ccmp:
+		while (cur) {
+			tmp_next = cdf_nbuf_next(cur);
+			if (!ol_rx_frag_ccmp_demic(pdev, cur, hdr_space)) {
+				/* CCMP demic failed, discard frags */
+				ol_rx_frames_free(htt_pdev, frag_list);
+				TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+					   "\n ol_rx_defrag: CCMP demic failed\n");
+				return;
+			}
+			if (!ol_rx_frag_ccmp_decap(pdev, cur, hdr_space)) {
+				/* CCMP decap failed, discard frags */
+				ol_rx_frames_free(htt_pdev, frag_list);
+				TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+					   "\n ol_rx_defrag: CCMP decap failed\n");
+				return;
+			}
+			cur = tmp_next;
+		}
+		break;
+
+	case htt_sec_type_wep40:
+	case htt_sec_type_wep104:
+	case htt_sec_type_wep128:
+		while (cur) {
+			tmp_next = cdf_nbuf_next(cur);
+			if (!ol_rx_frag_wep_decap(pdev, cur, hdr_space)) {
+				/* wep decap failed, discard frags */
+				ol_rx_frames_free(htt_pdev, frag_list);
+				TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+					   "\n ol_rx_defrag: wep decap failed\n");
+				return;
+			}
+			cur = tmp_next;
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	/* merge all fragments into one MSDU (frees frag_list on failure) */
+	msdu = ol_rx_defrag_decap_recombine(htt_pdev, frag_list, hdr_space);
+	if (!msdu)
+		return;
+
+	if (tkip_demic) {
+		/* verify the Michael MIC over the recombined frame */
+		cdf_mem_copy(key,
+			     peer->security[index].michael_key,
+			     sizeof(peer->security[index].michael_key));
+		if (!ol_rx_frag_tkip_demic(pdev, key, msdu, hdr_space)) {
+			htt_rx_desc_frame_free(htt_pdev, msdu);
+			ol_rx_err(pdev->ctrl_pdev,
+				  vdev->vdev_id, peer->mac_addr.raw, tid, 0,
+				  OL_RX_DEFRAG_ERR, msdu, NULL, 0);
+			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+				   "\n ol_rx_defrag: TKIP demic failed\n");
+			return;
+		}
+	}
+	wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev, msdu);
+	if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh))
+		ol_rx_defrag_qos_decap(pdev, msdu, hdr_space);
+	if (ol_cfg_frame_type(pdev->ctrl_pdev) == wlan_frm_fmt_802_3)
+		ol_rx_defrag_nwifi_to_8023(pdev, msdu);
+
+	ol_rx_fwd_check(vdev, peer, tid, msdu);
+}
+
+/*
+ * ol_rx_frag_tkip_decap() - strip the TKIP IV/extended-IV header and the
+ * ICV trailer from one fragment.  Returns OL_RX_DEFRAG_OK on success or
+ * OL_RX_DEFRAG_ERR when the extended-IV bit is missing.
+ */
+int
+ol_rx_frag_tkip_decap(ol_txrx_pdev_handle pdev,
+		      cdf_nbuf_t msdu, uint16_t hdrlen)
+{
+	uint8_t *hdr, *iv;
+	void *desc_old_pos = NULL;
+	void *ind_old_pos = NULL;
+	int desc_len = 0;
+
+	ol_rx_frag_desc_adjust(pdev, msdu,
+			       &desc_old_pos, &ind_old_pos, &desc_len);
+
+	/* Header should have extended IV */
+	hdr = (uint8_t *) (cdf_nbuf_data(msdu) + desc_len);
+	iv = hdr + hdrlen;
+	if (!(iv[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
+		return OL_RX_DEFRAG_ERR;
+
+	/* slide the 802.11 header forward over the IV, then drop IV + ICV */
+	cdf_mem_move(hdr + f_tkip.ic_header, hdr, hdrlen);
+	cdf_nbuf_pull_head(msdu, f_tkip.ic_header);
+	cdf_nbuf_trim_tail(msdu, f_tkip.ic_trailer);
+	return OL_RX_DEFRAG_OK;
+}
+
+/*
+ * ol_rx_frag_wep_decap() - strip the WEP IV header and CRC trailer from
+ * one fragment.  Always returns OL_RX_DEFRAG_OK.
+ */
+int
+ol_rx_frag_wep_decap(ol_txrx_pdev_handle pdev, cdf_nbuf_t msdu, uint16_t hdrlen)
+{
+	uint8_t *hdr;
+	void *desc_old_pos = NULL;
+	void *ind_old_pos = NULL;
+	int desc_len = 0;
+
+	ol_rx_frag_desc_adjust(pdev, msdu,
+			       &desc_old_pos, &ind_old_pos, &desc_len);
+
+	/* slide the 802.11 header forward over the IV, then drop IV + CRC */
+	hdr = (uint8_t *) (cdf_nbuf_data(msdu) + desc_len);
+	cdf_mem_move(hdr + f_wep.ic_header, hdr, hdrlen);
+	cdf_nbuf_pull_head(msdu, f_wep.ic_header);
+	cdf_nbuf_trim_tail(msdu, f_wep.ic_trailer);
+	return OL_RX_DEFRAG_OK;
+}
+
+/*
+ * Verify and strip MIC from the frame.
+ *
+ * Recompute the Michael MIC over the recombined frame with @key and
+ * compare it against the 8-byte MIC carried at the end of the payload.
+ * On a match the MIC is trimmed and OL_RX_DEFRAG_OK is returned;
+ * otherwise OL_RX_DEFRAG_ERR.
+ */
+int
+ol_rx_frag_tkip_demic(ol_txrx_pdev_handle pdev, const uint8_t *key,
+		      cdf_nbuf_t msdu, uint16_t hdrlen)
+{
+	int status;
+	uint32_t pktlen;
+	uint8_t mic[IEEE80211_WEP_MICLEN];
+	uint8_t mic0[IEEE80211_WEP_MICLEN];
+	void *rx_desc_old_position = NULL;
+	void *ind_old_position = NULL;
+	int rx_desc_len = 0;
+
+	ol_rx_frag_desc_adjust(pdev,
+			       msdu,
+			       &rx_desc_old_position,
+			       &ind_old_position, &rx_desc_len);
+
+	pktlen = ol_rx_defrag_len(msdu) - rx_desc_len;
+
+	/* mic = MIC computed over the payload (excluding the stored MIC) */
+	status = ol_rx_defrag_mic(pdev, key, msdu, hdrlen,
+				  pktlen - (hdrlen + f_tkip.ic_miclen), mic);
+	if (status != OL_RX_DEFRAG_OK)
+		return OL_RX_DEFRAG_ERR;
+
+	/* mic0 = MIC carried in the last ic_miclen bytes of the frame */
+	ol_rx_defrag_copydata(msdu, pktlen - f_tkip.ic_miclen + rx_desc_len,
+			      f_tkip.ic_miclen, (caddr_t) mic0);
+	if (cdf_mem_compare(mic, mic0, f_tkip.ic_miclen))
+		return OL_RX_DEFRAG_ERR;
+
+	cdf_nbuf_trim_tail(msdu, f_tkip.ic_miclen);
+	return OL_RX_DEFRAG_OK;
+}
+
+/*
+ * ol_rx_frag_ccmp_decap() - strip the CCMP header from one fragment.
+ * Returns OL_RX_DEFRAG_OK on success or OL_RX_DEFRAG_ERR when the
+ * extended-IV bit is missing.
+ */
+int
+ol_rx_frag_ccmp_decap(ol_txrx_pdev_handle pdev,
+		      cdf_nbuf_t nbuf, uint16_t hdrlen)
+{
+	uint8_t *hdr, *iv;
+	void *desc_old_pos = NULL;
+	void *ind_old_pos = NULL;
+	int desc_len = 0;
+
+	ol_rx_frag_desc_adjust(pdev, nbuf,
+			       &desc_old_pos, &ind_old_pos, &desc_len);
+
+	/* CCMP always uses the extended IV format */
+	hdr = (uint8_t *) (cdf_nbuf_data(nbuf) + desc_len);
+	iv = hdr + hdrlen;
+	if (!(iv[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
+		return OL_RX_DEFRAG_ERR;
+
+	/* slide the 802.11 header forward over the CCMP header */
+	cdf_mem_move(hdr + f_ccmp.ic_header, hdr, hdrlen);
+	cdf_nbuf_pull_head(nbuf, f_ccmp.ic_header);
+
+	return OL_RX_DEFRAG_OK;
+}
+
+/*
+ * ol_rx_frag_ccmp_demic() - strip the CCMP MIC trailer from one
+ * fragment (the MIC itself was already validated by hardware/firmware;
+ * decryption happens before the frame reaches the host).  Returns
+ * OL_RX_DEFRAG_OK on success or OL_RX_DEFRAG_ERR when the extended-IV
+ * bit is missing.
+ */
+int
+ol_rx_frag_ccmp_demic(ol_txrx_pdev_handle pdev,
+		      cdf_nbuf_t wbuf, uint16_t hdrlen)
+{
+	uint8_t *hdr, *iv;
+	void *desc_old_pos = NULL;
+	void *ind_old_pos = NULL;
+	int desc_len = 0;
+
+	ol_rx_frag_desc_adjust(pdev, wbuf,
+			       &desc_old_pos, &ind_old_pos, &desc_len);
+
+	/* CCMP always uses the extended IV format */
+	hdr = (uint8_t *) (cdf_nbuf_data(wbuf) + desc_len);
+	iv = hdr + hdrlen;
+	if (!(iv[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
+		return OL_RX_DEFRAG_ERR;
+
+	cdf_nbuf_trim_tail(wbuf, f_ccmp.ic_trailer);
+
+	return OL_RX_DEFRAG_OK;
+}
+
+/*
+ * Craft pseudo header used to calculate the MIC.
+ *
+ * Build the 16-byte Michael MIC pseudo-header into @hdr:
+ * DA (6) | SA (6) | priority (1) | 3 reserved zero bytes.
+ * DA/SA are chosen from addr1..addr4 according to the ToDS/FromDS bits.
+ */
+void ol_rx_defrag_michdr(const struct ieee80211_frame *wh0, uint8_t hdr[])
+{
+	const struct ieee80211_frame_addr4 *wh =
+		(const struct ieee80211_frame_addr4 *)wh0;
+
+	switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
+	case IEEE80211_FC1_DIR_NODS:
+		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
+		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
+					   wh->i_addr2);
+		break;
+	case IEEE80211_FC1_DIR_TODS:
+		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
+		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
+					   wh->i_addr2);
+		break;
+	case IEEE80211_FC1_DIR_FROMDS:
+		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
+		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
+					   wh->i_addr3);
+		break;
+	case IEEE80211_FC1_DIR_DSTODS:
+		DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
+		DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
+					   wh->i_addr4);
+		break;
+	}
+	/*
+	 * Bit 7 is IEEE80211_FC0_SUBTYPE_QOS for data frame, but
+	 * it could also be set for deauth, disassoc, action, etc. for
+	 * a mgt type frame. It comes into picture for MFP.
+	 */
+	if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
+		const struct ieee80211_qosframe *qwh =
+			(const struct ieee80211_qosframe *)wh;
+		hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
+	} else {
+		hdr[12] = 0;
+	}
+	hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */
+}
+
+/*
+ * Michael_mic for defragmentation
+ *
+ * Compute the Michael MIC with @key over the pseudo-header (DA, SA,
+ * priority -- see ol_rx_defrag_michdr) followed by @data_len payload
+ * bytes starting @off bytes into @wbuf, writing the 8-byte result to
+ * @mic.  The payload may span a chain of nbufs; 32-bit blocks that
+ * straddle a buffer boundary are reassembled byte-wise.  Returns
+ * OL_RX_DEFRAG_OK, or OL_RX_DEFRAG_ERR if the chain ends before
+ * data_len bytes were consumed.
+ */
+int
+ol_rx_defrag_mic(ol_txrx_pdev_handle pdev,
+		 const uint8_t *key,
+		 cdf_nbuf_t wbuf,
+		 uint16_t off, uint16_t data_len, uint8_t mic[])
+{
+	uint8_t hdr[16] = { 0, };
+	uint32_t l, r;
+	const uint8_t *data;
+	uint32_t space;
+	void *rx_desc_old_position = NULL;
+	void *ind_old_position = NULL;
+	int rx_desc_len = 0;
+
+	ol_rx_frag_desc_adjust(pdev,
+			       wbuf,
+			       &rx_desc_old_position,
+			       &ind_old_position, &rx_desc_len);
+
+	ol_rx_defrag_michdr((struct ieee80211_frame *)(cdf_nbuf_data(wbuf) +
+						       rx_desc_len), hdr);
+	/* initialize the Michael state (l, r) from the 64-bit key */
+	l = get_le32(key);
+	r = get_le32(key + 4);
+
+	/* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */
+	l ^= get_le32(hdr);
+	michael_block(l, r);
+	l ^= get_le32(&hdr[4]);
+	michael_block(l, r);
+	l ^= get_le32(&hdr[8]);
+	michael_block(l, r);
+	l ^= get_le32(&hdr[12]);
+	michael_block(l, r);
+
+	/* first buffer has special handling */
+	data = (uint8_t *) cdf_nbuf_data(wbuf) + rx_desc_len + off;
+	space = ol_rx_defrag_len(wbuf) - rx_desc_len - off;
+	for (;; ) {
+		if (space > data_len)
+			space = data_len;
+
+		/* collect 32-bit blocks from current buffer */
+		while (space >= sizeof(uint32_t)) {
+			l ^= get_le32(data);
+			michael_block(l, r);
+			data += sizeof(uint32_t);
+			space -= sizeof(uint32_t);
+			data_len -= sizeof(uint32_t);
+		}
+		if (data_len < sizeof(uint32_t))
+			break;
+
+		/* advance to the next buffer in the chain */
+		wbuf = cdf_nbuf_next(wbuf);
+		if (wbuf == NULL)
+			return OL_RX_DEFRAG_ERR;
+
+		rx_desc_len = 0;
+
+		if (space != 0) {
+			const uint8_t *data_next;
+			/*
+			 * Block straddles buffers, split references.
+			 */
+			data_next =
+				(uint8_t *) cdf_nbuf_data(wbuf) + rx_desc_len;
+			if ((ol_rx_defrag_len(wbuf) - rx_desc_len) <
+			    sizeof(uint32_t) - space) {
+				return OL_RX_DEFRAG_ERR;
+			}
+			switch (space) {
+			case 1:
+				l ^= get_le32_split(data[0], data_next[0],
+						    data_next[1], data_next[2]);
+				data = data_next + 3;
+				space = (ol_rx_defrag_len(wbuf) - rx_desc_len)
+					- 3;
+				break;
+			case 2:
+				l ^= get_le32_split(data[0], data[1],
+						    data_next[0], data_next[1]);
+				data = data_next + 2;
+				space = (ol_rx_defrag_len(wbuf) - rx_desc_len)
+					- 2;
+				break;
+			case 3:
+				l ^= get_le32_split(data[0], data[1], data[2],
+						    data_next[0]);
+				data = data_next + 1;
+				space = (ol_rx_defrag_len(wbuf) - rx_desc_len)
+					- 1;
+				break;
+			}
+			michael_block(l, r);
+			data_len -= sizeof(uint32_t);
+		} else {
+			/*
+			 * Setup for next buffer.
+			 */
+			data = (uint8_t *) cdf_nbuf_data(wbuf) + rx_desc_len;
+			space = ol_rx_defrag_len(wbuf) - rx_desc_len;
+		}
+	}
+	/* Last block and padding (0x5a, 4..7 x 0) */
+	switch (data_len) {
+	case 0:
+		l ^= get_le32_split(0x5a, 0, 0, 0);
+		break;
+	case 1:
+		l ^= get_le32_split(data[0], 0x5a, 0, 0);
+		break;
+	case 2:
+		l ^= get_le32_split(data[0], data[1], 0x5a, 0);
+		break;
+	case 3:
+		l ^= get_le32_split(data[0], data[1], data[2], 0x5a);
+		break;
+	}
+	michael_block(l, r);
+	michael_block(l, r);
+	put_le32(mic, l);
+	put_le32(mic + 4, r);
+
+	return OL_RX_DEFRAG_OK;
+}
+
+/*
+ * ol_rx_frag_hdrsize() - compute the byte length of the 802.11 MAC
+ * header at @data, accounting for the extra address of a 4-address
+ * (DS-to-DS) frame, the QoS control field, and the HT control field
+ * when the order bit is set on a QoS data frame.
+ */
+uint16_t ol_rx_frag_hdrsize(const void *data)
+{
+	const struct ieee80211_frame *wh = (const struct ieee80211_frame *)data;
+	uint16_t hdrsize = sizeof(*wh);
+
+	/* 4-address (WDS) frames carry one extra address field */
+	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
+		hdrsize += IEEE80211_ADDR_LEN;
+
+	if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh)) {
+		/* QoS control field, plus HT control if the order bit is set */
+		hdrsize += sizeof(uint16_t);
+		if (wh->i_fc[1] & IEEE80211_FC1_ORDER)
+			hdrsize += sizeof(struct ieee80211_htc);
+	}
+	return hdrsize;
+}
+
+/*
+ * Recombine and decap fragments
+ *
+ * Concatenate the fragment chain into a single frame: the first
+ * fragment keeps its 802.11 header; each subsequent fragment has its
+ * header pulled off and its payload appended.  On success the
+ * more-fragments bit and fragment number are cleared and the combined
+ * nbuf is returned; on concat failure all buffers are freed and NULL
+ * is returned.
+ */
+cdf_nbuf_t
+ol_rx_defrag_decap_recombine(htt_pdev_handle htt_pdev,
+			     cdf_nbuf_t frag_list, uint16_t hdrsize)
+{
+	cdf_nbuf_t tmp;
+	cdf_nbuf_t msdu = frag_list;
+	cdf_nbuf_t rx_nbuf = frag_list;
+	struct ieee80211_frame *wh;
+
+	msdu = cdf_nbuf_next(msdu);
+	cdf_nbuf_set_next(rx_nbuf, NULL);
+	while (msdu) {
+		htt_rx_msdu_desc_free(htt_pdev, msdu);
+		tmp = cdf_nbuf_next(msdu);
+		cdf_nbuf_set_next(msdu, NULL);
+		/* drop this fragment's 802.11 header, keep only its payload */
+		ol_rx_frag_pull_hdr(htt_pdev, msdu, hdrsize);
+		if (!ol_rx_defrag_concat(rx_nbuf, msdu)) {
+			/* failure: free the remainder, the head, and msdu */
+			ol_rx_frames_free(htt_pdev, tmp);
+			htt_rx_desc_frame_free(htt_pdev, rx_nbuf);
+			cdf_nbuf_free(msdu);
+			/* msdu rx desc already freed above */
+			return NULL;
+		}
+		msdu = tmp;
+	}
+	/* the recombined frame is no longer a fragment */
+	wh = (struct ieee80211_frame *)ol_rx_frag_get_mac_hdr(htt_pdev,
+							      rx_nbuf);
+	wh->i_fc[1] &= ~IEEE80211_FC1_MORE_FRAG;
+	*(uint16_t *) wh->i_seq &= ~IEEE80211_SEQ_FRAG_MASK;
+
+	return rx_nbuf;
+}
+
+/*
+ * ol_rx_defrag_nwifi_to_8023() - convert the recombined frame from
+ * native-wifi (802.11 header + LLC/SNAP) to an 802.3 Ethernet frame in
+ * place.  DA/SA are chosen from addr1..addr3 based on the ToDS/FromDS
+ * bits; the ethertype is taken from the LLC/SNAP header.
+ *
+ * NOTE(review): the DSTODS (4-address) case leaves the Ethernet
+ * dest/src addresses unset -- confirm 4-address frames cannot reach
+ * this path.
+ */
+void ol_rx_defrag_nwifi_to_8023(ol_txrx_pdev_handle pdev, cdf_nbuf_t msdu)
+{
+	struct ieee80211_frame wh;
+	uint32_t hdrsize;
+	struct llc_snap_hdr_t llchdr;
+	struct ethernet_hdr_t *eth_hdr;
+	void *rx_desc_old_position = NULL;
+	void *ind_old_position = NULL;
+	int rx_desc_len = 0;
+	struct ieee80211_frame *wh_ptr;
+
+	ol_rx_frag_desc_adjust(pdev,
+			       msdu,
+			       &rx_desc_old_position,
+			       &ind_old_position, &rx_desc_len);
+
+	/* save copies of the 802.11 and LLC/SNAP headers before shifting */
+	wh_ptr = (struct ieee80211_frame *)(cdf_nbuf_data(msdu) + rx_desc_len);
+	cdf_mem_copy(&wh, wh_ptr, sizeof(wh));
+	hdrsize = sizeof(struct ieee80211_frame);
+	cdf_mem_copy(&llchdr, ((uint8_t *) (cdf_nbuf_data(msdu) +
+					    rx_desc_len)) + hdrsize,
+		     sizeof(struct llc_snap_hdr_t));
+
+	/*
+	 * Now move the data pointer to the beginning of the mac header :
+	 * new-header = old-hdr + (wifhdrsize + llchdrsize - ethhdrsize)
+	 */
+	cdf_nbuf_pull_head(msdu, (rx_desc_len + hdrsize +
+				  sizeof(struct llc_snap_hdr_t) -
+				  sizeof(struct ethernet_hdr_t)));
+	eth_hdr = (struct ethernet_hdr_t *)(cdf_nbuf_data(msdu));
+	switch (wh.i_fc[1] & IEEE80211_FC1_DIR_MASK) {
+	case IEEE80211_FC1_DIR_NODS:
+		cdf_mem_copy(eth_hdr->dest_addr, wh.i_addr1,
+			     IEEE80211_ADDR_LEN);
+		cdf_mem_copy(eth_hdr->src_addr, wh.i_addr2, IEEE80211_ADDR_LEN);
+		break;
+	case IEEE80211_FC1_DIR_TODS:
+		cdf_mem_copy(eth_hdr->dest_addr, wh.i_addr3,
+			     IEEE80211_ADDR_LEN);
+		cdf_mem_copy(eth_hdr->src_addr, wh.i_addr2, IEEE80211_ADDR_LEN);
+		break;
+	case IEEE80211_FC1_DIR_FROMDS:
+		cdf_mem_copy(eth_hdr->dest_addr, wh.i_addr1,
+			     IEEE80211_ADDR_LEN);
+		cdf_mem_copy(eth_hdr->src_addr, wh.i_addr3, IEEE80211_ADDR_LEN);
+		break;
+	case IEEE80211_FC1_DIR_DSTODS:
+		break;
+	}
+
+	cdf_mem_copy(eth_hdr->ethertype, llchdr.ethertype,
+		     sizeof(llchdr.ethertype));
+}
+
+/*
+ * Handling QOS for defragmentation
+ *
+ * Remove the QoS control field (and the HT control field, when the
+ * order bit is set) from the recombined frame's 802.11 header, and
+ * clear the QoS subtype bit so the frame reads as plain data.
+ */
+void
+ol_rx_defrag_qos_decap(ol_txrx_pdev_handle pdev,
+		       cdf_nbuf_t nbuf, uint16_t hdrlen)
+{
+	struct ieee80211_frame *wh;
+	uint16_t qoslen;
+	void *rx_desc_old_position = NULL;
+	void *ind_old_position = NULL;
+	int rx_desc_len = 0;
+
+	ol_rx_frag_desc_adjust(pdev,
+			       nbuf,
+			       &ind_old_position, &rx_desc_len);
+
+	wh = (struct ieee80211_frame *)(cdf_nbuf_data(nbuf) + rx_desc_len);
+	if (DEFRAG_IEEE80211_QOS_HAS_SEQ(wh)) {
+		qoslen = sizeof(struct ieee80211_qoscntl);
+		/* Qos frame with Order bit set indicates a HTC frame */
+		if (wh->i_fc[1] & IEEE80211_FC1_ORDER)
+			qoslen += sizeof(struct ieee80211_htc);
+
+		/* remove QoS field from header */
+		hdrlen -= qoslen;
+		cdf_mem_move((uint8_t *) wh + qoslen, wh, hdrlen);
+		wh = (struct ieee80211_frame *)cdf_nbuf_pull_head(nbuf,
+								  rx_desc_len +
+								  qoslen);
+		/* clear QoS bit */
+		/*
+		 * KW# 6154 'cdf_nbuf_pull_head' in turn calls
+		 * __cdf_nbuf_pull_head,
+		 * which returns NULL if there is not sufficient data to pull.
+		 * It's guaranteed that cdf_nbuf_pull_head will succeed rather
+		 * than returning NULL, since the entire rx frame is already
+		 * present in the rx buffer.
+		 * However, to make it obvious to static analyzers that this
+		 * code is safe, add an explicit check that cdf_nbuf_pull_head
+		 * returns a non-NULL value.
+		 * Since this part of the code is not performance-critical,
+		 * adding this explicit check is okay.
+		 */
+		if (wh)
+			wh->i_fc[0] &= ~IEEE80211_FC0_SUBTYPE_QOS;
+
+	}
+}
diff --git a/dp/txrx/ol_rx_defrag.h b/dp/txrx/ol_rx_defrag.h
new file mode 100644
index 000000000000..512fa5dacb2c
--- /dev/null
+++ b/dp/txrx/ol_rx_defrag.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _OL_RX_DEFRAG_H_
+#define _OL_RX_DEFRAG_H_
+
+#include <cdf_nbuf.h>
+#include <cds_ieee80211_common.h>
+#include <cdf_util.h>
+#include <cdf_types.h>
+#include <cdf_memory.h>
+#include <ol_txrx_internal.h>
+#include <ol_txrx_dbg.h>
+
+#define DEFRAG_IEEE80211_ADDR_LEN 6
+#define DEFRAG_IEEE80211_KEY_LEN 8
+#define DEFRAG_IEEE80211_FCS_LEN 4
+
/* per-cipher geometry used by the rx defrag decap/demic routines */
struct ol_rx_defrag_cipher {
	const char *ic_name;	/* cipher name, for debug output */
	uint16_t ic_header;	/* bytes of crypto header preceding payload */
	uint8_t ic_trailer;	/* bytes of crypto trailer after payload */
	uint8_t ic_miclen;	/* MIC length in bytes (0 if no separate MIC) */
};
+
/* status codes returned by the rx defragmentation helpers */
enum {
	OL_RX_DEFRAG_ERR,	/* failure (e.g. decap or concat error) */
	OL_RX_DEFRAG_OK,	/* success */
	OL_RX_DEFRAG_PN_ERR	/* packet-number (replay) check failure */
};
+
/*
 * Thin wrappers so the defrag code accesses netbuf contents/length via a
 * single indirection point rather than calling cdf_nbuf APIs directly.
 */
#define ol_rx_defrag_copydata(buf, offset, len, _to) \
	cdf_nbuf_copy_bits(buf, offset, len, _to)

#define ol_rx_defrag_len(buf) \
	cdf_nbuf_len(buf)
+
+void
+ol_rx_fraglist_insert(htt_pdev_handle htt_pdev,
+ cdf_nbuf_t *head_addr,
+ cdf_nbuf_t *tail_addr,
+ cdf_nbuf_t frag, uint8_t *all_frag_present);
+
+void ol_rx_defrag_waitlist_add(struct ol_txrx_peer_t *peer, unsigned tid);
+
+void ol_rx_defrag_waitlist_remove(struct ol_txrx_peer_t *peer, unsigned tid);
+
+void ol_rx_defrag_waitlist_flush(struct ol_txrx_pdev_t *pdev);
+
+void
+ol_rx_defrag(ol_txrx_pdev_handle pdev,
+ struct ol_txrx_peer_t *peer, unsigned tid, cdf_nbuf_t frag_list);
+
+int
+ol_rx_frag_tkip_decap(ol_txrx_pdev_handle pdev,
+ cdf_nbuf_t msdu, uint16_t hdrlen);
+
+int
+ol_rx_frag_wep_decap(ol_txrx_pdev_handle pdev,
+ cdf_nbuf_t nbuf, uint16_t hdrlen);
+
+void ol_rx_defrag_nwifi_to_8023(ol_txrx_pdev_handle pdev, cdf_nbuf_t msdu);
+
+void
+ol_rx_defrag_qos_decap(ol_txrx_pdev_handle pdev,
+ cdf_nbuf_t nbuf, uint16_t hdrlen);
+
+int
+ol_rx_frag_tkip_demic(ol_txrx_pdev_handle pdev,
+ const uint8_t *key, cdf_nbuf_t msdu, uint16_t hdrlen);
+
+int
+ol_rx_frag_ccmp_decap(ol_txrx_pdev_handle pdev,
+ cdf_nbuf_t nbuf, uint16_t hdrlen);
+
+int
+ol_rx_frag_ccmp_demic(ol_txrx_pdev_handle pdev,
+ cdf_nbuf_t wbuf, uint16_t hdrlen);
+
+uint16_t ol_rx_frag_hdrsize(const void *data);
+
+void ol_rx_defrag_michdr(const struct ieee80211_frame *wh0, uint8_t hdr[]);
+
+void
+ol_rx_reorder_store_frag(ol_txrx_pdev_handle pdev,
+ struct ol_txrx_peer_t *peer,
+ unsigned tid, uint16_t seq_num, cdf_nbuf_t frag);
+
+cdf_nbuf_t
+ol_rx_defrag_decap_recombine(htt_pdev_handle htt_pdev,
+ cdf_nbuf_t frag_list, uint16_t hdrsize);
+
+int
+ol_rx_defrag_mic(ol_txrx_pdev_handle pdev,
+ const uint8_t *key,
+ cdf_nbuf_t wbuf,
+ uint16_t off, uint16_t data_len, uint8_t mic[]);
+
+void
+ol_rx_reorder_flush_frag(htt_pdev_handle htt_pdev,
+ struct ol_txrx_peer_t *peer,
+ unsigned tid, int seq_num);
+
+static inline void xor_block(uint8_t *b, const uint8_t *a, cdf_size_t len)
+{
+ cdf_size_t i;
+
+ for (i = 0; i < len; i++)
+ b[i] ^= a[i];
+}
+
/*
 * Rotate @val left by @bits.
 * Both shift counts are masked to 0..31: the naive
 * "(val << bits) | (val >> (32 - bits))" form shifts by the full width
 * of uint32_t when bits == 0, which is undefined behavior (C11 6.5.7).
 * With the mask, bits == 0 yields val unchanged; callers in
 * michael_block() (17, 3) are unaffected.
 */
static inline uint32_t rotl(uint32_t val, int bits)
{
	return (val << (bits & 31)) | (val >> ((32 - bits) & 31));
}
+
/*
 * Rotate @val right by @bits.
 * Both shift counts are masked to 0..31: the naive
 * "(val >> bits) | (val << (32 - bits))" form shifts by the full width
 * of uint32_t when bits == 0, which is undefined behavior (C11 6.5.7).
 * With the mask, bits == 0 yields val unchanged.
 */
static inline uint32_t rotr(uint32_t val, int bits)
{
	return (val >> (bits & 31)) | (val << ((32 - bits) & 31));
}
+
/* Swap the two bytes within each 16-bit half of @val. */
static inline uint32_t xswap(uint32_t val)
{
	uint32_t even_bytes = (val & 0x00ff00ff) << 8;
	uint32_t odd_bytes = (val & 0xff00ff00) >> 8;

	return even_bytes | odd_bytes;
}
+
/*
 * Assemble a 32-bit little-endian value from its four bytes (b0 = LSB).
 * Each byte is widened to uint32_t before shifting: in the original form
 * the uint8_t operands are promoted to (signed) int, so b3 >= 0x80
 * shifted left by 24 overflows int, which is undefined behavior
 * (C11 6.5.7 / CERT INT34-C).
 */
static inline uint32_t
get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3)
{
	return (uint32_t)b0 | ((uint32_t)b1 << 8) |
	       ((uint32_t)b2 << 16) | ((uint32_t)b3 << 24);
}
+
/* Decode the 32-bit little-endian value stored at @bytes. */
static inline uint32_t get_le32(const uint8_t *bytes)
{
	return get_le32_split(bytes[0], bytes[1], bytes[2], bytes[3]);
}
+
/* Store @v into the byte array @p in little-endian order (LSB first). */
static inline void put_le32(uint8_t *p, uint32_t v)
{
	int i;

	for (i = 0; i < 4; i++)
		p[i] = (v >> (8 * i)) & 0xff;
}
+
+static inline uint8_t ol_rx_defrag_concat(cdf_nbuf_t dst, cdf_nbuf_t src)
+{
+ /*
+ * Inside cdf_nbuf_cat, if it is necessary to reallocate dst
+ * to provide space for src, the headroom portion is copied from
+ * the original dst buffer to the larger new dst buffer.
+ * (This is needed, because the headroom of the dst buffer
+ * contains the rx desc.)
+ */
+ if (cdf_nbuf_cat(dst, src))
+ return OL_RX_DEFRAG_ERR;
+
+ return OL_RX_DEFRAG_OK;
+}
+
/*
 * One round of the Michael MIC block function (IEEE 802.11i TKIP):
 * mixes the two 32-bit halves (l, r) of the running MIC state in place.
 * Wrapped in do/while(0) because the macro updates both l and r and must
 * behave as a single statement.
 */
#define michael_block(l, r) \
	do {                                    \
		r ^= rotl(l, 17);               \
		l += r;                         \
		r ^= xswap(l);                  \
		l += r;                         \
		r ^= rotl(l, 3);                \
		l += r;                         \
		r ^= rotr(l, 2);                \
		l += r;                         \
	} while (0)
+
+#endif
diff --git a/dp/txrx/ol_rx_fwd.c b/dp/txrx/ol_rx_fwd.c
new file mode 100644
index 000000000000..e97f34f05717
--- /dev/null
+++ b/dp/txrx/ol_rx_fwd.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/* standard header files */
+#include <cdf_nbuf.h> /* cdf_nbuf_map */
+#include <cdf_memory.h> /* cdf_mem_compare */
+
+/* external header files */
+#include <ol_cfg.h> /* wlan_op_mode_ap, etc. */
+#include <ol_htt_rx_api.h> /* htt_rx_msdu_desc_retrieve */
+#include <cds_ieee80211_common.h> /* ieee80211_frame, etc. */
+
+/* internal header files */
+#include <ol_txrx_types.h> /* ol_txrx_dev_t, etc. */
+#include <ol_rx_fwd.h> /* our own defs */
+#include <ol_rx.h> /* ol_rx_deliver */
+#include <ol_txrx_internal.h> /* TXRX_ASSERT1 */
+#include <ol_tx.h>
/*
 * ol_ap_fwd_check() - prepare a received ToDS data frame for retransmission.
 * @vdev: virtual device (AP) whose MAC address identifies "us"
 * @msdu: rx netbuf whose data begins with the 802.11 MAC header
 *
 * Porting from Ap11PrepareForwardedPacket.
 * This routine is called when a RX data frame from an associated station is
 * to be forwarded to another associated station. We will prepare the
 * received packet so that it is suitable for transmission again.
 * Check that this Packet is suitable for forwarding. If yes, then
 * prepare the new 802.11 header: flip the direction bit from ToDS to
 * FromDS and rotate the three address fields accordingly.  The netbuf
 * contents are modified in place; unsuitable frames are left untouched.
 */
static inline void ol_ap_fwd_check(struct ol_txrx_vdev_t *vdev, cdf_nbuf_t msdu)
{
	struct ieee80211_frame *mac_header;
	unsigned char tmp_addr[IEEE80211_ADDR_LEN];
	unsigned char type;
	unsigned char subtype;
	unsigned char fromds;
	unsigned char tods;

	mac_header = (struct ieee80211_frame *)(cdf_nbuf_data(msdu));
	TXRX_ASSERT1(mac_header);

	type = mac_header->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = mac_header->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
	tods = mac_header->i_fc[1] & IEEE80211_FC1_DIR_TODS;
	fromds = mac_header->i_fc[1] & IEEE80211_FC1_DIR_FROMDS;

	/*
	 * Make sure no QOS or any other non-data subtype
	 * Should be a ToDs data frame.
	 * Make sure that this frame is unicast and not for us.
	 * These packets should come up through the normal rx path and
	 * not forwarded.
	 * NOTE(review): cdf_mem_compare() == 0 is treated here as
	 * "addr3 matches our MAC" -- confirm against the CDF API.
	 */
	if (type != IEEE80211_FC0_TYPE_DATA ||
	    subtype != 0x0 ||
	    ((tods != 1) || (fromds != 0)) ||
	    (cdf_mem_compare
		     (mac_header->i_addr3, vdev->mac_addr.raw,
		     IEEE80211_ADDR_LEN) == 0)) {
#ifdef DEBUG_HOST_RC
		TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
			   "Exit: %s | Unnecessary to adjust mac header\n",
			   __func__);
#endif
	} else {
		/* Flip the ToDs bit to FromDs */
		mac_header->i_fc[1] &= 0xfe;
		mac_header->i_fc[1] |= 0x2;

		/*
		 * Flip the addresses
		 * (ToDs, addr1, RA=BSSID) move to (FrDs, addr2, TA=BSSID)
		 * (ToDs, addr2, SA) move to (FrDs, addr3, SA)
		 * (ToDs, addr3, DA) move to (FrDs, addr1, DA)
		 */

		memcpy(tmp_addr, mac_header->i_addr2, sizeof(tmp_addr));

		memcpy(mac_header->i_addr2,
		       mac_header->i_addr1, sizeof(tmp_addr));

		memcpy(mac_header->i_addr1,
		       mac_header->i_addr3, sizeof(tmp_addr));

		memcpy(mac_header->i_addr3, tmp_addr, sizeof(tmp_addr));
	}
}
+
+static inline void ol_rx_fwd_to_tx(struct ol_txrx_vdev_t *vdev, cdf_nbuf_t msdu)
+{
+ struct ol_txrx_pdev_t *pdev = vdev->pdev;
+
+ if (pdev->frame_format == wlan_frm_fmt_native_wifi)
+ ol_ap_fwd_check(vdev, msdu);
+
+ /*
+ * Map the netbuf, so it's accessible to the DMA that
+ * sends it to the target.
+ */
+ cdf_nbuf_map_single(pdev->osdev, msdu, CDF_DMA_TO_DEVICE);
+ cdf_nbuf_set_next(msdu, NULL); /* add NULL terminator */
+
+ msdu = OL_TX_LL(vdev, msdu);
+
+ if (msdu) {
+ /*
+ * The frame was not accepted by the tx.
+ * We could store the frame and try again later,
+ * but the simplest solution is to discard the frames.
+ */
+ cdf_nbuf_unmap_single(pdev->osdev, msdu, CDF_DMA_TO_DEVICE);
+ cdf_nbuf_tx_free(msdu, NBUF_PKT_ERROR);
+ }
+}
+
/*
 * ol_rx_fwd_check() - per-MSDU rx -> tx intra-BSS forwarding check.
 * @vdev: virtual device that received the frames
 * @peer: peer the rx frames belong to
 * @tid: TID of the rx frames
 * @msdu_list: NULL-terminated list of rx MSDUs
 *
 * MSDUs whose rx descriptor has the "forward" flag set (and whose vdev
 * allows intra-BSS forwarding) are handed to the tx path via
 * ol_rx_fwd_to_tx() -- exclusively when the descriptor's "discard" flag
 * is set, or as a copy (with the original still delivered to the OS)
 * when it is clear.  All remaining MSDUs are re-linked and delivered to
 * the OS shim at the end.
 */
void
ol_rx_fwd_check(struct ol_txrx_vdev_t *vdev,
		struct ol_txrx_peer_t *peer, unsigned tid, cdf_nbuf_t msdu_list)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	cdf_nbuf_t deliver_list_head = NULL;
	cdf_nbuf_t deliver_list_tail = NULL;
	cdf_nbuf_t msdu;

	msdu = msdu_list;
	while (msdu) {
		struct ol_txrx_vdev_t *tx_vdev;
		void *rx_desc;
		/*
		 * Remember the next list elem, because our processing
		 * may cause the MSDU to get linked into a different list.
		 */
		msdu_list = cdf_nbuf_next(msdu);

		rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, msdu);

		if (!vdev->disable_intrabss_fwd &&
		    htt_rx_msdu_forward(pdev->htt_pdev, rx_desc)) {
			/*
			 * Use the same vdev that received the frame to
			 * transmit the frame.
			 * This is exactly what we want for intra-BSS
			 * forwarding, like STA-to-STA forwarding and
			 * multicast echo.
			 * If this is a intra-BSS forwarding case (which is not
			 * currently supported), then the tx vdev is different
			 * from the rx vdev.
			 * On the LL host the vdevs are not actually used
			 * for tx, so it would still work to use the rx vdev
			 * rather than the tx vdev.
			 * For HL, the tx classification searches for the DA
			 * within the given vdev, so we would want to get the DA
			 * peer ID from the target, so we can locate
			 * the tx vdev.
			 */
			tx_vdev = vdev;
			/*
			 * Copying TID value of RX packet to forwarded
			 * packet if the tid is other than non qos tid.
			 * But for non qos tid fill invalid tid so that
			 * Fw will take care of filling proper tid.
			 */
			if (tid != HTT_NON_QOS_TID) {
				cdf_nbuf_set_tid(msdu, tid);
			} else {
				cdf_nbuf_set_tid(msdu,
						 ADF_NBUF_TX_EXT_TID_INVALID);
			}
			/*
			 * This MSDU needs to be forwarded to the tx path.
			 * Check whether it also needs to be sent to the OS
			 * shim, in which case we need to make a copy
			 * (or clone?).
			 */
			if (htt_rx_msdu_discard(pdev->htt_pdev, rx_desc)) {
				/* tx path only: release rx bookkeeping first */
				htt_rx_msdu_desc_free(pdev->htt_pdev, msdu);
				cdf_net_buf_debug_release_skb(msdu);
				ol_rx_fwd_to_tx(tx_vdev, msdu);
				msdu = NULL;	/* already handled this MSDU */
				TXRX_STATS_ADD(pdev,
					 pub.rx.intra_bss_fwd.packets_fwd, 1);
			} else {
				/* tx path AND OS: forward a copy */
				cdf_nbuf_t copy;
				copy = cdf_nbuf_copy(msdu);
				if (copy)
					ol_rx_fwd_to_tx(tx_vdev, copy);
				TXRX_STATS_ADD(pdev,
				   pub.rx.intra_bss_fwd.packets_stack_n_fwd, 1);
			}
		} else {
			TXRX_STATS_ADD(pdev,
				 pub.rx.intra_bss_fwd.packets_stack, 1);
		}
		if (msdu) {
			/* send this frame to the OS */
			OL_TXRX_LIST_APPEND(deliver_list_head,
					    deliver_list_tail, msdu);
		}
		msdu = msdu_list;
	}
	if (deliver_list_head) {
		/* add NULL terminator */
		cdf_nbuf_set_next(deliver_list_tail, NULL);
		if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
			ol_rx_in_order_deliver(vdev, peer, tid,
					       deliver_list_head);
		} else {
			ol_rx_deliver(vdev, peer, tid, deliver_list_head);
		}
	}
}
diff --git a/dp/txrx/ol_rx_fwd.h b/dp/txrx/ol_rx_fwd.h
new file mode 100644
index 000000000000..fe570c5acb4c
--- /dev/null
+++ b/dp/txrx/ol_rx_fwd.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2011, 2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _OL_RX_FWD_H_
+#define _OL_RX_FWD_H_
+
+#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+
+#include <ol_txrx_api.h> /* ol_txrx_peer_t, etc. */
+
+cdf_nbuf_t
+ol_rx_fwd_mcast_check_sta(struct ol_txrx_vdev_t *vdev,
+ struct ol_txrx_peer_t *peer,
+ cdf_nbuf_t msdu, void *rx_desc, int is_wlan_mcast);
+
+cdf_nbuf_t
+ol_rx_fwd_mcast_check_ap(struct ol_txrx_vdev_t *vdev,
+ struct ol_txrx_peer_t *peer,
+ cdf_nbuf_t msdu, void *rx_desc, int is_wlan_mcast);
+
+/**
+ * @brief Check if rx frames should be transmitted over WLAN.
+ * @details
+ * Check if rx frames should be transmitted back over WLAN, instead of
+ * or in addition to delivering the rx frames to the OS.
+ * Rx frames will be forwarded to the transmit path under the following
+ * conditions:
+ * 1. If the destination is a STA associated to the same virtual device
+ * within this physical device, the rx frame will be forwarded to the
+ * tx path rather than being sent to the OS. If the destination is a
+ * STA associated to a different virtual device within this physical
+ * device, then the rx frame will optionally be forwarded to the tx path.
+ * 2. If the frame is received by an AP, but the destination is for another
+ * AP that the current AP is associated with for WDS forwarding, the
+ * intermediate AP will forward the rx frame to the tx path to transmit
+ * to send to the destination AP, rather than sending it to the OS.
+ * 3. If the AP receives a multicast frame, it will retransmit the frame
+ * within the BSS, in addition to sending the frame to the OS.
+ *
+ * @param vdev - which virtual device the frames were addressed to
+ * @param peer - which peer the rx frames belong to
+ * @param tid - which TID within the peer the rx frames belong to
+ * @param msdu_list - NULL-terminated list of MSDUs to perform the rx->tx
+ * forwarding check on
+ */
+void
+ol_rx_fwd_check(struct ol_txrx_vdev_t *vdev,
+ struct ol_txrx_peer_t *peer,
+ unsigned tid, cdf_nbuf_t msdu_list);
+
+#endif /* _OL_RX_FWD_H_ */
diff --git a/dp/txrx/ol_rx_pn.c b/dp/txrx/ol_rx_pn.c
new file mode 100644
index 000000000000..74ffdda985e9
--- /dev/null
+++ b/dp/txrx/ol_rx_pn.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright (c) 2011, 2013-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include <cdf_nbuf.h> /* cdf_nbuf_t */
+
+#include <ol_htt_rx_api.h> /* htt_rx_pn_t, etc. */
+#include <ol_ctrl_txrx_api.h> /* ol_rx_err */
+
+#include <ol_txrx_internal.h> /* ol_rx_mpdu_list_next */
+#include <ol_txrx_types.h> /* ol_txrx_vdev_t, etc. */
+#include <ol_rx_pn.h> /* our own defs */
+#include <ol_rx_fwd.h> /* ol_rx_fwd_check */
+#include <ol_rx.h> /* ol_rx_deliver */
+
/*
 * Append the MSDUs of one MPDU (mpdu .. mpdu_tail) to the list of good
 * frames accumulated so far; head/tail are updated in place.
 * Wrapped in do/while(0) so the macro behaves as a single statement.
 */
#define ADD_MPDU_TO_LIST(head, tail, mpdu, mpdu_tail) do {		\
	if (!head) {							\
		head = mpdu;						\
	} else {							\
		cdf_nbuf_set_next(tail, mpdu);				\
	}								\
	tail = mpdu_tail;						\
	} while (0)
+
+int ol_rx_pn_cmp24(union htt_rx_pn_t *new_pn,
+ union htt_rx_pn_t *old_pn, int is_unicast, int opmode)
+{
+ int rc = ((new_pn->pn24 & 0xffffff) <= (old_pn->pn24 & 0xffffff));
+ return rc;
+}
+
+int ol_rx_pn_cmp48(union htt_rx_pn_t *new_pn,
+ union htt_rx_pn_t *old_pn, int is_unicast, int opmode)
+{
+ int rc = ((new_pn->pn48 & 0xffffffffffffULL) <=
+ (old_pn->pn48 & 0xffffffffffffULL));
+ return rc;
+}
+
+int ol_rx_pn_wapi_cmp(union htt_rx_pn_t *new_pn,
+ union htt_rx_pn_t *old_pn, int is_unicast, int opmode)
+{
+ int pn_is_replay = 0;
+
+ if (new_pn->pn128[1] == old_pn->pn128[1])
+ pn_is_replay = (new_pn->pn128[0] <= old_pn->pn128[0]);
+ else
+ pn_is_replay = (new_pn->pn128[1] < old_pn->pn128[1]);
+
+ if (is_unicast) {
+ if (opmode == wlan_op_mode_ap)
+ pn_is_replay |= ((new_pn->pn128[0] & 0x1ULL) != 0);
+ else
+ pn_is_replay |= ((new_pn->pn128[0] & 0x1ULL) != 1);
+ }
+ return pn_is_replay;
+}
+
/*
 * ol_rx_pn_check_base() - filter an MPDU list through the PN replay check.
 * @vdev: virtual device that received the frames
 * @peer: peer that transmitted the frames
 * @tid: TID of the rx frames
 * @msdu_list: NULL-terminated list of MSDUs, grouped into MPDUs
 *
 * The check is bypassed entirely when the firmware already performs it,
 * for IBSS vdevs, and for security types whose configured PN length is 0.
 * Each encrypted MPDU's PN is compared against the last PN recorded for
 * this peer/tid using the per-security-type cmp function; MPDUs that fail
 * are reported via ol_rx_err() and all of their MSDUs are freed, while
 * passing MPDUs update the recorded PN.
 *
 * Return: NULL-terminated list of the MSDUs that passed (or bypassed)
 * the PN check.
 */
cdf_nbuf_t
ol_rx_pn_check_base(struct ol_txrx_vdev_t *vdev,
		    struct ol_txrx_peer_t *peer,
		    unsigned tid, cdf_nbuf_t msdu_list)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	union htt_rx_pn_t *last_pn;
	cdf_nbuf_t out_list_head = NULL;
	cdf_nbuf_t out_list_tail = NULL;
	cdf_nbuf_t mpdu;
	int index;		/* unicast vs. multicast */
	int pn_len;
	void *rx_desc;
	int last_pn_valid;

	/* Make sure host pn check is not redundant */
	if ((cdf_atomic_read(&peer->fw_pn_check)) ||
	    (vdev->opmode == wlan_op_mode_ibss)) {
		return msdu_list;
	}

	/* First, check whether the PN check applies */
	rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, msdu_list);
	cdf_assert(htt_rx_msdu_has_wlan_mcast_flag(pdev->htt_pdev, rx_desc));
	index = htt_rx_msdu_is_wlan_mcast(pdev->htt_pdev, rx_desc) ?
		txrx_sec_mcast : txrx_sec_ucast;
	pn_len = pdev->rx_pn[peer->security[index].sec_type].len;
	if (pn_len == 0)
		return msdu_list;	/* security type carries no PN */

	last_pn_valid = peer->tids_last_pn_valid[tid];
	last_pn = &peer->tids_last_pn[tid];
	mpdu = msdu_list;
	while (mpdu) {
		cdf_nbuf_t mpdu_tail, next_mpdu;
		union htt_rx_pn_t new_pn;
		int pn_is_replay = 0;

		rx_desc = htt_rx_msdu_desc_retrieve(pdev->htt_pdev, mpdu);

		/*
		 * Find the last MSDU within this MPDU, and
		 * the find the first MSDU within the next MPDU.
		 */
		ol_rx_mpdu_list_next(pdev, mpdu, &mpdu_tail, &next_mpdu);

		/* Don't check the PN replay for non-encrypted frames */
		if (!htt_rx_mpdu_is_encrypted(pdev->htt_pdev, rx_desc)) {
			ADD_MPDU_TO_LIST(out_list_head, out_list_tail, mpdu,
					 mpdu_tail);
			mpdu = next_mpdu;
			continue;
		}

		/* retrieve PN from rx descriptor */
		htt_rx_mpdu_desc_pn(pdev->htt_pdev, rx_desc, &new_pn, pn_len);

		/* if there was no prior PN, there's nothing to check */
		if (last_pn_valid) {
			pn_is_replay =
				pdev->rx_pn[peer->security[index].sec_type].
				cmp(&new_pn, last_pn, index == txrx_sec_ucast,
				    vdev->opmode);
		} else {
			last_pn_valid = peer->tids_last_pn_valid[tid] = 1;
		}

		if (pn_is_replay) {
			cdf_nbuf_t msdu;
			static uint32_t last_pncheck_print_time /* = 0 */;
			int log_level;
			uint32_t current_time_ms;

			/*
			 * This MPDU failed the PN check:
			 * 1. notify the control SW of the PN failure
			 *    (so countermeasures can be taken, if necessary)
			 * 2. Discard all the MSDUs from this MPDU.
			 */
			msdu = mpdu;
			current_time_ms =
				cdf_system_ticks_to_msecs(cdf_system_ticks());
			/* rate-limit the full-detail warning print */
			if (TXRX_PN_CHECK_FAILURE_PRINT_PERIOD_MS <
			    (current_time_ms - last_pncheck_print_time)) {
				last_pncheck_print_time = current_time_ms;
				log_level = TXRX_PRINT_LEVEL_WARN;
			} else {
				log_level = TXRX_PRINT_LEVEL_INFO2;
			}

			TXRX_PRINT(log_level,
				   "PN check failed - TID %d, peer %p "
				   "(%02x:%02x:%02x:%02x:%02x:%02x) %s\n"
				   "    old PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
				   "    new PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
				   "    new seq num = %d\n",
				   tid, peer,
				   peer->mac_addr.raw[0], peer->mac_addr.raw[1],
				   peer->mac_addr.raw[2], peer->mac_addr.raw[3],
				   peer->mac_addr.raw[4], peer->mac_addr.raw[5],
				   (index ==
				    txrx_sec_ucast) ? "ucast" : "mcast",
				   last_pn->pn128[1], last_pn->pn128[0],
				   last_pn->pn128[0] & 0xffffffffffffULL,
				   new_pn.pn128[1], new_pn.pn128[0],
				   new_pn.pn128[0] & 0xffffffffffffULL,
				   htt_rx_mpdu_desc_seq_num(pdev->htt_pdev,
							    rx_desc));
#if defined(ENABLE_RX_PN_TRACE)
			ol_rx_pn_trace_display(pdev, 1);
#endif /* ENABLE_RX_PN_TRACE */
			ol_rx_err(pdev->ctrl_pdev,
				  vdev->vdev_id, peer->mac_addr.raw, tid,
				  htt_rx_mpdu_desc_tsf32(pdev->htt_pdev,
							 rx_desc), OL_RX_ERR_PN,
				  mpdu, NULL, 0);
			/* free all MSDUs within this MPDU */
			do {
				cdf_nbuf_t next_msdu;
				OL_RX_ERR_STATISTICS_1(pdev, vdev, peer,
						       rx_desc, OL_RX_ERR_PN);
				next_msdu = cdf_nbuf_next(msdu);
				htt_rx_desc_frame_free(pdev->htt_pdev, msdu);
				if (msdu == mpdu_tail)
					break;
				else
					msdu = next_msdu;
			} while (1);
		} else {
			ADD_MPDU_TO_LIST(out_list_head, out_list_tail, mpdu,
					 mpdu_tail);
			/*
			 * Remember the new PN.
			 * For simplicity, just do 2 64-bit word copies to
			 * cover the worst case (WAPI), regardless of the length
			 * of the PN.
			 * This is more efficient than doing a conditional
			 * branch to copy only the relevant portion.
			 */
			last_pn->pn128[0] = new_pn.pn128[0];
			last_pn->pn128[1] = new_pn.pn128[1];
			OL_RX_PN_TRACE_ADD(pdev, peer, tid, rx_desc);
		}

		mpdu = next_mpdu;
	}
	/* make sure the list is null-terminated */
	if (out_list_tail)
		cdf_nbuf_set_next(out_list_tail, NULL);

	return out_list_head;
}
+
+void
+ol_rx_pn_check(struct ol_txrx_vdev_t *vdev,
+ struct ol_txrx_peer_t *peer, unsigned tid, cdf_nbuf_t msdu_list)
+{
+ msdu_list = ol_rx_pn_check_base(vdev, peer, tid, msdu_list);
+ ol_rx_fwd_check(vdev, peer, tid, msdu_list);
+}
+
+void
+ol_rx_pn_check_only(struct ol_txrx_vdev_t *vdev,
+ struct ol_txrx_peer_t *peer,
+ unsigned tid, cdf_nbuf_t msdu_list)
+{
+ msdu_list = ol_rx_pn_check_base(vdev, peer, tid, msdu_list);
+ ol_rx_deliver(vdev, peer, tid, msdu_list);
+}
+
+#if defined(ENABLE_RX_PN_TRACE)
+
+A_STATUS ol_rx_pn_trace_attach(ol_txrx_pdev_handle pdev)
+{
+ int num_elems;
+
+ num_elems = 1 << TXRX_RX_PN_TRACE_SIZE_LOG2;
+ pdev->rx_pn_trace.idx = 0;
+ pdev->rx_pn_trace.cnt = 0;
+ pdev->rx_pn_trace.mask = num_elems - 1;
+ pdev->rx_pn_trace.data =
+ cdf_mem_malloc(sizeof(*pdev->rx_pn_trace.data) * num_elems);
+ if (!pdev->rx_pn_trace.data)
+ return A_NO_MEMORY;
+ return A_OK;
+}
+
+void ol_rx_pn_trace_detach(ol_txrx_pdev_handle pdev)
+{
+ cdf_mem_free(pdev->rx_pn_trace.data);
+}
+
+void
+ol_rx_pn_trace_add(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_peer_t *peer, uint16_t tid, void *rx_desc)
+{
+ uint32_t idx = pdev->rx_pn_trace.idx;
+ union htt_rx_pn_t pn;
+ uint32_t pn32;
+ uint16_t seq_num;
+ uint8_t unicast;
+
+ htt_rx_mpdu_desc_pn(pdev->htt_pdev, rx_desc, &pn, 48);
+ pn32 = pn.pn48 & 0xffffffff;
+ seq_num = htt_rx_mpdu_desc_seq_num(pdev->htt_pdev, rx_desc);
+ unicast = !htt_rx_msdu_is_wlan_mcast(pdev->htt_pdev, rx_desc);
+
+ pdev->rx_pn_trace.data[idx].peer = peer;
+ pdev->rx_pn_trace.data[idx].tid = tid;
+ pdev->rx_pn_trace.data[idx].seq_num = seq_num;
+ pdev->rx_pn_trace.data[idx].unicast = unicast;
+ pdev->rx_pn_trace.data[idx].pn32 = pn32;
+ pdev->rx_pn_trace.cnt++;
+ idx++;
+ pdev->rx_pn_trace.idx = idx & pdev->rx_pn_trace.mask;
+}
+
/*
 * Dump the circular rx PN trace log via CDF_TRACE.
 * @just_once: if nonzero, print only on the first call ever (used by the
 * PN-failure path to avoid flooding the log).
 *
 * If the log has wrapped, printing starts at the oldest retained entry;
 * the 'limit' local (currently 0 = unlimited) can cap how many entries
 * are shown.
 * NOTE(review): if the log is empty (cnt == 0, idx == 0) the do/while
 * still walks the whole zero-filled buffer once -- harmless but noisy;
 * confirm whether callers can reach this before any entry is added.
 */
void ol_rx_pn_trace_display(ol_txrx_pdev_handle pdev, int just_once)
{
	static int print_count /* = 0 */;
	uint32_t i, start, end;
	uint64_t cnt;
	int elems;
	int limit = 0;		/* move this to the arg list? */

	if (print_count != 0 && just_once)
		return;

	print_count++;

	end = pdev->rx_pn_trace.idx;
	if (pdev->rx_pn_trace.cnt <= pdev->rx_pn_trace.mask) {
		/* trace log has not yet wrapped around - start at the top */
		start = 0;
		cnt = 0;
	} else {
		start = end;
		cnt = pdev->rx_pn_trace.cnt - (pdev->rx_pn_trace.mask + 1);
	}
	elems = (end - 1 - start) & pdev->rx_pn_trace.mask;
	if (limit > 0 && elems > limit) {
		int delta;
		delta = elems - limit;
		start += delta;
		start &= pdev->rx_pn_trace.mask;
		cnt += delta;
	}

	i = start;
	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO,
		  "                                 seq     PN");
	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO,
		  "   count  idx    peer   tid uni  num    LSBs");
	do {
		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO,
			  "  %6lld %4d %p %2d   %d %4d %8d",
			  cnt, i,
			  pdev->rx_pn_trace.data[i].peer,
			  pdev->rx_pn_trace.data[i].tid,
			  pdev->rx_pn_trace.data[i].unicast,
			  pdev->rx_pn_trace.data[i].seq_num,
			  pdev->rx_pn_trace.data[i].pn32);
		cnt++;
		i++;
		i &= pdev->rx_pn_trace.mask;
	} while (i != end);
}
+#endif /* ENABLE_RX_PN_TRACE */
diff --git a/dp/txrx/ol_rx_pn.h b/dp/txrx/ol_rx_pn.h
new file mode 100644
index 000000000000..845dc9177394
--- /dev/null
+++ b/dp/txrx/ol_rx_pn.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2011, 2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _OL_RX_PN_H_
+#define _OL_RX_PN_H_
+
+#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+
+#include <ol_txrx_api.h> /* ol_txrx_peer_t, etc. */
+
+int ol_rx_pn_cmp24(union htt_rx_pn_t *new_pn,
+ union htt_rx_pn_t *old_pn, int is_unicast, int opmode);
+
+int ol_rx_pn_cmp48(union htt_rx_pn_t *new_pn,
+ union htt_rx_pn_t *old_pn, int is_unicast, int opmode);
+
+int ol_rx_pn_wapi_cmp(union htt_rx_pn_t *new_pn,
+ union htt_rx_pn_t *old_pn, int is_unicast, int opmode);
+
+/**
+ * @brief If applicable, check the Packet Number to detect replays.
+ * @details
+ * Determine whether a PN check is needed, and if so, what the PN size is.
+ * (A PN size of 0 is used to indirectly bypass the PN check for security
+ * methods that don't involve a PN check.)
+ * This function produces event notifications for any PN failures, via the
+ * ol_rx_err function.
+ * After the PN check, call the next stage of rx processing (rx --> tx
+ * forwarding check).
+ *
+ * @param vdev - which virtual device the frames were addressed to
+ * @param peer - which peer the rx frames belong to
+ * @param tid - which TID within the peer the rx frames belong to
+ * @param msdu_list - NULL-terminated list of MSDUs to perform PN check on
+ * (if PN check is applicable, i.e. PN length > 0)
+ */
+void
+ol_rx_pn_check(struct ol_txrx_vdev_t *vdev,
+ struct ol_txrx_peer_t *peer, unsigned tid, cdf_nbuf_t msdu_list);
+
+/**
+ * @brief If applicable, check the Packet Number to detect replays.
+ * @details
+ * Determine whether a PN check is needed, and if so, what the PN size is.
+ * (A PN size of 0 is used to indirectly bypass the PN check for security
+ * methods that don't involve a PN check.)
+ * This function produces event notifications for any PN failures, via the
+ * ol_rx_err function.
+ * After the PN check, deliver the valid rx frames to the OS shim.
+ * (Don't perform a rx --> tx forwarding check.)
+ *
+ * @param vdev - which virtual device the frames were addressed to
+ * @param peer - which peer the rx frames belong to
+ * @param tid - which TID within the peer the rx frames belong to
+ * @param msdu_list - NULL-terminated list of MSDUs to perform PN check on
+ * (if PN check is applicable, i.e. PN length > 0)
+ */
+void
+ol_rx_pn_check_only(struct ol_txrx_vdev_t *vdev,
+ struct ol_txrx_peer_t *peer,
+ unsigned tid, cdf_nbuf_t msdu_list);
+
+/**
+ * @brief If applicable, check the Packet Number to detect replays.
+ * @details
+ * Same as ol_rx_pn_check but return valid rx netbufs
+ * rather than invoking the rx --> tx forwarding check.
+ *
+ * @param vdev - which virtual device the frames were addressed to
+ * @param peer - which peer the rx frames belong to
+ * @param tid - which TID within the peer the rx frames belong to
+ * @param msdu_list - NULL-terminated list of MSDUs to perform PN check on
+ * (if PN check is applicable, i.e. PN length > 0)
+ * @return list of netbufs that didn't fail the PN check
+ */
+cdf_nbuf_t
+ol_rx_pn_check_base(struct ol_txrx_vdev_t *vdev,
+ struct ol_txrx_peer_t *peer,
+ unsigned tid, cdf_nbuf_t msdu_list);
+
+#endif /* _OL_RX_PN_H_ */
diff --git a/dp/txrx/ol_rx_reorder.c b/dp/txrx/ol_rx_reorder.c
new file mode 100644
index 000000000000..da0d486dddeb
--- /dev/null
+++ b/dp/txrx/ol_rx_reorder.c
@@ -0,0 +1,827 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/*=== header file includes ===*/
+/* generic utilities */
+#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+#include <cdf_memory.h> /* cdf_mem_malloc */
+
+#include <ieee80211.h> /* IEEE80211_SEQ_MAX */
+
+/* external interfaces */
+#include <ol_txrx_api.h> /* ol_txrx_pdev_handle */
+#include <ol_txrx_htt_api.h> /* ol_rx_addba_handler, etc. */
+#include <ol_ctrl_txrx_api.h> /* ol_ctrl_rx_addba_complete */
+#include <ol_htt_rx_api.h> /* htt_rx_desc_frame_free */
+#include <ol_ctrl_txrx_api.h> /* ol_rx_err */
+
+/* datapath internal interfaces */
+#include <ol_txrx_peer_find.h> /* ol_txrx_peer_find_by_id */
+#include <ol_txrx_internal.h> /* TXRX_ASSERT */
+#include <ol_rx_reorder_timeout.h> /* OL_RX_REORDER_TIMEOUT_REMOVE, etc. */
+#include <ol_rx_reorder.h>
+#include <ol_rx_defrag.h>
+
+/*=== data types and defines ===*/
+#define OL_RX_REORDER_ROUND_PWR2(value) g_log2ceil[value]
+
+/*=== global variables ===*/
+
+static char g_log2ceil[] = {
+ 1, /* 0 -> 1 */
+ 1, /* 1 -> 1 */
+ 2, /* 2 -> 2 */
+ 4, 4, /* 3-4 -> 4 */
+ 8, 8, 8, 8, /* 5-8 -> 8 */
+ 16, 16, 16, 16, 16, 16, 16, 16, /* 9-16 -> 16 */
+ 32, 32, 32, 32, 32, 32, 32, 32,
+ 32, 32, 32, 32, 32, 32, 32, 32, /* 17-32 -> 32 */
+ 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64, /* 33-64 -> 64 */
+};
+
+/*=== function definitions ===*/
+
+/*---*/
+
+#define QCA_SUPPORT_RX_REORDER_RELEASE_CHECK 0
+#define OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, idx_start) /* no-op */
+#define OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask) { idx &= win_sz_mask; }
+#define OL_RX_REORDER_IDX_MAX(win_sz, win_sz_mask) win_sz_mask
+#define OL_RX_REORDER_IDX_INIT(seq_num, win_sz, win_sz_mask) 0 /* n/a */
+#define OL_RX_REORDER_NO_HOLES(rx_reorder) 0
+#define OL_RX_REORDER_MPDU_CNT_INCR(rx_reorder, incr) /* n/a */
+#define OL_RX_REORDER_MPDU_CNT_DECR(rx_reorder, decr) /* n/a */
+
+/*---*/
+
+/* reorder array elements are known to be non-NULL */
+#define OL_RX_REORDER_PTR_CHECK(ptr) /* no-op */
+#define OL_RX_REORDER_LIST_APPEND(head_msdu, tail_msdu, rx_reorder_array_elem) \
+ do { \
+ if (tail_msdu) { \
+ cdf_nbuf_set_next(tail_msdu, \
+ rx_reorder_array_elem->head); \
+ } \
+ } while (0)
+
+/* functions called by txrx components */
+
+/**
+ * ol_rx_reorder_init() - reset a peer-TID's rx reorder state to the
+ *	non-aggregation defaults (block ack window size 1)
+ * @rx_reorder: the per-TID reorder state to initialize
+ * @tid: the traffic ID this reorder state belongs to
+ *
+ * Also clears the defrag waitlist linkage for the TID.
+ */
+void ol_rx_reorder_init(struct ol_rx_reorder_t *rx_reorder, uint8_t tid)
+{
+ rx_reorder->win_sz = 1;
+ rx_reorder->win_sz_mask = 0;
+ /* use the single statically-allocated element until an ADDBA occurs */
+ rx_reorder->array = &rx_reorder->base;
+ rx_reorder->base.head = rx_reorder->base.tail = NULL;
+ rx_reorder->tid = tid;
+ rx_reorder->defrag_timeout_ms = 0;
+
+ rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
+ rx_reorder->defrag_waitlist_elem.tqe_prev = NULL;
+}
+
+/**
+ * ol_rx_reorder_seq_num_check() - detect duplicate / late-arriving unicast
+ *	frames by comparing against the last sequence number released
+ * @pdev: physical device data object
+ * @peer: which peer sent the rx frame
+ * @tid: traffic ID of the rx frame
+ * @seq_num: 802.11 sequence number of the new rx frame
+ *
+ * Return: htt_rx_status_ok if the frame is new (or last_seq is invalid),
+ *	htt_rx_status_err_replay if it is a duplicate or late arrival
+ */
+static enum htt_rx_status
+ol_rx_reorder_seq_num_check(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_peer_t *peer,
+ unsigned tid, unsigned seq_num)
+{
+ unsigned seq_num_delta;
+
+ /* don't check the new seq_num against last_seq
+ if last_seq is not valid */
+ if (peer->tids_last_seq[tid] == IEEE80211_SEQ_MAX)
+ return htt_rx_status_ok;
+
+ /*
+ * For duplicate detection, it might be helpful to also check
+ * whether the retry bit is set or not - a strict duplicate packet
+ * should be the one with retry bit set.
+ * However, since many implementations do not set the retry bit,
+ * and since this same function is also used for filtering out
+ * late-arriving frames (frames that arrive after their rx reorder
+ * timeout has expired) which are not retries, don't bother checking
+ * the retry bit for now.
+ */
+ /* note: if new seq_num == old seq_num, seq_num_delta = 4095 */
+ seq_num_delta = (seq_num - 1 - peer->tids_last_seq[tid]) &
+ (IEEE80211_SEQ_MAX - 1); /* account for wraparound */
+
+ if (seq_num_delta > (IEEE80211_SEQ_MAX >> 1)) {
+ return htt_rx_status_err_replay;
+ /* or maybe htt_rx_status_err_dup */
+ }
+ return htt_rx_status_ok;
+}
+
+/**
+ * ol_rx_seq_num_check() - Does duplicate detection for mcast packets and
+ * duplicate detection & check for out-of-order
+ * packets for unicast packets.
+ * @pdev: Pointer to pdev maintained by OL
+ * @peer: Pointer to peer structure maintained by OL
+ * @tid: TID value passed as part of HTT msg by f/w
+ * @rx_mpdu_desc: Pointer to Rx Descriptor for the given MPDU
+ *
+ * This function
+ * 1) For Multicast Frames -- does duplicate detection
+ * A frame is considered duplicate & dropped if it has a seq.number
+ * which is received twice in succession and with the retry bit set
+ * in the second case.
+ * A frame which is older than the last sequence number received
+ * is not considered duplicate but out-of-order. This function does
+ * not perform an out-of-order check for multicast frames, which is in
+ * keeping with the 802.11 2012 spec section 9.3.2.10
+ * 2) For Unicast Frames -- does duplicate detection & out-of-order check
+ * only for non-aggregation tids.
+ *
+ * Return: Returns htt_rx_status_err_replay, if packet needs to be
+ * dropped, htt_rx_status_ok otherwise.
+ */
+enum htt_rx_status
+ol_rx_seq_num_check(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_peer_t *peer,
+ uint8_t tid,
+ void *rx_mpdu_desc)
+{
+ uint16_t pkt_tid = 0xffff;
+ uint16_t seq_num = IEEE80211_SEQ_MAX;
+ bool retry = 0;
+
+ seq_num = htt_rx_mpdu_desc_seq_num(pdev->htt_pdev, rx_mpdu_desc);
+
+ /* For mcast packets, we do only the dup-detection, not re-order check */
+
+ if (cdf_unlikely(OL_RX_MCAST_TID == tid)) {
+
+ pkt_tid = htt_rx_mpdu_desc_tid(pdev->htt_pdev, rx_mpdu_desc);
+
+ /* Invalid packet TID, expected only for HL */
+ /* Pass the packet on */
+ if (cdf_unlikely(pkt_tid >= OL_TXRX_NUM_EXT_TIDS))
+ return htt_rx_status_ok;
+
+ retry = htt_rx_mpdu_desc_retry(pdev->htt_pdev, rx_mpdu_desc);
+
+ /*
+ * At this point, we define frames to be duplicate if they arrive
+ * "ONLY" in succession with the same sequence number and the last
+ * one has the retry bit set. For an older frame, we consider that
+ * as an out of order frame, and hence do not perform the dup-detection
+ * or out-of-order check for multicast frames as per discussions & spec
+ * Hence "seq_num <= last_seq_num" check is not necessary.
+ */
+ if (cdf_unlikely(retry &&
+ (seq_num == peer->tids_mcast_last_seq[pkt_tid]))) {/* drop mcast */
+ TXRX_STATS_INCR(pdev, priv.rx.err.msdu_mc_dup_drop);
+ return htt_rx_status_err_replay;
+ } else {
+ /*
+ * This is a multicast packet likely to be passed on...
+ * Set the mcast last seq number here
+ * This is fairly accurate since:
+ * a) f/w sends multicast as separate PPDU/HTT messages
+ * b) Mcast packets are not aggregated & hence single
+ * c) Result of b) is that, flush / release bit is set always
+ * on the mcast packets, so likely to be immediately released.
+ */
+ peer->tids_mcast_last_seq[pkt_tid] = seq_num;
+ return htt_rx_status_ok;
+ }
+ } else
+ return ol_rx_reorder_seq_num_check(pdev, peer, tid, seq_num);
+}
+
+
+/**
+ * ol_rx_reorder_store() - park an MPDU's MSDU list in the peer-TID's
+ *	reorder array until the target indicates it may be released
+ * @pdev: physical device data object
+ * @peer: which peer sent the frames
+ * @tid: traffic ID of the frames
+ * @idx: reorder array index (masked to the window size below)
+ * @head_msdu: first MSDU of the MPDU being stored
+ * @tail_msdu: last MSDU of the MPDU being stored
+ */
+void
+ol_rx_reorder_store(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_peer_t *peer,
+ unsigned tid,
+ unsigned idx, cdf_nbuf_t head_msdu, cdf_nbuf_t tail_msdu)
+{
+ struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
+
+ idx &= peer->tids_rx_reorder[tid].win_sz_mask;
+ rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
+ if (rx_reorder_array_elem->head) {
+ /* slot already occupied: append to the existing MSDU chain */
+ cdf_nbuf_set_next(rx_reorder_array_elem->tail, head_msdu);
+ } else {
+ rx_reorder_array_elem->head = head_msdu;
+ OL_RX_REORDER_MPDU_CNT_INCR(&peer->tids_rx_reorder[tid], 1);
+ }
+ rx_reorder_array_elem->tail = tail_msdu;
+}
+
+/**
+ * ol_rx_reorder_release() - chain together the MSDU lists stored in the
+ *	reorder array slots [idx_start, idx_end) and deliver them to the
+ *	peer's rx handler (rx_opt_proc)
+ * @vdev: virtual device the frames were addressed to
+ * @peer: which peer sent the frames
+ * @tid: traffic ID of the frames
+ * @idx_start: first reorder slot to release (inclusive)
+ * @idx_end: reorder slot to stop at (exclusive)
+ *
+ * Also records the released batch's first sequence number in
+ * tids_last_seq and stops any host-based reorder timeout for the TID.
+ */
+void
+ol_rx_reorder_release(struct ol_txrx_vdev_t *vdev,
+ struct ol_txrx_peer_t *peer,
+ unsigned tid, unsigned idx_start, unsigned idx_end)
+{
+ unsigned idx;
+ unsigned win_sz, win_sz_mask;
+ struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
+ cdf_nbuf_t head_msdu;
+ cdf_nbuf_t tail_msdu;
+
+ OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
+ /* may get reset below */
+ peer->tids_next_rel_idx[tid] = (uint16_t) idx_end;
+
+ win_sz = peer->tids_rx_reorder[tid].win_sz;
+ win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
+ idx_start &= win_sz_mask;
+ idx_end &= win_sz_mask;
+ rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx_start];
+
+ head_msdu = rx_reorder_array_elem->head;
+ tail_msdu = rx_reorder_array_elem->tail;
+ rx_reorder_array_elem->head = rx_reorder_array_elem->tail = NULL;
+ /*
+ * OL_RX_REORDER_PTR_CHECK is a no-op in this configuration:
+ * slots within the released range are known to hold frames.
+ */
+ OL_RX_REORDER_PTR_CHECK(head_msdu) {
+ OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid], 1);
+ }
+
+ idx = (idx_start + 1);
+ OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask);
+ while (idx != idx_end) {
+ rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
+ OL_RX_REORDER_PTR_CHECK(rx_reorder_array_elem->head) {
+ OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid],
+ 1);
+ OL_RX_REORDER_LIST_APPEND(head_msdu, tail_msdu,
+ rx_reorder_array_elem);
+ tail_msdu = rx_reorder_array_elem->tail;
+ }
+ rx_reorder_array_elem->head = rx_reorder_array_elem->tail =
+ NULL;
+ idx++;
+ OL_RX_REORDER_IDX_WRAP(idx, win_sz, win_sz_mask);
+ }
+ OL_RX_REORDER_PTR_CHECK(head_msdu) {
+ uint16_t seq_num;
+ htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;
+
+ /*
+ * This logic is not quite correct - the last_seq value should
+ * be the sequence number of the final MPDU released rather than
+ * the initial MPDU released.
+ * However, tracking the sequence number of the first MPDU in
+ * the released batch works well enough:
+ * For Peregrine and Rome, the last_seq is checked only for
+ * non-aggregate cases, where only one MPDU at a time is
+ * released.
+ * For Riva, Pronto, and Northstar, the last_seq is checked to
+ * filter out late-arriving rx frames, whose sequence number
+ * will be less than the first MPDU in this release batch.
+ */
+ seq_num = htt_rx_mpdu_desc_seq_num(
+ htt_pdev,
+ htt_rx_msdu_desc_retrieve(htt_pdev,
+ head_msdu));
+ peer->tids_last_seq[tid] = seq_num;
+ /* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
+ cdf_nbuf_set_next(tail_msdu, NULL);
+ peer->rx_opt_proc(vdev, peer, tid, head_msdu);
+ }
+ /*
+ * If the rx reorder timeout is handled by host SW rather than the
+ * target's rx reorder logic, then stop the timer here.
+ * (If there are remaining rx holes, then the timer will be restarted.)
+ */
+ OL_RX_REORDER_TIMEOUT_REMOVE(peer, tid);
+}
+
+/**
+ * ol_rx_reorder_flush() - remove the frames stored in the reorder array
+ *	slots [idx_start, idx_end), then either deliver them to the rx
+ *	handler or free them, depending on @action
+ * @vdev: virtual device the frames were addressed to
+ * @peer: which peer sent the frames
+ * @tid: traffic ID of the frames
+ * @idx_start: first reorder slot to flush (inclusive)
+ * @idx_end: slot to stop at (exclusive), or 0xffff to flush everything
+ * @action: htt_rx_flush_release to deliver the frames,
+ *	otherwise free them via htt_rx_desc_frame_free
+ */
+void
+ol_rx_reorder_flush(struct ol_txrx_vdev_t *vdev,
+ struct ol_txrx_peer_t *peer,
+ unsigned tid,
+ unsigned idx_start,
+ unsigned idx_end, enum htt_rx_flush_action action)
+{
+ struct ol_txrx_pdev_t *pdev;
+ unsigned win_sz;
+ uint8_t win_sz_mask;
+ struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
+ cdf_nbuf_t head_msdu = NULL;
+ cdf_nbuf_t tail_msdu = NULL;
+
+ pdev = vdev->pdev;
+ win_sz = peer->tids_rx_reorder[tid].win_sz;
+ win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
+
+ OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
+ /* an idx_end value of 0xffff means to flush the entire array */
+ if (idx_end == 0xffff) {
+ idx_end = idx_start;
+ /*
+ * The array is being flushed in entirety because the block
+ * ack window has been shifted to a new position that does not
+ * overlap with the old position. (Or due to reception of a
+ * DELBA.)
+ * Thus, since the block ack window is essentially being reset,
+ * reset the "next release index".
+ */
+ peer->tids_next_rel_idx[tid] =
+ OL_RX_REORDER_IDX_INIT(0 /*n/a */, win_sz, win_sz_mask);
+ } else {
+ peer->tids_next_rel_idx[tid] = (uint16_t) idx_end;
+ }
+
+ idx_start &= win_sz_mask;
+ idx_end &= win_sz_mask;
+
+ /* collect all stored frames in the range into one chained list */
+ do {
+ rx_reorder_array_elem =
+ &peer->tids_rx_reorder[tid].array[idx_start];
+ idx_start = (idx_start + 1);
+ OL_RX_REORDER_IDX_WRAP(idx_start, win_sz, win_sz_mask);
+
+ if (rx_reorder_array_elem->head) {
+ OL_RX_REORDER_MPDU_CNT_DECR(&peer->tids_rx_reorder[tid],
+ 1);
+ if (head_msdu == NULL) {
+ head_msdu = rx_reorder_array_elem->head;
+ tail_msdu = rx_reorder_array_elem->tail;
+ rx_reorder_array_elem->head = NULL;
+ rx_reorder_array_elem->tail = NULL;
+ continue;
+ }
+ cdf_nbuf_set_next(tail_msdu,
+ rx_reorder_array_elem->head);
+ tail_msdu = rx_reorder_array_elem->tail;
+ rx_reorder_array_elem->head =
+ rx_reorder_array_elem->tail = NULL;
+ }
+ } while (idx_start != idx_end);
+
+ ol_rx_defrag_waitlist_remove(peer, tid);
+
+ if (head_msdu) {
+ uint16_t seq_num;
+ htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;
+
+ seq_num = htt_rx_mpdu_desc_seq_num(
+ htt_pdev,
+ htt_rx_msdu_desc_retrieve(htt_pdev, head_msdu));
+ peer->tids_last_seq[tid] = seq_num;
+ /* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
+ cdf_nbuf_set_next(tail_msdu, NULL);
+ if (action == htt_rx_flush_release) {
+ peer->rx_opt_proc(vdev, peer, tid, head_msdu);
+ } else {
+ /* discard: free every MSDU in the collected list */
+ do {
+ cdf_nbuf_t next;
+ next = cdf_nbuf_next(head_msdu);
+ htt_rx_desc_frame_free(pdev->htt_pdev,
+ head_msdu);
+ head_msdu = next;
+ } while (head_msdu);
+ }
+ }
+ /*
+ * If the rx reorder array is empty, then reset the last_seq value -
+ * it is likely that a BAR or a sequence number shift caused the
+ * sequence number to jump, so the old last_seq value is not relevant.
+ */
+ if (OL_RX_REORDER_NO_HOLES(&peer->tids_rx_reorder[tid]))
+ peer->tids_last_seq[tid] = IEEE80211_SEQ_MAX; /* invalid */
+
+ OL_RX_REORDER_TIMEOUT_REMOVE(peer, tid);
+}
+
+/**
+ * ol_rx_reorder_first_hole() - scan the reorder array to find where the
+ *	run of present MPDUs after the initial hole ends
+ * @peer: which peer's reorder state to scan
+ * @tid: which TID's reorder state to scan
+ * @idx_end: output - index of the first empty slot after that run
+ *	(exclusive bound, i.e. the start of the second hole)
+ */
+void
+ol_rx_reorder_first_hole(struct ol_txrx_peer_t *peer,
+ unsigned tid, unsigned *idx_end)
+{
+ unsigned win_sz, win_sz_mask;
+ unsigned idx_start = 0, tmp_idx = 0;
+
+ win_sz = peer->tids_rx_reorder[tid].win_sz;
+ win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
+
+ OL_RX_REORDER_IDX_START_SELF_SELECT(peer, tid, &idx_start);
+ tmp_idx++;
+ OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
+ /* bypass the initial hole */
+ while (tmp_idx != idx_start &&
+ !peer->tids_rx_reorder[tid].array[tmp_idx].head) {
+ tmp_idx++;
+ OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
+ }
+ /* bypass the present frames following the initial hole */
+ while (tmp_idx != idx_start &&
+ peer->tids_rx_reorder[tid].array[tmp_idx].head) {
+ tmp_idx++;
+ OL_RX_REORDER_IDX_WRAP(tmp_idx, win_sz, win_sz_mask);
+ }
+ /*
+ * idx_end is exclusive rather than inclusive.
+ * In other words, it is the index of the first slot of the second
+ * hole, rather than the index of the final present frame following
+ * the first hole.
+ */
+ *idx_end = tmp_idx;
+}
+
+/**
+ * ol_rx_reorder_peer_cleanup() - discard all frames held in every TID's
+ *	reorder array for a peer that is being torn down, and clean up any
+ *	host-based reorder timeout state for the peer
+ * @vdev: virtual device the peer belongs to
+ * @peer: the peer being cleaned up
+ */
+void
+ol_rx_reorder_peer_cleanup(struct ol_txrx_vdev_t *vdev,
+ struct ol_txrx_peer_t *peer)
+{
+ int tid;
+ for (tid = 0; tid < OL_TXRX_NUM_EXT_TIDS; tid++) {
+ ol_rx_reorder_flush(vdev, peer, tid, 0, 0,
+ htt_rx_flush_discard);
+ }
+ OL_RX_REORDER_TIMEOUT_PEER_CLEANUP(peer);
+}
+
+/* functions called by HTT */
+
+/**
+ * ol_rx_addba_handler() - HTT handler for an ADDBA event: set up the
+ *	peer-TID's rx reorder array sized to the (power-of-2 rounded)
+ *	block ack window
+ * @pdev: physical device data object
+ * @peer_id: which peer the ADDBA is for
+ * @tid: which TID the block ack agreement covers
+ * @win_sz: negotiated block ack window size (<= 64)
+ * @start_seq_num: starting sequence number of the window
+ * @failed: non-zero if the ADDBA negotiation failed
+ */
+void
+ol_rx_addba_handler(ol_txrx_pdev_handle pdev,
+ uint16_t peer_id,
+ uint8_t tid,
+ uint8_t win_sz, uint16_t start_seq_num, uint8_t failed)
+{
+ uint8_t round_pwr2_win_sz;
+ unsigned array_size;
+ struct ol_txrx_peer_t *peer;
+ struct ol_rx_reorder_t *rx_reorder;
+
+ peer = ol_txrx_peer_find_by_id(pdev, peer_id);
+ if (peer == NULL)
+ return;
+
+ if (pdev->cfg.host_addba) {
+ ol_ctrl_rx_addba_complete(pdev->ctrl_pdev,
+ &peer->mac_addr.raw[0], tid, failed);
+ }
+ if (failed)
+ return;
+
+ peer->tids_last_seq[tid] = IEEE80211_SEQ_MAX; /* invalid */
+ rx_reorder = &peer->tids_rx_reorder[tid];
+
+ TXRX_ASSERT2(win_sz <= 64);
+ rx_reorder->win_sz = win_sz;
+ round_pwr2_win_sz = OL_RX_REORDER_ROUND_PWR2(win_sz);
+ array_size =
+ round_pwr2_win_sz * sizeof(struct ol_rx_reorder_array_elem_t);
+ /*
+ * NOTE(review): allocation failure is only caught by TXRX_ASSERT1;
+ * if asserts are compiled out, the cdf_mem_set below would
+ * dereference NULL - confirm the intended OOM policy.
+ */
+ rx_reorder->array = cdf_mem_malloc(array_size);
+ TXRX_ASSERT1(rx_reorder->array);
+ cdf_mem_set(rx_reorder->array, array_size, 0x0);
+
+ rx_reorder->win_sz_mask = round_pwr2_win_sz - 1;
+ rx_reorder->num_mpdus = 0;
+
+ peer->tids_next_rel_idx[tid] =
+ OL_RX_REORDER_IDX_INIT(start_seq_num, rx_reorder->win_sz,
+ rx_reorder->win_sz_mask);
+}
+
+/**
+ * ol_rx_delba_handler() - HTT handler for a DELBA event: tear down the
+ *	peer-TID's rx reorder array and return the TID to the
+ *	non-aggregation defaults
+ * @pdev: physical device data object
+ * @peer_id: which peer the DELBA is for
+ * @tid: which TID's block ack agreement is being deleted
+ */
+void
+ol_rx_delba_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id, uint8_t tid)
+{
+ struct ol_txrx_peer_t *peer;
+ struct ol_rx_reorder_t *rx_reorder;
+
+ peer = ol_txrx_peer_find_by_id(pdev, peer_id);
+ if (peer == NULL)
+ return;
+
+ peer->tids_next_rel_idx[tid] = 0xffff; /* invalid value */
+ rx_reorder = &peer->tids_rx_reorder[tid];
+
+ /* check that there really was a block ack agreement */
+ TXRX_ASSERT1(rx_reorder->win_sz_mask != 0);
+ /*
+ * Deallocate the old rx reorder array.
+ * The call to ol_rx_reorder_init below
+ * will reset rx_reorder->array to point to
+ * the single-element statically-allocated reorder array
+ * used for non block-ack cases.
+ */
+ if (rx_reorder->array != &rx_reorder->base) {
+ TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
+ "%s, delete reorder array, tid:%d\n", __func__, tid);
+ cdf_mem_free(rx_reorder->array);
+ }
+
+ /* set up the TID with default parameters (ARQ window size = 1) */
+ ol_rx_reorder_init(rx_reorder, tid);
+}
+
+/**
+ * ol_rx_flush_handler() - HTT handler for a reorder flush indication;
+ *	dispatches to the fragment flush path if the first flushed slot
+ *	holds a fragment, otherwise to ol_rx_reorder_flush
+ * @pdev: physical device data object
+ * @peer_id: which peer the flush applies to
+ * @tid: which TID within the peer to flush
+ * @idx_start: first reorder slot to flush (inclusive)
+ * @idx_end: slot to stop at (exclusive)
+ * @action: whether to deliver (release) or free (discard) the frames
+ */
+void
+ol_rx_flush_handler(ol_txrx_pdev_handle pdev,
+ uint16_t peer_id,
+ uint8_t tid,
+ uint16_t idx_start,
+ uint16_t idx_end, enum htt_rx_flush_action action)
+{
+ struct ol_txrx_vdev_t *vdev = NULL;
+ void *rx_desc;
+ struct ol_txrx_peer_t *peer;
+ int idx;
+ struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
+ htt_pdev_handle htt_pdev = pdev->htt_pdev;
+
+ peer = ol_txrx_peer_find_by_id(pdev, peer_id);
+ if (peer)
+ vdev = peer->vdev;
+ else
+ return;
+
+ OL_RX_REORDER_TIMEOUT_MUTEX_LOCK(pdev);
+
+ idx = idx_start & peer->tids_rx_reorder[tid].win_sz_mask;
+ rx_reorder_array_elem = &peer->tids_rx_reorder[tid].array[idx];
+ if (rx_reorder_array_elem->head) {
+ rx_desc =
+ htt_rx_msdu_desc_retrieve(htt_pdev,
+ rx_reorder_array_elem->head);
+ if (htt_rx_msdu_is_frag(htt_pdev, rx_desc)) {
+ ol_rx_reorder_flush_frag(htt_pdev, peer, tid,
+ idx_start);
+ /*
+ * Assuming flush message sent separately for frags
+ * and for normal frames
+ */
+ OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);
+ return;
+ }
+ }
+ ol_rx_reorder_flush(vdev, peer, tid, idx_start, idx_end, action);
+ /*
+ * If the rx reorder timeout is handled by host SW, see if there are
+ * remaining rx holes that require the timer to be restarted.
+ */
+ OL_RX_REORDER_TIMEOUT_UPDATE(peer, tid);
+ OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev);
+}
+
+/**
+ * ol_rx_pn_ind_handler() - HTT handler for a target PN-check failure
+ *	indication: walk the reorder slots [seq_num_start, seq_num_end),
+ *	free the MPDUs the target flagged as PN failures (reporting each
+ *	via ol_rx_err), and deliver the remaining frames to rx_opt_proc
+ * @pdev: physical device data object
+ * @peer_id: which peer the indication refers to (falls back to the OCB
+ *	self peer if no match is found)
+ * @tid: which TID within the peer
+ * @seq_num_start: first reorder slot covered (inclusive)
+ * @seq_num_end: slot to stop at (exclusive)
+ * @pn_ie_cnt: number of entries in the @pn_ie list
+ * @pn_ie: list of reorder indices whose MPDUs failed the target PN check
+ */
+void
+ol_rx_pn_ind_handler(ol_txrx_pdev_handle pdev,
+ uint16_t peer_id,
+ uint8_t tid,
+ int seq_num_start,
+ int seq_num_end, uint8_t pn_ie_cnt, uint8_t *pn_ie)
+{
+ struct ol_txrx_vdev_t *vdev = NULL;
+ void *rx_desc;
+ struct ol_txrx_peer_t *peer;
+ struct ol_rx_reorder_array_elem_t *rx_reorder_array_elem;
+ unsigned win_sz_mask;
+ cdf_nbuf_t head_msdu = NULL;
+ cdf_nbuf_t tail_msdu = NULL;
+ htt_pdev_handle htt_pdev = pdev->htt_pdev;
+ int seq_num, i = 0;
+
+ peer = ol_txrx_peer_find_by_id(pdev, peer_id);
+
+ if (!peer) {
+ /*
+ * If we can't find a peer send this packet to OCB interface
+ * using OCB self peer
+ */
+ if (!ol_txrx_get_ocb_peer(pdev, &peer))
+ peer = NULL;
+ }
+
+ if (peer)
+ vdev = peer->vdev;
+ else
+ return;
+
+ cdf_atomic_set(&peer->fw_pn_check, 1);
+ /*TODO: Fragmentation case */
+ win_sz_mask = peer->tids_rx_reorder[tid].win_sz_mask;
+ seq_num_start &= win_sz_mask;
+ seq_num_end &= win_sz_mask;
+ seq_num = seq_num_start;
+
+ do {
+ rx_reorder_array_elem =
+ &peer->tids_rx_reorder[tid].array[seq_num];
+
+ if (rx_reorder_array_elem->head) {
+ /* pn_ie entries are in slot order; i tracks the next one */
+ if (pn_ie_cnt && seq_num == (int)(pn_ie[i])) {
+ cdf_nbuf_t msdu, next_msdu, mpdu_head,
+ mpdu_tail;
+ /*
+ * static: rate-limits the PN-failure warning
+ * across all calls of this function
+ */
+ static uint32_t last_pncheck_print_time;
+ /* Do not need to initialize as C does it */
+
+ int log_level;
+ uint32_t current_time_ms;
+ union htt_rx_pn_t pn = { 0 };
+ int index, pn_len;
+
+ mpdu_head = msdu = rx_reorder_array_elem->head;
+ mpdu_tail = rx_reorder_array_elem->tail;
+
+ pn_ie_cnt--;
+ i++;
+ rx_desc = htt_rx_msdu_desc_retrieve(htt_pdev,
+ msdu);
+ index = htt_rx_msdu_is_wlan_mcast(
+ pdev->htt_pdev, rx_desc)
+ ? txrx_sec_mcast
+ : txrx_sec_ucast;
+ pn_len = pdev->rx_pn[peer->security[index].
+ sec_type].len;
+ htt_rx_mpdu_desc_pn(htt_pdev, rx_desc, &pn,
+ pn_len);
+
+ current_time_ms = cdf_system_ticks_to_msecs(
+ cdf_system_ticks());
+ if (TXRX_PN_CHECK_FAILURE_PRINT_PERIOD_MS <
+ (current_time_ms -
+ last_pncheck_print_time)) {
+ last_pncheck_print_time =
+ current_time_ms;
+ log_level = TXRX_PRINT_LEVEL_WARN;
+ } else {
+ log_level = TXRX_PRINT_LEVEL_INFO2;
+ }
+ TXRX_PRINT(log_level,
+ "Tgt PN check failed - TID %d, peer %p "
+ "(%02x:%02x:%02x:%02x:%02x:%02x)\n"
+ " PN (u64 x2)= 0x%08llx %08llx (LSBs = %lld)\n"
+ " new seq num = %d\n",
+ tid, peer,
+ peer->mac_addr.raw[0],
+ peer->mac_addr.raw[1],
+ peer->mac_addr.raw[2],
+ peer->mac_addr.raw[3],
+ peer->mac_addr.raw[4],
+ peer->mac_addr.raw[5], pn.pn128[1],
+ pn.pn128[0],
+ pn.pn128[0] & 0xffffffffffffULL,
+ htt_rx_mpdu_desc_seq_num(htt_pdev,
+ rx_desc));
+ ol_rx_err(pdev->ctrl_pdev, vdev->vdev_id,
+ peer->mac_addr.raw, tid,
+ htt_rx_mpdu_desc_tsf32(htt_pdev,
+ rx_desc),
+ OL_RX_ERR_PN, mpdu_head, NULL, 0);
+
+ /* free all MSDUs within this MPDU */
+ do {
+ next_msdu = cdf_nbuf_next(msdu);
+ htt_rx_desc_frame_free(htt_pdev, msdu);
+ if (msdu == mpdu_tail)
+ break;
+ else
+ msdu = next_msdu;
+ } while (1);
+
+ } else {
+ /* PN check passed: chain onto the delivery list */
+ if (head_msdu == NULL) {
+ head_msdu = rx_reorder_array_elem->head;
+ tail_msdu = rx_reorder_array_elem->tail;
+ } else {
+ cdf_nbuf_set_next(
+ tail_msdu,
+ rx_reorder_array_elem->head);
+ tail_msdu = rx_reorder_array_elem->tail;
+ }
+ }
+ rx_reorder_array_elem->head = NULL;
+ rx_reorder_array_elem->tail = NULL;
+ }
+ seq_num = (seq_num + 1) & win_sz_mask;
+ } while (seq_num != seq_num_end);
+
+ if (head_msdu) {
+ /* rx_opt_proc takes a NULL-terminated list of msdu netbufs */
+ cdf_nbuf_set_next(tail_msdu, NULL);
+ peer->rx_opt_proc(vdev, peer, tid, head_msdu);
+ }
+}
+
+#if defined(ENABLE_RX_REORDER_TRACE)
+
+/**
+ * ol_rx_reorder_trace_attach() - allocate and initialize the pdev's rx
+ *	reorder trace ring buffer (2^TXRX_RX_REORDER_TRACE_SIZE_LOG2 entries)
+ * @pdev: physical device data object
+ *
+ * Each entry's seq_num is set to 0xffff so an unused (not yet wrapped)
+ * log can be recognized by ol_rx_reorder_trace_display().
+ *
+ * Return: A_OK on success, A_NO_MEMORY if the buffer allocation fails
+ */
+A_STATUS ol_rx_reorder_trace_attach(ol_txrx_pdev_handle pdev)
+{
+ int num_elems;
+
+ num_elems = 1 << TXRX_RX_REORDER_TRACE_SIZE_LOG2;
+ pdev->rx_reorder_trace.idx = 0;
+ pdev->rx_reorder_trace.cnt = 0;
+ pdev->rx_reorder_trace.mask = num_elems - 1;
+ pdev->rx_reorder_trace.data = cdf_mem_malloc(
+ sizeof(*pdev->rx_reorder_trace.data) * num_elems);
+ if (!pdev->rx_reorder_trace.data)
+ return A_NO_MEMORY;
+
+ while (--num_elems >= 0)
+ pdev->rx_reorder_trace.data[num_elems].seq_num = 0xffff;
+
+ return A_OK;
+}
+
+/**
+ * ol_rx_reorder_trace_detach() - free the rx reorder trace ring buffer
+ *	allocated by ol_rx_reorder_trace_attach()
+ * @pdev: physical device data object
+ */
+void ol_rx_reorder_trace_detach(ol_txrx_pdev_handle pdev)
+{
+ cdf_mem_free(pdev->rx_reorder_trace.data);
+}
+
+/**
+ * ol_rx_reorder_trace_add() - record one rx reorder event in the trace
+ *	ring buffer, advancing (and wrapping) the write index
+ * @pdev: physical device data object
+ * @tid: traffic ID of the event
+ * @reorder_idx: reorder array index involved
+ * @seq_num: sequence number of the event (or an error code encoding)
+ * @num_mpdus: number of MPDUs covered by the event
+ */
+void
+ol_rx_reorder_trace_add(ol_txrx_pdev_handle pdev,
+ uint8_t tid,
+ uint16_t reorder_idx, uint16_t seq_num, int num_mpdus)
+{
+ uint32_t idx = pdev->rx_reorder_trace.idx;
+
+ pdev->rx_reorder_trace.data[idx].tid = tid;
+ pdev->rx_reorder_trace.data[idx].reorder_idx = reorder_idx;
+ pdev->rx_reorder_trace.data[idx].seq_num = seq_num;
+ pdev->rx_reorder_trace.data[idx].num_mpdus = num_mpdus;
+ pdev->rx_reorder_trace.cnt++;
+ idx++;
+ pdev->rx_reorder_trace.idx = idx & pdev->rx_reorder_trace.mask;
+}
+
+/**
+ * ol_rx_reorder_trace_display() - dump the rx reorder trace log via
+ *	CDF_TRACE, oldest entry first
+ * @pdev: physical device data object
+ * @just_once: if non-zero, print only on the first call ever
+ * @limit: if > 0, print at most this many (most recent) entries
+ *
+ * Entries with seq_num >= 2^14 are treated as encoded error records
+ * (decoded via TXRX_SEQ_NUM_ERR) rather than sequence numbers.
+ */
+void
+ol_rx_reorder_trace_display(ol_txrx_pdev_handle pdev, int just_once, int limit)
+{
+ static int print_count;
+ uint32_t i, start, end;
+ uint64_t cnt;
+ int elems;
+
+ if (print_count != 0 && just_once)
+ return;
+
+ print_count++;
+
+ end = pdev->rx_reorder_trace.idx;
+ if (pdev->rx_reorder_trace.data[end].seq_num == 0xffff) {
+ /* trace log has not yet wrapped around - start at the top */
+ start = 0;
+ cnt = 0;
+ } else {
+ start = end;
+ cnt = pdev->rx_reorder_trace.cnt -
+ (pdev->rx_reorder_trace.mask + 1);
+ }
+ elems = (end - 1 - start) & pdev->rx_reorder_trace.mask;
+ if (limit > 0 && elems > limit) {
+ int delta;
+ delta = elems - limit;
+ start += delta;
+ start &= pdev->rx_reorder_trace.mask;
+ cnt += delta;
+ }
+
+ i = start;
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO,
+ " log array seq");
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO,
+ " count idx tid idx num (LSBs)");
+ do {
+ uint16_t seq_num, reorder_idx;
+ seq_num = pdev->rx_reorder_trace.data[i].seq_num;
+ reorder_idx = pdev->rx_reorder_trace.data[i].reorder_idx;
+ if (seq_num < (1 << 14)) {
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO,
+ " %6lld %4d %3d %4d %4d (%d)",
+ cnt, i, pdev->rx_reorder_trace.data[i].tid,
+ reorder_idx, seq_num, seq_num & 63);
+ } else {
+ int err = TXRX_SEQ_NUM_ERR(seq_num);
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO,
+ " %6lld %4d err %d (%d MPDUs)",
+ cnt, i, err,
+ pdev->rx_reorder_trace.data[i].num_mpdus);
+ }
+ cnt++;
+ i++;
+ i &= pdev->rx_reorder_trace.mask;
+ } while (i != end);
+}
+
+#endif /* ENABLE_RX_REORDER_TRACE */
diff --git a/dp/txrx/ol_rx_reorder.h b/dp/txrx/ol_rx_reorder.h
new file mode 100644
index 000000000000..7629c6a9b2f7
--- /dev/null
+++ b/dp/txrx/ol_rx_reorder.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _OL_RX_REORDER__H_
+#define _OL_RX_REORDER__H_
+
+#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+
+#include <ol_txrx_api.h> /* ol_txrx_peer_t, etc. */
+
+#include <ol_txrx_types.h> /* ol_rx_reorder_t */
+
+void
+ol_rx_reorder_store(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_peer_t *peer,
+ unsigned tid,
+ unsigned reorder_array_index,
+ cdf_nbuf_t head_msdu, cdf_nbuf_t tail_msdu);
+
+void
+ol_rx_reorder_release(struct ol_txrx_vdev_t *vdev,
+ struct ol_txrx_peer_t *peer,
+ unsigned tid,
+ unsigned seq_num_start, unsigned seq_num_end);
+
+void
+ol_rx_reorder_flush(struct ol_txrx_vdev_t *vdev,
+ struct ol_txrx_peer_t *peer,
+ unsigned tid,
+ unsigned seq_num_start,
+ unsigned seq_num_end, enum htt_rx_flush_action action);
+
+/**
+ * @brief - find end of first range of present MPDUs after the initial rx hole
+ * @param[in] peer - which sender's data is being checked
+ * @param[in] tid - which type of data is being checked
+ * @param[out] idx_end - the reorder array index holding the last MPDU in the
+ * range of in-order MPDUs that follow the initial hole.
+ * Note that this is the index of the last in-order MPDU following the
+ * first hole, rather than the starting index of the second hole.
+ */
+void
+ol_rx_reorder_first_hole(struct ol_txrx_peer_t *peer,
+ unsigned tid, unsigned *idx_end);
+
+void
+ol_rx_reorder_peer_cleanup(struct ol_txrx_vdev_t *vdev,
+ struct ol_txrx_peer_t *peer);
+
+void ol_rx_reorder_init(struct ol_rx_reorder_t *rx_reorder, uint8_t tid);
+
+enum htt_rx_status
+ol_rx_seq_num_check(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_peer_t *peer,
+ uint8_t tid, void *rx_mpdu_desc);
+
+/*
+ * Peregrine and Rome: do sequence number checking in the host
+ * for peer-TIDs without aggregation enabled
+ */
+
+#define OL_RX_SEQ_NUM_CHECK(pdev, peer, tid, rx_mpdu_desc) \
+ (pdev->rx.flags.dup_check && peer->tids_rx_reorder[tid].win_sz_mask == 0) ? \
+ ol_rx_seq_num_check( \
+ pdev, peer, tid, \
+ rx_mpdu_desc) : \
+ htt_rx_status_ok
+
+#endif /* _OL_RX_REORDER__H_ */
diff --git a/dp/txrx/ol_rx_reorder_timeout.c b/dp/txrx/ol_rx_reorder_timeout.c
new file mode 100644
index 000000000000..cc2e2a32c78f
--- /dev/null
+++ b/dp/txrx/ol_rx_reorder_timeout.c
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2012-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/*=== header file includes ===*/
+/* generic utilities */
+#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+#include <cdf_softirq_timer.h>
+#include <cdf_time.h>
+
+/* datapath internal interfaces */
+#include <ol_txrx_types.h> /* ol_txrx_pdev_t, etc. */
+#include <ol_txrx_internal.h> /* TXRX_ASSERT, etc. */
+#include <ol_rx_reorder.h> /* ol_rx_reorder_flush, etc. */
+
+#ifdef QCA_SUPPORT_OL_RX_REORDER_TIMEOUT
+
+/*
+ * Deactivate the virtual reorder-release timeout for one peer-TID and
+ * unlink its element from the WMM access category's virtual timer list.
+ * Idempotent: returns immediately if the element is not currently queued.
+ * Does not touch the underlying per-AC cdf timer; if the timer later
+ * fires with an empty list, the callback simply does nothing.
+ */
+void ol_rx_reorder_timeout_remove(struct ol_txrx_peer_t *peer, unsigned tid)
+{
+ struct ol_txrx_pdev_t *pdev;
+ struct ol_tx_reorder_cat_timeout_t *rx_reorder_timeout_ac;
+ struct ol_rx_reorder_timeout_list_elem_t *list_elem;
+ int ac;
+
+ pdev = peer->vdev->pdev;
+ /* map the rx TID to its WMM access category's timeout bucket */
+ ac = TXRX_TID_TO_WMM_AC(tid);
+ rx_reorder_timeout_ac = &pdev->rx.reorder_timeout.access_cats[ac];
+ list_elem = &peer->tids_rx_reorder[tid].timeout;
+ if (!list_elem->active) {
+ /* this element has already been removed */
+ return;
+ }
+ list_elem->active = 0;
+ TAILQ_REMOVE(&rx_reorder_timeout_ac->virtual_timer_list, list_elem,
+ reorder_timeout_list_elem);
+}
+
+/*
+ * Arm the access category's one-shot timer to fire when the head element
+ * of the virtual timer list expires.
+ * Caller must guarantee the list is non-empty and that the head element's
+ * timestamp is in the future (both callers in this file do: _add inserts
+ * a now+duration timestamp, and the timeout callback stops processing at
+ * the first unexpired element); otherwise duration_ms would underflow.
+ */
+static void
+ol_rx_reorder_timeout_start(struct ol_tx_reorder_cat_timeout_t
+ *rx_reorder_timeout_ac, uint32_t time_now_ms)
+{
+ uint32_t duration_ms;
+ struct ol_rx_reorder_timeout_list_elem_t *list_elem;
+
+ list_elem = TAILQ_FIRST(&rx_reorder_timeout_ac->virtual_timer_list);
+
+ duration_ms = list_elem->timestamp_ms - time_now_ms;
+ cdf_softirq_timer_start(&rx_reorder_timeout_ac->timer, duration_ms);
+}
+
+/*
+ * Activate a virtual reorder timeout for one peer-TID: stamp its list
+ * element with an expiration time of now + the AC's fixed duration and
+ * append it to the AC's virtual timer list.
+ * Because every element in a given AC list uses the same duration, the
+ * tail insertion keeps the list sorted by expiration time.
+ * The real (hardware/software) timer is only armed when this element is
+ * the first in a previously-empty list; otherwise the already-armed
+ * timer covers an earlier or equal expiration.
+ */
+static inline void
+ol_rx_reorder_timeout_add(struct ol_txrx_peer_t *peer, uint8_t tid)
+{
+ uint32_t time_now_ms;
+ struct ol_txrx_pdev_t *pdev;
+ struct ol_tx_reorder_cat_timeout_t *rx_reorder_timeout_ac;
+ struct ol_rx_reorder_timeout_list_elem_t *list_elem;
+ int ac;
+ int start;
+
+ pdev = peer->vdev->pdev;
+ ac = TXRX_TID_TO_WMM_AC(tid);
+ rx_reorder_timeout_ac = &pdev->rx.reorder_timeout.access_cats[ac];
+ list_elem = &peer->tids_rx_reorder[tid].timeout;
+
+ list_elem->active = 1;
+ list_elem->peer = peer;
+ list_elem->tid = tid;
+
+ /* set the expiration timestamp */
+ time_now_ms = cdf_system_ticks_to_msecs(cdf_system_ticks());
+ list_elem->timestamp_ms =
+ time_now_ms + rx_reorder_timeout_ac->duration_ms;
+
+ /* add to the queue */
+ start = TAILQ_EMPTY(&rx_reorder_timeout_ac->virtual_timer_list);
+ TAILQ_INSERT_TAIL(&rx_reorder_timeout_ac->virtual_timer_list,
+ list_elem, reorder_timeout_list_elem);
+ if (start)
+ ol_rx_reorder_timeout_start(rx_reorder_timeout_ac, time_now_ms);
+}
+
+/*
+ * Ensure a reorder timeout is running for this peer-TID if (and only if)
+ * it currently holds buffered MPDUs behind a hole.
+ * No-ops when the peer is NULL, when nothing is queued for the TID, or
+ * when a virtual timeout for this peer-TID is already active.
+ */
+void ol_rx_reorder_timeout_update(struct ol_txrx_peer_t *peer, uint8_t tid)
+{
+ if (!peer)
+ return;
+
+ /*
+ * If there are no holes, i.e. no queued frames,
+ * then timeout doesn't apply.
+ */
+ if (peer->tids_rx_reorder[tid].num_mpdus == 0)
+ return;
+
+ /*
+ * If the virtual timer for this peer-TID is already running,
+ * then leave it.
+ */
+ if (peer->tids_rx_reorder[tid].timeout.active)
+ return;
+
+ ol_rx_reorder_timeout_add(peer, tid);
+}
+
+/*
+ * Per-AC timer callback (softirq context), arg is the AC's
+ * ol_tx_reorder_cat_timeout_t.
+ * Under the pdev rx mutex, walk the AC's virtual timer list in
+ * expiration order; for every element whose deadline has passed,
+ * dequeue it and flush the peer-TID's reorder buffer from the release
+ * index (0xffff sentinel = next_rel_idx) through the end of the first
+ * in-order run after the hole, releasing those MPDUs to the rx path.
+ * The walk stops at the first unexpired element, and the timer is
+ * re-armed if any unexpired elements remain.
+ */
+static void ol_rx_reorder_timeout(void *arg)
+{
+ struct ol_txrx_pdev_t *pdev;
+ struct ol_rx_reorder_timeout_list_elem_t *list_elem, *tmp;
+ uint32_t time_now_ms;
+ struct ol_tx_reorder_cat_timeout_t *rx_reorder_timeout_ac;
+
+ rx_reorder_timeout_ac = (struct ol_tx_reorder_cat_timeout_t *)arg;
+ time_now_ms = cdf_system_ticks_to_msecs(cdf_system_ticks());
+
+ pdev = rx_reorder_timeout_ac->pdev;
+ cdf_spin_lock(&pdev->rx.mutex);
+/* TODO: conditionally take mutex lock during regular rx */
+ TAILQ_FOREACH_SAFE(list_elem,
+ &rx_reorder_timeout_ac->virtual_timer_list,
+ reorder_timeout_list_elem, tmp) {
+ unsigned idx_start, idx_end;
+ struct ol_txrx_peer_t *peer;
+
+ if (list_elem->timestamp_ms > time_now_ms)
+ break; /* time has not expired yet for this element */
+
+ list_elem->active = 0;
+ /* remove the expired element from the list */
+ TAILQ_REMOVE(&rx_reorder_timeout_ac->virtual_timer_list,
+ list_elem, reorder_timeout_list_elem);
+
+ peer = list_elem->peer;
+
+ idx_start = 0xffff; /* start from next_rel_idx */
+ ol_rx_reorder_first_hole(peer, list_elem->tid, &idx_end);
+ ol_rx_reorder_flush(peer->vdev,
+ peer,
+ list_elem->tid,
+ idx_start, idx_end, htt_rx_flush_release);
+ }
+ /* restart the timer if unexpired elements are left in the list */
+ if (!TAILQ_EMPTY(&rx_reorder_timeout_ac->virtual_timer_list))
+ ol_rx_reorder_timeout_start(rx_reorder_timeout_ac, time_now_ms);
+
+ cdf_spin_unlock(&pdev->rx.mutex);
+}
+
+/*
+ * One-time pdev setup: for each WMM access category, initialize the
+ * softirq timer (callback: ol_rx_reorder_timeout), its virtual timer
+ * list, and the back-pointer to the pdev, then assign the fixed
+ * per-category timeout durations (voice shortest at 40 ms; video,
+ * best-effort and background all 100 ms).
+ */
+void ol_rx_reorder_timeout_init(struct ol_txrx_pdev_t *pdev)
+{
+ int i;
+
+ for (i = 0; i < CDF_ARRAY_SIZE(pdev->rx.reorder_timeout.access_cats);
+ i++) {
+ struct ol_tx_reorder_cat_timeout_t *rx_reorder_timeout_ac;
+ rx_reorder_timeout_ac =
+ &pdev->rx.reorder_timeout.access_cats[i];
+ /* init the per-AC timers */
+ cdf_softirq_timer_init(pdev->osdev,
+ &rx_reorder_timeout_ac->timer,
+ ol_rx_reorder_timeout,
+ rx_reorder_timeout_ac);
+ /* init the virtual timer list */
+ TAILQ_INIT(&rx_reorder_timeout_ac->virtual_timer_list);
+ rx_reorder_timeout_ac->pdev = pdev;
+ }
+ pdev->rx.reorder_timeout.access_cats[TXRX_WMM_AC_VO].duration_ms = 40;
+ pdev->rx.reorder_timeout.access_cats[TXRX_WMM_AC_VI].duration_ms = 100;
+ pdev->rx.reorder_timeout.access_cats[TXRX_WMM_AC_BE].duration_ms = 100;
+ pdev->rx.reorder_timeout.access_cats[TXRX_WMM_AC_BK].duration_ms = 100;
+}
+
+/*
+ * Cancel any active virtual reorder timeouts for every extended TID of
+ * the given peer, e.g. before the peer object is freed.
+ */
+void ol_rx_reorder_timeout_peer_cleanup(struct ol_txrx_peer_t *peer)
+{
+ int tid;
+
+ for (tid = 0; tid < OL_TXRX_NUM_EXT_TIDS; tid++) {
+ if (peer->tids_rx_reorder[tid].timeout.active)
+ ol_rx_reorder_timeout_remove(peer, tid);
+ }
+}
+
+/*
+ * pdev teardown counterpart of ol_rx_reorder_timeout_init: cancel and
+ * free each access category's timer.  Assumes peers (and thus their
+ * virtual-timer list entries) have already been cleaned up.
+ */
+void ol_rx_reorder_timeout_cleanup(struct ol_txrx_pdev_t *pdev)
+{
+ int i;
+
+ for (i = 0; i < CDF_ARRAY_SIZE(pdev->rx.reorder_timeout.access_cats);
+ i++) {
+ struct ol_tx_reorder_cat_timeout_t *rx_reorder_timeout_ac;
+ rx_reorder_timeout_ac =
+ &pdev->rx.reorder_timeout.access_cats[i];
+ cdf_softirq_timer_cancel(&rx_reorder_timeout_ac->timer);
+ cdf_softirq_timer_free(&rx_reorder_timeout_ac->timer);
+ }
+}
+
+#endif /* QCA_SUPPORT_OL_RX_REORDER_TIMEOUT */
diff --git a/dp/txrx/ol_rx_reorder_timeout.h b/dp/txrx/ol_rx_reorder_timeout.h
new file mode 100644
index 000000000000..1f43871eeb64
--- /dev/null
+++ b/dp/txrx/ol_rx_reorder_timeout.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2012, 2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _OL_RX_REORDER_TIMEOUT__H_
+#define _OL_RX_REORDER_TIMEOUT__H_
+
+#include <ol_txrx_types.h> /* ol_txrx_pdev_t, etc. */
+
+#ifdef QCA_SUPPORT_OL_RX_REORDER_TIMEOUT
+
+/* Real implementations (ol_rx_reorder_timeout.c); see that file for
+ * detailed semantics of each function. */
+void ol_rx_reorder_timeout_init(struct ol_txrx_pdev_t *pdev);
+void ol_rx_reorder_timeout_cleanup(struct ol_txrx_pdev_t *pdev);
+void ol_rx_reorder_timeout_remove(struct ol_txrx_peer_t *peer, unsigned tid);
+void ol_rx_reorder_timeout_update(struct ol_txrx_peer_t *peer, uint8_t tid);
+void ol_rx_reorder_timeout_peer_cleanup(struct ol_txrx_peer_t *peer);
+
+/* Callers use the upper-case macro names so the calls compile away to
+ * nothing when the feature is disabled (see #else branch below). */
+#define OL_RX_REORDER_TIMEOUT_INIT ol_rx_reorder_timeout_init
+#define OL_RX_REORDER_TIMEOUT_PEER_CLEANUP ol_rx_reorder_timeout_peer_cleanup
+#define OL_RX_REORDER_TIMEOUT_CLEANUP ol_rx_reorder_timeout_cleanup
+#define OL_RX_REORDER_TIMEOUT_REMOVE ol_rx_reorder_timeout_remove
+#define OL_RX_REORDER_TIMEOUT_UPDATE ol_rx_reorder_timeout_update
+#define OL_RX_REORDER_TIMEOUT_PEER_TID_INIT(peer, tid) \
+ (peer)->tids_rx_reorder[(tid)].timeout.active = 0
+#define OL_RX_REORDER_TIMEOUT_MUTEX_LOCK(pdev) \
+ cdf_spin_lock(&(pdev)->rx.mutex)
+#define OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev) \
+ cdf_spin_unlock(&(pdev)->rx.mutex)
+
+#else
+
+#define OL_RX_REORDER_TIMEOUT_INIT(pdev) /* no-op */
+#define OL_RX_REORDER_TIMEOUT_PEER_CLEANUP(peer) /* no-op */
+#define OL_RX_REORDER_TIMEOUT_CLEANUP(pdev) /* no-op */
+#define OL_RX_REORDER_TIMEOUT_REMOVE(peer, tid) /* no-op */
+#define OL_RX_REORDER_TIMEOUT_UPDATE(peer, tid) /* no-op */
+#define OL_RX_REORDER_TIMEOUT_PEER_TID_INIT(peer, tid) /* no-op */
+#define OL_RX_REORDER_TIMEOUT_MUTEX_LOCK(pdev) /* no-op */
+#define OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev) /* no-op */
+
+#endif /* QCA_SUPPORT_OL_RX_REORDER_TIMEOUT */
+
+#endif /* _OL_RX_REORDER_TIMEOUT__H_ */
diff --git a/dp/txrx/ol_tx.c b/dp/txrx/ol_tx.c
new file mode 100644
index 000000000000..0f718389d702
--- /dev/null
+++ b/dp/txrx/ol_tx.c
@@ -0,0 +1,1368 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/* OS abstraction libraries */
+#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+#include <cdf_atomic.h> /* cdf_atomic_read, etc. */
+#include <cdf_util.h> /* cdf_unlikely */
+
+/* APIs for other modules */
+#include <htt.h> /* HTT_TX_EXT_TID_MGMT */
+#include <ol_htt_tx_api.h> /* htt_tx_desc_tid */
+#include <ol_txrx_api.h> /* ol_txrx_vdev_handle */
+#include <ol_txrx_ctrl_api.h> /* ol_txrx_sync */
+
+/* internal header files relevant for all systems */
+#include <ol_txrx_internal.h> /* TXRX_ASSERT1 */
+#include <ol_txrx_types.h> /* pdev stats */
+#include <ol_tx_desc.h> /* ol_tx_desc */
+#include <ol_tx_send.h> /* ol_tx_send */
+#include <ol_txrx.h>
+
+/* internal header files relevant only for HL systems */
+#include <ol_tx_queue.h> /* ol_tx_enqueue */
+
+/* internal header files relevant only for specific systems (Pronto) */
+#include <ol_txrx_encap.h> /* OL_TX_ENCAP, etc */
+#include <ol_tx.h>
+
+#ifdef WLAN_FEATURE_FASTPATH
+#include <hif.h> /* HIF_DEVICE */
+#include <htc_api.h> /* Layering violation, but required for fast path */
+#include <htt_internal.h>
+#include <htt_types.h> /* htc_endpoint */
+
+int ce_send_fast(struct CE_handle *copyeng, cdf_nbuf_t *msdus,
+ unsigned int num_msdus, unsigned int transfer_id);
+#endif /* WLAN_FEATURE_FASTPATH */
+
+/*
+ * The TXRX module doesn't accept tx frames unless the target has
+ * enough descriptors for them.
+ * For LL, the TXRX descriptor pool is sized to match the target's
+ * descriptor pool. Hence, if the descriptor allocation in TXRX
+ * succeeds, that guarantees that the target has room to accept
+ * the new tx frame.
+ *
+ * NOTE: on allocation failure this macro executes "return msdu" in the
+ * ENCLOSING function -- it may only be used inside functions returning
+ * cdf_nbuf_t, where returning the current msdu means "the list of
+ * unaccepted MSDUs" (the ol_tx_ll send-path convention).
+ */
+#define ol_tx_prepare_ll(tx_desc, vdev, msdu, msdu_info) \
+ do { \
+ struct ol_txrx_pdev_t *pdev = vdev->pdev; \
+ (msdu_info)->htt.info.frame_type = pdev->htt_pkt_type; \
+ tx_desc = ol_tx_desc_ll(pdev, vdev, msdu, msdu_info); \
+ if (cdf_unlikely(!tx_desc)) { \
+ TXRX_STATS_MSDU_LIST_INCR( \
+ pdev, tx.dropped.host_reject, msdu); \
+ return msdu; /* the list of unaccepted MSDUs */ \
+ } \
+ } while (0)
+
+/*
+ * Populate msdu_info.tso_info for one msdu.  If the nbuf is TSO,
+ * allocate one segment element per TSO segment, fill them from the
+ * nbuf via cdf_nbuf_get_tso_info(), and point curr_seg at the head of
+ * the segment list; otherwise mark the msdu as a single non-TSO
+ * "segment".
+ *
+ * NOTE(review): when ol_tso_alloc_segment() fails, num_seg is NOT
+ * decremented, so persistent allocation failure makes the while loop
+ * spin forever -- confirm the segment pool can always replenish, or
+ * add a bail-out.
+ */
+#define ol_tx_prepare_tso(vdev, msdu, msdu_info) \
+ do { \
+ msdu_info.tso_info.curr_seg = NULL; \
+ if (cdf_nbuf_is_tso(msdu)) { \
+ int num_seg = cdf_nbuf_get_tso_num_seg(msdu); \
+ msdu_info.tso_info.tso_seg_list = NULL; \
+ msdu_info.tso_info.num_segs = num_seg; \
+ while (num_seg) { \
+ struct cdf_tso_seg_elem_t *tso_seg = \
+ ol_tso_alloc_segment(vdev->pdev); \
+ if (tso_seg) { \
+ tso_seg->next = \
+ msdu_info.tso_info.tso_seg_list; \
+ msdu_info.tso_info.tso_seg_list \
+ = tso_seg; \
+ num_seg--; \
+ } else {\
+ cdf_print("TSO seg alloc failed!\n"); \
+ } \
+ } \
+ cdf_nbuf_get_tso_info(vdev->pdev->osdev, \
+ msdu, &msdu_info.tso_info); \
+ msdu_info.tso_info.curr_seg = \
+ msdu_info.tso_info.tso_seg_list; \
+ num_seg = msdu_info.tso_info.num_segs; \
+ } else { \
+ msdu_info.tso_info.is_tso = 0; \
+ msdu_info.tso_info.num_segs = 1; \
+ } \
+ } while (0)
+
+/**
+ * ol_tx_send_data_frame() - send one data frame on behalf of a station
+ * @sta_id: local station (peer) id, looked up in the pdev peer table
+ * @skb: the frame to transmit (single nbuf, not a list)
+ * @proto_type: protocol type tag recorded in the nbuf trace
+ *
+ * Validates the txrx context, the station id and the peer state, DMA-maps
+ * the skb, then hands it to the low-latency tx path (OL_TX_LL).
+ *
+ * Return: NULL on success; the original skb when it was not accepted
+ * (caller retains ownership and must free it).  On the tx-reject path
+ * the skb is unmapped before being returned; on the earlier validation
+ * failures it was never mapped.
+ */
+cdf_nbuf_t ol_tx_send_data_frame(uint8_t sta_id, cdf_nbuf_t skb,
+ uint8_t proto_type)
+{
+ void *cdf_ctx = cds_get_context(CDF_MODULE_ID_CDF_DEVICE);
+ struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+ struct ol_txrx_peer_t *peer;
+ cdf_nbuf_t ret;
+ CDF_STATUS status;
+
+ if (cdf_unlikely(!pdev)) {
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
+ "%s:pdev is null", __func__);
+ return skb;
+ }
+ if (cdf_unlikely(!cdf_ctx)) {
+ TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+ "%s:cdf_ctx is null", __func__);
+ return skb;
+ }
+
+ if (sta_id >= WLAN_MAX_STA_COUNT) {
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
+ "%s:Invalid sta id", __func__);
+ return skb;
+ }
+
+ peer = ol_txrx_peer_find_by_local_id(pdev, sta_id);
+ if (!peer) {
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
+ "%s:Invalid peer", __func__);
+ return skb;
+ }
+
+ /* frames are only accepted once the peer is at least in conn state */
+ if (peer->state < ol_txrx_peer_state_conn) {
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
+ "%s: station to be yet registered..dropping pkt", __func__);
+ return skb;
+ }
+
+ status = cdf_nbuf_map_single(cdf_ctx, skb, CDF_DMA_TO_DEVICE);
+ if (cdf_unlikely(status != CDF_STATUS_SUCCESS)) {
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
+ "%s: nbuf map failed", __func__);
+ return skb;
+ }
+
+ cdf_nbuf_trace_set_proto_type(skb, proto_type);
+
+ /* for IPv4 with HW checksum offload enabled, promote the partial
+ * checksum to "complete" so the target finishes it */
+ if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
+ && (cdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
+ && (cdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
+ cdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);
+
+ /* Terminate the (single-element) list of tx frames */
+ cdf_nbuf_set_next(skb, NULL);
+ ret = OL_TX_LL(peer->vdev, skb);
+ if (ret) {
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_WARN,
+ "%s: Failed to tx", __func__);
+ /* undo the DMA mapping before giving the frame back */
+ cdf_nbuf_unmap_single(cdf_ctx, ret, CDF_DMA_TO_DEVICE);
+ return ret;
+ }
+
+ return NULL;
+}
+
+#ifdef IPA_OFFLOAD
+/**
+ * ol_tx_send_ipa_data_frame() - send one IPA-originated data frame
+ * @vdev: vdev on which to transmit (opaque; cast to ol_txrx_vdev_t)
+ * @skb: the frame to transmit (single nbuf; already DMA-mapped by the
+ *       IPA path -- this function performs no map/unmap)
+ *
+ * Return: NULL on success; the skb when it was not accepted (caller
+ * retains ownership).
+ */
+cdf_nbuf_t ol_tx_send_ipa_data_frame(void *vdev,
+ cdf_nbuf_t skb)
+{
+ ol_txrx_pdev_handle pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+ cdf_nbuf_t ret;
+
+ if (cdf_unlikely(!pdev)) {
+ TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+ "%s: pdev is NULL", __func__);
+ return skb;
+ }
+
+ /* for IPv4 with HW checksum offload enabled, promote the partial
+ * checksum to "complete" so the target finishes it */
+ if ((ol_cfg_is_ip_tcp_udp_checksum_offload_enabled(pdev->ctrl_pdev))
+ && (cdf_nbuf_get_protocol(skb) == htons(ETH_P_IP))
+ && (cdf_nbuf_get_ip_summed(skb) == CHECKSUM_PARTIAL))
+ cdf_nbuf_set_ip_summed(skb, CHECKSUM_COMPLETE);
+
+ /* Terminate the (single-element) list of tx frames */
+ cdf_nbuf_set_next(skb, NULL);
+ ret = OL_TX_LL((struct ol_txrx_vdev_t *)vdev, skb);
+ if (ret) {
+ TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
+ "%s: Failed to tx", __func__);
+ return ret;
+ }
+
+ return NULL;
+}
+#endif
+
+
+#if defined(FEATURE_TSO)
+/*
+ * TSO-enabled low-latency tx entry point: walk the msdu list, expand
+ * each TSO msdu into its segments, allocate a tx descriptor per
+ * segment, and hand each to ol_tx_send().
+ * Returns NULL when all MSDUs were accepted, or the remainder of the
+ * list starting at the first rejected msdu (via the hidden return in
+ * ol_tx_prepare_ll on descriptor exhaustion).
+ */
+cdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
+{
+ cdf_nbuf_t msdu = msdu_list;
+ struct ol_txrx_msdu_info_t msdu_info;
+
+ msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
+ msdu_info.htt.action.tx_comp_req = 0;
+ /*
+ * The msdu_list variable could be used instead of the msdu var,
+ * but just to clarify which operations are done on a single MSDU
+ * vs. a list of MSDUs, use a distinct variable for single MSDUs
+ * within the list.
+ */
+ while (msdu) {
+ cdf_nbuf_t next;
+ struct ol_tx_desc_t *tx_desc;
+ int segments = 1;
+
+ msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
+ msdu_info.peer = NULL;
+
+ ol_tx_prepare_tso(vdev, msdu, msdu_info);
+ segments = msdu_info.tso_info.num_segs;
+
+ /*
+ * The netbuf may get linked into a different list inside the
+ * ol_tx_send function, so store the next pointer before the
+ * tx_send call.
+ */
+ next = cdf_nbuf_next(msdu);
+ /* init the current segment to the 1st segment in the list */
+ while (segments) {
+
+ /* point the nbuf's mapped paddr at this segment's
+ * first fragment before sending */
+ if (msdu_info.tso_info.curr_seg)
+ NBUF_MAPPED_PADDR_LO(msdu) = msdu_info.tso_info.
+ curr_seg->seg.tso_frags[0].paddr_low_32;
+
+ segments--;
+
+ /**
+ * if this is a jumbo nbuf, then increment the number
+ * of nbuf users for each additional segment of the msdu.
+ * This will ensure that the skb is freed only after
+ * receiving tx completion for all segments of an nbuf
+ */
+ if (segments)
+ cdf_nbuf_inc_users(msdu);
+
+ ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
+
+ /*
+ * If debug display is enabled, show the meta-data being
+ * downloaded to the target via the HTT tx descriptor.
+ */
+ htt_tx_desc_display(tx_desc->htt_tx_desc);
+
+ ol_tx_send(vdev->pdev, tx_desc, msdu);
+
+ /* advance to the next TSO segment of this msdu */
+ if (msdu_info.tso_info.curr_seg) {
+ msdu_info.tso_info.curr_seg =
+ msdu_info.tso_info.curr_seg->next;
+ }
+
+ cdf_nbuf_dec_num_frags(msdu);
+
+ if (msdu_info.tso_info.is_tso) {
+ TXRX_STATS_TSO_INC_SEG(vdev->pdev);
+ TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
+ }
+ } /* while segments */
+
+ msdu = next;
+ if (msdu_info.tso_info.is_tso) {
+ TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
+ TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
+ }
+ } /* while msdus */
+ return NULL; /* all MSDUs were accepted */
+}
+#else /* TSO */
+
+/*
+ * Non-TSO build of the low-latency tx entry point: one tx descriptor
+ * per msdu, each handed to ol_tx_send().
+ * Returns NULL when all MSDUs were accepted, or the remainder of the
+ * list starting at the first rejected msdu (via the hidden return in
+ * ol_tx_prepare_ll on descriptor exhaustion).
+ */
+cdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
+{
+ cdf_nbuf_t msdu = msdu_list;
+ struct ol_txrx_msdu_info_t msdu_info;
+
+ msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
+ msdu_info.htt.action.tx_comp_req = 0;
+ msdu_info.tso_info.is_tso = 0;
+ /*
+ * The msdu_list variable could be used instead of the msdu var,
+ * but just to clarify which operations are done on a single MSDU
+ * vs. a list of MSDUs, use a distinct variable for single MSDUs
+ * within the list.
+ */
+ while (msdu) {
+ cdf_nbuf_t next;
+ struct ol_tx_desc_t *tx_desc;
+
+ msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
+ msdu_info.peer = NULL;
+ ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
+
+ /*
+ * If debug display is enabled, show the meta-data being
+ * downloaded to the target via the HTT tx descriptor.
+ */
+ htt_tx_desc_display(tx_desc->htt_tx_desc);
+ /*
+ * The netbuf may get linked into a different list inside the
+ * ol_tx_send function, so store the next pointer before the
+ * tx_send call.
+ */
+ next = cdf_nbuf_next(msdu);
+ ol_tx_send(vdev->pdev, tx_desc, msdu);
+ msdu = next;
+ }
+ return NULL; /* all MSDUs were accepted */
+}
+#endif /* TSO */
+
+#ifdef WLAN_FEATURE_FASTPATH
+/**
+ * ol_tx_prepare_ll_fast() Alloc and prepare Tx descriptor
+ *
+ * Allocate and prepare a Tx descriptor with msdu and fragment descriptor
+ * information, and fill in the HTC header in front of the HTT descriptor
+ * so the frame can be posted directly via ce_send_fast().
+ *
+ * @pdev: pointer to ol pdev handle
+ * @vdev: pointer to ol vdev handle
+ * @msdu: linked list of msdu packets
+ * @pkt_download_len: packet download length (clamped to the msdu length)
+ * @ep_id: endpoint ID
+ * @msdu_info: Handle to msdu_info
+ *
+ * Return: Pointer to Tx descriptor, or NULL if allocation failed
+ */
+static inline struct ol_tx_desc_t *
+ol_tx_prepare_ll_fast(struct ol_txrx_pdev_t *pdev,
+ ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu,
+ uint32_t pkt_download_len, uint32_t ep_id,
+ struct ol_txrx_msdu_info_t *msdu_info)
+{
+ struct ol_tx_desc_t *tx_desc = NULL;
+ uint32_t *htt_tx_desc;
+ void *htc_hdr_vaddr;
+ u_int32_t num_frags, i;
+
+ tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
+ if (cdf_unlikely(!tx_desc))
+ return NULL;
+
+ tx_desc->netbuf = msdu;
+ if (msdu_info->tso_info.is_tso) {
+ tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
+ tx_desc->pkt_type = ol_tx_frm_tso;
+ TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, msdu);
+ } else {
+ tx_desc->pkt_type = ol_tx_frm_std;
+ }
+
+ htt_tx_desc = tx_desc->htt_tx_desc;
+
+ /* Make sure frags num is set to 0 */
+ /*
+ * Do this here rather than in hardstart, so
+ * that we can hopefully take only one cache-miss while
+ * accessing skb->cb.
+ */
+
+ /* HTT Header */
+ /* TODO : Take care of multiple fragments */
+
+ /* TODO: Precompute and store paddr in ol_tx_desc_t */
+ /* Virtual address of the HTT/HTC header, added by driver */
+ htc_hdr_vaddr = (char *)htt_tx_desc - HTC_HEADER_LEN;
+ htt_tx_desc_init(pdev->htt_pdev, htt_tx_desc,
+ tx_desc->htt_tx_desc_paddr, tx_desc->id, msdu,
+ &msdu_info->htt, &msdu_info->tso_info,
+ NULL, vdev->opmode == wlan_op_mode_ocb);
+
+ num_frags = cdf_nbuf_get_num_frags(msdu);
+ /* num_frags are expected to be 2 max */
+ num_frags = (num_frags > CVG_NBUF_MAX_EXTRA_FRAGS) ?
+ CVG_NBUF_MAX_EXTRA_FRAGS : num_frags;
+#if defined(HELIUMPLUS_PADDR64)
+ /*
+ * Use num_frags - 1, since 1 frag is used to store
+ * the HTT/HTC descriptor
+ * Refer to htt_tx_desc_init()
+ */
+ htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
+ num_frags - 1);
+#else /* ! defined(HELIUMPLUSPADDR64) */
+ htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
+ num_frags-1);
+#endif /* defined(HELIUMPLUS_PADDR64) */
+ if (msdu_info->tso_info.is_tso) {
+ htt_tx_desc_fill_tso_info(pdev->htt_pdev,
+ tx_desc->htt_frag_desc, &msdu_info->tso_info);
+ TXRX_STATS_TSO_SEG_UPDATE(pdev,
+ msdu_info->tso_info.curr_seg->seg);
+ } else {
+ /* copy the extra (non-HTT) fragments into the frag desc;
+ * frag 0 holds the HTT/HTC descriptor, hence i - 1 below */
+ for (i = 1; i < num_frags; i++) {
+ cdf_size_t frag_len;
+ u_int32_t frag_paddr;
+
+ frag_len = cdf_nbuf_get_frag_len(msdu, i);
+ frag_paddr = cdf_nbuf_get_frag_paddr_lo(msdu, i);
+#if defined(HELIUMPLUS_PADDR64)
+ htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc,
+ i - 1, frag_paddr, frag_len);
+#if defined(HELIUMPLUS_DEBUG)
+ /* NOTE(review): dump_pkt() is passed "netbuf", which is
+ * not declared in this function (the variable here is
+ * "msdu") -- this debug build likely fails to compile;
+ * confirm and fix in a follow-up. */
+ cdf_print("%s:%d: htt_fdesc=%p frag_paddr=%u len=%zu\n",
+ __func__, __LINE__, tx_desc->htt_frag_desc,
+ frag_paddr, frag_len);
+ dump_pkt(netbuf, frag_paddr, 64);
+#endif /* HELIUMPLUS_DEBUG */
+#else /* ! defined(HELIUMPLUSPADDR64) */
+ htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_tx_desc,
+ i - 1, frag_paddr, frag_len);
+#endif /* defined(HELIUMPLUS_PADDR64) */
+ }
+ }
+
+ /*
+ * Do we want to turn on word_stream bit-map here ? For linux, non-TSO
+ * this is not required. We still have to mark the swap bit correctly,
+ * when posting to the ring
+ */
+ /* Check to make sure, data download length is correct */
+
+ /*
+ * TODO : Can we remove this check and always download a fixed length ?
+ * */
+ if (cdf_unlikely(cdf_nbuf_len(msdu) < pkt_download_len))
+ pkt_download_len = cdf_nbuf_len(msdu);
+
+ /* Fill the HTC header information */
+ /*
+ * Passing 0 as the seq_no field, we can probably get away
+ * with it for the time being, since this is not checked in f/w
+ */
+ /* TODO : Prefill this, look at multi-fragment case */
+ HTC_TX_DESC_FILL(htc_hdr_vaddr, pkt_download_len, ep_id, 0);
+
+ return tx_desc;
+}
+#if defined(FEATURE_TSO)
+/**
+ * ol_tx_ll_fast() Update metadata information and send msdu to HIF/CE
+ *
+ * TSO-enabled fastpath: expands each TSO msdu into segments, prepares a
+ * tx descriptor per segment via ol_tx_prepare_ll_fast(), and posts each
+ * directly to the copy engine with ce_send_fast(), bypassing HTC.
+ *
+ * @vdev: handle to ol_txrx_vdev_t
+ * @msdu_list: msdu list to be sent out.
+ *
+ * Return: on success return NULL, pointer to nbuf when it fails to send.
+ */
+cdf_nbuf_t
+ol_tx_ll_fast(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
+{
+ cdf_nbuf_t msdu = msdu_list;
+ struct ol_txrx_pdev_t *pdev = vdev->pdev;
+ uint32_t pkt_download_len =
+ ((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
+ uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
+ struct ol_txrx_msdu_info_t msdu_info;
+
+ msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
+ msdu_info.htt.action.tx_comp_req = 0;
+ /*
+ * The msdu_list variable could be used instead of the msdu var,
+ * but just to clarify which operations are done on a single MSDU
+ * vs. a list of MSDUs, use a distinct variable for single MSDUs
+ * within the list.
+ */
+ while (msdu) {
+ cdf_nbuf_t next;
+ struct ol_tx_desc_t *tx_desc;
+ int segments = 1;
+
+ msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
+ msdu_info.peer = NULL;
+
+ ol_tx_prepare_tso(vdev, msdu, msdu_info);
+ segments = msdu_info.tso_info.num_segs;
+
+ /*
+ * The netbuf may get linked into a different list
+ * inside the ce_send_fast function, so store the next
+ * pointer before the ce_send call.
+ */
+ next = cdf_nbuf_next(msdu);
+ /* init the current segment to the 1st segment in the list */
+ while (segments) {
+
+ /* point the nbuf's mapped paddr at this segment's
+ * first fragment before sending */
+ if (msdu_info.tso_info.curr_seg)
+ NBUF_MAPPED_PADDR_LO(msdu) = msdu_info.tso_info.
+ curr_seg->seg.tso_frags[0].paddr_low_32;
+
+ segments--;
+
+ /**
+ * if this is a jumbo nbuf, then increment the number
+ * of nbuf users for each additional segment of the msdu.
+ * This will ensure that the skb is freed only after
+ * receiving tx completion for all segments of an nbuf
+ */
+ if (segments)
+ cdf_nbuf_inc_users(msdu);
+
+ msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
+ msdu_info.htt.info.vdev_id = vdev->vdev_id;
+ msdu_info.htt.action.cksum_offload =
+ cdf_nbuf_get_tx_cksum(msdu);
+ /* derive the encryption action from the 802.1X
+ * exemption type carried in the nbuf */
+ switch (cdf_nbuf_get_exemption_type(msdu)) {
+ case CDF_NBUF_EXEMPT_NO_EXEMPTION:
+ case CDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
+ /* We want to encrypt this frame */
+ msdu_info.htt.action.do_encrypt = 1;
+ break;
+ case CDF_NBUF_EXEMPT_ALWAYS:
+ /* We don't want to encrypt this frame */
+ msdu_info.htt.action.do_encrypt = 0;
+ break;
+ default:
+ msdu_info.htt.action.do_encrypt = 1;
+ cdf_assert(0);
+ break;
+ }
+
+ tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
+ pkt_download_len, ep_id,
+ &msdu_info);
+
+ if (cdf_likely(tx_desc)) {
+ /*
+ * If debug display is enabled, show the meta
+ * data being downloaded to the target via the
+ * HTT tx descriptor.
+ */
+ htt_tx_desc_display(tx_desc->htt_tx_desc);
+ if ((0 == ce_send_fast(pdev->ce_tx_hdl, &msdu,
+ 1, ep_id))) {
+ /*
+ * The packet could not be sent.
+ * Free the descriptor, return the
+ * packet to the caller.
+ */
+ ol_tx_desc_free(pdev, tx_desc);
+ return msdu;
+ }
+ if (msdu_info.tso_info.curr_seg) {
+ msdu_info.tso_info.curr_seg =
+ msdu_info.tso_info.curr_seg->next;
+ }
+
+ if (msdu_info.tso_info.is_tso) {
+ cdf_nbuf_dec_num_frags(msdu);
+ TXRX_STATS_TSO_INC_SEG(vdev->pdev);
+ TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
+ }
+ } else {
+ TXRX_STATS_MSDU_LIST_INCR(
+ pdev, tx.dropped.host_reject, msdu);
+ /* the list of unaccepted MSDUs */
+ return msdu;
+ }
+ } /* while segments */
+
+ msdu = next;
+ if (msdu_info.tso_info.is_tso) {
+ TXRX_STATS_TSO_INC_MSDU_IDX(vdev->pdev);
+ TXRX_STATS_TSO_RESET_MSDU(vdev->pdev);
+ }
+ } /* while msdus */
+ return NULL; /* all MSDUs were accepted */
+}
+#else
+/*
+ * Non-TSO fastpath: for each msdu, fill per-frame HTT metadata
+ * (TID, vdev id, checksum offload, encryption action), prepare a tx
+ * descriptor via ol_tx_prepare_ll_fast(), and post it directly to the
+ * copy engine with ce_send_fast().
+ * Returns NULL when every msdu was accepted, otherwise the remainder
+ * of the list starting at the first msdu that could not be sent.
+ */
+cdf_nbuf_t
+ol_tx_ll_fast(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
+{
+ cdf_nbuf_t msdu = msdu_list;
+ struct ol_txrx_pdev_t *pdev = vdev->pdev;
+ uint32_t pkt_download_len =
+ ((struct htt_pdev_t *)(pdev->htt_pdev))->download_len;
+ uint32_t ep_id = HTT_EPID_GET(pdev->htt_pdev);
+ struct ol_txrx_msdu_info_t msdu_info;
+
+ msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
+ msdu_info.htt.action.tx_comp_req = 0;
+ msdu_info.tso_info.is_tso = 0;
+ /*
+ * The msdu_list variable could be used instead of the msdu var,
+ * but just to clarify which operations are done on a single MSDU
+ * vs. a list of MSDUs, use a distinct variable for single MSDUs
+ * within the list.
+ */
+ while (msdu) {
+ cdf_nbuf_t next;
+ struct ol_tx_desc_t *tx_desc;
+
+ msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
+ msdu_info.peer = NULL;
+
+ msdu_info.htt.info.frame_type = pdev->htt_pkt_type;
+ msdu_info.htt.info.vdev_id = vdev->vdev_id;
+ msdu_info.htt.action.cksum_offload =
+ cdf_nbuf_get_tx_cksum(msdu);
+ /* derive the encryption action from the 802.1X
+ * exemption type carried in the nbuf */
+ switch (cdf_nbuf_get_exemption_type(msdu)) {
+ case CDF_NBUF_EXEMPT_NO_EXEMPTION:
+ case CDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
+ /* We want to encrypt this frame */
+ msdu_info.htt.action.do_encrypt = 1;
+ break;
+ case CDF_NBUF_EXEMPT_ALWAYS:
+ /* We don't want to encrypt this frame */
+ msdu_info.htt.action.do_encrypt = 0;
+ break;
+ default:
+ msdu_info.htt.action.do_encrypt = 1;
+ cdf_assert(0);
+ break;
+ }
+
+ tx_desc = ol_tx_prepare_ll_fast(pdev, vdev, msdu,
+ pkt_download_len, ep_id,
+ &msdu_info);
+
+ if (cdf_likely(tx_desc)) {
+ /*
+ * If debug display is enabled, show the meta-data being
+ * downloaded to the target via the HTT tx descriptor.
+ */
+ htt_tx_desc_display(tx_desc->htt_tx_desc);
+ /*
+ * The netbuf may get linked into a different list
+ * inside the ce_send_fast function, so store the next
+ * pointer before the ce_send call.
+ */
+ next = cdf_nbuf_next(msdu);
+ if ((0 == ce_send_fast(pdev->ce_tx_hdl, &msdu, 1,
+ ep_id))) {
+ /* The packet could not be sent */
+ /* Free the descriptor, return the packet to the
+ * caller */
+ ol_tx_desc_free(pdev, tx_desc);
+ return msdu;
+ }
+ msdu = next;
+ } else {
+ TXRX_STATS_MSDU_LIST_INCR(
+ pdev, tx.dropped.host_reject, msdu);
+ return msdu; /* the list of unaccepted MSDUs */
+ }
+ }
+
+ return NULL; /* all MSDUs were accepted */
+}
+#endif /* FEATURE_TSO */
+#endif /* WLAN_FEATURE_FASTPATH */
+
+#ifdef WLAN_FEATURE_FASTPATH
+/**
+ * ol_tx_ll_wrapper() wrapper to ol_tx_ll
+ *
+ * Routes the msdu list to the copy-engine fastpath (ol_tx_ll_fast)
+ * when the HIF device reports fastpath mode is on, otherwise to the
+ * regular ol_tx_ll path.  Returns whatever the chosen path returns
+ * (NULL, or the list of unaccepted MSDUs).
+ */
+static inline cdf_nbuf_t
+ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
+{
+ struct ol_softc *hif_device =
+ (struct ol_softc *)cds_get_context(CDF_MODULE_ID_HIF);
+
+ if (cdf_likely(hif_device && hif_device->fastpath_mode_on))
+ msdu_list = ol_tx_ll_fast(vdev, msdu_list);
+ else
+ msdu_list = ol_tx_ll(vdev, msdu_list);
+
+ return msdu_list;
+}
+#else
+/* Non-fastpath build: forward straight to ol_tx_ll. */
+static inline cdf_nbuf_t
+ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
+{
+ return ol_tx_ll(vdev, msdu_list);
+}
+#endif /* WLAN_FEATURE_FASTPATH */
+
+#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
+
+#define OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN 400
+#define OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS 5
+/*
+ * Drain as much of the vdev's flow-control pause queue as the tx
+ * descriptor pool allows, under the vdev's ll_pause mutex.
+ * Frames are dequeued one at a time and pushed through
+ * ol_tx_ll_wrapper(); any frame rejected there is unmapped and dropped
+ * (see inline comment).  If a backlog remains, the pause-queue timer is
+ * re-armed to try again shortly.  No-op while the vdev is paused.
+ */
+static void ol_tx_vdev_ll_pause_queue_send_base(struct ol_txrx_vdev_t *vdev)
+{
+ int max_to_accept;
+
+ cdf_spin_lock_bh(&vdev->ll_pause.mutex);
+ if (vdev->ll_pause.paused_reason) {
+ cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+ return;
+ }
+
+ /*
+ * Send as much of the backlog as possible, but leave some margin
+ * of unallocated tx descriptors that can be used for new frames
+ * being transmitted by other vdevs.
+ * Ideally there would be a scheduler, which would not only leave
+ * some margin for new frames for other vdevs, but also would
+ * fairly apportion the tx descriptors between multiple vdevs that
+ * have backlogs in their pause queues.
+ * However, the fairness benefit of having a scheduler for frames
+ * from multiple vdev's pause queues is not sufficient to outweigh
+ * the extra complexity.
+ */
+ max_to_accept = vdev->pdev->tx_desc.num_free -
+ OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN;
+ while (max_to_accept > 0 && vdev->ll_pause.txq.depth) {
+ cdf_nbuf_t tx_msdu;
+ max_to_accept--;
+ vdev->ll_pause.txq.depth--;
+ tx_msdu = vdev->ll_pause.txq.head;
+ if (tx_msdu) {
+ /* pop the head frame off the pause queue */
+ vdev->ll_pause.txq.head = cdf_nbuf_next(tx_msdu);
+ if (NULL == vdev->ll_pause.txq.head)
+ vdev->ll_pause.txq.tail = NULL;
+ cdf_nbuf_set_next(tx_msdu, NULL);
+ NBUF_UPDATE_TX_PKT_COUNT(tx_msdu,
+ NBUF_TX_PKT_TXRX_DEQUEUE);
+ tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
+ /*
+ * It is unexpected that ol_tx_ll would reject the frame
+ * since we checked that there's room for it, though
+ * there's an infinitesimal possibility that between the
+ * time we checked the room available and now, a
+ * concurrent batch of tx frames used up all the room.
+ * For simplicity, just drop the frame.
+ */
+ if (tx_msdu) {
+ cdf_nbuf_unmap(vdev->pdev->osdev, tx_msdu,
+ CDF_DMA_TO_DEVICE);
+ cdf_nbuf_tx_free(tx_msdu, NBUF_PKT_ERROR);
+ }
+ }
+ }
+ if (vdev->ll_pause.txq.depth) {
+ /* backlog remains: retry after a short delay */
+ cdf_softirq_timer_cancel(&vdev->ll_pause.timer);
+ cdf_softirq_timer_start(&vdev->ll_pause.timer,
+ OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
+ vdev->ll_pause.is_q_timer_on = true;
+ if (vdev->ll_pause.txq.depth >= vdev->ll_pause.max_q_depth)
+ vdev->ll_pause.q_overflow_cnt++;
+ }
+
+ cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+}
+
/**
 * ol_tx_vdev_pause_queue_append() - enqueue frames on a vdev's pause queue
 * @vdev: virtual device whose pause queue receives the frames
 * @msdu_list: linked list of frames to enqueue
 * @start_timer: if non-zero, (re)arm the pause-queue flush timer
 *
 * Appends frames until the queue reaches ll_pause.max_q_depth.
 * Holds ll_pause.mutex for the whole operation.
 *
 * Return: the frames that did NOT fit in the queue (NULL if all fit);
 * the caller is responsible for disposing of the overflow.
 */
static cdf_nbuf_t
ol_tx_vdev_pause_queue_append(struct ol_txrx_vdev_t *vdev,
			      cdf_nbuf_t msdu_list, uint8_t start_timer)
{
	cdf_spin_lock_bh(&vdev->ll_pause.mutex);
	while (msdu_list &&
	       vdev->ll_pause.txq.depth < vdev->ll_pause.max_q_depth) {
		cdf_nbuf_t next = cdf_nbuf_next(msdu_list);
		NBUF_UPDATE_TX_PKT_COUNT(msdu_list, NBUF_TX_PKT_TXRX_ENQUEUE);
		DPTRACE(cdf_dp_trace(msdu_list,
				CDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD,
				(uint8_t *)(cdf_nbuf_data(msdu_list)),
				sizeof(cdf_nbuf_data(msdu_list))));

		vdev->ll_pause.txq.depth++;
		if (!vdev->ll_pause.txq.head) {
			/* queue was empty: this frame becomes head and tail */
			vdev->ll_pause.txq.head = msdu_list;
			vdev->ll_pause.txq.tail = msdu_list;
		} else {
			cdf_nbuf_set_next(vdev->ll_pause.txq.tail, msdu_list);
		}
		/* tail always advances to the frame just linked in */
		vdev->ll_pause.txq.tail = msdu_list;

		msdu_list = next;
	}
	/* terminate the queue so the last frame doesn't point at overflow */
	if (vdev->ll_pause.txq.tail)
		cdf_nbuf_set_next(vdev->ll_pause.txq.tail, NULL);

	if (start_timer) {
		cdf_softirq_timer_cancel(&vdev->ll_pause.timer);
		cdf_softirq_timer_start(&vdev->ll_pause.timer,
					OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
		vdev->ll_pause.is_q_timer_on = true;
	}
	cdf_spin_unlock_bh(&vdev->ll_pause.mutex);

	return msdu_list;
}
+
/**
 * ol_tx_ll_queue() - send frames, or queue them if the vdev is paused
 * @vdev: virtual device that the frames belong to
 * @msdu_list: linked list of frames to transmit
 *
 * If the vdev is paused, the frames are stored on the vdev's pause queue
 * (EAPOL/WAPI frames bypass a PEER_UNAUTHORIZED-only pause and are sent
 * immediately).  If not paused but a backlog or throttle exists, the new
 * frames are appended behind the backlog to preserve ordering.
 *
 * Return: frames rejected because the pause queue was full (NULL if none)
 */
cdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
{
	uint16_t eth_type;
	uint32_t paused_reason;

	if (msdu_list == NULL)
		return NULL;

	paused_reason = vdev->ll_pause.paused_reason;
	if (paused_reason) {
		/* only PEER_UNAUTHORIZED set: let EAPOL/WAPI through */
		if (cdf_unlikely((paused_reason &
				  OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED) ==
				 paused_reason)) {
			eth_type = (((struct ethernet_hdr_t *)
				     cdf_nbuf_data(msdu_list))->
				    ethertype[0] << 8) |
				   (((struct ethernet_hdr_t *)
				     cdf_nbuf_data(msdu_list))->ethertype[1]);
			if (ETHERTYPE_IS_EAPOL_WAPI(eth_type)) {
				msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
				return msdu_list;
			}
		}
		msdu_list = ol_tx_vdev_pause_queue_append(vdev, msdu_list, 1);
	} else {
		if (vdev->ll_pause.txq.depth > 0 ||
		    vdev->pdev->tx_throttle.current_throttle_level !=
		    THROTTLE_LEVEL_0) {
			/* not paused, but there is a backlog of frms
			   from a prior pause or throttle off phase */
			msdu_list = ol_tx_vdev_pause_queue_append(
				vdev, msdu_list, 0);
			/* if throttle is disabled or phase is "on",
			   send the frame */
			if (vdev->pdev->tx_throttle.current_throttle_level ==
			    THROTTLE_LEVEL_0 ||
			    vdev->pdev->tx_throttle.current_throttle_phase ==
			    THROTTLE_PHASE_ON) {
				/* send as many frames as possible
				   from the vdevs backlog */
				ol_tx_vdev_ll_pause_queue_send_base(vdev);
			}
		} else {
			/* not paused, no throttle and no backlog -
			   send the new frames */
			msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
		}
	}
	return msdu_list;
}
+
+/*
+ * Run through the transmit queues for all the vdevs and
+ * send the pending frames
+ */
+void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
+{
+ int max_to_send; /* tracks how many frames have been sent */
+ cdf_nbuf_t tx_msdu;
+ struct ol_txrx_vdev_t *vdev = NULL;
+ uint8_t more;
+
+ if (NULL == pdev)
+ return;
+
+ if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
+ return;
+
+ /* ensure that we send no more than tx_threshold frames at once */
+ max_to_send = pdev->tx_throttle.tx_threshold;
+
+ /* round robin through the vdev queues for the given pdev */
+
+ /* Potential improvement: download several frames from the same vdev
+ at a time, since it is more likely that those frames could be
+ aggregated together, remember which vdev was serviced last,
+ so the next call this function can resume the round-robin
+ traversing where the current invocation left off */
+ do {
+ more = 0;
+ TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
+
+ cdf_spin_lock_bh(&vdev->ll_pause.mutex);
+ if (vdev->ll_pause.txq.depth) {
+ if (vdev->ll_pause.paused_reason) {
+ cdf_spin_unlock_bh(&vdev->ll_pause.
+ mutex);
+ continue;
+ }
+
+ tx_msdu = vdev->ll_pause.txq.head;
+ if (NULL == tx_msdu) {
+ cdf_spin_unlock_bh(&vdev->ll_pause.
+ mutex);
+ continue;
+ }
+
+ max_to_send--;
+ vdev->ll_pause.txq.depth--;
+
+ vdev->ll_pause.txq.head =
+ cdf_nbuf_next(tx_msdu);
+
+ if (NULL == vdev->ll_pause.txq.head)
+ vdev->ll_pause.txq.tail = NULL;
+
+ cdf_nbuf_set_next(tx_msdu, NULL);
+ tx_msdu = ol_tx_ll_wrapper(vdev, tx_msdu);
+ /*
+ * It is unexpected that ol_tx_ll would reject
+ * the frame, since we checked that there's
+ * room for it, though there's an infinitesimal
+ * possibility that between the time we checked
+ * the room available and now, a concurrent
+ * batch of tx frames used up all the room.
+ * For simplicity, just drop the frame.
+ */
+ if (tx_msdu) {
+ cdf_nbuf_unmap(pdev->osdev, tx_msdu,
+ CDF_DMA_TO_DEVICE);
+ cdf_nbuf_tx_free(tx_msdu,
+ NBUF_PKT_ERROR);
+ }
+ }
+ /*check if there are more msdus to transmit */
+ if (vdev->ll_pause.txq.depth)
+ more = 1;
+ cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+ }
+ } while (more && max_to_send);
+
+ vdev = NULL;
+ TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
+ cdf_spin_lock_bh(&vdev->ll_pause.mutex);
+ if (vdev->ll_pause.txq.depth) {
+ cdf_softirq_timer_cancel(&pdev->tx_throttle.tx_timer);
+ cdf_softirq_timer_start(
+ &pdev->tx_throttle.tx_timer,
+ OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
+ cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+ return;
+ }
+ cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+ }
+}
+
+void ol_tx_vdev_ll_pause_queue_send(void *context)
+{
+ struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)context;
+ struct ol_txrx_pdev_t *pdev = vdev->pdev;
+
+ if (pdev->tx_throttle.current_throttle_level != THROTTLE_LEVEL_0 &&
+ pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF)
+ return;
+ ol_tx_vdev_ll_pause_queue_send_base(vdev);
+}
+#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
+
+static inline int ol_txrx_tx_is_raw(enum ol_tx_spec tx_spec)
+{
+ return
+ tx_spec &
+ (ol_tx_spec_raw | ol_tx_spec_no_aggr | ol_tx_spec_no_encrypt);
+}
+
+static inline uint8_t ol_txrx_tx_raw_subtype(enum ol_tx_spec tx_spec)
+{
+ uint8_t sub_type = 0x1; /* 802.11 MAC header present */
+
+ if (tx_spec & ol_tx_spec_no_aggr)
+ sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_AGGR_S;
+ if (tx_spec & ol_tx_spec_no_encrypt)
+ sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
+ if (tx_spec & ol_tx_spec_nwifi_no_encrypt)
+ sub_type |= 0x1 << HTT_TX_MSDU_DESC_RAW_SUBTYPE_NO_ENCRYPT_S;
+ return sub_type;
+}
+
/**
 * ol_tx_non_std_ll() - send a list of non-standard-format tx frames (LL path)
 * @vdev: virtual device that the frames belong to
 * @tx_spec: flags describing the non-standard handling required
 *           (no-free, TSO, native-wifi, raw variants)
 * @msdu_list: linked list of frames to transmit
 *
 * Return: NULL if all MSDUs were accepted; otherwise the remaining list
 */
cdf_nbuf_t
ol_tx_non_std_ll(ol_txrx_vdev_handle vdev,
		 enum ol_tx_spec tx_spec, cdf_nbuf_t msdu_list)
{
	cdf_nbuf_t msdu = msdu_list;
	htt_pdev_handle htt_pdev = vdev->pdev->htt_pdev;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.action.tx_comp_req = 0;

	/*
	 * The msdu_list variable could be used instead of the msdu var,
	 * but just to clarify which operations are done on a single MSDU
	 * vs. a list of MSDUs, use a distinct variable for single MSDUs
	 * within the list.
	 */
	while (msdu) {
		cdf_nbuf_t next;
		struct ol_tx_desc_t *tx_desc;

		msdu_info.htt.info.ext_tid = cdf_nbuf_get_tid(msdu);
		msdu_info.peer = NULL;
		msdu_info.tso_info.is_tso = 0;

		/* NOTE(review): ol_tx_prepare_ll is a macro defined elsewhere;
		 * it presumably allocates tx_desc and may return the list on
		 * allocation failure — confirm against its definition. */
		ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);

		/*
		 * The netbuf may get linked into a different list inside the
		 * ol_tx_send function, so store the next pointer before the
		 * tx_send call.
		 */
		next = cdf_nbuf_next(msdu);

		if (tx_spec != ol_tx_spec_std) {
			if (tx_spec & ol_tx_spec_no_free) {
				tx_desc->pkt_type = ol_tx_frm_no_free;
			} else if (tx_spec & ol_tx_spec_tso) {
				tx_desc->pkt_type = ol_tx_frm_tso;
			} else if (tx_spec & ol_tx_spec_nwifi_no_encrypt) {
				uint8_t sub_type =
					ol_txrx_tx_raw_subtype(tx_spec);
				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
						 htt_pkt_type_native_wifi,
						 sub_type);
			} else if (ol_txrx_tx_is_raw(tx_spec)) {
				/* different types of raw frames */
				uint8_t sub_type =
					ol_txrx_tx_raw_subtype(tx_spec);
				htt_tx_desc_type(htt_pdev, tx_desc->htt_tx_desc,
						 htt_pkt_type_raw, sub_type);
			}
		}
		/*
		 * If debug display is enabled, show the meta-data being
		 * downloaded to the target via the HTT tx descriptor.
		 */
		htt_tx_desc_display(tx_desc->htt_tx_desc);
		ol_tx_send(vdev->pdev, tx_desc, msdu);
		msdu = next;
	}
	return NULL;            /* all MSDUs were accepted */
}
+
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
/*
 * Run SW tx encapsulation on the frame; on failure return the tx descriptor
 * to the pool, drop any peer reference taken earlier, and jump to the
 * MSDU_LOOP_BOTTOM label in the caller to continue with the next frame.
 */
#define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) \
	do { \
		if (OL_TX_ENCAP(vdev, tx_desc, msdu, &tx_msdu_info) != A_OK) { \
			cdf_atomic_inc(&pdev->tx_queue.rsrc_cnt); \
			ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1); \
			if (tx_msdu_info.peer) { \
				/* remove the peer reference added above */ \
				ol_txrx_peer_unref_delete(tx_msdu_info.peer); \
			} \
			goto MSDU_LOOP_BOTTOM; \
		} \
	} while (0)
#else
/* SW encap disabled: encapsulation is expected to happen elsewhere */
#define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) /* no-op */
#endif

/* tx filtering is handled within the target FW */
#define TX_FILTER_CHECK(tx_msdu_info) 0 /* don't filter */
+
/**
 * parse_ocb_tx_header() - check for an OCB TX control header on a packet
 * and strip it off, optionally copying it out for the caller
 *
 * @msdu: Pointer to OS packet (cdf_nbuf_t)
 * @tx_ctrl: if non-NULL, receives a copy of the control header
 *
 * Return: true if there was no control header, or if a valid one was
 * parsed and removed; false if the header version was unrecognized.
 */
#define OCB_HEADER_VERSION     1
bool parse_ocb_tx_header(cdf_nbuf_t msdu,
			 struct ocb_tx_ctrl_hdr_t *tx_ctrl)
{
	struct ether_header *eth_hdr_p;
	struct ocb_tx_ctrl_hdr_t *tx_ctrl_hdr;

	/* Check if TX control header is present */
	eth_hdr_p = (struct ether_header *)cdf_nbuf_data(msdu);
	if (eth_hdr_p->ether_type != CDF_SWAP_U16(ETHERTYPE_OCB_TX))
		/* TX control header is not present. Nothing to do.. */
		return true;

	/* Remove the ethernet header */
	cdf_nbuf_pull_head(msdu, sizeof(struct ether_header));

	/* Parse the TX control header */
	tx_ctrl_hdr = (struct ocb_tx_ctrl_hdr_t *)cdf_nbuf_data(msdu);

	if (tx_ctrl_hdr->version == OCB_HEADER_VERSION) {
		if (tx_ctrl)
			cdf_mem_copy(tx_ctrl, tx_ctrl_hdr,
				     sizeof(*tx_ctrl_hdr));
	} else {
		/* The TX control header is invalid. */
		/* NOTE(review): the ethernet header was already pulled above
		 * and is NOT restored on this failure path, so the buffer is
		 * left mangled — confirm callers drop the frame in this case.
		 */
		return false;
	}

	/* Remove the TX control header */
	/* NOTE(review): tx_ctrl_hdr->length comes from the packet and is
	 * pulled without being validated against the buffer length — verify
	 * cdf_nbuf_pull_head bounds-checks internally. */
	cdf_nbuf_pull_head(msdu, tx_ctrl_hdr->length);
	return true;
}
+
/**
 * ol_tx_non_std() - entry point for non-standard tx frames
 * @vdev: virtual device that the frames belong to
 * @tx_spec: non-standard handling flags
 * @msdu_list: linked list of frames to transmit
 *
 * Thin wrapper: the LL (low-latency) path handles all non-std frames here.
 *
 * Return: NULL if all MSDUs were accepted; otherwise the remaining list
 */
cdf_nbuf_t
ol_tx_non_std(ol_txrx_vdev_handle vdev,
	      enum ol_tx_spec tx_spec, cdf_nbuf_t msdu_list)
{
	return ol_tx_non_std_ll(vdev, tx_spec, msdu_list);
}
+
+void
+ol_txrx_data_tx_cb_set(ol_txrx_vdev_handle vdev,
+ ol_txrx_data_tx_cb callback, void *ctxt)
+{
+ struct ol_txrx_pdev_t *pdev = vdev->pdev;
+ pdev->tx_data_callback.func = callback;
+ pdev->tx_data_callback.ctxt = ctxt;
+}
+
/**
 * ol_txrx_mgmt_tx_cb_set() - register management-frame tx callbacks
 * @pdev: physical device storing the callback table
 * @type: management frame type index (< OL_TXRX_MGMT_NUM_TYPES)
 * @download_cb: called when the frame download to the target completes
 * @ota_ack_cb: called when the over-the-air tx completion is reported
 * @ctxt: opaque context passed back to both callbacks
 */
void
ol_txrx_mgmt_tx_cb_set(ol_txrx_pdev_handle pdev,
		       uint8_t type,
		       ol_txrx_mgmt_tx_cb download_cb,
		       ol_txrx_mgmt_tx_cb ota_ack_cb, void *ctxt)
{
	TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
	pdev->tx_mgmt.callbacks[type].download_cb = download_cb;
	pdev->tx_mgmt.callbacks[type].ota_ack_cb = ota_ack_cb;
	pdev->tx_mgmt.callbacks[type].ctxt = ctxt;
}
+
+#if defined(HELIUMPLUS_PADDR64)
+void dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc)
+{
+ uint32_t *frag_ptr_i_p;
+ int i;
+
+ cdf_print("OL TX Descriptor 0x%p msdu_id %d\n",
+ tx_desc, tx_desc->id);
+ cdf_print("HTT TX Descriptor vaddr: 0x%p paddr: 0x%x\n",
+ tx_desc->htt_tx_desc, tx_desc->htt_tx_desc_paddr);
+ cdf_print("%s %d: Fragment Descriptor 0x%p\n",
+ __func__, __LINE__, tx_desc->htt_frag_desc);
+
+ /* it looks from htt_tx_desc_frag() that tx_desc->htt_frag_desc
+ is already de-referrable (=> in virtual address space) */
+ frag_ptr_i_p = tx_desc->htt_frag_desc;
+
+ /* Dump 6 words of TSO flags */
+ print_hex_dump(KERN_DEBUG, "MLE Desc:TSO Flags: ",
+ DUMP_PREFIX_NONE, 8, 4,
+ frag_ptr_i_p, 24, true);
+
+ frag_ptr_i_p += 6; /* Skip 6 words of TSO flags */
+
+ i = 0;
+ while (*frag_ptr_i_p) {
+ print_hex_dump(KERN_DEBUG, "MLE Desc:Frag Ptr: ",
+ DUMP_PREFIX_NONE, 8, 4,
+ frag_ptr_i_p, 8, true);
+ i++;
+ if (i > 5) /* max 6 times: frag_ptr0 to frag_ptr5 */
+ break;
+ else /* jump to next pointer - skip length */
+ frag_ptr_i_p += 2;
+ }
+ return;
+}
+#endif /* HELIUMPLUS_PADDR64 */
+
/**
 * ol_txrx_mgmt_send() - transmit a management frame
 * @vdev: virtual device transmitting the frame
 * @tx_mgmt_frm: the management frame (with 802.11 header) to send
 * @type: management frame type index (< OL_TXRX_MGMT_NUM_TYPES), used to
 *        select the registered completion callbacks
 * @use_6mbps: if set, request the 6 Mbps rate for this frame
 * @chanfreq: channel frequency (MHz) to stamp into the HTT descriptor
 *
 * Return: 0 if the frame was accepted for transmission, -EINVAL otherwise
 */
int
ol_txrx_mgmt_send(ol_txrx_vdev_handle vdev,
		  cdf_nbuf_t tx_mgmt_frm,
		  uint8_t type, uint8_t use_6mbps, uint16_t chanfreq)
{
	struct ol_txrx_pdev_t *pdev = vdev->pdev;
	struct ol_tx_desc_t *tx_desc;
	struct ol_txrx_msdu_info_t tx_msdu_info;

	tx_msdu_info.tso_info.is_tso = 0;

	tx_msdu_info.htt.action.use_6mbps = use_6mbps;
	tx_msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_MGMT;
	tx_msdu_info.htt.info.vdev_id = vdev->vdev_id;
	/* request an OTA completion only if an ack callback is registered */
	tx_msdu_info.htt.action.do_tx_complete =
		pdev->tx_mgmt.callbacks[type].ota_ack_cb ? 1 : 0;

	/*
	 * FIX THIS: l2_hdr_type should only specify L2 header type
	 * The Peregrine/Rome HTT layer provides the FW with a "pkt type"
	 * that is a combination of L2 header type and 802.11 frame type.
	 * If the 802.11 frame type is "mgmt", then the HTT pkt type is "mgmt".
	 * But if the 802.11 frame type is "data", then the HTT pkt type is
	 * the L2 header type (more or less): 802.3 vs. Native WiFi
	 * (basic 802.11).
	 * (Or the header type can be "raw", which is any version of the 802.11
	 * header, and also implies that some of the offloaded tx data
	 * processing steps may not apply.)
	 * For efficiency, the Peregrine/Rome HTT uses the msdu_info's
	 * l2_hdr_type field to program the HTT pkt type.  Thus, this txrx SW
	 * needs to overload the l2_hdr_type to indicate whether the frame is
	 * data vs. mgmt, as well as 802.3 L2 header vs. 802.11 L2 header.
	 * To fix this, the msdu_info's l2_hdr_type should be left specifying
	 * just the L2 header type.  For mgmt frames, there should be a
	 * separate function to patch the HTT pkt type to store a "mgmt" value
	 * rather than the L2 header type.  Then the HTT pkt type can be
	 * programmed efficiently for data frames, and the msdu_info's
	 * l2_hdr_type field won't be confusingly overloaded to hold the 802.11
	 * frame type rather than the L2 header type.
	 */
	/*
	 * FIX THIS: remove duplication of htt_frm_type_mgmt and
	 * htt_pkt_type_mgmt
	 * The htt module expects a "enum htt_pkt_type" value.
	 * The htt_dxe module expects a "enum htt_frm_type" value.
	 * This needs to be cleaned up, so both versions of htt use a
	 * consistent method of specifying the frame type.
	 */
#ifdef QCA_SUPPORT_INTEGRATED_SOC
	/* tx mgmt frames always come with a 802.11 header */
	tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_native_wifi;
	tx_msdu_info.htt.info.frame_type = htt_frm_type_mgmt;
#else
	tx_msdu_info.htt.info.l2_hdr_type = htt_pkt_type_mgmt;
	tx_msdu_info.htt.info.frame_type = htt_pkt_type_mgmt;
#endif

	tx_msdu_info.peer = NULL;

	cdf_nbuf_map_single(pdev->osdev, tx_mgmt_frm, CDF_DMA_TO_DEVICE);
	/* For LL tx_comp_req is not used so initialized to 0 */
	tx_msdu_info.htt.action.tx_comp_req = 0;
	tx_desc = ol_tx_desc_ll(pdev, vdev, tx_mgmt_frm, &tx_msdu_info);
	/* FIX THIS -
	 * The FW currently has trouble using the host's fragments table
	 * for management frames.  Until this is fixed, rather than
	 * specifying the fragment table to the FW, specify just the
	 * address of the initial fragment.
	 */
#if defined(HELIUMPLUS_PADDR64)
	/* dump_frag_desc("ol_txrx_mgmt_send(): after ol_tx_desc_ll",
	   tx_desc); */
#endif /* defined(HELIUMPLUS_PADDR64) */
	if (tx_desc) {
		/*
		 * Following the call to ol_tx_desc_ll, frag 0 is the
		 * HTT tx HW descriptor, and the frame payload is in
		 * frag 1.
		 */
		htt_tx_desc_frags_table_set(
			pdev->htt_pdev,
			tx_desc->htt_tx_desc,
			cdf_nbuf_get_frag_paddr_lo(tx_mgmt_frm, 1),
			0, 0);
#if defined(HELIUMPLUS_PADDR64) && defined(HELIUMPLUS_DEBUG)
		dump_frag_desc(
			"after htt_tx_desc_frags_table_set",
			tx_desc);
#endif /* defined(HELIUMPLUS_PADDR64) */
	}
	if (!tx_desc) {
		/* descriptor pool exhausted: undo the DMA mapping and bail */
		cdf_nbuf_unmap_single(pdev->osdev, tx_mgmt_frm,
				      CDF_DMA_TO_DEVICE);
		return -EINVAL; /* can't accept the tx mgmt frame */
	}
	TXRX_STATS_MSDU_INCR(pdev, tx.mgmt, tx_mgmt_frm);
	TXRX_ASSERT1(type < OL_TXRX_MGMT_NUM_TYPES);
	tx_desc->pkt_type = type + OL_TXRX_MGMT_TYPE_BASE;

	htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
	NBUF_SET_PACKET_TRACK(tx_desc->netbuf, NBUF_TX_PKT_MGMT_TRACK);
	ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm,
			  htt_pkt_type_mgmt);

	return 0;               /* accepted the tx mgmt frame */
}
+
/**
 * ol_txrx_sync() - send an HTT host-to-target sync message
 * @pdev: physical device to send the sync message on
 * @sync_cnt: sync count value to pass to the target
 */
void ol_txrx_sync(ol_txrx_pdev_handle pdev, uint8_t sync_cnt)
{
	htt_h2t_sync_msg(pdev->htt_pdev, sync_cnt);
}
+
/**
 * ol_tx_reinject() - re-send a frame to a specific peer
 * @vdev: virtual device that the frame belongs to
 * @msdu: the frame to (re)transmit
 * @peer_id: target peer id to stamp into the HTT tx descriptor
 *
 * Marks the HTT descriptor as "postponed" and pins it to @peer_id
 * before handing the frame to ol_tx_send.
 *
 * Return: NULL (the frame is always consumed)
 */
cdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev,
			  cdf_nbuf_t msdu, uint16_t peer_id)
{
	struct ol_tx_desc_t *tx_desc;
	struct ol_txrx_msdu_info_t msdu_info;

	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
	msdu_info.htt.info.ext_tid = HTT_TX_EXT_TID_INVALID;
	msdu_info.peer = NULL;
	msdu_info.htt.action.tx_comp_req = 0;
	msdu_info.tso_info.is_tso = 0;

	/* NOTE(review): ol_tx_prepare_ll is a macro defined elsewhere; it
	 * presumably allocates tx_desc and may bail on failure — confirm. */
	ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
	HTT_TX_DESC_POSTPONED_SET(*((uint32_t *) (tx_desc->htt_tx_desc)), true);

	htt_tx_desc_set_peer_id(tx_desc->htt_tx_desc, peer_id);

	ol_tx_send(vdev->pdev, tx_desc, msdu);

	return NULL;
}
+
+#if defined(FEATURE_TSO)
+void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
+{
+ int i;
+ struct cdf_tso_seg_elem_t *c_element;
+
+ c_element = cdf_mem_malloc(sizeof(struct cdf_tso_seg_elem_t));
+ pdev->tso_seg_pool.freelist = c_element;
+ for (i = 0; i < (num_seg - 1); i++) {
+ c_element->next =
+ cdf_mem_malloc(sizeof(struct cdf_tso_seg_elem_t));
+ c_element = c_element->next;
+ c_element->next = NULL;
+ }
+ pdev->tso_seg_pool.pool_size = num_seg;
+ cdf_spinlock_init(&pdev->tso_seg_pool.tso_mutex);
+}
+
+void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
+{
+ int i;
+ struct cdf_tso_seg_elem_t *c_element;
+ struct cdf_tso_seg_elem_t *temp;
+
+ cdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
+ c_element = pdev->tso_seg_pool.freelist;
+ for (i = 0; i < pdev->tso_seg_pool.pool_size; i++) {
+ temp = c_element->next;
+ cdf_mem_free(c_element);
+ c_element = temp;
+ if (!c_element)
+ break;
+ }
+
+ pdev->tso_seg_pool.freelist = NULL;
+ pdev->tso_seg_pool.num_free = 0;
+ pdev->tso_seg_pool.pool_size = 0;
+ cdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
+ cdf_spinlock_destroy(&pdev->tso_seg_pool.tso_mutex);
+}
+#endif /* FEATURE_TSO */
diff --git a/dp/txrx/ol_tx.h b/dp/txrx/ol_tx.h
new file mode 100644
index 000000000000..77ae56d1ae6b
--- /dev/null
+++ b/dp/txrx/ol_tx.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_tx.h
+ * @brief Internal definitions for the high-level tx module.
+ */
+#ifndef _OL_TX__H_
+#define _OL_TX__H_
+
+#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <cdf_lock.h>
+#include <ol_txrx_api.h> /* ol_txrx_vdev_handle */
+
+#include <ol_txrx_types.h> /* ol_tx_desc_t, ol_txrx_msdu_info_t */
+
+cdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list);
+#ifdef WLAN_FEATURE_FASTPATH
+cdf_nbuf_t ol_tx_ll_fast(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list);
+#endif
+
+cdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list);
+
#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
/* With legacy flow control, frames go through the pause-queue front end */
#define OL_TX_LL ol_tx_ll_queue
#else
#define OL_TX_LL ol_tx_ll
#endif

#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
/* Timer callback: flush one vdev's pause queue (context is the vdev) */
void ol_tx_vdev_ll_pause_queue_send(void *context);
/* Drain the pause queues of every vdev on the pdev, budget permitting */
void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev);
#else
/* Flow control disabled: the pause-queue hooks compile away to no-ops */
static inline void ol_tx_vdev_ll_pause_queue_send(void *context)
{
	return;
}
static inline
void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
{
	return;
}
#endif
+cdf_nbuf_t
+ol_tx_non_std_ll(ol_txrx_vdev_handle data_vdev,
+ enum ol_tx_spec tx_spec, cdf_nbuf_t msdu_list);
+
+cdf_nbuf_t
+ol_tx_reinject(struct ol_txrx_vdev_t *vdev, cdf_nbuf_t msdu, uint16_t peer_id);
+
+void ol_txrx_mgmt_tx_complete(void *ctxt, cdf_nbuf_t netbuf, int err);
+
+
#if defined(FEATURE_TSO)
/* Pre-allocate / tear down the pdev's freelist of TSO segment elements */
void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg);
void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev);
#else
/* TSO disabled: the segment-pool hooks compile away to no-ops */
static inline void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev,
	uint32_t num_seg)
{
	return;
}
static inline void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
{
	return;
}
#endif
+#endif /* _OL_TX__H_ */
diff --git a/dp/txrx/ol_tx_desc.c b/dp/txrx/ol_tx_desc.c
new file mode 100644
index 000000000000..591b66026bfc
--- /dev/null
+++ b/dp/txrx/ol_tx_desc.c
@@ -0,0 +1,581 @@
+/*
+ * Copyright (c) 2011, 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include <cdf_net_types.h> /* CDF_NBUF_EXEMPT_NO_EXEMPTION, etc. */
+#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+#include <cdf_util.h> /* cdf_assert */
+#include <cdf_lock.h> /* cdf_spinlock */
+#ifdef QCA_COMPUTE_TX_DELAY
+#include <cdf_time.h> /* cdf_system_ticks */
+#endif
+
+#include <ol_htt_tx_api.h> /* htt_tx_desc_id */
+
+#include <ol_txrx_types.h> /* ol_txrx_pdev_t */
+#include <ol_tx_desc.h>
+#include <ol_txrx_internal.h>
+#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
+#include <ol_txrx_encap.h> /* OL_TX_RESTORE_HDR, etc */
+#endif
+#include <ol_txrx.h>
+
+#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
+extern uint32_t *g_dbg_htt_desc_end_addr, *g_dbg_htt_desc_start_addr;
+#endif
+
#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
/*
 * Assert that a descriptor freshly taken from the freelist still carries
 * the 0xff "free" marker and that its HTT descriptor pointer lies inside
 * the debug-recorded HTT descriptor range; any violation indicates
 * descriptor corruption and triggers cdf_assert.
 */
static inline void ol_tx_desc_sanity_checks(struct ol_txrx_pdev_t *pdev,
					struct ol_tx_desc_t *tx_desc)
{
	if (tx_desc->pkt_type != 0xff) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
				"%s Potential tx_desc corruption pkt_type:0x%x pdev:0x%p",
				__func__, tx_desc->pkt_type, pdev);
		cdf_assert(0);
	}
	if ((uint32_t *) tx_desc->htt_tx_desc <
			g_dbg_htt_desc_start_addr
			|| (uint32_t *) tx_desc->htt_tx_desc >
			g_dbg_htt_desc_end_addr) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
				"%s Potential htt_desc curruption:0x%p pdev:0x%p\n",
				__func__, tx_desc->htt_tx_desc, pdev);
		cdf_assert(0);
	}
}
/* Re-stamp the 0xff "free" marker when the descriptor returns to the pool */
static inline void ol_tx_desc_reset_pkt_type(struct ol_tx_desc_t *tx_desc)
{
	tx_desc->pkt_type = 0xff;
}
#ifdef QCA_COMPUTE_TX_DELAY
/*
 * Record the allocation timestamp; asserts if the previous owner failed
 * to reset the timestamp sentinel (0xffffffff) when freeing.
 */
static inline void ol_tx_desc_compute_delay(struct ol_tx_desc_t *tx_desc)
{
	if (tx_desc->entry_timestamp_ticks != 0xffffffff) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s Timestamp:0x%x\n",
				__func__, tx_desc->entry_timestamp_ticks);
		cdf_assert(0);
	}
	tx_desc->entry_timestamp_ticks = cdf_system_ticks();
}
/* Restore the timestamp sentinel when the descriptor is freed */
static inline void ol_tx_desc_reset_timestamp(struct ol_tx_desc_t *tx_desc)
{
	tx_desc->entry_timestamp_ticks = 0xffffffff;
}
#endif
#else
/* Sanity checks disabled: all four helpers compile away to no-ops */
static inline void ol_tx_desc_sanity_checks(struct ol_txrx_pdev_t *pdev,
					struct ol_tx_desc_t *tx_desc)
{
	return;
}
static inline void ol_tx_desc_reset_pkt_type(struct ol_tx_desc_t *tx_desc)
{
	return;
}
static inline void ol_tx_desc_compute_delay(struct ol_tx_desc_t *tx_desc)
{
	return;
}
static inline void ol_tx_desc_reset_timestamp(struct ol_tx_desc_t *tx_desc)
{
	return;
}
#endif
+
+#ifndef QCA_LL_TX_FLOW_CONTROL_V2
+/**
+ * ol_tx_desc_alloc() - allocate descriptor from freelist
+ * @pdev: pdev handle
+ * @vdev: vdev handle
+ *
+ * Return: tx descriptor pointer/ NULL in case of error
+ */
+static
+struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_vdev_t *vdev)
+{
+ struct ol_tx_desc_t *tx_desc = NULL;
+
+ cdf_spin_lock_bh(&pdev->tx_mutex);
+ if (pdev->tx_desc.freelist) {
+ tx_desc = ol_tx_get_desc_global_pool(pdev);
+ ol_tx_desc_sanity_checks(pdev, tx_desc);
+ ol_tx_desc_compute_delay(tx_desc);
+ }
+ cdf_spin_unlock_bh(&pdev->tx_mutex);
+ return tx_desc;
+}
+
/**
 * ol_tx_desc_alloc_wrapper() - allocate a tx descriptor (global-pool build)
 * @pdev: pdev handle
 * @vdev: vdev handle
 * @msdu_info: msdu info (unused in this variant)
 *
 * Return: tx descriptor, or NULL if the pool is exhausted
 */
struct ol_tx_desc_t *
ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
			 struct ol_txrx_vdev_t *vdev,
			 struct ol_txrx_msdu_info_t *msdu_info)
{
	return ol_tx_desc_alloc(pdev, vdev);
}
+
+#else
/**
 * ol_tx_desc_alloc() - allocate a tx descriptor from a flow pool
 * @pdev: pdev handle
 * @vdev: vdev handle (its vdev_id is used when pausing netif queues)
 * @pool: flow pool to draw the descriptor from (may be NULL)
 *
 * If the pool dips below its stop threshold after the allocation, the
 * pool is marked paused and the netif queues for the vdev are stopped
 * via pdev->pause_cb.  Drops are counted per-cause in pool_stats.
 *
 * Return: tx descriptor, or NULL (no pool, or pool empty)
 */
static
struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
				      struct ol_txrx_vdev_t *vdev,
				      struct ol_tx_flow_pool_t *pool)
{
	struct ol_tx_desc_t *tx_desc = NULL;

	if (pool) {
		cdf_spin_lock_bh(&pool->flow_pool_lock);
		if (pool->avail_desc) {
			tx_desc = ol_tx_get_desc_flow_pool(pool);
			if (cdf_unlikely(pool->avail_desc < pool->stop_th)) {
				pool->status = FLOW_POOL_ACTIVE_PAUSED;
				cdf_spin_unlock_bh(&pool->flow_pool_lock);
				/* pause network queues */
				pdev->pause_cb(vdev->vdev_id,
					       WLAN_STOP_ALL_NETIF_QUEUE,
					       WLAN_DATA_FLOW_CONTROL);
			} else {
				cdf_spin_unlock_bh(&pool->flow_pool_lock);
			}
			/* note: checks run outside the pool lock */
			ol_tx_desc_sanity_checks(pdev, tx_desc);
			ol_tx_desc_compute_delay(tx_desc);
		} else {
			cdf_spin_unlock_bh(&pool->flow_pool_lock);
			pdev->pool_stats.pkt_drop_no_desc++;
		}
	} else {
		pdev->pool_stats.pkt_drop_no_pool++;
	}

	return tx_desc;
}
+
/**
 * ol_tx_desc_alloc_wrapper() - allocate a tx descriptor (flow-pool build)
 * @pdev: pdev handle
 * @vdev: vdev handle (owns the per-vdev flow pool)
 * @msdu_info: msdu info; its frame_type selects the mgmt pool when the
 *             global mgmt pool is compiled in
 *
 * Return: tx descriptor, or NULL if the selected pool is exhausted
 */
#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
struct ol_tx_desc_t *
ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
			 struct ol_txrx_vdev_t *vdev,
			 struct ol_txrx_msdu_info_t *msdu_info)
{
	/* mgmt frames draw from the shared pdev-level mgmt pool */
	if (cdf_unlikely(msdu_info->htt.info.frame_type == htt_pkt_type_mgmt))
		return ol_tx_desc_alloc(pdev, vdev, pdev->mgmt_pool);
	else
		return ol_tx_desc_alloc(pdev, vdev, vdev->pool);
}
#else
struct ol_tx_desc_t *
ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
			 struct ol_txrx_vdev_t *vdev,
			 struct ol_txrx_msdu_info_t *msdu_info)
{
	return ol_tx_desc_alloc(pdev, vdev, vdev->pool);
}
#endif
+#endif
+
+#ifndef QCA_LL_TX_FLOW_CONTROL_V2
/**
 * ol_tx_desc_free() - return a descriptor to the global freelist
 * @pdev: pdev handle (owns the global pool and tx_mutex)
 * @tx_desc: tx descriptor being released
 *
 * Releases any attached TSO segment, restores the debug "free" markers,
 * and pushes the descriptor back onto the global freelist under tx_mutex.
 */
void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
{
	cdf_spin_lock_bh(&pdev->tx_mutex);
#if defined(FEATURE_TSO)
	if (tx_desc->pkt_type == ol_tx_frm_tso) {
		if (cdf_unlikely(tx_desc->tso_desc == NULL))
			cdf_print("%s %d TSO desc is NULL!\n",
				  __func__, __LINE__);
		else
			ol_tso_free_segment(pdev, tx_desc->tso_desc);
	}
#endif
	ol_tx_desc_reset_pkt_type(tx_desc);
	ol_tx_desc_reset_timestamp(tx_desc);

	ol_tx_put_desc_global_pool(pdev, tx_desc);
	cdf_spin_unlock_bh(&pdev->tx_mutex);
}
+
+#else
/**
 * ol_tx_desc_free() - return a descriptor to its flow pool's freelist
 * @pdev: pdev handle
 * @tx_desc: tx descriptor being released (carries its owning pool)
 *
 * Releases any attached TSO segment, restores the debug "free" markers,
 * then pushes the descriptor back to the pool.  If the pool was paused
 * and refills past its start threshold, the netif queues are woken; if
 * the pool is being torn down (INVALID) and is now completely refilled,
 * the pool itself is freed.
 */
void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
{
	struct ol_tx_flow_pool_t *pool = tx_desc->pool;

#if defined(FEATURE_TSO)
	if (tx_desc->pkt_type == ol_tx_frm_tso) {
		if (cdf_unlikely(tx_desc->tso_desc == NULL))
			cdf_print("%s %d TSO desc is NULL!\n",
				  __func__, __LINE__);
		else
			ol_tso_free_segment(pdev, tx_desc->tso_desc);
	}
#endif
	ol_tx_desc_reset_pkt_type(tx_desc);
	ol_tx_desc_reset_timestamp(tx_desc);

	cdf_spin_lock_bh(&pool->flow_pool_lock);
	ol_tx_put_desc_flow_pool(pool, tx_desc);
	switch (pool->status) {
	case FLOW_POOL_ACTIVE_PAUSED:
		if (pool->avail_desc > pool->start_th) {
			/* enough descriptors again: resume the queues */
			pdev->pause_cb(pool->member_flow_id,
				       WLAN_WAKE_ALL_NETIF_QUEUE,
				       WLAN_DATA_FLOW_CONTROL);
			pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
		}
		break;
	case FLOW_POOL_INVALID:
		if (pool->avail_desc == pool->flow_pool_size) {
			/* last outstanding descriptor: destroy the pool */
			cdf_spin_unlock_bh(&pool->flow_pool_lock);
			ol_tx_free_invalid_flow_pool(pool);
			cdf_print("%s %d pool is INVALID State!!\n",
				  __func__, __LINE__);
			return;
		}
		break;
	case FLOW_POOL_ACTIVE_UNPAUSED:
		break;
	default:
		cdf_print("%s %d pool is INACTIVE State!!\n",
			  __func__, __LINE__);
		break;
	};
	cdf_spin_unlock_bh(&pool->flow_pool_lock);

}
+#endif
+
+extern void
+dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc);
+
/**
 * dump_pkt() - debug-print a network buffer's addresses and payload
 * @nbuf: network buffer to dump
 * @nbuf_paddr: physical (DMA) address of the buffer, for the log line
 * @len: number of payload bytes to hex-dump
 */
void
dump_pkt(cdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len)
{
	cdf_print("%s: Pkt: VA 0x%p PA 0x%x len %d\n", __func__,
		  cdf_nbuf_data(nbuf), nbuf_paddr, len);
	print_hex_dump(KERN_DEBUG, "Pkt:   ", DUMP_PREFIX_NONE, 16, 4,
		       cdf_nbuf_data(nbuf), len, true);
}
+
/*
 * Lookup table mapping HTT packet-type enum values to the corresponding
 * CE (copy engine) tx packet types; the htt_pkt_num_types slot is an
 * intentionally invalid sentinel.
 */
const uint32_t htt_to_ce_pkt_type[] = {
	[htt_pkt_type_raw] = tx_pkt_type_raw,
	[htt_pkt_type_native_wifi] = tx_pkt_type_native_wifi,
	[htt_pkt_type_ethernet] = tx_pkt_type_802_3,
	[htt_pkt_type_mgmt] = tx_pkt_type_mgmt,
	[htt_pkt_type_eth2] = tx_pkt_type_eth2,
	[htt_pkt_num_types] = 0xffffffff
};
+
+/**
+ * ol_tx_desc_ll() - allocate and initialize a tx descriptor for LL tx
+ * @pdev: the data physical device sending the data
+ * @vdev: the virtual device sending the data
+ * @netbuf: the tx frame
+ * @msdu_info: tx meta-data (HTT and TSO info); filled in here
+ *
+ * Allocates a SW/HTT tx descriptor pair, records the vdev id, checksum
+ * offload and encryption flags, initializes the HTT tx descriptor, and
+ * populates the fragmentation descriptor (skipping the prefix fragment
+ * that holds the HTT tx descriptor itself).
+ *
+ * Return: the initialized descriptor, or NULL if allocation failed
+ */
+struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_vdev_t *vdev,
+ cdf_nbuf_t netbuf,
+ struct ol_txrx_msdu_info_t *msdu_info)
+{
+ struct ol_tx_desc_t *tx_desc;
+ unsigned int i;
+ uint32_t num_frags;
+
+ msdu_info->htt.info.vdev_id = vdev->vdev_id;
+ msdu_info->htt.action.cksum_offload = cdf_nbuf_get_tx_cksum(netbuf);
+ switch (cdf_nbuf_get_exemption_type(netbuf)) {
+ case CDF_NBUF_EXEMPT_NO_EXEMPTION:
+ case CDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE:
+ /* We want to encrypt this frame */
+ msdu_info->htt.action.do_encrypt = 1;
+ break;
+ case CDF_NBUF_EXEMPT_ALWAYS:
+ /* We don't want to encrypt this frame */
+ msdu_info->htt.action.do_encrypt = 0;
+ break;
+ default:
+ cdf_assert(0);
+ break;
+ }
+
+ /* allocate the descriptor */
+ tx_desc = ol_tx_desc_alloc_wrapper(pdev, vdev, msdu_info);
+ if (!tx_desc)
+ return NULL;
+
+ /* initialize the SW tx descriptor */
+ tx_desc->netbuf = netbuf;
+
+ if (msdu_info->tso_info.is_tso) {
+ tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
+ tx_desc->pkt_type = ol_tx_frm_tso;
+ TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, netbuf);
+ } else {
+ tx_desc->pkt_type = ol_tx_frm_std;
+ }
+
+ /* initialize the HW tx descriptor */
+
+ htt_tx_desc_init(pdev->htt_pdev, tx_desc->htt_tx_desc,
+ tx_desc->htt_tx_desc_paddr,
+ ol_tx_desc_id(pdev, tx_desc), netbuf, &msdu_info->htt,
+ &msdu_info->tso_info,
+ NULL, vdev->opmode == wlan_op_mode_ocb);
+
+ /*
+ * Initialize the fragmentation descriptor.
+ * Skip the prefix fragment (HTT tx descriptor) that was added
+ * during the call to htt_tx_desc_init above.
+ */
+ num_frags = cdf_nbuf_get_num_frags(netbuf);
+ /* num_frags are expected to be 2 max */
+ num_frags = (num_frags > CVG_NBUF_MAX_EXTRA_FRAGS)
+ ? CVG_NBUF_MAX_EXTRA_FRAGS
+ : num_frags;
+#if defined(HELIUMPLUS_PADDR64)
+ /*
+ * Use num_frags - 1, since 1 frag is used to store
+ * the HTT/HTC descriptor
+ * Refer to htt_tx_desc_init()
+ */
+ htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_frag_desc,
+ num_frags - 1);
+#else /* ! defined(HELIUMPLUSPADDR64) */
+ htt_tx_desc_num_frags(pdev->htt_pdev, tx_desc->htt_tx_desc,
+ num_frags - 1);
+#endif /* defined(HELIUMPLUS_PADDR64) */
+
+ if (msdu_info->tso_info.is_tso) {
+ htt_tx_desc_fill_tso_info(pdev->htt_pdev,
+ tx_desc->htt_frag_desc, &msdu_info->tso_info);
+ TXRX_STATS_TSO_SEG_UPDATE(pdev,
+ msdu_info->tso_info.curr_seg->seg);
+ } else {
+ /* frag 0 holds the HTT/HTC descriptor; start at frag 1 */
+ for (i = 1; i < num_frags; i++) {
+ cdf_size_t frag_len;
+ uint32_t frag_paddr;
+
+ frag_len = cdf_nbuf_get_frag_len(netbuf, i);
+ frag_paddr = cdf_nbuf_get_frag_paddr_lo(netbuf, i);
+#if defined(HELIUMPLUS_PADDR64)
+ htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc, i - 1,
+ frag_paddr, frag_len);
+#if defined(HELIUMPLUS_DEBUG)
+ cdf_print("%s:%d: htt_fdesc=%p frag_paddr=%u len=%zu\n",
+ __func__, __LINE__, tx_desc->htt_frag_desc,
+ frag_paddr, frag_len);
+ dump_pkt(netbuf, frag_paddr, 64);
+#endif /* HELIUMPLUS_DEBUG */
+#else /* ! defined(HELIUMPLUSPADDR64) */
+ htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_tx_desc, i - 1,
+ frag_paddr, frag_len);
+#endif /* defined(HELIUMPLUS_PADDR64) */
+ }
+ }
+
+#if defined(HELIUMPLUS_DEBUG)
+ dump_frag_desc("ol_tx_desc_ll()", tx_desc);
+#endif
+ return tx_desc;
+}
+
+/**
+ * ol_tx_desc_frame_list_free() - free a batch of standard tx descriptors
+ * @pdev: the data physical device that sent the data
+ * @tx_descs: list of SW tx descriptors for the sent frames
+ * @had_error: non-zero if the transmission failed; passed through to the
+ * batched netbuf free
+ *
+ * Returns each descriptor to its freelist, unmaps each netbuf, and
+ * frees the netbufs as a single batch.
+ *
+ * Return: none
+ */
+void ol_tx_desc_frame_list_free(struct ol_txrx_pdev_t *pdev,
+ ol_tx_desc_list *tx_descs, int had_error)
+{
+ struct ol_tx_desc_t *tx_desc, *tmp;
+ cdf_nbuf_t msdus = NULL;
+
+ TAILQ_FOREACH_SAFE(tx_desc, tx_descs, tx_desc_list_elem, tmp) {
+ cdf_nbuf_t msdu = tx_desc->netbuf;
+
+ cdf_atomic_init(&tx_desc->ref_cnt); /* clear the ref cnt */
+#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
+ /* restore original hdr offset */
+ OL_TX_RESTORE_HDR(tx_desc, msdu);
+#endif
+ cdf_nbuf_unmap(pdev->osdev, msdu, CDF_DMA_TO_DEVICE);
+ /* free the tx desc */
+ ol_tx_desc_free(pdev, tx_desc);
+ /* link the netbuf into a list to free as a batch */
+ cdf_nbuf_set_next(msdu, msdus);
+ msdus = msdu;
+ }
+ /* free the netbufs as a batch */
+ cdf_nbuf_tx_free(msdus, had_error);
+}
+
+/**
+ * ol_tx_desc_frame_free_nonstd() - free a non-standard tx frame and desc
+ * @pdev: the data physical device that sent the data
+ * @tx_desc: the SW tx descriptor for the frame that was sent
+ * @had_error: non-zero if the transmission failed; forwarded to the
+ * registered completion callbacks
+ *
+ * Handles the special free paths: "no free" frames are handed back to
+ * the registered tx_data_callback; management frames have their frags
+ * table reset and the per-type OTA-ack callback invoked; anything else
+ * is unmapped and freed as a single regular frame.
+ *
+ * Return: none
+ */
+void ol_tx_desc_frame_free_nonstd(struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_desc_t *tx_desc, int had_error)
+{
+ int mgmt_type;
+ ol_txrx_mgmt_tx_cb ota_ack_cb;
+ char *trace_str;
+
+ cdf_atomic_init(&tx_desc->ref_cnt); /* clear the ref cnt */
+#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
+ /* restore original hdr offset */
+ OL_TX_RESTORE_HDR(tx_desc, (tx_desc->netbuf));
+#endif
+ trace_str = (had_error) ? "OT:C:F:" : "OT:C:S:";
+ cdf_nbuf_trace_update(tx_desc->netbuf, trace_str);
+ if (tx_desc->pkt_type == ol_tx_frm_no_free) {
+ /* free the tx desc but don't unmap or free the frame */
+ if (pdev->tx_data_callback.func) {
+ cdf_nbuf_set_next(tx_desc->netbuf, NULL);
+ pdev->tx_data_callback.func(pdev->tx_data_callback.ctxt,
+ tx_desc->netbuf, had_error);
+ ol_tx_desc_free(pdev, tx_desc);
+ return;
+ }
+ /* let the code below unmap and free the frame */
+ }
+ cdf_nbuf_unmap(pdev->osdev, tx_desc->netbuf, CDF_DMA_TO_DEVICE);
+ /* check the frame type to see what kind of special steps are needed */
+ if ((tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE) &&
+ (tx_desc->pkt_type != 0xff)) {
+ uint32_t frag_desc_paddr_lo = 0;
+
+#if defined(HELIUMPLUS_PADDR64)
+ frag_desc_paddr_lo = tx_desc->htt_frag_desc_paddr;
+ /* FIX THIS -
+ * The FW currently has trouble using the host's fragments
+ * table for management frames. Until this is fixed,
+ * rather than specifying the fragment table to the FW,
+ * the host SW will specify just the address of the initial
+ * fragment.
+ * Now that the mgmt frame is done, the HTT tx desc's frags
+ * table pointer needs to be reset.
+ */
+#if defined(HELIUMPLUS_DEBUG)
+ cdf_print("%s %d: Frag Descriptor Reset [%d] to 0x%x\n",
+ __func__, __LINE__, tx_desc->id,
+ frag_desc_paddr_lo);
+#endif /* HELIUMPLUS_DEBUG */
+#endif /* HELIUMPLUS_PADDR64 */
+ htt_tx_desc_frags_table_set(pdev->htt_pdev,
+ tx_desc->htt_tx_desc, 0,
+ frag_desc_paddr_lo, 1);
+
+ mgmt_type = tx_desc->pkt_type - OL_TXRX_MGMT_TYPE_BASE;
+ /*
+ * we already checked the value when the mgmt frame was
+ * provided to the txrx layer.
+ * no need to check it a 2nd time.
+ */
+ ota_ack_cb = pdev->tx_mgmt.callbacks[mgmt_type].ota_ack_cb;
+ if (ota_ack_cb) {
+ void *ctxt;
+ ctxt = pdev->tx_mgmt.callbacks[mgmt_type].ctxt;
+ ota_ack_cb(ctxt, tx_desc->netbuf, had_error);
+ }
+ /* free the netbuf */
+ cdf_nbuf_free(tx_desc->netbuf);
+ } else {
+ /* single regular frame */
+ cdf_nbuf_set_next(tx_desc->netbuf, NULL);
+ cdf_nbuf_tx_free(tx_desc->netbuf, had_error);
+ }
+ /* free the tx desc */
+ ol_tx_desc_free(pdev, tx_desc);
+}
+
+#if defined(FEATURE_TSO)
+/**
+ * ol_tso_alloc_segment() - allocate a TSO segment element
+ * @pdev: the data physical device holding the TSO segment pool
+ *
+ * Pops a TSO segment element from the free list held in the
+ * txrx pdev, under the pool mutex.
+ *
+ * Return: the allocated segment element, or NULL if the freelist
+ * is empty
+ */
+struct cdf_tso_seg_elem_t *ol_tso_alloc_segment(struct ol_txrx_pdev_t *pdev)
+{
+ struct cdf_tso_seg_elem_t *tso_seg = NULL;
+
+ cdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
+ if (pdev->tso_seg_pool.freelist) {
+ pdev->tso_seg_pool.num_free--;
+ tso_seg = pdev->tso_seg_pool.freelist;
+ pdev->tso_seg_pool.freelist = pdev->tso_seg_pool.freelist->next;
+ }
+ cdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
+
+ return tso_seg;
+}
+
+/**
+ * ol_tso_free_segment() - return a TSO segment element to the pool
+ * @pdev: the data physical device holding the TSO segment pool
+ * @tso_seg: the TSO segment element to be freed
+ *
+ * Pushes the segment element back onto the freelist held in the
+ * txrx pdev, under the pool mutex.
+ *
+ * Return: none
+ */
+
+void ol_tso_free_segment(struct ol_txrx_pdev_t *pdev,
+ struct cdf_tso_seg_elem_t *tso_seg)
+{
+ cdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
+ pdev->tso_seg_pool.num_free++;
+ tso_seg->next = pdev->tso_seg_pool.freelist;
+ pdev->tso_seg_pool.freelist = tso_seg;
+ cdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
+}
+#endif
diff --git a/dp/txrx/ol_tx_desc.h b/dp/txrx/ol_tx_desc.h
new file mode 100644
index 000000000000..c96c47a05241
--- /dev/null
+++ b/dp/txrx/ol_tx_desc.h
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2011, 2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_tx_desc.h
+ * @brief API definitions for the tx descriptor module within the data SW.
+ */
+#ifndef _OL_TX_DESC__H_
+#define _OL_TX_DESC__H_
+
+#include <cds_queue.h> /* TAILQ_HEAD */
+#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <ol_txrx_types.h> /* ol_tx_desc_t */
+#include <ol_txrx_internal.h> /*TXRX_ASSERT2 */
+
+struct ol_tx_desc_t *
+ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_vdev_t *vdev,
+ struct ol_txrx_msdu_info_t *msdu_info);
+
+
+/**
+ * @brief Allocate and initialize a tx descriptor for a LL system.
+ * @details
+ * Allocate a tx descriptor pair for a new tx frame - a SW tx descriptor
+ * for private use within the host data SW, and a HTT tx descriptor for
+ * downloading tx meta-data to the target FW/HW.
+ * Fill in the fields of this pair of tx descriptors based on the
+ * information in the netbuf.
+ * For LL, this includes filling in a fragmentation descriptor to
+ * specify to the MAC HW where to find the tx frame's fragments.
+ *
+ * @param pdev - the data physical device sending the data
+ * (for accessing the tx desc pool)
+ * @param vdev - the virtual device sending the data
+ * (for specifying the transmitter address for multicast / broadcast data)
+ * @param netbuf - the tx frame
+ * @param msdu_info - tx meta-data
+ */
+struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_vdev_t *vdev,
+ cdf_nbuf_t netbuf,
+ struct ol_txrx_msdu_info_t *msdu_info);
+
+/**
+ * @brief Use a tx descriptor ID to find the corresponding descriptor object.
+ *
+ * @param pdev - the data physical device sending the data
+ * @param tx_desc_id - the ID of the descriptor in question
+ * @return the descriptor object that has the specified ID
+ */
+static inline struct ol_tx_desc_t *ol_tx_desc_find(
+ struct ol_txrx_pdev_t *pdev, uint16_t tx_desc_id)
+{
+ void **td_base = (void **)pdev->tx_desc.desc_pages.cacheable_pages;
+
+ /*
+ * The high bits of the id select the descriptor page; the low bits
+ * (masked by offset_filter) select the element within the page,
+ * scaled by the reserved per-descriptor size.
+ * NOTE(review): arithmetic on a void * relies on the GCC extension
+ * treating sizeof(void) as 1.
+ */
+ return &((union ol_tx_desc_list_elem_t *)
+ (td_base[tx_desc_id >> pdev->tx_desc.page_divider] +
+ (pdev->tx_desc.desc_reserved_size *
+ (tx_desc_id & pdev->tx_desc.offset_filter))))->tx_desc;
+}
+
+/**
+ * @brief Free a list of tx descriptors and the tx frames they refer to.
+ * @details
+ * Free a batch of "standard" tx descriptors and their tx frames.
+ * Free each tx descriptor, by returning it to the freelist.
+ * Unmap each netbuf, and free the netbufs as a batch.
+ * Irregular tx frames like TSO or management frames that require
+ * special handling are processed by the ol_tx_desc_frame_free_nonstd
+ * function rather than this function.
+ *
+ * @param pdev - the data physical device that sent the data
+ * @param tx_descs - a list of SW tx descriptors for the tx frames
+ * @param had_error - bool indication of whether the transmission failed.
+ * This is provided to callback functions that get notified of
+ * the tx frame completion.
+ */
+void ol_tx_desc_frame_list_free(struct ol_txrx_pdev_t *pdev,
+ ol_tx_desc_list *tx_descs, int had_error);
+
+/**
+ * @brief Free a non-standard tx frame and its tx descriptor.
+ * @details
+ * Check the tx frame type (e.g. TSO vs. management) to determine what
+ * special steps, if any, need to be performed prior to freeing the
+ * tx frame and its tx descriptor.
+ * This function can also be used to free single standard tx frames.
+ * After performing any special steps based on tx frame type, free the
+ * tx descriptor, i.e. return it to the freelist, and unmap and
+ * free the netbuf referenced by the tx descriptor.
+ *
+ * @param pdev - the data physical device that sent the data
+ * @param tx_desc - the SW tx descriptor for the tx frame that was sent
+ * @param had_error - bool indication of whether the transmission failed.
+ * This is provided to callback functions that get notified of
+ * the tx frame completion.
+ */
+void ol_tx_desc_frame_free_nonstd(struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_desc_t *tx_desc, int had_error);
+
+/*
+ * @brief Determine the ID of a tx descriptor.
+ *
+ * @param pdev - the physical device that is sending the data
+ * @param tx_desc - the descriptor whose ID is being determined
+ * @return numeric ID that uniquely identifies the tx descriptor
+ */
+static inline uint16_t
+ol_tx_desc_id(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
+{
+ /* debug-build sanity check only; compiled out in production */
+ TXRX_ASSERT2(tx_desc->id < pdev->tx_desc.pool_size);
+ return tx_desc->id;
+}
+
+/*
+ * @brief Retrieves the beacon header for the vdev
+ * @param pdev - opaque pointer to scn
+ * @param vdev_id - vdev id
+ * @return void pointer to the beacon header for the given vdev
+ */
+
+void *ol_ath_get_bcn_header(ol_pdev_handle pdev, A_UINT32 vdev_id);
+
+/*
+ * @brief Free a tx descriptor, without freeing the matching frame.
+ * @details
+ * This function is using during the function call that submits tx frames
+ * into the txrx layer, for cases where a tx descriptor is successfully
+ * allocated, but for other reasons the frame could not be accepted.
+ *
+ * @param pdev - the data physical device that is sending the data
+ * @param tx_desc - the descriptor being freed
+ */
+void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc);
+
+#if defined(FEATURE_TSO)
+struct cdf_tso_seg_elem_t *ol_tso_alloc_segment(struct ol_txrx_pdev_t *pdev);
+
+void ol_tso_free_segment(struct ol_txrx_pdev_t *pdev,
+ struct cdf_tso_seg_elem_t *tso_seg);
+#endif
+
+/**
+ * ol_tx_get_desc_global_pool() - get descriptor from global pool
+ * @pdev: pdev handler
+ *
+ * Caller needs to take lock and do sanity checks.
+ *
+ * Return: tx descriptor
+ */
+static inline
+struct ol_tx_desc_t *ol_tx_get_desc_global_pool(struct ol_txrx_pdev_t *pdev)
+{
+ /* pop the head element off the global freelist */
+ union ol_tx_desc_list_elem_t *head = pdev->tx_desc.freelist;
+
+ pdev->tx_desc.freelist = head->next;
+ pdev->tx_desc.num_free--;
+ return &head->tx_desc;
+}
+
+/**
+ * ol_tx_put_desc_global_pool() - put descriptor to global pool freelist
+ * @pdev: pdev handle
+ * @tx_desc: tx descriptor
+ *
+ * Caller needs to take lock and do sanity checks.
+ *
+ * Return: none
+ */
+static inline
+void ol_tx_put_desc_global_pool(struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_desc_t *tx_desc)
+{
+ /* push the descriptor onto the head of the global freelist */
+ ((union ol_tx_desc_list_elem_t *)tx_desc)->next =
+ pdev->tx_desc.freelist;
+ pdev->tx_desc.freelist =
+ (union ol_tx_desc_list_elem_t *)tx_desc;
+ pdev->tx_desc.num_free++;
+}
+
+
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+int ol_tx_free_invalid_flow_pool(struct ol_tx_flow_pool_t *pool);
+/**
+ * ol_tx_get_desc_flow_pool() - get descriptor from flow pool
+ * @pool: flow pool
+ *
+ * Caller needs to take lock and do sanity checks.
+ *
+ * Return: tx descriptor
+ */
+static inline
+struct ol_tx_desc_t *ol_tx_get_desc_flow_pool(struct ol_tx_flow_pool_t *pool)
+{
+ /* pop the head element off this flow pool's freelist */
+ union ol_tx_desc_list_elem_t *head = pool->freelist;
+
+ pool->freelist = head->next;
+ pool->avail_desc--;
+ return &head->tx_desc;
+}
+
+/**
+ * ol_tx_put_desc_flow_pool() - put descriptor to flow pool freelist
+ * @pool: flow pool
+ * @tx_desc: tx descriptor
+ *
+ * Caller needs to take lock and do sanity checks.
+ *
+ * Return: none
+ */
+static inline
+void ol_tx_put_desc_flow_pool(struct ol_tx_flow_pool_t *pool,
+ struct ol_tx_desc_t *tx_desc)
+{
+ /* remember the owning pool so ol_tx_desc_free() can find it */
+ tx_desc->pool = pool;
+ ((union ol_tx_desc_list_elem_t *)tx_desc)->next = pool->freelist;
+ pool->freelist = (union ol_tx_desc_list_elem_t *)tx_desc;
+ pool->avail_desc++;
+}
+
+#else
+/* flow-control V2 compiled out: nothing to release, report success */
+static inline int ol_tx_free_invalid_flow_pool(void *pool)
+{
+ return 0;
+}
+#endif
+
+#endif /* _OL_TX_DESC__H_ */
diff --git a/dp/txrx/ol_tx_queue.c b/dp/txrx/ol_tx_queue.c
new file mode 100644
index 000000000000..a894b1c30cb3
--- /dev/null
+++ b/dp/txrx/ol_tx_queue.c
@@ -0,0 +1,429 @@
+/*
+ * Copyright (c) 2012-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+#include <cdf_atomic.h> /* cdf_atomic_read, etc. */
+#include <ol_cfg.h> /* ol_cfg_addba_retry */
+#include <htt.h> /* HTT_TX_EXT_TID_MGMT */
+#include <ol_htt_tx_api.h> /* htt_tx_desc_tid */
+#include <ol_txrx_api.h> /* ol_txrx_vdev_handle */
+#include <ol_txrx_ctrl_api.h> /* ol_txrx_sync, ol_tx_addba_conf */
+#include <ol_ctrl_txrx_api.h> /* ol_ctrl_addba_req */
+#include <ol_txrx_internal.h> /* TXRX_ASSERT1, etc. */
+#include <ol_txrx_types.h> /* pdev stats */
+#include <ol_tx_desc.h> /* ol_tx_desc, ol_tx_desc_frame_list_free */
+#include <ol_tx.h> /* ol_tx_vdev_ll_pause_queue_send */
+#include <ol_tx_queue.h>
+#include <ol_txrx_dbg.h> /* ENABLE_TX_QUEUE_LOG */
+#include <cdf_types.h> /* bool */
+
+#if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
+
+/**
+ * ol_txrx_vdev_pause() - pause the vdev's LL tx queue
+ * @vdev: vdev handle
+ * @reason: OR'ed into the accumulated pause-reason bitmap
+ *
+ * Return: none
+ */
+void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
+{
+ /* TO DO: log the queue pause */
+ /* acquire the mutex lock, since we'll be modifying the queues */
+ TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
+
+ cdf_spin_lock_bh(&vdev->ll_pause.mutex);
+ vdev->ll_pause.paused_reason |= reason;
+ vdev->ll_pause.q_pause_cnt++;
+ vdev->ll_pause.is_q_paused = true;
+ cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+
+ DPTRACE(cdf_dp_trace(NULL, CDF_DP_TRACE_VDEV_PAUSE,
+ NULL, 0));
+ TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
+}
+
+/**
+ * ol_txrx_vdev_unpause() - clear a pause reason; resume tx if none remain
+ * @vdev: vdev handle
+ * @reason: pause-reason bit(s) to clear
+ *
+ * Return: none
+ */
+void ol_txrx_vdev_unpause(ol_txrx_vdev_handle vdev, uint32_t reason)
+{
+ /* TO DO: log the queue unpause */
+ /* acquire the mutex lock, since we'll be modifying the queues */
+ TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
+
+ cdf_spin_lock_bh(&vdev->ll_pause.mutex);
+ if (vdev->ll_pause.paused_reason & reason) {
+ vdev->ll_pause.paused_reason &= ~reason;
+ if (!vdev->ll_pause.paused_reason) {
+ vdev->ll_pause.is_q_paused = false;
+ vdev->ll_pause.q_unpause_cnt++;
+ /* drop the lock BEFORE draining the queue */
+ cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+ ol_tx_vdev_ll_pause_queue_send(vdev);
+ } else {
+ cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+ }
+ } else {
+ cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+ }
+ DPTRACE(cdf_dp_trace(NULL, CDF_DP_TRACE_VDEV_UNPAUSE,
+ NULL, 0));
+ TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
+}
+
+/**
+ * ol_txrx_vdev_flush() - cancel the pause timer and drop all queued frames
+ * @vdev: vdev handle
+ *
+ * Unmaps and frees (with NBUF_PKT_ERROR) every frame held on the
+ * vdev's ll_pause tx queue, then resets the queue to empty.
+ *
+ * Return: none
+ */
+void ol_txrx_vdev_flush(ol_txrx_vdev_handle vdev)
+{
+ cdf_spin_lock_bh(&vdev->ll_pause.mutex);
+ cdf_softirq_timer_cancel(&vdev->ll_pause.timer);
+ vdev->ll_pause.is_q_timer_on = false;
+ while (vdev->ll_pause.txq.head) {
+ /* detach the head frame before freeing it */
+ cdf_nbuf_t next =
+ cdf_nbuf_next(vdev->ll_pause.txq.head);
+ cdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
+ cdf_nbuf_unmap(vdev->pdev->osdev,
+ vdev->ll_pause.txq.head,
+ CDF_DMA_TO_DEVICE);
+ cdf_nbuf_tx_free(vdev->ll_pause.txq.head,
+ NBUF_PKT_ERROR);
+ vdev->ll_pause.txq.head = next;
+ }
+ vdev->ll_pause.txq.tail = NULL;
+ vdev->ll_pause.txq.depth = 0;
+ cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+}
+
+#endif /* defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) */
+
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+
+/**
+ * ol_txrx_map_to_netif_reason_type() - map to netif_reason_type
+ * @reason: reason
+ *
+ * Return: netif_reason_type
+ */
+enum netif_reason_type
+ol_txrx_map_to_netif_reason_type(uint32_t reason)
+{
+ enum netif_reason_type netif_reason;
+
+ /* translate the txq pause reason into the netif equivalent */
+ switch (reason) {
+ case OL_TXQ_PAUSE_REASON_FW:
+ netif_reason = WLAN_FW_PAUSE;
+ break;
+ case OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED:
+ netif_reason = WLAN_PEER_UNAUTHORISED;
+ break;
+ case OL_TXQ_PAUSE_REASON_TX_ABORT:
+ netif_reason = WLAN_TX_ABORT;
+ break;
+ case OL_TXQ_PAUSE_REASON_VDEV_STOP:
+ netif_reason = WLAN_VDEV_STOP;
+ break;
+ case OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION:
+ netif_reason = WLAN_THERMAL_MITIGATION;
+ break;
+ default:
+ TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+ "%s: reason not supported %d\n",
+ __func__, reason);
+ netif_reason = WLAN_REASON_TYPE_MAX;
+ break;
+ }
+
+ return netif_reason;
+}
+
+/**
+ * ol_txrx_vdev_pause() - pause vdev network queues
+ * @vdev: vdev handle
+ * @reason: reason
+ *
+ * Return: none
+ */
+void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
+{
+ struct ol_txrx_pdev_t *pdev = vdev->pdev;
+ enum netif_reason_type netif_reason;
+
+ /* the pause callback is registered by the OS shim; bail if absent */
+ if (cdf_unlikely((!pdev) || (!pdev->pause_cb))) {
+ TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+ "%s: invalid pdev\n", __func__);
+ return;
+ }
+
+ netif_reason = ol_txrx_map_to_netif_reason_type(reason);
+ if (netif_reason == WLAN_REASON_TYPE_MAX)
+ return;
+
+ pdev->pause_cb(vdev->vdev_id, WLAN_NETIF_TX_DISABLE, netif_reason);
+}
+
+/**
+ * ol_txrx_vdev_unpause() - unpause vdev network queues
+ * @vdev: vdev handle
+ * @reason: reason
+ *
+ * Return: none
+ */
+void ol_txrx_vdev_unpause(ol_txrx_vdev_handle vdev, uint32_t reason)
+{
+ struct ol_txrx_pdev_t *pdev = vdev->pdev;
+ enum netif_reason_type netif_reason;
+
+ /* the pause callback is registered by the OS shim; bail if absent */
+ if (cdf_unlikely((!pdev) || (!pdev->pause_cb))) {
+ TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+ "%s: invalid pdev\n", __func__);
+ return;
+ }
+
+ netif_reason = ol_txrx_map_to_netif_reason_type(reason);
+ if (netif_reason == WLAN_REASON_TYPE_MAX)
+ return;
+
+ pdev->pause_cb(vdev->vdev_id, WLAN_WAKE_ALL_NETIF_QUEUE,
+ netif_reason);
+
+}
+
+/**
+ * ol_txrx_pdev_pause() - pause network queues for each vdev
+ * @pdev: pdev handle
+ * @reason: reason
+ *
+ * Return: none
+ */
+void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
+{
+ struct ol_txrx_vdev_t *vdev;
+ struct ol_txrx_vdev_t *next;
+
+ /* propagate the pause to every vdev on this pdev */
+ TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, next)
+ ol_txrx_vdev_pause(vdev, reason);
+}
+
+/**
+ * ol_txrx_pdev_unpause() - unpause network queues for each vdev
+ * @pdev: pdev handle
+ * @reason: reason
+ *
+ * Return: none
+ */
+void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
+{
+ struct ol_txrx_vdev_t *vdev;
+ struct ol_txrx_vdev_t *next;
+
+ /* propagate the unpause to every vdev on this pdev */
+ TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, next)
+ ol_txrx_vdev_unpause(vdev, reason);
+}
+#endif
+
+/*--- LL tx throttle queue code --------------------------------------------*/
+#if defined(QCA_SUPPORT_TX_THROTTLE)
+/* placeholder: always reports the target tx path as empty (see TM TODO) */
+uint8_t ol_tx_pdev_is_target_empty(void)
+{
+ /* TM TODO */
+ return 1;
+}
+
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+/**
+ * ol_txrx_thermal_pause() - pause due to thermal mitigation
+ * @pdev: pdev handle
+ *
+ * Return: none
+ */
+static inline
+void ol_txrx_thermal_pause(struct ol_txrx_pdev_t *pdev)
+{
+ /* flow-control V2: pause all vdev netif queues */
+ ol_txrx_pdev_pause(pdev, OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION);
+ return;
+}
+/**
+ * ol_txrx_thermal_unpause() - unpause due to thermal mitigation
+ * @pdev: pdev handle
+ *
+ * Return: none
+ */
+static inline
+void ol_txrx_thermal_unpause(struct ol_txrx_pdev_t *pdev)
+{
+ ol_txrx_pdev_unpause(pdev, OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION);
+ return;
+}
+#else
+/**
+ * ol_txrx_thermal_pause() - pause due to thermal mitigation
+ * @pdev: pdev handle
+ *
+ * Without flow-control V2 the "off" phase is enforced by simply not
+ * draining the pause queues, so pausing is a no-op here.
+ *
+ * Return: none
+ */
+static inline
+void ol_txrx_thermal_pause(struct ol_txrx_pdev_t *pdev)
+{
+ return;
+}
+
+/**
+ * ol_txrx_thermal_unpause() - unpause due to thermal mitigation
+ * @pdev: pdev handle
+ *
+ * Return: none
+ */
+static inline
+void ol_txrx_thermal_unpause(struct ol_txrx_pdev_t *pdev)
+{
+ /* drain whatever accumulated on the legacy pause queues */
+ ol_tx_pdev_ll_pause_queue_send_all(pdev);
+ return;
+}
+#endif
+
+/**
+ * ol_tx_pdev_throttle_phase_timer() - advance the thermal throttle phase
+ * @context: the txrx pdev (passed as the timer cookie)
+ *
+ * Toggles between the ON (traffic allowed) and OFF (traffic paused)
+ * phases, then re-arms the phase timer with the configured duration
+ * for the current level/phase, unless the level is THROTTLE_LEVEL_0
+ * (no throttling).
+ *
+ * Return: none
+ */
+void ol_tx_pdev_throttle_phase_timer(void *context)
+{
+ struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)context;
+ int ms;
+ enum throttle_level cur_level;
+ enum throttle_phase cur_phase;
+
+ /* update the phase, wrapping back to OFF */
+ pdev->tx_throttle.current_throttle_phase++;
+
+ if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_MAX)
+ pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;
+
+ if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF) {
+ /* Traffic is stopped */
+ TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
+ "throttle phase --> OFF\n");
+ ol_txrx_thermal_pause(pdev);
+ } else {
+ /* Traffic can go */
+ TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
+ "throttle phase --> ON\n");
+ ol_txrx_thermal_unpause(pdev);
+ }
+
+ /* common tail: re-arm the timer for the new phase's duration */
+ cur_level = pdev->tx_throttle.current_throttle_level;
+ cur_phase = pdev->tx_throttle.current_throttle_phase;
+ ms = pdev->tx_throttle.throttle_time_ms[cur_level][cur_phase];
+ if (cur_level != THROTTLE_LEVEL_0) {
+ TXRX_PRINT(TXRX_PRINT_LEVEL_WARN, "start timer %d ms\n",
+ ms);
+ cdf_softirq_timer_start(&pdev->tx_throttle.phase_timer,
+ ms);
+ }
+}
+
+#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
+/* timer callback: drain the legacy per-vdev pause queues for this pdev */
+void ol_tx_pdev_throttle_tx_timer(void *context)
+{
+ struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)context;
+ ol_tx_pdev_ll_pause_queue_send_all(pdev);
+}
+#endif
+
+/**
+ * ol_tx_throttle_set_level() - apply a new thermal throttle level
+ * @pdev: the data physical device
+ * @level: index into the throttle tables; must be < THROTTLE_LEVEL_MAX
+ *
+ * Resets the phase to OFF, unpauses traffic, and (for any level other
+ * than THROTTLE_LEVEL_0) starts the phase timer with the level's OFF
+ * duration.
+ *
+ * Return: none
+ */
+void ol_tx_throttle_set_level(struct ol_txrx_pdev_t *pdev, int level)
+{
+ int ms = 0;
+
+ if (level >= THROTTLE_LEVEL_MAX) {
+ TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
+ "%s invalid throttle level set %d, ignoring\n",
+ __func__, level);
+ return;
+ }
+
+ TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Setting throttle level %d\n", level);
+
+ /* Set the current throttle level */
+ pdev->tx_throttle.current_throttle_level = (enum throttle_level) level;
+
+ /* Reset the phase */
+ pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;
+ ol_txrx_thermal_unpause(pdev);
+
+ /* Start with the new time */
+ ms = pdev->tx_throttle.
+ throttle_time_ms[level][THROTTLE_PHASE_OFF];
+
+ cdf_softirq_timer_cancel(&pdev->tx_throttle.phase_timer);
+
+ /* level 0 means no throttling: leave the phase timer stopped */
+ if (level != THROTTLE_LEVEL_0)
+ cdf_softirq_timer_start(&pdev->tx_throttle.phase_timer, ms);
+}
+
+/*
+ * Right-shift applied to the duty period to get the "on" time for each
+ * throttle level. Example for level 2 with a 100 ms duty period:
+ * "on" time = duty_period_ms >> g_throttle_duty_cycle_table[2]
+ * = 100 ms >> 2 = 25 ms
+ */
+static uint8_t g_throttle_duty_cycle_table[THROTTLE_LEVEL_MAX] = { 0, 1, 2, 4 };
+
+/**
+ * ol_tx_throttle_init_period() - compute per-level ON/OFF durations
+ * @pdev: the data physical device
+ * @period: total duty period in ms; split into ON and OFF time per level
+ *
+ * For each level, ON time is the period right-shifted by the duty-cycle
+ * table entry, and OFF time is the remainder of the period.
+ *
+ * Return: none
+ */
+void ol_tx_throttle_init_period(struct ol_txrx_pdev_t *pdev, int period)
+{
+ int i;
+
+ /* Set the current throttle level */
+ pdev->tx_throttle.throttle_period_ms = period;
+
+ TXRX_PRINT(TXRX_PRINT_LEVEL_WARN, "level OFF ON\n");
+ for (i = 0; i < THROTTLE_LEVEL_MAX; i++) {
+ pdev->tx_throttle.throttle_time_ms[i][THROTTLE_PHASE_ON] =
+ pdev->tx_throttle.throttle_period_ms >>
+ g_throttle_duty_cycle_table[i];
+ pdev->tx_throttle.throttle_time_ms[i][THROTTLE_PHASE_OFF] =
+ pdev->tx_throttle.throttle_period_ms -
+ pdev->tx_throttle.throttle_time_ms[
+ i][THROTTLE_PHASE_ON];
+ TXRX_PRINT(TXRX_PRINT_LEVEL_WARN, "%d %d %d\n", i,
+ pdev->tx_throttle.
+ throttle_time_ms[i][THROTTLE_PHASE_OFF],
+ pdev->tx_throttle.
+ throttle_time_ms[i][THROTTLE_PHASE_ON]);
+ }
+}
+
+/**
+ * ol_tx_throttle_init() - initialize the thermal throttle context
+ * @pdev: the data physical device
+ *
+ * Sets the initial level/phase, derives the per-level durations from
+ * the configured duty period, and initializes the phase (and, with
+ * legacy flow control, the tx drain) software timers.
+ *
+ * Return: none
+ */
+void ol_tx_throttle_init(struct ol_txrx_pdev_t *pdev)
+{
+ uint32_t throttle_period;
+
+ pdev->tx_throttle.current_throttle_level = THROTTLE_LEVEL_0;
+ pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;
+ cdf_spinlock_init(&pdev->tx_throttle.mutex);
+
+ throttle_period = ol_cfg_throttle_period_ms(pdev->ctrl_pdev);
+
+ ol_tx_throttle_init_period(pdev, throttle_period);
+
+ cdf_softirq_timer_init(pdev->osdev,
+ &pdev->tx_throttle.phase_timer,
+ ol_tx_pdev_throttle_phase_timer, pdev,
+ CDF_TIMER_TYPE_SW);
+
+#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
+ cdf_softirq_timer_init(pdev->osdev,
+ &pdev->tx_throttle.tx_timer,
+ ol_tx_pdev_throttle_tx_timer, pdev,
+ CDF_TIMER_TYPE_SW);
+#endif
+
+ pdev->tx_throttle.tx_threshold = THROTTLE_TX_THRESHOLD;
+}
+#endif /* QCA_SUPPORT_TX_THROTTLE */
+/*--- End of LL tx throttle queue code ---------------------------------------*/
diff --git a/dp/txrx/ol_tx_queue.h b/dp/txrx/ol_tx_queue.h
new file mode 100644
index 000000000000..be809134a1c0
--- /dev/null
+++ b/dp/txrx/ol_tx_queue.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2012-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_tx_queue.h
+ * @brief API definitions for the tx frame queue module within the data SW.
+ */
+#ifndef _OL_TX_QUEUE__H_
+#define _OL_TX_QUEUE__H_
+
+#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <ol_txrx_types.h> /* ol_txrx_vdev_t, etc. */
+#include <cdf_types.h> /* bool */
+
+/*--- function prototypes for optional queue log feature --------------------*/
+#if defined(ENABLE_TX_QUEUE_LOG)
+
+void
+ol_tx_queue_log_enqueue(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_msdu_info_t *msdu_info,
+ int frms, int bytes);
+void
+ol_tx_queue_log_dequeue(struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq, int frms, int bytes);
+void
+ol_tx_queue_log_free(struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ int tid, int frms, int bytes);
+#define OL_TX_QUEUE_LOG_ENQUEUE ol_tx_queue_log_enqueue
+#define OL_TX_QUEUE_LOG_DEQUEUE ol_tx_queue_log_dequeue
+#define OL_TX_QUEUE_LOG_FREE ol_tx_queue_log_free
+
+#else
+
+#define OL_TX_QUEUE_LOG_ENQUEUE(pdev, msdu_info, frms, bytes) /* no-op */
+#define OL_TX_QUEUE_LOG_DEQUEUE(pdev, txq, frms, bytes) /* no-op */
+#define OL_TX_QUEUE_LOG_FREE(pdev, txq, tid, frms, bytes) /* no-op */
+
+#endif /* defined(ENABLE_TX_QUEUE_LOG) */
+
+#define ol_tx_enqueue(pdev, txq, tx_desc, tx_msdu_info) /* no-op */
+#define ol_tx_dequeue(pdev, ext_tid, txq, head, num_frames, credit, bytes) 0
+#define ol_tx_queue_free(pdev, txq, tid) /* no-op */
+#define ol_tx_queue_discard(pdev, flush, tx_descs) /* no-op */
+
+void
+ol_tx_queue_log_sched(struct ol_txrx_pdev_t *pdev,
+ int credit,
+ int *num_active_tids,
+ uint32_t **active_bitmap, uint8_t **data);
+
+#define OL_TX_QUEUE_LOG_SCHED( \
+ pdev, credit, num_active_tids, active_bitmap, data)
+
+#define ol_tx_queues_display(pdev) /* no-op */
+
+#define ol_tx_queue_decs_reinit(peer, peer_id) /* no-op */
+
+#ifdef QCA_SUPPORT_TX_THROTTLE
+/**
+ * @brief - initialize the throttle context
+ * @param pdev - the physical device object, which stores the txqs
+ */
+void ol_tx_throttle_init(struct ol_txrx_pdev_t *pdev);
+#else
+#define ol_tx_throttle_init(pdev) /*no op */
+#endif
+#endif /* _OL_TX_QUEUE__H_ */
diff --git a/dp/txrx/ol_tx_send.c b/dp/txrx/ol_tx_send.c
new file mode 100644
index 000000000000..bc762694cecf
--- /dev/null
+++ b/dp/txrx/ol_tx_send.c
@@ -0,0 +1,968 @@
+/*
+ * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include <cdf_atomic.h> /* cdf_atomic_inc, etc. */
+#include <cdf_lock.h> /* cdf_os_spinlock */
+#include <cdf_time.h> /* cdf_system_ticks, etc. */
+#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <cdf_net_types.h> /* ADF_NBUF_TX_EXT_TID_INVALID */
+
+#include <cds_queue.h> /* TAILQ */
+#ifdef QCA_COMPUTE_TX_DELAY
+#include <ieee80211.h> /* ieee80211_frame, etc. */
+#include <enet.h> /* ethernet_hdr_t, etc. */
+#include <ipv6_defs.h> /* ipv6_traffic_class */
+#endif
+
+#include <ol_txrx_api.h> /* ol_txrx_vdev_handle, etc. */
+#include <ol_htt_tx_api.h> /* htt_tx_compl_desc_id */
+#include <ol_txrx_htt_api.h> /* htt_tx_status */
+
+#include <ol_ctrl_txrx_api.h>
+#include <ol_txrx_types.h> /* ol_txrx_vdev_t, etc */
+#include <ol_tx_desc.h> /* ol_tx_desc_find, ol_tx_desc_frame_free */
+#ifdef QCA_COMPUTE_TX_DELAY
+#endif
+#include <ol_txrx_internal.h> /* OL_TX_DESC_NO_REFS, etc. */
+#include <ol_osif_txrx_api.h>
+#include <ol_tx.h> /* ol_tx_reinject */
+
+#include <ol_cfg.h> /* ol_cfg_is_high_latency */
+#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
+#include <ol_txrx_encap.h> /* OL_TX_RESTORE_HDR, etc */
+#endif
+
#ifdef TX_CREDIT_RECLAIM_SUPPORT

/*
 * If the available target tx credit has dropped below the configured
 * low-water mark, poke the OS shim tasklet so tx completion messages
 * (and thus credit) are reclaimed promptly.
 */
#define OL_TX_CREDIT_RECLAIM(pdev)					\
	do {								\
		if (cdf_atomic_read(&pdev->target_tx_credit)  <		\
		    ol_cfg_tx_credit_lwm(pdev->ctrl_pdev)) {		\
			ol_osif_ath_tasklet(pdev->osdev);		\
		}							\
	} while (0)

#else

#define OL_TX_CREDIT_RECLAIM(pdev)

#endif /* TX_CREDIT_RECLAIM_SUPPORT */
+
+#if defined(TX_CREDIT_RECLAIM_SUPPORT)
+/*
+ * HL needs to keep track of the amount of credit available to download
+ * tx frames to the target - the download scheduler decides when to
+ * download frames, and which frames to download, based on the credit
+ * availability.
+ * LL systems that use TX_CREDIT_RECLAIM_SUPPORT also need to keep track
+ * of the target_tx_credit, to determine when to poll for tx completion
+ * messages.
+ */
+#define OL_TX_TARGET_CREDIT_ADJUST(factor, pdev, msdu) \
+ cdf_atomic_add( \
+ factor * htt_tx_msdu_credit(msdu), &pdev->target_tx_credit)
+#define OL_TX_TARGET_CREDIT_DECR(pdev, msdu) \
+ OL_TX_TARGET_CREDIT_ADJUST(-1, pdev, msdu)
+#define OL_TX_TARGET_CREDIT_INCR(pdev, msdu) \
+ OL_TX_TARGET_CREDIT_ADJUST(1, pdev, msdu)
+#define OL_TX_TARGET_CREDIT_DECR_INT(pdev, delta) \
+ cdf_atomic_add(-1 * delta, &pdev->target_tx_credit)
+#define OL_TX_TARGET_CREDIT_INCR_INT(pdev, delta) \
+ cdf_atomic_add(delta, &pdev->target_tx_credit)
+#else
+/*
+ * LL does not need to keep track of target credit.
+ * Since the host tx descriptor pool size matches the target's,
+ * we know the target has space for the new tx frame if the host's
+ * tx descriptor allocation succeeded.
+ */
+#define OL_TX_TARGET_CREDIT_ADJUST(factor, pdev, msdu) /* no-op */
+#define OL_TX_TARGET_CREDIT_DECR(pdev, msdu) /* no-op */
+#define OL_TX_TARGET_CREDIT_INCR(pdev, msdu) /* no-op */
+#define OL_TX_TARGET_CREDIT_DECR_INT(pdev, delta) /* no-op */
+#define OL_TX_TARGET_CREDIT_INCR_INT(pdev, delta) /* no-op */
+#endif
+
#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
/*
 * Walk all vdevs and un-pause any OS tx queue that was paused for flow
 * control, once the free tx descriptor count has risen back above the
 * vdev's high-water mark (tx_fl_hwm).  pdev->tx_mutex protects the
 * free-count check; it is released before calling back into the OS
 * shim (ol_txrx_flow_control_cb) to avoid holding the lock across it.
 */
#define OL_TX_FLOW_CT_UNPAUSE_OS_Q(pdev)				\
	do {								\
		struct ol_txrx_vdev_t *vdev;				\
		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) { \
			if (cdf_atomic_read(&vdev->os_q_paused) &&	\
			    (vdev->tx_fl_hwm != 0)) {			\
				cdf_spin_lock(&pdev->tx_mutex);		\
				if (pdev->tx_desc.num_free >		\
				    vdev->tx_fl_hwm) {			\
					cdf_atomic_set(&vdev->os_q_paused, 0); \
					cdf_spin_unlock(&pdev->tx_mutex); \
					ol_txrx_flow_control_cb(vdev, true);\
				}					\
				else {					\
					cdf_spin_unlock(&pdev->tx_mutex); \
				}					\
			}						\
		}							\
	} while (0)
#else
#define OL_TX_FLOW_CT_UNPAUSE_OS_Q(pdev)
#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
+
/**
 * ol_tx_send_base() - common pre-download bookkeeping for a tx frame
 * @pdev: physical device instance
 * @tx_desc: tx descriptor already attached to @msdu
 * @msdu: the tx frame about to be downloaded
 *
 * Debits the target tx credit for this msdu, optionally triggers credit
 * reclaim, and initializes the descriptor's reference count to two
 * (one reference for the download path, one for the target completion).
 *
 * Return: the number of credits consumed by this msdu
 */
static inline uint16_t
ol_tx_send_base(struct ol_txrx_pdev_t *pdev,
		struct ol_tx_desc_t *tx_desc, cdf_nbuf_t msdu)
{
	int msdu_credit_consumed;

	TX_CREDIT_DEBUG_PRINT("TX %d bytes\n", cdf_nbuf_len(msdu));
	TX_CREDIT_DEBUG_PRINT(" <HTT> Decrease credit %d - 1 = %d, len:%d.\n",
			      cdf_atomic_read(&pdev->target_tx_credit),
			      cdf_atomic_read(&pdev->target_tx_credit) - 1,
			      cdf_nbuf_len(msdu));

	msdu_credit_consumed = htt_tx_msdu_credit(msdu);
	OL_TX_TARGET_CREDIT_DECR_INT(pdev, msdu_credit_consumed);
	OL_TX_CREDIT_RECLAIM(pdev);

	/*
	 * When the tx frame is downloaded to the target, there are two
	 * outstanding references:
	 * 1. The host download SW (HTT, HTC, HIF)
	 *    This reference is cleared by the ol_tx_send_done callback
	 *    functions.
	 * 2. The target FW
	 *    This reference is cleared by the ol_tx_completion_handler
	 *    function.
	 * It is extremely probable that the download completion is processed
	 * before the tx completion message.  However, under exceptional
	 * conditions the tx completion may be processed first.  Thus, rather
	 * that assuming that reference (1) is done before reference (2),
	 * explicit reference tracking is needed.
	 * Double-increment the ref count to account for both references
	 * described above.
	 */

	OL_TX_DESC_REF_INIT(tx_desc);
	OL_TX_DESC_REF_INC(tx_desc);
	OL_TX_DESC_REF_INC(tx_desc);

	return msdu_credit_consumed;
}
+
/**
 * ol_tx_send() - download a standard tx frame to the target via HTT
 * @pdev: physical device instance
 * @tx_desc: tx descriptor already set up for @msdu
 * @msdu: the tx frame
 *
 * Performs the common credit/refcount bookkeeping, records a DP trace
 * entry, then hands the frame to htt_tx_send_std().  If HTT rejects the
 * frame, the consumed credit is refunded and the frame is freed as an
 * error.
 */
void
ol_tx_send(struct ol_txrx_pdev_t *pdev,
	   struct ol_tx_desc_t *tx_desc, cdf_nbuf_t msdu)
{
	int msdu_credit_consumed;
	uint16_t id;
	int failed;

	msdu_credit_consumed = ol_tx_send_base(pdev, tx_desc, msdu);
	id = ol_tx_desc_id(pdev, tx_desc);
	NBUF_UPDATE_TX_PKT_COUNT(msdu, NBUF_TX_PKT_TXRX);
	/*
	 * NOTE(review): sizeof(cdf_nbuf_data(msdu)) is the size of the
	 * returned pointer, not the frame length - confirm whether the
	 * trace record is intended to capture only pointer-size bytes.
	 */
	DPTRACE(cdf_dp_trace(msdu, CDF_DP_TRACE_TXRX_PACKET_PTR_RECORD,
			     (uint8_t *)(cdf_nbuf_data(msdu)),
			     sizeof(cdf_nbuf_data(msdu))));
	failed = htt_tx_send_std(pdev->htt_pdev, msdu, id);
	if (cdf_unlikely(failed)) {
		/* download rejected: refund credit, free as error */
		OL_TX_TARGET_CREDIT_INCR_INT(pdev, msdu_credit_consumed);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
	}
}
+
/**
 * ol_tx_send_batch() - download a linked batch of tx frames to the target
 * @pdev: physical device instance
 * @head_msdu: head of the chained list of tx frames
 * @num_msdus: number of msdus chained from @head_msdu
 *
 * Hands the whole chain to htt_tx_send_batch(); HTT returns the chain of
 * frames it could not accept, each of which has its credit refunded and
 * is freed as a download error.
 *
 * NOTE(review): ol_tx_send.h declares this function returning int while
 * this definition returns void - the two should be reconciled.
 */
void
ol_tx_send_batch(struct ol_txrx_pdev_t *pdev,
		 cdf_nbuf_t head_msdu, int num_msdus)
{
	cdf_nbuf_t rejected;
	OL_TX_CREDIT_RECLAIM(pdev);

	rejected = htt_tx_send_batch(pdev->htt_pdev, head_msdu, num_msdus);
	while (cdf_unlikely(rejected)) {
		struct ol_tx_desc_t *tx_desc;
		uint16_t *msdu_id_storage;
		cdf_nbuf_t next;

		/* read the next pointer before the frame is freed */
		next = cdf_nbuf_next(rejected);
		msdu_id_storage = ol_tx_msdu_id_storage(rejected);
		tx_desc = ol_tx_desc_find(pdev, *msdu_id_storage);

		OL_TX_TARGET_CREDIT_INCR(pdev, rejected);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);

		rejected = next;
	}
}
+
/**
 * ol_tx_send_nonstd() - download a non-standard tx frame to the target
 * @pdev: physical device instance
 * @tx_desc: tx descriptor already set up for @msdu
 * @msdu: the tx frame
 * @pkt_type: which kind of non-std frame (mgmt, raw, etc.) is being sent
 *
 * Same flow as ol_tx_send(), but uses htt_tx_send_nonstd() so the HTT
 * layer can apply pkt_type-specific handling.  On failure the credit is
 * refunded and the frame freed as an error.
 */
void
ol_tx_send_nonstd(struct ol_txrx_pdev_t *pdev,
		  struct ol_tx_desc_t *tx_desc,
		  cdf_nbuf_t msdu, enum htt_pkt_type pkt_type)
{
	int msdu_credit_consumed;
	uint16_t id;
	int failed;

	msdu_credit_consumed = ol_tx_send_base(pdev, tx_desc, msdu);
	id = ol_tx_desc_id(pdev, tx_desc);
	NBUF_UPDATE_TX_PKT_COUNT(msdu, NBUF_TX_PKT_TXRX);
	failed = htt_tx_send_nonstd(pdev->htt_pdev, msdu, id, pkt_type);
	if (failed) {
		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
			   "Error: freeing tx frame after htt_tx failed");
		OL_TX_TARGET_CREDIT_INCR_INT(pdev, msdu_credit_consumed);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1 /* had error */);
	}
}
+
/**
 * ol_tx_download_done_base() - common download-completion processing
 * @pdev: physical device instance
 * @status: download status (A_OK on success)
 * @msdu: the downloaded frame
 * @msdu_id: host tx descriptor ID for the frame
 *
 * Invokes the registered mgmt download callback (if any) for management
 * frames, then either frees the frame on download error (refunding its
 * credit) or, on success, drops the download reference and frees the
 * frame only if the target completion has already released its reference.
 */
static inline void
ol_tx_download_done_base(struct ol_txrx_pdev_t *pdev,
			 A_STATUS status, cdf_nbuf_t msdu, uint16_t msdu_id)
{
	struct ol_tx_desc_t *tx_desc;

	tx_desc = ol_tx_desc_find(pdev, msdu_id);
	cdf_assert(tx_desc);

	/*
	 * If the download is done for
	 * the Management frame then
	 * call the download callback if registered
	 */
	if (tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE) {
		int tx_mgmt_index = tx_desc->pkt_type - OL_TXRX_MGMT_TYPE_BASE;
		ol_txrx_mgmt_tx_cb download_cb =
			pdev->tx_mgmt.callbacks[tx_mgmt_index].download_cb;

		if (download_cb) {
			download_cb(pdev->tx_mgmt.callbacks[tx_mgmt_index].ctxt,
				    tx_desc->netbuf, status != A_OK);
		}
	}

	if (status != A_OK) {
		/* download failed: return the credit and free the frame */
		OL_TX_TARGET_CREDIT_INCR(pdev, msdu);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
					     1 /* download err */);
	} else {
		if (OL_TX_DESC_NO_REFS(tx_desc)) {
			/*
			 * The decremented value was zero - free the frame.
			 * Use the tx status recorded previously during
			 * tx completion handling.
			 */
			ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
						     tx_desc->status !=
						     htt_tx_status_ok);
		}
	}
}
+
+void
+ol_tx_download_done_ll(void *pdev,
+ A_STATUS status, cdf_nbuf_t msdu, uint16_t msdu_id)
+{
+ ol_tx_download_done_base((struct ol_txrx_pdev_t *)pdev, status, msdu,
+ msdu_id);
+}
+
+void
+ol_tx_download_done_hl_retain(void *txrx_pdev,
+ A_STATUS status,
+ cdf_nbuf_t msdu, uint16_t msdu_id)
+{
+ struct ol_txrx_pdev_t *pdev = txrx_pdev;
+ ol_tx_download_done_base(pdev, status, msdu, msdu_id);
+}
+
/**
 * ol_tx_download_done_hl_free() - HL download-completion callback that
 * frees data frames once the download finishes
 * @txrx_pdev: opaque txrx physical device handle
 * @status: download status reported by the transport
 * @msdu: the downloaded tx frame
 * @msdu_id: host tx descriptor ID for the frame
 *
 * Runs the common completion processing, then for regular data frames
 * (not mgmt, not "no free") returns the descriptor to the tx queue
 * resource pool and frees the frame.
 */
void
ol_tx_download_done_hl_free(void *txrx_pdev,
			    A_STATUS status, cdf_nbuf_t msdu, uint16_t msdu_id)
{
	struct ol_txrx_pdev_t *pdev = txrx_pdev;
	struct ol_tx_desc_t *tx_desc;

	tx_desc = ol_tx_desc_find(pdev, msdu_id);
	cdf_assert(tx_desc);

	ol_tx_download_done_base(pdev, status, msdu, msdu_id);

	if ((tx_desc->pkt_type != ol_tx_frm_no_free) &&
	    (tx_desc->pkt_type < OL_TXRX_MGMT_TYPE_BASE)) {
		/* give back the tx-queue resource this frame consumed */
		cdf_atomic_add(1, &pdev->tx_queue.rsrc_cnt);
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, status != A_OK);
	}
}
+
/**
 * ol_tx_target_credit_init() - adjust the recorded original credit total
 * @pdev: physical device instance
 * @credit_delta: credits to add to the original target tx credit baseline
 */
void ol_tx_target_credit_init(struct ol_txrx_pdev_t *pdev, int credit_delta)
{
	cdf_atomic_add(credit_delta, &pdev->orig_target_tx_credit);
}
+
/**
 * ol_tx_target_credit_update() - adjust the live target tx credit count
 * @pdev: physical device instance
 * @credit_delta: credits to add (may be negative) to target_tx_credit
 */
void ol_tx_target_credit_update(struct ol_txrx_pdev_t *pdev, int credit_delta)
{
	TX_CREDIT_DEBUG_PRINT(" <HTT> Increase credit %d + %d = %d\n",
			      cdf_atomic_read(&pdev->target_tx_credit),
			      credit_delta,
			      cdf_atomic_read(&pdev->target_tx_credit) +
			      credit_delta);
	cdf_atomic_add(credit_delta, &pdev->target_tx_credit);
}
+
+#ifdef QCA_COMPUTE_TX_DELAY
+
+static void
+ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
+ enum htt_tx_status status,
+ uint16_t *desc_ids, int num_msdus);
+#define OL_TX_DELAY_COMPUTE ol_tx_delay_compute
+#else
+#define OL_TX_DELAY_COMPUTE(pdev, status, desc_ids, num_msdus) /* no-op */
+#endif /* QCA_COMPUTE_TX_DELAY */
+
+#ifndef OL_TX_RESTORE_HDR
+#define OL_TX_RESTORE_HDR(__tx_desc, __msdu)
+#endif
+/*
+ * The following macros could have been inline functions too.
+ * The only rationale for choosing macros, is to force the compiler to inline
+ * the implementation, which cannot be controlled for actual "inline" functions,
+ * since "inline" is only a hint to the compiler.
+ * In the performance path, we choose to force the inlining, in preference to
+ * type-checking offered by the actual inlined functions.
+ */
/*
 * ol_tx_msdu_complete_batch() defers the descriptor onto a local TAILQ;
 * ol_tx_msdu_complete_single() unmaps and frees the netbuf immediately
 * and threads the descriptor onto a caller-local freelist (remembering
 * the tail element so the whole list can be spliced onto the pdev
 * freelist under a single lock acquisition).
 *
 * Fix: the freelist-empty test used the literal identifier
 * `lcl_freelist` instead of the macro parameter `_lcl_freelist`; it
 * only worked because every caller happens to name its variable
 * `lcl_freelist`.  Use the parameter (parenthesized) in both branches.
 */
#define ol_tx_msdu_complete_batch(_pdev, _tx_desc, _tx_descs, _status) \
	TAILQ_INSERT_TAIL(&(_tx_descs), (_tx_desc), tx_desc_list_elem)
#ifndef ATH_11AC_TXCOMPACT
#define ol_tx_msdu_complete_single(_pdev, _tx_desc, _netbuf,\
				   _lcl_freelist, _tx_desc_last) \
	do { \
		cdf_atomic_init(&(_tx_desc)->ref_cnt); \
		/* restore original hdr offset */ \
		OL_TX_RESTORE_HDR((_tx_desc), (_netbuf)); \
		cdf_nbuf_unmap((_pdev)->osdev, (_netbuf), CDF_DMA_TO_DEVICE); \
		cdf_nbuf_free((_netbuf)); \
		((union ol_tx_desc_list_elem_t *)(_tx_desc))->next = \
			(_lcl_freelist); \
		if (cdf_unlikely(!(_lcl_freelist))) { \
			(_tx_desc_last) = (union ol_tx_desc_list_elem_t *)\
				(_tx_desc); \
		} \
		(_lcl_freelist) = (union ol_tx_desc_list_elem_t *)(_tx_desc); \
	} while (0)
#else /*!ATH_11AC_TXCOMPACT */
#define ol_tx_msdu_complete_single(_pdev, _tx_desc, _netbuf,\
				   _lcl_freelist, _tx_desc_last) \
	do { \
		/* restore original hdr offset */ \
		OL_TX_RESTORE_HDR((_tx_desc), (_netbuf)); \
		cdf_nbuf_unmap((_pdev)->osdev, (_netbuf), CDF_DMA_TO_DEVICE); \
		cdf_nbuf_free((_netbuf)); \
		((union ol_tx_desc_list_elem_t *)(_tx_desc))->next = \
			(_lcl_freelist); \
		if (cdf_unlikely(!(_lcl_freelist))) { \
			(_tx_desc_last) = (union ol_tx_desc_list_elem_t *)\
				(_tx_desc); \
		} \
		(_lcl_freelist) = (union ol_tx_desc_list_elem_t *)(_tx_desc); \
	} while (0)

#endif /*!ATH_11AC_TXCOMPACT */
+
/*
 * ol_tx_msdu_complete() dispatches a completed msdu to either the
 * single-frame path (immediate unmap/free + local freelist) or the
 * deferred batch path, depending on the build configuration; when the
 * std-path-only optimization is disabled, non-standard frames are
 * routed to ol_tx_desc_frame_free_nonstd() instead.
 *
 * Fix: the (!QCA_TX_SINGLE_COMPLETIONS && QCA_TX_STD_PATH_ONLY) branch
 * invoked the nonexistent `ol_tx_msdus_complete_batch` (stray "s"),
 * which would not compile in that configuration; it now calls the
 * ol_tx_msdu_complete_batch macro defined above.
 */
#ifdef QCA_TX_SINGLE_COMPLETIONS
#ifdef QCA_TX_STD_PATH_ONLY
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs,			\
			    _netbuf, _lcl_freelist,			\
			    _tx_desc_last, _status)			\
	ol_tx_msdu_complete_single((_pdev), (_tx_desc),			\
				   (_netbuf), (_lcl_freelist),		\
				   _tx_desc_last)
#else /* !QCA_TX_STD_PATH_ONLY */
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs,			\
			    _netbuf, _lcl_freelist,			\
			    _tx_desc_last, _status)			\
	do {								\
		if (cdf_likely((_tx_desc)->pkt_type == ol_tx_frm_std)) { \
			ol_tx_msdu_complete_single((_pdev), (_tx_desc),\
						   (_netbuf), (_lcl_freelist), \
						   (_tx_desc_last));	\
		} else {						\
			ol_tx_desc_frame_free_nonstd(			\
				(_pdev), (_tx_desc),			\
				(_status) != htt_tx_status_ok);		\
		}							\
	} while (0)
#endif /* !QCA_TX_STD_PATH_ONLY */
#else /* !QCA_TX_SINGLE_COMPLETIONS */
#ifdef QCA_TX_STD_PATH_ONLY
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs,			\
			    _netbuf, _lcl_freelist,			\
			    _tx_desc_last, _status)			\
	ol_tx_msdu_complete_batch((_pdev), (_tx_desc), (_tx_descs), (_status))
#else /* !QCA_TX_STD_PATH_ONLY */
#define ol_tx_msdu_complete(_pdev, _tx_desc, _tx_descs,			\
			    _netbuf, _lcl_freelist,			\
			    _tx_desc_last, _status)			\
	do {								\
		if (cdf_likely((_tx_desc)->pkt_type == ol_tx_frm_std)) { \
			ol_tx_msdu_complete_batch((_pdev), (_tx_desc),	\
						  (_tx_descs), (_status)); \
		} else {						\
			ol_tx_desc_frame_free_nonstd((_pdev), (_tx_desc), \
						     (_status) !=	\
						     htt_tx_status_ok); \
		}							\
	} while (0)
#endif /* !QCA_TX_STD_PATH_ONLY */
#endif /* QCA_TX_SINGLE_COMPLETIONS */
+
+void ol_tx_discard_target_frms(ol_txrx_pdev_handle pdev)
+{
+ int i = 0;
+ struct ol_tx_desc_t *tx_desc;
+
+ for (i = 0; i < pdev->tx_desc.pool_size; i++) {
+ tx_desc = ol_tx_desc_find(pdev, i);
+ /*
+ * Confirm that each tx descriptor is "empty", i.e. it has
+ * no tx frame attached.
+ * In particular, check that there are no frames that have
+ * been given to the target to transmit, for which the
+ * target has never provided a response.
+ */
+ if (cdf_atomic_read(&tx_desc->ref_cnt)) {
+ TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
+ "Warning: freeing tx frame "
+ "(no tx completion from the target)\n");
+ ol_tx_desc_frame_free_nonstd(pdev,
+ tx_desc, 1);
+ }
+ }
+}
+
/**
 * ol_tx_credit_completion_handler() - process a credit-update message
 * @pdev: physical device instance
 * @credits: credits returned by the target
 *
 * Returns the credits to the pool and un-pauses any flow-controlled OS
 * tx queues that now have headroom.
 */
void ol_tx_credit_completion_handler(ol_txrx_pdev_handle pdev, int credits)
{
	ol_tx_target_credit_update(pdev, credits);

	/* UNPAUSE OS Q */
	OL_TX_FLOW_CT_UNPAUSE_OS_Q(pdev);
}
+
/*
 * WARNING: ol_tx_inspect_handler()'s behavior is similar to that of
 * ol_tx_completion_handler(); any change in ol_tx_completion_handler()
 * must be mirrored in ol_tx_inspect_handler().
 */
/**
 * ol_tx_completion_handler() - batch tx-completion processing
 * @pdev: physical device instance
 * @num_msdus: number of descriptor IDs in the completion message
 * @status: tx status shared by every msdu in this batch
 * @tx_desc_id_iterator: array of uint16_t host tx descriptor IDs
 *
 * For each completed descriptor: record the status, release the runtime
 * PM reference taken for the download, and - once both the download and
 * target references are gone - account statistics and complete the msdu.
 * Standard frames are collected on a caller-local freelist so the pdev
 * freelist lock is taken only once for the whole batch.  Finally the
 * consumed target credit is returned and flow-controlled OS queues are
 * un-paused.
 */
void
ol_tx_completion_handler(ol_txrx_pdev_handle pdev,
			 int num_msdus,
			 enum htt_tx_status status, void *tx_desc_id_iterator)
{
	int i;
	uint16_t *desc_ids = (uint16_t *) tx_desc_id_iterator;
	uint16_t tx_desc_id;
	struct ol_tx_desc_t *tx_desc;
	char *trace_str;

	uint32_t byte_cnt = 0;
	cdf_nbuf_t netbuf;

	union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
	union ol_tx_desc_list_elem_t *tx_desc_last = NULL;
	ol_tx_desc_list tx_descs;
	TAILQ_INIT(&tx_descs);

	OL_TX_DELAY_COMPUTE(pdev, status, desc_ids, num_msdus);

	trace_str = (status) ? "OT:C:F:" : "OT:C:S:";
	for (i = 0; i < num_msdus; i++) {
		tx_desc_id = desc_ids[i];
		tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
		tx_desc->status = status;
		netbuf = tx_desc->netbuf;

		/* drop the runtime-PM vote held for this download */
		cdf_runtime_pm_put();
		cdf_nbuf_trace_update(netbuf, trace_str);
		/* Per SDU update of byte count */
		byte_cnt += cdf_nbuf_len(netbuf);
		if (OL_TX_DESC_NO_REFS(tx_desc)) {
			/* both references released: complete the msdu */
			ol_tx_statistics(
				pdev->ctrl_pdev,
				HTT_TX_DESC_VDEV_ID_GET(*((uint32_t *)
							  (tx_desc->
							   htt_tx_desc))),
				status != htt_tx_status_ok);
			ol_tx_msdu_complete(pdev, tx_desc, tx_descs, netbuf,
					    lcl_freelist, tx_desc_last, status);
		}
		NBUF_UPDATE_TX_PKT_COUNT(netbuf, NBUF_TX_PKT_FREE);
#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
		/* poison the descriptor so stale reuse is detectable */
		tx_desc->pkt_type = 0xff;
#ifdef QCA_COMPUTE_TX_DELAY
		tx_desc->entry_timestamp_ticks = 0xffffffff;
#endif
#endif
	}

	/* One shot protected access to pdev freelist, when setup */
	if (lcl_freelist) {
		cdf_spin_lock(&pdev->tx_mutex);
		tx_desc_last->next = pdev->tx_desc.freelist;
		pdev->tx_desc.freelist = lcl_freelist;
		pdev->tx_desc.num_free += (uint16_t) num_msdus;
		cdf_spin_unlock(&pdev->tx_mutex);
	} else {
		ol_tx_desc_frame_list_free(pdev, &tx_descs,
					   status != htt_tx_status_ok);
	}

	OL_TX_TARGET_CREDIT_ADJUST(num_msdus, pdev, NULL);

	/* UNPAUSE OS Q */
	OL_TX_FLOW_CT_UNPAUSE_OS_Q(pdev);
	/* Do one shot statistics */
	TXRX_STATS_UPDATE_TX_STATS(pdev, status, num_msdus, byte_cnt);
}
+
+/*
+ * ol_tx_single_completion_handler performs the same tx completion
+ * processing as ol_tx_completion_handler, but for a single frame.
+ * ol_tx_completion_handler is optimized to handle batch completions
+ * as efficiently as possible; in contrast ol_tx_single_completion_handler
+ * handles single frames as simply and generally as possible.
+ * Thus, this ol_tx_single_completion_handler function is suitable for
+ * intermittent usage, such as for tx mgmt frames.
+ */
/**
 * ol_tx_single_completion_handler() - tx-completion processing for one frame
 * @pdev: physical device instance
 * @status: tx status for the frame
 * @tx_desc_id: host tx descriptor ID of the completed frame
 *
 * Simple, general single-frame variant of ol_tx_completion_handler(),
 * intended for intermittent traffic such as tx mgmt frames: records the
 * status, frees the frame once all references are gone, and returns one
 * unit of target tx credit.
 */
void
ol_tx_single_completion_handler(ol_txrx_pdev_handle pdev,
				enum htt_tx_status status, uint16_t tx_desc_id)
{
	struct ol_tx_desc_t *tx_desc;
	cdf_nbuf_t netbuf;

	tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
	tx_desc->status = status;
	netbuf = tx_desc->netbuf;

	NBUF_UPDATE_TX_PKT_COUNT(netbuf, NBUF_TX_PKT_FREE);
	/* Do one shot statistics */
	TXRX_STATS_UPDATE_TX_STATS(pdev, status, 1, cdf_nbuf_len(netbuf));

	if (OL_TX_DESC_NO_REFS(tx_desc)) {
		/* last reference released: free the frame */
		ol_tx_desc_frame_free_nonstd(pdev, tx_desc,
					     status != htt_tx_status_ok);
	}

	TX_CREDIT_DEBUG_PRINT(" <HTT> Increase credit %d + %d = %d\n",
			      cdf_atomic_read(&pdev->target_tx_credit),
			      1, cdf_atomic_read(&pdev->target_tx_credit) + 1);


	cdf_atomic_add(1, &pdev->target_tx_credit);
}
+
/*
 * WARNING: ol_tx_inspect_handler()'s behavior is similar to that of
 * ol_tx_completion_handler(); any change in ol_tx_completion_handler()
 * must be mirrored here.
 */
/**
 * ol_tx_inspect_handler() - process "inspect" tx completions
 * @pdev: physical device instance
 * @num_msdus: number of descriptor IDs in the message
 * @tx_desc_id_iterator: array of uint16_t host tx descriptor IDs
 *
 * Mirrors ol_tx_completion_handler() (see the WARNING above) but forces
 * the status to htt_tx_status_ok so the frames are freed gracefully,
 * and returns the consumed target credit.
 */
void
ol_tx_inspect_handler(ol_txrx_pdev_handle pdev,
		      int num_msdus, void *tx_desc_id_iterator)
{
	uint16_t vdev_id, i;
	struct ol_txrx_vdev_t *vdev;
	uint16_t *desc_ids = (uint16_t *) tx_desc_id_iterator;
	uint16_t tx_desc_id;
	struct ol_tx_desc_t *tx_desc;
	union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
	union ol_tx_desc_list_elem_t *tx_desc_last = NULL;
	cdf_nbuf_t netbuf;
	ol_tx_desc_list tx_descs;
	TAILQ_INIT(&tx_descs);

	for (i = 0; i < num_msdus; i++) {
		tx_desc_id = desc_ids[i];
		tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
		netbuf = tx_desc->netbuf;

		/* find the "vdev" this tx_desc belongs to */
		vdev_id = HTT_TX_DESC_VDEV_ID_GET(*((uint32_t *)
						    (tx_desc->htt_tx_desc)));
		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
			if (vdev->vdev_id == vdev_id)
				break;
		}

		/* vdev now points to the vdev for this descriptor. */

#ifndef ATH_11AC_TXCOMPACT
		/* save this multicast packet to local free list */
		if (cdf_atomic_dec_and_test(&tx_desc->ref_cnt))
#endif
		{
			/* for this function only, force htt status to be
			   "htt_tx_status_ok"
			 * for graceful freeing of this multicast frame
			 */
			ol_tx_msdu_complete(pdev, tx_desc, tx_descs, netbuf,
					    lcl_freelist, tx_desc_last,
					    htt_tx_status_ok);
		}
	}

	/* splice the local freelist onto the pdev freelist in one shot */
	if (lcl_freelist) {
		cdf_spin_lock(&pdev->tx_mutex);
		tx_desc_last->next = pdev->tx_desc.freelist;
		pdev->tx_desc.freelist = lcl_freelist;
		cdf_spin_unlock(&pdev->tx_mutex);
	} else {
		ol_tx_desc_frame_list_free(pdev, &tx_descs,
					   htt_tx_status_discard);
	}
	TX_CREDIT_DEBUG_PRINT(" <HTT> Increase HTT credit %d + %d = %d..\n",
			      cdf_atomic_read(&pdev->target_tx_credit),
			      num_msdus,
			      cdf_atomic_read(&pdev->target_tx_credit) +
			      num_msdus);

	OL_TX_TARGET_CREDIT_ADJUST(num_msdus, pdev, NULL);
}
+
+#ifdef QCA_COMPUTE_TX_DELAY
+
/**
 * ol_tx_set_compute_interval() - set the tx-delay averaging period
 * @pdev: physical device instance
 * @interval: averaging period in milliseconds (stored as ticks)
 */
void ol_tx_set_compute_interval(ol_txrx_pdev_handle pdev, uint32_t interval)
{
	pdev->tx_delay.avg_period_ticks = cdf_system_msecs_to_ticks(interval);
}
+
/**
 * ol_tx_packet_count() - fetch-and-reset tx packet / loss counters
 * @pdev: physical device instance
 * @out_packet_count: [out] packets completed in this category
 * @out_packet_loss_count: [out] packets lost in this category
 * @category: delay category (TID) whose counters are read
 *
 * Counters are zeroed after being read, so each call reports the
 * activity since the previous call.
 */
void
ol_tx_packet_count(ol_txrx_pdev_handle pdev,
		   uint16_t *out_packet_count,
		   uint16_t *out_packet_loss_count, int category)
{
	*out_packet_count = pdev->packet_count[category];
	*out_packet_loss_count = pdev->packet_loss_count[category];
	pdev->packet_count[category] = 0;
	pdev->packet_loss_count[category] = 0;
}
+
/**
 * ol_tx_delay_avg() - rounded average delay without a 64-bit divide
 * @sum: total of the delay samples (ticks)
 * @num: number of samples accumulated into @sum
 *
 * To avoid a 64-bit division, the sum is shifted down until it fits in
 * 32 bits and the sample count is shifted by the same amount, leaving
 * the quotient (nearly) unchanged.
 *
 * Fix: if @num was 0, or the scaling shifted @num down to 0 (each
 * sample averaging more than 32 bits of ticks), the final division was
 * a divide-by-zero.  Now returns 0 for no samples and saturates to
 * UINT32_MAX when the true average exceeds the return type's range.
 *
 * Return: sum / num rounded to the nearest integer, 0 if @num is 0,
 * UINT32_MAX if the average cannot be represented in 32 bits.
 */
uint32_t ol_tx_delay_avg(uint64_t sum, uint32_t num)
{
	uint32_t sum32;
	uint32_t num_scaled = num;
	int shift = 0;
	/*
	 * To avoid doing a 64-bit divide, shift the sum down until it is
	 * no more than 32 bits (and shift the denominator to match).
	 */
	while ((sum >> 32) != 0) {
		sum >>= 1;
		shift++;
	}
	sum32 = (uint32_t) sum;
	num_scaled >>= shift;
	if (num_scaled == 0) {
		/* no samples, or average too large for a uint32_t */
		return num ? UINT32_MAX : 0;
	}
	return (sum32 + (num_scaled >> 1)) / num_scaled; /* round to nearest */
}
+
/**
 * ol_tx_delay() - report average queue and transmit delays for a category
 * @pdev: physical device instance
 * @queue_delay_microsec: [out] average host queuing delay
 * @tx_delay_microsec: [out] average target transmit delay
 * @category: delay category (TID) to report
 *
 * Reads the completed (non-in-progress) copy of the ping-pong averaging
 * data under the tx_delay mutex.  Either output is 0 when no samples
 * have been collected yet for that average.
 */
void
ol_tx_delay(ol_txrx_pdev_handle pdev,
	    uint32_t *queue_delay_microsec,
	    uint32_t *tx_delay_microsec, int category)
{
	int index;
	uint32_t avg_delay_ticks;
	struct ol_tx_delay_data *data;

	cdf_assert(category >= 0 && category < QCA_TX_DELAY_NUM_CATEGORIES);

	cdf_spin_lock_bh(&pdev->tx_delay.mutex);
	/* read the copy that is NOT currently being updated */
	index = 1 - pdev->tx_delay.cats[category].in_progress_idx;

	data = &pdev->tx_delay.cats[category].copies[index];

	if (data->avgs.transmit_num > 0) {
		avg_delay_ticks =
			ol_tx_delay_avg(data->avgs.transmit_sum_ticks,
					data->avgs.transmit_num);
		/*
		 * ticks -> usec via the msec converter on ticks*1000;
		 * NOTE(review): avg_delay_ticks * 1000 can overflow
		 * 32 bits for large delays - confirm expected range.
		 */
		*tx_delay_microsec =
			cdf_system_ticks_to_msecs(avg_delay_ticks * 1000);
	} else {
		/*
		 * This case should only happen if there's a query
		 * within 5 sec after the first tx data frame.
		 */
		*tx_delay_microsec = 0;
	}
	if (data->avgs.queue_num > 0) {
		avg_delay_ticks =
			ol_tx_delay_avg(data->avgs.queue_sum_ticks,
					data->avgs.queue_num);
		*queue_delay_microsec =
			cdf_system_ticks_to_msecs(avg_delay_ticks * 1000);
	} else {
		/*
		 * This case should only happen if there's a query
		 * within 5 sec after the first tx data frame.
		 */
		*queue_delay_microsec = 0;
	}

	cdf_spin_unlock_bh(&pdev->tx_delay.mutex);
}
+
/**
 * ol_tx_delay_hist() - report the queuing-delay histogram for a category
 * @pdev: physical device instance
 * @report_bin_values: [out] QCA_TX_DELAY_HIST_REPORT_BINS entries
 * @category: delay category (TID) to report
 *
 * Compresses the internal histogram into report bins with exponentially
 * growing widths: report bin i sums internal bins [2^(i-1), 2^i).
 */
void
ol_tx_delay_hist(ol_txrx_pdev_handle pdev,
		 uint16_t *report_bin_values, int category)
{
	int index, i, j;
	struct ol_tx_delay_data *data;

	cdf_assert(category >= 0 && category < QCA_TX_DELAY_NUM_CATEGORIES);

	cdf_spin_lock_bh(&pdev->tx_delay.mutex);
	/* read the copy that is NOT currently being updated */
	index = 1 - pdev->tx_delay.cats[category].in_progress_idx;

	data = &pdev->tx_delay.cats[category].copies[index];

	for (i = 0, j = 0; i < QCA_TX_DELAY_HIST_REPORT_BINS - 1; i++) {
		uint16_t internal_bin_sum = 0;
		while (j < (1 << i))
			internal_bin_sum += data->hist_bins_queue[j++];

		report_bin_values[i] = internal_bin_sum;
	}
	/*
	 * NOTE(review): only internal bin j is reported as overflow; any
	 * internal bins beyond j are dropped - confirm whether the final
	 * report bin should instead sum all remaining internal bins.
	 */
	report_bin_values[i] = data->hist_bins_queue[j]; /* overflow */

	cdf_spin_unlock_bh(&pdev->tx_delay.mutex);
}
+
+#ifdef QCA_COMPUTE_TX_DELAY_PER_TID
+static inline uint8_t *ol_tx_dest_addr_find(struct ol_txrx_pdev_t *pdev,
+ cdf_nbuf_t tx_nbuf)
+{
+ uint8_t *hdr_ptr;
+ void *datap = cdf_nbuf_data(tx_nbuf);
+
+ if (pdev->frame_format == wlan_frm_fmt_raw) {
+ /* adjust hdr_ptr to RA */
+ struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;
+ hdr_ptr = wh->i_addr1;
+ } else if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
+ /* adjust hdr_ptr to RA */
+ struct ieee80211_frame *wh = (struct ieee80211_frame *)datap;
+ hdr_ptr = wh->i_addr1;
+ } else if (pdev->frame_format == wlan_frm_fmt_802_3) {
+ hdr_ptr = datap;
+ } else {
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+ "Invalid standard frame type: %d",
+ pdev->frame_format);
+ cdf_assert(0);
+ hdr_ptr = NULL;
+ }
+ return hdr_ptr;
+}
+
/**
 * ol_tx_delay_tid_from_l3_hdr() - derive a TID from a frame's L3 header
 * @pdev: physical device instance (supplies the frame format)
 * @msdu: the tx frame to classify
 * @tx_desc: tx descriptor (used to recognize management frames)
 *
 * Management and multicast frames get dedicated pseudo-TIDs; otherwise
 * the L2 header (802.3 or 802.11 + LLC/SNAP) is parsed to find the
 * ethertype, and the TID is taken from the IPv4 TOS / IPv6 traffic
 * class precedence bits.
 *
 * Return: TID in 0..7 for IP frames, a pseudo-TID for mgmt/mcast, or
 * ADF_NBUF_TX_EXT_TID_INVALID when no TID can be determined.
 */
static uint8_t
ol_tx_delay_tid_from_l3_hdr(struct ol_txrx_pdev_t *pdev,
			    cdf_nbuf_t msdu, struct ol_tx_desc_t *tx_desc)
{
	uint16_t ethertype;
	uint8_t *dest_addr, *l3_hdr;
	int is_mgmt, is_mcast;
	int l2_hdr_size;

	dest_addr = ol_tx_dest_addr_find(pdev, msdu);
	if (NULL == dest_addr)
		return ADF_NBUF_TX_EXT_TID_INVALID;

	is_mcast = IEEE80211_IS_MULTICAST(dest_addr);
	is_mgmt = tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE;
	if (is_mgmt) {
		return (is_mcast) ?
		       OL_TX_NUM_TIDS + OL_TX_VDEV_DEFAULT_MGMT :
		       HTT_TX_EXT_TID_MGMT;
	}
	if (is_mcast)
		return OL_TX_NUM_TIDS + OL_TX_VDEV_MCAST_BCAST;

	if (pdev->frame_format == wlan_frm_fmt_802_3) {
		struct ethernet_hdr_t *enet_hdr;
		enet_hdr = (struct ethernet_hdr_t *)cdf_nbuf_data(msdu);
		l2_hdr_size = sizeof(struct ethernet_hdr_t);
		ethertype =
			(enet_hdr->ethertype[0] << 8) | enet_hdr->ethertype[1];
		if (!IS_ETHERTYPE(ethertype)) {
			/* 802.3 length field: real type is in LLC/SNAP */
			struct llc_snap_hdr_t *llc_hdr;
			llc_hdr = (struct llc_snap_hdr_t *)
				  (cdf_nbuf_data(msdu) + l2_hdr_size);
			l2_hdr_size += sizeof(struct llc_snap_hdr_t);
			ethertype =
				(llc_hdr->ethertype[0] << 8) | llc_hdr->
				ethertype[1];
		}
	} else {
		/* 802.11 frame: ethertype lives in the LLC/SNAP header */
		struct llc_snap_hdr_t *llc_hdr;
		l2_hdr_size = sizeof(struct ieee80211_frame);
		llc_hdr = (struct llc_snap_hdr_t *)(cdf_nbuf_data(msdu)
						    + l2_hdr_size);
		l2_hdr_size += sizeof(struct llc_snap_hdr_t);
		ethertype =
			(llc_hdr->ethertype[0] << 8) | llc_hdr->ethertype[1];
	}
	l3_hdr = cdf_nbuf_data(msdu) + l2_hdr_size;
	if (ETHERTYPE_IPV4 == ethertype) {
		/* IP precedence: top 3 bits of the TOS byte */
		return (((struct ipv4_hdr_t *)l3_hdr)->tos >> 5) & 0x7;
	} else if (ETHERTYPE_IPV6 == ethertype) {
		return (ipv6_traffic_class((struct ipv6_hdr_t *)l3_hdr) >> 5) &
		       0x7;
	} else {
		return ADF_NBUF_TX_EXT_TID_INVALID;
	}
}
+#endif
+
/**
 * ol_tx_delay_category() - map a tx msdu to its delay-tracking category
 * @pdev: physical device instance
 * @msdu_id: host tx descriptor ID of the frame
 *
 * With per-TID tracking enabled, the category is the frame's TID (taken
 * from the nbuf, or derived from the L3 header when the nbuf has none).
 * Otherwise everything shares category 0.
 *
 * Return: category index, or -EINVAL if no TID can be determined.
 */
static int ol_tx_delay_category(struct ol_txrx_pdev_t *pdev, uint16_t msdu_id)
{
#ifdef QCA_COMPUTE_TX_DELAY_PER_TID
	struct ol_tx_desc_t *tx_desc = ol_tx_desc_find(pdev, msdu_id);
	uint8_t tid;

	cdf_nbuf_t msdu = tx_desc->netbuf;
	tid = cdf_nbuf_get_tid(msdu);
	if (tid == ADF_NBUF_TX_EXT_TID_INVALID) {
		tid = ol_tx_delay_tid_from_l3_hdr(pdev, msdu, tx_desc);
		if (tid == ADF_NBUF_TX_EXT_TID_INVALID) {
			/* TID could not be determined
			   (this is not an IP frame?) */
			return -EINVAL;
		}
	}
	return tid;
#else
	return 0;
#endif
}
+
+static inline int
+ol_tx_delay_hist_bin(struct ol_txrx_pdev_t *pdev, uint32_t delay_ticks)
+{
+ int bin;
+ /*
+ * For speed, multiply and shift to approximate a divide. This causes
+ * a small error, but the approximation error should be much less
+ * than the other uncertainties in the tx delay computation.
+ */
+ bin = (delay_ticks * pdev->tx_delay.hist_internal_bin_width_mult) >>
+ pdev->tx_delay.hist_internal_bin_width_shift;
+ if (bin >= QCA_TX_DELAY_HIST_INTERNAL_BINS)
+ bin = QCA_TX_DELAY_HIST_INTERNAL_BINS - 1;
+
+ return bin;
+}
+
/**
 * ol_tx_delay_compute() - accumulate delay statistics for a completion batch
 * @pdev: physical device instance
 * @status: tx status shared by the batch
 * @desc_ids: array of host tx descriptor IDs in the batch
 * @num_msdus: number of entries in @desc_ids
 *
 * On failure, only per-category loss counters are updated.  On success,
 * the inter-completion interval is added to the transmit-delay average,
 * each frame's time-in-system is added to the queue-delay average and
 * histogram, and the ping-pong averaging buffers are swapped once the
 * averaging period has elapsed.
 */
static void
ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
		    enum htt_tx_status status,
		    uint16_t *desc_ids, int num_msdus)
{
	int i, index, cat;
	uint32_t now_ticks = cdf_system_ticks();
	uint32_t tx_delay_transmit_ticks, tx_delay_queue_ticks;
	uint32_t avg_time_ticks;
	struct ol_tx_delay_data *data;

	cdf_assert(num_msdus > 0);

	/*
	 * keep static counters for total packet and lost packets
	 * reset them in ol_tx_delay(), function used to fetch the stats
	 */

	/* the whole batch is attributed to the first frame's category */
	cat = ol_tx_delay_category(pdev, desc_ids[0]);
	if (cat < 0 || cat >= QCA_TX_DELAY_NUM_CATEGORIES)
		return;

	pdev->packet_count[cat] = pdev->packet_count[cat] + num_msdus;
	if (status != htt_tx_status_ok) {
		for (i = 0; i < num_msdus; i++) {
			cat = ol_tx_delay_category(pdev, desc_ids[i]);
			if (cat < 0 || cat >= QCA_TX_DELAY_NUM_CATEGORIES)
				return;
			pdev->packet_loss_count[cat]++;
		}
		return;
	}

	/* since we may switch the ping-pong index, provide mutex w. readers */
	cdf_spin_lock_bh(&pdev->tx_delay.mutex);
	index = pdev->tx_delay.cats[cat].in_progress_idx;

	data = &pdev->tx_delay.cats[cat].copies[index];

	if (pdev->tx_delay.tx_compl_timestamp_ticks != 0) {
		tx_delay_transmit_ticks =
			now_ticks - pdev->tx_delay.tx_compl_timestamp_ticks;
		/*
		 * We'd like to account for the number of MSDUs that were
		 * transmitted together, but we don't know this.  All we know
		 * is the number of MSDUs that were acked together.
		 * Since the frame error rate is small, this is nearly the same
		 * as the number of frames transmitted together.
		 */
		data->avgs.transmit_sum_ticks += tx_delay_transmit_ticks;
		data->avgs.transmit_num += num_msdus;
	}
	pdev->tx_delay.tx_compl_timestamp_ticks = now_ticks;

	for (i = 0; i < num_msdus; i++) {
		int bin;
		uint16_t id = desc_ids[i];
		struct ol_tx_desc_t *tx_desc = ol_tx_desc_find(pdev, id);

		/* time from tx entry to this completion */
		tx_delay_queue_ticks =
			now_ticks - tx_desc->entry_timestamp_ticks;

		data->avgs.queue_sum_ticks += tx_delay_queue_ticks;
		data->avgs.queue_num++;
		bin = ol_tx_delay_hist_bin(pdev, tx_delay_queue_ticks);
		data->hist_bins_queue[bin]++;
	}

	/* check if it's time to start a new average */
	avg_time_ticks =
		now_ticks - pdev->tx_delay.cats[cat].avg_start_time_ticks;
	if (avg_time_ticks > pdev->tx_delay.avg_period_ticks) {
		/* swap ping-pong buffers and clear the new in-progress copy */
		pdev->tx_delay.cats[cat].avg_start_time_ticks = now_ticks;
		index = 1 - index;
		pdev->tx_delay.cats[cat].in_progress_idx = index;
		cdf_mem_zero(&pdev->tx_delay.cats[cat].copies[index],
			     sizeof(pdev->tx_delay.cats[cat].copies[index]));
	}

	cdf_spin_unlock_bh(&pdev->tx_delay.mutex);
}
+
+#endif /* QCA_COMPUTE_TX_DELAY */
diff --git a/dp/txrx/ol_tx_send.h b/dp/txrx/ol_tx_send.h
new file mode 100644
index 000000000000..db4881202350
--- /dev/null
+++ b/dp/txrx/ol_tx_send.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2011, 2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
/**
 * @file ol_tx_send.h
 * @brief API definitions for the tx send module within the data SW.
 */
+#ifndef _OL_TX_SEND__H_
+#define _OL_TX_SEND__H_
+
+#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <ol_txrx_types.h> /* ol_tx_send_t */
+
/**
 * @brief Flush (force-free) tx frames still held by the target,
 *	e.g. on surprise removal, so no tx descriptors are leaked.
 *
 * @param pdev - the physical device object
 */
void ol_tx_discard_target_frms(ol_txrx_pdev_handle pdev);
+
/**
 * @brief Send a tx frame to the target.
 * @details
 * On download failure the descriptor is freed and the target tx credit
 * refunded.
 *
 * @param pdev - the phy dev
 * @param tx_desc - the tx descriptor already set up for this frame
 * @param msdu - the tx frame
 */
void
ol_tx_send(struct ol_txrx_pdev_t *pdev,
	   struct ol_tx_desc_t *tx_desc, cdf_nbuf_t msdu);
+
+/**
+ * @brief Send a tx batch download to the target.
+ * @details
+ * This function is different from above in that
+ * it accepts a list of msdu's to be downloaded as a batch
+ *
+ * @param pdev - the phy dev
+ * @param msdu_list - the Head pointer to the Tx Batch
+ * @param num_msdus - Total msdus chained in msdu_list
+ */
+
+int
+ol_tx_send_batch(struct ol_txrx_pdev_t *pdev,
+ cdf_nbuf_t msdu_list, int num_msdus);
+
/**
 * @brief Send a tx frame with a non-std header or payload type to the target.
 * @details
 * On download failure the descriptor is freed and the target tx credit
 * refunded.
 *
 * @param pdev - the phy dev
 * @param tx_desc - the tx descriptor already set up for this frame
 * @param msdu - the tx frame
 * @param pkt_type - what kind of non-std frame is being sent
 */
void
ol_tx_send_nonstd(struct ol_txrx_pdev_t *pdev,
		  struct ol_tx_desc_t *tx_desc,
		  cdf_nbuf_t msdu, enum htt_pkt_type pkt_type);
#endif /* _OL_TX_SEND__H_ */
diff --git a/dp/txrx/ol_txrx.c b/dp/txrx/ol_txrx.c
new file mode 100644
index 000000000000..f1aa269bccee
--- /dev/null
+++ b/dp/txrx/ol_txrx.c
@@ -0,0 +1,3398 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/*=== includes ===*/
+/* header files for OS primitives */
+#include <osdep.h> /* uint32_t, etc. */
+#include <cdf_memory.h> /* cdf_mem_malloc,free */
+#include <cdf_types.h> /* cdf_device_t, cdf_print */
+#include <cdf_lock.h> /* cdf_spinlock */
+#include <cdf_atomic.h> /* cdf_atomic_read */
+
+/* Required for WLAN_FEATURE_FASTPATH */
+#include <ce_api.h>
+/* header files for utilities */
+#include <cds_queue.h> /* TAILQ */
+
+/* header files for configuration API */
+#include <ol_cfg.h> /* ol_cfg_is_high_latency */
+#include <ol_if_athvar.h>
+
+/* header files for HTT API */
+#include <ol_htt_api.h>
+#include <ol_htt_tx_api.h>
+
+/* header files for OS shim API */
+#include <ol_osif_api.h>
+
+/* header files for our own APIs */
+#include <ol_txrx_api.h>
+#include <ol_txrx_dbg.h>
+#include <ol_txrx_ctrl_api.h>
+#include <ol_txrx_osif_api.h>
+/* header files for our internal definitions */
+#include <ol_txrx_internal.h> /* TXRX_ASSERT, etc. */
+#include <wdi_event.h> /* WDI events */
+#include <ol_txrx_types.h> /* ol_txrx_pdev_t, etc. */
+#include <ol_ctrl_txrx_api.h>
+#include <ol_tx.h> /* ol_tx_ll */
+#include <ol_rx.h> /* ol_rx_deliver */
+#include <ol_txrx_peer_find.h> /* ol_txrx_peer_find_attach, etc. */
+#include <ol_rx_pn.h> /* ol_rx_pn_check, etc. */
+#include <ol_rx_fwd.h> /* ol_rx_fwd_check, etc. */
+#include <ol_rx_reorder_timeout.h> /* OL_RX_REORDER_TIMEOUT_INIT, etc. */
+#include <ol_rx_reorder.h>
+#include <ol_tx_send.h> /* ol_tx_discard_target_frms */
+#include <ol_tx_desc.h> /* ol_tx_desc_frame_free */
+#include <ol_tx_queue.h>
+#include <ol_txrx.h>
+#include "wma.h"
+
+
+
+/*=== function definitions ===*/
+
+/**
+ * ol_tx_set_is_mgmt_over_wmi_enabled() - set flag to indicate that mgmt over
+ * wmi is enabled or not.
+ * @value: 1 for enabled/ 0 for disable
+ *
+ * Return: None
+ */
+void ol_tx_set_is_mgmt_over_wmi_enabled(uint8_t value)
+{
+ struct ol_txrx_pdev_t *txrx_pdev;
+
+ /* the txrx pdev is looked up from the global CDS context */
+ txrx_pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+ if (txrx_pdev)
+ txrx_pdev->is_mgmt_over_wmi_enabled = value;
+ else
+ cdf_print("%s: pdev is NULL\n", __func__);
+}
+
+/**
+ * ol_tx_get_is_mgmt_over_wmi_enabled() - get value of is_mgmt_over_wmi_enabled
+ *
+ * Return: is_mgmt_over_wmi_enabled
+ */
+uint8_t ol_tx_get_is_mgmt_over_wmi_enabled(void)
+{
+ struct ol_txrx_pdev_t *txrx_pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+
+ if (txrx_pdev)
+ return txrx_pdev->is_mgmt_over_wmi_enabled;
+
+ /* no pdev yet: report the feature as disabled */
+ cdf_print("%s: pdev is NULL\n", __func__);
+ return 0;
+}
+
+
+#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
+/**
+ * ol_txrx_find_peer_by_addr_and_vdev() - look up a peer by MAC address,
+ * restricted to the given vdev
+ * @pdev: txrx physical device
+ * @vdev: virtual device the peer must belong to
+ * @peer_addr: MAC address to search for
+ * @peer_id: output - filled with the peer's local id on success
+ *
+ * Return: the matching peer object, or NULL if none found on this vdev
+ */
+ol_txrx_peer_handle
+ol_txrx_find_peer_by_addr_and_vdev(ol_txrx_pdev_handle pdev,
+ ol_txrx_vdev_handle vdev,
+ uint8_t *peer_addr, uint8_t *peer_id)
+{
+ struct ol_txrx_peer_t *peer;
+
+ peer = ol_txrx_peer_vdev_find_hash(pdev, vdev, peer_addr, 0, 1);
+ if (!peer)
+ return NULL;
+ *peer_id = peer->local_id;
+ /*
+ * NOTE(review): the reference taken by the hash find is dropped
+ * here, before the peer pointer is returned, so the caller holds
+ * no reference on the returned peer - confirm callers tolerate
+ * concurrent peer deletion.
+ */
+ cdf_atomic_dec(&peer->ref_cnt);
+ return peer;
+}
+
+CDF_STATUS ol_txrx_get_vdevid(struct ol_txrx_peer_t *peer, uint8_t *vdev_id)
+{
+ /* a NULL peer cannot supply a vdev id */
+ if (peer == NULL) {
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+ "peer argument is null!!");
+ return CDF_STATUS_E_FAILURE;
+ }
+ *vdev_id = peer->vdev->vdev_id;
+ return CDF_STATUS_SUCCESS;
+}
+
+void *ol_txrx_get_vdev_by_sta_id(uint8_t sta_id)
+{
+ ol_txrx_pdev_handle pdev;
+ struct ol_txrx_peer_t *peer;
+
+ /* reject out-of-range station ids up front */
+ if (sta_id >= WLAN_MAX_STA_COUNT) {
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+ "Invalid sta id passed");
+ return NULL;
+ }
+
+ pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+ if (!pdev) {
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+ "PDEV not found for sta_id [%d]", sta_id);
+ return NULL;
+ }
+
+ /* the station id doubles as the peer's local id */
+ peer = ol_txrx_peer_find_by_local_id(pdev, sta_id);
+ if (!peer) {
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+ "PEER [%d] not found", sta_id);
+ return NULL;
+ }
+
+ return peer->vdev;
+}
+
+/**
+ * ol_txrx_find_peer_by_addr() - look up a peer by MAC address across
+ * all vdevs of the pdev
+ * @pdev: txrx physical device
+ * @peer_addr: MAC address to search for
+ * @peer_id: output - filled with the peer's local id on success
+ *
+ * Return: the matching peer object, or NULL if not found
+ */
+ol_txrx_peer_handle ol_txrx_find_peer_by_addr(ol_txrx_pdev_handle pdev,
+ uint8_t *peer_addr,
+ uint8_t *peer_id)
+{
+ struct ol_txrx_peer_t *peer;
+
+ peer = ol_txrx_peer_find_hash_find(pdev, peer_addr, 0, 1);
+ if (!peer)
+ return NULL;
+ *peer_id = peer->local_id;
+ /*
+ * NOTE(review): the reference taken by the hash find is dropped
+ * before returning, so the caller holds no reference on the
+ * returned peer - confirm callers tolerate concurrent deletion.
+ */
+ cdf_atomic_dec(&peer->ref_cnt);
+ return peer;
+}
+
+/**
+ * ol_txrx_local_peer_id() - return the peer's host-assigned local id
+ * @peer: peer object (must be non-NULL; not checked here)
+ *
+ * Return: peer->local_id as assigned by ol_txrx_local_peer_id_alloc()
+ */
+uint16_t ol_txrx_local_peer_id(ol_txrx_peer_handle peer)
+{
+ return peer->local_id;
+}
+
+ol_txrx_peer_handle
+ol_txrx_peer_find_by_local_id(struct ol_txrx_pdev_t *pdev,
+ uint8_t local_peer_id)
+{
+ struct ol_txrx_peer_t *found = NULL;
+
+ /* the map[] array is only defined for valid, in-range local ids */
+ if ((local_peer_id != OL_TXRX_INVALID_LOCAL_PEER_ID) &&
+ (local_peer_id < OL_TXRX_NUM_LOCAL_PEER_IDS)) {
+ cdf_spin_lock_bh(&pdev->local_peer_ids.lock);
+ found = pdev->local_peer_ids.map[local_peer_id];
+ cdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
+ }
+
+ return found;
+}
+
+static void ol_txrx_local_peer_id_pool_init(struct ol_txrx_pdev_t *pdev)
+{
+ int i;
+
+ /* point the freelist to the first ID */
+ pdev->local_peer_ids.freelist = 0;
+
+ /* link each ID to the next one */
+ for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
+ pdev->local_peer_ids.pool[i] = i + 1;
+ pdev->local_peer_ids.map[i] = NULL;
+ }
+
+ /*
+ * link the last ID to itself, to mark the end of the list
+ * (note: this writes pool[OL_TXRX_NUM_LOCAL_PEER_IDS], so the
+ * pool[] array must hold OL_TXRX_NUM_LOCAL_PEER_IDS + 1 entries;
+ * the last entry is purely the list-end marker, never a usable ID)
+ */
+ i = OL_TXRX_NUM_LOCAL_PEER_IDS;
+ pdev->local_peer_ids.pool[i] = i;
+
+ cdf_spinlock_init(&pdev->local_peer_ids.lock);
+}
+
+static void
+ol_txrx_local_peer_id_alloc(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_peer_t *peer)
+{
+ int head;
+
+ cdf_spin_lock_bh(&pdev->local_peer_ids.lock);
+ head = pdev->local_peer_ids.freelist;
+ if (pdev->local_peer_ids.pool[head] != head) {
+ /* pop the head ID off the freelist and map it to the peer */
+ peer->local_id = head;
+ pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[head];
+ pdev->local_peer_ids.map[head] = peer;
+ } else {
+ /* only the end-of-list marker remains: no IDs available */
+ peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
+ }
+ cdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
+}
+
+static void
+ol_txrx_local_peer_id_free(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_peer_t *peer)
+{
+ int id = peer->local_id;
+
+ /* nothing to do for an unassigned or out-of-range id */
+ if ((id == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
+ (id >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
+ return;
+ }
+
+ /* unmap the peer and push the ID back onto the freelist head */
+ cdf_spin_lock_bh(&pdev->local_peer_ids.lock);
+ pdev->local_peer_ids.map[id] = NULL;
+ pdev->local_peer_ids.pool[id] = pdev->local_peer_ids.freelist;
+ pdev->local_peer_ids.freelist = id;
+ cdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
+}
+
+/* tear down the lock created by ol_txrx_local_peer_id_pool_init() */
+static void ol_txrx_local_peer_id_cleanup(struct ol_txrx_pdev_t *pdev)
+{
+ cdf_spinlock_destroy(&pdev->local_peer_ids.lock);
+}
+
+#else
+#define ol_txrx_local_peer_id_pool_init(pdev) /* no-op */
+#define ol_txrx_local_peer_id_alloc(pdev, peer) /* no-op */
+#define ol_txrx_local_peer_id_free(pdev, peer) /* no-op */
+#define ol_txrx_local_peer_id_cleanup(pdev) /* no-op */
+#endif
+
+#ifdef WLAN_FEATURE_FASTPATH
+/**
+ * setup_fastpath_ce_handles() Update pdev with ce_handle for fastpath use.
+ *
+ * @osc: pointer to HIF context
+ * @pdev: pointer to ol pdev
+ *
+ * Return: void
+ */
+static inline void
+setup_fastpath_ce_handles(struct ol_softc *osc, struct ol_txrx_pdev_t *pdev)
+{
+ /*
+ * Before the HTT attach, set up the CE handles
+ * CE handles are (struct CE_state *)
+ * This is only required in the fast path
+ */
+ /* cache the H2T message copy-engine handle on the pdev so the tx
+ * fastpath can post to it directly without a per-packet lookup */
+ pdev->ce_tx_hdl = (struct CE_handle *)
+ osc->ce_id_to_state[CE_HTT_H2T_MSG];
+
+}
+
+#else /* not WLAN_FEATURE_FASTPATH */
+/* no-op stub: nothing to cache when WLAN_FEATURE_FASTPATH is disabled */
+static inline void
+setup_fastpath_ce_handles(struct ol_softc *osc, struct ol_txrx_pdev_t *pdev)
+{
+}
+#endif /* WLAN_FEATURE_FASTPATH */
+
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+/**
+ * ol_tx_set_desc_global_pool_size() - set global pool size
+ * @num_msdu_desc: total number of descriptors
+ *
+ * Return: none
+ */
+void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc)
+{
+ struct ol_txrx_pdev_t *txrx_pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+
+ if (!txrx_pdev) {
+ cdf_print("%s: pdev is NULL\n", __func__);
+ return;
+ }
+
+ txrx_pdev->num_msdu_desc = num_msdu_desc;
+ /* reserve extra descriptors for mgmt frames sent via HTT */
+ if (!ol_tx_get_is_mgmt_over_wmi_enabled())
+ txrx_pdev->num_msdu_desc += TX_FLOW_MGMT_POOL_SIZE;
+ TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Global pool size: %d\n",
+ txrx_pdev->num_msdu_desc);
+}
+
+/**
+ * ol_tx_get_desc_global_pool_size() - get global pool size
+ * @pdev: pdev handle
+ *
+ * Return: global pool size
+ */
+static inline
+uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
+{
+ /* flow-control v2: size was set via ol_tx_set_desc_global_pool_size() */
+ return pdev->num_msdu_desc;
+}
+
+/**
+ * ol_tx_get_total_free_desc() - get total free descriptors
+ * @pdev: pdev handle
+ *
+ * Return: total free descriptors
+ */
+static inline
+uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
+{
+ struct ol_tx_flow_pool_t *pool = NULL;
+ uint32_t free_desc;
+
+ /*
+ * NOTE(review): num_free is read without holding any lock here;
+ * the sum below is therefore only an approximate snapshot -
+ * confirm callers use it for reporting only.
+ */
+ free_desc = pdev->tx_desc.num_free;
+ /* add the available descriptors of every flow pool, each under
+ * its own lock, while holding the pool-list lock */
+ cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
+ TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
+ flow_pool_list_elem) {
+ cdf_spin_lock_bh(&pool->flow_pool_lock);
+ free_desc += pool->avail_desc;
+ cdf_spin_unlock_bh(&pool->flow_pool_lock);
+ }
+ cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
+
+ return free_desc;
+}
+
+#else
+/**
+ * ol_tx_get_desc_global_pool_size() - get global pool size
+ * @pdev: pdev handle
+ *
+ * Return: global pool size
+ */
+static inline
+uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
+{
+ /* without flow-control v2: pool size tracks the target's tx credit */
+ return ol_cfg_target_tx_credit(pdev->ctrl_pdev);
+}
+
+/**
+ * ol_tx_get_total_free_desc() - get total free descriptors
+ * @pdev: pdev handle
+ *
+ * Return: total free descriptors
+ */
+static inline
+uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
+{
+ /* without flow pools, the global freelist count is the whole story */
+ return pdev->tx_desc.num_free;
+}
+
+#endif
+
+/**
+ * ol_txrx_pdev_alloc() - allocate txrx pdev
+ * @ctrl_pdev: cfg pdev
+ * @htc_pdev: HTC pdev
+ * @osdev: os dev
+ *
+ * Return: txrx pdev handle
+ * NULL for failure
+ */
+ol_txrx_pdev_handle
+ol_txrx_pdev_alloc(ol_pdev_handle ctrl_pdev,
+ HTC_HANDLE htc_pdev, cdf_device_t osdev)
+{
+ struct ol_txrx_pdev_t *pdev;
+ int i;
+
+ pdev = cdf_mem_malloc(sizeof(*pdev));
+ if (!pdev)
+ goto fail0;
+ cdf_mem_zero(pdev, sizeof(*pdev));
+
+ /* tx completions are requested unless free-at-download is configured */
+ pdev->cfg.default_tx_comp_req = !ol_cfg_tx_free_at_download(ctrl_pdev);
+
+ /* store provided params */
+ pdev->ctrl_pdev = ctrl_pdev;
+ pdev->osdev = osdev;
+
+ /* identity map: HTT security type index -> ol security type enum */
+ for (i = 0; i < htt_num_sec_types; i++)
+ pdev->sec_types[i] = (enum ol_sec_type)i;
+
+ TXRX_STATS_INIT(pdev);
+
+ TAILQ_INIT(&pdev->vdev_list);
+
+ /* do initial set up of the peer ID -> peer object lookup map */
+ if (ol_txrx_peer_find_attach(pdev))
+ goto fail1;
+
+ pdev->htt_pdev =
+ htt_pdev_alloc(pdev, ctrl_pdev, htc_pdev, osdev);
+ if (!pdev->htt_pdev)
+ goto fail2;
+
+ return pdev;
+
+/* error handling: unwind in reverse order of setup */
+fail2:
+ ol_txrx_peer_find_detach(pdev);
+
+fail1:
+ cdf_mem_free(pdev);
+
+fail0:
+ return NULL;
+}
+
+/**
+ * ol_txrx_pdev_attach() - attach txrx pdev
+ * @pdev: txrx pdev
+ *
+ * Return: 0 for success
+ */
+int
+ol_txrx_pdev_attach(ol_txrx_pdev_handle pdev)
+{
+ uint16_t i;
+ uint16_t fail_idx = 0;
+ int ret = 0;
+ uint16_t desc_pool_size;
+ struct ol_softc *osc = cds_get_context(CDF_MODULE_ID_HIF);
+
+ uint16_t desc_element_size = sizeof(union ol_tx_desc_list_elem_t);
+ union ol_tx_desc_list_elem_t *c_element;
+ unsigned int sig_bit;
+ uint16_t desc_per_page;
+
+ if (!osc) {
+ ret = -EINVAL;
+ goto ol_attach_fail;
+ }
+
+ /*
+ * For LL, limit the number of host's tx descriptors to match
+ * the number of target FW tx descriptors.
+ * This simplifies the FW, by ensuring the host will never
+ * download more tx descriptors than the target has space for.
+ * The FW will drop/free low-priority tx descriptors when it
+ * starts to run low, so that in theory the host should never
+ * run out of tx descriptors.
+ */
+
+ /* initialize the counter of the target's tx buffer availability */
+ cdf_atomic_init(&pdev->target_tx_credit);
+ cdf_atomic_init(&pdev->orig_target_tx_credit);
+ /*
+ * LL - initialize the target credit outselves.
+ * HL - wait for a HTT target credit initialization during htt_attach.
+ */
+
+ cdf_atomic_add(ol_cfg_target_tx_credit(pdev->ctrl_pdev),
+ &pdev->target_tx_credit);
+
+ desc_pool_size = ol_tx_get_desc_global_pool_size(pdev);
+
+ setup_fastpath_ce_handles(osc, pdev);
+
+ ret = htt_attach(pdev->htt_pdev, desc_pool_size);
+ if (ret)
+ goto ol_attach_fail;
+
+ /* Update CE's pkt download length */
+ ce_pkt_dl_len_set((void *)osc, htt_pkt_dl_len_get(pdev->htt_pdev));
+
+ /* Attach micro controller data path offload resource */
+ if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
+ if (htt_ipa_uc_attach(pdev->htt_pdev))
+ goto uc_attach_fail;
+
+ /* Calculate single element reserved size power of 2 */
+ pdev->tx_desc.desc_reserved_size = cdf_get_pwr2(desc_element_size);
+ cdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_desc.desc_pages,
+ pdev->tx_desc.desc_reserved_size, desc_pool_size, 0, true);
+ if ((0 == pdev->tx_desc.desc_pages.num_pages) ||
+ (NULL == pdev->tx_desc.desc_pages.cacheable_pages)) {
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+ "Page alloc fail");
+ goto page_alloc_fail;
+ }
+ desc_per_page = pdev->tx_desc.desc_pages.num_element_per_page;
+ pdev->tx_desc.offset_filter = desc_per_page - 1;
+ /* Calculate page divider to find page number */
+ sig_bit = 0;
+ while (desc_per_page) {
+ sig_bit++;
+ desc_per_page = desc_per_page >> 1;
+ }
+ pdev->tx_desc.page_divider = (sig_bit - 1);
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+ "page_divider 0x%x, offset_filter 0x%x num elem %d, ol desc num page %d, ol desc per page %d",
+ pdev->tx_desc.page_divider, pdev->tx_desc.offset_filter,
+ desc_pool_size, pdev->tx_desc.desc_pages.num_pages,
+ pdev->tx_desc.desc_pages.num_element_per_page);
+
+ /*
+ * Each SW tx desc (used only within the tx datapath SW) has a
+ * matching HTT tx desc (used for downloading tx meta-data to FW/HW).
+ * Go ahead and allocate the HTT tx desc and link it with the SW tx
+ * desc now, to avoid doing it during time-critical transmit.
+ */
+ pdev->tx_desc.pool_size = desc_pool_size;
+ pdev->tx_desc.freelist =
+ (union ol_tx_desc_list_elem_t *)
+ (*pdev->tx_desc.desc_pages.cacheable_pages);
+ c_element = pdev->tx_desc.freelist;
+ for (i = 0; i < desc_pool_size; i++) {
+ void *htt_tx_desc;
+ void *htt_frag_desc = NULL;
+ uint32_t frag_paddr_lo = 0;
+ uint32_t paddr_lo;
+
+ if (i == (desc_pool_size - 1))
+ c_element->next = NULL;
+ else
+ c_element->next = (union ol_tx_desc_list_elem_t *)
+ ol_tx_desc_find(pdev, i + 1);
+
+ htt_tx_desc = htt_tx_desc_alloc(pdev->htt_pdev, &paddr_lo, i);
+ if (!htt_tx_desc) {
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_FATAL,
+ "%s: failed to alloc HTT tx desc (%d of %d)",
+ __func__, i, desc_pool_size);
+ fail_idx = i;
+ goto desc_alloc_fail;
+ }
+
+ c_element->tx_desc.htt_tx_desc = htt_tx_desc;
+ c_element->tx_desc.htt_tx_desc_paddr = paddr_lo;
+ ret = htt_tx_frag_alloc(pdev->htt_pdev,
+ i, &frag_paddr_lo, &htt_frag_desc);
+ if (ret) {
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+ "%s: failed to alloc HTT frag dsc (%d/%d)",
+ __func__, i, desc_pool_size);
+ /*
+ * NOTE(review): the htt_tx_desc allocated just above
+ * for this index is not freed by the desc_alloc_fail
+ * loop (it only frees indices < fail_idx), so it
+ * appears to leak here - confirm.
+ */
+ fail_idx = i;
+ goto desc_alloc_fail;
+ }
+ if (!ret && htt_frag_desc) {
+ /* Initialize the first 6 words (TSO flags)
+ of the frag descriptor */
+ memset(htt_frag_desc, 0, 6 * sizeof(uint32_t));
+ c_element->tx_desc.htt_frag_desc = htt_frag_desc;
+ c_element->tx_desc.htt_frag_desc_paddr = frag_paddr_lo;
+ }
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+ "%s:%d - %d FRAG VA 0x%p FRAG PA 0x%x",
+ __func__, __LINE__, i,
+ c_element->tx_desc.htt_frag_desc,
+ c_element->tx_desc.htt_frag_desc_paddr);
+#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
+ c_element->tx_desc.pkt_type = 0xff;
+#ifdef QCA_COMPUTE_TX_DELAY
+ c_element->tx_desc.entry_timestamp_ticks =
+ 0xffffffff;
+#endif
+#endif
+ c_element->tx_desc.id = i;
+ cdf_atomic_init(&c_element->tx_desc.ref_cnt);
+ c_element = c_element->next;
+ /*
+ * NOTE(review): fail_idx ends at desc_pool_size - 1 after a
+ * fully successful loop, so a later jump to control_init_fail
+ * frees only indices < desc_pool_size - 1, apparently leaking
+ * the final descriptor - confirm whether fail_idx should be
+ * i + 1 here.
+ */
+ fail_idx = i;
+ }
+
+ /* link SW tx descs into a freelist */
+ pdev->tx_desc.num_free = desc_pool_size;
+ TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
+ "%s first tx_desc:0x%p Last tx desc:0x%p\n", __func__,
+ (uint32_t *) pdev->tx_desc.freelist,
+ (uint32_t *) (pdev->tx_desc.freelist + desc_pool_size));
+
+ /* check what format of frames are expected to be delivered by the OS */
+ pdev->frame_format = ol_cfg_frame_type(pdev->ctrl_pdev);
+ if (pdev->frame_format == wlan_frm_fmt_native_wifi)
+ pdev->htt_pkt_type = htt_pkt_type_native_wifi;
+ else if (pdev->frame_format == wlan_frm_fmt_802_3) {
+ if (ol_cfg_is_ce_classify_enabled(pdev->ctrl_pdev))
+ pdev->htt_pkt_type = htt_pkt_type_eth2;
+ else
+ pdev->htt_pkt_type = htt_pkt_type_ethernet;
+ } else {
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+ "%s Invalid standard frame type: %d",
+ __func__, pdev->frame_format);
+ goto control_init_fail;
+ }
+
+ /* setup the global rx defrag waitlist */
+ TAILQ_INIT(&pdev->rx.defrag.waitlist);
+
+ /* configure where defrag timeout and duplicate detection is handled */
+ pdev->rx.flags.defrag_timeout_check =
+ pdev->rx.flags.dup_check =
+ ol_cfg_rx_host_defrag_timeout_duplicate_check(pdev->ctrl_pdev);
+
+#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
+ /* Need to revisit this part. Currently,hardcode to riva's caps */
+ pdev->target_tx_tran_caps = wlan_frm_tran_cap_raw;
+ pdev->target_rx_tran_caps = wlan_frm_tran_cap_raw;
+ /*
+ * The Riva HW de-aggregate doesn't have capability to generate 802.11
+ * header for non-first subframe of A-MSDU.
+ */
+ pdev->sw_subfrm_hdr_recovery_enable = 1;
+ /*
+ * The Riva HW doesn't have the capability to set Protected Frame bit
+ * in the MAC header for encrypted data frame.
+ */
+ pdev->sw_pf_proc_enable = 1;
+
+ if (pdev->frame_format == wlan_frm_fmt_802_3) {
+ /* sw llc process is only needed in
+ 802.3 to 802.11 transform case */
+ pdev->sw_tx_llc_proc_enable = 1;
+ pdev->sw_rx_llc_proc_enable = 1;
+ } else {
+ pdev->sw_tx_llc_proc_enable = 0;
+ pdev->sw_rx_llc_proc_enable = 0;
+ }
+
+ /* sw encap/decap is needed only when the target cannot translate
+ * the configured frame format itself */
+ switch (pdev->frame_format) {
+ case wlan_frm_fmt_raw:
+ pdev->sw_tx_encap =
+ pdev->target_tx_tran_caps & wlan_frm_tran_cap_raw
+ ? 0 : 1;
+ pdev->sw_rx_decap =
+ pdev->target_rx_tran_caps & wlan_frm_tran_cap_raw
+ ? 0 : 1;
+ break;
+ case wlan_frm_fmt_native_wifi:
+ pdev->sw_tx_encap =
+ pdev->
+ target_tx_tran_caps & wlan_frm_tran_cap_native_wifi
+ ? 0 : 1;
+ pdev->sw_rx_decap =
+ pdev->
+ target_rx_tran_caps & wlan_frm_tran_cap_native_wifi
+ ? 0 : 1;
+ break;
+ case wlan_frm_fmt_802_3:
+ pdev->sw_tx_encap =
+ pdev->target_tx_tran_caps & wlan_frm_tran_cap_8023
+ ? 0 : 1;
+ pdev->sw_rx_decap =
+ pdev->target_rx_tran_caps & wlan_frm_tran_cap_8023
+ ? 0 : 1;
+ break;
+ default:
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+ "Invalid std frame type; [en/de]cap: f:%x t:%x r:%x",
+ pdev->frame_format,
+ pdev->target_tx_tran_caps, pdev->target_rx_tran_caps);
+ goto control_init_fail;
+ }
+#endif
+
+ /*
+ * Determine what rx processing steps are done within the host.
+ * Possibilities:
+ * 1. Nothing - rx->tx forwarding and rx PN entirely within target.
+ * (This is unlikely; even if the target is doing rx->tx forwarding,
+ * the host should be doing rx->tx forwarding too, as a back up for
+ * the target's rx->tx forwarding, in case the target runs short on
+ * memory, and can't store rx->tx frames that are waiting for
+ * missing prior rx frames to arrive.)
+ * 2. Just rx -> tx forwarding.
+ * This is the typical configuration for HL, and a likely
+ * configuration for LL STA or small APs (e.g. retail APs).
+ * 3. Both PN check and rx -> tx forwarding.
+ * This is the typical configuration for large LL APs.
+ * Host-side PN check without rx->tx forwarding is not a valid
+ * configuration, since the PN check needs to be done prior to
+ * the rx->tx forwarding.
+ */
+ if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
+ /* PN check, rx-tx forwarding and rx reorder is done by
+ the target */
+ if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev))
+ pdev->rx_opt_proc = ol_rx_in_order_deliver;
+ else
+ pdev->rx_opt_proc = ol_rx_fwd_check;
+ } else {
+ if (ol_cfg_rx_pn_check(pdev->ctrl_pdev)) {
+ if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) {
+ /*
+ * PN check done on host,
+ * rx->tx forwarding not done at all.
+ */
+ pdev->rx_opt_proc = ol_rx_pn_check_only;
+ } else if (ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
+ /*
+ * Both PN check and rx->tx forwarding done
+ * on host.
+ */
+ pdev->rx_opt_proc = ol_rx_pn_check;
+ } else {
+#define TRACESTR01 "invalid config: if rx PN check is on the host,"\
+"rx->tx forwarding check needs to also be on the host"
+ CDF_TRACE(CDF_MODULE_ID_TXRX,
+ CDF_TRACE_LEVEL_ERROR,
+ "%s: %s", __func__, TRACESTR01);
+#undef TRACESTR01
+ goto control_init_fail;
+ }
+ } else {
+ /* PN check done on target */
+ if ((!ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) &&
+ ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
+ /*
+ * rx->tx forwarding done on host (possibly as
+ * back-up for target-side primary rx->tx
+ * forwarding)
+ */
+ pdev->rx_opt_proc = ol_rx_fwd_check;
+ } else {
+ /* rx->tx forwarding either done in target,
+ * or not done at all */
+ pdev->rx_opt_proc = ol_rx_deliver;
+ }
+ }
+ }
+
+ /* initialize mutexes for tx desc alloc and peer lookup */
+ cdf_spinlock_init(&pdev->tx_mutex);
+ cdf_spinlock_init(&pdev->peer_ref_mutex);
+ cdf_spinlock_init(&pdev->rx.mutex);
+ cdf_spinlock_init(&pdev->last_real_peer_mutex);
+ OL_TXRX_PEER_STATS_MUTEX_INIT(pdev);
+
+ if (OL_RX_REORDER_TRACE_ATTACH(pdev) != A_OK)
+ goto reorder_trace_attach_fail;
+
+ if (OL_RX_PN_TRACE_ATTACH(pdev) != A_OK)
+ goto pn_trace_attach_fail;
+
+#ifdef PERE_IP_HDR_ALIGNMENT_WAR
+ pdev->host_80211_enable = ol_scn_host_80211_enable_get(pdev->ctrl_pdev);
+#endif
+
+ /*
+ * WDI event attach
+ */
+ wdi_event_attach(pdev);
+
+ /*
+ * Initialize rx PN check characteristics for different security types.
+ */
+ cdf_mem_set(&pdev->rx_pn[0], sizeof(pdev->rx_pn), 0);
+
+ /* TKIP: 48-bit TSC, CCMP: 48-bit PN */
+ pdev->rx_pn[htt_sec_type_tkip].len =
+ pdev->rx_pn[htt_sec_type_tkip_nomic].len =
+ pdev->rx_pn[htt_sec_type_aes_ccmp].len = 48;
+ pdev->rx_pn[htt_sec_type_tkip].cmp =
+ pdev->rx_pn[htt_sec_type_tkip_nomic].cmp =
+ pdev->rx_pn[htt_sec_type_aes_ccmp].cmp = ol_rx_pn_cmp48;
+
+ /* WAPI: 128-bit PN */
+ pdev->rx_pn[htt_sec_type_wapi].len = 128;
+ pdev->rx_pn[htt_sec_type_wapi].cmp = ol_rx_pn_wapi_cmp;
+
+ OL_RX_REORDER_TIMEOUT_INIT(pdev);
+
+ TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1, "Created pdev %p\n", pdev);
+
+ pdev->cfg.host_addba = ol_cfg_host_addba(pdev->ctrl_pdev);
+
+#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
+#define OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT 3
+
+/* #if 1 -- TODO: clean this up */
+#define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT \
+ /* avg = 100% * new + 0% * old */ \
+ (1 << OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT)
+/*
+#else
+#define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT
+ //avg = 25% * new + 25% * old
+ (1 << (OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT-2))
+#endif
+*/
+ pdev->rssi_update_shift = OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT;
+ pdev->rssi_new_weight = OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT;
+#endif
+
+ ol_txrx_local_peer_id_pool_init(pdev);
+
+ pdev->cfg.ll_pause_txq_limit =
+ ol_tx_cfg_max_tx_queue_depth_ll(pdev->ctrl_pdev);
+
+#ifdef QCA_COMPUTE_TX_DELAY
+ cdf_mem_zero(&pdev->tx_delay, sizeof(pdev->tx_delay));
+ cdf_spinlock_init(&pdev->tx_delay.mutex);
+
+ /* initialize compute interval with 5 seconds (ESE default) */
+ pdev->tx_delay.avg_period_ticks = cdf_system_msecs_to_ticks(5000);
+ {
+ uint32_t bin_width_1000ticks;
+ bin_width_1000ticks =
+ cdf_system_msecs_to_ticks
+ (QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS
+ * 1000);
+ /*
+ * Compute a factor and shift that together are equal to the
+ * inverse of the bin_width time, so that rather than dividing
+ * by the bin width time, approximately the same result can be
+ * obtained much more efficiently by a multiply + shift.
+ * multiply_factor >> shift = 1 / bin_width_time, so
+ * multiply_factor = (1 << shift) / bin_width_time.
+ *
+ * Pick the shift semi-arbitrarily.
+ * If we knew statically what the bin_width would be, we could
+ * choose a shift that minimizes the error.
+ * Since the bin_width is determined dynamically, simply use a
+ * shift that is about half of the uint32_t size. This should
+ * result in a relatively large multiplier value, which
+ * minimizes error from rounding the multiplier to an integer.
+ * The rounding error only becomes significant if the tick units
+ * are on the order of 1 microsecond. In most systems, it is
+ * expected that the tick units will be relatively low-res,
+ * on the order of 1 millisecond. In such systems the rounding
+ * error is negligible.
+ * It would be more accurate to dynamically try out different
+ * shifts and choose the one that results in the smallest
+ * rounding error, but that extra level of fidelity is
+ * not needed.
+ */
+ pdev->tx_delay.hist_internal_bin_width_shift = 16;
+ pdev->tx_delay.hist_internal_bin_width_mult =
+ ((1 << pdev->tx_delay.hist_internal_bin_width_shift) *
+ 1000 + (bin_width_1000ticks >> 1)) /
+ bin_width_1000ticks;
+ }
+#endif /* QCA_COMPUTE_TX_DELAY */
+
+ /* Thermal Mitigation */
+ ol_tx_throttle_init(pdev);
+ ol_tso_seg_list_init(pdev, desc_pool_size);
+ ol_tx_register_flow_control(pdev);
+
+ return 0; /* success */
+
+/* error handling: each label undoes the steps that succeeded before it */
+pn_trace_attach_fail:
+ OL_RX_REORDER_TRACE_DETACH(pdev);
+
+reorder_trace_attach_fail:
+ cdf_spinlock_destroy(&pdev->tx_mutex);
+ cdf_spinlock_destroy(&pdev->peer_ref_mutex);
+ cdf_spinlock_destroy(&pdev->rx.mutex);
+ cdf_spinlock_destroy(&pdev->last_real_peer_mutex);
+ OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
+
+control_init_fail:
+desc_alloc_fail:
+ /* free the HTT tx descriptors allocated before the failure point */
+ for (i = 0; i < fail_idx; i++)
+ htt_tx_desc_free(pdev->htt_pdev,
+ (ol_tx_desc_find(pdev, i))->htt_tx_desc);
+
+ cdf_mem_multi_pages_free(pdev->osdev,
+ &pdev->tx_desc.desc_pages, 0, true);
+
+page_alloc_fail:
+ if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
+ htt_ipa_uc_detach(pdev->htt_pdev);
+uc_attach_fail:
+ htt_detach(pdev->htt_pdev);
+
+ol_attach_fail:
+ return ret; /* fail */
+}
+
+/**
+ * ol_txrx_pdev_attach_target() - complete the pdev attach on the target side
+ * @pdev: txrx pdev whose HTT instance should attach to the target
+ *
+ * Return: the A_STATUS result of htt_attach_target()
+ */
+A_STATUS ol_txrx_pdev_attach_target(ol_txrx_pdev_handle pdev)
+{
+ return htt_attach_target(pdev->htt_pdev);
+}
+
+/**
+ * ol_txrx_pdev_detach() - tear down the txrx pdev, undoing
+ * ol_txrx_pdev_alloc() / ol_txrx_pdev_attach()
+ * @pdev: txrx pdev to destroy (all vdevs must be detached already)
+ * @force: if non-zero, erase any zombie peers left in the hash table
+ */
+void ol_txrx_pdev_detach(ol_txrx_pdev_handle pdev, int force)
+{
+ int i;
+
+ /*checking to ensure txrx pdev structure is not NULL */
+ if (!pdev) {
+ TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "NULL pdev passed to %s\n", __func__);
+ return;
+ }
+ /* preconditions (note: redundant with the NULL check above) */
+ TXRX_ASSERT2(pdev);
+
+ /* check that the pdev has no vdevs allocated */
+ TXRX_ASSERT1(TAILQ_EMPTY(&pdev->vdev_list));
+
+ OL_RX_REORDER_TIMEOUT_CLEANUP(pdev);
+
+#ifdef QCA_SUPPORT_TX_THROTTLE
+ /* Thermal Mitigation */
+ cdf_softirq_timer_cancel(&pdev->tx_throttle.phase_timer);
+ cdf_softirq_timer_free(&pdev->tx_throttle.phase_timer);
+#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
+ cdf_softirq_timer_cancel(&pdev->tx_throttle.tx_timer);
+ cdf_softirq_timer_free(&pdev->tx_throttle.tx_timer);
+#endif
+#endif
+ ol_tso_seg_list_deinit(pdev);
+ ol_tx_deregister_flow_control(pdev);
+
+ if (force) {
+ /*
+ * The assertion above confirms that all vdevs within this pdev
+ * were detached. However, they may not have actually been
+ * deleted.
+ * If the vdev had peers which never received a PEER_UNMAP msg
+ * from the target, then there are still zombie peer objects,
+ * and the vdev parents of the zombie peers are also zombies,
+ * hanging around until their final peer gets deleted.
+ * Go through the peer hash table and delete any peers left.
+ * As a side effect, this will complete the deletion of any
+ * vdevs that are waiting for their peers to finish deletion.
+ */
+ TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1, "Force delete for pdev %p\n",
+ pdev);
+ ol_txrx_peer_find_hash_erase(pdev);
+ }
+
+ /* Stop the communication between HTT and target at first */
+ htt_detach_target(pdev->htt_pdev);
+
+ for (i = 0; i < pdev->tx_desc.pool_size; i++) {
+ void *htt_tx_desc;
+ struct ol_tx_desc_t *tx_desc;
+
+ tx_desc = ol_tx_desc_find(pdev, i);
+ /*
+ * Confirm that each tx descriptor is "empty", i.e. it has
+ * no tx frame attached.
+ * In particular, check that there are no frames that have
+ * been given to the target to transmit, for which the
+ * target has never provided a response.
+ */
+ if (cdf_atomic_read(&tx_desc->ref_cnt)) {
+ TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
+ "Warning: freeing tx frame (no compltn)\n");
+ ol_tx_desc_frame_free_nonstd(pdev,
+ tx_desc, 1);
+ }
+ htt_tx_desc = tx_desc->htt_tx_desc;
+ htt_tx_desc_free(pdev->htt_pdev, htt_tx_desc);
+ }
+
+ cdf_mem_multi_pages_free(pdev->osdev,
+ &pdev->tx_desc.desc_pages, 0, true);
+ pdev->tx_desc.freelist = NULL;
+
+ /* Detach micro controller data path offload resource */
+ if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
+ htt_ipa_uc_detach(pdev->htt_pdev);
+
+ htt_detach(pdev->htt_pdev);
+ htt_pdev_free(pdev->htt_pdev);
+
+ ol_txrx_peer_find_detach(pdev);
+
+ /* destroy the locks created during pdev attach */
+ cdf_spinlock_destroy(&pdev->tx_mutex);
+ cdf_spinlock_destroy(&pdev->peer_ref_mutex);
+ cdf_spinlock_destroy(&pdev->last_real_peer_mutex);
+ cdf_spinlock_destroy(&pdev->rx.mutex);
+#ifdef QCA_SUPPORT_TX_THROTTLE
+ /* Thermal Mitigation */
+ cdf_spinlock_destroy(&pdev->tx_throttle.mutex);
+#endif
+ OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
+
+ OL_RX_REORDER_TRACE_DETACH(pdev);
+ OL_RX_PN_TRACE_DETACH(pdev);
+ /*
+ * WDI event detach
+ */
+ wdi_event_detach(pdev);
+ ol_txrx_local_peer_id_cleanup(pdev);
+
+#ifdef QCA_COMPUTE_TX_DELAY
+ cdf_spinlock_destroy(&pdev->tx_delay.mutex);
+#endif
+}
+
+/**
+ * ol_txrx_vdev_attach() - allocate and initialize a txrx virtual device
+ * @pdev: parent txrx physical device
+ * @vdev_mac_addr: MAC address of the new vdev (OL_TXRX_MAC_ADDR_LEN bytes)
+ * @vdev_id: id assigned to the new vdev
+ * @op_mode: operating mode (matches the HTT op mode numbering - see below)
+ *
+ * Return: the new vdev handle, or NULL on allocation failure
+ */
+ol_txrx_vdev_handle
+ol_txrx_vdev_attach(ol_txrx_pdev_handle pdev,
+ uint8_t *vdev_mac_addr,
+ uint8_t vdev_id, enum wlan_op_mode op_mode)
+{
+ struct ol_txrx_vdev_t *vdev;
+
+ /* preconditions */
+ TXRX_ASSERT2(pdev);
+ TXRX_ASSERT2(vdev_mac_addr);
+
+ vdev = cdf_mem_malloc(sizeof(*vdev));
+ if (!vdev)
+ return NULL; /* failure */
+
+ /* store provided params */
+ vdev->pdev = pdev;
+ vdev->vdev_id = vdev_id;
+ vdev->opmode = op_mode;
+
+ /* default policy: keep unencrypted-frame dropping on, safemode off */
+ vdev->delete.pending = 0;
+ vdev->safemode = 0;
+ vdev->drop_unenc = 1;
+ vdev->num_filters = 0;
+
+ cdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr,
+ OL_TXRX_MAC_ADDR_LEN);
+
+ TAILQ_INIT(&vdev->peer_list);
+ vdev->last_real_peer = NULL;
+
+#ifdef QCA_IBSS_SUPPORT
+ vdev->ibss_peer_num = 0;
+ vdev->ibss_peer_heart_beat_timer = 0;
+#endif
+
+ /* set up the LL pause queue (empty) and its flush timer */
+ cdf_spinlock_init(&vdev->ll_pause.mutex);
+ vdev->ll_pause.paused_reason = 0;
+ vdev->ll_pause.txq.head = vdev->ll_pause.txq.tail = NULL;
+ vdev->ll_pause.txq.depth = 0;
+ cdf_softirq_timer_init(pdev->osdev,
+ &vdev->ll_pause.timer,
+ ol_tx_vdev_ll_pause_queue_send, vdev,
+ CDF_TIMER_TYPE_SW);
+ cdf_atomic_init(&vdev->os_q_paused);
+ cdf_atomic_set(&vdev->os_q_paused, 0);
+ vdev->tx_fl_lwm = 0;
+ vdev->tx_fl_hwm = 0;
+ vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
+ cdf_spinlock_init(&vdev->flow_control_lock);
+ vdev->osif_flow_control_cb = NULL;
+ vdev->osif_fc_ctx = NULL;
+
+ /* Default MAX Q depth for every VDEV */
+ vdev->ll_pause.max_q_depth =
+ ol_tx_cfg_max_tx_queue_depth_ll(vdev->pdev->ctrl_pdev);
+ /* add this vdev into the pdev's list */
+ TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
+
+ TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
+ "Created vdev %p (%02x:%02x:%02x:%02x:%02x:%02x)\n",
+ vdev,
+ vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
+ vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
+ vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
+
+ /*
+ * We've verified that htt_op_mode == wlan_op_mode,
+ * so no translation is needed.
+ */
+ htt_vdev_attach(pdev->htt_pdev, vdev_id, op_mode);
+
+ return vdev;
+}
+
+/**
+ * ol_txrx_osif_vdev_register() - bind the OS shim to a vdev
+ * @vdev: txrx vdev to bind
+ * @osif_vdev: opaque OS-interface handle stored for later upcalls
+ * @txrx_ops: filled in here with the LL tx entry points the shim calls
+ */
+void ol_txrx_osif_vdev_register(ol_txrx_vdev_handle vdev,
+				void *osif_vdev,
+				struct ol_txrx_osif_ops *txrx_ops)
+{
+	vdev->osif_dev = osif_vdev;
+	/* the vdev's tx function and the shim's std tx hook are the same */
+	txrx_ops->tx.std = vdev->tx = OL_TX_LL;
+	txrx_ops->tx.non_std = ol_tx_non_std_ll;
+}
+
+/**
+ * ol_txrx_set_curchan() - set current channel (stub)
+ * @pdev: txrx pdev handle (unused)
+ * @chan_mhz: channel frequency in MHz (unused)
+ *
+ * Intentionally a no-op; kept so callers have a stable entry point.
+ */
+void ol_txrx_set_curchan(ol_txrx_pdev_handle pdev, uint32_t chan_mhz)
+{
+}
+
+/**
+ * ol_txrx_set_safemode() - set the vdev's safemode flag
+ * @vdev: vdev to update
+ * @val: new safemode value (cleared to 0 at vdev attach)
+ */
+void ol_txrx_set_safemode(ol_txrx_vdev_handle vdev, uint32_t val)
+{
+	vdev->safemode = val;
+}
+
+/**
+ * ol_txrx_set_privacy_filters() - install privacy exemption filters
+ * @vdev: vdev to update
+ * @filters: array of struct privacy_exemption entries to copy in
+ * @num: number of entries in @filters
+ *
+ * NOTE(review): @num is not validated here against the capacity of
+ * vdev->privacy_filters - confirm that callers bound it.
+ */
+void
+ol_txrx_set_privacy_filters(ol_txrx_vdev_handle vdev,
+			    void *filters, uint32_t num)
+{
+	cdf_mem_copy(vdev->privacy_filters, filters,
+		     num * sizeof(struct privacy_exemption));
+	vdev->num_filters = num;
+}
+
+/**
+ * ol_txrx_set_drop_unenc() - set the drop-unencrypted flag of a vdev
+ * @vdev: vdev to update
+ * @val: nonzero to drop unencrypted frames (default 1 at vdev attach)
+ */
+void ol_txrx_set_drop_unenc(ol_txrx_vdev_handle vdev, uint32_t val)
+{
+	vdev->drop_unenc = val;
+}
+
+/**
+ * ol_txrx_vdev_detach() - tear down and free a txrx vdev object
+ * @vdev: the virtual device to delete
+ * @callback: invoked once the vdev is actually freed (may be deferred)
+ * @context: opaque argument passed to @callback
+ *
+ * Drains and unmaps tx frames still parked on the LL pause queue,
+ * unhooks the vdev from its pdev, and detaches it from HTT.  If peers
+ * are still attached, deletion is deferred: the vdev is only marked
+ * delete-pending and the final ol_txrx_peer_unref_delete() frees it
+ * and invokes @callback.
+ */
+void
+ol_txrx_vdev_detach(ol_txrx_vdev_handle vdev,
+		    ol_txrx_vdev_delete_cb callback, void *context)
+{
+	struct ol_txrx_pdev_t *pdev;
+
+	/* preconditions - checked before the handle is first dereferenced */
+	TXRX_ASSERT2(vdev);
+
+	pdev = vdev->pdev;
+
+	/* stop the pause-queue timer and free every queued tx frame */
+	cdf_spin_lock_bh(&vdev->ll_pause.mutex);
+	cdf_softirq_timer_cancel(&vdev->ll_pause.timer);
+	cdf_softirq_timer_free(&vdev->ll_pause.timer);
+	vdev->ll_pause.is_q_timer_on = false;
+	while (vdev->ll_pause.txq.head) {
+		cdf_nbuf_t next = cdf_nbuf_next(vdev->ll_pause.txq.head);
+		cdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
+		cdf_nbuf_unmap(pdev->osdev, vdev->ll_pause.txq.head,
+			       CDF_DMA_TO_DEVICE);
+		cdf_nbuf_tx_free(vdev->ll_pause.txq.head, NBUF_PKT_ERROR);
+		vdev->ll_pause.txq.head = next;
+	}
+	cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+	cdf_spinlock_destroy(&vdev->ll_pause.mutex);
+
+	/* disconnect the OS shim's flow-control hooks */
+	cdf_spin_lock_bh(&vdev->flow_control_lock);
+	vdev->osif_flow_control_cb = NULL;
+	vdev->osif_fc_ctx = NULL;
+	cdf_spin_unlock_bh(&vdev->flow_control_lock);
+	cdf_spinlock_destroy(&vdev->flow_control_lock);
+
+	/* remove the vdev from its parent pdev's list */
+	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
+
+	/*
+	 * Use peer_ref_mutex while accessing peer_list, in case
+	 * a peer is in the process of being removed from the list.
+	 */
+	cdf_spin_lock_bh(&pdev->peer_ref_mutex);
+	/* check that the vdev has no peers allocated */
+	if (!TAILQ_EMPTY(&vdev->peer_list)) {
+		/* debug print - will be removed later */
+		TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
+			   "%s: not deleting vdev object %p (%02x:%02x:%02x:%02x:%02x:%02x)"
+			   "until deletion finishes for all its peers\n",
+			   __func__, vdev,
+			   vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
+			   vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
+			   vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
+		/* indicate that the vdev needs to be deleted */
+		vdev->delete.pending = 1;
+		vdev->delete.callback = callback;
+		vdev->delete.context = context;
+		cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+		return;
+	}
+	cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+
+	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
+		   "%s: deleting vdev obj %p (%02x:%02x:%02x:%02x:%02x:%02x)\n",
+		   __func__, vdev,
+		   vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
+		   vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
+		   vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
+
+	htt_vdev_detach(pdev->htt_pdev, vdev->vdev_id);
+
+	/*
+	 * Doesn't matter if there are outstanding tx frames -
+	 * they will be freed once the target sends a tx completion
+	 * message for them.
+	 */
+	cdf_mem_free(vdev);
+	if (callback)
+		callback(context);
+}
+
+/**
+ * ol_txrx_flush_rx_frames() - flush a peer's cached rx frames
+ * @peer: peer whose cached_bufq is drained
+ * @drop: when true, free the frames instead of delivering them
+ *
+ * Frames are delivered through the peer's osif_rx callback unless @drop
+ * is set or the peer is not yet connected (delivery is then impossible,
+ * so the frames are dropped).  The flush_in_progress guard makes a
+ * concurrent invocation return immediately instead of racing.
+ *
+ * Return: None
+ */
+void ol_txrx_flush_rx_frames(struct ol_txrx_peer_t *peer,
+			     bool drop)
+{
+	struct ol_rx_cached_buf *cache_buf;
+	CDF_STATUS ret;
+	ol_rx_callback_fp data_rx = NULL;
+	void *cds_ctx = cds_get_global_context();
+
+	/* only one flusher at a time; others bail out immediately */
+	if (cdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
+		cdf_atomic_dec(&peer->flush_in_progress);
+		return;
+	}
+
+	cdf_assert(cds_ctx);
+	/* if the peer cannot receive yet, force drop; otherwise data_rx
+	 * is guaranteed non-NULL on the delivery path below */
+	cdf_spin_lock_bh(&peer->peer_info_lock);
+	if (peer->state >= ol_txrx_peer_state_conn)
+		data_rx = peer->osif_rx;
+	else
+		drop = true;
+	cdf_spin_unlock_bh(&peer->peer_info_lock);
+
+	cdf_spin_lock_bh(&peer->bufq_lock);
+	cache_buf = list_entry((&peer->cached_bufq)->next,
+			       typeof(*cache_buf), list);
+	while (!list_empty(&peer->cached_bufq)) {
+		list_del(&cache_buf->list);
+		/* drop bufq_lock across the upcall / free */
+		cdf_spin_unlock_bh(&peer->bufq_lock);
+		if (drop) {
+			cdf_nbuf_free(cache_buf->buf);
+		} else {
+			/* Flush the cached frames to HDD */
+			ret = data_rx(cds_ctx, cache_buf->buf, peer->local_id);
+			if (ret != CDF_STATUS_SUCCESS)
+				cdf_nbuf_free(cache_buf->buf);
+		}
+		cdf_mem_free(cache_buf);
+		cdf_spin_lock_bh(&peer->bufq_lock);
+		cache_buf = list_entry((&peer->cached_bufq)->next,
+				       typeof(*cache_buf), list);
+	}
+	cdf_spin_unlock_bh(&peer->bufq_lock);
+	cdf_atomic_dec(&peer->flush_in_progress);
+}
+
+/**
+ * ol_txrx_peer_attach() - allocate and initialise a txrx peer object
+ * @pdev: pdev the peer's vdev belongs to
+ * @vdev: vdev the peer associates through
+ * @peer_mac_addr: MAC address of the new peer
+ *
+ * Rejects duplicates: if a peer with the same MAC already exists on the
+ * vdev, either waits (bounded by PEER_DELETION_TIMEOUT) for an
+ * in-progress deletion of it to finish, or fails.  The new peer starts
+ * with two references - one for attach (dropped by
+ * ol_txrx_peer_detach) and one for the expected PEER_MAP message - and
+ * is moved to the "disc" state.
+ *
+ * Return: peer handle, or NULL on duplicate / timeout / allocation failure
+ */
+ol_txrx_peer_handle
+ol_txrx_peer_attach(ol_txrx_pdev_handle pdev,
+		    ol_txrx_vdev_handle vdev, uint8_t *peer_mac_addr)
+{
+	struct ol_txrx_peer_t *peer;
+	struct ol_txrx_peer_t *temp_peer;
+	uint8_t i;
+	int differs;
+	bool wait_on_deletion = false;
+	unsigned long rc;
+
+	/* preconditions */
+	TXRX_ASSERT2(pdev);
+	TXRX_ASSERT2(vdev);
+	TXRX_ASSERT2(peer_mac_addr);
+
+	cdf_spin_lock_bh(&pdev->peer_ref_mutex);
+	/* check for duplicate exsisting peer */
+	TAILQ_FOREACH(temp_peer, &vdev->peer_list, peer_list_elem) {
+		if (!ol_txrx_peer_find_mac_addr_cmp(&temp_peer->mac_addr,
+			(union ol_txrx_align_mac_addr_t *)peer_mac_addr)) {
+			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+				"vdev_id %d (%02x:%02x:%02x:%02x:%02x:%02x) already exsist.\n",
+				vdev->vdev_id,
+				peer_mac_addr[0], peer_mac_addr[1],
+				peer_mac_addr[2], peer_mac_addr[3],
+				peer_mac_addr[4], peer_mac_addr[5]);
+			/*
+			 * NOTE(review): the scan continues after a match is
+			 * found when the duplicate is being deleted; confirm
+			 * that at most one duplicate can exist per vdev.
+			 */
+			if (cdf_atomic_read(&temp_peer->delete_in_progress)) {
+				vdev->wait_on_peer_id = temp_peer->local_id;
+				cdf_event_init(&vdev->wait_delete_comp);
+				wait_on_deletion = true;
+			} else {
+				cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+				return NULL;
+			}
+		}
+	}
+	cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+
+	if (wait_on_deletion) {
+		/* wait for peer deletion; on success fall through and
+		 * create the replacement peer below */
+		rc = cdf_wait_single_event(&vdev->wait_delete_comp,
+			cdf_system_msecs_to_ticks(PEER_DELETION_TIMEOUT));
+		if (!rc) {
+			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+				"timedout waiting for peer(%d) deletion\n",
+				vdev->wait_on_peer_id);
+			vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
+			return NULL;
+		}
+	}
+
+	peer = cdf_mem_malloc(sizeof(*peer));
+	if (!peer)
+		return NULL;    /* failure */
+	/* every field not assigned below starts at zero */
+	cdf_mem_zero(peer, sizeof(*peer));
+
+	/* store provided params */
+	peer->vdev = vdev;
+	cdf_mem_copy(&peer->mac_addr.raw[0], peer_mac_addr,
+		     OL_TXRX_MAC_ADDR_LEN);
+
+	INIT_LIST_HEAD(&peer->cached_bufq);
+	cdf_spin_lock_bh(&pdev->peer_ref_mutex);
+	/* add this peer into the vdev's list */
+	TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
+	cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+	/* check whether this is a real peer (peer mac addr != vdev mac addr) */
+	if (ol_txrx_peer_find_mac_addr_cmp(&vdev->mac_addr, &peer->mac_addr))
+		vdev->last_real_peer = peer;
+
+	peer->rx_opt_proc = pdev->rx_opt_proc;
+
+	ol_rx_peer_init(pdev, peer);
+
+	/* initialize the peer_id */
+	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
+		peer->peer_ids[i] = HTT_INVALID_PEER;
+
+
+	peer->osif_rx = NULL;
+	cdf_spinlock_init(&peer->peer_info_lock);
+	cdf_spinlock_init(&peer->bufq_lock);
+
+	cdf_atomic_init(&peer->delete_in_progress);
+	cdf_atomic_init(&peer->flush_in_progress);
+
+	cdf_atomic_init(&peer->ref_cnt);
+
+	/* keep one reference for attach */
+	cdf_atomic_inc(&peer->ref_cnt);
+
+	/* keep one reference for ol_rx_peer_map_handler */
+	cdf_atomic_inc(&peer->ref_cnt);
+
+	peer->valid = 1;
+
+	ol_txrx_peer_find_hash_add(pdev, peer);
+
+	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2,
+		   "vdev %p created peer %p (%02x:%02x:%02x:%02x:%02x:%02x)\n",
+		   vdev, peer,
+		   peer->mac_addr.raw[0], peer->mac_addr.raw[1],
+		   peer->mac_addr.raw[2], peer->mac_addr.raw[3],
+		   peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
+	/*
+	 * For every peer MAp message search and set if bss_peer
+	 */
+	differs =
+		cdf_mem_compare(peer->mac_addr.raw, vdev->mac_addr.raw,
+				OL_TXRX_MAC_ADDR_LEN);
+	if (!differs)
+		peer->bss_peer = 1;
+
+	/*
+	 * The peer starts in the "disc" state while association is in progress.
+	 * Once association completes, the peer will get updated to "auth" state
+	 * by a call to ol_txrx_peer_state_update if the peer is in open mode,
+	 * or else to the "conn" state. For non-open mode, the peer will
+	 * progress to "auth" state once the authentication completes.
+	 */
+	peer->state = ol_txrx_peer_state_invalid;
+	ol_txrx_peer_state_update(pdev, peer->mac_addr.raw,
+				  ol_txrx_peer_state_disc);
+
+#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
+	peer->rssi_dbm = HTT_RSSI_INVALID;
+#endif
+
+	ol_txrx_local_peer_id_alloc(pdev, peer);
+
+	return peer;
+}
+
+/*
+ * Discarding tx filter - used in the disconnected state; rejects every
+ * data frame by always returning A_ERROR.
+ */
+static A_STATUS ol_tx_filter_discard(struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+	return A_ERROR;
+}
+
+/*
+ * Non-authentication tx filter - while a peer is connected but not yet
+ * authenticated, only let through the frames needed to complete
+ * authentication: EAPOL (802.1X PAE) and WAPI (WAI) ethertypes.
+ */
+static A_STATUS ol_tx_filter_non_auth(struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+	if (tx_msdu_info->htt.info.ethertype == ETHERTYPE_PAE ||
+	    tx_msdu_info->htt.info.ethertype == ETHERTYPE_WAI)
+		return A_OK;
+
+	return A_ERROR;
+}
+
+/*
+ * Pass-through tx filter - used in the authenticated state; accepts
+ * every data frame by always returning A_OK.
+ */
+static A_STATUS ol_tx_filter_pass_thru(struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+	return A_OK;
+}
+
+/**
+ * ol_txrx_peer_state_update() - update a peer's authentication state
+ * @pdev: txrx pdev the peer belongs to
+ * @peer_mac: MAC address used to look the peer up
+ * @state: new state (disc / conn / auth)
+ *
+ * Installs the tx filter matching the new state (discard everything,
+ * EAPOL/WAPI only, or pass-through) and, when host-managed ADDBA is
+ * configured, pauses the peer's QoS tx queues until ADDBA negotiation
+ * completes.
+ *
+ * Return: CDF_STATUS_SUCCESS, or CDF_STATUS_E_INVAL when the pdev or
+ *         peer cannot be found
+ */
+CDF_STATUS
+ol_txrx_peer_state_update(struct ol_txrx_pdev_t *pdev, uint8_t *peer_mac,
+			  enum ol_txrx_peer_state state)
+{
+	struct ol_txrx_peer_t *peer;
+
+	if (cdf_unlikely(!pdev)) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Pdev is NULL");
+		cdf_assert(0);
+		return CDF_STATUS_E_INVAL;
+	}
+
+	/* the lookup takes a reference that is released before returning */
+	peer = ol_txrx_peer_find_hash_find(pdev, peer_mac, 0, 1);
+	if (NULL == peer) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2, "%s: peer is null for peer_mac"
+			   " 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", __FUNCTION__,
+			   peer_mac[0], peer_mac[1], peer_mac[2], peer_mac[3],
+			   peer_mac[4], peer_mac[5]);
+		return CDF_STATUS_E_INVAL;
+	}
+
+	/* TODO: Should we send WMI command of the connection state? */
+	/* avoid multiple auth state change. */
+	if (peer->state == state) {
+#ifdef TXRX_PRINT_VERBOSE_ENABLE
+		TXRX_PRINT(TXRX_PRINT_LEVEL_INFO3,
+			   "%s: no state change, returns directly\n",
+			   __func__);
+#endif
+		cdf_atomic_dec(&peer->ref_cnt);
+		return CDF_STATUS_SUCCESS;
+	}
+
+	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2, "%s: change from %d to %d\n",
+		   __func__, peer->state, state);
+
+	peer->tx_filter = (state == ol_txrx_peer_state_auth)
+		? ol_tx_filter_pass_thru
+		: ((state == ol_txrx_peer_state_conn)
+		   ? ol_tx_filter_non_auth
+		   : ol_tx_filter_discard);
+
+	if (peer->vdev->pdev->cfg.host_addba) {
+		if (state == ol_txrx_peer_state_auth) {
+			int tid;
+			/*
+			 * Pause all regular (non-extended) TID tx queues until
+			 * data arrives and ADDBA negotiation has completed.
+			 */
+			TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2,
+				   "%s: pause peer and unpause mgmt/non-qos\n",
+				   __func__);
+			ol_txrx_peer_pause(peer); /* pause all tx queues */
+			/* unpause mgmt and non-QoS tx queues */
+			for (tid = OL_TX_NUM_QOS_TIDS;
+			     tid < OL_TX_NUM_TIDS; tid++)
+				ol_txrx_peer_tid_unpause(peer, tid);
+		}
+	}
+
+	/* Set the state after the Pause to avoid the race condition
+	 * with the ADDBA check in the tx path */
+	peer->state = state;
+
+	/*
+	 * Drop the lookup reference only after the last access to the
+	 * peer; decrementing before the state write above could let the
+	 * peer be freed underneath us if this was the final reference.
+	 */
+	cdf_atomic_dec(&peer->ref_cnt);
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * ol_txrx_peer_keyinstalled_state_update() - record key-installed state
+ * @peer: peer to update
+ * @val: nonzero once the peer's encryption key has been installed
+ */
+void
+ol_txrx_peer_keyinstalled_state_update(struct ol_txrx_peer_t *peer, uint8_t val)
+{
+	peer->keyinstalled = val;
+}
+
+/**
+ * ol_txrx_peer_update() - update one selected attribute of a peer
+ * @vdev: vdev used to locate the peer
+ * @peer_mac: MAC address of the peer to update
+ * @param: union holding the value for the selected attribute
+ * @select: which member of @param to apply
+ *
+ * Looks the peer up (taking a temporary reference that is dropped at
+ * the end), applies the QoS capability, U-APSD mask or security type,
+ * and pushes QoS/U-APSD changes down to the HTT layer.
+ */
+void
+ol_txrx_peer_update(ol_txrx_vdev_handle vdev,
+		    uint8_t *peer_mac,
+		    union ol_txrx_peer_update_param_t *param,
+		    enum ol_txrx_peer_update_select_t select)
+{
+	struct ol_txrx_peer_t *peer;
+
+	peer = ol_txrx_peer_find_hash_find(vdev->pdev, peer_mac, 0, 1);
+	if (!peer) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2, "%s: peer is null",
+			   __func__);
+		return;
+	}
+
+	switch (select) {
+	case ol_txrx_peer_update_qos_capable:
+	{
+		/* save qos_capable here txrx peer,
+		 * when HTT_ISOC_T2H_MSG_TYPE_PEER_INFO comes then save.
+		 */
+		peer->qos_capable = param->qos_capable;
+		/*
+		 * The following function call assumes that the peer has a
+		 * single ID. This is currently true, and
+		 * is expected to remain true.
+		 */
+		htt_peer_qos_update(peer->vdev->pdev->htt_pdev,
+				    peer->peer_ids[0],
+				    peer->qos_capable);
+		break;
+	}
+	case ol_txrx_peer_update_uapsdMask:
+	{
+		peer->uapsd_mask = param->uapsd_mask;
+		htt_peer_uapsdmask_update(peer->vdev->pdev->htt_pdev,
+					  peer->peer_ids[0],
+					  peer->uapsd_mask);
+		break;
+	}
+	case ol_txrx_peer_update_peer_security:
+	{
+		/* translate the ol security enum to its HTT equivalent,
+		 * defaulting to "none" for unrecognised values */
+		enum ol_sec_type sec_type = param->sec_type;
+		enum htt_sec_type peer_sec_type = htt_sec_type_none;
+
+		switch (sec_type) {
+		case ol_sec_type_none:
+			peer_sec_type = htt_sec_type_none;
+			break;
+		case ol_sec_type_wep128:
+			peer_sec_type = htt_sec_type_wep128;
+			break;
+		case ol_sec_type_wep104:
+			peer_sec_type = htt_sec_type_wep104;
+			break;
+		case ol_sec_type_wep40:
+			peer_sec_type = htt_sec_type_wep40;
+			break;
+		case ol_sec_type_tkip:
+			peer_sec_type = htt_sec_type_tkip;
+			break;
+		case ol_sec_type_tkip_nomic:
+			peer_sec_type = htt_sec_type_tkip_nomic;
+			break;
+		case ol_sec_type_aes_ccmp:
+			peer_sec_type = htt_sec_type_aes_ccmp;
+			break;
+		case ol_sec_type_wapi:
+			peer_sec_type = htt_sec_type_wapi;
+			break;
+		default:
+			peer_sec_type = htt_sec_type_none;
+			break;
+		}
+
+		/* unicast and multicast share the same security type */
+		peer->security[txrx_sec_ucast].sec_type =
+			peer->security[txrx_sec_mcast].sec_type =
+				peer_sec_type;
+
+		break;
+	}
+	default:
+	{
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "ERROR: unknown param %d in %s", select,
+			  __func__);
+		break;
+	}
+	}
+	/* release the lookup reference taken above */
+	cdf_atomic_dec(&peer->ref_cnt);
+}
+
+/**
+ * ol_txrx_peer_uapsdmask_get() - return the U-APSD mask of a peer
+ * @txrx_pdev: pdev to look the peer up in
+ * @peer_id: peer ID assigned by the target
+ *
+ * Return: the peer's U-APSD mask, or 0 if the peer ID is unknown
+ */
+uint8_t
+ol_txrx_peer_uapsdmask_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
+{
+	struct ol_txrx_peer_t *peer =
+		ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
+
+	if (peer != NULL)
+		return peer->uapsd_mask;
+
+	return 0;
+}
+
+/**
+ * ol_txrx_peer_qoscapable_get() - return a peer's QoS capability flag
+ * @txrx_pdev: pdev to look the peer up in
+ * @peer_id: peer ID assigned by the target
+ *
+ * Return: the peer's qos_capable flag, or 0 if the peer ID is unknown
+ */
+uint8_t
+ol_txrx_peer_qoscapable_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
+{
+
+	struct ol_txrx_peer_t *peer_t =
+		ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
+	if (peer_t != NULL)
+		return peer_t->qos_capable;
+	return 0;
+}
+
+/**
+ * ol_txrx_peer_unref_delete() - drop a peer reference, freeing at zero
+ * @peer: the peer whose reference count is decremented
+ *
+ * When the count reaches zero the peer is removed from the hash table
+ * and its vdev's list, its rx reorder state is torn down, and the
+ * object is freed.  If this was the vdev's last peer and the vdev is
+ * marked delete-pending, the vdev is freed as well and its delete
+ * callback invoked.
+ */
+void ol_txrx_peer_unref_delete(ol_txrx_peer_handle peer)
+{
+	struct ol_txrx_vdev_t *vdev;
+	struct ol_txrx_pdev_t *pdev;
+	int i;
+
+	/* preconditions */
+	TXRX_ASSERT2(peer);
+
+	vdev = peer->vdev;
+	if (NULL == vdev) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
+			   "The vdev is not present anymore\n");
+		return;
+	}
+
+	pdev = vdev->pdev;
+	if (NULL == pdev) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
+			   "The pdev is not present anymore\n");
+		return;
+	}
+
+	/*
+	 * Check for the reference count before deleting the peer
+	 * as we noticed that sometimes we are re-entering this
+	 * function again which is leading to dead-lock.
+	 * (A double-free should never happen, so assert if it does.)
+	 */
+
+	if (0 == cdf_atomic_read(&(peer->ref_cnt))) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "The Peer is not present anymore\n");
+		cdf_assert(0);
+		return;
+	}
+
+	/*
+	 * Hold the lock all the way from checking if the peer ref count
+	 * is zero until the peer references are removed from the hash
+	 * table and vdev list (if the peer ref count is zero).
+	 * This protects against a new HL tx operation starting to use the
+	 * peer object just after this function concludes it's done being used.
+	 * Furthermore, the lock needs to be held while checking whether the
+	 * vdev's list of peers is empty, to make sure that list is not modified
+	 * concurrently with the empty check.
+	 */
+	cdf_spin_lock_bh(&pdev->peer_ref_mutex);
+	if (cdf_atomic_dec_and_test(&peer->ref_cnt)) {
+		u_int16_t peer_id;
+
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "Deleting peer %p (%02x:%02x:%02x:%02x:%02x:%02x)\n",
+			   peer,
+			   peer->mac_addr.raw[0], peer->mac_addr.raw[1],
+			   peer->mac_addr.raw[2], peer->mac_addr.raw[3],
+			   peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
+
+		peer_id = peer->local_id;
+		/* remove the reference to the peer from the hash table */
+		ol_txrx_peer_find_hash_remove(pdev, peer);
+
+		/* remove the peer from its parent vdev's list */
+		TAILQ_REMOVE(&peer->vdev->peer_list, peer, peer_list_elem);
+
+		/* cleanup the Rx reorder queues for this peer */
+		ol_rx_peer_cleanup(vdev, peer);
+
+		/* peer is removed from peer_list */
+		cdf_atomic_set(&peer->delete_in_progress, 0);
+
+		/*
+		 * Set wait_delete_comp event if the current peer id matches
+		 * with registered peer id.
+		 */
+		if (peer_id == vdev->wait_on_peer_id) {
+			cdf_event_set(&vdev->wait_delete_comp);
+			vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
+		}
+
+		/* check whether the parent vdev has no peers left */
+		if (TAILQ_EMPTY(&vdev->peer_list)) {
+			/*
+			 * Check if the parent vdev was waiting for its peers
+			 * to be deleted, in order for it to be deleted too.
+			 */
+			if (vdev->delete.pending) {
+				ol_txrx_vdev_delete_cb vdev_delete_cb =
+					vdev->delete.callback;
+				void *vdev_delete_context =
+					vdev->delete.context;
+
+				/*
+				 * Now that there are no references to the peer,
+				 * we can release the peer reference lock.
+				 */
+				cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+
+				TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
+					   "%s: deleting vdev object %p "
+					   "(%02x:%02x:%02x:%02x:%02x:%02x)"
+					   " - its last peer is done\n",
+					   __func__, vdev,
+					   vdev->mac_addr.raw[0],
+					   vdev->mac_addr.raw[1],
+					   vdev->mac_addr.raw[2],
+					   vdev->mac_addr.raw[3],
+					   vdev->mac_addr.raw[4],
+					   vdev->mac_addr.raw[5]);
+				/* all peers are gone, go ahead and delete it */
+				cdf_mem_free(vdev);
+				if (vdev_delete_cb)
+					vdev_delete_cb(vdev_delete_context);
+			} else {
+				cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+			}
+		} else {
+			cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+		}
+
+		/*
+		 * 'array' is allocated in addba handler and is supposed to be
+		 * freed in delba handler. There is the case (for example, in
+		 * SSR) where delba handler is not called. Because array points
+		 * to address of 'base' by default and is reallocated in addba
+		 * handler later, only free the memory when the array does not
+		 * point to base.
+		 */
+		for (i = 0; i < OL_TXRX_NUM_EXT_TIDS; i++) {
+			if (peer->tids_rx_reorder[i].array !=
+			    &peer->tids_rx_reorder[i].base) {
+				TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
+					   "%s, delete reorder arr, tid:%d\n",
+					   __func__, i);
+				cdf_mem_free(peer->tids_rx_reorder[i].array);
+				ol_rx_reorder_init(&peer->tids_rx_reorder[i],
+						   (uint8_t) i);
+			}
+		}
+
+		cdf_mem_free(peer);
+	} else {
+		cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+	}
+}
+
+/**
+ * ol_txrx_peer_detach() - begin deletion of a peer object
+ * @peer: the peer to delete
+ *
+ * Redirects rx delivery to a discard handler, flushes cached rx frames,
+ * clears the vdev's last_real_peer shortcut and drops the reference
+ * taken by ol_txrx_peer_attach().  The object itself stays allocated
+ * until the target's PEER_UNMAP message drops the final reference.
+ */
+void ol_txrx_peer_detach(ol_txrx_peer_handle peer)
+{
+	struct ol_txrx_vdev_t *vdev = peer->vdev;
+
+	/* redirect peer's rx delivery function to point to a discard func */
+	peer->rx_opt_proc = ol_rx_discard;
+
+	peer->valid = 0;
+
+	ol_txrx_local_peer_id_free(peer->vdev->pdev, peer);
+
+	/* debug print to dump rx reorder state */
+	/* htt_rx_reorder_log_print(vdev->pdev->htt_pdev); */
+
+	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+		   "%s:peer %p (%02x:%02x:%02x:%02x:%02x:%02x)\n",
+		   __func__, peer,
+		   peer->mac_addr.raw[0], peer->mac_addr.raw[1],
+		   peer->mac_addr.raw[2], peer->mac_addr.raw[3],
+		   peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
+	ol_txrx_flush_rx_frames(peer, 1);
+
+	/*
+	 * Clear the vdev's last_real_peer shortcut under
+	 * last_real_peer_mutex only; an additional unlocked check/clear
+	 * would be redundant and racy against concurrent lookups.
+	 */
+	cdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
+	if (vdev->last_real_peer == peer)
+		vdev->last_real_peer = NULL;
+	cdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
+	htt_rx_reorder_log_print(peer->vdev->pdev->htt_pdev);
+
+	/*
+	 * NOTE(review): the peer's locks are destroyed here although a
+	 * PEER_MAP reference may still be outstanding - confirm no rx
+	 * path can still take these locks after detach.
+	 */
+	cdf_spinlock_destroy(&peer->peer_info_lock);
+	cdf_spinlock_destroy(&peer->bufq_lock);
+	/* set delete_in_progress to identify that wma
+	 * is waiting for unmap massage for this peer */
+	cdf_atomic_set(&peer->delete_in_progress, 1);
+	/*
+	 * Remove the reference added during peer_attach.
+	 * The peer will still be left allocated until the
+	 * PEER_UNMAP message arrives to remove the other
+	 * reference, added by the PEER_MAP message.
+	 */
+	ol_txrx_peer_unref_delete(peer);
+}
+
+/**
+ * ol_txrx_peer_find_by_addr() - look up a peer by MAC, without a reference
+ * @pdev: pdev to search
+ * @peer_mac_addr: MAC address to match
+ *
+ * The hash lookup takes a reference which is dropped again before
+ * returning, so the caller receives a non-counted pointer.
+ * NOTE(review): the peer could be freed any time after return -
+ * confirm callers only use this where deletion is excluded.
+ *
+ * Return: matching peer, or NULL
+ */
+ol_txrx_peer_handle
+ol_txrx_peer_find_by_addr(struct ol_txrx_pdev_t *pdev, uint8_t *peer_mac_addr)
+{
+	struct ol_txrx_peer_t *peer;
+	peer = ol_txrx_peer_find_hash_find(pdev, peer_mac_addr, 0, 0);
+	if (peer) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "%s: Delete extra reference %p\n", __func__, peer);
+		/* release the extra reference */
+		ol_txrx_peer_unref_delete(peer);
+	}
+	return peer;
+}
+
+/**
+ * ol_txrx_dump_tx_desc() - dump tx descriptor total and free count
+ * @pdev_handle: pointer to the txrx pdev
+ *
+ * Return: none
+ */
+static void ol_txrx_dump_tx_desc(ol_txrx_pdev_handle pdev_handle)
+{
+	uint32_t total;
+
+	total = ol_tx_get_desc_global_pool_size(pdev_handle);
+
+	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+		   "total tx credit %d num_free %d",
+		   total, pdev_handle->tx_desc.num_free);
+}
+
+/**
+ * ol_txrx_wait_for_pending_tx() - wait for tx queue to be empty
+ * @timeout: timeout in ms
+ *
+ * Polls every OL_ATH_TX_DRAIN_WAIT_DELAY ms until no tx descriptors
+ * remain outstanding, dumping the descriptor counts and returning a
+ * timeout error if the queue does not drain in time.
+ *
+ * Return:
+ *    CDF_STATUS_SUCCESS if the queue empties,
+ *    CDF_STATUS_E_TIMEOUT in case of timeout,
+ *    CDF_STATUS_E_FAULT in case of missing handle
+ */
+CDF_STATUS ol_txrx_wait_for_pending_tx(int timeout)
+{
+	ol_txrx_pdev_handle txrx_pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+
+	if (txrx_pdev == NULL) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "%s: txrx context is null", __func__);
+		return CDF_STATUS_E_FAULT;
+	}
+
+	while (ol_txrx_get_tx_pending(txrx_pdev)) {
+		/* sleep one poll interval, then charge it to the budget */
+		cdf_sleep(OL_ATH_TX_DRAIN_WAIT_DELAY);
+		if (timeout <= 0) {
+			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+				   "%s: tx frames are pending", __func__);
+			ol_txrx_dump_tx_desc(txrx_pdev);
+			return CDF_STATUS_E_TIMEOUT;
+		}
+		timeout = timeout - OL_ATH_TX_DRAIN_WAIT_DELAY;
+	}
+	return CDF_STATUS_SUCCESS;
+}
+
+#ifndef QCA_WIFI_3_0_EMU
+#define SUSPEND_DRAIN_WAIT 500
+#else
+#define SUSPEND_DRAIN_WAIT 3000
+#endif
+
+/**
+ * ol_txrx_bus_suspend() - bus suspend
+ *
+ * Ensure that ol_txrx is ready for bus suspend by waiting up to
+ * SUSPEND_DRAIN_WAIT ms for pending tx frames to drain.
+ *
+ * Return: CDF_STATUS
+ */
+CDF_STATUS ol_txrx_bus_suspend(void)
+{
+	return ol_txrx_wait_for_pending_tx(SUSPEND_DRAIN_WAIT);
+}
+
+/**
+ * ol_txrx_bus_resume() - bus resume
+ *
+ * Dummy function for symmetry with ol_txrx_bus_suspend(); nothing
+ * needs to be done on resume.
+ *
+ * Return: CDF_STATUS_SUCCESS
+ */
+CDF_STATUS ol_txrx_bus_resume(void)
+{
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * ol_txrx_get_tx_pending() - number of tx descriptors currently in use
+ * @pdev_handle: txrx pdev handle
+ *
+ * Return: outstanding descriptor count (pool size minus free entries)
+ */
+int ol_txrx_get_tx_pending(ol_txrx_pdev_handle pdev_handle)
+{
+	uint32_t total;
+
+	total = ol_tx_get_desc_global_pool_size(pdev_handle);
+
+	return total - ol_tx_get_total_free_desc(pdev_handle);
+}
+
+/**
+ * ol_txrx_discard_tx_pending() - drop every tx frame still in flight
+ * @pdev_handle: txrx pdev to purge
+ *
+ * Discards HTT-level pending tx, flushes the tx queues into a local
+ * list and frees those frames with an error status, then discards the
+ * frames owned by the target.
+ */
+void ol_txrx_discard_tx_pending(ol_txrx_pdev_handle pdev_handle)
+{
+	ol_tx_desc_list tx_descs;
+	/* First let hif do the cdf_atomic_dec_and_test(&tx_desc->ref_cnt)
+	 * then let htt do the cdf_atomic_dec_and_test(&tx_desc->ref_cnt)
+	 * which is tha same with normal data send complete path*/
+	htt_tx_pending_discard(pdev_handle->htt_pdev);
+
+	TAILQ_INIT(&tx_descs);
+	ol_tx_queue_discard(pdev_handle, true, &tx_descs);
+	/* Discard Frames in Discard List */
+	ol_tx_desc_frame_list_free(pdev_handle, &tx_descs, 1 /* error */);
+
+	ol_tx_discard_target_frms(pdev_handle);
+}
+
+/*--- debug features --------------------------------------------------------*/
+
+unsigned g_txrx_print_level = TXRX_PRINT_LEVEL_ERR; /* default */
+
+/**
+ * ol_txrx_print_level_set() - set the global TXRX print verbosity
+ * @level: new TXRX_PRINT_LEVEL_* value
+ *
+ * A no-op (other than a warning) unless the driver was compiled with
+ * TXRX_PRINT_ENABLE.
+ */
+void ol_txrx_print_level_set(unsigned level)
+{
+#ifndef TXRX_PRINT_ENABLE
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_FATAL,
+		  "The driver is compiled without TXRX prints enabled.\n"
+		  "To enable them, recompile with TXRX_PRINT_ENABLE defined");
+#else
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO,
+		  "TXRX printout level changed from %d to %d",
+		  g_txrx_print_level, level);
+	g_txrx_print_level = level;
+#endif
+}
+
+/* Non-transient copy of a stats request; its address doubles as the
+ * cookie carried by the HTT stats request/response exchange. */
+struct ol_txrx_stats_req_internal {
+	struct ol_txrx_stats_req base;  /* the caller's original request */
+	int serviced;           /* state of this request */
+	int offset;             /* write offset into base.copy.buf */
+};
+
+/* Encode the request object's address as the 64-bit cookie sent to the fw */
+static inline
+uint64_t ol_txrx_stats_ptr_to_u64(struct ol_txrx_stats_req_internal *req)
+{
+	return (uint64_t) ((size_t) req);
+}
+
+/* Recover the request object pointer from the 64-bit cookie */
+static inline
+struct ol_txrx_stats_req_internal *ol_txrx_u64_to_stats_ptr(uint64_t cookie)
+{
+	return (struct ol_txrx_stats_req_internal *)((size_t) cookie);
+}
+
+#ifdef ATH_PERF_PWR_OFFLOAD
+/**
+ * ol_txrx_fw_stats_cfg() - send a fire-and-forget stats config to the fw
+ * @vdev: vdev whose pdev/HTT instance carries the message
+ * @cfg_stats_type: stats config type forwarded to the target
+ * @cfg_val: value for that config type
+ *
+ * Uses a dummy cookie since no stats upload is requested (both the
+ * upload and reset masks are zero).
+ */
+void
+ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
+		     uint8_t cfg_stats_type, uint32_t cfg_val)
+{
+	uint64_t dummy_cookie = 0;
+	htt_h2t_dbg_stats_get(vdev->pdev->htt_pdev, 0 /* upload mask */,
+			      0 /* reset mask */,
+			      cfg_stats_type, cfg_val, dummy_cookie);
+}
+
+/**
+ * ol_txrx_fw_stats_get() - request a stats upload from the firmware
+ * @vdev: vdev whose pdev/HTT instance carries the request
+ * @req: caller's request specification (copied; may live on the stack)
+ * @response_expected: false if no T2H stats response will follow
+ *
+ * Allocates a non-transient copy of @req whose address is used as the
+ * request cookie; the copy is freed by ol_txrx_fw_stats_handler() when
+ * the response series completes, or immediately here when
+ * @response_expected is false.
+ * NOTE(review): if a response is expected but never arrives, the copy
+ * leaks - confirm the target always answers.
+ *
+ * Return: A_OK, A_NO_MEMORY, or A_ERROR on invalid masks / send failure
+ */
+A_STATUS
+ol_txrx_fw_stats_get(ol_txrx_vdev_handle vdev, struct ol_txrx_stats_req *req,
+		     bool response_expected)
+{
+	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+	uint64_t cookie;
+	struct ol_txrx_stats_req_internal *non_volatile_req;
+
+	if (!pdev ||
+	    req->stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS ||
+	    req->stats_type_reset_mask >= 1 << HTT_DBG_NUM_STATS) {
+		return A_ERROR;
+	}
+
+	/*
+	 * Allocate a non-transient stats request object.
+	 * (The one provided as an argument is likely allocated on the stack.)
+	 */
+	non_volatile_req = cdf_mem_malloc(sizeof(*non_volatile_req));
+	if (!non_volatile_req)
+		return A_NO_MEMORY;
+
+	/* copy the caller's specifications */
+	non_volatile_req->base = *req;
+	non_volatile_req->serviced = 0;
+	non_volatile_req->offset = 0;
+
+	/* use the non-volatile request object's address as the cookie */
+	cookie = ol_txrx_stats_ptr_to_u64(non_volatile_req);
+
+	if (htt_h2t_dbg_stats_get(pdev->htt_pdev,
+				  req->stats_type_upload_mask,
+				  req->stats_type_reset_mask,
+				  HTT_H2T_STATS_REQ_CFG_STAT_TYPE_INVALID, 0,
+				  cookie)) {
+		cdf_mem_free(non_volatile_req);
+		return A_ERROR;
+	}
+
+	/* block until the response handler releases the semaphore */
+	if (req->wait.blocking)
+		while (cdf_semaphore_acquire(pdev->osdev, req->wait.sem_ptr))
+			;
+
+	if (response_expected == false)
+		cdf_mem_free(non_volatile_req);
+
+	return A_OK;
+}
+#endif
+/**
+ * ol_txrx_fw_stats_handler() - process a firmware stats upload
+ * @pdev: txrx pdev the stats arrived on
+ * @cookie: request cookie; the address of the ol_txrx_stats_req_internal
+ *          allocated by ol_txrx_fw_stats_get()
+ * @stats_info_list: T2H buffer containing a series of stats segments
+ *
+ * Walks the segment series until SERIES_DONE, optionally printing each
+ * segment, copying recognised stats types into the requester's buffer
+ * (bounded by the requester's byte limit) and invoking the requester's
+ * callback per segment.  Once the series is complete (no PARTIAL
+ * segment outstanding), wakes any blocked waiter and frees the request.
+ */
+void
+ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
+			 uint64_t cookie, uint8_t *stats_info_list)
+{
+	enum htt_dbg_stats_type type;
+	enum htt_dbg_stats_status status;
+	int length;
+	uint8_t *stats_data;
+	struct ol_txrx_stats_req_internal *req;
+	int more = 0;
+
+	req = ol_txrx_u64_to_stats_ptr(cookie);
+
+	do {
+		htt_t2h_dbg_stats_hdr_parse(stats_info_list, &type, &status,
+					    &length, &stats_data);
+		if (status == HTT_DBG_STATS_STATUS_SERIES_DONE)
+			break;
+		if (status == HTT_DBG_STATS_STATUS_PRESENT ||
+		    status == HTT_DBG_STATS_STATUS_PARTIAL) {
+			uint8_t *buf;
+			int bytes = 0;
+
+			if (status == HTT_DBG_STATS_STATUS_PARTIAL)
+				more = 1;
+			if (req->base.print.verbose || req->base.print.concise)
+				/* provide the header along with the data */
+				htt_t2h_stats_print(stats_info_list,
+						    req->base.print.concise);
+
+			switch (type) {
+			case HTT_DBG_STATS_WAL_PDEV_TXRX:
+				bytes = sizeof(struct wlan_dbg_stats);
+				if (req->base.copy.buf) {
+					int lmt;
+
+					lmt = sizeof(struct wlan_dbg_stats);
+					if (req->base.copy.byte_limit < lmt)
+						lmt = req->base.copy.byte_limit;
+					buf = req->base.copy.buf + req->offset;
+					cdf_mem_copy(buf, stats_data, lmt);
+				}
+				break;
+			case HTT_DBG_STATS_RX_REORDER:
+				bytes = sizeof(struct rx_reorder_stats);
+				if (req->base.copy.buf) {
+					int lmt;
+
+					lmt = sizeof(struct rx_reorder_stats);
+					if (req->base.copy.byte_limit < lmt)
+						lmt = req->base.copy.byte_limit;
+					buf = req->base.copy.buf + req->offset;
+					cdf_mem_copy(buf, stats_data, lmt);
+				}
+				break;
+			case HTT_DBG_STATS_RX_RATE_INFO:
+				bytes = sizeof(wlan_dbg_rx_rate_info_t);
+				if (req->base.copy.buf) {
+					int lmt;
+
+					lmt = sizeof(wlan_dbg_rx_rate_info_t);
+					if (req->base.copy.byte_limit < lmt)
+						lmt = req->base.copy.byte_limit;
+					buf = req->base.copy.buf + req->offset;
+					cdf_mem_copy(buf, stats_data, lmt);
+				}
+				break;
+
+			case HTT_DBG_STATS_TX_RATE_INFO:
+				bytes = sizeof(wlan_dbg_tx_rate_info_t);
+				if (req->base.copy.buf) {
+					int lmt;
+
+					lmt = sizeof(wlan_dbg_tx_rate_info_t);
+					if (req->base.copy.byte_limit < lmt)
+						lmt = req->base.copy.byte_limit;
+					buf = req->base.copy.buf + req->offset;
+					cdf_mem_copy(buf, stats_data, lmt);
+				}
+				break;
+
+			case HTT_DBG_STATS_TX_PPDU_LOG:
+				bytes = 0;
+				/* TO DO: specify how many bytes are present */
+				/* TO DO: add copying to the requestor's buf */
+				/*
+				 * BUGFIX: this case previously fell through
+				 * into the remote-ring-buffer case below,
+				 * copying unrelated data into the requester's
+				 * buffer; terminate it here.
+				 */
+				break;
+
+			case HTT_DBG_STATS_RX_REMOTE_RING_BUFFER_INFO:
+				bytes = sizeof(struct rx_remote_buffer_mgmt_stats);
+				if (req->base.copy.buf) {
+					int limit;
+
+					limit = sizeof(struct rx_remote_buffer_mgmt_stats);
+					if (req->base.copy.byte_limit < limit)
+						limit = req->base.copy.byte_limit;
+					buf = req->base.copy.buf + req->offset;
+					cdf_mem_copy(buf, stats_data, limit);
+				}
+				break;
+
+			case HTT_DBG_STATS_TXBF_INFO:
+				bytes = sizeof(struct wlan_dbg_txbf_data_stats);
+				if (req->base.copy.buf) {
+					int limit;
+
+					limit = sizeof(struct wlan_dbg_txbf_data_stats);
+					if (req->base.copy.byte_limit < limit)
+						limit = req->base.copy.byte_limit;
+					buf = req->base.copy.buf + req->offset;
+					cdf_mem_copy(buf, stats_data, limit);
+				}
+				break;
+
+			case HTT_DBG_STATS_SND_INFO:
+				bytes = sizeof(struct wlan_dbg_txbf_snd_stats);
+				if (req->base.copy.buf) {
+					int limit;
+
+					limit = sizeof(struct wlan_dbg_txbf_snd_stats);
+					if (req->base.copy.byte_limit < limit)
+						limit = req->base.copy.byte_limit;
+					buf = req->base.copy.buf + req->offset;
+					cdf_mem_copy(buf, stats_data, limit);
+				}
+				break;
+
+			case HTT_DBG_STATS_TX_SELFGEN_INFO:
+				bytes = sizeof(struct wlan_dbg_tx_selfgen_stats);
+				if (req->base.copy.buf) {
+					int limit;
+
+					limit = sizeof(struct wlan_dbg_tx_selfgen_stats);
+					if (req->base.copy.byte_limit < limit)
+						limit = req->base.copy.byte_limit;
+					buf = req->base.copy.buf + req->offset;
+					cdf_mem_copy(buf, stats_data, limit);
+				}
+				break;
+
+			case HTT_DBG_STATS_ERROR_INFO:
+				bytes =
+				  sizeof(struct wlan_dbg_wifi2_error_stats);
+				if (req->base.copy.buf) {
+					int limit;
+
+					limit =
+					  sizeof(struct wlan_dbg_wifi2_error_stats);
+					if (req->base.copy.byte_limit < limit)
+						limit = req->base.copy.byte_limit;
+					buf = req->base.copy.buf + req->offset;
+					cdf_mem_copy(buf, stats_data, limit);
+				}
+				break;
+
+			case HTT_DBG_STATS_TXBF_MUSU_NDPA_PKT:
+				bytes =
+				  sizeof(struct rx_txbf_musu_ndpa_pkts_stats);
+				if (req->base.copy.buf) {
+					int limit;
+
+					limit = sizeof(struct
+						rx_txbf_musu_ndpa_pkts_stats);
+					if (req->base.copy.byte_limit < limit)
+						limit =
+						req->base.copy.byte_limit;
+					buf = req->base.copy.buf + req->offset;
+					cdf_mem_copy(buf, stats_data, limit);
+				}
+				break;
+
+			default:
+				break;
+			}
+			buf = req->base.copy.buf
+				? req->base.copy.buf
+				: stats_data;
+			if (req->base.callback.fp)
+				req->base.callback.fp(req->base.callback.ctxt,
+						      type, buf, bytes);
+		}
+		stats_info_list += length;
+	} while (1);
+
+	if (!more) {
+		if (req->base.wait.blocking)
+			cdf_semaphore_release(pdev->osdev,
+					      req->base.wait.sem_ptr);
+		cdf_mem_free(req);
+	}
+}
+
+#ifndef ATH_PERF_PWR_OFFLOAD /*---------------------------------------------*/
+/**
+ * ol_txrx_debug() - dispatch debug dumps selected by a bitmask
+ * @vdev: vdev whose pdev is dumped
+ * @debug_specs: OR of TXRX_DBG_MASK_* selectors
+ *
+ * Each selected dump is only available when the corresponding compile
+ * option is enabled; otherwise a fatal-level notice is printed.
+ *
+ * Return: 0 always
+ */
+int ol_txrx_debug(ol_txrx_vdev_handle vdev, int debug_specs)
+{
+	if (debug_specs & TXRX_DBG_MASK_OBJS) {
+#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
+		ol_txrx_pdev_display(vdev->pdev, 0);
+#else
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_FATAL,
+			  "The pdev,vdev,peer display functions are disabled.\n"
+			  "To enable them, recompile with TXRX_DEBUG_LEVEL > 5");
+#endif
+	}
+	if (debug_specs & TXRX_DBG_MASK_STATS) {
+		ol_txrx_stats_display(vdev->pdev);
+	}
+	if (debug_specs & TXRX_DBG_MASK_PROT_ANALYZE) {
+#if defined(ENABLE_TXRX_PROT_ANALYZE)
+		ol_txrx_prot_ans_display(vdev->pdev);
+#else
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_FATAL,
+			  "txrx protocol analysis is disabled.\n"
+			  "To enable it, recompile with "
+			  "ENABLE_TXRX_PROT_ANALYZE defined");
+#endif
+	}
+	if (debug_specs & TXRX_DBG_MASK_RX_REORDER_TRACE) {
+#if defined(ENABLE_RX_REORDER_TRACE)
+		ol_rx_reorder_trace_display(vdev->pdev, 0, 0);
+#else
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_FATAL,
+			  "rx reorder seq num trace is disabled.\n"
+			  "To enable it, recompile with "
+			  "ENABLE_RX_REORDER_TRACE defined");
+#endif
+
+	}
+	return 0;
+}
+#endif
+
+/**
+ * ol_txrx_aggr_cfg() - configure aggregation limits for a vdev
+ * @vdev: virtual device to configure
+ * @max_subfrms_ampdu: maximum number of subframes per A-MPDU
+ * @max_subfrms_amsdu: maximum number of subframes per A-MSDU
+ *
+ * Thin wrapper that forwards the limits to the HTT layer as an
+ * H2T aggregation configuration message.
+ *
+ * Return: result of htt_h2t_aggr_cfg_msg()
+ */
+int ol_txrx_aggr_cfg(ol_txrx_vdev_handle vdev,
+		     int max_subfrms_ampdu, int max_subfrms_amsdu)
+{
+	return htt_h2t_aggr_cfg_msg(vdev->pdev->htt_pdev, max_subfrms_ampdu,
+				    max_subfrms_amsdu);
+}
+
+#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
+/**
+ * ol_txrx_pdev_display() - log a summary of a txrx physical device
+ * @pdev: physical device to dump
+ * @indent: number of leading spaces for this nesting level
+ *
+ * Dumps every vdev (which in turn dumps its peers), the peer-find
+ * table and the tx descriptor pool, then hands off to htt_display().
+ */
+void ol_txrx_pdev_display(ol_txrx_pdev_handle pdev, int indent)
+{
+	struct ol_txrx_vdev_t *vdev;
+
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+		  "%*s%s:\n", indent, " ", "txrx pdev");
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+		  "%*spdev object: %p", indent + 4, " ", pdev);
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+		  "%*svdev list:", indent + 4, " ");
+	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
+		ol_txrx_vdev_display(vdev, indent + 8);
+	}
+	ol_txrx_peer_find_display(pdev, indent + 4);
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+		  "%*stx desc pool: %d elems @ %p", indent + 4, " ",
+		  pdev->tx_desc.pool_size, pdev->tx_desc.array);
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW, " ");
+	htt_display(pdev->htt_pdev, indent);
+}
+
+/**
+ * ol_txrx_vdev_display() - log a summary of one txrx virtual device
+ * @vdev: virtual device to dump
+ * @indent: number of leading spaces for this nesting level
+ *
+ * Prints the vdev pointer, id and MAC address, then dumps each peer
+ * on the vdev's peer list at a deeper indent.
+ */
+void ol_txrx_vdev_display(ol_txrx_vdev_handle vdev, int indent)
+{
+	struct ol_txrx_peer_t *peer;
+
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+		  "%*stxrx vdev: %p\n", indent, " ", vdev);
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+		  "%*sID: %d\n", indent + 4, " ", vdev->vdev_id);
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+		  "%*sMAC addr: %d:%d:%d:%d:%d:%d",
+		  indent + 4, " ",
+		  vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
+		  vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
+		  vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+		  "%*speer list:", indent + 4, " ");
+	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
+		ol_txrx_peer_display(peer, indent + 8);
+	}
+}
+
+/**
+ * ol_txrx_peer_display() - log a summary of one txrx peer
+ * @peer: peer to dump
+ * @indent: number of leading spaces for this nesting level
+ *
+ * Prints the peer pointer plus every valid entry of the peer's id
+ * array; HTT_INVALID_PEER slots are skipped.
+ */
+void ol_txrx_peer_display(ol_txrx_peer_handle peer, int indent)
+{
+	int idx;
+
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+		  "%*stxrx peer: %p", indent, " ", peer);
+	for (idx = 0; idx < MAX_NUM_PEER_ID_PER_PEER; idx++) {
+		if (peer->peer_ids[idx] == HTT_INVALID_PEER)
+			continue;
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+			  "%*sID: %d", indent + 4, " ",
+			  peer->peer_ids[idx]);
+	}
+}
+#endif /* TXRX_DEBUG_LEVEL */
+
+#if defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG)
+/**
+ * ol_txrx_stats_display_tso() - dump the recorded TSO packet history
+ * @pdev: physical device whose TSO statistics are printed
+ *
+ * Prints the aggregate TSO packet/byte counters, then walks the
+ * TXRX_STATS_TSO_* history of NUM_MAX_TSO_MSDUS msdus with up to
+ * NUM_MAX_TSO_SEGS segments each, printing per-segment TCP flags,
+ * sequence number and IP id.
+ */
+void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
+{
+	int msdu_idx;
+	int seg_idx;
+
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+		  "TSO pkts %lld, bytes %lld\n",
+		  pdev->stats.pub.tx.tso.tso_pkts.pkts,
+		  pdev->stats.pub.tx.tso.tso_pkts.bytes);
+
+	for (msdu_idx = 0; msdu_idx < NUM_MAX_TSO_MSDUS; msdu_idx++) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "curr msdu idx: %d curr seg idx: %d num segs %d\n",
+			  TXRX_STATS_TSO_MSDU_IDX(pdev),
+			  TXRX_STATS_TSO_SEG_IDX(pdev),
+			  TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, msdu_idx));
+		/* bound by both the recorded segment count and the array */
+		for (seg_idx = 0;
+		     ((seg_idx < TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, msdu_idx)) &&
+		      (seg_idx < NUM_MAX_TSO_SEGS));
+		     seg_idx++) {
+			struct cdf_tso_seg_t tso_seg =
+				TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx);
+
+			CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+				  "msdu idx: %d seg idx: %d\n",
+				  msdu_idx, seg_idx);
+			CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+				  "tso_enable: %d\n",
+				  tso_seg.tso_flags.tso_enable);
+			CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+				  "fin %d syn %d rst %d psh %d ack %d\n"
+				  "urg %d ece %d cwr %d ns %d\n",
+				  tso_seg.tso_flags.fin, tso_seg.tso_flags.syn,
+				  tso_seg.tso_flags.rst, tso_seg.tso_flags.psh,
+				  tso_seg.tso_flags.ack, tso_seg.tso_flags.urg,
+				  tso_seg.tso_flags.ece, tso_seg.tso_flags.cwr,
+				  tso_seg.tso_flags.ns);
+			CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+				  "tcp_seq_num: 0x%x ip_id: %d\n",
+				  tso_seg.tso_flags.tcp_seq_num,
+				  tso_seg.tso_flags.ip_id);
+		}
+	}
+}
+#endif
+
+/**
+ * ol_txrx_stats() - render TXRX queue-pause statistics for a vdev
+ * @vdev_id: id of the virtual device to report on
+ * @buffer: destination string buffer
+ * @buf_len: size of @buffer in bytes
+ *
+ * Return: number of characters written, or 0 when the vdev is unknown
+ *	   (in which case "vdev not found" is placed in @buffer)
+ */
+int
+ol_txrx_stats(uint8_t vdev_id, char *buffer, unsigned buf_len)
+{
+	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
+	const char *q_state;
+	const char *timer_state;
+
+	if (!vdev) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "%s: vdev is NULL", __func__);
+		snprintf(buffer, buf_len, "vdev not found");
+		return 0;
+	}
+
+	q_state = vdev->ll_pause.is_q_paused ? "PAUSED" : "UNPAUSED";
+	timer_state = vdev->ll_pause.is_q_timer_on ? "RUNNING" : "NOT-RUNNING";
+
+	return scnprintf(buffer, buf_len,
+		"\nTXRX stats:\n"
+		"\nllQueue State : %s"
+		"\n pause %u unpause %u"
+		"\n overflow %u"
+		"\nllQueue timer state : %s\n",
+		q_state,
+		vdev->ll_pause.q_pause_cnt,
+		vdev->ll_pause.q_unpause_cnt,
+		vdev->ll_pause.q_overflow_cnt,
+		timer_state);
+}
+
+/**
+ * ol_txrx_stats_display() - print the pdev's cumulative tx/rx statistics
+ * @pdev: physical device whose counters are printed
+ *
+ * Output covers delivered/rejected/dropped tx counts (the dropped
+ * total sums download-fail, target-discard and no-ack), the tx
+ * completion histogram, rx ppdu/mpdu/msdu counters and the intra-BSS
+ * forwarding counters.
+ */
+void ol_txrx_stats_display(ol_txrx_pdev_handle pdev)
+{
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR, "txrx stats:");
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+		  " tx: sent %lld msdus (%lld B), "
+		  " rejected %lld (%lld B), dropped %lld (%lld B)",
+		  pdev->stats.pub.tx.delivered.pkts,
+		  pdev->stats.pub.tx.delivered.bytes,
+		  pdev->stats.pub.tx.dropped.host_reject.pkts,
+		  pdev->stats.pub.tx.dropped.host_reject.bytes,
+		  pdev->stats.pub.tx.dropped.download_fail.pkts
+		  + pdev->stats.pub.tx.dropped.target_discard.pkts
+		  + pdev->stats.pub.tx.dropped.no_ack.pkts,
+		  pdev->stats.pub.tx.dropped.download_fail.bytes
+		  + pdev->stats.pub.tx.dropped.target_discard.bytes
+		  + pdev->stats.pub.tx.dropped.no_ack.bytes);
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+		  " download fail: %lld (%lld B), "
+		  "target discard: %lld (%lld B), "
+		  "no ack: %lld (%lld B)",
+		  pdev->stats.pub.tx.dropped.download_fail.pkts,
+		  pdev->stats.pub.tx.dropped.download_fail.bytes,
+		  pdev->stats.pub.tx.dropped.target_discard.pkts,
+		  pdev->stats.pub.tx.dropped.target_discard.bytes,
+		  pdev->stats.pub.tx.dropped.no_ack.pkts,
+		  pdev->stats.pub.tx.dropped.no_ack.bytes);
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+		  "Tx completion per interrupt:\n"
+		  "Single Packet %d\n"
+		  " 2-10 Packets %d\n"
+		  "11-20 Packets %d\n"
+		  "21-30 Packets %d\n"
+		  "31-40 Packets %d\n"
+		  "41-50 Packets %d\n"
+		  "51-60 Packets %d\n"
+		  " 60+ Packets %d\n",
+		  pdev->stats.pub.tx.comp_histogram.pkts_1,
+		  pdev->stats.pub.tx.comp_histogram.pkts_2_10,
+		  pdev->stats.pub.tx.comp_histogram.pkts_11_20,
+		  pdev->stats.pub.tx.comp_histogram.pkts_21_30,
+		  pdev->stats.pub.tx.comp_histogram.pkts_31_40,
+		  pdev->stats.pub.tx.comp_histogram.pkts_41_50,
+		  pdev->stats.pub.tx.comp_histogram.pkts_51_60,
+		  pdev->stats.pub.tx.comp_histogram.pkts_61_plus);
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+		  " rx: %lld ppdus, %lld mpdus, %lld msdus, %lld bytes, %lld errs",
+		  pdev->stats.priv.rx.normal.ppdus,
+		  pdev->stats.priv.rx.normal.mpdus,
+		  pdev->stats.pub.rx.delivered.pkts,
+		  pdev->stats.pub.rx.delivered.bytes,
+		  pdev->stats.priv.rx.err.mpdu_bad);
+
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+		  " fwd to stack %d, fwd to fw %d, fwd to stack & fw %d\n",
+		  pdev->stats.pub.rx.intra_bss_fwd.packets_stack,
+		  pdev->stats.pub.rx.intra_bss_fwd.packets_fwd,
+		  pdev->stats.pub.rx.intra_bss_fwd.packets_stack_n_fwd);
+}
+
+/**
+ * ol_txrx_stats_clear() - zero all of the pdev's txrx statistics
+ * @pdev: physical device whose stats structure is reset
+ */
+void ol_txrx_stats_clear(ol_txrx_pdev_handle pdev)
+{
+	cdf_mem_zero(&pdev->stats, sizeof(pdev->stats));
+}
+
+#if defined(ENABLE_TXRX_PROT_ANALYZE)
+
+/**
+ * ol_txrx_prot_ans_display() - dump tx and rx protocol analyzer results
+ * @pdev: physical device whose protocol analyzers are dumped
+ */
+void ol_txrx_prot_ans_display(ol_txrx_pdev_handle pdev)
+{
+	ol_txrx_prot_an_display(pdev->prot_an_tx_sent);
+	ol_txrx_prot_an_display(pdev->prot_an_rx_sent);
+}
+
+#endif /* ENABLE_TXRX_PROT_ANALYZE */
+
+#ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
+/**
+ * ol_txrx_peer_rssi() - fetch the last recorded RSSI for a peer
+ * @peer: peer whose RSSI is queried
+ *
+ * Return: RSSI in dBm, or OL_TXRX_RSSI_INVALID when no valid
+ *	   measurement has been recorded yet
+ */
+int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer)
+{
+	if (peer->rssi_dbm == HTT_RSSI_INVALID)
+		return OL_TXRX_RSSI_INVALID;
+
+	return peer->rssi_dbm;
+}
+#endif /* #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI */
+
+#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
+/**
+ * ol_txrx_peer_stats_copy() - snapshot a peer's stats under the stat lock
+ * @pdev: pdev owning peer_stat_mutex
+ * @peer: peer whose stats are copied
+ * @stats: caller-owned destination buffer
+ *
+ * Return: A_OK always; all three pointers are asserted non-NULL
+ */
+A_STATUS
+ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
+			ol_txrx_peer_handle peer, ol_txrx_peer_stats_t *stats)
+{
+	cdf_assert(pdev && peer && stats);
+	/* lock so the copy sees a consistent snapshot of the counters */
+	cdf_spin_lock_bh(&pdev->peer_stat_mutex);
+	cdf_mem_copy(stats, &peer->stats, sizeof(*stats));
+	cdf_spin_unlock_bh(&pdev->peer_stat_mutex);
+	return A_OK;
+}
+#endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
+
+/**
+ * ol_vdev_rx_set_intrabss_fwd() - enable/disable intra-BSS rx forwarding
+ * @vdev: virtual device to update; a NULL handle is silently ignored
+ * @val: true to disable intra-BSS forwarding, false to allow it
+ */
+void ol_vdev_rx_set_intrabss_fwd(ol_txrx_vdev_handle vdev, bool val)
+{
+	if (!vdev)
+		return;
+
+	vdev->disable_intrabss_fwd = val;
+}
+
+#ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
+
+/**
+ * ol_txrx_get_vdev_from_sta_id() - get vdev from sta_id
+ * @sta_id: local station id (index into the peer-find table)
+ *
+ * Return: vdev handle
+ *	   NULL if not found.
+ */
+static ol_txrx_vdev_handle ol_txrx_get_vdev_from_sta_id(uint8_t sta_id)
+{
+	struct ol_txrx_peer_t *peer = NULL;
+	ol_txrx_pdev_handle pdev = NULL;
+
+	if (sta_id >= WLAN_MAX_STA_COUNT) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "Invalid sta id passed");
+		return NULL;
+	}
+
+	pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+	if (!pdev) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "PDEV not found for sta_id [%d]", sta_id);
+		return NULL;
+	}
+
+	/* NOTE(review): lookup takes no reference on the peer, so the
+	 * returned vdev is only safe while the peer remains registered -
+	 * confirm callers honor that. */
+	peer = ol_txrx_peer_find_by_local_id(pdev, sta_id);
+
+	if (!peer) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "PEER [%d] not found", sta_id);
+		return NULL;
+	}
+
+	return peer->vdev;
+}
+
+/**
+ * ol_txrx_register_tx_flow_control() - register tx flow control callback
+ * @vdev_id: id of the vdev the callback applies to
+ * @flowControl: flow control callback invoked on pause/resume
+ * @osif_fc_ctx: opaque context handed back to @flowControl
+ *
+ * Return: 0 for success or error code
+ */
+int ol_txrx_register_tx_flow_control(uint8_t vdev_id,
+				     ol_txrx_tx_flow_control_fp flowControl,
+				     void *osif_fc_ctx)
+{
+	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
+
+	if (!vdev) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Invalid vdev_id %d", __func__, vdev_id);
+		return -EINVAL;
+	}
+
+	/* update callback and context atomically w.r.t. delivery */
+	cdf_spin_lock_bh(&vdev->flow_control_lock);
+	vdev->osif_flow_control_cb = flowControl;
+	vdev->osif_fc_ctx = osif_fc_ctx;
+	cdf_spin_unlock_bh(&vdev->flow_control_lock);
+
+	return 0;
+}
+
+/**
+ * ol_txrx_deregister_tx_flow_control_cb() - deregister tx flow control
+ * callback
+ * @vdev_id: vdev_id
+ *
+ * Clears both the callback and its context under flow_control_lock so
+ * an in-flight ol_txrx_flow_control_cb() cannot see a half-cleared pair.
+ *
+ * Return: 0 for success or error code
+ */
+int ol_txrx_deregister_tx_flow_control_cb(uint8_t vdev_id)
+{
+	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
+	if (NULL == vdev) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Invalid vdev_id", __func__);
+		return -EINVAL;
+	}
+
+	cdf_spin_lock_bh(&vdev->flow_control_lock);
+	vdev->osif_flow_control_cb = NULL;
+	vdev->osif_fc_ctx = NULL;
+	cdf_spin_unlock_bh(&vdev->flow_control_lock);
+	return 0;
+}
+
+/**
+ * ol_txrx_get_tx_resource() - check tx descriptors against low_watermark
+ * @sta_id: sta id
+ * @low_watermark: low watermark
+ * @high_watermark_offset: high watermark offset value
+ *
+ * When the free descriptor count drops below @low_watermark the vdev's
+ * watermarks are recorded and the OS tx queue is paused.
+ *
+ * Return: true when enough descriptors remain (or the sta is unknown),
+ *	   false after pausing the OS tx queue due to shortage
+ */
+bool
+ol_txrx_get_tx_resource(uint8_t sta_id,
+			unsigned int low_watermark,
+			unsigned int high_watermark_offset)
+{
+	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_sta_id(sta_id);
+	if (NULL == vdev) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Invalid sta_id %d", __func__, sta_id);
+		/* Return true so caller do not understand that resource
+		 * is less than low_watermark.
+		 * sta_id validation will be done in ol_tx_send_data_frame
+		 * and if sta_id is not registered then host will drop
+		 * packet.
+		 */
+		return true;
+	}
+
+	/* tx_mutex protects the free-descriptor count and the pause flag */
+	cdf_spin_lock_bh(&vdev->pdev->tx_mutex);
+	if (vdev->pdev->tx_desc.num_free < (uint16_t) low_watermark) {
+		vdev->tx_fl_lwm = (uint16_t) low_watermark;
+		vdev->tx_fl_hwm =
+			(uint16_t) (low_watermark + high_watermark_offset);
+		/* Not enough free resource, stop TX OS Q */
+		cdf_atomic_set(&vdev->os_q_paused, 1);
+		cdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
+		return false;
+	}
+	cdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
+	return true;
+}
+
+/**
+ * ol_txrx_ll_set_tx_pause_q_depth() - set the pause queue depth limit
+ * @vdev_id: id of the vdev whose ll_pause queue is configured
+ * @pause_q_depth: maximum number of frames held while paused
+ *
+ * Return: 0 for success or error code
+ */
+int
+ol_txrx_ll_set_tx_pause_q_depth(uint8_t vdev_id, int pause_q_depth)
+{
+	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
+
+	if (!vdev) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Invalid vdev_id %d", __func__, vdev_id);
+		return -EINVAL;
+	}
+
+	cdf_spin_lock_bh(&vdev->ll_pause.mutex);
+	vdev->ll_pause.max_q_depth = pause_q_depth;
+	cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+
+	return 0;
+}
+
+/**
+ * ol_txrx_flow_control_cb() - invoke the OS interface flow control callback
+ * @vdev: virtual device whose callback is invoked
+ * @tx_resume: tx resume flag passed through to the callback
+ *
+ * The callback runs only while both the function pointer and its
+ * context are registered; check and call happen under
+ * flow_control_lock so deregistration cannot race with delivery.
+ *
+ * Return: none
+ */
+inline void ol_txrx_flow_control_cb(ol_txrx_vdev_handle vdev,
+				    bool tx_resume)
+{
+	cdf_spin_lock_bh(&vdev->flow_control_lock);
+	if (vdev->osif_flow_control_cb && vdev->osif_fc_ctx)
+		vdev->osif_flow_control_cb(vdev->osif_fc_ctx, tx_resume);
+	cdf_spin_unlock_bh(&vdev->flow_control_lock);
+}
+#endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
+
+#ifdef IPA_OFFLOAD
+/**
+ * ol_txrx_ipa_uc_get_resource() - Client request resource information
+ * @pdev: handle to the HTT instance
+ * @ce_sr_base_paddr: copy engine source ring base physical address
+ * @ce_sr_ring_size: copy engine source ring size
+ * @ce_reg_paddr: copy engine register physical address
+ * @tx_comp_ring_base_paddr: tx comp ring base physical address
+ * @tx_comp_ring_size: tx comp ring size
+ * @tx_num_alloc_buffer: number of allocated tx buffer
+ * @rx_rdy_ring_base_paddr: rx ready ring base physical address
+ * @rx_rdy_ring_size: rx ready ring size
+ * @rx_proc_done_idx_paddr: rx process done index physical address
+ * @rx_proc_done_idx_vaddr: rx process done index virtual address
+ * @rx2_rdy_ring_base_paddr: rx done ring base physical address
+ * @rx2_rdy_ring_size: rx done ring size
+ * @rx2_proc_done_idx2_paddr: rx done index physical address
+ * @rx2_proc_done_idx2_vaddr: rx done index virtual address
+ *
+ * OL client will request IPA UC related resource information
+ * Resource information will be distributed to IPA module
+ * All of the required resources should be pre-allocated
+ *
+ * Return: none
+ */
+void
+ol_txrx_ipa_uc_get_resource(ol_txrx_pdev_handle pdev,
+			    cdf_dma_addr_t *ce_sr_base_paddr,
+			    uint32_t *ce_sr_ring_size,
+			    cdf_dma_addr_t *ce_reg_paddr,
+			    cdf_dma_addr_t *tx_comp_ring_base_paddr,
+			    uint32_t *tx_comp_ring_size,
+			    uint32_t *tx_num_alloc_buffer,
+			    cdf_dma_addr_t *rx_rdy_ring_base_paddr,
+			    uint32_t *rx_rdy_ring_size,
+			    cdf_dma_addr_t *rx_proc_done_idx_paddr,
+			    void **rx_proc_done_idx_vaddr,
+			    cdf_dma_addr_t *rx2_rdy_ring_base_paddr,
+			    uint32_t *rx2_rdy_ring_size,
+			    cdf_dma_addr_t *rx2_proc_done_idx2_paddr,
+			    void **rx2_proc_done_idx2_vaddr)
+{
+	/* pure pass-through to the HTT layer; all out-params are filled
+	 * there */
+	htt_ipa_uc_get_resource(pdev->htt_pdev,
+				ce_sr_base_paddr,
+				ce_sr_ring_size,
+				ce_reg_paddr,
+				tx_comp_ring_base_paddr,
+				tx_comp_ring_size,
+				tx_num_alloc_buffer,
+				rx_rdy_ring_base_paddr,
+				rx_rdy_ring_size, rx_proc_done_idx_paddr,
+				rx_proc_done_idx_vaddr,
+				rx2_rdy_ring_base_paddr,
+				rx2_rdy_ring_size, rx2_proc_done_idx2_paddr,
+				rx2_proc_done_idx2_vaddr);
+}
+
+/**
+ * ol_txrx_ipa_uc_set_doorbell_paddr() - Client set IPA UC doorbell register
+ * @pdev: handle to the HTT instance
+ * @ipa_tx_uc_doorbell_paddr: tx comp doorbell physical address
+ * @ipa_rx_uc_doorbell_paddr: rx ready doorbell physical address
+ *
+ * IPA UC let know doorbell register physical address
+ * WLAN firmware will use this physical address to notify IPA UC
+ *
+ * Return: none
+ */
+void
+ol_txrx_ipa_uc_set_doorbell_paddr(ol_txrx_pdev_handle pdev,
+				  cdf_dma_addr_t ipa_tx_uc_doorbell_paddr,
+				  cdf_dma_addr_t ipa_rx_uc_doorbell_paddr)
+{
+	htt_ipa_uc_set_doorbell_paddr(pdev->htt_pdev,
+				      ipa_tx_uc_doorbell_paddr,
+				      ipa_rx_uc_doorbell_paddr);
+}
+
+/**
+ * ol_txrx_ipa_uc_set_active() - Client notify IPA UC data path active or not
+ * @pdev: handle to the HTT instance
+ * @uc_active: true when the IPA uC data path becomes active
+ * @is_tx: true for the tx direction, false for the rx direction
+ *
+ * Forwards the activity state to firmware via an HTT H2T message.
+ *
+ * Return: none
+ */
+void
+ol_txrx_ipa_uc_set_active(ol_txrx_pdev_handle pdev, bool uc_active, bool is_tx)
+{
+	htt_h2t_ipa_uc_set_active(pdev->htt_pdev, uc_active, is_tx);
+}
+
+/**
+ * ol_txrx_ipa_uc_fw_op_event_handler() - deliver an IPA uC opcode message
+ * @context: txrx pdev the message was posted against
+ * @rxpkt: opcode message; ownership passes to the registered callback,
+ *	   or the buffer is freed here on any error path
+ * @staid: peer id (unused; required by the rx-thread callback signature)
+ *
+ * Return: None
+ */
+void ol_txrx_ipa_uc_fw_op_event_handler(void *context,
+					void *rxpkt,
+					uint16_t staid)
+{
+	ol_txrx_pdev_handle pdev = (ol_txrx_pdev_handle)context;
+
+	if (cdf_unlikely(!pdev)) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Invalid context", __func__);
+		cdf_mem_free(rxpkt);
+		return;
+	}
+
+	if (!pdev->ipa_uc_op_cb) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "%s: ipa_uc_op_cb NULL", __func__);
+		cdf_mem_free(rxpkt);
+		return;
+	}
+
+	pdev->ipa_uc_op_cb(rxpkt, pdev->osif_dev);
+}
+
+#ifdef QCA_CONFIG_SMP
+/**
+ * ol_txrx_ipa_uc_op_response() - Handle OP command response from firmware
+ * @pdev: handle to the HTT instance
+ * @op_msg: op response message from firmware; ownership passes to the
+ *	    rx-thread handler, which frees it on its error paths
+ *
+ * Defers processing to the rx thread via
+ * ol_txrx_ipa_uc_fw_op_event_handler() so IPA APIs are not called from
+ * softirq context.
+ *
+ * Return: none
+ */
+void ol_txrx_ipa_uc_op_response(ol_txrx_pdev_handle pdev, uint8_t *op_msg)
+{
+	p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
+	struct cds_ol_rx_pkt *pkt;
+
+	if (cdf_unlikely(!sched_ctx))
+		return;
+
+	pkt = cds_alloc_ol_rx_pkt(sched_ctx);
+	if (cdf_unlikely(!pkt)) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Not able to allocate context", __func__);
+		return;
+	}
+
+	pkt->callback = (cds_ol_rx_thread_cb) ol_txrx_ipa_uc_fw_op_event_handler;
+	pkt->context = pdev;
+	pkt->Rxpkt = (void *)op_msg;
+	pkt->staId = 0;
+	cds_indicate_rxpkt(sched_ctx, pkt);
+}
+#else
+/**
+ * ol_txrx_ipa_uc_op_response() - Handle OP command response from firmware
+ * @pdev: handle to the HTT instance
+ * @op_msg: op response message; ownership passes to the registered
+ *	    callback, or the buffer is freed here when none is registered
+ *
+ * Non-SMP variant: the message is handled inline instead of being
+ * posted to the rx thread.
+ *
+ * Return: none
+ */
+void ol_txrx_ipa_uc_op_response(ol_txrx_pdev_handle pdev,
+				uint8_t *op_msg)
+{
+	if (pdev->ipa_uc_op_cb) {
+		pdev->ipa_uc_op_cb(op_msg, pdev->osif_dev);
+	} else {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			"%s: IPA callback function is not registered", __func__);
+		cdf_mem_free(op_msg);
+		return;
+	}
+}
+#endif
+
+/**
+ * ol_txrx_ipa_uc_register_op_cb() - Register OP handler function
+ * @pdev: handle to the HTT instance
+ * @op_cb: handler function pointer
+ * @osif_dev: register client context
+ *
+ * NOTE(review): the pair is stored without locking; presumably
+ * registration happens before op responses can arrive - confirm.
+ *
+ * Return: none
+ */
+void ol_txrx_ipa_uc_register_op_cb(ol_txrx_pdev_handle pdev,
+				   ipa_uc_op_cb_type op_cb, void *osif_dev)
+{
+	pdev->ipa_uc_op_cb = op_cb;
+	pdev->osif_dev = osif_dev;
+}
+
+/**
+ * ol_txrx_ipa_uc_get_stat() - Get firmware wdi status
+ * @pdev: handle to the HTT instance
+ *
+ * Sends an HTT H2T request; the stats arrive asynchronously.
+ *
+ * Return: none
+ */
+void ol_txrx_ipa_uc_get_stat(ol_txrx_pdev_handle pdev)
+{
+	htt_h2t_ipa_uc_get_stats(pdev->htt_pdev);
+}
+#endif /* IPA_OFFLOAD */
+
+/**
+ * ol_txrx_display_stats() - print one category of txrx statistics
+ * @value: WLAN_* selector naming the stats group to display
+ *
+ * Return: none
+ */
+void ol_txrx_display_stats(uint16_t value)
+{
+	ol_txrx_pdev_handle pdev;
+
+	pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+	if (!pdev) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "%s: pdev is NULL", __func__);
+		return;
+	}
+
+	switch (value) {
+	case WLAN_TXRX_STATS:
+		ol_txrx_stats_display(pdev);
+		break;
+	case WLAN_TXRX_TSO_STATS:
+#if defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG)
+		ol_txrx_stats_display_tso(pdev);
+#else
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "%s: TSO not supported", __func__);
+#endif
+		break;
+	case WLAN_DUMP_TX_FLOW_POOL_INFO:
+		ol_tx_dump_flow_pool_info();
+		break;
+	case WLAN_TXRX_DESC_STATS:
+		cdf_nbuf_tx_desc_count_display();
+		break;
+	default:
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Unknown value", __func__);
+		break;
+	}
+}
+
+/**
+ * ol_txrx_clear_stats() - reset one category of txrx statistics
+ * @value: WLAN_* selector naming the stats group to clear
+ *
+ * Return: none
+ */
+void ol_txrx_clear_stats(uint16_t value)
+{
+	ol_txrx_pdev_handle pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+
+	if (!pdev) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "%s: pdev is NULL", __func__);
+		return;
+	}
+
+	if (value == WLAN_TXRX_STATS)
+		ol_txrx_stats_clear(pdev);
+	else if (value == WLAN_DUMP_TX_FLOW_POOL_INFO)
+		ol_tx_clear_flow_pool_stats();
+	else if (value == WLAN_TXRX_DESC_STATS)
+		cdf_nbuf_tx_desc_count_clear();
+	else
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Unknown value", __func__);
+}
+
+/**
+ * ol_rx_data_cb() - deliver an rx frame chain to the OS interface
+ * @peer: peer the frames belong to; must be at least in "conn" state
+ * @buf_list: chain of cdf_nbuf_t frames linked via the queue-next field
+ *
+ * Flushes any frames cached before peer registration first so ordering
+ * is preserved, then hands each frame individually to the peer's
+ * osif_rx callback. Frames are freed here on every failure path.
+ *
+ * Return: None
+ */
+static void ol_rx_data_cb(struct ol_txrx_peer_t *peer,
+			  cdf_nbuf_t buf_list)
+{
+	void *cds_ctx = cds_get_global_context();
+	cdf_nbuf_t buf, next_buf;
+	CDF_STATUS ret;
+	ol_rx_callback_fp data_rx = NULL;
+
+	if (cdf_unlikely(!cds_ctx))
+		goto free_buf;
+
+	/* snapshot the callback under the lock; drop if peer not connected */
+	cdf_spin_lock_bh(&peer->peer_info_lock);
+	if (cdf_unlikely(!(peer->state >= ol_txrx_peer_state_conn))) {
+		cdf_spin_unlock_bh(&peer->peer_info_lock);
+		goto free_buf;
+	}
+	data_rx = peer->osif_rx;
+	cdf_spin_unlock_bh(&peer->peer_info_lock);
+
+	cdf_spin_lock_bh(&peer->bufq_lock);
+	if (!list_empty(&peer->cached_bufq)) {
+		cdf_spin_unlock_bh(&peer->bufq_lock);
+		/* Flush the cached frames to HDD before passing new rx frame */
+		ol_txrx_flush_rx_frames(peer, 0);
+	} else
+		cdf_spin_unlock_bh(&peer->bufq_lock);
+
+	buf = buf_list;
+	while (buf) {
+		next_buf = cdf_nbuf_queue_next(buf);
+		cdf_nbuf_set_next(buf, NULL);   /* Add NULL terminator */
+		ret = data_rx(cds_ctx, buf, peer->local_id);
+		if (ret != CDF_STATUS_SUCCESS) {
+			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Frame Rx to HDD failed");
+			cdf_nbuf_free(buf);
+		}
+		buf = next_buf;
+	}
+	return;
+
+free_buf:
+	TXRX_PRINT(TXRX_PRINT_LEVEL_WARN, "%s:Dropping frames", __func__);
+	buf = buf_list;
+	while (buf) {
+		next_buf = cdf_nbuf_queue_next(buf);
+		cdf_nbuf_free(buf);
+		buf = next_buf;
+	}
+}
+
+/**
+ * ol_rx_data_process() - process rx frame
+ * @peer: peer the frames belong to
+ * @rx_buf_list: chain of received cdf_nbuf_t frames
+ *
+ * Frames that arrive before the peer registered for data service are
+ * cached on the peer's bufq and flushed later from
+ * ol_txrx_register_peer(). Otherwise delivery goes through
+ * ol_rx_data_cb(), possibly deferred to the rx thread on SMP builds.
+ * All frames are freed here when peer/pdev are missing or the rx
+ * thread cannot accept them.
+ *
+ * Return: None
+ */
+void ol_rx_data_process(struct ol_txrx_peer_t *peer,
+			cdf_nbuf_t rx_buf_list)
+{
+	/* Firmware data path active response will use shim RX thread
+	 * T2H MSG running on SIRQ context,
+	 * IPA kernel module API should not be called on SIRQ CTXT */
+	cdf_nbuf_t buf, next_buf;
+	ol_rx_callback_fp data_rx = NULL;
+	ol_txrx_pdev_handle pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+
+	if ((!peer) || (!pdev)) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "peer/pdev is NULL");
+		goto drop_rx_buf;
+	}
+
+	cdf_spin_lock_bh(&peer->peer_info_lock);
+	if (peer->state >= ol_txrx_peer_state_conn)
+		data_rx = peer->osif_rx;
+	cdf_spin_unlock_bh(&peer->peer_info_lock);
+
+	/*
+	 * If there is a data frame from peer before the peer is
+	 * registered for data service, enqueue them on to pending queue
+	 * which will be flushed to HDD once that station is registered.
+	 */
+	if (!data_rx) {
+		struct ol_rx_cached_buf *cache_buf;
+		buf = rx_buf_list;
+		while (buf) {
+			next_buf = cdf_nbuf_queue_next(buf);
+			cache_buf = cdf_mem_malloc(sizeof(*cache_buf));
+			if (!cache_buf) {
+				TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+					"Failed to allocate buf to cache the rx frames");
+				cdf_nbuf_free(buf);
+			} else {
+				/* Add NULL terminator */
+				cdf_nbuf_set_next(buf, NULL);
+				cache_buf->buf = buf;
+				cdf_spin_lock_bh(&peer->bufq_lock);
+				list_add_tail(&cache_buf->list,
+					      &peer->cached_bufq);
+				cdf_spin_unlock_bh(&peer->bufq_lock);
+			}
+			buf = next_buf;
+		}
+	} else {
+#ifdef QCA_CONFIG_SMP
+		/*
+		 * If the kernel is SMP, schedule rx thread to
+		 * better use multicores.
+		 */
+		if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
+			ol_rx_data_cb(peer, rx_buf_list);
+		} else {
+			p_cds_sched_context sched_ctx =
+				get_cds_sched_ctxt();
+			struct cds_ol_rx_pkt *pkt;
+
+			if (unlikely(!sched_ctx))
+				goto drop_rx_buf;
+
+			pkt = cds_alloc_ol_rx_pkt(sched_ctx);
+			if (!pkt) {
+				TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+					"No available Rx message buffer");
+				goto drop_rx_buf;
+			}
+			pkt->callback = (cds_ol_rx_thread_cb)
+				ol_rx_data_cb;
+			pkt->context = (void *)peer;
+			pkt->Rxpkt = (void *)rx_buf_list;
+			pkt->staId = peer->local_id;
+			cds_indicate_rxpkt(sched_ctx, pkt);
+		}
+#else /* QCA_CONFIG_SMP */
+		/* ol_rx_data_cb() takes (peer, buf_list); the previous
+		 * spurious third argument broke non-SMP builds. */
+		ol_rx_data_cb(peer, rx_buf_list);
+#endif /* QCA_CONFIG_SMP */
+	}
+
+	return;
+
+drop_rx_buf:
+	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Dropping rx packets");
+	buf = rx_buf_list;
+	while (buf) {
+		next_buf = cdf_nbuf_queue_next(buf);
+		cdf_nbuf_free(buf);
+		buf = next_buf;
+	}
+}
+
+/**
+ * ol_txrx_register_peer() - register a peer for data service
+ * @rxcb: rx callback that delivered frames will be passed to
+ * @sta_desc: sta descriptor (sta_id, QoS/WAPI capability flags)
+ *
+ * Marks the peer connected, pushes its QoS capability through the peer
+ * update path, installs a WAPI privacy filter when requested, and
+ * flushes any rx frames cached while the peer was unregistered.
+ *
+ * Return: CDF Status
+ */
+CDF_STATUS ol_txrx_register_peer(ol_rx_callback_fp rxcb,
+				 struct ol_txrx_desc_type *sta_desc)
+{
+	struct ol_txrx_peer_t *peer;
+	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+	union ol_txrx_peer_update_param_t param;
+	struct privacy_exemption privacy_filter;
+
+	if (!pdev) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Pdev is NULL");
+		return CDF_STATUS_E_INVAL;
+	}
+
+	if (sta_desc->sta_id >= WLAN_MAX_STA_COUNT) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Invalid sta id :%d",
+			   sta_desc->sta_id);
+		return CDF_STATUS_E_INVAL;
+	}
+
+	peer = ol_txrx_peer_find_by_local_id(pdev, sta_desc->sta_id);
+	if (!peer)
+		return CDF_STATUS_E_FAULT;
+
+	cdf_spin_lock_bh(&peer->peer_info_lock);
+	peer->osif_rx = rxcb;
+	peer->state = ol_txrx_peer_state_conn;
+	cdf_spin_unlock_bh(&peer->peer_info_lock);
+
+	param.qos_capable = sta_desc->is_qos_enabled;
+	ol_txrx_peer_update(peer->vdev, peer->mac_addr.raw, &param,
+			    ol_txrx_peer_update_qos_capable);
+
+	if (sta_desc->is_wapi_supported) {
+		/*Privacy filter to accept unencrypted WAI frames */
+		privacy_filter.ether_type = ETHERTYPE_WAI;
+		privacy_filter.filter_type = PRIVACY_FILTER_ALWAYS;
+		privacy_filter.packet_type = PRIVACY_FILTER_PACKET_BOTH;
+		ol_txrx_set_privacy_filters(peer->vdev, &privacy_filter, 1);
+	}
+
+	/* deliver frames that were cached before registration */
+	ol_txrx_flush_rx_frames(peer, 0);
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * ol_txrx_clear_peer() - detach a peer from data service
+ * @sta_id: sta id
+ *
+ * Drops any frames still queued for the peer in the rx thread (SMP
+ * builds), purges the peer's cached rx bufq, clears the rx callback
+ * and moves the peer to the disconnected state.
+ *
+ * Return: CDF Status
+ */
+CDF_STATUS ol_txrx_clear_peer(uint8_t sta_id)
+{
+	struct ol_txrx_peer_t *peer;
+	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+
+	if (!pdev) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: Unable to find pdev!",
+			   __func__);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	if (sta_id >= WLAN_MAX_STA_COUNT) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Invalid sta id %d", sta_id);
+		return CDF_STATUS_E_INVAL;
+	}
+
+#ifdef QCA_CONFIG_SMP
+	{
+		p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
+		/* Drop pending Rx frames in CDS */
+		if (sched_ctx)
+			cds_drop_rxpkt_by_staid(sched_ctx, sta_id);
+	}
+#endif
+
+	peer = ol_txrx_peer_find_by_local_id(pdev, sta_id);
+	if (!peer)
+		return CDF_STATUS_E_FAULT;
+
+	/* Purge the cached rx frame queue */
+	ol_txrx_flush_rx_frames(peer, 1);
+
+	cdf_spin_lock_bh(&peer->peer_info_lock);
+	peer->osif_rx = NULL;
+	peer->state = ol_txrx_peer_state_disc;
+	cdf_spin_unlock_bh(&peer->peer_info_lock);
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * ol_txrx_register_ocb_peer - Function to register the OCB peer
+ * @cds_ctx: Pointer to the global OS context
+ * @mac_addr: MAC address of the self peer
+ * @peer_id: Pointer to the peer ID (filled by the address lookup)
+ *
+ * Looks up the self peer by MAC address, records it as the pdev's OCB
+ * peer and moves it straight to the authenticated state.
+ *
+ * Return: CDF_STATUS_SUCCESS on success, CDF_STATUS_E_FAILURE on failure
+ */
+CDF_STATUS ol_txrx_register_ocb_peer(void *cds_ctx, uint8_t *mac_addr,
+				     uint8_t *peer_id)
+{
+	ol_txrx_pdev_handle pdev;
+	ol_txrx_peer_handle peer;
+
+	if (!cds_ctx) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: Invalid context",
+			   __func__);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+	if (!pdev) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: Unable to find pdev!",
+			   __func__);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	peer = ol_txrx_find_peer_by_addr(pdev, mac_addr, peer_id);
+	if (!peer) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: Unable to find OCB peer!",
+			   __func__);
+		return CDF_STATUS_E_FAILURE;
+	}
+
+	ol_txrx_set_ocb_peer(pdev, peer);
+
+	/* Set peer state to connected */
+	ol_txrx_peer_state_update(pdev, peer->mac_addr.raw,
+				  ol_txrx_peer_state_auth);
+
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * ol_txrx_set_ocb_peer - Function to store the OCB peer
+ * @pdev: Handle to the HTT instance; NULL is silently ignored
+ * @peer: Pointer to the peer, or NULL to invalidate the OCB peer
+ */
+void ol_txrx_set_ocb_peer(struct ol_txrx_pdev_t *pdev,
+			  struct ol_txrx_peer_t *peer)
+{
+	if (!pdev)
+		return;
+
+	pdev->ocb_peer = peer;
+	pdev->ocb_peer_valid = (peer != NULL);
+}
+
+/**
+ * ol_txrx_get_ocb_peer - Function to retrieve the OCB peer
+ * @pdev: Handle to the HTT instance
+ * @peer: Pointer to the returned peer; only written when a valid OCB
+ *	  peer is recorded
+ *
+ * Return: true if the peer is valid, false if not
+ */
+bool ol_txrx_get_ocb_peer(struct ol_txrx_pdev_t *pdev,
+			  struct ol_txrx_peer_t **peer)
+{
+	if (!pdev || !peer)
+		return false;
+
+	if (!pdev->ocb_peer_valid)
+		return false;
+
+	*peer = pdev->ocb_peer;
+	return true;
+}
+
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+/**
+ * ol_txrx_register_pause_cb() - register pause callback
+ * @pause_cb: pause callback invoked by the flow-control path
+ *
+ * NOTE(review): the pointer is stored without locking; presumably
+ * registration completes before tx flow control runs - confirm.
+ *
+ * Return: CDF status
+ */
+CDF_STATUS ol_txrx_register_pause_cb(ol_tx_pause_callback_fp pause_cb)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+	if (!pdev || !pause_cb) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "pdev or pause_cb is NULL");
+		return CDF_STATUS_E_INVAL;
+	}
+	pdev->pause_cb = pause_cb;
+	return CDF_STATUS_SUCCESS;
+}
+#endif
+
+#if defined(FEATURE_LRO)
+/**
+ * ol_txrx_lro_flush_handler() - LRO flush handler
+ * @context: dev handle (the txrx pdev)
+ * @rxpkt: rx data (unused; rx-thread callback signature requirement)
+ * @staid: station id (unused; rx-thread callback signature requirement)
+ *
+ * This function handles an LRO flush indication.
+ * If the rx thread is enabled, it will be invoked by the rx
+ * thread else it will be called in the tasklet context
+ *
+ * Return: none
+ */
+void ol_txrx_lro_flush_handler(void *context,
+			       void *rxpkt,
+			       uint16_t staid)
+{
+	ol_txrx_pdev_handle pdev = (ol_txrx_pdev_handle)context;
+
+	if (cdf_unlikely(!pdev)) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Invalid context", __func__);
+		cdf_assert(0);
+		return;
+	}
+
+	if (pdev->lro_info.lro_flush_cb)
+		pdev->lro_info.lro_flush_cb(pdev->lro_info.lro_data);
+	else
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "%s: lro_flush_cb NULL", __func__);
+}
+
+/**
+ * ol_txrx_lro_flush() - LRO flush callback
+ * @data: opaque data pointer (the txrx pdev registered with CE)
+ *
+ * This is the callback registered with CE to trigger
+ * an LRO flush. When the rx thread is enabled the flush is deferred
+ * to that thread; otherwise it is handled inline.
+ *
+ * Return: none
+ */
+void ol_txrx_lro_flush(void *data)
+{
+	p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
+	struct cds_ol_rx_pkt *pkt;
+	ol_txrx_pdev_handle pdev = (ol_txrx_pdev_handle)data;
+
+	if (cdf_unlikely(!sched_ctx))
+		return;
+
+	/* guard before dereferencing pdev->ctrl_pdev below; the handler
+	 * this function defers to already checks its pdev the same way */
+	if (cdf_unlikely(!pdev)) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "%s: Invalid context", __func__);
+		return;
+	}
+
+	if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
+		ol_txrx_lro_flush_handler((void *)pdev, NULL, 0);
+	} else {
+		pkt = cds_alloc_ol_rx_pkt(sched_ctx);
+		if (cdf_unlikely(!pkt)) {
+			CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+				  "%s: Not able to allocate context", __func__);
+			return;
+		}
+
+		pkt->callback =
+			(cds_ol_rx_thread_cb) ol_txrx_lro_flush_handler;
+		pkt->context = pdev;
+		pkt->Rxpkt = NULL;
+		pkt->staId = 0;
+		cds_indicate_rxpkt(sched_ctx, pkt);
+	}
+}
+
+/**
+ * ol_register_lro_flush_cb() - register the LRO flush callback
+ * @handler: callback function
+ * @data: opaque data pointer to be passed back
+ *
+ * Store the LRO flush callback provided and in turn
+ * register OL's LRO flush handler with CE. When either module context
+ * is unavailable nothing is registered.
+ *
+ * Return: none
+ */
+void ol_register_lro_flush_cb(void (handler)(void *), void *data)
+{
+	struct ol_softc *hif_device =
+		(struct ol_softc *)cds_get_context(CDF_MODULE_ID_HIF);
+	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+
+	/* cds_get_context() can return NULL before attach completes;
+	 * every other caller in this file checks it, so do the same
+	 * instead of dereferencing a NULL pdev */
+	if (!pdev || !hif_device) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "%s: pdev or hif_device is NULL", __func__);
+		return;
+	}
+
+	pdev->lro_info.lro_flush_cb = handler;
+	pdev->lro_info.lro_data = data;
+
+	ce_lro_flush_cb_register(hif_device, ol_txrx_lro_flush, pdev);
+}
+
+/**
+ * ol_deregister_lro_flush_cb() - deregister the LRO flush
+ * callback
+ *
+ * Remove the LRO flush callback provided and in turn
+ * deregister OL's LRO flush handler with CE. When either module
+ * context is unavailable nothing is done.
+ *
+ * Return: none
+ */
+void ol_deregister_lro_flush_cb(void)
+{
+	struct ol_softc *hif_device =
+		(struct ol_softc *)cds_get_context(CDF_MODULE_ID_HIF);
+	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+
+	/* guard against NULL contexts during teardown; mirrors the check
+	 * performed on the registration path */
+	if (!pdev || !hif_device) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "%s: pdev or hif_device is NULL", __func__);
+		return;
+	}
+
+	ce_lro_flush_cb_deregister(hif_device);
+
+	pdev->lro_info.lro_flush_cb = NULL;
+	pdev->lro_info.lro_data = NULL;
+}
+#endif /* FEATURE_LRO */
diff --git a/dp/txrx/ol_txrx.h b/dp/txrx/ol_txrx.h
new file mode 100644
index 000000000000..4e77867ce37f
--- /dev/null
+++ b/dp/txrx/ol_txrx.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2012-2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _OL_TXRX__H_
+#define _OL_TXRX__H_
+
+#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <ol_txrx_types.h> /* ol_txrx_vdev_t, etc. */
+#include <ol_ctrl_api.h> /* ol_pdev_handle */
+#include "cds_sched.h"
+
+void ol_txrx_peer_unref_delete(struct ol_txrx_peer_t *peer);
+
+#ifndef OL_TX_AVG_FRM_BYTES
+#define OL_TX_AVG_FRM_BYTES 1000
+#endif
+
+/**
+ * ol_txrx_get_vdev_from_vdev_id() - get vdev from vdev_id
+ * @vdev_id: vdev_id
+ *
+ * Return: vdev handle
+ * NULL if not found.
+ */
+static inline ol_txrx_vdev_handle ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id)
+{
+	ol_txrx_pdev_handle pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+	ol_txrx_vdev_handle cur;
+
+	/* No TXRX context registered yet: nothing to search. */
+	if (cdf_unlikely(!pdev))
+		return NULL;
+
+	/* Scan the pdev's vdev list and return the first id match. */
+	TAILQ_FOREACH(cur, &pdev->vdev_list, vdev_list_elem) {
+		if (cur->vdev_id == vdev_id)
+			return cur;
+	}
+
+	return NULL;
+}
+
+#endif /* _OL_TXRX__H_ */
diff --git a/dp/txrx/ol_txrx_encap.c b/dp/txrx/ol_txrx_encap.c
new file mode 100644
index 000000000000..a7471da73748
--- /dev/null
+++ b/dp/txrx/ol_txrx_encap.c
@@ -0,0 +1,593 @@
+/*
+ * Copyright (c) 2012-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_txrx_encap.c
+ * @brief Provide functions to encap/decap on txrx frames.
+ * @details
+ * This file contains functions for data frame encap/decap:
+ * ol_tx_encap: encap outgoing data frames.
+ * ol_rx_decap: decap incoming data frames.
+ */
+#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
+
+#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+#include <cds_ieee80211_common.h> /* ieee80211_frame */
+#include <net.h> /* struct llc, struct ether_header, etc. */
+#include <ol_txrx_internal.h> /* TXRX_ASSERT1 */
+#include <ol_txrx_types.h> /* struct ol_txrx_vdev_t, ol_txrx_pdev_t,etc. */
+#include <ol_txrx_encap.h> /* struct ol_rx_decap_info_t */
+
+/*
+ * OL_TX_COPY_NATIVE_WIFI_HEADER - copy the 802.11 header at the head of
+ * @msdu into @localbuf, setting @hdsize to 4-address or 3-address header
+ * size based on the ToDS/FromDS bits, and repointing @wh at the copy.
+ *
+ * NOTE(review): this macro hides a 'return A_ERROR' for short frames, so
+ * it may only be expanded inside a function returning A_STATUS.  The
+ * inline function ol_tx_copy_native_wifi_header() below provides the same
+ * logic without that hazard -- confirm whether this macro is still
+ * referenced anywhere.
+ */
+#define OL_TX_COPY_NATIVE_WIFI_HEADER(wh, msdu, hdsize, localbuf) \
+	do { \
+		wh = (struct ieee80211_frame *)cdf_nbuf_data(msdu); \
+		if ((wh->i_fc[1] & \
+		    IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS) { \
+			hdsize = sizeof(struct ieee80211_frame_addr4); \
+		} else { \
+			hdsize = sizeof(struct ieee80211_frame); \
+		} \
+		if (cdf_nbuf_len(msdu) < hdsize) { \
+			return A_ERROR; \
+		} \
+		cdf_mem_copy(localbuf, wh, hdsize); \
+		wh = (struct ieee80211_frame *)localbuf; \
+	} while (0)
+
+/*
+ * ol_tx_copy_native_wifi_header() - snapshot the 802.11 header at the
+ * head of @msdu into @localbuf and report its size through @hdsize.
+ *
+ * Return: A_OK on success, A_ERROR if the buffer is shorter than the
+ * header it claims to carry.
+ */
+static inline A_STATUS
+ol_tx_copy_native_wifi_header(cdf_nbuf_t msdu,
+			      uint8_t *hdsize, uint8_t *localbuf)
+{
+	struct ieee80211_frame *hdr =
+		(struct ieee80211_frame *)cdf_nbuf_data(msdu);
+	int is_4addr = (hdr->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
+						IEEE80211_FC1_DIR_DSTODS;
+
+	/* WDS (ToDS|FromDS) frames carry a 4th address field. */
+	*hdsize = is_4addr ? sizeof(struct ieee80211_frame_addr4)
+			   : sizeof(struct ieee80211_frame);
+
+	if (cdf_nbuf_len(msdu) < *hdsize)
+		return A_ERROR;
+
+	cdf_mem_copy(localbuf, hdr, *hdsize);
+	return A_OK;
+}
+
+/*
+ * ol_tx_encap_from_native_wifi() - tx encap for native-wifi format frames.
+ *
+ * For a unicast data frame to a QoS-capable peer: copy the 802.11 header
+ * out of @msdu, append a QoS control field carrying the tx tid, write the
+ * enlarged header into the HTT tx descriptor's mpdu-header area, and pull
+ * the original header off the network buffer.  If sw protected-frame
+ * processing is enabled and encryption was requested, also set the
+ * protected (WEP) bit in the header the target will transmit.
+ *
+ * tx_desc->orig_l2_hdr_bytes records how many bytes were pulled so the
+ * header can be restored on tx completion (see OL_TX_RESTORE_HDR).
+ *
+ * Return: A_OK on success (including the no-op non-data case), A_ERROR
+ * if the buffer is shorter than its 802.11 header.
+ */
+static inline A_STATUS
+ol_tx_encap_from_native_wifi(struct ol_txrx_vdev_t *vdev,
+			     struct ol_tx_desc_t *tx_desc,
+			     cdf_nbuf_t msdu,
+			     struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+	uint8_t localbuf[sizeof(struct ieee80211_qosframe_htc_addr4)];
+	struct ieee80211_frame *wh;
+	uint8_t hdsize, new_hdsize;
+	struct ieee80211_qoscntl *qos_cntl;
+	struct ol_txrx_peer_t *peer;
+
+	/* Only data frames get encapsulated. */
+	if (tx_msdu_info->htt.info.frame_type != htt_frm_type_data)
+		return A_OK;
+
+	peer = tx_msdu_info->peer;
+	/*
+	 * for unicast,the peer should not be NULL.
+	 * for multicast, the peer is AP.
+	 */
+	if (tx_msdu_info->htt.info.is_unicast && peer->qos_capable) {
+		if (A_OK !=
+		    ol_tx_copy_native_wifi_header(msdu, &hdsize, localbuf))
+			return A_ERROR;
+		wh = (struct ieee80211_frame *)localbuf;
+
+		/*add qos cntl */
+		qos_cntl = (struct ieee80211_qoscntl *)(localbuf + hdsize);
+		qos_cntl->i_qos[0] =
+			tx_msdu_info->htt.info.ext_tid & IEEE80211_QOS_TID;
+
+#ifdef NEVERDEFINED
+		if (wmmParam[ac].wmep_noackPolicy)
+			qos_cntl->i_qos[0] |= 1 << IEEE80211_QOS_ACKPOLICY_S;
+#endif
+
+		qos_cntl->i_qos[1] = 0;
+		wh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_QOS;
+		/* count for qos field */
+		new_hdsize =
+			hdsize + sizeof(struct ieee80211_qosframe) -
+			sizeof(struct ieee80211_frame);
+
+		/*add ht control field if needed */
+
+		/* copy new hd to bd */
+		cdf_mem_copy((void *)
+			     htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc,
+						     new_hdsize), localbuf,
+			     new_hdsize);
+		/* the target now sources the header from the tx
+		 * descriptor, so strip it from the payload buffer */
+		cdf_nbuf_pull_head(msdu, hdsize);
+		tx_msdu_info->htt.info.l3_hdr_offset = new_hdsize;
+		tx_desc->orig_l2_hdr_bytes = hdsize;
+	}
+	/* Set Protected Frame bit in MAC header */
+	if (vdev->pdev->sw_pf_proc_enable
+	    && tx_msdu_info->htt.action.do_encrypt) {
+		if (tx_desc->orig_l2_hdr_bytes) {
+			/* header already moved into the tx descriptor
+			 * above; flip the bit there */
+			wh = (struct ieee80211_frame *)
+			     htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc,
+						     tx_msdu_info->htt.info.
+						     l3_hdr_offset);
+		} else {
+			/* non-QoS path: move the unmodified header into
+			 * the tx descriptor first */
+			if (A_OK !=
+			    ol_tx_copy_native_wifi_header(msdu, &hdsize,
+							  localbuf))
+				return A_ERROR;
+			wh = (struct ieee80211_frame *)
+			     htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc,
+						     hdsize);
+			cdf_mem_copy((void *)wh, localbuf, hdsize);
+			cdf_nbuf_pull_head(msdu, hdsize);
+			tx_msdu_info->htt.info.l3_hdr_offset = hdsize;
+			tx_desc->orig_l2_hdr_bytes = hdsize;
+		}
+		wh->i_fc[1] |= IEEE80211_FC1_WEP;
+	}
+	return A_OK;
+}
+
+/*
+ * ol_tx_encap_from_8023() - transform an 802.3 tx frame into 802.11.
+ *
+ * Builds an 802.11 header in a local buffer from the Ethernet header at
+ * the head of @msdu (address layout chosen by vdev opmode), optionally
+ * appends a QoS control field (QoS-capable peer, unicast) and an
+ * LLC/SNAP header (when sw LLC processing is enabled and the ethertype
+ * indicates Ethernet II), writes the result into the HTT tx descriptor's
+ * mpdu-header area, and pulls the Ethernet header off the buffer.
+ *
+ * tx_desc->orig_l2_hdr_bytes records the pulled Ethernet header size so
+ * OL_TX_RESTORE_HDR() can undo the pull on completion.
+ *
+ * Return: A_OK on success (including the no-op non-data case), A_ERROR
+ * for unsupported vdev opmodes (monitor etc.).
+ */
+static inline A_STATUS
+ol_tx_encap_from_8023(struct ol_txrx_vdev_t *vdev,
+		      struct ol_tx_desc_t *tx_desc,
+		      cdf_nbuf_t msdu, struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+	/* localbuf must hold the largest possible 802.11 header plus a
+	 * SNAP header */
+	uint8_t localbuf[sizeof(struct ieee80211_qosframe_htc_addr4)
+			 + sizeof(struct llc_snap_hdr_t)];
+	struct llc_snap_hdr_t *llc_hdr;
+	struct ethernet_hdr_t *eth_hdr;
+	struct ieee80211_frame *wh;
+	uint8_t hdsize, new_l2_hdsize, new_hdsize;
+	struct ieee80211_qoscntl *qos_cntl;
+	/* RFC 1042 SNAP prefix: DSAP/SSAP 0xaa, UI ctrl, OUI 00-00-00 */
+	const uint8_t ethernet_II_llc_snap_header_prefix[] = {
+		0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+	struct ol_txrx_peer_t *peer;
+	uint16_t ether_type;
+
+	if (tx_msdu_info->htt.info.frame_type != htt_frm_type_data)
+		return A_OK;
+
+	/*
+	 * for unicast,the peer should not be NULL.
+	 * for multicast, the peer is AP.
+	 */
+	peer = tx_msdu_info->peer;
+
+	eth_hdr = (struct ethernet_hdr_t *)cdf_nbuf_data(msdu);
+	hdsize = sizeof(struct ethernet_hdr_t);
+	wh = (struct ieee80211_frame *)localbuf;
+	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA;
+	*(uint16_t *) wh->i_dur = 0;
+	new_hdsize = 0;
+
+	/* address fields per 802.11 ToDS/FromDS rules for each opmode */
+	switch (vdev->opmode) {
+	case wlan_op_mode_ap:
+		/* DA , BSSID , SA */
+		cdf_mem_copy(wh->i_addr1, eth_hdr->dest_addr,
+			     IEEE80211_ADDR_LEN);
+		cdf_mem_copy(wh->i_addr2, &vdev->mac_addr.raw,
+			     IEEE80211_ADDR_LEN);
+		cdf_mem_copy(wh->i_addr3, eth_hdr->src_addr,
+			     IEEE80211_ADDR_LEN);
+		wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
+		new_hdsize = sizeof(struct ieee80211_frame);
+		break;
+	case wlan_op_mode_ibss:
+		/* DA, SA, BSSID */
+		cdf_mem_copy(wh->i_addr1, eth_hdr->dest_addr,
+			     IEEE80211_ADDR_LEN);
+		cdf_mem_copy(wh->i_addr2, eth_hdr->src_addr,
+			     IEEE80211_ADDR_LEN);
+		/* need to check the bssid behaviour for IBSS vdev */
+		cdf_mem_copy(wh->i_addr3, &vdev->mac_addr.raw,
+			     IEEE80211_ADDR_LEN);
+		wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
+		new_hdsize = sizeof(struct ieee80211_frame);
+		break;
+	case wlan_op_mode_sta:
+		/* BSSID, SA , DA */
+		cdf_mem_copy(wh->i_addr1, &peer->mac_addr.raw,
+			     IEEE80211_ADDR_LEN);
+		cdf_mem_copy(wh->i_addr2, eth_hdr->src_addr,
+			     IEEE80211_ADDR_LEN);
+		cdf_mem_copy(wh->i_addr3, eth_hdr->dest_addr,
+			     IEEE80211_ADDR_LEN);
+		wh->i_fc[1] = IEEE80211_FC1_DIR_TODS;
+		new_hdsize = sizeof(struct ieee80211_frame);
+		break;
+	case wlan_op_mode_monitor:
+	default:
+		return A_ERROR;
+	}
+	/*add qos cntl */
+	if (tx_msdu_info->htt.info.is_unicast && peer->qos_capable) {
+		qos_cntl = (struct ieee80211_qoscntl *)(localbuf + new_hdsize);
+		qos_cntl->i_qos[0] =
+			tx_msdu_info->htt.info.ext_tid & IEEE80211_QOS_TID;
+		wh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_QOS;
+#ifdef NEVERDEFINED
+		if (wmmParam[ac].wmep_noackPolicy)
+			qos_cntl->i_qos[0] |= 1 << IEEE80211_QOS_ACKPOLICY_S;
+#endif
+		qos_cntl->i_qos[1] = 0;
+		new_hdsize += sizeof(struct ieee80211_qoscntl);
+
+		/*add ht control field if needed */
+	}
+	/* Set Protected Frame bit in MAC header */
+	if (vdev->pdev->sw_pf_proc_enable
+	    && tx_msdu_info->htt.action.do_encrypt) {
+		wh->i_fc[1] |= IEEE80211_FC1_WEP;
+	}
+	/* 802.11 header ends here; anything below is payload (LLC/SNAP) */
+	new_l2_hdsize = new_hdsize;
+	/* add llc snap if needed */
+	if (vdev->pdev->sw_tx_llc_proc_enable) {
+		llc_hdr = (struct llc_snap_hdr_t *)(localbuf + new_hdsize);
+		ether_type =
+			(eth_hdr->ethertype[0] << 8) | (eth_hdr->ethertype[1]);
+		/* >= 0x600 means Ethernet II (a type, not an 802.3
+		 * length): prepend RFC 1042 SNAP */
+		if (ether_type >= IEEE8023_MAX_LEN) {
+			cdf_mem_copy(llc_hdr,
+				     ethernet_II_llc_snap_header_prefix,
+				     sizeof
+				     (ethernet_II_llc_snap_header_prefix));
+			if (ether_type == ETHERTYPE_AARP
+			    || ether_type == ETHERTYPE_IPX) {
+				llc_hdr->org_code[2] = BTEP_SNAP_ORGCODE_2;
+				/* 0xf8; bridge tunnel header */
+			}
+			llc_hdr->ethertype[0] = eth_hdr->ethertype[0];
+			llc_hdr->ethertype[1] = eth_hdr->ethertype[1];
+			new_hdsize += sizeof(struct llc_snap_hdr_t);
+		} else {
+			/*llc ready, and it's in payload pdu,
+			  do we need to move to BD pdu? */
+		}
+	}
+	cdf_mem_copy((void *)
+		     htt_tx_desc_mpdu_header(tx_desc->htt_tx_desc,
+					     new_l2_hdsize), localbuf,
+		     new_hdsize);
+	cdf_nbuf_pull_head(msdu, hdsize);
+	tx_msdu_info->htt.info.l3_hdr_offset = new_l2_hdsize;
+	tx_desc->orig_l2_hdr_bytes = hdsize;
+	return A_OK;
+}
+
+/*
+ * ol_tx_encap() - encapsulate an outgoing data frame into the format
+ * the target accepts, dispatching on the pdev's configured tx frame
+ * format.
+ *
+ * Return: A_OK on success; A_ERROR on failure or unsupported format
+ * (the caller is expected to drop the msdu).
+ */
+A_STATUS
+ol_tx_encap(struct ol_txrx_vdev_t *vdev,
+	    struct ol_tx_desc_t *tx_desc,
+	    cdf_nbuf_t msdu, struct ol_txrx_msdu_info_t *msdu_info)
+{
+	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+
+	switch (pdev->frame_format) {
+	case wlan_frm_fmt_native_wifi:
+		return ol_tx_encap_from_native_wifi(vdev, tx_desc, msdu,
+						    msdu_info);
+	case wlan_frm_fmt_802_3:
+		return ol_tx_encap_from_8023(vdev, tx_desc, msdu, msdu_info);
+	default:
+		/* todo for other types */
+		return A_ERROR;
+	}
+}
+
+/*
+ * ol_rx_decap_to_native_wifi() - rebuild a plain (non-QoS) 802.11
+ * header in front of an rx msdu payload.
+ *
+ * Pushes a header of the size implied by info->hdr's ToDS/FromDS bits
+ * onto @msdu, copies the cached header in, patches the address fields
+ * from @ethr_hdr when the msdu is an A-MSDU subframe, and strips the
+ * QoS subtype / Order bits so the OS sees a non-QoS frame.
+ */
+static inline void
+ol_rx_decap_to_native_wifi(struct ol_txrx_vdev_t *vdev,
+			   cdf_nbuf_t msdu,
+			   struct ol_rx_decap_info_t *info,
+			   struct ethernet_hdr_t *ethr_hdr)
+{
+	struct ieee80211_frame_addr4 *wh;
+	uint16_t hdsize;
+
+	/*
+	 * we need to remove Qos control field and HT control.
+	 * MSFT: http://msdn.microsoft.com/en-us/library/windows/
+	 * hardware/ff552608(v=vs.85).aspx
+	 */
+	wh = (struct ieee80211_frame_addr4 *)info->hdr;
+	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
+	    IEEE80211_FC1_DIR_DSTODS)
+		hdsize = sizeof(struct ieee80211_frame_addr4);
+	else
+		hdsize = sizeof(struct ieee80211_frame);
+
+	/* make room at the head of the msdu for the rebuilt header */
+	wh = (struct ieee80211_frame_addr4 *)cdf_nbuf_push_head(msdu, hdsize);
+	TXRX_ASSERT2(wh != NULL);
+	TXRX_ASSERT2(hdsize <= info->hdr_len);
+	cdf_mem_copy((uint8_t *) wh, info->hdr, hdsize);
+
+	/* amsdu subfrm handling if ethr_hdr is not NULL */
+	if (ethr_hdr != NULL) {
+		/* overwrite the DA/SA slots appropriate for the frame's
+		 * direction with this subframe's own addresses */
+		switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
+		case IEEE80211_FC1_DIR_NODS:
+			cdf_mem_copy(wh->i_addr1, ethr_hdr->dest_addr,
+				     ETHERNET_ADDR_LEN);
+			cdf_mem_copy(wh->i_addr2, ethr_hdr->src_addr,
+				     ETHERNET_ADDR_LEN);
+			break;
+		case IEEE80211_FC1_DIR_TODS:
+			cdf_mem_copy(wh->i_addr2, ethr_hdr->src_addr,
+				     ETHERNET_ADDR_LEN);
+			cdf_mem_copy(wh->i_addr3, ethr_hdr->dest_addr,
+				     ETHERNET_ADDR_LEN);
+			break;
+		case IEEE80211_FC1_DIR_FROMDS:
+			cdf_mem_copy(wh->i_addr1, ethr_hdr->dest_addr,
+				     ETHERNET_ADDR_LEN);
+			cdf_mem_copy(wh->i_addr3, ethr_hdr->src_addr,
+				     ETHERNET_ADDR_LEN);
+			break;
+		case IEEE80211_FC1_DIR_DSTODS:
+			cdf_mem_copy(wh->i_addr3, ethr_hdr->dest_addr,
+				     ETHERNET_ADDR_LEN);
+			cdf_mem_copy(wh->i_addr4, ethr_hdr->src_addr,
+				     ETHERNET_ADDR_LEN);
+			break;
+		}
+	}
+	/* downgrade a QoS data frame to plain data: clear Order (HTC
+	 * indicator) and the QoS subtype bit */
+	if (IEEE80211_QOS_HAS_SEQ(wh)) {
+		if (wh->i_fc[1] & IEEE80211_FC1_ORDER)
+			wh->i_fc[1] &= ~IEEE80211_FC1_ORDER;
+		wh->i_fc[0] &= ~IEEE80211_FC0_SUBTYPE_QOS;
+	}
+}
+
+/*
+ * ol_rx_decap_to_8023() - convert a received frame to 802.3 format.
+ *
+ * Strips a leading LLC/SNAP header when present (RFC 1042 / bridge
+ * tunnel cases), adjusts the buffer head so exactly ETHERNET_HDR_LEN
+ * bytes precede the payload, rebuilds the Ethernet addresses from the
+ * cached 802.11 header when the msdu is not an A-MSDU subframe, and
+ * writes the resulting Ethernet header at the buffer head.
+ *
+ * @ethr_hdr: NULL for a normal msdu (header is derived from info->hdr);
+ *            non-NULL for an A-MSDU subframe (addresses already known).
+ */
+static inline void
+ol_rx_decap_to_8023(struct ol_txrx_vdev_t *vdev,
+		    cdf_nbuf_t msdu,
+		    struct ol_rx_decap_info_t *info,
+		    struct ethernet_hdr_t *ethr_hdr)
+{
+	struct llc_snap_hdr_t *llc_hdr;
+	uint16_t ether_type;
+	uint16_t l2_hdr_space;
+	struct ieee80211_frame_addr4 *wh;
+	uint8_t local_buf[ETHERNET_HDR_LEN];
+	uint8_t *buf;
+
+	/*
+	 * populate Ethernet header,
+	 * if ethr_hdr is null, rx frame is 802.11 format(HW ft disabled)
+	 * if ethr_hdr is not null, rx frame is "subfrm of amsdu".
+	 */
+	buf = (uint8_t *) cdf_nbuf_data(msdu);
+	llc_hdr = (struct llc_snap_hdr_t *)buf;
+	ether_type = (llc_hdr->ethertype[0] << 8) | llc_hdr->ethertype[1];
+	/* do llc remove if needed */
+	l2_hdr_space = 0;
+	if (IS_SNAP(llc_hdr)) {
+		if (IS_BTEP(llc_hdr)) {
+			/* remove llc (bridge tunnel encapsulation) */
+			l2_hdr_space += sizeof(struct llc_snap_hdr_t);
+			llc_hdr = NULL;
+		} else if (IS_RFC1042(llc_hdr)) {
+			/* AARP/IPX must keep bridge-tunnel LLC intact */
+			if (!(ether_type == ETHERTYPE_AARP ||
+			      ether_type == ETHERTYPE_IPX)) {
+				/* remove llc */
+				l2_hdr_space += sizeof(struct llc_snap_hdr_t);
+				llc_hdr = NULL;
+			}
+		}
+	}
+	/* leave exactly ETHERNET_HDR_LEN bytes of headroom before the
+	 * payload for the Ethernet header written below */
+	if (l2_hdr_space > ETHERNET_HDR_LEN)
+		buf = cdf_nbuf_pull_head(msdu, l2_hdr_space - ETHERNET_HDR_LEN);
+	else if (l2_hdr_space < ETHERNET_HDR_LEN)
+		buf = cdf_nbuf_push_head(msdu, ETHERNET_HDR_LEN - l2_hdr_space);
+
+	/* normal msdu(non-subfrm of A-MSDU) if ethr_hdr is null */
+	if (ethr_hdr == NULL) {
+		/* mpdu hdr should be present in info,
+		   re-create ethr_hdr based on mpdu hdr */
+		TXRX_ASSERT2(info->hdr_len != 0);
+		wh = (struct ieee80211_frame_addr4 *)info->hdr;
+		ethr_hdr = (struct ethernet_hdr_t *)local_buf;
+		switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
+		case IEEE80211_FC1_DIR_NODS:
+			cdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr1,
+				     ETHERNET_ADDR_LEN);
+			cdf_mem_copy(ethr_hdr->src_addr, wh->i_addr2,
+				     ETHERNET_ADDR_LEN);
+			break;
+		case IEEE80211_FC1_DIR_TODS:
+			cdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr3,
+				     ETHERNET_ADDR_LEN);
+			cdf_mem_copy(ethr_hdr->src_addr, wh->i_addr2,
+				     ETHERNET_ADDR_LEN);
+			break;
+		case IEEE80211_FC1_DIR_FROMDS:
+			cdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr1,
+				     ETHERNET_ADDR_LEN);
+			cdf_mem_copy(ethr_hdr->src_addr, wh->i_addr3,
+				     ETHERNET_ADDR_LEN);
+			break;
+		case IEEE80211_FC1_DIR_DSTODS:
+			cdf_mem_copy(ethr_hdr->dest_addr, wh->i_addr3,
+				     ETHERNET_ADDR_LEN);
+			cdf_mem_copy(ethr_hdr->src_addr, wh->i_addr4,
+				     ETHERNET_ADDR_LEN);
+			break;
+		}
+	}
+	if (llc_hdr == NULL) {
+		/* LLC/SNAP was removed: restore the encapsulated
+		 * ethertype (Ethernet II) */
+		ethr_hdr->ethertype[0] = (ether_type >> 8) & 0xff;
+		ethr_hdr->ethertype[1] = (ether_type) & 0xff;
+	} else {
+		/* LLC/SNAP retained: the 802.3 type field carries the
+		 * payload length instead of a type */
+		uint32_t pktlen =
+			cdf_nbuf_len(msdu) - sizeof(ethr_hdr->ethertype);
+		TXRX_ASSERT2(pktlen <= ETHERNET_MTU);
+		/* (removed a dead store here: ether_type was assigned
+		 * pktlen and then immediately overwritten below) */
+		ether_type = cdf_nbuf_len(msdu) - sizeof(struct ethernet_hdr_t);
+		ethr_hdr->ethertype[0] = (ether_type >> 8) & 0xff;
+		ethr_hdr->ethertype[1] = (ether_type) & 0xff;
+	}
+	cdf_mem_copy(buf, ethr_hdr, ETHERNET_HDR_LEN);
+}
+
+/*
+ * ol_rx_decap_subfrm_amsdu() - decap one A-MSDU subframe.
+ *
+ * The subframe arrives with an Ethernet-style (DA/SA) header at its
+ * head.  That header is copied aside and stripped, then the subframe is
+ * re-encapsulated per the pdev's rx frame format using the 802.11
+ * header cached in @info by ol_rx_decap().
+ *
+ * Return: A_OK (always; unsupported formats are silently left as-is).
+ */
+static inline A_STATUS
+ol_rx_decap_subfrm_amsdu(struct ol_txrx_vdev_t *vdev,
+			 cdf_nbuf_t msdu, struct ol_rx_decap_info_t *info)
+{
+	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+	uint8_t *subfrm_hdr;
+	uint8_t localbuf[ETHERNET_HDR_LEN];
+	struct ethernet_hdr_t *ether_hdr = (struct ethernet_hdr_t *)localbuf;
+
+	subfrm_hdr = (uint8_t *) cdf_nbuf_data(msdu);
+	if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
+		/* decap to native wifi */
+		cdf_mem_copy(ether_hdr, subfrm_hdr, ETHERNET_HDR_LEN);
+		cdf_nbuf_pull_head(msdu, ETHERNET_HDR_LEN);
+		ol_rx_decap_to_native_wifi(vdev, msdu, info, ether_hdr);
+	} else if (pdev->frame_format == wlan_frm_fmt_802_3) {
+		if (pdev->sw_rx_llc_proc_enable) {
+			/* remove llc snap hdr if it's necessary according to
+			 * 802.11 table P-3
+			 */
+			cdf_mem_copy(ether_hdr, subfrm_hdr, ETHERNET_HDR_LEN);
+			cdf_nbuf_pull_head(msdu, ETHERNET_HDR_LEN);
+			ol_rx_decap_to_8023(vdev, msdu, info, ether_hdr);
+		} else {
+			/* subfrm of A-MSDU is already in 802.3 format.
+			 * if target HW or FW has done LLC rmv process,
+			 * we do nothing here.
+			 */
+		}
+	} else {
+		/* todo for othertype */
+	}
+	return A_OK;
+
+}
+
+/*
+ * ol_rx_decap_msdu() - decap a normal (non-A-MSDU-subframe) rx msdu.
+ *
+ * The msdu arrives with its full 802.11 header at the head.  The header
+ * is cached into info->hdr (used as scratch), stripped from the buffer,
+ * and the frame is rebuilt per the pdev's rx frame format: native wifi
+ * (QoS/HTC fields removed) or 802.3 (LLC/SNAP processed in sw when
+ * enabled).
+ *
+ * Return: A_OK (always; unsupported formats are silently left as-is).
+ */
+static inline A_STATUS
+ol_rx_decap_msdu(struct ol_txrx_vdev_t *vdev,
+		 cdf_nbuf_t msdu, struct ol_rx_decap_info_t *info)
+{
+	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+	struct ieee80211_frame *wh;
+	wh = (struct ieee80211_frame *)cdf_nbuf_data(msdu);
+
+	if (pdev->frame_format == wlan_frm_fmt_native_wifi) {
+		/* Decap to native wifi because according to MSFT(
+		 * MSFT: http://msdn.microsoft.com/en-us/library/windows/
+		 * hardware/ff552608(v=vs.85).aspx),
+		 * we need to remove Qos and HTC field before indicate to OS.
+		 */
+		if (IEEE80211_QOS_HAS_SEQ(wh)) {
+			info->hdr_len = ol_txrx_ieee80211_hdrsize(wh);
+			TXRX_ASSERT2(info->hdr_len <= sizeof(info->hdr));
+			cdf_mem_copy(info->hdr, /* use info->hdr as temp buf. */
+				     wh, info->hdr_len);
+			cdf_nbuf_pull_head(msdu, info->hdr_len);
+			ol_rx_decap_to_native_wifi(vdev, msdu, info, NULL);
+			/* 802.11 hdr^  eth_hdr^ */
+		}
+	} else if (pdev->frame_format == wlan_frm_fmt_802_3) {
+		if (pdev->sw_rx_llc_proc_enable) {
+			info->hdr_len = ol_txrx_ieee80211_hdrsize(wh);
+			TXRX_ASSERT2(info->hdr_len <= sizeof(info->hdr));
+			cdf_mem_copy(info->hdr, /* use info->hdr as temp buf. */
+				     wh, info->hdr_len);
+			cdf_nbuf_pull_head(msdu, info->hdr_len);
+			/* remove llc snap hdr if it's necessary according to
+			 * 802.11 table P-3
+			 */
+			ol_rx_decap_to_8023(vdev, msdu, info,   /* 802.11 hdr */
+					    NULL);      /* ethernet hdr */
+		} else {
+			/* Subfrm of A-MSDU is already in 802.3 format.
+			 * And if target HW or FW has done LLC rmv process (
+			 * sw_rx_lc_proc_enable == 0), we do nothing here.
+			 */
+		}
+	} else {
+		/* todo for othertype */
+	}
+	return A_OK;
+
+}
+
+/*
+ * ol_rx_decap() - decap an incoming frame into the host OS format.
+ *
+ * Stateful entry point: @info persists across calls for the msdus of
+ * one MPDU.  On the first subframe of an A-MSDU (when sw subframe
+ * header recovery is enabled) the 802.11 header is cached in @info so
+ * that subsequent subframes, which arrive without an 802.11 header on
+ * some HW (e.g. Riva), can have theirs reconstructed.  The per-A-MSDU
+ * state is cleared once the msdu that completes the MPDU is processed.
+ *
+ * Return: A_OK on success; other values mean the msdu should be
+ * dropped by the caller.
+ */
+A_STATUS
+ol_rx_decap(struct ol_txrx_vdev_t *vdev,
+	    struct ol_txrx_peer_t *peer,
+	    cdf_nbuf_t msdu, struct ol_rx_decap_info_t *info)
+{
+	A_STATUS status;
+	uint8_t *mpdu_hdr;
+
+	if (!info->is_subfrm) {
+		if (info->is_msdu_cmpl_mpdu && !info->is_first_subfrm) {
+			/* It's normal MSDU. */
+		} else {
+			/* It's a first subfrm of A-MSDU and
+			   may also be the last subfrm of A-MSDU */
+			info->is_subfrm = 1;
+			info->hdr_len = 0;
+			if (vdev->pdev->sw_subfrm_hdr_recovery_enable) {
+				/* we save the first subfrm mpdu hdr for
+				 * subsequent subfrm 802.11 header recovery
+				 * in certain chip(such as Riva).
+				 */
+				mpdu_hdr = cdf_nbuf_data(msdu);
+				info->hdr_len =
+					ol_txrx_ieee80211_hdrsize(mpdu_hdr);
+				TXRX_ASSERT2(info->hdr_len <=
+					     sizeof(info->hdr));
+				cdf_mem_copy(info->hdr, mpdu_hdr,
+					     info->hdr_len);
+				/* the cached header is stripped; the rest
+				 * of the subframe is decapped below */
+				cdf_nbuf_pull_head(msdu, info->hdr_len);
+			}
+		}
+	}
+
+	if (info->is_subfrm && vdev->pdev->sw_subfrm_hdr_recovery_enable) {
+		/*
+		 * This case is enabled for some HWs (such as Riva). The HW
+		 * de-aggregate doesn't have capability to generate 802.11
+		 * header for non-first subframe of A-MSDU. That means sw needs
+		 * to cache the first subfrm mpdu header to generate the
+		 * subsequent subfrm's 802.11 header.
+		 */
+		TXRX_ASSERT2(info->hdr_len != 0);
+		status = ol_rx_decap_subfrm_amsdu(vdev, msdu, info);
+	} else {
+		status = ol_rx_decap_msdu(vdev, msdu, info);
+	}
+
+	/* MPDU complete: reset the per-A-MSDU decap state */
+	if (info->is_msdu_cmpl_mpdu)
+		info->is_subfrm = info->is_first_subfrm = info->hdr_len = 0;
+
+	return status;
+}
+#endif
diff --git a/dp/txrx/ol_txrx_encap.h b/dp/txrx/ol_txrx_encap.h
new file mode 100644
index 000000000000..5795b3bbd4f3
--- /dev/null
+++ b/dp/txrx/ol_txrx_encap.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2012, 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_txrx_encap.h
+ * @brief definitions for txrx encap/decap function and struct
+ */
+#ifndef _OL_TXRX_ENCAP__H_
+#define _OL_TXRX_ENCAP__H_
+
+#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
+
+#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <cds_ieee80211_common.h> /* ieee80211_qosframe_htc_addr4 */
+#include <ol_txrx_types.h> /* ol_tx_desc_t, ol_txrx_msdu_info_t */
+
+/**
+ * @brief Encap outgoing frm from OS dependent format to Target
+ * acceptable frm format
+ * @details
+ * For native wifi format, the function will add a QoS control field
+ * based on the peer's QoS capabilities.
+ * For 802.3 format, the function will transform the frame to 802.11
+ * format, with or without a QoS control field based on the peer's
+ * QoS capabilities.
+ * @param vdev - handle to vdev object
+ * @param tx_desc - tx desc struct,some fields will be updated.
+ * @param msdu - cdf_nbuf_t
+ * @param msdu_info - information from tx classification.
+ * @return
+ *     A_OK: encap operation successful
+ *     other: operation failed; the msdu must be dropped.
+ */
+A_STATUS
+ol_tx_encap(struct ol_txrx_vdev_t *vdev,
+ struct ol_tx_desc_t *tx_desc,
+ cdf_nbuf_t msdu, struct ol_txrx_msdu_info_t *msdu_info);
+
+/* Decap state carried across the msdus of one MPDU / A-MSDU. */
+struct ol_rx_decap_info_t {
+	/* cached 802.11 MPDU header (sized for the largest variant);
+	 * used to rebuild headers for subsequent A-MSDU subframes */
+	uint8_t hdr[sizeof(struct ieee80211_qosframe_htc_addr4)];
+	int hdr_len;            /* valid bytes in hdr; 0 = none cached */
+	/* is_subfrm: currently within an A-MSDU;
+	 * is_first_subfrm: first subframe of the A-MSDU;
+	 * is_msdu_cmpl_mpdu: this msdu completes the MPDU (decap state
+	 * is reset afterwards -- see ol_rx_decap()) */
+	uint8_t is_subfrm:1, is_first_subfrm:1, is_msdu_cmpl_mpdu:1;
+};
+
+/**
+ * @brief decap incoming frm from Target to Host OS
+ * acceptable frm format
+ * @details
+ *     For native wifi format, the function removes the QoS control
+ *     field and the HT control field, if present.
+ *     For 802.3 format, the function performs LLC/SNAP header
+ *     processing if the target has not already done so.
+ * @param vdev - handle to vdev object
+ * @param peer - the peer object.
+ * @param msdu - cdf_nbuf_t
+ * @param info - ol_rx_decap_info_t: context info for decap
+ * @return
+ *     A_OK: decap operation successful
+ *     other: operation failed; the msdu must be dropped.
+ */
+A_STATUS
+ol_rx_decap(struct ol_txrx_vdev_t *vdev,
+ struct ol_txrx_peer_t *peer,
+ cdf_nbuf_t msdu, struct ol_rx_decap_info_t *info);
+
+/* Wrapper: run tx encap only when sw tx encap is enabled on the pdev. */
+static inline A_STATUS
+OL_TX_ENCAP(struct ol_txrx_vdev_t *vdev,
+	    struct ol_tx_desc_t *tx_desc,
+	    cdf_nbuf_t msdu, struct ol_txrx_msdu_info_t *msdu_info)
+{
+	return vdev->pdev->sw_tx_encap ?
+		ol_tx_encap(vdev, tx_desc, msdu, msdu_info) : A_OK;
+}
+
+/* Wrapper: run rx decap only when sw rx decap is enabled on the pdev. */
+static inline A_STATUS
+OL_RX_DECAP(struct ol_txrx_vdev_t *vdev,
+	    struct ol_txrx_peer_t *peer,
+	    cdf_nbuf_t msdu, struct ol_rx_decap_info_t *info)
+{
+	return vdev->pdev->sw_rx_decap ?
+		ol_rx_decap(vdev, peer, msdu, info) : A_OK;
+}
+
+#define OL_TX_RESTORE_HDR(__tx_desc, __msdu) \
+ do { \
+ if (__tx_desc->orig_l2_hdr_bytes != 0) \
+ cdf_nbuf_push_head(__msdu, \
+ __tx_desc->orig_l2_hdr_bytes); \
+ } while (0)
+#else
+#define OL_TX_ENCAP(vdev, tx_desc, msdu, msdu_info) A_OK
+#define OL_RX_DECAP(vdev, peer, msdu, info) A_OK
+#define OL_TX_RESTORE_HDR(__tx_desc, __msdu)
+#endif
+#endif /* _OL_TXRX_ENCAP__H_ */
diff --git a/dp/txrx/ol_txrx_event.c b/dp/txrx/ol_txrx_event.c
new file mode 100644
index 000000000000..3410a14083a0
--- /dev/null
+++ b/dp/txrx/ol_txrx_event.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2012-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include "ol_txrx_types.h"
+
+#ifdef WDI_EVENT_ENABLE
+
+static inline wdi_event_subscribe *wdi_event_next_sub(wdi_event_subscribe *
+ wdi_sub)
+{
+ if (!wdi_sub) {
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+ "Invalid subscriber in %s\n", __func__);
+ return NULL;
+ }
+ return wdi_sub->priv.next;
+}
+
+/*
+ * wdi_event_del_subs() - walk one event's subscriber list at detach
+ * time and notify dynamically-allocated subscribers so their owner can
+ * free them.
+ * @wdi_sub: head of the subscriber list for this event
+ * @event_index: event index (offset from WDI_EVENT_BASE)
+ */
+static inline void
+wdi_event_del_subs(wdi_event_subscribe *wdi_sub, int event_index)
+{
+	wdi_event_notify deallocate_sub;
+	while (wdi_sub) {
+		/* fetch the successor before the notify callback can
+		 * free the current node */
+		wdi_event_subscribe *next = wdi_event_next_sub(wdi_sub);
+		/*
+		 * Context is NULL for static allocation of subs
+		 * In dynamic allocation case notify the user
+		 */
+		if (wdi_sub->context) {
+			deallocate_sub = wdi_sub->context;
+			deallocate_sub(WDI_EVENT_SUB_DEALLOCATE,
+				       WDI_EVENT_BASE + event_index);
+		}
+		wdi_sub = next;
+	}
+	/* cdf_mem_free(wdi_sub); */
+}
+
+/* Deliver one event to every subscriber on the given list. */
+static inline void
+wdi_event_iter_sub(struct ol_txrx_pdev_t *pdev,
+		   uint32_t event_index,
+		   wdi_event_subscribe *wdi_sub, void *data)
+{
+	enum WDI_EVENT event = event_index + WDI_EVENT_BASE;
+
+	for (; wdi_sub; wdi_sub = wdi_event_next_sub(wdi_sub))
+		wdi_sub->callback(pdev, event, data);
+}
+
+/*
+ * wdi_event_handler() - dispatch a WDI event to all its subscribers.
+ * @event: the WDI event that occurred
+ * @txrx_pdev: pdev whose subscriber lists are consulted
+ * @data: event payload (may be NULL; subscribers must validate it)
+ *
+ * Return: none
+ */
+void
+wdi_event_handler(enum WDI_EVENT event,
+		  struct ol_txrx_pdev_t *txrx_pdev, void *data)
+{
+	uint32_t event_index;
+	wdi_event_subscribe *wdi_sub;
+	/*
+	 * Input validation: also reject out-of-range events, since
+	 * event_index is used to index wdi_event_list (same range check
+	 * as in wdi_event_sub()).
+	 */
+	if (!event || event < WDI_EVENT_BASE || event >= WDI_EVENT_LAST) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "Invalid WDI event in %s\n", __func__);
+		return;
+	}
+	if (!txrx_pdev) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "Invalid pdev in WDI event handler\n");
+		return;
+	}
+	/*
+	 * There can be NULL data, so no validation for the data
+	 * Subscribers must do the sanity based on the requirements
+	 */
+	event_index = event - WDI_EVENT_BASE;
+
+	wdi_sub = txrx_pdev->wdi_event_list[event_index];
+
+	/* Find the subscriber */
+	wdi_event_iter_sub(txrx_pdev, event_index, wdi_sub, data);
+}
+
+/*
+ * wdi_event_sub() - register a subscriber for one WDI event.
+ *
+ * Return: A_OK on success, A_ERROR on invalid arguments.
+ */
+A_STATUS
+wdi_event_sub(struct ol_txrx_pdev_t *txrx_pdev,
+	      wdi_event_subscribe *event_cb_sub, enum WDI_EVENT event)
+{
+	uint32_t event_index;
+	wdi_event_subscribe *head;
+
+	/* Input validation */
+	if (!txrx_pdev) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "Invalid txrx_pdev in %s", __func__);
+		return A_ERROR;
+	}
+	if (!event_cb_sub) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "Invalid callback in %s", __func__);
+		return A_ERROR;
+	}
+	if ((!event) || (event >= WDI_EVENT_LAST) || (event < WDI_EVENT_BASE)) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "Invalid event in %s", __func__);
+		return A_ERROR;
+	}
+	event_index = event - WDI_EVENT_BASE;
+	head = txrx_pdev->wdi_event_list[event_index];
+
+	/* Push the new subscriber at the head of the (possibly empty)
+	 * doubly linked list. */
+	event_cb_sub->priv.next = head;
+	event_cb_sub->priv.prev = NULL;
+	if (head)
+		head->priv.prev = event_cb_sub;
+	txrx_pdev->wdi_event_list[event_index] = event_cb_sub;
+
+	return A_OK;
+}
+
+/*
+ * wdi_event_unsub() - unlink a subscriber from one WDI event's list.
+ *
+ * Return: A_OK on success, A_ERROR on invalid arguments.
+ */
+A_STATUS
+wdi_event_unsub(struct ol_txrx_pdev_t *txrx_pdev,
+		wdi_event_subscribe *event_cb_sub, enum WDI_EVENT event)
+{
+	uint32_t event_index;
+
+	/* Input validation (mirrors wdi_event_sub(): txrx_pdev and the
+	 * event range were previously unchecked before indexing the
+	 * subscriber list). */
+	if (!txrx_pdev) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "Invalid txrx_pdev in %s", __func__);
+		return A_ERROR;
+	}
+	if (!event_cb_sub) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "Invalid callback in %s", __func__);
+		return A_ERROR;
+	}
+	if ((!event) || (event >= WDI_EVENT_LAST) || (event < WDI_EVENT_BASE)) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "Invalid event in %s", __func__);
+		return A_ERROR;
+	}
+	event_index = event - WDI_EVENT_BASE;
+
+	/* Unlink from the doubly linked list; fix the list head when
+	 * removing the first node. */
+	if (!event_cb_sub->priv.prev) {
+		txrx_pdev->wdi_event_list[event_index] =
+			event_cb_sub->priv.next;
+	} else {
+		event_cb_sub->priv.prev->priv.next = event_cb_sub->priv.next;
+	}
+	if (event_cb_sub->priv.next)
+		event_cb_sub->priv.next->priv.prev = event_cb_sub->priv.prev;
+
+	/* cdf_mem_free(event_cb_sub); */
+
+	return A_OK;
+}
+
+/*
+ * wdi_event_attach() - allocate the per-event subscriber list heads.
+ *
+ * Return: A_OK on success, A_ERROR / A_NO_MEMORY on failure.
+ */
+A_STATUS wdi_event_attach(struct ol_txrx_pdev_t *txrx_pdev)
+{
+	/* Input validation */
+	if (!txrx_pdev) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "Invalid device in %s\nWDI event attach failed",
+			  __func__);
+		return A_ERROR;
+	}
+	/* Separate subscriber list for each event */
+	txrx_pdev->wdi_event_list = (wdi_event_subscribe **)
+				    cdf_mem_malloc(
+					    sizeof(wdi_event_subscribe *) *
+					    WDI_NUM_EVENTS);
+	if (!txrx_pdev->wdi_event_list) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "Insufficient memory for the WDI event lists\n");
+		return A_NO_MEMORY;
+	}
+	/*
+	 * Every list head must start out NULL: wdi_event_sub() and
+	 * wdi_event_handler() read the slots before any subscriber is
+	 * installed.  Clear explicitly rather than relying on
+	 * cdf_mem_malloc() zeroing semantics.
+	 */
+	cdf_mem_zero(txrx_pdev->wdi_event_list,
+		     sizeof(wdi_event_subscribe *) * WDI_NUM_EVENTS);
+	return A_OK;
+}
+
+/*
+ * wdi_event_detach() - tear down the WDI event machinery: notify all
+ * dynamically-allocated subscribers on every event list, then free the
+ * array of list heads.
+ *
+ * Return: A_OK on success, A_ERROR on invalid/uninitialized pdev.
+ */
+A_STATUS wdi_event_detach(struct ol_txrx_pdev_t *txrx_pdev)
+{
+	int i;
+	wdi_event_subscribe *wdi_sub;
+	if (!txrx_pdev) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "Invalid device in %s\nWDI detach failed",
+			  __func__);
+		return A_ERROR;
+	}
+	if (!txrx_pdev->wdi_event_list) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			  "%s: wdi_event_list is NULL", __func__);
+		return A_ERROR;
+	}
+
+	for (i = 0; i < WDI_NUM_EVENTS; i++) {
+		wdi_sub = txrx_pdev->wdi_event_list[i];
+		if (wdi_sub) {
+			/* Delete all the subscribers */
+			wdi_event_del_subs(wdi_sub, i);
+		}
+	}
+	/* txrx_pdev->wdi_event_list would be non-null */
+	cdf_mem_free(txrx_pdev->wdi_event_list);
+	return A_OK;
+}
+
+#endif /* WDI_EVENT_ENABLE */
diff --git a/dp/txrx/ol_txrx_flow_control.c b/dp/txrx/ol_txrx_flow_control.c
new file mode 100644
index 000000000000..77fd41ee1ed4
--- /dev/null
+++ b/dp/txrx/ol_txrx_flow_control.c
@@ -0,0 +1,695 @@
+/*
+ * Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/* OS abstraction libraries */
+#include <cdf_nbuf.h> /* cdf_nbuf_t, etc. */
+#include <cdf_atomic.h> /* cdf_atomic_read, etc. */
+#include <cdf_util.h> /* cdf_unlikely */
+
+/* APIs for other modules */
+#include <htt.h> /* HTT_TX_EXT_TID_MGMT */
+#include <ol_htt_tx_api.h> /* htt_tx_desc_tid */
+#include <ol_txrx_api.h> /* ol_txrx_vdev_handle */
+#include <ol_txrx_ctrl_api.h> /* ol_txrx_sync */
+
+/* internal header files relevant for all systems */
+#include <ol_txrx_internal.h> /* TXRX_ASSERT1 */
+#include <ol_txrx_types.h> /* pdev stats */
+#include <ol_tx_desc.h> /* ol_tx_desc */
+#include <ol_tx_send.h> /* ol_tx_send */
+#include <ol_txrx.h>
+
+/* internal header files relevant only for HL systems */
+#include <ol_tx_queue.h> /* ol_tx_enqueue */
+
+/* internal header files relevant only for specific systems (Pronto) */
+#include <ol_txrx_encap.h> /* OL_TX_ENCAP, etc */
+#include <ol_tx.h>
+#include <ol_cfg.h>
+
+#define INVALID_FLOW_ID 0xFF
+#define MAX_INVALID_BIN 3
+
+#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
+#define TX_FLOW_MGMT_POOL_ID 0xEF
+#define TX_FLOW_MGMT_POOL_SIZE 32
+
+/**
+ * ol_tx_register_global_mgmt_pool() - register global pool for mgmt packets
+ * @pdev: pdev handler
+ *
+ * Creates the dedicated management-frame flow pool and records it in
+ * pdev->mgmt_pool.  On failure pdev->mgmt_pool stays NULL and an error
+ * is logged; callers must tolerate a NULL mgmt_pool.
+ *
+ * Return: none
+ */
+static void
+ol_tx_register_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
+{
+	pdev->mgmt_pool = ol_tx_create_flow_pool(TX_FLOW_MGMT_POOL_ID,
+						 TX_FLOW_MGMT_POOL_SIZE);
+	if (pdev->mgmt_pool == NULL)
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "Management pool creation failed\n");
+}
+
+/**
+ * ol_tx_deregister_global_mgmt_pool() - Deregister global pool for mgmt packets
+ * @pdev: pdev handler
+ *
+ * Return: none
+ */
+static void
+ol_tx_deregister_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
+{
+	/*
+	 * pdev->mgmt_pool may be NULL if creation failed at registration
+	 * time; ol_tx_delete_flow_pool() logs and rejects a NULL pool.
+	 */
+	ol_tx_delete_flow_pool(pdev->mgmt_pool);
+	return;
+}
+#else
+/* No-op stubs used when QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL is disabled */
+static inline void
+ol_tx_register_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
+{
+	return;
+}
+static inline void
+ol_tx_deregister_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
+{
+	return;
+}
+#endif
+
+/**
+ * ol_tx_register_flow_control() - Register fw based tx flow control
+ * @pdev: pdev handle
+ *
+ * Initializes the flow-pool list and its lock.  The global
+ * management-frame pool is only created when management frames are NOT
+ * sent over WMI (over WMI they do not consume tx descriptors here).
+ *
+ * Return: none
+ */
+void ol_tx_register_flow_control(struct ol_txrx_pdev_t *pdev)
+{
+	cdf_spinlock_init(&pdev->tx_desc.flow_pool_list_lock);
+	TAILQ_INIT(&pdev->tx_desc.flow_pool_list);
+
+	if (!ol_tx_get_is_mgmt_over_wmi_enabled())
+		ol_tx_register_global_mgmt_pool(pdev);
+}
+
+/**
+ * ol_tx_deregister_flow_control() - Deregister fw based tx flow control
+ * @pdev: pdev handle
+ *
+ * Mirror of ol_tx_register_flow_control(): deletes the global mgmt pool
+ * (if one was created) and destroys the list lock.  A non-empty pool
+ * list at this point indicates leaked flow pools; it is only reported,
+ * not repaired.
+ *
+ * Return: none
+ */
+void ol_tx_deregister_flow_control(struct ol_txrx_pdev_t *pdev)
+{
+	if (!ol_tx_get_is_mgmt_over_wmi_enabled())
+		ol_tx_deregister_global_mgmt_pool(pdev);
+
+	cdf_spinlock_destroy(&pdev->tx_desc.flow_pool_list_lock);
+	if (!TAILQ_EMPTY(&pdev->tx_desc.flow_pool_list)) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "flow pool list is not empty!!!\n");
+	}
+}
+
+/**
+ * ol_tx_dump_flow_pool_info() - dump global_pool and flow_pool info
+ *
+ * Prints global descriptor-pool usage, the invalid-pool count, pool
+ * map/unmap and drop statistics, then walks the flow-pool list printing
+ * a snapshot of each pool.  Each pool is copied out under its own lock
+ * so the prints themselves run with no lock held.
+ *
+ * Return: none
+ */
+void ol_tx_dump_flow_pool_info(void)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+	struct ol_tx_flow_pool_t *pool = NULL;
+	struct ol_tx_flow_pool_t tmp_pool;
+
+	/*
+	 * cds_get_context() can return NULL during start/stop transitions;
+	 * the original dereferenced pdev without this check while the
+	 * sibling ol_tx_clear_flow_pool_stats() validates it.
+	 */
+	if (!pdev) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "%s: pdev is NULL\n", __func__);
+		return;
+	}
+
+	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Global Pool\n");
+	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Total %d :: Available %d\n",
+		   pdev->tx_desc.pool_size, pdev->tx_desc.num_free);
+	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Invalid flow_pool %d\n",
+		   pdev->tx_desc.num_invalid_bin);
+
+	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "No of pool map received %d\n",
+		   pdev->pool_stats.pool_map_count);
+	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "No of pool unmap received %d\n",
+		   pdev->pool_stats.pool_unmap_count);
+	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+		   "Pkt dropped due to unavailablity of pool %d\n",
+		   pdev->pool_stats.pkt_drop_no_pool);
+	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+		   "Pkt dropped due to unavailablity of descriptors %d\n",
+		   pdev->pool_stats.pkt_drop_no_desc);
+
+	/*
+	 * Nested spin lock.
+	 * Always take in below order.
+	 * flow_pool_list_lock -> flow_pool_lock
+	 */
+	cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
+		      flow_pool_list_elem) {
+		cdf_spin_lock_bh(&pool->flow_pool_lock);
+		cdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
+		cdf_spin_unlock_bh(&pool->flow_pool_lock);
+		/*
+		 * NOTE(review): the list lock is dropped while printing, so
+		 * the TAILQ_FOREACH cursor may be stale if pools are deleted
+		 * concurrently -- behavior preserved from the original.
+		 */
+		cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "Flow_pool_id %d :: status %d\n",
+			   tmp_pool.flow_pool_id, tmp_pool.status);
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "Total %d :: Available %d :: Deficient %d\n",
+			   tmp_pool.flow_pool_size, tmp_pool.avail_desc,
+			   tmp_pool.deficient_desc);
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "Start_TH %d :: Stop_TH %d\n",
+			   tmp_pool.start_th, tmp_pool.stop_th);
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "Member flow_id %d :: flow_type %d\n",
+			   tmp_pool.member_flow_id, tmp_pool.flow_type);
+		cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	}
+	cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
+
+	return;
+}
+
+/**
+ * ol_tx_clear_flow_pool_stats() - clear flow pool statistics
+ *
+ * Zeroes the pdev-wide flow-pool counters (map/unmap counts and
+ * no-pool / no-descriptor drop counts).
+ *
+ * Return: none
+ */
+void ol_tx_clear_flow_pool_stats(void)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+
+	if (!pdev) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: pdev is null\n",
+			   __func__);
+		return;
+	}
+	cdf_mem_zero(&pdev->pool_stats, sizeof(pdev->pool_stats));
+}
+
+/**
+ * ol_tx_move_desc_n() - Move n descriptors from src_pool to dst_pool.
+ * @src_pool: source pool
+ * @dst_pool: destination pool
+ * @desc_move_count: descriptor move count
+ *
+ * Only one pool lock is ever held at a time (src, then dst, then src
+ * again), so there is no lock-ordering constraint between the pools.
+ *
+ * NOTE(review): the return of ol_tx_get_desc_flow_pool() is not
+ * checked; this assumes src_pool holds at least @desc_move_count free
+ * descriptors -- confirm callers guarantee this.
+ *
+ * Return: actual descriptors moved
+ */
+static int ol_tx_move_desc_n(struct ol_tx_flow_pool_t *src_pool,
+			     struct ol_tx_flow_pool_t *dst_pool,
+			     int desc_move_count)
+{
+	uint16_t count = 0, i;
+	struct ol_tx_desc_t *tx_desc;
+	union ol_tx_desc_list_elem_t *temp_list = NULL;
+
+	/* Take descriptors from source pool and put it in temp_list */
+	cdf_spin_lock_bh(&src_pool->flow_pool_lock);
+	for (i = 0; i < desc_move_count; i++) {
+		tx_desc = ol_tx_get_desc_flow_pool(src_pool);
+		((union ol_tx_desc_list_elem_t *)tx_desc)->next = temp_list;
+		temp_list = (union ol_tx_desc_list_elem_t *)tx_desc;
+
+	}
+	cdf_spin_unlock_bh(&src_pool->flow_pool_lock);
+
+	/* Take descriptors from temp_list and put it in destination pool */
+	cdf_spin_lock_bh(&dst_pool->flow_pool_lock);
+	for (i = 0; i < desc_move_count; i++) {
+		/* stop once the destination is no longer deficient */
+		if (dst_pool->deficient_desc)
+			dst_pool->deficient_desc--;
+		else
+			break;
+		tx_desc = &temp_list->tx_desc;
+		temp_list = temp_list->next;
+		ol_tx_put_desc_flow_pool(dst_pool, tx_desc);
+		count++;
+	}
+	cdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
+
+	/* If anything is there in temp_list put it back to source pool */
+	cdf_spin_lock_bh(&src_pool->flow_pool_lock);
+	while (temp_list) {
+		tx_desc = &temp_list->tx_desc;
+		temp_list = temp_list->next;
+		ol_tx_put_desc_flow_pool(src_pool, tx_desc);
+	}
+	cdf_spin_unlock_bh(&src_pool->flow_pool_lock);
+
+	return count;
+}
+
+
+/**
+ * ol_tx_distribute_descs_to_deficient_pools() - Distribute descriptors
+ * @src_pool: source pool
+ *
+ * Distribute all descriptors of source pool to all
+ * deficient pools as per flow_pool_list.
+ *
+ * Return: 0 for success, -EINVAL if the pdev context is unavailable
+ */
+int
+ol_tx_distribute_descs_to_deficient_pools(struct ol_tx_flow_pool_t *src_pool)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+	struct ol_tx_flow_pool_t *dst_pool = NULL;
+	uint16_t desc_count = src_pool->avail_desc;
+	uint16_t desc_move_count = 0;
+
+	if (!pdev) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "%s: pdev is NULL\n", __func__);
+		return -EINVAL;
+	}
+	cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	TAILQ_FOREACH(dst_pool, &pdev->tx_desc.flow_pool_list,
+		      flow_pool_list_elem) {
+		cdf_spin_lock_bh(&dst_pool->flow_pool_lock);
+		if (dst_pool->deficient_desc) {
+			/* move at most what the destination still needs */
+			desc_move_count =
+				(dst_pool->deficient_desc > desc_count) ?
+				desc_count : dst_pool->deficient_desc;
+			/*
+			 * Drop the dst lock: ol_tx_move_desc_n() takes the
+			 * pool locks itself, one at a time.
+			 */
+			cdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
+			desc_move_count = ol_tx_move_desc_n(src_pool,
+					dst_pool, desc_move_count);
+			desc_count -= desc_move_count;
+			cdf_spin_lock_bh(&dst_pool->flow_pool_lock);
+		}
+		cdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
+		if (desc_count == 0)
+			break;
+	}
+	cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
+
+	return 0;
+}
+
+
+/**
+ * ol_tx_create_flow_pool() - create flow pool
+ * @flow_pool_id: flow pool id
+ * @flow_pool_size: flow pool size
+ *
+ * Allocates a flow pool, fills it with up to @flow_pool_size tx
+ * descriptors taken from the global pool, computes start/stop queue
+ * thresholds as percentages of the pool size, and links the pool into
+ * the pdev flow-pool list.
+ *
+ * Return: flow_pool pointer / NULL for error
+ */
+struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(uint8_t flow_pool_id,
+						 uint16_t flow_pool_size)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+	struct ol_tx_flow_pool_t *pool;
+	uint16_t size = 0, i;
+	struct ol_tx_desc_t *tx_desc;
+	union ol_tx_desc_list_elem_t *temp_list = NULL;
+	uint32_t stop_threshold;
+	uint32_t start_threshold;
+
+	if (!pdev) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "%s: pdev is NULL\n", __func__);
+		return NULL;
+	}
+
+	/*
+	 * Read the config thresholds only after pdev is validated.
+	 * The original dereferenced pdev->ctrl_pdev in the declaration
+	 * initializers above, before the NULL check could ever run.
+	 */
+	stop_threshold = ol_cfg_get_tx_flow_stop_queue_th(pdev->ctrl_pdev);
+	start_threshold = stop_threshold +
+		ol_cfg_get_tx_flow_start_queue_offset(pdev->ctrl_pdev);
+
+	pool = cdf_mem_malloc(sizeof(*pool));
+	if (!pool) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "%s: malloc failed\n", __func__);
+		return NULL;
+	}
+
+	pool->flow_pool_id = flow_pool_id;
+	pool->flow_pool_size = flow_pool_size;
+	pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
+	/* thresholds are configured as a percentage of the pool size */
+	pool->start_th = (start_threshold * flow_pool_size) / 100;
+	pool->stop_th = (stop_threshold * flow_pool_size) / 100;
+	cdf_spinlock_init(&pool->flow_pool_lock);
+
+	/* Take TX descriptor from global_pool and put it in temp_list */
+	cdf_spin_lock_bh(&pdev->tx_mutex);
+	if (pdev->tx_desc.num_free >= pool->flow_pool_size)
+		size = pool->flow_pool_size;
+	else
+		size = pdev->tx_desc.num_free;
+
+	for (i = 0; i < size; i++) {
+		tx_desc = ol_tx_get_desc_global_pool(pdev);
+		tx_desc->pool = pool;
+		((union ol_tx_desc_list_elem_t *)tx_desc)->next = temp_list;
+		temp_list = (union ol_tx_desc_list_elem_t *)tx_desc;
+	}
+	cdf_spin_unlock_bh(&pdev->tx_mutex);
+
+	/* put temp_list to flow_pool */
+	pool->freelist = temp_list;
+	pool->avail_desc = size;
+	/* shortfall to be made up later by descriptor redistribution */
+	pool->deficient_desc = pool->flow_pool_size - pool->avail_desc;
+
+	/* Add flow_pool to flow_pool_list */
+	cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	TAILQ_INSERT_TAIL(&pdev->tx_desc.flow_pool_list, pool,
+			  flow_pool_list_elem);
+	cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
+
+	return pool;
+}
+
+/**
+ * ol_tx_delete_flow_pool() - delete flow pool
+ * @pool: flow pool pointer
+ *
+ * Delete flow_pool if all tx descriptors are available.
+ * Otherwise put it in FLOW_POOL_INVALID state: the pool is shrunk to
+ * its outstanding descriptors, given INVALID_FLOW_ID, and re-inserted
+ * on the list so it can be reclaimed when its descriptors return.
+ * Free descriptors are always returned to the global pool.
+ *
+ * Return: 0 for success or error
+ */
+int ol_tx_delete_flow_pool(struct ol_tx_flow_pool_t *pool)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+	uint16_t i, size;
+	union ol_tx_desc_list_elem_t *temp_list = NULL;
+	struct ol_tx_desc_t *tx_desc = NULL;
+
+	if (!pool) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "%s: pool is NULL\n", __func__);
+		return -ENOMEM;
+	}
+	/* pdev was dereferenced unconditionally before; validate it first */
+	if (!pdev) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "%s: pdev is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	TAILQ_REMOVE(&pdev->tx_desc.flow_pool_list, pool, flow_pool_list_elem);
+	cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
+
+	cdf_spin_lock_bh(&pool->flow_pool_lock);
+	if (pool->avail_desc == pool->flow_pool_size)
+		pool->status = FLOW_POOL_INACTIVE;
+	else
+		pool->status = FLOW_POOL_INVALID;
+
+	/* Take all free descriptors and put it in temp_list */
+	temp_list = pool->freelist;
+	size = pool->avail_desc;
+	pool->freelist = NULL;
+	pool->avail_desc = 0;
+
+	if (pool->status == FLOW_POOL_INACTIVE) {
+		cdf_spin_unlock_bh(&pool->flow_pool_lock);
+		/* Free flow_pool */
+		cdf_spinlock_destroy(&pool->flow_pool_lock);
+		cdf_mem_free(pool);
+	} else { /* FLOW_POOL_INVALID case*/
+		pool->flow_pool_size -= size;
+		pool->flow_pool_id = INVALID_FLOW_ID;
+		cdf_spin_unlock_bh(&pool->flow_pool_lock);
+
+		pdev->tx_desc.num_invalid_bin++;
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "%s: invalid pool created %d\n",
+			   __func__, pdev->tx_desc.num_invalid_bin);
+		/* too many pools stuck invalid indicates a leak -- assert */
+		if (pdev->tx_desc.num_invalid_bin > MAX_INVALID_BIN)
+			ASSERT(0);
+
+		cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
+		TAILQ_INSERT_TAIL(&pdev->tx_desc.flow_pool_list, pool,
+				  flow_pool_list_elem);
+		cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	}
+
+	/* put free descriptors to global pool */
+	cdf_spin_lock_bh(&pdev->tx_mutex);
+	for (i = 0; i < size; i++) {
+		tx_desc = &temp_list->tx_desc;
+		temp_list = temp_list->next;
+
+		ol_tx_put_desc_global_pool(pdev, tx_desc);
+	}
+	cdf_spin_unlock_bh(&pdev->tx_mutex);
+
+	return 0;
+}
+
+
+/**
+ * ol_tx_free_invalid_flow_pool() - free invalid pool
+ * @pool: pool
+ *
+ * Redistributes the pool's remaining free descriptors to other
+ * deficient pools, shrinks the pool to its available count, then
+ * deletes it via ol_tx_delete_flow_pool().
+ *
+ * Return: 0 for success or failure
+ */
+int ol_tx_free_invalid_flow_pool(struct ol_tx_flow_pool_t *pool)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+
+	if ((!pdev) || (!pool) || (pool->status != FLOW_POOL_INVALID)) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "%s: Invalid pool/pdev\n", __func__);
+		return -EINVAL;
+	}
+
+	/* directly distribute to other deficient pools */
+	ol_tx_distribute_descs_to_deficient_pools(pool);
+
+	cdf_spin_lock_bh(&pool->flow_pool_lock);
+	pool->flow_pool_size = pool->avail_desc;
+	cdf_spin_unlock_bh(&pool->flow_pool_lock);
+
+	pdev->tx_desc.num_invalid_bin--;
+	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+		   "%s: invalid pool deleted %d\n",
+		   __func__, pdev->tx_desc.num_invalid_bin);
+
+	return ol_tx_delete_flow_pool(pool);
+}
+
+/**
+ * ol_tx_get_flow_pool() - get flow_pool from flow_pool_id
+ * @flow_pool_id: flow pool id
+ *
+ * Walks the pdev flow-pool list under the list lock, briefly taking
+ * each pool's lock to read its id.
+ *
+ * Return: flow_pool ptr / NULL if not found
+ */
+struct ol_tx_flow_pool_t *ol_tx_get_flow_pool(uint8_t flow_pool_id)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+	struct ol_tx_flow_pool_t *pool = NULL;
+	bool is_found = false;
+
+	/* pdev was dereferenced unconditionally before; validate it first */
+	if (!pdev) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "%s: pdev is NULL\n", __func__);
+		return NULL;
+	}
+
+	cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
+		      flow_pool_list_elem) {
+		cdf_spin_lock_bh(&pool->flow_pool_lock);
+		if (pool->flow_pool_id == flow_pool_id) {
+			cdf_spin_unlock_bh(&pool->flow_pool_lock);
+			is_found = true;
+			break;
+		}
+		cdf_spin_unlock_bh(&pool->flow_pool_lock);
+	}
+	cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
+
+	if (is_found == false)
+		pool = NULL;
+
+	return pool;
+
+}
+
+
+/**
+ * ol_tx_flow_pool_vdev_map() - Map flow_pool with vdev
+ * @pool: flow_pool
+ * @vdev_id: flow_id /vdev_id
+ *
+ * Looks up the vdev for @vdev_id, points it at @pool and records the
+ * vdev id as the pool's member flow id.
+ *
+ * Return: none
+ */
+void ol_tx_flow_pool_vdev_map(struct ol_tx_flow_pool_t *pool,
+			      uint8_t vdev_id)
+{
+	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
+
+	if (vdev == NULL) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "%s: invalid vdev_id %d\n",
+			   __func__, vdev_id);
+		return;
+	}
+
+	vdev->pool = pool;
+	cdf_spin_lock_bh(&pool->flow_pool_lock);
+	pool->member_flow_id = vdev_id;
+	cdf_spin_unlock_bh(&pool->flow_pool_lock);
+}
+
+/**
+ * ol_tx_flow_pool_vdev_unmap() - Unmap flow_pool from vdev
+ * @pool: flow_pool
+ * @vdev_id: flow_id /vdev_id
+ *
+ * Mirror of ol_tx_flow_pool_vdev_map(): clears the vdev's pool pointer
+ * and resets the pool's member flow id to INVALID_FLOW_ID.
+ *
+ * Return: none
+ */
+void ol_tx_flow_pool_vdev_unmap(struct ol_tx_flow_pool_t *pool,
+				uint8_t vdev_id)
+{
+	ol_txrx_vdev_handle vdev;
+
+	vdev = ol_txrx_get_vdev_from_vdev_id(vdev_id);
+	if (!vdev) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "%s: invalid vdev_id %d\n",
+			   __func__, vdev_id);
+		return;
+	}
+
+	vdev->pool = NULL;
+	cdf_spin_lock_bh(&pool->flow_pool_lock);
+	pool->member_flow_id = INVALID_FLOW_ID;
+	cdf_spin_unlock_bh(&pool->flow_pool_lock);
+
+	return;
+}
+
+/**
+ * ol_tx_flow_pool_map_handler() - Map flow_id with pool of descriptors
+ * @flow_id: flow id
+ * @flow_type: flow type
+ * @flow_pool_id: pool id
+ * @flow_pool_size: pool size
+ *
+ * Process below target to host message
+ * HTT_T2H_MSG_TYPE_FLOW_POOL_MAP
+ *
+ * Return: none
+ */
+void ol_tx_flow_pool_map_handler(uint8_t flow_id, uint8_t flow_type,
+				 uint8_t flow_pool_id, uint16_t flow_pool_size)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+	struct ol_tx_flow_pool_t *pool;
+	uint8_t pool_create = 0;	/* set when the pool is created here */
+	enum htt_flow_type type = flow_type;
+
+
+	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
+		   "%s: flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d\n",
+		   __func__, flow_id, flow_type, flow_pool_id, flow_pool_size);
+
+	if (cdf_unlikely(!pdev)) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "%s: pdev is NULL", __func__);
+		return;
+	}
+	pdev->pool_stats.pool_map_count++;
+
+	/* Reuse an existing pool with this id, else create a new one */
+	pool = ol_tx_get_flow_pool(flow_pool_id);
+	if (!pool) {
+		pool = ol_tx_create_flow_pool(flow_pool_id, flow_pool_size);
+		if (pool == NULL) {
+			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+				   "%s: creation of flow_pool %d size %d failed\n",
+				   __func__, flow_pool_id, flow_pool_size);
+			return;
+		}
+		pool_create = 1;
+	}
+
+	switch (type) {
+
+	case FLOW_TYPE_VDEV:
+		ol_tx_flow_pool_vdev_map(pool, flow_id);
+		break;
+	default:
+		/* undo the creation done above for an unsupported type */
+		if (pool_create)
+			ol_tx_delete_flow_pool(pool);
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "%s: flow type %d not supported !!!\n",
+			   __func__, type);
+		break;
+	}
+
+	return;
+}
+
+/**
+ * ol_tx_flow_pool_unmap_handler() - Unmap flow_id from pool of descriptors
+ * @flow_id: flow id
+ * @flow_type: flow type
+ * @flow_pool_id: pool id
+ *
+ * Process below target to host message
+ * HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP
+ *
+ * Return: none
+ */
+void ol_tx_flow_pool_unmap_handler(uint8_t flow_id, uint8_t flow_type,
+				   uint8_t flow_pool_id)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(CDF_MODULE_ID_TXRX);
+	struct ol_tx_flow_pool_t *pool;
+	enum htt_flow_type type = flow_type;
+
+	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
+		   "%s: flow_id %d flow_type %d flow_pool_id %d\n",
+		   __func__, flow_id, flow_type, flow_pool_id);
+
+	if (cdf_unlikely(!pdev)) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "%s: pdev is NULL", __func__);
+		return;
+	}
+	pdev->pool_stats.pool_unmap_count++;
+
+	pool = ol_tx_get_flow_pool(flow_pool_id);
+	if (!pool) {
+		/*
+		 * Fix: the original passed 'type' for the %d below, logging
+		 * the flow type instead of the missing pool id.
+		 */
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "%s: flow_pool not available flow_pool_id %d\n",
+			   __func__, flow_pool_id);
+		return;
+	}
+
+	switch (type) {
+
+	case FLOW_TYPE_VDEV:
+		ol_tx_flow_pool_vdev_unmap(pool, flow_id);
+		break;
+	default:
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			   "%s: flow type %d not supported !!!\n",
+			   __func__, type);
+		return;
+	}
+
+	/* only delete if all descriptors are available */
+	ol_tx_delete_flow_pool(pool);
+
+	return;
+}
+
+
diff --git a/dp/txrx/ol_txrx_internal.h b/dp/txrx/ol_txrx_internal.h
new file mode 100644
index 000000000000..30315e64ce1b
--- /dev/null
+++ b/dp/txrx/ol_txrx_internal.h
@@ -0,0 +1,737 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _OL_TXRX_INTERNAL__H_
+#define _OL_TXRX_INTERNAL__H_
+
+#include <cdf_util.h> /* cdf_assert */
+#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <cdf_memory.h> /* cdf_mem_set */
+#include <cds_ieee80211_common.h> /* ieee80211_frame */
+#include <ol_htt_rx_api.h> /* htt_rx_msdu_desc_completes_mpdu, etc. */
+
+#include <ol_txrx_types.h>
+
+#include <ol_txrx_dbg.h>
+#include <enet.h> /* ETHERNET_HDR_LEN, etc. */
+#include <ipv4.h> /* IPV4_HDR_LEN, etc. */
+#include <ipv6.h> /* IPV6_HDR_LEN, etc. */
+#include <ip_prot.h> /* IP_PROTOCOL_TCP, etc. */
+
+#ifdef ATH_11AC_TXCOMPACT
+#define OL_TX_DESC_NO_REFS(tx_desc) 1
+#define OL_TX_DESC_REF_INIT(tx_desc) /* no-op */
+#define OL_TX_DESC_REF_INC(tx_desc) /* no-op */
+#else
+#define OL_TX_DESC_NO_REFS(tx_desc) \
+ cdf_atomic_dec_and_test(&tx_desc->ref_cnt)
+#define OL_TX_DESC_REF_INIT(tx_desc) cdf_atomic_init(&tx_desc->ref_cnt)
+#define OL_TX_DESC_REF_INC(tx_desc) cdf_atomic_inc(&tx_desc->ref_cnt)
+#endif
+
+#ifndef TXRX_ASSERT_LEVEL
+#define TXRX_ASSERT_LEVEL 3
+#endif
+
+#ifdef __KLOCWORK__
+#define TXRX_ASSERT1(x) do { if (!(x)) abort(); } while (0)
+#define TXRX_ASSERT2(x) do { if (!(x)) abort(); } while (0)
+#else /* #ifdef __KLOCWORK__ */
+
+#if TXRX_ASSERT_LEVEL > 0
+#define TXRX_ASSERT1(condition) cdf_assert((condition))
+#else
+#define TXRX_ASSERT1(condition)
+#endif
+
+#if TXRX_ASSERT_LEVEL > 1
+#define TXRX_ASSERT2(condition) cdf_assert((condition))
+#else
+#define TXRX_ASSERT2(condition)
+#endif
+#endif /* #ifdef __KLOCWORK__ */
+enum {
+ /* FATAL_ERR - print only irrecoverable error messages */
+ TXRX_PRINT_LEVEL_FATAL_ERR,
+
+ /* ERR - include non-fatal err messages */
+ TXRX_PRINT_LEVEL_ERR,
+
+ /* WARN - include warnings */
+ TXRX_PRINT_LEVEL_WARN,
+
+ /* INFO1 - include fundamental, infrequent events */
+ TXRX_PRINT_LEVEL_INFO1,
+
+ /* INFO2 - include non-fundamental but infrequent events */
+ TXRX_PRINT_LEVEL_INFO2,
+
+ /* INFO3 - include frequent events */
+ /* to avoid performance impact, don't use INFO3
+ unless explicitly enabled */
+#ifdef TXRX_PRINT_VERBOSE_ENABLE
+ TXRX_PRINT_LEVEL_INFO3,
+#endif /* TXRX_PRINT_VERBOSE_ENABLE */
+};
+
+extern unsigned g_txrx_print_level;
+
+#ifdef TXRX_PRINT_ENABLE
+
+#include <stdarg.h> /* va_list */
+#include <cdf_types.h> /* cdf_vprint */
+
+/* Supress 4296 - expression is always true
+* It will fire if level is TXRX_PRINT_LEVEL_FATAL_ERR (0)
+* because g_txrx_print_level is unsigned */
+#define ol_txrx_print(level, fmt, ...) { \
+ if (level <= g_txrx_print_level) \
+ cdf_print(fmt, ## __VA_ARGS__); }
+#define TXRX_PRINT(level, fmt, ...) \
+ ol_txrx_print(level, "TXRX: " fmt, ## __VA_ARGS__)
+
+#ifdef TXRX_PRINT_VERBOSE_ENABLE
+
+#define ol_txrx_print_verbose(fmt, ...) { \
+ if (TXRX_PRINT_LEVEL_INFO3 <= g_txrx_print_level) \
+ cdf_print(fmt, ## __VA_ARGS__); }
+#define TXRX_PRINT_VERBOSE(fmt, ...) \
+ ol_txrx_print_verbose("TXRX: " fmt, ## __VA_ARGS__)
+#else
+#define TXRX_PRINT_VERBOSE(fmt, ...)
+#endif /* TXRX_PRINT_VERBOSE_ENABLE */
+
+/* define PN check failure message print rate
+ as 1 second */
+#define TXRX_PN_CHECK_FAILURE_PRINT_PERIOD_MS 1000
+
+#else
+#define TXRX_PRINT(level, fmt, ...)
+#define TXRX_PRINT_VERBOSE(fmt, ...)
+#endif /* TXRX_PRINT_ENABLE */
+
+/*--- tx credit debug printouts ---*/
+
+#ifndef DEBUG_CREDIT
+#define DEBUG_CREDIT 0
+#endif
+
+#if DEBUG_CREDIT
+#define TX_CREDIT_DEBUG_PRINT(fmt, ...) cdf_print(fmt, ## __VA_ARGS__)
+#else
+#define TX_CREDIT_DEBUG_PRINT(fmt, ...)
+#endif
+
+/*--- tx scheduler debug printouts ---*/
+
+#ifdef HOST_TX_SCHED_DEBUG
+#define TX_SCHED_DEBUG_PRINT(fmt, ...) cdf_print(fmt, ## __VA_ARGS__)
+#else
+#define TX_SCHED_DEBUG_PRINT(fmt, ...)
+#endif
+#define TX_SCHED_DEBUG_PRINT_ALWAYS(fmt, ...) cdf_print(fmt, ## __VA_ARGS__)
+
+/*
+ * OL_TXRX_LIST_APPEND - append netbuf @elem to the singly-linked nbuf
+ * list tracked by (@head, @tail); both cursors are updated in place.
+ * @head/@tail must both be NULL (empty list) or both valid.
+ */
+#define OL_TXRX_LIST_APPEND(head, tail, elem) \
+	do {						\
+		if (!(head)) {				    \
+			(head) = (elem);			\
+		} else {				    \
+			cdf_nbuf_set_next((tail), (elem));	\
+		} \
+		(tail) = (elem);			    \
+	} while (0)
+
+/**
+ * ol_rx_mpdu_list_next() - locate the boundary of the first MPDU
+ * @pdev: txrx physical device
+ * @mpdu_list: head MSDU of a flat MSDU list
+ * @mpdu_tail: out - last MSDU belonging to the first MPDU
+ * @next_mpdu: out - first MSDU of the following MPDU (may be NULL)
+ */
+static inline void
+ol_rx_mpdu_list_next(struct ol_txrx_pdev_t *pdev,
+		     void *mpdu_list,
+		     cdf_nbuf_t *mpdu_tail, cdf_nbuf_t *next_mpdu)
+{
+	htt_pdev_handle htt_pdev = pdev->htt_pdev;
+	cdf_nbuf_t msdu;
+
+	/*
+	 * For now, we use a simply flat list of MSDUs.
+	 * So, traverse the list until we reach the last MSDU within the MPDU.
+	 */
+	TXRX_ASSERT2(mpdu_list);
+	msdu = mpdu_list;
+	/* the rx descriptor marks the MSDU that completes an MPDU */
+	while (!htt_rx_msdu_desc_completes_mpdu
+		       (htt_pdev, htt_rx_msdu_desc_retrieve(htt_pdev, msdu))) {
+		msdu = cdf_nbuf_next(msdu);
+		TXRX_ASSERT2(msdu);
+	}
+	/* msdu now points to the last MSDU within the first MPDU */
+	*mpdu_tail = msdu;
+	*next_mpdu = cdf_nbuf_next(msdu);
+}
+
+/*--- txrx stats macros ---*/
+
+/* unconditional defs */
+#define TXRX_STATS_INCR(pdev, field) TXRX_STATS_ADD(pdev, field, 1)
+
+/* default conditional defs (may be undefed below) */
+
+#define TXRX_STATS_INIT(_pdev) \
+ cdf_mem_set(&((_pdev)->stats), sizeof((_pdev)->stats), 0x0)
+#define TXRX_STATS_ADD(_pdev, _field, _delta) { \
+ _pdev->stats._field += _delta; }
+#define TXRX_STATS_MSDU_INCR(pdev, field, netbuf) \
+ do { \
+ TXRX_STATS_INCR((pdev), pub.field.pkts); \
+ TXRX_STATS_ADD((pdev), pub.field.bytes, cdf_nbuf_len(netbuf)); \
+ } while (0)
+
+/* conditional defs based on verbosity level */
+
+
+#define TXRX_STATS_MSDU_LIST_INCR(pdev, field, netbuf_list) \
+ do { \
+ cdf_nbuf_t tmp_list = netbuf_list; \
+ while (tmp_list) { \
+ TXRX_STATS_MSDU_INCR(pdev, field, tmp_list); \
+ tmp_list = cdf_nbuf_next(tmp_list); \
+ } \
+ } while (0)
+
+#define TXRX_STATS_MSDU_INCR_TX_STATUS(status, pdev, netbuf) do { \
+ if (status == htt_tx_status_ok) \
+ TXRX_STATS_MSDU_INCR(pdev, tx.delivered, netbuf); \
+ else if (status == htt_tx_status_discard) \
+ TXRX_STATS_MSDU_INCR(pdev, tx.dropped.target_discard, \
+ netbuf); \
+ else if (status == htt_tx_status_no_ack) \
+ TXRX_STATS_MSDU_INCR(pdev, tx.dropped.no_ack, netbuf); \
+ else if (status == htt_tx_status_download_fail) \
+ TXRX_STATS_MSDU_INCR(pdev, tx.dropped.download_fail, \
+ netbuf); \
+ else \
+ /* NO-OP */; \
+ } while (0)
+
+#define TXRX_STATS_UPDATE_TX_COMP_HISTOGRAM(_pdev, _p_cntrs) \
+ do { \
+ if (_p_cntrs == 1) { \
+ TXRX_STATS_ADD(_pdev, pub.tx.comp_histogram.pkts_1, 1);\
+ } else if (_p_cntrs > 2 && _p_cntrs <= 10) { \
+ TXRX_STATS_ADD(_pdev, \
+ pub.tx.comp_histogram.pkts_2_10, 1); \
+ } else if (_p_cntrs > 10 && _p_cntrs <= 20) { \
+ TXRX_STATS_ADD(_pdev, \
+ pub.tx.comp_histogram.pkts_11_20, 1); \
+ } else if (_p_cntrs > 20 && _p_cntrs <= 30) { \
+ TXRX_STATS_ADD(_pdev, \
+ pub.tx.comp_histogram.pkts_21_30, 1); \
+ } else if (_p_cntrs > 30 && _p_cntrs <= 40) { \
+ TXRX_STATS_ADD(_pdev, \
+ pub.tx.comp_histogram.pkts_31_40, 1); \
+ } else if (_p_cntrs > 40 && _p_cntrs <= 50) { \
+ TXRX_STATS_ADD(_pdev, \
+ pub.tx.comp_histogram.pkts_41_50, 1); \
+ } else if (_p_cntrs > 50 && _p_cntrs <= 60) { \
+ TXRX_STATS_ADD(_pdev, \
+ pub.tx.comp_histogram.pkts_51_60, 1); \
+ } else { \
+ TXRX_STATS_ADD(_pdev, \
+ pub.tx.comp_histogram.pkts_61_plus, 1); \
+ } \
+ } while (0)
+
+#define TXRX_STATS_UPDATE_TX_STATS(_pdev, _status, _p_cntrs, _b_cntrs) \
+ do { \
+ switch (status) { \
+ case htt_tx_status_ok: \
+ TXRX_STATS_ADD(_pdev, \
+ pub.tx.delivered.pkts, _p_cntrs); \
+ TXRX_STATS_ADD(_pdev, \
+ pub.tx.delivered.bytes, _b_cntrs); \
+ break; \
+ case htt_tx_status_discard: \
+ TXRX_STATS_ADD(_pdev, \
+ pub.tx.dropped.target_discard.pkts, _p_cntrs);\
+ TXRX_STATS_ADD(_pdev, \
+ pub.tx.dropped.target_discard.bytes, _b_cntrs);\
+ break; \
+ case htt_tx_status_no_ack: \
+ TXRX_STATS_ADD(_pdev, pub.tx.dropped.no_ack.pkts, \
+ _p_cntrs); \
+ TXRX_STATS_ADD(_pdev, pub.tx.dropped.no_ack.bytes, \
+ _b_cntrs); \
+ break; \
+ case htt_tx_status_download_fail: \
+ TXRX_STATS_ADD(_pdev, \
+ pub.tx.dropped.download_fail.pkts, _p_cntrs); \
+ TXRX_STATS_ADD(_pdev, \
+ pub.tx.dropped.download_fail.bytes, _b_cntrs);\
+ break; \
+ default: \
+ break; \
+ } \
+ TXRX_STATS_UPDATE_TX_COMP_HISTOGRAM(_pdev, _p_cntrs); \
+ } while (0)
+
+
+/*--- txrx sequence number trace macros ---*/
+
+#define TXRX_SEQ_NUM_ERR(_status) (0xffff - _status)
+
+#if defined(ENABLE_RX_REORDER_TRACE)
+
+A_STATUS ol_rx_reorder_trace_attach(ol_txrx_pdev_handle pdev);
+void ol_rx_reorder_trace_detach(ol_txrx_pdev_handle pdev);
+void ol_rx_reorder_trace_add(ol_txrx_pdev_handle pdev,
+ uint8_t tid,
+ uint16_t reorder_idx,
+ uint16_t seq_num, int num_mpdus);
+
+#define OL_RX_REORDER_TRACE_ATTACH ol_rx_reorder_trace_attach
+#define OL_RX_REORDER_TRACE_DETACH ol_rx_reorder_trace_detach
+#define OL_RX_REORDER_TRACE_ADD ol_rx_reorder_trace_add
+
+#else
+
+#define OL_RX_REORDER_TRACE_ATTACH(_pdev) A_OK
+#define OL_RX_REORDER_TRACE_DETACH(_pdev)
+#define OL_RX_REORDER_TRACE_ADD(pdev, tid, reorder_idx, seq_num, num_mpdus)
+
+#endif /* ENABLE_RX_REORDER_TRACE */
+
+/*--- txrx packet number trace macros ---*/
+
+#if defined(ENABLE_RX_PN_TRACE)
+
+A_STATUS ol_rx_pn_trace_attach(ol_txrx_pdev_handle pdev);
+void ol_rx_pn_trace_detach(ol_txrx_pdev_handle pdev);
+void ol_rx_pn_trace_add(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_peer_t *peer,
+ uint16_t tid, void *rx_desc);
+
+#define OL_RX_PN_TRACE_ATTACH ol_rx_pn_trace_attach
+#define OL_RX_PN_TRACE_DETACH ol_rx_pn_trace_detach
+#define OL_RX_PN_TRACE_ADD ol_rx_pn_trace_add
+
+#else
+
+#define OL_RX_PN_TRACE_ATTACH(_pdev) A_OK
+#define OL_RX_PN_TRACE_DETACH(_pdev)
+#define OL_RX_PN_TRACE_ADD(pdev, peer, tid, rx_desc)
+
+#endif /* ENABLE_RX_PN_TRACE */
+
+/**
+ * ol_txrx_ieee80211_hdrsize() - compute an 802.11 MAC header length
+ * @data: pointer to the start of an 802.11 frame (struct ieee80211_frame)
+ *
+ * Accounts for the 4th address (DS-to-DS frames), the QoS control
+ * field, and the HT control field (QoS frame with Order bit set).
+ * Control frames are not handled (asserted against).
+ *
+ * Return: header size in bytes
+ */
+static inline int ol_txrx_ieee80211_hdrsize(const void *data)
+{
+	const struct ieee80211_frame *wh = (const struct ieee80211_frame *)data;
+	int size = sizeof(struct ieee80211_frame);
+
+	/* NB: we don't handle control frames */
+	TXRX_ASSERT1((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
+		     IEEE80211_FC0_TYPE_CTL);
+	/* 4-address (WDS) frames carry a fourth MAC address */
+	if ((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
+	    IEEE80211_FC1_DIR_DSTODS)
+		size += IEEE80211_ADDR_LEN;
+	if (IEEE80211_QOS_HAS_SEQ(wh)) {
+		size += sizeof(uint16_t);
+		/* Qos frame with Order bit set indicates an HTC frame */
+		if (wh->i_fc[1] & IEEE80211_FC1_ORDER)
+			size += sizeof(struct ieee80211_htc);
+	}
+	return size;
+}
+
+/*--- frame display utility ---*/
+
+enum ol_txrx_frm_dump_options {
+ ol_txrx_frm_dump_contents = 0x1,
+ ol_txrx_frm_dump_tcp_seq = 0x2,
+};
+
+#ifdef TXRX_DEBUG_DATA
+/**
+ * ol_txrx_frms_dump() - debug-log a chain of network buffers
+ * @name: optional label logged before the dump (may be NULL)
+ * @pdev: data path device; pdev->frame_format selects 802.3 vs 802.11 parse
+ * @frm: head of the netbuf chain to walk
+ * @display_options: bitmask of enum ol_txrx_frm_dump_options
+ * @max_len: maximum payload bytes to hex-dump per frame (capped at 128)
+ *
+ * With ol_txrx_frm_dump_tcp_seq set, parses L2/L3 headers and logs the
+ * TCP sequence number of each TCP/IP frame.  With
+ * ol_txrx_frm_dump_contents set, gathers up to max_len bytes from the
+ * netbuf fragments into a local buffer and hex-dumps them.
+ */
+static inline void
+ol_txrx_frms_dump(const char *name,
+    struct ol_txrx_pdev_t *pdev,
+    cdf_nbuf_t frm,
+    enum ol_txrx_frm_dump_options display_options, int max_len)
+{
+#define TXRX_FRM_DUMP_MAX_LEN 128
+ uint8_t local_buf[TXRX_FRM_DUMP_MAX_LEN] = { 0 };
+ uint8_t *p;
+
+ if (name) {
+  CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO, "%s\n",
+     name);
+ }
+ while (frm) {
+  p = cdf_nbuf_data(frm);
+  if (display_options & ol_txrx_frm_dump_tcp_seq) {
+   int tcp_offset;
+   int l2_hdr_size;
+   uint16_t ethtype;
+   uint8_t ip_prot;
+
+   if (pdev->frame_format == wlan_frm_fmt_802_3) {
+    struct ethernet_hdr_t *enet_hdr =
+     (struct ethernet_hdr_t *)p;
+    l2_hdr_size = ETHERNET_HDR_LEN;
+
+    /*
+     * LLC/SNAP present?
+     * (Fix: the original tested the undeclared
+     * name "ethertype" rather than "ethtype",
+     * breaking the TXRX_DEBUG_DATA build.)
+     */
+    ethtype = (enet_hdr->ethertype[0] << 8) |
+       enet_hdr->ethertype[1];
+    if (!IS_ETHERTYPE(ethtype)) {
+     /* 802.3 format */
+     struct llc_snap_hdr_t *llc_hdr;
+
+     llc_hdr = (struct llc_snap_hdr_t *)
+      (p + l2_hdr_size);
+     l2_hdr_size += LLC_SNAP_HDR_LEN;
+     ethtype = (llc_hdr->ethertype[0] << 8) |
+        llc_hdr->ethertype[1];
+    }
+   } else {
+    struct llc_snap_hdr_t *llc_hdr;
+    /* (generic?) 802.11 */
+    l2_hdr_size = sizeof(struct ieee80211_frame);
+    llc_hdr = (struct llc_snap_hdr_t *)
+     (p + l2_hdr_size);
+    l2_hdr_size += LLC_SNAP_HDR_LEN;
+    ethtype = (llc_hdr->ethertype[0] << 8) |
+       llc_hdr->ethertype[1];
+   }
+   if (ethtype == ETHERTYPE_IPV4) {
+    struct ipv4_hdr_t *ipv4_hdr;
+    ipv4_hdr =
+     (struct ipv4_hdr_t *)(p + l2_hdr_size);
+    ip_prot = ipv4_hdr->protocol;
+    tcp_offset = l2_hdr_size + IPV4_HDR_LEN;
+   } else if (ethtype == ETHERTYPE_IPV6) {
+    struct ipv6_hdr_t *ipv6_hdr;
+    ipv6_hdr =
+     (struct ipv6_hdr_t *)(p + l2_hdr_size);
+    ip_prot = ipv6_hdr->next_hdr;
+    tcp_offset = l2_hdr_size + IPV6_HDR_LEN;
+   } else {
+    CDF_TRACE(CDF_MODULE_ID_TXRX,
+       CDF_TRACE_LEVEL_INFO,
+       "frame %p non-IP ethertype (%x)\n",
+       frm, ethtype);
+    goto NOT_IP_TCP;
+   }
+   if (ip_prot == IP_PROTOCOL_TCP) {
+#if NEVERDEFINED
+    struct tcp_hdr_t *tcp_hdr;
+    uint32_t tcp_seq_num;
+    tcp_hdr = (struct tcp_hdr_t *)(p + tcp_offset);
+    /*
+     * Fix: bytes 1-3 of the sequence number
+     * were all read from seq_num[1] in the
+     * original (copy-paste error).
+     */
+    tcp_seq_num =
+     (tcp_hdr->seq_num[0] << 24) |
+     (tcp_hdr->seq_num[1] << 16) |
+     (tcp_hdr->seq_num[2] << 8) |
+     (tcp_hdr->seq_num[3] << 0);
+    CDF_TRACE(CDF_MODULE_ID_TXRX,
+       CDF_TRACE_LEVEL_INFO,
+       "frame %p: TCP seq num = %d\n", frm,
+       tcp_seq_num);
+#else
+    CDF_TRACE(CDF_MODULE_ID_TXRX,
+       CDF_TRACE_LEVEL_INFO,
+       "frame %p: TCP seq num = %d\n", frm,
+       ((*(p + tcp_offset + 4)) << 24) |
+       ((*(p + tcp_offset + 5)) << 16) |
+       ((*(p + tcp_offset + 6)) << 8) |
+       (*(p + tcp_offset + 7)));
+#endif
+   } else {
+    CDF_TRACE(CDF_MODULE_ID_TXRX,
+       CDF_TRACE_LEVEL_INFO,
+       "frame %p non-TCP IP protocol (%x)\n",
+       frm, ip_prot);
+   }
+  }
+NOT_IP_TCP:
+  if (display_options & ol_txrx_frm_dump_contents) {
+   int i, frag_num, len_lim;
+   len_lim = max_len;
+   if (len_lim > cdf_nbuf_len(frm))
+    len_lim = cdf_nbuf_len(frm);
+   if (len_lim > TXRX_FRM_DUMP_MAX_LEN)
+    len_lim = TXRX_FRM_DUMP_MAX_LEN;
+
+   /*
+    * Gather frame contents from netbuf fragments
+    * into a contiguous buffer.
+    */
+   frag_num = 0;
+   i = 0;
+   while (i < len_lim) {
+    int frag_bytes;
+    frag_bytes =
+     cdf_nbuf_get_frag_len(frm, frag_num);
+    if (frag_bytes > len_lim - i)
+     frag_bytes = len_lim - i;
+    if (frag_bytes > 0) {
+     p = cdf_nbuf_get_frag_vaddr(frm,
+            frag_num);
+     cdf_mem_copy(&local_buf[i], p,
+           frag_bytes);
+    }
+    frag_num++;
+    i += frag_bytes;
+   }
+
+   CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO,
+      "frame %p data (%p), hex dump of bytes 0-%d of %d:\n",
+      frm, p, len_lim - 1, (int)cdf_nbuf_len(frm));
+   p = local_buf;
+   while (len_lim > 16) {
+    CDF_TRACE(CDF_MODULE_ID_TXRX,
+       CDF_TRACE_LEVEL_INFO,
+       "  " /* indent */
+       "%02x %02x %02x %02x %02x %02x %02x %02x "
+       "%02x %02x %02x %02x %02x %02x %02x %02x\n",
+       *(p + 0), *(p + 1), *(p + 2),
+       *(p + 3), *(p + 4), *(p + 5),
+       *(p + 6), *(p + 7), *(p + 8),
+       *(p + 9), *(p + 10), *(p + 11),
+       *(p + 12), *(p + 13), *(p + 14),
+       *(p + 15));
+    p += 16;
+    len_lim -= 16;
+   }
+   CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO,
+      "  " /* indent */);
+   while (len_lim > 0) {
+    CDF_TRACE(CDF_MODULE_ID_TXRX,
+       CDF_TRACE_LEVEL_INFO, "%02x ", *p);
+    p++;
+    len_lim--;
+   }
+   CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO,
+      "\n");
+  }
+  frm = cdf_nbuf_next(frm);
+ }
+}
+#else
+#define ol_txrx_frms_dump(name, pdev, frms, display_options, max_len)
+#endif /* TXRX_DEBUG_DATA */
+
+#ifdef SUPPORT_HOST_STATISTICS
+
+/* Report an rx error against a known peer/vdev to the control layer. */
+#define OL_RX_ERR_STATISTICS(pdev, vdev, err_type, sec_type, is_mcast) \
+ ol_rx_err_statistics(pdev->ctrl_pdev, vdev->vdev_id, err_type, \
+        sec_type, is_mcast);
+
+/*
+ * Derive mcast/ucast and the security type from the rx descriptor and
+ * peer security state, then record the error.
+ */
+#define OL_RX_ERR_STATISTICS_1(pdev, vdev, peer, rx_desc, err_type) \
+ do { \
+  int is_mcast; \
+  enum htt_sec_type sec_type; \
+  is_mcast = htt_rx_msdu_is_wlan_mcast( \
+   pdev->htt_pdev, rx_desc); \
+  sec_type = peer->security[is_mcast \
+       ? txrx_sec_mcast \
+       : txrx_sec_ucast].sec_type; \
+  OL_RX_ERR_STATISTICS(pdev, vdev, err_type, \
+         pdev->sec_types[sec_type], \
+         is_mcast); \
+ } while (false)
+
+/* Record an rx error for a frame whose peer could not be identified. */
+#define OL_RX_ERR_INV_PEER_STATISTICS(pdev, rx_msdu) \
+ do { \
+  struct ieee80211_frame *wh = NULL; \
+  /*FIX THIS : */ \
+  /* Here htt_rx_mpdu_wifi_hdr_retrieve should be used. */ \
+  /*But at present it seems it does not work.*/ \
+  /*wh = (struct ieee80211_frame *) */ \
+  /*htt_rx_mpdu_wifi_hdr_retrieve(pdev->htt_pdev, rx_desc);*/ \
+  /* this only apply to LL device.*/ \
+  if (ol_cfg_frame_type(pdev->ctrl_pdev) == \
+      wlan_frm_fmt_native_wifi) { \
+   /* For windows, it is always native wifi header .*/ \
+   wh = (struct ieee80211_frame *)cdf_nbuf_data(rx_msdu); \
+  } \
+  ol_rx_err_inv_peer_statistics(pdev->ctrl_pdev, \
+           wh, OL_RX_ERR_UNKNOWN_PEER); \
+ } while (false)
+
+/*
+ * Translate an htt rx status into an ol_rx_err_type and record it,
+ * either against the peer (if known) or as an unknown-peer error.
+ * Fix: the original expanded the caller's variable "rx_mpdu_desc"
+ * instead of the macro parameter "rx_desc", silently capturing a name
+ * from the invoking scope.
+ */
+#define OL_RX_ERR_STATISTICS_2(pdev, vdev, peer, rx_desc, rx_msdu, rx_status) \
+ do { \
+  enum ol_rx_err_type err_type = OL_RX_ERR_NONE; \
+  if (rx_status == htt_rx_status_decrypt_err) \
+   err_type = OL_RX_ERR_DECRYPT; \
+  else if (rx_status == htt_rx_status_tkip_mic_err) \
+   err_type = OL_RX_ERR_TKIP_MIC; \
+  else if (rx_status == htt_rx_status_mpdu_length_err) \
+   err_type = OL_RX_ERR_MPDU_LENGTH; \
+  else if (rx_status == htt_rx_status_mpdu_encrypt_required_err) \
+   err_type = OL_RX_ERR_ENCRYPT_REQUIRED; \
+  else if (rx_status == htt_rx_status_err_dup) \
+   err_type = OL_RX_ERR_DUP; \
+  else if (rx_status == htt_rx_status_err_fcs) \
+   err_type = OL_RX_ERR_FCS; \
+  else \
+   err_type = OL_RX_ERR_UNKNOWN; \
+  \
+  if (vdev != NULL && peer != NULL) { \
+   OL_RX_ERR_STATISTICS_1(pdev, vdev, peer, \
+            rx_desc, err_type); \
+  } else { \
+   OL_RX_ERR_INV_PEER_STATISTICS(pdev, rx_msdu); \
+  } \
+ } while (false)
+#else
+#define OL_RX_ERR_STATISTICS(pdev, vdev, err_type, sec_type, is_mcast)
+#define OL_RX_ERR_STATISTICS_1(pdev, vdev, peer, rx_desc, err_type)
+#define OL_RX_ERR_STATISTICS_2(pdev, vdev, peer, rx_desc, rx_msdu, rx_status)
+#endif /* SUPPORT_HOST_STATISTICS */
+
+#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
+/* Bump one frame/byte counter pair under the pdev peer-stat lock. */
+#define OL_TXRX_PEER_STATS_UPDATE_BASE(peer, tx_or_rx, type, msdu) \
+ do { \
+  cdf_spin_lock_bh(&peer->vdev->pdev->peer_stat_mutex); \
+  peer->stats.tx_or_rx.frms.type += 1; \
+  peer->stats.tx_or_rx.bytes.type += cdf_nbuf_len(msdu); \
+  cdf_spin_unlock_bh(&peer->vdev->pdev->peer_stat_mutex); \
+ } while (0)
+/*
+ * Classify the msdu's destination as bcast/mcast/ucast (reading the DA
+ * from the 802.3 or 802.11 header per pdev->frame_format) and update
+ * the matching counters.
+ */
+#define OL_TXRX_PEER_STATS_UPDATE(peer, tx_or_rx, msdu) \
+ do { \
+  struct ol_txrx_vdev_t *vdev = peer->vdev; \
+  struct ol_txrx_pdev_t *pdev = vdev->pdev; \
+  uint8_t *dest_addr; \
+  if (pdev->frame_format == wlan_frm_fmt_802_3) { \
+   dest_addr = cdf_nbuf_data(msdu); \
+  } else { /* 802.11 format */ \
+   struct ieee80211_frame *frm; \
+   frm = (struct ieee80211_frame *) cdf_nbuf_data(msdu); \
+   if (vdev->opmode == wlan_op_mode_ap) { \
+    dest_addr = (uint8_t *) &(frm->i_addr1[0]); \
+   } else { \
+    dest_addr = (uint8_t *) &(frm->i_addr3[0]); \
+   } \
+  } \
+  if (cdf_unlikely(IEEE80211_IS_BROADCAST(dest_addr))) { \
+   OL_TXRX_PEER_STATS_UPDATE_BASE(peer, tx_or_rx, \
+            bcast, msdu); \
+  } else if (cdf_unlikely(IEEE80211_IS_MULTICAST(dest_addr))) { \
+   OL_TXRX_PEER_STATS_UPDATE_BASE(peer, tx_or_rx, \
+            mcast, msdu); \
+  } else { \
+   OL_TXRX_PEER_STATS_UPDATE_BASE(peer, tx_or_rx, \
+            ucast, msdu); \
+  } \
+ } while (0)
+#define OL_TX_PEER_STATS_UPDATE(peer, msdu) \
+ OL_TXRX_PEER_STATS_UPDATE(peer, tx, msdu)
+#define OL_RX_PEER_STATS_UPDATE(peer, msdu) \
+ OL_TXRX_PEER_STATS_UPDATE(peer, rx, msdu)
+#define OL_TXRX_PEER_STATS_MUTEX_INIT(pdev) \
+ cdf_spinlock_init(&pdev->peer_stat_mutex)
+#define OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev) \
+ cdf_spinlock_destroy(&pdev->peer_stat_mutex)
+#else
+#define OL_TX_PEER_STATS_UPDATE(peer, msdu) /* no-op */
+#define OL_RX_PEER_STATS_UPDATE(peer, msdu) /* no-op */
+/*
+ * Fix: the no-op variants misnamed their argument "peer" although the
+ * enabled variants (and all callers) pass a pdev.  The name change has
+ * no behavioral effect since the bodies are empty.
+ */
+#define OL_TXRX_PEER_STATS_MUTEX_INIT(pdev) /* no-op */
+#define OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev) /* no-op */
+#endif
+
+/* Default: HTT credit debug tracing disabled unless the build enables it. */
+#ifndef DEBUG_HTT_CREDIT
+#define DEBUG_HTT_CREDIT 0
+#endif
+
+/*
+ * TSO debug statistics: a ring of per-MSDU records (indexed by
+ * tso_msdu_idx) each holding a ring of per-segment records (indexed by
+ * tso_seg_idx).  The INC macros advance and wrap the indices using the
+ * NUM_MAX_TSO_*_MASK masks; the RST macros zero the current record.
+ */
+#if defined(FEATURE_TSO_DEBUG)
+#define TXRX_STATS_TSO_RESET_MSDU(pdev) \
+ do { \
+  int idx = TXRX_STATS_TSO_MSDU_IDX(pdev);\
+  pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].num_seg = 0; \
+  pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].tso_seg_idx = 0; \
+ } while (0)
+
+#define TXRX_STATS_TSO_MSDU_IDX(pdev) \
+ pdev->stats.pub.tx.tso.tso_info.tso_msdu_idx
+
+#define TXRX_STATS_TSO_MSDU(pdev, idx) \
+ pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx]
+
+#define TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, idx) \
+ pdev->stats.pub.tx.tso.tso_info.tso_msdu_info[idx].num_seg
+
+#define TXRX_STATS_TSO_CURR_MSDU(pdev) \
+ TXRX_STATS_TSO_MSDU(pdev, TXRX_STATS_TSO_MSDU_IDX(pdev))
+
+#define TXRX_STATS_TSO_INC_MSDU_IDX(pdev) \
+ do { \
+  TXRX_STATS_TSO_MSDU_IDX(pdev)++; \
+  TXRX_STATS_TSO_MSDU_IDX(pdev) &= NUM_MAX_TSO_MSDUS_MASK; \
+ } while (0)
+
+#define TXRX_STATS_TSO_SEG_IDX(pdev) \
+ TXRX_STATS_TSO_CURR_MSDU(pdev).tso_seg_idx
+
+#define TXRX_STATS_TSO_INC_SEG(pdev) \
+ TXRX_STATS_TSO_CURR_MSDU(pdev).num_seg++
+
+#define TXRX_STATS_TSO_RST_SEG(pdev) \
+ TXRX_STATS_TSO_CURR_MSDU(pdev).num_seg = 0
+
+#define TXRX_STATS_TSO_RST_SEG_IDX(pdev) \
+ TXRX_STATS_TSO_CURR_MSDU(pdev).tso_seg_idx = 0
+
+#define TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx) \
+ TXRX_STATS_TSO_MSDU(pdev, msdu_idx).tso_segs[seg_idx]
+
+#define TXRX_STATS_TSO_CURR_SEG(pdev) \
+ TXRX_STATS_TSO_SEG(pdev, TXRX_STATS_TSO_MSDU_IDX(pdev), \
+      TXRX_STATS_TSO_SEG_IDX(pdev)) \
+
+#define TXRX_STATS_TSO_INC_SEG_IDX(pdev) \
+ do { \
+  TXRX_STATS_TSO_SEG_IDX(pdev)++; \
+  TXRX_STATS_TSO_SEG_IDX(pdev) &= NUM_MAX_TSO_SEGS_MASK; \
+ } while (0)
+
+#define TXRX_STATS_TSO_SEG_UPDATE(pdev, tso_seg) \
+ (TXRX_STATS_TSO_CURR_SEG(pdev) = tso_seg)
+
+#else
+/* Feature disabled: all TSO-stats hooks compile away to nothing. */
+#define TXRX_STATS_TSO_RESET_MSDU(pdev) /* no-op */
+#define TXRX_STATS_TSO_MSDU_IDX(pdev) /* no-op */
+#define TXRX_STATS_TSO_MSDU(pdev, idx) /* no-op */
+#define TXRX_STATS_TSO_MSDU_NUM_SEG(pdev, idx) /* no-op */
+#define TXRX_STATS_TSO_CURR_MSDU(pdev) /* no-op */
+#define TXRX_STATS_TSO_INC_MSDU_IDX(pdev) /* no-op */
+#define TXRX_STATS_TSO_SEG_IDX(pdev) /* no-op */
+#define TXRX_STATS_TSO_SEG(pdev, msdu_idx, seg_idx) /* no-op */
+#define TXRX_STATS_TSO_CURR_SEG(pdev) /* no-op */
+#define TXRX_STATS_TSO_INC_SEG_IDX(pdev) /* no-op */
+#define TXRX_STATS_TSO_SEG_UPDATE(pdev, tso_seg) /* no-op */
+#define TXRX_STATS_TSO_INC_SEG(pdev) /* no-op */
+#define TXRX_STATS_TSO_RST_SEG(pdev) /* no-op */
+#define TXRX_STATS_TSO_RST_SEG_IDX(pdev) /* no-op */
+
+#endif /* FEATURE_TSO_DEBUG */
+
+#endif /* _OL_TXRX_INTERNAL__H_ */
diff --git a/dp/txrx/ol_txrx_peer_find.c b/dp/txrx/ol_txrx_peer_find.c
new file mode 100644
index 000000000000..6422ed4433a0
--- /dev/null
+++ b/dp/txrx/ol_txrx_peer_find.c
@@ -0,0 +1,492 @@
+/*
+ * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/*=== includes ===*/
+/* header files for OS primitives */
+#include <osdep.h> /* uint32_t, etc. */
+#include <cdf_memory.h> /* cdf_mem_malloc, etc. */
+#include <cdf_types.h> /* cdf_device_t, cdf_print */
+/* header files for utilities */
+#include <cds_queue.h> /* TAILQ */
+
+/* header files for configuration API */
+#include <ol_cfg.h> /* ol_cfg_max_peer_id */
+
+/* header files for our internal definitions */
+#include <ol_txrx_api.h> /* ol_txrx_pdev_t, etc. */
+#include <ol_txrx_dbg.h> /* TXRX_DEBUG_LEVEL */
+#include <ol_txrx_internal.h> /* ol_txrx_pdev_t, etc. */
+#include <ol_txrx.h> /* ol_txrx_peer_unref_delete */
+#include <ol_txrx_peer_find.h> /* ol_txrx_peer_find_attach, etc. */
+#include <ol_tx_queue.h>
+
+/*=== misc. / utility function definitions ==================================*/
+
+/**
+ * ol_txrx_log2_ceil() - ceiling of log base 2 of an unsigned value
+ * @value: value to take the log of (must be non-zero)
+ *
+ * Uses unsigned arithmetic so right shifts converge to 0.
+ *
+ * Return: smallest e such that (1 << e) >= value; 0 if value is 0
+ * (after asserting).
+ */
+static int ol_txrx_log2_ceil(unsigned value)
+{
+ unsigned residue = value;
+ int exponent = -1;
+
+ if (value == 0) {
+  TXRX_ASSERT2(0);
+  return 0;
+ }
+
+ /* find the position of the highest set bit */
+ while (residue) {
+  exponent++;
+  residue >>= 1;
+ }
+
+ /* round up when value is not an exact power of two */
+ if ((1U << exponent) != value)
+  exponent++;
+
+ return exponent;
+}
+
+/**
+ * ol_txrx_peer_find_add_id_to_obj() - record a peer ID inside the peer
+ * @peer: peer object to store the ID in
+ * @peer_id: target-assigned peer ID
+ *
+ * A peer may own several IDs; the new ID goes into the first free slot.
+ *
+ * Return: 0 on success, 1 if all slots are occupied
+ */
+static int
+ol_txrx_peer_find_add_id_to_obj(struct ol_txrx_peer_t *peer, uint16_t peer_id)
+{
+ int slot;
+
+ for (slot = 0; slot < MAX_NUM_PEER_ID_PER_PEER; slot++) {
+  if (peer->peer_ids[slot] != HTT_INVALID_PEER)
+   continue;
+  peer->peer_ids[slot] = peer_id;
+  return 0; /* success */
+ }
+ return 1; /* failure */
+}
+
+/*=== function definitions for peer MAC addr --> peer object hash table =====*/
+
+/*
+ * TXRX_PEER_HASH_LOAD_FACTOR:
+ * Multiply by 2 and divide by 2^0 (shift by 0), then round up to a
+ * power of two.
+ * This provides at least twice as many bins in the peer hash table
+ * as there will be entries.
+ * Having substantially more bins than spaces minimizes the probability of
+ * having to compare MAC addresses.
+ * Because the MAC address comparison is fairly efficient, it is okay if the
+ * hash table is sparsely loaded, but it's generally better to use extra mem
+ * to keep the table sparse, to keep the lookups as fast as possible.
+ * An optimization would be to apply a more conservative loading factor for
+ * high latency, where the lookup happens during the tx classification of
+ * every tx frame, than for low-latency, where the lookup only happens
+ * during association, when the PEER_MAP message is received.
+ */
+#define TXRX_PEER_HASH_LOAD_MULT 2
+#define TXRX_PEER_HASH_LOAD_SHIFT 0
+
+/**
+ * ol_txrx_peer_find_hash_attach() - allocate the MAC addr -> peer hash table
+ * @pdev: physical device that owns the table
+ *
+ * Sizes the table from the configured maximum peer ID scaled by the
+ * load factor, rounded up to a power of two so the index computation
+ * can use a mask instead of a modulo.
+ *
+ * Return: 0 on success, 1 on allocation failure
+ */
+static int ol_txrx_peer_find_hash_attach(struct ol_txrx_pdev_t *pdev)
+{
+ int i, hash_elems, log2;
+
+ /* allocate the peer MAC address -> peer object hash table */
+ hash_elems = ol_cfg_max_peer_id(pdev->ctrl_pdev) + 1;
+ hash_elems *= TXRX_PEER_HASH_LOAD_MULT;
+ hash_elems >>= TXRX_PEER_HASH_LOAD_SHIFT;
+ log2 = ol_txrx_log2_ceil(hash_elems);
+ hash_elems = 1 << log2;
+
+ pdev->peer_hash.mask = hash_elems - 1;
+ pdev->peer_hash.idx_bits = log2;
+ /* allocate an array of TAILQ peer object lists */
+ pdev->peer_hash.bins =
+  cdf_mem_malloc(hash_elems *
+         sizeof(TAILQ_HEAD(anonymous_tail_q,
+      ol_txrx_peer_t)));
+ if (!pdev->peer_hash.bins)
+  return 1; /* failure */
+
+ for (i = 0; i < hash_elems; i++)
+  TAILQ_INIT(&pdev->peer_hash.bins[i]);
+
+ return 0; /* success */
+}
+
+/* Free the MAC addr -> peer hash bins allocated by hash_attach. */
+static void ol_txrx_peer_find_hash_detach(struct ol_txrx_pdev_t *pdev)
+{
+ cdf_mem_free(pdev->peer_hash.bins);
+}
+
+/**
+ * ol_txrx_peer_find_hash_index() - map a MAC address to a hash bin
+ * @pdev: physical device holding the hash table parameters
+ * @mac_addr: aligned MAC address to hash
+ *
+ * Return: bin index in [0, peer_hash.mask]
+ */
+static inline unsigned
+ol_txrx_peer_find_hash_index(struct ol_txrx_pdev_t *pdev,
+        union ol_txrx_align_mac_addr_t *mac_addr)
+{
+ unsigned bin;
+
+ /* fold all six MAC octets together, 16 bits at a time */
+ bin = mac_addr->align2.bytes_ab ^
+       mac_addr->align2.bytes_cd ^
+       mac_addr->align2.bytes_ef;
+ /* mix the high bits down so they influence the bin choice too */
+ bin ^= bin >> pdev->peer_hash.idx_bits;
+ return bin & pdev->peer_hash.mask;
+}
+
+/**
+ * ol_txrx_peer_find_hash_add() - insert a peer into the MAC hash table
+ * @pdev: physical device that owns the hash table
+ * @peer: peer object to insert (keyed by peer->mac_addr)
+ *
+ * Takes pdev->peer_ref_mutex for the insertion.
+ */
+void
+ol_txrx_peer_find_hash_add(struct ol_txrx_pdev_t *pdev,
+      struct ol_txrx_peer_t *peer)
+{
+ unsigned index;
+
+ index = ol_txrx_peer_find_hash_index(pdev, &peer->mac_addr);
+ cdf_spin_lock_bh(&pdev->peer_ref_mutex);
+ /*
+  * It is important to add the new peer at the tail of the peer list
+  * with the bin index. Together with having the hash_find function
+  * search from head to tail, this ensures that if two entries with
+  * the same MAC address are stored, the one added first will be
+  * found first.
+  */
+ TAILQ_INSERT_TAIL(&pdev->peer_hash.bins[index], peer, hash_list_elem);
+ cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+}
+
+/**
+ * ol_txrx_peer_vdev_find_hash() - look up a peer by MAC address and vdev
+ * @pdev: physical device holding the hash table
+ * @vdev: virtual device the peer must belong to
+ * @peer_mac_addr: MAC address to search for
+ * @mac_addr_is_aligned: non-zero if peer_mac_addr may be cast directly
+ * to the aligned union; otherwise it is copied to a local aligned copy
+ * @check_valid: non-zero to skip peers whose valid flag is cleared
+ *
+ * On a hit, the peer's reference count is incremented under
+ * peer_ref_mutex before the lock is dropped; the caller owns that
+ * reference and must release it via ol_txrx_peer_unref_delete.
+ *
+ * Return: referenced peer object, or NULL if no match
+ */
+struct ol_txrx_peer_t *ol_txrx_peer_vdev_find_hash(struct ol_txrx_pdev_t *pdev,
+         struct ol_txrx_vdev_t *vdev,
+         uint8_t *peer_mac_addr,
+         int mac_addr_is_aligned,
+         uint8_t check_valid)
+{
+ union ol_txrx_align_mac_addr_t local_mac_addr_aligned, *mac_addr;
+ unsigned index;
+ struct ol_txrx_peer_t *peer;
+
+ if (mac_addr_is_aligned) {
+  mac_addr = (union ol_txrx_align_mac_addr_t *)peer_mac_addr;
+ } else {
+  cdf_mem_copy(&local_mac_addr_aligned.raw[0],
+        peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
+  mac_addr = &local_mac_addr_aligned;
+ }
+ index = ol_txrx_peer_find_hash_index(pdev, mac_addr);
+ cdf_spin_lock_bh(&pdev->peer_ref_mutex);
+ TAILQ_FOREACH(peer, &pdev->peer_hash.bins[index], hash_list_elem) {
+  if (ol_txrx_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) ==
+      0 && (check_valid == 0 || peer->valid)
+      && peer->vdev == vdev) {
+   /* found it - increment the ref count before releasing
+      the lock */
+   cdf_atomic_inc(&peer->ref_cnt);
+   cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+   return peer;
+  }
+ }
+ cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+ return NULL;   /* failure */
+}
+
+/**
+ * ol_txrx_peer_find_hash_find() - look up a peer by MAC address only
+ * @pdev: physical device holding the hash table
+ * @peer_mac_addr: MAC address to search for
+ * @mac_addr_is_aligned: non-zero if peer_mac_addr may be cast directly
+ * to the aligned union; otherwise it is copied to a local aligned copy
+ * @check_valid: non-zero to skip peers whose valid flag is cleared
+ *
+ * Same as ol_txrx_peer_vdev_find_hash but without restricting the
+ * match to a particular vdev.  On a hit the peer's reference count is
+ * incremented; the caller must release that reference.
+ *
+ * Return: referenced peer object, or NULL if no match
+ */
+struct ol_txrx_peer_t *ol_txrx_peer_find_hash_find(struct ol_txrx_pdev_t *pdev,
+       uint8_t *peer_mac_addr,
+       int mac_addr_is_aligned,
+       uint8_t check_valid)
+{
+ union ol_txrx_align_mac_addr_t local_mac_addr_aligned, *mac_addr;
+ unsigned index;
+ struct ol_txrx_peer_t *peer;
+
+ if (mac_addr_is_aligned) {
+  mac_addr = (union ol_txrx_align_mac_addr_t *)peer_mac_addr;
+ } else {
+  cdf_mem_copy(&local_mac_addr_aligned.raw[0],
+        peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
+  mac_addr = &local_mac_addr_aligned;
+ }
+ index = ol_txrx_peer_find_hash_index(pdev, mac_addr);
+ cdf_spin_lock_bh(&pdev->peer_ref_mutex);
+ TAILQ_FOREACH(peer, &pdev->peer_hash.bins[index], hash_list_elem) {
+  if (ol_txrx_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) ==
+      0 && (check_valid == 0 || peer->valid)) {
+   /* found it - increment the ref count before
+      releasing the lock */
+   cdf_atomic_inc(&peer->ref_cnt);
+   cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+   return peer;
+  }
+ }
+ cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+ return NULL;   /* failure */
+}
+
+/**
+ * ol_txrx_peer_find_hash_remove() - unlink a peer from the MAC hash table
+ * @pdev: physical device that owns the hash table
+ * @peer: peer object to remove
+ *
+ * The caller MUST already hold pdev->peer_ref_mutex; see the comment
+ * in the body for why the lock is deliberately not taken here.
+ */
+void
+ol_txrx_peer_find_hash_remove(struct ol_txrx_pdev_t *pdev,
+         struct ol_txrx_peer_t *peer)
+{
+ unsigned index;
+
+ index = ol_txrx_peer_find_hash_index(pdev, &peer->mac_addr);
+ /*
+  * DO NOT take the peer_ref_mutex lock here - it needs to be taken
+  * by the caller.
+  * The caller needs to hold the lock from the time the peer object's
+  * reference count is decremented and tested up through the time the
+  * reference to the peer object is removed from the hash table, by
+  * this function.
+  * Holding the lock only while removing the peer object reference
+  * from the hash table keeps the hash table consistent, but does not
+  * protect against a new HL tx context starting to use the peer object
+  * if it looks up the peer object from its MAC address just after the
+  * peer ref count is decremented to zero, but just before the peer
+  * object reference is removed from the hash table.
+  */
+ /* cdf_spin_lock_bh(&pdev->peer_ref_mutex); */
+ TAILQ_REMOVE(&pdev->peer_hash.bins[index], peer, hash_list_elem);
+ /* cdf_spin_unlock_bh(&pdev->peer_ref_mutex); */
+}
+
+/**
+ * ol_txrx_peer_find_hash_erase() - delete every peer left in the hash table
+ * @pdev: physical device being torn down
+ *
+ * Walks all bins and forces each remaining peer's ref count to exactly
+ * 1 so that ol_txrx_peer_unref_delete frees it.  Intended only for
+ * pdev teardown, when no other context can be using the peers.
+ */
+void ol_txrx_peer_find_hash_erase(struct ol_txrx_pdev_t *pdev)
+{
+ unsigned i;
+ /*
+  * Not really necessary to take peer_ref_mutex lock - by this point,
+  * it's known that the pdev is no longer in use.
+  */
+
+ for (i = 0; i <= pdev->peer_hash.mask; i++) {
+  if (!TAILQ_EMPTY(&pdev->peer_hash.bins[i])) {
+   struct ol_txrx_peer_t *peer, *peer_next;
+
+   /*
+    * TAILQ_FOREACH_SAFE must be used here to avoid any
+    * memory access violation after peer is freed
+    */
+   TAILQ_FOREACH_SAFE(peer, &pdev->peer_hash.bins[i],
+        hash_list_elem, peer_next) {
+    /*
+     * Don't remove the peer from the hash table -
+     * that would modify the list we are currently
+     * traversing,
+     * and it's not necessary anyway.
+     */
+    /*
+     * Artificially adjust the peer's ref count to
+     * 1, so it will get deleted by
+     * ol_txrx_peer_unref_delete.
+     */
+    cdf_atomic_init(&peer->ref_cnt); /* set to 0 */
+    cdf_atomic_inc(&peer->ref_cnt); /* incr to 1 */
+    TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+        "%s: Delete Peer %p\n", __func__,
+        peer);
+    ol_txrx_peer_unref_delete(peer);
+   }
+  }
+ }
+}
+
+/*=== function definitions for peer id --> peer object map ==================*/
+
+/**
+ * ol_txrx_peer_find_map_attach() - allocate the peer ID -> peer object map
+ * @pdev: physical device that owns the map
+ *
+ * Return: 0 on success, 1 on allocation failure
+ */
+static int ol_txrx_peer_find_map_attach(struct ol_txrx_pdev_t *pdev)
+{
+ int num_ids, map_bytes;
+
+ /* one slot per possible peer ID (IDs run 0..max inclusive) */
+ num_ids = ol_cfg_max_peer_id(pdev->ctrl_pdev) + 1;
+ map_bytes = num_ids * sizeof(pdev->peer_id_to_obj_map[0]);
+ pdev->peer_id_to_obj_map = cdf_mem_malloc(map_bytes);
+ if (!pdev->peer_id_to_obj_map)
+  return 1; /* failure */
+
+ /*
+  * Strictly the map needs no initialization, since each entry is
+  * written before it is used - but zeroing the unused entries makes
+  * debugging much easier.
+  */
+ cdf_mem_set(pdev->peer_id_to_obj_map, map_bytes, 0);
+
+ return 0; /* success */
+}
+
+/* Free the peer ID -> peer object map allocated by map_attach. */
+static void ol_txrx_peer_find_map_detach(struct ol_txrx_pdev_t *pdev)
+{
+ cdf_mem_free(pdev->peer_id_to_obj_map);
+}
+
+/**
+ * ol_txrx_peer_find_add_id() - record a new target-assigned peer ID
+ * @pdev: physical device holding the ID map and MAC hash
+ * @peer_mac_addr: MAC address of the peer (must be aligned)
+ * @peer_id: ID assigned by the target
+ *
+ * Looks the peer up by MAC address and, if found, stores it in the
+ * peer_id -> object map and records the ID in the peer object.
+ */
+static inline void
+ol_txrx_peer_find_add_id(struct ol_txrx_pdev_t *pdev,
+    uint8_t *peer_mac_addr, uint16_t peer_id)
+{
+ struct ol_txrx_peer_t *peer;
+
+ /*
+  * Fix: validate peer_id before it is used as an index into
+  * peer_id_to_obj_map (sized ol_cfg_max_peer_id() + 1).  The
+  * original wrote to the map unconditionally, so a corrupt or
+  * invalid ID from the target caused an out-of-bounds write.
+  */
+ if (peer_id == HTT_INVALID_PEER ||
+     peer_id > ol_cfg_max_peer_id(pdev->ctrl_pdev)) {
+  TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+      "%s: invalid peer ID %d\n", __func__, peer_id);
+  return;
+ }
+ /* check if there's already a peer object with this MAC address */
+ peer =
+  ol_txrx_peer_find_hash_find(pdev, peer_mac_addr,
+         1 /* is aligned */, 0);
+ TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1, "%s: peer %p ID %d\n", __func__,
+     peer, peer_id);
+ if (peer) {
+  /* peer's ref count was already incremented by
+     peer_find_hash_find */
+  pdev->peer_id_to_obj_map[peer_id] = peer;
+  /*
+   * remove the reference added in ol_txrx_peer_find_hash_find.
+   * the reference for the first peer id is already added in
+   * ol_txrx_peer_attach.
+   * Riva/Pronto has one peer id for each peer.
+   * Peregrine/Rome has two peer id for each peer.
+   */
+  if (peer->peer_ids[0] == HTT_INVALID_PEER) {
+   ol_txrx_peer_unref_delete(peer);
+  }
+  if (ol_txrx_peer_find_add_id_to_obj(peer, peer_id)) {
+   /* TBDXXX: assert for now */
+   cdf_assert(0);
+  }
+  return;
+ }
+ /*
+  * Currently peer IDs are assigned for vdevs as well as peers.
+  * If the peer ID is for a vdev, then we will fail to find a peer
+  * with a matching MAC address.
+  */
+ /* TXRX_ASSERT2(0); */
+}
+
+/*=== allocation / deallocation function definitions ========================*/
+
+/**
+ * ol_txrx_peer_find_attach() - set up both peer lookup structures
+ * @pdev: physical device being initialized
+ *
+ * Brings up the peer ID map and the MAC hash together, rolling back
+ * the map if the hash allocation fails so nothing leaks.
+ *
+ * Return: 0 on success, 1 on failure
+ */
+int ol_txrx_peer_find_attach(struct ol_txrx_pdev_t *pdev)
+{
+ if (ol_txrx_peer_find_map_attach(pdev) != 0)
+  return 1; /* failure */
+
+ if (ol_txrx_peer_find_hash_attach(pdev) != 0) {
+  /* undo the map allocation so nothing leaks */
+  ol_txrx_peer_find_map_detach(pdev);
+  return 1; /* failure */
+ }
+
+ return 0; /* success */
+}
+
+/* Tear down both peer lookup structures (ID map and MAC hash). */
+void ol_txrx_peer_find_detach(struct ol_txrx_pdev_t *pdev)
+{
+ ol_txrx_peer_find_map_detach(pdev);
+ ol_txrx_peer_find_hash_detach(pdev);
+}
+
+/*=== function definitions for message handling =============================*/
+
+/**
+ * ol_rx_peer_map_handler() - handle a PEER_MAP indication from the target
+ * @pdev: data path physical device
+ * @peer_id: ID the target assigned to the peer
+ * @vdev_id: virtual device the peer belongs to (unused here)
+ * @peer_mac_addr: MAC address of the peer (assumed aligned)
+ * @tx_ready: whether tx to the peer may start (unused here)
+ *
+ * Only records the peer_id -> peer object mapping; vdev_id and
+ * tx_ready are ignored by this implementation.
+ */
+void
+ol_rx_peer_map_handler(ol_txrx_pdev_handle pdev,
+         uint16_t peer_id,
+         uint8_t vdev_id, uint8_t *peer_mac_addr, int tx_ready)
+{
+ ol_txrx_peer_find_add_id(pdev, peer_mac_addr, peer_id);
+}
+
+/* Intentionally empty stub: tx-ready events need no action here. */
+void ol_txrx_peer_tx_ready_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
+{
+}
+
+/**
+ * ol_rx_peer_unmap_handler() - handle a PEER_UNMAP indication
+ * @pdev: data path physical device
+ * @peer_id: ID being released by the target
+ *
+ * Clears the peer_id -> object map entry and drops the reference that
+ * entry held on the peer (freeing the peer if it was the last one).
+ */
+void ol_rx_peer_unmap_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
+{
+ struct ol_txrx_peer_t *peer;
+
+ /*
+  * Fix: the original still executed
+  * pdev->peer_id_to_obj_map[peer_id] = NULL for
+  * peer_id == HTT_INVALID_PEER, indexing far past the end of a map
+  * sized ol_cfg_max_peer_id() + 1.  Reject the invalid ID before
+  * touching the map.
+  */
+ if (peer_id == HTT_INVALID_PEER) {
+  TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+      "%s: invalid peer ID %d\n", __func__, peer_id);
+  return;
+ }
+ peer = pdev->peer_id_to_obj_map[peer_id];
+ TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
+     "%s: peer %p with ID %d to be unmapped.\n", __func__, peer,
+     peer_id);
+ pdev->peer_id_to_obj_map[peer_id] = NULL;
+ /*
+  * Currently peer IDs are assigned for vdevs as well as peers.
+  * If the peer ID is for a vdev, then the peer pointer stored
+  * in peer_id_to_obj_map will be NULL.
+  */
+ if (!peer)
+  return;
+ /*
+  * Remove a reference to the peer.
+  * If there are no more references, delete the peer object.
+  */
+ TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+     "%s: Remove the ID %d reference to peer %p\n",
+     __func__, peer_id, peer);
+ ol_txrx_peer_unref_delete(peer);
+}
+
+/**
+ * ol_txrx_assoc_peer_find() - get the vdev's cached "last real peer"
+ * @vdev: virtual device to query
+ *
+ * Hands out the cached peer only if it exists and the target has
+ * already assigned it an HTT peer ID.  On success the peer's ref
+ * count is incremented under last_real_peer_mutex; the caller owns
+ * that reference.
+ *
+ * Return: referenced peer, or NULL if none is available
+ */
+struct ol_txrx_peer_t *ol_txrx_assoc_peer_find(struct ol_txrx_vdev_t *vdev)
+{
+ struct ol_txrx_peer_t *found = NULL;
+
+ cdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
+ if (vdev->last_real_peer &&
+     vdev->last_real_peer->peer_ids[0] != HTT_INVALID_PEER_ID) {
+  found = vdev->last_real_peer;
+  cdf_atomic_inc(&found->ref_cnt);
+ }
+ cdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
+ return found;
+}
+
+/*=== function definitions for debug ========================================*/
+
+#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
+/**
+ * ol_txrx_peer_find_display() - dump the peer ID map and MAC hash table
+ * @pdev: physical device whose lookup structures are printed
+ * @indent: number of leading spaces for each output line
+ *
+ * Debug-only (TXRX_DEBUG_LEVEL > 5); logs every non-NULL ID map entry
+ * and every hash bin occupant with its MAC address.
+ */
+void ol_txrx_peer_find_display(ol_txrx_pdev_handle pdev, int indent)
+{
+ int i, max_peers;
+
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+    "%*speer map:\n", indent, " ");
+ max_peers = ol_cfg_max_peer_id(pdev->ctrl_pdev) + 1;
+ for (i = 0; i < max_peers; i++) {
+  if (pdev->peer_id_to_obj_map[i]) {
+   CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+      "%*sid %d -> %p\n",
+      indent + 4, " ", i,
+      pdev->peer_id_to_obj_map[i]);
+  }
+ }
+ CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+    "%*speer hash table:\n", indent, " ");
+ for (i = 0; i <= pdev->peer_hash.mask; i++) {
+  if (!TAILQ_EMPTY(&pdev->peer_hash.bins[i])) {
+   struct ol_txrx_peer_t *peer;
+   TAILQ_FOREACH(peer, &pdev->peer_hash.bins[i],
+          hash_list_elem) {
+    CDF_TRACE(CDF_MODULE_ID_TXRX,
+       CDF_TRACE_LEVEL_INFO_LOW,
+       "%*shash idx %d -> %p (%02x:%02x:%02x:%02x:%02x:%02x)\n",
+       indent + 4, " ", i, peer,
+       peer->mac_addr.raw[0],
+       peer->mac_addr.raw[1],
+       peer->mac_addr.raw[2],
+       peer->mac_addr.raw[3],
+       peer->mac_addr.raw[4],
+       peer->mac_addr.raw[5]);
+   }
+  }
+ }
+}
+#endif /* if TXRX_DEBUG_LEVEL */
diff --git a/dp/txrx/ol_txrx_peer_find.h b/dp/txrx/ol_txrx_peer_find.h
new file mode 100644
index 000000000000..25886cae1cf0
--- /dev/null
+++ b/dp/txrx/ol_txrx_peer_find.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2011, 2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_txrx_peer_find.h
+ * @brief Define the API for the rx peer lookup datapath module.
+ */
+#ifndef _OL_TXRX_PEER_FIND__H_
+#define _OL_TXRX_PEER_FIND__H_
+
+#include <htt.h> /* HTT_INVALID_PEER */
+#include <ol_txrx_types.h> /* ol_txrx_pdev_t, etc. */
+#include <ol_txrx_internal.h> /* TXRX_ASSERT */
+
+int ol_txrx_peer_find_attach(struct ol_txrx_pdev_t *pdev);
+
+void ol_txrx_peer_find_detach(struct ol_txrx_pdev_t *pdev);
+
+/**
+ * ol_txrx_peer_find_mac_addr_cmp() - compare two aligned MAC addresses
+ * @mac_addr1: first address
+ * @mac_addr2: second address
+ *
+ * Return: 0 if the addresses are equal, non-zero otherwise
+ * (memcmp-style result)
+ */
+static inline
+int
+ol_txrx_peer_find_mac_addr_cmp(union ol_txrx_align_mac_addr_t *mac_addr1,
+          union ol_txrx_align_mac_addr_t *mac_addr2)
+{
+ return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
+   /*
+    * Intentionally use & rather than &&.
+    * because the operands are binary rather than generic bool,
+    * the functionality is equivalent.
+    * Using && has the advantage of short-circuited evaluation,
+    * but using & has the advantage of no conditional branching,
+    * which is a more significant benefit.
+    */
+   & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
+}
+
+/**
+ * ol_txrx_peer_find_by_id() - look up a peer object from its peer ID
+ * @pdev: physical device holding the peer ID map
+ * @peer_id: target-assigned peer ID to resolve
+ *
+ * Bounds-checks the ID against ol_cfg_max_peer_id before indexing.
+ * NOTE: does not add a reference; the returned pointer is only safe
+ * while the caller otherwise prevents the peer from being deleted.
+ *
+ * Return: the peer object if the ID maps to a valid (not yet
+ * detached) peer, otherwise NULL
+ */
+static inline
+struct ol_txrx_peer_t *ol_txrx_peer_find_by_id(struct ol_txrx_pdev_t *pdev,
+            uint16_t peer_id)
+{
+ struct ol_txrx_peer_t *peer;
+ peer = (peer_id > ol_cfg_max_peer_id(pdev->ctrl_pdev)) ? NULL :
+        pdev->peer_id_to_obj_map[peer_id];
+ /*
+  * Currently, peer IDs are assigned to vdevs as well as peers.
+  * If the peer ID is for a vdev, the peer_id_to_obj_map entry
+  * will hold NULL rather than a valid peer pointer.
+  */
+ /* TXRX_ASSERT2(peer != NULL); */
+ /*
+  * Only return the peer object if it is valid,
+  * i.e. it has not already been detached.
+  * If it has already been detached, then returning the
+  * peer object could result in unpausing the peer's tx queues
+  * in HL systems, which is an invalid operation following peer_detach.
+  */
+ if (peer && peer->valid)
+  return peer;
+
+ return NULL;
+}
+
+void
+ol_txrx_peer_find_hash_add(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_peer_t *peer);
+
+struct ol_txrx_peer_t *ol_txrx_peer_find_hash_find(struct ol_txrx_pdev_t *pdev,
+ uint8_t *peer_mac_addr,
+ int mac_addr_is_aligned,
+ uint8_t check_valid);
+
+struct
+ol_txrx_peer_t *ol_txrx_peer_vdev_find_hash(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_vdev_t *vdev,
+ uint8_t *peer_mac_addr,
+ int mac_addr_is_aligned,
+ uint8_t check_valid);
+
+void
+ol_txrx_peer_find_hash_remove(struct ol_txrx_pdev_t *pdev,
+ struct ol_txrx_peer_t *peer);
+
+void ol_txrx_peer_find_hash_erase(struct ol_txrx_pdev_t *pdev);
+
+struct ol_txrx_peer_t *ol_txrx_assoc_peer_find(struct ol_txrx_vdev_t *vdev);
+
+#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
+void ol_txrx_peer_find_display(ol_txrx_pdev_handle pdev, int indent);
+#else
+#define ol_txrx_peer_find_display(pdev, indent)
+#endif /* TXRX_DEBUG_LEVEL */
+
+#endif /* _OL_TXRX_PEER_FIND__H_ */
diff --git a/dp/txrx/ol_txrx_types.h b/dp/txrx/ol_txrx_types.h
new file mode 100644
index 000000000000..3fc470bd107e
--- /dev/null
+++ b/dp/txrx/ol_txrx_types.h
@@ -0,0 +1,1011 @@
+/*
+ * Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file ol_txrx_types.h
+ * @brief Define the major data types used internally by the host datapath SW.
+ */
+#ifndef _OL_TXRX_TYPES__H_
+#define _OL_TXRX_TYPES__H_
+
+#include <cdf_nbuf.h> /* cdf_nbuf_t */
+#include <cdf_memory.h>
+#include <cds_queue.h> /* TAILQ */
+#include <a_types.h> /* A_UINT8 */
+#include <htt.h> /* htt_sec_type, htt_pkt_type, etc. */
+#include <cdf_atomic.h> /* cdf_atomic_t */
+#include <wdi_event_api.h> /* wdi_event_subscribe */
+#include <cdf_softirq_timer.h> /* cdf_softirq_timer_t */
+#include <cdf_lock.h> /* cdf_spinlock */
+#include <pktlog.h> /* ol_pktlog_dev_handle */
+#include <ol_txrx_stats.h>
+#include <txrx.h>
+#include "ol_txrx_htt_api.h"
+#include "ol_htt_tx_api.h"
+#include "ol_htt_rx_api.h"
+#include <ol_ctrl_txrx_api.h>
+#include <ol_txrx_ctrl_api.h>
+
+
+/*
+ * The target may allocate multiple IDs for a peer.
+ * In particular, the target may allocate one ID to represent the
+ * multicast key the peer uses, and another ID to represent the
+ * unicast key the peer uses.
+ */
+#define MAX_NUM_PEER_ID_PER_PEER 8
+
+#define OL_TXRX_MAC_ADDR_LEN 6
+
+/* OL_TXRX_NUM_EXT_TIDS -
+ * 16 "real" TIDs + 3 pseudo-TIDs for mgmt, mcast/bcast & non-QoS data
+ */
+#define OL_TXRX_NUM_EXT_TIDS 19
+
+#define OL_TX_NUM_QOS_TIDS 16 /* 16 regular TIDs */
+#define OL_TX_NON_QOS_TID 16
+#define OL_TX_MGMT_TID 17
+#define OL_TX_NUM_TIDS 18
+#define OL_RX_MCAST_TID 18 /* Mcast TID only between f/w & host */
+
+#define OL_TX_VDEV_MCAST_BCAST 0 // HTT_TX_EXT_TID_MCAST_BCAST
+#define OL_TX_VDEV_DEFAULT_MGMT 1 // HTT_TX_EXT_TID_DEFALT_MGMT
+#define OL_TX_VDEV_NUM_QUEUES 2
+
+#define OL_TXRX_MGMT_TYPE_BASE htt_pkt_num_types
+#define OL_TXRX_MGMT_NUM_TYPES 8
+
+#define OL_TX_MUTEX_TYPE cdf_spinlock_t
+#define OL_RX_MUTEX_TYPE cdf_spinlock_t
+
+/* TXRX Histogram defines */
+#define TXRX_DATA_HISTROGRAM_GRANULARITY 1000
+#define TXRX_DATA_HISTROGRAM_NUM_INTERVALS 100
+
+struct ol_txrx_pdev_t;
+struct ol_txrx_vdev_t;
+struct ol_txrx_peer_t;
+
+struct ol_pdev_t;
+typedef struct ol_pdev_t *ol_pdev_handle;
+
+struct ol_vdev_t;
+typedef struct ol_vdev_t *ol_vdev_handle;
+
+struct ol_peer_t;
+typedef struct ol_peer_t *ol_peer_handle;
+
+/* rx filter related */
+#define MAX_PRIVACY_FILTERS 4 /* max privacy filters */
+
+enum privacy_filter {
+ PRIVACY_FILTER_ALWAYS,
+ PRIVACY_FILTER_KEY_UNAVAILABLE,
+};
+
+enum privacy_filter_packet_type {
+ PRIVACY_FILTER_PACKET_UNICAST,
+ PRIVACY_FILTER_PACKET_MULTICAST,
+ PRIVACY_FILTER_PACKET_BOTH
+};
+
+struct privacy_exemption {
+ /* ethertype -
+ * type of ethernet frames this filter applies to, in host byte order
+ */
+ uint16_t ether_type;
+ enum privacy_filter filter_type;
+ enum privacy_filter_packet_type packet_type;
+};
+
+enum ol_tx_frm_type {
+ ol_tx_frm_std = 0, /* regular frame - no added header fragments */
+ ol_tx_frm_tso, /* TSO segment, with a modified IP header added */
+ ol_tx_frm_audio, /* audio frames, with a custom LLC/SNAP hdr added */
+ ol_tx_frm_no_free, /* frame requires special tx completion callback */
+};
+
+/*
+ * struct ol_tx_desc_t - host-side tx descriptor, paired with an HTT tx
+ * descriptor (and optionally a fragment descriptor) shared with the target.
+ */
+struct ol_tx_desc_t {
+	cdf_nbuf_t netbuf;	/* network buffer holding the tx frame */
+	void *htt_tx_desc;	/* companion HTT tx descriptor */
+	uint16_t id;		/* sw descriptor ID */
+	uint32_t htt_tx_desc_paddr;	/* physical address of htt_tx_desc */
+	void *htt_frag_desc; /* struct msdu_ext_desc_t * */
+	uint32_t htt_frag_desc_paddr;	/* physical address of htt_frag_desc */
+	cdf_atomic_t ref_cnt;	/* reference count on this descriptor */
+	enum htt_tx_status status;	/* tx completion status */
+
+#ifdef QCA_COMPUTE_TX_DELAY
+	/* time the frame entered the tx path, for tx delay accounting */
+	uint32_t entry_timestamp_ticks;
+#endif
+	/*
+	 * Allow tx descriptors to be stored in (doubly-linked) lists.
+	 * This is mainly used for HL tx queuing and scheduling, but is
+	 * also used by LL+HL for batch processing of tx frames.
+	 */
+	TAILQ_ENTRY(ol_tx_desc_t) tx_desc_list_elem;
+
+	/*
+	 * Remember whether the tx frame is a regular packet, or whether
+	 * the driver added extra header fragments (e.g. a modified IP header
+	 * for TSO fragments, or an added LLC/SNAP header for audio interworking
+	 * data) that need to be handled in a special manner.
+	 * This field is filled in with the ol_tx_frm_type enum.
+	 */
+	uint8_t pkt_type;
+#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
+	/* used by tx encap, to restore the os buf start offset
+	   after tx complete */
+	uint8_t orig_l2_hdr_bytes;
+#endif
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+	/* flow pool this descriptor was taken from (see ol_tx_flow_pool_t) */
+	struct ol_tx_flow_pool_t *pool;
+#endif
+	/* TSO segment state; presumably used when pkt_type is ol_tx_frm_tso
+	 * — confirm in the tx TSO path */
+	void *tso_desc;
+};
+
+typedef TAILQ_HEAD(some_struct_name, ol_tx_desc_t) ol_tx_desc_list;
+
+/*
+ * union ol_tx_desc_list_elem_t - tx descriptor freelist element: while a
+ * descriptor is unallocated, its storage doubles as the freelist "next" link.
+ */
+union ol_tx_desc_list_elem_t {
+	union ol_tx_desc_list_elem_t *next;
+	struct ol_tx_desc_t tx_desc;
+};
+
+/*
+ * union ol_txrx_align_mac_addr_t - MAC address overlaid with 16-bit and
+ * 32-bit views, allowing addresses to be compared a word at a time rather
+ * than byte by byte (the peer-find hash compare uses the align4 view).
+ * The wide views are only safe if the object is suitably aligned.
+ */
+union ol_txrx_align_mac_addr_t {
+	uint8_t raw[OL_TXRX_MAC_ADDR_LEN];
+	struct {
+		uint16_t bytes_ab;
+		uint16_t bytes_cd;
+		uint16_t bytes_ef;
+	} align2;
+	struct {
+		uint32_t bytes_abcd;
+		uint16_t bytes_ef;
+	} align4;
+};
+
+struct ol_rx_reorder_timeout_list_elem_t {
+ TAILQ_ENTRY(ol_rx_reorder_timeout_list_elem_t)
+ reorder_timeout_list_elem;
+ uint32_t timestamp_ms;
+ struct ol_txrx_peer_t *peer;
+ uint8_t tid;
+ uint8_t active;
+};
+
+#define TXRX_TID_TO_WMM_AC(_tid) ( \
+ (((_tid) >> 1) == 3) ? TXRX_WMM_AC_VO : \
+ (((_tid) >> 1) == 2) ? TXRX_WMM_AC_VI : \
+ (((_tid) ^ ((_tid) >> 1)) & 0x1) ? TXRX_WMM_AC_BK : \
+ TXRX_WMM_AC_BE)
+
+struct ol_tx_reorder_cat_timeout_t {
+ TAILQ_HEAD(, ol_rx_reorder_timeout_list_elem_t) virtual_timer_list;
+ cdf_softirq_timer_t timer;
+ uint32_t duration_ms;
+ struct ol_txrx_pdev_t *pdev;
+};
+
+enum ol_tx_queue_status {
+ ol_tx_queue_empty = 0,
+ ol_tx_queue_active,
+ ol_tx_queue_paused,
+};
+
+/*
+ * struct ol_txrx_msdu_info_t - per-MSDU tx metadata bundle: the HTT-level
+ * msdu info, the peer the MSDU is associated with, and TSO state.
+ */
+struct ol_txrx_msdu_info_t {
+	struct htt_msdu_info_t htt;
+	struct ol_txrx_peer_t *peer;
+	struct cdf_tso_info_t tso_info;
+};
+
+enum {
+ ol_tx_aggr_untried = 0,
+ ol_tx_aggr_enabled,
+ ol_tx_aggr_disabled,
+ ol_tx_aggr_retry,
+ ol_tx_aggr_in_progress,
+};
+
+struct ol_tx_frms_queue_t {
+ /* list_elem -
+ * Allow individual tx frame queues to be linked together into
+ * scheduler queues of tx frame queues
+ */
+ TAILQ_ENTRY(ol_tx_frms_queue_t) list_elem;
+ uint8_t aggr_state;
+ struct {
+ uint8_t total;
+ /* pause requested by ctrl SW rather than txrx SW */
+ uint8_t by_ctrl;
+ } paused_count;
+ uint8_t ext_tid;
+ uint16_t frms;
+ uint32_t bytes;
+ ol_tx_desc_list head;
+ enum ol_tx_queue_status flag;
+};
+
+enum {
+ ol_tx_log_entry_type_invalid,
+ ol_tx_log_entry_type_queue_state,
+ ol_tx_log_entry_type_enqueue,
+ ol_tx_log_entry_type_dequeue,
+ ol_tx_log_entry_type_drop,
+ ol_tx_log_entry_type_queue_free,
+
+ ol_tx_log_entry_type_wrap,
+};
+
+struct ol_tx_log_queue_state_var_sz_t {
+ uint32_t active_bitmap;
+ uint16_t credit;
+ uint8_t num_cats_active;
+ uint8_t data[1];
+};
+
+struct ol_tx_log_queue_add_t {
+ uint8_t num_frms;
+ uint8_t tid;
+ uint16_t peer_id;
+ uint16_t num_bytes;
+};
+
+struct ol_mac_addr {
+ uint8_t mac_addr[OL_TXRX_MAC_ADDR_LEN];
+};
+
+#ifndef OL_TXRX_NUM_LOCAL_PEER_IDS
+#define OL_TXRX_NUM_LOCAL_PEER_IDS 33 /* default */
+#endif
+
+#ifndef ol_txrx_local_peer_id_t
+#define ol_txrx_local_peer_id_t uint8_t /* default */
+#endif
+
+#ifdef QCA_COMPUTE_TX_DELAY
+/*
+ * Delay histogram bins: 16 bins of 10 ms each to count delays
+ * from 0-160 ms, plus one overflow bin for delays > 160 ms.
+ */
+#define QCA_TX_DELAY_HIST_INTERNAL_BINS 17
+#define QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS 10
+
+struct ol_tx_delay_data {
+ struct {
+ uint64_t transmit_sum_ticks;
+ uint64_t queue_sum_ticks;
+ uint32_t transmit_num;
+ uint32_t queue_num;
+ } avgs;
+ uint16_t hist_bins_queue[QCA_TX_DELAY_HIST_INTERNAL_BINS];
+};
+
+#endif /* QCA_COMPUTE_TX_DELAY */
+
+/* Thermal Mitigation */
+
+enum throttle_level {
+ THROTTLE_LEVEL_0,
+ THROTTLE_LEVEL_1,
+ THROTTLE_LEVEL_2,
+ THROTTLE_LEVEL_3,
+ /* Invalid */
+ THROTTLE_LEVEL_MAX,
+};
+
+enum throttle_phase {
+ THROTTLE_PHASE_OFF,
+ THROTTLE_PHASE_ON,
+ /* Invalid */
+ THROTTLE_PHASE_MAX,
+};
+
+#define THROTTLE_TX_THRESHOLD (100)
+
+typedef void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt);
+
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+
+/**
+ * enum flow_pool_status - flow pool status
+ * @FLOW_POOL_ACTIVE_UNPAUSED : pool is active (can take/put descriptors)
+ * and network queues are unpaused
+ * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
+ * and network queues are paused
+ * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
+ * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
+ */
+enum flow_pool_status {
+ FLOW_POOL_ACTIVE_UNPAUSED = 0,
+ FLOW_POOL_ACTIVE_PAUSED = 1,
+ FLOW_POOL_INVALID = 2,
+ FLOW_POOL_INACTIVE = 3,
+};
+
+/**
+ * struct ol_txrx_pool_stats - flow pool related statistics
+ * @pool_map_count: flow pool map received
+ * @pool_unmap_count: flow pool unmap received
+ * @pkt_drop_no_pool: packets dropped due to unavailability of pool
+ * @pkt_drop_no_desc: packets dropped due to unavailability of descriptors
+ */
+struct ol_txrx_pool_stats {
+ uint16_t pool_map_count;
+ uint16_t pool_unmap_count;
+ uint16_t pkt_drop_no_pool;
+ uint16_t pkt_drop_no_desc;
+};
+
+/**
+ * struct ol_tx_flow_pool_t - flow_pool info
+ * @flow_pool_list_elem: flow_pool_list element
+ * @flow_pool_lock: flow_pool lock
+ * @flow_pool_id: flow_pool id
+ * @flow_pool_size: flow_pool size
+ * @avail_desc: available descriptors
+ * @deficient_desc: deficient descriptors
+ * @status: flow pool status
+ * @flow_type: flow pool type
+ * @member_flow_id: member flow id
+ * @stop_th: stop threshold
+ * @start_th: start threshold
+ * @freelist: tx descriptor freelist
+ */
+struct ol_tx_flow_pool_t {
+ TAILQ_ENTRY(ol_tx_flow_pool_t) flow_pool_list_elem;
+ cdf_spinlock_t flow_pool_lock;
+ uint8_t flow_pool_id;
+ uint16_t flow_pool_size;
+ uint16_t avail_desc;
+ uint16_t deficient_desc;
+ enum flow_pool_status status;
+ enum htt_flow_type flow_type;
+ uint8_t member_flow_id;
+ uint16_t stop_th;
+ uint16_t start_th;
+ union ol_tx_desc_list_elem_t *freelist;
+};
+
+#endif
+
+/*
+ * As depicted in the diagram below, the pdev contains an array of
+ * NUM_EXT_TID ol_tx_active_queues_in_tid_t elements.
+ * Each element identifies all the tx queues that are active for
+ * the TID, from the different peers.
+ *
+ * Each peer contains an array of NUM_EXT_TID ol_tx_frms_queue_t elements.
+ * Each element identifies the tx frames for the TID that need to be sent
+ * to the peer.
+ *
+ *
+ * pdev: ol_tx_active_queues_in_tid_t active_in_tids[NUM_EXT_TIDS]
+ * TID
+ * 0 1 2 17
+ * +============+============+============+== ==+============+
+ * | active (y) | active (n) | active (n) | | active (y) |
+ * |------------+------------+------------+-- --+------------|
+ * | queues | queues | queues | | queues |
+ * +============+============+============+== ==+============+
+ * | |
+ * .--+-----------------------------------------------'
+ * | |
+ * | | peer X: peer Y:
+ * | | ol_tx_frms_queue_t ol_tx_frms_queue_t
+ * | | tx_queues[NUM_EXT_TIDS] tx_queues[NUM_EXT_TIDS]
+ * | | TID +======+ TID +======+
+ * | `---->| next |-------------------------->| next |--X
+ * | 0 | prev | .------. .------. 0 | prev | .------.
+ * | | txq |-->|txdesc|-->|txdesc| | txq |-->|txdesc|
+ * | +======+ `------' `------' +======+ `------'
+ * | | next | | | 1 | next | |
+ * | 1 | prev | v v | prev | v
+ * | | txq | .------. .------. | txq | .------.
+ * | +======+ |netbuf| |netbuf| +======+ |netbuf|
+ * | | next | `------' `------' | next | `------'
+ * | 2 | prev | 2 | prev |
+ * | | txq | | txq |
+ * | +======+ +======+
+ * | | | | |
+ * |
+ * |
+ * | | | | |
+ * | +======+ +======+
+ * `------->| next |--X | next |
+ * 17 | prev | .------. 17 | prev |
+ * | txq |-->|txdesc| | txq |
+ * +======+ `------' +======+
+ * |
+ * v
+ * .------.
+ * |netbuf|
+ * `------'
+ */
+struct ol_txrx_pdev_t {
+ /* ctrl_pdev - handle for querying config info */
+ ol_pdev_handle ctrl_pdev;
+
+ /* osdev - handle for mem alloc / free, map / unmap */
+ cdf_device_t osdev;
+
+ htt_pdev_handle htt_pdev;
+
+#ifdef WLAN_FEATURE_FASTPATH
+ struct CE_handle *ce_tx_hdl; /* Handle to Tx packet posting CE */
+ struct CE_handle *ce_htt_msg_hdl; /* Handle to TxRx completion CE */
+#endif /* WLAN_FEATURE_FASTPATH */
+
+ struct {
+ int is_high_latency;
+ int host_addba;
+ int ll_pause_txq_limit;
+ int default_tx_comp_req;
+ } cfg;
+
+ /* WDI subscriber's event list */
+ wdi_event_subscribe **wdi_event_list;
+
+#ifndef REMOVE_PKT_LOG
+ /* Pktlog pdev */
+ struct ol_pktlog_dev_t *pl_dev;
+#endif /* #ifndef REMOVE_PKT_LOG */
+
+ enum ol_sec_type sec_types[htt_num_sec_types];
+ /* standard frame type */
+ enum wlan_frm_fmt frame_format;
+ enum htt_pkt_type htt_pkt_type;
+
+#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
+ /* txrx encap/decap */
+ uint8_t sw_tx_encap;
+ uint8_t sw_rx_decap;
+ uint8_t target_tx_tran_caps;
+ uint8_t target_rx_tran_caps;
+ /* llc process */
+ uint8_t sw_tx_llc_proc_enable;
+ uint8_t sw_rx_llc_proc_enable;
+ /* A-MSDU */
+ uint8_t sw_subfrm_hdr_recovery_enable;
+ /* Protected Frame bit handling */
+ uint8_t sw_pf_proc_enable;
+#endif
+ /*
+ * target tx credit -
+ * not needed for LL, but used for HL download scheduler to keep
+ * track of roughly how much space is available in the target for
+ * tx frames
+ */
+ cdf_atomic_t target_tx_credit;
+ cdf_atomic_t orig_target_tx_credit;
+
+ /* Peer mac address to staid mapping */
+ struct ol_mac_addr mac_to_staid[WLAN_MAX_STA_COUNT + 3];
+
+ /* ol_txrx_vdev list */
+ TAILQ_HEAD(, ol_txrx_vdev_t) vdev_list;
+
+ /* peer ID to peer object map (array of pointers to peer objects) */
+ struct ol_txrx_peer_t **peer_id_to_obj_map;
+
+ struct {
+ unsigned mask;
+ unsigned idx_bits;
+ TAILQ_HEAD(, ol_txrx_peer_t) * bins;
+ } peer_hash;
+
+ /* rx specific processing */
+ struct {
+ struct {
+ TAILQ_HEAD(, ol_rx_reorder_t) waitlist;
+ uint32_t timeout_ms;
+ } defrag;
+ struct {
+ int defrag_timeout_check;
+ int dup_check;
+ } flags;
+
+ struct {
+ struct ol_tx_reorder_cat_timeout_t
+ access_cats[TXRX_NUM_WMM_AC];
+ } reorder_timeout;
+ cdf_spinlock_t mutex;
+ } rx;
+
+ /* rx proc function */
+ void (*rx_opt_proc)(struct ol_txrx_vdev_t *vdev,
+ struct ol_txrx_peer_t *peer,
+ unsigned tid, cdf_nbuf_t msdu_list);
+
+ /* tx data delivery notification callback function */
+ struct {
+ ol_txrx_data_tx_cb func;
+ void *ctxt;
+ } tx_data_callback;
+
+ /* tx management delivery notification callback functions */
+ struct {
+ struct {
+ ol_txrx_mgmt_tx_cb download_cb;
+ ol_txrx_mgmt_tx_cb ota_ack_cb;
+ void *ctxt;
+ } callbacks[OL_TXRX_MGMT_NUM_TYPES];
+ } tx_mgmt;
+
+ struct {
+ uint16_t pool_size;
+ uint16_t num_free;
+ union ol_tx_desc_list_elem_t *freelist;
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+ uint8_t num_invalid_bin;
+ cdf_spinlock_t flow_pool_list_lock;
+ TAILQ_HEAD(flow_pool_list_t, ol_tx_flow_pool_t) flow_pool_list;
+#endif
+ uint32_t page_size;
+ uint16_t desc_reserved_size;
+ uint8_t page_divider;
+ uint32_t offset_filter;
+ struct cdf_mem_multi_page_t desc_pages;
+ } tx_desc;
+
+ uint8_t is_mgmt_over_wmi_enabled;
+#if defined(QCA_LL_TX_FLOW_CONTROL_V2)
+ struct ol_txrx_pool_stats pool_stats;
+ uint32_t num_msdu_desc;
+#ifdef QCA_LL_TX_FLOW_GLOBAL_MGMT_POOL
+ struct ol_tx_flow_pool_t *mgmt_pool;
+#endif
+#endif
+
+ struct {
+ int (*cmp)(union htt_rx_pn_t *new,
+ union htt_rx_pn_t *old,
+ int is_unicast, int opmode);
+ int len;
+ } rx_pn[htt_num_sec_types];
+
+ /* tx mutex */
+ OL_TX_MUTEX_TYPE tx_mutex;
+
+ /*
+ * peer ref mutex:
+ * 1. Protect peer object lookups until the returned peer object's
+ * reference count is incremented.
+ * 2. Provide mutex when accessing peer object lookup structures.
+ */
+ OL_RX_MUTEX_TYPE peer_ref_mutex;
+
+ /*
+ * last_real_peer_mutex:
+ * Protect lookups of any vdev's last_real_peer pointer until the
+ * reference count for the pointed-to peer object is incremented.
+ * This mutex could be in the vdev struct, but it's slightly simpler
+ * to have a single lock in the pdev struct. Since the lock is only
+ * held for an extremely short time, and since it's very unlikely for
+ * two vdev's to concurrently access the lock, there's no real
+ * benefit to having a per-vdev lock.
+ */
+ OL_RX_MUTEX_TYPE last_real_peer_mutex;
+
+ struct {
+ struct {
+ struct {
+ struct {
+ uint64_t ppdus;
+ uint64_t mpdus;
+ } normal;
+ struct {
+ /*
+ * mpdu_bad is general -
+ * replace it with the specific counters
+ * below
+ */
+ uint64_t mpdu_bad;
+ /* uint64_t mpdu_fcs; */
+ /* uint64_t mpdu_duplicate; */
+ /* uint64_t mpdu_pn_replay; */
+ /* uint64_t mpdu_bad_sender; */
+ /* ^ comment: peer not found */
+ /* uint64_t mpdu_flushed; */
+ /* uint64_t msdu_defrag_mic_err; */
+ uint64_t msdu_mc_dup_drop;
+ } err;
+ } rx;
+ } priv;
+ struct ol_txrx_stats pub;
+ } stats;
+
+#if defined(ENABLE_RX_REORDER_TRACE)
+ struct {
+ uint32_t mask;
+ uint32_t idx;
+ uint64_t cnt;
+#define TXRX_RX_REORDER_TRACE_SIZE_LOG2 8 /* 256 entries */
+ struct {
+ uint16_t reorder_idx;
+ uint16_t seq_num;
+ uint8_t num_mpdus;
+ uint8_t tid;
+ } *data;
+ } rx_reorder_trace;
+#endif /* ENABLE_RX_REORDER_TRACE */
+
+#if defined(ENABLE_RX_PN_TRACE)
+ struct {
+ uint32_t mask;
+ uint32_t idx;
+ uint64_t cnt;
+#define TXRX_RX_PN_TRACE_SIZE_LOG2 5 /* 32 entries */
+ struct {
+ struct ol_txrx_peer_t *peer;
+ uint32_t pn32;
+ uint16_t seq_num;
+ uint8_t unicast;
+ uint8_t tid;
+ } *data;
+ } rx_pn_trace;
+#endif /* ENABLE_RX_PN_TRACE */
+
+#if defined(PERE_IP_HDR_ALIGNMENT_WAR)
+ bool host_80211_enable;
+#endif
+
+ /*
+ * tx_queue only applies for HL, but is defined unconditionally to avoid
+ * wrapping references to tx_queue in "defined(CONFIG_HL_SUPPORT)"
+ * conditional compilation.
+ */
+ struct {
+ cdf_atomic_t rsrc_cnt;
+ /* threshold_lo - when to start tx desc margin replenishment */
+ uint16_t rsrc_threshold_lo;
+ /* threshold_hi - where to stop during tx desc margin
+ replenishment */
+ uint16_t rsrc_threshold_hi;
+ } tx_queue;
+
+#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
+ cdf_spinlock_t peer_stat_mutex;
+#endif
+
+ int rssi_update_shift;
+ int rssi_new_weight;
+#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
+ struct {
+ ol_txrx_local_peer_id_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
+ ol_txrx_local_peer_id_t freelist;
+ cdf_spinlock_t lock;
+ ol_txrx_peer_handle map[OL_TXRX_NUM_LOCAL_PEER_IDS];
+ } local_peer_ids;
+#endif
+
+#ifdef QCA_COMPUTE_TX_DELAY
+#ifdef QCA_COMPUTE_TX_DELAY_PER_TID
+#define QCA_TX_DELAY_NUM_CATEGORIES \
+ (OL_TX_NUM_TIDS + OL_TX_VDEV_NUM_QUEUES)
+#else
+#define QCA_TX_DELAY_NUM_CATEGORIES 1
+#endif
+ struct {
+ cdf_spinlock_t mutex;
+ struct {
+ struct ol_tx_delay_data copies[2]; /* ping-pong */
+ int in_progress_idx;
+ uint32_t avg_start_time_ticks;
+ } cats[QCA_TX_DELAY_NUM_CATEGORIES];
+ uint32_t tx_compl_timestamp_ticks;
+ uint32_t avg_period_ticks;
+ uint32_t hist_internal_bin_width_mult;
+ uint32_t hist_internal_bin_width_shift;
+ } tx_delay;
+
+ uint16_t packet_count[QCA_TX_DELAY_NUM_CATEGORIES];
+ uint16_t packet_loss_count[QCA_TX_DELAY_NUM_CATEGORIES];
+
+#endif /* QCA_COMPUTE_TX_DELAY */
+
+ struct {
+ cdf_spinlock_t mutex;
+ /* timer used to monitor the throttle "on" phase and
+ "off" phase */
+ cdf_softirq_timer_t phase_timer;
+ /* timer used to send tx frames */
+ cdf_softirq_timer_t tx_timer;
+ /* This is the time in ms of the throttling window, it will
+ * include an "on" phase and an "off" phase */
+ uint32_t throttle_period_ms;
+ /* Current throttle level set by the client ex. level 0,
+ level 1, etc */
+ enum throttle_level current_throttle_level;
+ /* Index that points to the phase within the throttle period */
+ enum throttle_phase current_throttle_phase;
+ /* Maximum number of frames to send to the target at one time */
+ uint32_t tx_threshold;
+ /* stores time in ms of on/off phase for each throttle level */
+ int throttle_time_ms[THROTTLE_LEVEL_MAX][THROTTLE_PHASE_MAX];
+ /* mark true if traffic is paused due to thermal throttling */
+ bool is_paused;
+ } tx_throttle;
+
+#ifdef IPA_OFFLOAD
+ ipa_uc_op_cb_type ipa_uc_op_cb;
+ void *osif_dev;
+#endif /* IPA_OFFLOAD */
+
+#if defined(FEATURE_TSO)
+ struct {
+ uint16_t pool_size;
+ uint16_t num_free;
+ struct cdf_tso_seg_elem_t *freelist;
+ /* tso mutex */
+ OL_TX_MUTEX_TYPE tso_mutex;
+ } tso_seg_pool;
+#endif
+ uint8_t ocb_peer_valid;
+ struct ol_txrx_peer_t *ocb_peer;
+ ol_tx_pause_callback_fp pause_cb;
+
+ struct {
+ void *lro_data;
+ void (*lro_flush_cb)(void *);
+ } lro_info;
+};
+
+/*
+ * struct ol_txrx_ocb_chan_info - per-channel entry of the OCB channel
+ * schedule (see ol_txrx_vdev_t ocb_channel_info / ocb_channel_count).
+ */
+struct ol_txrx_ocb_chan_info {
+	uint32_t chan_freq;	/* channel frequency */
+	/* when set, the rx stats header is disabled for this channel */
+	uint16_t disable_rx_stats_hdr:1;
+};
+
+struct ol_txrx_vdev_t {
+ struct ol_txrx_pdev_t *pdev; /* pdev - the physical device that is
+ the parent of this virtual device */
+ uint8_t vdev_id; /* ID used to specify a particular vdev
+ to the target */
+ void *osif_dev;
+ union ol_txrx_align_mac_addr_t mac_addr; /* MAC address */
+ /* tx paused - NO LONGER NEEDED? */
+ TAILQ_ENTRY(ol_txrx_vdev_t) vdev_list_elem; /* node in the pdev's list
+ of vdevs */
+ TAILQ_HEAD(peer_list_t, ol_txrx_peer_t) peer_list;
+ struct ol_txrx_peer_t *last_real_peer; /* last real peer created for
+ this vdev (not "self"
+ pseudo-peer) */
+ ol_txrx_tx_fp tx; /* transmit function used by this vdev */
+
+ struct {
+ /*
+ * If the vdev object couldn't be deleted immediately because
+ * it still had some peer objects left, remember that a delete
+ * was requested, so it can be deleted once all its peers have
+ * been deleted.
+ */
+ int pending;
+ /*
+ * Store a function pointer and a context argument to provide a
+ * notification for when the vdev is deleted.
+ */
+ ol_txrx_vdev_delete_cb callback;
+ void *context;
+ } delete;
+
+ /* safe mode control to bypass the encrypt and decipher process */
+ uint32_t safemode;
+
+ /* rx filter related */
+ uint32_t drop_unenc;
+ struct privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
+ uint32_t num_filters;
+
+ enum wlan_op_mode opmode;
+
+#ifdef QCA_IBSS_SUPPORT
+ /* ibss mode related */
+ int16_t ibss_peer_num; /* the number of active peers */
+ int16_t ibss_peer_heart_beat_timer; /* for detecting peer departure */
+#endif
+
+ struct {
+ struct {
+ cdf_nbuf_t head;
+ cdf_nbuf_t tail;
+ int depth;
+ } txq;
+ uint32_t paused_reason;
+ cdf_spinlock_t mutex;
+ cdf_softirq_timer_t timer;
+ int max_q_depth;
+ bool is_q_paused;
+ bool is_q_timer_on;
+ uint32_t q_pause_cnt;
+ uint32_t q_unpause_cnt;
+ uint32_t q_overflow_cnt;
+ } ll_pause;
+ bool disable_intrabss_fwd;
+ cdf_atomic_t os_q_paused;
+ uint16_t tx_fl_lwm;
+ uint16_t tx_fl_hwm;
+ cdf_spinlock_t flow_control_lock;
+ ol_txrx_tx_flow_control_fp osif_flow_control_cb;
+ void *osif_fc_ctx;
+ uint16_t wait_on_peer_id;
+ cdf_event_t wait_delete_comp;
+#if defined(FEATURE_TSO)
+ struct {
+ int pool_elems; /* total number of elements in the pool */
+ int alloc_cnt; /* number of allocated elements */
+ uint32_t *freelist; /* free list of cdf_tso_seg_elem_t */
+ } tso_pool_t;
+#endif
+
+	/* last channel change event received */
+ struct {
+ bool is_valid; /* whether the rest of the members are valid */
+ uint16_t mhz;
+ uint16_t band_center_freq1;
+ uint16_t band_center_freq2;
+ WLAN_PHY_MODE phy_mode;
+ } ocb_channel_event;
+
+	/* Per-channel information for the OCB channel schedule */
+ struct ol_txrx_ocb_chan_info *ocb_channel_info;
+ uint32_t ocb_channel_count;
+
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+ struct ol_tx_flow_pool_t *pool;
+#endif
+};
+
+struct ol_rx_reorder_array_elem_t {
+ cdf_nbuf_t head;
+ cdf_nbuf_t tail;
+};
+
+/*
+ * struct ol_rx_reorder_t - per-TID rx reorder state for a peer; also used
+ * to track rx fragments awaiting defragmentation (defrag waitlist).
+ */
+struct ol_rx_reorder_t {
+	uint8_t win_sz;		/* reorder window size */
+	/* window mask (assumed win_sz - 1, for index wrap — confirm) */
+	uint8_t win_sz_mask;
+	uint8_t num_mpdus;	/* MPDUs currently buffered for reorder */
+	struct ol_rx_reorder_array_elem_t *array;
+	/* base - single rx reorder element used for non-aggr cases */
+	struct ol_rx_reorder_array_elem_t base;
+#if defined(QCA_SUPPORT_OL_RX_REORDER_TIMEOUT)
+	struct ol_rx_reorder_timeout_list_elem_t timeout;
+#endif
+	/* only used for defrag right now */
+	TAILQ_ENTRY(ol_rx_reorder_t) defrag_waitlist_elem;
+	/* defrag discard time in ms (absolute vs. interval — confirm
+	 * against pdev->rx.defrag.timeout_ms usage) */
+	uint32_t defrag_timeout_ms;
+	/* get back to parent ol_txrx_peer_t when ol_rx_reorder_t is in a
+	 * waitlist */
+	uint16_t tid;
+};
+
+enum {
+ txrx_sec_mcast = 0,
+ txrx_sec_ucast
+};
+
+typedef A_STATUS (*ol_tx_filter_func)(struct ol_txrx_msdu_info_t *
+ tx_msdu_info);
+
+struct ol_txrx_peer_t {
+ struct ol_txrx_vdev_t *vdev;
+
+ cdf_atomic_t ref_cnt;
+ cdf_atomic_t delete_in_progress;
+ cdf_atomic_t flush_in_progress;
+
+ /* The peer state tracking is used for HL systems
+ * that don't support tx and rx filtering within the target.
+ * In such systems, the peer's state determines what kind of
+ * tx and rx filtering, if any, is done.
+ * This variable doesn't apply to LL systems, or to HL systems for
+ * which the target handles tx and rx filtering. However, it is
+ * simplest to declare and update this variable unconditionally,
+ * for all systems.
+ */
+ enum ol_txrx_peer_state state;
+ cdf_spinlock_t peer_info_lock;
+ ol_rx_callback_fp osif_rx;
+ cdf_spinlock_t bufq_lock;
+ struct list_head cached_bufq;
+
+ ol_tx_filter_func tx_filter;
+
+ /* peer ID(s) for this peer */
+ uint16_t peer_ids[MAX_NUM_PEER_ID_PER_PEER];
+#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
+ uint16_t local_id;
+#endif
+
+ union ol_txrx_align_mac_addr_t mac_addr;
+
+ /* node in the vdev's list of peers */
+ TAILQ_ENTRY(ol_txrx_peer_t) peer_list_elem;
+ /* node in the hash table bin's list of peers */
+ TAILQ_ENTRY(ol_txrx_peer_t) hash_list_elem;
+
+ /*
+ * per TID info -
+ * stored in separate arrays to avoid alignment padding mem overhead
+ */
+ struct ol_rx_reorder_t tids_rx_reorder[OL_TXRX_NUM_EXT_TIDS];
+ union htt_rx_pn_t tids_last_pn[OL_TXRX_NUM_EXT_TIDS];
+ uint8_t tids_last_pn_valid[OL_TXRX_NUM_EXT_TIDS];
+ uint16_t tids_next_rel_idx[OL_TXRX_NUM_EXT_TIDS];
+ uint16_t tids_last_seq[OL_TXRX_NUM_EXT_TIDS];
+ uint16_t tids_mcast_last_seq[OL_TXRX_NUM_EXT_TIDS];
+
+ struct {
+ enum htt_sec_type sec_type;
+ uint32_t michael_key[2]; /* relevant for TKIP */
+ } security[2]; /* 0 -> multicast, 1 -> unicast */
+
+ /*
+ * rx proc function: this either is a copy of pdev's rx_opt_proc for
+ * regular rx processing, or has been redirected to a /dev/null discard
+ * function when peer deletion is in progress.
+ */
+ void (*rx_opt_proc)(struct ol_txrx_vdev_t *vdev,
+ struct ol_txrx_peer_t *peer,
+ unsigned tid, cdf_nbuf_t msdu_list);
+
+#ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
+ ol_txrx_peer_stats_t stats;
+#endif
+ int16_t rssi_dbm;
+
+ /* NAWDS Flag and Bss Peer bit */
+ uint16_t nawds_enabled:1, bss_peer:1, valid:1;
+
+ /* QoS info */
+ uint8_t qos_capable;
+ /* U-APSD tid mask */
+ uint8_t uapsd_mask;
+ /*flag indicating key installed */
+ uint8_t keyinstalled;
+
+ /* Bit to indicate if PN check is done in fw */
+ cdf_atomic_t fw_pn_check;
+
+#ifdef WLAN_FEATURE_11W
+ /* PN counter for Robust Management Frames */
+ uint64_t last_rmf_pn;
+ uint32_t rmf_pn_replays;
+ uint8_t last_rmf_pn_valid;
+#endif
+
+ /* Properties of the last received PPDU */
+ int16_t last_pkt_rssi_cmb;
+ int16_t last_pkt_rssi[4];
+ uint8_t last_pkt_legacy_rate;
+ uint8_t last_pkt_legacy_rate_sel;
+ uint32_t last_pkt_timestamp_microsec;
+ uint8_t last_pkt_timestamp_submicrosec;
+ uint32_t last_pkt_tsf;
+ uint8_t last_pkt_tid;
+ uint16_t last_pkt_center_freq;
+};
+
+#endif /* _OL_TXRX_TYPES__H_ */
diff --git a/dp/txrx/txrx.h b/dp/txrx/txrx.h
new file mode 100644
index 000000000000..a9808f19be0e
--- /dev/null
+++ b/dp/txrx/txrx.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2013-2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef TXRX_H
+#define TXRX_H
+
+#include "cds_api.h"
+#include "cdf_nbuf.h"
+#include "csr_api.h"
+#include "sap_api.h"
+#include "cdf_nbuf.h"
+#include "ol_txrx_osif_api.h"
+
+/* wait on peer deletion timeout value in milliseconds */
+#define PEER_DELETION_TIMEOUT 500
+
+enum txrx_wmm_ac {
+ TXRX_WMM_AC_VO,
+ TXRX_WMM_AC_VI,
+ TXRX_WMM_AC_BK,
+ TXRX_WMM_AC_BE,
+
+ TXRX_NUM_WMM_AC
+};
+
+struct txrx_rx_metainfo {
+ u8 up;
+ u16 dest_staid;
+};
+
+enum bt_frame_type {
+ /* BT-AMP packet of type data */
+ TXRX_BT_AMP_TYPE_DATA = 0x0001,
+
+ /* BT-AMP packet of type activity report */
+ TXRX_BT_AMP_TYPE_AR = 0x0002,
+
+ /* BT-AMP packet of type security frame */
+ TXRX_BT_AMP_TYPE_SEC = 0x0003,
+
+ /* BT-AMP packet of type Link Supervision request frame */
+ TXRX_BT_AMP_TYPE_LS_REQ = 0x0004,
+
+ /* BT-AMP packet of type Link Supervision reply frame */
+ TXRX_BT_AMP_TYPE_LS_REP = 0x0005,
+
+ /* Invalid Frame */
+ TXRX_BAP_INVALID_FRAME
+};
+
+enum wlan_ts_direction {
+ /* uplink */
+ WLAN_TX_DIR = 0,
+
+ /* downlink */
+ WLAN_RX_DIR = 1,
+
+ /*bidirectional */
+ WLAN_BI_DIR = 2,
+};
+
+enum wlan_sta_state {
+ /* Transition in this state made upon creation */
+ WLAN_STA_INIT = 0,
+
+ /* Transition happens after Assoc success if second level authentication
+ is needed */
+ WLAN_STA_CONNECTED,
+
+ /* Transition happens when second level auth is successful and keys are
+ properly installed */
+ WLAN_STA_AUTHENTICATED,
+
+ /* Transition happens when connectivity is lost */
+ WLAN_STA_DISCONNECTED,
+
+ WLAN_STA_MAX_STATE
+};
+
+struct wlan_txrx_stats {
+ /* Define various txrx stats here */
+};
+
+struct ol_txrx_vdev_t;
+
+CDF_STATUS wlan_register_mgmt_client(void *pdev_txrx,
+ CDF_STATUS (*rx_mgmt)(void *g_cdsctx,
+ void *buf));
+
+typedef void (*ol_txrx_vdev_delete_cb)(void *context);
+
+/**
+ * @typedef ol_txrx_tx_fp
+ * @brief top-level transmit function
+ */
+typedef cdf_nbuf_t
+(*ol_txrx_tx_fp)(struct ol_txrx_vdev_t *vdev, cdf_nbuf_t msdu_list);
+
+typedef void
+(*ol_txrx_mgmt_tx_cb)(void *ctxt, cdf_nbuf_t tx_mgmt_frm, int had_error);
+
+/* If RSSI realm is changed, send notification to Clients, SME, HDD */
+typedef CDF_STATUS (*wlan_txrx_rssi_cross_thresh)(void *adapter, u8 rssi,
+ void *usr_ctx,
+ int8_t avg_rssi);
+
+struct wlan_txrx_ind_req {
+ u16 msgType; /* message type is same as the request type */
+ u16 msgLen; /* length of the entire request */
+ u8 sessionId; /* sme Session Id */
+ u8 rssiNotification;
+ u8 avgRssi;
+ void *tlCallback;
+ void *pAdapter;
+ void *pUserCtxt;
+};
+
+
+/* Rx callback registered with txrx */
+typedef int (*wlan_txrx_cb_type)(void *g_cdsctx, cdf_nbuf_t buf, u8 sta_id,
+ struct txrx_rx_metainfo *rx_meta_info);
+
+/*
+ * Stub kept for legacy TL API compatibility: always returns 0 (success)
+ * and does NOT write a value through @rssi — callers must not rely on it.
+ */
+static inline int wlan_txrx_get_rssi(void *g_cdsctx, u8 sta_id, int8_t *rssi)
+{
+	return 0;
+}
+
+static inline int wlan_txrx_enable_uapsd_ac(void *g_cdsctx, u8 sta_id,
+ enum txrx_wmm_ac ac, u8 tid, u8 up,
+ u32 srv_int, u32 suspend_int,
+ enum wlan_ts_direction ts_dir)
+{
+ return 0;
+}
+
+static inline int wlan_txrx_disable_uapsd_ac(void *g_cdsctx, u8 sta_id,
+ enum txrx_wmm_ac ac)
+{
+ return 0;
+}
+
+static inline int wlan_change_sta_state(void *g_cdsctx, u8 sta_id,
+ enum wlan_sta_state state)
+{
+ return 0;
+}
+
+static inline int wlan_deregister_mgmt_client(void *g_cdsctx)
+{
+ return 0;
+}
+
+static inline void wlan_assoc_failed(u8 staid)
+{
+}
+
+static inline int wlan_get_ap_stats(void *g_cdsctx, tSap_SoftapStats *buf,
+ bool reset)
+{
+ return 0;
+}
+
+static inline int wlan_get_txrx_stats(void *g_cdsctx,
+ struct wlan_txrx_stats *stats, u8 sta_id)
+{
+ return 0;
+}
+
+static inline int wlan_txrx_update_rssi_bmps(void *g_cdsctx, u8 sta_id,
+ int8_t rssi)
+{
+ return 0;
+}
+
+static inline int wlan_txrx_deregister_rssi_indcb(void *g_cdsctx,
+ int8_t rssi_val,
+ u8 trigger_event,
+ wlan_txrx_rssi_cross_thresh
+ cb, int mod_id)
+{
+ return 0;
+}
+
+static inline int wlan_txrx_register_rssi_indcb(void *g_cdsctx,
+ int8_t rssi_val,
+ u8 trigger_event,
+ wlan_txrx_rssi_cross_thresh cb,
+ int mod_id, void *usr_ctx)
+{
+ return 0;
+}
+
+/* FIXME: The following stubs will be removed eventually */
+static inline int wlan_txrx_mc_process_msg(void *g_cdsctx, cds_msg_t *msg)
+{
+ return 0;
+}
+
+static inline int wlan_txrx_tx_process_msg(void *g_cdsctx, cds_msg_t *msg)
+{
+ return 0;
+}
+
+static inline void wlan_txrx_mc_free_msg(void *g_cdsctx, cds_msg_t *msg)
+{
+}
+
+static inline void wlan_txrx_tx_free_msg(void *g_cdsctx, cds_msg_t *msg)
+{
+}
+#endif
diff --git a/dp/txrx/wdi_event.h b/dp/txrx/wdi_event.h
new file mode 100644
index 000000000000..d1019eefb093
--- /dev/null
+++ b/dp/txrx/wdi_event.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2013-2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _WDI_EVENT_H_
+#define _WDI_EVENT_H_
+
+#include "athdefs.h"
+#include "cdf_nbuf.h"
+#define WDI_EVENT_BASE 0x100 /* Event starting number */
+
+/* Identifiers for the WDI events a consumer may subscribe to;
+ * numbering starts at WDI_EVENT_BASE. */
+enum WDI_EVENT {
+ WDI_EVENT_TX_STATUS = WDI_EVENT_BASE,
+ WDI_EVENT_RX_DESC,
+ WDI_EVENT_RX_DESC_REMOTE,
+ WDI_EVENT_RATE_FIND,
+ WDI_EVENT_RATE_UPDATE,
+ WDI_EVENT_RX_PEER_INVALID,
+ /* End of new event items */
+
+ WDI_EVENT_LAST
+};
+
+/* Message body for the WDI_EVENT_RX_PEER_INVALID event: the MSDU in
+ * question, its 802.11 frame header, and the vdev it was received on. */
+struct wdi_event_rx_peer_invalid_msg {
+ cdf_nbuf_t msdu;
+ struct ieee80211_frame *wh;
+ uint8_t vdev_id;
+};
+
+/* Number of event IDs in enum WDI_EVENT (WDI_EVENT_LAST is one past the
+ * final real event). */
+#define WDI_NUM_EVENTS (WDI_EVENT_LAST - WDI_EVENT_BASE)
+
+#define WDI_EVENT_NOTIFY_BASE 0x200
+/* Out-of-band notification types delivered via a wdi_event_notify
+ * callback (see typedef below). */
+enum WDI_EVENT_NOTIFY {
+ WDI_EVENT_SUB_DEALLOCATE = WDI_EVENT_NOTIFY_BASE,
+ /* End of new notification types */
+
+ WDI_EVENT_NOTIFY_LAST
+};
+
+/* Opaque event callback */
+typedef void (*wdi_event_cb)(void *pdev, enum WDI_EVENT event, void *data);
+
+/* Opaque event notify */
+typedef void (*wdi_event_notify)(enum WDI_EVENT_NOTIFY notify,
+ enum WDI_EVENT event);
+
+/**
+ * @typedef wdi_event_subscribe
+ * @brief Used by consumers to subscribe to WDI event notifications.
+ * @details
+ * The event_subscribe struct includes pointers to other event_subscribe
+ * objects. These pointers are simply to simplify the management of
+ * lists of event subscribers. These pointers are set during the
+ * event_sub() function, and shall not be modified except by the
+ * WDI event management SW, until after the object's event subscription
+ * is canceled by calling event_unsub().
+ */
+
+typedef struct wdi_event_subscribe_t {
+ wdi_event_cb callback; /* subscriber event callback structure head */
+ void *context; /* subscriber object that processes the event callback */
+ struct {
+ /* private - the event subscriber SW shall not use this struct */
+ struct wdi_event_subscribe_t *next;
+ struct wdi_event_subscribe_t *prev;
+ } priv;
+} wdi_event_subscribe;
+
+#endif
diff --git a/dp/txrx/wdi_event_api.h b/dp/txrx/wdi_event_api.h
new file mode 100644
index 000000000000..5e2ce5471b7b
--- /dev/null
+++ b/dp/txrx/wdi_event_api.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2012-2014 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _WDI_EVENT_API_H_
+#define _WDI_EVENT_API_H_
+
+#include "wdi_event.h"
+
+struct ol_txrx_pdev_t;
+
+/**
+ * @brief Subscribe to a specified WDI event.
+ * @details
+ * This function adds the provided wdi_event_subscribe object to a list of
+ * subscribers for the specified WDI event.
+ * When the event in question happens, each subscriber for the event will
+ * have their callback function invoked.
+ * The order in which callback functions from multiple subscribers are
+ * invoked is unspecified.
+ *
+ * @param txrx_pdev - the event physical device, that maintains the event lists
+ * @param event_cb_sub - the callback and context for the event subscriber
+ * @param event - which event's notifications are being subscribed to
+ * @return error code, or A_OK for success
+ */
+A_STATUS wdi_event_sub(struct ol_txrx_pdev_t *txrx_pdev,
+ wdi_event_subscribe *event_cb_sub,
+ enum WDI_EVENT event);
+
+/**
+ * @brief Unsubscribe from a specified WDI event.
+ * @details
+ * This function removes the provided event subscription object from the
+ * list of subscribers for its event.
+ * This function shall only be called if there was a successful prior call
+ * to event_sub() on the same wdi_event_subscribe object.
+ *
+ * @param txrx_pdev - the event physical device with the list of event subscribers
+ * @param event_cb_sub - the event subscription object
+ * @param event - which event is being unsubscribed
+ * @return error code, or A_OK for success
+ */
+A_STATUS wdi_event_unsub(struct ol_txrx_pdev_t *txrx_pdev,
+ wdi_event_subscribe *event_cb_sub,
+ enum WDI_EVENT event);
+
+#ifdef WDI_EVENT_ENABLE
+
+void wdi_event_handler(enum WDI_EVENT event,
+ struct ol_txrx_pdev_t *txrx_pdev, void *data);
+A_STATUS wdi_event_attach(struct ol_txrx_pdev_t *txrx_pdev);
+A_STATUS wdi_event_detach(struct ol_txrx_pdev_t *txrx_pdev);
+
+#else
+
+/* WDI event support compiled out: provide no-op fallbacks so callers
+ * need no #ifdef guards — the handler does nothing and attach/detach
+ * unconditionally report A_OK. */
+static inline void wdi_event_handler(enum WDI_EVENT event,
+ struct ol_txrx_pdev_t *txrx_pdev, void *data)
+{
+ return;
+}
+static inline A_STATUS wdi_event_attach(struct ol_txrx_pdev_t *txrx_pdev)
+{
+ return A_OK;
+}
+static inline A_STATUS wdi_event_detach(struct ol_txrx_pdev_t *txrx_pdev)
+{
+ return A_OK;
+}
+#endif /* WDI_EVENT_ENABLE */
+
+#endif /* _WDI_EVENT_API_H_ */