diff options
| author | Vasanthakumar Thiagarajan <vthiagar@qti.qualcomm.com> | 2014-02-28 20:27:48 +0530 |
|---|---|---|
| committer | Akash Patel <c_akashp@qca.qualcomm.com> | 2014-03-03 10:38:30 -0800 |
| commit | 08d6bfccab4d0e3f3149fa1ec03fa58cd981ca77 (patch) | |
| tree | 0a90326b5a0d8111f033f6ba18eab70c4aca1bbb | |
| parent | 47d763e360ab71a2c3b52133a6e4477b54b5f6b6 (diff) | |
qcacld/tlshim: Improve downlink performance numbers
Instead of sending one netbuf at a time, send the complete
list received from txrx to voss, this would reduce the overhead
of calling multiple functions for every packet. Also, a little
optimization by not calling tl_shim_flush_rx_frames() for every
packet rx, just check if the cache buffer queue is empty
instead of trying to traverse the list. These changes improve TCP
and UDP D/L peak numbers by up to 40 Mbps.
Change-Id: I548ace60a433f1779adde00941ac663a437bc58c
CRs-Fixed: 619730
| -rw-r--r-- | CORE/CLD_TXRX/TLSHIM/tl_shim.c | 117 | ||||
| -rw-r--r-- | CORE/VOSS/src/vos_sched.c | 14 |
2 files changed, 77 insertions, 54 deletions
diff --git a/CORE/CLD_TXRX/TLSHIM/tl_shim.c b/CORE/CLD_TXRX/TLSHIM/tl_shim.c index 17c9260ece85..5b1975f06980 100644 --- a/CORE/CLD_TXRX/TLSHIM/tl_shim.c +++ b/CORE/CLD_TXRX/TLSHIM/tl_shim.c @@ -275,14 +275,15 @@ tlshim_mgmt_over_data_rx_handler_non_interrupt_ctx(pVosContextType pVosGCtx, */ bool tlshim_check_n_process_iapp_frame (pVosContextType pVosGCtx, - adf_nbuf_t msdu, u_int16_t sta_id) + adf_nbuf_t *msdu, u_int16_t sta_id) { - u_int8_t *data = adf_nbuf_data(msdu); + u_int8_t *data; u_int8_t offset_snap_header; struct ol_txrx_pdev_t *pdev = pVosGCtx->pdev_txrx_ctx; struct ol_txrx_peer_t *peer = ol_txrx_peer_find_by_local_id(pVosGCtx->pdev_txrx_ctx, sta_id); struct ol_txrx_vdev_t *vdev = peer->vdev; + adf_nbuf_t new_head = NULL, buf, new_list = NULL, next_buf; /* frame format is natve wifi */ if(pdev->frame_format == wlan_frm_fmt_native_wifi) @@ -290,21 +291,36 @@ tlshim_check_n_process_iapp_frame (pVosContextType pVosGCtx, else offset_snap_header = ETHERNET_HDR_LEN; - if(vos_mem_compare( &data[offset_snap_header], - &AIRONET_SNAP_HEADER[0], LLC_SNAP_SIZE) == VOS_TRUE) { - /* process IAPP frames */ - tlshim_mgmt_over_data_rx_handler_non_interrupt_ctx(pVosGCtx, - msdu, vdev); - /* if returned true: the packet will not be passed to upper layer */ - return true; + buf = *msdu; + while (buf) { + data = adf_nbuf_data(buf); + next_buf = adf_nbuf_queue_next(buf); + if (vos_mem_compare( &data[offset_snap_header], + &AIRONET_SNAP_HEADER[0], LLC_SNAP_SIZE) == VOS_TRUE) { + /* process IAPP frames */ + tlshim_mgmt_over_data_rx_handler_non_interrupt_ctx(pVosGCtx, + buf, vdev); + } else { /* Add the packet onto a new list */ + if (new_list == NULL) + new_head = buf; + else + adf_nbuf_set_next(new_list, buf); + new_list = buf; + adf_nbuf_set_next(buf, NULL); + } + buf = next_buf; } + if (!new_list) + return true; + + *msdu = new_head; /* if returned false the packet will be handled by the upper layer */ return false; } - #endif /* defined(FEATURE_WLAN_CCX) && 
!defined(FEATURE_WLAN_CCX_UPLOAD) */ + #ifdef QCA_WIFI_ISOC static void tlshim_mgmt_rx_dxe_handler(void *context, adf_nbuf_t buflist) { @@ -657,34 +673,50 @@ static void tl_shim_flush_rx_frames(void *vos_ctx, clear_bit(TLSHIM_FLUSH_CACHE_IN_PROGRESS, &sta_info->flags); } -static VOS_STATUS tlshim_data_rx_cb(struct txrx_tl_shim_ctx *tl_shim, - adf_nbuf_t buf, u_int16_t staid) +static void tlshim_data_rx_cb(struct txrx_tl_shim_ctx *tl_shim, + adf_nbuf_t buf_list, u_int16_t staid) { void *vos_ctx = vos_get_global_context(VOS_MODULE_ID_TL, tl_shim); struct tlshim_sta_info *sta_info; + adf_nbuf_t buf, next_buf; VOS_STATUS ret; - if (!vos_ctx) - return VOS_STATUS_E_FAILURE; + if (unlikely(!vos_ctx)) + goto free_buf; + sta_info = &tl_shim->sta_info[staid]; - if (unlikely(!sta_info->registered)) { - adf_nbuf_free(buf); - return VOS_STATUS_E_FAILURE; - } + if (unlikely(!sta_info->registered)) + goto free_buf; adf_os_spin_lock_bh(&tl_shim->bufq_lock); - sta_info->suspend_flush = 1; - adf_os_spin_unlock_bh(&tl_shim->bufq_lock); + if (!list_empty(&sta_info->cached_bufq)) { + sta_info->suspend_flush = 1; + adf_os_spin_unlock_bh(&tl_shim->bufq_lock); + /* Flush the cached frames to HDD before passing new rx frame */ + tl_shim_flush_rx_frames(vos_ctx, tl_shim, staid, 0); + } else + adf_os_spin_unlock_bh(&tl_shim->bufq_lock); - /* Flush the cached frames to HDD before passing new rx frame */ - tl_shim_flush_rx_frames(vos_ctx, tl_shim, staid, 0); - ret = sta_info->data_rx(vos_ctx, buf, staid); - if (ret != VOS_STATUS_SUCCESS) { - TLSHIM_LOGW("Frame Rx to HDD failed"); + buf = buf_list; + while (buf) { + next_buf = adf_nbuf_queue_next(buf); + ret = sta_info->data_rx(vos_ctx, buf, staid); + if (ret != VOS_STATUS_SUCCESS) { + TLSHIM_LOGE("Frame Rx to HDD failed"); + adf_nbuf_free(buf); + } + buf = next_buf; + } + return; + +free_buf: + TLSHIM_LOGW("%s:Dropping frames", __func__); + buf = buf_list; + while (buf) { + next_buf = adf_nbuf_queue_next(buf); adf_nbuf_free(buf); - return 
VOS_STATUS_E_FAILURE; + buf = next_buf; } - return VOS_STATUS_SUCCESS; } /* @@ -750,9 +782,6 @@ static void tlshim_data_rx_handler(void *context, u_int16_t staid, ret = sta_info->data_rx(vos_ctx, rx_buf_list, staid); if (ret == VOS_STATUS_E_INVAL) { #endif - buf = rx_buf_list; - while (buf) { - next_buf = adf_nbuf_queue_next(buf); #if defined(FEATURE_WLAN_CCX) && !defined(FEATURE_WLAN_CCX_UPLOAD) /* @@ -762,11 +791,9 @@ static void tlshim_data_rx_handler(void *context, u_int16_t staid, * 2) send to PE/LIM * 3) free the involved sk_buff */ - if(tlshim_check_n_process_iapp_frame(vos_ctx, - buf, staid)) { - buf = next_buf; - continue; - } + if (tlshim_check_n_process_iapp_frame(vos_ctx, + &rx_buf_list, staid)) + return; /* * above returned false, the packet was not IAPP. @@ -779,36 +806,30 @@ static void tlshim_data_rx_handler(void *context, u_int16_t staid, * better use multicores. */ if (!tl_shim->enable_rxthread) { - tlshim_data_rx_cb(tl_shim, buf, staid); + tlshim_data_rx_cb(tl_shim, rx_buf_list, staid); } else { pVosSchedContext sched_ctx = get_vos_sched_ctxt(); struct VosTlshimPkt *pkt; - if (unlikely(!sched_ctx)) { - adf_nbuf_free(buf); - buf = next_buf; - continue; - } + if (unlikely(!sched_ctx)) + goto drop_rx_buf; + pkt = vos_alloc_tlshim_pkt(sched_ctx); if (!pkt) { TLSHIM_LOGW("No available Rx message buffer"); - adf_nbuf_free(buf); - buf = next_buf; - continue; + goto drop_rx_buf; } pkt->callback = (vos_tlshim_cb) tlshim_data_rx_cb; pkt->context = (void *) tl_shim; - pkt->Rxpkt = (void *) buf; + pkt->Rxpkt = (void *) rx_buf_list; pkt->staId = staid; vos_indicate_rxpkt(sched_ctx, pkt); } #else /* QCA_CONFIG_SMP */ - tlshim_data_rx_cb(tl_shim, buf, staid); + tlshim_data_rx_cb(tl_shim, rx_buf_list, staid); #endif /* QCA_CONFIG_SMP */ - buf = next_buf; - } #ifdef IPA_OFFLOAD } #endif diff --git a/CORE/VOSS/src/vos_sched.c b/CORE/VOSS/src/vos_sched.c index ef0eebcfe5a7..7a4f72100998 100644 --- a/CORE/VOSS/src/vos_sched.c +++ b/CORE/VOSS/src/vos_sched.c @@ 
-1444,6 +1444,7 @@ void vos_drop_rxpkt_by_staid(pVosSchedContext pSchedContext, u_int16_t staId) { struct list_head local_list; struct VosTlshimPkt *pkt, *tmp; + adf_nbuf_t buf, next_buf; INIT_LIST_HEAD(&local_list); spin_lock_bh(&pSchedContext->TlshimRxQLock); @@ -1459,7 +1460,12 @@ void vos_drop_rxpkt_by_staid(pVosSchedContext pSchedContext, u_int16_t staId) list_for_each_entry_safe(pkt, tmp, &local_list, list) { list_del(&pkt->list); - kfree_skb(pkt->Rxpkt); + buf = pkt->Rxpkt; + while (buf) { + next_buf = adf_nbuf_queue_next(buf); + adf_nbuf_free(buf); + buf = next_buf; + } vos_free_tlshim_pkt(pSchedContext, pkt); } } @@ -1485,11 +1491,7 @@ static void vos_rx_from_queue(pVosSchedContext pSchedContext) list_del(&pkt->list); spin_unlock_bh(&pSchedContext->TlshimRxQLock); sta_id = pkt->staId; - if (pkt->callback) - pkt->callback(pkt->context, pkt->Rxpkt, sta_id); - else - kfree_skb(pkt->Rxpkt); - + pkt->callback(pkt->context, pkt->Rxpkt, sta_id); vos_free_tlshim_pkt(pSchedContext, pkt); spin_lock_bh(&pSchedContext->TlshimRxQLock); } |
