author     Justin Shen <chias@qca.qualcomm.com>                      2014-08-12 16:11:24 +0800
committer  Pitani Venkata Rajesh Kumar <c_vpitan@qti.qualcomm.com>   2014-08-20 13:16:52 +0530
commit     a115dd6dd547ea54e400782bf815ef4d40dc2dab (patch)
tree       da389696dbc546637d0fa66f2a780a5122831423
parent     383c25b0c23f7fc9b3ec325de0936f749b61a9ef (diff)
qcacld: Fine tune TX flow control code in HL.
With CONFIG_PER_VDEV_TX_DESC_POOL_HL and the netif_stop/netif_wake mechanism, packet drops are still seen in UDP TX MCC testing. These drops degrade video quality when a Miracast application is playing and are not acceptable for customers. Fine-tune the position of the netif_stop code and the pdev->tx_queue.rsrc_threshold_lo value to remove the possibility of tx packet drops.

Change-Id: Ic8880e31c71ef689d25cadc9fe5083a2d5fd1a28
CRs-Fixed: 708239
-rw-r--r--  CORE/CLD_TXRX/TXRX/ol_tx.c    19
-rw-r--r--  CORE/CLD_TXRX/TXRX/ol_txrx.c   8
-rw-r--r--  CORE/CLD_TXRX/TXRX/ol_txrx.h   1
3 files changed, 22 insertions, 6 deletions
diff --git a/CORE/CLD_TXRX/TXRX/ol_tx.c b/CORE/CLD_TXRX/TXRX/ol_tx.c
index ac828d79729c..6d2f7d19eca9 100644
--- a/CORE/CLD_TXRX/TXRX/ol_tx.c
+++ b/CORE/CLD_TXRX/TXRX/ol_tx.c
@@ -504,10 +504,13 @@ ol_tx_hl_base(
next = adf_nbuf_next(msdu);
#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
- if (adf_os_atomic_read(&vdev->tx_desc_count) <= ((ol_tx_desc_pool_size_hl(pdev->ctrl_pdev) >> 1) - 20)) {
- tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, &tx_msdu_info);
- } else {
+ if (adf_os_atomic_read(&vdev->tx_desc_count) >
+ ((ol_tx_desc_pool_size_hl(pdev->ctrl_pdev) >> 1)
+ - TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED)) {
#ifdef QCA_LL_TX_FLOW_CT
+ /* Give tx desc to avoid drop because net_if will stop later */
+ tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, &tx_msdu_info);
+
adf_os_spin_lock_bh(&pdev->tx_mutex);
if ( !(adf_os_atomic_read(&vdev->os_q_paused)) ) {
/* pause netif_queue */
@@ -518,12 +521,16 @@ ol_tx_hl_base(
} else {
adf_os_spin_unlock_bh(&pdev->tx_mutex);
}
-#endif /* QCA_LL_TX_FLOW_CT */
+#else
tx_desc = NULL;
+#endif /* QCA_LL_TX_FLOW_CT */
+ } else {
+ tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, &tx_msdu_info);
}
-#else
+#else /* CONFIG_PER_VDEV_TX_DESC_POOL */
tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, &tx_msdu_info);
-#endif
+#endif /* CONFIG_PER_VDEV_TX_DESC_POOL */
+
if (! tx_desc) {
/*
* If we're out of tx descs, there's no need to try to allocate
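Read together, the hunk above keeps the same per-vdev threshold test but reorders what happens when it trips: under QCA_LL_TX_FLOW_CT the frame in hand is still given a tx descriptor before the netif queue is paused, instead of being dropped with tx_desc = NULL as before, and the hard-coded margin of 20 descriptors becomes TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED. A minimal standalone sketch of that before/after behaviour follows; admit_frame_old, admit_frame_new, pause_netif_queue, and the 4096-descriptor pool size are illustrative stand-ins, not the driver's real symbols or configuration.

#include <stdbool.h>
#include <stdio.h>

/* Reserve value from the ol_txrx.h hunk below; the pool size is only an assumed example. */
#define TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED 100
#define EXAMPLE_POOL_SIZE                  4096

/* Illustrative stand-in for pausing the OS netif queue. */
static void pause_netif_queue(bool *os_q_paused)
{
    *os_q_paused = true;
}

/* Pre-patch: crossing the threshold pauses the queue and drops the frame in hand. */
static bool admit_frame_old(int vdev_desc_count, bool *os_q_paused)
{
    int threshold = (EXAMPLE_POOL_SIZE >> 1) - 20;      /* old hard-coded margin */

    if (vdev_desc_count <= threshold)
        return true;                                    /* descriptor allocated */
    if (!*os_q_paused)
        pause_netif_queue(os_q_paused);
    return false;                                       /* tx_desc = NULL -> drop */
}

/* Post-patch: the frame in hand still gets a descriptor; only further frames are stopped. */
static bool admit_frame_new(int vdev_desc_count, bool *os_q_paused)
{
    int threshold = (EXAMPLE_POOL_SIZE >> 1)
                    - TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED;

    if (vdev_desc_count > threshold && !*os_q_paused)
        pause_netif_queue(os_q_paused);
    return true;
}

int main(void)
{
    bool paused_old = false, paused_new = false;
    int in_flight = 2050;   /* above both thresholds (2028 old, 1948 new) */

    printf("old: admitted=%d paused=%d\n",
           admit_frame_old(in_flight, &paused_old), paused_old);
    printf("new: admitted=%d paused=%d\n",
           admit_frame_new(in_flight, &paused_new), paused_new);
    return 0;
}

Compiled and run, the old path reports the frame dropped while the new path reports it admitted with the queue paused, which is the reordering the commit message describes.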
diff --git a/CORE/CLD_TXRX/TXRX/ol_txrx.c b/CORE/CLD_TXRX/TXRX/ol_txrx.c
index 5ae55c38a080..b39b0c3d25a8 100644
--- a/CORE/CLD_TXRX/TXRX/ol_txrx.c
+++ b/CORE/CLD_TXRX/TXRX/ol_txrx.c
@@ -278,8 +278,16 @@ ol_txrx_pdev_attach(
desc_pool_size = ol_tx_desc_pool_size_hl(ctrl_pdev);
adf_os_atomic_init(&pdev->tx_queue.rsrc_cnt);
adf_os_atomic_add(desc_pool_size, &pdev->tx_queue.rsrc_cnt);
+#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
+ /*
+ * 5% margin of unallocated desc is too much for per vdev mechanism.
+ * Define the value separately.
+ */
+ pdev->tx_queue.rsrc_threshold_lo = TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED;
+#else
/* always maintain a 5% margin of unallocated descriptors */
pdev->tx_queue.rsrc_threshold_lo = (5 * desc_pool_size)/100;
+#endif
/* when freeing up descriptors, keep going until there's a 15% margin */
pdev->tx_queue.rsrc_threshold_hi = (15 * desc_pool_size)/100;
} else {
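The ol_txrx.c hunk above swaps the generic low watermark (5% of the descriptor pool) for the fixed TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED reserve when per-vdev pools are in use, while the 15% refill target is unchanged. The small program below shows the resulting numbers for an assumed pool of 4096 descriptors; the real pool size comes from ol_tx_desc_pool_size_hl() and depends on the build and ini configuration.

#include <stdio.h>

#define TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED 100

int main(void)
{
    int desc_pool_size = 4096;   /* assumed for illustration only */

    int threshold_lo_old = (5 * desc_pool_size) / 100;           /* generic 5% margin  -> 204 */
    int threshold_lo_new = TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED;   /* fixed reserve      -> 100 */
    int threshold_hi     = (15 * desc_pool_size) / 100;          /* unchanged 15% mark -> 614 */

    printf("lo(old)=%d lo(new)=%d hi=%d\n",
           threshold_lo_old, threshold_lo_new, threshold_hi);
    return 0;
}

With a pool that size, the low-water mark drops from 204 free descriptors to 100, matching the in-code comment that a flat 5% margin is too large for the per-vdev mechanism.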
diff --git a/CORE/CLD_TXRX/TXRX/ol_txrx.h b/CORE/CLD_TXRX/TXRX/ol_txrx.h
index fb29eca3095e..8109c75cbee7 100644
--- a/CORE/CLD_TXRX/TXRX/ol_txrx.h
+++ b/CORE/CLD_TXRX/TXRX/ol_txrx.h
@@ -53,6 +53,7 @@ ol_tx_desc_pool_size_hl(ol_pdev_handle ctrl_pdev);
#ifdef CONFIG_PER_VDEV_TX_DESC_POOL
#define TXRX_HL_TX_FLOW_CTRL_VDEV_LOW_WATER_MARK 400
+#define TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED 100
#endif
#endif /* _OL_TXRX__H_ */