diff options
| author | Liangwei Dong <liangwei@qti.qualcomm.com> | 2016-01-29 17:47:13 +0800 |
|---|---|---|
| committer | Anjaneedevi Kapparapu <akappa@codeaurora.org> | 2016-02-05 14:27:43 +0530 |
| commit | 7997757eff68580191a9575b4d7b99aefe318c0d (patch) | |
| tree | 429d4e44fbe022cbe02c516a82b3783aa7581c33 | |
| parent | 96f20e3abb8af105aea61181ff1b905a357736b6 (diff) | |
qcacld-2.0: Fix issue where the DUT does not send EAPOL key 2/4 during roaming.
In HL platform, establish P2P CLI + Station MCC on 5G band,
then start the iperf throughput by 2 UDP streams on
both Station and P2P.
Then tune the attenuation to make the Station
roam back and forth between AP1 and AP2.
At some point, the EAPOL M2 frame cannot be sent out;
it remains pending in the driver tx queue due to a lack of credits.
The solution is to pause the other vdevs when MCC is starting or
has already started, then unpause them when the Vdev Start response
comes up. This gives the current vdev more credit to send out the M2 frame.
Change-Id: I5fec1c63229c3ece98ceb051199c8ded12e7e339
CRs-Fixed: 969288
| -rw-r--r-- | CORE/CLD_TXRX/TXRX/ol_tx_queue.c | 70 | ||||
| -rw-r--r-- | CORE/CLD_TXRX/TXRX/ol_txrx.c | 4 | ||||
| -rw-r--r-- | CORE/CLD_TXRX/TXRX/ol_txrx_types.h | 1 | ||||
| -rw-r--r-- | CORE/SERVICES/COMMON/ol_txrx_ctrl_api.h | 46 | ||||
| -rw-r--r-- | CORE/SERVICES/COMMON/wdi_in.h | 2 | ||||
| -rw-r--r-- | CORE/SERVICES/WMA/wma.c | 79 | ||||
| -rw-r--r-- | CORE/SERVICES/WMA/wma.h | 1 |
7 files changed, 197 insertions, 6 deletions
diff --git a/CORE/CLD_TXRX/TXRX/ol_tx_queue.c b/CORE/CLD_TXRX/TXRX/ol_tx_queue.c index 2385e3455e74..4844e8bbde5c 100644 --- a/CORE/CLD_TXRX/TXRX/ol_tx_queue.c +++ b/CORE/CLD_TXRX/TXRX/ol_tx_queue.c @@ -494,7 +494,7 @@ ol_txrx_throttle_pause(ol_txrx_pdev_handle pdev) pdev->tx_throttle.is_paused = TRUE; adf_os_spin_unlock_bh(&pdev->tx_throttle.mutex); #endif - ol_txrx_pdev_pause(pdev, 0); + ol_txrx_pdev_pause(pdev, OL_TXQ_PAUSE_REASON_THROTTLE); } void @@ -511,7 +511,7 @@ ol_txrx_throttle_unpause(ol_txrx_pdev_handle pdev) pdev->tx_throttle.is_paused = FALSE; adf_os_spin_unlock_bh(&pdev->tx_throttle.mutex); #endif - ol_txrx_pdev_unpause(pdev, 0); + ol_txrx_pdev_unpause(pdev, OL_TXQ_PAUSE_REASON_THROTTLE); } #endif /* defined(CONFIG_HL_SUPPORT) */ @@ -562,6 +562,60 @@ ol_txrx_pdev_unpause(ol_txrx_pdev_handle pdev, u_int32_t reason) } } +#if defined(CONFIG_HL_SUPPORT) +/** + * ol_txrx_pdev_pause_other_vdev() - Suspend all tx data for the specified physical device except + * current vdev. + * @data_pdev: the physical device being paused. + * @reason: pause reason. + * One can provide multiple line descriptions + * for arguments. + * @current_id: do not pause this vdev id queues + * + * This function applies to HL systems - + * in LL systems, applies when txrx_vdev_pause_all is enabled. + * In some cases it is necessary to be able to temporarily + * suspend other vdevs traffic, e.g. to avoid current EAPOL frames credit starvation + * + * Return: None + */ +void +ol_txrx_pdev_pause_other_vdev(ol_txrx_pdev_handle pdev, u_int32_t reason, u_int32_t current_id) +{ + struct ol_txrx_vdev_t *vdev = NULL, *tmp; + + TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) { + if (vdev->vdev_id != current_id) { + ol_txrx_vdev_pause(vdev, reason); + } + } +} + +/** + * ol_txrx_pdev_unpause_other_vdev() - Resume tx for the paused vdevs.. + * @data_pdev: the physical device being paused. + * @reason: pause reason. 
+ * @current_id: do not unpause this vdev + * + * This function applies to HL systems - + * in LL systems, applies when txrx_vdev_pause_all is enabled. + * + * + * Return: None + */ +void +ol_txrx_pdev_unpause_other_vdev(ol_txrx_pdev_handle pdev, u_int32_t reason, u_int32_t current_id) +{ + struct ol_txrx_vdev_t *vdev = NULL, *tmp; + + TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) { + if (vdev->vdev_id != current_id) { + ol_txrx_vdev_unpause(vdev, reason); + } + } +} +#endif + #ifdef QCA_BAD_PEER_TX_FLOW_CL /** @@ -972,6 +1026,7 @@ ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, u_int32_t reason) /* use peer_ref_mutex before accessing peer_list */ adf_os_spin_lock_bh(&pdev->peer_ref_mutex); adf_os_spin_lock_bh(&pdev->tx_queue_spinlock); + vdev->hl_paused_reason |= reason; TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { ol_txrx_peer_pause_base(pdev, peer); } @@ -1004,11 +1059,14 @@ ol_txrx_vdev_unpause(ol_txrx_vdev_handle vdev, u_int32_t reason) /* take peer_ref_mutex before accessing peer_list */ adf_os_spin_lock_bh(&pdev->peer_ref_mutex); adf_os_spin_lock_bh(&pdev->tx_queue_spinlock); + if (vdev->hl_paused_reason & reason) { + vdev->hl_paused_reason &= ~reason; - TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { - int i; - for (i = 0; i < ARRAY_LEN(peer->txqs); i++) { - ol_txrx_peer_tid_unpause_base(pdev, peer, i); + TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) { + int i; + for (i = 0; i < ARRAY_LEN(peer->txqs); i++) { + ol_txrx_peer_tid_unpause_base(pdev, peer, i); + } } } adf_os_spin_unlock_bh(&pdev->tx_queue_spinlock); diff --git a/CORE/CLD_TXRX/TXRX/ol_txrx.c b/CORE/CLD_TXRX/TXRX/ol_txrx.c index f7892e6b7396..e328fd410cec 100644 --- a/CORE/CLD_TXRX/TXRX/ol_txrx.c +++ b/CORE/CLD_TXRX/TXRX/ol_txrx.c @@ -1072,6 +1072,10 @@ ol_txrx_vdev_attach( adf_os_spinlock_init(&vdev->ll_pause.mutex); vdev->ll_pause.paused_reason = 0; +#if defined(CONFIG_HL_SUPPORT) + vdev->hl_paused_reason = 0; +#endif + vdev->ll_pause.txq.head = 
vdev->ll_pause.txq.tail = NULL; vdev->ll_pause.txq.depth = 0; adf_os_timer_init( diff --git a/CORE/CLD_TXRX/TXRX/ol_txrx_types.h b/CORE/CLD_TXRX/TXRX/ol_txrx_types.h index 1cc772360204..0e51647e9d72 100644 --- a/CORE/CLD_TXRX/TXRX/ol_txrx_types.h +++ b/CORE/CLD_TXRX/TXRX/ol_txrx_types.h @@ -903,6 +903,7 @@ struct ol_txrx_vdev_t { #if defined(CONFIG_HL_SUPPORT) struct ol_tx_frms_queue_t txqs[OL_TX_VDEV_NUM_QUEUES]; + u_int32_t hl_paused_reason; #endif struct { diff --git a/CORE/SERVICES/COMMON/ol_txrx_ctrl_api.h b/CORE/SERVICES/COMMON/ol_txrx_ctrl_api.h index 42b169147e1e..77f4f7e0dc86 100644 --- a/CORE/SERVICES/COMMON/ol_txrx_ctrl_api.h +++ b/CORE/SERVICES/COMMON/ol_txrx_ctrl_api.h @@ -63,6 +63,8 @@ enum wlan_op_mode { #define OL_TXQ_PAUSE_REASON_TX_ABORT (1 << 2) #define OL_TXQ_PAUSE_REASON_VDEV_STOP (1 << 3) #define OL_TXQ_PAUSE_REASON_VDEV_SUSPEND (1 << 4) +#define OL_TXQ_PAUSE_REASON_MCC_VDEV_START (1 << 5) +#define OL_TXQ_PAUSE_REASON_THROTTLE (1 << 6) /* command options for dumpStats*/ #define WLAN_HDD_STATS 0 @@ -450,6 +452,50 @@ ol_txrx_pdev_unpause(ol_txrx_pdev_handle data_pdev, u_int32_t reason); #endif /* CONFIG_HL_SUPPORT */ /** + * ol_txrx_pdev_pause_other_vdev() - Suspend all tx data for the specified physical device except + * current vdev. + * @data_pdev: the physical device being paused. + * @reason: pause reason. + * One can provide multiple line descriptions + * for arguments. + * @current_id: do not pause this vdev id queues + * + * This function applies to HL systems - + * in LL systems, applies when txrx_vdev_pause_all is enabled. + * In some cases it is necessary to be able to temporarily + * suspend other vdevs traffic, e.g. 
to avoid current EAPOL frames credit starvation + * + * Return: None + */ +#if defined(CONFIG_HL_SUPPORT) +void +ol_txrx_pdev_pause_other_vdev(ol_txrx_pdev_handle data_pdev, u_int32_t reason, u_int32_t current_id); +#else +#define ol_txrx_pdev_pause_other_vdev(data_pdev,reason,current_id) /* no-op */ +#endif /* CONFIG_HL_SUPPORT */ + +/** + * ol_txrx_pdev_unpause_other_vdev() - Resume tx for the paused vdevs.. + * @data_pdev: the physical device being paused. + * @reason: pause reason. + * @current_id: do not unpause this vdev + * + * This function applies to HL systems - + * in LL systems, applies when txrx_vdev_pause_all is enabled. + * + * + * Return: None + */ +#if defined(CONFIG_HL_SUPPORT) +void +ol_txrx_pdev_unpause_other_vdev(ol_txrx_pdev_handle data_pdev, u_int32_t reason, u_int32_t current_id); +#else +#define ol_txrx_pdev_unpause_other_vdev(data_pdev,reason,current_id) /* no-op */ +#endif /* CONFIG_HL_SUPPORT */ + + + +/** * @brief Synchronize the data-path tx with a control-path target download * @dtails * @param data_pdev - the data-path physical device object diff --git a/CORE/SERVICES/COMMON/wdi_in.h b/CORE/SERVICES/COMMON/wdi_in.h index b6d37f60c60d..9745f46ef327 100644 --- a/CORE/SERVICES/COMMON/wdi_in.h +++ b/CORE/SERVICES/COMMON/wdi_in.h @@ -1201,6 +1201,8 @@ ol_tx_queue_log_display(ol_txrx_pdev_handle pdev); #define wdi_in_tx_release ol_txrx_tx_release #define wdi_in_vdev_pause ol_txrx_vdev_pause #define wdi_in_vdev_unpause ol_txrx_vdev_unpause +#define wdi_in_pdev_pause_other_vdev ol_txrx_pdev_pause_other_vdev +#define wdi_in_pdev_unpause_other_vdev ol_txrx_pdev_unpause_other_vdev #define wdi_in_pdev_pause ol_txrx_pdev_pause #define wdi_in_pdev_unpause ol_txrx_pdev_unpause #define wdi_in_tx_sync ol_txrx_tx_sync diff --git a/CORE/SERVICES/WMA/wma.c b/CORE/SERVICES/WMA/wma.c index b12372ef92b0..3e4afd2422c9 100644 --- a/CORE/SERVICES/WMA/wma.c +++ b/CORE/SERVICES/WMA/wma.c @@ -1112,6 +1112,7 @@ static int wma_vdev_start_resp_handler(void *handle, 
u_int8_t *cmd_param_info, u_int8_t *buf; vos_msg_t vos_msg = {0}; tp_wma_handle wma = (tp_wma_handle) handle; + ol_txrx_pdev_handle pdev = NULL; WMA_LOGI("%s: Enter", __func__); param_buf = (WMI_VDEV_START_RESP_EVENTID_param_tlvs *) cmd_param_info; @@ -1120,6 +1121,13 @@ static int wma_vdev_start_resp_handler(void *handle, u_int8_t *cmd_param_info, return -EINVAL; } + pdev = vos_get_context(VOS_MODULE_ID_TXRX, wma->vos_context); + + if (pdev == NULL) { + WMA_LOGE("vdev start resp fail as pdev is NULL"); + return -EINVAL; + } + resp_event = param_buf->fixed_param; buf = vos_mem_malloc(sizeof(wmi_vdev_start_response_event_fixed_param)); if (!buf) { @@ -1133,6 +1141,14 @@ static int wma_vdev_start_resp_handler(void *handle, u_int8_t *cmd_param_info, adf_os_spin_unlock_bh(&wma->dfs_ic->chan_lock); } + if (wma->pause_other_vdev_on_mcc_start) { + WMA_LOGD("%s: unpause other vdevs since paused when MCC start", __func__); + wma->pause_other_vdev_on_mcc_start = false; + wdi_in_pdev_unpause_other_vdev(pdev, + OL_TXQ_PAUSE_REASON_MCC_VDEV_START, + resp_event->vdev_id); + } + vos_mem_zero(buf, sizeof(wmi_vdev_start_response_event_fixed_param)); vos_mem_copy(buf, (u_int8_t *)resp_event, sizeof(wmi_vdev_start_response_event_fixed_param)); @@ -8542,6 +8558,38 @@ static bool wma_is_mcc_24G(WMA_HANDLE handle) return false; } +/** + * wma_is_mcc_starting() - Function to check MCC will start or already started + * @handle: WMA handle + * + * This function is used to check MCC will start or already started + * + * Return: True if WMA is in MCC will or already started + * + */ +static bool wma_is_mcc_starting(WMA_HANDLE handle, A_UINT32 starting_mhz) +{ + tp_wma_handle wma_handle = (tp_wma_handle) handle; + int32_t prev_chan = starting_mhz; + int32_t i; + + if (NULL == wma_handle) { + WMA_LOGE("%s: wma_handle is NULL", __func__); + return false; + } + for (i = 0; i < wma_handle->max_bssid; i++) { + if (wma_handle->interfaces[i].handle && + wma_handle->interfaces[i].vdev_up) { + if 
((prev_chan != 0 && + prev_chan != wma_handle->interfaces[i].mhz)) + return true; + else + prev_chan = wma_handle->interfaces[i].mhz; + } + } + return false; +} + /* function : wma_get_buf_start_scan_cmd * Description : * Args : @@ -11825,6 +11873,7 @@ VOS_STATUS wma_vdev_start(tp_wma_handle wma, struct wma_txrx_node *intr = wma->interfaces; tpAniSirGlobal pmac = NULL; struct ath_dfs *dfs; + ol_txrx_pdev_handle pdev = NULL; pmac = (tpAniSirGlobal) vos_get_context(VOS_MODULE_ID_PE, wma->vos_context); @@ -11834,6 +11883,13 @@ VOS_STATUS wma_vdev_start(tp_wma_handle wma, return VOS_STATUS_E_FAILURE; } + pdev = vos_get_context(VOS_MODULE_ID_TXRX, wma->vos_context); + + if (pdev == NULL) { + WMA_LOGE("%s: vdev start failed as pdev is NULL", __func__); + return VOS_STATUS_E_FAILURE; + } + dfs = (struct ath_dfs *)wma->dfs_ic->ic_dfs; WMA_LOGD("%s: Enter isRestart=%d vdev=%d", __func__, isRestart,req->vdev_id); @@ -12037,6 +12093,20 @@ VOS_STATUS wma_vdev_start(tp_wma_handle wma, WMI_VDEV_RESTART_REQUEST_CMDID); } else { + + /* + *Need to put other vdevs to pause to avoid current connection + *credit starvation. 
+ */ + if (wma_is_mcc_starting(wma, chan->mhz)) { + WMA_LOGD("%s, vdev_id: %d, pasue other VDEVs when MCC on", + __func__, cmd->vdev_id); + wdi_in_pdev_pause_other_vdev(pdev, + OL_TXQ_PAUSE_REASON_MCC_VDEV_START, + req->vdev_id); + wma->pause_other_vdev_on_mcc_start = true; + } + WMA_LOGD("%s, vdev_id: %d, unpausing tx_ll_queue at VDEV_START", __func__, cmd->vdev_id); wdi_in_vdev_unpause(wma->interfaces[cmd->vdev_id].handle, @@ -12253,6 +12323,15 @@ void wma_vdev_resp_timer(void *data) OL_TXQ_PAUSE_REASON_VDEV_STOP); wma->interfaces[tgt_req->vdev_id].pause_bitmap |= (1 << PAUSE_TYPE_HOST); + + if (wma->pause_other_vdev_on_mcc_start) { + WMA_LOGD("%s: unpause other vdevs since paused when MCC start", __func__); + wma->pause_other_vdev_on_mcc_start = false; + wdi_in_pdev_unpause_other_vdev(pdev, + OL_TXQ_PAUSE_REASON_MCC_VDEV_START, + tgt_req->vdev_id); + } + if (wmi_unified_vdev_stop_send(wma->wmi_handle, tgt_req->vdev_id)) { WMA_LOGP("%s: %d Failed to send vdev stop", __func__, __LINE__); wma_remove_vdev_req(wma, tgt_req->vdev_id, diff --git a/CORE/SERVICES/WMA/wma.h b/CORE/SERVICES/WMA/wma.h index 0e422ebdc203..ce711dcc2311 100644 --- a/CORE/SERVICES/WMA/wma.h +++ b/CORE/SERVICES/WMA/wma.h @@ -855,6 +855,7 @@ typedef struct wma_handle { struct wma_runtime_pm_context runtime_context; uint32_t fine_time_measurement_cap; bool bpf_enabled; + bool pause_other_vdev_on_mcc_start; }t_wma_handle, *tp_wma_handle; struct wma_target_cap { |
