diff options
| author | Vasanthakumar Thiagarajan <vthiagar@qti.qualcomm.com> | 2014-03-08 15:48:29 +0530 |
|---|---|---|
| committer | Nandini Suresh <snandini@qca.qualcomm.com> | 2014-03-08 23:00:51 -0800 |
| commit | 0f00b7fb3fef4068c08fd8c8fe6341aa629dd331 (patch) | |
| tree | 031812590f52df8f1a787dc0fb523ea65b2a056e | |
| parent | 4c6523f0890eb6e65975639b51ac72e7dbb32934 (diff) | |
qcacld/tlshim: Fix race in accessing stainfo->data_rx
There is a race in accessing the data_rx callback: WLANTL_ClearSTAClient()
is called from the MCThread context while tlshim_data_rx_cb(), running in
the TlshimRxThread context, is reading data_rx. The device crashes when
data_rx is set to NULL by WLANTL_ClearSTAClient() mid-access. Make the
updates and reads of sta_info->registered and sta_info->data_rx atomic by
guarding them with a per-station spinlock.
Change-Id: I495f8a7c1143e6d2ea177d8c873e6783d344f9ed
CRs-Fixed: 628444
| -rw-r--r-- | CORE/CLD_TXRX/TLSHIM/tl_shim.c | 51 | ||||
| -rw-r--r-- | CORE/CLD_TXRX/TLSHIM/tl_shim.h | 2 | ||||
| -rw-r--r-- | CORE/HDD/src/wlan_hdd_softap_tx_rx.c | 1 | ||||
| -rw-r--r-- | CORE/HDD/src/wlan_hdd_tx_rx.c | 1 |
4 files changed, 41 insertions, 14 deletions
diff --git a/CORE/CLD_TXRX/TLSHIM/tl_shim.c b/CORE/CLD_TXRX/TLSHIM/tl_shim.c index db270f9de0f3..d023e49ffacf 100644 --- a/CORE/CLD_TXRX/TLSHIM/tl_shim.c +++ b/CORE/CLD_TXRX/TLSHIM/tl_shim.c @@ -648,10 +648,18 @@ static void tl_shim_flush_rx_frames(void *vos_ctx, struct tlshim_sta_info *sta_info = &tl_shim->sta_info[sta_id]; struct tlshim_buf *cache_buf, *tmp; VOS_STATUS ret; + WLANTL_STARxCBType data_rx = NULL; if (test_and_set_bit(TLSHIM_FLUSH_CACHE_IN_PROGRESS, &sta_info->flags)) return; + adf_os_spin_lock_bh(&sta_info->stainfo_lock); + if (sta_info->registered) + data_rx = sta_info->data_rx; + else + drop = true; + adf_os_spin_unlock_bh(&sta_info->stainfo_lock); + adf_os_spin_lock_bh(&tl_shim->bufq_lock); list_for_each_entry_safe(cache_buf, tmp, &sta_info->cached_bufq, list) { @@ -661,8 +669,7 @@ static void tl_shim_flush_rx_frames(void *vos_ctx, adf_nbuf_free(cache_buf->buf); else { /* Flush the cached frames to HDD */ - ret = sta_info->data_rx(vos_ctx, cache_buf->buf, - sta_id); + ret = data_rx(vos_ctx, cache_buf->buf, sta_id); if (ret != VOS_STATUS_SUCCESS) adf_nbuf_free(cache_buf->buf); } @@ -680,13 +687,19 @@ static void tlshim_data_rx_cb(struct txrx_tl_shim_ctx *tl_shim, struct tlshim_sta_info *sta_info; adf_nbuf_t buf, next_buf; VOS_STATUS ret; + WLANTL_STARxCBType data_rx = NULL; if (unlikely(!vos_ctx)) goto free_buf; sta_info = &tl_shim->sta_info[staid]; - if (unlikely(!sta_info->registered)) + adf_os_spin_lock_bh(&sta_info->stainfo_lock); + if (unlikely(!sta_info->registered)) { + adf_os_spin_unlock_bh(&sta_info->stainfo_lock); goto free_buf; + } + data_rx = sta_info->data_rx; + adf_os_spin_unlock_bh(&sta_info->stainfo_lock); adf_os_spin_lock_bh(&tl_shim->bufq_lock); if (!list_empty(&sta_info->cached_bufq)) { @@ -700,7 +713,7 @@ static void tlshim_data_rx_cb(struct txrx_tl_shim_ctx *tl_shim, buf = buf_list; while (buf) { next_buf = adf_nbuf_queue_next(buf); - ret = sta_info->data_rx(vos_ctx, buf, staid); + ret = data_rx(vos_ctx, buf, staid); if (ret 
!= VOS_STATUS_SUCCESS) { TLSHIM_LOGE("Frame Rx to HDD failed"); adf_nbuf_free(buf); @@ -732,6 +745,7 @@ static void tlshim_data_rx_handler(void *context, u_int16_t staid, #endif struct tlshim_sta_info *sta_info; adf_nbuf_t buf, next_buf; + WLANTL_STARxCBType data_rx = NULL; if (staid >= WLAN_MAX_STA_COUNT) { TLSHIM_LOGE("Invalid sta id :%d", staid); @@ -741,12 +755,17 @@ static void tlshim_data_rx_handler(void *context, u_int16_t staid, tl_shim = (struct txrx_tl_shim_ctx *) context; sta_info = &tl_shim->sta_info[staid]; + adf_os_spin_lock_bh(&sta_info->stainfo_lock); + if (sta_info->registered) + data_rx = sta_info->data_rx; + adf_os_spin_unlock_bh(&sta_info->stainfo_lock); + /* * If there is a data frame from peer before the peer is * registered for data service, enqueue them on to pending queue * which will be flushed to HDD once that station is registered. */ - if (!sta_info->registered) { + if (!data_rx) { struct tlshim_buf *cache_buf; buf = rx_buf_list; while (buf) { @@ -764,7 +783,7 @@ static void tlshim_data_rx_handler(void *context, u_int16_t staid, } buf = next_buf; } - } else if (sta_info->data_rx) { /* Send rx packet to HDD if there is no frame pending in cached_bufq */ + } else { /* Send rx packet to HDD if there is no frame pending in cached_bufq */ /* Suspend frames flush from timer */ /* * TODO: Need to see if acquiring/releasing lock even when @@ -779,7 +798,7 @@ static void tlshim_data_rx_handler(void *context, u_int16_t staid, /* Flush the cached frames to HDD before passing new rx frame */ tl_shim_flush_rx_frames(vos_ctx, tl_shim, staid, 0); - ret = sta_info->data_rx(vos_ctx, rx_buf_list, staid); + ret = data_rx(vos_ctx, rx_buf_list, staid); if (ret == VOS_STATUS_E_INVAL) { #endif @@ -833,8 +852,7 @@ static void tlshim_data_rx_handler(void *context, u_int16_t staid, #ifdef IPA_OFFLOAD } #endif - } else /* This should not happen if sta_info->registered is true */ - goto drop_rx_buf; + } return; @@ -1442,7 +1460,6 @@ VOS_STATUS 
WLANTL_ClearSTAClient(void *vos_ctx, u_int8_t sta_id) TLSHIM_LOGE("%s: Failed to get TLSHIM context", __func__); return VOS_STATUS_E_FAILURE; } - tl_shim->sta_info[sta_id].registered = 0; #ifdef QCA_CONFIG_SMP { @@ -1459,7 +1476,10 @@ VOS_STATUS WLANTL_ClearSTAClient(void *vos_ctx, u_int8_t sta_id) tl_shim->sta_info[sta_id].suspend_flush = 0; adf_os_spin_unlock_bh(&tl_shim->bufq_lock); + adf_os_spin_lock_bh(&tl_shim->sta_info[sta_id].stainfo_lock); + tl_shim->sta_info[sta_id].registered = 0; tl_shim->sta_info[sta_id].data_rx = NULL; + adf_os_spin_unlock_bh(&tl_shim->sta_info[sta_id].stainfo_lock); return VOS_STATUS_SUCCESS; } @@ -1479,6 +1499,7 @@ VOS_STATUS WLANTL_RegisterSTAClient(void *vos_ctx, struct txrx_tl_shim_ctx *tl_shim; struct ol_txrx_peer_t *peer; ol_txrx_peer_update_param_t param; + struct tlshim_sta_info *sta_info; ENTER(); if (sta_desc->ucSTAId >= WLAN_MAX_STA_COUNT) { @@ -1496,8 +1517,13 @@ VOS_STATUS WLANTL_RegisterSTAClient(void *vos_ctx, TLSHIM_LOGE("tl_shim is NULL"); return VOS_STATUS_E_FAULT; } - tl_shim->sta_info[sta_desc->ucSTAId].data_rx = rxcb; - tl_shim->sta_info[sta_desc->ucSTAId].registered = true; + + sta_info = &tl_shim->sta_info[sta_desc->ucSTAId]; + adf_os_spin_lock_bh(&sta_info->stainfo_lock); + sta_info->data_rx = rxcb; + sta_info->registered = true; + adf_os_spin_unlock_bh(&sta_info->stainfo_lock); + param.qos_capable = sta_desc->ucQosEnabled; wdi_in_peer_update(peer->vdev, peer->mac_addr.raw, ¶m, ol_txrx_peer_update_qos_capable); @@ -1594,6 +1620,7 @@ VOS_STATUS WLANTL_Open(void *vos_ctx, WLANTL_ConfigInfoType *tl_cfg) for (i = 0; i < WLAN_MAX_STA_COUNT; i++) { tl_shim->sta_info[i].suspend_flush = 0; + adf_os_spinlock_init(&tl_shim->sta_info[i].stainfo_lock); tl_shim->sta_info[i].flags = 0; INIT_LIST_HEAD(&tl_shim->sta_info[i].cached_bufq); } diff --git a/CORE/CLD_TXRX/TLSHIM/tl_shim.h b/CORE/CLD_TXRX/TLSHIM/tl_shim.h index 31084662b82b..4029179e7743 100644 --- a/CORE/CLD_TXRX/TLSHIM/tl_shim.h +++ 
b/CORE/CLD_TXRX/TLSHIM/tl_shim.h @@ -51,6 +51,8 @@ struct tlshim_sta_info { bool registered; bool suspend_flush; WLANTL_STARxCBType data_rx; + /* To protect stainfo data like registered and data_rx */ + adf_os_spinlock_t stainfo_lock; struct list_head cached_bufq; unsigned long flags; }; diff --git a/CORE/HDD/src/wlan_hdd_softap_tx_rx.c b/CORE/HDD/src/wlan_hdd_softap_tx_rx.c index 880ddb17152c..0579575d32dc 100644 --- a/CORE/HDD/src/wlan_hdd_softap_tx_rx.c +++ b/CORE/HDD/src/wlan_hdd_softap_tx_rx.c @@ -1642,7 +1642,6 @@ VOS_STATUS hdd_softap_rx_packet_cbk(v_VOID_t *vosContext, pAdapter = pHddCtx->sta_to_adapter[staId]; if( NULL == pAdapter ) { - VOS_ASSERT(0); return VOS_STATUS_E_FAILURE; } diff --git a/CORE/HDD/src/wlan_hdd_tx_rx.c b/CORE/HDD/src/wlan_hdd_tx_rx.c index cf6b2e84521f..d2ae468b6221 100644 --- a/CORE/HDD/src/wlan_hdd_tx_rx.c +++ b/CORE/HDD/src/wlan_hdd_tx_rx.c @@ -2002,7 +2002,6 @@ VOS_STATUS hdd_rx_packet_cbk(v_VOID_t *vosContext, pAdapter = pHddCtx->sta_to_adapter[staId]; if( NULL == pAdapter ) { - VOS_ASSERT(0); return VOS_STATUS_E_FAILURE; } ++pAdapter->hdd_stats.hddTxRxStats.rxChains; |
