author    Komal Seelam <kseelam@qti.qualcomm.com>  2014-06-13 11:13:10 +0530
committer Pitani Venkata Rajesh Kumar <c_vpitan@qti.qualcomm.com>  2014-06-15 00:39:52 +0530
commit    0676b09998a6d2ca3a628eee6478bcb4a4e6d599 (patch)
tree      02f412d8f78e98a608eefc4dedb5dab48497b820
parent    ab0d891925557b699f79a2c709bd058af767dd3f (diff)
qcald: Fix race between hif_pci_suspend & WMI_TX_PAUSE_EVENTID evt_handler
The target asserted while processing an HTT packet after the WMI WOW_ENABLE command had been processed. The timestamps of the host sending WMI_WOW_ENABLE_CMDID to the firmware and receiving WMI_TX_PAUSE_EVENTID are exactly the same: the host queued the pending HTT packet from the ol tx queues to the firmware after sending the wow_enable WMI command, which resulted in the target assert.

There can also be a data stall if the queues are paused, hif_pci_suspend fails, and the queues are left in the paused state. Resume the queues in cfg80211 resume as well; the logic ensures we do not unpause twice because the pause bitmaps are properly maintained, and we can safely unpause the queues even if the firmware last sent an unpause event to the host for a vdev.

Fix the race by synchronizing the kernel worker thread that runs the WMI_TX_PAUSE_EVENTID event handler with hif_pci_suspend.

Change-Id: I18392bdca3f547a49da0be80965e14e046c8d3cb
CRs-Fixed: 679197
-rw-r--r--  CORE/SERVICES/WMA/wma.c  | 74
-rw-r--r--  CORE/SERVICES/WMA/wma.h  |  1
2 files changed, 54 insertions(+), 21 deletions(-)
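For illustration only, the following is a minimal, self-contained C sketch of the guard pattern the patch introduces: the suspend path raises an atomic flag before the bus is suspended, the tx pause/unpause event handler becomes a no-op while the flag is set, and the resume path clears the flag and unpauses any vdev left paused. It uses C11 stdatomic and hypothetical names (suspend_flag, handle_tx_pause_event, unpause_all_vdevs, struct vdev) rather than the driver's adf_os_* wrappers and wma structures, so it models the logic of the change, not the driver code itself.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the per-vdev pause state kept by the driver. */
struct vdev {
    bool active;
    unsigned pause_bitmap;
};

#define MAX_VDEV 3

static struct vdev vdevs[MAX_VDEV] = { {true, 0x1}, {false, 0}, {true, 0} };

/* Models wma->is_wow_bus_suspended: raised before the bus suspends. */
static atomic_int suspend_flag;

/* Models the tx pause/unpause event handler running in a worker thread. */
static void handle_tx_pause_event(int vdev_id, bool pause)
{
    if (atomic_load(&suspend_flag)) {
        /* Suspend is in progress: touching the tx queues now could race
         * with the bus suspend path, so the event is treated as a no-op. */
        printf("vdev %d: suspend in progress, pause event ignored\n", vdev_id);
        return;
    }
    vdevs[vdev_id].pause_bitmap = pause ? 1u : 0u;
}

/* Models the resume path: clear the flag, then unpause anything left paused. */
static void unpause_all_vdevs(void)
{
    atomic_store(&suspend_flag, 0);
    for (int i = 0; i < MAX_VDEV; i++) {
        if (vdevs[i].active && vdevs[i].pause_bitmap)
            vdevs[i].pause_bitmap = 0;   /* unpause exactly once per vdev */
    }
}

int main(void)
{
    atomic_store(&suspend_flag, 1);   /* suspend request: bus about to suspend */
    handle_tx_pause_event(0, true);   /* late pause event: ignored             */
    unpause_all_vdevs();              /* resume (or failed suspend) path       */
    handle_tx_pause_event(0, true);   /* normal operation again: applied       */
    printf("vdev 0 pause_bitmap = %u\n", vdevs[0].pause_bitmap);
    return 0;
}

Because unpause_all_vdevs() only touches vdevs whose pause_bitmap is non-zero, calling it from both the resume path and the failed-suspend path is harmless, which mirrors the reasoning in the commit message about never unpausing twice.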
diff --git a/CORE/SERVICES/WMA/wma.c b/CORE/SERVICES/WMA/wma.c
index e7b300a8be52..fdc63dcc1728 100644
--- a/CORE/SERVICES/WMA/wma.c
+++ b/CORE/SERVICES/WMA/wma.c
@@ -3316,10 +3316,9 @@ VOS_STATUS WDA_open(v_VOID_t *vos_context, v_VOID_t *os_ctx,
INIT_LIST_HEAD(&wma_handle->vdev_resp_queue);
adf_os_spinlock_init(&wma_handle->vdev_respq_lock);
-
- adf_os_spinlock_init(&wma_handle->vdev_detach_lock);
-
- adf_os_spinlock_init(&wma_handle->roam_preauth_lock);
+ adf_os_spinlock_init(&wma_handle->vdev_detach_lock);
+ adf_os_spinlock_init(&wma_handle->roam_preauth_lock);
+ adf_os_atomic_init(&wma_handle->is_wow_bus_suspended);
/* Register vdev start response event handler */
wmi_unified_register_event_handler(wma_handle->wmi_handle,
@@ -13469,6 +13468,16 @@ static int wma_wow_wakeup_host_event(void *handle, u_int8_t *event,
return 0;
}
+static inline void wma_set_wow_bus_suspend(tp_wma_handle wma, int val) {
+
+ adf_os_atomic_set(&wma->is_wow_bus_suspended, val);
+}
+
+static inline int wma_get_wow_bus_suspend(tp_wma_handle wma) {
+
+ return adf_os_atomic_read(&wma->is_wow_bus_suspended);
+}
+
/* Configures wow wakeup events. */
static VOS_STATUS wma_add_wow_wakeup_event(tp_wma_handle wma,
WOW_WAKE_EVENT_TYPE event,
@@ -13994,6 +14003,28 @@ static bool wma_is_wow_prtn_cached(tp_wma_handle wma, u_int8_t vdev_id)
return false;
}
+/* Unpause all the vdev after resume */
+static void wma_unpause_vdev(tp_wma_handle wma) {
+ int8_t vdev_id;
+ struct wma_txrx_node *iface;
+
+ for (vdev_id = 0; vdev_id < wma->max_bssid; vdev_id++) {
+ if (!wma->interfaces[vdev_id].handle)
+ continue;
+
+ #ifdef QCA_SUPPORT_TXRX_VDEV_PAUSE_LL
+ /* When host resume, by default, unpause all active vdev */
+ if (wma->interfaces[vdev_id].pause_bitmap) {
+ wdi_in_vdev_unpause(wma->interfaces[vdev_id].handle);
+ wma->interfaces[vdev_id].pause_bitmap = 0;
+ }
+ #endif /* QCA_SUPPORT_TXRX_VDEV_PAUSE_LL */
+
+ iface = &wma->interfaces[vdev_id];
+ iface->conn_state = FALSE;
+ }
+}
+
static VOS_STATUS wma_resume_req(tp_wma_handle wma)
{
VOS_STATUS ret = VOS_STATUS_SUCCESS;
@@ -14017,6 +14048,10 @@ static VOS_STATUS wma_resume_req(tp_wma_handle wma)
}
end:
+ /* need to reset if hif_pci_suspend_fails */
+ wma_set_wow_bus_suspend(wma, 0);
+ /* unpause the vdev if left paused and hif_pci_suspend fails */
+ wma_unpause_vdev(wma);
return ret;
}
@@ -14461,6 +14496,11 @@ enable_wow:
send_ready_to_suspend:
wma_send_status_to_suspend_ind(wma, TRUE);
+ /* to handle race between hif_pci_suspend and
+ * unpause/pause tx handler
+ */
+ wma_set_wow_bus_suspend(wma, 1);
+
return VOS_STATUS_SUCCESS;
}
@@ -14529,8 +14569,6 @@ static VOS_STATUS wma_send_host_wakeup_ind_to_fw(tp_wma_handle wma)
int wma_disable_wow_in_fw(WMA_HANDLE handle)
{
tp_wma_handle wma = handle;
- struct wma_txrx_node *iface;
- int8_t vdev_id;
VOS_STATUS ret;
if(!wma->wow.wow_enable || !wma->wow.wow_enable_cmd_sent) {
@@ -14547,21 +14585,10 @@ int wma_disable_wow_in_fw(WMA_HANDLE handle)
wma->wow.wow_enable = FALSE;
wma->wow.wow_enable_cmd_sent = FALSE;
- for (vdev_id = 0; vdev_id < wma->max_bssid; vdev_id++) {
- if (!wma->interfaces[vdev_id].handle)
- continue;
-
- #ifdef QCA_SUPPORT_TXRX_VDEV_PAUSE_LL
- /* When host resume, by default, unpause all active vdev */
- if (wma->interfaces[vdev_id].pause_bitmap) {
- wdi_in_vdev_unpause(wma->interfaces[vdev_id].handle);
- wma->interfaces[vdev_id].pause_bitmap = 0;
- }
- #endif /* QCA_SUPPORT_TXRX_VDEV_PAUSE_LL */
-
- iface = &wma->interfaces[vdev_id];
- iface->conn_state = FALSE;
- }
+ /* To allow the tx pause/unpause events */
+ wma_set_wow_bus_suspend(wma, 0);
+ /* Unpause the vdev as we are resuming */
+ wma_unpause_vdev(wma);
vos_wake_lock_timeout_acquire(&wma->wow_wake_lock, 2000);
@@ -17900,6 +17927,11 @@ static int wma_mcc_vdev_tx_pause_evt_handler(void *handle, u_int8_t *event,
return -EINVAL;
}
+ if (wma_get_wow_bus_suspend(wma)) {
+ WMA_LOGD(" Suspend is in progress: Pause/Unpause Tx is NoOp");
+ return 0;
+ }
+
wmi_event = param_buf->fixed_param;
vdev_map = wmi_event->vdev_map;
/* FW mapped vdev from ID
diff --git a/CORE/SERVICES/WMA/wma.h b/CORE/SERVICES/WMA/wma.h
index 1fdbc50a296c..72748bda046c 100644
--- a/CORE/SERVICES/WMA/wma.h
+++ b/CORE/SERVICES/WMA/wma.h
@@ -646,6 +646,7 @@ typedef struct {
vos_wake_lock_t wow_wake_lock;
int wow_nack;
u_int32_t ap_client_cnt;
+ adf_os_atomic_t is_wow_bus_suspended;
vos_timer_t wma_scan_comp_timer;
scan_timer_info wma_scan_timer_info;