author     Yun Park <yunp@codeaurora.org>        2018-04-13 16:11:44 -0700
committer  nshrivas <nshrivas@codeaurora.org>    2018-05-15 19:32:53 -0700
commit     01868c3403fc96b36daec8943b3a53e03c5554ae (patch)
tree       ad74a26e26153daf81e98aecb34cd67d84052123
parent     7c948ef863cfbf530c34a706dbe840493049126b (diff)
qcacld-3.0: Dynamic SMMU map/unmap only when IPA enabled
IPA SMMU mapping for RX buffers is needed only when IPA offload and
IPA pipes are enabled. Currently, in the STA-only case where IPA is
not enabled, SMMU map/unmap is still done for RX buffers. So enable
SMMU mapping only when IPA pipes are enabled.

Change-Id: I88db2cc8606bdf4586644a7ffccd0415f85c8241
CRs-Fixed: 2213795
-rw-r--r--  core/dp/htt/htt_rx.c            | 155
-rw-r--r--  core/dp/htt/htt_types.h         |   4
-rw-r--r--  core/dp/ol/inc/ol_htt_rx_api.h  |   3
-rw-r--r--  core/dp/txrx/ol_txrx.c          |   5
-rw-r--r--  core/hdd/src/wlan_hdd_ipa.c     |  60
5 files changed, 159 insertions(+), 68 deletions(-)
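
The change reduces to one pattern repeated across the RX paths: compute a
single ipa_smmu gate from qdf_mem_smmu_s1_enabled(), is_ipa_uc_enabled,
and the new rx_ring.smmu_map flag, then map or unmap each buffer
individually with cds_smmu_map_unmap() instead of batching through an
allocated map table. Below is a minimal standalone C sketch of that gate;
the struct layouts and the smmu_s1_enabled()/smmu_map_unmap() stubs are
illustrative stand-ins for the driver's qdf/cds helpers, not the real
definitions.

#include <stdbool.h>
#include <stddef.h>

/* Illustrative stand-ins for the driver's pdev/ring state. */
struct rx_ring {
        bool smmu_map;          /* true only while IPA pipes are up */
};

struct htt_pdev {
        bool is_ipa_uc_enabled; /* IPA uC offload enabled */
        struct rx_ring rx_ring;
};

/* Hypothetical stubs standing in for qdf_mem_smmu_s1_enabled() and
 * cds_smmu_map_unmap(); the signatures here are assumptions. */
static bool smmu_s1_enabled(void) { return true; }
static void smmu_map_unmap(bool map, void *buf, size_t len)
{ (void)map; (void)buf; (void)len; }

static void rx_buf_smmu_op(struct htt_pdev *pdev, void *buf,
                           size_t len, bool map)
{
        /* Evaluate the full condition once per RX path: SMMU stage-1
         * active, IPA offload enabled, and IPA pipes currently up.
         * In STA-only operation smmu_map stays false, so no SMMU
         * map/unmap work is done at all. */
        bool ipa_smmu = smmu_s1_enabled() &&
                        pdev->is_ipa_uc_enabled &&
                        pdev->rx_ring.smmu_map;

        if (ipa_smmu)
                smmu_map_unmap(map, buf, len); /* one buffer at a time */
}

The per-buffer calls also remove the qdf_mem_map_table_alloc() failure
path, which previously returned early from htt_rx_detach() and the
in-order pop without unmapping anything.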
diff --git a/core/dp/htt/htt_rx.c b/core/dp/htt/htt_rx.c
index e3410315ddf0..11ee196bdcbc 100644
--- a/core/dp/htt/htt_rx.c
+++ b/core/dp/htt/htt_rx.c
@@ -148,10 +148,15 @@ static void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
struct htt_rx_hash_bucket **hash_table;
struct htt_list_node *list_iter = NULL;
qdf_mem_info_t mem_map_table = {0};
+ bool ipa_smmu = false;
if (NULL == pdev->rx_ring.hash_table)
return;
+ if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled &&
+ pdev->rx_ring.smmu_map)
+ ipa_smmu = true;
+
qdf_spin_lock_bh(&(pdev->rx_ring.rx_hash_lock));
hash_table = pdev->rx_ring.hash_table;
pdev->rx_ring.hash_table = NULL;
@@ -166,8 +171,7 @@ static void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
pdev->rx_ring.
listnode_offset);
if (hash_entry->netbuf) {
- if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
- pdev->is_ipa_uc_enabled) {
+ if (ipa_smmu) {
qdf_update_mem_map_table(pdev->osdev,
&mem_map_table,
QDF_NBUF_CB_PADDR(
@@ -489,9 +493,14 @@ static int htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num)
int filled = 0;
int debt_served = 0;
qdf_mem_info_t mem_map_table = {0};
+ bool ipa_smmu = false;
idx = *(pdev->rx_ring.alloc_idx.vaddr);
+ if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled &&
+ pdev->rx_ring.smmu_map)
+ ipa_smmu = true;
+
moretofill:
while (num > 0) {
qdf_dma_addr_t paddr, paddr_marked;
@@ -580,8 +589,7 @@ moretofill:
pdev->rx_ring.buf.netbufs_ring[idx] = rx_netbuf;
}
- if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
- pdev->is_ipa_uc_enabled) {
+ if (ipa_smmu) {
qdf_update_mem_map_table(pdev->osdev, &mem_map_table,
paddr, HTT_RX_BUF_SIZE);
cds_smmu_map_unmap(true, 1, &mem_map_table);
@@ -718,10 +726,16 @@ static inline unsigned int htt_rx_in_order_ring_elems(struct htt_pdev_t *pdev)
void htt_rx_detach(struct htt_pdev_t *pdev)
{
+ bool ipa_smmu = false;
+
qdf_timer_stop(&pdev->rx_ring.refill_retry_timer);
qdf_timer_free(&pdev->rx_ring.refill_retry_timer);
htt_rx_dbg_rxbuf_deinit(pdev);
+ if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled &&
+ pdev->rx_ring.smmu_map)
+ ipa_smmu = true;
+
if (pdev->cfg.is_full_reorder_offload) {
qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
sizeof(uint32_t),
@@ -734,30 +748,18 @@ void htt_rx_detach(struct htt_pdev_t *pdev)
htt_rx_hash_deinit(pdev);
} else {
int sw_rd_idx = pdev->rx_ring.sw_rd_idx.msdu_payld;
- qdf_mem_info_t *mem_map_table = NULL, *mem_info = NULL;
- uint32_t num_unmapped = 0;
-
- if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
- pdev->is_ipa_uc_enabled) {
- mem_map_table = qdf_mem_map_table_alloc(
- pdev->rx_ring.fill_level);
- if (!mem_map_table) {
- qdf_print("%s: Failed to allocate memory for mem map table\n",
- __func__);
- return;
- }
- mem_info = mem_map_table;
- }
+ qdf_mem_info_t mem_map_table = {0};
+
while (sw_rd_idx != *(pdev->rx_ring.alloc_idx.vaddr)) {
- if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
- pdev->is_ipa_uc_enabled) {
- qdf_update_mem_map_table(pdev->osdev, mem_info,
+ if (ipa_smmu) {
+ qdf_update_mem_map_table(pdev->osdev,
+ &mem_map_table,
QDF_NBUF_CB_PADDR(
pdev->rx_ring.buf.netbufs_ring[
sw_rd_idx]),
HTT_RX_BUF_SIZE);
- mem_info++;
- num_unmapped++;
+ cds_smmu_map_unmap(false, 1,
+ &mem_map_table);
}
#ifdef DEBUG_DMA_DONE
qdf_nbuf_unmap(pdev->osdev,
@@ -777,13 +779,6 @@ void htt_rx_detach(struct htt_pdev_t *pdev)
}
qdf_mem_free(pdev->rx_ring.buf.netbufs_ring);
- if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
- pdev->is_ipa_uc_enabled) {
- if (num_unmapped)
- cds_smmu_map_unmap(false, num_unmapped,
- mem_map_table);
- qdf_mem_free(mem_map_table);
- }
}
qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
@@ -2357,9 +2352,9 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
struct htt_host_rx_desc_base *rx_desc;
enum rx_pkt_fate status = RX_PKT_FATE_SUCCESS;
qdf_dma_addr_t paddr;
- qdf_mem_info_t *mem_map_table = NULL, *mem_info = NULL;
- uint32_t num_unmapped = 0;
+ qdf_mem_info_t mem_map_table = {0};
int ret = 1;
+ bool ipa_smmu = false;
HTT_ASSERT1(htt_rx_in_order_ring_elems(pdev) != 0);
@@ -2375,15 +2370,11 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
/* Get the total number of MSDUs */
msdu_count = HTT_RX_IN_ORD_PADDR_IND_MSDU_CNT_GET(*(msg_word + 1));
HTT_RX_CHECK_MSDU_COUNT(msdu_count);
- if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled) {
- mem_map_table = qdf_mem_map_table_alloc(msdu_count);
- if (!mem_map_table) {
- qdf_print("%s: Failed to allocate memory for mem map table\n",
- __func__);
- return 0;
- }
- mem_info = mem_map_table;
- }
+
+ if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled &&
+ pdev->rx_ring.smmu_map)
+ ipa_smmu = true;
+
ol_rx_update_histogram_stats(msdu_count, frag_ind, offload_ind);
htt_rx_dbg_rxbuf_httrxind(pdev, msdu_count);
@@ -2394,7 +2385,7 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
msg_word);
*head_msdu = *tail_msdu = NULL;
ret = 0;
- goto free_mem_map_table;
+ goto end;
}
paddr = htt_rx_in_ord_paddr_get(msg_word);
@@ -2405,17 +2396,15 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
*tail_msdu = NULL;
ret = 0;
pdev->rx_ring.pop_fail_cnt++;
- goto free_mem_map_table;
+ goto end;
}
while (msdu_count > 0) {
- if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
- pdev->is_ipa_uc_enabled) {
- qdf_update_mem_map_table(pdev->osdev, mem_info,
+ if (ipa_smmu) {
+ qdf_update_mem_map_table(pdev->osdev, &mem_map_table,
QDF_NBUF_CB_PADDR(msdu),
HTT_RX_BUF_SIZE);
- mem_info++;
- num_unmapped++;
+ cds_smmu_map_unmap(false, 1, &mem_map_table);
}
/*
@@ -2496,11 +2485,11 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
if (!prev) {
*head_msdu = *tail_msdu = NULL;
ret = 0;
- goto free_mem_map_table;
+ goto end;
}
*tail_msdu = prev;
qdf_nbuf_set_next(prev, NULL);
- goto free_mem_map_table;
+ goto end;
} else { /* if this is not the last msdu */
/* get the next msdu */
msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
@@ -2512,7 +2501,7 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
*tail_msdu = NULL;
ret = 0;
pdev->rx_ring.pop_fail_cnt++;
- goto free_mem_map_table;
+ goto end;
}
/* if this is not the first msdu, update the
@@ -2545,7 +2534,7 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
*tail_msdu = NULL;
pdev->rx_ring.pop_fail_cnt++;
ret = 0;
- goto free_mem_map_table;
+ goto end;
}
qdf_nbuf_set_next(msdu, next);
prev = msdu;
@@ -2556,13 +2545,7 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
}
}
-free_mem_map_table:
- if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled) {
- if (num_unmapped)
- cds_smmu_map_unmap(false, num_unmapped,
- mem_map_table);
- qdf_mem_free(mem_map_table);
- }
+end:
return ret;
}
#endif
@@ -3969,6 +3952,59 @@ int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev)
htt_rx_ipa_uc_free_wdi2_rsc(pdev);
return 0;
}
+
+static int htt_rx_hash_smmu_map(bool map, struct htt_pdev_t *pdev)
+{
+ uint32_t i;
+ struct htt_rx_hash_entry *hash_entry;
+ struct htt_rx_hash_bucket **hash_table;
+ struct htt_list_node *list_iter = NULL;
+ qdf_mem_info_t mem_map_table = {0};
+
+ qdf_spin_lock_bh(&(pdev->rx_ring.rx_hash_lock));
+ hash_table = pdev->rx_ring.hash_table;
+
+ for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
+ /* Map/unmap the hash entries in hash bucket i */
+ list_iter = hash_table[i]->listhead.next;
+ while (list_iter != &hash_table[i]->listhead) {
+ hash_entry =
+ (struct htt_rx_hash_entry *)((char *)list_iter -
+ pdev->rx_ring.
+ listnode_offset);
+ if (hash_entry->netbuf) {
+ qdf_update_mem_map_table(pdev->osdev,
+ &mem_map_table,
+ QDF_NBUF_CB_PADDR(
+ hash_entry->netbuf),
+ HTT_RX_BUF_SIZE);
+ cds_smmu_map_unmap(map, 1, &mem_map_table);
+ }
+ list_iter = list_iter->next;
+ }
+ }
+ qdf_spin_unlock_bh(&(pdev->rx_ring.rx_hash_lock));
+
+ return 0;
+}
+
+int htt_rx_hash_smmu_map_update(struct htt_pdev_t *pdev, bool map)
+{
+ int ret;
+
+ if (NULL == pdev->rx_ring.hash_table)
+ return 0;
+
+ if (!qdf_mem_smmu_s1_enabled(pdev->osdev) || !pdev->is_ipa_uc_enabled)
+ return 0;
+
+ qdf_spin_lock_bh(&(pdev->rx_ring.refill_lock));
+ pdev->rx_ring.smmu_map = map;
+ ret = htt_rx_hash_smmu_map(map, pdev);
+ qdf_spin_unlock_bh(&(pdev->rx_ring.refill_lock));
+
+ return ret;
+}
#endif /* IPA_OFFLOAD */
/**
@@ -4021,4 +4057,3 @@ void htt_deregister_rx_pkt_dump_callback(struct htt_pdev_t *pdev)
}
pdev->rx_pkt_dump_cb = NULL;
}
-
diff --git a/core/dp/htt/htt_types.h b/core/dp/htt/htt_types.h
index 65ad008dbf4e..f775379c8b69 100644
--- a/core/dp/htt/htt_types.h
+++ b/core/dp/htt/htt_types.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011, 2014-2018 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -359,6 +359,8 @@ struct htt_pdev_t {
qdf_spinlock_t rx_hash_lock;
struct htt_rx_hash_bucket **hash_table;
uint32_t listnode_offset;
+
+ bool smmu_map;
} rx_ring;
#ifdef CONFIG_HL_SUPPORT
int rx_desc_size_hl;
diff --git a/core/dp/ol/inc/ol_htt_rx_api.h b/core/dp/ol/inc/ol_htt_rx_api.h
index 2d6698a9af6c..61e7e9343ea1 100644
--- a/core/dp/ol/inc/ol_htt_rx_api.h
+++ b/core/dp/ol/inc/ol_htt_rx_api.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -889,4 +889,5 @@ htt_rx_offload_paddr_msdu_pop_ll(htt_pdev_handle pdev,
uint32_t htt_rx_amsdu_rx_in_order_get_pktlog(qdf_nbuf_t rx_ind_msg);
+int htt_rx_hash_smmu_map_update(struct htt_pdev_t *pdev, bool map);
#endif /* _OL_HTT_RX_API__H_ */
diff --git a/core/dp/txrx/ol_txrx.c b/core/dp/txrx/ol_txrx.c
index 7dee9d7ffd83..8b6adcd73bd4 100644
--- a/core/dp/txrx/ol_txrx.c
+++ b/core/dp/txrx/ol_txrx.c
@@ -5957,3 +5957,8 @@ QDF_STATUS ol_txrx_set_wisa_mode(ol_txrx_vdev_handle vdev, bool enable)
vdev->is_wisa_mode_enable = enable;
return QDF_STATUS_SUCCESS;
}
+
+int ol_txrx_rx_hash_smmu_map(ol_txrx_pdev_handle pdev, bool map)
+{
+ return htt_rx_hash_smmu_map_update(pdev->htt_pdev, map);
+}
diff --git a/core/hdd/src/wlan_hdd_ipa.c b/core/hdd/src/wlan_hdd_ipa.c
index 5fe300b90808..f95ebb0a2e6b 100644
--- a/core/hdd/src/wlan_hdd_ipa.c
+++ b/core/hdd/src/wlan_hdd_ipa.c
@@ -1356,12 +1356,26 @@ static int hdd_ipa_wdi_dereg_intf(struct hdd_ipa_priv *hdd_ipa,
static int hdd_ipa_wdi_enable_pipes(struct hdd_ipa_priv *hdd_ipa)
{
+ struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
int ret;
+ /* Map IPA SMMU for all Rx hash table entries */
+ ret = ol_txrx_rx_hash_smmu_map(pdev, true);
+ if (ret) {
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_ERROR,
+ "IPA SMMU map failed ret=%d", ret);
+ return ret;
+ }
+
ret = ipa_wdi_enable_pipes();
if (ret) {
HDD_IPA_LOG(QDF_TRACE_LEVEL_ERROR,
- "ipa_wdi_enable_pipes failed ret=%d", ret);
+ "ipa_wdi_enable_pipes failed ret=%d", ret);
+
+ if (ol_txrx_rx_hash_smmu_map(pdev, false)) {
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_ERROR,
+ "IPA SMMU unmap failed");
+ }
return ret;
}
@@ -1370,12 +1384,21 @@ static int hdd_ipa_wdi_enable_pipes(struct hdd_ipa_priv *hdd_ipa)
static int hdd_ipa_wdi_disable_pipes(struct hdd_ipa_priv *hdd_ipa)
{
+ struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
int ret;
ret = ipa_wdi_disable_pipes();
if (ret) {
HDD_IPA_LOG(QDF_TRACE_LEVEL_ERROR,
- "ipa_wdi_disable_pipes failed ret=%d", ret);
+ "ipa_wdi_disable_pipes failed ret=%d", ret);
+ return ret;
+ }
+
+ /* Unmap IPA SMMU for all Rx hash table entries */
+ ret = ol_txrx_rx_hash_smmu_map(pdev, false);
+ if (ret) {
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_ERROR,
+ "IPA SMMU unmap failed");
return ret;
}
@@ -2169,8 +2192,16 @@ static int hdd_ipa_wdi_dereg_intf(struct hdd_ipa_priv *hdd_ipa,
static int hdd_ipa_wdi_enable_pipes(struct hdd_ipa_priv *hdd_ipa)
{
+ struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
int result;
+ /* Map IPA SMMU for all Rx hash table entries */
+ result = ol_txrx_rx_hash_smmu_map(pdev, true);
+ if (result) {
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_ERROR,
+ "IPA SMMU map failed ret=%d", result);
+ return result;
+ }
/* ACTIVATE TX PIPE */
HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG,
"Enable TX PIPE(tx_pipe_handle=%d)",
@@ -2180,7 +2211,7 @@ static int hdd_ipa_wdi_enable_pipes(struct hdd_ipa_priv *hdd_ipa)
HDD_IPA_LOG(QDF_TRACE_LEVEL_ERROR,
"Enable TX PIPE fail, code %d",
result);
- return result;
+ goto smmu_unmap;
}
result = ipa_resume_wdi_pipe(hdd_ipa->tx_pipe_handle);
@@ -2188,7 +2219,7 @@ static int hdd_ipa_wdi_enable_pipes(struct hdd_ipa_priv *hdd_ipa)
HDD_IPA_LOG(QDF_TRACE_LEVEL_ERROR,
"Resume TX PIPE fail, code %d",
result);
- return result;
+ goto smmu_unmap;
}
/* ACTIVATE RX PIPE */
@@ -2200,7 +2231,7 @@ static int hdd_ipa_wdi_enable_pipes(struct hdd_ipa_priv *hdd_ipa)
HDD_IPA_LOG(QDF_TRACE_LEVEL_ERROR,
"Enable RX PIPE fail, code %d",
result);
- return result;
+ goto smmu_unmap;
}
result = ipa_resume_wdi_pipe(hdd_ipa->rx_pipe_handle);
@@ -2208,14 +2239,23 @@ static int hdd_ipa_wdi_enable_pipes(struct hdd_ipa_priv *hdd_ipa)
HDD_IPA_LOG(QDF_TRACE_LEVEL_ERROR,
"Resume RX PIPE fail, code %d",
result);
- return result;
+ goto smmu_unmap;
}
return 0;
+
+smmu_unmap:
+ if (ol_txrx_rx_hash_smmu_map(pdev, false)) {
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_ERROR,
+ "IPA SMMU unmap failed");
+ }
+
+ return result;
}
static int hdd_ipa_wdi_disable_pipes(struct hdd_ipa_priv *hdd_ipa)
{
+ struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
int result;
HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "Disable RX PIPE");
@@ -2248,6 +2288,14 @@ static int hdd_ipa_wdi_disable_pipes(struct hdd_ipa_priv *hdd_ipa)
return result;
}
+ /* Unmap IPA SMMU for all Rx hash table entries */
+ result = ol_txrx_rx_hash_smmu_map(pdev, false);
+ if (result) {
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_ERROR,
+ "IPA SMMU unmap failed");
+ return result;
+ }
+
return 0;
}
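
On the HDD side the ordering is symmetric: the enable paths map the RX
hash table before bringing the WDI pipes up and roll the mapping back if
any pipe operation fails, while the disable paths unmap only after the
pipes are down. A condensed sketch of that goto-based cleanup shape, with
hypothetical rx_hash_smmu_map()/enable_pipes() stand-ins for
ol_txrx_rx_hash_smmu_map() and the ipa_wdi_*/ipa_*_wdi_pipe() calls in
the diff:

#include <stdbool.h>

/* Hypothetical stubs; return 0 on success like the driver's calls. */
static int rx_hash_smmu_map(bool map) { (void)map; return 0; }
static int enable_pipes(void) { return 0; }

static int enable_with_smmu_map(void)
{
        int ret;

        ret = rx_hash_smmu_map(true);   /* map before pipes come up */
        if (ret)
                return ret;

        ret = enable_pipes();
        if (ret)
                goto smmu_unmap;        /* roll back on pipe failure */

        return 0;

smmu_unmap:
        rx_hash_smmu_map(false);        /* best-effort rollback */
        return ret;
}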