diff options
| author | Mohit Khanna <mkhanna@qca.qualcomm.com> | 2014-03-17 18:12:44 -0700 |
|---|---|---|
| committer | Akash Patel <c_akashp@qca.qualcomm.com> | 2014-04-19 11:05:34 -0700 |
| commit | b56166702bf1e2e60fc36f23dc66bb7a3ec8d29b (patch) | |
| tree | 823dd27147c5f2f776bbe030d4b50cc8f3fb2881 | |
| parent | 240da39ec8fe9356c55d5bd34650ff249bb106a9 (diff) | |
qcacld-new: Data Path related changes to support SDIO based HIF
The following changes were made-
1. Changed #if to #ifdef for ATH_11AC_TXCOMPACT flag.
2. Added checks for high latency during memory allocation (and
de-allocation) in htt_tx_attach (and htt_tx_detach). SDIO does not need
DMA-coherent memory.
3. Protection for PCI-specific code.
Change-Id: I97b0280790b24c9e137a68bfe16b1732f88218d6
| -rw-r--r-- | CORE/CLD_TXRX/HTT/htt_tx.c | 28 | ||||
| -rw-r--r-- | CORE/CLD_TXRX/TXRX/ol_tx_queue.c | 3 | ||||
| -rw-r--r-- | CORE/SERVICES/HTC/htc.c | 4 |
3 files changed, 25 insertions, 10 deletions
diff --git a/CORE/CLD_TXRX/HTT/htt_tx.c b/CORE/CLD_TXRX/HTT/htt_tx.c index bbcb240ef652..f72deb0b0b9f 100644 --- a/CORE/CLD_TXRX/HTT/htt_tx.c +++ b/CORE/CLD_TXRX/HTT/htt_tx.c @@ -95,9 +95,13 @@ htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems) pool_size = pdev->tx_descs.pool_elems * pdev->tx_descs.size; - pdev->tx_descs.pool_vaddr = adf_os_mem_alloc_consistent( - pdev->osdev, pool_size, &pool_paddr, - adf_os_get_dma_mem_context((&pdev->tx_descs), memctx)); + if (pdev->cfg.is_high_latency) + pdev->tx_descs.pool_vaddr = adf_os_mem_alloc(pdev->osdev, pool_size); + else + pdev->tx_descs.pool_vaddr = + adf_os_mem_alloc_consistent( pdev->osdev, pool_size, &pool_paddr, + adf_os_get_dma_mem_context((&pdev->tx_descs), memctx)); + pdev->tx_descs.pool_paddr = pool_paddr; if (!pdev->tx_descs.pool_vaddr) { @@ -129,14 +133,20 @@ htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems) void htt_tx_detach(struct htt_pdev_t *pdev) { - adf_os_mem_free_consistent( - pdev->osdev, - pdev->tx_descs.pool_elems * pdev->tx_descs.size, /* pool_size */ - pdev->tx_descs.pool_vaddr, - pdev->tx_descs.pool_paddr, - adf_os_get_dma_mem_context((&pdev->tx_descs), memctx)); + if (pdev){ + if (pdev->cfg.is_high_latency) + adf_os_mem_free(pdev->tx_descs.pool_vaddr); + else + adf_os_mem_free_consistent( + pdev->osdev, + pdev->tx_descs.pool_elems * pdev->tx_descs.size, /* pool_size */ + pdev->tx_descs.pool_vaddr, + pdev->tx_descs.pool_paddr, + adf_os_get_dma_mem_context((&pdev->tx_descs), memctx)); + } } + /*--- descriptor allocation functions ---------------------------------------*/ void * diff --git a/CORE/CLD_TXRX/TXRX/ol_tx_queue.c b/CORE/CLD_TXRX/TXRX/ol_tx_queue.c index 4938c5cc33ed..1f655ec15a07 100644 --- a/CORE/CLD_TXRX/TXRX/ol_tx_queue.c +++ b/CORE/CLD_TXRX/TXRX/ol_tx_queue.c @@ -181,7 +181,8 @@ ol_tx_queue_discard( num = pdev->tx_queue.rsrc_threshold_hi - pdev->tx_queue.rsrc_threshold_lo; } - TX_SCHED_DEBUG_PRINT("+%s : %d\n,", __FUNCTION__, pdev->tx_queue.rsrc_cnt); + 
TX_SCHED_DEBUG_PRINT("+%s : %u\n,", __FUNCTION__, + adf_os_atomic_read(&pdev->tx_queue.rsrc_cnt)); while (num > 0) { discarded = ol_tx_sched_discard_select( pdev, (u_int16_t)num, tx_descs, flush_all); diff --git a/CORE/SERVICES/HTC/htc.c b/CORE/SERVICES/HTC/htc.c index 1496c52f660d..c12267c880e9 100644 --- a/CORE/SERVICES/HTC/htc.c +++ b/CORE/SERVICES/HTC/htc.c @@ -774,6 +774,7 @@ void *htc_get_targetdef(HTC_HANDLE htc_handle) void HTCSetTargetToSleep(void *context) { +#ifdef HIF_PCI #if CONFIG_ATH_PCIE_MAX_PERF == 0 #if CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD struct ol_softc *sc = (struct ol_softc *)context; @@ -781,12 +782,15 @@ void HTCSetTargetToSleep(void *context) HIFSetTargetSleep(sc->hif_hdl, true, false); #endif #endif +#endif } void HTCCancelDeferredTargetSleep(void *context) { +#ifdef HIF_PCI #if CONFIG_ATH_PCIE_MAX_PERF == 0 struct ol_softc *sc = (struct ol_softc *)context; HIFCancelDeferredTargetSleep(sc->hif_hdl); #endif +#endif } |
