diff options
author:    Houston Hoffman <hhoffman@codeaurora.org>  2017-02-24 17:37:46 -0800
committer: qcabuildsw <qcabuildsw@localhost>          2017-02-27 21:45:25 -0800
commit:    233d254a95aaf2d1b06ebf384203da350b5dccb2 (patch)
tree:      c175488828e54648713e39ed843c3deb8bb9ad60
parent:    d3784b6cbb4388cac8d8c69b8bc358db2803ec8a (diff)
qcacmn: Improve recv buffer refill failure logging
Make it easier to identify when nbuf allocation starvation
in hif is the root cause of a crash, by adding events to
the hif_ce_desc_history buffer.
Change-Id: Id3b919f17ee79770f5fa81b389f9a28a0670cfa2
CRs-Fixed: 2011015
 hif/src/ce/ce_internal.h |  4 +
 hif/src/ce/ce_main.c     | 73 ++++++++++++++++++++++++++++------------------
 2 files changed, 47 insertions(+), 30 deletions(-)
diff --git a/hif/src/ce/ce_internal.h b/hif/src/ce/ce_internal.h index c9da783c3484..c0244b07da8d 100644 --- a/hif/src/ce/ce_internal.h +++ b/hif/src/ce/ce_internal.h @@ -359,6 +359,10 @@ enum hif_ce_event_type { NAPI_POLL_ENTER, NAPI_COMPLETE, NAPI_POLL_EXIT, + + HIF_RX_NBUF_ALLOC_FAILURE = 0x20, + HIF_RX_NBUF_MAP_FAILURE, + HIF_RX_NBUF_ENQUEUE_FAILURE, }; void ce_init_ce_desc_event_log(int ce_id, int size); diff --git a/hif/src/ce/ce_main.c b/hif/src/ce/ce_main.c index 52e745d454eb..cdcb6eeb9e69 100644 --- a/hif/src/ce/ce_main.c +++ b/hif/src/ce/ce_main.c @@ -1639,6 +1639,34 @@ void hif_dump_pipe_debug_count(struct hif_softc *scn) } } +static void hif_post_recv_buffers_failure(struct HIF_CE_pipe_info *pipe_info, + void *nbuf, uint32_t *error_cnt, + enum hif_ce_event_type failure_type, + const char *failure_type_string) +{ + int bufs_needed_tmp = atomic_inc_return(&pipe_info->recv_bufs_needed); + struct CE_state *CE_state = (struct CE_state *)pipe_info->ce_hdl; + struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state); + int ce_id = CE_state->id; + uint32_t error_cnt_tmp; + + qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); + error_cnt_tmp = ++(*error_cnt); + qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); + HIF_ERROR("%s: pipe_num %d, needed %d, err_cnt = %u, fail_type = %s", + __func__, pipe_info->pipe_num, bufs_needed_tmp, error_cnt_tmp, + failure_type_string); + hif_record_ce_desc_event(scn, ce_id, failure_type, + NULL, nbuf, bufs_needed_tmp); + /* if we fail to allocate the last buffer for an rx pipe, + * there is no trigger to refill the ce and we will + * eventually crash + */ + if (bufs_needed_tmp == CE_state->dest_ring->nentries - 1) + QDF_ASSERT(0); +} + + static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info) { struct CE_handle *ce_hdl; @@ -1666,16 +1694,10 @@ static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info) nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false); if (!nbuf) { - 
qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); - pipe_info->nbuf_alloc_err_count++; - qdf_spin_unlock_bh( - &pipe_info->recv_bufs_needed_lock); - HIF_ERROR( - "%s buf alloc error [%d] needed %d, nbuf_alloc_err_count = %u", - __func__, pipe_info->pipe_num, - atomic_read(&pipe_info->recv_bufs_needed), - pipe_info->nbuf_alloc_err_count); - atomic_inc(&pipe_info->recv_bufs_needed); + hif_post_recv_buffers_failure(pipe_info, nbuf, + &pipe_info->nbuf_alloc_err_count, + HIF_RX_NBUF_ALLOC_FAILURE, + "HIF_RX_NBUF_ALLOC_FAILURE"); return 1; } @@ -1689,16 +1711,11 @@ static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info) QDF_DMA_FROM_DEVICE); if (unlikely(ret != QDF_STATUS_SUCCESS)) { - qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); - pipe_info->nbuf_dma_err_count++; - qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); - HIF_ERROR( - "%s buf alloc error [%d] needed %d, nbuf_dma_err_count = %u", - __func__, pipe_info->pipe_num, - atomic_read(&pipe_info->recv_bufs_needed), - pipe_info->nbuf_dma_err_count); + hif_post_recv_buffers_failure(pipe_info, nbuf, + &pipe_info->nbuf_dma_err_count, + HIF_RX_NBUF_MAP_FAILURE, + "HIF_RX_NBUF_MAP_FAILURE"); qdf_nbuf_free(nbuf); - atomic_inc(&pipe_info->recv_bufs_needed); return 1; } @@ -1708,18 +1725,14 @@ static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info) buf_sz, DMA_FROM_DEVICE); status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data); QDF_ASSERT(status == QDF_STATUS_SUCCESS); - if (status != EOK) { - qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock); - pipe_info->nbuf_ce_enqueue_err_count++; - qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); - HIF_ERROR( - "%s buf alloc error [%d] needed %d, nbuf_alloc_err_count = %u", - __func__, pipe_info->pipe_num, - atomic_read(&pipe_info->recv_bufs_needed), - pipe_info->nbuf_ce_enqueue_err_count); + if (unlikely(status != EOK)) { + hif_post_recv_buffers_failure(pipe_info, nbuf, + &pipe_info->nbuf_ce_enqueue_err_count, + 
HIF_RX_NBUF_ENQUEUE_FAILURE, + "HIF_RX_NBUF_ENQUEUE_FAILURE"); + qdf_nbuf_unmap_single(scn->qdf_dev, nbuf, QDF_DMA_FROM_DEVICE); - atomic_inc(&pipe_info->recv_bufs_needed); qdf_nbuf_free(nbuf); return 1; } @@ -1735,7 +1748,7 @@ static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info) pipe_info->nbuf_dma_err_count - bufs_posted : 0; pipe_info->nbuf_ce_enqueue_err_count = (pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ? - pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0; + pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0; qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock); |
