diff options
| author | jitiphil <jitiphil@codeaurora.org> | 2018-07-10 17:45:08 +0530 |
|---|---|---|
| committer | jitiphil <jitiphil@codeaurora.org> | 2018-07-12 12:35:12 +0530 |
| commit | 491b131661759bcbb3a1d4b7fc8d5b77ec24c9be (patch) | |
| tree | d6e5c9846295c5bfac81c7be08217a8cbd5af344 | |
| parent | bb07ae00a1c5e62cf3e56aa92c3d2e60b51f4b71 (diff) | |
qcacld-2.0: Implement descriptor pool for fw stats
The kernel address is used as a cookie to keep track
of stats requests. This address can be disclosed to
the target, leading to a security vulnerability.
Implement a FW stats descriptor pool, and use a
descriptor ID to keep track of stats requests,
instead of the kernel address, to prevent
kernel address leaks.
Change-Id: Ib49150da899c0b9314f614868a90867f4aa92d3d
CRs-Fixed: 2276007
| -rw-r--r-- | CORE/CLD_TXRX/HTT/htt_h2t.c | 8 | ||||
| -rw-r--r-- | CORE/CLD_TXRX/HTT/htt_t2h.c | 3 | ||||
| -rw-r--r-- | CORE/CLD_TXRX/TXRX/ol_txrx.c | 210 | ||||
| -rw-r--r-- | CORE/CLD_TXRX/TXRX/ol_txrx.h | 14 | ||||
| -rw-r--r-- | CORE/CLD_TXRX/TXRX/ol_txrx_types.h | 18 | ||||
| -rw-r--r-- | CORE/SERVICES/COMMON/ol_htt_api.h | 4 | ||||
| -rw-r--r-- | CORE/SERVICES/COMMON/ol_txrx_htt_api.h | 2 |
7 files changed, 240 insertions, 19 deletions
diff --git a/CORE/CLD_TXRX/HTT/htt_h2t.c b/CORE/CLD_TXRX/HTT/htt_h2t.c index 073d45f509e6..bef068525051 100644 --- a/CORE/CLD_TXRX/HTT/htt_h2t.c +++ b/CORE/CLD_TXRX/HTT/htt_h2t.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * @@ -567,7 +567,7 @@ htt_h2t_dbg_stats_get( u_int32_t stats_type_reset_mask, u_int8_t cfg_stat_type, u_int32_t cfg_val, - u_int64_t cookie) + u_int8_t cookie) { struct htt_htc_pkt *pkt; adf_nbuf_t msg; @@ -647,11 +647,11 @@ htt_h2t_dbg_stats_get( /* cookie LSBs */ msg_word++; - *msg_word = cookie & 0xffffffff; + *msg_word = cookie; /* cookie MSBs */ msg_word++; - *msg_word = cookie >> 32; + *msg_word = 0; SET_HTC_PACKET_INFO_TX( &pkt->htc_pkt, diff --git a/CORE/CLD_TXRX/HTT/htt_t2h.c b/CORE/CLD_TXRX/HTT/htt_t2h.c index 9d35a8af5c92..d6110c366db8 100644 --- a/CORE/CLD_TXRX/HTT/htt_t2h.c +++ b/CORE/CLD_TXRX/HTT/htt_t2h.c @@ -375,11 +375,10 @@ htt_t2h_lp_msg_handler(void *context, adf_nbuf_t htt_t2h_msg ) #if TXRX_STATS_LEVEL != TXRX_STATS_LEVEL_OFF case HTT_T2H_MSG_TYPE_STATS_CONF: { - u_int64_t cookie; + u_int8_t cookie; u_int8_t *stats_info_list; cookie = *(msg_word + 1); - cookie |= ((u_int64_t) (*(msg_word + 2))) << 32; stats_info_list = (u_int8_t *) (msg_word + 3); htc_pm_runtime_put(pdev->htc_pdev); diff --git a/CORE/CLD_TXRX/TXRX/ol_txrx.c b/CORE/CLD_TXRX/TXRX/ol_txrx.c index 21efd282a241..2a036175b855 100644 --- a/CORE/CLD_TXRX/TXRX/ol_txrx.c +++ b/CORE/CLD_TXRX/TXRX/ol_txrx.c @@ -383,6 +383,7 @@ ol_txrx_pdev_attach( } TXRX_STATS_INIT(pdev); + ol_txrx_fw_stats_desc_pool_init(pdev, FW_STATS_DESC_POOL_SIZE); TAILQ_INIT(&pdev->vdev_list); TAILQ_INIT(&pdev->req_list); @@ -888,6 +889,7 @@ htt_attach_fail: ol_txrx_peer_find_detach(pdev); peer_find_attach_fail: + ol_txrx_fw_stats_desc_pool_deinit(pdev); adf_os_mem_free(pdev); ol_attach_fail: @@ -1015,6 +1017,7 
@@ ol_txrx_pdev_detach(ol_txrx_pdev_handle pdev, int force) htt_detach(pdev->htt_pdev); ol_txrx_peer_find_detach(pdev); + ol_txrx_fw_stats_desc_pool_deinit(pdev); adf_os_spinlock_destroy(&pdev->tx_mutex); adf_os_spinlock_destroy(&pdev->peer_ref_mutex); @@ -2198,7 +2201,7 @@ ol_txrx_fw_stats_cfg( u_int8_t cfg_stats_type, u_int32_t cfg_val) { - u_int64_t dummy_cookie = 0; + u_int8_t dummy_cookie = 0; htt_h2t_dbg_stats_get( vdev->pdev->htt_pdev, 0 /* upload mask */, @@ -2215,8 +2218,10 @@ ol_txrx_fw_stats_get( bool response_expected) { struct ol_txrx_pdev_t *pdev = vdev->pdev; - u_int64_t cookie; + uint8_t cookie = FW_STATS_DESC_POOL_SIZE; struct ol_txrx_stats_req_internal *non_volatile_req; + struct ol_txrx_fw_stats_desc_t *desc = NULL; + struct ol_txrx_fw_stats_desc_elem_t *elem = NULL; if (!pdev || req->stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS || @@ -2230,7 +2235,7 @@ ol_txrx_fw_stats_get( * (The one provided as an argument is likely allocated on the stack.) */ non_volatile_req = adf_os_mem_alloc(pdev->osdev, sizeof(*non_volatile_req)); - if (! 
non_volatile_req) { + if (!non_volatile_req) { return A_NO_MEMORY; } /* copy the caller's specifications */ @@ -2238,10 +2243,16 @@ ol_txrx_fw_stats_get( non_volatile_req->serviced = 0; non_volatile_req->offset = 0; - /* use the non-volatile request object's address as the cookie */ - cookie = OL_TXRX_STATS_PTR_TO_U64(non_volatile_req); - if (response_expected) { + + desc = ol_txrx_fw_stats_desc_alloc(pdev); + if (!desc) { + adf_os_mem_free(non_volatile_req); + return A_ERROR; + } + /* use the desc id as the cookie */ + cookie = desc->desc_id; + desc->req = non_volatile_req; adf_os_spin_lock_bh(&pdev->req_list_spinlock); TAILQ_INSERT_TAIL(&pdev->req_list, non_volatile_req, req_list_elem); pdev->req_list_depth++; @@ -2257,9 +2268,28 @@ ol_txrx_fw_stats_get( { if (response_expected) { adf_os_spin_lock_bh(&pdev->req_list_spinlock); - TAILQ_REMOVE(&pdev->req_list, non_volatile_req, req_list_elem); + TAILQ_REMOVE(&pdev->req_list, non_volatile_req, + req_list_elem); pdev->req_list_depth--; adf_os_spin_unlock_bh(&pdev->req_list_spinlock); + if (desc) { + adf_os_spin_lock_bh(&pdev-> + ol_txrx_fw_stats_desc_pool. + pool_lock); + desc->req = NULL; + elem = container_of(desc, + struct + ol_txrx_fw_stats_desc_elem_t, + desc); + elem->next = + pdev->ol_txrx_fw_stats_desc_pool. + freelist; + pdev->ol_txrx_fw_stats_desc_pool. + freelist = elem; + adf_os_spin_unlock_bh(&pdev-> + ol_txrx_fw_stats_desc_pool. + pool_lock); + } } adf_os_mem_free(non_volatile_req); @@ -2272,10 +2302,161 @@ ol_txrx_fw_stats_get( return A_OK; } #endif +/** + * ol_txrx_fw_stats_desc_pool_init() - Initialize the fw stats descriptor pool + * @pdev: handle to ol txrx pdev + * @pool_size: Size of fw stats descriptor pool + * + * Return: 0 for success, error code on failure. 
+ */ +int ol_txrx_fw_stats_desc_pool_init(struct ol_txrx_pdev_t *pdev, + uint8_t pool_size) +{ + int i; + + if (!pdev) { + TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, + "%s: pdev is NULL", __func__); + return -EINVAL; + } + pdev->ol_txrx_fw_stats_desc_pool.pool = adf_os_mem_alloc(pdev->osdev, + pool_size * sizeof(struct ol_txrx_fw_stats_desc_elem_t)); + if (!pdev->ol_txrx_fw_stats_desc_pool.pool) { + TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, + "%s: failed to allocate desc pool", __func__); + return -ENOMEM; + } + pdev->ol_txrx_fw_stats_desc_pool.freelist = + &pdev->ol_txrx_fw_stats_desc_pool.pool[0]; + pdev->ol_txrx_fw_stats_desc_pool.pool_size = pool_size; + + for (i = 0; i < (pool_size - 1); i++) { + pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.desc_id = i; + pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.req = NULL; + pdev->ol_txrx_fw_stats_desc_pool.pool[i].next = + &pdev->ol_txrx_fw_stats_desc_pool.pool[i + 1]; + } + pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.desc_id = i; + pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.req = NULL; + pdev->ol_txrx_fw_stats_desc_pool.pool[i].next = NULL; + adf_os_atomic_init(&pdev->ol_txrx_fw_stats_desc_pool.initialized); + adf_os_atomic_set(&pdev->ol_txrx_fw_stats_desc_pool.initialized, 1); + return 0; +} + +/** + * ol_txrx_fw_stats_desc_pool_deinit() - Deinitialize the + * fw stats descriptor pool + * @pdev: handle to ol txrx pdev + * + * Return: None + */ +void ol_txrx_fw_stats_desc_pool_deinit(struct ol_txrx_pdev_t *pdev) +{ + if (!pdev) { + TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, + "%s: pdev is NULL", __func__); + return; + } + if (!adf_os_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) { + TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, + "%s: Pool is not initialized", __func__); + return; + } + if (!pdev->ol_txrx_fw_stats_desc_pool.pool) { + TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, + "%s: Pool is not allocated", __func__); + return; + } + + adf_os_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock); + 
adf_os_atomic_set(&pdev->ol_txrx_fw_stats_desc_pool.initialized, 0); + adf_os_mem_free(pdev->ol_txrx_fw_stats_desc_pool.pool); + pdev->ol_txrx_fw_stats_desc_pool.pool = NULL; + + pdev->ol_txrx_fw_stats_desc_pool.freelist = NULL; + pdev->ol_txrx_fw_stats_desc_pool.pool_size = 0; + adf_os_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock); +} + +/** + * ol_txrx_fw_stats_desc_alloc() - Get fw stats descriptor from fw stats + * free descriptor pool + * @pdev: handle to ol txrx pdev + * + * Return: pointer to fw stats descriptor, NULL on failure + */ +struct ol_txrx_fw_stats_desc_t + *ol_txrx_fw_stats_desc_alloc(struct ol_txrx_pdev_t *pdev) +{ + struct ol_txrx_fw_stats_desc_t *desc = NULL; + + adf_os_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock); + if (!adf_os_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool. + initialized)) { + adf_os_spin_unlock_bh(&pdev-> + ol_txrx_fw_stats_desc_pool.pool_lock); + TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, + "%s: Pool deinitialized", __func__); + return NULL; + } + if (pdev->ol_txrx_fw_stats_desc_pool.freelist) { + desc = &pdev->ol_txrx_fw_stats_desc_pool.freelist->desc; + pdev->ol_txrx_fw_stats_desc_pool.freelist = + pdev->ol_txrx_fw_stats_desc_pool.freelist->next; + } + adf_os_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock); + + if (desc) { + TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2, + "%s: desc_id %d allocated", + __func__, desc->desc_id); + } else { + TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, + "%s: fw stats descriptors are exhausted", __func__); + } + return desc; +} + +/** + * ol_txrx_fw_stats_desc_get_req() - Put fw stats descriptor + * back into free pool + * @pdev: handle to ol txrx pdev + * @fw_stats_desc: fw_stats_desc_get descriptor + * + * Return: pointer to request + */ +struct ol_txrx_stats_req_internal + *ol_txrx_fw_stats_desc_get_req(struct ol_txrx_pdev_t *pdev, + unsigned char desc_id) +{ + struct ol_txrx_fw_stats_desc_elem_t *desc_elem; + struct ol_txrx_stats_req_internal *req; + + 
adf_os_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock); + if (!adf_os_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool. + initialized)) { + adf_os_spin_unlock_bh(&pdev-> + ol_txrx_fw_stats_desc_pool.pool_lock); + TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, + "%s: Desc ID %u Pool deinitialized", + __func__, desc_id); + return NULL; + } + desc_elem = &pdev->ol_txrx_fw_stats_desc_pool.pool[desc_id]; + req = desc_elem->desc.req; + desc_elem->desc.req = NULL; + desc_elem->next = + pdev->ol_txrx_fw_stats_desc_pool.freelist; + pdev->ol_txrx_fw_stats_desc_pool.freelist = desc_elem; + adf_os_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock); + return req; +} + void ol_txrx_fw_stats_handler( ol_txrx_pdev_handle pdev, - u_int64_t cookie, + u_int8_t cookie, u_int8_t *stats_info_list) { enum htt_dbg_stats_type type; @@ -2286,7 +2467,18 @@ ol_txrx_fw_stats_handler( int more = 0; int found = 0; - req = OL_TXRX_U64_TO_STATS_PTR(cookie); + if (cookie >= FW_STATS_DESC_POOL_SIZE) { + TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s: Cookie is not valid", + __func__); + return; + } + req = ol_txrx_fw_stats_desc_get_req(pdev, (uint8_t)cookie); + if (!req) { + TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, + "%s: Request not retrieved for cookie %u", __func__, + (uint8_t)cookie); + return; + } adf_os_spin_lock_bh(&pdev->req_list_spinlock); TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) { diff --git a/CORE/CLD_TXRX/TXRX/ol_txrx.h b/CORE/CLD_TXRX/TXRX/ol_txrx.h index 08c1e1757d55..11ed17f255be 100644 --- a/CORE/CLD_TXRX/TXRX/ol_txrx.h +++ b/CORE/CLD_TXRX/TXRX/ol_txrx.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2014,2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2014,2017-2018 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. 
* @@ -57,6 +57,10 @@ void ol_txrx_vdev_deinit_tcp_del_ack(struct ol_txrx_vdev_t *vdev); #define OL_TX_DESC_POOL_SIZE_MAX_HL 5000 #endif +#ifndef FW_STATS_DESC_POOL_SIZE +#define FW_STATS_DESC_POOL_SIZE 10 +#endif + #ifdef CONFIG_PER_VDEV_TX_DESC_POOL #define TXRX_HL_TX_FLOW_CTRL_VDEV_LOW_WATER_MARK 400 #define TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED 100 @@ -69,5 +73,13 @@ void ol_txrx_vdev_deinit_tcp_del_ack(struct ol_txrx_vdev_t *vdev); A_STATUS ol_txrx_get_ll_queue_pause_bitmap(uint8_t vdev_id, uint8_t *pause_bitmap, adf_os_time_t *pause_timestamp); +int ol_txrx_fw_stats_desc_pool_init(struct ol_txrx_pdev_t *pdev, + uint8_t pool_size); +void ol_txrx_fw_stats_desc_pool_deinit(struct ol_txrx_pdev_t *pdev); +struct ol_txrx_fw_stats_desc_t + *ol_txrx_fw_stats_desc_alloc(struct ol_txrx_pdev_t + *pdev); +struct ol_txrx_stats_req_internal *ol_txrx_fw_stats_desc_get_req(struct + ol_txrx_pdev_t *pdev, uint8_t desc_id); #endif /* _OL_TXRX__H_ */ diff --git a/CORE/CLD_TXRX/TXRX/ol_txrx_types.h b/CORE/CLD_TXRX/TXRX/ol_txrx_types.h index 70c45f55afa7..9fb239d9f9ad 100644 --- a/CORE/CLD_TXRX/TXRX/ol_txrx_types.h +++ b/CORE/CLD_TXRX/TXRX/ol_txrx_types.h @@ -449,6 +449,16 @@ struct ol_tx_group_credit_stats_t { u_int16_t wrap_around; }; +struct ol_txrx_fw_stats_desc_t { + struct ol_txrx_stats_req_internal *req; + unsigned char desc_id; +}; + +struct ol_txrx_fw_stats_desc_elem_t { + struct ol_txrx_fw_stats_desc_elem_t *next; + struct ol_txrx_fw_stats_desc_t desc; +}; + /* * As depicted in the diagram below, the pdev contains an array of * NUM_EXT_TID ol_tx_active_queues_in_tid_t elements. 
@@ -554,6 +564,14 @@ struct ol_txrx_pdev_t { adf_os_atomic_t target_tx_credit; adf_os_atomic_t orig_target_tx_credit; + struct { + uint16_t pool_size; + struct ol_txrx_fw_stats_desc_elem_t *pool; + struct ol_txrx_fw_stats_desc_elem_t *freelist; + adf_os_spinlock_t pool_lock; + adf_os_atomic_t initialized; + } ol_txrx_fw_stats_desc_pool; + /* Peer mac address to staid mapping */ struct ol_mac_addr mac_to_staid[WLAN_MAX_STA_COUNT + 3]; diff --git a/CORE/SERVICES/COMMON/ol_htt_api.h b/CORE/SERVICES/COMMON/ol_htt_api.h index 222db3d0462a..3dc5d1663ec9 100644 --- a/CORE/SERVICES/COMMON/ol_htt_api.h +++ b/CORE/SERVICES/COMMON/ol_htt_api.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2014, 2016 The Linux Foundation. All rights reserved. + * Copyright (c) 2011, 2014, 2016, 2018 The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * @@ -170,7 +170,7 @@ htt_h2t_dbg_stats_get( u_int32_t stats_type_reset_mask, u_int8_t cfg_stats_type, u_int32_t cfg_val, - u_int64_t cookie); + u_int8_t cookie); /** * @brief Get the fields from HTT T2H stats upload message's stats info header diff --git a/CORE/SERVICES/COMMON/ol_txrx_htt_api.h b/CORE/SERVICES/COMMON/ol_txrx_htt_api.h index 941a69276335..6834f7045541 100644 --- a/CORE/SERVICES/COMMON/ol_txrx_htt_api.h +++ b/CORE/SERVICES/COMMON/ol_txrx_htt_api.h @@ -696,7 +696,7 @@ ol_rx_pn_ind_handler( void ol_txrx_fw_stats_handler( ol_txrx_pdev_handle pdev, - u_int64_t cookie, + u_int8_t cookie, u_int8_t *stats_info_list); /** |
