summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLinux Build Service Account <lnxbuild@localhost>2014-12-03 11:46:08 -0800
committerGerrit - the friendly Code Review server <code-review@localhost>2014-12-03 11:46:08 -0800
commite698cafc7095cbdacd4e41db448b96f3377a09c0 (patch)
treec8b49ec95d94435ad52cef2ebdd6ddb428cc17b0
parent99910b8558c98b024acae9b99ded37610b4b8414 (diff)
parent28f937f60f9e361e56ccb65cd5dc63bac0b9a7f2 (diff)
Merge "Release 1.0.0.248 QCACLD WLAN Driver"
-rw-r--r--CORE/CLD_TXRX/HTT/htt.h204
-rw-r--r--CORE/CLD_TXRX/HTT/htt_h2t.c26
-rw-r--r--CORE/CLD_TXRX/HTT/htt_internal.h9
-rw-r--r--CORE/CLD_TXRX/HTT/htt_t2h.c15
-rw-r--r--CORE/CLD_TXRX/HTT/htt_tx.c35
-rw-r--r--CORE/CLD_TXRX/TXRX/ol_tx_classify.c6
-rw-r--r--CORE/CLD_TXRX/TXRX/ol_tx_queue.c152
-rw-r--r--CORE/CLD_TXRX/TXRX/ol_tx_queue.h46
-rw-r--r--CORE/CLD_TXRX/TXRX/ol_tx_sched.c55
-rw-r--r--CORE/CLD_TXRX/TXRX/ol_tx_send.c19
-rw-r--r--CORE/CLD_TXRX/TXRX/ol_txrx.c86
-rw-r--r--CORE/CLD_TXRX/TXRX/ol_txrx_internal.h8
-rw-r--r--CORE/CLD_TXRX/TXRX/ol_txrx_types.h23
-rw-r--r--CORE/HDD/src/wlan_hdd_hostapd.c4
-rw-r--r--CORE/HDD/src/wlan_hdd_oemdata.c306
-rw-r--r--CORE/MAC/inc/qwlan_version.h4
-rw-r--r--CORE/SAP/src/sapModule.c2
-rw-r--r--CORE/SERVICES/BMI/ol_fw.c24
-rw-r--r--CORE/SERVICES/COMMON/ol_txrx_htt_api.h29
-rw-r--r--CORE/SERVICES/HTC/htc_services.c10
-rw-r--r--CORE/SERVICES/WMA/wma.c7
-rw-r--r--CORE/VOSS/src/vos_api.c8
-rw-r--r--Kbuild3
23 files changed, 892 insertions, 189 deletions
diff --git a/CORE/CLD_TXRX/HTT/htt.h b/CORE/CLD_TXRX/HTT/htt.h
index da0fa25c61f5..76f6705d7f48 100644
--- a/CORE/CLD_TXRX/HTT/htt.h
+++ b/CORE/CLD_TXRX/HTT/htt.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2013 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2014 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -82,9 +82,15 @@
* 3.9 Added HTT_T2H CHAN_CHANGE message;
* Allow buffer addresses in bus-address format to be stored as
* either 32 bits or 64 bits.
+ * 3.10 Add optional TLV extensions to the VERSION_REQ and VERSION_CONF
+ * messages to specify which HTT options to use.
+ * Initial TLV options cover:
+ * - whether to use 32 or 64 bits to represent LL bus addresses
+ * - whether to use TX_COMPL_IND or TX_CREDIT_UPDATE_IND in HL systems
+ * - how many tx queue groups to use
*/
#define HTT_CURRENT_VERSION_MAJOR 3
-#define HTT_CURRENT_VERSION_MINOR 9
+#define HTT_CURRENT_VERSION_MINOR 10
#define HTT_NUM_TX_FRAG_DESC 1024
@@ -168,6 +174,178 @@ enum htt_dbg_stats_type {
HTT_DBG_NUM_STATS
};
+/*=== HTT option selection TLVs ===
+ * Certain HTT messages have alternatives or options.
+ * For such cases, the host and target need to agree on which option to use.
+ * Option specification TLVs can be appended to the VERSION_REQ and
+ * VERSION_CONF messages to select options other than the default.
+ * These TLVs are entirely optional - if they are not provided, there is a
+ * well-defined default for each option. If they are provided, they can be
+ * provided in any order. Each TLV can be present or absent independent of
+ * the presence / absence of other TLVs.
+ *
+ * The HTT option selection TLVs use the following format:
+ * |31 16|15 8|7 0|
+ * |---------------------------------+----------------+----------------|
+ * | value (payload) | length | tag |
+ * |-------------------------------------------------------------------|
+ * The value portion need not be only 2 bytes; it can be extended by any
+ * integer number of 4-byte units. The total length of the TLV, including
+ * the tag and length fields, must be a multiple of 4 bytes. The length
+ * field specifies the total TLV size in 4-byte units. Thus, the typical
+ * TLV, with a 1-byte tag field, a 1-byte length field, and a 2-byte value
+ * field, would store 0x1 in its length field, to show that the TLV occupies
+ * a single 4-byte unit.
+ */
+
+/*--- TLV header format - applies to all HTT option TLVs ---*/
+
+enum HTT_OPTION_TLV_TAGS {
+ HTT_OPTION_TLV_TAG_RESERVED0 = 0x0,
+ HTT_OPTION_TLV_TAG_LL_BUS_ADDR_SIZE = 0x1,
+ HTT_OPTION_TLV_TAG_HL_SUPPRESS_TX_COMPL_IND = 0x2,
+ HTT_OPTION_TLV_TAG_MAX_TX_QUEUE_GROUPS = 0x3,
+};
+
+PREPACK struct htt_option_tlv_header_t {
+ A_UINT8 tag;
+ A_UINT8 length;
+} POSTPACK;
+
+#define HTT_OPTION_TLV_TAG_M 0x000000ff
+#define HTT_OPTION_TLV_TAG_S 0
+#define HTT_OPTION_TLV_LENGTH_M 0x0000ff00
+#define HTT_OPTION_TLV_LENGTH_S 8
+/*
+ * value0 - 16 bit value field stored in word0
+ * The TLV's value field may be longer than 2 bytes, in which case
+ * the remainder of the value is stored in word1, word2, etc.
+ */
+#define HTT_OPTION_TLV_VALUE0_M 0xffff0000
+#define HTT_OPTION_TLV_VALUE0_S 16
+
+#define HTT_OPTION_TLV_TAG_SET(word, tag) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_OPTION_TLV_TAG, tag); \
+ (word) |= ((tag) << HTT_OPTION_TLV_TAG_S); \
+ } while (0)
+#define HTT_OPTION_TLV_TAG_GET(word) \
+ (((word) & HTT_OPTION_TLV_TAG_M) >> HTT_OPTION_TLV_TAG_S)
+
+#define HTT_OPTION_TLV_LENGTH_SET(word, tag) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_OPTION_TLV_LENGTH, tag); \
+ (word) |= ((tag) << HTT_OPTION_TLV_LENGTH_S); \
+ } while (0)
+#define HTT_OPTION_TLV_LENGTH_GET(word) \
+ (((word) & HTT_OPTION_TLV_LENGTH_M) >> HTT_OPTION_TLV_LENGTH_S)
+
+#define HTT_OPTION_TLV_VALUE0_SET(word, tag) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_OPTION_TLV_VALUE0, tag); \
+ (word) |= ((tag) << HTT_OPTION_TLV_VALUE0_S); \
+ } while (0)
+#define HTT_OPTION_TLV_VALUE0_GET(word) \
+ (((word) & HTT_OPTION_TLV_VALUE0_M) >> HTT_OPTION_TLV_VALUE0_S)
+
+/*--- format of specific HTT option TLVs ---*/
+
+/*
+ * HTT option TLV for specifying LL bus address size
+ * Some chips require bus addresses used by the target to access buffers
+ * within the host's memory to be 32 bits; others require bus addresses
+ * used by the target to access buffers within the host's memory to be
+ * 64 bits.
+ * The LL_BUS_ADDR_SIZE TLV can be sent from the target to the host as
+ * a suffix to the VERSION_CONF message to specify which bus address format
+ * the target requires.
+ * If this LL_BUS_ADDR_SIZE TLV is not sent by the target, the host should
+ * default to providing bus addresses to the target in 32-bit format.
+ */
+enum HTT_OPTION_TLV_LL_BUS_ADDR_SIZE_VALUES {
+ HTT_OPTION_TLV_LL_BUS_ADDR_SIZE32 = 0x0,
+ HTT_OPTION_TLV_LL_BUS_ADDR_SIZE64 = 0x1,
+};
+PREPACK struct htt_option_tlv_ll_bus_addr_size_t {
+ struct htt_option_tlv_header_t hdr;
+ A_UINT16 ll_bus_addr_size; /* LL_BUS_ADDR_SIZE_VALUES enum */
+} POSTPACK;
+
+/*
+ * HTT option TLV for specifying whether HL systems should indicate
+ * over-the-air tx completion for individual frames, or should instead
+ * send a bulk TX_CREDIT_UPDATE_IND except when the host explicitly
+ * requests an OTA tx completion for a particular tx frame.
+ * This option does not apply to LL systems, where the TX_COMPL_IND
+ * is mandatory.
+ * This option is primarily intended for HL systems in which the tx frame
+ * downloads over the host --> target bus are as slow as or slower than
+ * the transmissions over the WLAN PHY. For cases where the bus is faster
+ * than the WLAN PHY, the target will transmit relatively large A-MPDUs,
+ * and consequently will send one TX_COMPL_IND message that covers several
+ * tx frames. For cases where the WLAN PHY is faster than the bus,
+ * the target will end up transmitting very short A-MPDUs, and consequently
+ * sending many TX_COMPL_IND messages, which each cover a very small number
+ * of tx frames.
+ * The HL_SUPPRESS_TX_COMPL_IND TLV can be sent by the host to the target as
+ * a suffix to the VERSION_REQ message to request whether the host desires to
+ * use TX_CREDIT_UPDATE_IND rather than TX_COMPL_IND. The target can then
+ * send a HTT_SUPPRESS_TX_COMPL_IND TLV to the host as a suffix to the
+ * VERSION_CONF message to confirm whether TX_CREDIT_UPDATE_IND will be used
+ * rather than TX_COMPL_IND. TX_CREDIT_UPDATE_IND shall only be used if the
+ * host sends a HL_SUPPRESS_TX_COMPL_IND TLV requesting use of
+ * TX_CREDIT_UPDATE_IND, and the target sends a HL_SUPPRESS_TX_COMPL_IND TLV
+ * back to the host confirming use of TX_CREDIT_UPDATE_IND.
+ * Lack of a HL_SUPPRESS_TX_COMPL_IND TLV from either host --> target or
+ * target --> host is equivalent to a HL_SUPPRESS_TX_COMPL_IND that
+ * explicitly specifies HL_ALLOW_TX_COMPL_IND in the value payload of the
+ * TLV.
+ */
+enum HTT_OPTION_TLV_HL_SUPPRESS_TX_COMPL_IND_VALUES {
+ HTT_OPTION_TLV_HL_ALLOW_TX_COMPL_IND = 0x0,
+ HTT_OPTION_TLV_HL_SUPPRESS_TX_COMPL_IND = 0x1,
+};
+PREPACK struct htt_option_tlv_hl_suppress_tx_compl_ind_t {
+ struct htt_option_tlv_header_t hdr;
+ A_UINT16 hl_suppress_tx_compl_ind; /* HL_SUPPRESS_TX_COMPL_IND enum */
+} POSTPACK;
+
+/*
+ * HTT option TLV for specifying how many tx queue groups the target
+ * may establish.
+ * This TLV specifies the maximum value the target may send in the
+ * txq_group_id field of any TXQ_GROUP information elements sent by
+ * the target to the host. This allows the host to pre-allocate an
+ * appropriate number of tx queue group structs.
+ *
+ * The MAX_TX_QUEUE_GROUPS_TLV can be sent from the host to the target as
+ * a suffix to the VERSION_REQ message to specify whether the host supports
+ * tx queue groups at all, and if so if there is any limit on the number of
+ * tx queue groups that the host supports.
+ * The MAX_TX_QUEUE_GROUPS TLV can be sent from the target to the host as
+ * a suffix to the VERSION_CONF message. If the host has specified in the
+ * VER_REQ message a limit on the number of tx queue groups the host can
+ * support, the target shall limit its specification of the maximum tx groups
+ * to be no larger than this host-specified limit.
+ *
+ * If the target does not provide a MAX_TX_QUEUE_GROUPS TLV, then the host
+ * shall preallocate 4 tx queue group structs, and the target shall not
+ * specify a txq_group_id larger than 3.
+ */
+enum HTT_OPTION_TLV_MAX_TX_QUEUE_GROUPS_VALUES {
+ HTT_OPTION_TLV_TX_QUEUE_GROUPS_UNSUPPORTED = 0,
+ /*
+ * values 1 through N specify the max number of tx queue groups
+ * the sender supports
+ */
+ HTT_OPTION_TLV_TX_QUEUE_GROUPS_UNLIMITED = 0xffff,
+};
+PREPACK struct htt_option_tlv_mac_tx_queue_groups_t {
+ struct htt_option_tlv_header_t hdr;
+ A_UINT16 max_tx_queue_groups; /* max txq_group_id + 1 */
+} POSTPACK;
+
+
/*=== host -> target messages ===============================================*/
enum htt_h2t_msg_type {
@@ -208,6 +386,17 @@ enum htt_h2t_msg_type {
* |----------------+----------------+----------------+----------------|
* | reserved | msg type |
* |-------------------------------------------------------------------|
+ * : option request TLV (optional) |
+ * :...................................................................:
+ *
+ * The VER_REQ message may consist of a single 4-byte word, or may be
+ * extended with TLVs that specify which HTT options the host is requesting
+ * from the target.
+ * The following option TLVs may be appended to the VER_REQ message:
+ * - HL_SUPPRESS_TX_COMPL_IND
+ * - HL_MAX_TX_QUEUE_GROUPS
+ * These TLVs may appear in an arbitrary order. Any number of these TLVs
+ * may be appended to the VER_REQ message (but only one TLV of each type).
*
* Header fields:
* - MSG_TYPE
@@ -1676,6 +1865,17 @@ enum htt_t2h_msg_type {
* |----------------+----------------+----------------+----------------|
* | reserved | major number | minor number | msg type |
* |-------------------------------------------------------------------|
+ * : option request TLV (optional) |
+ * :...................................................................:
+ *
+ * The VER_CONF message may consist of a single 4-byte word, or may be
+ * extended with TLVs that specify HTT options selected by the target.
+ * The following option TLVs may be appended to the VER_CONF message:
+ * - LL_BUS_ADDR_SIZE
+ * - HL_SUPPRESS_TX_COMPL_IND
+ * - MAX_TX_QUEUE_GROUPS
+ * These TLVs may appear in an arbitrary order. Any number of these TLVs
+ * may be appended to the VER_CONF message (but only one TLV of each type).
*
* Header fields:
* - MSG_TYPE
diff --git a/CORE/CLD_TXRX/HTT/htt_h2t.c b/CORE/CLD_TXRX/HTT/htt_h2t.c
index fc70c99fdfe5..b3ea8cb53c20 100644
--- a/CORE/CLD_TXRX/HTT/htt_h2t.c
+++ b/CORE/CLD_TXRX/HTT/htt_h2t.c
@@ -114,19 +114,30 @@ htt_h2t_ver_req_msg(struct htt_pdev_t *pdev)
struct htt_htc_pkt *pkt;
adf_nbuf_t msg;
u_int32_t *msg_word;
+ u_int32_t msg_size;
+ u_int32_t max_tx_group;
pkt = htt_htc_pkt_alloc(pdev);
if (!pkt) {
return A_ERROR; /* failure */
}
+ max_tx_group = OL_TX_GET_MAX_GROUPS(pdev->txrx_pdev);
+
+ if (max_tx_group) {
+ msg_size = HTT_VER_REQ_BYTES +
+ sizeof(struct htt_option_tlv_mac_tx_queue_groups_t);
+ } else {
+ msg_size = HTT_VER_REQ_BYTES;
+ }
+
/* show that this is not a tx frame download (not required, but helpful) */
pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
pkt->pdev_ctxt = NULL; /* not used during send-done callback */
msg = adf_nbuf_alloc(
pdev->osdev,
- HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
+ HTT_MSG_BUF_SIZE(msg_size),
/* reserve room for the HTC header */
HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
if (!msg) {
@@ -140,7 +151,7 @@ htt_h2t_ver_req_msg(struct htt_pdev_t *pdev)
* separately during the below call to adf_nbuf_push_head.
* The contribution from the HTC header is added separately inside HTC.
*/
- adf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES);
+ adf_nbuf_put_tail(msg, msg_size);
/* fill in the message contents */
msg_word = (u_int32_t *) adf_nbuf_data(msg);
@@ -151,6 +162,17 @@ htt_h2t_ver_req_msg(struct htt_pdev_t *pdev)
*msg_word = 0;
HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
+ if (max_tx_group) {
+ *(msg_word + 1) = 0;
+ /* Fill Group Info */
+ HTT_OPTION_TLV_TAG_SET(*(msg_word+1),
+ HTT_OPTION_TLV_TAG_MAX_TX_QUEUE_GROUPS);
+ HTT_OPTION_TLV_LENGTH_SET(*(msg_word+1),
+ (sizeof(struct htt_option_tlv_mac_tx_queue_groups_t)/
+ sizeof(u_int32_t)));
+ HTT_OPTION_TLV_VALUE0_SET(*(msg_word+1), max_tx_group);
+ }
+
SET_HTC_PACKET_INFO_TX(
&pkt->htc_pkt,
htt_h2t_send_complete_free_netbuf,
diff --git a/CORE/CLD_TXRX/HTT/htt_internal.h b/CORE/CLD_TXRX/HTT/htt_internal.h
index 51fde7416a57..53658fe2fc99 100644
--- a/CORE/CLD_TXRX/HTT/htt_internal.h
+++ b/CORE/CLD_TXRX/HTT/htt_internal.h
@@ -425,4 +425,13 @@ htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev);
int
htt_tx_credit_update(struct htt_pdev_t *pdev);
+#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
+#define HTT_TX_GROUP_INDEX_OFFSET \
+ (sizeof(struct htt_txq_group) / sizeof(u_int32_t))
+void htt_tx_group_credit_process(struct htt_pdev_t *pdev, u_int32_t *msg_word);
+#define HTT_TX_GROUP_CREDIT_PROCESS htt_tx_group_credit_process
+#else
+#define HTT_TX_GROUP_CREDIT_PROCESS(pdev, msg_word) /* no-op */
+#endif
+
#endif /* _HTT_INTERNAL__H_ */
diff --git a/CORE/CLD_TXRX/HTT/htt_t2h.c b/CORE/CLD_TXRX/HTT/htt_t2h.c
index 5f404f3b4f53..796b9f512acd 100644
--- a/CORE/CLD_TXRX/HTT/htt_t2h.c
+++ b/CORE/CLD_TXRX/HTT/htt_t2h.c
@@ -299,6 +299,8 @@ htt_t2h_lp_msg_handler(void *context, adf_nbuf_t htt_t2h_msg )
ol_tx_target_credit_update(pdev->txrx_pdev, credit_delta);
}
}
+ OL_TX_DESC_UPDATE_GROUP_CREDIT(
+ pdev->txrx_pdev, compl_msg->desc_id, 1, 0);
ol_tx_single_completion_handler(
pdev->txrx_pdev, compl_msg->status, compl_msg->desc_id);
HTT_TX_SCHED(pdev);
@@ -359,9 +361,16 @@ htt_t2h_lp_msg_handler(void *context, adf_nbuf_t htt_t2h_msg )
&pdev->htt_tx_credit.target_delta);
htt_credit_delta = htt_tx_credit_update(pdev);
}
- if (htt_credit_delta) {
- ol_tx_credit_completion_handler(pdev->txrx_pdev, htt_credit_delta);
- }
+
+ HTT_TX_GROUP_CREDIT_PROCESS(pdev, msg_word);
+ /*
+ * Call ol_tx_credit_completion even if htt_credit_delta is zero,
+ * in case there is some global credit already available, but the
+ * above group credit updates have removed credit restrictions,
+ * possibly allowing the download scheduler to perform a download
+ * even if htt_credit_delta == 0.
+ */
+ ol_tx_credit_completion_handler(pdev->txrx_pdev, htt_credit_delta);
break;
}
diff --git a/CORE/CLD_TXRX/HTT/htt_tx.c b/CORE/CLD_TXRX/HTT/htt_tx.c
index 123f4543535e..d8d97abc6ea8 100644
--- a/CORE/CLD_TXRX/HTT/htt_tx.c
+++ b/CORE/CLD_TXRX/HTT/htt_tx.c
@@ -749,3 +749,38 @@ int htt_tx_credit_update(struct htt_pdev_t *pdev)
}
return credit_delta;
}
+
+#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
+void htt_tx_group_credit_process(struct htt_pdev_t *pdev, u_int32_t *msg_word)
+{
+ int group_credit_sign;
+ int32_t group_credit;
+ u_int32_t group_credit_abs, vdev_id_mask, ac_mask;
+ u_int8_t group_abs, group_id;
+ u_int8_t group_offset = 0, more_group_present = 0;
+
+ more_group_present = HTT_TX_CREDIT_TXQ_GRP_GET(*msg_word);
+
+ while (more_group_present) {
+ /* Parse the Group Data */
+ group_id = HTT_TXQ_GROUP_ID_GET(*(msg_word+1+group_offset));
+ group_credit_abs =
+ HTT_TXQ_GROUP_CREDIT_COUNT_GET(*(msg_word+1+group_offset));
+ group_credit_sign =
+ HTT_TXQ_GROUP_SIGN_GET(*(msg_word+1+group_offset)) ? -1 : 1;
+ group_credit = group_credit_sign * group_credit_abs;
+ group_abs = HTT_TXQ_GROUP_ABS_GET(*(msg_word+1+group_offset));
+
+ vdev_id_mask =
+ HTT_TXQ_GROUP_VDEV_ID_MASK_GET(*(msg_word+2+group_offset));
+ ac_mask = HTT_TXQ_GROUP_AC_MASK_GET(*(msg_word+2+group_offset));
+
+ ol_txrx_update_tx_queue_groups(pdev->txrx_pdev, group_id,
+ group_credit, group_abs,
+ vdev_id_mask, ac_mask);
+ more_group_present = HTT_TXQ_GROUP_EXT_GET(*(msg_word+1+group_offset));
+ group_offset += HTT_TX_GROUP_INDEX_OFFSET;
+ }
+}
+#endif
+
diff --git a/CORE/CLD_TXRX/TXRX/ol_tx_classify.c b/CORE/CLD_TXRX/TXRX/ol_tx_classify.c
index 960d24de34c3..2d8c75249ae5 100644
--- a/CORE/CLD_TXRX/TXRX/ol_tx_classify.c
+++ b/CORE/CLD_TXRX/TXRX/ol_tx_classify.c
@@ -513,6 +513,9 @@ ol_tx_classify(
/* Whether this frame can download through HTT2 data pipe or not. */
OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, tx_nbuf, tx_msdu_info);
+ /* Update Tx Queue info */
+ tx_desc->txq = txq;
+
TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
return txq;
}
@@ -591,6 +594,9 @@ ol_tx_classify_mgmt(
/* Whether this frame can download though HTT2 data pipe or not. */
OL_TX_CLASSIFY_HTT2_EXTENSION(vdev, tx_nbuf, tx_msdu_info);
+ /* Update Tx Queue info */
+ tx_desc->txq = txq;
+
TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
return txq;
}
diff --git a/CORE/CLD_TXRX/TXRX/ol_tx_queue.c b/CORE/CLD_TXRX/TXRX/ol_tx_queue.c
index 65723af694c9..0f2f49508d51 100644
--- a/CORE/CLD_TXRX/TXRX/ol_tx_queue.c
+++ b/CORE/CLD_TXRX/TXRX/ol_tx_queue.c
@@ -1235,3 +1235,155 @@ ol_tx_queues_display(struct ol_txrx_pdev_t *pdev)
#endif
#endif /* defined(CONFIG_HL_SUPPORT) */
+
+#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
+static a_bool_t
+ol_tx_vdev_has_tx_queue_group(
+ struct ol_tx_queue_group_t* group,
+ u_int8_t vdev_id)
+{
+ u_int16_t vdev_bitmap;
+ vdev_bitmap = OL_TXQ_GROUP_VDEV_ID_MASK_GET(group->membership);
+ if (OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(vdev_bitmap, vdev_id)) {
+ return A_TRUE;
+ }
+ return A_FALSE;
+}
+
+static a_bool_t
+ol_tx_ac_has_tx_queue_group(
+ struct ol_tx_queue_group_t* group,
+ u_int8_t ac)
+{
+ u_int16_t ac_bitmap;
+ ac_bitmap = OL_TXQ_GROUP_AC_MASK_GET(group->membership);
+ if (OL_TXQ_GROUP_AC_BIT_MASK_GET(ac_bitmap, ac)) {
+ return A_TRUE;
+ }
+ return A_FALSE;
+}
+
+u_int32_t ol_tx_txq_group_credit_limit(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ u_int32_t credit)
+{
+ u_int8_t i;
+ /*
+ * If this tx queue belongs to a group, check whether the group's
+ * credit limit is more stringent than the global credit limit.
+ */
+ for (i = 0; i < OL_TX_MAX_GROUPS_PER_QUEUE; i++) {
+ if (txq->group_ptrs[i]) {
+ u_int32_t group_credit;
+ group_credit = adf_os_atomic_read(&txq->group_ptrs[i]->credit);
+ credit = MIN(credit, group_credit);
+ }
+ }
+ return credit;
+}
+
+void ol_tx_txq_group_credit_update(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ int32_t credit,
+ u_int8_t absolute)
+{
+ u_int8_t i;
+ /*
+ * If this tx queue belongs to a group then
+ * update group credit
+ */
+ for (i = 0; i < OL_TX_MAX_GROUPS_PER_QUEUE; i++) {
+ if (txq->group_ptrs[i]) {
+ ol_txrx_update_group_credit(txq->group_ptrs[i], credit, absolute);
+ }
+ }
+}
+
+void
+ol_tx_set_vdev_group_ptr(
+ ol_txrx_pdev_handle pdev,
+ u_int8_t vdev_id,
+ struct ol_tx_queue_group_t *grp_ptr)
+{
+ struct ol_txrx_vdev_t *vdev = NULL;
+ struct ol_txrx_peer_t *peer = NULL;
+
+ TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
+ if (vdev->vdev_id == vdev_id) {
+ u_int8_t i, j;
+ /* update vdev queues group pointers */
+ for (i = 0; i < OL_TX_VDEV_NUM_QUEUES; i++) {
+ for (j = 0; j < OL_TX_MAX_GROUPS_PER_QUEUE; j++) {
+ vdev->txqs[i].group_ptrs[j] = grp_ptr;
+ }
+ }
+ adf_os_spin_lock_bh(&pdev->peer_ref_mutex);
+ /* Update peer queue group pointers */
+ TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
+ for (i = 0; i < OL_TX_NUM_TIDS; i++) {
+ for (j = 0; j < OL_TX_MAX_GROUPS_PER_QUEUE; j++) {
+ peer->txqs[i].group_ptrs[j] = grp_ptr;
+ }
+ }
+ }
+ adf_os_spin_unlock_bh(&pdev->peer_ref_mutex);
+ break;
+ }
+ }
+}
+
+void
+ol_tx_txq_set_group_ptr(
+ struct ol_tx_frms_queue_t *txq,
+ struct ol_tx_queue_group_t *grp_ptr)
+{
+ u_int8_t i;
+ for (i = 0; i < OL_TX_MAX_GROUPS_PER_QUEUE; i++) {
+ txq->group_ptrs[i] = grp_ptr;
+ }
+}
+
+void
+ol_tx_set_peer_group_ptr(
+ ol_txrx_pdev_handle pdev,
+ struct ol_txrx_peer_t *peer,
+ u_int8_t vdev_id,
+ u_int8_t tid)
+{
+ u_int8_t i, j = 0;
+ struct ol_tx_queue_group_t *group = NULL;
+
+ for (i = 0; i < OL_TX_MAX_GROUPS_PER_QUEUE; i++) {
+ peer->txqs[tid].group_ptrs[i] = NULL;
+ }
+ for (i = 0; i < OL_TX_MAX_TXQ_GROUPS; i++) {
+ group = &pdev->txq_grps[i];
+ if (ol_tx_vdev_has_tx_queue_group(group, vdev_id)) {
+ if (tid < OL_TX_NUM_QOS_TIDS) {
+ if (ol_tx_ac_has_tx_queue_group(
+ group, TXRX_TID_TO_WMM_AC(tid))) {
+ peer->txqs[tid].group_ptrs[j] = group;
+ j++;
+ }
+ } else {
+ peer->txqs[tid].group_ptrs[j] = group;
+ j++;
+ }
+ }
+ if (j >= OL_TX_MAX_GROUPS_PER_QUEUE) {
+ break;
+ }
+ }
+}
+
+u_int32_t ol_tx_get_max_tx_groups_supported(struct ol_txrx_pdev_t *pdev)
+{
+#ifdef HIF_SDIO
+ return OL_TX_MAX_TXQ_GROUPS;
+#else
+ return 0;
+#endif
+}
+#endif
diff --git a/CORE/CLD_TXRX/TXRX/ol_tx_queue.h b/CORE/CLD_TXRX/TXRX/ol_tx_queue.h
index 94ec2e39b821..e73fe2813a91 100644
--- a/CORE/CLD_TXRX/TXRX/ol_tx_queue.h
+++ b/CORE/CLD_TXRX/TXRX/ol_tx_queue.h
@@ -170,4 +170,50 @@ void ol_tx_throttle_init(struct ol_txrx_pdev_t *pdev);
#else
#define ol_tx_throttle_init(pdev) /*no op*/
#endif
+
+#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
+#define OL_TX_IS_TXQ_LAST_SERVICED_QUEUE(pdev, txq) \
+ txq == pdev->tx_sched.last_used_txq
+
+u_int32_t ol_tx_txq_group_credit_limit(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ u_int32_t credit);
+
+void ol_tx_txq_group_credit_update(
+ struct ol_txrx_pdev_t *pdev,
+ struct ol_tx_frms_queue_t *txq,
+ int32_t credit,
+ u_int8_t absolute);
+
+void
+ol_tx_set_vdev_group_ptr(
+ ol_txrx_pdev_handle pdev,
+ u_int8_t vdev_id,
+ struct ol_tx_queue_group_t *grp_ptr);
+
+void
+ol_tx_txq_set_group_ptr(
+ struct ol_tx_frms_queue_t *txq,
+ struct ol_tx_queue_group_t *grp_ptr);
+
+void
+ol_tx_set_peer_group_ptr(
+ ol_txrx_pdev_handle pdev,
+ struct ol_txrx_peer_t *peer,
+ u_int8_t vdev_id,
+ u_int8_t tid);
+
+#define OL_TX_TXQ_GROUP_CREDIT_LIMIT ol_tx_txq_group_credit_limit
+#define OL_TX_TXQ_GROUP_CREDIT_UPDATE ol_tx_txq_group_credit_update
+#define OL_TX_TXQ_SET_GROUP_PTR ol_tx_txq_set_group_ptr
+#define OL_TX_SET_PEER_GROUP_PTR ol_tx_set_peer_group_ptr
+#else
+#define OL_TX_IS_TXQ_LAST_SERVICED_QUEUE(pdev, txq) 0
+#define OL_TX_TXQ_GROUP_CREDIT_LIMIT(pdev, txq, credit) credit
+#define OL_TX_TXQ_GROUP_CREDIT_UPDATE(pdev, txq, credit, absolute) /* no-op */
+#define OL_TX_TXQ_SET_GROUP_PTR(txq,grp_ptr) /* no-op */
+#define OL_TX_SET_PEER_GROUP_PTR(pdev, peer, vdev_id, tid) /* no-op */
+#endif
+
#endif /* _OL_TX_QUEUE__H_ */
diff --git a/CORE/CLD_TXRX/TXRX/ol_tx_sched.c b/CORE/CLD_TXRX/TXRX/ol_tx_sched.c
index 8d8de6c7bae8..0314f7d589ab 100644
--- a/CORE/CLD_TXRX/TXRX/ol_tx_sched.c
+++ b/CORE/CLD_TXRX/TXRX/ol_tx_sched.c
@@ -658,6 +658,7 @@ ol_tx_sched_select_init_wrr_adv(struct ol_txrx_pdev_t *pdev)
struct ol_tx_sched_wrr_adv_t *scheduler = pdev->tx_sched.scheduler;
/* start selection from the front of the ordered list */
scheduler->index = 0;
+ pdev->tx_sched.last_used_txq = NULL;
}
static void
@@ -711,7 +712,7 @@ ol_tx_sched_select_batch_wrr_adv(
struct ol_tx_frms_queue_t *txq;
int index;
struct ol_tx_sched_wrr_adv_category_info_t *category = NULL;
- int frames, bytes, used_credits;
+ int frames, bytes, used_credits = 0;
/*
* the macro may end up the function if all tx_queue is empty
*/
@@ -782,22 +783,52 @@ ol_tx_sched_select_batch_wrr_adv(
* Take the tx queue from the head of the category list.
*/
txq = TAILQ_FIRST(&category->state.head);
+
if (txq){
TAILQ_REMOVE(&category->state.head, txq, list_elem);
- credit -= category->specs.credit_reserve;
- frames = ol_tx_dequeue(
- pdev, txq, &sctx->head, category->specs.send_limit, &credit, &bytes);
- used_credits = credit;
- category->state.frms -= frames;
- category->state.bytes -= bytes;
- if (txq->frms > 0) {
- TAILQ_INSERT_TAIL(&category->state.head, txq, list_elem);
+ credit = OL_TX_TXQ_GROUP_CREDIT_LIMIT(pdev, txq, credit);
+ if (credit > category->specs.credit_reserve) {
+ credit -= category->specs.credit_reserve;
+ /*
+ * this tx queue will download some frames,
+ * so update last_used_txq
+ */
+ pdev->tx_sched.last_used_txq = txq;
+
+ frames = ol_tx_dequeue(
+ pdev, txq, &sctx->head, category->specs.send_limit,
+ &credit, &bytes);
+ used_credits = credit;
+ category->state.frms -= frames;
+ category->state.bytes -= bytes;
+ if (txq->frms > 0) {
+ TAILQ_INSERT_TAIL(&category->state.head, txq, list_elem);
+ } else {
+ if (category->state.frms == 0) {
+ category->state.active = 0;
+ }
+ }
+ sctx->frms += frames;
+ OL_TX_TXQ_GROUP_CREDIT_UPDATE(pdev, txq, -credit, 0);
} else {
- if (category->state.frms == 0) {
- category->state.active = 0;
+ if (OL_TX_IS_TXQ_LAST_SERVICED_QUEUE(pdev, txq)) {
+ /*
+ * The scheduler has looked at all the active tx queues
+ * but none were able to download any of their tx frames.
+ * Nothing is changed, so if none were able to download before,
+ * they won't be able to download now.
+ * Return that no credit has been used, which
+ * will cause the scheduler to stop.
+ */
+ TAILQ_INSERT_HEAD(&category->state.head, txq, list_elem);
+ return 0;
+ } else {
+ TAILQ_INSERT_TAIL(&category->state.head, txq, list_elem);
+ if (!pdev->tx_sched.last_used_txq) {
+ pdev->tx_sched.last_used_txq = txq;
+ }
}
}
- sctx->frms += frames;
TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
} else {
used_credits = 0;
diff --git a/CORE/CLD_TXRX/TXRX/ol_tx_send.c b/CORE/CLD_TXRX/TXRX/ol_tx_send.c
index 6fd4792a91ce..f53ee9a90205 100644
--- a/CORE/CLD_TXRX/TXRX/ol_tx_send.c
+++ b/CORE/CLD_TXRX/TXRX/ol_tx_send.c
@@ -57,6 +57,7 @@
#ifdef QCA_SUPPORT_SW_TXRX_ENCAP
#include <ol_txrx_encap.h> /* OL_TX_RESTORE_HDR, etc*/
#endif
+#include <ol_tx_queue.h>
#ifdef TX_CREDIT_RECLAIM_SUPPORT
@@ -520,6 +521,10 @@ ol_tx_completion_handler(
tx_desc->status = status;
netbuf = tx_desc->netbuf;
+ if (pdev->cfg.is_high_latency) {
+ OL_TX_DESC_UPDATE_GROUP_CREDIT(pdev, tx_desc_id, 1, 0);
+ }
+
adf_nbuf_trace_update(netbuf, trace_str);
/* Per SDU update of byte count */
byte_cnt += adf_nbuf_len(netbuf);
@@ -568,6 +573,20 @@ ol_tx_completion_handler(
TXRX_STATS_UPDATE_TX_STATS(pdev, status, num_msdus, byte_cnt);
}
+#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
+void
+ol_tx_desc_update_group_credit(ol_txrx_pdev_handle pdev, u_int16_t tx_desc_id,
+ int credit, u_int8_t absolute)
+{
+ struct ol_tx_desc_t *tx_desc;
+ struct ol_tx_frms_queue_t *txq;
+ union ol_tx_desc_list_elem_t *td_array = pdev->tx_desc.array;
+ tx_desc = &td_array[tx_desc_id].tx_desc;
+ txq = (struct ol_tx_frms_queue_t *)(tx_desc->txq);
+ ol_tx_txq_group_credit_update(pdev, txq, credit, absolute);
+}
+#endif
+
/*
* ol_tx_single_completion_handler performs the same tx completion
* processing as ol_tx_completion_handler, but for a single frame.
diff --git a/CORE/CLD_TXRX/TXRX/ol_txrx.c b/CORE/CLD_TXRX/TXRX/ol_txrx.c
index 07281feceb6c..227a40498147 100644
--- a/CORE/CLD_TXRX/TXRX/ol_txrx.c
+++ b/CORE/CLD_TXRX/TXRX/ol_txrx.c
@@ -234,6 +234,83 @@ OL_TXRX_LOCAL_PEER_ID_CLEANUP(struct ol_txrx_pdev_t *pdev)
#define OL_TXRX_LOCAL_PEER_ID_CLEANUP(pdev) /* no-op */
#endif
+#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
+void
+ol_txrx_update_group_credit(
+ struct ol_tx_queue_group_t *group,
+ int32_t credit,
+ u_int8_t absolute)
+{
+ if (absolute) {
+ adf_os_atomic_set(&group->credit, credit);
+ } else {
+ adf_os_atomic_add(credit, &group->credit);
+ }
+}
+
+void
+ol_txrx_update_tx_queue_groups(
+ ol_txrx_pdev_handle pdev,
+ u_int8_t group_id,
+ int32_t credit,
+ u_int8_t absolute,
+ u_int32_t vdev_id_mask,
+ u_int32_t ac_mask
+)
+{
+ struct ol_tx_queue_group_t *group;
+ u_int32_t group_vdev_bit_mask, vdev_bit_mask, group_vdev_id_mask;
+ u_int32_t membership;
+ struct ol_txrx_vdev_t *vdev;
+ group = &pdev->txq_grps[group_id];
+
+ membership = OL_TXQ_GROUP_MEMBERSHIP_GET(vdev_id_mask,ac_mask);
+
+ adf_os_spin_lock_bh(&pdev->tx_queue_spinlock);
+ /*
+ * if the membership (vdev id mask and ac mask)
+ * matches then no need to update tx queue groups.
+ */
+ if (group->membership == membership) {
+ /* Update Credit Only */
+ goto credit_update;
+ }
+
+ /*
+ * membership (vdev id mask and ac mask) is not matching
+ * TODO: ignoring ac mask for now
+ */
+ group_vdev_id_mask =
+ OL_TXQ_GROUP_VDEV_ID_MASK_GET(group->membership);
+
+ TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
+ group_vdev_bit_mask =
+ OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(group_vdev_id_mask,vdev->vdev_id);
+ vdev_bit_mask =
+ OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(vdev_id_mask,vdev->vdev_id);
+
+ if (group_vdev_bit_mask != vdev_bit_mask) {
+ /*
+ * Change in vdev tx queue group
+ */
+ if (!vdev_bit_mask) {
+ /* Set Group Pointer (vdev and peer) to NULL */
+ ol_tx_set_vdev_group_ptr(pdev, vdev->vdev_id, NULL);
+ } else {
+ /* Set Group Pointer (vdev and peer) */
+ ol_tx_set_vdev_group_ptr(pdev, vdev->vdev_id, group);
+ }
+ }
+ }
+ /* Update membership */
+ group->membership = membership;
+credit_update:
+ /* Update Credit */
+ ol_txrx_update_group_credit(group, credit, absolute);
+ adf_os_spin_unlock_bh(&pdev->tx_queue_spinlock);
+}
+#endif
+
ol_txrx_pdev_handle
ol_txrx_pdev_attach(
ol_pdev_handle ctrl_pdev,
@@ -294,6 +371,10 @@ ol_txrx_pdev_attach(
/* when freeing up descriptors, keep going until there's a 15% margin */
pdev->tx_queue.rsrc_threshold_hi = (15 * desc_pool_size)/100;
#endif
+ for (i = 0 ; i < OL_TX_MAX_TXQ_GROUPS; i++) {
+ adf_os_atomic_init(&pdev->txq_grps[i].credit);
+ }
+
} else {
/*
* For LL, limit the number of host's tx descriptors to match the
@@ -872,6 +953,7 @@ ol_txrx_vdev_attach(
vdev->txqs[i].flag = ol_tx_queue_empty;
/* aggregation is not applicable for vdev tx queues */
vdev->txqs[i].aggr_state = ol_tx_aggr_disabled;
+ OL_TX_TXQ_SET_GROUP_PTR(&vdev->txqs[i], NULL);
}
}
#endif /* defined(CONFIG_HL_SUPPORT) */
@@ -1094,6 +1176,7 @@ ol_txrx_peer_attach(
#if defined(CONFIG_HL_SUPPORT)
if (ol_cfg_is_high_latency(pdev->ctrl_pdev)) {
+ adf_os_spin_lock_bh(&pdev->tx_queue_spinlock);
for (i = 0; i < OL_TX_NUM_TIDS; i++) {
TAILQ_INIT(&peer->txqs[i].head);
peer->txqs[i].paused_count.total = 0;
@@ -1102,7 +1185,10 @@ ol_txrx_peer_attach(
peer->txqs[i].ext_tid = i;
peer->txqs[i].flag = ol_tx_queue_empty;
peer->txqs[i].aggr_state = ol_tx_aggr_untried;
+ OL_TX_SET_PEER_GROUP_PTR(pdev, peer, vdev->vdev_id, i);
}
+ adf_os_spin_unlock_bh(&pdev->tx_queue_spinlock);
+
/* aggregation is not applicable for mgmt and non-QoS tx queues */
for (i = OL_TX_NUM_QOS_TIDS; i < OL_TX_NUM_TIDS; i++) {
peer->txqs[i].aggr_state = ol_tx_aggr_disabled;
diff --git a/CORE/CLD_TXRX/TXRX/ol_txrx_internal.h b/CORE/CLD_TXRX/TXRX/ol_txrx_internal.h
index 5191dcd22e44..071a179d6111 100644
--- a/CORE/CLD_TXRX/TXRX/ol_txrx_internal.h
+++ b/CORE/CLD_TXRX/TXRX/ol_txrx_internal.h
@@ -669,4 +669,12 @@ do {
#define DEBUG_HTT_CREDIT 0
#endif
+#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
+void
+ol_txrx_update_group_credit(
+ struct ol_tx_queue_group_t *group,
+ int32_t credit,
+ u_int8_t absolute);
+#endif
+
#endif /* _OL_TXRX_INTERNAL__H_ */
diff --git a/CORE/CLD_TXRX/TXRX/ol_txrx_types.h b/CORE/CLD_TXRX/TXRX/ol_txrx_types.h
index 5559a67da227..94967c62901f 100644
--- a/CORE/CLD_TXRX/TXRX/ol_txrx_types.h
+++ b/CORE/CLD_TXRX/TXRX/ol_txrx_types.h
@@ -154,6 +154,7 @@ struct ol_tx_desc_t {
#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
struct ol_txrx_vdev_t* vdev;
#endif
+ void *txq;
};
typedef TAILQ_HEAD(, ol_tx_desc_t) ol_tx_desc_list;
@@ -222,6 +223,19 @@ enum {
ol_tx_aggr_in_progress,
};
+#define OL_TX_MAX_GROUPS_PER_QUEUE 1
+#define OL_TX_MAX_VDEV_ID 16
+#define OL_TXQ_GROUP_VDEV_ID_MASK_GET(_membership) \
+ (((_membership) & 0xffff0000) >> 16)
+#define OL_TXQ_GROUP_VDEV_ID_BIT_MASK_GET(_mask, _vdev_id) \
+ ((_mask >> _vdev_id) & 0x01)
+#define OL_TXQ_GROUP_AC_MASK_GET(_membership) \
+ ((_membership) & 0x0000ffff)
+#define OL_TXQ_GROUP_AC_BIT_MASK_GET(_mask, _ac_mask) \
+ ((_mask >> _ac_mask) & 0x01)
+#define OL_TXQ_GROUP_MEMBERSHIP_GET(_vdev_mask, _ac_mask) \
+ ((_vdev_mask << 16) | _ac_mask)
+
struct ol_tx_frms_queue_t {
/* list_elem -
* Allow individual tx frame queues to be linked together into
@@ -239,6 +253,7 @@ struct ol_tx_frms_queue_t {
u_int32_t bytes;
ol_tx_desc_list head;
enum ol_tx_queue_status flag;
+ struct ol_tx_queue_group_t *group_ptrs[OL_TX_MAX_GROUPS_PER_QUEUE];
};
enum {
@@ -325,6 +340,12 @@ typedef enum _throttle_phase {
typedef void (*ipa_uc_op_cb_type)(u_int8_t *op_msg, void *osif_ctxt);
#endif /* IPA_UC_OFFLOAD */
+struct ol_tx_queue_group_t {
+ adf_os_atomic_t credit;
+ u_int32_t membership;
+};
+#define OL_TX_MAX_TXQ_GROUPS 2
+
/*
* As depicted in the diagram below, the pdev contains an array of
* NUM_EXT_TID ol_tx_active_queues_in_tid_t elements.
@@ -600,6 +621,7 @@ struct ol_txrx_pdev_t {
struct {
enum ol_tx_scheduler_status tx_sched_status;
ol_tx_sched_handle scheduler;
+ struct ol_tx_frms_queue_t *last_used_txq;
} tx_sched;
/*
* tx_queue only applies for HL, but is defined unconditionally to avoid
@@ -691,6 +713,7 @@ struct ol_txrx_pdev_t {
ipa_uc_op_cb_type ipa_uc_op_cb;
void *osif_dev;
#endif /* IPA_UC_OFFLOAD */
+ struct ol_tx_queue_group_t txq_grps[OL_TX_MAX_TXQ_GROUPS];
};
struct ol_txrx_vdev_t {
diff --git a/CORE/HDD/src/wlan_hdd_hostapd.c b/CORE/HDD/src/wlan_hdd_hostapd.c
index 7a642966db24..a419c606d491 100644
--- a/CORE/HDD/src/wlan_hdd_hostapd.c
+++ b/CORE/HDD/src/wlan_hdd_hostapd.c
@@ -1546,6 +1546,8 @@ VOS_STATUS hdd_hostapd_SAPEventCB( tpSap_Event pSapEvent, v_PVOID_t usrDataForCa
__func__, ret);
return VOS_STATUS_E_FAULT;
}
+#else
+ return VOS_STATUS_E_FAILURE;
#endif
case eSAP_DFS_NOL_SET:
hddLog(VOS_TRACE_LEVEL_INFO, FL("Received eSAP_DFS_NOL_SET event"));
@@ -1565,6 +1567,8 @@ VOS_STATUS hdd_hostapd_SAPEventCB( tpSap_Event pSapEvent, v_PVOID_t usrDataForCa
__func__,
pSapEvent->sapevt.sapDfsNolInfo.sDfsList);
}
+#else
+ return VOS_STATUS_E_FAILURE;
#endif
return VOS_STATUS_SUCCESS;
diff --git a/CORE/HDD/src/wlan_hdd_oemdata.c b/CORE/HDD/src/wlan_hdd_oemdata.c
index eb3327a4202f..b10da5ef1332 100644
--- a/CORE/HDD/src/wlan_hdd_oemdata.c
+++ b/CORE/HDD/src/wlan_hdd_oemdata.c
@@ -243,9 +243,6 @@ int iw_set_oem_data_req(
}
-/* Forward declaration */
-static int oem_msg_callback(struct sk_buff *skb);
-
/**---------------------------------------------------------------------------
\brief iw_get_oem_data_cap()
@@ -874,198 +871,181 @@ void hdd_SendPeerStatusIndToOemApp(v_MACADDR_t *peerMac,
return;
}
-/**---------------------------------------------------------------------------
-
- \brief oem_activate_service() - Activate oem message handler
-
- This function registers a handler to receive netlink message from
- an OEM application process.
-
- \param -
- - pAdapter - pointer to HDD adapter
-
- \return - 0 for success, non zero for failure
-
- --------------------------------------------------------------------------*/
-int oem_activate_service(void *pAdapter)
-{
- pHddCtx = (struct hdd_context_s*) pAdapter;
-
- /* Register the msg handler for msgs addressed to WLAN_NL_MSG_OEM */
- nl_srv_register(WLAN_NL_MSG_OEM, oem_msg_callback);
- return 0;
-}
-
/*
* Callback function invoked by Netlink service for all netlink
* messages (from user space) addressed to WLAN_NL_MSG_OEM
*/
-/**---------------------------------------------------------------------------
-
- \brief oem_msg_callback() - callback invoked by netlink service
-
- This function gets invoked by netlink service when a message
- is received from user space addressed to WLAN_NL_MSG_OEM
-
- \param -
- - skb - skb with netlink message
-
- \return - 0 for success, non zero for failure
- --------------------------------------------------------------------------*/
+/**
+ * oem_msg_callback() - callback invoked by netlink service
+ * @skb: skb with netlink message
+ *
+ * This function gets invoked by netlink service when a message
+ * is received from user space addressed to WLAN_NL_MSG_OEM
+ *
+ * Return: zero on success
+ * On error, error number will be returned.
+ */
static int oem_msg_callback(struct sk_buff *skb)
{
struct nlmsghdr *nlh;
tAniMsgHdr *msg_hdr;
+ int ret;
char *sign_str = NULL;
nlh = (struct nlmsghdr *)skb->data;
- if (!nlh)
- {
- VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
- "%s: Netlink header null", __func__);
- return -1;
- }
-
- if (!pHddCtx)
- {
- VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
- "%s: HDD context null", __func__);
- send_oem_err_rsp_nlink_msg(nlh->nlmsg_pid, OEM_ERR_NULL_CONTEXT);
- return -1;
+ if (!nlh) {
+ hddLog(LOGE, FL("Netlink header null"));
+ return -EPERM;
}
- if (pHddCtx->isLogpInProgress)
- {
- VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
- "%s:LOGP in Progress. Ignore!!!", __func__);
- return -EBUSY;
+ ret = wlan_hdd_validate_context(pHddCtx);
+ if (0 != ret) {
+ hddLog(LOGE, FL("HDD context is not valid"));
+ return ret;
}
msg_hdr = NLMSG_DATA(nlh);
- if (!msg_hdr)
- {
- VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
- "%s: Message header null", __func__);
- send_oem_err_rsp_nlink_msg(nlh->nlmsg_pid, OEM_ERR_NULL_MESSAGE_HEADER);
- return -1;
+ if (!msg_hdr) {
+ hddLog(LOGE, FL("Message header null"));
+ send_oem_err_rsp_nlink_msg(nlh->nlmsg_pid, OEM_ERR_NULL_MESSAGE_HEADER);
+ return -EPERM;
}
- if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(tAniMsgHdr) + msg_hdr->length))
- {
- VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
- "%s: Invalid nl msg len, nlh->nlmsg_len (%d), msg_hdr->len (%d)",
- __func__, nlh->nlmsg_len, msg_hdr->length);
- send_oem_err_rsp_nlink_msg(nlh->nlmsg_pid, OEM_ERR_INVALID_MESSAGE_LENGTH);
- return -1;
+ if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(tAniMsgHdr) + msg_hdr->length)) {
+ hddLog(LOGE, FL("Invalid nl msg len, nlh->nlmsg_len (%d), msg_hdr->len (%d)"),
+ nlh->nlmsg_len, msg_hdr->length);
+ send_oem_err_rsp_nlink_msg(nlh->nlmsg_pid,
+ OEM_ERR_INVALID_MESSAGE_LENGTH);
+ return -EPERM;
}
- switch (msg_hdr->type)
- {
- case ANI_MSG_APP_REG_REQ:
- /* Registration request is only allowed for Qualcomm Application */
- VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
- "%s: Received App Req Req from App process pid(%d), len(%d)",
- __func__, nlh->nlmsg_pid, msg_hdr->length);
+ switch (msg_hdr->type) {
+ case ANI_MSG_APP_REG_REQ:
+ /* Registration request is only allowed for Qualcomm Application */
+ hddLog(LOG1, FL("Received App Req Req from App process pid(%d), len(%d)"),
+ nlh->nlmsg_pid, msg_hdr->length);
+
+ sign_str = (char *)((char *)msg_hdr + sizeof(tAniMsgHdr));
+ if ((OEM_APP_SIGNATURE_LEN == msg_hdr->length) &&
+ (0 == strncmp(sign_str, OEM_APP_SIGNATURE_STR,
+ OEM_APP_SIGNATURE_LEN))) {
+ hddLog(LOG1, FL("Valid App Req Req from oem app process pid(%d)"),
+ nlh->nlmsg_pid);
+
+ pHddCtx->oem_app_registered = TRUE;
+ pHddCtx->oem_pid = nlh->nlmsg_pid;
+ send_oem_reg_rsp_nlink_msg();
+ } else {
+ hddLog(LOGE, FL("Invalid signature in App Reg Request from pid(%d)"),
+ nlh->nlmsg_pid);
+ send_oem_err_rsp_nlink_msg(nlh->nlmsg_pid,
+ OEM_ERR_INVALID_SIGNATURE);
+ return -EPERM;
+ }
+ break;
+
+ case ANI_MSG_OEM_DATA_REQ:
+ hddLog(LOG1, FL("Received Oem Data Request length(%d) from pid: %d"),
+ msg_hdr->length, nlh->nlmsg_pid);
+
+ if ((!pHddCtx->oem_app_registered) ||
+ (nlh->nlmsg_pid != pHddCtx->oem_pid)) {
+ /* either oem app is not registered yet or pid is different */
+ hddLog(LOGE, FL("OEM DataReq: app not registered(%d) or incorrect pid(%d)"),
+ pHddCtx->oem_app_registered, nlh->nlmsg_pid);
+ send_oem_err_rsp_nlink_msg(nlh->nlmsg_pid,
+ OEM_ERR_APP_NOT_REGISTERED);
+ return -EPERM;
+ }
- sign_str = (char *)((char *)msg_hdr + sizeof(tAniMsgHdr));
- if ((OEM_APP_SIGNATURE_LEN == msg_hdr->length) &&
- (0 == strncmp(sign_str, OEM_APP_SIGNATURE_STR,
- OEM_APP_SIGNATURE_LEN)))
- {
- VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
- "%s: Valid App Req Req from oem app process pid(%d)",
- __func__, nlh->nlmsg_pid);
+ if ((!msg_hdr->length) || (OEM_DATA_REQ_SIZE < msg_hdr->length)) {
+ hddLog(LOGE, FL("Invalid length (%d) in Oem Data Request"),
+ msg_hdr->length);
+ send_oem_err_rsp_nlink_msg(nlh->nlmsg_pid,
+ OEM_ERR_INVALID_MESSAGE_LENGTH);
+ return -EPERM;
+ }
+ oem_process_data_req_msg(msg_hdr->length,
+ (char *) ((char *)msg_hdr +
+ sizeof(tAniMsgHdr)));
+ break;
+
+ case ANI_MSG_CHANNEL_INFO_REQ:
+ hddLog(LOG1,
+ FL("Received channel info request, num channel(%d) from pid: %d"),
+ msg_hdr->length, nlh->nlmsg_pid);
+
+ if ((!pHddCtx->oem_app_registered) ||
+ (nlh->nlmsg_pid != pHddCtx->oem_pid)) {
+ /* either oem app is not registered yet or pid is different */
+ hddLog(LOGE,
+ FL("Chan InfoReq: app not registered(%d) or incorrect pid(%d)"),
+ pHddCtx->oem_app_registered, nlh->nlmsg_pid);
+ send_oem_err_rsp_nlink_msg(nlh->nlmsg_pid,
+ OEM_ERR_APP_NOT_REGISTERED);
+ return -EPERM;
+ }
- pHddCtx->oem_app_registered = TRUE;
- pHddCtx->oem_pid = nlh->nlmsg_pid;
- send_oem_reg_rsp_nlink_msg();
- }
- else
- {
- VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
- "%s: Invalid signature in App Reg Request from pid(%d)",
- __func__, nlh->nlmsg_pid);
- send_oem_err_rsp_nlink_msg(nlh->nlmsg_pid,
- OEM_ERR_INVALID_SIGNATURE);
- return -1;
- }
- break;
+ /* message length contains list of channel ids */
+ if ((!msg_hdr->length) ||
+ (WNI_CFG_VALID_CHANNEL_LIST_LEN < msg_hdr->length)) {
+ hddLog(LOGE,
+ FL("Invalid length (%d) in channel info request"),
+ msg_hdr->length);
+ send_oem_err_rsp_nlink_msg(nlh->nlmsg_pid,
+ OEM_ERR_INVALID_MESSAGE_LENGTH);
+ return -EPERM;
+ }
+ oem_process_channel_info_req_msg(msg_hdr->length,
+ (char *)((char*)msg_hdr + sizeof(tAniMsgHdr)));
+ break;
+
+ default:
+ hddLog(LOGE,
+ FL("Received Invalid message type (%d), length (%d)"),
+ msg_hdr->type, msg_hdr->length);
+ send_oem_err_rsp_nlink_msg(nlh->nlmsg_pid,
+ OEM_ERR_INVALID_MESSAGE_TYPE);
+ return -EPERM;
+ }
+ return 0;
+}
- case ANI_MSG_OEM_DATA_REQ:
- VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
- "%s: Received Oem Data Request length(%d) from pid: %d",
- __func__, msg_hdr->length, nlh->nlmsg_pid);
+static int __oem_msg_callback(struct sk_buff *skb)
+{
+ int ret;
- if ((!pHddCtx->oem_app_registered) ||
- (nlh->nlmsg_pid != pHddCtx->oem_pid))
- {
- /* either oem app is not registered yet or pid is different */
- VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
- "%s: OEM DataReq: app not registered(%d) or incorrect pid(%d)",
- __func__, pHddCtx->oem_app_registered, nlh->nlmsg_pid);
- send_oem_err_rsp_nlink_msg(nlh->nlmsg_pid,
- OEM_ERR_APP_NOT_REGISTERED);
- return -1;
- }
+ vos_ssr_protect(__func__);
+ ret = oem_msg_callback(skb);
+ vos_ssr_unprotect(__func__);
- if ((!msg_hdr->length) ||
- (OEM_DATA_REQ_SIZE < msg_hdr->length))
- {
- VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
- "%s: Invalid length (%d) in Oem Data Request",
- __func__, msg_hdr->length);
- send_oem_err_rsp_nlink_msg(nlh->nlmsg_pid,
- OEM_ERR_INVALID_MESSAGE_LENGTH);
- return -1;
- }
- oem_process_data_req_msg(msg_hdr->length,
- (char *) ((char *)msg_hdr +
- sizeof(tAniMsgHdr)));
- break;
+ return ret;
+}
- case ANI_MSG_CHANNEL_INFO_REQ:
- VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
- "%s: Received channel info request, num channel(%d) from pid: %d",
- __func__, msg_hdr->length, nlh->nlmsg_pid);
+/**---------------------------------------------------------------------------
- if ((!pHddCtx->oem_app_registered) ||
- (nlh->nlmsg_pid != pHddCtx->oem_pid))
- {
- /* either oem app is not registered yet or pid is different */
- VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
- "%s: Chan InfoReq: app not registered(%d) or incorrect pid(%d)",
- __func__, pHddCtx->oem_app_registered, nlh->nlmsg_pid);
- send_oem_err_rsp_nlink_msg(nlh->nlmsg_pid,
- OEM_ERR_APP_NOT_REGISTERED);
- return -1;
- }
+ \brief oem_activate_service() - Activate oem message handler
- /* message length contains list of channel ids */
- if ((!msg_hdr->length) ||
- (WNI_CFG_VALID_CHANNEL_LIST_LEN < msg_hdr->length))
- {
- VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
- "%s: Invalid length (%d) in channel info request",
- __func__, msg_hdr->length);
- send_oem_err_rsp_nlink_msg(nlh->nlmsg_pid,
- OEM_ERR_INVALID_MESSAGE_LENGTH);
- return -1;
- }
- oem_process_channel_info_req_msg(msg_hdr->length,
- (char *)((char*)msg_hdr + sizeof(tAniMsgHdr)));
- break;
+ This function registers a handler to receive netlink message from
+ an OEM application process.
- default:
- VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
- "%s: Received Invalid message type (%d), length (%d)",
- __func__, msg_hdr->type, msg_hdr->length);
- send_oem_err_rsp_nlink_msg(nlh->nlmsg_pid,
- OEM_ERR_INVALID_MESSAGE_TYPE);
- return -1;
- }
+ \param -
+ - pAdapter - pointer to HDD adapter
+
+ \return - 0 for success, non zero for failure
+
+ --------------------------------------------------------------------------*/
+int oem_activate_service(void *pAdapter)
+{
+ pHddCtx = (struct hdd_context_s*) pAdapter;
+
+ /* Register the msg handler for msgs addressed to WLAN_NL_MSG_OEM */
+ nl_srv_register(WLAN_NL_MSG_OEM, __oem_msg_callback);
return 0;
}
+
+
#endif
diff --git a/CORE/MAC/inc/qwlan_version.h b/CORE/MAC/inc/qwlan_version.h
index afd45f322d39..6d775a690c00 100644
--- a/CORE/MAC/inc/qwlan_version.h
+++ b/CORE/MAC/inc/qwlan_version.h
@@ -42,9 +42,9 @@ BRIEF DESCRIPTION:
#define QWLAN_VERSION_MINOR 0
#define QWLAN_VERSION_PATCH 0
#define QWLAN_VERSION_EXTRA ""
-#define QWLAN_VERSION_BUILD 247
+#define QWLAN_VERSION_BUILD 248
-#define QWLAN_VERSIONSTR "1.0.0.247"
+#define QWLAN_VERSIONSTR "1.0.0.248"
#define AR6320_REV1_VERSION 0x5000000
diff --git a/CORE/SAP/src/sapModule.c b/CORE/SAP/src/sapModule.c
index 95b28d488a8c..bce11240f346 100644
--- a/CORE/SAP/src/sapModule.c
+++ b/CORE/SAP/src/sapModule.c
@@ -3559,7 +3559,7 @@ WLANSAP_Get_DfsNol(v_PVOID_t pSapCtx)
current_time = vos_get_monotonic_boottime();
found_time = dfs_nol[i].radar_found_timestamp;
- elapsed_time = abs(current_time - found_time);
+ elapsed_time = current_time - found_time;
/* check if channel is available
* if either channel is usable or available, or timer expired 30mins
diff --git a/CORE/SERVICES/BMI/ol_fw.c b/CORE/SERVICES/BMI/ol_fw.c
index 6da6486006f9..4e85b26d70d6 100644
--- a/CORE/SERVICES/BMI/ol_fw.c
+++ b/CORE/SERVICES/BMI/ol_fw.c
@@ -123,6 +123,10 @@ static int ol_get_fw_files_for_target(struct ol_fw_files *pfw_files,
}
#endif
+#ifdef HIF_USB
+static A_STATUS ol_usb_extra_initialization(struct ol_softc *scn);
+#endif
+
extern int
dbglog_parse_debug_logs(ol_scn_t scn, u_int8_t *datap, u_int32_t len);
@@ -2067,6 +2071,8 @@ int ol_download_firmware(struct ol_softc *scn)
#ifdef HIF_SDIO
status = ol_sdio_extra_initialization(scn);
+#elif defined(HIF_USB)
+ status = ol_usb_extra_initialization(scn);
#endif
return status;
@@ -2500,3 +2506,21 @@ ol_target_ready(struct ol_softc *scn, void *cfg_ctx)
}
}
#endif
+
+#ifdef HIF_USB
+static A_STATUS
+ol_usb_extra_initialization(struct ol_softc *scn)
+{
+ A_STATUS status = !EOK;
+ u_int32_t param = 0;
+
+ param |= HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE;
+ status = BMIWriteMemory(scn->hif_hdl,
+ host_interest_item_address(scn->target_type,
+ offsetof(struct host_interest_s,
+ hi_acs_flags)),
+ (u_int8_t *)&param, 4, scn);
+
+ return status;
+}
+#endif
diff --git a/CORE/SERVICES/COMMON/ol_txrx_htt_api.h b/CORE/SERVICES/COMMON/ol_txrx_htt_api.h
index 9a1f164f9b3c..afc9c78f49b5 100644
--- a/CORE/SERVICES/COMMON/ol_txrx_htt_api.h
+++ b/CORE/SERVICES/COMMON/ol_txrx_htt_api.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2013 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2014 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -167,6 +167,27 @@ ol_tx_completion_handler(
void
ol_tx_credit_completion_handler(ol_txrx_pdev_handle pdev, int credits);
+#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
+void
+ol_txrx_update_tx_queue_groups(
+ ol_txrx_pdev_handle pdev,
+ u_int8_t group_id,
+ int32_t credit,
+ u_int8_t absolute,
+ u_int32_t vdev_id_mask,
+ u_int32_t ac_mask
+);
+
+void
+ol_tx_desc_update_group_credit(
+ ol_txrx_pdev_handle pdev,
+ u_int16_t tx_desc_id,
+ int credit, u_int8_t absolute);
+#define OL_TX_DESC_UPDATE_GROUP_CREDIT ol_tx_desc_update_group_credit
+#else
+#define OL_TX_DESC_UPDATE_GROUP_CREDIT(pdev, tx_desc_id, credit, absolute) /* no-op */
+#endif
+
/**
* @brief Init the total amount of target credit.
* @details
@@ -630,4 +651,10 @@ ol_rx_in_order_indication_handler(
u_int8_t tid,
u_int8_t is_offload );
+#ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
+u_int32_t ol_tx_get_max_tx_groups_supported(struct ol_txrx_pdev_t *pdev);
+#define OL_TX_GET_MAX_GROUPS ol_tx_get_max_tx_groups_supported
+#else
+#define OL_TX_GET_MAX_GROUPS(pdev) 0
+#endif
#endif /* _OL_TXRX_HTT_API__H_ */
diff --git a/CORE/SERVICES/HTC/htc_services.c b/CORE/SERVICES/HTC/htc_services.c
index b717b22e7197..d45f100ad3a5 100644
--- a/CORE/SERVICES/HTC/htc_services.c
+++ b/CORE/SERVICES/HTC/htc_services.c
@@ -274,7 +274,7 @@ A_STATUS HTCConnectService(HTC_HANDLE HTCHandle,
break;
}
-#if defined(HIF_USB) || defined(HIF_SDIO)
+#if defined(HIF_SDIO)
/*
When AltDataCreditSize is non zero, it indicates the credit size for
HTT and all other services on Mbox0. Mbox1 has WMI_CONTROL_SVC which
@@ -292,6 +292,14 @@ A_STATUS HTCConnectService(HTC_HANDLE HTCHandle,
if ((target->AltDataCreditSize) && HIFIsMailBoxSwapped(target->hif_dev)
&& (pEndpoint->UL_PipeID == 1) && (pEndpoint->DL_PipeID == 0))
pEndpoint->TxCreditSize = target->AltDataCreditSize;
+#elif defined(HIF_USB)
+ /*
+     * Endpoint to pipe mapping is one-to-one in USB.
+     * If AltDataCreditSize is not zero, it indicates the credit size
+     * that the HTT_DATA_MSG_SVC service uses (AltDataCreditSize).
+ */
+ if ((target->AltDataCreditSize) && (pEndpoint->ServiceID == HTT_DATA_MSG_SVC))
+ pEndpoint->TxCreditSize = target->AltDataCreditSize;
#endif
adf_os_assert(!pEndpoint->dl_is_polled); /* not currently supported */
diff --git a/CORE/SERVICES/WMA/wma.c b/CORE/SERVICES/WMA/wma.c
index 5d8ac2a43def..0933cfb32c44 100644
--- a/CORE/SERVICES/WMA/wma.c
+++ b/CORE/SERVICES/WMA/wma.c
@@ -12791,6 +12791,13 @@ static void wma_add_bss(tp_wma_handle wma, tpAddBssParams params)
switch(params->halPersona) {
case VOS_STA_SAP_MODE:
+        /* If the channel on which the SAP is being brought up matches
+         * the channel where radar was previously found, reset the
+         * last_radar_found_chan variable to avoid race conditions.
+         */
+ if (params->currentOperChannel ==
+ wma->dfs_ic->last_radar_found_chan)
+ wma->dfs_ic->last_radar_found_chan = 0;
case VOS_P2P_GO_MODE:
wma_add_bss_ap_mode(wma, params);
break;
diff --git a/CORE/VOSS/src/vos_api.c b/CORE/VOSS/src/vos_api.c
index bb539886f0ca..da273fbb80c7 100644
--- a/CORE/VOSS/src/vos_api.c
+++ b/CORE/VOSS/src/vos_api.c
@@ -2397,6 +2397,12 @@ void vos_trigger_recovery(void)
#endif
}
+/**
+ * @brief vos_get_monotonic_boottime() - get monotonic time since boot
+ *
+ * @return Monotonic boot time, in microseconds
+ */
+
v_U64_t vos_get_monotonic_boottime(void)
{
#ifdef CONFIG_CNSS
@@ -2405,7 +2411,7 @@ v_U64_t vos_get_monotonic_boottime(void)
cnss_get_monotonic_boottime(&ts);
return (((v_U64_t)ts.tv_sec * 1000000) + (ts.tv_nsec / 1000));
#else
- return adf_os_ticks_to_msecs(adf_os_ticks());
+ return adf_os_ticks_to_msecs(adf_os_ticks()) * 1000;
#endif
}
diff --git a/Kbuild b/Kbuild
index 2cf5124fc90a..30ef1e91ab0e 100644
--- a/Kbuild
+++ b/Kbuild
@@ -921,7 +921,8 @@ CDEFINES += -DCONFIG_HL_SUPPORT \
-DCONFIG_AR6320_SUPPORT \
-DSDIO_3_0 \
-DHIF_SDIO \
- -DCONFIG_ATH_PROCFS_DIAG_SUPPORT
+ -DCONFIG_ATH_PROCFS_DIAG_SUPPORT \
+ -DFEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
endif
ifeq ($(CONFIG_QCA_WIFI_SDIO), 1)