summaryrefslogtreecommitdiff
path: root/qdf/linux/src
diff options
context:
space:
mode:
authorChouhan, Anurag <achouhan@codeaurora.org>2016-03-03 18:57:27 +0530
committerGerrit - the friendly Code Review server <code-review@localhost>2016-03-16 12:18:26 -0700
commit5776318d1934b14c3b4eb4eb98c74830d64d00a7 (patch)
tree911b2792f6d5ce402cd339455bc17bebf4e2dfb6 /qdf/linux/src
parent5693683262e239d48dbc3134404db1bf3f380252 (diff)
qcacmn: Add QDF OS abstraction convergence
Converge ADF and CDF API's and move them to QDF folder. MCL/WIN driver use this QDF converged module for OS abstraction. Change-Id: I1d0cdfd8730a5c021aaa50b7dc8549d491d760b3 CRs-Fixed: 981187
Diffstat (limited to 'qdf/linux/src')
-rw-r--r--qdf/linux/src/i_osdep.h200
-rw-r--r--qdf/linux/src/i_qdf_atomic.h146
-rw-r--r--qdf/linux/src/i_qdf_defer.h323
-rw-r--r--qdf/linux/src/i_qdf_event.h55
-rw-r--r--qdf/linux/src/i_qdf_list.h62
-rw-r--r--qdf/linux/src/i_qdf_lock.h336
-rw-r--r--qdf/linux/src/i_qdf_mc_timer.h60
-rw-r--r--qdf/linux/src/i_qdf_mem.h213
-rw-r--r--qdf/linux/src/i_qdf_module.h63
-rw-r--r--qdf/linux/src/i_qdf_nbuf.h1569
-rw-r--r--qdf/linux/src/i_qdf_net_types.h52
-rw-r--r--qdf/linux/src/i_qdf_perf.h88
-rw-r--r--qdf/linux/src/i_qdf_time.h237
-rw-r--r--qdf/linux/src/i_qdf_timer.h154
-rw-r--r--qdf/linux/src/i_qdf_trace.h94
-rw-r--r--qdf/linux/src/i_qdf_types.h292
-rw-r--r--qdf/linux/src/i_qdf_util.h239
-rw-r--r--qdf/linux/src/qdf_defer.c84
-rw-r--r--qdf/linux/src/qdf_event.c260
-rw-r--r--qdf/linux/src/qdf_list.c240
-rw-r--r--qdf/linux/src/qdf_lock.c660
-rw-r--r--qdf/linux/src/qdf_mc_timer.c702
-rw-r--r--qdf/linux/src/qdf_mem.c951
-rw-r--r--qdf/linux/src/qdf_module.c68
-rw-r--r--qdf/linux/src/qdf_nbuf.c1536
-rw-r--r--qdf/linux/src/qdf_perf.c195
-rw-r--r--qdf/linux/src/qdf_threads.c105
-rw-r--r--qdf/linux/src/qdf_trace.c1054
28 files changed, 10038 insertions, 0 deletions
diff --git a/qdf/linux/src/i_osdep.h b/qdf/linux/src/i_osdep.h
new file mode 100644
index 000000000000..ee143191abfe
--- /dev/null
+++ b/qdf/linux/src/i_osdep.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_osdep
+ * QCA driver framework OS dependent types
+ */
+
+#ifndef _I_OSDEP_H
+#define _I_OSDEP_H
+
+#ifdef CONFIG_MCL
+#include <cds_queue.h>
+#include <cds_if_upperproto.h>
+#else
+#include <sys/queue.h>
+#endif
+
+/**
+ * enum qdf_bus_type - Supported Bus types
+ * @QDF_BUS_TYPE_PCI: PCI Bus
+ * @QDF_BUS_TYPE_AHB: AHB Bus
+ * @QDF_BUS_TYPE_SNOC: SNOC Bus
+ * @QDF_BUS_TYPE_SIM: Simulator
+ */
+enum qdf_bus_type {
+ QDF_BUS_TYPE_PCI,
+ QDF_BUS_TYPE_AHB,
+ QDF_BUS_TYPE_SNOC,
+ QDF_BUS_TYPE_SIM
+};
+
+/*
+ * Byte Order stuff
+ */
+#define le16toh(_x) le16_to_cpu(_x)
+#define htole16(_x) cpu_to_le16(_x)
+#define htobe16(_x) cpu_to_be16(_x)
+#define le32toh(_x) le32_to_cpu(_x)
+#define htole32(_x) cpu_to_le32(_x)
+#define be16toh(_x) be16_to_cpu(_x)
+#define be32toh(_x) be32_to_cpu(_x)
+#define htobe32(_x) cpu_to_be32(_x)
+
+typedef struct timer_list os_timer_t;
+
+#ifdef CONFIG_SMP
+/* Undo the one provided by the kernel to debug spin locks */
+#undef spin_lock
+#undef spin_unlock
+#undef spin_trylock
+
+#define spin_lock(x) \
+ do { \
+ spin_lock_bh(x); \
+ } while (0)
+
+#define spin_unlock(x) \
+ do { \
+ if (!spin_is_locked(x)) { \
+ WARN_ON(1); \
+ printk(KERN_EMERG " %s:%d unlock addr=%p, %s \n", __func__, __LINE__, x, \
+ !spin_is_locked(x) ? "Not locked" : ""); \
+ } \
+ spin_unlock_bh(x); \
+ } while (0)
+#define spin_trylock(x) spin_trylock_bh(x)
+#define OS_SUPPORT_ASYNC_Q 1 /* support for handling asyn function calls */
+
+#else
+#define OS_SUPPORT_ASYNC_Q 0
+#endif /* ifdef CONFIG_SMP */
+
+/**
+ * struct _os_mesg_t - maintain attributes of message
+ * @mesg_next: pointer to the next message
+ * @mesg_type: type of message
+ * @mesg_len: length of the message
+ */
+typedef struct _os_mesg_t {
+ STAILQ_ENTRY(_os_mesg_t) mesg_next;
+ uint16_t mesg_type;
+ uint16_t mesg_len;
+} os_mesg_t;
+
+/**
+ * struct qdf_bus_context - Bus to hal context handoff
+ * @bc_tag: bus context tag
+ * @bc_handle: bus context handle
+ * @bc_bustype: bus type
+ */
+typedef struct qdf_bus_context {
+ int bc_tag;
+ char *bc_handle;
+ enum qdf_bus_type bc_bustype;
+} QDF_BUS_CONTEXT;
+
+typedef struct _NIC_DEV *osdev_t;
+
+typedef void (*os_mesg_handler_t)(void *ctx, uint16_t mesg_type,
+ uint16_t mesg_len,
+ void *mesg);
+
+
+/**
+ * typedef os_mesg_queue_t - Object to maintain message queue
+ * @dev_handle: OS handle
+ * @num_queued: number of queued messages
+ * @mesg_len: message length
+ * @mesg_queue_buf: pointer to message queue buffer
+ * @mesg_head: queued mesg buffers
+ * @mesg_free_head: free mesg buffers
+ * @lock: spinlock object
+ * @ev_handler_lock: spinlock object to event handler
+ * @task: pointer to task
+ * @_timer: instance of timer
+ * @handler: message handler
+ * @ctx: pointer to context
+ * @is_synchronous: bit to save synchronous status
+ */
+typedef struct {
+ osdev_t dev_handle;
+ int32_t num_queued;
+ int32_t mesg_len;
+ uint8_t *mesg_queue_buf;
+ STAILQ_HEAD(, _os_mesg_t) mesg_head;
+ STAILQ_HEAD(, _os_mesg_t) mesg_free_head;
+ spinlock_t lock;
+ spinlock_t ev_handler_lock;
+#ifdef USE_SOFTINTR
+ void *_task;
+#else
+ os_timer_t _timer;
+#endif
+ os_mesg_handler_t handler;
+ void *ctx;
+ uint8_t is_synchronous:1;
+} os_mesg_queue_t;
+
+/**
+ * struct _NIC_DEV - Definition of OS-dependent device structure.
+ * It'll be opaque to the actual ATH layer.
+ * @bdev: bus device handle
+ * @netdev: net device handle (wifi%d)
+ * @intr_tq: tasklet
+ * @devstats: net device statistics
+ * @bc: hal bus context
+ * @device: generic device
+ * @event_queue: instance to wait queue
+ * @is_device_asleep: keep device status, sleep or awake
+ */
+struct _NIC_DEV {
+ void *bdev;
+ struct net_device *netdev;
+ qdf_bh_t intr_tq;
+ struct net_device_stats devstats;
+ QDF_BUS_CONTEXT bc;
+#ifdef ATH_PERF_PWR_OFFLOAD
+ struct device *device;
+ wait_queue_head_t event_queue;
+#endif /* ATH_PERF_PWR_OFFLOAD */
+#if OS_SUPPORT_ASYNC_Q
+ os_mesg_queue_t async_q;
+#endif
+#ifdef ATH_BUS_PM
+ uint8_t is_device_asleep;
+#endif /* ATH_BUS_PM */
+};
+
+#define __QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos) \
+ proc_dointvec(ctl, write, buffer, lenp, ppos)
+
+#define __QDF_SYSCTL_PROC_DOSTRING(ctl, write, filp, buffer, lenp, ppos) \
+ proc_dostring(ctl, write, filp, buffer, lenp, ppos)
+
+#endif /* _I_OSDEP_H */
diff --git a/qdf/linux/src/i_qdf_atomic.h b/qdf/linux/src/i_qdf_atomic.h
new file mode 100644
index 000000000000..27fd018352f7
--- /dev/null
+++ b/qdf/linux/src/i_qdf_atomic.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_atomic.h
+ * This file provides OS dependent atomic APIs.
+ */
+
+#ifndef I_QDF_ATOMIC_H
+#define I_QDF_ATOMIC_H
+
+#include <qdf_status.h> /* QDF_STATUS */
+#include <linux/atomic.h>
+
+typedef atomic_t __qdf_atomic_t;
+
+/**
+ * __qdf_atomic_init() - initialize an atomic type variable
+ * @v: A pointer to an opaque atomic variable
+ *
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS __qdf_atomic_init(__qdf_atomic_t *v)
+{
+ atomic_set(v, 0);
+ return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * __qdf_atomic_read() - read the value of an atomic variable
+ * @v: A pointer to an opaque atomic variable
+ *
+ * Return: The current value of the variable
+ */
+static inline int32_t __qdf_atomic_read(__qdf_atomic_t *v)
+{
+ return atomic_read(v);
+}
+
+/**
+ * __qdf_atomic_inc() - increment the value of an atomic variable
+ * @v: A pointer to an opaque atomic variable
+ *
+ * Return: None
+ */
+static inline void __qdf_atomic_inc(__qdf_atomic_t *v)
+{
+ atomic_inc(v);
+}
+
+/**
+ * __qdf_atomic_dec() - decrement the value of an atomic variable
+ * @v: A pointer to an opaque atomic variable
+ *
+ * Return: None
+ */
+static inline void __qdf_atomic_dec(__qdf_atomic_t *v)
+{
+ atomic_dec(v);
+}
+
+/**
+ * __qdf_atomic_add() - add a value to the value of an atomic variable
+ * @i: The amount by which to increase the atomic counter
+ * @v: A pointer to an opaque atomic variable
+ *
+ * Return: None
+ */
+static inline void __qdf_atomic_add(int i, __qdf_atomic_t *v)
+{
+ atomic_add(i, v);
+}
+
+/**
+ * __qdf_atomic_sub() - Subtract a value from an atomic variable
+ * @i: the amount by which to decrease the atomic counter
+ * @v: a pointer to an opaque atomic variable
+ *
+ * Return: none
+ */
+static inline void __qdf_atomic_sub(int i, __qdf_atomic_t *v)
+{
+ atomic_sub(i, v);
+}
+
+/**
+ * __qdf_atomic_dec_and_test() - decrement an atomic variable and check if the
+ * new value is zero
+ * @v: A pointer to an opaque atomic variable
+ *
+ * Return:
+ * true (non-zero) if the new value is zero,
+ * false (0) if the new value is non-zero
+ */
+static inline int32_t __qdf_atomic_dec_and_test(__qdf_atomic_t *v)
+{
+ return atomic_dec_and_test(v);
+}
+
+/**
+ * __qdf_atomic_set() - set a value to the value of an atomic variable
+ * @v: A pointer to an opaque atomic variable
+ *
+ * Return: None
+ */
+static inline void __qdf_atomic_set(__qdf_atomic_t *v, int i)
+{
+ atomic_set(v, i);
+}
+
+/**
+ * __qdf_atomic_inc_return() - return the incremented value of an atomic variable
+ * @v: A pointer to an opaque atomic variable
+ *
+ * Return: The current value of the variable
+ */
+static inline int32_t __qdf_atomic_inc_return(__qdf_atomic_t *v)
+{
+ return atomic_inc_return(v);
+}
+
+#endif
diff --git a/qdf/linux/src/i_qdf_defer.h b/qdf/linux/src/i_qdf_defer.h
new file mode 100644
index 000000000000..d2615c7f139e
--- /dev/null
+++ b/qdf/linux/src/i_qdf_defer.h
@@ -0,0 +1,323 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_defer.h
+ * This file provides OS dependent deferred API's.
+ */
+
+#ifndef _I_QDF_DEFER_H
+#define _I_QDF_DEFER_H
+
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#ifdef CONFIG_CNSS
+#include <net/cnss.h>
+#endif
+#include <qdf_types.h>
+#include <qdf_status.h>
+#include <qdf_trace.h>
+
+typedef struct tasklet_struct __qdf_bh_t;
+typedef struct workqueue_struct __qdf_workqueue_t;
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 19)
+typedef struct work_struct __qdf_work_t;
+typedef struct work_struct __qdf_delayed_work_t;
+#else
+
+/**
+ * __qdf_work_t - wrapper around the real task func
+ * @work: Instance of work
+ * @fn: function pointer to the handler
+ * @arg: pointer to argument
+ */
+typedef struct {
+ struct work_struct work;
+ qdf_defer_fn_t fn;
+ void *arg;
+} __qdf_work_t;
+
+/**
+ * __qdf_delayed_work_t - wrapper around the real work func
+ * @dwork: Instance of delayed work
+ * @fn: function pointer to the handler
+ * @arg: pointer to argument
+ */
+typedef struct {
+ struct delayed_work dwork;
+ qdf_defer_fn_t fn;
+ void *arg;
+} __qdf_delayed_work_t;
+
+extern void __qdf_defer_func(struct work_struct *work);
+extern void __qdf_defer_delayed_func(struct work_struct *work);
+#endif
+
+typedef void (*__qdf_bh_fn_t)(unsigned long arg);
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 19)
+/**
+ * __qdf_init_work - Initialize a work/task queue, This runs in non-interrupt
+ * context, so can be preempted by H/W & S/W intr
+ * @hdl: OS handle
+ * @work: pointer to work
+ * @func: deferred function to run at bottom half non-interrupt context.
+ * @arg: argument for the deferred function
+ * Return: none
+ */
+static inline QDF_STATUS __qdf_init_work(qdf_handle_t hdl,
+ __qdf_work_t *work,
+ qdf_defer_fn_t func, void *arg)
+{
+	/* Initialize func and argument in work struct */
+ INIT_WORK(&work->work, __qdf_defer_func);
+ return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * __qdf_init_delayed_work - create a work/task, This runs in non-interrupt
+ * context, so can be preempted by H/W & S/W intr
+ * @hdl: OS handle
+ * @work: pointer to work
+ * @func: deferred function to run at bottom half non-interrupt context.
+ * @arg: argument for the deferred function
+ * Return: none
+ */
+static inline uint32_t __qdf_init_delayed_work(qdf_handle_t hdl,
+ __qdf_delayed_work_t *work,
+ qdf_defer_fn_t func, void *arg)
+{
+ INIT_WORK(work, func, arg);
+ return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * __qdf_queue_work - Queue the work/task
+ * @hdl: OS handle
+ * @wqueue: pointer to workqueue
+ * @work: pointer to work
+ * Return: none
+ */
+static inline void __qdf_queue_work(qdf_handle_t hdl,
+ __qdf_workqueue_t *wqueue,
+ __qdf_work_t *work)
+{
+ queue_work(wqueue, work);
+}
+
+/**
+ * __qdf_queue_delayed_work - Queue the delayed work/task
+ * @hdl: OS handle
+ * @wqueue: pointer to workqueue
+ * @work: pointer to work
+ * @delay: delay interval
+ * Return: none
+ */
+static inline void __qdf_queue_delayed_work(qdf_handle_t hdl,
+ __qdf_workqueue_t *wqueue,
+ __qdf_delayed_work_t *work,
+ uint32_t delay)
+{
+ queue_delayed_work(wqueue, work, delay);
+}
+
+/**
+ * __qdf_sched_work - Schedule a deferred task on non-interrupt context
+ * @hdl: OS handle
+ * @work: pointer to work
+ * Return: none
+ */
+static inline QDF_STATUS __qdf_sched_work(qdf_handle_t hdl, __qdf_work_t *work)
+{
+ schedule_work(work);
+ return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * __qdf_flush_work - Flush a deferred task on non-interrupt context
+ * @hdl: OS handle
+ * @work: pointer to work
+ * Return: none
+ */
+static inline uint32_t __qdf_flush_work(qdf_handle_t hdl, __qdf_work_t *work)
+{
+ flush_work(work);
+ return QDF_STATUS_SUCCESS;
+}
+#else
+static inline QDF_STATUS __qdf_init_work(qdf_handle_t hdl,
+ __qdf_work_t *work,
+ qdf_defer_fn_t func, void *arg)
+{
+ work->fn = func;
+ work->arg = arg;
+#ifdef CONFIG_CNSS
+ cnss_init_work(&work->work, __qdf_defer_func);
+#else
+ INIT_WORK(&work->work, __qdf_defer_func);
+#endif
+ return QDF_STATUS_SUCCESS;
+}
+
+static inline uint32_t __qdf_init_delayed_work(qdf_handle_t hdl,
+ __qdf_delayed_work_t *work,
+ qdf_defer_fn_t func, void *arg)
+{
+	/* Initialize func and argument in work struct */
+ work->fn = func;
+ work->arg = arg;
+ INIT_DELAYED_WORK(&work->dwork, __qdf_defer_delayed_func);
+ return QDF_STATUS_SUCCESS;
+}
+
+static inline void __qdf_queue_work(qdf_handle_t hdl,
+ __qdf_workqueue_t *wqueue,
+ __qdf_work_t *work)
+{
+ queue_work(wqueue, &work->work);
+}
+
+static inline void __qdf_queue_delayed_work(qdf_handle_t hdl,
+ __qdf_workqueue_t *wqueue,
+ __qdf_delayed_work_t *work,
+ uint32_t delay)
+{
+ queue_delayed_work(wqueue, &work->dwork, delay);
+}
+
+static inline QDF_STATUS __qdf_sched_work(qdf_handle_t hdl, __qdf_work_t *work)
+{
+ schedule_work(&work->work);
+ return QDF_STATUS_SUCCESS;
+}
+
+static inline uint32_t __qdf_flush_work(qdf_handle_t hdl, __qdf_work_t *work)
+{
+ flush_work(&work->work);
+ return QDF_STATUS_SUCCESS;
+}
+#endif
+
+/**
+ * __qdf_create_workqueue - create a workqueue, This runs in non-interrupt
+ * context, so can be preempted by H/W & S/W intr
+ * @name: string
+ * Return: pointer of type qdf_workqueue_t
+ */
+static inline __qdf_workqueue_t *__qdf_create_workqueue(char *name)
+{
+ return create_workqueue(name);
+}
+
+/**
+ * __qdf_flush_workqueue - flush the workqueue
+ * @hdl: OS handle
+ * @wqueue: pointer to workqueue
+ * Return: none
+ */
+static inline void __qdf_flush_workqueue(qdf_handle_t hdl,
+ __qdf_workqueue_t *wqueue)
+{
+ flush_workqueue(wqueue);
+}
+
+/**
+ * __qdf_destroy_workqueue - Destroy the workqueue
+ * @hdl: OS handle
+ * @wqueue: pointer to workqueue
+ * Return: none
+ */
+static inline void __qdf_destroy_workqueue(qdf_handle_t hdl,
+ __qdf_workqueue_t *wqueue)
+{
+ destroy_workqueue(wqueue);
+}
+
+/**
+ * __qdf_init_bh - creates the Bottom half deferred handler
+ * @hdl: OS handle
+ * @bh: pointer to bottom
+ * @func: deferred function to run at bottom half interrupt context.
+ * @arg: argument for the deferred function
+ * Return: none
+ */
+static inline QDF_STATUS __qdf_init_bh(qdf_handle_t hdl,
+ struct tasklet_struct *bh,
+ qdf_defer_fn_t func, void *arg)
+{
+ tasklet_init(bh, (__qdf_bh_fn_t) func, (unsigned long)arg);
+ return QDF_STATUS_SUCCESS;
+}
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 19)
+#else
+#endif
+
+/**
+ * __qdf_sched_bh - schedule a bottom half (DPC)
+ * @hdl: OS handle
+ * @bh: pointer to bottom
+ * Return: none
+ */
+static inline QDF_STATUS
+__qdf_sched_bh(qdf_handle_t hdl, struct tasklet_struct *bh)
+{
+ tasklet_schedule(bh);
+ return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * __qdf_disable_work - disable the deferred task (synchronous)
+ * @hdl: OS handle
+ * @work: pointer to work
+ * Return: unsigned int
+ */
+static inline QDF_STATUS
+__qdf_disable_work(qdf_handle_t hdl, __qdf_work_t *work)
+{
+ if (cancel_work_sync(&work->work))
+ return QDF_STATUS_E_ALREADY;
+
+ return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * __qdf_disable_bh - destroy the bh (synchronous)
+ * @hdl: OS handle
+ * @bh: pointer to bottom
+ * Return: none
+ */
+static inline QDF_STATUS
+__qdf_disable_bh(qdf_handle_t hdl, struct tasklet_struct *bh)
+{
+ tasklet_kill(bh);
+ return QDF_STATUS_SUCCESS;
+}
+
+#endif /*_I_QDF_DEFER_H*/
diff --git a/qdf/linux/src/i_qdf_event.h b/qdf/linux/src/i_qdf_event.h
new file mode 100644
index 000000000000..2c634423a720
--- /dev/null
+++ b/qdf/linux/src/i_qdf_event.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_event.h
+ * This file provides OS dependent event API's.
+ */
+
+#if !defined(__I_QDF_EVENT_H)
+#define __I_QDF_EVENT_H
+
+#include <linux/completion.h>
+
+/**
+ * __qdf_event_t - manages events
+ * @complete: instance to completion
+ * @cookie: unsigned int
+ */
+typedef struct qdf_evt {
+ struct completion complete;
+ uint32_t cookie;
+} __qdf_event_t;
+
+/* Preprocessor definitions and constants */
+#define LINUX_EVENT_COOKIE 0x12341234
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0))
+#define INIT_COMPLETION(event) reinit_completion(&event)
+#endif
+
+#endif /*__I_QDF_EVENT_H*/
diff --git a/qdf/linux/src/i_qdf_list.h b/qdf/linux/src/i_qdf_list.h
new file mode 100644
index 000000000000..eacdc61ab5d2
--- /dev/null
+++ b/qdf/linux/src/i_qdf_list.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_list.h
+ * This file provides OS dependent list API's.
+ */
+
+#if !defined(__I_QDF_LIST_H)
+#define __I_QDF_LIST_H
+
+#include <linux/list.h>
+
+/* Type declarations */
+typedef struct list_head __qdf_list_node_t;
+
+/* Preprocessor definitions and constants */
+
+typedef struct qdf_list_s {
+ __qdf_list_node_t anchor;
+ uint32_t count;
+ uint32_t max_size;
+} __qdf_list_t;
+
+/**
+ * __qdf_list_create() - Initialize list head
+ * @list: object of list
+ * @max_size: max size of the list
+ * Return: none
+ */
+static inline void __qdf_list_create(__qdf_list_t *list, uint32_t max_size)
+{
+ INIT_LIST_HEAD(&list->anchor);
+ list->count = 0;
+ list->max_size = max_size;
+}
+
+#endif
diff --git a/qdf/linux/src/i_qdf_lock.h b/qdf/linux/src/i_qdf_lock.h
new file mode 100644
index 000000000000..0a3ba71d6ca5
--- /dev/null
+++ b/qdf/linux/src/i_qdf_lock.h
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_lock.h
+ * Linux-specific definitions for QDF Lock API's
+ */
+
+#if !defined(__I_QDF_LOCK_H)
+#define __I_QDF_LOCK_H
+
+/* Include Files */
+#include <qdf_types.h>
+#include <qdf_status.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/device.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
+#include <asm/semaphore.h>
+#else
+#include <linux/semaphore.h>
+#endif
+#include <linux/interrupt.h>
+#if defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
+#include <linux/wakelock.h>
+#endif
+
+/* define for flag */
+#define QDF_LINUX_UNLOCK_BH 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+enum {
+ LOCK_RELEASED = 0x11223344,
+ LOCK_ACQUIRED,
+ LOCK_DESTROYED
+};
+
+/**
+ * struct qdf_lock_s - mutex abstraction (typedef'd as __qdf_mutex_t)
+ * @m_lock: Mutex lock
+ * @cookie: Lock cookie
+ * @process_id: Process ID to track lock
+ * @state: Lock status
+ * @refcount: Reference count for recursive lock
+ */
+struct qdf_lock_s {
+ struct mutex m_lock;
+ uint32_t cookie;
+ int process_id;
+ uint32_t state;
+ uint8_t refcount;
+};
+
+typedef struct qdf_lock_s __qdf_mutex_t;
+
+/**
+ * typedef struct - qdf_spinlock_t
+ * @spinlock: Spin lock
+ * @flags: Lock flag
+ */
+typedef struct __qdf_spinlock {
+ spinlock_t spinlock;
+ unsigned long flags;
+} __qdf_spinlock_t;
+
+typedef struct semaphore __qdf_semaphore_t;
+
+#if defined CONFIG_CNSS
+typedef struct wakeup_source qdf_wake_lock_t;
+#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
+typedef struct wake_lock qdf_wake_lock_t;
+#else
+typedef int qdf_wake_lock_t;
+#endif
+
+#define LINUX_LOCK_COOKIE 0x12345678
+
+/* Function declarations and documenation */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37)
+/**
+ * __qdf_semaphore_init() - initialize the semaphore
+ * @m: Semaphore object
+ *
+ * Return: QDF_STATUS_SUCCESS
+ */
+static inline QDF_STATUS __qdf_semaphore_init(struct semaphore *m)
+{
+ init_MUTEX(m);
+ return QDF_STATUS_SUCCESS;
+}
+#else
+static inline QDF_STATUS __qdf_semaphore_init(struct semaphore *m)
+{
+ sema_init(m, 1);
+ return QDF_STATUS_SUCCESS;
+}
+#endif
+
+/**
+ * __qdf_semaphore_acquire() - acquire semaphore
+ * @m: Semaphore object
+ *
+ * Return: 0
+ */
+static inline int __qdf_semaphore_acquire(struct semaphore *m)
+{
+ down(m);
+ return 0;
+}
+
+/**
+ * __qdf_semaphore_acquire_intr() - down_interruptible allows a user-space
+ * process that is waiting on a semaphore to be interrupted by the user.
+ * If the operation is interrupted, the function returns a nonzero value,
+ * and the caller does not hold the semaphore.
+ * Always checking the return value and responding accordingly.
+ * @osdev: OS device handle
+ * @m: Semaphore object
+ *
+ * Return: int
+ */
+static inline int __qdf_semaphore_acquire_intr(struct semaphore *m)
+{
+ return down_interruptible(m);
+}
+
+/**
+ * __qdf_semaphore_release() - release semaphore
+ * @m: Semaphore object
+ *
+ * Return: result of UP operation in integer
+ */
+static inline void __qdf_semaphore_release(struct semaphore *m)
+{
+ up(m);
+}
+
+/**
+ * __qdf_semaphore_acquire_timeout() - Take the semaphore before timeout
+ * @m: semaphore to take
+ * @timeout: maximum time to try to take the semaphore
+ * Return: int
+ */
+static inline int __qdf_semaphore_acquire_timeout(struct semaphore *m,
+ unsigned long timeout)
+{
+ unsigned long jiffie_val = msecs_to_jiffies(timeout);
+ return down_timeout(m, jiffie_val);
+}
+
+/**
+ * __qdf_spinlock_create() - initialize spin lock
+ * @lock: Spin lock object
+ *
+ * Return: QDF_STATUS_SUCCESS
+ */
+static inline QDF_STATUS __qdf_spinlock_create(__qdf_spinlock_t *lock)
+{
+ spin_lock_init(&lock->spinlock);
+ lock->flags = 0;
+ return QDF_STATUS_SUCCESS;
+}
+
+#define __qdf_spinlock_destroy(lock)
+
+/**
+ * __qdf_spin_lock() - Acquire a Spinlock(SMP) & disable Preemption (Preemptive)
+ * @lock: Lock object
+ *
+ * Return: none
+ */
+static inline void __qdf_spin_lock(__qdf_spinlock_t *lock)
+{
+ spin_lock(&lock->spinlock);
+}
+
+/**
+ * __qdf_spin_unlock() - Unlock the spinlock and enables the Preemption
+ * @lock: Lock object
+ *
+ * Return: none
+ */
+static inline void __qdf_spin_unlock(__qdf_spinlock_t *lock)
+{
+ spin_unlock(&lock->spinlock);
+}
+
+/**
+ * __qdf_spin_lock_irqsave() - Acquire a Spinlock (SMP) & disable Preemption
+ * (Preemptive) and disable IRQs
+ * @lock: Lock object
+ *
+ * Return: none
+ */
+static inline void __qdf_spin_lock_irqsave(__qdf_spinlock_t *lock)
+{
+ spin_lock_irqsave(&lock->spinlock, lock->flags);
+}
+
+/**
+ * __qdf_spin_unlock_irqrestore() - Unlock the spinlock and enables the
+ * Preemption and enable IRQ
+ * @lock: Lock object
+ *
+ * Return: none
+ */
+static inline void __qdf_spin_unlock_irqrestore(__qdf_spinlock_t *lock)
+{
+ spin_unlock_irqrestore(&lock->spinlock, lock->flags);
+}
+
+/*
+ * Synchronous versions - only for OS' that have interrupt disable
+ */
+#define __qdf_spin_lock_irq(_p_lock, _flags) spin_lock_irqsave(_p_lock, _flags)
+#define __qdf_spin_unlock_irq(_p_lock, _flags) \
+ spin_unlock_irqrestore(_p_lock, _flags)
+
+/**
+ * __qdf_spin_trylock_bh() - spin trylock bottomhalf
+ * @lock: spinlock object
+ *
+ * Return: int
+ */
+static inline int __qdf_spin_trylock_bh(__qdf_spinlock_t *lock)
+{
+ if (likely(irqs_disabled() || in_irq() || in_softirq())) {
+ return spin_trylock(&lock->spinlock);
+ } else {
+ if (spin_trylock_bh(&lock->spinlock)) {
+ lock->flags |= QDF_LINUX_UNLOCK_BH;
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+}
+
+/**
+ * __qdf_spin_lock_bh() - Acquire the spinlock and disable bottom halves
+ * @lock: Lock object
+ *
+ * Return: none
+ */
+static inline void __qdf_spin_lock_bh(__qdf_spinlock_t *lock)
+{
+ if (likely(irqs_disabled() || in_irq() || in_softirq())) {
+ spin_lock(&lock->spinlock);
+ } else {
+ spin_lock_bh(&lock->spinlock);
+ lock->flags |= QDF_LINUX_UNLOCK_BH;
+ }
+}
+
+/**
+ * __qdf_spin_unlock_bh() - Release the spinlock and enable bottom halves
+ * @lock: Lock object
+ *
+ * Return: none
+ */
+static inline void __qdf_spin_unlock_bh(__qdf_spinlock_t *lock)
+{
+ if (unlikely(lock->flags & QDF_LINUX_UNLOCK_BH)) {
+ lock->flags &= ~QDF_LINUX_UNLOCK_BH;
+ spin_unlock_bh(&lock->spinlock);
+ } else
+ spin_unlock(&lock->spinlock);
+}
+
+/**
+ * __qdf_spinlock_irq_exec - Execute the input function with the spinlock
+ * held and interrupts disabled
+ * @hdl: OS handle
+ * @lock: spinlock to be held for the critical region
+ * @func: critical region function to be executed
+ * @arg: argument to the critical region function
+ *
+ * Return: Boolean status returned by the critical region function
+ */
+static inline bool __qdf_spinlock_irq_exec(qdf_handle_t hdl,
+ __qdf_spinlock_t *lock,
+ qdf_irqlocked_func_t func,
+ void *arg)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&lock->spinlock, flags);
+ ret = func(arg);
+ spin_unlock_irqrestore(&lock->spinlock, flags);
+
+ return ret;
+}
+
+/**
+ * __qdf_in_softirq() - in soft irq context
+ *
+ * Return: true if in softirq context else false
+ */
+static inline bool __qdf_in_softirq(void)
+{
+ return in_softirq();
+}
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __I_QDF_LOCK_H */
diff --git a/qdf/linux/src/i_qdf_mc_timer.h b/qdf/linux/src/i_qdf_mc_timer.h
new file mode 100644
index 000000000000..f7680c39d624
--- /dev/null
+++ b/qdf/linux/src/i_qdf_mc_timer.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_mc_timer.h
+ * Linux-specific definitions for QDF timers serialized to MC thread
+ */
+
+#if !defined(__I_QDF_MC_TIMER_H)
+#define __I_QDF_MC_TIMER_H
+
+/* Include Files */
+#include <qdf_mc_timer.h>
+#include <qdf_types.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+#include <linux/jiffies.h>
+
+/* Preprocessor definitions and constants */
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+/* Type declarations */
+
+/* Linux-specific state backing a QDF MC timer (used by qdf_mc_timer.c) */
+typedef struct qdf_mc_timer_platform_s {
+	struct timer_list timer;	/* underlying Linux kernel timer */
+	int thread_id;		/* NOTE(review): presumably the owning thread id - confirm in qdf_mc_timer.c */
+	uint32_t cookie;	/* NOTE(review): presumably an opaque user cookie - confirm in qdf_mc_timer.c */
+	spinlock_t spinlock;
+} qdf_mc_timer_platform_t;
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* __I_QDF_MC_TIMER_H */
diff --git a/qdf/linux/src/i_qdf_mem.h b/qdf/linux/src/i_qdf_mem.h
new file mode 100644
index 000000000000..429bfc4875c4
--- /dev/null
+++ b/qdf/linux/src/i_qdf_mem.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_mem.h
+ * Linux-specific definitions for QDF memory API's
+ */
+
+#ifndef __I_QDF_MEM_H
+#define __I_QDF_MEM_H
+
+#ifdef __KERNEL__
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
+#include <linux/autoconf.h>
+#else
+#include <generated/autoconf.h>
+#endif
+#endif
+#include <linux/slab.h>
+#include <linux/hardirq.h>
+#include <linux/vmalloc.h>
+#include <linux/pci.h> /* pci_alloc_consistent */
+#if CONFIG_MCL
+#include <cds_queue.h>
+#else
+#include <sys/queue.h>
+#endif
+#else
+/*
+ * Provide dummy defs for kernel data types, functions, and enums
+ * used in this header file.
+ */
+#define GFP_KERNEL 0
+#define GFP_ATOMIC 0
+#define kzalloc(size, flags) NULL
+#define vmalloc(size) NULL
+#define kfree(buf)
+#define vfree(buf)
+#define pci_alloc_consistent(dev, size, paddr) NULL
+#define __qdf_mempool_t
+#endif /* __KERNEL__ */
+#include <qdf_status.h>
+
+#ifdef __KERNEL__
+/* Element header linking a free pool entry into the pool's free list */
+typedef struct mempool_elem {
+	STAILQ_ENTRY(mempool_elem) mempool_entry;
+} mempool_elem_t;
+
+/**
+ * typedef __qdf_mempool_ctxt_t - Memory pool context
+ * @pool_id: pool identifier
+ * @flags: flags
+ * @elem_size: size of each pool element in bytes
+ * @pool_mem: pool_addr address of the pool created
+ * @mem_size: Total size of the pool in bytes
+ * @free_list: free pool list
+ * @lock: spinlock object
+ * @max_elem: Maximum number of elements in the pool
+ * @free_cnt: Number of free elements available
+ */
+typedef struct __qdf_mempool_ctxt {
+	int pool_id;
+	u_int32_t flags;
+	size_t elem_size;
+	void *pool_mem;
+	u_int32_t mem_size;
+	STAILQ_HEAD(, mempool_elem) free_list;
+	spinlock_t lock;
+	u_int32_t max_elem;
+	u_int32_t free_cnt;
+} __qdf_mempool_ctxt_t;
+#endif
+/*
+ * Note: the __qdf_mempool_t typedef lives in the __KERNEL__ section below.
+ * An unconditional typedef here would duplicate that one (a constraint
+ * violation before C11) and, in non-kernel builds, would collide with the
+ * empty "#define __qdf_mempool_t" above, macro-expanding the typedef into
+ * the ill-formed "typedef __qdf_mempool_ctxt_t *;".
+ */
+
+/* typedef for dma_data_direction */
+typedef enum dma_data_direction __dma_data_direction;
+
+/**
+ * __qdf_str_cmp() - Compare two strings
+ * @str1: First string
+ * @str2: Second string
+ *
+ * Return: =0 equal
+ * >0 not equal, if str1 sorts lexicographically after str2
+ * <0 not equal, if str1 sorts lexicographically before str2
+ */
+static inline int32_t __qdf_str_cmp(const char *str1, const char *str2)
+{
+ return strcmp(str1, str2);
+}
+
+/**
+ * __qdf_str_lcopy() - Copy from one string to another
+ * @dest: destination string
+ * @src: source string
+ * @bytes: limit of num bytes to copy
+ *
+ * Return: total length of @src (strlcpy() semantics); a return value
+ * >= @bytes indicates the copy was truncated
+ */
+static inline uint32_t __qdf_str_lcopy(char *dest, const char *src,
+				       uint32_t bytes)
+{
+	return strlcpy(dest, src, bytes);
+}
+
+/**
+ * __qdf_mem_map_nbytes_single() - Map memory for DMA
+ * @osdev: pointer to OS device context
+ * @buf: pointer to memory to be dma mapped
+ * @dir: DMA map direction
+ * @nbytes: number of bytes to be mapped.
+ * @phy_addr: pointer to receive the physical address.
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if the
+ * DMA mapping failed (checked via dma_mapping_error())
+ */
+static inline uint32_t __qdf_mem_map_nbytes_single(qdf_device_t osdev,
+						  void *buf, qdf_dma_dir_t dir,
+						  int nbytes,
+						  uint32_t *phy_addr)
+{
+	/* assume that the OS only provides a single fragment */
+	*phy_addr = dma_map_single(osdev->dev, buf, nbytes, dir);
+	return dma_mapping_error(osdev->dev, *phy_addr) ?
+		QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
+}
+
+/**
+ * __qdf_mem_unmap_nbytes_single() - unmap memory for DMA
+ *
+ * @osdev: pointer to OS device context
+ * @phy_addr: physical address of memory to be dma unmapped
+ * @dir: DMA unmap direction
+ * @nbytes: number of bytes to be unmapped.
+ *
+ * Return: none
+ */
+static inline void __qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
+						 uint32_t phy_addr,
+						 qdf_dma_dir_t dir, int nbytes)
+{
+	dma_unmap_single(osdev->dev, phy_addr, nbytes, dir);
+}
+#ifdef __KERNEL__
+
+typedef __qdf_mempool_ctxt_t *__qdf_mempool_t;
+
+/* Memory pool API; implemented in qdf_mem.c */
+int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool, int pool_cnt,
+		       size_t pool_entry_size, u_int32_t flags);
+void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool);
+void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool);
+void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf);
+
+/* No trailing semicolon in the macro body: a ';' there would break uses
+ * of the macro as an expression, e.g. "sz = __qdf_mempool_elem_size(p);".
+ */
+#define __qdf_mempool_elem_size(_pool) ((_pool)->elem_size)
+#endif
+
+/**
+ * __qdf_str_len() - returns the length of a string
+ * @str: input string
+ *
+ * Return: length of @str, excluding the terminating NUL
+ */
+static inline int32_t __qdf_str_len(const char *str)
+{
+	return strlen(str);
+}
+
+/**
+ * __qdf_mem_cmp() - memory compare
+ * @memory1: pointer to one location in memory to compare.
+ * @memory2: pointer to second location in memory to compare.
+ * @num_bytes: the number of bytes to compare.
+ *
+ * Function to compare two pieces of memory, similar to memcmp function
+ * in standard C.
+ *
+ * Return: int32_t with memcmp() semantics (tri-state, not a bool):
+ * 0 -- equal
+ * < 0 -- *memory1 is less than *memory2
+ * > 0 -- *memory1 is bigger than *memory2
+ */
+static inline int32_t __qdf_mem_cmp(const void *memory1, const void *memory2,
+				    uint32_t num_bytes)
+{
+	return (int32_t) memcmp(memory1, memory2, num_bytes);
+}
+
+#endif /* __I_QDF_MEM_H */
diff --git a/qdf/linux/src/i_qdf_module.h b/qdf/linux/src/i_qdf_module.h
new file mode 100644
index 000000000000..795d3659c6cb
--- /dev/null
+++ b/qdf/linux/src/i_qdf_module.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2010-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_module.h
+ * Linux-specific definitions for QDF module API's
+ */
+
+#ifndef _I_QDF_MODULE_H
+#define _I_QDF_MODULE_H
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <qdf_types.h>
+
+
+/*
+ * __qdf_virt_module_init() - generate a module_init() entry point wrapping
+ * the QDF-status-returning init function _x.
+ * NOTE(review): on failure this returns QDF_STATUS_E_INVAL (a positive
+ * enum value); the kernel convention for module init is a negative errno.
+ * Confirm the intended failure convention with the module framework.
+ */
+#define __qdf_virt_module_init(_x) \
+	static int _x##_mod(void) \
+	{ \
+		uint32_t st; \
+		st = (_x)(); \
+		if (st != QDF_STATUS_SUCCESS) \
+			return QDF_STATUS_E_INVAL; \
+		else \
+			return 0; \
+	} \
+	module_init(_x##_mod);
+
+/* Map a QDF virtual-module exit routine onto module_exit() */
+#define __qdf_virt_module_exit(_x) module_exit(_x)
+
+/* NOTE(review): _name is ignored; this only emits MODULE_LICENSE */
+#define __qdf_virt_module_name(_name) MODULE_LICENSE("Proprietary");
+
+/* Export a symbol to other kernel modules */
+#define __qdf_export_symbol(_sym) EXPORT_SYMBOL(_sym)
+
+/* Declare a module parameter with mode 0600 (root read/write via sysfs) */
+#define __qdf_declare_param(_name, _type) \
+	module_param(_name, _type, 0600)
+
+#endif /* _I_QDF_MODULE_H */
diff --git a/qdf/linux/src/i_qdf_nbuf.h b/qdf/linux/src/i_qdf_nbuf.h
new file mode 100644
index 000000000000..cbc798c141c1
--- /dev/null
+++ b/qdf/linux/src/i_qdf_nbuf.h
@@ -0,0 +1,1569 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_nbuf.h
+ * This file provides OS dependent nbuf API's.
+ */
+
+#ifndef _I_QDF_NBUF_H
+#define _I_QDF_NBUF_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include <qdf_types.h>
+#include <qdf_status.h>
+#include <qdf_util.h>
+#include <qdf_net_types.h>
+#include <qdf_mem.h>
+#include <linux/tcp.h>
+#include <qdf_util.h>
+
+/*
+ * Use the Linux socket buffer (sk_buff) as the underlying implementation
+ * of qdf_nbuf. Linux uses sk_buff to represent both the packet and its
+ * data, so a single sk_buff pointer serves as the nbuf handle.
+ */
+typedef struct sk_buff *__qdf_nbuf_t;
+
+#define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1
+
+/* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
+ * max tx fragments added by the driver
+ * The driver will always add one tx fragment (the tx descriptor)
+ */
+#define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
+
+/*
+ * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
+ */
+typedef union {
+ uint64_t u64;
+ qdf_dma_addr_t dma_addr;
+} qdf_paddr_t;
+
+/**
+ * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
+ * - data passed between layers of the driver.
+ *
+ * Notes:
+ * 1. Hard limited to 48 bytes. Please count your bytes
+ * 2. The size of this structure has to be easily calculatable and
+ * consistently so: do not use any conditional compile flags
+ * 3. Split into a common part followed by a tx/rx overlay
+ * 4. There is only one extra frag, which represents the HTC/HTT header
+ *
+ * @common.paddr : physical addressed retrived by dma_map of nbuf->data
+ * @rx.lro_flags : hardware assisted flags:
+ * @rx.lro_eligible : flag to indicate whether the MSDU is LRO eligible
+ * @rx.tcp_proto : L4 protocol is TCP
+ * @rx.tcp_pure_ack : A TCP ACK packet with no payload
+ * @rx.ipv6_proto : L3 protocol is IPV6
+ * @rx.ip_offset : offset to IP header
+ * @rx.tcp_offset : offset to TCP header
+ * @rx.tcp_udp_chksum : L4 payload checksum
+ * @rx.tcp_win : TCP receive window size
+ * @rx.tcp_seq_num : TCP sequence number
+ * @rx.tcp_ack_num : TCP ACK number
+ * @rx.flow_id_toeplitz: 32-bit 5-tuple Toeplitz hash
+ * @tx.extra_frag : represent HTC/HTT header
+ * @tx.efrag.vaddr : virtual address of ~
+ * @tx.efrag.paddr : physical/DMA address of ~
+ * @tx.efrag.len : length of efrag pointed by the above pointers
+ * @tx.efrag.num : number of extra frags ( 0 or 1)
+ * @tx.efrag.flags.nbuf : flag, nbuf payload to be swapped (wordstream)
+ * @tx.efrag.flags.efrag : flag, efrag payload to be swapped (wordstream)
+ * @tx.efrag.flags.chfrag_start: used by WIN
+ * @tx.efrag.flags.chfrag_end: used by WIN
+ * @tx.data_attr : value that is programmed into CE descr, includes:
+ * + (1) CE classification enablement bit
+ * + (2) packet type (802.3 or Ethernet type II)
+ * + (3) packet offset (usually length of HTC/HTT descr)
+ * @tx.trace : combined structure for DP and protocol trace
+ * @tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
+ * + (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
+ * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
+ * @tx.trace.proto_type : bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
+ * + (MGMT_ACTION)] - 4 bits
+ * @tx.trace.dp_trace : flag (Datapath trace)
+ * @tx.trace.htt2_frm : flag (high-latency path only)
+ * @tx.trace.vdev_id : vdev (for protocol trace)
+ * @tx.ipa.owned : packet owned by IPA
+ * @tx.ipa.priv : private data, used by IPA
+ */
+struct qdf_nbuf_cb {
+ /* common */
+ qdf_paddr_t paddr; /* of skb->data */
+ /* valid only in one direction */
+ union {
+ /* Note: MAX: 40 bytes */
+ struct {
+ uint32_t lro_eligible:1,
+ tcp_proto:1,
+ tcp_pure_ack:1,
+ ipv6_proto:1,
+ ip_offset:7,
+ tcp_offset:7;
+ uint32_t tcp_udp_chksum:16,
+ tcp_win:16;
+ uint32_t tcp_seq_num;
+ uint32_t tcp_ack_num;
+ uint32_t flow_id_toeplitz;
+ } rx; /* 20 bytes */
+
+ /* Note: MAX: 40 bytes */
+ struct {
+ struct {
+ unsigned char *vaddr;
+ qdf_paddr_t paddr;
+ uint16_t len;
+ union {
+ struct {
+ uint8_t flag_efrag:1,
+ flag_nbuf:1,
+ num:1,
+ flag_chfrag_start:1,
+ flag_chfrag_end:1,
+ reserved:3;
+ } bits;
+ uint8_t u8;
+ } flags;
+ } extra_frag; /* 19 bytes */
+ union {
+ struct {
+ uint8_t ftype;
+ uint32_t submit_ts;
+ void *fctx;
+ void *vdev_ctx;
+ } win; /* 21 bytes*/
+ struct {
+ uint32_t data_attr; /* 4 bytes */
+ union {
+ struct {
+ uint8_t packet_state;
+ uint8_t packet_track:4,
+ proto_type:4;
+ uint8_t dp_trace:1,
+ htt2_frm:1,
+ rsrvd:6;
+ uint8_t vdev_id;
+ } hl;
+ struct {
+ uint8_t packet_state;
+ uint8_t packet_track:4,
+ proto_type:4;
+ uint8_t dp_trace:1,
+ rsrvd:7;
+ uint8_t vdev_id;
+ } ll; /* low latency */
+ } trace; /* 4 bytes */
+ struct {
+ uint32_t owned:1,
+ priv:31;
+ } ipa; /* 4 */
+ } mcl;/* 12 bytes*/
+ } dev;
+ } tx; /* 40 bytes */
+ } u;
+}; /* struct qdf_nbuf_cb: MAX 48 bytes */
+
+/**
+ * access macros to qdf_nbuf_cb
+ * Note: These macros can be used as L-values as well as R-values.
+ * When used as R-values, they effectively function as "get" macros
+ * When used as L_values, they effectively function as "set" macros
+ */
+
+#define QDF_NBUF_CB_PADDR(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)
+
+#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
+#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
+#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
+#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
+#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
+/* QDF_NBUF_CB_RX_TCP_OFFSET was defined twice with identical bodies;
+ * the redundant duplicate has been removed.
+ */
+#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
+#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
+#define QDF_NBUF_CB_RX_TCP_WIN(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)
+#define QDF_NBUF_CB_RX_TCP_SEQ_NUM(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_seq_num)
+#define QDF_NBUF_CB_RX_TCP_ACK_NUM(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_ack_num)
+#define QDF_NBUF_CB_RX_FLOW_ID_TOEPLITZ(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id_toeplitz)
+
+#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
+ (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.extra_frag.vaddr)
+#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
+ (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.extra_frag.paddr.dma_addr)
+#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
+ (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.extra_frag.len)
+#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
+ (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.extra_frag.flags.bits.num)
+#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
+ (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.extra_frag.flags.u8)
+#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
+ (((struct qdf_nbuf_cb *) \
+ ((skb)->cb))->u.tx.extra_frag.flags.bits.flag_chfrag_start)
+#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
+ (((struct qdf_nbuf_cb *) \
+ ((skb)->cb))->u.tx.extra_frag.flags.bits.flag_chfrag_end)
+#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
+ (((struct qdf_nbuf_cb *) \
+ ((skb)->cb))->u.tx.extra_frag.flags.bits.flag_efrag)
+#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
+ (((struct qdf_nbuf_cb *) \
+ ((skb)->cb))->u.tx.extra_frag.flags.bits.flag_nbuf)
+#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
+ (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.mcl.data_attr)
+#define QDF_NBUF_CB_TX_PACKET_STATE(skb) \
+ (((struct qdf_nbuf_cb *) \
+ ((skb)->cb))->u.tx.dev.mcl.trace.ll.packet_state)
+#define QDF_NBUF_CB_TX_PACKET_TRACK(skb) \
+ (((struct qdf_nbuf_cb *) \
+ ((skb)->cb))->u.tx.dev.mcl.trace.ll.packet_track)
+#define QDF_NBUF_CB_TX_PROTO_TYPE(skb) \
+ (((struct qdf_nbuf_cb *) \
+ ((skb)->cb))->u.tx.dev.mcl.trace.ll.proto_type)
+#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
+ qdf_nbuf_set_state(skb, PACKET_STATE)
+#define QDF_NBUF_GET_PACKET_TRACK(skb) \
+ (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.mcl.trace.ll.packet_track)
+#define QDF_NBUF_CB_TX_DP_TRACE(skb) \
+ (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.mcl.trace.ll.dp_trace)
+#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) \
+ (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.mcl.trace.hl.htt2_frm)
+#define QDF_NBUF_CB_TX_VDEV_ID(skb) \
+ (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.mcl.trace.ll.vdev_id)
+#define QDF_NBUF_CB_TX_IPA_OWNED(skb) \
+ (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.mcl.ipa.owned)
+#define QDF_NBUF_CB_TX_IPA_PRIV(skb) \
+ (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.mcl.ipa.priv)
+#define QDF_NBUF_CB_TX_FTYPE(skb) \
+ (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.win.ftype)
+#define QDF_NBUF_CB_TX_SUBMIT_TS(skb) \
+ (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.win.submit_ts)
+#define QDF_NBUF_CB_TX_FCTX(skb) \
+ (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.win.fctx)
+#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
+ (((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.win.vdev_ctx)
+
+
+/* assume the OS provides a single fragment */
+#define __qdf_nbuf_get_num_frags(skb) \
+ (QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)
+
+#define __qdf_nbuf_reset_num_frags(skb) \
+ do { \
+ QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0; \
+ } while (0)
+
+/**
+ * end of nbuf->cb access macros
+ */
+
+typedef void (*qdf_nbuf_trace_update_t)(char *);
+
+#define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)
+
+#define __qdf_nbuf_mapped_paddr_set(skb, paddr) \
+ do { \
+ QDF_NBUF_CB_PADDR(skb) = paddr; \
+ } while (0)
+
+#define __qdf_nbuf_frag_push_head( \
+ skb, frag_len, frag_vaddr, frag_paddr) \
+ do { \
+ QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1; \
+ QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr; \
+ QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr; \
+ QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len; \
+ } while (0)
+
+#define __qdf_nbuf_get_frag_vaddr(skb, frag_num) \
+ ((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ? \
+ QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))
+
+#define __qdf_nbuf_get_frag_vaddr_always(skb) \
+ QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)
+
+#define __qdf_nbuf_get_frag_paddr(skb, frag_num) \
+ ((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ? \
+ QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) : \
+ /* assume that the OS only provides a single fragment */ \
+ QDF_NBUF_CB_PADDR(skb))
+
+#define __qdf_nbuf_get_frag_len(skb, frag_num) \
+ ((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ? \
+ QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)
+
+#define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num) \
+ ((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) \
+ ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb)) \
+ : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))
+
+#define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm) \
+ do { \
+ if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) \
+ frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS; \
+ if (frag_num) \
+ QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = \
+ is_wstrm; \
+ else \
+ QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = \
+ is_wstrm; \
+ } while (0)
+
+#define __qdf_nbuf_set_vdev_ctx(skb, vdev_ctx) \
+ do { \
+ QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_ctx); \
+ } while (0)
+
+#define __qdf_nbuf_get_vdev_ctx(skb) \
+ QDF_NBUF_CB_TX_VDEV_CTX((skb))
+
+#define __qdf_nbuf_set_fctx_type(skb, ctx, type) \
+ do { \
+ QDF_NBUF_CB_TX_FCTX((skb)) = (ctx); \
+ QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
+ } while (0)
+
+#define __qdf_nbuf_get_fctx(skb) \
+ QDF_NBUF_CB_TX_FCTX((skb))
+
+#define __qdf_nbuf_get_ftype(skb) \
+ QDF_NBUF_CB_TX_FTYPE((skb))
+
+#define __qdf_nbuf_set_chfrag_start(skb, val) \
+ do { \
+ (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val; \
+ } while (0)
+
+#define __qdf_nbuf_is_chfrag_start(skb) \
+ (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))
+
+#define __qdf_nbuf_set_chfrag_end(skb, val) \
+ do { \
+ (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val; \
+ } while (0)
+
+#define __qdf_nbuf_is_chfrag_end(skb) \
+ (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))
+
+#define __qdf_nbuf_trace_set_proto_type(skb, proto_type) \
+ do { \
+ QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type); \
+ } while (0)
+
+#define __qdf_nbuf_trace_get_proto_type(skb) \
+ QDF_NBUF_CB_TX_PROTO_TYPE(skb)
+
+#define __qdf_nbuf_data_attr_get(skb) \
+ QDF_NBUF_CB_TX_DATA_ATTR(skb)
+#define __qdf_nbuf_data_attr_set(skb, data_attr) \
+ do { \
+ QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr); \
+ } while (0)
+
+#define __qdf_nbuf_ipa_owned_get(skb) \
+ QDF_NBUF_CB_TX_IPA_OWNED(skb)
+
+#define __qdf_nbuf_ipa_owned_set(skb) \
+ do { \
+ QDF_NBUF_CB_TX_IPA_OWNED(skb) = 1; \
+ } while (0)
+
+#define __qdf_nbuf_ipa_priv_get(skb) \
+ QDF_NBUF_CB_TX_IPA_PRIV(skb)
+
+#define __qdf_nbuf_ipa_priv_set(skb, priv) \
+ do { \
+ QDF_NBUF_CB_TX_IPA_PRIV(skb) = (priv); \
+ } while (0)
+
+/**
+ * __qdf_nbuf_num_frags_init() - init extra frags
+ * @skb: sk buffer
+ *
+ * Clears the driver-added extra-fragment count in the nbuf control block.
+ *
+ * Return: none
+ */
+static inline
+void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
+{
+	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
+}
+
+/* Frame-context type tags, stored via __qdf_nbuf_set_fctx_type() */
+typedef enum {
+	CB_FTYPE_MCAST2UCAST = 1,
+	CB_FTYPE_TSO = 2,
+	CB_FTYPE_TSO_SG = 3,
+	CB_FTYPE_SG = 4,
+} CB_FTYPE;
+
+/*
+ * prototypes. Implemented in qdf_nbuf.c
+ */
+__qdf_nbuf_t __qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve,
+ int align, int prio);
+void __qdf_nbuf_free(struct sk_buff *skb);
+QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
+ struct sk_buff *skb, qdf_dma_dir_t dir);
+void __qdf_nbuf_unmap(__qdf_device_t osdev,
+ struct sk_buff *skb, qdf_dma_dir_t dir);
+QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
+ struct sk_buff *skb, qdf_dma_dir_t dir);
+void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
+ struct sk_buff *skb, qdf_dma_dir_t dir);
+void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
+
+QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
+void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
+void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
+QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
+ qdf_dma_dir_t dir, int nbytes);
+void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
+ qdf_dma_dir_t dir, int nbytes);
+#ifndef REMOVE_INIT_DEBUG_CODE
+void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
+ qdf_dma_dir_t dir);
+#endif
+QDF_STATUS __qdf_nbuf_map_nbytes_single(
+ qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes);
+void __qdf_nbuf_unmap_nbytes_single(
+ qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes);
+void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
+uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
+void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg);
+QDF_STATUS __qdf_nbuf_frag_map(
+ qdf_device_t osdev, __qdf_nbuf_t nbuf,
+ int offset, qdf_dma_dir_t dir, int cur_frag);
+QDF_STATUS
+__qdf_nbuf_get_vlan_info(qdf_net_handle_t hdl, struct sk_buff *skb,
+ qdf_net_vlanhdr_t *vlan);
+
+
+#ifdef QCA_PKT_PROTO_TRACE
+void __qdf_nbuf_trace_update(struct sk_buff *buf, char *event_string);
+#else
+#define __qdf_nbuf_trace_update(skb, event_string)
+#endif /* QCA_PKT_PROTO_TRACE */
+
+/**
+ * __qdf_to_status() - OS (errno) to QDF status conversion
+ * @error: OS error code
+ *
+ * Return: QDF_STATUS_SUCCESS for 0, QDF_STATUS_E_NOMEM for +/-ENOMEM,
+ * QDF_STATUS_E_NOSUPPORT for anything else
+ */
+static inline QDF_STATUS __qdf_to_status(signed int error)
+{
+	if (!error)
+		return QDF_STATUS_SUCCESS;
+
+	if (error == ENOMEM || error == -ENOMEM)
+		return QDF_STATUS_E_NOMEM;
+
+	return QDF_STATUS_E_NOSUPPORT;
+}
+
+/**
+ * __qdf_nbuf_len() - return the amount of valid data in the skb
+ * @skb: Pointer to network buffer
+ *
+ * This API returns the amount of valid data in the skb; if a driver-added
+ * extra fragment is present, its length is included as well.
+ *
+ * Return: network buffer length
+ */
+static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
+{
+	size_t extra_len = 0;
+
+	if (QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) > 0)
+		extra_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);
+
+	return skb->len + extra_len;
+}
+
+/**
+ * __qdf_nbuf_cat() - link two nbufs
+ * @dst: Buffer to piggyback into
+ * @src: Buffer to put
+ *
+ * Appends the data of @src to the tail of @dst (expanding @dst's head
+ * buffer first when tailroom is insufficient), then frees @src.
+ *
+ * Return: QDF_STATUS of the call. On failure @src is NOT freed and
+ * remains owned by the caller.
+ */
+static inline QDF_STATUS
+__qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
+{
+	/* Linux errno from pskb_expand_head(), not a QDF_STATUS value */
+	int error = 0;
+
+	qdf_assert(dst && src);
+
+	/*
+	 * Since pskb_expand_head unconditionally reallocates the skb->head
+	 * buffer, first check whether the current buffer is already large
+	 * enough.
+	 */
+	if (skb_tailroom(dst) < src->len) {
+		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
+		if (error)
+			return __qdf_to_status(error);
+	}
+	memcpy(skb_tail_pointer(dst), src->data, src->len);
+
+	skb_put(dst, src->len);
+	dev_kfree_skb_any(src);
+
+	return __qdf_to_status(error);
+}
+
+/*
+ * nbuf manipulation routines
+ */
+/**
+ * __qdf_nbuf_headroom() - return the amount of head space available
+ * @skb: Pointer to network buffer
+ *
+ * Return: amount of head room (bytes between skb->head and skb->data)
+ */
+static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
+{
+	return skb_headroom(skb);
+}
+
+/**
+ * __qdf_nbuf_tailroom() - return the amount of tail space available
+ * @skb: Pointer to network buffer
+ *
+ * Return: amount of tail room
+ */
+static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
+{
+	return skb_tailroom(skb);
+}
+
+/**
+ * __qdf_nbuf_push_head() - Push data in the front
+ * @skb: Pointer to network buffer
+ * @size: size to be pushed
+ *
+ * The cached DMA address (when the buffer has been mapped) is moved back
+ * by @size as well, so it stays in sync with skb->data.
+ *
+ * Return: New data pointer of this buf after data has been pushed,
+ * or NULL if there is not enough room in this buf.
+ */
+static inline uint8_t *__qdf_nbuf_push_head(struct sk_buff *skb, size_t size)
+{
+	if (QDF_NBUF_CB_PADDR(skb))
+		QDF_NBUF_CB_PADDR(skb) -= size;
+
+	return skb_push(skb, size);
+}
+
+/**
+ * __qdf_nbuf_put_tail() - Puts data in the end
+ * @skb: Pointer to network buffer
+ * @size: size to be pushed
+ *
+ * If there is insufficient tailroom the head buffer is expanded; if that
+ * expansion fails, @skb is FREED and NULL is returned, so the caller must
+ * not touch @skb after a NULL return.
+ *
+ * Return: data pointer of this buf where new data has to be
+ * put, or NULL if there is not enough room in this buf.
+ */
+static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
+{
+	if (skb_tailroom(skb) < size) {
+		if (unlikely(pskb_expand_head(skb, 0,
+			size - skb_tailroom(skb), GFP_ATOMIC))) {
+			dev_kfree_skb_any(skb);
+			return NULL;
+		}
+	}
+	return skb_put(skb, size);
+}
+
+/**
+ * __qdf_nbuf_pull_head() - pull data out from the front
+ * @skb: Pointer to network buffer
+ * @size: size to be popped
+ *
+ * The cached DMA address (when the buffer has been mapped) is advanced by
+ * @size as well, so it stays in sync with skb->data.
+ *
+ * Return: New data pointer of this buf after data has been popped,
+ * or NULL if there is not sufficient data to pull.
+ */
+static inline uint8_t *__qdf_nbuf_pull_head(struct sk_buff *skb, size_t size)
+{
+	if (QDF_NBUF_CB_PADDR(skb))
+		QDF_NBUF_CB_PADDR(skb) += size;
+
+	return skb_pull(skb, size);
+}
+
+/**
+ * __qdf_nbuf_trim_tail() - trim data out from the end
+ * @skb: Pointer to network buffer
+ * @size: number of bytes to trim from the tail
+ *
+ * Return: none
+ */
+static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
+{
+	/* skb_trim() returns void; "return <void expression>;" in a void
+	 * function is a C constraint violation, so call it plainly.
+	 */
+	skb_trim(skb, skb->len - size);
+}
+
+
+/*
+ * prototypes. Implemented in qdf_nbuf.c
+ */
+qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
+QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
+ qdf_nbuf_rx_cksum_t *cksum);
+uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
+void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
+uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
+void __qdf_nbuf_ref(struct sk_buff *skb);
+int __qdf_nbuf_shared(struct sk_buff *skb);
+
+/*
+ * qdf_nbuf_pool_delete() implementation - do nothing in linux
+ */
+#define __qdf_nbuf_pool_delete(osdev)
+
+/**
+ * __qdf_nbuf_clone() - clone the nbuf (the clone shares the data buffer,
+ * so the data must be treated as read-only)
+ * @skb: Pointer to network buffer
+ *
+ * GFP_ATOMIC is used unconditionally so the call is safe in any context;
+ * if that is overkill we could check "in_irq() || irqs_disabled()" and
+ * use GFP_KERNEL in the normal case.
+ *
+ * Return: cloned skb, or NULL on allocation failure
+ */
+static inline struct sk_buff *__qdf_nbuf_clone(struct sk_buff *skb)
+{
+	return skb_clone(skb, GFP_ATOMIC);
+}
+
+/**
+ * __qdf_nbuf_copy() - returns a private copy of the skb
+ * @skb: Pointer to network buffer
+ *
+ * This API returns a private copy of the skb (data included), so the
+ * returned skb is completely modifiable by the caller.
+ *
+ * Return: skb or NULL on allocation failure
+ */
+static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
+{
+	return skb_copy(skb, GFP_ATOMIC);
+}
+
/* thin alias: reserve headroom via the native skb_reserve() */
#define __qdf_nbuf_reserve skb_reserve
+
+
+/**
+ * __qdf_nbuf_head() - return the pointer the skb's head pointer
+ * @skb: Pointer to network buffer
+ *
+ * Return: Pointer to head buffer
+ */
+static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
+{
+ return skb->head;
+}
+
+/**
+ * __qdf_nbuf_data() - return the pointer to data header in the skb
+ * @skb: Pointer to network buffer
+ *
+ * Return: Pointer to skb data
+ */
+static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
+{
+ return skb->data;
+}
+
+/**
+ * __qdf_nbuf_get_protocol() - return the protocol value of the skb
+ * @skb: Pointer to network buffer
+ *
+ * Return: skb protocol
+ */
+static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
+{
+ return skb->protocol;
+}
+
+/**
+ * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
+ * @skb: Pointer to network buffer
+ *
+ * Return: skb ip_summed
+ */
+static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
+{
+ return skb->ip_summed;
+}
+
/**
 * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
 * @skb: Pointer to network buffer
 * @ip_summed: ip checksum state (presumably one of the kernel
 *	       CHECKSUM_* values — confirm against callers)
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
					uint8_t ip_summed)
{
	skb->ip_summed = ip_summed;
}
+
+/**
+ * __qdf_nbuf_get_priority() - return the priority value of the skb
+ * @skb: Pointer to network buffer
+ *
+ * Return: skb priority
+ */
+static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
+{
+ return skb->priority;
+}
+
/**
 * __qdf_nbuf_set_priority() - sets the priority value of the skb
 * @skb: Pointer to network buffer
 * @p: priority value to store
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
{
	skb->priority = p;
}
+
/**
 * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb to link after @skb
 *
 * Return: void
 */
static inline void
__qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}
+
+/**
+ * __qdf_nbuf_next() - return the next skb pointer of the current skb
+ * @skb: Current skb
+ *
+ * Return: the next skb pointed to by the current skb
+ */
+static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
+{
+ return skb->next;
+}
+
/**
 * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
 * @skb: Current skb
 * @skb_next: Next skb to link after @skb
 *
 * This fn is used to link up extensions to the head skb. Does not handle
 * linking to the head
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
{
	skb->next = skb_next;
}
+
+/**
+ * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
+ * @skb: Current skb
+ *
+ * Return: the next skb pointed to by the current skb
+ */
+static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
+{
+ return skb->next;
+}
+
+/**
+ * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
+ * @skb_head: head_buf nbuf holding head segment (single)
+ * @ext_list: nbuf list holding linked extensions to the head
+ * @ext_len: Total length of all buffers in the extension list
+ *
+ * This function is used to link up a list of packet extensions (seg1, 2,* ...)
+ * to the nbuf holding the head segment (seg0)
+ *
+ * Return: none
+ */
+static inline void
+__qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
+ struct sk_buff *ext_list, size_t ext_len)
+{
+ skb_shinfo(skb_head)->frag_list = ext_list;
+ skb_head->data_len = ext_len;
+ skb_head->len += skb_head->data_len;
+}
+
/**
 * __qdf_nbuf_tx_free() - free a chain of skbs linked via their next pointers
 * @bufs: Head of the network buffer list to free
 * @tx_err: TX error status (unused in the Linux implementation; kept for
 *	    API symmetry with other platforms)
 *
 * Return: none
 */
static inline void __qdf_nbuf_tx_free(struct sk_buff *bufs, int tx_err)
{
	while (bufs) {
		struct sk_buff *next = __qdf_nbuf_next(bufs);
		__qdf_nbuf_free(bufs);
		bufs = next;
	}
}
+
+/**
+ * __qdf_nbuf_get_age() - return the checksum value of the skb
+ * @skb: Pointer to network buffer
+ *
+ * Return: checksum value
+ */
+static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
+{
+ return skb->csum;
+}
+
/**
 * __qdf_nbuf_set_age() - store an age value in the skb
 * @skb: Pointer to network buffer
 * @v: Value to store (the csum field doubles as the age holder here)
 *
 * Return: none
 */
static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
{
	skb->csum = v;
}
+
/**
 * __qdf_nbuf_adj_age() - decrement the age value stored in the skb
 * @skb: Pointer to network buffer
 * @adj: Adjustment value (subtracted from the stored age)
 *
 * Return: none
 */
static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
{
	skb->csum -= adj;
}
+
/**
 * __qdf_nbuf_copy_bits() - copy @len bytes at @offset out of the skb
 * @skb: Pointer to network buffer
 * @offset: Offset into the packet data
 * @len: Number of bytes to copy
 * @to: Destination pointer
 *
 * Note the argument order differs from skb_copy_bits(): @to comes last
 * here but third in the kernel API.
 *
 * Return: 0 on success, negative errno from skb_copy_bits() otherwise
 */
static inline int32_t
__qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
{
	int32_t ret = skb_copy_bits(skb, offset, to, len);

	return ret;
}
+
+/**
+ * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
+ * @skb: Pointer to network buffer
+ * @len: Packet length
+ *
+ * Return: none
+ */
+static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
+{
+ if (skb->len > len) {
+ skb_trim(skb, len);
+ } else {
+ if (skb_tailroom(skb) < len - skb->len) {
+ if (unlikely(pskb_expand_head(skb, 0,
+ len - skb->len - skb_tailroom(skb),
+ GFP_ATOMIC))) {
+ dev_kfree_skb_any(skb);
+ qdf_assert(0);
+ }
+ }
+ skb_put(skb, (len - skb->len));
+ }
+}
+
/**
 * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
 * @skb: Pointer to network buffer
 * @protocol: Protocol type to store in skb->protocol
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
{
	skb->protocol = protocol;
}
+
/* set/get the Tx-over-HTT2 flag carried in the nbuf control block */
#define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
	do { \
		QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi); \
	} while (0)

#define __qdf_nbuf_get_tx_htt2_frm(skb) \
	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)
+
+uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
+ struct qdf_tso_info_t *tso_info);
+
+uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);
+
+static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
+{
+ if (skb_is_gso(skb) &&
+ (skb_is_gso_v6(skb) ||
+ (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
+ return true;
+ else
+ return false;
+}
+
+struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);
+
/**
 * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
 *			     and get hw_classify by peeking
 *			     into packet
 * @nbuf: Network buffer (skb on Linux)
 * @pkt_type: Pkt type (from enum htt_pkt_type)
 * @pkt_subtype: Bit 4 of this field in HTT descriptor
 *		needs to be set in case of CE classification support
 *		Is set by this macro.
 * @hw_classify: This is a flag which is set to indicate
 *		CE classification is enabled.
 *		Do not set this bit for VLAN packets
 *		OR for mcast / bcast frames.
 *
 * This macro parses the payload to figure out relevant Tx meta-data e.g.
 * whether to enable tx_classify bit in CE.
 *
 * The mcast/bcast check inspects the first bytes of the frame, i.e.
 * eh->h_dest, since h_dest is the first member of struct ethhdr.
 * @hw_classify and @pkt_subtype are only ever set, never cleared, so
 * callers must pre-initialize them.
 *
 * Overrides pkt_type only if required for 802.3 frames (original ethernet)
 * If protocol is less than ETH_P_802_3_MIN (0x600), then
 * it is the length and a 802.3 frame else it is Ethernet Type II
 * (RFC 894).
 * Bit 4 in pkt_subtype is the tx_classify bit
 *
 * Return: void
 */
#define __qdf_nbuf_tx_info_get(skb, pkt_type, \
			pkt_subtype, hw_classify) \
do { \
	struct ethhdr *eh = (struct ethhdr *)skb->data; \
	uint16_t ether_type = ntohs(eh->h_proto); \
	bool is_mc_bc; \
	\
	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) || \
		   is_multicast_ether_addr((uint8_t *)eh); \
	\
	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) { \
		hw_classify = 1; \
		pkt_subtype = 0x01 << \
			HTT_TX_CLASSIFY_BIT_S; \
	} \
	\
	if (unlikely(ether_type < ETH_P_802_3_MIN)) \
		pkt_type = htt_pkt_type_ethernet; \
	\
} while (0)
+
+/**
+ * nbuf private buffer routines
+ */
+
/**
 * __qdf_nbuf_peek_header() - return the header's addr & length
 * @skb: Pointer to network buffer
 * @addr: Out parameter; receives the current data pointer
 * @len: Out parameter; receives the network buffer length
 *
 * Return: none
 */
static inline void
__qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
{
	*addr = skb->data;
	*len = skb->len;
}
+
/**
 * typedef struct __qdf_nbuf_queue_t - singly-linked network buffer queue
 * @head: First skb in the queue (NULL when empty)
 * @tail: Last skb in the queue (undefined when empty)
 * @qlen: Number of skbs currently queued
 *
 * Queue links are kept in each skb's own next pointer; no locking is
 * provided — callers must synchronize access themselves.
 */
typedef struct __qdf_nbuf_qhead {
	struct sk_buff *head;
	struct sk_buff *tail;
	unsigned int qlen;
} __qdf_nbuf_queue_t;
+
+/******************Functions *************/
+
+/**
+ * __qdf_nbuf_queue_init() - initiallize the queue head
+ * @qhead: Queue head
+ *
+ * Return: QDF status
+ */
+static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
+{
+ memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
+ return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * __qdf_nbuf_queue_add() - add an skb in the tail of the queue
+ * @qhead: Queue head
+ * @skb: Pointer to network buffer
+ *
+ * This is a lockless version, driver must acquire locks if it
+ * needs to synchronize
+ *
+ * Return: none
+ */
+static inline void
+__qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
+{
+ skb->next = NULL; /*Nullify the next ptr */
+
+ if (!qhead->head)
+ qhead->head = skb;
+ else
+ qhead->tail->next = skb;
+
+ qhead->tail = skb;
+ qhead->qlen++;
+}
+
+/**
+ * __qdf_nbuf_queue_append() - Append src list at the end of dest list
+ * @dest: target netbuf queue
+ * @src: source netbuf queue
+ *
+ * Return: target netbuf queue
+ */
+static inline __qdf_nbuf_queue_t *
+__qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
+{
+ if (!dest)
+ return NULL;
+ else if (!src || !(src->head))
+ return dest;
+
+ if (!(dest->head))
+ dest->head = src->head;
+ else
+ dest->tail->next = src->head;
+
+ dest->tail = src->tail;
+ dest->qlen += src->qlen;
+ return dest;
+}
+
+/**
+ * __qdf_nbuf_queue_insert_head() - add an skb at the head of the queue
+ * @qhead: Queue head
+ * @skb: Pointer to network buffer
+ *
+ * This is a lockless version, driver must acquire locks if it needs to
+ * synchronize
+ *
+ * Return: none
+ */
+static inline void
+__qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
+{
+ if (!qhead->head) {
+ /*Empty queue Tail pointer Must be updated */
+ qhead->tail = skb;
+ }
+ skb->next = qhead->head;
+ qhead->head = skb;
+ qhead->qlen++;
+}
+
+/**
+ * __qdf_nbuf_queue_remove() - remove a skb from the head of the queue
+ * @qhead: Queue head
+ *
+ * This is a lockless version. Driver should take care of the locks
+ *
+ * Return: skb or NULL
+ */
+static inline
+struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
+{
+ __qdf_nbuf_t tmp = NULL;
+
+ if (qhead->head) {
+ qhead->qlen--;
+ tmp = qhead->head;
+ if (qhead->head == qhead->tail) {
+ qhead->head = NULL;
+ qhead->tail = NULL;
+ } else {
+ qhead->head = tmp->next;
+ }
+ tmp->next = NULL;
+ }
+ return tmp;
+}
+
+/**
+ * __qdf_nbuf_queue_free() - free a queue
+ * @qhead: head of queue
+ *
+ * Return: QDF status
+ */
+static inline QDF_STATUS
+__qdf_nbuf_queue_free(__qdf_nbuf_queue_t *qhead)
+{
+ __qdf_nbuf_t buf = NULL;
+
+ while ((buf = __qdf_nbuf_queue_remove(qhead)) != NULL)
+ __qdf_nbuf_free(buf);
+ return QDF_STATUS_SUCCESS;
+}
+
+
+/**
+ * __qdf_nbuf_queue_first() - returns the first skb in the queue
+ * @qhead: head of queue
+ *
+ * Return: NULL if the queue is empty
+ */
+static inline struct sk_buff *
+__qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
+{
+ return qhead->head;
+}
+
+/**
+ * __qdf_nbuf_queue_len() - return the queue length
+ * @qhead: Queue head
+ *
+ * Return: Queue length
+ */
+static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
+{
+ return qhead->qlen;
+}
+
+/**
+ * __qdf_nbuf_queue_next() - return the next skb from packet chain
+ * @skb: Pointer to network buffer
+ *
+ * This API returns the next skb from packet chain, remember the skb is
+ * still in the queue
+ *
+ * Return: NULL if no packets are there
+ */
+static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
+{
+ return skb->next;
+}
+
+/**
+ * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
+ * @qhead: Queue head
+ *
+ * Return: true if length is 0 else false
+ */
+static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
+{
+ return qhead->qlen == 0;
+}
+
/*
 * Note: unlike the kernel's sk_buff_head, qdf_nbuf_queue_t is a small
 * struct (not a pointer type) so it can be embedded directly in other
 * structures.
 */
+
/**
 * __qdf_nbuf_set_send_complete_flag() - set the send-complete flag
 * @skb: Pointer to network buffer
 * @flag: Flag value
 *
 * No-op in the Linux implementation; the flag is not tracked here.
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
{
}
+
+/**
+ * __qdf_nbuf_realloc_headroom() - This keeps the skb shell intact
+ * expands the headroom
+ * in the data region. In case of failure the skb is released.
+ * @skb: sk buff
+ * @headroom: size of headroom
+ *
+ * Return: skb or NULL
+ */
+static inline struct sk_buff *
+__qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
+{
+ if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ }
+ return skb;
+}
+
+/**
+ * __qdf_nbuf_realloc_tailroom() - This keeps the skb shell intact
+ * exapnds the tailroom
+ * in data region. In case of failure it releases the skb.
+ * @skb: sk buff
+ * @tailroom: size of tailroom
+ *
+ * Return: skb or NULL
+ */
+static inline struct sk_buff *
+__qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
+{
+ if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
+ return skb;
+ /**
+ * unlikely path
+ */
+ dev_kfree_skb_any(skb);
+ return NULL;
+}
+
+/**
+ * __qdf_nbuf_unshare() - skb unshare
+ * @skb: sk buff
+ *
+ * create a version of the specified nbuf whose contents
+ * can be safely modified without affecting other
+ * users.If the nbuf is a clone then this function
+ * creates a new copy of the data. If the buffer is not
+ * a clone the original buffer is returned.
+ *
+ * Return: skb or NULL
+ */
+static inline struct sk_buff *
+__qdf_nbuf_unshare(struct sk_buff *skb)
+{
+ return skb_unshare(skb, GFP_ATOMIC);
+}
+
/**
 * __qdf_nbuf_is_cloned() - test whether the nbuf is a clone
 * @skb: sk buff
 *
 * Return: true if the skb shares its data with another skb, else false
 */
static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
{
	bool cloned = skb_cloned(skb) ? true : false;

	return cloned;
}
+
/**
 * __qdf_nbuf_pool_init() - init pool
 * @net: net handle
 *
 * No-op on Linux: skbs come from the kernel allocator, so there is no
 * private buffer pool to set up.
 *
 * Return: QDF_STATUS_SUCCESS always
 */
static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
{
	return QDF_STATUS_SUCCESS;
}
+
/*
 * qdf_nbuf_pool_delete() implementation - do nothing in linux
 * NOTE(review): this is an exact duplicate of the definition earlier in
 * this file; the identical redefinition is benign in C, but one copy
 * should be removed.
 */
#define __qdf_nbuf_pool_delete(osdev)
+
+/**
+ * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
+ * release the skb.
+ * @skb: sk buff
+ * @headroom: size of headroom
+ * @tailroom: size of tailroom
+ *
+ * Return: skb or NULL
+ */
+static inline struct sk_buff *
+__qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
+{
+ if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
+ return skb;
+
+ dev_kfree_skb_any(skb);
+ return NULL;
+}
+
/**
 * __qdf_nbuf_tx_cksum_info() - tx checksum info
 * @skb: Pointer to network buffer
 * @hdr_off: Out parameter for the checksum header offset (never filled in)
 * @where: Out parameter for the checksum location (never filled in)
 *
 * Not implemented for Linux: unconditionally asserts and reports no
 * checksum information.
 *
 * Return: always false
 */
static inline bool
__qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
			 uint8_t **where)
{
	qdf_assert(0);
	return false;
}
+
/**
 * __qdf_nbuf_reset_ctxt() - zero the skb control block (cb) area
 * @nbuf: buffer whose per-packet context is cleared
 *
 * Return: none
 */
static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
{
	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
}
+
+/**
+ * __qdf_nbuf_network_header() - get network header
+ * @buf: buffer
+ *
+ * Return: network header pointer
+ */
+static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
+{
+ return skb_network_header(buf);
+}
+
+/**
+ * __qdf_nbuf_transport_header() - get transport header
+ * @buf: buffer
+ *
+ * Return: transport header pointer
+ */
+static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
+{
+ return skb_transport_header(buf);
+}
+
+/**
+ * __qdf_nbuf_tcp_tso_size() - return the size of TCP segment size (MSS),
+ * passed as part of network buffer by network stack
+ * @skb: sk buff
+ *
+ * Return: TCP MSS size
+ * */
+static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->gso_size;
+}
+
/**
 * __qdf_nbuf_init() - Re-initializes the skb for re-use
 * @nbuf: sk buff
 *
 * Resets the reference count to 1 and rewinds the data/tail pointers to
 * the start of the buffer plus NET_SKB_PAD.
 * NOTE(review): assumes the skb was originally allocated with
 * NET_SKB_PAD bytes of headroom — confirm against the allocation path.
 *
 * Return: none
 */
static inline void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
{
	atomic_set(&nbuf->users, 1);
	nbuf->data = nbuf->head + NET_SKB_PAD;
	skb_reset_tail_pointer(nbuf);
}
+
/**
 * __qdf_nbuf_set_rx_info() - copy rx status info into the skb cb area
 * @nbuf: sk buffer
 * @info: rx info to store
 * @len: length of @info in bytes (clamped to the space available)
 *
 * The info is stored after struct qdf_nbuf_cb inside skb->cb.
 * NOTE(review): assumes sizeof(struct qdf_nbuf_cb) is strictly smaller
 * than the skb cb area; otherwise `max` underflows — confirm cb layout
 * (some platforms enlarge skb->cb, e.g. to 96 bytes).
 *
 * Return: none
 */
static inline void
__qdf_nbuf_set_rx_info(__qdf_nbuf_t nbuf, void *info, uint32_t len)
{
	/* Customer may have skb->cb size increased, e.g. to 96 bytes,
	 * then len's large enough to save the rs status info struct
	 */
	uint8_t offset = sizeof(struct qdf_nbuf_cb);
	uint32_t max = sizeof(((struct sk_buff *)0)->cb)-offset;

	len = (len > max ? max : len);

	memcpy(((uint8_t *)(nbuf->cb) + offset), info, len);
}
+
+/**
+ * __qdf_nbuf_get_rx_info() - get rx info
+ * @nbuf: sk buffer
+ *
+ * Return: rx_info
+ */
+static inline void *
+__qdf_nbuf_get_rx_info(__qdf_nbuf_t nbuf)
+{
+ uint8_t offset = sizeof(struct qdf_nbuf_cb);
+ return (void *)((uint8_t *)(nbuf->cb) + offset);
+}
+
+/*
+ * __qdf_nbuf_get_cb() - returns a pointer to skb->cb
+ * @nbuf: sk buff
+ *
+ * Return: void ptr
+ */
+static inline void *
+__qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
+{
+ return (void *)nbuf->cb;
+}
+
/**
 * __qdf_nbuf_headlen() - return the length of the skb's linear data area
 * @skb: sk buff
 *
 * Return: linear (non-paged) data length
 */
static inline size_t
__qdf_nbuf_headlen(struct sk_buff *skb)
{
	size_t linear_len = skb_headlen(skb);

	return linear_len;
}
+
+/**
+ * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb,
+ * @skb: sk buff
+ *
+ * Return: number of fragments
+ */
+static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->nr_frags;
+}
+
/**
 * __qdf_nbuf_tso_tcp_v4() - check if the TSO TCP pkt is IPv4
 * @skb: sk buff
 *
 * NOTE(review): compares the full gso_type bitmask with ==, so any
 * additional GSO flag (e.g. SKB_GSO_DODGY) makes this false;
 * __qdf_nbuf_is_tso() uses a bitwise & test instead — confirm which
 * semantics are intended.
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ? 1 : 0;
}
+
/**
 * __qdf_nbuf_tso_tcp_v6() - check if the TSO TCP pkt is IPv6
 * @skb: sk buff
 *
 * NOTE(review): compares the full gso_type bitmask with ==, so any
 * additional GSO flag makes this false — confirm a bitwise & test is
 * not intended (cf. __qdf_nbuf_is_tso()).
 *
 * Return: true/false
 */
static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6 ? 1 : 0;
}
+
/**
 * __qdf_nbuf_l2l3l4_hdr_len() - return the combined L2+L3+L4 header length
 * @skb: sk buff
 *
 * Computed as the transport-header offset plus the TCP header length.
 *
 * Return: size of the L2+L3+L4 headers in bytes
 */
static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
{
	size_t l4_off = skb_transport_offset(skb);

	return l4_off + tcp_hdrlen(skb);
}
+
/**
 * __qdf_nbuf_is_nonlinear() - test whether the nbuf has paged data
 * @skb: sk buff
 *
 * Return: true if part of the data lives outside the linear area
 */
static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? true : false;
}
+
+/**
+ * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb
+ * @buf: sk buff
+ *
+ * Return: TCP sequence number
+ */
+static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
+{
+ return ntohl(tcp_hdr(skb)->seq);
+}
+
/**
 * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's
 * private space
 * @skb: sk buff
 *
 * NOTE(review): offset 8 into skb->cb is a magic number — confirm it
 * matches the control-block layout reserved for the driver.
 *
 * Return: data pointer to typecast into your priv structure
 */
static inline uint8_t *
__qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
{
	return &skb->cb[8];
}
+
+#endif /*_I_QDF_NET_BUF_H */
diff --git a/qdf/linux/src/i_qdf_net_types.h b/qdf/linux/src/i_qdf_net_types.h
new file mode 100644
index 000000000000..0ec7d75fba96
--- /dev/null
+++ b/qdf/linux/src/i_qdf_net_types.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_net_types
+ * This file provides OS dependent net types API's.
+ */
+
+#ifndef _I_QDF_NET_TYPES_H
+#define _I_QDF_NET_TYPES_H
+
+#include <qdf_types.h> /* uint8_t, etc. */
+#include <asm/checksum.h>
+#include <net/ip6_checksum.h>
+
+typedef struct in6_addr __in6_addr_t;
+typedef __wsum __wsum_t;
+
+static inline int32_t __qdf_csum_ipv6(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, unsigned short proto,
+ __wsum sum)
+{
+ return csum_ipv6_magic((struct in6_addr *)saddr,
+ (struct in6_addr *)daddr, len, proto, sum);
+}
+
+#endif /* _I_QDF_NET_TYPES_H */
diff --git a/qdf/linux/src/i_qdf_perf.h b/qdf/linux/src/i_qdf_perf.h
new file mode 100644
index 000000000000..4471860d96c4
--- /dev/null
+++ b/qdf/linux/src/i_qdf_perf.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_perf
+ * This file provides OS dependent perf API's.
+ */
+
+#ifndef _I_QDF_PERF_H
+#define _I_QDF_PERF_H
+
+#ifdef QCA_PERF_PROFILING
+
+#if (QCA_MIPS74K_PERF_PROFILING || QCA_MIPS24KK_PERF_PROFILING)
+#include <qdf_mips_perf_pvt.h>
+#endif
+
/* #defines required for structures */
#define MAX_SAMPLES_SHIFT   5   /* change this only */
#define MAX_SAMPLES         (1 << MAX_SAMPLES_SHIFT)
/* ring-buffer index increment, wrapping at MAX_SAMPLES; the argument is
 * fully parenthesized so expressions such as INC_SAMPLES(a ^ b) group
 * correctly (the previous form expanded to (x + 1), binding "+ 1" into
 * the argument expression)
 */
#define INC_SAMPLES(x)      (((x) + 1) & (MAX_SAMPLES - 1))
#define MAX_SAMPLE_SZ       (sizeof(uint32_t) * MAX_SAMPLES)
#define PER_SAMPLE_SZ       sizeof(uint32_t)
+
/**
 * typedef qdf_perf_entry_t - performance entry
 * @list: sibling linkage
 * @child: list of child entries
 * @parent: pointer to the parent entry
 * @type: perf counter type
 * @name: entry name string
 * @proc: procfs entry exposing this counter
 * @start_tsc: per-sample start timestamps
 * @end_tsc: per-sample end timestamps
 * @samples: ring buffer of computed sample values
 * @sample_idx: current ring-buffer index
 * @lock_irq: spinlock protecting the sample state
 *
 * NOTE(review): @parent is declared as struct qdf_perf_entry *, which
 * does not match this struct's tag (qdf_os_perf_entry); it compiles as
 * a pointer to a distinct incomplete type — confirm whether a
 * self-referential pointer (struct qdf_os_perf_entry *) was intended.
 */
typedef struct qdf_os_perf_entry {
	struct list_head list;
	struct list_head child;

	struct qdf_perf_entry *parent;

	qdf_perf_cntr_t type;
	uint8_t *name;

	struct proc_dir_entry *proc;

	uint64_t start_tsc[MAX_SAMPLES];
	uint64_t end_tsc[MAX_SAMPLES];

	uint32_t samples[MAX_SAMPLES];
	uint32_t sample_idx;

	spinlock_t lock_irq;

} qdf_perf_entry_t;
+
+/* typedefs */
+typedef void *__qdf_perf_id_t;
+
+#endif /* QCA_PERF_PROFILING */
+#endif /* _I_QDF_PERF_H */
diff --git a/qdf/linux/src/i_qdf_time.h b/qdf/linux/src/i_qdf_time.h
new file mode 100644
index 000000000000..6623a3dce6fd
--- /dev/null
+++ b/qdf/linux/src/i_qdf_time.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_time
+ * This file provides OS dependent time API's.
+ */
+
+#ifndef _I_QDF_TIME_H
+#define _I_QDF_TIME_H
+
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#include <asm/arch_timer.h>
+#ifdef CONFIG_CNSS
+#include <net/cnss.h>
+#endif
+
+typedef unsigned long __qdf_time_t;
+
+/**
+ * __qdf_system_ticks() - get system ticks
+ *
+ * Return: system tick in jiffies
+ */
+static inline __qdf_time_t __qdf_system_ticks(void)
+{
+ return jiffies;
+}
+
/**
 * __qdf_system_ticks_to_msecs() - convert system ticks to milliseconds
 * @ticks: System ticks (jiffies)
 *
 * Return: the tick count expressed in milliseconds
 */
static inline uint32_t __qdf_system_ticks_to_msecs(unsigned long ticks)
{
	uint32_t msecs = jiffies_to_msecs(ticks);

	return msecs;
}
+
+/**
+ * __qdf_system_msecs_to_ticks() - convert milli seconds into system ticks
+ * @msecs: Milli seconds
+ *
+ * Return: milli seconds converted into system ticks
+ */
+static inline __qdf_time_t __qdf_system_msecs_to_ticks(uint32_t msecs)
+{
+ return msecs_to_jiffies(msecs);
+}
+
+/**
+ * __qdf_get_system_uptime() - get system uptime
+ *
+ * Return: system uptime in jiffies
+ */
+static inline __qdf_time_t __qdf_get_system_uptime(void)
+{
+ return jiffies;
+}
+
/**
 * __qdf_get_system_timestamp() - system timestamp in milliseconds
 *
 * Derived from jiffies: whole seconds converted to ms plus the
 * sub-second remainder scaled by (1000 / HZ).
 * NOTE(review): (1000 / HZ) is integer division and evaluates to 0 for
 * HZ > 1000, silently dropping the sub-second part — confirm the HZ
 * range, or consider jiffies_to_msecs().
 *
 * Return: milliseconds since boot (jiffy resolution)
 */
static inline __qdf_time_t __qdf_get_system_timestamp(void)
{
	return (jiffies / HZ) * 1000 + (jiffies % HZ) * (1000 / HZ);
}
+
#ifdef CONFIG_ARM
/**
 * __qdf_udelay() - delay execution for given microseconds
 * @usecs: Micro seconds to delay
 *
 * XScale-class builds limit the value udelay() may be given, so long
 * delays are issued in 1024-microsecond slices plus a remainder.
 *
 * Return: none
 */
static inline void __qdf_udelay(uint32_t usecs)
{
	uint32_t slices = usecs >> 10;	/* number of full 1024 us slices */

	while (slices--)
		udelay(1024);

	/* remaining sub-slice delay, always < 1024 us */
	udelay(usecs & ((1 << 10) - 1));
}
#else
/**
 * __qdf_udelay() - delay execution for given microseconds
 * @usecs: Micro seconds to delay
 *
 * Return: none
 */
static inline void __qdf_udelay(uint32_t usecs)
{
	/* Normal delay function; time specified in microseconds */
	udelay(usecs);
}
#endif
+
/**
 * __qdf_mdelay() - delay execution for given milliseconds
 * @msecs: Milliseconds to delay
 *
 * Return: none
 */
static inline void __qdf_mdelay(uint32_t msecs)
{
	mdelay(msecs);
}
+
/**
 * __qdf_system_time_after() - Check if a is later than b
 * @a: Time stamp value a
 * @b: Time stamp value b
 *
 * The signed subtraction makes the comparison robust against tick
 * counter wrap-around.
 *
 * Return:
 * true if time stamp a comes after (is later than) b, else false
 */
static inline bool __qdf_system_time_after(__qdf_time_t a, __qdf_time_t b)
{
	return (long)(b) - (long)(a) < 0;
}
+
+/**
+ * __qdf_system_time_before() - Check if a is before b
+ * @a: Time stamp value a
+ * @b: Time stamp value b
+ *
+ * Return:
+ * true if a is before b else false
+ */
+static inline bool __qdf_system_time_before(__qdf_time_t a, __qdf_time_t b)
+{
+ return __qdf_system_time_after(b, a);
+}
+
+/**
+ * __qdf_system_time_after_eq() - Check if a atleast as recent as b, if not
+ * later
+ * @a: Time stamp value a
+ * @b: Time stamp value b
+ *
+ * Return:
+ * true if a >= b else false
+ */
+static inline bool __qdf_system_time_after_eq(__qdf_time_t a, __qdf_time_t b)
+{
+ return (long)(a) - (long)(b) >= 0;
+}
+
/**
 * __qdf_get_monotonic_boottime() - get monotonic kernel boot time
 * This API is similar to qdf_get_system_boottime but it includes
 * time spent in suspend.
 *
 * Return: Time in microseconds
 */
#ifdef CONFIG_CNSS
static inline uint64_t __qdf_get_monotonic_boottime(void)
{
	struct timespec ts;

	cnss_get_monotonic_boottime(&ts);

	return ((uint64_t) ts.tv_sec * 1000000) + (ts.tv_nsec / 1000);
}
#else
/* Fallback: jiffies based, so time spent in suspend is NOT included and
 * the value only has jiffy (millisecond) resolution despite the
 * microsecond unit.
 */
static inline uint64_t __qdf_get_monotonic_boottime(void)
{
	return __qdf_system_ticks_to_msecs(__qdf_system_ticks()) * 1000;
}
#endif /* CONFIG_CNSS */
+
#ifdef QCA_WIFI_3_0_ADRASTEA

/**
 * __qdf_get_log_timestamp() - get QTIMER ticks
 *
 * Returns QTIMER(19.2 MHz) clock ticks. To convert it into seconds
 * divide it by 19200.
 *
 * Return: QTIMER(19.2 MHz) clock ticks
 */
static inline uint64_t __qdf_get_log_timestamp(void)
{
	return arch_counter_get_cntpct();
}
#else

/**
 * __qdf_get_log_timestamp - get time stamp for logging
 * For adrastea this API returns QTIMER tick which is needed to synchronize
 * host and fw log timestamps
 * For ROME and other discrete solution this API returns system boot time stamp
 *
 * Note: units differ per branch — microseconds with CONFIG_CNSS, but
 * jiffy-resolution milliseconds scaled by 1000 otherwise.
 *
 * Return:
 * QTIMER ticks(19.2MHz) for adrastea
 * System tick for rome and other future discrete solutions
 */
static inline uint64_t __qdf_get_log_timestamp(void)
{
#ifdef CONFIG_CNSS
	struct timespec ts;

	cnss_get_boottime(&ts);

	return ((uint64_t) ts.tv_sec * 1000000) + (ts.tv_nsec / 1000);
#else
	return __qdf_system_ticks_to_msecs(__qdf_system_ticks()) * 1000;
#endif /* CONFIG_CNSS */
}
#endif /* QCA_WIFI_3_0_ADRASTEA */
+
+#endif
diff --git a/qdf/linux/src/i_qdf_timer.h b/qdf/linux/src/i_qdf_timer.h
new file mode 100644
index 000000000000..7909015627d1
--- /dev/null
+++ b/qdf/linux/src/i_qdf_timer.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_timer
+ * This file provides OS dependent timer API's.
+ */
+
+#ifndef _I_QDF_TIMER_H
+#define _I_QDF_TIMER_H
+
+#include <linux/version.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <qdf_types.h>
+
+/* timer data type */
+typedef struct timer_list __qdf_timer_t;
+
+typedef void (*qdf_dummy_timer_func_t)(unsigned long arg);
+
+/**
+ * __qdf_timer_init() - initialize a softirq timer
+ * @hdl: OS handle (unused on Linux)
+ * @timer: Pointer to timer object
+ * @func: Callback invoked on expiry
+ * @arg: Argument passed to the callback
+ * @type: deferrable or non deferrable timer type
+ *
+ * QDF_TIMER_TYPE_SW creates a deferrable timer which does not wake the
+ * CPU on expiry; QDF_TIMER_TYPE_WAKE_APPS creates a non-deferrable timer
+ * which does wake the CPU on expiry.
+ *
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS __qdf_timer_init(qdf_handle_t hdl,
+					  struct timer_list *timer,
+					  qdf_timer_func_t func, void *arg,
+					  QDF_TIMER_TYPE type)
+{
+	if (type == QDF_TIMER_TYPE_SW)
+		init_timer_deferrable(timer);
+	else
+		init_timer(timer);
+
+	timer->function = (qdf_dummy_timer_func_t) func;
+	timer->data = (unsigned long)arg;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * __qdf_timer_start() - start a qdf softirq timer
+ * @timer: Pointer to timer object
+ * @delay: Delay in milliseconds
+ *
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS __qdf_timer_start(struct timer_list *timer,
+					   uint32_t delay)
+{
+	unsigned long deadline = jiffies + msecs_to_jiffies(delay);
+
+	timer->expires = deadline;
+	add_timer(timer);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * __qdf_timer_mod() - modify a timer's expiry
+ * @timer: Pointer to timer object
+ * @delay: Delay in milliseconds
+ *
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS __qdf_timer_mod(struct timer_list *timer,
+					 uint32_t delay)
+{
+	unsigned long deadline = jiffies + msecs_to_jiffies(delay);
+
+	mod_timer(timer, deadline);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * __qdf_timer_stop() - cancel a timer
+ * @timer: Pointer to timer object
+ *
+ * Return: true if the timer was pending and got deactivated,
+ *         false if the timer was not pending (e.g. already fired).
+ */
+static inline bool __qdf_timer_stop(struct timer_list *timer)
+{
+	/*
+	 * del_timer() returns nonzero only when the timer was pending;
+	 * return that directly as a bool rather than 1/0 via an if/else.
+	 */
+	return del_timer(timer) != 0;
+}
+
+/**
+ * __qdf_timer_free() - free a qdf timer
+ * @timer: Pointer to timer object
+ *
+ * Waits for any running timer handler to finish before deactivating
+ * the timer (del_timer_sync), so it must not be called from the
+ * timer's own callback or from interrupt context.
+ *
+ * Return: none
+ */
+static inline void __qdf_timer_free(struct timer_list *timer)
+{
+	del_timer_sync(timer);
+}
+
+/**
+ * __qdf_timer_sync_cancel() - synchronously cancel a timer
+ * @timer: Pointer to timer object
+ *
+ * Synchronization Rules:
+ * 1. caller must make sure the timer function will not use
+ *    qdf_set_timer to add itself again.
+ * 2. caller must not hold any lock that the timer function
+ *    is likely to hold as well.
+ * 3. It can't be called from interrupt context.
+ *
+ * Return: true if timer was cancelled and deactived,
+ * false if timer was cancelled but already got fired.
+ */
+static inline bool __qdf_timer_sync_cancel(struct timer_list *timer)
+{
+	return del_timer_sync(timer);
+}
+
+#endif /*_QDF_TIMER_PVT_H*/
diff --git a/qdf/linux/src/i_qdf_trace.h b/qdf/linux/src/i_qdf_trace.h
new file mode 100644
index 000000000000..6eb603bee4e7
--- /dev/null
+++ b/qdf/linux/src/i_qdf_trace.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_trace.h
+ *
+ * Linux-specific definitions for QDF trace
+ *
+ */
+
+#if !defined(__I_QDF_TRACE_H)
+#define __I_QDF_TRACE_H
+
+#if !defined(__printf)
+#define __printf(a, b)
+#endif
+
+/* Include Files */
+#include <cds_packet.h>
+
+#define QDF_ENABLE_TRACING
+
+#ifdef QDF_ENABLE_TRACING
+
+#define QDF_ASSERT(_condition) \
+ do { \
+ if (!(_condition)) { \
+ pr_err("QDF ASSERT in %s Line %d\n", \
+ __func__, __LINE__); \
+ WARN_ON(1); \
+ } \
+ } while (0)
+
+#else
+
+/* This code will be used for compilation if tracing is to be compiled out */
+/* of the code so these functions/macros are 'do nothing' */
+static inline void qdf_trace_msg(QDF_MODULE_ID module, ...)
+{
+}
+
+#define QDF_ASSERT(_condition)
+
+#endif
+
+#ifdef PANIC_ON_BUG
+
+#define QDF_BUG(_condition) \
+ do { \
+ if (!(_condition)) { \
+ pr_err("QDF BUG in %s Line %d\n", \
+ __func__, __LINE__); \
+ BUG_ON(1); \
+ } \
+ } while (0)
+
+#else
+
+#define QDF_BUG(_condition) \
+ do { \
+ if (!(_condition)) { \
+ pr_err("QDF BUG in %s Line %d\n", \
+ __func__, __LINE__); \
+ WARN_ON(1); \
+ } \
+ } while (0)
+
+#endif
+
+#endif /* __I_QDF_TRACE_H */
diff --git a/qdf/linux/src/i_qdf_types.h b/qdf/linux/src/i_qdf_types.h
new file mode 100644
index 000000000000..5a17bdf1b7ae
--- /dev/null
+++ b/qdf/linux/src/i_qdf_types.h
@@ -0,0 +1,292 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_types.h
+ * This file provides OS dependent types API's.
+ */
+
+#if !defined(__I_QDF_TYPES_H)
+#define __I_QDF_TYPES_H
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/version.h>
+#include <asm/div64.h>
+#include <qdf_status.h>
+
+#ifndef __KERNEL__
+#define __iomem
+#endif
+#include <asm/types.h>
+#include <asm/byteorder.h>
+#include <linux/version.h>
+
+#ifdef __KERNEL__
+#include <generated/autoconf.h>
+#include <linux/compiler.h>
+#include <linux/dma-mapping.h>
+#include <linux/wireless.h>
+#include <linux/if.h>
+#else
+
+/*
+ * Hack - coexist with prior defs of dma_addr_t.
+ * Eventually all other defs of dma_addr_t should be removed.
+ * At that point, the "already_defined" wrapper can be removed.
+ */
+#ifndef __dma_addr_t_already_defined__
+#define __dma_addr_t_already_defined__
+typedef unsigned long dma_addr_t;
+#endif
+
+#ifndef __ahdecl
+#ifdef __i386__
+#define __ahdecl __attribute__((regparm(0)))
+#else
+#define __ahdecl
+#endif
+#endif
+
+#define SIOCGIWAP 0
+#define IWEVCUSTOM 0
+#define IWEVREGISTERED 0
+#define IWEVEXPIRED 0
+#define SIOCGIWSCAN 0
+#define DMA_TO_DEVICE 0
+#define DMA_FROM_DEVICE 0
+#define __iomem
+#endif /* __KERNEL__ */
+
+/*
+ * max sg that we support
+ */
+#define __QDF_MAX_SCATTER 1
+
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+#define QDF_LITTLE_ENDIAN_MACHINE
+#elif defined(__BIG_ENDIAN_BITFIELD)
+#define QDF_BIG_ENDIAN_MACHINE
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) || !defined(__KERNEL__)
+#ifndef __bool_already_defined__
+#define __bool_already_defined__
+
+/**
+ * bool - This is an enum for boolean
+ * @false: zero
+ * @true: one
+ */
+typedef enum bool {
+ false = 0,
+ true = 1,
+} bool;
+#endif /* __bool_already_defined__ */
+#endif
+
+#define __qdf_packed __attribute__((packed))
+
+typedef int (*__qdf_os_intr)(void *);
+/**
+ * Private definitions of general data types
+ */
+typedef dma_addr_t __qdf_dma_addr_t;
+typedef size_t __qdf_dma_size_t;
+typedef dma_addr_t __qdf_dma_context_t;
+
+#define qdf_dma_mem_context(context) dma_addr_t context
+#define qdf_get_dma_mem_context(var, field) ((qdf_dma_context_t)(var->field))
+
+/**
+ * typedef struct __qdf_resource_t - qdf resource type
+ * @paddr: Physical address
+ * @paddr: Virtual address
+ * @len: Length
+ */
+typedef struct __qdf_resource {
+ unsigned long paddr;
+ void __iomem *vaddr;
+ unsigned long len;
+} __qdf_resource_t;
+
+struct __qdf_mempool_ctxt;
+
+#define MAX_MEM_POOLS 64
+
+/**
+ * struct __qdf_device - generic qdf device type
+ * @drv: Pointer to driver
+ * @drv_hdl: Pointer to driver handle
+ * @drv_name: Pointer to driver name
+ * @irq: IRQ
+ * @dev: Pointer to device
+ * @res: QDF resource
+ * @func: Interrupt handler
+ * @mem_pool: array to pointer to mem context
+ */
+struct __qdf_device {
+ void *drv;
+ void *drv_hdl;
+ char *drv_name;
+ int irq;
+ struct device *dev;
+ __qdf_resource_t res;
+ __qdf_os_intr func;
+ struct __qdf_mempool_ctxt *mem_pool[MAX_MEM_POOLS];
+};
+typedef struct __qdf_device *__qdf_device_t;
+
+typedef size_t __qdf_size_t;
+typedef off_t __qdf_off_t;
+typedef uint8_t __iomem *__qdf_iomem_t;
+
+typedef uint32_t ath_dma_addr_t;
+
+/**
+ * typedef __qdf_segment_t - segment of memory
+ * @daddr: dma address
+ * @len: lenght of segment
+ */
+typedef struct __qdf_segment {
+ dma_addr_t daddr;
+ uint32_t len;
+} __qdf_segment_t;
+
+/**
+ * struct __qdf_dma_map - dma map of memory
+ * @mapped: mapped address
+ * @nsegs: number of segments
+ * @coherent: coherency status
+ * @seg: segments of memory (up to __QDF_MAX_SCATTER)
+ */
+struct __qdf_dma_map {
+	uint32_t mapped;
+	uint32_t nsegs;
+	uint32_t coherent;
+	__qdf_segment_t seg[__QDF_MAX_SCATTER];
+};
+typedef struct __qdf_dma_map *__qdf_dma_map_t;
+/*
+ * NOTE: the duplicate "typedef uint32_t ath_dma_addr_t;" that followed
+ * here was removed — it is already declared earlier in this header, and
+ * repeating a typedef is a redefinition error before C11.
+ */
+
+/**
+ * __qdf_net_wireless_evcode - enum for event code
+ * @__QDF_IEEE80211_ASSOC: association event code
+ * @__QDF_IEEE80211_REASSOC: reassociation event code
+ * @__QDF_IEEE80211_DISASSOC: disassociation event code
+ * @__QDF_IEEE80211_JOIN: join event code
+ * @__QDF_IEEE80211_LEAVE: leave event code
+ * @__QDF_IEEE80211_SCAN: scan event code
+ * @__QDF_IEEE80211_REPLAY: replay event code
+ * @__QDF_IEEE80211_MICHAEL:michael event code
+ * @__QDF_IEEE80211_REJOIN: rejoin event code
+ * @__QDF_CUSTOM_PUSH_BUTTON: push button event code
+ */
+enum __qdf_net_wireless_evcode {
+ __QDF_IEEE80211_ASSOC = SIOCGIWAP,
+ __QDF_IEEE80211_REASSOC = IWEVCUSTOM,
+ __QDF_IEEE80211_DISASSOC = SIOCGIWAP,
+ __QDF_IEEE80211_JOIN = IWEVREGISTERED,
+ __QDF_IEEE80211_LEAVE = IWEVEXPIRED,
+ __QDF_IEEE80211_SCAN = SIOCGIWSCAN,
+ __QDF_IEEE80211_REPLAY = IWEVCUSTOM,
+ __QDF_IEEE80211_MICHAEL = IWEVCUSTOM,
+ __QDF_IEEE80211_REJOIN = IWEVCUSTOM,
+ __QDF_CUSTOM_PUSH_BUTTON = IWEVCUSTOM,
+};
+
+#define __qdf_print printk
+#define __qdf_vprint vprintk
+#define __qdf_snprint snprintf
+#define __qdf_vsnprint vsnprintf
+
+#define __QDF_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL
+#define __QDF_DMA_TO_DEVICE DMA_TO_DEVICE
+#define __QDF_DMA_FROM_DEVICE DMA_FROM_DEVICE
+#define __qdf_inline inline
+
+/*
+ * 1. GNU C/C++ Compiler
+ *
+ * How to detect gcc : __GNUC__
+ * How to detect gcc version :
+ * major version : __GNUC__ (2 = 2.x, 3 = 3.x, 4 = 4.x)
+ * minor version : __GNUC_MINOR__
+ *
+ * 2. Microsoft C/C++ Compiler
+ *
+ * How to detect msc : _MSC_VER
+ * How to detect msc version :
+ * _MSC_VER (1200 = MSVC 6.0, 1300 = MSVC 7.0, ...)
+ *
+ */
+
+/*
+ * MACROs to help with compiler and OS specifics. May need to get a little
+ * more sophisticated than this and define these to specific 'VERSIONS' of
+ * the compiler and OS. Until we have a need for that, lets go with this
+ */
+#if defined(_MSC_VER)
+
+#define QDF_COMPILER_MSC
+/* assuming that if we build with MSC, OS is WinMobile */
+#define QDF_OS_WINMOBILE
+
+#elif defined(__GNUC__)
+
+#define QDF_COMPILER_GNUC
+#define QDF_OS_LINUX /* assuming if building with GNUC, OS is Linux */
+
+#endif
+
+#if defined(QDF_COMPILER_MSC)
+
+
+/*
+ * Does nothing on Windows. packing individual structs is not
+ * supported on the Windows compiler
+ */
+#define QDF_PACK_STRUCT_1
+#define QDF_PACK_STRUCT_2
+#define QDF_PACK_STRUCT_4
+#define QDF_PACK_STRUCT_8
+#define QDF_PACK_STRUCT_16
+
+#elif defined(QDF_COMPILER_GNUC)
+
+#else
+#error "Compiling with an unknown compiler!!"
+#endif
+
+#endif /* __I_QDF_TYPES_H */
diff --git a/qdf/linux/src/i_qdf_util.h b/qdf/linux/src/i_qdf_util.h
new file mode 100644
index 000000000000..94aa20c94ae2
--- /dev/null
+++ b/qdf/linux/src/i_qdf_util.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_util.h
+ * This file provides OS dependent API's.
+ */
+
+#ifndef _I_QDF_UTIL_H
+#define _I_QDF_UTIL_H
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <errno.h>
+
+#include <linux/random.h>
+
+#include <qdf_types.h>
+#include <qdf_status.h>
+#include <asm/byteorder.h>
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(3, 3, 8)
+#include <asm/system.h>
+#else
+#if defined(__LINUX_MIPS32_ARCH__) || defined(__LINUX_MIPS64_ARCH__)
+#include <asm/dec/system.h>
+#else
+#endif
+#endif
+
+#include <qdf_types.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#ifdef QCA_PARTNER_PLATFORM
+#include "ath_carr_pltfrm.h"
+#else
+#include <linux/byteorder/generic.h>
+#endif
+
+/*
+ * Generic compiler-dependent macros if defined by the OS
+ */
+#define __qdf_unlikely(_expr) unlikely(_expr)
+#define __qdf_likely(_expr) likely(_expr)
+
+/**
+ * __qdf_status_to_os_return() - translates qdf_status types to linux return types
+ * @status: status to translate
+ *
+ * Translates error types that linux may want to handle specially.
+ *
+ * Return: 0 for QDF_STATUS_SUCCESS, otherwise the negative errno that
+ * most closely matches the QDF_STATUS; defaults to -EPERM for any
+ * unrecognized status code.
+ */
+static inline int __qdf_status_to_os_return(QDF_STATUS status)
+{
+	switch (status) {
+	case QDF_STATUS_SUCCESS:
+		return 0;
+	/* both "resources" and "busy" map onto the same errno */
+	case QDF_STATUS_E_RESOURCES:
+	case QDF_STATUS_E_BUSY:
+		return -EBUSY;
+	case QDF_STATUS_E_NOMEM:
+		return -ENOMEM;
+	case QDF_STATUS_E_AGAIN:
+		return -EAGAIN;
+	case QDF_STATUS_E_INVAL:
+		return -EINVAL;
+	case QDF_STATUS_E_FAULT:
+		return -EFAULT;
+	case QDF_STATUS_E_ALREADY:
+		return -EALREADY;
+	case QDF_STATUS_E_BADMSG:
+		return -EBADMSG;
+	case QDF_STATUS_E_CANCELED:
+		return -ECANCELED;
+	case QDF_STATUS_E_ABORTED:
+		return -ECONNABORTED;
+	case QDF_STATUS_E_PERM:
+		return -EPERM;
+	case QDF_STATUS_E_EXISTS:
+		return -EEXIST;
+	case QDF_STATUS_E_NOENT:
+		return -ENOENT;
+	case QDF_STATUS_E_E2BIG:
+		return -E2BIG;
+	case QDF_STATUS_E_NOSPC:
+		return -ENOSPC;
+	case QDF_STATUS_E_ADDRNOTAVAIL:
+		return -EADDRNOTAVAIL;
+	case QDF_STATUS_E_ENXIO:
+		return -ENXIO;
+	case QDF_STATUS_E_NETDOWN:
+		return -ENETDOWN;
+	case QDF_STATUS_E_IO:
+		return -EIO;
+	case QDF_STATUS_E_NETRESET:
+		return -ENETRESET;
+	default:
+		return -EPERM;
+	}
+}
+
+
+/**
+ * __qdf_set_macaddr_broadcast() - set a QDF MacAddress to the 'broadcast'
+ * @mac_addr: pointer to the qdf MacAddress to set to broadcast
+ *
+ * Fills the QDF MacAddress with the broadcast address, i.e. every one
+ * of its QDF_MAC_ADDR_SIZE bytes set to 0xFF.
+ *
+ * Return: none
+ */
+static inline void __qdf_set_macaddr_broadcast(struct qdf_mac_addr *mac_addr)
+{
+	uint8_t *raw = (uint8_t *)mac_addr;
+	int i;
+
+	for (i = 0; i < QDF_MAC_ADDR_SIZE; i++)
+		raw[i] = 0xff;
+}
+
+/**
+ * __qdf_zero_macaddr() - zero out a MacAddress
+ * @mac_addr: pointer to the struct qdf_mac_addr to zero.
+ *
+ * Clears every one of the QDF_MAC_ADDR_SIZE bytes of the given
+ * QDF MacAddress.
+ *
+ * Return: none
+ */
+static inline void __qdf_zero_macaddr(struct qdf_mac_addr *mac_addr)
+{
+	uint8_t *raw = (uint8_t *)mac_addr;
+	int i;
+
+	for (i = 0; i < QDF_MAC_ADDR_SIZE; i++)
+		raw[i] = 0;
+}
+
+/**
+ * __qdf_is_macaddr_equal() - compare two QDF MacAddress
+ * @mac_addr1: Pointer to one qdf MacAddress to compare
+ * @mac_addr2: Pointer to the other qdf MacAddress to compare
+ *
+ * Byte-wise comparison of the two addresses over QDF_MAC_ADDR_SIZE bytes.
+ *
+ * Return: true if the MacAddresses are equal, false otherwise
+ */
+static inline bool __qdf_is_macaddr_equal(struct qdf_mac_addr *mac_addr1,
+					  struct qdf_mac_addr *mac_addr2)
+{
+	return !memcmp(mac_addr1, mac_addr2, QDF_MAC_ADDR_SIZE);
+}
+
+/**
+ * qdf_in_interrupt - returns true if in interrupt context
+ */
+#define qdf_in_interrupt in_interrupt
+
+/**
+ * @brief memory barriers.
+ */
+#define __qdf_min(_a, _b) ((_a) < (_b) ? _a : _b)
+#define __qdf_max(_a, _b) ((_a) > (_b) ? _a : _b)
+
+/**
+ * @brief Assert
+ */
+#define __qdf_assert(expr) do { \
+ if (unlikely(!(expr))) { \
+ pr_err("Assertion failed! %s:%s %s:%d\n", \
+ # expr, __func__, __FILE__, __LINE__); \
+ dump_stack(); \
+ BUG_ON(1); \
+ } \
+} while (0)
+
+/**
+ * @brief Assert
+ */
+#define __qdf_target_assert(expr) do { \
+ if (unlikely(!(expr))) { \
+ qdf_print("Assertion failed! %s:%s %s:%d\n", \
+ #expr, __FUNCTION__, __FILE__, __LINE__); \
+ dump_stack(); \
+ panic("Take care of the TARGET ASSERT first\n"); \
+ } \
+} while (0)
+
+#define __qdf_container_of(ptr, type, member) container_of(ptr, type, member)
+
+/*
+ * Byte-order helpers.  Each mapping is defined exactly once here; the
+ * original listing defined __qdf_cpu_to_le64 and __qdf_le16_to_cpu twice.
+ */
+#define __qdf_ntohs ntohs
+#define __qdf_ntohl ntohl
+
+#define __qdf_htons htons
+#define __qdf_htonl htonl
+
+#define __qdf_cpu_to_le16 cpu_to_le16
+#define __qdf_cpu_to_le32 cpu_to_le32
+#define __qdf_cpu_to_le64 cpu_to_le64
+
+#define __qdf_le16_to_cpu le16_to_cpu
+#define __qdf_le32_to_cpu le32_to_cpu
+#define __qdf_le64_to_cpu le64_to_cpu
+
+#define __qdf_be32_to_cpu be32_to_cpu
+#define __qdf_be64_to_cpu be64_to_cpu
+
+/**
+ * @brief memory barriers.
+ */
+#define __qdf_wmb() wmb()
+#define __qdf_rmb() rmb()
+#define __qdf_mb() mb()
+
+#endif /*_I_QDF_UTIL_H*/
diff --git a/qdf/linux/src/qdf_defer.c b/qdf/linux/src/qdf_defer.c
new file mode 100644
index 000000000000..de3678bc1fde
--- /dev/null
+++ b/qdf/linux/src/qdf_defer.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: qdf_defer.c
+ * This file provides OS dependent deferred API's.
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+
+#include "i_qdf_defer.h"
+
+/**
+ * __qdf_defer_func() - defer work handler
+ * @work: Pointer to defer work
+ *
+ * Recovers the enclosing __qdf_work_t from the embedded work_struct and
+ * invokes the registered callback; logs an error if none was set.
+ *
+ * Return: none
+ */
+void __qdf_defer_func(struct work_struct *work)
+{
+	__qdf_work_t *ctx = container_of(work, __qdf_work_t, work);
+
+	if (ctx->fn)
+		ctx->fn(ctx->arg);
+	else
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "No callback registered !!");
+}
+EXPORT_SYMBOL(__qdf_defer_func);
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 19)
+/**
+ * __qdf_defer_delayed_func() - defer work handler (pre-2.6.20 stub)
+ * @dwork: Pointer to defer work
+ *
+ * No-op on kernels that predate the delayed_work/work_struct split.
+ *
+ * Return: none
+ */
+void
+__qdf_defer_delayed_func(struct work_struct *dwork)
+{
+	return;
+}
+#else
+/**
+ * __qdf_defer_delayed_func() - delayed defer work handler
+ * @dwork: work_struct embedded inside a __qdf_delayed_work_t's dwork
+ *
+ * Recovers the enclosing __qdf_delayed_work_t and invokes the registered
+ * callback; logs an error if none was set.
+ *
+ * Return: none
+ */
+void
+__qdf_defer_delayed_func(struct work_struct *dwork)
+{
+	__qdf_delayed_work_t *ctx = container_of(dwork, __qdf_delayed_work_t,
+						 dwork.work);
+	if (ctx->fn == NULL) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "BugCheck: Callback is not initilized while creating delayed work queue");
+		return;
+	}
+	ctx->fn(ctx->arg);
+}
+#endif
+EXPORT_SYMBOL(__qdf_defer_delayed_func);
diff --git a/qdf/linux/src/qdf_event.c b/qdf/linux/src/qdf_event.c
new file mode 100644
index 000000000000..53a159961382
--- /dev/null
+++ b/qdf/linux/src/qdf_event.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: qdf_event.c
+ *
+ * This source file contains linux specific definitions for QDF event APIs
+ * The APIs mentioned in this file are used for initializing, setting,
+ * resetting, destroying an event and waiting on an occurance of an event
+ * among multiple events.
+ */
+
+/* Include Files */
+#include "qdf_event.h"
+
+/* Function Definitions and Documentation */
+
+/**
+ * qdf_event_create() - initializes a QDF event
+ * @event: Pointer to the opaque event object to initialize
+ *
+ * Initializes the given event and marks it via a magic cookie so other
+ * event APIs can detect uninitialized objects.  After a successful call
+ * the event is initialized and not signalled.  Initializing an event
+ * that already carries the cookie fails with QDF_STATUS_E_BUSY.
+ *
+ * Return: QDF status
+ */
+QDF_STATUS qdf_event_create(qdf_event_t *event)
+{
+	/* reject null pointers */
+	if (!event) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "NULL event passed into %s", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	/* reject double initialization */
+	if (event->cookie == LINUX_EVENT_COOKIE) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "Initialized event passed into %s", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_BUSY;
+	}
+
+	init_completion(&event->complete);
+	event->cookie = LINUX_EVENT_COOKIE;
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_event_create);
+
+/**
+ * qdf_event_set() - sets a QDF event
+ * @event: The event to set to the signalled state
+ *
+ * Moves the event into the signalled state.  Any thread blocked in
+ * qdf_wait_single_event() on this event becomes runnable once the
+ * underlying completion is signalled.
+ *
+ * Return: QDF status
+ */
+QDF_STATUS qdf_event_set(qdf_event_t *event)
+{
+	/* reject null pointers */
+	if (!event) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "NULL event passed into %s", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	/* reject events that were never initialized */
+	if (event->cookie != LINUX_EVENT_COOKIE) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "Uninitialized event passed into %s", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	complete(&event->complete);
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_event_set);
+
+/**
+ * qdf_event_reset() - resets a QDF event
+ * @event: The event to set to the NOT signalled state
+ *
+ * The state of the specified event is set to 'NOT signalled' by calling
+ * qdf_event_reset(). The state of the event remains NOT signalled until an
+ * explicit call to qdf_event_set().
+ *
+ * This function sets the event to a NOT signalled state even if the event was
+ * signalled multiple times before being signaled.
+ *
+ * Return: QDF status
+ */
+QDF_STATUS qdf_event_reset(qdf_event_t *event)
+{
+	/* check for null pointer */
+	if (NULL == event) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "NULL event passed into %s", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	/* check to make sure it is an 'already initialized' event */
+	if (LINUX_EVENT_COOKIE != event->cookie) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "Uninitialized event passed into %s", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/*
+	 * (re)initialize the completion.  NOTE(review): INIT_COMPLETION was
+	 * replaced by reinit_completion() in kernel 3.13 — confirm the
+	 * minimum kernel version this file must build against.
+	 */
+	INIT_COMPLETION(event->complete);
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_event_reset);
+
+/**
+ * qdf_event_destroy() - Destroys a QDF event
+ * @event: The event object to be destroyed.
+ *
+ * Wakes any remaining waiters, then zeroes the event object, which also
+ * clears the initialization cookie.  After a successful return the event
+ * is, in effect, uninitialized; subsequent calls such as qdf_event_set()
+ * on it will fail until qdf_event_create() is called again.
+ *
+ * Return: QDF status
+ */
+QDF_STATUS qdf_event_destroy(qdf_event_t *event)
+{
+	/* check for null pointer */
+	if (NULL == event) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "NULL event passed into %s", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	/* check to make sure it is an 'already initialized' event */
+	if (LINUX_EVENT_COOKIE != event->cookie) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "Uninitialized event passed into %s", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* make sure nobody is waiting on the event */
+	complete_all(&event->complete);
+
+	/* destroy the event: zeroing also invalidates the cookie */
+	memset(event, 0, sizeof(qdf_event_t));
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_event_destroy);
+
+/**
+ * qdf_wait_single_event() - Waits for a single event to be set.
+ * This API waits for the event to be set.
+ *
+ * @event: Pointer to an event to wait on.
+ * @timeout: Timeout value (in milliseconds). This function returns
+ * if this interval elapses, regardless if any of the events have
+ * been set. A timeout of 0 is treated as invalid: the function
+ * asserts and returns QDF_STATUS_E_FAULT (infinite waits are not
+ * supported by this implementation).
+ *
+ * Must not be called from interrupt context.
+ *
+ * Return: QDF status
+ */
+QDF_STATUS qdf_wait_single_event(qdf_event_t *event, uint32_t timeout)
+{
+	if (in_interrupt()) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s cannot be called from interrupt context!!!",
+			  __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	/* check for null pointer */
+	if (NULL == event) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "NULL event passed into %s", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	/* check if cookie is same as that of initialized event */
+	if (LINUX_EVENT_COOKIE != event->cookie) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "Uninitialized event passed into %s", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	if (timeout) {
+		long ret;
+		/* wait_for_completion_timeout returns 0 only on timeout */
+		ret = wait_for_completion_timeout(&event->complete,
+						  msecs_to_jiffies(timeout));
+		if (0 >= ret)
+			return QDF_STATUS_E_TIMEOUT;
+	} else {
+		/* zero timeout is rejected rather than waiting forever */
+		QDF_ASSERT(0);
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "Zero timeout value passed into %s", __func__);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_wait_single_event);
diff --git a/qdf/linux/src/qdf_list.c b/qdf/linux/src/qdf_list.c
new file mode 100644
index 000000000000..9412a1ac977b
--- /dev/null
+++ b/qdf/linux/src/qdf_list.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: qdf_list.c
+ *
+ * QCA driver framework list manipulation APIs. QDF linked list
+ * APIs are NOT thread safe so make sure to use appropriate locking mechanisms
+ * to assure operations on the list are thread safe.
+ */
+
+/* Include files */
+#include <qdf_list.h>
+
+/* Function declarations and documentation */
+
+/**
+ * qdf_list_insert_front() - link a node in at the head of the list
+ * @list: Pointer to list
+ * @node: Pointer to the node to insert
+ *
+ * QDF lists are not internally synchronized; the caller provides locking.
+ *
+ * Return: QDF_STATUS_SUCCESS (always)
+ */
+QDF_STATUS qdf_list_insert_front(qdf_list_t *list, qdf_list_node_t *node)
+{
+	++list->count;
+	list_add(node, &list->anchor);
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_list_insert_front);
+
+/**
+ * qdf_list_insert_back() - link a node in at the tail of the list
+ * @list: Pointer to list
+ * @node: Pointer to the node to insert
+ *
+ * QDF lists are not internally synchronized; the caller provides locking.
+ *
+ * Return: QDF_STATUS_SUCCESS (always)
+ */
+QDF_STATUS qdf_list_insert_back(qdf_list_t *list, qdf_list_node_t *node)
+{
+	++list->count;
+	list_add_tail(node, &list->anchor);
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_list_insert_back);
+
+/**
+ * qdf_list_insert_back_size() - link a node in at the tail of the list and
+ * report the resulting list size
+ * @list: Pointer to list
+ * @node: Pointer to the node to insert
+ * @p_size: Out parameter receiving the post-insert element count
+ *
+ * QDF lists are not internally synchronized; the caller provides locking.
+ *
+ * Return: QDF_STATUS_SUCCESS (always)
+ */
+QDF_STATUS qdf_list_insert_back_size(qdf_list_t *list,
+				     qdf_list_node_t *node, uint32_t *p_size)
+{
+	list_add_tail(node, &list->anchor);
+	*p_size = ++list->count;
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_list_insert_back_size);
+
+/**
+ * qdf_list_remove_front() - unlink and return the first node of the list
+ * @list: Pointer to list
+ * @node2: Out parameter receiving the removed node
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_EMPTY when the
+ * list has no elements
+ */
+QDF_STATUS qdf_list_remove_front(qdf_list_t *list, qdf_list_node_t **node2)
+{
+	qdf_list_node_t *head;
+
+	if (list_empty(&list->anchor))
+		return QDF_STATUS_E_EMPTY;
+
+	head = list->anchor.next;
+	list_del(head);
+	list->count--;
+	*node2 = head;
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_list_remove_front);
+
+/**
+ * qdf_list_remove_back() - unlink and return the last node of the list
+ * @list: Pointer to list
+ * @node2: Out parameter receiving the removed node
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_EMPTY when the
+ * list has no elements
+ */
+QDF_STATUS qdf_list_remove_back(qdf_list_t *list, qdf_list_node_t **node2)
+{
+	qdf_list_node_t *tail;
+
+	if (list_empty(&list->anchor))
+		return QDF_STATUS_E_EMPTY;
+
+	tail = list->anchor.prev;
+	list_del(tail);
+	list->count--;
+	*node2 = tail;
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_list_remove_back);
+
+/**
+ * qdf_list_remove_node() - unlink a specific node from the list
+ * @list: Pointer to list
+ * @node_to_remove: Pointer to the node to unlink
+ *
+ * The list is walked (O(n)) to verify @node_to_remove is actually a
+ * member before it is unlinked.
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_EMPTY when the
+ * list has no elements, QDF_STATUS_E_INVAL when the node is not on it
+ */
+QDF_STATUS qdf_list_remove_node(qdf_list_t *list,
+				qdf_list_node_t *node_to_remove)
+{
+	qdf_list_node_t *cursor;
+
+	if (list_empty(&list->anchor))
+		return QDF_STATUS_E_EMPTY;
+
+	/* unlink only once membership has been confirmed */
+	list_for_each(cursor, &list->anchor) {
+		if (cursor == node_to_remove) {
+			list_del(node_to_remove);
+			list->count--;
+			return QDF_STATUS_SUCCESS;
+		}
+	}
+
+	return QDF_STATUS_E_INVAL;
+}
+EXPORT_SYMBOL(qdf_list_remove_node);
+
+/**
+ * qdf_list_peek_front() - return the first node without removing it
+ * @list: Pointer to list
+ * @node2: Out parameter receiving the head node
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_EMPTY when the
+ * list has no elements
+ */
+QDF_STATUS qdf_list_peek_front(qdf_list_t *list, qdf_list_node_t **node2)
+{
+	if (list_empty(&list->anchor))
+		return QDF_STATUS_E_EMPTY;
+
+	*node2 = list->anchor.next;
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_list_peek_front);
+
+/**
+ * qdf_list_peek_next() - return the successor of a given node
+ * @list: Pointer to list
+ * @node: Node whose successor is requested; must be a member of @list
+ * @node2: Out parameter receiving the successor node
+ *
+ * The list is walked (O(n)) to verify @node is actually a member.
+ *
+ * Return: QDF_STATUS_SUCCESS on success; QDF_STATUS_E_FAULT on NULL
+ * arguments; QDF_STATUS_E_EMPTY when the list is empty or @node is the
+ * last element; QDF_STATUS_E_INVAL when @node is not on the list
+ */
+QDF_STATUS qdf_list_peek_next(qdf_list_t *list, qdf_list_node_t *node,
+			      qdf_list_node_t **node2)
+{
+	qdf_list_node_t *cursor;
+
+	if (!list || !node || !node2)
+		return QDF_STATUS_E_FAULT;
+
+	if (list_empty(&list->anchor))
+		return QDF_STATUS_E_EMPTY;
+
+	/* confirm membership before dereferencing node->next */
+	list_for_each(cursor, &list->anchor) {
+		if (cursor != node)
+			continue;
+
+		/* the anchor is the sentinel: no successor past the tail */
+		if (node->next == &list->anchor)
+			return QDF_STATUS_E_EMPTY;
+
+		*node2 = node->next;
+		return QDF_STATUS_SUCCESS;
+	}
+
+	return QDF_STATUS_E_INVAL;
+}
+EXPORT_SYMBOL(qdf_list_peek_next);
+
+/**
+ * qdf_list_empty() - check whether the list has any elements
+ * @list: pointer to the list
+ *
+ * Thin wrapper over the kernel's list_empty() on the list anchor.
+ *
+ * Return: true if the list is empty and false otherwise.
+ */
+bool qdf_list_empty(qdf_list_t *list)
+{
+	return list_empty(&list->anchor);
+}
+EXPORT_SYMBOL(qdf_list_empty);
diff --git a/qdf/linux/src/qdf_lock.c b/qdf/linux/src/qdf_lock.c
new file mode 100644
index 000000000000..0afb06790bf4
--- /dev/null
+++ b/qdf/linux/src/qdf_lock.c
@@ -0,0 +1,660 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include <linux/module.h>
+#include <qdf_lock.h>
+#include <qdf_trace.h>
+
+#include <qdf_types.h>
+#ifdef CONFIG_CNSS
+#include <net/cnss.h>
+#endif
+#ifdef CONFIG_MCL
+#include <i_host_diag_core_event.h>
+#include <cds_api.h>
+#endif
+#include <ani_global.h>
+#include <i_qdf_lock.h>
+#include <hif.h>
+
+/* Function declarations and documentation */
+typedef __qdf_mutex_t qdf_mutex_t;
+
+/**
+ * qdf_mutex_create() - initialize a QDF mutex
+ * @lock: mutex to initialize
+ *
+ * Must be called from process context (mutex init/use may sleep).
+ *
+ * Return: QDF_STATUS
+ * =0 success
+ * else fail status
+ */
+QDF_STATUS qdf_mutex_create(qdf_mutex_t *lock)
+{
+	/* check for invalid pointer */
+	if (lock == NULL) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: NULL pointer passed in", __func__);
+		return QDF_STATUS_E_FAULT;
+	}
+	/* check for 'already initialized' lock.
+	 * NOTE(review): this reads lock->cookie before the lock has been
+	 * initialized, so it relies on callers passing zeroed or
+	 * previously-destroyed storage -- confirm that assumption holds.
+	 */
+	if (LINUX_LOCK_COOKIE == lock->cookie) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: already initialized lock", __func__);
+		return QDF_STATUS_E_BUSY;
+	}
+
+	if (in_interrupt()) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s cannot be called from interrupt context!!!",
+			  __func__);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	/* initialize new lock: stamp the cookie and clear the ownership
+	 * bookkeeping used by qdf_mutex_acquire()/qdf_mutex_release()
+	 */
+	mutex_init(&lock->m_lock);
+	lock->cookie = LINUX_LOCK_COOKIE;
+	lock->state = LOCK_RELEASED;
+	lock->process_id = 0;
+	lock->refcount = 0;
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_mutex_create);
+
+/**
+ * qdf_mutex_acquire() - acquire a QDF lock
+ * @lock: Pointer to the opaque lock object to acquire
+ *
+ * A lock object is acquired by calling qdf_mutex_acquire(). If the lock
+ * is already locked, the calling thread shall block until the lock becomes
+ * available. This operation shall return with the lock object referenced by
+ * lock in the locked state with the calling thread as its owner.
+ *
+ * Recursive acquisition by the owning thread only bumps the refcount;
+ * the kernel mutex is taken once. Must not be called from interrupt
+ * context.
+ *
+ * Return:
+ * QDF_STATUS_SUCCESS: lock was successfully acquired
+ * QDF failure reason codes: lock is not initialized and can't be used
+ */
+QDF_STATUS qdf_mutex_acquire(qdf_mutex_t *lock)
+{
+	int rc;
+	/* check for invalid pointer */
+	if (lock == NULL) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: NULL pointer passed in", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
+	}
+	/* check if lock refers to an initialized object */
+	if (LINUX_LOCK_COOKIE != lock->cookie) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: uninitialized lock", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	if (in_interrupt()) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s cannot be called from interrupt context!!!",
+			  __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
+	}
+	/* recursive acquisition by the current owner: refcount only */
+	if ((lock->process_id == current->pid) &&
+	    (lock->state == LOCK_ACQUIRED)) {
+		lock->refcount++;
+#ifdef QDF_NESTED_LOCK_DEBUG
+		/* %pK for the pointer: %x with a pointer argument is a
+		 * printf format mismatch (breaks on 64-bit)
+		 */
+		pe_err("%s: %pK %d %d", __func__, lock, current->pid,
+			lock->refcount);
+#endif
+		return QDF_STATUS_SUCCESS;
+	}
+	/* acquire a Lock */
+	mutex_lock(&lock->m_lock);
+	rc = mutex_is_locked(&lock->m_lock);
+	if (rc == 0) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: unable to lock mutex (rc = %d)", __func__, rc);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAILURE;
+	}
+#ifdef QDF_NESTED_LOCK_DEBUG
+	pe_err("%s: %pK %d", __func__, lock, current->pid);
+#endif
+	if (LOCK_DESTROYED != lock->state) {
+		lock->process_id = current->pid;
+		lock->refcount++;
+		lock->state = LOCK_ACQUIRED;
+		return QDF_STATUS_SUCCESS;
+	} else {
+		/* lock is already destroyed */
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Lock is already destroyed", __func__);
+		mutex_unlock(&lock->m_lock);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAILURE;
+	}
+}
+EXPORT_SYMBOL(qdf_mutex_acquire);
+
+/**
+ * qdf_mutex_release() - release a QDF lock
+ * @lock: Pointer to the opaque lock object to be released
+ *
+ * qdf_mutex_release() function shall release the lock object
+ * referenced by 'lock'.
+ *
+ * Only the owning thread may release; recursive acquisitions are
+ * unwound one refcount at a time and the kernel mutex is unlocked only
+ * when the refcount drops to zero. If a thread attempts to release a
+ * lock that it has not locked or that is not initialized, an error is
+ * returned.
+ *
+ * Return:
+ * QDF_STATUS_SUCCESS: lock was successfully released
+ * QDF failure reason codes: lock is not initialized and can't be used
+ */
+QDF_STATUS qdf_mutex_release(qdf_mutex_t *lock)
+{
+	/* check for invalid pointer */
+	if (lock == NULL) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: NULL pointer passed in", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	/* check if lock refers to an uninitialized object */
+	if (LINUX_LOCK_COOKIE != lock->cookie) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: uninitialized lock", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	if (in_interrupt()) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s cannot be called from interrupt context!!!",
+			  __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	/* Check thread ID of caller against thread ID
+	 * of the thread which acquired the lock
+	 */
+	if (lock->process_id != current->pid) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: current task pid does not match original task pid!!",
+			  __func__);
+#ifdef QDF_NESTED_LOCK_DEBUG
+		pe_err("%s: Lock held by=%d being released by=%d",
+		       __func__, lock->process_id, current->pid);
+#endif
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_PERM;
+	}
+	if ((lock->process_id == current->pid) &&
+	    (lock->state == LOCK_ACQUIRED)) {
+		if (lock->refcount > 0)
+			lock->refcount--;
+	}
+#ifdef QDF_NESTED_LOCK_DEBUG
+	/* %pK for the pointer: %x with a pointer argument is a printf
+	 * format mismatch (breaks on 64-bit)
+	 */
+	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, "%s: %pK %d %d", __func__, lock, lock->process_id,
+		  lock->refcount);
+#endif
+	/* nested acquisitions still outstanding: keep the mutex held */
+	if (lock->refcount)
+		return QDF_STATUS_SUCCESS;
+
+	lock->process_id = 0;
+	lock->refcount = 0;
+	lock->state = LOCK_RELEASED;
+	/* release a Lock */
+	mutex_unlock(&lock->m_lock);
+#ifdef QDF_NESTED_LOCK_DEBUG
+	/* bug fix: the original call omitted the __func__ argument, so
+	 * %s consumed the lock pointer and the remaining arguments were
+	 * shifted
+	 */
+	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, "%s: Freeing lock %pK %d %d", __func__, lock, lock->process_id,
+		  lock->refcount);
+#endif
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_mutex_release);
+
+/**
+ * qdf_wake_lock_name() - return the name of a wakelock
+ * @lock: Pointer to the wakelock
+ *
+ * Return: Pointer to the name if it is valid or a default string
+ */
+static const char *qdf_wake_lock_name(qdf_wake_lock_t *lock)
+{
+#if defined CONFIG_CNSS
+	if (lock->name)
+		return lock->name;
+#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
+	if (lock->ws.name)
+		return lock->ws.name;
+#endif
+	return "UNNAMED_WAKELOCK";
+}
+/* no EXPORT_SYMBOL: the function is static (file-local), and exporting
+ * a static symbol is invalid and rejected by kbuild
+ */
+
+/**
+ * qdf_wake_lock_create() - initializes a wake lock
+ * @lock: The wake lock to initialize
+ * @name: Name of wake lock
+ *
+ * On builds without CNSS or wakelock support this is a no-op.
+ *
+ * Return: QDF_STATUS_SUCCESS (always)
+ */
+QDF_STATUS qdf_wake_lock_create(qdf_wake_lock_t *lock, const char *name)
+{
+#if defined CONFIG_CNSS
+	cnss_pm_wake_lock_init(lock, name);
+#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
+	wake_lock_init(lock, WAKE_LOCK_SUSPEND, name);
+#endif
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_wake_lock_create);
+
+/**
+ * qdf_wake_lock_acquire() - acquires a wake lock
+ * @lock: The wake lock to acquire
+ * @reason: Reason for taking the wakelock (used for diag logging only)
+ *
+ * On builds without CNSS or wakelock support this is a no-op.
+ *
+ * Return: QDF_STATUS_SUCCESS (always)
+ */
+QDF_STATUS qdf_wake_lock_acquire(qdf_wake_lock_t *lock, uint32_t reason)
+{
+#ifdef CONFIG_MCL
+	/* record the wakelock event in the host diag log */
+	host_diag_log_wlock(reason, qdf_wake_lock_name(lock),
+			    WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT,
+			    WIFI_POWER_EVENT_WAKELOCK_TAKEN);
+#endif
+#if defined CONFIG_CNSS
+	cnss_pm_wake_lock(lock);
+#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
+	wake_lock(lock);
+#endif
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_wake_lock_acquire);
+
+/**
+ * qdf_wake_lock_timeout_acquire() - acquires a wake lock with a timeout
+ * @lock: The wake lock to acquire
+ * @msec: Timeout in milliseconds after which the wakelock auto-releases
+ * @reason: Reason for taking the wakelock (used for diag logging only)
+ *
+ * On builds without CNSS or wakelock support this is a no-op.
+ *
+ * Return: QDF_STATUS_SUCCESS (always)
+ */
+QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock, uint32_t msec,
+					 uint32_t reason)
+{
+	/* Wakelock for Rx is frequent.
+	 * It is reported only during active debug
+	 */
+#ifdef CONFIG_MCL
+	if (((cds_get_ring_log_level(RING_ID_WAKELOCK) >= WLAN_LOG_LEVEL_ACTIVE)
+	     && (WIFI_POWER_EVENT_WAKELOCK_HOLD_RX == reason)) ||
+	    (WIFI_POWER_EVENT_WAKELOCK_HOLD_RX != reason)) {
+		host_diag_log_wlock(reason, qdf_wake_lock_name(lock), msec,
+				    WIFI_POWER_EVENT_WAKELOCK_TAKEN);
+	}
+#endif
+#if defined CONFIG_CNSS
+	cnss_pm_wake_lock_timeout(lock, msec);
+#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
+	wake_lock_timeout(lock, msecs_to_jiffies(msec));
+#endif
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_wake_lock_timeout_acquire);
+
+/**
+ * qdf_wake_lock_release() - releases a wake lock
+ * @lock: the wake lock to release
+ * @reason: Reason for the release (used for diag logging only)
+ *
+ * On builds without CNSS or wakelock support this is a no-op.
+ *
+ * Return: QDF_STATUS_SUCCESS (always)
+ */
+QDF_STATUS qdf_wake_lock_release(qdf_wake_lock_t *lock, uint32_t reason)
+{
+#ifdef CONFIG_MCL
+	/* record the release in the host diag log */
+	host_diag_log_wlock(reason, qdf_wake_lock_name(lock),
+			    WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT,
+			    WIFI_POWER_EVENT_WAKELOCK_RELEASED);
+#endif
+#if defined CONFIG_CNSS
+	cnss_pm_wake_lock_release(lock);
+#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
+	wake_unlock(lock);
+#endif
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_wake_lock_release);
+
+/**
+ * qdf_wake_lock_destroy() - destroys a wake lock
+ * @lock: The wake lock to destroy
+ *
+ * On builds without CNSS or wakelock support this is a no-op.
+ *
+ * Return: QDF_STATUS_SUCCESS (always)
+ */
+QDF_STATUS qdf_wake_lock_destroy(qdf_wake_lock_t *lock)
+{
+#if defined CONFIG_CNSS
+	cnss_pm_wake_lock_destroy(lock);
+#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
+	wake_lock_destroy(lock);
+#endif
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_wake_lock_destroy);
+
+/**
+ * qdf_runtime_pm_get() - do a get operation on the device
+ *
+ * A get operation prevents a runtime suspend until a corresponding put
+ * is done. Intended for use on the data-send path.
+ *
+ * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
+ * THIS API WILL ONLY REQUEST THE RESUME AND NOT TO A GET!!!
+ *
+ * Return: QDF_STATUS_SUCCESS if the bus is up and a get has been issued,
+ * otherwise an error code
+ */
+QDF_STATUS qdf_runtime_pm_get(void)
+{
+	void *hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
+
+	if (hif_ctx == NULL) {
+		QDF_ASSERT(0);
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: HIF context is null!", __func__);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	if (hif_pm_runtime_get(hif_ctx))
+		return QDF_STATUS_E_FAILURE;
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_runtime_pm_get);
+
+/**
+ * qdf_runtime_pm_put() - do a put operation on the device
+ *
+ * A put operation allows a runtime suspend after a corresponding get
+ * was done. Intended for use on the data-send path.
+ *
+ * This api will return a failure if the hif module hasn't been
+ * initialized.
+ *
+ * Return: QDF_STATUS_SUCCESS if the put is performed
+ */
+QDF_STATUS qdf_runtime_pm_put(void)
+{
+	void *hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
+
+	if (hif_ctx == NULL) {
+		QDF_ASSERT(0);
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: HIF context is null!", __func__);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	if (hif_pm_runtime_put(hif_ctx))
+		return QDF_STATUS_E_FAILURE;
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_runtime_pm_put);
+
+/**
+ * qdf_runtime_pm_prevent_suspend() - prevent a runtime bus suspend
+ * @lock: an opaque context for tracking
+ *
+ * The lock can only be acquired once per lock context and is tracked.
+ *
+ * Return: QDF_STATUS_SUCCESS or failure code
+ */
+QDF_STATUS qdf_runtime_pm_prevent_suspend(qdf_runtime_lock_t lock)
+{
+	void *hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
+
+	if (hif_ctx == NULL) {
+		QDF_ASSERT(0);
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: HIF context is null!", __func__);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	if (hif_pm_runtime_prevent_suspend(hif_ctx, lock))
+		return QDF_STATUS_E_FAILURE;
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_runtime_pm_prevent_suspend);
+
+/**
+ * qdf_runtime_pm_allow_suspend() - allow a runtime bus suspend again
+ * @lock: the same opaque tracking context that was passed to
+ * qdf_runtime_pm_prevent_suspend()
+ *
+ * Releases the prevent-suspend hold taken with that context.
+ *
+ * Return: QDF_STATUS_SUCCESS or failure code.
+ */
+QDF_STATUS qdf_runtime_pm_allow_suspend(qdf_runtime_lock_t lock)
+{
+	void *ol_sc;
+	int ret;
+	ol_sc = cds_get_context(QDF_MODULE_ID_HIF);
+
+	if (ol_sc == NULL) {
+		QDF_ASSERT(0);
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: HIF context is null!", __func__);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	ret = hif_pm_runtime_allow_suspend(ol_sc, lock);
+	if (ret)
+		return QDF_STATUS_E_FAILURE;
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_runtime_pm_allow_suspend);
+
+/**
+ * qdf_runtime_lock_init() - initialize runtime lock
+ * @name: name of the runtime lock
+ *
+ * Initialize a runtime pm lock. This lock can be used
+ * to prevent the runtime pm system from putting the bus
+ * to sleep. Thin wrapper over hif_runtime_lock_init().
+ *
+ * Return: the newly created runtime lock handle
+ */
+qdf_runtime_lock_t qdf_runtime_lock_init(const char *name)
+{
+	return hif_runtime_lock_init(name);
+}
+EXPORT_SYMBOL(qdf_runtime_lock_init);
+
+/**
+ * qdf_runtime_lock_deinit() - deinitialize runtime pm lock
+ * @lock: the lock to deinitialize
+ *
+ * Ensures the lock is released. Frees the runtime lock.
+ *
+ * Return: void
+ */
+void qdf_runtime_lock_deinit(qdf_runtime_lock_t lock)
+{
+	hif_runtime_lock_deinit(cds_get_context(QDF_MODULE_ID_HIF), lock);
+}
+EXPORT_SYMBOL(qdf_runtime_lock_deinit);
+
+/**
+ * qdf_spinlock_acquire() - acquires a spin lock
+ * @lock: Spin lock to acquire
+ *
+ * Plain spin_lock() -- no bh/irq disabling; use the _bh variants for
+ * softirq-shared data.
+ *
+ * Return: QDF_STATUS_SUCCESS (always)
+ */
+QDF_STATUS qdf_spinlock_acquire(qdf_spinlock_t *lock)
+{
+	spin_lock(&lock->spinlock);
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_spinlock_acquire);
+
+
+/**
+ * qdf_spinlock_release() - release a spin lock
+ * @lock: Spin lock to release
+ *
+ * Return:
+ * QDF status success : if wake lock is acquired
+ */
+QDF_STATUS qdf_spinlock_release(qdf_spinlock_t *lock)
+{
+ spin_unlock(&lock->spinlock);
+ return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_spinlock_release);
+
+/**
+ * qdf_mutex_destroy() - destroy a QDF lock
+ * @lock: Pointer to the opaque lock object to be destroyed
+ *
+ * function shall destroy the lock object referenced by lock. After a
+ * successful return from qdf_mutex_destroy()
+ * the lock object becomes, in effect, uninitialized.
+ *
+ * A destroyed lock object can be reinitialized using qdf_mutex_create();
+ * the results of otherwise referencing the object after it has been destroyed
+ * are undefined. Calls to QDF lock functions to manipulate the lock such
+ * as qdf_mutex_acquire() will fail if the lock is destroyed. Therefore,
+ * don't use the lock after it has been destroyed until it has
+ * been re-initialized.
+ *
+ * Return:
+ * QDF_STATUS_SUCCESS: lock was successfully destroyed
+ * QDF failure reason codes: lock is not initialized and can't be used
+ */
+QDF_STATUS qdf_mutex_destroy(qdf_mutex_t *lock)
+{
+	/* check for invalid pointer */
+	if (NULL == lock) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: NULL pointer passed in", __func__);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	if (LINUX_LOCK_COOKIE != lock->cookie) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: uninitialized lock", __func__);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	if (in_interrupt()) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s cannot be called from interrupt context!!!",
+			  __func__);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	/* check if lock is released: trylock succeeds only when no one
+	 * holds the mutex.
+	 * NOTE(review): a waiter blocked in qdf_mutex_acquire() can still
+	 * obtain the mutex after the unlock below; acquire handles this
+	 * via the LOCK_DESTROYED state check -- confirm that is the
+	 * intended protocol.
+	 */
+	if (!mutex_trylock(&lock->m_lock)) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: lock is not released", __func__);
+		return QDF_STATUS_E_BUSY;
+	}
+	/* invalidate the cookie so subsequent API calls reject the lock */
+	lock->cookie = 0;
+	lock->state = LOCK_DESTROYED;
+	lock->process_id = 0;
+	lock->refcount = 0;
+
+	mutex_unlock(&lock->m_lock);
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_mutex_destroy);
+
+/**
+ * qdf_spin_trylock_bh_outline() - spin trylock bottomhalf
+ * @lock: spinlock object
+ *
+ * Out-of-line wrapper around the (possibly inline) qdf_spin_trylock_bh().
+ *
+ * Return: int (nonzero when the lock was obtained)
+ */
+int qdf_spin_trylock_bh_outline(qdf_spinlock_t *lock)
+{
+	return qdf_spin_trylock_bh(lock);
+}
+EXPORT_SYMBOL(qdf_spin_trylock_bh_outline);
+
+/**
+ * qdf_spin_lock_bh_outline() - locks the spinlock in soft irq context
+ * @lock: spinlock object pointer
+ *
+ * Out-of-line wrapper around the (possibly inline) qdf_spin_lock_bh().
+ *
+ * Return: none
+ */
+void qdf_spin_lock_bh_outline(qdf_spinlock_t *lock)
+{
+	qdf_spin_lock_bh(lock);
+}
+EXPORT_SYMBOL(qdf_spin_lock_bh_outline);
+
+/**
+ * qdf_spin_unlock_bh_outline() - unlocks spinlock in soft irq context
+ * @lock: spinlock object pointer
+ *
+ * Out-of-line wrapper around the (possibly inline) qdf_spin_unlock_bh().
+ *
+ * Return: none
+ */
+void qdf_spin_unlock_bh_outline(qdf_spinlock_t *lock)
+{
+	qdf_spin_unlock_bh(lock);
+}
+EXPORT_SYMBOL(qdf_spin_unlock_bh_outline);
diff --git a/qdf/linux/src/qdf_mc_timer.c b/qdf/linux/src/qdf_mc_timer.c
new file mode 100644
index 000000000000..858c325f4412
--- /dev/null
+++ b/qdf/linux/src/qdf_mc_timer.c
@@ -0,0 +1,702 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: qdf_mc_timer
+ * QCA driver framework timer APIs serialized to MC thread
+ */
+
+/* Include Files */
+#include <qdf_mc_timer.h>
+#include <qdf_lock.h>
+#include "qdf_lock.h"
+#include "qdf_list.h"
+#include "qdf_mem.h"
+#ifdef CONFIG_MCL
+#include <cds_mc_timer.h>
+#endif
+/* Preprocessor definitions and constants */
+
+#define LINUX_TIMER_COOKIE 0x12341234
+#define LINUX_INVALID_TIMER_COOKIE 0xfeedface
+#define TMR_INVALID_ID (0)
+
+/* Type declarations */
+
+/* Static Variable Definitions */
+static unsigned int persistent_timer_count;
+static qdf_mutex_t persistent_timer_count_lock;
+
+/* Function declarations and documentation */
+
+/**
+ * qdf_try_allowing_sleep() - clean up timer states after it has been deactivated
+ * @type: timer type
+ *
+ * Clean up timer states after it has been deactivated check and try to allow
+ * sleep after a timer has been stopped or expired.
+ *
+ * NOTE(review): persistent_timer_count is decremented here without
+ * taking persistent_timer_count_lock -- confirm callers serialize this,
+ * otherwise the counter can race.
+ *
+ * Return: none
+ */
+void qdf_try_allowing_sleep(QDF_TIMER_TYPE type)
+{
+	if (QDF_TIMER_TYPE_WAKE_APPS == type) {
+
+		persistent_timer_count--;
+		if (0 == persistent_timer_count) {
+			/* since the number of persistent timers has
+			   decreased from 1 to 0, the timer should allow
+			   sleep
+			   (intentionally empty: hook point for a future
+			   sleep-allow action)
+			 */
+		}
+	}
+}
+EXPORT_SYMBOL(qdf_try_allowing_sleep);
+
+/**
+ * qdf_mc_timer_get_current_state() - fetch the current state of a timer
+ * @timer: Pointer to timer object
+ *
+ * Return: the timer's current QDF_TIMER_STATE; QDF_TIMER_STATE_UNUSED
+ * (after an assert) when @timer is NULL or holds an unrecognized value
+ */
+QDF_TIMER_STATE qdf_mc_timer_get_current_state(qdf_mc_timer_t *timer)
+{
+	QDF_TIMER_STATE state;
+
+	if (NULL == timer) {
+		QDF_ASSERT(0);
+		return QDF_TIMER_STATE_UNUSED;
+	}
+
+	/* only pass through values that are known timer states */
+	state = timer->state;
+	if (state == QDF_TIMER_STATE_STOPPED ||
+	    state == QDF_TIMER_STATE_STARTING ||
+	    state == QDF_TIMER_STATE_RUNNING ||
+	    state == QDF_TIMER_STATE_UNUSED)
+		return state;
+
+	QDF_ASSERT(0);
+	return QDF_TIMER_STATE_UNUSED;
+}
+EXPORT_SYMBOL(qdf_mc_timer_get_current_state);
+
+/**
+ * qdf_timer_module_init() - initializes a QDF timer module.
+ *
+ * This API initializes the QDF timer module. This needs to be called
+ * exactly once prior to using any QDF timers. It creates the mutex
+ * guarding the persistent timer counter.
+ *
+ * Return: none
+ */
+void qdf_timer_module_init(void)
+{
+	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH,
+		  "Initializing the QDF MC timer module");
+	qdf_mutex_create(&persistent_timer_count_lock);
+}
+EXPORT_SYMBOL(qdf_timer_module_init);
+
+#ifdef TIMER_MANAGER
+
+qdf_list_t qdf_timer_list;
+qdf_spinlock_t qdf_timer_list_lock;
+
+static void qdf_timer_clean(void);
+
+/**
+ * qdf_mc_timer_manager_init() - initialize QDF debug timer manager
+ *
+ * Sets up the list (and its guarding spinlock) used to track live
+ * timers for leak reporting.
+ *
+ * Return: none
+ */
+void qdf_mc_timer_manager_init(void)
+{
+	/* capacity of 1000 bounds how many live timers can be tracked */
+	qdf_list_create(&qdf_timer_list, 1000);
+	qdf_spinlock_create(&qdf_timer_list_lock);
+}
+EXPORT_SYMBOL(qdf_mc_timer_manager_init);
+
+/**
+ * qdf_timer_clean() - clean up QDF timer debug functionality
+ *
+ * This API cleans up QDF timer debug functionality and prints which QDF timers
+ * are leaked. This is called during driver unload. Each leaked tracking
+ * node is reported (allocation file/line) and freed.
+ *
+ * Return: none
+ */
+static void qdf_timer_clean(void)
+{
+	uint32_t list_size;
+	qdf_list_node_t *node;
+	QDF_STATUS qdf_status;
+
+	qdf_mc_timer_node_t *timer_node;
+
+	list_size = qdf_list_size(&qdf_timer_list);
+
+	if (!list_size)
+		return;
+	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s: List is not Empty. list_size %d ",
+		  __func__, (int)list_size);
+
+	do {
+		/* hold the list lock only around the removal itself */
+		qdf_spin_lock_irqsave(&qdf_timer_list_lock);
+		qdf_status = qdf_list_remove_front(&qdf_timer_list, &node);
+		qdf_spin_unlock_irqrestore(&qdf_timer_list_lock);
+		if (QDF_STATUS_SUCCESS == qdf_status) {
+			timer_node = (qdf_mc_timer_node_t *) node;
+			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_FATAL,
+				  "timer Leak@ File %s, @Line %d",
+				  timer_node->file_name,
+				  (int)timer_node->line_num);
+			qdf_mem_free(timer_node);
+		}
+	} while (qdf_status == QDF_STATUS_SUCCESS);
+}
+/* no EXPORT_SYMBOL: the function is static (file-local), and exporting
+ * a static symbol is invalid and rejected by kbuild
+ */
+
+/**
+ * qdf_mc_timer_manager_exit() - exit QDF timer debug functionality
+ *
+ * Reports and frees any leaked timer tracking nodes, then destroys the
+ * tracking list. Called during driver unload.
+ * NOTE(review): qdf_timer_list_lock is not destroyed here -- confirm
+ * that is intentional.
+ *
+ * Return: none
+ */
+void qdf_mc_timer_manager_exit(void)
+{
+	qdf_timer_clean();
+	qdf_list_destroy(&qdf_timer_list);
+}
+EXPORT_SYMBOL(qdf_mc_timer_manager_exit);
+#endif
+
+/**
+ * qdf_mc_timer_init() - initialize a QDF timer
+ * @timer: Pointer to timer object
+ * @timer_type: Type of timer
+ * @callback: Callback to be called after timer expiry
+ * @ser_data: User data which will be passed to callback function
+ *
+ * This API initializes a QDF timer object.
+ *
+ * qdf_mc_timer_init() initializes a QDF timer object. A timer must be
+ * initialized by calling qdf_mc_timer_init() before it may be used in
+ * any other timer functions.
+ *
+ * Attempting to initialize timer that is already initialized results in
+ * a failure. A destroyed timer object can be re-initialized with a call to
+ * qdf_mc_timer_init(). The results of otherwise referencing the object
+ * after it has been destroyed are undefined.
+ *
+ * Calls to QDF timer functions to manipulate the timer such
+ * as qdf_mc_timer_set() will fail if the timer is not initialized or has
+ * been destroyed. Therefore, don't use the timer after it has been
+ * destroyed until it has been re-initialized.
+ *
+ * All callback will be executed within the CDS main thread unless it is
+ * initialized from the Tx thread flow, in which case it will be executed
+ * within the tx thread flow.
+ *
+ * Return:
+ * QDF_STATUS_SUCCESS: timer is initialized successfully
+ * QDF failure status: timer initialization failed
+ */
+#ifdef TIMER_MANAGER
+/* Debug (TIMER_MANAGER) variant of qdf_mc_timer_init(): in addition to
+ * initializing the timer it records the allocation site (file/line) in
+ * qdf_timer_list so leaks can be reported at unload by qdf_timer_clean().
+ */
+QDF_STATUS qdf_mc_timer_init_debug(qdf_mc_timer_t *timer,
+				   QDF_TIMER_TYPE timer_type,
+				   qdf_mc_timer_callback_t callback,
+				   void *user_data, char *file_name,
+				   uint32_t line_num)
+{
+	QDF_STATUS qdf_status;
+
+	/* check for invalid pointer */
+	if ((timer == NULL) || (callback == NULL)) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Null params being passed", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	/* tracking node used only for leak reporting */
+	timer->timer_node = qdf_mem_malloc(sizeof(qdf_mc_timer_node_t));
+
+	if (timer->timer_node == NULL) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Not able to allocate memory for time_node",
+			  __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_NOMEM;
+	}
+
+	qdf_mem_set(timer->timer_node, sizeof(qdf_mc_timer_node_t), 0);
+
+	timer->timer_node->file_name = file_name;
+	timer->timer_node->line_num = line_num;
+	timer->timer_node->qdf_timer = timer;
+
+	qdf_spin_lock_irqsave(&qdf_timer_list_lock);
+	qdf_status = qdf_list_insert_front(&qdf_timer_list,
+					   &timer->timer_node->node);
+	qdf_spin_unlock_irqrestore(&qdf_timer_list_lock);
+	/* a tracking failure is logged but does not fail timer init */
+	if (QDF_STATUS_SUCCESS != qdf_status) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Unable to insert node into List qdf_status %d",
+			  __func__, qdf_status);
+	}
+
+	/* set the various members of the timer structure
+	 * with arguments passed or with default values
+	 */
+	spin_lock_init(&timer->platform_info.spinlock);
+	/* SW timers are deferrable: they do not wake an idle CPU */
+	if (QDF_TIMER_TYPE_SW == timer_type)
+		init_timer_deferrable(&(timer->platform_info.timer));
+	else
+		init_timer(&(timer->platform_info.timer));
+#ifdef CONFIG_MCL
+	/* MCL routes expiry through the CDS scheduler thread */
+	timer->platform_info.timer.function = cds_linux_timer_callback;
+#else
+	timer->platform_info.timer.function = NULL;
+#endif
+	timer->platform_info.timer.data = (unsigned long)timer;
+	timer->callback = callback;
+	timer->user_data = user_data;
+	timer->type = timer_type;
+	timer->platform_info.cookie = LINUX_TIMER_COOKIE;
+	timer->platform_info.thread_id = 0;
+	timer->state = QDF_TIMER_STATE_STOPPED;
+
+	return QDF_STATUS_SUCCESS;
+}
+#else
+QDF_STATUS qdf_mc_timer_init(qdf_mc_timer_t *timer, QDF_TIMER_TYPE timer_type,
+			     qdf_mc_timer_callback_t callback,
+			     void *user_data)
+{
+	/* check for invalid pointer */
+	if ((timer == NULL) || (callback == NULL)) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Null params being passed", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	/* set the various members of the timer structure
+	 * with arguments passed or with default values
+	 */
+	spin_lock_init(&timer->platform_info.spinlock);
+	/* SW timers are deferrable: their expiry may be delayed while
+	 * the CPU is idle
+	 */
+	if (QDF_TIMER_TYPE_SW == timer_type)
+		init_timer_deferrable(&(timer->platform_info.timer));
+	else
+		init_timer(&(timer->platform_info.timer));
+#ifdef CONFIG_MCL
+	timer->platform_info.timer.function = cds_linux_timer_callback;
+#else
+	/* NOTE(review): non-MCL builds leave the expiry handler NULL here --
+	 * confirm it is assigned elsewhere before the timer is started
+	 */
+	timer->platform_info.timer.function = NULL;
+#endif
+	timer->platform_info.timer.data = (unsigned long)timer;
+	timer->callback = callback;
+	timer->user_data = user_data;
+	timer->type = timer_type;
+	/* cookie marks the object as initialized for later sanity checks */
+	timer->platform_info.cookie = LINUX_TIMER_COOKIE;
+	timer->platform_info.thread_id = 0;
+	timer->state = QDF_TIMER_STATE_STOPPED;
+
+	return QDF_STATUS_SUCCESS;
+}
+#endif
+
+/**
+ * qdf_mc_timer_destroy() - destroy QDF timer
+ * @timer: Pointer to timer object
+ *
+ * qdf_mc_timer_destroy() function shall destroy the timer object.
+ * After a successful return from \a qdf_mc_timer_destroy() the timer
+ * object becomes, in effect, uninitialized.
+ *
+ * A destroyed timer object can be re-initialized by calling
+ * qdf_mc_timer_init(). The results of otherwise referencing the object
+ * after it has been destroyed are undefined.
+ *
+ * Calls to QDF timer functions to manipulate the timer, such
+ * as qdf_mc_timer_set() will fail if the lock is destroyed. Therefore,
+ * don't use the timer after it has been destroyed until it has
+ * been re-initialized.
+ *
+ * Return:
+ * QDF_STATUS_SUCCESS - timer is destroyed successfully
+ * QDF failure status - timer destroy failed
+ */
+#ifdef TIMER_MANAGER
+QDF_STATUS qdf_mc_timer_destroy(qdf_mc_timer_t *timer)
+{
+	QDF_STATUS v_status = QDF_STATUS_SUCCESS;
+	unsigned long flags;
+
+	/* check for invalid pointer */
+	if (NULL == timer) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Null timer pointer being passed", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	/* Check if timer refers to an uninitialized object */
+	if (LINUX_TIMER_COOKIE != timer->platform_info.cookie) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Cannot destroy uninitialized timer", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* NOTE(review): the debug tracking node is unlinked and freed
+	 * BEFORE the state check below; if destroy then fails (e.g.
+	 * QDF_TIMER_STATE_STARTING -> E_BUSY) the timer is no longer
+	 * leak-tracked and a retried destroy will fail the list removal
+	 * above -- confirm callers never retry destroy on E_BUSY
+	 */
+	qdf_spin_lock_irqsave(&qdf_timer_list_lock);
+	v_status = qdf_list_remove_node(&qdf_timer_list,
+					&timer->timer_node->node);
+	qdf_spin_unlock_irqrestore(&qdf_timer_list_lock);
+	if (v_status != QDF_STATUS_SUCCESS) {
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
+	}
+	qdf_mem_free(timer->timer_node);
+
+	spin_lock_irqsave(&timer->platform_info.spinlock, flags);
+
+	switch (timer->state) {
+
+	case QDF_TIMER_STATE_STARTING:
+		v_status = QDF_STATUS_E_BUSY;
+		break;
+
+	case QDF_TIMER_STATE_RUNNING:
+		/* Stop the timer first */
+		del_timer(&(timer->platform_info.timer));
+		v_status = QDF_STATUS_SUCCESS;
+		break;
+	case QDF_TIMER_STATE_STOPPED:
+		v_status = QDF_STATUS_SUCCESS;
+		break;
+
+	case QDF_TIMER_STATE_UNUSED:
+		v_status = QDF_STATUS_E_ALREADY;
+		break;
+
+	default:
+		v_status = QDF_STATUS_E_FAULT;
+		break;
+	}
+
+	if (QDF_STATUS_SUCCESS == v_status) {
+		/* invalidate cookie so later API calls reject this object */
+		timer->platform_info.cookie = LINUX_INVALID_TIMER_COOKIE;
+		timer->state = QDF_TIMER_STATE_UNUSED;
+		spin_unlock_irqrestore(&timer->platform_info.spinlock, flags);
+		return v_status;
+	}
+
+	spin_unlock_irqrestore(&timer->platform_info.spinlock, flags);
+
+	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s: Cannot destroy timer in state = %d", __func__,
+		  timer->state);
+	QDF_ASSERT(0);
+
+	return v_status;
+}
+EXPORT_SYMBOL(qdf_mc_timer_destroy);
+
+#else
+
+/**
+ * qdf_mc_timer_destroy() - destroy QDF timer
+ * @timer: Pointer to timer object
+ *
+ * qdf_mc_timer_destroy() function shall destroy the timer object.
+ * After a successful return from \a qdf_mc_timer_destroy() the timer
+ * object becomes, in effect, uninitialized.
+ *
+ * A destroyed timer object can be re-initialized by calling
+ * qdf_mc_timer_init(). The results of otherwise referencing the object
+ * after it has been destroyed are undefined.
+ *
+ * Calls to QDF timer functions to manipulate the timer, such
+ * as qdf_mc_timer_set() will fail if the lock is destroyed. Therefore,
+ * don't use the timer after it has been destroyed until it has
+ * been re-initialized.
+ *
+ * Return:
+ * QDF_STATUS_SUCCESS - timer is destroyed successfully
+ * QDF failure status - timer destroy failed
+ */
+QDF_STATUS qdf_mc_timer_destroy(qdf_mc_timer_t *timer)
+{
+	QDF_STATUS v_status = QDF_STATUS_SUCCESS;
+	unsigned long flags;
+
+	/* check for invalid pointer */
+	if (NULL == timer) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Null timer pointer being passed", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	/* check if timer refers to an uninitialized object */
+	if (LINUX_TIMER_COOKIE != timer->platform_info.cookie) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Cannot destroy uninitialized timer", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
+	}
+	spin_lock_irqsave(&timer->platform_info.spinlock, flags);
+
+	switch (timer->state) {
+
+	case QDF_TIMER_STATE_STARTING:
+		v_status = QDF_STATUS_E_BUSY;
+		break;
+
+	case QDF_TIMER_STATE_RUNNING:
+		/* Stop the timer first */
+		del_timer(&(timer->platform_info.timer));
+		v_status = QDF_STATUS_SUCCESS;
+		break;
+
+	case QDF_TIMER_STATE_STOPPED:
+		v_status = QDF_STATUS_SUCCESS;
+		break;
+
+	case QDF_TIMER_STATE_UNUSED:
+		v_status = QDF_STATUS_E_ALREADY;
+		break;
+
+	default:
+		v_status = QDF_STATUS_E_FAULT;
+		break;
+	}
+
+	if (QDF_STATUS_SUCCESS == v_status) {
+		/* invalidate cookie so later API calls reject this object */
+		timer->platform_info.cookie = LINUX_INVALID_TIMER_COOKIE;
+		timer->state = QDF_TIMER_STATE_UNUSED;
+		spin_unlock_irqrestore(&timer->platform_info.spinlock, flags);
+		return v_status;
+	}
+
+	spin_unlock_irqrestore(&timer->platform_info.spinlock, flags);
+
+	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s: Cannot destroy timer in state = %d", __func__,
+		  timer->state);
+	QDF_ASSERT(0);
+
+	return v_status;
+}
+EXPORT_SYMBOL(qdf_mc_timer_destroy);
+#endif
+
+/**
+ * qdf_mc_timer_start() - start a QDF timer object
+ * @timer: Pointer to timer object
+ * @expiration_time: Time to expire, in milliseconds (must be >= 10)
+ *
+ * qdf_mc_timer_start() function starts a timer to expire after the
+ * specified interval, thus running the timer callback function when
+ * the interval expires.
+ *
+ * A timer only runs once (a one-shot timer). To re-start the
+ * timer, qdf_mc_timer_start() has to be called after the timer runs
+ * or has been cancelled.
+ *
+ * Return:
+ * QDF_STATUS_SUCCESS: timer is started successfully
+ * QDF failure status: timer start failed
+ */
+QDF_STATUS qdf_mc_timer_start(qdf_mc_timer_t *timer, uint32_t expiration_time)
+{
+	unsigned long flags;
+
+	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH,
+		  "timer Addr inside qdf_mc_timer_start : 0x%p ", timer);
+
+	/* check for invalid pointer */
+	if (NULL == timer) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s Null timer pointer being passed", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* check if timer refers to an uninitialized object */
+	if (LINUX_TIMER_COOKIE != timer->platform_info.cookie) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Cannot start uninitialized timer", __func__);
+		QDF_ASSERT(0);
+
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* check if timer has expiration time less than 10 ms */
+	if (expiration_time < 10) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Cannot start a timer with expiration less than 10 ms",
+			  __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* make sure the remainer of the logic isn't interrupted */
+	spin_lock_irqsave(&timer->platform_info.spinlock, flags);
+
+	/* ensure if the timer can be started */
+	if (QDF_TIMER_STATE_STOPPED != timer->state) {
+		spin_unlock_irqrestore(&timer->platform_info.spinlock, flags);
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Cannot start timer in state = %d ", __func__,
+			  timer->state);
+		return QDF_STATUS_E_ALREADY;
+	}
+
+	/* start the timer */
+	mod_timer(&(timer->platform_info.timer),
+		  jiffies + msecs_to_jiffies(expiration_time));
+
+	timer->state = QDF_TIMER_STATE_RUNNING;
+
+	/* get the thread ID on which the timer is being started */
+	timer->platform_info.thread_id = current->pid;
+
+	/* NOTE(review): persistent_timer_count is a shared counter updated
+	 * under this per-timer lock only -- confirm it has its own
+	 * synchronization elsewhere
+	 */
+	if (QDF_TIMER_TYPE_WAKE_APPS == timer->type) {
+		persistent_timer_count++;
+		if (1 == persistent_timer_count) {
+			/* since we now have one persistent timer,
+			 * we need to disallow sleep
+			 * sleep_negate_okts(sleep_client_handle);
+			 */
+		}
+	}
+
+	spin_unlock_irqrestore(&timer->platform_info.spinlock, flags);
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_mc_timer_start);
+
+/**
+ * qdf_mc_timer_stop() - stop a QDF timer
+ * @timer: Pointer to timer object
+ *
+ * qdf_mc_timer_stop() function stops a timer that has been started but
+ * has not expired, essentially cancelling the 'start' request.
+ *
+ * After a timer is stopped, it goes back to the state it was in after it
+ * was created and can be started again via a call to qdf_mc_timer_start().
+ *
+ * Return:
+ * QDF_STATUS_SUCCESS: timer is stopped successfully
+ * QDF failure status: timer stop failed
+ */
+QDF_STATUS qdf_mc_timer_stop(qdf_mc_timer_t *timer)
+{
+	unsigned long flags;
+
+	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s: timer Addr inside qdf_mc_timer_stop : 0x%p",
+		  __func__, timer);
+
+	/* check for invalid pointer */
+	if (NULL == timer) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s Null timer pointer being passed", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* check if timer refers to an uninitialized object */
+	if (LINUX_TIMER_COOKIE != timer->platform_info.cookie) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Cannot stop uninitialized timer", __func__);
+		QDF_ASSERT(0);
+
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* ensure the timer state is correct */
+	spin_lock_irqsave(&timer->platform_info.spinlock, flags);
+
+	/* stopping a timer that is not running is treated as success */
+	if (QDF_TIMER_STATE_RUNNING != timer->state) {
+		spin_unlock_irqrestore(&timer->platform_info.spinlock, flags);
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH,
+			  "%s: Cannot stop timer in state = %d",
+			  __func__, timer->state);
+		return QDF_STATUS_SUCCESS;
+	}
+
+	timer->state = QDF_TIMER_STATE_STOPPED;
+
+	del_timer(&(timer->platform_info.timer));
+
+	spin_unlock_irqrestore(&timer->platform_info.spinlock, flags);
+
+	qdf_try_allowing_sleep(timer->type);
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_mc_timer_stop);
+
+/**
+ * qdf_mc_timer_get_system_ticks() - get the system time in 10ms ticks
+ *
+ * qdf_mc_timer_get_system_ticks() function returns the current number
+ * of timer ticks in 10msec intervals. This function is suitable for
+ * timestamping and calculating time intervals by calculating the
+ * difference between two timestamps.
+ *
+ * Return:
+ * The current system tick count (in 10msec intervals). This
+ * function cannot fail.
+ */
+unsigned long qdf_mc_timer_get_system_ticks(void)
+{
+	return jiffies_to_msecs(jiffies) / 10;
+}
+EXPORT_SYMBOL(qdf_mc_timer_get_system_ticks);
+
+/**
+ * qdf_mc_timer_get_system_time() - Get the system time in milliseconds
+ *
+ * qdf_mc_timer_get_system_time() function returns the number of milliseconds
+ * that have elapsed since the system was started
+ *
+ * NOTE(review): do_gettimeofday() yields wall-clock (epoch-based) time,
+ * not time-since-boot, and is affected by time-of-day adjustments --
+ * confirm callers only use differences between two calls
+ *
+ * Return:
+ * The current system time in milliseconds
+ */
+unsigned long qdf_mc_timer_get_system_time(void)
+{
+	struct timeval tv;
+	do_gettimeofday(&tv);
+	return tv.tv_sec * 1000 + tv.tv_usec / 1000;
+}
+EXPORT_SYMBOL(qdf_mc_timer_get_system_time);
diff --git a/qdf/linux/src/qdf_mem.c b/qdf/linux/src/qdf_mem.c
new file mode 100644
index 000000000000..a178f4368699
--- /dev/null
+++ b/qdf/linux/src/qdf_mem.c
@@ -0,0 +1,951 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: qdf_mem
+ * This file provides OS dependent memory management APIs
+ */
+
+#include "qdf_mem.h"
+#include "qdf_nbuf.h"
+#include "qdf_lock.h"
+#include "qdf_mc_timer.h"
+#include "qdf_module.h"
+
+#if defined(CONFIG_CNSS)
+#include <net/cnss.h>
+#endif
+
+#ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
+#include <net/cnss_prealloc.h>
+#endif
+
+#ifdef MEMORY_DEBUG
+#include <qdf_list.h>
+/* global list of live debug-tracked allocations, guarded by
+ * qdf_mem_list_lock
+ */
+qdf_list_t qdf_mem_list;
+qdf_spinlock_t qdf_mem_list_lock;
+
+/* magic guard patterns written before and after each tracked allocation;
+ * verified at free time to detect buffer under/overruns
+ */
+static uint8_t WLAN_MEM_HEADER[] = { 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
+					0x67, 0x68 };
+static uint8_t WLAN_MEM_TAIL[] = { 0x80, 0x81, 0x82, 0x83, 0x84, 0x85,
+					0x86, 0x87 };
+
+/**
+ * struct s_qdf_mem_struct - memory object to debug
+ * @node: node to the list
+ * @file_name: name of the file the allocation was made from
+ * @line_num: line number of the allocation
+ * @size: size of the allocation in bytes
+ * @header: array that contains the header guard pattern
+ */
+struct s_qdf_mem_struct {
+	qdf_list_node_t node;
+	char *file_name;
+	unsigned int line_num;
+	unsigned int size;
+	uint8_t header[8];
+};
+#endif
+
+/* Preprocessor Definitions and Constants */
+/* log a warning when kzalloc takes longer than this many msec */
+#define QDF_GET_MEMORY_TIME_THRESHOLD 3000
+
+int qdf_dbg_mask;
+qdf_declare_param(qdf_dbg_mask, int);
+
+/* when set (the default), mempools fall back to per-element heap
+ * allocation instead of a pre-allocated contiguous pool
+ */
+u_int8_t prealloc_disabled = 1;
+qdf_declare_param(prealloc_disabled, byte);
+
+/**
+ * __qdf_mempool_init() - Create and initialize memory pool
+ *
+ * @osdev: platform device object
+ * @pool_addr: address of the pool created
+ * @elem_cnt: no. of elements in pool
+ * @elem_size: size of each pool element in bytes
+ * @flags: flags
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
+		       int elem_cnt, size_t elem_size, u_int32_t flags)
+{
+	__qdf_mempool_ctxt_t *new_pool = NULL;
+	u_int32_t align = L1_CACHE_BYTES;
+	unsigned long aligned_pool_mem;
+	int pool_id;
+	int i;
+
+	if (prealloc_disabled) {
+		/* TBD: We can maintain a list of pools in qdf_device_t
+		 * to help debugging
+		 * when pre-allocation is not enabled
+		 */
+		new_pool = (__qdf_mempool_ctxt_t *)
+			   kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
+		if (new_pool == NULL)
+			/* NOTE(review): this path returns QDF_STATUS_E_NOMEM
+			 * while the pool paths below return -ENOMEM -- mixed
+			 * error conventions, confirm callers handle both
+			 */
+			return QDF_STATUS_E_NOMEM;
+
+		memset(new_pool, 0, sizeof(*new_pool));
+		/* TBD: define flags for zeroing buffers etc */
+		new_pool->flags = flags;
+		new_pool->elem_size = elem_size;
+		new_pool->max_elem = elem_cnt;
+		*pool_addr = new_pool;
+		return 0;
+	}
+
+	/* find a free slot in the per-device pool table */
+	for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
+		if (osdev->mem_pool[pool_id] == NULL)
+			break;
+	}
+
+	if (pool_id == MAX_MEM_POOLS)
+		return -ENOMEM;
+
+	new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
+		kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
+	if (new_pool == NULL)
+		return -ENOMEM;
+
+	memset(new_pool, 0, sizeof(*new_pool));
+	/* TBD: define flags for zeroing buffers etc */
+	new_pool->flags = flags;
+	new_pool->pool_id = pool_id;
+
+	/* Round up the element size to cacheline */
+	new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
+	new_pool->mem_size = elem_cnt * new_pool->elem_size +
+				((align)?(align - 1):0);
+
+	new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
+	if (new_pool->pool_mem == NULL) {
+		/* TBD: Check if we need get_free_pages above */
+		kfree(new_pool);
+		osdev->mem_pool[pool_id] = NULL;
+		return -ENOMEM;
+	}
+
+	spin_lock_init(&new_pool->lock);
+
+	/* Initialize free list */
+	/* NOTE(review): adding (addr % align) does not in general round the
+	 * address up to an align boundary; this relies on kzalloc already
+	 * returning cache-aligned memory -- verify
+	 */
+	aligned_pool_mem = (unsigned long)(new_pool->pool_mem) +
+			((align) ? (unsigned long)(new_pool->pool_mem)%align:0);
+	STAILQ_INIT(&new_pool->free_list);
+
+	for (i = 0; i < elem_cnt; i++)
+		STAILQ_INSERT_TAIL(&(new_pool->free_list),
+			(mempool_elem_t *)(aligned_pool_mem +
+			(new_pool->elem_size * i)), mempool_entry);
+
+
+	new_pool->free_cnt = elem_cnt;
+	*pool_addr = new_pool;
+	return 0;
+}
+EXPORT_SYMBOL(__qdf_mempool_init);
+
+/**
+ * __qdf_mempool_destroy() - Destroy memory pool
+ * @osdev: platform device object
+ * @pool: Handle to memory pool
+ *
+ * Returns: none
+ */
+void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
+{
+	int pool_id = 0;
+
+	if (!pool)
+		return;
+
+	/* heap-backed pools own no contiguous pool memory; elements were
+	 * allocated and freed individually
+	 */
+	if (prealloc_disabled) {
+		kfree(pool);
+		return;
+	}
+
+	pool_id = pool->pool_id;
+
+	/* TBD: Check if free count matches elem_cnt if debug is enabled */
+	kfree(pool->pool_mem);
+	kfree(pool);
+	osdev->mem_pool[pool_id] = NULL;
+}
+EXPORT_SYMBOL(__qdf_mempool_destroy);
+
+/**
+ * __qdf_mempool_alloc() - Allocate an element memory pool
+ *
+ * @osdev: platform device object
+ * @pool: Handle to memory pool
+ *
+ * Return: Pointer to the allocated element or NULL if the pool is empty
+ */
+void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
+{
+	void *buf = NULL;
+
+	if (!pool)
+		return NULL;
+
+	/* heap-backed pool: allocate each element individually */
+	if (prealloc_disabled)
+		return  qdf_mem_malloc(pool->elem_size);
+
+	spin_lock_bh(&pool->lock);
+
+	buf = STAILQ_FIRST(&pool->free_list);
+	if (buf != NULL) {
+		STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
+		pool->free_cnt--;
+	}
+
+	/* TBD: Update free count if debug is enabled */
+	spin_unlock_bh(&pool->lock);
+
+	return buf;
+}
+EXPORT_SYMBOL(__qdf_mempool_alloc);
+
+/**
+ * __qdf_mempool_free() - Free a memory pool element
+ * @osdev: Platform device object
+ * @pool: Handle to memory pool
+ * @buf: Element to be freed
+ *
+ * Returns: none
+ */
+void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
+{
+	if (!pool)
+		return;
+
+
+	/* NOTE(review): `return void-expression;` in a void function is a
+	 * GNU extension, not standard C -- fine for kernel builds
+	 */
+	if (prealloc_disabled)
+		return qdf_mem_free(buf);
+
+	spin_lock_bh(&pool->lock);
+	pool->free_cnt++;
+
+	STAILQ_INSERT_TAIL
+		(&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
+	spin_unlock_bh(&pool->lock);
+}
+EXPORT_SYMBOL(__qdf_mempool_free);
+
+/**
+ * qdf_mem_alloc_outline() - allocate QDF memory
+ * @osdev: platform device object (unused; kept for API compatibility)
+ * @size: Number of bytes of memory to allocate.
+ *
+ * Non-inline wrapper around qdf_mem_malloc().
+ *
+ * Return:
+ * Upon successful allocate, returns a non-NULL pointer to the allocated
+ * memory. If this function is unable to allocate the amount of memory
+ * specified (for any reason) it returns NULL.
+ */
+void *
+qdf_mem_alloc_outline(qdf_device_t osdev, size_t size)
+{
+	return qdf_mem_malloc(size);
+}
+EXPORT_SYMBOL(qdf_mem_alloc_outline);
+
+/**
+ * qdf_mem_free_outline() - QDF memory free API
+ * @buf: Pointer to the starting address of the memory to be free'd.
+ *
+ * Non-inline wrapper around qdf_mem_free(); in debug builds that call
+ * also checks for corruption and double frees.
+ *
+ * Return: none
+ */
+void
+qdf_mem_free_outline(void *buf)
+{
+	qdf_mem_free(buf);
+}
+EXPORT_SYMBOL(qdf_mem_free_outline);
+
+/**
+ * qdf_mem_zero_outline() - zero out memory
+ * @buf: pointer to memory that will be set to zero
+ * @size: number of bytes zero
+ *
+ * Non-inline wrapper around qdf_mem_zero(): sets the memory location to
+ * all zeros, essentially clearing the memory.
+ *
+ * Return: none
+ */
+void
+qdf_mem_zero_outline(void *buf, qdf_size_t size)
+{
+	qdf_mem_zero(buf, size);
+}
+EXPORT_SYMBOL(qdf_mem_zero_outline);
+
+/* External Function implementation */
+#ifdef MEMORY_DEBUG
+
+/**
+ * qdf_mem_init() - initialize qdf memory debug functionality
+ *
+ * Sets up the allocation-tracking list, its lock, and nbuf debug state.
+ *
+ * Return: none
+ */
+void qdf_mem_init(void)
+{
+	/* Initializing the list with maximum size of 60000 */
+	qdf_list_create(&qdf_mem_list, 60000);
+	qdf_spinlock_create(&qdf_mem_list_lock);
+	qdf_net_buf_debug_init();
+	return;
+}
+EXPORT_SYMBOL(qdf_mem_init);
+
+/**
+ * qdf_mem_clean() - display memory leak debug info and free leaked pointers
+ *
+ * Drains the allocation-tracking list: consecutive leaks from the same
+ * file/line/size are collapsed into one log entry with a count, and each
+ * leaked block is freed.
+ *
+ * Return: none
+ */
+void qdf_mem_clean(void)
+{
+	uint32_t list_size;
+	list_size = qdf_list_size(&qdf_mem_list);
+	qdf_net_buf_debug_clean();
+	if (list_size) {
+		qdf_list_node_t *node;
+		QDF_STATUS qdf_status;
+
+		struct s_qdf_mem_struct *mem_struct;
+		char *prev_mleak_file = "";
+		unsigned int prev_mleak_line_num = 0;
+		unsigned int prev_mleak_sz = 0;
+		unsigned int mleak_cnt = 0;
+
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
+			  "%s: List is not Empty. list_size %d ",
+			  __func__, (int)list_size);
+
+		do {
+			qdf_spin_lock(&qdf_mem_list_lock);
+			qdf_status =
+				qdf_list_remove_front(&qdf_mem_list, &node);
+			qdf_spin_unlock(&qdf_mem_list_lock);
+			if (QDF_STATUS_SUCCESS == qdf_status) {
+				/* the tracking node is the first member of
+				 * the allocation header, so the cast is safe
+				 */
+				mem_struct = (struct s_qdf_mem_struct *)node;
+				/* Take care to log only once multiple memory
+				 * leaks from the same place
+				 */
+				if (strcmp(prev_mleak_file,
+					   mem_struct->file_name)
+				    || (prev_mleak_line_num !=
+					mem_struct->line_num)
+				    || (prev_mleak_sz != mem_struct->size)) {
+					if (mleak_cnt != 0) {
+						QDF_TRACE(QDF_MODULE_ID_QDF,
+							  QDF_TRACE_LEVEL_FATAL,
+							  "%d Time Memory Leak@ File %s, @Line %d, size %d",
+							  mleak_cnt,
+							  prev_mleak_file,
+							  prev_mleak_line_num,
+							  prev_mleak_sz);
+					}
+					prev_mleak_file = mem_struct->file_name;
+					prev_mleak_line_num =
+						mem_struct->line_num;
+					prev_mleak_sz = mem_struct->size;
+					mleak_cnt = 0;
+				}
+				mleak_cnt++;
+				kfree((void *)mem_struct);
+			}
+		} while (qdf_status == QDF_STATUS_SUCCESS);
+
+		/* Print last memory leak from the module */
+		if (mleak_cnt) {
+			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_FATAL,
+				  "%d Time memory Leak@ File %s, @Line %d, size %d",
+				  mleak_cnt, prev_mleak_file,
+				  prev_mleak_line_num, prev_mleak_sz);
+		}
+#ifdef CONFIG_HALT_KMEMLEAK
+		BUG_ON(0);
+#endif
+	}
+}
+EXPORT_SYMBOL(qdf_mem_clean);
+
+/**
+ * qdf_mem_exit() - exit qdf memory debug functionality
+ *
+ * Tears down nbuf debug state, reports/frees any leaked allocations,
+ * then destroys the tracking list.
+ *
+ * Return: none
+ */
+void qdf_mem_exit(void)
+{
+	qdf_net_buf_debug_exit();
+	qdf_mem_clean();
+	qdf_list_destroy(&qdf_mem_list);
+}
+EXPORT_SYMBOL(qdf_mem_exit);
+
+/**
+ * qdf_mem_malloc_debug() - debug version of QDF memory allocation API
+ * @size: Number of bytes of memory to allocate.
+ * @file_name: File name from which memory allocation is called
+ * @line_num: Line number from which memory allocation is called
+ *
+ * This function will dynamicallly allocate the specified number of bytes of
+ * memory and add it to the qdf tracking list to check against memory leaks
+ * and corruptions
+ *
+ * Return:
+ * Upon successful allocate, returns a non-NULL pointer to the allocated
+ * memory. If this function is unable to allocate the amount of memory
+ * specified (for any reason) it returns %NULL.
+ */
+void *qdf_mem_malloc_debug(size_t size,
+			char *file_name, uint32_t line_num)
+{
+	struct s_qdf_mem_struct *mem_struct;
+	void *mem_ptr = NULL;
+	uint32_t new_size;
+	int flags = GFP_KERNEL;
+	unsigned long time_before_kzalloc;
+
+	/* reject zero-size and > 1MB requests */
+	if (size > (1024 * 1024) || size == 0) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: called with invalid arg; passed in %zu !!!",
+			  __func__, size);
+		return NULL;
+	}
+
+#if defined(CONFIG_CNSS) && defined(CONFIG_WCNSS_MEM_PRE_ALLOC)
+	/* NOTE(review): pre-allocated buffers are returned without guard
+	 * bytes and are never inserted into the tracking list; the matching
+	 * free path hands them back via wcnss_prealloc_put() first
+	 */
+	if (size > WCNSS_PRE_ALLOC_GET_THRESHOLD) {
+		void *pmem;
+		pmem = wcnss_prealloc_get(size);
+		if (NULL != pmem) {
+			memset(pmem, 0, size);
+			return pmem;
+		}
+	}
+#endif
+
+	if (in_interrupt() || irqs_disabled() || in_atomic())
+		flags = GFP_ATOMIC;
+
+	/* 8 = sizeof(WLAN_MEM_TAIL) trailing guard bytes */
+	new_size = size + sizeof(struct s_qdf_mem_struct) + 8;
+	time_before_kzalloc = qdf_mc_timer_get_system_time();
+	mem_struct = (struct s_qdf_mem_struct *)kzalloc(new_size, flags);
+	/**
+	 * If time taken by kmalloc is greater than
+	 * QDF_GET_MEMORY_TIME_THRESHOLD msec
+	 */
+	if (qdf_mc_timer_get_system_time() - time_before_kzalloc >=
+					  QDF_GET_MEMORY_TIME_THRESHOLD)
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: kzalloc took %lu msec for size %zu called from %p_s at line %d",
+			 __func__,
+			 qdf_mc_timer_get_system_time() - time_before_kzalloc,
+			 size, (void *)_RET_IP_, line_num);
+
+	if (mem_struct != NULL) {
+		QDF_STATUS qdf_status;
+
+		mem_struct->file_name = file_name;
+		mem_struct->line_num = line_num;
+		mem_struct->size = size;
+
+		/* write the guard patterns checked by qdf_mem_free() */
+		qdf_mem_copy(&mem_struct->header[0],
+			     &WLAN_MEM_HEADER[0], sizeof(WLAN_MEM_HEADER));
+
+		qdf_mem_copy((uint8_t *) (mem_struct + 1) + size,
+			     &WLAN_MEM_TAIL[0], sizeof(WLAN_MEM_TAIL));
+
+		qdf_spin_lock_irqsave(&qdf_mem_list_lock);
+		qdf_status = qdf_list_insert_front(&qdf_mem_list,
+						   &mem_struct->node);
+		qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
+		if (QDF_STATUS_SUCCESS != qdf_status) {
+			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+				  "%s: Unable to insert node into List qdf_status %d",
+				  __func__, qdf_status);
+		}
+
+		/* caller-visible pointer starts just past the header */
+		mem_ptr = (void *)(mem_struct + 1);
+	}
+	return mem_ptr;
+}
+EXPORT_SYMBOL(qdf_mem_malloc_debug);
+
+/**
+ * qdf_mem_free() - QDF memory free API
+ * @ptr: Pointer to the starting address of the memory to be free'd.
+ *
+ * This function will free the memory pointed to by 'ptr'. It also checks
+ * is memory is corrupted or getting double freed and panic.
+ *
+ * Return: none
+ */
+void qdf_mem_free(void *ptr)
+{
+	if (ptr != NULL) {
+		QDF_STATUS qdf_status;
+		/* recover the tracking header placed just before the
+		 * caller-visible pointer by qdf_mem_malloc_debug()
+		 */
+		struct s_qdf_mem_struct *mem_struct =
+			((struct s_qdf_mem_struct *)ptr) - 1;
+
+#if defined(CONFIG_CNSS) && defined(CONFIG_WCNSS_MEM_PRE_ALLOC)
+		/* pre-allocated buffers are untracked; they must be handed
+		 * back before the list lookup below
+		 */
+		if (wcnss_prealloc_put(ptr))
+			return;
+#endif
+
+		qdf_spin_lock_irqsave(&qdf_mem_list_lock);
+		qdf_status =
+			qdf_list_remove_node(&qdf_mem_list, &mem_struct->node);
+		qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
+
+		if (QDF_STATUS_SUCCESS == qdf_status) {
+			/* nonzero compare result means a guard pattern was
+			 * overwritten -- under/overrun of the allocation
+			 */
+			if (qdf_mem_cmp(mem_struct->header,
+					&WLAN_MEM_HEADER[0],
+					sizeof(WLAN_MEM_HEADER))) {
+				QDF_TRACE(QDF_MODULE_ID_QDF,
+					  QDF_TRACE_LEVEL_FATAL,
+					  "Memory Header is corrupted. mem_info: Filename %s, line_num %d",
+					  mem_struct->file_name,
+					  (int)mem_struct->line_num);
+				QDF_BUG(0);
+			}
+			if (qdf_mem_cmp((uint8_t *) ptr + mem_struct->size,
+					&WLAN_MEM_TAIL[0],
+					sizeof(WLAN_MEM_TAIL))) {
+				QDF_TRACE(QDF_MODULE_ID_QDF,
+					  QDF_TRACE_LEVEL_FATAL,
+					  "Memory Trailer is corrupted. mem_info: Filename %s, line_num %d",
+					  mem_struct->file_name,
+					  (int)mem_struct->line_num);
+				QDF_BUG(0);
+			}
+			kfree((void *)mem_struct);
+		} else {
+			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_FATAL,
+				  "%s: Unallocated memory (double free?)",
+				  __func__);
+			QDF_BUG(0);
+		}
+	}
+}
+EXPORT_SYMBOL(qdf_mem_free);
+#else
+
+/**
+ * qdf_mem_malloc() - allocate QDF memory
+ * @size: Number of bytes of memory to allocate.
+ *
+ * This function will dynamicallly allocate the specified number of bytes of
+ * memory. The returned memory is zero-initialized (kzalloc).
+ *
+ * Return:
+ * Upon successful allocate, returns a non-NULL pointer to the allocated
+ * memory. If this function is unable to allocate the amount of memory
+ * specified (for any reason) it returns NULL.
+ */
+void *qdf_mem_malloc(size_t size)
+{
+	int flags = GFP_KERNEL;
+
+	/* NOTE(review): unlike the MEMORY_DEBUG flavor, this one does not
+	 * also test in_atomic() and enforces no size cap -- confirm the
+	 * difference is intentional
+	 */
+	if (in_interrupt() || irqs_disabled())
+		flags = GFP_ATOMIC;
+
+	return kzalloc(size, flags);
+}
+EXPORT_SYMBOL(qdf_mem_malloc);
+
+/**
+ * qdf_mem_free() - free QDF memory
+ * @ptr: Pointer to the starting address of the memory to be free'd.
+ *
+ * This function will free the memory pointed to by 'ptr'.
+ *
+ * Return: None
+ */
+void qdf_mem_free(void *ptr)
+{
+	if (ptr == NULL)
+		return;
+#if defined(CONFIG_CNSS) && defined(CONFIG_WCNSS_MEM_PRE_ALLOC)
+	/* buffers obtained from the pre-allocation pool are returned
+	 * there instead of being kfree'd
+	 */
+	if (wcnss_prealloc_put(ptr))
+		return;
+#endif
+	kfree(ptr);
+}
+EXPORT_SYMBOL(qdf_mem_free);
+#endif
+
+/**
+ * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory
+ * @osdev: OS device handle pointer
+ * @pages: Multi page information storage
+ * @element_size: Each element size
+ * @element_num: Total number of elements should be allocated
+ * @memctxt: Memory context
+ * @cacheable: Coherent memory or cacheable memory
+ *
+ * This function will allocate large size of memory over multiple pages.
+ * Large size of contiguous memory allocation will fail frequently, then
+ * instead of allocate large memory by one shot, allocate through multiple, non
+ * contiguous memory and combine pages when actual usage
+ *
+ * On failure all partially-allocated pages are released and the @pages
+ * struct is zeroed.
+ *
+ * Return: None
+ */
+void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
+			       struct qdf_mem_multi_page_t *pages,
+			       size_t element_size, uint16_t element_num,
+			       qdf_dma_context_t memctxt, bool cacheable)
+{
+	uint16_t page_idx;
+	struct qdf_mem_dma_page_t *dma_pages;
+	void **cacheable_pages = NULL;
+	uint16_t i;
+
+	pages->num_element_per_page = PAGE_SIZE / element_size;
+	if (!pages->num_element_per_page) {
+		qdf_print("Invalid page %d or element size %d",
+			  (int)PAGE_SIZE, (int)element_size);
+		goto out_fail;
+	}
+
+	/* round up so a partial last page is still allocated */
+	pages->num_pages = element_num / pages->num_element_per_page;
+	if (element_num % pages->num_element_per_page)
+		pages->num_pages++;
+
+	if (cacheable) {
+		/* Pages information storage */
+		/* NOTE(review): sizeof(pages->cacheable_pages) is the size of
+		 * a void** pointer rather than of an element; it happens to
+		 * work because sizeof(void *) == sizeof(void **) -- prefer
+		 * sizeof(*pages->cacheable_pages)
+		 */
+		pages->cacheable_pages = qdf_mem_malloc(
+			pages->num_pages * sizeof(pages->cacheable_pages));
+		if (!pages->cacheable_pages) {
+			qdf_print("Cacheable page storage alloc fail");
+			goto out_fail;
+		}
+
+		cacheable_pages = pages->cacheable_pages;
+		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
+			cacheable_pages[page_idx] = qdf_mem_malloc(PAGE_SIZE);
+			if (!cacheable_pages[page_idx]) {
+				qdf_print("cacheable page alloc fail, pi %d",
+					  page_idx);
+				goto page_alloc_fail;
+			}
+		}
+		pages->dma_pages = NULL;
+	} else {
+		pages->dma_pages = qdf_mem_malloc(
+			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
+		if (!pages->dma_pages) {
+			qdf_print("dmaable page storage alloc fail");
+			goto out_fail;
+		}
+
+		dma_pages = pages->dma_pages;
+		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
+			dma_pages->page_v_addr_start =
+				qdf_mem_alloc_consistent(osdev, osdev->dev,
+					 PAGE_SIZE,
+					&dma_pages->page_p_addr);
+			if (!dma_pages->page_v_addr_start) {
+				qdf_print("dmaable page alloc fail pi %d",
+					page_idx);
+				goto page_alloc_fail;
+			}
+			dma_pages->page_v_addr_end =
+				dma_pages->page_v_addr_start + PAGE_SIZE;
+			dma_pages++;
+		}
+		pages->cacheable_pages = NULL;
+	}
+	return;
+
+page_alloc_fail:
+	/* release the pages allocated before the failing index */
+	if (cacheable) {
+		for (i = 0; i < page_idx; i++)
+			qdf_mem_free(pages->cacheable_pages[i]);
+		qdf_mem_free(pages->cacheable_pages);
+	} else {
+		dma_pages = pages->dma_pages;
+		for (i = 0; i < page_idx; i++) {
+			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
+				dma_pages->page_v_addr_start,
+				dma_pages->page_p_addr, memctxt);
+			dma_pages++;
+		}
+		qdf_mem_free(pages->dma_pages);
+	}
+
+out_fail:
+	pages->cacheable_pages = NULL;
+	pages->dma_pages = NULL;
+	pages->num_pages = 0;
+	return;
+}
+EXPORT_SYMBOL(qdf_mem_multi_pages_alloc);
+
+/**
+ * qdf_mem_multi_pages_free() - free large size of kernel memory
+ * @osdev: OS device handle pointer
+ * @pages: Multi page information storage
+ * @memctxt: Memory context
+ * @cacheable: Coherent memory or cacheable memory
+ *
+ * This function will free large size of memory over multiple pages.
+ * Counterpart of qdf_mem_multi_pages_alloc(); @cacheable and @memctxt
+ * must match the values used at allocation time.
+ *
+ * Return: None
+ */
+void qdf_mem_multi_pages_free(qdf_device_t osdev,
+ struct qdf_mem_multi_page_t *pages,
+ qdf_dma_context_t memctxt, bool cacheable)
+{
+ unsigned int page_idx;
+ struct qdf_mem_dma_page_t *dma_pages;
+
+ if (cacheable) {
+ /* Free each page, then the page-pointer table itself */
+ for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
+ qdf_mem_free(pages->cacheable_pages[page_idx]);
+ qdf_mem_free(pages->cacheable_pages);
+ } else {
+ dma_pages = pages->dma_pages;
+ for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
+ qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
+ dma_pages->page_v_addr_start,
+ dma_pages->page_p_addr, memctxt);
+ dma_pages++;
+ }
+ qdf_mem_free(pages->dma_pages);
+ }
+
+ /* Reset the descriptor so a double free is detectable/benign */
+ pages->cacheable_pages = NULL;
+ pages->dma_pages = NULL;
+ pages->num_pages = 0;
+ return;
+}
+EXPORT_SYMBOL(qdf_mem_multi_pages_free);
+
+/**
+ * qdf_mem_copy() - copy memory
+ * @dst_addr: Pointer to destination memory location (to copy to)
+ * @src_addr: Pointer to source memory location (to copy from)
+ * @num_bytes: Number of bytes to copy.
+ *
+ * Copy host memory from one location to another, similar to memcpy in
+ * standard C. Note this function does not specifically handle overlapping
+ * source and destination memory locations. Calling this function with
+ * overlapping source and destination memory locations will result in
+ * unpredictable results. Use qdf_mem_move() if the memory locations
+ * for the source and destination are overlapping (or could be overlapping!)
+ *
+ * Return: none
+ */
+void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
+{
+ if (0 == num_bytes) {
+ /* special case where dst_addr or src_addr can be NULL */
+ return;
+ }
+
+ /* NULL with a non-zero length is a caller bug: log, assert, bail */
+ if ((dst_addr == NULL) || (src_addr == NULL)) {
+ QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+ "%s called with NULL parameter, source:%p destination:%p",
+ __func__, src_addr, dst_addr);
+ QDF_ASSERT(0);
+ return;
+ }
+ memcpy(dst_addr, src_addr, num_bytes);
+}
+EXPORT_SYMBOL(qdf_mem_copy);
+
+/**
+ * qdf_mem_zero() - zero out memory
+ * @ptr: pointer to memory that will be set to zero
+ * @num_bytes: number of bytes to zero
+ *
+ * This function sets the memory location to all zeros, essentially clearing
+ * the memory.
+ *
+ * Return: None
+ */
+void qdf_mem_zero(void *ptr, uint32_t num_bytes)
+{
+ if (0 == num_bytes) {
+ /* special case where ptr can be NULL */
+ return;
+ }
+
+ /* NULL with a non-zero length is a caller bug; logged, not asserted */
+ if (ptr == NULL) {
+ QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+ "%s called with NULL parameter ptr", __func__);
+ return;
+ }
+ memset(ptr, 0, num_bytes);
+}
+EXPORT_SYMBOL(qdf_mem_zero);
+
+/**
+ * qdf_mem_set() - set (fill) memory with a specified byte value.
+ * @ptr: Pointer to memory that will be set
+ * @num_bytes: Number of bytes to be set
+ * @value: Byte set in memory
+ *
+ * NOTE: the argument order is (ptr, num_bytes, value), which is the
+ * reverse of memset(ptr, value, num_bytes) — easy to get wrong at
+ * call sites.
+ *
+ * Return: None
+ */
+void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
+{
+ if (ptr == NULL) {
+ qdf_print("%s called with NULL parameter ptr", __func__);
+ return;
+ }
+ memset(ptr, value, num_bytes);
+}
+EXPORT_SYMBOL(qdf_mem_set);
+
+/**
+ * qdf_mem_move() - move memory
+ * @dst_addr: pointer to destination memory location (to move to)
+ * @src_addr: pointer to source memory location (to move from)
+ * @num_bytes: number of bytes to move.
+ *
+ * Move host memory from one location to another, similar to memmove in
+ * standard C. Note this function *does* handle overlapping
+ * source and destination memory locations.
+ *
+ * Return: None
+ */
+void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
+{
+ if (0 == num_bytes) {
+ /* special case where dst_addr or src_addr can be NULL */
+ return;
+ }
+
+ /* NULL with a non-zero length is a caller bug: log, assert, bail */
+ if ((dst_addr == NULL) || (src_addr == NULL)) {
+ QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+ "%s called with NULL parameter, source:%p destination:%p",
+ __func__, src_addr, dst_addr);
+ QDF_ASSERT(0);
+ return;
+ }
+ memmove(dst_addr, src_addr, num_bytes);
+}
+EXPORT_SYMBOL(qdf_mem_move);
+
+/**
+ * qdf_mem_alloc_consistent() - allocates consistent qdf memory
+ * @osdev: OS device handle
+ * @dev: Pointer to device handle
+ * @size: Size to be allocated
+ * @phy_addr: Physical address (filled in on success)
+ *
+ * Return: pointer of allocated memory or null if memory alloc fails
+ */
+void *qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev, qdf_size_t size,
+ qdf_dma_addr_t *phy_addr)
+{
+#if defined(A_SIMOS_DEVHOST)
+ /* Simulation host: no real DMA; fake the physical address with the
+ * virtual one and warn once. */
+ static int first = 1;
+ void *vaddr;
+
+ if (first) {
+ first = 0;
+ qdf_print("Warning: bypassing %s\n", __func__);
+ }
+
+ vaddr = qdf_mem_malloc(size);
+ *phy_addr = ((qdf_dma_addr_t) vaddr);
+ return vaddr;
+#else
+ /* NOTE(review): gfp_t would be the idiomatic type here, not int */
+ int flags = GFP_KERNEL;
+ void *alloc_mem = NULL;
+
+ /* Cannot sleep in interrupt/atomic context: use GFP_ATOMIC there */
+ if (in_interrupt() || irqs_disabled() || in_atomic())
+ flags = GFP_ATOMIC;
+
+ alloc_mem = dma_alloc_coherent(dev, size, phy_addr, flags);
+ if (alloc_mem == NULL)
+ qdf_print("%s Warning: unable to alloc consistent memory of size %zu!\n",
+ __func__, size);
+ return alloc_mem;
+#endif
+}
+EXPORT_SYMBOL(qdf_mem_alloc_consistent);
+
+/**
+ * qdf_mem_free_consistent() - free consistent qdf memory
+ * @osdev: OS device handle
+ * @dev: Pointer to device handle
+ * @size: Size of the memory being freed
+ * @vaddr: virtual address
+ * @phy_addr: Physical address
+ * @memctx: DMA context (unused in this implementation)
+ *
+ * Return: none
+ */
+inline void qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
+ qdf_size_t size, void *vaddr,
+ qdf_dma_addr_t phy_addr,
+ qdf_dma_context_t memctx)
+{
+#if defined(A_SIMOS_DEVHOST)
+ /* Simulation host: memory came from qdf_mem_malloc(); warn once */
+ static int first = 1;
+
+ if (first) {
+ first = 0;
+ qdf_print("Warning: bypassing %s\n", __func__);
+ }
+ qdf_mem_free(vaddr);
+ return;
+#else
+ dma_free_coherent(dev, size, vaddr, phy_addr);
+#endif
+}
+EXPORT_SYMBOL(qdf_mem_free_consistent);
+
+/**
+ * qdf_mem_dma_sync_single_for_device() - assign memory to device
+ * @osdev: OS device handle
+ * @bus_addr: dma address to give to the device
+ * @size: Size of the memory block
+ * @direction: direction data will be dma'ed
+ *
+ * Assign memory to the remote device.
+ * The cache lines are flushed to ram or invalidated as needed.
+ *
+ * Return: none
+ */
+inline void
+qdf_mem_dma_sync_single_for_device(qdf_device_t osdev, qdf_dma_addr_t bus_addr,
+ qdf_size_t size,
+ enum dma_data_direction direction)
+{
+ /* Thin wrapper over the kernel's streaming-DMA sync API */
+ dma_sync_single_for_device(osdev->dev, bus_addr, size, direction);
+}
+EXPORT_SYMBOL(qdf_mem_dma_sync_single_for_device);
+
diff --git a/qdf/linux/src/qdf_module.c b/qdf/linux/src/qdf_module.c
new file mode 100644
index 000000000000..ce4db058d13c
--- /dev/null
+++ b/qdf/linux/src/qdf_module.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2012-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: qdf_module.c
+ * Linux-specific QDF module initialization (init/exit hooks)
+ */
+
+#include <linux/module.h>
+#include <qdf_perf.h>
+
+MODULE_AUTHOR("Qualcomm Atheros Inc.");
+MODULE_DESCRIPTION("Qualcomm Atheros Device Framework Module");
+MODULE_LICENSE("Dual BSD/GPL");
+
+#ifndef EXPORT_SYMTAB
+#define EXPORT_SYMTAB
+#endif
+
+/**
+ * qdf_mod_init() - module initialization
+ *
+ * Brings up the QDF performance profiling subsystem.
+ *
+ * Return: 0 on success
+ */
+static int __init
+qdf_mod_init(void)
+{
+ qdf_perfmod_init();
+ return 0;
+}
+module_init(qdf_mod_init);
+
+/**
+ * qdf_mod_exit() - module remove
+ *
+ * Tears down the QDF performance profiling subsystem.
+ *
+ * Return: None
+ */
+static void __exit
+qdf_mod_exit(void)
+{
+ qdf_perfmod_exit();
+}
+module_exit(qdf_mod_exit);
+
diff --git a/qdf/linux/src/qdf_nbuf.c b/qdf/linux/src/qdf_nbuf.c
new file mode 100644
index 000000000000..6c27b9f8fd66
--- /dev/null
+++ b/qdf/linux/src/qdf_nbuf.c
@@ -0,0 +1,1536 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: qdf_nbuf.c
+ * QCA driver framework(QDF) network buffer management APIs
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/skbuff.h>
+#include <linux/module.h>
+#include <qdf_types.h>
+#include <qdf_nbuf.h>
+#include <qdf_mem.h>
+#include <qdf_status.h>
+#include <qdf_lock.h>
+#include <qdf_trace.h>
+
+#if defined(FEATURE_TSO)
+#include <net/ipv6.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#endif /* FEATURE_TSO */
+
+/* Packet Counter */
+static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
+static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
+
+/**
+ * qdf_nbuf_tx_desc_count_display() - Displays the packet counter
+ *
+ * Each printed value is the difference between counters of adjacent
+ * pipeline stages, i.e. presumably the number of packets currently held
+ * at that layer — TODO confirm against the counter-update call sites.
+ *
+ * Return: none
+ */
+void qdf_nbuf_tx_desc_count_display(void)
+{
+ qdf_print("Current Snapshot of the Driver:\n");
+ qdf_print("Data Packets:\n");
+ qdf_print("HDD %d TXRX_Q %d TXRX %d HTT %d",
+ nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
+ (nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
+ nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
+ nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
+ nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
+ nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
+ nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
+ nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
+ nbuf_tx_data[QDF_NBUF_TX_PKT_HTT] -
+ nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
+ qdf_print(" HTC %d HIF %d CE %d TX_COMP %d\n",
+ nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
+ nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
+ nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
+ nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
+ nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
+ nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
+ nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
+ qdf_print("Mgmt Packets:\n");
+ qdf_print("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d\n",
+ nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
+ nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
+ nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
+ nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
+ nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
+ nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
+ nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
+ nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
+ nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
+ nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
+ nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
+ nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
+ nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
+}
+EXPORT_SYMBOL(qdf_nbuf_tx_desc_count_display);
+
+/**
+ * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
+ * @packet_type : packet type either mgmt/data
+ * @current_state : layer at which the packet currently present
+ *
+ * Increments the per-layer counter for the given packet class; any
+ * other packet type (e.g. WMI) is deliberately ignored.
+ *
+ * Return: none
+ */
+static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
+ uint8_t current_state)
+{
+ switch (packet_type) {
+ case QDF_NBUF_TX_PKT_MGMT_TRACK:
+ nbuf_tx_mgmt[current_state]++;
+ break;
+ case QDF_NBUF_TX_PKT_DATA_TRACK:
+ nbuf_tx_data[current_state]++;
+ break;
+ default:
+ break;
+ }
+}
+/*
+ * No EXPORT_SYMBOL here: the function is static inline, so it has no
+ * external linkage to export — EXPORT_SYMBOL on a static symbol breaks
+ * the module build. Out-of-module callers go through the exported
+ * qdf_nbuf_set_state() wrapper instead.
+ */
+
+/**
+ * qdf_nbuf_tx_desc_count_clear() - Clears packet counter for both data, mgmt
+ *
+ * Resets every per-layer counter in both tracking arrays to zero.
+ *
+ * Return: none
+ */
+void qdf_nbuf_tx_desc_count_clear(void)
+{
+ memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
+ memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
+}
+EXPORT_SYMBOL(qdf_nbuf_tx_desc_count_clear);
+
+/**
+ * qdf_nbuf_set_state() - Updates the packet state
+ * @nbuf: network buffer
+ * @current_state : layer at which the packet currently is
+ *
+ * This function updates the packet state to the layer at which the packet
+ * currently is
+ *
+ * Return: none
+ */
+void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
+{
+ /*
+ * Only Mgmt, Data Packets are tracked. WMI messages
+ * such as scan commands are not tracked
+ */
+ uint8_t packet_type;
+ packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);
+
+ if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
+ (packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
+ return;
+ }
+ /* Record the state on the skb itself, then bump the layer counter */
+ QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
+ qdf_nbuf_tx_desc_count_update(packet_type,
+ current_state);
+}
+EXPORT_SYMBOL(qdf_nbuf_set_state);
+
+/* globals do not need to be initialized to NULL/0 */
+/* Trace callback registered via __qdf_nbuf_reg_trace_cb() */
+qdf_nbuf_trace_update_t qdf_trace_update_cb;
+
+/**
+ * __qdf_nbuf_alloc() - Allocate nbuf
+ * @osdev: Device handle
+ * @size: Netbuf requested size
+ * @reserve: headroom to start with
+ * @align: Align
+ * @prio: Priority
+ *
+ * This allocates an nbuf aligns if needed and reserves some space in the front,
+ * since the reserve is done after alignment the reserve value if being
+ * unaligned will result in an unaligned address.
+ *
+ * Return: nbuf or %NULL if no memory
+ */
+struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
+ int align, int prio)
+{
+ struct sk_buff *skb;
+ unsigned long offset;
+
+ /* Over-allocate so the data pointer can be shifted into alignment */
+ if (align)
+ size += (align - 1);
+
+ skb = dev_alloc_skb(size);
+
+ if (!skb) {
+ pr_err("ERROR:NBUF alloc failed\n");
+ return NULL;
+ }
+ /* Start with a clean control block */
+ memset(skb->cb, 0x0, sizeof(skb->cb));
+
+ /*
+ * The default is for netbuf fragments to be interpreted
+ * as wordstreams rather than bytestreams.
+ */
+ QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
+ QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
+
+ /*
+ * XXX:how about we reserve first then align
+ * Align & make sure that the tail & data are adjusted properly
+ */
+
+ if (align) {
+ offset = ((unsigned long)skb->data) % align;
+ if (offset)
+ skb_reserve(skb, align - offset);
+ }
+
+ /*
+ * NOTE:alloc doesn't take responsibility if reserve unaligns the data
+ * pointer
+ */
+ skb_reserve(skb, reserve);
+
+ return skb;
+}
+EXPORT_SYMBOL(__qdf_nbuf_alloc);
+
+/**
+ * __qdf_nbuf_free() - free the nbuf its interrupt safe
+ * @skb: Pointer to network buffer
+ *
+ * IPA-owned skbs currently trigger QDF_BUG() because the IPA cleanup
+ * path is not implemented yet (see comment below).
+ *
+ * Return: none
+ */
+void __qdf_nbuf_free(struct sk_buff *skb)
+{
+ if (qdf_nbuf_ipa_owned_get(skb))
+ /* IPA cleanup function will need to be called here */
+ QDF_BUG(1);
+ else
+ dev_kfree_skb_any(skb);
+}
+EXPORT_SYMBOL(__qdf_nbuf_free);
+
+/**
+ * __qdf_nbuf_map() - map a buffer to local bus address space
+ * @osdev: OS device
+ * @skb: Pointer to network buffer
+ * @dir: Direction
+ *
+ * The debug build additionally asserts that the skb is unfragmented,
+ * since only a single-fragment mapping is supported.
+ *
+ * Return: QDF_STATUS
+ */
+#ifdef QDF_OS_DEBUG
+QDF_STATUS
+__qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
+{
+ struct skb_shared_info *sh = skb_shinfo(skb);
+ qdf_assert((dir == QDF_DMA_TO_DEVICE)
+ || (dir == QDF_DMA_FROM_DEVICE));
+
+ /*
+ * Assume there's only a single fragment.
+ * To support multiple fragments, it would be necessary to change
+ * qdf_nbuf_t to be a separate object that stores meta-info
+ * (including the bus address for each fragment) and a pointer
+ * to the underlying sk_buff.
+ */
+ qdf_assert(sh->nr_frags == 0);
+
+ return __qdf_nbuf_map_single(osdev, skb, dir);
+}
+EXPORT_SYMBOL(__qdf_nbuf_map);
+
+#else
+QDF_STATUS
+__qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
+{
+ return __qdf_nbuf_map_single(osdev, skb, dir);
+}
+EXPORT_SYMBOL(__qdf_nbuf_map);
+#endif
+/**
+ * __qdf_nbuf_unmap() - to unmap a previously mapped buf
+ * @osdev: OS device
+ * @skb: Pointer to network buffer
+ * @dir: dma direction
+ *
+ * Return: none
+ */
+void
+__qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
+ qdf_dma_dir_t dir)
+{
+ qdf_assert((dir == QDF_DMA_TO_DEVICE)
+ || (dir == QDF_DMA_FROM_DEVICE));
+
+ /*
+ * Assume there's a single fragment.
+ * If this is not true, the assertion in __qdf_nbuf_map will catch it.
+ */
+ __qdf_nbuf_unmap_single(osdev, skb, dir);
+}
+EXPORT_SYMBOL(__qdf_nbuf_unmap);
+
+/**
+ * __qdf_nbuf_map_single() - map a single buffer to local bus address space
+ * @osdev: OS device
+ * @buf: Pointer to network buffer
+ * @dir: Direction
+ *
+ * Stores the resulting bus address in the skb control block
+ * (QDF_NBUF_CB_PADDR).
+ *
+ * Return: QDF_STATUS
+ */
+#ifdef A_SIMOS_DEVHOST
+QDF_STATUS
+__qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
+{
+ qdf_dma_addr_t paddr;
+
+ /* Simulation host: reuse the virtual address as the "bus" address */
+ QDF_NBUF_CB_PADDR(buf) = paddr = buf->data;
+ return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(__qdf_nbuf_map_single);
+#else
+QDF_STATUS
+__qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
+{
+ qdf_dma_addr_t paddr;
+
+ /* assume that the OS only provides a single fragment */
+ QDF_NBUF_CB_PADDR(buf) = paddr =
+ dma_map_single(osdev->dev, buf->data,
+ skb_end_pointer(buf) - buf->data, dir);
+ return dma_mapping_error(osdev->dev, paddr)
+ ? QDF_STATUS_E_FAILURE
+ : QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(__qdf_nbuf_map_single);
+#endif
+/**
+ * __qdf_nbuf_unmap_single() - unmap a previously mapped buf
+ * @osdev: OS device
+ * @buf: Pointer to network buffer
+ * @dir: Direction
+ *
+ * Uses the bus address stored by __qdf_nbuf_map_single() in the skb
+ * control block; no-op on the simulation host.
+ *
+ * Return: none
+ */
+#if defined(A_SIMOS_DEVHOST)
+void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
+ qdf_dma_dir_t dir)
+{
+ return;
+}
+#else
+void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
+ qdf_dma_dir_t dir)
+{
+ dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
+ skb_end_pointer(buf) - buf->data, dir);
+}
+#endif
+EXPORT_SYMBOL(__qdf_nbuf_unmap_single);
+
+/**
+ * __qdf_nbuf_set_rx_cksum() - set rx checksum
+ * @skb: Pointer to network buffer
+ * @cksum: Pointer to checksum value
+ *
+ * Translates the QDF L4 checksum result into the corresponding
+ * skb->ip_summed state.
+ *
+ * Return: QDF_STATUS (QDF_STATUS_E_NOSUPPORT on unknown checksum type)
+ */
+QDF_STATUS
+__qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
+{
+ switch (cksum->l4_result) {
+ case QDF_NBUF_RX_CKSUM_NONE:
+ skb->ip_summed = CHECKSUM_NONE;
+ break;
+ case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
+ /* Hardware supplied a partial checksum; carry its value */
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum = cksum->val;
+ break;
+ default:
+ pr_err("Unknown checksum type\n");
+ qdf_assert(0);
+ return QDF_STATUS_E_NOSUPPORT;
+ }
+ return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(__qdf_nbuf_set_rx_cksum);
+
+/**
+ * __qdf_nbuf_get_tx_cksum() - get tx checksum
+ * @skb: Pointer to network buffer
+ *
+ * Maps skb->ip_summed to the QDF TX checksum enum; unknown states fall
+ * back to QDF_NBUF_TX_CKSUM_NONE.
+ *
+ * Return: TX checksum value
+ */
+qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
+{
+ switch (skb->ip_summed) {
+ case CHECKSUM_NONE:
+ return QDF_NBUF_TX_CKSUM_NONE;
+ case CHECKSUM_PARTIAL:
+ /* XXX ADF and Linux checksum don't map with 1-to-1. This is
+ * not 100% correct */
+ return QDF_NBUF_TX_CKSUM_TCP_UDP;
+ case CHECKSUM_COMPLETE:
+ return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
+ default:
+ return QDF_NBUF_TX_CKSUM_NONE;
+ }
+}
+EXPORT_SYMBOL(__qdf_nbuf_get_tx_cksum);
+
+/**
+ * __qdf_nbuf_get_tid() - get tid
+ * @skb: Pointer to network buffer
+ *
+ * NOTE(review): skb->priority is a 32-bit field; the uint8_t return
+ * truncates it — presumably TIDs always fit in one byte, but confirm.
+ *
+ * Return: tid
+ */
+uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
+{
+ return skb->priority;
+}
+EXPORT_SYMBOL(__qdf_nbuf_get_tid);
+
+/**
+ * __qdf_nbuf_set_tid() - set tid
+ * @skb: Pointer to network buffer
+ * @tid: TID value to store in skb->priority
+ *
+ * Return: none
+ */
+void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
+{
+ skb->priority = tid;
+}
+EXPORT_SYMBOL(__qdf_nbuf_set_tid);
+
+/**
+ * __qdf_nbuf_get_exemption_type() - get exemption type
+ * @skb: Pointer to network buffer
+ *
+ * On Linux there is no per-skb exemption info, so this always reports
+ * no exemption.
+ *
+ * Return: QDF_NBUF_EXEMPT_NO_EXEMPTION
+ */
+uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
+{
+ return QDF_NBUF_EXEMPT_NO_EXEMPTION;
+}
+EXPORT_SYMBOL(__qdf_nbuf_get_exemption_type);
+
+/**
+ * __qdf_nbuf_reg_trace_cb() - register trace callback
+ * @cb_func_ptr: Pointer to trace callback function
+ *
+ * Stores the callback in the module-global qdf_trace_update_cb, used by
+ * __qdf_nbuf_trace_update().
+ *
+ * Return: none
+ */
+void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
+{
+ qdf_trace_update_cb = cb_func_ptr;
+ return;
+}
+EXPORT_SYMBOL(__qdf_nbuf_reg_trace_cb);
+
+#ifdef QCA_PKT_PROTO_TRACE
+/**
+ * __qdf_nbuf_trace_update() - update trace event
+ * @buf: Pointer to network buffer
+ * @event_string: Event description string to emit with the protocol tag
+ *
+ * Appends a short protocol tag (EAPOL/DHCP/mgmt-action) to @event_string
+ * and hands the result to the registered trace callback. No-op if no
+ * callback is registered or the packet carries no traced protocol type.
+ *
+ * Return: none
+ */
+void __qdf_nbuf_trace_update(struct sk_buff *buf, char *event_string)
+{
+ char string_buf[QDF_NBUF_PKT_TRAC_MAX_STRING];
+
+ if ((!qdf_trace_update_cb) || (!event_string))
+ return;
+
+ if (!qdf_nbuf_trace_get_proto_type(buf))
+ return;
+
+ /* Buffer over flow */
+ if (QDF_NBUF_PKT_TRAC_MAX_STRING <=
+ (qdf_str_len(event_string) + QDF_NBUF_PKT_TRAC_PROTO_STRING)) {
+ return;
+ }
+
+ qdf_mem_zero(string_buf, QDF_NBUF_PKT_TRAC_MAX_STRING);
+ qdf_mem_copy(string_buf, event_string, qdf_str_len(event_string));
+ if (QDF_NBUF_PKT_TRAC_TYPE_EAPOL & qdf_nbuf_trace_get_proto_type(buf)) {
+ qdf_mem_copy(string_buf + qdf_str_len(event_string),
+ "EPL", QDF_NBUF_PKT_TRAC_PROTO_STRING);
+ } else if (QDF_NBUF_PKT_TRAC_TYPE_DHCP &
+ qdf_nbuf_trace_get_proto_type(buf)) {
+ qdf_mem_copy(string_buf + qdf_str_len(event_string),
+ "DHC", QDF_NBUF_PKT_TRAC_PROTO_STRING);
+ } else if (QDF_NBUF_PKT_TRAC_TYPE_MGMT_ACTION &
+ qdf_nbuf_trace_get_proto_type(buf)) {
+ qdf_mem_copy(string_buf + qdf_str_len(event_string),
+ "MACT", QDF_NBUF_PKT_TRAC_PROTO_STRING);
+ }
+
+ qdf_trace_update_cb(string_buf);
+ return;
+}
+EXPORT_SYMBOL(__qdf_nbuf_trace_update);
+#endif /* QCA_PKT_PROTO_TRACE */
+
+#ifdef MEMORY_DEBUG
+/* Number of hash buckets; must stay a power of two (see hash function) */
+#define QDF_NET_BUF_TRACK_MAX_SIZE (1024)
+
+/**
+ * struct qdf_nbuf_track_t - Network buffer track structure
+ *
+ * @p_next: Pointer to next node in the same hash bucket
+ * @net_buf: Pointer to network buffer being tracked
+ * @file_name: File name of the allocation site
+ * @line_num: Line number of the allocation site
+ * @size: Size of the allocation
+ */
+struct qdf_nbuf_track_t {
+ struct qdf_nbuf_track_t *p_next;
+ qdf_nbuf_t net_buf;
+ uint8_t *file_name;
+ uint32_t line_num;
+ size_t size;
+};
+
+/* Protects gp_qdf_net_buf_track_tbl and all nodes chained from it */
+spinlock_t g_qdf_net_buf_track_lock;
+typedef struct qdf_nbuf_track_t QDF_NBUF_TRACK;
+
+/* Hash table of tracked skbs, keyed by skb pointer */
+QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
+
+/**
+ * qdf_net_buf_debug_init() - initialize network buffer debug functionality
+ *
+ * QDF network buffer debug feature tracks all SKBs allocated by WLAN driver
+ * in a hash table and when driver is unloaded it reports about leaked SKBs.
+ * WLAN driver module whose allocated SKB is freed by network stack are
+ * supposed to call qdf_net_buf_debug_release_skb() such that the SKB is not
+ * reported as memory leak.
+ *
+ * Return: none
+ */
+void qdf_net_buf_debug_init(void)
+{
+ uint32_t i;
+ unsigned long irq_flag;
+
+ spin_lock_init(&g_qdf_net_buf_track_lock);
+
+ spin_lock_irqsave(&g_qdf_net_buf_track_lock, irq_flag);
+
+ /* Empty every hash bucket */
+ for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++)
+ gp_qdf_net_buf_track_tbl[i] = NULL;
+
+ spin_unlock_irqrestore(&g_qdf_net_buf_track_lock, irq_flag);
+
+ return;
+}
+EXPORT_SYMBOL(qdf_net_buf_debug_init);
+
+/**
+ * qdf_net_buf_debug_exit() - exit network buffer debug functionality
+ *
+ * Exit network buffer tracking debug functionality and log SKB memory leaks
+ * (any node still in the table is, by definition, an unfreed SKB).
+ * Note: this only reports leaks; qdf_net_buf_debug_clean() frees the nodes.
+ *
+ * Return: none
+ */
+void qdf_net_buf_debug_exit(void)
+{
+ uint32_t i;
+ unsigned long irq_flag;
+ QDF_NBUF_TRACK *p_node;
+ QDF_NBUF_TRACK *p_prev;
+
+ spin_lock_irqsave(&g_qdf_net_buf_track_lock, irq_flag);
+
+ for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
+ p_node = gp_qdf_net_buf_track_tbl[i];
+ while (p_node) {
+ p_prev = p_node;
+ p_node = p_node->p_next;
+ qdf_print(
+ "SKB buf memory Leak@ File %s, @Line %d, size %zu\n",
+ p_prev->file_name, p_prev->line_num,
+ p_prev->size);
+ }
+ }
+
+ spin_unlock_irqrestore(&g_qdf_net_buf_track_lock, irq_flag);
+
+ return;
+}
+EXPORT_SYMBOL(qdf_net_buf_debug_exit);
+
+/**
+ * qdf_net_buf_debug_clean() - clean up network buffer debug functionality
+ *
+ * Frees every tracking node left in the hash table (the nodes only;
+ * the tracked SKBs themselves are not touched).
+ *
+ * Return: none
+ */
+void qdf_net_buf_debug_clean(void)
+{
+ uint32_t i;
+ unsigned long irq_flag;
+ QDF_NBUF_TRACK *p_node;
+ QDF_NBUF_TRACK *p_prev;
+
+ spin_lock_irqsave(&g_qdf_net_buf_track_lock, irq_flag);
+
+ for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
+ p_node = gp_qdf_net_buf_track_tbl[i];
+ while (p_node) {
+ p_prev = p_node;
+ p_node = p_node->p_next;
+ qdf_mem_free(p_prev);
+ }
+ }
+
+ spin_unlock_irqrestore(&g_qdf_net_buf_track_lock, irq_flag);
+
+ return;
+}
+EXPORT_SYMBOL(qdf_net_buf_debug_clean);
+
+/**
+ * qdf_net_buf_debug_hash() - hash network buffer pointer
+ * @net_buf: Network buffer to hash
+ *
+ * Masks the pointer with (table size - 1); correct only while
+ * QDF_NET_BUF_TRACK_MAX_SIZE is a power of two.
+ *
+ * Return: hash value (bucket index)
+ */
+uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
+{
+ uint32_t i;
+
+ i = (uint32_t) ((uintptr_t) net_buf & (QDF_NET_BUF_TRACK_MAX_SIZE - 1));
+
+ return i;
+}
+EXPORT_SYMBOL(qdf_net_buf_debug_hash);
+
+/**
+ * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
+ * @net_buf: Network buffer to look up
+ *
+ * NOTE(review): walks the table without taking g_qdf_net_buf_track_lock;
+ * callers (e.g. qdf_net_buf_debug_add_node()) appear to hold it — confirm
+ * before adding new call sites.
+ *
+ * Return: If skb is found in hash table then return pointer to network buffer
+ * else return %NULL
+ */
+QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
+{
+ uint32_t i;
+ QDF_NBUF_TRACK *p_node;
+
+ i = qdf_net_buf_debug_hash(net_buf);
+ p_node = gp_qdf_net_buf_track_tbl[i];
+
+ while (p_node) {
+ if (p_node->net_buf == net_buf)
+ return p_node;
+ p_node = p_node->p_next;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(qdf_net_buf_debug_look_up);
+
+/**
+ * qdf_net_buf_debug_add_node() - store skb in debug hash table
+ * @net_buf: Network buffer being tracked
+ * @size: Allocation size to record
+ * @file_name: File name of the allocation site
+ * @line_num: Line number of the allocation site
+ *
+ * A second add of the same skb is treated as a double allocation and
+ * asserts.
+ *
+ * Return: none
+ */
+void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
+ uint8_t *file_name, uint32_t line_num)
+{
+ uint32_t i;
+ unsigned long irq_flag;
+ QDF_NBUF_TRACK *p_node;
+
+ spin_lock_irqsave(&g_qdf_net_buf_track_lock, irq_flag);
+
+ i = qdf_net_buf_debug_hash(net_buf);
+ p_node = qdf_net_buf_debug_look_up(net_buf);
+
+ if (p_node) {
+ qdf_print(
+ "Double allocation of skb ! Already allocated from %s %d",
+ p_node->file_name, p_node->line_num);
+ QDF_ASSERT(0);
+ goto done;
+ } else {
+ p_node = (QDF_NBUF_TRACK *)
+ qdf_mem_malloc(sizeof(*p_node));
+ if (p_node) {
+ /* Insert at the head of bucket i */
+ p_node->net_buf = net_buf;
+ p_node->file_name = file_name;
+ p_node->line_num = line_num;
+ p_node->size = size;
+ p_node->p_next = gp_qdf_net_buf_track_tbl[i];
+ gp_qdf_net_buf_track_tbl[i] = p_node;
+ } else {
+ qdf_print(
+ "Mem alloc failed ! Could not track skb from %s %d of size %zu",
+ file_name, line_num, size);
+ QDF_ASSERT(0);
+ }
+ }
+
+done:
+ spin_unlock_irqrestore(&g_qdf_net_buf_track_lock, irq_flag);
+
+ return;
+}
+EXPORT_SYMBOL(qdf_net_buf_debug_add_node);
+
+/**
+ * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
+ * @net_buf: Network buffer whose tracking node should be removed
+ *
+ * An skb with no tracking node is treated as a double free (or a free
+ * of an untracked allocation) and asserts.
+ *
+ * Return: none
+ */
+void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
+{
+ uint32_t i;
+ bool found = false;
+ QDF_NBUF_TRACK *p_head;
+ QDF_NBUF_TRACK *p_node;
+ unsigned long irq_flag;
+ QDF_NBUF_TRACK *p_prev;
+
+ spin_lock_irqsave(&g_qdf_net_buf_track_lock, irq_flag);
+
+ i = qdf_net_buf_debug_hash(net_buf);
+ p_head = gp_qdf_net_buf_track_tbl[i];
+
+ /* Unallocated SKB */
+ if (!p_head)
+ goto done;
+
+ p_node = p_head;
+ /* Found at head of the table */
+ if (p_head->net_buf == net_buf) {
+ gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
+ qdf_mem_free((void *)p_node);
+ found = true;
+ goto done;
+ }
+
+ /* Search in collision list */
+ while (p_node) {
+ p_prev = p_node;
+ p_node = p_node->p_next;
+ if ((NULL != p_node) && (p_node->net_buf == net_buf)) {
+ /* Unlink p_node and free it */
+ p_prev->p_next = p_node->p_next;
+ qdf_mem_free((void *)p_node);
+ found = true;
+ break;
+ }
+ }
+
+done:
+ if (!found) {
+ qdf_print(
+ "Unallocated buffer ! Double free of net_buf %p ?",
+ net_buf);
+ QDF_ASSERT(0);
+ }
+
+ spin_unlock_irqrestore(&g_qdf_net_buf_track_lock, irq_flag);
+
+ return;
+}
+EXPORT_SYMBOL(qdf_net_buf_debug_delete_node);
+
+/**
+ * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
+ * @net_buf: Network buffer being handed to the network stack
+ *
+ * WLAN driver module whose allocated SKB is freed by network stack are
+ * supposed to call this API before returning SKB to network stack such
+ * that the SKB is not reported as memory leak.
+ *
+ * Return: none
+ */
+void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
+{
+ qdf_net_buf_debug_delete_node(net_buf);
+}
+EXPORT_SYMBOL(qdf_net_buf_debug_release_skb);
+
+#endif /*MEMORY_DEBUG */
+#if defined(FEATURE_TSO)
+
+/* TSO info common to all segments of one jumbo packet.
+ * "eit" = Ethernet + IP + TCP header region (see
+ * __qdf_nbuf_get_tso_cmn_seg_info()).
+ */
+struct qdf_tso_cmn_seg_info_t {
+ uint16_t ethproto; /* ethertype (network byte order) */
+ uint16_t ip_tcp_hdr_len; /* IP + TCP header length */
+ uint16_t l2_len; /* L2 (MAC) header length */
+ unsigned char *eit_hdr; /* start of Ethernet+IP+TCP headers */
+ unsigned int eit_hdr_len; /* total Ethernet+IP+TCP header length */
+ struct tcphdr *tcphdr; /* TCP header of the jumbo packet */
+ uint16_t ipv4_csum_en; /* enable IPv4 header checksum */
+ uint16_t tcp_ipv4_csum_en; /* enable TCP-over-IPv4 checksum */
+ uint16_t tcp_ipv6_csum_en; /* enable TCP-over-IPv6 checksum */
+ uint16_t ip_id; /* IPv4 ID (host order); unused for IPv6 */
+ uint32_t tcp_seq_num; /* starting TCP sequence number */
+};
+
+/**
+ * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
+ * information
+ * @skb: TSO jumbo packet
+ * @tso_info: Output; filled with the segment-common TSO information
+ *
+ * Get the TSO information that is common across all the TCP
+ * segments of the jumbo packet. Only TCP over IPv4/IPv6 is supported.
+ *
+ * Return: 0 - success 1 - failure
+ */
+uint8_t __qdf_nbuf_get_tso_cmn_seg_info(struct sk_buff *skb,
+ struct qdf_tso_cmn_seg_info_t *tso_info)
+{
+ /* Get ethernet type and ethernet header length */
+ tso_info->ethproto = vlan_get_protocol(skb);
+
+ /* Determine whether this is an IPv4 or IPv6 packet */
+ if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
+ /* for IPv4, get the IP ID and enable TCP and IP csum */
+ struct iphdr *ipv4_hdr = ip_hdr(skb);
+ tso_info->ip_id = ntohs(ipv4_hdr->id);
+ tso_info->ipv4_csum_en = 1;
+ tso_info->tcp_ipv4_csum_en = 1;
+ if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
+ qdf_print("TSO IPV4 proto 0x%x not TCP\n",
+ ipv4_hdr->protocol);
+ return 1;
+ }
+ } else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
+ /* for IPv6, enable TCP csum. No IP ID or IP csum */
+ tso_info->tcp_ipv6_csum_en = 1;
+ } else {
+ qdf_print("TSO: ethertype 0x%x is not supported!\n",
+ tso_info->ethproto);
+ return 1;
+ }
+
+ tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
+ tso_info->tcphdr = tcp_hdr(skb);
+ tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
+ /* get pointer to the ethernet + IP + TCP header and their length */
+ tso_info->eit_hdr = skb->data;
+ tso_info->eit_hdr_len = (skb_transport_header(skb)
+ - skb_mac_header(skb)) + tcp_hdrlen(skb);
+ tso_info->ip_tcp_hdr_len = tso_info->eit_hdr_len - tso_info->l2_len;
+ return 0;
+}
+EXPORT_SYMBOL(__qdf_nbuf_get_tso_cmn_seg_info);
+
+
+/**
+ * qdf_dmaaddr_to_32s - return high and low parts of dma_addr
+ * @dmaaddr: DMA address to split
+ * @lo: Output; receives the low 32 bits
+ * @hi: Output; receives the high 32 bits (0 on 32-bit DMA addresses)
+ *
+ * Returns the high and low 32-bits of the DMA addr in the provided ptrs.
+ * The sizeof() test is a compile-time constant, so only one branch is
+ * ever generated.
+ *
+ * Return: N/A
+ */
+static inline void qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
+ uint32_t *lo, uint32_t *hi)
+{
+ if (sizeof(dmaaddr) > sizeof(uint32_t)) {
+ *lo = (uint32_t) (dmaaddr & 0x0ffffffff);
+ *hi = (uint32_t) (dmaaddr >> 32);
+ } else {
+ *lo = dmaaddr;
+ *hi = 0;
+ }
+}
+/*
+ * No EXPORT_SYMBOL here: the function is static inline, so it has no
+ * external linkage to export — EXPORT_SYMBOL on a static symbol breaks
+ * the module build.
+ */
+
+
+/**
+ * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
+ * into segments
+ * @osdev: qdf device handle used to DMA-map the segment fragments
+ * @skb: network buffer to be segmented
+ * @tso_info: This is the output. The information about the
+ *      TSO segments will be populated within this.
+ *
+ * This function fragments a TCP jumbo packet into smaller
+ * segments to be transmitted by the driver. It chains the TSO
+ * segments created into a list.
+ *
+ * Return: number of TSO segments
+ */
+uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
+	struct qdf_tso_info_t *tso_info)
+{
+	/* common across all segments */
+	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
+
+	/* segment specific */
+	char *tso_frag_vaddr;
+	qdf_dma_addr_t tso_frag_paddr = 0;
+	uint32_t tso_frag_paddr_lo, tso_frag_paddr_hi;
+	uint32_t num_seg = 0;
+	struct qdf_tso_seg_elem_t *curr_seg;
+	const struct skb_frag_struct *frag = NULL;
+	uint32_t tso_frag_len = 0; /* tso segment's fragment length */
+	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous mem) */
+	uint32_t foffset = 0; /* offset into the skb's fragment */
+	uint32_t skb_proc = 0; /* bytes of the skb that have been processed */
+	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
+
+	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
+
+	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(skb, &tso_cmn_info))) {
+		qdf_print("TSO: error getting common segment info\n");
+		return 0;
+	}
+	curr_seg = tso_info->tso_seg_list;
+
+	/* length of the first chunk of data in the skb */
+	skb_proc = skb_frag_len = skb_headlen(skb);
+
+	/* the 0th tso segment's 0th fragment always contains the EIT header */
+	/* update the remaining skb fragment length and TSO segment length */
+	skb_frag_len -= tso_cmn_info.eit_hdr_len;
+	skb_proc -= tso_cmn_info.eit_hdr_len;
+
+	/* get the address to the next tso fragment */
+	tso_frag_vaddr = skb->data + tso_cmn_info.eit_hdr_len;
+	/* get the length of the next tso fragment */
+	tso_frag_len = min(skb_frag_len, tso_seg_size);
+	/* NOTE(review): DMA mapping results in this function are never
+	 * checked with dma_mapping_error() — confirm mapping failures are
+	 * impossible on the supported platforms */
+	tso_frag_paddr = dma_map_single(osdev->dev,
+		 tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);
+	qdf_dmaaddr_to_32s(tso_frag_paddr, &tso_frag_paddr_lo,
+			&tso_frag_paddr_hi);
+
+	num_seg = tso_info->num_segs;
+	tso_info->num_segs = 0;
+	tso_info->is_tso = 1;
+
+	while (num_seg && curr_seg) {
+		int i = 1; /* tso fragment index */
+		int j = 0; /* skb fragment index */
+		uint8_t more_tso_frags = 1;
+		uint8_t from_frag_table = 0;
+
+		/* Initialize the flags to 0 */
+		memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
+		tso_info->num_segs++;
+
+		/* The following fields remain the same across all segments of
+		   a jumbo packet */
+		curr_seg->seg.tso_flags.tso_enable = 1;
+		curr_seg->seg.tso_flags.partial_checksum_en = 0;
+		curr_seg->seg.tso_flags.ipv4_checksum_en =
+			tso_cmn_info.ipv4_csum_en;
+		curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
+			tso_cmn_info.tcp_ipv6_csum_en;
+		curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
+			tso_cmn_info.tcp_ipv4_csum_en;
+		curr_seg->seg.tso_flags.l2_len = 0;
+		curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
+		curr_seg->seg.num_frags = 0;
+
+		/* The following fields change for the segments */
+		curr_seg->seg.tso_flags.ip_id = tso_cmn_info.ip_id;
+		tso_cmn_info.ip_id++;
+
+		curr_seg->seg.tso_flags.syn = tso_cmn_info.tcphdr->syn;
+		curr_seg->seg.tso_flags.rst = tso_cmn_info.tcphdr->rst;
+		curr_seg->seg.tso_flags.psh = tso_cmn_info.tcphdr->psh;
+		curr_seg->seg.tso_flags.ack = tso_cmn_info.tcphdr->ack;
+		curr_seg->seg.tso_flags.urg = tso_cmn_info.tcphdr->urg;
+		curr_seg->seg.tso_flags.ece = tso_cmn_info.tcphdr->ece;
+		curr_seg->seg.tso_flags.cwr = tso_cmn_info.tcphdr->cwr;
+
+		curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info.tcp_seq_num;
+
+		/* First fragment for each segment always contains the ethernet,
+		   IP and TCP header */
+		curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info.eit_hdr;
+		curr_seg->seg.tso_frags[0].length = tso_cmn_info.eit_hdr_len;
+		tso_info->total_len = curr_seg->seg.tso_frags[0].length;
+		{
+			qdf_dma_addr_t mapped;
+			uint32_t lo, hi;
+
+			mapped = dma_map_single(osdev->dev,
+				tso_cmn_info.eit_hdr,
+				tso_cmn_info.eit_hdr_len, DMA_TO_DEVICE);
+			qdf_dmaaddr_to_32s(mapped, &lo, &hi);
+			curr_seg->seg.tso_frags[0].paddr_low_32 = lo;
+			curr_seg->seg.tso_frags[0].paddr_upper_16 =
+				(hi & 0xffff);
+		}
+		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
+		curr_seg->seg.num_frags++;
+
+		while (more_tso_frags) {
+			curr_seg->seg.tso_frags[i].vaddr = tso_frag_vaddr;
+			curr_seg->seg.tso_frags[i].length = tso_frag_len;
+			tso_info->total_len +=
+				 curr_seg->seg.tso_frags[i].length;
+			curr_seg->seg.tso_flags.ip_len +=
+				 curr_seg->seg.tso_frags[i].length;
+			curr_seg->seg.num_frags++;
+			skb_proc = skb_proc - curr_seg->seg.tso_frags[i].length;
+
+			/* increment the TCP sequence number */
+			tso_cmn_info.tcp_seq_num += tso_frag_len;
+			curr_seg->seg.tso_frags[i].paddr_upper_16 =
+				(tso_frag_paddr_hi & 0xffff);
+			curr_seg->seg.tso_frags[i].paddr_low_32 =
+				 tso_frag_paddr_lo;
+
+			/* if there is no more data left in the skb */
+			if (!skb_proc)
+				return tso_info->num_segs;
+
+			/* get the next payload fragment information */
+			/* check if there are more fragments in this segment */
+			if ((tso_seg_size - tso_frag_len)) {
+				more_tso_frags = 1;
+				i++;
+			} else {
+				more_tso_frags = 0;
+				/* reset i and the tso payload size */
+				i = 1;
+				tso_seg_size = skb_shinfo(skb)->gso_size;
+			}
+
+			/* if the next fragment is contiguous */
+			if (tso_frag_len < skb_frag_len) {
+				skb_frag_len = skb_frag_len - tso_frag_len;
+				/* step past the bytes just consumed BEFORE
+				 * recomputing tso_frag_len; advancing by the
+				 * new length (as the old code did) skipped or
+				 * repeated payload whenever the two lengths
+				 * differed */
+				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
+				tso_frag_len = min(skb_frag_len, tso_seg_size);
+				if (from_frag_table) {
+					tso_frag_paddr =
+						 skb_frag_dma_map(osdev->dev,
+							 frag, foffset,
+							 tso_frag_len,
+							 DMA_TO_DEVICE);
+					qdf_dmaaddr_to_32s(tso_frag_paddr,
+						&tso_frag_paddr_lo,
+						&tso_frag_paddr_hi);
+				} else {
+					tso_frag_paddr =
+						 dma_map_single(osdev->dev,
+							 tso_frag_vaddr,
+							 tso_frag_len,
+							 DMA_TO_DEVICE);
+					qdf_dmaaddr_to_32s(tso_frag_paddr,
+						&tso_frag_paddr_lo,
+						&tso_frag_paddr_hi);
+				}
+			} else { /* the next fragment is not contiguous */
+				tso_frag_len = min(skb_frag_len, tso_seg_size);
+				frag = &skb_shinfo(skb)->frags[j];
+				skb_frag_len = skb_frag_size(frag);
+
+				tso_frag_vaddr = skb_frag_address(frag);
+				tso_frag_paddr = skb_frag_dma_map(osdev->dev,
+					 frag, 0, tso_frag_len,
+					 DMA_TO_DEVICE);
+				qdf_dmaaddr_to_32s(tso_frag_paddr,
+						&tso_frag_paddr_lo,
+						&tso_frag_paddr_hi);
+				foffset += tso_frag_len;
+				from_frag_table = 1;
+				j++;
+			}
+		}
+		num_seg--;
+		/* if TCP FIN flag was set, set it in the last segment */
+		if (!num_seg)
+			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
+
+		curr_seg = curr_seg->next;
+	}
+	return tso_info->num_segs;
+}
+EXPORT_SYMBOL(__qdf_nbuf_get_tso_info);
+
+/**
+ * __qdf_nbuf_get_tso_num_seg() - compute the number of TSO
+ * segments a jumbo packet will be divided into
+ * @skb: network buffer to be inspected
+ *
+ * Walks the TCP payload (skb length minus the ethernet + IP + TCP
+ * headers) in gso_size steps, counting one segment per step and one
+ * for any trailing partial segment.
+ *
+ * Return: number of TSO segments (0 when there is no payload)
+ */
+uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
+{
+	uint32_t gso_size, tmp_len, num_segs = 0;
+
+	gso_size = skb_shinfo(skb)->gso_size;
+	/* payload length = total length - (ethernet + IP + TCP headers) */
+	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
+		+ tcp_hdrlen(skb));
+	while (tmp_len) {
+		num_segs++;
+		if (tmp_len > gso_size)
+			tmp_len -= gso_size;
+		else
+			break;
+	}
+	return num_segs;
+}
+EXPORT_SYMBOL(__qdf_nbuf_get_tso_num_seg);
+
+/**
+ * __qdf_nbuf_inc_users() - take an additional reference on the skb
+ * @skb: sk_buff handle
+ *
+ * Return: the same skb, with its user count incremented
+ */
+struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
+{
+	atomic_inc(&skb->users);
+	return skb;
+}
+EXPORT_SYMBOL(__qdf_nbuf_inc_users);
+
+#endif /* FEATURE_TSO */
+
+
+/**
+ * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
+ * @skb: sk_buff handle
+ *
+ * Thin wrapper over skb_get(); the discarded return value is @skb itself.
+ *
+ * Return: none
+ */
+
+void __qdf_nbuf_ref(struct sk_buff *skb)
+{
+	skb_get(skb);
+}
+EXPORT_SYMBOL(__qdf_nbuf_ref);
+
+/**
+ * __qdf_nbuf_shared() - Check whether the buffer is shared
+ * @skb: sk_buff buffer
+ *
+ * Thin wrapper over skb_shared().
+ *
+ * Return: true if more than one person has a reference to this buffer.
+ */
+int __qdf_nbuf_shared(struct sk_buff *skb)
+{
+	return skb_shared(skb);
+}
+EXPORT_SYMBOL(__qdf_nbuf_shared);
+
+/**
+ * __qdf_nbuf_dmamap_create() - create a DMA map.
+ * @osdev: qdf device handle
+ * @dmap: out-parameter receiving the freshly allocated map handle
+ *
+ * The returned map can later be used to map networking buffers. It
+ * needs space in the driver's software descriptor, is typically created
+ * while the driver instance is set up, and must exist before any
+ * qdf_nbuf_map() call that uses it. A driver-declared SG capability or
+ * bounce buffering is not handled here.
+ *
+ * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_NOMEM on allocation failure
+ */
+QDF_STATUS
+__qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
+{
+	/* zero-initialised so no stale segment data leaks to the user */
+	*dmap = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
+	if (!(*dmap))
+		return QDF_STATUS_E_NOMEM;
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(__qdf_nbuf_dmamap_create);
+/**
+ * __qdf_nbuf_dmamap_destroy() - delete a dma map
+ * @osdev: qdf device handle (unused on Linux)
+ * @dmap: dma map handle allocated by __qdf_nbuf_dmamap_create()
+ *
+ * Return: none
+ */
+void
+__qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
+{
+	kfree(dmap);
+}
+EXPORT_SYMBOL(__qdf_nbuf_dmamap_destroy);
+
+/**
+ * __qdf_nbuf_map_nbytes_single() - map nbytes
+ * @osdev: os device
+ * @buf: buffer
+ * @dir: direction
+ * @nbytes: number of bytes
+ *
+ * Return: QDF_STATUS
+ */
+#ifdef A_SIMOS_DEVHOST
+QDF_STATUS __qdf_nbuf_map_nbytes_single(
+	qdf_device_t osdev, struct sk_buff *buf,
+	qdf_dma_dir_t dir, int nbytes)
+{
+	qdf_dma_addr_t paddr;
+
+	/* simulation host: reuse the virtual address as the "physical" one.
+	 * NOTE(review): the uint32_t cast truncates pointers on a 64-bit
+	 * host — confirm A_SIMOS_DEVHOST builds are 32-bit only */
+	QDF_NBUF_CB_PADDR(buf) = paddr = (uint32_t) buf->data;
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(__qdf_nbuf_map_nbytes_single);
+#else
+QDF_STATUS __qdf_nbuf_map_nbytes_single(
+	qdf_device_t osdev, struct sk_buff *buf,
+	qdf_dma_dir_t dir, int nbytes)
+{
+	qdf_dma_addr_t paddr;
+
+	/* assume that the OS only provides a single fragment */
+	QDF_NBUF_CB_PADDR(buf) = paddr =
+		dma_map_single(osdev->dev, buf->data,
+			nbytes, dir);
+	return dma_mapping_error(osdev->dev, paddr) ?
+		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(__qdf_nbuf_map_nbytes_single);
+#endif
+/**
+ * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
+ * @osdev: os device
+ * @buf: buffer
+ * @dir: direction
+ * @nbytes: number of bytes
+ *
+ * Return: none
+ */
+#if defined(A_SIMOS_DEVHOST)
+/* simulation host: nothing was really mapped, so nothing to unmap */
+void
+__qdf_nbuf_unmap_nbytes_single(
+	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
+{
+	return;
+}
+EXPORT_SYMBOL(__qdf_nbuf_unmap_nbytes_single);
+
+#else
+void
+__qdf_nbuf_unmap_nbytes_single(
+	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
+{
+	/* a zero paddr means the buffer was never mapped; nothing to undo */
+	if (0 == QDF_NBUF_CB_PADDR(buf)) {
+		qdf_print("ERROR: NBUF mapped physical address is NULL\n");
+		return;
+	}
+	dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
+			nbytes, dir);
+}
+EXPORT_SYMBOL(__qdf_nbuf_unmap_nbytes_single);
+#endif
+/**
+ * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
+ * @osdev: os device
+ * @skb: skb handle
+ * @dir: dma direction
+ * @nbytes: number of bytes to be mapped
+ *
+ * Return: QDF_STATUS
+ */
+#ifdef QDF_OS_DEBUG
+QDF_STATUS
+__qdf_nbuf_map_nbytes(
+	qdf_device_t osdev,
+	struct sk_buff *skb,
+	qdf_dma_dir_t dir,
+	int nbytes)
+{
+	struct skb_shared_info *sh = skb_shinfo(skb);
+	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
+
+	/*
+	 * Assume there's only a single fragment.
+	 * To support multiple fragments, it would be necessary to change
+	 * qdf_nbuf_t to be a separate object that stores meta-info
+	 * (including the bus address for each fragment) and a pointer
+	 * to the underlying sk_buff.
+	 */
+	qdf_assert(sh->nr_frags == 0);
+
+	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
+}
+EXPORT_SYMBOL(__qdf_nbuf_map_nbytes);
+#else
+/* non-debug build: skip the assertions and map directly */
+QDF_STATUS
+__qdf_nbuf_map_nbytes(
+	qdf_device_t osdev,
+	struct sk_buff *skb,
+	qdf_dma_dir_t dir,
+	int nbytes)
+{
+	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
+}
+EXPORT_SYMBOL(__qdf_nbuf_map_nbytes);
+#endif
+/**
+ * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
+ * @osdev: OS device
+ * @skb: skb handle
+ * @dir: direction
+ * @nbytes: number of bytes
+ *
+ * Return: none
+ */
+void
+__qdf_nbuf_unmap_nbytes(
+	qdf_device_t osdev,
+	struct sk_buff *skb,
+	qdf_dma_dir_t dir,
+	int nbytes)
+{
+	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
+
+	/*
+	 * Assume there's a single fragment.
+	 * If this is not true, the assertion in __qdf_nbuf_map_nbytes
+	 * will catch it.
+	 */
+	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
+}
+EXPORT_SYMBOL(__qdf_nbuf_unmap_nbytes);
+
+/**
+ * __qdf_nbuf_dma_map_info() - copy a DMA map's segment list into @sg
+ * @bmap: dma map to read from; must already be mapped
+ * @sg: destination scatter/gather info
+ *
+ * Return: none
+ */
+void
+__qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
+{
+	size_t seg_bytes;
+
+	qdf_assert(bmap->mapped);
+	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
+
+	sg->nsegs = bmap->nsegs;
+	seg_bytes = bmap->nsegs * sizeof(struct __qdf_segment);
+	memcpy(sg->dma_segs, bmap->seg, seg_bytes);
+}
+EXPORT_SYMBOL(__qdf_nbuf_dma_map_info);
+/**
+ * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
+ * specified by the index
+ * @skb: sk buff
+ * @sg: scatter/gather list of all the frags
+ *
+ * Return: none
+ */
+#if defined(__QDF_SUPPORT_FRAG_MEM)
+void
+__qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg)
+{
+	int i;
+	struct skb_shared_info *sh;
+
+	qdf_assert(skb != NULL);
+	/* previously 'sh' was used without ever being declared/initialised */
+	sh = skb_shinfo(skb);
+
+	/* segment 0 is always the linear part of the skb */
+	sg->sg_segs[0].vaddr = skb->data;
+	sg->sg_segs[0].len = skb->len;
+
+	/* one entry per page fragment, starting at index 1; the bound is
+	 * checked BEFORE writing so an oversized skb cannot overrun sg */
+	for (i = 1; i <= sh->nr_frags; i++) {
+		skb_frag_t *f = &sh->frags[i - 1];
+
+		qdf_assert(i < QDF_MAX_SGLIST);
+		sg->sg_segs[i].vaddr = (uint8_t *)(page_address(f->page) +
+						   f->page_offset);
+		sg->sg_segs[i].len = f->size;
+	}
+
+	/* linear segment plus one per fragment (the old "nsegs += i" after
+	 * the loop double-counted the linear segment) */
+	sg->nsegs = sh->nr_frags + 1;
+}
+EXPORT_SYMBOL(__qdf_nbuf_frag_info);
+#else
+#ifdef QDF_OS_DEBUG
+/* debug build: verify the single-fragment assumption before reporting */
+void
+__qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg)
+{
+
+	struct skb_shared_info *sh = skb_shinfo(skb);
+
+	qdf_assert(skb != NULL);
+	sg->sg_segs[0].vaddr = skb->data;
+	sg->sg_segs[0].len = skb->len;
+	sg->nsegs = 1;
+
+	/* only the linear part is supported in this configuration */
+	qdf_assert(sh->nr_frags == 0);
+}
+EXPORT_SYMBOL(__qdf_nbuf_frag_info);
+#else
+/* release build: report the linear part as the only segment */
+void
+__qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t *sg)
+{
+	sg->sg_segs[0].vaddr = skb->data;
+	sg->sg_segs[0].len = skb->len;
+	sg->nsegs = 1;
+}
+EXPORT_SYMBOL(__qdf_nbuf_frag_info);
+#endif
+#endif
+/**
+ * __qdf_nbuf_get_frag_size() - length in bytes of one skb page fragment
+ * @nbuf: sk buffer
+ * @cur_frag: index of the fragment being queried
+ *
+ * Return: size of the requested fragment in bytes
+ */
+uint32_t
+__qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
+{
+	return skb_frag_size(&skb_shinfo(nbuf)->frags[cur_frag]);
+}
+EXPORT_SYMBOL(__qdf_nbuf_get_frag_size);
+
+/**
+ * __qdf_nbuf_frag_map() - dma map frag
+ * @osdev: os device
+ * @nbuf: sk buff
+ * @offset: offset into the fragment
+ * @dir: direction
+ * @cur_frag: index of the fragment to map
+ *
+ * Return: QDF status
+ */
+#ifdef A_SIMOS_DEVHOST
+QDF_STATUS __qdf_nbuf_frag_map(
+	qdf_device_t osdev, __qdf_nbuf_t nbuf,
+	int offset, qdf_dma_dir_t dir, int cur_frag)
+{
+	/* simulation host: the virtual address doubles as the "DMA" address;
+	 * the unused paddr/frag_len locals are gone */
+	QDF_NBUF_CB_PADDR(nbuf) = (int32_t) nbuf->data;
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(__qdf_nbuf_frag_map);
+#else
+QDF_STATUS __qdf_nbuf_frag_map(
+	qdf_device_t osdev, __qdf_nbuf_t nbuf,
+	int offset, qdf_dma_dir_t dir, int cur_frag)
+{
+	qdf_dma_addr_t paddr;
+	int frag_len;
+	struct skb_shared_info *sh = skb_shinfo(nbuf);
+	const skb_frag_t *frag = sh->frags + cur_frag;
+
+	frag_len = skb_frag_size(frag);
+
+	/* keep the full dma_addr_t: the previous int32_t local truncated
+	 * addresses above 4 GB and broke the dma_mapping_error() check */
+	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
+		skb_frag_dma_map(osdev->dev, frag, offset, frag_len, dir);
+	return dma_mapping_error(osdev->dev, paddr) ?
+		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(__qdf_nbuf_frag_map);
+#endif
+/**
+ * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
+ * @dmap: dma map
+ * @cb: callback
+ * @arg: argument
+ *
+ * Deliberate no-op on Linux: map callbacks are not used by this
+ * implementation, so the parameters are accepted and ignored.
+ *
+ * Return: none
+ */
+void
+__qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
+{
+	return;
+}
+EXPORT_SYMBOL(__qdf_nbuf_dmamap_set_cb);
+
+
+/**
+ * __qdf_nbuf_get_vlan_info() - get vlan info
+ * @hdl: net handle
+ * @skb: sk buff
+ * @vlan: vlan header
+ *
+ * Stub on Linux: @vlan is left untouched and success is always reported.
+ *
+ * Return: QDF status
+ */
+QDF_STATUS
+__qdf_nbuf_get_vlan_info(qdf_net_handle_t hdl, struct sk_buff *skb,
+			 qdf_net_vlanhdr_t *vlan)
+{
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(__qdf_nbuf_get_vlan_info);
+
+#ifndef REMOVE_INIT_DEBUG_CODE
+/**
+ * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
+ * @osdev: os device
+ * @buf: sk buff
+ * @dir: direction
+ *
+ * Return: none
+ */
+#if defined(A_SIMOS_DEVHOST)
+void __qdf_nbuf_sync_single_for_cpu(
+	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
+{
+	/* no cache maintenance is needed on the simulation host */
+	return;
+}
+EXPORT_SYMBOL(__qdf_nbuf_sync_single_for_cpu);
+#else
+void __qdf_nbuf_sync_single_for_cpu(
+	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
+{
+	if (0 == QDF_NBUF_CB_PADDR(buf)) {
+		qdf_print("ERROR: NBUF mapped physical address is NULL\n");
+		return;
+	}
+	/* NOTE(review): the actual sync below is commented out, so this
+	 * variant only validates the mapping — confirm this is intentional */
+/* dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
+	buf->end - buf->data, dir); */
+}
+EXPORT_SYMBOL(__qdf_nbuf_sync_single_for_cpu);
+#endif
+/**
+ * __qdf_nbuf_sync_for_cpu() - nbuf sync
+ * @osdev: os device
+ * @skb: sk buff
+ * @dir: direction
+ *
+ * Return: none
+ */
+void
+__qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
+	struct sk_buff *skb, qdf_dma_dir_t dir)
+{
+	qdf_assert(
+	(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
+
+	/*
+	 * Assume there's a single fragment.
+	 * If this is not true, the assertion in __qdf_nbuf_map_nbytes
+	 * will catch it.
+	 */
+	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
+}
+EXPORT_SYMBOL(__qdf_nbuf_sync_for_cpu);
+#endif
+
diff --git a/qdf/linux/src/qdf_perf.c b/qdf/linux/src/qdf_perf.c
new file mode 100644
index 000000000000..86fb7b4d9a80
--- /dev/null
+++ b/qdf/linux/src/qdf_perf.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2012-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: qdf_perf
+ * This file provides OS dependent perf API's.
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/vmalloc.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+#include <qdf_perf.h>
+
+/* root of the perf-entry tree; children hang off .child, siblings off .list */
+qdf_perf_entry_t perf_root = {{0, 0} };
+
+/**
+ * qdf_perfmod_init() - Module init
+ *
+ * Initialises the root perf entry and creates the perf debug
+ * directory under /proc.
+ *
+ * return: 0 (always succeeds)
+ */
+int
+qdf_perfmod_init(void)
+{
+	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
+		  "Perf Debug Module Init");
+	INIT_LIST_HEAD(&perf_root.list);
+	INIT_LIST_HEAD(&perf_root.child);
+	/* NULL parent (not the integer 0) places the dir at the /proc root */
+	perf_root.proc = proc_mkdir(PROCFS_PERF_DIRNAME, NULL);
+	return 0;
+}
+EXPORT_SYMBOL(qdf_perfmod_init);
+
+/**
+ * qdf_perfmod_exit() - Module exit
+ *
+ * Removes the perf debug directory created by qdf_perfmod_init().
+ *
+ * Return: none
+ */
+void
+qdf_perfmod_exit(void)
+{
+	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
+		  "Perf Debug Module Exit");
+	/* NULL parent (not the integer 0) mirrors the proc_mkdir() call */
+	remove_proc_entry(PROCFS_PERF_DIRNAME, NULL);
+}
+EXPORT_SYMBOL(qdf_perfmod_exit);
+
+/**
+ * __qdf_perf_init() - Create the perf entry
+ * @parent: parent perf id (NULL attaches the entry to the root)
+ * @id_name: name of perf id; also used as the procfs node name
+ * @type: type of perf counter
+ *
+ * Allocates a new entry, links it into the parent's child list and
+ * creates the matching procfs node.
+ *
+ * return: perf id, or NULL on bad @type, allocation or procfs failure
+ */
+qdf_perf_id_t
+__qdf_perf_init(qdf_perf_id_t parent, uint8_t *id_name,
+		qdf_perf_cntr_t type)
+{
+	qdf_perf_entry_t *entry = NULL;
+	qdf_perf_entry_t *pentry = PERF_ENTRY(parent);
+
+	if (type >= CNTR_LAST) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s:%s Invalid perf-type", __FILE__, __func__);
+		goto done;
+	}
+
+	if (!pentry)
+		pentry = &perf_root;
+
+	/* kzalloc zero-fills, replacing the old kmalloc + memset pair */
+	entry = kzalloc(sizeof(struct qdf_perf_entry), GFP_ATOMIC);
+	if (!entry) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  " Out of Memory,:%s", __func__);
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&entry->list);
+	INIT_LIST_HEAD(&entry->child);
+
+	spin_lock_init(&entry->lock_irq);
+
+	list_add_tail(&entry->list, &pentry->child);
+
+	entry->name = id_name;
+	entry->type = type;
+
+	if (type == CNTR_GROUP) {
+		entry->proc = proc_mkdir(id_name, pentry->proc);
+		goto done;
+	}
+
+	entry->parent = pentry;
+	entry->proc = create_proc_entry(id_name, S_IFREG|S_IRUGO|S_IWUSR,
+					pentry->proc);
+	if (!entry->proc) {
+		/* procfs node creation failed: unlink and free the entry
+		 * instead of dereferencing a NULL proc pointer below */
+		list_del(&entry->list);
+		kfree(entry);
+		return NULL;
+	}
+	entry->proc->data = entry;
+	entry->proc->read_proc = api_tbl[type].proc_read;
+	entry->proc->write_proc = api_tbl[type].proc_write;
+
+	/*
+	 * Initialize the Event with default values
+	 */
+	api_tbl[type].init(entry, api_tbl[type].def_val);
+
+done:
+	return entry;
+}
+EXPORT_SYMBOL(__qdf_perf_init);
+
+/**
+ * __qdf_perf_destroy - Destroy the perf entry
+ * @id: perf id returned by __qdf_perf_init()
+ *
+ * Removes the procfs node, unlinks the entry and frees it. Refuses
+ * to delete an entry that still has children.
+ *
+ * @return: true on success, false if the entry still has children
+ */
+bool __qdf_perf_destroy(qdf_perf_id_t id)
+{
+	qdf_perf_entry_t *entry = PERF_ENTRY(id),
+			 *parent = entry->parent;
+
+	if (!list_empty(&entry->child)) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "Child's are alive, Can't delete");
+		/* plain 'false' instead of the legacy A_FALSE macro */
+		return false;
+	}
+
+	remove_proc_entry(entry->name, parent->proc);
+
+	list_del(&entry->list);
+
+	/* the entry comes from kmalloc/kzalloc in __qdf_perf_init(); the
+	 * old vfree() handed a slab pointer to the vmalloc allocator */
+	kfree(entry);
+
+	return true;
+}
+EXPORT_SYMBOL(__qdf_perf_destroy);
+
+/**
+ * __qdf_perf_start - Start the sampling
+ * @id: Instance of qdf_perf_id_t
+ *
+ * Takes a "begin" sample (flag 0) through the counter's sample hook.
+ *
+ * Returns: none
+ */
+void __qdf_perf_start(qdf_perf_id_t id)
+{
+	qdf_perf_entry_t *e = PERF_ENTRY(id);
+
+	api_tbl[e->type].sample(e, 0);
+}
+EXPORT_SYMBOL(__qdf_perf_start);
+
+/**
+ * __qdf_perf_end - Stop sampling
+ * @id: Instance of qdf_perf_id_t
+ *
+ * Takes an "end" sample (flag 1) through the counter's sample hook.
+ *
+ * Returns: none
+ */
+void __qdf_perf_end(qdf_perf_id_t id)
+{
+	qdf_perf_entry_t *e = PERF_ENTRY(id);
+
+	api_tbl[e->type].sample(e, 1);
+}
+EXPORT_SYMBOL(__qdf_perf_end);
diff --git a/qdf/linux/src/qdf_threads.c b/qdf/linux/src/qdf_threads.c
new file mode 100644
index 000000000000..758b5db99eb4
--- /dev/null
+++ b/qdf/linux/src/qdf_threads.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: qdf_threads
+ * QCA driver framework (QDF) thread APIs
+ */
+
+/* Include Files */
+#include <qdf_threads.h>
+#include <qdf_types.h>
+#include <qdf_trace.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+/* Function declarations and documentation */
+
+/**
+ * qdf_sleep() - sleep
+ * @ms_interval : Number of milliseconds to suspend the current thread.
+ * A value of 0 may or may not cause the current thread to yield.
+ *
+ * This function suspends the execution of the current thread
+ * until the specified time out interval elapses.
+ *
+ * Return: none
+ */
+void qdf_sleep(uint32_t ms_interval)
+{
+	/* sleeping is only legal in process context */
+	if (in_interrupt()) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s cannot be called from interrupt context!!!",
+			  __func__);
+		return;
+	}
+	/* NOTE(review): msleep_interruptible() may return early if a signal
+	 * is delivered — confirm callers tolerate a shortened sleep */
+	msleep_interruptible(ms_interval);
+}
+EXPORT_SYMBOL(qdf_sleep);
+
+/**
+ * qdf_sleep_us() - sleep
+ * @us_interval : Number of microseconds to suspend the current thread.
+ * A value of 0 may or may not cause the current thread to yield.
+ *
+ * This function suspends the execution of the current thread
+ * until the specified time out interval elapses.
+ *
+ * Return : none
+ */
+void qdf_sleep_us(uint32_t us_interval)
+{
+	unsigned long ticks = usecs_to_jiffies(us_interval) + 1;
+
+	/* sleeping is only legal in process context */
+	if (in_interrupt()) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s cannot be called from interrupt context!!!",
+			  __func__);
+		return;
+	}
+
+	/* keep sleeping until the interval elapses or a signal arrives */
+	while (ticks && !signal_pending(current))
+		ticks = schedule_timeout_interruptible(ticks);
+}
+EXPORT_SYMBOL(qdf_sleep_us);
+
+/**
+ * qdf_busy_wait() - busy wait
+ * @us_interval : Number of microseconds to busy wait.
+ *
+ * This function places the current thread in busy wait until the specified
+ * time out interval elapses. If the interval is greater than 50us on WM, the
+ * behaviour is undefined.
+ *
+ * Return : none
+ */
+void qdf_busy_wait(uint32_t us_interval)
+{
+	/* spins without sleeping; safe in any context but burns CPU */
+	udelay(us_interval);
+}
+EXPORT_SYMBOL(qdf_busy_wait);
diff --git a/qdf/linux/src/qdf_trace.c b/qdf/linux/src/qdf_trace.c
new file mode 100644
index 000000000000..938773759894
--- /dev/null
+++ b/qdf/linux/src/qdf_trace.c
@@ -0,0 +1,1054 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: qdf_trace
+ * QCA driver framework (QDF) trace APIs
+ * Trace, logging, and debugging definitions and APIs
+ */
+
+/* Include Files */
+#include <qdf_trace.h>
+#include <ani_global.h>
+#include <wlan_logging_sock_svc.h>
+#include "qdf_time.h"
+/* Preprocessor definitions and constants */
+
+#define QDF_TRACE_BUFFER_SIZE (512)
+
+/* unit used when timestamping trace records (see qdf_get_log_timestamp) */
+enum qdf_timestamp_unit qdf_log_timestamp_type = QDF_LOG_TIMESTAMP_UNIT;
+
+/* macro to map qdf trace levels into the bitmask */
+#define QDF_TRACE_LEVEL_TO_MODULE_BITMASK(_level) ((1 << (_level)))
+
+/**
+ * typedef struct module_trace_info - Trace level for a module, as a bitmask.
+ * The bits in this mask are ordered by QDF_TRACE_LEVEL. For example,
+ * each bit represents one of the bits in QDF_TRACE_LEVEL that may be turned
+ * on to have traces at that level logged, i.e. if QDF_TRACE_LEVEL_ERROR is
+ * == 2, then if bit 2 (low order) is turned ON, then ERROR traces will be
+ * printed to the trace log. Note that all bits turned OFF means no traces
+ * @module_trace_level: trace level
+ * @module_name_str: 3 character string name for the module
+ */
+typedef struct {
+	uint16_t module_trace_level;
+	unsigned char module_name_str[4];
+} module_trace_info;
+
+/* default: only FATAL and ERROR traces enabled for every module */
+#define QDF_DEFAULT_TRACE_LEVEL \
+	((1 << QDF_TRACE_LEVEL_FATAL) | (1 << QDF_TRACE_LEVEL_ERROR))
+
+/* Array of static data that contains all of the per module trace
+ * information. This includes the trace level for the module and
+ * the 3 character 'name' of the module for marking the trace logs
+ */
+module_trace_info g_qdf_trace_info[QDF_MODULE_ID_MAX] = {
+	[QDF_MODULE_ID_TLSHIM] = {QDF_DEFAULT_TRACE_LEVEL, "DP"},
+	[QDF_MODULE_ID_WMI] = {QDF_DEFAULT_TRACE_LEVEL, "WMI"},
+	[QDF_MODULE_ID_HDD] = {QDF_DEFAULT_TRACE_LEVEL, "HDD"},
+	[QDF_MODULE_ID_SME] = {QDF_DEFAULT_TRACE_LEVEL, "SME"},
+	[QDF_MODULE_ID_PE] = {QDF_DEFAULT_TRACE_LEVEL, "PE "},
+	[QDF_MODULE_ID_WMA] = {QDF_DEFAULT_TRACE_LEVEL, "WMA"},
+	[QDF_MODULE_ID_SYS] = {QDF_DEFAULT_TRACE_LEVEL, "SYS"},
+	[QDF_MODULE_ID_QDF] = {QDF_DEFAULT_TRACE_LEVEL, "QDF"},
+	[QDF_MODULE_ID_SAP] = {QDF_DEFAULT_TRACE_LEVEL, "SAP"},
+	[QDF_MODULE_ID_HDD_SOFTAP] = {QDF_DEFAULT_TRACE_LEVEL, "HSP"},
+	[QDF_MODULE_ID_HDD_DATA] = {QDF_DEFAULT_TRACE_LEVEL, "HDP"},
+	[QDF_MODULE_ID_HDD_SAP_DATA] = {QDF_DEFAULT_TRACE_LEVEL, "SDP"},
+	[QDF_MODULE_ID_BMI] = {QDF_DEFAULT_TRACE_LEVEL, "BMI"},
+	[QDF_MODULE_ID_HIF] = {QDF_DEFAULT_TRACE_LEVEL, "HIF"},
+	[QDF_MODULE_ID_TXRX] = {QDF_DEFAULT_TRACE_LEVEL, "TRX"},
+	[QDF_MODULE_ID_HTT] = {QDF_DEFAULT_TRACE_LEVEL, "HTT"},
+};
+
+/* Static and Global variables */
+/* serializes writers/readers of g_qdf_trace_tbl and g_qdf_trace_data */
+static spinlock_t ltrace_lock;
+
+/* MTRACE ring buffer of fixed-size records */
+static qdf_trace_record_t g_qdf_trace_tbl[MAX_QDF_TRACE_RECORDS];
+/* global qdf trace data */
+static t_qdf_trace_data g_qdf_trace_data;
+/*
+ * all the call back functions for dumping MTRACE messages from ring buffer
+ * are stored in qdf_trace_cb_table, these callbacks are initialized during
+ * init only. So, we will make a copy of these call back functions and
+ * maintain them in qdf_trace_restore_cb_table. In case we make modifications
+ * to qdf_trace_cb_table, we can certainly retrieve all the call back
+ * functions back from the restore table
+ */
+static tp_qdf_trace_cb qdf_trace_cb_table[QDF_MODULE_ID_MAX];
+static tp_qdf_trace_cb qdf_trace_restore_cb_table[QDF_MODULE_ID_MAX];
+
+/* Static and Global variables */
+/* serializes writers/readers of g_qdf_dp_trace_tbl and g_qdf_dp_trace_data */
+static spinlock_t l_dp_trace_lock;
+
+/* DP trace ring buffer of fixed-size records */
+static struct qdf_dp_trace_record_s
+			g_qdf_dp_trace_tbl[MAX_QDF_DP_TRACE_RECORDS];
+
+/*
+ * all the options to configure/control DP trace are
+ * defined in this structure
+ */
+static struct s_qdf_dp_trace_data g_qdf_dp_trace_data;
+/*
+ * all the call back functions for dumping DPTRACE messages from ring buffer
+ * are stored in qdf_dp_trace_cb_table, callbacks are initialized during init
+ */
+static tp_qdf_dp_trace_cb qdf_dp_trace_cb_table[QDF_DP_TRACE_MAX];
+
+/**
+ * qdf_trace_set_level() - Set the trace level for a particular module
+ * @module: Module id
+ * @level: trace level
+ *
+ * Trace level is a member of the QDF_TRACE_LEVEL enumeration indicating
+ * the severity of the condition causing the trace message to be issued.
+ * More severe conditions are more likely to be logged.
+ *
+ * This is an external API that allows trace levels to be set for each module.
+ *
+ * Return: None
+ */
+void qdf_trace_set_level(QDF_MODULE_ID module, QDF_TRACE_LEVEL level)
+{
+	/* make sure the caller is passing in a valid LEVEL */
+	if (level >= QDF_TRACE_LEVEL_MAX) {
+		pr_err("%s: Invalid trace level %d passed in!\n", __func__,
+		       level);
+		return;
+	}
+
+	/* make sure the caller is passing in a valid module; 'module' is
+	 * used below to index g_qdf_trace_info, so an unchecked id would
+	 * write out of bounds (qdf_trace_set_module_trace_level and
+	 * qdf_trace_set_value already validate this way)
+	 */
+	if (module < 0 || module >= QDF_MODULE_ID_MAX) {
+		pr_err("%s: Invalid module id %d passed\n", __func__, module);
+		return;
+	}
+
+	/* Treat 'none' differently. NONE means we have to turn off all
+	 * the bits in the bit mask so none of the traces appear. Anything
+	 * other than 'none' means we need to turn ON a bit in the bitmask
+	 */
+	if (QDF_TRACE_LEVEL_NONE == level)
+		g_qdf_trace_info[module].module_trace_level =
+			QDF_TRACE_LEVEL_NONE;
+	else
+		/* set the desired bit in the bit mask for the module trace
+		 * level
+		 */
+		g_qdf_trace_info[module].module_trace_level |=
+			QDF_TRACE_LEVEL_TO_MODULE_BITMASK(level);
+}
+EXPORT_SYMBOL(qdf_trace_set_level);
+
+/**
+ * qdf_trace_set_module_trace_level() - Set module trace level
+ * @module: Module id
+ * @level: Trace level for a module, as a bitmask as per 'module_trace_info'
+ *
+ * Overwrites the whole trace-level bitmask for @module in one shot,
+ * unlike qdf_trace_set_level() which ORs in a single level bit.
+ *
+ * Return: None
+ */
+void qdf_trace_set_module_trace_level(QDF_MODULE_ID module, uint32_t level)
+{
+	/* reject module ids outside the per-module info table */
+	if (module >= QDF_MODULE_ID_MAX || module < 0) {
+		pr_err("%s: Invalid module id %d passed\n", __func__, module);
+		return;
+	}
+
+	g_qdf_trace_info[module].module_trace_level = level;
+}
+EXPORT_SYMBOL(qdf_trace_set_module_trace_level);
+
+/**
+ * qdf_trace_set_value() - Set module trace value
+ * @module: Module id
+ * @level: Trace level for a module, as a bitmask as per 'module_trace_info'
+ * @on: set/clear the desired bit in the bit mask
+ *
+ * Sets or clears the single bit for @level in @module's trace bitmask.
+ * QDF_TRACE_LEVEL_NONE clears the whole mask; QDF_TRACE_LEVEL_ALL sets
+ * every bit.
+ *
+ * Return: None
+ */
+void qdf_trace_set_value(QDF_MODULE_ID module, QDF_TRACE_LEVEL level,
+			 uint8_t on)
+{
+	/* reject out-of-range trace levels */
+	if (level < 0 || level >= QDF_TRACE_LEVEL_MAX) {
+		pr_err("%s: Invalid trace level %d passed in!\n", __func__,
+		       level);
+		return;
+	}
+
+	/* reject out-of-range module ids */
+	if (module < 0 || module >= QDF_MODULE_ID_MAX) {
+		pr_err("%s: Invalid module id %d passed in!\n", __func__,
+		       module);
+		return;
+	}
+
+	/* NONE: wipe the whole mask so no traces appear */
+	if (level == QDF_TRACE_LEVEL_NONE) {
+		g_qdf_trace_info[module].module_trace_level =
+			QDF_TRACE_LEVEL_NONE;
+		return;
+	}
+
+	/* ALL: light every bit so all traces appear */
+	if (level == QDF_TRACE_LEVEL_ALL) {
+		g_qdf_trace_info[module].module_trace_level = 0xFFFF;
+		return;
+	}
+
+	/* otherwise set or clear just this level's bit */
+	if (on)
+		g_qdf_trace_info[module].module_trace_level |=
+			QDF_TRACE_LEVEL_TO_MODULE_BITMASK(level);
+	else
+		g_qdf_trace_info[module].module_trace_level &=
+			~(QDF_TRACE_LEVEL_TO_MODULE_BITMASK(level));
+}
+EXPORT_SYMBOL(qdf_trace_set_value);
+
+/**
+ * qdf_trace_get_level() - get the trace level
+ * @module: module Id
+ * @level: trace level
+ *
+ * This is an external API that returns a bool value to signify if a
+ * particular trace level is set for the specified module.
+ * A member of the QDF_TRACE_LEVEL enumeration indicating the severity
+ * of the condition causing the trace message to be issued.
+ *
+ * Note that individual trace levels are the only valid values
+ * for this API. QDF_TRACE_LEVEL_NONE and QDF_TRACE_LEVEL_ALL
+ * are not valid input and will return false
+ *
+ * Return:
+ * false - the specified trace level for the specified module is OFF
+ * true - the specified trace level for the specified module is ON
+ */
+bool qdf_trace_get_level(QDF_MODULE_ID module, QDF_TRACE_LEVEL level)
+{
+	bool trace_on = false;
+
+	if ((QDF_TRACE_LEVEL_NONE == level) ||
+	    (QDF_TRACE_LEVEL_ALL == level) || (level >= QDF_TRACE_LEVEL_MAX)) {
+		trace_on = false;
+	} else {
+		/* test the level's bit in the mask; the mask stores bits of
+		 * the form (1 << level), so the raw enum value must be
+		 * converted with QDF_TRACE_LEVEL_TO_MODULE_BITMASK before
+		 * ANDing (comparing 'level' itself against the mask gives
+		 * wrong answers, e.g. level 2 vs bit (1 << 2))
+		 */
+		trace_on = (g_qdf_trace_info[module].module_trace_level &
+			    QDF_TRACE_LEVEL_TO_MODULE_BITMASK(level))
+			   ? true : false;
+	}
+
+	return trace_on;
+}
+EXPORT_SYMBOL(qdf_trace_get_level);
+
+/**
+ * qdf_snprintf() - wrapper function to snprintf
+ * @str_buffer: string Buffer
+ * @size: defines the size of the data record
+ * @str_format: Format string in which the message to be logged. This format
+ *	string contains printf-like replacement parameters, which follow
+ *	this parameter in the variable argument list.
+ *
+ * Return: None
+ */
+void qdf_snprintf(char *str_buffer, unsigned int size, char *str_format, ...)
+{
+	va_list val;
+
+	va_start(val, str_format);
+	/* a va_list must be consumed by the v-variant; passing it to
+	 * snprintf() is undefined behavior and formats garbage
+	 */
+	vsnprintf(str_buffer, size, str_format, val);
+	va_end(val);
+}
+EXPORT_SYMBOL(qdf_snprintf);
+
+#ifdef QDF_ENABLE_TRACING
+
+/**
+ * qdf_trace_msg() - externally called trace function
+ * @module: Module identifier a member of the QDF_MODULE_ID
+ *	enumeration that identifies the module issuing the trace message.
+ * @level: Trace level a member of the QDF_TRACE_LEVEL enumeration
+ *	indicating the severity of the condition causing the trace message
+ *	to be issued. More severe conditions are more likely to be logged.
+ * @str_format: Format string in which the message to be logged. This format
+ *	string contains printf-like replacement parameters, which follow
+ *	this parameter in the variable argument list.
+ *
+ * Checks the level of severity and accordingly prints the trace messages
+ *
+ * Return: None
+ */
+void qdf_trace_msg(QDF_MODULE_ID module, QDF_TRACE_LEVEL level,
+		   char *str_format, ...)
+{
+	char str_buffer[QDF_TRACE_BUFFER_SIZE];
+	int n;
+
+	/* 'module' and 'level' index fixed-size arrays below; validate
+	 * them up front the same way the setters in this file do
+	 */
+	if (module < 0 || module >= QDF_MODULE_ID_MAX ||
+	    level < 0 || level >= QDF_TRACE_LEVEL_MAX)
+		return;
+
+	/* Print the trace message when the desired level bit is set in
+	 * the module trace level mask
+	 */
+	if (g_qdf_trace_info[module].module_trace_level &
+	    QDF_TRACE_LEVEL_TO_MODULE_BITMASK(level)) {
+		/* the trace level strings in an array. these are ordered in
+		 * the same order as the trace levels are defined in the enum
+		 * (see QDF_TRACE_LEVEL) so we can index into this array with
+		 * the level and get the right string. The qdf trace levels
+		 * are... none, Fatal, Error, Warning, Info, info_high,
+		 * info_med, info_low, Debug
+		 */
+		static const char *TRACE_LEVEL_STR[] = { "  ", "F ", "E ", "W ",
+						"I ", "IH", "IM", "IL", "D" };
+		va_list val;
+		va_start(val, str_format);
+
+		/* print the prefix string into the string buffer... */
+		n = snprintf(str_buffer, QDF_TRACE_BUFFER_SIZE,
+			     "wlan: [%d:%2s:%3s] ",
+			     in_interrupt() ? 0 : current->pid,
+			     (char *)TRACE_LEVEL_STR[level],
+			     (char *)g_qdf_trace_info[module].module_name_str);
+
+		/* print the formatted log message after the prefix string */
+		if ((n >= 0) && (n < QDF_TRACE_BUFFER_SIZE)) {
+			vsnprintf(str_buffer + n, QDF_TRACE_BUFFER_SIZE - n,
+				  str_format, val);
+#if defined(WLAN_LOGGING_SOCK_SVC_ENABLE)
+			wlan_log_to_user(level, (char *)str_buffer,
+					 strlen(str_buffer));
+#else
+			pr_err("%s\n", str_buffer);
+#endif
+		}
+		va_end(val);
+	}
+}
+EXPORT_SYMBOL(qdf_trace_msg);
+
+/**
+ * qdf_trace_display() - Display trace
+ *
+ * Prints one row per module id showing the module's short name plus an
+ * "X" column for each trace level (FATAL..DEBUG) whose bit is set in
+ * that module's trace-level bitmask.
+ *
+ * Return: None
+ */
+void qdf_trace_display(void)
+{
+	QDF_MODULE_ID module_id;
+
+	pr_err
+		(" 1)FATAL 2)ERROR 3)WARN 4)INFO 5)INFO_H 6)INFO_M 7)INFO_L 8)DEBUG\n");
+	/* each column below tests one level bit and prints "X" or " " */
+	for (module_id = 0; module_id < QDF_MODULE_ID_MAX; ++module_id) {
+		pr_err
+			("%2d)%s %s %s %s %s %s %s %s %s\n",
+			(int)module_id, g_qdf_trace_info[module_id].module_name_str,
+			(g_qdf_trace_info[module_id].
+			 module_trace_level & (1 << QDF_TRACE_LEVEL_FATAL)) ? "X" :
+			" ",
+			(g_qdf_trace_info[module_id].
+			 module_trace_level & (1 << QDF_TRACE_LEVEL_ERROR)) ? "X" :
+			" ",
+			(g_qdf_trace_info[module_id].
+			 module_trace_level & (1 << QDF_TRACE_LEVEL_WARN)) ? "X" :
+			" ",
+			(g_qdf_trace_info[module_id].
+			 module_trace_level & (1 << QDF_TRACE_LEVEL_INFO)) ? "X" :
+			" ",
+			(g_qdf_trace_info[module_id].
+			 module_trace_level & (1 << QDF_TRACE_LEVEL_INFO_HIGH)) ? "X"
+			: " ",
+			(g_qdf_trace_info[module_id].
+			 module_trace_level & (1 << QDF_TRACE_LEVEL_INFO_MED)) ? "X"
+			: " ",
+			(g_qdf_trace_info[module_id].
+			 module_trace_level & (1 << QDF_TRACE_LEVEL_INFO_LOW)) ? "X"
+			: " ",
+			(g_qdf_trace_info[module_id].
+			 module_trace_level & (1 << QDF_TRACE_LEVEL_DEBUG)) ? "X" :
+			" ");
+	}
+}
+EXPORT_SYMBOL(qdf_trace_display);
+
+#define ROW_SIZE 16
+/* Buffer size = data bytes(2 hex chars plus space) + NULL */
+#define BUFFER_SIZE ((ROW_SIZE * 3) + 1)
+
+/**
+ * qdf_trace_hex_dump() - externally called hex dump function
+ * @module: Module identifier a member of the QDF_MODULE_ID enumeration that
+ *	identifies the module issuing the trace message.
+ * @level: Trace level a member of the QDF_TRACE_LEVEL enumeration indicating
+ *	the severity of the condition causing the trace message to be
+ *	issued. More severe conditions are more likely to be logged.
+ * @data: The base address of the buffer to be logged.
+ * @buf_len: The size of the buffer to be logged.
+ *
+ * Emits the buffer as rows of ROW_SIZE hex bytes through qdf_trace_msg(),
+ * provided the level bit is enabled for the module.
+ *
+ * Return: None
+ */
+void qdf_trace_hex_dump(QDF_MODULE_ID module, QDF_TRACE_LEVEL level,
+			void *data, int buf_len)
+{
+	const u8 *bytes = data;
+	unsigned char linebuf[BUFFER_SIZE];
+	int offset;
+
+	/* nothing to do unless this level is enabled for the module */
+	if (!(g_qdf_trace_info[module].module_trace_level &
+	      QDF_TRACE_LEVEL_TO_MODULE_BITMASK(level)))
+		return;
+
+	for (offset = 0; offset < buf_len; offset += ROW_SIZE) {
+		/* last row may be shorter than ROW_SIZE */
+		int row_len = min(buf_len - offset, ROW_SIZE);
+
+		hex_dump_to_buffer(bytes + offset, row_len, ROW_SIZE, 1,
+				   linebuf, sizeof(linebuf), false);
+		qdf_trace_msg(module, level, "%.8x: %s", offset, linebuf);
+	}
+}
+EXPORT_SYMBOL(qdf_trace_hex_dump);
+
+#endif
+
+/**
+ * qdf_trace_enable() - Enable MTRACE for specific modules
+ * @bitmask_of_module_id: Bitmask according to enum of the modules.
+ *	32[dec] = 0010 0000 [bin] <enum of HDD is 5>
+ *	64[dec] = 0100 0000 [bin] <enum of SME is 6>
+ *	128[dec] = 1000 0000 [bin] <enum of PE is 7>
+ *	A bitmask of 0 selects every module.
+ * @enable: true re-enables MTRACE (restores the saved callback); false
+ *	disables it (saves the callback and clears the live entry).
+ *
+ * Enable or disable MTRACE for the modules whose bits are set in the
+ * bitmask. Invoked via the ioctl
+ * [iwpriv wlan0 setdumplog <value> <enable>], where <value> is a decimal
+ * module bitmask, e.g. 64 = SME only, 128 = PE only, 192 = PE and SME.
+ *
+ * Return: None
+ */
+void qdf_trace_enable(uint32_t bitmask_of_module_id, uint8_t enable)
+{
+	int i;
+
+	for (i = 0; i < QDF_MODULE_ID_MAX; i++) {
+		/* a zero bitmask means "all modules"; otherwise skip any
+		 * module whose bit is not set
+		 */
+		if (bitmask_of_module_id &&
+		    !((bitmask_of_module_id >> i) & 1))
+			continue;
+
+		if (enable) {
+			/* restore only modules that were previously saved */
+			if (NULL != qdf_trace_restore_cb_table[i])
+				qdf_trace_cb_table[i] =
+					qdf_trace_restore_cb_table[i];
+		} else {
+			/* park the live callback and disable recording */
+			qdf_trace_restore_cb_table[i] = qdf_trace_cb_table[i];
+			qdf_trace_cb_table[i] = NULL;
+		}
+	}
+}
+EXPORT_SYMBOL(qdf_trace_enable);
+
+/**
+ * qdf_trace_init() - initializes qdf trace structures and variables
+ *
+ * Called immediately after cds_preopen, so that we can start recording HDD
+ * events ASAP.
+ *
+ * Return: None
+ */
+void qdf_trace_init(void)
+{
+	uint8_t mod;
+
+	/* ring buffer starts empty: head/tail invalid until first record */
+	g_qdf_trace_data.head = INVALID_QDF_TRACE_ADDR;
+	g_qdf_trace_data.tail = INVALID_QDF_TRACE_ADDR;
+	g_qdf_trace_data.num = 0;
+	g_qdf_trace_data.num_since_last_dump = 0;
+	g_qdf_trace_data.enable = true;
+	g_qdf_trace_data.dump_count = DEFAULT_QDF_TRACE_DUMP_COUNT;
+
+	/* no module callbacks registered yet */
+	for (mod = 0; mod < QDF_MODULE_ID_MAX; mod++) {
+		qdf_trace_cb_table[mod] = NULL;
+		qdf_trace_restore_cb_table[mod] = NULL;
+	}
+}
+EXPORT_SYMBOL(qdf_trace_init);
+
+/**
+ * qdf_trace() - puts the messages in to ring-buffer
+ * @module: Enum of module, basically module id.
+ * @code: Code to be recorded
+ * @session: Session ID of the log
+ * @data: Actual message contents
+ *
+ * This function will be called from each module who wants record the messages
+ * in circular queue. Before calling this functions make sure you have
+ * registered your module with qdf through qdf_trace_register function.
+ *
+ * Return: None
+ */
+void qdf_trace(uint8_t module, uint8_t code, uint16_t session, uint32_t data)
+{
+	tp_qdf_trace_record rec = NULL;
+	unsigned long flags;
+
+	if (!g_qdf_trace_data.enable)
+		return;
+
+	/* if module is not registered, don't record for that module */
+	if (NULL == qdf_trace_cb_table[module])
+		return;
+
+	/* Acquire the lock so that only one thread at a time can fill the ring
+	 * buffer
+	 */
+	spin_lock_irqsave(&ltrace_lock, flags);
+
+	g_qdf_trace_data.num++;
+
+	/* 'num' saturates at the table size once the ring has wrapped */
+	if (g_qdf_trace_data.num > MAX_QDF_TRACE_RECORDS)
+		g_qdf_trace_data.num = MAX_QDF_TRACE_RECORDS;
+
+	if (INVALID_QDF_TRACE_ADDR == g_qdf_trace_data.head) {
+		/* first record */
+		g_qdf_trace_data.head = 0;
+		g_qdf_trace_data.tail = 0;
+	} else {
+		/* queue is not empty: advance tail with wrap-around */
+		uint32_t tail = g_qdf_trace_data.tail + 1;
+
+		if (MAX_QDF_TRACE_RECORDS == tail)
+			tail = 0;
+
+		if (g_qdf_trace_data.head == tail) {
+			/* full: drop the oldest record by advancing head */
+			if (MAX_QDF_TRACE_RECORDS == ++g_qdf_trace_data.head)
+				g_qdf_trace_data.head = 0;
+		}
+		g_qdf_trace_data.tail = tail;
+	}
+
+	rec = &g_qdf_trace_tbl[g_qdf_trace_data.tail];
+	rec->code = code;
+	rec->session = session;
+	rec->data = data;
+	rec->time = qdf_get_log_timestamp();
+	rec->module = module;
+	/* pid 0 marks records produced from interrupt context */
+	rec->pid = (in_interrupt() ? 0 : current->pid);
+	g_qdf_trace_data.num_since_last_dump++;
+	spin_unlock_irqrestore(&ltrace_lock, flags);
+}
+EXPORT_SYMBOL(qdf_trace);
+
+/**
+ * qdf_trace_spin_lock_init() - initializes the lock variable before use
+ *
+ * This function will be called from cds_alloc_global_context, we will have
+ * lock available to use ASAP
+ *
+ * Return: QDF_STATUS_SUCCESS always
+ */
+QDF_STATUS qdf_trace_spin_lock_init(void)
+{
+	spin_lock_init(&ltrace_lock);
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_trace_spin_lock_init);
+
+/**
+ * qdf_trace_register() - registers the call back functions
+ * @module_iD: enum value of module
+ * @qdf_trace_callback: call back function used to display this module's
+ *	MTRACE records in its preferred format.
+ *
+ * Installs the dump callback for @module_iD; a module must register here
+ * (typically during its init) before qdf_trace() will record for it.
+ *
+ * Return: None
+ */
+void qdf_trace_register(QDF_MODULE_ID module_iD,
+			tp_qdf_trace_cb qdf_trace_callback)
+{
+	qdf_trace_cb_table[module_iD] = qdf_trace_callback;
+}
+EXPORT_SYMBOL(qdf_trace_register);
+
+/**
+ * qdf_trace_dump_all() - Dump data from ring buffer via call back functions
+ * registered with QDF
+ * @p_mac: Context of particular module
+ * @code: Reason code
+ * @session: Session id of log
+ * @count: Number of lines to dump starting from tail to head
+ * @bitmask_of_module: restrict the dump to modules whose bit is set;
+ *	0 dumps every module
+ *
+ * This function will be called up on issuing ioctl call as mentioned following
+ * [iwpriv wlan0 dumplog 0 0 <n> <bitmask_of_module>]
+ *
+ * <n> - number lines to dump starting from tail to head.
+ *
+ * <bitmask_of_module> - if anybody wants to know how many messages were
+ * recorded for particular module/s mentioned by setbit in bitmask from last
+ * <n> messages. It is optional, if you don't provide then it will dump
+ * everything from buffer.
+ *
+ * Return: None
+ */
+void qdf_trace_dump_all(void *p_mac, uint8_t code, uint8_t session,
+			uint32_t count, uint32_t bitmask_of_module)
+{
+	qdf_trace_record_t p_record;
+	int32_t i, tail;
+
+	if (!g_qdf_trace_data.enable) {
+		QDF_TRACE(QDF_MODULE_ID_SYS,
+			  QDF_TRACE_LEVEL_ERROR, "Tracing Disabled");
+		return;
+	}
+
+	QDF_TRACE(QDF_MODULE_ID_SYS, QDF_TRACE_LEVEL_INFO,
+		  "Total Records: %d, Head: %d, Tail: %d",
+		  g_qdf_trace_data.num, g_qdf_trace_data.head,
+		  g_qdf_trace_data.tail);
+
+	/* acquire the lock so that only one thread at a time can read
+	 * the ring buffer
+	 */
+	spin_lock(&ltrace_lock);
+
+	if (g_qdf_trace_data.head != INVALID_QDF_TRACE_ADDR) {
+		i = g_qdf_trace_data.head;
+		tail = g_qdf_trace_data.tail;
+
+		/* when a count is given, start 'count' records before the
+		 * tail, wrapping backwards through the ring if needed
+		 */
+		if (count) {
+			if (count > g_qdf_trace_data.num)
+				count = g_qdf_trace_data.num;
+			if (tail >= (count - 1))
+				i = tail - count + 1;
+			else if (count != MAX_QDF_TRACE_RECORDS)
+				i = MAX_QDF_TRACE_RECORDS - ((count - 1) -
+							     tail);
+		}
+
+		/* copy each record out under the lock, but invoke the
+		 * callback with the lock dropped
+		 */
+		p_record = g_qdf_trace_tbl[i];
+		/* right now we are not using num_since_last_dump member but
+		 * in future we might re-visit and use this member to track
+		 * how many latest messages got added while we were dumping
+		 * from ring buffer
+		 */
+		g_qdf_trace_data.num_since_last_dump = 0;
+		spin_unlock(&ltrace_lock);
+		for (;; ) {
+			if ((code == 0 || (code == p_record.code)) &&
+			    (qdf_trace_cb_table[p_record.module] != NULL)) {
+				if (0 == bitmask_of_module) {
+					qdf_trace_cb_table[p_record.
+							   module] (p_mac,
+								    &p_record,
+								    (uint16_t)
+								    i);
+				} else {
+					if (bitmask_of_module &
+					    (1 << p_record.module)) {
+						qdf_trace_cb_table[p_record.
+								   module]
+							(p_mac, &p_record,
+							(uint16_t) i);
+					}
+				}
+			}
+
+			if (i == tail)
+				break;
+			i += 1;
+
+			spin_lock(&ltrace_lock);
+			if (MAX_QDF_TRACE_RECORDS == i) {
+				i = 0;
+				p_record = g_qdf_trace_tbl[0];
+			} else {
+				p_record = g_qdf_trace_tbl[i];
+			}
+			spin_unlock(&ltrace_lock);
+		}
+	} else {
+		spin_unlock(&ltrace_lock);
+	}
+}
+EXPORT_SYMBOL(qdf_trace_dump_all);
+
+/**
+ * qdf_dp_trace_init() - enables the DP trace
+ *
+ * Called during driver load; initializes the DP trace lock, resets the
+ * ring buffer to empty, applies default configuration and installs the
+ * default display callback for every event.
+ *
+ * Return: None
+ */
+void qdf_dp_trace_init(void)
+{
+	uint8_t idx;
+
+	qdf_dp_trace_spin_lock_init();
+
+	/* empty ring buffer with default knobs, tracing enabled */
+	g_qdf_dp_trace_data.head = INVALID_QDF_DP_TRACE_ADDR;
+	g_qdf_dp_trace_data.tail = INVALID_QDF_DP_TRACE_ADDR;
+	g_qdf_dp_trace_data.num = 0;
+	g_qdf_dp_trace_data.proto_bitmap = 0;
+	g_qdf_dp_trace_data.no_of_record = 0;
+	g_qdf_dp_trace_data.verbosity = QDF_DP_TRACE_VERBOSITY_DEFAULT;
+	g_qdf_dp_trace_data.enable = true;
+
+	/* every event initially dumps via the default display routine */
+	for (idx = 0; idx < QDF_DP_TRACE_MAX; idx++)
+		qdf_dp_trace_cb_table[idx] = qdf_dp_display_record;
+}
+EXPORT_SYMBOL(qdf_dp_trace_init);
+
+/**
+ * qdf_dp_trace_set_value() - Configure the value to control DP trace
+ * @proto_bitmap: defines the protocol to be tracked
+ * @no_of_record: defines the nth packet which is traced
+ * @verbosity: defines the verbosity level
+ *
+ * Return: None
+ */
+void qdf_dp_trace_set_value(uint8_t proto_bitmap, uint8_t no_of_record,
+			    uint8_t verbosity)
+{
+	struct s_qdf_dp_trace_data *cfg = &g_qdf_dp_trace_data;
+
+	cfg->proto_bitmap = proto_bitmap;
+	cfg->no_of_record = no_of_record;
+	cfg->verbosity = verbosity;
+}
+EXPORT_SYMBOL(qdf_dp_trace_set_value);
+
+/**
+ * qdf_dp_trace_enable_track() - enable the tracing for netbuf
+ * @code: defines the event
+ *
+ * Decides, from the configured verbosity, whether @code should be
+ * recorded: HIGH records everything, MEDIUM up to HIF pointer records,
+ * LOW up to CE packet records, DEFAULT only drop records.
+ *
+ * Return: true or false depends on whether tracing enabled
+ */
+static bool qdf_dp_trace_enable_track(enum QDF_DP_TRACE_ID code)
+{
+	switch (g_qdf_dp_trace_data.verbosity) {
+	case QDF_DP_TRACE_VERBOSITY_HIGH:
+		return true;
+	case QDF_DP_TRACE_VERBOSITY_MEDIUM:
+		return code <= QDF_DP_TRACE_HIF_PACKET_PTR_RECORD;
+	case QDF_DP_TRACE_VERBOSITY_LOW:
+		return code <= QDF_DP_TRACE_CE_PACKET_RECORD;
+	case QDF_DP_TRACE_VERBOSITY_DEFAULT:
+		return code == QDF_DP_TRACE_DROP_PACKET_RECORD;
+	default:
+		return false;
+	}
+}
+EXPORT_SYMBOL(qdf_dp_trace_enable_track);
+
+/**
+ * qdf_dp_trace_set_track() - Marks whether the packet needs to be traced
+ * @nbuf: defines the netbuf
+ *
+ * Marks @nbuf for DP tracing when either its protocol matches the
+ * configured proto bitmap or it is the nth packet per the sampling
+ * configuration. The global packet counter is updated under the DP
+ * trace lock.
+ *
+ * Return: None
+ */
+void qdf_dp_trace_set_track(qdf_nbuf_t nbuf)
+{
+	spin_lock_bh(&l_dp_trace_lock);
+	g_qdf_dp_trace_data.count++;
+
+	/* protocol-based selection */
+	if (g_qdf_dp_trace_data.proto_bitmap &&
+	    cds_pkt_get_proto_type(nbuf,
+				   g_qdf_dp_trace_data.proto_bitmap, 0))
+		QDF_NBUF_CB_TX_DP_TRACE(nbuf) = 1;
+
+	/* every-nth-packet sampling */
+	if (g_qdf_dp_trace_data.no_of_record &&
+	    !(g_qdf_dp_trace_data.count % g_qdf_dp_trace_data.no_of_record))
+		QDF_NBUF_CB_TX_DP_TRACE(nbuf) = 1;
+
+	spin_unlock_bh(&l_dp_trace_lock);
+}
+EXPORT_SYMBOL(qdf_dp_trace_set_track);
+
+/**
+ * dump_hex_trace() - Display the data in buffer
+ * @buf: buffer which contains data to be displayed
+ * @buf_len: defines the size of the data to be displayed
+ *
+ * Prints the buffer as space-separated hex bytes on a single line.
+ *
+ * Return: None
+ */
+static void dump_hex_trace(uint8_t *buf, uint8_t buf_len)
+{
+	uint8_t idx;
+
+	qdf_print("DATA: ");
+	for (idx = 0; idx < buf_len; idx++)
+		qdf_print("%02x ", buf[idx]);
+	qdf_print("\n");
+}
+EXPORT_SYMBOL(dump_hex_trace);
+
+/**
+ * qdf_dp_display_record() - Displays a record in DP trace
+ * @p_record: pointer to a record in DP trace
+ * @rec_index: record index
+ *
+ * Prints the record header (index/time/code), then a fixed message for
+ * the timeout/pause events and a hex dump of the payload for everything
+ * else.
+ *
+ * Return: None
+ */
+void qdf_dp_display_record(struct qdf_dp_trace_record_s *p_record,
+			   uint16_t rec_index)
+{
+	qdf_print("INDEX: %04d TIME: %012llu CODE: %02d\n", rec_index,
+		  p_record->time, p_record->code);
+
+	switch (p_record->code) {
+	case QDF_DP_TRACE_HDD_TX_TIMEOUT:
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "HDD TX Timeout\n");
+		break;
+	case QDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT:
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "HDD soft_aP TX Timeout\n");
+		break;
+	case QDF_DP_TRACE_VDEV_PAUSE:
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "VDEV Pause\n");
+		break;
+	case QDF_DP_TRACE_VDEV_UNPAUSE:
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "VDEV un_pause\n");
+		break;
+	default:
+		dump_hex_trace(p_record->data, p_record->size);
+	}
+}
+EXPORT_SYMBOL(qdf_dp_display_record);
+
+/**
+ * qdf_dp_trace() - Stores the data in buffer
+ * @nbuf: defines the netbuf
+ * @code: defines the event
+ * @data: defines the data to be stored
+ * @size: defines the size of the data record
+ *
+ * Return: None
+ */
+void qdf_dp_trace(qdf_nbuf_t nbuf, enum QDF_DP_TRACE_ID code,
+		  uint8_t *data, uint8_t size)
+{
+	struct qdf_dp_trace_record_s *rec = NULL;
+
+	/* Return when Dp trace is not enabled */
+	if (!g_qdf_dp_trace_data.enable)
+		return;
+
+	/* If nbuf is NULL, check for VDEV PAUSE, UNPAUSE, TIMEOUT */
+	if (!nbuf) {
+		switch (code) {
+		case QDF_DP_TRACE_HDD_TX_TIMEOUT:
+		case QDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT:
+		case QDF_DP_TRACE_VDEV_PAUSE:
+		case QDF_DP_TRACE_VDEV_UNPAUSE:
+			if (qdf_dp_trace_enable_track(code))
+				goto register_record;
+			else
+				return;
+
+		default:
+			return;
+		}
+	}
+
+	/* Return when the packet is not a data packet */
+	if (QDF_NBUF_GET_PACKET_TRACK(nbuf) != QDF_NBUF_TX_PKT_DATA_TRACK)
+		return;
+
+	/* Return when nbuf is not marked for dp tracing or
+	 * verbosity does not allow
+	 */
+	if (qdf_dp_trace_enable_track(code) == false ||
+	    !QDF_NBUF_CB_TX_DP_TRACE(nbuf))
+		return;
+
+	/* Acquire the lock so that only one thread at a time can fill the ring
+	 * buffer
+	 */
+
+register_record:
+
+	spin_lock_bh(&l_dp_trace_lock);
+
+	g_qdf_dp_trace_data.num++;
+
+	/* 'num' saturates at the table size once the ring has wrapped */
+	if (g_qdf_dp_trace_data.num > MAX_QDF_DP_TRACE_RECORDS)
+		g_qdf_dp_trace_data.num = MAX_QDF_DP_TRACE_RECORDS;
+
+	if (INVALID_QDF_DP_TRACE_ADDR == g_qdf_dp_trace_data.head) {
+		/* first record */
+		g_qdf_dp_trace_data.head = 0;
+		g_qdf_dp_trace_data.tail = 0;
+	} else {
+		/* queue is not empty: advance tail with wrap-around */
+		g_qdf_dp_trace_data.tail++;
+
+		if (MAX_QDF_DP_TRACE_RECORDS == g_qdf_dp_trace_data.tail)
+			g_qdf_dp_trace_data.tail = 0;
+
+		if (g_qdf_dp_trace_data.head == g_qdf_dp_trace_data.tail) {
+			/* full: drop the oldest record by advancing head */
+			if (MAX_QDF_DP_TRACE_RECORDS ==
+				++g_qdf_dp_trace_data.head)
+				g_qdf_dp_trace_data.head = 0;
+		}
+	}
+
+	rec = &g_qdf_dp_trace_tbl[g_qdf_dp_trace_data.tail];
+	rec->code = code;
+	rec->size = 0;
+	if (data != NULL && size > 0) {
+		/* payload is clamped to the fixed per-record capacity */
+		if (size > QDF_DP_TRACE_RECORD_SIZE)
+			size = QDF_DP_TRACE_RECORD_SIZE;
+
+		rec->size = size;
+		switch (code) {
+		case QDF_DP_TRACE_HDD_PACKET_PTR_RECORD:
+		case QDF_DP_TRACE_CE_PACKET_PTR_RECORD:
+		case QDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD:
+		case QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD:
+		case QDF_DP_TRACE_HTT_PACKET_PTR_RECORD:
+		case QDF_DP_TRACE_HTC_PACKET_PTR_RECORD:
+		case QDF_DP_TRACE_HIF_PACKET_PTR_RECORD:
+			/* for *_PTR_RECORD events, store the bytes of the
+			 * pointer value itself (&data), not the memory it
+			 * points to
+			 */
+			qdf_mem_copy(rec->data, (uint8_t *)(&data), size);
+			break;
+
+		case QDF_DP_TRACE_DROP_PACKET_RECORD:
+		case QDF_DP_TRACE_HDD_PACKET_RECORD:
+		case QDF_DP_TRACE_CE_PACKET_RECORD:
+			/* for full-packet records, copy the payload bytes */
+			qdf_mem_copy(rec->data, data, size);
+			break;
+		default:
+			break;
+		}
+	}
+	rec->time = qdf_get_log_timestamp();
+	/* pid 0 marks records produced from interrupt context */
+	rec->pid = (in_interrupt() ? 0 : current->pid);
+	spin_unlock_bh(&l_dp_trace_lock);
+}
+EXPORT_SYMBOL(qdf_dp_trace);
+
+/**
+ * qdf_dp_trace_spin_lock_init() - initializes the lock variable before use
+ *
+ * This function will be called from cds_alloc_global_context, we will have
+ * lock available to use ASAP
+ *
+ * Return: None
+ */
+void qdf_dp_trace_spin_lock_init(void)
+{
+	spin_lock_init(&l_dp_trace_lock);
+}
+EXPORT_SYMBOL(qdf_dp_trace_spin_lock_init);
+
+/**
+ * qdf_dp_trace_dump_all() - Dump data from ring buffer via call back functions
+ * registered with QDF
+ * @count: Number of lines to dump starting from tail to head; 0 dumps
+ *	the whole ring
+ *
+ * Return: None
+ */
+void qdf_dp_trace_dump_all(uint32_t count)
+{
+	struct qdf_dp_trace_record_s p_record;
+	int32_t i, tail;
+
+	if (!g_qdf_dp_trace_data.enable) {
+		QDF_TRACE(QDF_MODULE_ID_SYS,
+			  QDF_TRACE_LEVEL_ERROR, "Tracing Disabled");
+		return;
+	}
+
+	QDF_TRACE(QDF_MODULE_ID_SYS, QDF_TRACE_LEVEL_ERROR,
+		  "Total Records: %d, Head: %d, Tail: %d",
+		  g_qdf_dp_trace_data.num, g_qdf_dp_trace_data.head,
+		  g_qdf_dp_trace_data.tail);
+
+	/* acquire the lock so that only one thread at a time can read
+	 * the ring buffer
+	 */
+	spin_lock_bh(&l_dp_trace_lock);
+
+	if (g_qdf_dp_trace_data.head != INVALID_QDF_DP_TRACE_ADDR) {
+		i = g_qdf_dp_trace_data.head;
+		tail = g_qdf_dp_trace_data.tail;
+
+		/* when a count is given, start 'count' records before the
+		 * tail, wrapping backwards through the ring if needed
+		 */
+		if (count) {
+			if (count > g_qdf_dp_trace_data.num)
+				count = g_qdf_dp_trace_data.num;
+			if (tail >= (count - 1))
+				i = tail - count + 1;
+			else if (count != MAX_QDF_DP_TRACE_RECORDS)
+				i = MAX_QDF_DP_TRACE_RECORDS - ((count - 1) -
+								tail);
+		}
+
+		/* copy each record out under the lock, but invoke the
+		 * display callback with the lock dropped
+		 */
+		p_record = g_qdf_dp_trace_tbl[i];
+		spin_unlock_bh(&l_dp_trace_lock);
+		for (;; ) {
+
+			qdf_dp_trace_cb_table[p_record.
+					      code] (&p_record, (uint16_t)i);
+			if (i == tail)
+				break;
+			i += 1;
+
+			spin_lock_bh(&l_dp_trace_lock);
+			if (MAX_QDF_DP_TRACE_RECORDS == i)
+				i = 0;
+
+			p_record = g_qdf_dp_trace_tbl[i];
+			spin_unlock_bh(&l_dp_trace_lock);
+		}
+	} else {
+		spin_unlock_bh(&l_dp_trace_lock);
+	}
+}
+EXPORT_SYMBOL(qdf_dp_trace_dump_all);