path: root/include/linux/workqueue.h
author		Emmanuel Grumbach <emmanuel.grumbach@intel.com>	2015-08-16 10:20:58 +0300
committer	Emmanuel Grumbach <emmanuel.grumbach@intel.com>	2015-08-16 10:20:58 +0300
commit		473e0bc39bd5d2542e90edc622a65bd49206409d (patch)
tree		bd31cc370c386228d4525a9e84d87d412acd6959 /include/linux/workqueue.h
parent		1a84e7716086be3b90e3b735725d0a14da28a69c (diff)
parent		8f9c98df949333f08b74e5df1caacf7e2c5e8552 (diff)
Merge tag 'mac80211-next-for-davem-2015-08-14' into next
Another pull request for the next cycle, this time with quite a bit of content:

 * mesh fixes/improvements from Alexis, Bob, Chun-Yeow and Jesse
 * TDLS higher bandwidth support (Arik)
 * OCB fixes from Bertold Van den Bergh
 * suspend/resume fixes from Eliad
 * dynamic SMPS support for minstrel-HT (Krishna Chaitanya)
 * VHT bitrate mask support (Lorenzo Bianconi)
 * better regulatory support for 5/10 MHz channels (Matthias May)
 * basic support for MU-MIMO to avoid the multi-vif issue (Sara Sharon)

along with a number of other cleanups.
Diffstat (limited to 'include/linux/workqueue.h')
-rw-r--r--	include/linux/workqueue.h	31
1 file changed, 30 insertions, 1 deletion
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index deee212af8e0..738b30b39b68 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -424,6 +424,7 @@ struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
			  const struct workqueue_attrs *attrs);
+int workqueue_set_unbound_cpumask(cpumask_var_t cpumask);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
@@ -434,7 +435,6 @@ extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
-extern void flush_scheduled_work(void);

extern int schedule_on_each_cpu(work_func_t func);
@@ -531,6 +531,35 @@ static inline bool schedule_work(struct work_struct *work)
}

/**
+ * flush_scheduled_work - ensure that any scheduled work has run to completion.
+ *
+ * Forces execution of the kernel-global workqueue and blocks until its
+ * completion.
+ *
+ * Think twice before calling this function! It's very easy to get into
+ * trouble if you don't take great care. Either of the following situations
+ * will lead to deadlock:
+ *
+ * One of the work items currently on the workqueue needs to acquire
+ * a lock held by your code or its caller.
+ *
+ * Your code is running in the context of a work routine.
+ *
+ * They will be detected by lockdep when they occur, but the first might not
+ * occur very often. It depends on what work items are on the workqueue and
+ * what locks they need, which you have no control over.
+ *
+ * In most situations flushing the entire workqueue is overkill; you merely
+ * need to know that a particular work item isn't queued and isn't running.
+ * In such cases you should use cancel_delayed_work_sync() or
+ * cancel_work_sync() instead.
+ */
+static inline void flush_scheduled_work(void)
+{
+ flush_workqueue(system_wq);
+}
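
For context, the pattern this new comment recommends over flush_scheduled_work() can look like the following sketch. It is illustrative only and not part of this patch: struct foo_dev, foo_refresh_fn() and the other foo_* names are hypothetical, while INIT_WORK(), schedule_work() and cancel_work_sync() are the existing workqueue APIs.

#include <linux/printk.h>
#include <linux/workqueue.h>

/* Hypothetical driver state owning one work item queued on system_wq. */
struct foo_dev {
	struct work_struct refresh_work;
};

static void foo_refresh_fn(struct work_struct *work)
{
	struct foo_dev *dev = container_of(work, struct foo_dev, refresh_work);

	/* ... perform the deferred update for this device ... */
	pr_debug("foo: refreshing device %p\n", dev);
}

static void foo_start(struct foo_dev *dev)
{
	INIT_WORK(&dev->refresh_work, foo_refresh_fn);
	schedule_work(&dev->refresh_work);	/* runs on the system workqueue */
}

static void foo_stop(struct foo_dev *dev)
{
	/*
	 * Cancel and wait for only this driver's own work item. Unlike
	 * flush_scheduled_work(), this does not block on every other item
	 * queued on system_wq, so it avoids the deadlock scenarios described
	 * in the comment above.
	 */
	cancel_work_sync(&dev->refresh_work);
}
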
+
+/**
* schedule_delayed_work_on - queue work in global workqueue on CPU after delay
* @cpu: cpu to use
* @dwork: job to be done