path: root/kernel
author     Linux Build Service Account <lnxbuild@localhost>  2016-10-28 01:59:31 -0600
committer  Linux Build Service Account <lnxbuild@localhost>  2016-10-28 01:59:32 -0600
commit     4c3ff3848cb4ee9c56418e6892ca52e265d1fa13 (patch)
tree       ac4f8810cda81b035799c165eec0d2c721c6c555 /kernel
parent     b080f2e98893799166f0644756ff3080c7ac1f45 (diff)
parent     ce610f2fc934e89c706a8030b25ce48accced28a (diff)
Promotion of kernel.lnx.4.4-161028.
CRs      Change ID                                  Subject
--------------------------------------------------------------------------------------------------------------
1080392  I05345b8f5d108e59863ee4e37ed75fd32a284ee5  ASoC: wcd-dsp-mgr: fix restart logic when codec comes ba
1081345  I69981b603526575758376695d7a5e9affd389dbe  ARM: dts: msm: add SLPI fw name property for msmcobalt
1024862  I600c646a0c23b654392d8e00a829bfd88b71c38c  msm: mdss: Fix memleak in panel_debug_reg_write
1069060  Icbdb05aeb9211665420a872d3453dbbd24fd347b  msm: ipa: fix ioctl input param validation
1061018  I738eeb141d323ae1a77f352b11525556888a9243  ARM: dts: msm: fix sd card detection for msmcobalt QRD S
1050304  I9c7afdd74122318fc5f7ebd7c0381a4078d1fb01  msm: mdss: edid: fix dtd parsing
1082022  Iad90353f66cf5ad7f83e3fc252d82cec45abce60  platform: qpnp-revid: Add REVID details for PM/PM2FALCON
1077762  I8121d45fedab6197f8466d25674d5b0decd45f1b  ARM: dts: msm: enable esd for nt35597 panels in msmcobal
1080014  Ic5dbac9c2cd31f3193ede7cdc64249f7514e3e98  icnss: Do top level reset when timeout to pull register
1077766  Ie60587aa4a4a48bae70ceac4e053e63fc29f516a  ASoC: msm: Add routes for all TDM modes
1078147  I0aa3443a2fddfa200631b6fe9eb22de10178fe46  icnss: Do not parse VBATT DT entry if VBATT feature is d
1080392  Id9cbda04a4c36070e52919d3a34ebc871ac22a8e  ASoC: wcd934x: Trigger codec DSP subsystem restart
1081806  I42a47e4386ca7a1bbbec428c8d2cbe307634cd1e  ARM: dts: msm: Add support for Modem PIL on msmtriton
1081547  I64f0f5b15cc5d9185e415f0b26e276e111c885ec  usb: dwc3: gadget: Remove dev_WARN_ONCE in dwc3_gadget_e
1082650  Iab7fc8551b628c443ce751026b06c5ff4ebba39a  sched: Fix compilation issue with reset_hmp_stats
1082650  Idb74e9df4fcb55085ac869f5ba273cef4a3eb9eb  sched/fair: Fix compilation issue
1069188  I2939e9fd37e96f37101b93dabd86cfdaaf06907f  ASoC: wcd934x: Add support for audio subsystem restart
1081010  I16e500aa87813cd3a1e18f134ea1ed145b9e1ef8  msm: sde: Fix SDE rotator clock disable sequence
1074914  I94b9091b8349d6388fbe5c29eabe3666c86e5cce  ASoC: msm: Enable Media Format converter for all resampl
1081806  I3e700bdae21b6e18b28cf5982e42e73d7ae953a5  ARM: dts: msm: Add support for Modem PIL on msmfalcon
1077762  I3df0889c1225dcb613910cb90f050a2994f5834d  msm: mdss: fix crash when wrong params are passed on the
1065513  I29b9ea41df467d0092be8005733016843dc26f60  input: touchscreen: replace macros used inside the funct
1082047  I6a678d9133b06dda2de1c6cfe4bbafacc594f299  leds: qpnp-wled: add a secure write API
1076402  If06c421c559277066cf9ffd86e71fe366ab7bf8d  leds: qpnp-flash-v2: fix safety timer configuration
1081279  I1b614cb70aa43087c122ce6c42305491e5f5b34b  clk: qcom: Change clock index for mmssnoc_axi clock for
1081803  Ib0d27c13c0ebdfac629c1469c9a91a0b84d03640  soc: qcom: pil-q6v5: Add support for qdspv62.1.5 reset
1080796  Ife9d6a16e1607b40c71eb1897947bf4645ac9561  USB: dwc3-msm: Disable pipe clock requirement for HS/FS
1025311  I5f9627964d86c51cb8fb0c5c5f7c9f5f2e19b3e7  cfg80211: Add support to configure a beacon data rate
1081430  I2abd67a8ff647165ddedb385e80f8705d73d3100  input: touchscreen: fix compilation errors for Goodix dr
1077525  I012484aa92e8f25ed0ad4f6709a54f40409ab8d2  msm: ipa: Fix the memory leak in error cases
1079971  Ib6bddd5f43b41660304d240db39f4e273ede3af0  input: misc: hbtp_input: Add support for multiple AFE
1082047  I4c99b598d5f484368712eab239f7de70b727c2aa  leds: qpnp-wled: Fix configuration of AVDD TRIM and OVP
1075303  I0552bf8a48c214b89e160fe4df145973f02c2788  ASoC: wcd934x-dsp-cntl: fix memory enable/disable sequen
1079311  I58421ba7f784f7467015a5943aecc2dd0022145a  leds: qpnp-flash-v2: fix ITARGET configuration
1048242  I2ef9f3e122b39682a743334668fcd0aeb085e147  qcom-charger: set optimal buck switching frequency
1080290  I0f48666ac948a9571e249598ae7cc19df9036b1d  qseecom: check buffer size when loading firmware images
1022917  Ib846318dc3d359672314485d386a23fd8621bfda  ARM: dts: msm: Add dmic clock rate on ECPP path for msmc
1072280  I9640112b8945dc603e3af55fc1096bea9f7e6634  ASoC: qdsp6v2: fix potential bug of infinite loop
1082047  If0646f5ae111f75ababc405cbd2f7bcea899360e  leds: qpnp-wled: add support to read pmic revid
1043802  I1d9d7a6b6bd2d3420a28a8c09868143bb9bd3b27  soc: qcom: add support for fingerprint sensor driver
1076516  If68d029c87db2370f07f279aacbc51d139f00aae  msm: mdss: dp: trigger link retraining on link status up
1077773  I99fcbc5c1b36f62ecfa7631f1b51633a7fb08417  ASoC: msmcobalt: Add support for all TDM modes
1080331  Iaec3a17e5eed952ba0abdcffbf321ad942486769  drivers: mfd: wcd934x: Set MBHC registers to volatile
1082440  I287761ce5b13dc139f59270dceac67cc9ae45cff  defconfig: Enable CONFIG_ICNSS_DEBUG for 32-bit msmfalco
1082440  I6028d4a01ddfd8f8589a1de5322c2780d256d5bc  defconfig: Enable CONFIG_ICNSS_DEBUG for 32-bit msmcorte
1024872  I03ce718b0456d437d31d701586965d0aa7443b51  msm: mdss: Correct the format specifiers in sscanf funct
1076227  Ic756deeabeb0eaaf95e5354b565cf20a85fd7cca  msm: mdss: dp: remove programming of maud/naud for audio
1024934  I533e2d6a760ebd52047c521c1a1e85bfc754fce1  msm: mdss: Fix memleak in framebuffer register and remov
1081757  I13156d82f544cd28a1579c23b18182c879c85767  clk: osm: msm: allow WDOG status register to be enabled
1077761  I9cc07edd7d72e7fae8f95b16461aac75c3fa1fc9  ARM: dts: msm: add support for all TDM modes
1024850  I10ea6f2b22d554d02f302f5700f6674d08e4777d  msm: mdss: Check for buffer boundary condition in panel_
1073650  I2836ef5b7f37f627a32a0b4332dc299a60012704  msm: mdss: Avoid adaptive variable refresh during dynami
1081345  I9ecc0cc1b5de5e0bd47c658747463a4498123655  sensors: ssc: add support to read SLPI fw name
1077762  Ia03f76cf13d3787e2e13e27ae0360723fe36d615  ARM: dts: msm: enable esd for jdi qhd cmd mode panel in
1077684  I340f778583bb63d6436c4ef8f51ead77a2871625  msm: mdss: Correct settings when on/off happens in dest-
1022917  I772e05990a796c99c3b6b6f50323d87b5894231b  ASoC: msmcobalt: Fix MCLK routing on msmcobalt for WCD93
1082191  If3d964840362b6147ba7c9e26c4a3f5d20e5a557  clk: msm: clock: Remove controlling some graphics clocks
1051643 1054360  I1b3af2a59dabf67a947a59334883ce0437c9ee6f  ARM: dts: msm: add PCIe PHY sequence for msmcobalt v2

Change-Id: I673d05df7b013787ac73574a972dd7bade7332af
CRs-Fixed: 1076227, 1081757, 1077766, 1077761, 1072280, 1077762, 1022917, 1074914, 1069060, 1051643, 1076516, 1081430, 1080290, 1077525, 1078147, 1081803, 1024862, 1077684, 1048242, 1081806, 1025311, 1081345, 1050304, 1065513, 1081547, 1061018, 1073650, 1082191, 1077773, 1079311, 1082440, 1082650, 1081010, 1080796, 1024872, 1076402, 1054360, 1024934, 1080331, 1079971, 1043802, 1075303, 1024850, 1080392, 1069188, 1082022, 1082047, 1080014, 1081279
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/inode.c              7
-rw-r--r--  kernel/bpf/syscall.c           24
-rw-r--r--  kernel/bpf/verifier.c          66
-rw-r--r--  kernel/cgroup.c                 7
-rw-r--r--  kernel/cpuset.c                 4
-rw-r--r--  kernel/events/ring_buffer.c    10
-rw-r--r--  kernel/futex.c                 27
-rw-r--r--  kernel/locking/mcs_spinlock.h   8
-rw-r--r--  kernel/sched/fair.c             2
-rw-r--r--  kernel/sched/hmp.c              2
-rw-r--r--  kernel/sched/sched.h            1
-rw-r--r--  kernel/trace/trace_events.c     9
-rw-r--r--  kernel/workqueue.c             40
13 files changed, 160 insertions, 47 deletions
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 5a8a797d50b7..d1a7646f79c5 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -31,10 +31,10 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
{
switch (type) {
case BPF_TYPE_PROG:
- atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt);
+ raw = bpf_prog_inc(raw);
break;
case BPF_TYPE_MAP:
- bpf_map_inc(raw, true);
+ raw = bpf_map_inc(raw, true);
break;
default:
WARN_ON_ONCE(1);
@@ -277,7 +277,8 @@ static void *bpf_obj_do_get(const struct filename *pathname,
goto out;
raw = bpf_any_get(inode->i_private, *type);
- touch_atime(&path);
+ if (!IS_ERR(raw))
+ touch_atime(&path);
path_put(&path);
return raw;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 3b39550d8485..4e32cc94edd9 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -181,11 +181,18 @@ struct bpf_map *__bpf_map_get(struct fd f)
return f.file->private_data;
}
-void bpf_map_inc(struct bpf_map *map, bool uref)
+/* prog's and map's refcnt limit */
+#define BPF_MAX_REFCNT 32768
+
+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
- atomic_inc(&map->refcnt);
+ if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
+ atomic_dec(&map->refcnt);
+ return ERR_PTR(-EBUSY);
+ }
if (uref)
atomic_inc(&map->usercnt);
+ return map;
}
struct bpf_map *bpf_map_get_with_uref(u32 ufd)
@@ -197,7 +204,7 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
if (IS_ERR(map))
return map;
- bpf_map_inc(map, true);
+ map = bpf_map_inc(map, true);
fdput(f);
return map;
@@ -580,6 +587,15 @@ static struct bpf_prog *__bpf_prog_get(struct fd f)
return f.file->private_data;
}
+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
+{
+ if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
+ atomic_dec(&prog->aux->refcnt);
+ return ERR_PTR(-EBUSY);
+ }
+ return prog;
+}
+
/* called by sockets/tracing/seccomp before attaching program to an event
* pairs with bpf_prog_put()
*/
@@ -592,7 +608,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
if (IS_ERR(prog))
return prog;
- atomic_inc(&prog->aux->refcnt);
+ prog = bpf_prog_inc(prog);
fdput(f);
return prog;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2e7f7ab739e4..2cbfba78d3db 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -239,15 +239,6 @@ static const char * const reg_type_str[] = {
[CONST_IMM] = "imm",
};
-static const struct {
- int map_type;
- int func_id;
-} func_limit[] = {
- {BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
- {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
- {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
-};
-
static void print_verifier_state(struct verifier_env *env)
{
enum bpf_reg_type t;
@@ -898,24 +889,44 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
static int check_map_func_compatibility(struct bpf_map *map, int func_id)
{
- bool bool_map, bool_func;
- int i;
-
if (!map)
return 0;
- for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
- bool_map = (map->map_type == func_limit[i].map_type);
- bool_func = (func_id == func_limit[i].func_id);
- /* only when map & func pair match it can continue.
- * don't allow any other map type to be passed into
- * the special func;
- */
- if (bool_func && bool_map != bool_func)
- return -EINVAL;
+ /* We need a two way check, first is from map perspective ... */
+ switch (map->map_type) {
+ case BPF_MAP_TYPE_PROG_ARRAY:
+ if (func_id != BPF_FUNC_tail_call)
+ goto error;
+ break;
+ case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
+ if (func_id != BPF_FUNC_perf_event_read &&
+ func_id != BPF_FUNC_perf_event_output)
+ goto error;
+ break;
+ default:
+ break;
+ }
+
+ /* ... and second from the function itself. */
+ switch (func_id) {
+ case BPF_FUNC_tail_call:
+ if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
+ goto error;
+ break;
+ case BPF_FUNC_perf_event_read:
+ case BPF_FUNC_perf_event_output:
+ if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
+ goto error;
+ break;
+ default:
+ break;
}
return 0;
+error:
+ verbose("cannot pass map_type %d into func %d\n",
+ map->map_type, func_id);
+ return -EINVAL;
}
static int check_call(struct verifier_env *env, int func_id)
@@ -1348,6 +1359,7 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
}
if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
+ BPF_SIZE(insn->code) == BPF_DW ||
(mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
verbose("BPF_LD_ABS uses reserved fields\n");
return -EINVAL;
@@ -2003,7 +2015,6 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
if (IS_ERR(map)) {
verbose("fd %d is not pointing to valid bpf_map\n",
insn->imm);
- fdput(f);
return PTR_ERR(map);
}
@@ -2023,15 +2034,18 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
return -E2BIG;
}
- /* remember this map */
- env->used_maps[env->used_map_cnt++] = map;
-
/* hold the map. If the program is rejected by verifier,
* the map will be released by release_maps() or it
* will be used by the valid program until it's unloaded
* and all maps are released in free_bpf_prog_info()
*/
- bpf_map_inc(map, false);
+ map = bpf_map_inc(map, false);
+ if (IS_ERR(map)) {
+ fdput(f);
+ return PTR_ERR(map);
+ }
+ env->used_maps[env->used_map_cnt++] = map;
+
fdput(f);
next_insn:
insn++;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index cc6c7d0a6758..e2e784ad9e0f 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2776,9 +2776,10 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
size_t nbytes, loff_t off, bool threadgroup)
{
struct task_struct *tsk;
+ struct cgroup_subsys *ss;
struct cgroup *cgrp;
pid_t pid;
- int ret;
+ int ssid, ret;
if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
return -EINVAL;
@@ -2826,8 +2827,10 @@ out_unlock_rcu:
rcu_read_unlock();
out_unlock_threadgroup:
percpu_up_write(&cgroup_threadgroup_rwsem);
+ for_each_subsys(ss, ssid)
+ if (ss->post_attach)
+ ss->post_attach();
cgroup_kn_unlock(of->kn);
- cpuset_post_attach_flush();
return ret ?: nbytes;
}
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index a7ec545308a6..e3c0f38acbe6 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -57,7 +57,6 @@
#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
-#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/wait.h>
@@ -1029,7 +1028,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
}
}
-void cpuset_post_attach_flush(void)
+static void cpuset_post_attach(void)
{
flush_workqueue(cpuset_migrate_mm_wq);
}
@@ -2122,6 +2121,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
.allow_attach = cpuset_allow_attach,
.cancel_attach = cpuset_cancel_attach,
.attach = cpuset_attach,
+ .post_attach = cpuset_post_attach,
.bind = cpuset_bind,
.legacy_cftypes = files,
.early_init = 1,
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index adfdc0536117..014b69528194 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -347,6 +347,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
bool truncated)
{
struct ring_buffer *rb = handle->rb;
+ bool wakeup = truncated;
unsigned long aux_head;
u64 flags = 0;
@@ -375,9 +376,16 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
- perf_output_wakeup(handle);
+ wakeup = true;
local_add(rb->aux_watermark, &rb->aux_wakeup);
}
+
+ if (wakeup) {
+ if (truncated)
+ handle->event->pending_disable = 1;
+ perf_output_wakeup(handle);
+ }
+
handle->event = NULL;
local_set(&rb->aux_nest, 0);
diff --git a/kernel/futex.c b/kernel/futex.c
index 461c72b2dac2..9d8163afd87c 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1244,10 +1244,20 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
if (unlikely(should_fail_futex(true)))
ret = -EFAULT;
- if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
+ if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
ret = -EFAULT;
- else if (curval != uval)
- ret = -EINVAL;
+ } else if (curval != uval) {
+ /*
+ * If an unconditional UNLOCK_PI operation (user space did not
+ * try the TID->0 transition) raced with a waiter setting the
+ * FUTEX_WAITERS flag between get_user() and locking the hash
+ * bucket lock, retry the operation.
+ */
+ if ((FUTEX_TID_MASK & curval) == uval)
+ ret = -EAGAIN;
+ else
+ ret = -EINVAL;
+ }
if (ret) {
raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
return ret;
@@ -1474,8 +1484,8 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
if (likely(&hb1->chain != &hb2->chain)) {
plist_del(&q->list, &hb1->chain);
hb_waiters_dec(hb1);
- plist_add(&q->list, &hb2->chain);
hb_waiters_inc(hb2);
+ plist_add(&q->list, &hb2->chain);
q->lock_ptr = &hb2->lock;
}
get_futex_key_refs(key2);
@@ -2538,6 +2548,15 @@ retry:
if (ret == -EFAULT)
goto pi_faulted;
/*
+ * An unconditional UNLOCK_PI op raced against a waiter
+ * setting the FUTEX_WAITERS bit. Try again.
+ */
+ if (ret == -EAGAIN) {
+ spin_unlock(&hb->lock);
+ put_futex_key(&key);
+ goto retry;
+ }
+ /*
* wake_futex_pi has detected invalid state. Tell user
* space.
*/
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index 5b9102a47ea5..c835270f0c2f 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -67,7 +67,13 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
node->locked = 0;
node->next = NULL;
- prev = xchg_acquire(lock, node);
+ /*
+ * We rely on the full barrier with global transitivity implied by the
+ * below xchg() to order the initialization stores above against any
+ * observation of @node. And to provide the ACQUIRE ordering associated
+ * with a LOCK primitive.
+ */
+ prev = xchg(lock, node);
if (likely(prev == NULL)) {
/*
* Lock acquired, don't need to set node->locked to 1. Threads
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e32d4d7903b0..e0f212743c77 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3525,7 +3525,7 @@ static void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
BUG_ON(stats->nr_big_tasks < 0 ||
(s64)stats->cumulative_runnable_avg < 0);
- verify_pred_demands_sum(stats);
+ BUG_ON((s64)stats->pred_demands_sum < 0);
}
#else /* CONFIG_CFS_BANDWIDTH */
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index d220482f4dbc..50a6d8e0d4d4 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -1387,7 +1387,7 @@ void dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
dec_cumulative_runnable_avg(&rq->hmp_stats, p);
}
-static void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra)
+void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra)
{
stats->nr_big_tasks = 0;
if (reset_cra) {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 471dc9faab35..4289bf6cd642 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1407,6 +1407,7 @@ extern void inc_rq_hmp_stats(struct rq *rq,
struct task_struct *p, int change_cra);
extern void dec_rq_hmp_stats(struct rq *rq,
struct task_struct *p, int change_cra);
+extern void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra);
extern int is_big_task(struct task_struct *p);
extern int upmigrate_discouraged(struct task_struct *p);
extern struct sched_cluster *rq_cluster(struct rq *rq);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index fda3b6e1b3a0..26960e49bb8c 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2108,8 +2108,13 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file)
trace_create_file("filter", 0644, file->dir, file,
&ftrace_event_filter_fops);
- trace_create_file("trigger", 0644, file->dir, file,
- &event_trigger_fops);
+ /*
+ * Only event directories that can be enabled should have
+ * triggers.
+ */
+ if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
+ trace_create_file("trigger", 0644, file->dir, file,
+ &event_trigger_fops);
trace_create_file("format", 0444, file->dir, call,
&ftrace_event_format_fops);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ef84d9874d03..316b316c7528 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -652,6 +652,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
*/
smp_wmb();
set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
+ /*
+ * The following mb guarantees that previous clear of a PENDING bit
+ * will not be reordered with any speculative LOADS or STORES from
+ * work->current_func, which is executed afterwards. This possible
+ * reordering can lead to a missed execution on attempt to queue
+ * the same @work. E.g. consider this case:
+ *
+ * CPU#0 CPU#1
+ * ---------------------------- --------------------------------
+ *
+ * 1 STORE event_indicated
+ * 2 queue_work_on() {
+ * 3 test_and_set_bit(PENDING)
+ * 4 } set_..._and_clear_pending() {
+ * 5 set_work_data() # clear bit
+ * 6 smp_mb()
+ * 7 work->current_func() {
+ * 8 LOAD event_indicated
+ * }
+ *
+ * Without an explicit full barrier speculative LOAD on line 8 can
+ * be executed before CPU#0 does STORE on line 1. If that happens,
+ * CPU#0 observes the PENDING bit is still set and new execution of
+ * a @work is not queued in the hope that CPU#1 will eventually
+ * finish the queued @work. Meanwhile CPU#1 does not see
+ * event_indicated is set, because speculative LOAD was executed
+ * before actual STORE.
+ */
+ smp_mb();
}
static void clear_work_data(struct work_struct *work)
@@ -4447,6 +4476,17 @@ static void rebind_workers(struct worker_pool *pool)
pool->attrs->cpumask) < 0);
spin_lock_irq(&pool->lock);
+
+ /*
+ * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
+ * w/o preceding DOWN_PREPARE. Work around it. CPU hotplug is
+ * being reworked and this can go away in time.
+ */
+ if (!(pool->flags & POOL_DISASSOCIATED)) {
+ spin_unlock_irq(&pool->lock);
+ return;
+ }
+
pool->flags &= ~POOL_DISASSOCIATED;
for_each_pool_worker(worker, pool) {