path: root/kernel
author    Blagovest Kolenichev <bkolenichev@codeaurora.org>  2017-10-19 16:55:17 -0700
committer Blagovest Kolenichev <bkolenichev@codeaurora.org>  2017-10-20 02:11:57 -0700
commit    b2465235ad43e26e351e3b437a7f90f927cf11a4 (patch)
tree      6339f0e9b8778e98d9664f8570403ad93d7313cb /kernel
parent    709d3a4e3edcb0963185f9481b178c9cfb9aab55 (diff)
parent    73a2b70bdf78f3ad747dd740c9e55c062c71b86c (diff)
Merge android-4.4@73a2b70 (v4.4.92) into msm-4.4
* refs/heads/tmp-73a2b70
  Linux 4.4.92
  ext4: don't allow encrypted operations without keys
  ext4: Don't clear SGID when inheriting ACLs
  ext4: fix data corruption for mmap writes
  sched/cpuset/pm: Fix cpuset vs. suspend-resume bugs
  nvme: protect against simultaneous shutdown invocations
  drm/i915/bios: ignore HDMI on port A
  brcmfmac: setup passive scan if requested by user-space
  uwb: ensure that endpoint is interrupt
  uwb: properly check kthread_run return value
  iio: adc: mcp320x: Fix oops on module unload
  iio: adc: mcp320x: Fix readout of negative voltages
  iio: ad7793: Fix the serial interface reset
  iio: core: Return error for failed read_reg
  staging: iio: ad7192: Fix - use the dedicated reset function avoiding dma from stack.
  iio: ad_sigma_delta: Implement a dedicated reset function
  iio: adc: twl4030: Disable the vusb3v1 rugulator in the error handling path of 'twl4030_madc_probe()'
  iio: adc: twl4030: Fix an error handling path in 'twl4030_madc_probe()'
  xhci: fix finding correct bus_state structure for USB 3.1 hosts
  USB: fix out-of-bounds in usb_set_configuration
  usb: Increase quirk delay for USB devices
  USB: core: harden cdc_parse_cdc_header
  USB: uas: fix bug in handling of alternate settings
  scsi: sd: Do not override max_sectors_kb sysfs setting
  iwlwifi: add workaround to disable wide channels in 5GHz
  HID: i2c-hid: allocate hid buffers for real worst case
  ftrace: Fix kmemleak in unregister_ftrace_graph
  stm class: Fix a use-after-free
  Drivers: hv: fcopy: restore correct transfer length
  driver core: platform: Don't read past the end of "driver_override" buffer
  ALSA: usx2y: Suppress kernel warning at page allocation failures
  ALSA: compress: Remove unused variable
  lsm: fix smack_inode_removexattr and xattr_getsecurity memleak
  USB: g_mass_storage: Fix deadlock when driver is unbound
  usb: gadget: mass_storage: set msg_registered after msg registered
  USB: devio: Don't corrupt user memory
  USB: dummy-hcd: Fix erroneous synchronization change
  USB: dummy-hcd: fix infinite-loop resubmission bug
  USB: dummy-hcd: fix connection failures (wrong speed)
  usb: pci-quirks.c: Corrected timeout values used in handshake
  ALSA: usb-audio: Check out-of-bounds access by corrupted buffer descriptor
  usb: renesas_usbhs: fix usbhsf_fifo_clear() for RX direction
  usb: renesas_usbhs: fix the BCLR setting condition for non-DCP pipe
  usb-storage: unusual_devs entry to fix write-access regression for Seagate external drives
  usb: gadget: udc: atmel: set vbus irqflags explicitly
  USB: gadgetfs: fix copy_to_user while holding spinlock
  USB: gadgetfs: Fix crash caused by inadequate synchronization
  usb: gadget: inode.c: fix unbalanced spin_lock in ep0_write
  ANDROID: binder: init desired_prio.sched_policy before use it
  BACKPORT: net: xfrm: support setting an output mark.
  UPSTREAM: xfrm: Only add l3mdev oif to dst lookups
  UPSTREAM: net: l3mdev: Add master device lookup by index
  Linux 4.4.91
  ttpci: address stringop overflow warning
  ALSA: au88x0: avoid theoretical uninitialized access
  ARM: remove duplicate 'const' annotations'
  IB/qib: fix false-postive maybe-uninitialized warning
  drivers: firmware: psci: drop duplicate const from psci_of_match
  libata: transport: Remove circular dependency at free time
  xfs: remove kmem_zalloc_greedy
  i2c: meson: fix wrong variable usage in meson_i2c_put_data
  md/raid10: submit bio directly to replacement disk
  rds: ib: add error handle
  iommu/io-pgtable-arm: Check for leaf entry before dereferencing it
  parisc: perf: Fix potential NULL pointer dereference
  netfilter: nfnl_cthelper: fix incorrect helper->expect_class_max
  exynos-gsc: Do not swap cb/cr for semi planar formats
  MIPS: IRQ Stack: Unwind IRQ stack onto task stack
  netfilter: invoke synchronize_rcu after set the _hook_ to NULL
  bridge: netlink: register netdevice before executing changelink
  mmc: sdio: fix alignment issue in struct sdio_func
  usb: plusb: Add support for PL-27A1
  team: fix memory leaks
  net/packet: check length in getsockopt() called with PACKET_HDRLEN
  net: core: Prevent from dereferencing null pointer when releasing SKB
  MIPS: Lantiq: Fix another request_mem_region() return code check
  ASoC: dapm: fix some pointer error handling
  usb: chipidea: vbus event may exist before starting gadget
  audit: log 32-bit socketcalls
  ASoC: dapm: handle probe deferrals
  partitions/efi: Fix integer overflow in GPT size calculation
  USB: serial: mos7840: fix control-message error handling
  USB: serial: mos7720: fix control-message error handling
  drm/amdkfd: fix improper return value on error
  IB/ipoib: Replace list_del of the neigh->list with list_del_init
  IB/ipoib: rtnl_unlock can not come after free_netdev
  IB/ipoib: Fix deadlock over vlan_mutex
  tty: goldfish: Fix a parameter of a call to free_irq
  ARM: 8635/1: nommu: allow enabling REMAP_VECTORS_TO_RAM
  iio: adc: hx711: Add DT binding for avia,hx711
  iio: adc: axp288: Drop bogus AXP288_ADC_TS_PIN_CTRL register modifications
  hwmon: (gl520sm) Fix overflows and crash seen when writing into limit attributes
  sh_eth: use correct name for ECMR_MPDE bit
  extcon: axp288: Use vbus-valid instead of -present to determine cable presence
  igb: re-assign hw address pointer on reset after PCI error
  MIPS: ralink: Fix incorrect assignment on ralink_soc
  MIPS: Ensure bss section ends on a long-aligned address
  ARM: dts: r8a7790: Use R-Car Gen 2 fallback binding for msiof nodes
  RDS: RDMA: Fix the composite message user notification
  GFS2: Fix reference to ERR_PTR in gfs2_glock_iter_next
  drm: bridge: add DT bindings for TI ths8135
  drm_fourcc: Fix DRM_FORMAT_MOD_LINEAR #define
  FROMLIST: tracing: Add support for preempt and irq enable/disable events
  FROMLIST: tracing: Prepare to add preempt and irq trace events
  ANDROID: binder: fix transaction leak.
  ANDROID: binder: Add tracing for binder priority inheritance.
  Linux 4.4.90
  fix xen_swiotlb_dma_mmap prototype
  swiotlb-xen: implement xen_swiotlb_dma_mmap callback
  video: fbdev: aty: do not leak uninitialized padding in clk to userspace
  KVM: VMX: use cmpxchg64
  ARM: pxa: fix the number of DMA requestor lines
  ARM: pxa: add the number of DMA requestor lines
  dmaengine: mmp-pdma: add number of requestors
  cxl: Fix driver use count
  KVM: VMX: remove WARN_ON_ONCE in kvm_vcpu_trigger_posted_interrupt
  KVM: VMX: do not change SN bit in vmx_update_pi_irte()
  timer/sysclt: Restrict timer migration sysctl values to 0 and 1
  gfs2: Fix debugfs glocks dump
  x86/fpu: Don't let userspace set bogus xcomp_bv
  btrfs: prevent to set invalid default subvolid
  btrfs: propagate error to btrfs_cmp_data_prepare caller
  btrfs: fix NULL pointer dereference from free_reloc_roots()
  PCI: Fix race condition with driver_override
  kvm: nVMX: Don't allow L2 to access the hardware CR8
  KVM: VMX: Do not BUG() on out-of-bounds guest IRQ
  arm64: fault: Route pte translation faults via do_translation_fault
  arm64: Make sure SPsel is always set
  seccomp: fix the usage of get/put_seccomp_filter() in seccomp_get_filter()
  bsg-lib: don't free job in bsg_prepare_job
  nl80211: check for the required netlink attributes presence
  vfs: Return -ENXIO for negative SEEK_HOLE / SEEK_DATA offsets
  SMB3: Don't ignore O_SYNC/O_DSYNC and O_DIRECT flags
  SMB: Validate negotiate (to protect against downgrade) even if signing off
  Fix SMB3.1.1 guest authentication to Samba
  powerpc/pseries: Fix parent_dn reference leak in add_dt_node()
  KEYS: prevent KEYCTL_READ on negative key
  KEYS: prevent creating a different user's keyrings
  KEYS: fix writing past end of user-supplied buffer in keyring_read()
  crypto: talitos - fix sha224
  crypto: talitos - Don't provide setkey for non hmac hashing algs.
  scsi: scsi_transport_iscsi: fix the issue that iscsi_if_rx doesn't parse nlmsg properly
  md/raid5: preserve STRIPE_ON_UNPLUG_LIST in break_stripe_batch_list
  md/raid5: fix a race condition in stripe batch
  tracing: Erase irqsoff trace with empty write
  tracing: Fix trace_pipe behavior for instance traces
  KVM: PPC: Book3S: Fix race and leak in kvm_vm_ioctl_create_spapr_tce()
  mac80211: flush hw_roc_start work before cancelling the ROC
  cifs: release auth_key.response for reconnect.
  f2fs: catch up to v4.14-rc1
  UPSTREAM: cpufreq: schedutil: use now as reference when aggregating shared policy requests
  ANDROID: add script to fetch android kernel config fragments
  f2fs: reorganize stat information
  f2fs: clean up flush/discard command namings
  f2fs: check in-memory sit version bitmap
  f2fs: check in-memory nat version bitmap
  f2fs: check in-memory block bitmap
  f2fs: introduce FI_ATOMIC_COMMIT
  f2fs: clean up with list_{first, last}_entry
  f2fs: return fs_trim if there is no candidate
  f2fs: avoid needless checkpoint in f2fs_trim_fs
  f2fs: relax async discard commands more
  f2fs: drop exist_data for inline_data when truncated to 0
  f2fs: don't allow encrypted operations without keys
  f2fs: show the max number of atomic operations
  f2fs: get io size bit from mount option
  f2fs: support IO alignment for DATA and NODE writes
  f2fs: add submit_bio tracepoint
  f2fs: reassign new segment for mode=lfs
  f2fs: fix a missing discard prefree segments
  f2fs: use rb_entry_safe
  f2fs: add a case of no need to read a page in write begin
  f2fs: fix a problem of using memory after free
  f2fs: remove unneeded condition
  f2fs: don't cache nat entry if out of memory
  f2fs: remove unused values in recover_fsync_data
  f2fs: support async discard based on v4.9
  f2fs: resolve op and op_flags confilcts
  f2fs: remove wrong backported codes
  FROMLIST: binder: fix use-after-free in binder_transaction()
  UPSTREAM: ipv6: fib: Unlink replaced routes from their nodes

Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>

Conflicts:
	fs/f2fs/crypto_key.c
	fs/f2fs/f2fs_crypto.h
	net/wireless/nl80211.c
	sound/usb/card.c

Change-Id: I742aeaec84c7892165976b7bea3e07bdd6881d93
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpuset.c                    16
-rw-r--r--  kernel/power/process.c              5
-rw-r--r--  kernel/sched/core.c                 7
-rw-r--r--  kernel/sched/cpufreq_schedutil.c    7
-rw-r--r--  kernel/seccomp.c                   23
-rw-r--r--  kernel/sysctl.c                     2
-rw-r--r--  kernel/time/timer.c                 2
-rw-r--r--  kernel/trace/Kconfig               11
-rw-r--r--  kernel/trace/Makefile               1
-rw-r--r--  kernel/trace/ftrace.c              14
-rw-r--r--  kernel/trace/trace.c               12
-rw-r--r--  kernel/trace/trace_irqsoff.c      133
12 files changed, 172 insertions, 61 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index f92ec9a71af3..a599351997ad 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2300,6 +2300,13 @@ retry:
mutex_unlock(&cpuset_mutex);
}
+static bool force_rebuild;
+
+void cpuset_force_rebuild(void)
+{
+ force_rebuild = true;
+}
+
/**
* cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
*
@@ -2374,8 +2381,10 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
}
/* rebuild sched domains if cpus_allowed has changed */
- if (cpus_updated)
+ if (cpus_updated || force_rebuild) {
+ force_rebuild = false;
rebuild_sched_domains();
+ }
}
void cpuset_update_active_cpus(bool cpu_online)
@@ -2394,6 +2403,11 @@ void cpuset_update_active_cpus(bool cpu_online)
schedule_work(&cpuset_hotplug_work);
}
+void cpuset_wait_for_hotplug(void)
+{
+ flush_work(&cpuset_hotplug_work);
+}
+
/*
* Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
* Call this routine anytime after node_states[N_MEMORY] changes.
diff --git a/kernel/power/process.c b/kernel/power/process.c
index e7f1f736a5b6..cc177142a08f 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -19,8 +19,9 @@
#include <linux/kmod.h>
#include <trace/events/power.h>
#include <linux/wakeup_reason.h>
+#include <linux/cpuset.h>
-/*
+/*
* Timeout for stopping processes
*/
unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
@@ -208,6 +209,8 @@ void thaw_processes(void)
__usermodehelper_set_disable_depth(UMH_FREEZING);
thaw_workqueues();
+ cpuset_wait_for_hotplug();
+
read_lock(&tasklist_lock);
for_each_process_thread(g, p) {
/* No other threads should have PF_SUSPEND_TASK set */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0271e8e2c020..3410706ea7d5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8188,17 +8188,16 @@ static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
* operation in the resume sequence, just build a single sched
* domain, ignoring cpusets.
*/
- num_cpus_frozen--;
- if (likely(num_cpus_frozen)) {
- partition_sched_domains(1, NULL, NULL);
+ partition_sched_domains(1, NULL, NULL);
+ if (--num_cpus_frozen)
break;
- }
/*
* This is the last CPU online operation. So fall through and
* restore the original sched domains by considering the
* cpuset configurations.
*/
+ cpuset_force_rebuild();
case CPU_ONLINE:
cpuset_update_active_cpus(true);
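
Taken together, the cpuset.c, power/process.c and sched/core.c hunks above apply one pattern: hotplug handling stays asynchronous, but the last CPU coming back online after resume forces a single sched-domain rebuild, and thaw_processes() waits for that deferred work before thawing tasks. A minimal, self-contained sketch of this flag-and-flush pattern (illustrative only; the names below are hypothetical, not the kernel's, and locking is omitted for brevity):

/* Illustrative sketch only -- hypothetical names, simplified locking. */
#include <linux/workqueue.h>

static bool force_rebuild;              /* set when resume needs a full rebuild */

static void hotplug_workfn(struct work_struct *work)
{
        if (force_rebuild) {
                force_rebuild = false;
                /* rebuild sched domains / cpuset state here */
        }
}
static DECLARE_WORK(hotplug_work, hotplug_workfn);

/* hotplug notifier path: defer the heavy rebuild */
void note_cpu_hotplug(bool last_frozen_cpu)
{
        if (last_frozen_cpu)
                force_rebuild = true;   /* like cpuset_force_rebuild() */
        schedule_work(&hotplug_work);   /* like cpuset_update_active_cpus() */
}

/* resume path: do not thaw tasks until the deferred work has run */
void wait_for_hotplug(void)
{
        flush_work(&hotplug_work);      /* like cpuset_wait_for_hotplug() */
}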
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index b90f7434e13b..28977799017b 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -322,11 +322,10 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
sugov_update_commit(sg_policy, time, next_f);
}
-static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu)
+static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
struct cpufreq_policy *policy = sg_policy->policy;
- u64 last_freq_update_time = sg_policy->last_freq_update_time;
unsigned long util = 0, max = 1;
unsigned int j;
@@ -342,7 +341,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu)
* enough, don't take the CPU into account as it probably is
* idle now (and clear iowait_boost for it).
*/
- delta_ns = last_freq_update_time - j_sg_cpu->last_update;
+ delta_ns = time - j_sg_cpu->last_update;
if (delta_ns > TICK_NSEC) {
j_sg_cpu->iowait_boost = 0;
j_sg_cpu->iowait_boost_pending = false;
@@ -387,7 +386,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
if (flags & SCHED_CPUFREQ_DL)
next_f = sg_policy->policy->cpuinfo.max_freq;
else
- next_f = sugov_next_freq_shared(sg_cpu);
+ next_f = sugov_next_freq_shared(sg_cpu, time);
sugov_update_commit(sg_policy, time, next_f);
}
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 15a1795bbba1..efd384f3f852 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -457,14 +457,19 @@ static long seccomp_attach_filter(unsigned int flags,
return 0;
}
+void __get_seccomp_filter(struct seccomp_filter *filter)
+{
+ /* Reference count is bounded by the number of total processes. */
+ atomic_inc(&filter->usage);
+}
+
/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
struct seccomp_filter *orig = tsk->seccomp.filter;
if (!orig)
return;
- /* Reference count is bounded by the number of total processes. */
- atomic_inc(&orig->usage);
+ __get_seccomp_filter(orig);
}
static inline void seccomp_filter_free(struct seccomp_filter *filter)
@@ -475,10 +480,8 @@ static inline void seccomp_filter_free(struct seccomp_filter *filter)
}
}
-/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
-void put_seccomp_filter(struct task_struct *tsk)
+static void __put_seccomp_filter(struct seccomp_filter *orig)
{
- struct seccomp_filter *orig = tsk->seccomp.filter;
/* Clean up single-reference branches iteratively. */
while (orig && atomic_dec_and_test(&orig->usage)) {
struct seccomp_filter *freeme = orig;
@@ -487,6 +490,12 @@ void put_seccomp_filter(struct task_struct *tsk)
}
}
+/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
+void put_seccomp_filter(struct task_struct *tsk)
+{
+ __put_seccomp_filter(tsk->seccomp.filter);
+}
+
/**
* seccomp_send_sigsys - signals the task to allow in-process syscall emulation
* @syscall: syscall number to send to userland
@@ -927,13 +936,13 @@ long seccomp_get_filter(struct task_struct *task, unsigned long filter_off,
if (!data)
goto out;
- get_seccomp_filter(task);
+ __get_seccomp_filter(filter);
spin_unlock_irq(&task->sighand->siglock);
if (copy_to_user(data, fprog->filter, bpf_classic_proglen(fprog)))
ret = -EFAULT;
- put_seccomp_filter(task);
+ __put_seccomp_filter(filter);
return ret;
out:
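
The seccomp.c hunk above introduces object-level __get_seccomp_filter()/__put_seccomp_filter() so seccomp_get_filter() can pin and later release the exact filter it validated under the sighand lock, rather than whatever task->seccomp.filter points to by then. A sketch of the imbalance the task-based helpers allowed (illustrative commentary, not code from the patch):

/*
 *   filter = task->seccomp.filter;           // validated under siglock
 *   __get_seccomp_filter(filter);            // pins that specific object
 *   spin_unlock_irq(&task->sighand->siglock);
 *   copy_to_user(...);                       // task may install a new filter here
 *   __put_seccomp_filter(filter);            // drops the object pinned above
 *
 * The old put_seccomp_filter(task) re-read task->seccomp.filter after the
 * lock was dropped, so it could drop a reference on a newer filter than the
 * one the reference was taken on, unbalancing both objects' usage counts.
 */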
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8576e6385d63..3fbe2765f307 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1413,6 +1413,8 @@ static struct ctl_table kern_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = timer_migration_handler,
+ .extra1 = &zero,
+ .extra2 = &one,
},
#endif
#ifdef CONFIG_BPF_SYSCALL
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 90a82deece45..903705687b52 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -131,7 +131,7 @@ int timer_migration_handler(struct ctl_table *table, int write,
int ret;
mutex_lock(&mutex);
- ret = proc_dointvec(table, write, buffer, lenp, ppos);
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (!ret && write)
timers_update_migration(false);
mutex_unlock(&mutex);
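
The sysctl.c and timer.c hunks above bound the timer_migration sysctl to 0/1 by adding .extra1/.extra2 and switching the handler to the min/max-aware helper. A minimal sketch of that idiom for a boolean-style knob (hypothetical table entry, not from the patch):

/* Illustrative sketch only -- hypothetical sysctl entry. */
#include <linux/sysctl.h>

static int zero;
static int one = 1;
static unsigned int example_flag;       /* hypothetical 0/1 knob */

static struct ctl_table example_table[] = {
        {
                .procname       = "example_flag",
                .data           = &example_flag,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax, /* rejects values outside extra1..extra2 */
                .extra1         = &zero,
                .extra2         = &one,
        },
        { }
};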
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 048bf074bef9..3c7b7a9bcad1 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -190,6 +190,17 @@ config FUNCTION_GRAPH_TRACER
address on the current task structure into a stack of calls.
+config PREEMPTIRQ_EVENTS
+ bool "Enable trace events for preempt and irq disable/enable"
+ select TRACE_IRQFLAGS
+ depends on DEBUG_PREEMPT || !PROVE_LOCKING
+ default n
+ help
+ Enable tracing of disable and enable events for preemption and irqs.
+ For tracing preempt disable/enable events, DEBUG_PREEMPT must be
+ enabled. For tracing irq disable/enable events, PROVE_LOCKING must
+ be disabled.
+
config IRQSOFF_TRACER
bool "Interrupts-off Latency Tracer"
default n
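
The help text above encodes two constraints: preempt events need DEBUG_PREEMPT, and irq events need PROVE_LOCKING disabled (TRACE_IRQFLAGS is selected automatically). An illustrative config fragment that satisfies both (assumed, for illustration only):

# Illustrative fragment only -- adjust to the target defconfig.
CONFIG_PREEMPTIRQ_EVENTS=y
CONFIG_DEBUG_PREEMPT=y
CONFIG_TRACE_IRQFLAGS=y
# CONFIG_PROVE_LOCKING is not set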
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 2963266fb7bf..a0177ae43058 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_TRACING) += trace_stat.o
obj-$(CONFIG_TRACING) += trace_printk.o
obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
+obj-$(CONFIG_PREEMPTIRQ_EVENTS) += trace_irqsoff.o
obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 38d73a6e2857..fc0051fd672d 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4315,9 +4315,6 @@ static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
-static unsigned long save_global_trampoline;
-static unsigned long save_global_flags;
-
static int __init set_graph_function(char *str)
{
strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
@@ -5907,17 +5904,6 @@ void unregister_ftrace_graph(void)
unregister_pm_notifier(&ftrace_suspend_notifier);
unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
-#ifdef CONFIG_DYNAMIC_FTRACE
- /*
- * Function graph does not allocate the trampoline, but
- * other global_ops do. We need to reset the ALLOC_TRAMP flag
- * if one was used.
- */
- global_ops.trampoline = save_global_trampoline;
- if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
- global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
-#endif
-
out:
mutex_unlock(&ftrace_lock);
}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 35613a2a5164..6580ec6bc371 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3303,11 +3303,17 @@ static int tracing_open(struct inode *inode, struct file *file)
/* If this file was open for write, then erase contents */
if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
int cpu = tracing_get_cpu(inode);
+ struct trace_buffer *trace_buf = &tr->trace_buffer;
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+ if (tr->current_trace->print_max)
+ trace_buf = &tr->max_buffer;
+#endif
if (cpu == RING_BUFFER_ALL_CPUS)
- tracing_reset_online_cpus(&tr->trace_buffer);
+ tracing_reset_online_cpus(trace_buf);
else
- tracing_reset(&tr->trace_buffer, cpu);
+ tracing_reset(trace_buf, cpu);
}
if (file->f_mode & FMODE_READ) {
@@ -4858,7 +4864,7 @@ static int tracing_wait_pipe(struct file *filp)
*
* iter->pos will be 0 if we haven't read anything.
*/
- if (!tracing_is_on() && iter->pos)
+ if (!tracer_tracing_is_on(iter->tr) && iter->pos)
break;
mutex_unlock(&iter->mutex);
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index be3222b7d72e..21b162c07e83 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -16,6 +16,10 @@
#include "trace.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/preemptirq.h>
+
+#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;
@@ -451,63 +455,43 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
#else /* !CONFIG_PROVE_LOCKING */
/*
- * Stubs:
- */
-
-void trace_softirqs_on(unsigned long ip)
-{
-}
-
-void trace_softirqs_off(unsigned long ip)
-{
-}
-
-inline void print_irqtrace_events(struct task_struct *curr)
-{
-}
-
-/*
* We are only interested in hardirq on/off events:
*/
-void trace_hardirqs_on(void)
+static inline void tracer_hardirqs_on(void)
{
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
-EXPORT_SYMBOL(trace_hardirqs_on);
-void trace_hardirqs_off(void)
+static inline void tracer_hardirqs_off(void)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
-EXPORT_SYMBOL(trace_hardirqs_off);
-__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
+static inline void tracer_hardirqs_on_caller(unsigned long caller_addr)
{
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, caller_addr);
}
-EXPORT_SYMBOL(trace_hardirqs_on_caller);
-__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
+static inline void tracer_hardirqs_off_caller(unsigned long caller_addr)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, caller_addr);
}
-EXPORT_SYMBOL(trace_hardirqs_off_caller);
#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */
#ifdef CONFIG_PREEMPT_TRACER
-void trace_preempt_on(unsigned long a0, unsigned long a1)
+static inline void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
if (preempt_trace() && !irq_trace())
stop_critical_timing(a0, a1);
}
-void trace_preempt_off(unsigned long a0, unsigned long a1)
+static inline void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
if (preempt_trace() && !irq_trace())
start_critical_timing(a0, a1);
@@ -770,3 +754,100 @@ __init static int init_irqsoff_tracer(void)
return 0;
}
core_initcall(init_irqsoff_tracer);
+#endif /* IRQSOFF_TRACER || PREEMPTOFF_TRACER */
+
+#ifndef CONFIG_IRQSOFF_TRACER
+static inline void tracer_hardirqs_on(void) { }
+static inline void tracer_hardirqs_off(void) { }
+static inline void tracer_hardirqs_on_caller(unsigned long caller_addr) { }
+static inline void tracer_hardirqs_off_caller(unsigned long caller_addr) { }
+#endif
+
+#ifndef CONFIG_PREEMPT_TRACER
+static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
+static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
+#endif
+
+/* Per-cpu variable to prevent redundant calls when IRQs already off */
+static DEFINE_PER_CPU(int, tracing_irq_cpu);
+
+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
+void trace_hardirqs_on(void)
+{
+ if (!this_cpu_read(tracing_irq_cpu))
+ return;
+
+ trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+ tracer_hardirqs_on();
+
+ this_cpu_write(tracing_irq_cpu, 0);
+}
+EXPORT_SYMBOL(trace_hardirqs_on);
+
+void trace_hardirqs_off(void)
+{
+ if (this_cpu_read(tracing_irq_cpu))
+ return;
+
+ this_cpu_write(tracing_irq_cpu, 1);
+
+ trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+ tracer_hardirqs_off();
+}
+EXPORT_SYMBOL(trace_hardirqs_off);
+
+__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
+{
+ if (!this_cpu_read(tracing_irq_cpu))
+ return;
+
+ trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
+ tracer_hardirqs_on_caller(caller_addr);
+
+ this_cpu_write(tracing_irq_cpu, 0);
+}
+EXPORT_SYMBOL(trace_hardirqs_on_caller);
+
+__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
+{
+ if (this_cpu_read(tracing_irq_cpu))
+ return;
+
+ this_cpu_write(tracing_irq_cpu, 1);
+
+ trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
+ tracer_hardirqs_off_caller(caller_addr);
+}
+EXPORT_SYMBOL(trace_hardirqs_off_caller);
+
+/*
+ * Stubs:
+ */
+
+void trace_softirqs_on(unsigned long ip)
+{
+}
+
+void trace_softirqs_off(unsigned long ip)
+{
+}
+
+inline void print_irqtrace_events(struct task_struct *curr)
+{
+}
+#endif
+
+#if defined(CONFIG_PREEMPT_TRACER) || \
+ (defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
+void trace_preempt_on(unsigned long a0, unsigned long a1)
+{
+ trace_preempt_enable_rcuidle(a0, a1);
+ tracer_preempt_on(a0, a1);
+}
+
+void trace_preempt_off(unsigned long a0, unsigned long a1)
+{
+ trace_preempt_disable_rcuidle(a0, a1);
+ tracer_preempt_off(a0, a1);
+}
+#endif
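
The per-cpu tracing_irq_cpu guard added above collapses nested or redundant disable/enable calls into a single pair of trace events. An illustrative call sequence (not part of the patch):

/*
 *   trace_hardirqs_off();   // tracing_irq_cpu: 0 -> 1, emits irq_disable
 *   trace_hardirqs_off();   // already 1, returns early, no event
 *   trace_hardirqs_on();    // emits irq_enable, tracing_irq_cpu: 1 -> 0
 *   trace_hardirqs_on();    // already 0, returns early, no event
 */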