Diffstat (limited to 'kernel/power')
-rw-r--r--   kernel/power/Kconfig             4
-rw-r--r--   kernel/power/hibernate.c        20
-rw-r--r--   kernel/power/main.c             17
-rw-r--r--   kernel/power/power.h             9
-rw-r--r--   kernel/power/process.c           8
-rw-r--r--   kernel/power/qos.c             222
-rw-r--r--   kernel/power/suspend.c           7
-rw-r--r--   kernel/power/swap.c             24
-rw-r--r--   kernel/power/wakeup_reason.c   547
9 files changed, 747 insertions, 111 deletions
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 4335e7d1c391..500ba8b970e4 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -322,3 +322,7 @@ config PM_GENERIC_DOMAINS_OF
config CPU_PM
bool
+
+config DEDUCE_WAKEUP_REASONS
+ bool
+ default n
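Note: DEDUCE_WAKEUP_REASONS is added as a promptless bool defaulting to n, so it can only be enabled when another Kconfig symbol selects it. When it is left unset, the wakeup_reason rework further down records only the immediate (base) wakeup IRQs and skips the chained-IRQ tree deduction guarded by CONFIG_DEDUCE_WAKEUP_REASONS.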
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 70d9f3f031e3..184ea54e721a 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -30,6 +30,7 @@
#include <linux/genhd.h>
#include <linux/ktime.h>
#include <trace/events/power.h>
+#include <soc/qcom/boot_stats.h>
#include "power.h"
@@ -469,6 +470,7 @@ static int resume_target_kernel(bool platform_mode)
touch_softlockup_watchdog();
syscore_resume();
+ place_marker("PM: Image Restoration failed!");
Enable_irqs:
local_irq_enable();
@@ -705,6 +707,7 @@ int hibernate(void)
pm_restore_gfp_mask();
} else {
pr_debug("PM: Image restored successfully.\n");
+ place_marker("PM: Image restored!");
}
Free_bitmaps:
@@ -1152,6 +1155,22 @@ static int __init kaslr_nohibernate_setup(char *str)
return nohibernate_setup(str);
}
+static int __init page_poison_nohibernate_setup(char *str)
+{
+#ifdef CONFIG_PAGE_POISONING_ZERO
+ /*
+ * The zeroing option for page poison skips the checks on alloc;
+ * since hibernation doesn't save free pages, there's no way to
+ * guarantee the pages will still be zeroed.
+ */
+ if (!strcmp(str, "on")) {
+ pr_info("Disabling hibernation due to page poisoning\n");
+ return nohibernate_setup(str);
+ }
+#endif
+ return 1;
+}
+
__setup("noresume", noresume_setup);
__setup("resume_offset=", resume_offset_setup);
__setup("resume=", resume_setup);
@@ -1160,3 +1179,4 @@ __setup("resumewait", resumewait_setup);
__setup("resumedelay=", resumedelay_setup);
__setup("nohibernate", nohibernate_setup);
__setup("kaslr", kaslr_nohibernate_setup);
+__setup("page_poison=", page_poison_nohibernate_setup);
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 68c0eaae8034..5ea50b1b7595 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -287,13 +287,7 @@ static ssize_t pm_wakeup_irq_show(struct kobject *kobj,
return pm_wakeup_irq ? sprintf(buf, "%u\n", pm_wakeup_irq) : -ENODATA;
}
-static ssize_t pm_wakeup_irq_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- return -EINVAL;
-}
-power_attr(pm_wakeup_irq);
+power_attr_ro(pm_wakeup_irq);
#else /* !CONFIG_PM_SLEEP_DEBUG */
static inline void pm_print_times_init(void) {}
@@ -571,14 +565,7 @@ static ssize_t pm_trace_dev_match_show(struct kobject *kobj,
return show_trace_dev_match(buf, PAGE_SIZE);
}
-static ssize_t
-pm_trace_dev_match_store(struct kobject *kobj, struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- return -EINVAL;
-}
-
-power_attr(pm_trace_dev_match);
+power_attr_ro(pm_trace_dev_match);
#endif /* CONFIG_PM_TRACE */
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 436e302eb024..2610516601ee 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -77,6 +77,15 @@ static struct kobj_attribute _name##_attr = { \
.store = _name##_store, \
}
+#define power_attr_ro(_name) \
+static struct kobj_attribute _name##_attr = { \
+ .attr = { \
+ .name = __stringify(_name), \
+ .mode = S_IRUGO, \
+ }, \
+ .show = _name##_show, \
+}
+
/* Preferred image size in bytes (default 500 MB) */
extern unsigned long image_size;
/* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */
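For reference, a minimal sketch of how the new power_attr_ro() helper is used: only a _show() routine is needed and the macro fixes the mode to S_IRUGO. The attribute name pm_example is hypothetical; the real conversions in this patch are pm_wakeup_irq and pm_trace_dev_match in main.c.

    #include <linux/kobject.h>
    #include <linux/sysfs.h>
    #include "power.h"		/* for power_attr_ro() */

    /* Hypothetical read-only value exposed under /sys/power/ */
    static ssize_t pm_example_show(struct kobject *kobj,
    			       struct kobj_attribute *attr, char *buf)
    {
    	return sprintf(buf, "%d\n", 42);
    }
    power_attr_ro(pm_example);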
diff --git a/kernel/power/process.c b/kernel/power/process.c
index cc177142a08f..372de061dda2 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -37,9 +37,6 @@ static int try_to_freeze_tasks(bool user_only)
unsigned int elapsed_msecs;
bool wakeup = false;
int sleep_usecs = USEC_PER_MSEC;
-#ifdef CONFIG_PM_SLEEP
- char suspend_abort[MAX_SUSPEND_ABORT_LEN];
-#endif
do_gettimeofday(&start);
@@ -69,11 +66,6 @@ static int try_to_freeze_tasks(bool user_only)
break;
if (pm_wakeup_pending()) {
-#ifdef CONFIG_PM_SLEEP
- pm_get_active_wakeup_sources(suspend_abort,
- MAX_SUSPEND_ABORT_LEN);
- log_suspend_abort_reason(suspend_abort);
-#endif
wakeup = true;
break;
}
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 97b0df71303e..e6eceb0aa496 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -43,6 +43,9 @@
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+#include <linux/cpumask.h>
#include <linux/uaccess.h>
#include <linux/export.h>
@@ -67,6 +70,8 @@ static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
static struct pm_qos_constraints cpu_dma_constraints = {
.list = PLIST_HEAD_INIT(cpu_dma_constraints.list),
.target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
+ .target_per_cpu = { [0 ... (NR_CPUS - 1)] =
+ PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE },
.default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
.no_constraint_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
.type = PM_QOS_MIN,
@@ -81,6 +86,8 @@ static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
static struct pm_qos_constraints network_lat_constraints = {
.list = PLIST_HEAD_INIT(network_lat_constraints.list),
.target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
+ .target_per_cpu = { [0 ... (NR_CPUS - 1)] =
+ PM_QOS_NETWORK_LAT_DEFAULT_VALUE },
.default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
.no_constraint_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
.type = PM_QOS_MIN,
@@ -91,11 +98,12 @@ static struct pm_qos_object network_lat_pm_qos = {
.name = "network_latency",
};
-
static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
static struct pm_qos_constraints network_tput_constraints = {
.list = PLIST_HEAD_INIT(network_tput_constraints.list),
.target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
+ .target_per_cpu = { [0 ... (NR_CPUS - 1)] =
+ PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE },
.default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
.no_constraint_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
.type = PM_QOS_MAX,
@@ -259,22 +267,60 @@ static const struct file_operations pm_qos_debug_fops = {
.release = single_release,
};
+static inline void pm_qos_set_value_for_cpus(struct pm_qos_constraints *c,
+ struct cpumask *cpus)
+{
+ struct pm_qos_request *req = NULL;
+ int cpu;
+ s32 qos_val[NR_CPUS] = { [0 ... (NR_CPUS - 1)] = c->default_value };
+
+ plist_for_each_entry(req, &c->list, node) {
+ for_each_cpu(cpu, &req->cpus_affine) {
+ switch (c->type) {
+ case PM_QOS_MIN:
+ if (qos_val[cpu] > req->node.prio)
+ qos_val[cpu] = req->node.prio;
+ break;
+ case PM_QOS_MAX:
+ if (req->node.prio > qos_val[cpu])
+ qos_val[cpu] = req->node.prio;
+ break;
+ case PM_QOS_SUM:
+ qos_val[cpu] += req->node.prio;
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+ }
+
+ for_each_possible_cpu(cpu) {
+ if (c->target_per_cpu[cpu] != qos_val[cpu])
+ cpumask_set_cpu(cpu, cpus);
+ c->target_per_cpu[cpu] = qos_val[cpu];
+ }
+}
+
/**
* pm_qos_update_target - manages the constraints list and calls the notifiers
* if needed
* @c: constraints data struct
- * @node: request to add to the list, to update or to remove
+ * @req: request to add to the list, to update or to remove
* @action: action to take on the constraints list
* @value: value of the request to add or update
*
* This function returns 1 if the aggregated constraint value has changed, 0
* otherwise.
*/
-int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
- enum pm_qos_req_action action, int value)
+int pm_qos_update_target(struct pm_qos_constraints *c,
+ struct pm_qos_request *req,
+ enum pm_qos_req_action action, int value)
{
unsigned long flags;
int prev_value, curr_value, new_value;
+ struct plist_node *node = &req->node;
+ struct cpumask cpus;
int ret;
spin_lock_irqsave(&pm_qos_lock, flags);
@@ -305,17 +351,23 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
}
curr_value = pm_qos_get_value(c);
+ cpumask_clear(&cpus);
pm_qos_set_value(c, curr_value);
+ pm_qos_set_value_for_cpus(c, &cpus);
spin_unlock_irqrestore(&pm_qos_lock, flags);
trace_pm_qos_update_target(action, prev_value, curr_value);
- if (prev_value != curr_value) {
+ /*
+ * If any CPU's target value changed (its bit is set in the cpu mask),
+ * call the notifier chain so listeners can apply the new qos
+ * restriction to those cores.
+ */
+ if (!cpumask_empty(&cpus)) {
ret = 1;
if (c->notifiers)
blocking_notifier_call_chain(c->notifiers,
(unsigned long)curr_value,
- NULL);
+ &cpus);
} else {
ret = 0;
}
@@ -398,12 +450,56 @@ int pm_qos_request(int pm_qos_class)
}
EXPORT_SYMBOL_GPL(pm_qos_request);
+int pm_qos_request_for_cpu(int pm_qos_class, int cpu)
+{
+ if (cpu_isolated(cpu))
+ return INT_MAX;
+
+ return pm_qos_array[pm_qos_class]->constraints->target_per_cpu[cpu];
+}
+EXPORT_SYMBOL(pm_qos_request_for_cpu);
+
int pm_qos_request_active(struct pm_qos_request *req)
{
return req->pm_qos_class != 0;
}
EXPORT_SYMBOL_GPL(pm_qos_request_active);
+int pm_qos_request_for_cpumask(int pm_qos_class, struct cpumask *mask)
+{
+ unsigned long irqflags;
+ int cpu;
+ struct pm_qos_constraints *c = NULL;
+ int val;
+
+ spin_lock_irqsave(&pm_qos_lock, irqflags);
+ c = pm_qos_array[pm_qos_class]->constraints;
+ val = c->default_value;
+
+ for_each_cpu(cpu, mask) {
+ if (cpu_isolated(cpu))
+ continue;
+
+ switch (c->type) {
+ case PM_QOS_MIN:
+ if (c->target_per_cpu[cpu] < val)
+ val = c->target_per_cpu[cpu];
+ break;
+ case PM_QOS_MAX:
+ if (c->target_per_cpu[cpu] > val)
+ val = c->target_per_cpu[cpu];
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&pm_qos_lock, irqflags);
+
+ return val;
+}
+EXPORT_SYMBOL(pm_qos_request_for_cpumask);
+
static void __pm_qos_update_request(struct pm_qos_request *req,
s32 new_value)
{
@@ -412,7 +508,7 @@ static void __pm_qos_update_request(struct pm_qos_request *req,
if (new_value != req->node.prio)
pm_qos_update_target(
pm_qos_array[req->pm_qos_class]->constraints,
- &req->node, PM_QOS_UPDATE_REQ, new_value);
+ req, PM_QOS_UPDATE_REQ, new_value);
}
/**
@@ -430,6 +526,41 @@ static void pm_qos_work_fn(struct work_struct *work)
__pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
}
+#ifdef CONFIG_SMP
+static void pm_qos_irq_release(struct kref *ref)
+{
+ unsigned long flags;
+ struct irq_affinity_notify *notify = container_of(ref,
+ struct irq_affinity_notify, kref);
+ struct pm_qos_request *req = container_of(notify,
+ struct pm_qos_request, irq_notify);
+ struct pm_qos_constraints *c =
+ pm_qos_array[req->pm_qos_class]->constraints;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ cpumask_setall(&req->cpus_affine);
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+
+ pm_qos_update_target(c, req, PM_QOS_UPDATE_REQ, c->default_value);
+}
+
+static void pm_qos_irq_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ unsigned long flags;
+ struct pm_qos_request *req = container_of(notify,
+ struct pm_qos_request, irq_notify);
+ struct pm_qos_constraints *c =
+ pm_qos_array[req->pm_qos_class]->constraints;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ cpumask_copy(&req->cpus_affine, mask);
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+
+ pm_qos_update_target(c, req, PM_QOS_UPDATE_REQ, req->node.prio);
+}
+#endif
+
/**
* pm_qos_add_request - inserts new qos request into the list
* @req: pointer to a preallocated handle
@@ -453,11 +584,70 @@ void pm_qos_add_request(struct pm_qos_request *req,
WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
return;
}
+
+ switch (req->type) {
+ case PM_QOS_REQ_AFFINE_CORES:
+ if (cpumask_empty(&req->cpus_affine)) {
+ req->type = PM_QOS_REQ_ALL_CORES;
+ cpumask_setall(&req->cpus_affine);
+ WARN(1, KERN_ERR "Affine cores not set for request with affinity flag\n");
+ }
+ break;
+#ifdef CONFIG_SMP
+ case PM_QOS_REQ_AFFINE_IRQ:
+ if (irq_can_set_affinity(req->irq)) {
+ struct irq_desc *desc = irq_to_desc(req->irq);
+ struct cpumask *mask;
+
+ if (!desc)
+ return;
+ mask = desc->irq_data.common->affinity;
+
+ /* Get the current affinity */
+ cpumask_copy(&req->cpus_affine, mask);
+ req->irq_notify.irq = req->irq;
+ req->irq_notify.notify = pm_qos_irq_notify;
+ req->irq_notify.release = pm_qos_irq_release;
+
+ } else {
+ req->type = PM_QOS_REQ_ALL_CORES;
+ cpumask_setall(&req->cpus_affine);
+ WARN(1, KERN_ERR "IRQ-%d not set for request with affinity flag\n",
+ req->irq);
+ }
+ break;
+#endif
+ default:
+ WARN(1, KERN_ERR "Unknown request type %d\n", req->type);
+ /* fall through */
+ case PM_QOS_REQ_ALL_CORES:
+ cpumask_setall(&req->cpus_affine);
+ break;
+ }
+
req->pm_qos_class = pm_qos_class;
INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
trace_pm_qos_add_request(pm_qos_class, value);
pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
- &req->node, PM_QOS_ADD_REQ, value);
+ req, PM_QOS_ADD_REQ, value);
+
+#ifdef CONFIG_SMP
+ if (req->type == PM_QOS_REQ_AFFINE_IRQ &&
+ irq_can_set_affinity(req->irq)) {
+ int ret = 0;
+
+ ret = irq_set_affinity_notifier(req->irq,
+ &req->irq_notify);
+ if (ret) {
+ WARN(1, "IRQ affinity notify set failed\n");
+ req->type = PM_QOS_REQ_ALL_CORES;
+ cpumask_setall(&req->cpus_affine);
+ pm_qos_update_target(
+ pm_qos_array[pm_qos_class]->constraints,
+ req, PM_QOS_UPDATE_REQ, value);
+ }
+ }
+#endif
}
EXPORT_SYMBOL_GPL(pm_qos_add_request);
@@ -511,7 +701,7 @@ void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
if (new_value != req->node.prio)
pm_qos_update_target(
pm_qos_array[req->pm_qos_class]->constraints,
- &req->node, PM_QOS_UPDATE_REQ, new_value);
+ req, PM_QOS_UPDATE_REQ, new_value);
schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
}
@@ -531,15 +721,25 @@ void pm_qos_remove_request(struct pm_qos_request *req)
/* silent return to keep pcm code cleaner */
if (!pm_qos_request_active(req)) {
- WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
+ WARN(1, "pm_qos_remove_request() called for unknown object\n");
return;
}
cancel_delayed_work_sync(&req->work);
+#ifdef CONFIG_SMP
+ if (req->type == PM_QOS_REQ_AFFINE_IRQ) {
+ int ret = 0;
+ /* Unregister the IRQ affinity notifier */
+ ret = irq_set_affinity_notifier(req->irq, NULL);
+ if (ret)
+ WARN(1, "IRQ affinity notify set failed\n");
+ }
+#endif
+
trace_pm_qos_remove_request(req->pm_qos_class, PM_QOS_DEFAULT_VALUE);
pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
- &req->node, PM_QOS_REMOVE_REQ,
+ req, PM_QOS_REMOVE_REQ,
PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
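As a usage illustration (not part of this patch), a driver could tie a CPU DMA latency request to its interrupt's affinity and query the resulting per-CPU targets roughly as below. The request, IRQ number and latency value are made up, and the pm_qos_request fields used (type, irq) are assumed to come from the matching include/linux/pm_qos.h change, which is not shown in this diff.

    #include <linux/pm_qos.h>
    #include <linux/cpumask.h>

    static struct pm_qos_request example_req;

    static void example_add_request(int irq)
    {
    	/* Constraint applies only to the CPUs that service this IRQ and
    	 * follows the IRQ if its affinity changes at runtime. */
    	example_req.type = PM_QOS_REQ_AFFINE_IRQ;
    	example_req.irq = irq;
    	pm_qos_add_request(&example_req, PM_QOS_CPU_DMA_LATENCY, 100);
    }

    static int example_query_cluster(void)
    {
    	struct cpumask mask;

    	/* Aggregated target for CPUs 0-3 only; isolated CPUs are skipped
    	 * by pm_qos_request_for_cpumask(). */
    	cpumask_clear(&mask);
    	cpumask_set_cpu(0, &mask);
    	cpumask_set_cpu(1, &mask);
    	cpumask_set_cpu(2, &mask);
    	cpumask_set_cpu(3, &mask);
    	return pm_qos_request_for_cpumask(PM_QOS_CPU_DMA_LATENCY, &mask);
    }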
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 58209d8bfc56..6e7832ee6d74 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -287,6 +287,7 @@ static int suspend_prepare(suspend_state_t state)
if (!error)
return 0;
+ log_suspend_abort_reason("One or more tasks refusing to freeze");
suspend_stats.failed_freeze++;
dpm_save_failed_step(SUSPEND_FREEZE);
Finish:
@@ -316,7 +317,6 @@ void __weak arch_suspend_enable_irqs(void)
*/
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
- char suspend_abort[MAX_SUSPEND_ABORT_LEN];
int error, last_dev;
error = platform_suspend_prepare(state);
@@ -385,11 +385,10 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
state, false);
events_check_enabled = false;
} else if (*wakeup) {
- pm_get_active_wakeup_sources(suspend_abort,
- MAX_SUSPEND_ABORT_LEN);
- log_suspend_abort_reason(suspend_abort);
error = -EBUSY;
}
+
+ start_logging_wakeup_reasons();
syscore_resume();
}
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 160e1006640d..0336ab14b408 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -36,6 +36,15 @@
#define HIBERNATE_SIG "S1SUSPEND"
+static int goldenimage;
+/*
+ * When reading an {un,}compressed image, we may restore pages in place,
+ * in which case some architectures need these pages cleaning before they
+ * can be executed. We don't know which pages these may be, so clean the lot.
+ */
+static bool clean_pages_on_read;
+static bool clean_pages_on_decompress;
+
/*
* When reading an {un,}compressed image, we may restore pages in place,
* in which case some architectures need these pages cleaning before they
@@ -1531,7 +1540,13 @@ int swsusp_check(void)
goto put;
if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
- memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
+ if (!goldenimage) {
+ pr_debug("PM: corrupt hibernate image header\n");
+ memcpy(swsusp_header->sig,
+ swsusp_header->orig_sig, 10);
+ } else {
+ pr_debug("PM: Header corruption avoided\n");
+ }
/* Reset swap signature now */
error = hib_submit_io(WRITE_SYNC, swsusp_resume_block,
swsusp_header, NULL);
@@ -1605,3 +1620,10 @@ static int swsusp_header_init(void)
}
core_initcall(swsusp_header_init);
+
+static int __init golden_image_setup(char *str)
+{
+ goldenimage = 1;
+ return 1;
+}
+__setup("golden_image", golden_image_setup);
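Usage note: with the hunks above, adding "golden_image" to the kernel command line sets the goldenimage flag, so swsusp_check() leaves the on-disk hibernation signature intact ("Header corruption avoided") instead of resetting it, presumably so a pre-made golden hibernation image can be restored again on subsequent boots. Without the parameter, behaviour is unchanged and the signature is reset as before.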
diff --git a/kernel/power/wakeup_reason.c b/kernel/power/wakeup_reason.c
index 252611fad2fe..44d8da2952c5 100644
--- a/kernel/power/wakeup_reason.c
+++ b/kernel/power/wakeup_reason.c
@@ -26,42 +26,232 @@
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
-
+#include <linux/slab.h>
#define MAX_WAKEUP_REASON_IRQS 32
-static int irq_list[MAX_WAKEUP_REASON_IRQS];
-static int irqcount;
static bool suspend_abort;
static char abort_reason[MAX_SUSPEND_ABORT_LEN];
+
+static struct wakeup_irq_node *base_irq_nodes;
+static struct wakeup_irq_node *cur_irq_tree;
+static int cur_irq_tree_depth;
+static LIST_HEAD(wakeup_irqs);
+
+static struct kmem_cache *wakeup_irq_nodes_cache;
static struct kobject *wakeup_reason;
-static DEFINE_SPINLOCK(resume_reason_lock);
+static spinlock_t resume_reason_lock;
+bool log_wakeups __read_mostly;
+struct completion wakeups_completion;
static ktime_t last_monotime; /* monotonic time before last suspend */
static ktime_t curr_monotime; /* monotonic time after last suspend */
static ktime_t last_stime; /* monotonic boottime offset before last suspend */
static ktime_t curr_stime; /* monotonic boottime offset after last suspend */
-static ssize_t last_resume_reason_show(struct kobject *kobj, struct kobj_attribute *attr,
- char *buf)
+static void init_wakeup_irq_node(struct wakeup_irq_node *p, int irq)
{
- int irq_no, buf_offset = 0;
- struct irq_desc *desc;
- spin_lock(&resume_reason_lock);
- if (suspend_abort) {
- buf_offset = sprintf(buf, "Abort: %s", abort_reason);
- } else {
- for (irq_no = 0; irq_no < irqcount; irq_no++) {
- desc = irq_to_desc(irq_list[irq_no]);
- if (desc && desc->action && desc->action->name)
- buf_offset += sprintf(buf + buf_offset, "%d %s\n",
- irq_list[irq_no], desc->action->name);
- else
- buf_offset += sprintf(buf + buf_offset, "%d\n",
- irq_list[irq_no]);
+ p->irq = irq;
+ p->desc = irq_to_desc(irq);
+ p->child = NULL;
+ p->parent = NULL;
+ p->handled = false;
+ INIT_LIST_HEAD(&p->siblings);
+ INIT_LIST_HEAD(&p->next);
+}
+
+static struct wakeup_irq_node* alloc_irq_node(int irq)
+{
+ struct wakeup_irq_node *n;
+
+ n = kmem_cache_alloc(wakeup_irq_nodes_cache, GFP_ATOMIC);
+ if (!n) {
+ pr_warning("Failed to log chained wakeup IRQ %d\n",
+ irq);
+ return NULL;
+ }
+
+ init_wakeup_irq_node(n, irq);
+ return n;
+}
+
+static struct wakeup_irq_node *
+search_siblings(struct wakeup_irq_node *root, int irq)
+{
+ bool found = false;
+ struct wakeup_irq_node *n = NULL;
+ BUG_ON(!root);
+
+ if (root->irq == irq)
+ return root;
+
+ list_for_each_entry(n, &root->siblings, siblings) {
+ if (n->irq == irq) {
+ found = true;
+ break;
}
}
- spin_unlock(&resume_reason_lock);
- return buf_offset;
+
+ return found ? n : NULL;
+}
+
+static struct wakeup_irq_node *
+add_to_siblings(struct wakeup_irq_node *root, int irq)
+{
+ struct wakeup_irq_node *n;
+ if (root) {
+ n = search_siblings(root, irq);
+ if (n)
+ return n;
+ }
+ n = alloc_irq_node(irq);
+
+ if (n && root)
+ list_add(&n->siblings, &root->siblings);
+ return n;
+}
+
+#ifdef CONFIG_DEDUCE_WAKEUP_REASONS
+static struct wakeup_irq_node* add_child(struct wakeup_irq_node *root, int irq)
+{
+ if (!root->child) {
+ root->child = alloc_irq_node(irq);
+ if (!root->child)
+ return NULL;
+ root->child->parent = root;
+ return root->child;
+ }
+
+ return add_to_siblings(root->child, irq);
+}
+
+static struct wakeup_irq_node *find_first_sibling(struct wakeup_irq_node *node)
+{
+ struct wakeup_irq_node *n;
+ if (node->parent)
+ return node;
+ list_for_each_entry(n, &node->siblings, siblings) {
+ if (n->parent)
+ return n;
+ }
+ return NULL;
+}
+
+static struct wakeup_irq_node *
+get_base_node(struct wakeup_irq_node *node, unsigned depth)
+{
+ if (!node)
+ return NULL;
+
+ while (depth) {
+ node = find_first_sibling(node);
+ BUG_ON(!node);
+ node = node->parent;
+ depth--;
+ }
+
+ return node;
+}
+#endif /* CONFIG_DEDUCE_WAKEUP_REASONS */
+
+static const struct list_head* get_wakeup_reasons_nosync(void);
+
+static void print_wakeup_sources(void)
+{
+ struct wakeup_irq_node *n;
+ const struct list_head *wakeups;
+
+ if (suspend_abort) {
+ pr_info("Abort: %s\n", abort_reason);
+ return;
+ }
+
+ wakeups = get_wakeup_reasons_nosync();
+ list_for_each_entry(n, wakeups, next) {
+ if (n->desc && n->desc->action && n->desc->action->name)
+ pr_info("Resume caused by IRQ %d, %s\n", n->irq,
+ n->desc->action->name);
+ else
+ pr_info("Resume caused by IRQ %d\n", n->irq);
+ }
+}
+
+static bool walk_irq_node_tree(struct wakeup_irq_node *root,
+ bool (*visit)(struct wakeup_irq_node *, void *),
+ void *cookie)
+{
+ struct wakeup_irq_node *n, *t;
+
+ if (!root)
+ return true;
+
+ list_for_each_entry_safe(n, t, &root->siblings, siblings) {
+ if (!walk_irq_node_tree(n->child, visit, cookie))
+ return false;
+ if (!visit(n, cookie))
+ return false;
+ }
+
+ if (!walk_irq_node_tree(root->child, visit, cookie))
+ return false;
+ return visit(root, cookie);
+}
+
+#ifdef CONFIG_DEDUCE_WAKEUP_REASONS
+static bool is_node_handled(struct wakeup_irq_node *n, void *_p)
+{
+ return n->handled;
+}
+
+static bool base_irq_nodes_done(void)
+{
+ return walk_irq_node_tree(base_irq_nodes, is_node_handled, NULL);
+}
+#endif
+
+struct buf_cookie {
+ char *buf;
+ int buf_offset;
+};
+
+static bool print_leaf_node(struct wakeup_irq_node *n, void *_p)
+{
+ struct buf_cookie *b = _p;
+ if (!n->child) {
+ if (n->desc && n->desc->action && n->desc->action->name)
+ b->buf_offset +=
+ snprintf(b->buf + b->buf_offset,
+ PAGE_SIZE - b->buf_offset,
+ "%d %s\n",
+ n->irq, n->desc->action->name);
+ else
+ b->buf_offset +=
+ snprintf(b->buf + b->buf_offset,
+ PAGE_SIZE - b->buf_offset,
+ "%d\n",
+ n->irq);
+ }
+ return true;
+}
+
+static ssize_t last_resume_reason_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ unsigned long flags;
+
+ struct buf_cookie b = {
+ .buf = buf,
+ .buf_offset = 0
+ };
+
+ spin_lock_irqsave(&resume_reason_lock, flags);
+ if (suspend_abort)
+ b.buf_offset = snprintf(buf, PAGE_SIZE, "Abort: %s", abort_reason);
+ else
+ walk_irq_node_tree(base_irq_nodes, print_leaf_node, &b);
+ spin_unlock_irqrestore(&resume_reason_lock, flags);
+
+ return b.buf_offset;
}
static ssize_t last_suspend_time_show(struct kobject *kobj,
@@ -104,56 +294,155 @@ static struct attribute_group attr_group = {
.attrs = attrs,
};
+static inline void stop_logging_wakeup_reasons(void)
+{
+ ACCESS_ONCE(log_wakeups) = false;
+ smp_wmb();
+}
+
/*
- * logs all the wake up reasons to the kernel
- * stores the irqs to expose them to the userspace via sysfs
+ * stores the immediate wakeup irqs; these often aren't the ones seen by
+ * the drivers that registered them, due to chained interrupt controllers
+ * and multiple-interrupt dispatch.
*/
-void log_wakeup_reason(int irq)
+void log_base_wakeup_reason(int irq)
{
- struct irq_desc *desc;
- desc = irq_to_desc(irq);
- if (desc && desc->action && desc->action->name)
- printk(KERN_INFO "Resume caused by IRQ %d, %s\n", irq,
- desc->action->name);
- else
- printk(KERN_INFO "Resume caused by IRQ %d\n", irq);
+ /* No locking is needed, since this function is called within
+ * syscore_resume, with both nonboot CPUs and interrupts disabled.
+ */
+ base_irq_nodes = add_to_siblings(base_irq_nodes, irq);
+ BUG_ON(!base_irq_nodes);
+#ifndef CONFIG_DEDUCE_WAKEUP_REASONS
+ base_irq_nodes->handled = true;
+#endif
+}
- spin_lock(&resume_reason_lock);
- if (irqcount == MAX_WAKEUP_REASON_IRQS) {
- spin_unlock(&resume_reason_lock);
- printk(KERN_WARNING "Resume caused by more than %d IRQs\n",
- MAX_WAKEUP_REASON_IRQS);
- return;
+#ifdef CONFIG_DEDUCE_WAKEUP_REASONS
+
+/* This function is called by generic_handle_irq, which may call itself
+ * recursively. This happens with interrupts disabled. Using
+ * log_possible_wakeup_reason, we build, for each wakeup source, a tree of
+ * interrupts that traces the call stack of generic_handle_irq and records
+ * the interrupts actually handled.
+ *
+ * Most of these "trees" would either have a single node (in the event that the
+ * wakeup source is the final interrupt), or consist of a list of two
+ * interrupts, with the wakeup source at the root, and the final dispatched
+ * interrupt at the leaf.
+ *
+ * When *all* wakeup sources have been accounted for in this way, this
+ * function clears the log_wakeups flag and prints the wakeup reasons.
+ *
+ * TODO: percpu
+ *
+ */
+
+static struct wakeup_irq_node *
+log_possible_wakeup_reason_start(int irq, struct irq_desc *desc, unsigned depth)
+{
+ BUG_ON(!irqs_disabled());
+ BUG_ON((signed)depth < 0);
+
+ /* This function can race with a call to stop_logging_wakeup_reasons()
+ * from a thread context. If this happens, just exit silently, as we are no
+ * longer interested in logging interrupts.
+ */
+ if (!logging_wakeup_reasons())
+ return NULL;
+
+ /* If suspend was aborted, the base IRQ nodes are missing, and we stop
+ * logging interrupts immediately.
+ */
+ if (!base_irq_nodes) {
+ stop_logging_wakeup_reasons();
+ return NULL;
+ }
+
+ /* We assume wakeup interrupts are handled only by the first core. */
+ /* TODO: relax this by having percpu versions of the irq tree */
+ if (smp_processor_id() != 0) {
+ return NULL;
}
- irq_list[irqcount++] = irq;
- spin_unlock(&resume_reason_lock);
+ if (depth == 0) {
+ cur_irq_tree_depth = 0;
+ cur_irq_tree = search_siblings(base_irq_nodes, irq);
+ }
+ else if (cur_irq_tree) {
+ if (depth > cur_irq_tree_depth) {
+ BUG_ON(depth - cur_irq_tree_depth > 1);
+ cur_irq_tree = add_child(cur_irq_tree, irq);
+ if (cur_irq_tree)
+ cur_irq_tree_depth++;
+ }
+ else {
+ cur_irq_tree = get_base_node(cur_irq_tree,
+ cur_irq_tree_depth - depth);
+ cur_irq_tree_depth = depth;
+ cur_irq_tree = add_to_siblings(cur_irq_tree, irq);
+ }
+ }
+
+ return cur_irq_tree;
}
-int check_wakeup_reason(int irq)
+static void log_possible_wakeup_reason_complete(struct wakeup_irq_node *n,
+ unsigned depth,
+ bool handled)
{
- int irq_no;
- int ret = false;
-
- spin_lock(&resume_reason_lock);
- for (irq_no = 0; irq_no < irqcount; irq_no++)
- if (irq_list[irq_no] == irq) {
- ret = true;
- break;
+ if (!n)
+ return;
+ n->handled = handled;
+ if (depth == 0) {
+ if (base_irq_nodes_done()) {
+ stop_logging_wakeup_reasons();
+ complete(&wakeups_completion);
+ print_wakeup_sources();
+ }
}
- spin_unlock(&resume_reason_lock);
- return ret;
}
+bool log_possible_wakeup_reason(int irq,
+ struct irq_desc *desc,
+ bool (*handler)(struct irq_desc *))
+{
+ static DEFINE_PER_CPU(unsigned int, depth);
+
+ struct wakeup_irq_node *n;
+ bool handled;
+ unsigned d;
+
+ d = get_cpu_var(depth)++;
+ put_cpu_var(depth);
+
+ n = log_possible_wakeup_reason_start(irq, desc, d);
+
+ handled = handler(desc);
+
+ d = --get_cpu_var(depth);
+ put_cpu_var(depth);
+
+ if (!handled && desc && desc->action)
+ pr_debug("%s: irq %d action %pF not handled\n", __func__,
+ irq, desc->action->handler);
+
+ log_possible_wakeup_reason_complete(n, d, handled);
+
+ return handled;
+}
+
+#endif /* CONFIG_DEDUCE_WAKEUP_REASONS */
+
void log_suspend_abort_reason(const char *fmt, ...)
{
va_list args;
+ unsigned long flags;
- spin_lock(&resume_reason_lock);
+ spin_lock_irqsave(&resume_reason_lock, flags);
//Suspend abort reason has already been logged.
if (suspend_abort) {
- spin_unlock(&resume_reason_lock);
+ spin_unlock_irqrestore(&resume_reason_lock, flags);
return;
}
@@ -161,29 +450,128 @@ void log_suspend_abort_reason(const char *fmt, ...)
va_start(args, fmt);
vsnprintf(abort_reason, MAX_SUSPEND_ABORT_LEN, fmt, args);
va_end(args);
- spin_unlock(&resume_reason_lock);
+
+ spin_unlock_irqrestore(&resume_reason_lock, flags);
+}
+
+static bool match_node(struct wakeup_irq_node *n, void *_p)
+{
+ int irq = *((int *)_p);
+ return n->irq != irq;
+}
+
+int check_wakeup_reason(int irq)
+{
+ bool found;
+ unsigned long flags;
+ spin_lock_irqsave(&resume_reason_lock, flags);
+ found = !walk_irq_node_tree(base_irq_nodes, match_node, &irq);
+ spin_unlock_irqrestore(&resume_reason_lock, flags);
+ return found;
+}
+
+static bool build_leaf_nodes(struct wakeup_irq_node *n, void *_p)
+{
+ struct list_head *wakeups = _p;
+ if (!n->child)
+ list_add(&n->next, wakeups);
+ return true;
+}
+
+static const struct list_head* get_wakeup_reasons_nosync(void)
+{
+ BUG_ON(logging_wakeup_reasons());
+ INIT_LIST_HEAD(&wakeup_irqs);
+ walk_irq_node_tree(base_irq_nodes, build_leaf_nodes, &wakeup_irqs);
+ return &wakeup_irqs;
+}
+
+static bool build_unfinished_nodes(struct wakeup_irq_node *n, void *_p)
+{
+ struct list_head *unfinished = _p;
+ if (!n->handled) {
+ pr_warning("%s: wakeup irq %d was not handled\n",
+ __func__, n->irq);
+ list_add(&n->next, unfinished);
+ }
+ return true;
+}
+
+const struct list_head* get_wakeup_reasons(unsigned long timeout,
+ struct list_head *unfinished)
+{
+ INIT_LIST_HEAD(unfinished);
+
+ if (logging_wakeup_reasons()) {
+ unsigned long signalled = 0;
+ if (timeout)
+ signalled = wait_for_completion_timeout(&wakeups_completion, timeout);
+ if (WARN_ON(!signalled)) {
+ stop_logging_wakeup_reasons();
+ walk_irq_node_tree(base_irq_nodes, build_unfinished_nodes, unfinished);
+ return NULL;
+ }
+ pr_info("%s: waited for %u ms\n",
+ __func__,
+ jiffies_to_msecs(timeout - signalled));
+ }
+
+ return get_wakeup_reasons_nosync();
+}
+
+static bool delete_node(struct wakeup_irq_node *n, void *unused)
+{
+ list_del(&n->siblings);
+ kmem_cache_free(wakeup_irq_nodes_cache, n);
+ return true;
+}
+
+void clear_wakeup_reasons(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&resume_reason_lock, flags);
+
+ BUG_ON(logging_wakeup_reasons());
+ walk_irq_node_tree(base_irq_nodes, delete_node, NULL);
+ base_irq_nodes = NULL;
+ cur_irq_tree = NULL;
+ cur_irq_tree_depth = 0;
+ INIT_LIST_HEAD(&wakeup_irqs);
+ suspend_abort = false;
+
+ spin_unlock_irqrestore(&resume_reason_lock, flags);
}
/* Detects a suspend and clears all the previous wake up reasons*/
static int wakeup_reason_pm_event(struct notifier_block *notifier,
unsigned long pm_event, void *unused)
{
+ unsigned long flags;
switch (pm_event) {
case PM_SUSPEND_PREPARE:
- spin_lock(&resume_reason_lock);
- irqcount = 0;
+ spin_lock_irqsave(&resume_reason_lock, flags);
suspend_abort = false;
- spin_unlock(&resume_reason_lock);
+ spin_unlock_irqrestore(&resume_reason_lock, flags);
/* monotonic time since boot */
last_monotime = ktime_get();
/* monotonic time since boot including the time spent in suspend */
last_stime = ktime_get_boottime();
+ clear_wakeup_reasons();
break;
case PM_POST_SUSPEND:
/* monotonic time since boot */
curr_monotime = ktime_get();
/* monotonic time since boot including the time spent in suspend */
curr_stime = ktime_get_boottime();
+#ifdef CONFIG_DEDUCE_WAKEUP_REASONS
+ /* log_wakeups should have been cleared by now. */
+ if (WARN_ON(logging_wakeup_reasons())) {
+ stop_logging_wakeup_reasons();
+ print_wakeup_sources();
+ }
+#else
+ print_wakeup_sources();
+#endif
break;
default:
break;
@@ -195,31 +583,46 @@ static struct notifier_block wakeup_reason_pm_notifier_block = {
.notifier_call = wakeup_reason_pm_event,
};
-/* Initializes the sysfs parameter
- * registers the pm_event notifier
- */
int __init wakeup_reason_init(void)
{
- int retval;
+ spin_lock_init(&resume_reason_lock);
- retval = register_pm_notifier(&wakeup_reason_pm_notifier_block);
- if (retval)
- printk(KERN_WARNING "[%s] failed to register PM notifier %d\n",
- __func__, retval);
+ if (register_pm_notifier(&wakeup_reason_pm_notifier_block)) {
+ pr_warning("[%s] failed to register PM notifier\n",
+ __func__);
+ goto fail;
+ }
wakeup_reason = kobject_create_and_add("wakeup_reasons", kernel_kobj);
if (!wakeup_reason) {
- printk(KERN_WARNING "[%s] failed to create a sysfs kobject\n",
+ pr_warning("[%s] failed to create a sysfs kobject\n",
__func__);
- return 1;
+ goto fail_unregister_pm_notifier;
}
- retval = sysfs_create_group(wakeup_reason, &attr_group);
- if (retval) {
- kobject_put(wakeup_reason);
- printk(KERN_WARNING "[%s] failed to create a sysfs group %d\n",
- __func__, retval);
+
+ if (sysfs_create_group(wakeup_reason, &attr_group)) {
+ pr_warning("[%s] failed to create a sysfs group\n",
+ __func__);
+ goto fail_kobject_put;
}
+
+ wakeup_irq_nodes_cache =
+ kmem_cache_create("wakeup_irq_node_cache",
+ sizeof(struct wakeup_irq_node), 0,
+ 0, NULL);
+ if (!wakeup_irq_nodes_cache)
+ goto fail_remove_group;
+
return 0;
+
+fail_remove_group:
+ sysfs_remove_group(wakeup_reason, &attr_group);
+fail_kobject_put:
+ kobject_put(wakeup_reason);
+fail_unregister_pm_notifier:
+ unregister_pm_notifier(&wakeup_reason_pm_notifier_block);
+fail:
+ return 1;
}
late_initcall(wakeup_reason_init);
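To show how the reworked interface fits together, here is a rough consumer-side sketch. It assumes the declarations (struct wakeup_irq_node, get_wakeup_reasons()) live in include/linux/wakeup_reason.h, which this diff does not touch; the timeout and printouts are illustrative only.

    #include <linux/kernel.h>
    #include <linux/list.h>
    #include <linux/jiffies.h>
    #include <linux/wakeup_reason.h>

    static void example_report_wakeups(void)
    {
    	struct list_head unfinished;
    	const struct list_head *wakeups;
    	struct wakeup_irq_node *n;

    	/* Wait up to 100 ms for chained-IRQ dispatch to finish building
    	 * the tree, then walk the leaf nodes that name the real sources. */
    	wakeups = get_wakeup_reasons(msecs_to_jiffies(100), &unfinished);
    	if (!wakeups) {
    		/* Timed out: some wakeup IRQs were never fully handled. */
    		list_for_each_entry(n, &unfinished, next)
    			pr_warn("unhandled wakeup irq %d\n", n->irq);
    		return;
    	}

    	list_for_each_entry(n, wakeups, next)
    		pr_info("wakeup irq %d\n", n->irq);
    }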