Diffstat (limited to 'kernel/power')

 -rw-r--r--  kernel/power/hibernate.c |  17
 -rw-r--r--  kernel/power/main.c      |  17
 -rw-r--r--  kernel/power/power.h     |   9
 -rw-r--r--  kernel/power/qos.c       | 222
 -rw-r--r--  kernel/power/swap.c      |  18

 5 files changed, 257 insertions(+), 26 deletions(-)
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 797f19e2aaa9..1535c46808f1 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -1163,6 +1163,22 @@ static int __init kaslr_nohibernate_setup(char *str)
 	return nohibernate_setup(str);
 }
 
+static int __init page_poison_nohibernate_setup(char *str)
+{
+#ifdef CONFIG_PAGE_POISONING_ZERO
+	/*
+	 * The zeroing option for page poison skips the checks on alloc.
+	 * since hibernation doesn't save free pages there's no way to
+	 * guarantee the pages will still be zeroed.
+	 */
+	if (!strcmp(str, "on")) {
+		pr_info("Disabling hibernation due to page poisoning\n");
+		return nohibernate_setup(str);
+	}
+#endif
+	return 1;
+}
+
 __setup("noresume", noresume_setup);
 __setup("resume_offset=", resume_offset_setup);
 __setup("resume=", resume_setup);
@@ -1171,3 +1187,4 @@ __setup("resumewait", resumewait_setup);
 __setup("resumedelay=", resumedelay_setup);
 __setup("nohibernate", nohibernate_setup);
 __setup("kaslr", kaslr_nohibernate_setup);
+__setup("page_poison=", page_poison_nohibernate_setup);
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 68c0eaae8034..5ea50b1b7595 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -287,13 +287,7 @@ static ssize_t pm_wakeup_irq_show(struct kobject *kobj,
 	return pm_wakeup_irq ? sprintf(buf, "%u\n", pm_wakeup_irq) : -ENODATA;
 }
 
-static ssize_t pm_wakeup_irq_store(struct kobject *kobj,
-					struct kobj_attribute *attr,
-					const char *buf, size_t n)
-{
-	return -EINVAL;
-}
-power_attr(pm_wakeup_irq);
+power_attr_ro(pm_wakeup_irq);
 
 #else /* !CONFIG_PM_SLEEP_DEBUG */
 static inline void pm_print_times_init(void) {}
@@ -571,14 +565,7 @@ static ssize_t pm_trace_dev_match_show(struct kobject *kobj,
 	return show_trace_dev_match(buf, PAGE_SIZE);
 }
 
-static ssize_t
-pm_trace_dev_match_store(struct kobject *kobj, struct kobj_attribute *attr,
-			 const char *buf, size_t n)
-{
-	return -EINVAL;
-}
-
-power_attr(pm_trace_dev_match);
+power_attr_ro(pm_trace_dev_match);
 
 #endif /* CONFIG_PM_TRACE */
 
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 436da5bc72f4..51f02ecaf125 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -77,6 +77,15 @@ static struct kobj_attribute _name##_attr = {	\
 	.store	= _name##_store,		\
 }
 
+#define power_attr_ro(_name) \
+static struct kobj_attribute _name##_attr = {	\
+	.attr	= {				\
+		.name = __stringify(_name),	\
+		.mode = S_IRUGO,		\
+	},					\
+	.show	= _name##_show,			\
+}
+
 /* Preferred image size in bytes (default 500 MB) */
 extern unsigned long image_size;
 /* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 97b0df71303e..e6eceb0aa496 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -43,6 +43,9 @@
 #include <linux/kernel.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+#include <linux/cpumask.h>
 
 #include <linux/uaccess.h>
 #include <linux/export.h>
@@ -67,6 +70,8 @@ static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
 static struct pm_qos_constraints cpu_dma_constraints = {
 	.list = PLIST_HEAD_INIT(cpu_dma_constraints.list),
 	.target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
+	.target_per_cpu = { [0 ... (NR_CPUS - 1)] =
+				PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE },
 	.default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
 	.no_constraint_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
 	.type = PM_QOS_MIN,
@@ -81,6 +86,8 @@ static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
 static struct pm_qos_constraints network_lat_constraints = {
 	.list = PLIST_HEAD_INIT(network_lat_constraints.list),
 	.target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
+	.target_per_cpu = { [0 ... (NR_CPUS - 1)] =
+				PM_QOS_NETWORK_LAT_DEFAULT_VALUE },
 	.default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
 	.no_constraint_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
 	.type = PM_QOS_MIN,
@@ -91,11 +98,12 @@ static struct pm_qos_object network_lat_pm_qos = {
 	.name = "network_latency",
 };
 
-
 static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
 static struct pm_qos_constraints network_tput_constraints = {
 	.list = PLIST_HEAD_INIT(network_tput_constraints.list),
 	.target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
+	.target_per_cpu = { [0 ... (NR_CPUS - 1)] =
+				PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE },
 	.default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
 	.no_constraint_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
 	.type = PM_QOS_MAX,
@@ -259,22 +267,60 @@ static const struct file_operations pm_qos_debug_fops = {
 	.release	= single_release,
 };
 
+static inline void pm_qos_set_value_for_cpus(struct pm_qos_constraints *c,
+		struct cpumask *cpus)
+{
+	struct pm_qos_request *req = NULL;
+	int cpu;
+	s32 qos_val[NR_CPUS] = { [0 ... (NR_CPUS - 1)] = c->default_value };
+
+	plist_for_each_entry(req, &c->list, node) {
+		for_each_cpu(cpu, &req->cpus_affine) {
+			switch (c->type) {
+			case PM_QOS_MIN:
+				if (qos_val[cpu] > req->node.prio)
+					qos_val[cpu] = req->node.prio;
+				break;
+			case PM_QOS_MAX:
+				if (req->node.prio > qos_val[cpu])
+					qos_val[cpu] = req->node.prio;
+				break;
+			case PM_QOS_SUM:
+				qos_val[cpu] += req->node.prio;
+				break;
+			default:
+				BUG();
+				break;
+			}
+		}
+	}
+
+	for_each_possible_cpu(cpu) {
+		if (c->target_per_cpu[cpu] != qos_val[cpu])
+			cpumask_set_cpu(cpu, cpus);
+		c->target_per_cpu[cpu] = qos_val[cpu];
+	}
+}
+
 /**
  * pm_qos_update_target - manages the constraints list and calls the notifiers
  *	if needed
  * @c: constraints data struct
- * @node: request to add to the list, to update or to remove
+ * @req: request to add to the list, to update or to remove
  * @action: action to take on the constraints list
  * @value: value of the request to add or update
  *
  * This function returns 1 if the aggregated constraint value has changed, 0
  * otherwise.
  */
-int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
-			 enum pm_qos_req_action action, int value)
+int pm_qos_update_target(struct pm_qos_constraints *c,
+			 struct pm_qos_request *req,
+			 enum pm_qos_req_action action, int value)
 {
 	unsigned long flags;
 	int prev_value, curr_value, new_value;
+	struct plist_node *node = &req->node;
+	struct cpumask cpus;
 	int ret;
 
 	spin_lock_irqsave(&pm_qos_lock, flags);
@@ -305,17 +351,23 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
 	}
 
 	curr_value = pm_qos_get_value(c);
+	cpumask_clear(&cpus);
 	pm_qos_set_value(c, curr_value);
+	pm_qos_set_value_for_cpus(c, &cpus);
 
 	spin_unlock_irqrestore(&pm_qos_lock, flags);
 
 	trace_pm_qos_update_target(action, prev_value, curr_value);
-	if (prev_value != curr_value) {
+	/*
+	 * if cpu mask bits are set, call the notifier call chain
+	 * to update the new qos restriction for the cores
+	 */
+	if (!cpumask_empty(&cpus)) {
 		ret = 1;
 		if (c->notifiers)
 			blocking_notifier_call_chain(c->notifiers,
						     (unsigned long)curr_value,
-						     NULL);
+						     &cpus);
 	} else {
 		ret = 0;
 	}
@@ -398,12 +450,56 @@ int pm_qos_request(int pm_qos_class)
 }
 EXPORT_SYMBOL_GPL(pm_qos_request);
 
+int pm_qos_request_for_cpu(int pm_qos_class, int cpu)
+{
+	if (cpu_isolated(cpu))
+		return INT_MAX;
+
+	return pm_qos_array[pm_qos_class]->constraints->target_per_cpu[cpu];
+}
+EXPORT_SYMBOL(pm_qos_request_for_cpu);
+
 int pm_qos_request_active(struct pm_qos_request *req)
 {
 	return req->pm_qos_class != 0;
 }
 EXPORT_SYMBOL_GPL(pm_qos_request_active);
 
+int pm_qos_request_for_cpumask(int pm_qos_class, struct cpumask *mask)
+{
+	unsigned long irqflags;
+	int cpu;
+	struct pm_qos_constraints *c = NULL;
+	int val;
+
+	spin_lock_irqsave(&pm_qos_lock, irqflags);
+	c = pm_qos_array[pm_qos_class]->constraints;
+	val = c->default_value;
+
+	for_each_cpu(cpu, mask) {
+		if (cpu_isolated(cpu))
+			continue;
+
+		switch (c->type) {
+		case PM_QOS_MIN:
+			if (c->target_per_cpu[cpu] < val)
+				val = c->target_per_cpu[cpu];
+			break;
+		case PM_QOS_MAX:
+			if (c->target_per_cpu[cpu] > val)
+				val = c->target_per_cpu[cpu];
+			break;
+		default:
+			BUG();
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&pm_qos_lock, irqflags);
+
+	return val;
+}
+EXPORT_SYMBOL(pm_qos_request_for_cpumask);
+
 static void __pm_qos_update_request(struct pm_qos_request *req,
 			   s32 new_value)
 {
@@ -412,7 +508,7 @@ static void __pm_qos_update_request(struct pm_qos_request *req,
 	if (new_value != req->node.prio)
 		pm_qos_update_target(
 			pm_qos_array[req->pm_qos_class]->constraints,
-			&req->node, PM_QOS_UPDATE_REQ, new_value);
+			req, PM_QOS_UPDATE_REQ, new_value);
 }
 
 /**
@@ -430,6 +526,41 @@ static void pm_qos_work_fn(struct work_struct *work)
 	__pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
 }
 
+#ifdef CONFIG_SMP
+static void pm_qos_irq_release(struct kref *ref)
+{
+	unsigned long flags;
+	struct irq_affinity_notify *notify = container_of(ref,
+			struct irq_affinity_notify, kref);
+	struct pm_qos_request *req = container_of(notify,
+			struct pm_qos_request, irq_notify);
+	struct pm_qos_constraints *c =
+		pm_qos_array[req->pm_qos_class]->constraints;
+
+	spin_lock_irqsave(&pm_qos_lock, flags);
+	cpumask_setall(&req->cpus_affine);
+	spin_unlock_irqrestore(&pm_qos_lock, flags);
+
+	pm_qos_update_target(c, req, PM_QOS_UPDATE_REQ, c->default_value);
+}
+
+static void pm_qos_irq_notify(struct irq_affinity_notify *notify,
+		const cpumask_t *mask)
+{
+	unsigned long flags;
+	struct pm_qos_request *req = container_of(notify,
+			struct pm_qos_request, irq_notify);
+	struct pm_qos_constraints *c =
+		pm_qos_array[req->pm_qos_class]->constraints;
+
+	spin_lock_irqsave(&pm_qos_lock, flags);
+	cpumask_copy(&req->cpus_affine, mask);
+	spin_unlock_irqrestore(&pm_qos_lock, flags);
+
+	pm_qos_update_target(c, req, PM_QOS_UPDATE_REQ, req->node.prio);
+}
+#endif
+
 /**
  * pm_qos_add_request - inserts new qos request into the list
  * @req: pointer to a preallocated handle
@@ -453,11 +584,70 @@ void pm_qos_add_request(struct pm_qos_request *req,
 		WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
 		return;
 	}
+
+	switch (req->type) {
+	case PM_QOS_REQ_AFFINE_CORES:
+		if (cpumask_empty(&req->cpus_affine)) {
+			req->type = PM_QOS_REQ_ALL_CORES;
+			cpumask_setall(&req->cpus_affine);
+			WARN(1, KERN_ERR "Affine cores not set for request with affinity flag\n");
+		}
+		break;
+#ifdef CONFIG_SMP
+	case PM_QOS_REQ_AFFINE_IRQ:
+		if (irq_can_set_affinity(req->irq)) {
+			struct irq_desc *desc = irq_to_desc(req->irq);
+			struct cpumask *mask;
+
+			if (!desc)
+				return;
+			mask = desc->irq_data.common->affinity;
+
+			/* Get the current affinity */
+			cpumask_copy(&req->cpus_affine, mask);
+			req->irq_notify.irq = req->irq;
+			req->irq_notify.notify = pm_qos_irq_notify;
+			req->irq_notify.release = pm_qos_irq_release;
+
+		} else {
+			req->type = PM_QOS_REQ_ALL_CORES;
+			cpumask_setall(&req->cpus_affine);
+			WARN(1, KERN_ERR "IRQ-%d not set for request with affinity flag\n",
+					req->irq);
+		}
+		break;
+#endif
+	default:
+		WARN(1, KERN_ERR "Unknown request type %d\n", req->type);
+		/* fall through */
+	case PM_QOS_REQ_ALL_CORES:
+		cpumask_setall(&req->cpus_affine);
+		break;
+	}
+
 	req->pm_qos_class = pm_qos_class;
 	INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
 	trace_pm_qos_add_request(pm_qos_class, value);
 	pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
-			     &req->node, PM_QOS_ADD_REQ, value);
+			     req, PM_QOS_ADD_REQ, value);
+
+#ifdef CONFIG_SMP
+	if (req->type == PM_QOS_REQ_AFFINE_IRQ &&
+			irq_can_set_affinity(req->irq)) {
+		int ret = 0;
+
+		ret = irq_set_affinity_notifier(req->irq,
+					&req->irq_notify);
+		if (ret) {
+			WARN(1, "IRQ affinity notify set failed\n");
+			req->type = PM_QOS_REQ_ALL_CORES;
+			cpumask_setall(&req->cpus_affine);
+			pm_qos_update_target(
+				pm_qos_array[pm_qos_class]->constraints,
+				req, PM_QOS_UPDATE_REQ, value);
+		}
+	}
+#endif
 }
 EXPORT_SYMBOL_GPL(pm_qos_add_request);
 
@@ -511,7 +701,7 @@ void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
 	if (new_value != req->node.prio)
 		pm_qos_update_target(
 			pm_qos_array[req->pm_qos_class]->constraints,
-			&req->node, PM_QOS_UPDATE_REQ, new_value);
+			req, PM_QOS_UPDATE_REQ, new_value);
 
 	schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
 }
@@ -531,15 +721,25 @@ void pm_qos_remove_request(struct pm_qos_request *req)
 		/* silent return to keep pcm code cleaner */
 
 	if (!pm_qos_request_active(req)) {
-		WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
+		WARN(1, "pm_qos_remove_request() called for unknown object\n");
 		return;
 	}
 
 	cancel_delayed_work_sync(&req->work);
 
+#ifdef CONFIG_SMP
+	if (req->type == PM_QOS_REQ_AFFINE_IRQ) {
+		int ret = 0;
+		/* Get the current affinity */
+		ret = irq_set_affinity_notifier(req->irq, NULL);
+		if (ret)
+			WARN(1, "IRQ affinity notify set failed\n");
+	}
+#endif
+
 	trace_pm_qos_remove_request(req->pm_qos_class, PM_QOS_DEFAULT_VALUE);
 	pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
-			     &req->node, PM_QOS_REMOVE_REQ,
+			     req, PM_QOS_REMOVE_REQ,
 			     PM_QOS_DEFAULT_VALUE);
 	memset(req, 0, sizeof(*req));
 }
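Note (not part of the patch): the qos.c hunks above let a PM QoS request be scoped to a set of CPUs and queried per CPU. The sketch below shows how a driver might tie a CPU_DMA_LATENCY constraint to the CPUs that service its interrupt, and how an idle governor could consult the new per-CPU target. It assumes the 'type', 'irq' and 'cpus_affine' members added to struct pm_qos_request by the header side of this series (include/linux/pm_qos.h, which is outside this diff); the my_dev_* names are made up.

#include <linux/pm_qos.h>

static struct pm_qos_request my_dev_qos;

/* Restrict CPU_DMA_LATENCY to 100 us, but only on the CPUs handling my_dev_irq. */
static void my_dev_qos_start(int my_dev_irq)
{
	my_dev_qos.type = PM_QOS_REQ_AFFINE_IRQ;
	my_dev_qos.irq = my_dev_irq;
	pm_qos_add_request(&my_dev_qos, PM_QOS_CPU_DMA_LATENCY, 100);
}

static void my_dev_qos_stop(void)
{
	pm_qos_remove_request(&my_dev_qos);
}

/* An idle governor can now check the constraint for a single CPU. */
static bool my_state_allowed(int cpu, s32 exit_latency_us)
{
	return exit_latency_us <= pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY, cpu);
}

As the pm_qos_add_request() hunk shows, the framework snapshots the IRQ's current affinity and registers an affinity notifier, so the constraint follows the interrupt if it is migrated, and pm_qos_remove_request() unregisters that notifier again.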
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 12cd989dadf6..160e1006640d 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -37,6 +37,14 @@
 #define HIBERNATE_SIG	"S1SUSPEND"
 
 /*
+ * When reading an {un,}compressed image, we may restore pages in place,
+ * in which case some architectures need these pages cleaning before they
+ * can be executed. We don't know which pages these may be, so clean the lot.
+ */
+static bool clean_pages_on_read;
+static bool clean_pages_on_decompress;
+
+/*
  * The swap map is a data structure used for keeping track of each page
  * written to a swap partition.  It consists of many swap_map_page
  * structures that contain each an array of MAP_PAGE_ENTRIES swap entries.
@@ -241,6 +249,9 @@ static void hib_end_io(struct bio *bio)
 
 	if (bio_data_dir(bio) == WRITE)
 		put_page(page);
+	else if (clean_pages_on_read)
+		flush_icache_range((unsigned long)page_address(page),
+				   (unsigned long)page_address(page) + PAGE_SIZE);
 
 	if (bio->bi_error && !hb->error)
 		hb->error = bio->bi_error;
@@ -1049,6 +1060,7 @@ static int load_image(struct swap_map_handle *handle,
 
 	hib_init_batch(&hb);
 
+	clean_pages_on_read = true;
 	printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n",
 		nr_to_read);
 	m = nr_to_read / 10;
@@ -1124,6 +1136,10 @@ static int lzo_decompress_threadfn(void *data)
 		d->unc_len = LZO_UNC_SIZE;
 		d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
 		                               d->unc, &d->unc_len);
+		if (clean_pages_on_decompress)
+			flush_icache_range((unsigned long)d->unc,
+					   (unsigned long)d->unc + d->unc_len);
+
 		atomic_set(&d->stop, 1);
 		wake_up(&d->done);
 	}
@@ -1189,6 +1205,8 @@ static int load_image_lzo(struct swap_map_handle *handle,
 	}
 	memset(crc, 0, offsetof(struct crc_data, go));
 
+	clean_pages_on_decompress = true;
+
 	/*
 	 * Start the decompression threads.
 	 */
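Note (not part of the patch): pm_qos_update_target() in the qos.c hunks above now fires the notifier chain whenever any per-CPU target changes and passes the mask of affected CPUs where it used to pass NULL. A minimal sketch of a listener that consumes that mask, assuming an ordinary pm_qos_add_notifier() registration; the my_lat_* names are hypothetical.

#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/printk.h>

static int my_lat_notify(struct notifier_block *nb, unsigned long value, void *data)
{
	struct cpumask *affected = data;	/* was NULL before this change */
	int cpu;

	if (affected)
		for_each_cpu(cpu, affected)
			pr_debug("cpu%d: CPU_DMA_LATENCY target now %lu usec\n",
				 cpu, value);
	return NOTIFY_OK;
}

static struct notifier_block my_lat_nb = {
	.notifier_call = my_lat_notify,
};

static int __init my_lat_listener_init(void)
{
	return pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, &my_lat_nb);
}
device_initcall(my_lat_listener_init);

One behavioural consequence worth noting: because the chain now keys off the per-CPU mask rather than the global aggregate, a listener can be called even when the system-wide value is unchanged, so callbacks should not assume the aggregate moved.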
