From 140fe3b1ab9c082182ef13359fab4ddba95c24c3 Mon Sep 17 00:00:00 2001
From: Xiao Guangrong
Date: Tue, 21 Jun 2011 10:35:55 +0800
Subject: jump_label: Fix jump_label update for modules

The jump label entries for modules do not stop at __stop___jump_table,
but at mod->jump_entries + mod->num_jump_entries.  By checking the wrong
end point, module trace events never get enabled.

Cc: Ingo Molnar
Acked-by: Jason Baron
Tested-by: Avi Kivity
Tested-by: Johannes Berg
Signed-off-by: Xiao Guangrong
Link: http://lkml.kernel.org/r/4E00038B.2060404@cn.fujitsu.com
Signed-off-by: Steven Rostedt
---
 kernel/jump_label.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

(limited to 'kernel')

diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index fa27e750dbc0..a8ce45097f3d 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -375,15 +375,19 @@ int jump_label_text_reserved(void *start, void *end)
 
 static void jump_label_update(struct jump_label_key *key, int enable)
 {
-	struct jump_entry *entry = key->entries;
-
-	/* if there are no users, entry can be NULL */
-	if (entry)
-		__jump_label_update(key, entry, __stop___jump_table, enable);
+	struct jump_entry *entry = key->entries, *stop = __stop___jump_table;
 
 #ifdef CONFIG_MODULES
+	struct module *mod = __module_address((jump_label_t)key);
+
 	__jump_label_mod_update(key, enable);
+
+	if (mod)
+		stop = mod->jump_entries + mod->num_jump_entries;
 #endif
+	/* if there are no users, entry can be NULL */
+	if (entry)
+		__jump_label_update(key, entry, stop, enable);
 }
 
 #endif
--
cgit v1.2.3

From cd62287e364c0d15d517c6ced4e4808b54711475 Mon Sep 17 00:00:00 2001
From: Mike Galbraith
Date: Sat, 4 Jun 2011 15:03:20 +0200
Subject: sched, cgroups: Fix MIN_SHARES on 64-bit boxen

Commit c8b28116 ("sched: Increase SCHED_LOAD_SCALE resolution") intended
to have no user-visible effect, but it allows setting cpu.shares to a
value below MIN_SHARES, which the user then sees.

Signed-off-by: Mike Galbraith
Signed-off-by: Peter Zijlstra
Cc: Nikhil Rao
Link: http://lkml.kernel.org/r/1307192600.8618.3.camel@marge.simson.net
Signed-off-by: Ingo Molnar
---
 kernel/sched.c | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

(limited to 'kernel')

diff --git a/kernel/sched.c b/kernel/sched.c
index 3f2e502d609b..9769c756ad66 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -292,8 +292,8 @@ static DEFINE_SPINLOCK(task_group_lock);
  * (The default weight is 1024 - so there's no practical
  *  limitation from this.)
  */
-#define MIN_SHARES	2
-#define MAX_SHARES	(1UL << (18 + SCHED_LOAD_RESOLUTION))
+#define MIN_SHARES	(1UL <<  1)
+#define MAX_SHARES	(1UL << 18)
 
 static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
 #endif
@@ -8450,10 +8450,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 	if (!tg->se[0])
 		return -EINVAL;
 
-	if (shares < MIN_SHARES)
-		shares = MIN_SHARES;
-	else if (shares > MAX_SHARES)
-		shares = MAX_SHARES;
+	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
 
 	mutex_lock(&shares_mutex);
 	if (tg->shares == shares)
--
cgit v1.2.3
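A minimal userspace sketch of the clamping bug fixed above. This is
illustrative only, not kernel code: SCALE_SHIFT stands in for
SCHED_LOAD_RESOLUTION, and scale_load()/clamp() are simplified local
stand-ins for the kernel helpers.

#include <stdio.h>

#define SCALE_SHIFT	10			/* assumed extra resolution bits */
#define MIN_SHARES	(1UL << 1)
#define MAX_SHARES	(1UL << 18)

static unsigned long scale_load(unsigned long w)
{
	return w << SCALE_SHIFT;
}

static unsigned long clamp(unsigned long v, unsigned long lo, unsigned long hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
	/* User writes cpu.shares = 1; the kernel stores the scaled value. */
	unsigned long shares = scale_load(1);

	/* Buggy check: the scaled value (1024) already exceeds the unscaled
	 * MIN_SHARES (2), so nothing is clamped and the user reads back an
	 * effective weight of 1, below the minimum. */
	printf("unclamped: %lu\n", shares);

	/* Fixed: clamp against the *scaled* bounds. */
	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
	printf("clamped:   %lu\n", shares);	/* raised to scale_load(2) = 2048 */
	return 0;
}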
From 23c570a67448e803408191f529ed9a83fd34a25a Mon Sep 17 00:00:00 2001
From: Ram Pai
Date: Tue, 5 Jul 2011 23:44:30 -0700
Subject: resource: ability to resize an allocated resource

Provides the ability to resize a resource that is already allocated.
This functionality is put in place to support reallocation needs of
pci resources.

Signed-off-by: Ram Pai
Acked-by: Jesse Barnes
Signed-off-by: Linus Torvalds
---
 kernel/resource.c | 116 +++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 98 insertions(+), 18 deletions(-)

(limited to 'kernel')

diff --git a/kernel/resource.c b/kernel/resource.c
index 798e2fae2a06..3ff40178dce7 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -38,6 +38,14 @@ struct resource iomem_resource = {
 };
 EXPORT_SYMBOL(iomem_resource);
 
+/* constraints to be met while allocating resources */
+struct resource_constraint {
+	resource_size_t min, max, align;
+	resource_size_t (*alignf)(void *, const struct resource *,
+			resource_size_t, resource_size_t);
+	void *alignf_data;
+};
+
 static DEFINE_RWLOCK(resource_lock);
 
 static void *r_next(struct seq_file *m, void *v, loff_t *pos)
@@ -384,16 +392,13 @@ static bool resource_contains(struct resource *res1, struct resource *res2)
 }
 
 /*
- * Find empty slot in the resource tree given range and alignment.
+ * Find empty slot in the resource tree with the given range and
+ * alignment constraints
  */
-static int find_resource(struct resource *root, struct resource *new,
-			 resource_size_t size, resource_size_t min,
-			 resource_size_t max, resource_size_t align,
-			 resource_size_t (*alignf)(void *,
-						   const struct resource *,
-						   resource_size_t,
-						   resource_size_t),
-			 void *alignf_data)
+static int __find_resource(struct resource *root, struct resource *old,
+			 struct resource *new,
+			 resource_size_t size,
+			 struct resource_constraint *constraint)
 {
 	struct resource *this = root->child;
 	struct resource tmp = *new, avail, alloc;
@@ -404,25 +409,26 @@ static int find_resource(struct resource *root, struct resource *new,
 	 * Skip past an allocated resource that starts at 0, since the assignment
 	 * of this->start - 1 to tmp->end below would cause an underflow.
 	 */
-	if (this && this->start == 0) {
-		tmp.start = this->end + 1;
+	if (this && this->start == root->start) {
+		tmp.start = (this == old) ? old->start : this->end + 1;
 		this = this->sibling;
 	}
 	for(;;) {
 		if (this)
-			tmp.end = this->start - 1;
+			tmp.end = (this == old) ? this->end : this->start - 1;
 		else
 			tmp.end = root->end;
 
-		resource_clip(&tmp, min, max);
+		resource_clip(&tmp, constraint->min, constraint->max);
 		arch_remove_reservations(&tmp);
 
 		/* Check for overflow after ALIGN() */
 		avail = *new;
-		avail.start = ALIGN(tmp.start, align);
+		avail.start = ALIGN(tmp.start, constraint->align);
 		avail.end = tmp.end;
 		if (avail.start >= tmp.start) {
-			alloc.start = alignf(alignf_data, &avail, size, align);
+			alloc.start = constraint->alignf(constraint->alignf_data, &avail,
+					size, constraint->align);
 			alloc.end = alloc.start + size - 1;
 			if (resource_contains(&avail, &alloc)) {
 				new->start = alloc.start;
@@ -432,14 +438,75 @@ static int find_resource(struct resource *root, struct resource *new,
 		}
 		if (!this)
 			break;
-		tmp.start = this->end + 1;
+		if (this != old)
+			tmp.start = this->end + 1;
 		this = this->sibling;
 	}
 	return -EBUSY;
 }
 
+/*
+ * Find empty slot in the resource tree given range and alignment.
+ */
+static int find_resource(struct resource *root, struct resource *new,
+			resource_size_t size,
+			struct resource_constraint *constraint)
+{
+	return __find_resource(root, NULL, new, size, constraint);
+}
+
 /**
- * allocate_resource - allocate empty slot in the resource tree given range & alignment
+ * reallocate_resource - allocate a slot in the resource tree given range & alignment.
+ *	The resource will be relocated if the new size cannot be
+ *	reallocated in the current location.
+ *
+ * @root: root resource descriptor
+ * @old:  resource descriptor desired by caller
+ * @newsize: new size of the resource descriptor
+ * @constraint: the size and alignment constraints to be met.
+ */
+int reallocate_resource(struct resource *root, struct resource *old,
+			resource_size_t newsize,
+			struct resource_constraint *constraint)
+{
+	int err=0;
+	struct resource new = *old;
+	struct resource *conflict;
+
+	write_lock(&resource_lock);
+
+	if ((err = __find_resource(root, old, &new, newsize, constraint)))
+		goto out;
+
+	if (resource_contains(&new, old)) {
+		old->start = new.start;
+		old->end = new.end;
+		goto out;
+	}
+
+	if (old->child) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	if (resource_contains(old, &new)) {
+		old->start = new.start;
+		old->end = new.end;
+	} else {
+		__release_resource(old);
+		*old = new;
+		conflict = __request_resource(root, old);
+		BUG_ON(conflict);
+	}
+out:
+	write_unlock(&resource_lock);
+	return err;
+}
+
+
+/**
+ * allocate_resource - allocate empty slot in the resource tree given range & alignment.
+ *	The resource will be reallocated with a new size if it was already allocated
  * @root: root resource descriptor
  * @new: resource descriptor desired by caller
  * @size: requested resource region size
@@ -459,12 +526,25 @@ int allocate_resource(struct resource *root, struct resource *new,
 		      void *alignf_data)
 {
 	int err;
+	struct resource_constraint constraint;
 
 	if (!alignf)
 		alignf = simple_align_resource;
 
+	constraint.min = min;
+	constraint.max = max;
+	constraint.align = align;
+	constraint.alignf = alignf;
+	constraint.alignf_data = alignf_data;
+
+	if ( new->parent ) {
+		/* resource is already allocated, try reallocating with
+		   the new constraints */
+		return reallocate_resource(root, new, size, &constraint);
+	}
+
 	write_lock(&resource_lock);
-	err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
+	err = find_resource(root, new, size, &constraint);
 	if (err >= 0 && __request_resource(root, new))
 		err = -EBUSY;
 	write_unlock(&resource_lock);
--
cgit v1.2.3
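A hypothetical caller of the new path (a sketch, not from the patch):
because the resource passed in already has a parent, allocate_resource()
now routes to reallocate_resource() instead of trying to insert a second
node. The grow_region() name and the size-based alignment are assumptions
made for the example.

#include <linux/ioport.h>

/* Grow an already-inserted region to newsize, relocating it if it
 * cannot be enlarged in place.  Passing alignf == NULL selects
 * simple_align_resource(), as in allocate_resource() itself. */
static int grow_region(struct resource *root, struct resource *res,
		       resource_size_t newsize)
{
	/* min/max span the whole root; align = newsize assumes a
	 * size-aligned (BAR-like) region. */
	return allocate_resource(root, res, newsize,
				 root->start, root->end,
				 newsize, NULL, NULL);
}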
From 4d4cf23cdde2f8f9324f5684a7f349e182039529 Mon Sep 17 00:00:00 2001
From: "Rafael J. Wysocki"
Date: Wed, 6 Jul 2011 20:15:23 +0200
Subject: PM / Hibernate: Fix free_unnecessary_pages()

There is a bug in free_unnecessary_pages() that causes it to attempt to
free too many pages in some cases, which triggers the BUG_ON() in
memory_bm_clear_bit() for copy_bm.  Namely, if count_data_pages() is
initially greater than alloc_normal, we get to_free_normal equal to 0
and "save" greater than 0.  In that case, if the sum of "save" and
count_highmem_pages() is greater than alloc_highmem, we subtract a
positive number from to_free_normal.  Since to_free_normal was 0 before
the subtraction and is an unsigned int, the result is converted to a
huge positive number that is then used as the number of pages to free.

Fix this bug by checking whether to_free_normal is actually greater
than or equal to the number we are going to subtract from it.

Signed-off-by: Rafael J. Wysocki
Reported-and-tested-by: Matthew Garrett
Cc: stable@kernel.org
---
 kernel/power/snapshot.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index ace55889f702..06efa54f93d6 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1211,7 +1211,11 @@ static void free_unnecessary_pages(void)
 		to_free_highmem = alloc_highmem - save;
 	} else {
 		to_free_highmem = 0;
-		to_free_normal -= save - alloc_highmem;
+		save -= alloc_highmem;
+		if (to_free_normal > save)
+			to_free_normal -= save;
+		else
+			to_free_normal = 0;
 	}
 
 	memory_bm_position_reset(&copy_bm);
--
cgit v1.2.3
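A standalone demonstration of the unsigned underflow described above
(plain C, not kernel code; the variable names mirror
free_unnecessary_pages() but the values are made up):

#include <stdio.h>

int main(void)
{
	unsigned int to_free_normal = 0;
	unsigned int save = 5, alloc_highmem = 2;

	/* Buggy arithmetic: 0 - (5 - 2) wraps around to a huge count. */
	unsigned int buggy = to_free_normal - (save - alloc_highmem);
	printf("buggy: %u pages to free\n", buggy);	/* 4294967293 */

	/* Fixed: never subtract more than is actually there. */
	save -= alloc_highmem;
	unsigned int fixed = to_free_normal > save ? to_free_normal - save : 0;
	printf("fixed: %u pages to free\n", fixed);	/* 0 */
	return 0;
}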
From 659fb32d1b67476f4ade25e9ea0e2642a5b9c4b5 Mon Sep 17 00:00:00 2001
From: Simon Guinot
Date: Wed, 6 Jul 2011 12:41:31 -0400
Subject: genirq: replace irq_gc_ack() with {set,clr}_bit variants (fwd)

This fixes a regression introduced by e59347a "arm: orion: Use generic
irq chip".

Depending on the device, interrupt acknowledgement is done by setting
or by clearing a dedicated register bit.  Replacing irq_gc_ack() with
{set,clr}_bit variants allows both cases to be handled.

Note that this patch affects the following SoCs: Davinci, Samsung and
Orion.  Except for the last, the change is minor: irq_gc_ack() is just
renamed into irq_gc_ack_set_bit().

For the Orion SoCs, edge GPIO interrupt support is currently broken:
irq_gc_ack() tries to acknowledge such an interrupt by setting the
corresponding cause register bit, but the Orion GPIO device expects the
opposite.  To fix this issue, the irq_gc_ack_clr_bit() variant is used.

Tested on Network Space v2.

Reported-by: Joey Oravec
Signed-off-by: Simon Guinot
Signed-off-by: Arnd Bergmann
---
 kernel/irq/generic-chip.c | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)

(limited to 'kernel')

diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index 31a9db711906..3a2cab407b93 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -101,10 +101,10 @@ void irq_gc_unmask_enable_reg(struct irq_data *d)
 }
 
 /**
- * irq_gc_ack - Ack pending interrupt
+ * irq_gc_ack_set_bit - Ack pending interrupt via setting bit
  * @d: irq_data
  */
-void irq_gc_ack(struct irq_data *d)
+void irq_gc_ack_set_bit(struct irq_data *d)
 {
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 	u32 mask = 1 << (d->irq - gc->irq_base);
@@ -114,6 +114,20 @@ void irq_gc_ack(struct irq_data *d)
 	irq_gc_unlock(gc);
 }
 
+/**
+ * irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit
+ * @d: irq_data
+ */
+void irq_gc_ack_clr_bit(struct irq_data *d)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	u32 mask = ~(1 << (d->irq - gc->irq_base));
+
+	irq_gc_lock(gc);
+	irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack);
+	irq_gc_unlock(gc);
+}
+
 /**
  * irq_gc_mask_disable_reg_and_ack- Mask and ack pending interrupt
  * @d: irq_data
--
cgit v1.2.3
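A userspace model of why both ack variants are needed (illustrative
only: `cause` stands in for a hardware cause register, and the two write
functions model write-one-to-clear and Orion-style write-zero-to-clear
semantics; the mask math mirrors irq_gc_ack_{set,clr}_bit()):

#include <stdio.h>

typedef unsigned int u32;

static u32 cause;	/* pending-interrupt "register" */

/* write-one-to-clear device: bits written as 1 are acked */
static void w1c_write(u32 val) { cause &= ~val; }
/* Orion-style device: bits written as 0 are acked, 1 leaves them pending */
static void w0c_write(u32 val) { cause &= val; }

int main(void)
{
	u32 mask = 1 << 3;	/* irq offset 3 within the chip */

	cause = mask;
	w1c_write(mask);	/* irq_gc_ack_set_bit() analogue */
	printf("w1c acked: cause=%#x\n", cause);	/* 0 */

	cause = mask;
	w0c_write(~mask);	/* irq_gc_ack_clr_bit() analogue */
	printf("w0c acked: cause=%#x\n", cause);	/* 0 */
	return 0;
}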