Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile                                                    |  2
-rw-r--r--  kernel/module.c                                                    |  5
-rw-r--r--  kernel/slow-work-debugfs.c (renamed from kernel/slow-work-proc.c)  |  4
-rw-r--r--  kernel/slow-work.c                                                 | 64
-rw-r--r--  kernel/slow-work.h                                                 |  6
5 files changed, 51 insertions(+), 30 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 776ffed1556d..d7c13d249b2d 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -94,7 +94,7 @@ obj-$(CONFIG_X86_DS) += trace/
obj-$(CONFIG_RING_BUFFER) += trace/
obj-$(CONFIG_SMP) += sched_cpupri.o
obj-$(CONFIG_SLOW_WORK) += slow-work.o
-obj-$(CONFIG_SLOW_WORK_PROC) += slow-work-proc.o
+obj-$(CONFIG_SLOW_WORK_DEBUG) += slow-work-debugfs.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o
ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
diff --git a/kernel/module.c b/kernel/module.c
index 8b7d8805819d..5842a71cf052 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1187,7 +1187,8 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
/* Count loaded sections and allocate structures */
for (i = 0; i < nsect; i++)
- if (sechdrs[i].sh_flags & SHF_ALLOC)
+ if (sechdrs[i].sh_flags & SHF_ALLOC
+ && sechdrs[i].sh_size)
nloaded++;
size[0] = ALIGN(sizeof(*sect_attrs)
+ nloaded * sizeof(sect_attrs->attrs[0]),
@@ -1207,6 +1208,8 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
for (i = 0; i < nsect; i++) {
if (! (sechdrs[i].sh_flags & SHF_ALLOC))
continue;
+ if (!sechdrs[i].sh_size)
+ continue;
sattr->address = sechdrs[i].sh_addr;
sattr->name = kstrdup(secstrings + sechdrs[i].sh_name,
GFP_KERNEL);
diff --git a/kernel/slow-work-proc.c b/kernel/slow-work-debugfs.c
index 3988032571f5..e45c43645298 100644
--- a/kernel/slow-work-proc.c
+++ b/kernel/slow-work-debugfs.c
@@ -57,7 +57,7 @@ static void slow_work_print_mark(struct seq_file *m, struct slow_work *work)
}
/*
- * Describe a slow work item for /proc
+ * Describe a slow work item for debugfs
*/
static int slow_work_runqueue_show(struct seq_file *m, void *v)
{
@@ -211,7 +211,7 @@ static const struct seq_operations slow_work_runqueue_ops = {
};
/*
- * open "/proc/slow_work_rq" to list queue contents
+ * open "/sys/kernel/debug/slow_work/runqueue" to list queue contents
*/
static int slow_work_runqueue_open(struct inode *inode, struct file *file)
{
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index da94f3c101af..00889bd3c590 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -16,7 +16,7 @@
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/wait.h>
-#include <linux/proc_fs.h>
+#include <linux/debugfs.h>
#include "slow-work.h"
static void slow_work_cull_timeout(unsigned long);
@@ -109,12 +109,36 @@ static struct module *slow_work_unreg_module;
static struct slow_work *slow_work_unreg_work_item;
static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq);
static DEFINE_MUTEX(slow_work_unreg_sync_lock);
+
+static void slow_work_set_thread_processing(int id, struct slow_work *work)
+{
+ if (work)
+ slow_work_thread_processing[id] = work->owner;
+}
+static void slow_work_done_thread_processing(int id, struct slow_work *work)
+{
+ struct module *module = slow_work_thread_processing[id];
+
+ slow_work_thread_processing[id] = NULL;
+ smp_mb();
+ if (slow_work_unreg_work_item == work ||
+ slow_work_unreg_module == module)
+ wake_up_all(&slow_work_unreg_wq);
+}
+static void slow_work_clear_thread_processing(int id)
+{
+ slow_work_thread_processing[id] = NULL;
+}
+#else
+static void slow_work_set_thread_processing(int id, struct slow_work *work) {}
+static void slow_work_done_thread_processing(int id, struct slow_work *work) {}
+static void slow_work_clear_thread_processing(int id) {}
#endif
/*
* Data for tracking currently executing items for indication through /proc
*/
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
struct slow_work *slow_work_execs[SLOW_WORK_THREAD_LIMIT];
pid_t slow_work_pids[SLOW_WORK_THREAD_LIMIT];
DEFINE_RWLOCK(slow_work_execs_lock);
@@ -197,9 +221,6 @@ static unsigned slow_work_calc_vsmax(void)
*/
static noinline bool slow_work_execute(int id)
{
-#ifdef CONFIG_MODULES
- struct module *module;
-#endif
struct slow_work *work = NULL;
unsigned vsmax;
bool very_slow;
@@ -236,10 +257,7 @@ static noinline bool slow_work_execute(int id)
very_slow = false; /* avoid the compiler warning */
}
-#ifdef CONFIG_MODULES
- if (work)
- slow_work_thread_processing[id] = work->owner;
-#endif
+ slow_work_set_thread_processing(id, work);
if (work) {
slow_work_mark_time(work);
slow_work_begin_exec(id, work);
@@ -287,15 +305,7 @@ static noinline bool slow_work_execute(int id)
/* sort out the race between module unloading and put_ref() */
slow_work_put_ref(work);
-
-#ifdef CONFIG_MODULES
- module = slow_work_thread_processing[id];
- slow_work_thread_processing[id] = NULL;
- smp_mb();
- if (slow_work_unreg_work_item == work ||
- slow_work_unreg_module == module)
- wake_up_all(&slow_work_unreg_wq);
-#endif
+ slow_work_done_thread_processing(id, work);
return true;
@@ -310,7 +320,7 @@ auto_requeue:
else
list_add_tail(&work->link, &slow_work_queue);
spin_unlock_irq(&slow_work_queue_lock);
- slow_work_thread_processing[id] = NULL;
+ slow_work_clear_thread_processing(id);
return true;
}
@@ -813,7 +823,7 @@ static void slow_work_new_thread_execute(struct slow_work *work)
static const struct slow_work_ops slow_work_new_thread_ops = {
.owner = THIS_MODULE,
.execute = slow_work_new_thread_execute,
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
.desc = slow_work_new_thread_desc,
#endif
};
@@ -943,6 +953,7 @@ EXPORT_SYMBOL(slow_work_register_user);
*/
static void slow_work_wait_for_items(struct module *module)
{
+#ifdef CONFIG_MODULES
DECLARE_WAITQUEUE(myself, current);
struct slow_work *work;
int loop;
@@ -989,6 +1000,7 @@ static void slow_work_wait_for_items(struct module *module)
remove_wait_queue(&slow_work_unreg_wq, &myself);
mutex_unlock(&slow_work_unreg_sync_lock);
+#endif /* CONFIG_MODULES */
}
/**
@@ -1043,9 +1055,15 @@ static int __init init_slow_work(void)
if (slow_work_max_max_threads < nr_cpus * 2)
slow_work_max_max_threads = nr_cpus * 2;
#endif
-#ifdef CONFIG_SLOW_WORK_PROC
- proc_create("slow_work_rq", S_IFREG | 0400, NULL,
- &slow_work_runqueue_fops);
+#ifdef CONFIG_SLOW_WORK_DEBUG
+ {
+ struct dentry *dbdir;
+
+ dbdir = debugfs_create_dir("slow_work", NULL);
+ if (dbdir && !IS_ERR(dbdir))
+ debugfs_create_file("runqueue", S_IFREG | 0400, dbdir,
+ NULL, &slow_work_runqueue_fops);
+ }
#endif
return 0;
}
diff --git a/kernel/slow-work.h b/kernel/slow-work.h
index 3c2f007f3ad6..321f3c59d732 100644
--- a/kernel/slow-work.h
+++ b/kernel/slow-work.h
@@ -19,7 +19,7 @@
/*
* slow-work.c
*/
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
extern struct slow_work *slow_work_execs[];
extern pid_t slow_work_pids[];
extern rwlock_t slow_work_execs_lock;
@@ -30,9 +30,9 @@ extern struct list_head vslow_work_queue;
extern spinlock_t slow_work_queue_lock;
/*
- * slow-work-proc.c
+ * slow-work-debugfs.c
*/
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
extern const struct file_operations slow_work_runqueue_fops;
extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *);