| author | Jiri Kosina <jkosina@suse.cz> | 2011-05-18 17:06:31 +0200 |
|---|---|---|
| committer | Jiri Kosina <jkosina@suse.cz> | 2011-05-18 17:06:49 +0200 |
| commit | 6b7b8e488bbdedeccabdd001a78ffcbe43bb8a3a (patch) | |
| tree | f2f77cc31b4548745778fca6a51b09e1d8a49804 /include/linux/sched.h | |
| parent | b50f315cbb865079a16a12fd9ae6083f98fd592c (diff) | |
| parent | c1d10d18c542278b7fbc413c289d3cb6219da6b3 (diff) | |
Merge branch 'master' into upstream.
This is a sync with Linus' tree to pick up the KEY_IMAGES definition
that went in through the input tree.
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h | 21

1 file changed, 17 insertions, 4 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c15936fe998b..781abd137673 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -99,6 +99,7 @@ struct robust_list_head;
 struct bio_list;
 struct fs_struct;
 struct perf_event_context;
+struct blk_plug;
 
 /*
  * List of flags we want to share for kernel threads,
@@ -516,7 +517,7 @@ struct thread_group_cputimer {
 struct autogroup;
 
 /*
- * NOTE! "signal_struct" does not have it's own
+ * NOTE! "signal_struct" does not have its own
  * locking, because a shared signal_struct always
  * implies a shared sighand_struct, so locking
  * sighand_struct is always a proper superset of
@@ -853,7 +854,7 @@ extern int __weak arch_sd_sibiling_asym_packing(void);
 
 /*
  * Optimise SD flags for power savings:
- * SD_BALANCE_NEWIDLE helps agressive task consolidation and power savings.
+ * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
  * Keep default SD flags if sched_{smt,mc}_power_saving=0
  */
 
@@ -1253,6 +1254,9 @@ struct task_struct {
 #endif
 
 	struct mm_struct *mm, *active_mm;
+#ifdef CONFIG_COMPAT_BRK
+	unsigned brk_randomized:1;
+#endif
 #if defined(SPLIT_RSS_COUNTING)
 	struct task_rss_stat	rss_stat;
 #endif
@@ -1428,6 +1432,11 @@ struct task_struct {
 /* stacked block device info */
 	struct bio_list *bio_list;
 
+#ifdef CONFIG_BLOCK
+/* stack plugging */
+	struct blk_plug *plug;
+#endif
+
 /* VM state */
 	struct reclaim_state *reclaim_state;
 
@@ -1471,6 +1480,7 @@ struct task_struct {
 #ifdef CONFIG_NUMA
 	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
 	short il_next;
+	short pref_node_fork;
 #endif
 	atomic_t fs_excl;	/* holding fs exclusive resources */
 	struct rcu_head rcu;
@@ -1523,10 +1533,13 @@ struct task_struct {
 	struct memcg_batch_info {
 		int do_batch;	/* incremented when batch uncharge started */
 		struct mem_cgroup *memcg; /* target memcg of uncharge */
-		unsigned long bytes;		/* uncharged usage */
-		unsigned long memsw_bytes; /* uncharged mem+swap usage */
+		unsigned long nr_pages;	/* uncharged usage */
+		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
 	} memcg_batch;
 #endif
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+	atomic_t ptrace_bp_refcnt;
+#endif
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
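The most consequential hunk for `task_struct` is the new `struct blk_plug *plug` member, which backs the block-layer "plugging" interface of this kernel series. As a hedged illustration only: the `submit_batch()` helper below and its parameters are hypothetical and not part of this commit, while `blk_start_plug()`, `blk_finish_plug()` and the 2.6.39-era two-argument `submit_bio()` are the existing interfaces a caller would combine with the new field.

```c
#include <linux/blkdev.h>	/* struct blk_plug, blk_start_plug(), blk_finish_plug() */
#include <linux/bio.h>		/* struct bio */
#include <linux/fs.h>		/* READ, submit_bio() -- 2.6.39-era two-argument form */

/* Hypothetical helper: submit a batch of prepared read bios under one plug. */
static void submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;	/* lives on this stack frame; current->plug points at it */
	int i;

	blk_start_plug(&plug);		/* installs the plug in current->plug if none is active */
	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);	/* requests collect on the plug list instead of dispatching */
	blk_finish_plug(&plug);		/* flushes the batched requests and clears current->plug */
}
```

Keeping only a pointer in `task_struct` lets the scheduler flush a still-pending plug if the owning task blocks, since the plug itself never outlives the stack frame that started it.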
