Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	67
1 file changed, 32 insertions, 35 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 306f4f0c987a..857ba40426ba 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -33,6 +33,7 @@ struct sched_param {
 
 #include <linux/smp.h>
 #include <linux/sem.h>
+#include <linux/shm.h>
 #include <linux/signal.h>
 #include <linux/compiler.h>
 #include <linux/completion.h>
@@ -813,7 +814,7 @@ struct task_delay_info {
 	 * associated with the operation is added to XXX_delay.
 	 * XXX_delay contains the accumulated delay time in nanoseconds.
 	 */
-	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
+	u64 blkio_start;	/* Shared by blkio, swapin */
 	u64 blkio_delay;	/* wait for sync block io completion */
 	u64 swapin_delay;	/* wait for swapin block io completion */
 	u32 blkio_count;	/* total count of the number of sync block */
@@ -821,7 +822,7 @@ struct task_delay_info {
 	u32 swapin_count;	/* total count of the number of swapin block */
 				/* io operations performed */
 
-	struct timespec freepages_start, freepages_end;
+	u64 freepages_start;
 	u64 freepages_delay;	/* wait for memory reclaim */
 	u32 freepages_count;	/* total count of memory reclaim */
 };
@@ -872,21 +873,21 @@ enum cpu_idle_type {
 #define SD_NUMA			0x4000	/* cross-node balancing */
 
 #ifdef CONFIG_SCHED_SMT
-static inline const int cpu_smt_flags(void)
+static inline int cpu_smt_flags(void)
 {
 	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
 }
 #endif
 
 #ifdef CONFIG_SCHED_MC
-static inline const int cpu_core_flags(void)
+static inline int cpu_core_flags(void)
 {
 	return SD_SHARE_PKG_RESOURCES;
 }
 #endif
 
 #ifdef CONFIG_NUMA
-static inline const int cpu_numa_flags(void)
+static inline int cpu_numa_flags(void)
 {
 	return SD_NUMA;
 }
@@ -999,7 +1000,7 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
 bool cpus_share_cache(int this_cpu, int that_cpu);
 
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
-typedef const int (*sched_domain_flags_f)(void);
+typedef int (*sched_domain_flags_f)(void);
 
 #define SDTL_OVERLAP	0x01
 
@@ -1270,9 +1271,6 @@ struct task_struct {
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	struct rcu_node *rcu_blocked_node;
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-#ifdef CONFIG_RCU_BOOST
-	struct rt_mutex *rcu_boost_mutex;
-#endif /* #ifdef CONFIG_RCU_BOOST */
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	struct sched_info sched_info;
@@ -1307,13 +1305,12 @@ struct task_struct {
 				 * execve */
 	unsigned in_iowait:1;
 
-	/* task may not gain privileges */
-	unsigned no_new_privs:1;
-
 	/* Revert to default priority/policy when forking */
 	unsigned sched_reset_on_fork:1;
 	unsigned sched_contributes_to_load:1;
 
+	unsigned long atomic_flags; /* Flags needing atomic access. */
+
 	pid_t pid;
 	pid_t tgid;
 
@@ -1367,8 +1364,8 @@ struct task_struct {
 	} vtime_snap_whence;
 #endif
 	unsigned long nvcsw, nivcsw; /* context switch counts */
-	struct timespec start_time; 		/* monotonic time */
-	struct timespec real_start_time;	/* boot based time */
+	u64 start_time;		/* monotonic time in nsec */
+	u64 real_start_time;	/* boot based time in nsec */
 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
 	unsigned long min_flt, maj_flt;
 
@@ -1389,6 +1386,7 @@ struct task_struct {
 #ifdef CONFIG_SYSVIPC
 /* ipc stuff */
 	struct sysv_sem sysvsem;
+	struct sysv_shm sysvshm;
 #endif
 #ifdef CONFIG_DETECT_HUNG_TASK
 /* hung task detection */
@@ -1440,8 +1438,6 @@ struct task_struct {
 	struct rb_node *pi_waiters_leftmost;
 	/* Deadlock detection and priority inheritance handling */
 	struct rt_mutex_waiter *pi_blocked_on;
-	/* Top pi_waiters task */
-	struct task_struct *pi_top_task;
 #endif
 
 #ifdef CONFIG_DEBUG_MUTEXES
@@ -1634,12 +1630,6 @@ struct task_struct {
 	unsigned long trace_recursion;
 #endif /* CONFIG_TRACING */
 #ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
-	struct memcg_batch_info {
-		int do_batch;	/* incremented when batch uncharge started */
-		struct mem_cgroup *memcg; /* target memcg of uncharge */
-		unsigned long nr_pages;	/* uncharged usage */
-		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
-	} memcg_batch;
 	unsigned int memcg_kmem_skip_account;
 	struct memcg_oom_info {
 		struct mem_cgroup *memcg;
@@ -1967,6 +1957,19 @@ static inline void memalloc_noio_restore(unsigned int flags)
 	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
 }
 
+/* Per-process atomic flags. */
+#define PFA_NO_NEW_PRIVS 0x00000001	/* May not gain new privileges. */
+
+static inline bool task_no_new_privs(struct task_struct *p)
+{
+	return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
+}
+
+static inline void task_set_no_new_privs(struct task_struct *p)
+{
+	set_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags);
+}
+
 /*
  * task->jobctl flags
  */
@@ -2009,9 +2012,6 @@ static inline void rcu_copy_process(struct task_struct *p)
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	p->rcu_blocked_node = NULL;
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-#ifdef CONFIG_RCU_BOOST
-	p->rcu_boost_mutex = NULL;
-#endif /* #ifdef CONFIG_RCU_BOOST */
 	INIT_LIST_HEAD(&p->rcu_node_entry);
 }
 
@@ -2360,8 +2360,10 @@ static inline int on_sig_stack(unsigned long sp)
 
 static inline int sas_ss_flags(unsigned long sp)
 {
-	return (current->sas_ss_size == 0 ? SS_DISABLE
-		: on_sig_stack(sp) ? SS_ONSTACK : 0);
+	if (!current->sas_ss_size)
+		return SS_DISABLE;
+
+	return on_sig_stack(sp) ? SS_ONSTACK : 0;
 }
 
 static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
@@ -2788,7 +2790,7 @@ static inline bool __must_check current_set_polling_and_test(void)
 
 	/*
 	 * Polling state must be visible before we test NEED_RESCHED,
-	 * paired by resched_task()
+	 * paired by resched_curr()
 	 */
 	smp_mb__after_atomic();
 
@@ -2806,7 +2808,7 @@ static inline bool __must_check current_clr_polling_and_test(void)
 
 	/*
 	 * Polling state must be visible before we test NEED_RESCHED,
-	 * paired by resched_task()
+	 * paired by resched_curr()
 	 */
 	smp_mb__after_atomic();
 
@@ -2838,7 +2840,7 @@ static inline void current_clr_polling(void)
 	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
 	 * fold.
 	 */
-	smp_mb(); /* paired with resched_task() */
+	smp_mb(); /* paired with resched_curr() */
 
 	preempt_fold_need_resched();
 }
@@ -2963,15 +2965,10 @@ static inline void inc_syscw(struct task_struct *tsk)
 
 #ifdef CONFIG_MEMCG
 extern void mm_update_next_owner(struct mm_struct *mm);
-extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
 #else
 static inline void mm_update_next_owner(struct mm_struct *mm)
 {
 }
-
-static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
-{
-}
 #endif /* CONFIG_MEMCG */
 
 static inline unsigned long task_rlimit(const struct task_struct *tsk,
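Note on the task_delay_info hunks: the *_end timespecs are dropped entirely because, with u64 nanosecond stamps, a delay is just a subtraction against the current monotonic clock, so only the start stamp needs to live in the struct. A minimal sketch of the accounting pattern this implies, assuming ktime_get_ns() from <linux/timekeeping.h>; the helper names below are illustrative, not part of this diff:

	/* Sketch of start/end delay sampling over the new u64 fields. */
	#include <linux/timekeeping.h>
	#include <linux/types.h>

	static void example_delay_start(u64 *start)
	{
		*start = ktime_get_ns();		/* stamp start in ns */
	}

	static void example_delay_end(u64 *start, u64 *total, u32 *count)
	{
		*total += ktime_get_ns() - *start;	/* accumulate elapsed ns */
		(*count)++;				/* one more completed op */
	}

For example, blkio_delay/blkio_count would be fed from the sync block-I/O wait path, with blkio_start as the shared stamp.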
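Likewise, start_time/real_start_time moving from struct timespec to plain nanoseconds means consumers convert at the edges instead of doing timespec arithmetic. A sketch for a procfs-style consumer, assuming nsec_to_clock_t() from <linux/jiffies.h>; the wrapper function is hypothetical:

	/* Sketch: converting the nsec-based boot time for procfs output. */
	#include <linux/jiffies.h>
	#include <linux/sched.h>

	static u64 example_start_time_ticks(struct task_struct *tsk)
	{
		/* real_start_time is now nanoseconds since boot */
		return nsec_to_clock_t(tsk->real_start_time);
	}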
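Finally, on the atomic_flags conversion: moving no_new_privs out of the bitfield lets it be updated via set_bit()/test_bit(), which operate atomically on the dedicated word, so setting the flag cannot race with the non-atomic read-modify-write updates of the neighbouring bitfield members. A sketch of how callers might use the new accessors; only task_set_no_new_privs()/task_no_new_privs() come from the diff above, the two wrapper functions are illustrative:

	/* Sketch: hypothetical callers of the new PFA accessors. */
	#include <linux/sched.h>
	#include <linux/errno.h>

	static void example_lock_down_privs(void)
	{
		/* set_bit() underneath: safe against concurrent updates
		 * to other bits in current->atomic_flags */
		task_set_no_new_privs(current);
	}

	static int example_check_priv_gain(struct task_struct *tsk)
	{
		/* an exec/security path would refuse privilege
		 * elevation once the flag is set */
		if (task_no_new_privs(tsk))
			return -EPERM;
		return 0;
	}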
