Diffstat (limited to 'include/linux/rcupdate.h')
 -rw-r--r--  include/linux/rcupdate.h | 86
 1 file changed, 82 insertions(+), 4 deletions(-)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 81c04f4348ec..20fb776a1d4a 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -42,6 +42,7 @@
 #include <linux/lockdep.h>
 #include <linux/completion.h>
 #include <linux/debugobjects.h>
+#include <linux/bug.h>
 #include <linux/compiler.h>
 
 #ifdef CONFIG_RCU_TORTURE_TEST
@@ -190,6 +191,33 @@ extern void rcu_idle_exit(void);
 extern void rcu_irq_enter(void);
 extern void rcu_irq_exit(void);
 
+/**
+ * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
+ * @a: Code that RCU needs to pay attention to.
+ *
+ * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
+ * in the inner idle loop, that is, between the rcu_idle_enter() and
+ * the rcu_idle_exit() -- RCU will happily ignore any such read-side
+ * critical sections.  However, things like powertop need tracepoints
+ * in the inner idle loop.
+ *
+ * This macro provides the way out:  RCU_NONIDLE(do_something_with_RCU())
+ * will tell RCU that it needs to pay attention, invoke its argument
+ * (in this example, a call to the do_something_with_RCU() function),
+ * and then tell RCU to go back to ignoring this CPU.  It is permissible
+ * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
+ * quite limited.  If deeper nesting is required, it will be necessary
+ * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
+ *
+ * This macro may be used from process-level code only.
+ */
+#define RCU_NONIDLE(a) \
+	do { \
+		rcu_idle_exit(); \
+		do { a; } while (0); \
+		rcu_idle_enter(); \
+	} while (0)
+
 /*
  * Infrastructure to implement the synchronize_() primitives in
  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
@@ -226,6 +254,15 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
 }
 #endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 
+#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
+bool rcu_lockdep_current_cpu_online(void);
+#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
+static inline bool rcu_lockdep_current_cpu_online(void)
+{
+	return 1;
+}
+#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 #ifdef CONFIG_PROVE_RCU
@@ -239,13 +276,11 @@ static inline int rcu_is_cpu_idle(void)
 
 static inline void rcu_lock_acquire(struct lockdep_map *map)
 {
-	WARN_ON_ONCE(rcu_is_cpu_idle());
 	lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
 }
 
 static inline void rcu_lock_release(struct lockdep_map *map)
 {
-	WARN_ON_ONCE(rcu_is_cpu_idle());
 	lock_release(map, 1, _THIS_IP_);
 }
 
@@ -270,6 +305,9 @@ extern int debug_lockdep_rcu_enabled(void);
  * occur in the same context, for example, it is illegal to invoke
  * rcu_read_unlock() in process context if the matching rcu_read_lock()
  * was invoked from within an irq handler.
+ *
+ * Note that rcu_read_lock() is disallowed if the CPU is either idle or
+ * offline from an RCU perspective, so check for those as well.
  */
 static inline int rcu_read_lock_held(void)
 {
@@ -277,6 +315,8 @@ static inline int rcu_read_lock_held(void)
 		return 1;
 	if (rcu_is_cpu_idle())
 		return 0;
+	if (!rcu_lockdep_current_cpu_online())
+		return 0;
 	return lock_is_held(&rcu_lock_map);
 }
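For illustration, a minimal sketch (not part of this patch) of how idle-loop code might wrap an RCU-using statement in the new RCU_NONIDLE() macro. Tracepoints expand to RCU read-side critical sections, which is exactly the powertop case the comment mentions; demo_idle_loop() and trace_cpu_idle_demo() are hypothetical names:

	static void demo_idle_loop(int state)
	{
		rcu_idle_enter();	/* RCU stops watching this CPU. */
		while (!need_resched()) {
			/* Momentarily tell RCU to pay attention so the
			 * tracepoint's read-side critical section is safe,
			 * then go back to being ignored. */
			RCU_NONIDLE(trace_cpu_idle_demo(state));
			/* ... architecture-specific low-power wait ... */
		}
		rcu_idle_exit();	/* RCU watches this CPU again. */
	}

Nesting RCU_NONIDLE() wrappers works but is bounded by DYNTICK_TASK_NESTING_VALUE, so deeper nesting would require adjusting that constant, as the comment notes.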
@@ -313,6 +353,9 @@ extern int rcu_read_lock_bh_held(void);
  * notice an extended quiescent state to other CPUs that started a grace
  * period. Otherwise we would delay any grace period as long as we run in
  * the idle task.
+ *
+ * Similarly, we avoid claiming an RCU read lock held if the current
+ * CPU is offline.
  */
 #ifdef CONFIG_PREEMPT_COUNT
 static inline int rcu_read_lock_sched_held(void)
@@ -323,6 +366,8 @@ static inline int rcu_read_lock_sched_held(void)
 		return 1;
 	if (rcu_is_cpu_idle())
 		return 0;
+	if (!rcu_lockdep_current_cpu_online())
+		return 0;
 	if (debug_locks)
 		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
 	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
@@ -374,15 +419,29 @@ extern int rcu_my_thread_group_empty(void);
  */
 #define rcu_lockdep_assert(c, s)					\
 	do {								\
-		static bool __warned;					\
+		static bool __section(.data.unlikely) __warned;		\
 		if (debug_lockdep_rcu_enabled() && !__warned && !(c)) {	\
 			__warned = true;				\
 			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
 		}							\
 	} while (0)
 
+#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
+static inline void rcu_preempt_sleep_check(void)
+{
+	rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
+			   "Illegal context switch in RCU read-side "
+			   "critical section");
+}
+#else /* #ifdef CONFIG_PROVE_RCU */
+static inline void rcu_preempt_sleep_check(void)
+{
+}
+#endif /* #else #ifdef CONFIG_PROVE_RCU */
+
 #define rcu_sleep_check()						\
 	do {								\
+		rcu_preempt_sleep_check();				\
 		rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),	\
 				   "Illegal context switch in RCU-bh"	\
 				   " read-side critical section");	\
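For illustration, a sketch (not part of this patch) of the bug class the new rcu_preempt_sleep_check() catches. Under CONFIG_PREEMPT_RCU, a context switch inside rcu_read_lock() does not trip the scheduler's atomic-context checks on its own, so rcu_sleep_check(), invoked from the scheduler's __might_sleep(), must complain explicitly. struct foo, gp, and my_mutex are hypothetical names:

	static DEFINE_MUTEX(my_mutex);
	struct foo { int a; };
	static struct foo __rcu *gp;	/* Hypothetical RCU-protected pointer. */

	static void buggy_reader(void)
	{
		struct foo *p;

		rcu_read_lock();
		p = rcu_dereference(gp);	/* Legitimate read-side access. */
		(void)p;
		mutex_lock(&my_mutex);	/* BUG: may sleep inside an RCU
					 * read-side critical section; with
					 * CONFIG_PROVE_RCU this now splats
					 * via rcu_sleep_check(). */
		mutex_unlock(&my_mutex);
		rcu_read_unlock();
	}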
@@ -470,6 +529,13 @@ extern int rcu_my_thread_group_empty(void);
  * NULL.  Although rcu_access_pointer() may also be used in cases where
  * update-side locks prevent the value of the pointer from changing, you
  * should instead use rcu_dereference_protected() for this use case.
+ *
+ * It is also permissible to use rcu_access_pointer() when read-side
+ * access to the pointer was removed at least one grace period ago, as
+ * is the case in the context of the RCU callback that is freeing up
+ * the data, or after a synchronize_rcu() returns.  This can be useful
+ * when tearing down multi-linked structures after a grace period
+ * has elapsed.
  */
 
 #define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
@@ -659,6 +725,8 @@ static inline void rcu_read_lock(void)
 	__rcu_read_lock();
 	__acquire(RCU);
 	rcu_lock_acquire(&rcu_lock_map);
+	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+			   "rcu_read_lock() used illegally while idle");
 }
 
 /*
@@ -678,6 +746,8 @@ static inline void rcu_read_lock(void)
  */
 static inline void rcu_read_unlock(void)
 {
+	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+			   "rcu_read_unlock() used illegally while idle");
 	rcu_lock_release(&rcu_lock_map);
 	__release(RCU);
 	__rcu_read_unlock();
@@ -705,6 +775,8 @@ static inline void rcu_read_lock_bh(void)
 	local_bh_disable();
 	__acquire(RCU_BH);
 	rcu_lock_acquire(&rcu_bh_lock_map);
+	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+			   "rcu_read_lock_bh() used illegally while idle");
 }
 
 /*
@@ -714,6 +786,8 @@ static inline void rcu_read_lock_bh(void)
  */
 static inline void rcu_read_unlock_bh(void)
 {
+	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+			   "rcu_read_unlock_bh() used illegally while idle");
 	rcu_lock_release(&rcu_bh_lock_map);
 	__release(RCU_BH);
 	local_bh_enable();
@@ -737,6 +811,8 @@ static inline void rcu_read_lock_sched(void)
 	preempt_disable();
 	__acquire(RCU_SCHED);
 	rcu_lock_acquire(&rcu_sched_lock_map);
+	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+			   "rcu_read_lock_sched() used illegally while idle");
 }
 
 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -753,6 +829,8 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
  */
 static inline void rcu_read_unlock_sched(void)
 {
+	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+			   "rcu_read_unlock_sched() used illegally while idle");
 	rcu_lock_release(&rcu_sched_lock_map);
 	__release(RCU_SCHED);
 	preempt_enable();
@@ -841,7 +919,7 @@ void __kfree_rcu(struct rcu_head *head, unsigned long offset)
 	/* See the kfree_rcu() header comment. */
 	BUILD_BUG_ON(!__is_kfree_rcu_offset(offset));
 
-	call_rcu(head, (rcu_callback)offset);
+	kfree_call_rcu(head, (rcu_callback)offset);
 }
 
 /**
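For illustration, a sketch (not part of this patch) of the teardown use of rcu_access_pointer() described in the new comment text: once an RCU callback runs, at least one grace period has elapsed and no readers can still hold references, so a multi-linked structure can be walked and freed without rcu_dereference() and without rcu_read_lock(). struct demo_node and demo_free_chain() are hypothetical:

	struct demo_node {
		struct demo_node __rcu *next;
		struct rcu_head rh;
	};

	/* RCU callback: queued with call_rcu(&head->rh, demo_free_chain)
	 * after the chain has been unlinked from all readers. */
	static void demo_free_chain(struct rcu_head *rhp)
	{
		struct demo_node *p = container_of(rhp, struct demo_node, rh);
		struct demo_node *q;

		while (p) {
			/* No readers remain, so rcu_access_pointer() is
			 * sufficient; no lockdep claim or memory-ordering
			 * guarantee is needed here. */
			q = rcu_access_pointer(p->next);
			kfree(p);
			p = q;
		}
	}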

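Finally, a sketch (not part of this patch) of the kfree_rcu() path whose last step changes here from call_rcu() to kfree_call_rcu(). kfree_rcu(ptr, field) encodes the offset of the rcu_head within the enclosing structure as a fake callback pointer; __is_kfree_rcu_offset() recognizes such small values, and routing them through kfree_call_rcu() lets each RCU implementation queue the eventual kfree() appropriately. struct demo_blob and demo_release() are hypothetical:

	struct demo_blob {
		int payload;
		struct rcu_head rh;
	};

	static void demo_release(struct demo_blob *p)
	{
		/* Expands to
		 *   __kfree_rcu(&p->rh, offsetof(struct demo_blob, rh));
		 * no callback function is needed: after a grace period the
		 * RCU core kfree()s the enclosing structure directly. */
		kfree_rcu(p, rh);
	}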