From ec484608c5885931c432e99ecfd2772288cd993c Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Sat, 25 Jul 2009 16:09:17 +0200
Subject: locking, kprobes: Annotate the hash locks and kretprobe.lock as raw

The kprobe locks can be taken in atomic context and therefore
cannot be preempted on -rt - annotate it.

In mainline this change documents the low level nature of
the lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.

Signed-off-by: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 include/linux/kprobes.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index dd7c12e875bc..dce6e4dbeda7 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -181,7 +181,7 @@ struct kretprobe {
 	int nmissed;
 	size_t data_size;
 	struct hlist_head free_instances;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 };
 
 struct kretprobe_instance {
--
cgit v1.2.3

From f032a450812f6c7edd532772cc7c48091bca9f27 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Sat, 25 Jul 2009 16:21:48 +0200
Subject: locking, percpu_counter: Annotate ::lock as raw

The percpu_counter::lock can be taken in atomic context and therefore
cannot be preempted on -rt - annotate it.

In mainline this change documents the low level nature of
the lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.

Signed-off-by: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 include/linux/percpu_counter.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 5edc9014263a..b9df9ed1adc0 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -16,7 +16,7 @@
 #ifdef CONFIG_SMP
 
 struct percpu_counter {
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	s64 count;
 #ifdef CONFIG_HOTPLUG_CPU
 	struct list_head list;	/* All percpu_counters are on a list */
--
cgit v1.2.3

From 740969f91e950b64a18fdd0a25164cdee042abf0 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Sat, 25 Jul 2009 16:43:30 +0200
Subject: locking, lib/proportions: Annotate prop_local_percpu::lock as raw

The prop_local_percpu::lock can be taken in atomic context and
therefore cannot be preempted on -rt - annotate it.

In mainline this change documents the low level nature of
the lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.
Signed-off-by: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 include/linux/proportions.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index cf793bbbd05e..ef35bb73f69b 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -58,7 +58,7 @@ struct prop_local_percpu {
 	 */
 	int shift;
 	unsigned long period;
-	spinlock_t lock;	/* protect the snapshot state */
+	raw_spinlock_t lock;	/* protect the snapshot state */
 };
 
 int prop_local_init_percpu(struct prop_local_percpu *pl);
@@ -106,11 +106,11 @@ struct prop_local_single {
 	 */
 	unsigned long period;
 	int shift;
-	spinlock_t lock;	/* protect the snapshot state */
+	raw_spinlock_t lock;	/* protect the snapshot state */
 };
 
 #define INIT_PROP_LOCAL_SINGLE(name)			\
-{	.lock = __SPIN_LOCK_UNLOCKED(name.lock),	\
+{	.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),	\
 }
 
 int prop_local_init_single(struct prop_local_single *pl);
--
cgit v1.2.3

From 07354eb1a74d1e1ece29f8bafe0b46e8c77a95ef Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Sat, 25 Jul 2009 17:50:36 +0200
Subject: locking, printk: Annotate logbuf_lock as raw

The logbuf_lock lock can be taken in atomic context and therefore
cannot be preempted on -rt - annotate it.

In mainline this change documents the low level nature of
the lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.

Signed-off-by: Thomas Gleixner
[ merged and fixed it ]
Signed-off-by: Ingo Molnar
---
 include/linux/ratelimit.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 2f007157fab9..e11ccb4cf48d 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -8,7 +8,7 @@
 #define DEFAULT_RATELIMIT_BURST		10
 
 struct ratelimit_state {
-	spinlock_t	lock;		/* protect the state */
+	raw_spinlock_t	lock;		/* protect the state */
 
 	int		interval;
 	int		burst;
@@ -20,7 +20,7 @@ struct ratelimit_state {
 #define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init)	\
 									\
 	struct ratelimit_state name = {					\
-		.lock		= __SPIN_LOCK_UNLOCKED(name.lock),	\
+		.lock		= __RAW_SPIN_LOCK_UNLOCKED(name.lock),	\
 		.interval	= interval_init,			\
 		.burst		= burst_init,				\
 	}
@@ -28,7 +28,7 @@ struct ratelimit_state {
 static inline void ratelimit_state_init(struct ratelimit_state *rs,
 					int interval, int burst)
 {
-	spin_lock_init(&rs->lock);
+	raw_spin_lock_init(&rs->lock);
 	rs->interval = interval;
 	rs->burst = burst;
 	rs->printed = 0;
--
cgit v1.2.3

From ee30a7b2fc072f139dac44826860d2c1f422137c Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Sat, 25 Jul 2009 18:56:56 +0200
Subject: locking, sched: Annotate thread_group_cputimer as raw

The thread_group_cputimer lock can be taken in atomic context and
therefore cannot be preempted on -rt - annotate it.

In mainline this change documents the low level nature of
the lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.
Signed-off-by: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 include/linux/init_task.h | 2 +-
 include/linux/sched.h     | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index d14e058aaeed..08ffab01e76c 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -42,7 +42,7 @@ extern struct fs_struct init_fs;
 	.cputimer	= { 						\
 		.cputime = INIT_CPUTIME,				\
 		.running = 0,						\
-		.lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock),	\
+		.lock = __RAW_SPIN_LOCK_UNLOCKED(sig.cputimer.lock),	\
 	},								\
 	.cred_guard_mutex =						\
 		 __MUTEX_INITIALIZER(sig.cred_guard_mutex),		\
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4ac2c0578e0f..e672236c08e0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -510,7 +510,7 @@ struct task_cputime {
 struct thread_group_cputimer {
 	struct task_cputime cputime;
 	int running;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 };
 
 #include
@@ -2566,7 +2566,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
 
 static inline void thread_group_cputime_init(struct signal_struct *sig)
 {
-	spin_lock_init(&sig->cputimer.lock);
+	raw_spin_lock_init(&sig->cputimer.lock);
 }
 
 /*
--
cgit v1.2.3

From 8292c9e15c3b069459794a04f5e2cf0d5665ddc4 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Wed, 24 Feb 2010 09:50:22 +0100
Subject: locking, semaphores: Annotate inner lock as raw

There is no reason to have the spin_lock protecting the semaphore
preemptible on -rt. Annotate it as a raw_spinlock.

In mainline this change documents the low level nature of
the lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.

( On rt this also solves lockdep complaining about the
  rt_mutex.wait_lock being not initialized. )

Signed-off-by: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 include/linux/semaphore.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index 39fa04966aa8..dc368b8ce215 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -14,14 +14,14 @@
 
 /* Please don't access any members of this structure directly */
 struct semaphore {
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 	unsigned int		count;
 	struct list_head	wait_list;
 };
 
 #define __SEMAPHORE_INITIALIZER(name, n)				\
 {									\
-	.lock		= __SPIN_LOCK_UNLOCKED((name).lock),		\
+	.lock		= __RAW_SPIN_LOCK_UNLOCKED((name).lock),	\
 	.count		= n,						\
 	.wait_list	= LIST_HEAD_INIT((name).wait_list),		\
 }
--
cgit v1.2.3

From ddb6c9b58a19edcfac93ac670b066c836ff729f1 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Wed, 24 Feb 2010 09:54:54 +0100
Subject: locking, rwsem: Annotate inner lock as raw

There is no reason to allow the lock protecting rwsems (the
ownerless variant) to be preemptible on -rt. Convert it to raw.

In mainline this change documents the low level nature of
the lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.
Signed-off-by: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 include/linux/rwsem-spinlock.h |  2 +-
 include/linux/rwsem.h          | 10 ++++++----
 2 files changed, 7 insertions(+), 5 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index 34701241b673..d5b13bc07a0b 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -22,7 +22,7 @@
  */
 struct rw_semaphore {
 	__s32			activity;
-	spinlock_t		wait_lock;
+	raw_spinlock_t		wait_lock;
 	struct list_head	wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 6a6741440cb7..63d406554391 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -25,7 +25,7 @@ struct rw_semaphore;
 /* All arch specific implementations share the same struct */
 struct rw_semaphore {
 	long			count;
-	spinlock_t		wait_lock;
+	raw_spinlock_t		wait_lock;
 	struct list_head	wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	dep_map;
@@ -56,9 +56,11 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
 # define __RWSEM_DEP_MAP_INIT(lockname)
 #endif
 
-#define __RWSEM_INITIALIZER(name) \
-	{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED(name.wait_lock), \
-	  LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
+#define __RWSEM_INITIALIZER(name)			\
+	{ RWSEM_UNLOCKED_VALUE,				\
+	  __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),	\
+	  LIST_HEAD_INIT((name).wait_list)		\
+	  __RWSEM_DEP_MAP_INIT(name) }
 
 #define DECLARE_RWSEM(name) \
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
--
cgit v1.2.3

From 2d21a29fb62f142b8a62496700d8d82a6a8fd783 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Sat, 25 Jul 2009 16:18:34 +0200
Subject: locking, oprofile: Annotate oprofilefs lock as raw

The oprofilefs_lock can be taken in atomic context (in profiling
interrupts) and therefore cannot be preempted on -rt - annotate it.

In mainline this change documents the low level nature of
the lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.

Signed-off-by: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 include/linux/oprofile.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index 49c8727eeb57..a4c562453f6b 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -166,7 +166,7 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t co
 int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count);
 
 /** lock for read/write safety */
-extern spinlock_t oprofilefs_lock;
+extern raw_spinlock_t oprofilefs_lock;
 
 /**
  * Add the contents of a circular buffer to the event buffer.
--
cgit v1.2.3

From 1f5b3c3fd2d73d6b30e9ef6dcbf131a791d5cbbd Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 19 Jul 2011 16:19:51 +0200
Subject: locking, x86, iommu: Annotate iommu->register_lock as raw

The iommu->register_lock can be taken in atomic context and therefore
must not be preempted on -rt - annotate it.

In mainline this change documents the low level nature of
the lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.
Signed-off-by: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 include/linux/intel-iommu.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 9310c699a37d..19728c462399 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -311,7 +311,7 @@ struct intel_iommu {
 	u64		cap;
 	u64		ecap;
 	u32		gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
-	spinlock_t	register_lock; /* protect register handling */
+	raw_spinlock_t	register_lock; /* protect register handling */
 	int		seq_id;	/* sequence id of the iommu */
 	int		agaw;	/* agaw of this iommu */
 	int		msagaw; /* max sagaw of this iommu */
--
cgit v1.2.3

From 3b8f40481513a7b6123def5a02db4cff96ae2198 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 19 Jul 2011 17:02:07 +0200
Subject: locking, x86, iommu: Annotate qi->q_lock as raw

The qi->q_lock lock can be taken in atomic context and therefore
cannot be preempted on -rt - annotate it.

In mainline this change documents the low level nature of
the lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.

Signed-off-by: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 include/linux/intel-iommu.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 19728c462399..8b9b5d365f4e 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -271,7 +271,7 @@ struct qi_desc {
 };
 
 struct q_inval {
-	spinlock_t	q_lock;
+	raw_spinlock_t	q_lock;
 	struct qi_desc	*desc;		/* invalidation queue */
 	int		*desc_status;	/* desc status */
 	int		free_head;	/* first free entry */
--
cgit v1.2.3
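
Editor's note: every patch in this series applies the same mechanical conversion - the
field type becomes raw_spinlock_t, static initializers switch from __SPIN_LOCK_UNLOCKED()
to __RAW_SPIN_LOCK_UNLOCKED(), runtime initialization moves from spin_lock_init() to
raw_spin_lock_init(), and callers use the raw_spin_lock*() family. The following is a
minimal sketch of the pattern on a hypothetical structure; struct my_dev and its helpers
are illustrative only, not taken from any patch above.

#include <linux/spinlock.h>

/*
 * Hypothetical device state whose lock is taken from hard interrupt
 * context; on -rt it must therefore remain a true spinning lock.
 */
struct my_dev {
	raw_spinlock_t	lock;		/* was: spinlock_t lock; */
	unsigned int	count;
};

/* Static initialization, mirroring the initializer changes above. */
static struct my_dev global_dev = {
	.lock	= __RAW_SPIN_LOCK_UNLOCKED(global_dev.lock),
};

/* Runtime initialization; was: spin_lock_init(). */
static void my_dev_init(struct my_dev *dev)
{
	raw_spin_lock_init(&dev->lock);
	dev->count = 0;
}

/*
 * Called from an interrupt handler: raw_spin_lock_irqsave() never
 * sleeps, even on -rt, whereas spin_lock_irqsave() on a spinlock_t
 * can block there on the underlying rt_mutex.
 */
static void my_dev_hit(struct my_dev *dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&dev->lock, flags);
	dev->count++;
	raw_spin_unlock_irqrestore(&dev->lock, flags);
}

On mainline kernels both lock types compile to the same spinning lock, which is why the
patches above are pure annotations; the distinction only becomes functional on -rt, where
spinlock_t turns into a sleeping rt_mutex-based lock while raw_spinlock_t keeps spinning
with preemption disabled.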