| author | Olof Johansson <olof@lixom.net> | 2014-07-19 14:59:07 -0700 |
|---|---|---|
| committer | Olof Johansson <olof@lixom.net> | 2014-07-19 14:59:07 -0700 |
| commit | 4e9816d012dbc28dc89559261c6ffbf8ffc440dd (patch) | |
| tree | dee9f8b31f3d6d2fb141541da88e1cc1329b017e /include/linux/percpu-refcount.h | |
| parent | da98f44f27d81d7fe9a41f69af4fe08c18d13b56 (diff) | |
| parent | 1795cd9b3a91d4b5473c97f491d63892442212ab (diff) | |
Merge tag 'v3.16-rc5' into next/fixes-non-critical
Linux 3.16-rc5
Diffstat (limited to 'include/linux/percpu-refcount.h')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | include/linux/percpu-refcount.h | 40 |

1 file changed, 36 insertions(+), 4 deletions(-)
```diff
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 95961f0bf62d..5d8920e23073 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -110,7 +110,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
 
 	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
-		__this_cpu_inc(*pcpu_count);
+		this_cpu_inc(*pcpu_count);
 	else
 		atomic_inc(&ref->count);
 
@@ -121,6 +121,36 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
  * percpu_ref_tryget - try to increment a percpu refcount
  * @ref: percpu_ref to try-get
  *
+ * Increment a percpu refcount unless its count already reached zero.
+ * Returns %true on success; %false on failure.
+ *
+ * The caller is responsible for ensuring that @ref stays accessible.
+ */
+static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+{
+	unsigned __percpu *pcpu_count;
+	int ret = false;
+
+	rcu_read_lock_sched();
+
+	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
+
+	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
+		this_cpu_inc(*pcpu_count);
+		ret = true;
+	} else {
+		ret = atomic_inc_not_zero(&ref->count);
+	}
+
+	rcu_read_unlock_sched();
+
+	return ret;
+}
+
+/**
+ * percpu_ref_tryget_live - try to increment a live percpu refcount
+ * @ref: percpu_ref to try-get
+ *
  * Increment a percpu refcount unless it has already been killed. Returns
  * %true on success; %false on failure.
  *
@@ -128,8 +158,10 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
  * will fail. For such guarantee, percpu_ref_kill_and_confirm() should be
  * used. After the confirm_kill callback is invoked, it's guaranteed that
  * no new reference will be given out by percpu_ref_tryget().
+ *
+ * The caller is responsible for ensuring that @ref stays accessible.
  */
-static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 {
 	unsigned __percpu *pcpu_count;
 	int ret = false;
@@ -139,7 +171,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
 
 	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
-		__this_cpu_inc(*pcpu_count);
+		this_cpu_inc(*pcpu_count);
 		ret = true;
 	}
 
@@ -164,7 +196,7 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
 	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
 
 	if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
-		__this_cpu_dec(*pcpu_count);
+		this_cpu_dec(*pcpu_count);
 	else if (unlikely(atomic_dec_and_test(&ref->count)))
 		ref->release(ref);
```
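For context, a minimal usage sketch of the API as it stands after this merge (v3.16). It is not part of the patch; `my_dev`, `my_dev_release`, and the helper functions are hypothetical names chosen for illustration. It shows where the new `percpu_ref_tryget_live()` differs from `percpu_ref_tryget()`: the live variant refuses new references once the ref has been killed, while plain `percpu_ref_tryget()` keeps succeeding until the count actually drains to zero.

```c
/* Hypothetical example object whose lifetime is managed by a percpu_ref. */
#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct my_dev {
	struct percpu_ref	ref;
	/* ... device state ... */
};

/* Called once the refcount finally drops to zero. */
static void my_dev_release(struct percpu_ref *ref)
{
	struct my_dev *dev = container_of(ref, struct my_dev, ref);

	kfree(dev);
}

static struct my_dev *my_dev_create(void)
{
	struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;
	/* v3.16 signature: percpu_ref_init(ref, release); returns 0 or -ENOMEM. */
	if (percpu_ref_init(&dev->ref, my_dev_release)) {
		kfree(dev);
		return NULL;
	}
	return dev;
}

/*
 * Lookup fast path: take a reference only while the ref is still live.
 * percpu_ref_tryget() would instead keep succeeding after percpu_ref_kill()
 * as long as the count has not yet reached zero.
 */
static struct my_dev *my_dev_get_live(struct my_dev *dev)
{
	return percpu_ref_tryget_live(&dev->ref) ? dev : NULL;
}

static void my_dev_put(struct my_dev *dev)
{
	percpu_ref_put(&dev->ref);
}

static void my_dev_destroy(struct my_dev *dev)
{
	/*
	 * Stop handing out live references and drop the initial reference;
	 * my_dev_release() runs once the remaining users drain.
	 */
	percpu_ref_kill(&dev->ref);
}
```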
