From be832f69a95eb97b4ed0bc8c2b716ee7837ea617 Mon Sep 17 00:00:00 2001
From: Vikram Mulukutla
Date: Thu, 24 Aug 2017 11:38:00 -0700
Subject: sched: walt: Leverage existing helper APIs to apply invariance

There's no need for a separate hierarchy of notifiers, APIs and
variables in walt.c for the purpose of applying frequency and IPC
invariance. Let's just use capacity_curr_of and get rid of a lot of
the infrastructure relating to capacity, load_scale_factor etc.

Change-Id: Ia220e2c896373fa535db05bff60f9aa33aefc978
Signed-off-by: Vikram Mulukutla
---
 kernel/sched/core.c  |   1 -
 kernel/sched/sched.h |  11 --
 kernel/sched/walt.c  | 309 ++-------------------------------------------------
 3 files changed, 7 insertions(+), 314 deletions(-)

(limited to 'kernel')

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5e7f755b4a21..fec7b2c4f9fd 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7702,7 +7702,6 @@ void __init sched_init_smp(void)
 {
 	cpumask_var_t non_isolated_cpus;
 
-	walt_init_cpu_efficiency();
 	alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
 	alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d4613c5be81d..9f3d89faacdc 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -673,18 +673,7 @@ struct rq {
 #endif
 
 #ifdef CONFIG_SCHED_WALT
-	/*
-	 * max_freq = user or thermal defined maximum
-	 * max_possible_freq = maximum supported by hardware
-	 */
-	unsigned int cur_freq, max_freq, min_freq, max_possible_freq;
-	struct cpumask freq_domain_cpumask;
-
 	u64 cumulative_runnable_avg;
-	int efficiency; /* Differentiate cpus with different IPC capability */
-	int load_scale_factor;
-	int capacity;
-	int max_possible_capacity;
 	u64 window_start;
 	u64 curr_runnable_sum;
 	u64 prev_runnable_sum;
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 28e999554463..441cba01bc04 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -20,7 +20,6 @@
  */
 
 #include
-#include
 #include "sched.h"
 #include "walt.h"
 
@@ -45,29 +44,6 @@ unsigned int sysctl_sched_walt_init_task_load_pct = 15;
 /* 1 -> use PELT based load stats, 0 -> use window-based load stats */
 unsigned int __read_mostly walt_disabled = 0;
 
-static unsigned int max_possible_efficiency = 1024;
-static unsigned int min_possible_efficiency = 1024;
-
-/*
- * Maximum possible frequency across all cpus. Task demand and cpu
- * capacity (cpu_power) metrics are scaled in reference to it.
- */
-static unsigned int max_possible_freq = 1;
-
-/*
- * Minimum possible max_freq across all cpus. This will be same as
- * max_possible_freq on homogeneous systems and could be different from
- * max_possible_freq on heterogenous systems. min_max_freq is used to derive
- * capacity (cpu_power) of cpus.
-	 */
-static unsigned int min_max_freq = 1;
-
-static unsigned int max_load_scale_factor = 1024;
-static unsigned int max_possible_capacity = 1024;
-
-/* Mask of all CPUs that have max_possible_capacity */
-static cpumask_t mpc_mask = CPU_MASK_ALL;
-
 /* Window size (in ns) */
 __read_mostly unsigned int walt_ravg_window = 20000000;
 
@@ -206,24 +182,16 @@ update_window_start(struct rq *rq, u64 wallclock)
 	rq->window_start += (u64)nr_windows * (u64)walt_ravg_window;
 }
 
+/*
+ * Translate absolute delta time accounted on a CPU
+ * to a scale where 1024 is the capacity of the most
+ * capable CPU running at FMAX
+ */
 static u64 scale_exec_time(u64 delta, struct rq *rq)
 {
-	unsigned int cur_freq = rq->cur_freq;
-	int sf;
-
-	if (unlikely(cur_freq > max_possible_freq))
-		cur_freq = rq->max_possible_freq;
-
-	/* round up div64 */
-	delta = div64_u64(delta * cur_freq + max_possible_freq - 1,
-			  max_possible_freq);
-
-	sf = DIV_ROUND_UP(rq->efficiency * 1024, max_possible_efficiency);
-
-	delta *= sf;
-	delta >>= 10;
+	unsigned long capcurr = capacity_curr_of(cpu_of(rq));
 
-	return delta;
+	return (delta * capcurr) >> SCHED_CAPACITY_SHIFT;
 }
 
 static int cpu_is_waiting_on_io(struct rq *rq)
@@ -746,33 +714,6 @@ done:
 	p->ravg.mark_start = wallclock;
 }
 
-unsigned long __weak arch_get_cpu_efficiency(int cpu)
-{
-	return SCHED_LOAD_SCALE;
-}
-
-void walt_init_cpu_efficiency(void)
-{
-	int i, efficiency;
-	unsigned int max = 0, min = UINT_MAX;
-
-	for_each_possible_cpu(i) {
-		efficiency = arch_get_cpu_efficiency(i);
-		cpu_rq(i)->efficiency = efficiency;
-
-		if (efficiency > max)
-			max = efficiency;
-		if (efficiency < min)
-			min = efficiency;
-	}
-
-	if (max)
-		max_possible_efficiency = max;
-
-	if (min)
-		min_possible_efficiency = min;
-}
-
 static void reset_task_stats(struct task_struct *p)
 {
 	u32 sum = 0;
@@ -877,242 +818,6 @@ void walt_fixup_busy_time(struct task_struct *p, int new_cpu)
 	double_rq_unlock(src_rq, dest_rq);
 }
 
-/*
- * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that
- * least efficient cpu gets capacity of 1024
- */
-static unsigned long capacity_scale_cpu_efficiency(int cpu)
-{
-	return (1024 * cpu_rq(cpu)->efficiency) / min_possible_efficiency;
-}
-
-/*
- * Return 'capacity' of a cpu in reference to cpu with lowest max_freq
- * (min_max_freq), such that one with lowest max_freq gets capacity of 1024.
- */
-static unsigned long capacity_scale_cpu_freq(int cpu)
-{
-	return (1024 * cpu_rq(cpu)->max_freq) / min_max_freq;
-}
-
-/*
- * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so
- * that "most" efficient cpu gets a load_scale_factor of 1
- */
-static unsigned long load_scale_cpu_efficiency(int cpu)
-{
-	return DIV_ROUND_UP(1024 * max_possible_efficiency,
-			    cpu_rq(cpu)->efficiency);
-}
-
-/*
- * Return load_scale_factor of a cpu in reference to cpu with best max_freq
- * (max_possible_freq), so that one with best max_freq gets a load_scale_factor
- * of 1.
- */
-static unsigned long load_scale_cpu_freq(int cpu)
-{
-	return DIV_ROUND_UP(1024 * max_possible_freq, cpu_rq(cpu)->max_freq);
-}
-
-static int compute_capacity(int cpu)
-{
-	int capacity = 1024;
-
-	capacity *= capacity_scale_cpu_efficiency(cpu);
-	capacity >>= 10;
-
-	capacity *= capacity_scale_cpu_freq(cpu);
-	capacity >>= 10;
-
-	return capacity;
-}
-
-static int compute_load_scale_factor(int cpu)
-{
-	int load_scale = 1024;
-
-	/*
-	 * load_scale_factor accounts for the fact that task load
-	 * is in reference to "best" performing cpu. Task's load will need to be
-	 * scaled (up) by a factor to determine suitability to be placed on a
-	 * (little) cpu.
-	 */
-	load_scale *= load_scale_cpu_efficiency(cpu);
-	load_scale >>= 10;
-
-	load_scale *= load_scale_cpu_freq(cpu);
-	load_scale >>= 10;
-
-	return load_scale;
-}
-
-static int cpufreq_notifier_policy(struct notifier_block *nb,
-				   unsigned long val, void *data)
-{
-	struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
-	int i, update_max = 0;
-	u64 highest_mpc = 0, highest_mplsf = 0;
-	const struct cpumask *cpus = policy->related_cpus;
-	unsigned int orig_min_max_freq = min_max_freq;
-	unsigned int orig_max_possible_freq = max_possible_freq;
-	/* Initialized to policy->max in case policy->related_cpus is empty! */
-	unsigned int orig_max_freq = policy->max;
-
-	if (val != CPUFREQ_NOTIFY)
-		return 0;
-
-	for_each_cpu(i, policy->related_cpus) {
-		cpumask_copy(&cpu_rq(i)->freq_domain_cpumask,
-			     policy->related_cpus);
-		orig_max_freq = cpu_rq(i)->max_freq;
-		cpu_rq(i)->min_freq = policy->min;
-		cpu_rq(i)->max_freq = policy->max;
-		cpu_rq(i)->cur_freq = policy->cur;
-		cpu_rq(i)->max_possible_freq = policy->cpuinfo.max_freq;
-	}
-
-	max_possible_freq = max(max_possible_freq, policy->cpuinfo.max_freq);
-	if (min_max_freq == 1)
-		min_max_freq = UINT_MAX;
-	min_max_freq = min(min_max_freq, policy->cpuinfo.max_freq);
-	BUG_ON(!min_max_freq);
-	BUG_ON(!policy->max);
-
-	/* Changes to policy other than max_freq don't require any updates */
-	if (orig_max_freq == policy->max)
-		return 0;
-
-	/*
-	 * A changed min_max_freq or max_possible_freq (possible during bootup)
-	 * needs to trigger re-computation of load_scale_factor and capacity for
-	 * all possible cpus (even those offline). It also needs to trigger
-	 * re-computation of nr_big_task count on all online cpus.
-	 *
-	 * A changed rq->max_freq otoh needs to trigger re-computation of
-	 * load_scale_factor and capacity for just the cluster of cpus involved.
-	 * Since small task definition depends on max_load_scale_factor, a
-	 * changed load_scale_factor of one cluster could influence
-	 * classification of tasks in another cluster. Hence a changed
-	 * rq->max_freq will need to trigger re-computation of nr_big_task
-	 * count on all online cpus.
-	 *
-	 * While it should be sufficient for nr_big_tasks to be
-	 * re-computed for only online cpus, we have inadequate context
-	 * information here (in policy notifier) with regard to hotplug-safety
-	 * context in which notification is issued. As a result, we can't use
-	 * get_online_cpus() here, as it can lead to deadlock. Until cpufreq is
-	 * fixed up to issue notification always in hotplug-safe context,
-	 * re-compute nr_big_task for all possible cpus.
-	 */
-
-	if (orig_min_max_freq != min_max_freq ||
-	    orig_max_possible_freq != max_possible_freq) {
-		cpus = cpu_possible_mask;
-		update_max = 1;
-	}
-
-	/*
-	 * Changed load_scale_factor can trigger reclassification of tasks as
-	 * big or small. Make this change "atomic" so that tasks are accounted
-	 * properly due to changed load_scale_factor
-	 */
-	for_each_cpu(i, cpus) {
-		struct rq *rq = cpu_rq(i);
-
-		rq->capacity = compute_capacity(i);
-		rq->load_scale_factor = compute_load_scale_factor(i);
-
-		if (update_max) {
-			u64 mpc, mplsf;
-
-			mpc = div_u64(((u64) rq->capacity) *
-				rq->max_possible_freq, rq->max_freq);
-			rq->max_possible_capacity = (int) mpc;
-
-			mplsf = div_u64(((u64) rq->load_scale_factor) *
-				rq->max_possible_freq, rq->max_freq);
-
-			if (mpc > highest_mpc) {
-				highest_mpc = mpc;
-				cpumask_clear(&mpc_mask);
-				cpumask_set_cpu(i, &mpc_mask);
-			} else if (mpc == highest_mpc) {
-				cpumask_set_cpu(i, &mpc_mask);
-			}
-
-			if (mplsf > highest_mplsf)
-				highest_mplsf = mplsf;
-		}
-	}
-
-	if (update_max) {
-		max_possible_capacity = highest_mpc;
-		max_load_scale_factor = highest_mplsf;
-	}
-
-	return 0;
-}
-
-static int cpufreq_notifier_trans(struct notifier_block *nb,
-				  unsigned long val, void *data)
-{
-	struct cpufreq_freqs *freq = (struct cpufreq_freqs *)data;
-	unsigned int cpu = freq->cpu, new_freq = freq->new;
-	unsigned long flags;
-	int i;
-
-	if (val != CPUFREQ_POSTCHANGE)
-		return 0;
-
-	BUG_ON(!new_freq);
-
-	if (cpu_rq(cpu)->cur_freq == new_freq)
-		return 0;
-
-	for_each_cpu(i, &cpu_rq(cpu)->freq_domain_cpumask) {
-		struct rq *rq = cpu_rq(i);
-
-		raw_spin_lock_irqsave(&rq->lock, flags);
-		walt_update_task_ravg(rq->curr, rq, TASK_UPDATE,
-				      walt_ktime_clock(), 0);
-		rq->cur_freq = new_freq;
-		raw_spin_unlock_irqrestore(&rq->lock, flags);
-	}
-
-	return 0;
-}
-
-static struct notifier_block notifier_policy_block = {
-	.notifier_call = cpufreq_notifier_policy
-};
-
-static struct notifier_block notifier_trans_block = {
-	.notifier_call = cpufreq_notifier_trans
-};
-
-static int register_sched_callback(void)
-{
-	int ret;
-
-	ret = cpufreq_register_notifier(&notifier_policy_block,
-					CPUFREQ_POLICY_NOTIFIER);
-
-	if (!ret)
-		ret = cpufreq_register_notifier(&notifier_trans_block,
-						CPUFREQ_TRANSITION_NOTIFIER);
-
-	return 0;
-}
-
-/*
- * cpufreq callbacks can be registered at core_initcall or later time.
- * Any registration done prior to that is "forgotten" by cpufreq. See
- * initialization of variable init_cpufreq_transition_notifier_list_called
- * for further information.
- */
-core_initcall(register_sched_callback);
-
 void walt_init_new_task_load(struct task_struct *p)
 {
 	int i;
--
cgit v1.2.3


From 7ab48e4c8d9e0652bd978f3df26c29e64b5ea85a Mon Sep 17 00:00:00 2001
From: Joonwoo Park
Date: Fri, 21 Oct 2016 13:39:31 -0700
Subject: sched/fair: prevent meaningless active migration

At present, need_active_balance() determines whether an active
upmigration is needed by using capacity_of(). A CPU's capacity may be
reduced by RT pressure at any instant, so distinguishing capability
differences with capacity_of() alone may lead to suboptimal active
migrations to CPUs that are no more capable than the source. Use
capacity_orig_of() in addition to capacity_of() to distinguish
differently capable CPUs, thus avoiding placing tasks on less capable
CPUs purely because of instantaneous RT pressure.
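To make the capacity_of() vs. capacity_orig_of() distinction concrete,
here is a minimal userspace sketch of the decision, with invented
capacity numbers, the other need_active_balance() conditions
(h_nr_running, overutilization) omitted, and the kernel helpers
modelled rather than reused:

  #include <stdbool.h>
  #include <stdio.h>

  struct cpu_model {
      unsigned long capacity_orig;  /* unpressured hardware capacity */
      unsigned long rt_pressure;    /* capacity currently consumed by RT */
  };

  /* models capacity_of(): what is left for CFS after RT pressure */
  static unsigned long capacity_of(const struct cpu_model *c)
  {
      return c->capacity_orig - c->rt_pressure;
  }

  /* pre-patch check: instantaneous RT pressure alone can justify a move */
  static bool upmigrate_old(const struct cpu_model *s, const struct cpu_model *d)
  {
      return capacity_of(s) < capacity_of(d);
  }

  /* post-patch check: the destination must also be genuinely more capable */
  static bool upmigrate_new(const struct cpu_model *s, const struct cpu_model *d)
  {
      return capacity_of(s) < capacity_of(d) &&
             s->capacity_orig < d->capacity_orig;
  }

  int main(void)
  {
      /* two equally capable CPUs; only the source is under RT pressure */
      struct cpu_model src = { .capacity_orig = 512, .rt_pressure = 100 };
      struct cpu_model dst = { .capacity_orig = 512, .rt_pressure = 0 };

      /* prints old=1 new=0: the old check migrates, the new one does not */
      printf("old=%d new=%d\n",
             upmigrate_old(&src, &dst), upmigrate_new(&src, &dst));
      return 0;
  }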
Change-Id: I3e1435246a8edc3ad618ef98a34866cfbd8c16a5
Signed-off-by: Joonwoo Park
[markivx: Reworked the commit text a bit]
Signed-off-by: Vikram Mulukutla
---
 kernel/sched/fair.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'kernel')

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0f43ece69e8c..d7bea4761a55 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8863,6 +8863,7 @@ static int need_active_balance(struct lb_env *env)
 	}
 
 	if ((capacity_of(env->src_cpu) < capacity_of(env->dst_cpu)) &&
+	    ((capacity_orig_of(env->src_cpu) < capacity_orig_of(env->dst_cpu))) &&
 	    env->src_rq->cfs.h_nr_running == 1 &&
 	    cpu_overutilized(env->src_cpu) &&
 	    !cpu_overutilized(env->dst_cpu)) {
--
cgit v1.2.3


From 40c3aaa56a68455a1c8eccff1bb23d853d8e239c Mon Sep 17 00:00:00 2001
From: Joonwoo Park
Date: Tue, 20 Dec 2016 15:23:53 -0800
Subject: cpufreq: sched: update capacity request upon tick always

At present, sched_freq_tick() skips the capacity update when the
current frequency is fmax. This can cause an incorrect frequency drop
when a CPU-bound task goes to sleep, for example:

 1) A task (A) enqueues onto CPU 0 and executes for a long time.
 2) A new task (B) with low task demand enqueues onto CPU 1 and also
    executes for a long time, becoming a CPU-bound task.
 3) Both CPU 0 and CPU 1 get scheduler ticks but skip the capacity
    update in sched_freq_tick() since the current frequency is fmax.
 4) Task (A) sleeps and lowers CPU 0's capacity request.
 5) Because task (B) voted for CPU capacity with its low demand at
    step 2 and never requested again afterwards, the cluster frequency
    for both CPU 0 and CPU 1 drops to match the capacity voted by
    CPU 1 at step 2, even though task (B) on CPU 1 now requires max
    capacity.

Fix this incorrectness by never skipping the CPU capacity vote in the
tick path.

Change-Id: Ieb46af1ac96ffce7a5532c58c7f07bf1ada06b86
Signed-off-by: Joonwoo Park
Signed-off-by: Vikram Mulukutla
---
 kernel/sched/core.c | 7 -------
 1 file changed, 7 deletions(-)

(limited to 'kernel')

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fec7b2c4f9fd..f747e373ed6f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3025,16 +3025,9 @@ static void sched_freq_tick_walt(int cpu)
 
 static void sched_freq_tick(int cpu)
 {
-	unsigned long capacity_orig, capacity_curr;
-
 	if (!sched_freq())
 		return;
 
-	capacity_orig = capacity_orig_of(cpu);
-	capacity_curr = capacity_curr_of(cpu);
-	if (capacity_curr == capacity_orig)
-		return;
-
 	_sched_freq_tick(cpu);
 }
 #else
--
cgit v1.2.3


From ed9e74966829b60ca2a2cd39cdcb0824a87a4da2 Mon Sep 17 00:00:00 2001
From: Joonwoo Park
Date: Thu, 22 Dec 2016 12:08:50 -0800
Subject: sched: EAS/WALT: finish accounting prior to task_tick

In order to set rq->misfit_task in time, call walt_update_task_ravg()
prior to the task_tick() callback. This reduces the upmigration delay
by one scheduler window.
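As a rough illustration of that one-window delay, consider this hedged
userspace model; the demand numbers, the capacity threshold and the
misfit rule are invented for the example, and real WALT accounting is
considerably richer:

  #include <stdbool.h>
  #include <stdio.h>

  struct task_model {
      unsigned long demand;       /* demand as last accounted */
      unsigned long window_load;  /* what the task really did this window */
  };

  /* a task is a misfit when its accounted demand exceeds CPU capacity */
  static bool misfit(const struct task_model *t, unsigned long capacity)
  {
      return t->demand > capacity;
  }

  int main(void)
  {
      unsigned long little_cap = 512;

      /* old tick order: classify first, account afterwards */
      struct task_model a = { .demand = 300, .window_load = 700 };
      bool old_order = misfit(&a, little_cap);  /* false: demand is stale */
      a.demand = a.window_load;                 /* accounting ran too late */

      /* new tick order: account first, then classify */
      struct task_model b = { .demand = 300, .window_load = 700 };
      b.demand = b.window_load;                 /* walt_update_task_ravg() first */
      bool new_order = misfit(&b, little_cap);  /* true: demand is fresh */

      printf("old order sees misfit: %d, new order: %d\n",
             old_order, new_order);
      return 0;
  }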
Change-Id: I7cc80badd423f2e7684125fbfd853b0a3610f0e8
Signed-off-by: Joonwoo Park
Signed-off-by: Vikram Mulukutla
---
 kernel/sched/core.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'kernel')

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f747e373ed6f..83f7c682032b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3048,11 +3048,11 @@ void scheduler_tick(void)
 
 	raw_spin_lock(&rq->lock);
 	walt_set_window_start(rq);
+	walt_update_task_ravg(rq->curr, rq, TASK_UPDATE,
+			walt_ktime_clock(), 0);
 	update_rq_clock(rq);
 	curr->sched_class->task_tick(rq, curr, 0);
 	update_cpu_load_active(rq);
-	walt_update_task_ravg(rq->curr, rq, TASK_UPDATE,
-			walt_ktime_clock(), 0);
 	calc_global_load_tick(rq);
 	sched_freq_tick(cpu);
 	raw_spin_unlock(&rq->lock);
--
cgit v1.2.3


From 724091f67f61e8362d00e15ddda4c1e224671f7e Mon Sep 17 00:00:00 2001
From: Dietmar Eggemann
Date: Thu, 29 Jun 2017 12:22:54 +0100
Subject: sched/fair: remove erroneous RCU_LOCKDEP_WARN from start_cpu()

Fixes: https://bugs.linaro.org/show_bug.cgi?id=3075
Change-Id: I62d714fc4b9366a9b2535649aa92d1edc840cf94
Reported-by: Naresh Kamboju
Signed-off-by: Dietmar Eggemann
Signed-off-by: Brendan Jackman
Signed-off-by: Chris Redpath
---
 kernel/sched/fair.c | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'kernel')

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d7bea4761a55..b1df3873b6fd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6234,9 +6234,6 @@
 static int start_cpu(bool boosted)
 {
 	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
 
-	RCU_LOCKDEP_WARN(rcu_read_lock_sched_held(),
-			 "sched RCU must be held");
-
 	return boosted ? rd->max_cap_orig_cpu : rd->min_cap_orig_cpu;
 }
--
cgit v1.2.3


From 2ec54b21dd7b25df0f070f1d67db2ea18987e69e Mon Sep 17 00:00:00 2001
From: Edward Cree
Date: Fri, 15 Sep 2017 14:37:38 +0100
Subject: bpf/verifier: reject BPF_ALU64|BPF_END

[ Upstream commit e67b8a685c7c984e834e3181ef4619cd7025a136 ]

Neither ___bpf_prog_run nor the JITs accept it. Also adds a new test
case.

Fixes: 17a5267067f3 ("bpf: verifier (add verifier core)")
Signed-off-by: Edward Cree
Acked-by: Alexei Starovoitov
Acked-by: Daniel Borkmann
Signed-off-by: David S. Miller
Signed-off-by: Greg Kroah-Hartman
---
 kernel/bpf/verifier.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 863e24f1e62e..70dc6dcf8649 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1033,7 +1033,8 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn)
 			}
 		} else {
 			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
-			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) {
+			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
+			    BPF_CLASS(insn->code) == BPF_ALU64) {
 				verbose("BPF_END uses reserved fields\n");
 				return -EINVAL;
 			}
--
cgit v1.2.3


From 1a4f1ecdb2573cebc9ef1b8bbed0185c0bd45e6c Mon Sep 17 00:00:00 2001
From: Yonghong Song
Date: Mon, 18 Sep 2017 16:38:36 -0700
Subject: bpf: one perf event close won't free bpf program attached by
 another perf event

[ Upstream commit ec9dd352d591f0c90402ec67a317c1ed4fb2e638 ]

This patch fixes a bug exhibited by the following scenario:

 1. fd1 = perf_event_open with attr.config = ID1
 2. attach bpf program prog1 to fd1
 3. fd2 = perf_event_open with attr.config = ID1
 4. user program closes fd2 and prog1 is detached from the tracepoint.
 5. the user program with fd1 no longer works properly as the
    tracepoint produces no output any more.
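A hedged userspace sketch of the scenario above (error handling is
omitted; loading prog1 via bpf(BPF_PROG_LOAD, ...) and resolving the
tracepoint id ID1 from tracefs are assumed to happen elsewhere):

  #include <linux/perf_event.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                             int cpu, int group_fd, unsigned long flags)
  {
      return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
  }

  int main(void)
  {
      struct perf_event_attr attr;
      int fd1, fd2;
      int prog_fd = -1;  /* assumed: fd of the already-loaded prog1 */
      long id1 = 0;      /* assumed: tracepoint id ID1 read from tracefs */

      memset(&attr, 0, sizeof(attr));
      attr.type = PERF_TYPE_TRACEPOINT;
      attr.size = sizeof(attr);
      attr.config = id1;

      fd1 = perf_event_open(&attr, -1, 0, -1, 0);   /* step 1 */
      ioctl(fd1, PERF_EVENT_IOC_SET_BPF, prog_fd);  /* step 2 */
      fd2 = perf_event_open(&attr, -1, 0, -1, 0);   /* step 3 */
      close(fd2);  /* step 4: pre-fix, this detaches prog1 as well */
      /* step 5: events on fd1 no longer trigger prog1 */
      close(fd1);
      return 0;
  }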
The issue happens at step 4. Multiple perf_event_open() calls on the
same tracepoint can succeed, but there is only one bpf prog pointer in
the tp_event. In the current logic, releasing any fd for the same
tp_event will free tp_event->prog. The fix is to free tp_event->prog
only when the closing fd corresponds to the one which registered the
program.

Signed-off-by: Yonghong Song
Signed-off-by: David S. Miller
Signed-off-by: Greg Kroah-Hartman
---
 kernel/events/core.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'kernel')

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 3697063dd09a..8f75386e61a7 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7108,6 +7108,7 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
 	}
 
 	event->tp_event->prog = prog;
+	event->tp_event->bpf_prog_owner = event;
 
 	return 0;
 }
@@ -7120,7 +7121,7 @@ static void perf_event_free_bpf_prog(struct perf_event *event)
 		return;
 
 	prog = event->tp_event->prog;
-	if (prog) {
+	if (prog && event->tp_event->bpf_prog_owner == event) {
 		event->tp_event->prog = NULL;
 		bpf_prog_put_rcu(prog);
 	}
--
cgit v1.2.3


From 28eab3db727efb7ad4eb17aaa83df59c3d50e330 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 1 Mar 2017 16:23:30 +0100
Subject: locking/lockdep: Add nest_lock integrity test
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

[ Upstream commit 7fb4a2cea6b18dab56d609530d077f168169ed6b ]

Boqun reported that hlock->references can overflow. Add a debug test
for that to generate a clear error when this happens.

Without this, lockdep is likely to report a mysterious failure on
unlock.

Reported-by: Boqun Feng
Signed-off-by: Peter Zijlstra (Intel)
Cc: Andrew Morton
Cc: Chris Wilson
Cc: Linus Torvalds
Cc: Nicolai Hähnle
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar
Signed-off-by: Sasha Levin
Signed-off-by: Greg Kroah-Hartman
---
 kernel/locking/lockdep.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

(limited to 'kernel')

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 60ace56618f6..0e2c4911ba61 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3128,10 +3128,17 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	if (depth) {
 		hlock = curr->held_locks + depth - 1;
 		if (hlock->class_idx == class_idx && nest_lock) {
-			if (hlock->references)
+			if (hlock->references) {
+				/*
+				 * Check: unsigned int references:12, overflow.
+				 */
+				if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
+					return 0;
+
 				hlock->references++;
-			else
+			} else {
 				hlock->references = 2;
+			}
 
 			return 1;
 		}
--
cgit v1.2.3
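For reference, the overflow guarded against in the lockdep patch above
stems from references being a 12-bit bitfield in struct held_lock (the
field width is taken from the patch itself; the rest of this userspace
model is invented for illustration):

  #include <stdio.h>

  struct held_lock_model {
      unsigned int references:12;  /* silently wraps to 0 at 4096 */
  };

  int main(void)
  {
      struct held_lock_model h = { .references = 0 };
      unsigned int i;

      for (i = 0; i < 5000; i++) {
          /* mirrors DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1) */
          if (h.references == (1 << 12) - 1) {
              printf("would WARN at acquisition %u\n", i);
              return 0;
          }
          h.references++;
      }
      return 0;
  }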