author     Blagovest Kolenichev <bkolenichev@codeaurora.org>   2017-09-21 13:19:38 -0700
committer  Blagovest Kolenichev <bkolenichev@codeaurora.org>   2017-09-21 13:19:38 -0700
commit     c988eaaeaf5f1194a7366ecfab9209d0fda13b0e (patch)
tree       81b5cf30dc473c449718e30ba5f526f2c409213e
parent     6f777b2385c98a17d69bbeead6edbc7ad7470f72 (diff)
parent     a8935c98cf97201e9ee50f22a27264a3220c82e8 (diff)
Merge android-4.4@a8935c9 (v4.4.87) into msm-4.4
* refs/heads/tmp-a8935c9:
  Linux 4.4.87
  crypto: algif_skcipher - only call put_page on referenced and used pages
  epoll: fix race between ep_poll_callback(POLLFREE) and ep_free()/ep_remove()
  kvm: arm/arm64: Force reading uncached stage2 PGD
  kvm: arm/arm64: Fix race in resetting stage2 PGD
  drm/ttm: Fix accounting error when fail to get pages for pool
  xfrm: policy: check policy direction value
  wl1251: add a missing spin_lock_init()
  CIFS: remove endian related sparse warning
  CIFS: Fix maximum SMB2 header size
  alpha: uapi: Add support for __SANE_USERSPACE_TYPES__
  cpuset: Fix incorrect memory_pressure control file mapping
  cpumask: fix spurious cpumask_of_node() on non-NUMA multi-node configs
  ceph: fix readpage from fscache
  i2c: ismt: Return EMSGSIZE for block reads with bogus length
  i2c: ismt: Don't duplicate the receive length for block reads
  irqchip: mips-gic: SYNC after enabling GIC region
  ANDROID: cpufreq-dt: Set sane defaults for schedutil rate limits
  BACKPORT: cpufreq: schedutil: Use policy-dependent transition delays
  FROMLIST: binder: fix an ret value override
  FROMLIST: binder: fix memory corruption in binder_transaction binder
  Linux 4.4.86
  drm/i915: fix compiler warning in drivers/gpu/drm/i915/intel_uncore.c
  scsi: sg: reset 'res_in_use' after unlinking reserved array
  scsi: sg: protect accesses to 'reserved' page array
  arm64: fpsimd: Prevent registers leaking across exec
  x86/io: Add "memory" clobber to insb/insw/insl/outsb/outsw/outsl
  arm64: mm: abort uaccess retries upon fatal signal
  lpfc: Fix Device discovery failures during switch reboot test.
  p54: memset(0) whole array
  lightnvm: initialize ppa_addr in dev_to_generic_addr()
  gcov: support GCC 7.1
  gcov: add support for gcc version >= 6
  i2c: jz4780: drop superfluous init
  btrfs: remove duplicate const specifier
  ALSA: au88x0: Fix zero clear of stream->resources
  scsi: isci: avoid array subscript warning
  sched: WALT: fix window mis-alignment
  sched: EAS: kill incorrect nohz idle cpu kick
  sched: EAS: fix incorrect energy delta calculation due to rounding error
  sched: EAS/WALT: take into account of waking task's load
  cpufreq: sched: WALT: don't apply capacity margin twice
  sched: WALT: fix potential overflow
  sched: EAS: schedfreq: fix CPU util over estimation
  sched: EAS/WALT: use cr_avg instead of prev_runnable_sum
  sched: WALT: fix broken cumulative runnable average accounting
  sched: deadline: WALT: account cumulative runnable avg
  FROMLIST: android: binder: Add page usage in binder stats
  FROMLIST: android: binder: Add shrinker tracepoints
  FROMLIST: android: binder: Add global lru shrinker to binder
  FROMLIST: android: binder: Move buffer out of area shared with user space
  FROMLIST: android: binder: Add allocator selftest
  FROMLIST: android: binder: Refactor prev and next buffer into a helper function
  android: android-base.config: enable IP6_NF_MATCH_RPFILTER
  UPSTREAM: cpufreq: schedutil: Use unsigned int for iowait boost
  UPSTREAM: cpufreq: schedutil: Make iowait boost more energy efficient

Conflicts:
	drivers/cpufreq/cpufreq-dt.c
	kernel/sched/deadline.c
	kernel/sched/fair.c
	kernel/sched/sched.h

Change-Id: Iee31db3fd1a0d1650ebf3d6de307a4e4637120b4
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
-rw-r--r--  Makefile  2
-rw-r--r--  android/configs/android-base.cfg  1
-rw-r--r--  arch/alpha/include/asm/types.h  2
-rw-r--r--  arch/alpha/include/uapi/asm/types.h  12
-rw-r--r--  arch/arm/kvm/mmu.c  23
-rw-r--r--  arch/arm64/kernel/fpsimd.c  2
-rw-r--r--  arch/arm64/mm/fault.c  5
-rw-r--r--  arch/x86/include/asm/io.h  4
-rw-r--r--  crypto/algif_skcipher.c  9
-rw-r--r--  drivers/android/Kconfig  10
-rw-r--r--  drivers/android/Makefile  1
-rw-r--r--  drivers/android/binder.c  8
-rw-r--r--  drivers/android/binder_alloc.c  396
-rw-r--r--  drivers/android/binder_alloc.h  32
-rw-r--r--  drivers/android/binder_alloc_selftest.c  310
-rw-r--r--  drivers/android/binder_trace.h  55
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c  7
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c  3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c  2
-rw-r--r--  drivers/i2c/busses/i2c-ismt.c  6
-rw-r--r--  drivers/i2c/busses/i2c-jz4780.c  4
-rw-r--r--  drivers/irqchip/irq-mips-gic.c  5
-rw-r--r--  drivers/net/wireless/p54/fwio.c  2
-rw-r--r--  drivers/net/wireless/ti/wl1251/main.c  1
-rw-r--r--  drivers/scsi/isci/remote_node_context.c  3
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c  5
-rw-r--r--  drivers/scsi/sg.c  49
-rw-r--r--  fs/btrfs/volumes.c  2
-rw-r--r--  fs/ceph/addr.c  24
-rw-r--r--  fs/ceph/cache.c  12
-rw-r--r--  fs/cifs/dir.c  2
-rw-r--r--  fs/cifs/smb2pdu.h  4
-rw-r--r--  fs/eventpoll.c  37
-rw-r--r--  include/asm-generic/topology.h  6
-rw-r--r--  include/linux/cpufreq.h  10
-rw-r--r--  include/linux/lightnvm.h  1
-rw-r--r--  kernel/cpuset.c  1
-rw-r--r--  kernel/gcov/base.c  6
-rw-r--r--  kernel/gcov/gcc_4_7.c  4
-rw-r--r--  kernel/sched/core.c  8
-rw-r--r--  kernel/sched/cpufreq_sched.c  14
-rw-r--r--  kernel/sched/cpufreq_schedutil.c  62
-rw-r--r--  kernel/sched/deadline.c  2
-rw-r--r--  kernel/sched/fair.c  44
-rw-r--r--  kernel/sched/sched.h  19
-rw-r--r--  kernel/sched/walt.c  8
-rw-r--r--  net/xfrm/xfrm_policy.c  6
-rw-r--r--  sound/pci/au88x0/au88x0_core.c  14
48 files changed, 1007 insertions, 238 deletions
diff --git a/Makefile b/Makefile
index 919851a86eaa..92dcacdc8a0d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 85
+SUBLEVEL = 87
EXTRAVERSION =
NAME = Blurry Fish Butt
diff --git a/android/configs/android-base.cfg b/android/configs/android-base.cfg
index 419dca62542a..d354338540bd 100644
--- a/android/configs/android-base.cfg
+++ b/android/configs/android-base.cfg
@@ -36,6 +36,7 @@ CONFIG_INET_XFRM_MODE_TUNNEL=y
CONFIG_IP6_NF_FILTER=y
CONFIG_IP6_NF_IPTABLES=y
CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
CONFIG_IP6_NF_RAW=y
CONFIG_IP6_NF_TARGET_REJECT=y
CONFIG_IPV6=y
diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h
index 4cb4b6d3452c..0bc66e1d3a7e 100644
--- a/arch/alpha/include/asm/types.h
+++ b/arch/alpha/include/asm/types.h
@@ -1,6 +1,6 @@
#ifndef _ALPHA_TYPES_H
#define _ALPHA_TYPES_H
-#include <asm-generic/int-ll64.h>
+#include <uapi/asm/types.h>
#endif /* _ALPHA_TYPES_H */
diff --git a/arch/alpha/include/uapi/asm/types.h b/arch/alpha/include/uapi/asm/types.h
index 9fd3cd459777..8d1024d7be05 100644
--- a/arch/alpha/include/uapi/asm/types.h
+++ b/arch/alpha/include/uapi/asm/types.h
@@ -9,8 +9,18 @@
* need to be careful to avoid a name clashes.
*/
-#ifndef __KERNEL__
+/*
+ * This is here because we used to use l64 for alpha
+ * and we don't want to impact user mode with our change to ll64
+ * in the kernel.
+ *
+ * However, some user programs are fine with this. They can
+ * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here.
+ */
+#if !defined(__SANE_USERSPACE_TYPES__) && !defined(__KERNEL__)
#include <asm-generic/int-l64.h>
+#else
+#include <asm-generic/int-ll64.h>
#endif
#endif /* _UAPI_ALPHA_TYPES_H */
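As a quick illustration of how the new escape hatch is consumed, here is a minimal userspace sketch (not part of the patch): defining __SANE_USERSPACE_TYPES__ before any UAPI include selects the ll64 flavour even on alpha, so __u64 prints with the usual %llu.

#define __SANE_USERSPACE_TYPES__	/* must come before any kernel UAPI header */
#include <asm/types.h>
#include <stdio.h>

int main(void)
{
	__u64 v = 1ULL << 40;	/* unsigned long long once int-ll64.h is selected */

	printf("%llu\n", (unsigned long long)v);
	return 0;
}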
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 360cea172b06..4c055a63c9c6 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -831,24 +831,25 @@ void stage2_unmap_vm(struct kvm *kvm)
* Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
* underlying level-2 and level-3 tables before freeing the actual level-1 table
* and setting the struct pointer to NULL.
- *
- * Note we don't need locking here as this is only called when the VM is
- * destroyed, which can only be done once.
*/
void kvm_free_stage2_pgd(struct kvm *kvm)
{
- if (kvm->arch.pgd == NULL)
- return;
+ void *pgd = NULL;
+ void *hwpgd = NULL;
spin_lock(&kvm->mmu_lock);
- unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+ if (kvm->arch.pgd) {
+ unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+ pgd = READ_ONCE(kvm->arch.pgd);
+ hwpgd = kvm_get_hwpgd(kvm);
+ kvm->arch.pgd = NULL;
+ }
spin_unlock(&kvm->mmu_lock);
- kvm_free_hwpgd(kvm_get_hwpgd(kvm));
- if (KVM_PREALLOC_LEVEL > 0)
- kfree(kvm->arch.pgd);
-
- kvm->arch.pgd = NULL;
+ if (hwpgd)
+ kvm_free_hwpgd(hwpgd);
+ if (KVM_PREALLOC_LEVEL > 0 && pgd)
+ kfree(pgd);
}
static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
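The rework above is the usual teardown idiom for state that two paths may race to free: snapshot and clear the shared fields while holding the lock, then release resources outside it, so the loser of the race sees NULL and does nothing. A userspace sketch of the same idiom, with hypothetical names:

#include <pthread.h>
#include <stdlib.h>

struct obj {
	pthread_mutex_t lock;
	void *resource;		/* shared; teardown may be raced */
};

static void obj_teardown(struct obj *o)
{
	void *res;

	pthread_mutex_lock(&o->lock);
	res = o->resource;	/* snapshot under the lock ... */
	o->resource = NULL;	/* ... and clear it for other racers */
	pthread_mutex_unlock(&o->lock);

	free(res);		/* free(NULL) is a no-op, so exactly-once */
}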
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 790d27e3b997..7950df171d86 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -202,9 +202,11 @@ void fpsimd_thread_switch(struct task_struct *next)
void fpsimd_flush_thread(void)
{
+ preempt_disable();
memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
fpsimd_flush_task_state(current);
set_thread_flag(TIF_FOREIGN_FPSTATE);
+ preempt_enable();
}
/*
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 2720d47da366..1f9a2620c692 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -370,8 +370,11 @@ retry:
* signal first. We do not need to release the mmap_sem because it
* would already be released in __lock_page_or_retry in mm/filemap.c.
*/
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+ if (!user_mode(regs))
+ goto no_context;
return 0;
+ }
/*
* Major/minor page fault accounting is only done on the initial
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index de25aad07853..9016b4b70375 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -304,13 +304,13 @@ static inline unsigned type in##bwl##_p(int port) \
static inline void outs##bwl(int port, const void *addr, unsigned long count) \
{ \
asm volatile("rep; outs" #bwl \
- : "+S"(addr), "+c"(count) : "d"(port)); \
+ : "+S"(addr), "+c"(count) : "d"(port) : "memory"); \
} \
\
static inline void ins##bwl(int port, void *addr, unsigned long count) \
{ \
asm volatile("rep; ins" #bwl \
- : "+D"(addr), "+c"(count) : "d"(port)); \
+ : "+D"(addr), "+c"(count) : "d"(port) : "memory"); \
}
BUILDIO(b, b, char)
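The "memory" clobber tells GCC that the rep string instruction reads or writes the buffer behind addr; without it, the compiler sees only the register operands and may reorder or eliminate the stores that fill an outs buffer, or the loads that consume an ins buffer. A standalone rendering of the fixed ins side (x86-only, name illustrative):

static inline void toy_insb(unsigned short port, void *addr,
			    unsigned long count)
{
	/* "+D"/"+c" cover the registers; "memory" covers *addr */
	asm volatile("rep; insb"
		     : "+D"(addr), "+c"(count)
		     : "d"(port)
		     : "memory");
}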
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index f5e9f9310b48..b3b0004ea8ac 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -86,8 +86,13 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
}
sgl = sreq->tsg;
n = sg_nents(sgl);
- for_each_sg(sgl, sg, n, i)
- put_page(sg_page(sg));
+ for_each_sg(sgl, sg, n, i) {
+ struct page *page = sg_page(sg);
+
+ /* some SGs may not have a page mapped */
+ if (page && atomic_read(&page->_count))
+ put_page(page);
+ }
kfree(sreq->tsg);
}
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 4d4cdc1a6e25..01de42c8b74b 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -44,6 +44,16 @@ config ANDROID_BINDER_IPC_32BIT
Note that enabling this will break newer Android user-space.
+config ANDROID_BINDER_IPC_SELFTEST
+ bool "Android Binder IPC Driver Selftest"
+ depends on ANDROID_BINDER_IPC
+ ---help---
+ This feature allows binder selftest to run.
+
+ Binder selftest checks the allocation and free of binder buffers
+ exhaustively with combinations of various buffer sizes and
+ alignments.
+
endif # if ANDROID
endmenu
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index 4b7c726bb560..a01254c43ee3 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -1,3 +1,4 @@
ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
+obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 13598d807de0..3419cb0b4447 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2481,7 +2481,6 @@ static int binder_translate_handle(struct flat_binder_object *fp,
(u64)node->ptr);
binder_node_unlock(node);
} else {
- int ret;
struct binder_ref_data dest_rdata;
binder_node_unlock(node);
@@ -3253,6 +3252,7 @@ static void binder_transaction(struct binder_proc *proc,
err_dead_proc_or_thread:
return_error = BR_DEAD_REPLY;
return_error_line = __LINE__;
+ binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
@@ -4580,6 +4580,8 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/*pr_info("binder_ioctl: %d:%d %x %lx\n",
proc->pid, current->pid, cmd, arg);*/
+ binder_selftest_alloc(&proc->alloc);
+
trace_binder_ioctl(cmd, arg);
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
@@ -5425,6 +5427,8 @@ static void print_binder_proc_stats(struct seq_file *m,
count = binder_alloc_get_allocated_count(&proc->alloc);
seq_printf(m, " buffers: %d\n", count);
+ binder_alloc_print_pages(m, &proc->alloc);
+
count = 0;
binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry) {
@@ -5621,6 +5625,8 @@ static int __init binder_init(void)
struct binder_device *device;
struct hlist_node *tmp;
+ binder_alloc_shrinker_init();
+
atomic_set(&binder_transaction_log.cur, ~0U);
atomic_set(&binder_transaction_log_failed.cur, ~0U);
binder_deferred_workqueue = create_singlethread_workqueue("binder");
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index aabfebac6e57..1c1a7d9c86ed 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -27,9 +27,12 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/list_lru.h>
#include "binder_alloc.h"
#include "binder_trace.h"
+struct list_lru binder_alloc_lru;
+
static DEFINE_MUTEX(binder_alloc_mmap_lock);
enum {
@@ -48,14 +51,23 @@ module_param_named(debug_mask, binder_alloc_debug_mask,
pr_info(x); \
} while (0)
+static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
+{
+ return list_entry(buffer->entry.next, struct binder_buffer, entry);
+}
+
+static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
+{
+ return list_entry(buffer->entry.prev, struct binder_buffer, entry);
+}
+
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
if (list_is_last(&buffer->entry, &alloc->buffers))
- return alloc->buffer +
- alloc->buffer_size - (void *)buffer->data;
- return (size_t)list_entry(buffer->entry.next,
- struct binder_buffer, entry) - (size_t)buffer->data;
+ return (u8 *)alloc->buffer +
+ alloc->buffer_size - (u8 *)buffer->data;
+ return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}
static void binder_insert_free_buffer(struct binder_alloc *alloc,
@@ -105,9 +117,9 @@ static void binder_insert_allocated_buffer_locked(
buffer = rb_entry(parent, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
- if (new_buffer < buffer)
+ if (new_buffer->data < buffer->data)
p = &parent->rb_left;
- else if (new_buffer > buffer)
+ else if (new_buffer->data > buffer->data)
p = &parent->rb_right;
else
BUG();
@@ -122,18 +134,17 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
{
struct rb_node *n = alloc->allocated_buffers.rb_node;
struct binder_buffer *buffer;
- struct binder_buffer *kern_ptr;
+ void *kern_ptr;
- kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
- - offsetof(struct binder_buffer, data));
+ kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
- if (kern_ptr < buffer)
+ if (kern_ptr < buffer->data)
n = n->rb_left;
- else if (kern_ptr > buffer)
+ else if (kern_ptr > buffer->data)
n = n->rb_right;
else {
/*
@@ -180,8 +191,9 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
{
void *page_addr;
unsigned long user_page_addr;
- struct page **page;
- struct mm_struct *mm;
+ struct binder_lru_page *page;
+ struct mm_struct *mm = NULL;
+ bool need_mm = false;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: %s pages %pK-%pK\n", alloc->pid,
@@ -192,9 +204,18 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
trace_binder_update_page_range(alloc, allocate, start, end);
- if (vma)
- mm = NULL;
- else
+ if (allocate == 0)
+ goto free_range;
+
+ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+ if (!page->page_ptr) {
+ need_mm = true;
+ break;
+ }
+ }
+
+ if (!vma && need_mm)
mm = get_task_mm(alloc->tsk);
if (mm) {
@@ -207,10 +228,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
}
- if (allocate == 0)
- goto free_range;
-
- if (vma == NULL) {
+ if (!vma && need_mm) {
pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
alloc->pid);
goto err_no_vma;
@@ -218,18 +236,40 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
int ret;
+ bool on_lru;
+ size_t index;
- page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+ index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ page = &alloc->pages[index];
+
+ if (page->page_ptr) {
+ trace_binder_alloc_lru_start(alloc, index);
- BUG_ON(*page);
- *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
- if (*page == NULL) {
+ on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
+ WARN_ON(!on_lru);
+
+ trace_binder_alloc_lru_end(alloc, index);
+ continue;
+ }
+
+ if (WARN_ON(!vma))
+ goto err_page_ptr_cleared;
+
+ trace_binder_alloc_page_start(alloc, index);
+ page->page_ptr = alloc_page(GFP_KERNEL |
+ __GFP_HIGHMEM |
+ __GFP_ZERO);
+ if (!page->page_ptr) {
pr_err("%d: binder_alloc_buf failed for page at %pK\n",
alloc->pid, page_addr);
goto err_alloc_page_failed;
}
+ page->alloc = alloc;
+ INIT_LIST_HEAD(&page->lru);
+
ret = map_kernel_range_noflush((unsigned long)page_addr,
- PAGE_SIZE, PAGE_KERNEL, page);
+ PAGE_SIZE, PAGE_KERNEL,
+ &page->page_ptr);
flush_cache_vmap((unsigned long)page_addr,
(unsigned long)page_addr + PAGE_SIZE);
if (ret != 1) {
@@ -239,12 +279,14 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
user_page_addr =
(uintptr_t)page_addr + alloc->user_buffer_offset;
- ret = vm_insert_page(vma, user_page_addr, page[0]);
+ ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
if (ret) {
pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
alloc->pid, user_page_addr);
goto err_vm_insert_page_failed;
}
+
+ trace_binder_alloc_page_end(alloc, index);
/* vm_insert_page does not seem to increment the refcount */
}
if (mm) {
@@ -256,16 +298,27 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
free_range:
for (page_addr = end - PAGE_SIZE; page_addr >= start;
page_addr -= PAGE_SIZE) {
- page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
- if (vma)
- zap_page_range(vma, (uintptr_t)page_addr +
- alloc->user_buffer_offset, PAGE_SIZE, NULL);
+ bool ret;
+ size_t index;
+
+ index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ page = &alloc->pages[index];
+
+ trace_binder_free_lru_start(alloc, index);
+
+ ret = list_lru_add(&binder_alloc_lru, &page->lru);
+ WARN_ON(!ret);
+
+ trace_binder_free_lru_end(alloc, index);
+ continue;
+
err_vm_insert_page_failed:
unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
- __free_page(*page);
- *page = NULL;
+ __free_page(page->page_ptr);
+ page->page_ptr = NULL;
err_alloc_page_failed:
+err_page_ptr_cleared:
;
}
err_no_vma:
@@ -321,6 +374,9 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
return ERR_PTR(-ENOSPC);
}
+ /* Pad 0-size buffers so they get assigned unique addresses */
+ size = max(size, sizeof(void *));
+
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(!buffer->free);
@@ -380,14 +436,9 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
has_page_addr =
(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
- if (n == NULL) {
- if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
- buffer_size = size; /* no room for other buffers */
- else
- buffer_size = size + sizeof(struct binder_buffer);
- }
+ WARN_ON(n && buffer_size != size);
end_page_addr =
- (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
if (end_page_addr > has_page_addr)
end_page_addr = has_page_addr;
ret = binder_update_page_range(alloc, 1,
@@ -395,17 +446,25 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
if (ret)
return ERR_PTR(ret);
- rb_erase(best_fit, &alloc->free_buffers);
- buffer->free = 0;
- buffer->free_in_progress = 0;
- binder_insert_allocated_buffer_locked(alloc, buffer);
if (buffer_size != size) {
- struct binder_buffer *new_buffer = (void *)buffer->data + size;
+ struct binder_buffer *new_buffer;
+ new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!new_buffer) {
+ pr_err("%s: %d failed to alloc new buffer struct\n",
+ __func__, alloc->pid);
+ goto err_alloc_buf_struct_failed;
+ }
+ new_buffer->data = (u8 *)buffer->data + size;
list_add(&new_buffer->entry, &buffer->entry);
new_buffer->free = 1;
binder_insert_free_buffer(alloc, new_buffer);
}
+
+ rb_erase(best_fit, &alloc->free_buffers);
+ buffer->free = 0;
+ buffer->free_in_progress = 0;
+ binder_insert_allocated_buffer_locked(alloc, buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_alloc_buf size %zd got %pK\n",
alloc->pid, size, buffer);
@@ -420,6 +479,12 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
alloc->pid, size, alloc->free_async_space);
}
return buffer;
+
+err_alloc_buf_struct_failed:
+ binder_update_page_range(alloc, 0,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ end_page_addr, NULL);
+ return ERR_PTR(-ENOMEM);
}
/**
@@ -454,57 +519,59 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
static void *buffer_start_page(struct binder_buffer *buffer)
{
- return (void *)((uintptr_t)buffer & PAGE_MASK);
+ return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}
-static void *buffer_end_page(struct binder_buffer *buffer)
+static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
- return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+ return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
}
static void binder_delete_free_buffer(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
struct binder_buffer *prev, *next = NULL;
- int free_page_end = 1;
- int free_page_start = 1;
-
+ bool to_free = true;
BUG_ON(alloc->buffers.next == &buffer->entry);
- prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
+ prev = binder_buffer_prev(buffer);
BUG_ON(!prev->free);
- if (buffer_end_page(prev) == buffer_start_page(buffer)) {
- free_page_start = 0;
- if (buffer_end_page(prev) == buffer_end_page(buffer))
- free_page_end = 0;
+ if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
+ to_free = false;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %pK share page with %pK\n",
- alloc->pid, buffer, prev);
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid, buffer->data, prev->data);
}
if (!list_is_last(&buffer->entry, &alloc->buffers)) {
- next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
- if (buffer_start_page(next) == buffer_end_page(buffer)) {
- free_page_end = 0;
- if (buffer_start_page(next) ==
- buffer_start_page(buffer))
- free_page_start = 0;
+ next = binder_buffer_next(buffer);
+ if (buffer_start_page(next) == buffer_start_page(buffer)) {
+ to_free = false;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %pK share page with %pK\n",
- alloc->pid, buffer, prev);
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid,
+ buffer->data,
+ next->data);
}
}
- list_del(&buffer->entry);
- if (free_page_start || free_page_end) {
+
+ if (PAGE_ALIGNED(buffer->data)) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
- alloc->pid, buffer, free_page_start ? "" : " end",
- free_page_end ? "" : " start", prev, next);
- binder_update_page_range(alloc, 0, free_page_start ?
- buffer_start_page(buffer) : buffer_end_page(buffer),
- (free_page_end ? buffer_end_page(buffer) :
- buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+ "%d: merge free, buffer start %pK is page aligned\n",
+ alloc->pid, buffer->data);
+ to_free = false;
}
+
+ if (to_free) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
+ alloc->pid, buffer->data,
+ prev->data, next->data);
+ binder_update_page_range(alloc, 0, buffer_start_page(buffer),
+ buffer_start_page(buffer) + PAGE_SIZE,
+ NULL);
+ }
+ list_del(&buffer->entry);
+ kfree(buffer);
}
static void binder_free_buf_locked(struct binder_alloc *alloc,
@@ -525,8 +592,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
BUG_ON(buffer->free);
BUG_ON(size > buffer_size);
BUG_ON(buffer->transaction != NULL);
- BUG_ON((void *)buffer < alloc->buffer);
- BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);
+ BUG_ON(buffer->data < alloc->buffer);
+ BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
if (buffer->async_transaction) {
alloc->free_async_space += size + sizeof(struct binder_buffer);
@@ -544,8 +611,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
buffer->free = 1;
if (!list_is_last(&buffer->entry, &alloc->buffers)) {
- struct binder_buffer *next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
+ struct binder_buffer *next = binder_buffer_next(buffer);
if (next->free) {
rb_erase(&next->rb_node, &alloc->free_buffers);
@@ -553,8 +619,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
}
}
if (alloc->buffers.next != &buffer->entry) {
- struct binder_buffer *prev = list_entry(buffer->entry.prev,
- struct binder_buffer, entry);
+ struct binder_buffer *prev = binder_buffer_prev(buffer);
if (prev->free) {
binder_delete_free_buffer(alloc, buffer);
@@ -640,14 +705,14 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
}
alloc->buffer_size = vma->vm_end - vma->vm_start;
- if (binder_update_page_range(alloc, 1, alloc->buffer,
- alloc->buffer + PAGE_SIZE, vma)) {
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
ret = -ENOMEM;
- failure_string = "alloc small buf";
- goto err_alloc_small_buf_failed;
+ failure_string = "alloc buffer struct";
+ goto err_alloc_buf_struct_failed;
}
- buffer = alloc->buffer;
- INIT_LIST_HEAD(&alloc->buffers);
+
+ buffer->data = alloc->buffer;
list_add(&buffer->entry, &alloc->buffers);
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
@@ -658,7 +723,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
return 0;
-err_alloc_small_buf_failed:
+err_alloc_buf_struct_failed:
kfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
@@ -678,14 +743,13 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
struct rb_node *n;
int buffers, page_count;
+ struct binder_buffer *buffer;
BUG_ON(alloc->vma);
buffers = 0;
mutex_lock(&alloc->mutex);
while ((n = rb_first(&alloc->allocated_buffers))) {
- struct binder_buffer *buffer;
-
buffer = rb_entry(n, struct binder_buffer, rb_node);
/* Transaction should already have been freed */
@@ -695,22 +759,36 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
buffers++;
}
+ while (!list_empty(&alloc->buffers)) {
+ buffer = list_first_entry(&alloc->buffers,
+ struct binder_buffer, entry);
+ WARN_ON(!buffer->free);
+
+ list_del(&buffer->entry);
+ WARN_ON_ONCE(!list_empty(&alloc->buffers));
+ kfree(buffer);
+ }
+
page_count = 0;
if (alloc->pages) {
int i;
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
void *page_addr;
+ bool on_lru;
- if (!alloc->pages[i])
+ if (!alloc->pages[i].page_ptr)
continue;
+ on_lru = list_lru_del(&binder_alloc_lru,
+ &alloc->pages[i].lru);
page_addr = alloc->buffer + i * PAGE_SIZE;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%s: %d: page %d at %pK not freed\n",
- __func__, alloc->pid, i, page_addr);
+ "%s: %d: page %d at %pK %s\n",
+ __func__, alloc->pid, i, page_addr,
+ on_lru ? "on lru" : "active");
unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
- __free_page(alloc->pages[i]);
+ __free_page(alloc->pages[i].page_ptr);
page_count++;
}
kfree(alloc->pages);
@@ -754,6 +832,34 @@ void binder_alloc_print_allocated(struct seq_file *m,
}
/**
+ * binder_alloc_print_pages() - print page usage
+ * @m: seq_file for output via seq_printf()
+ * @alloc: binder_alloc for this proc
+ */
+void binder_alloc_print_pages(struct seq_file *m,
+ struct binder_alloc *alloc)
+{
+ struct binder_lru_page *page;
+ int i;
+ int active = 0;
+ int lru = 0;
+ int free = 0;
+
+ mutex_lock(&alloc->mutex);
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ page = &alloc->pages[i];
+ if (!page->page_ptr)
+ free++;
+ else if (list_empty(&page->lru))
+ active++;
+ else
+ lru++;
+ }
+ mutex_unlock(&alloc->mutex);
+ seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
+}
+
+/**
* binder_alloc_get_allocated_count() - return count of buffers
* @alloc: binder_alloc for this proc
*
@@ -787,6 +893,102 @@ void binder_alloc_vma_close(struct binder_alloc *alloc)
}
/**
+ * binder_alloc_free_page() - shrinker callback to free pages
+ * @item: item to free
+ * @lock: lock protecting the item
+ * @cb_arg: callback argument
+ *
+ * Called from list_lru_walk() in binder_shrink_scan() to free
+ * up pages when the system is under memory pressure.
+ */
+enum lru_status binder_alloc_free_page(struct list_head *item,
+ struct list_lru_one *lru,
+ spinlock_t *lock,
+ void *cb_arg)
+{
+ struct mm_struct *mm = NULL;
+ struct binder_lru_page *page = container_of(item,
+ struct binder_lru_page,
+ lru);
+ struct binder_alloc *alloc;
+ uintptr_t page_addr;
+ size_t index;
+
+ alloc = page->alloc;
+ if (!mutex_trylock(&alloc->mutex))
+ goto err_get_alloc_mutex_failed;
+
+ if (!page->page_ptr)
+ goto err_page_already_freed;
+
+ index = page - alloc->pages;
+ page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+ if (alloc->vma) {
+ mm = get_task_mm(alloc->tsk);
+ if (!mm)
+ goto err_get_task_mm_failed;
+ if (!down_write_trylock(&mm->mmap_sem))
+ goto err_down_write_mmap_sem_failed;
+
+ trace_binder_unmap_user_start(alloc, index);
+
+ zap_page_range(alloc->vma,
+ page_addr +
+ alloc->user_buffer_offset,
+ PAGE_SIZE, NULL);
+
+ trace_binder_unmap_user_end(alloc, index);
+
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+
+ trace_binder_unmap_kernel_start(alloc, index);
+
+ unmap_kernel_range(page_addr, PAGE_SIZE);
+ __free_page(page->page_ptr);
+ page->page_ptr = NULL;
+
+ trace_binder_unmap_kernel_end(alloc, index);
+
+ list_lru_isolate(lru, item);
+
+ mutex_unlock(&alloc->mutex);
+ return LRU_REMOVED;
+
+err_down_write_mmap_sem_failed:
+ mmput(mm);
+err_get_task_mm_failed:
+err_page_already_freed:
+ mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
+ return LRU_SKIP;
+}
+
+static unsigned long
+binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ unsigned long ret = list_lru_count(&binder_alloc_lru);
+ return ret;
+}
+
+static unsigned long
+binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+ unsigned long ret;
+
+ ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+ NULL, sc->nr_to_scan);
+ return ret;
+}
+
+struct shrinker binder_shrinker = {
+ .count_objects = binder_shrink_count,
+ .scan_objects = binder_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
+/**
* binder_alloc_init() - called by binder_open() for per-proc initialization
* @alloc: binder_alloc for this proc
*
@@ -798,5 +1000,11 @@ void binder_alloc_init(struct binder_alloc *alloc)
alloc->tsk = current->group_leader;
alloc->pid = current->group_leader->pid;
mutex_init(&alloc->mutex);
+ INIT_LIST_HEAD(&alloc->buffers);
}
+void binder_alloc_shrinker_init(void)
+{
+ list_lru_init(&binder_alloc_lru);
+ register_shrinker(&binder_shrinker);
+}
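binder_shrink_count()/binder_shrink_scan() follow the standard split used since 3.12: a cheap count callback plus a scan callback that walks the lru with a per-item reclaim function. A condensed sketch of the same wiring as a self-contained module (assuming a 4.4-era tree; all names here are illustrative):

#include <linux/module.h>
#include <linux/list_lru.h>
#include <linux/shrinker.h>

static struct list_lru demo_lru;

static enum lru_status demo_free_one(struct list_head *item,
				     struct list_lru_one *lru,
				     spinlock_t *lock, void *cb_arg)
{
	/* reclaim the object backing @item here, then drop it */
	list_lru_isolate(lru, item);
	return LRU_REMOVED;
}

static unsigned long demo_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	return list_lru_count(&demo_lru);	/* cheap, no reclaim */
}

static unsigned long demo_scan(struct shrinker *shrink,
			       struct shrink_control *sc)
{
	return list_lru_walk(&demo_lru, demo_free_one,
			     NULL, sc->nr_to_scan);
}

static struct shrinker demo_shrinker = {
	.count_objects = demo_count,
	.scan_objects  = demo_scan,
	.seeks         = DEFAULT_SEEKS,
};

static int __init demo_init(void)
{
	int ret = list_lru_init(&demo_lru);

	return ret ? ret : register_shrinker(&demo_shrinker);
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	unregister_shrinker(&demo_shrinker);
	list_lru_destroy(&demo_lru);
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");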
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 088e4ffc6230..a3a3602c689c 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -21,7 +21,9 @@
#include <linux/rtmutex.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#include <linux/list_lru.h>
+extern struct list_lru binder_alloc_lru;
struct binder_transaction;
/**
@@ -57,7 +59,19 @@ struct binder_buffer {
size_t data_size;
size_t offsets_size;
size_t extra_buffers_size;
- uint8_t data[0];
+ void *data;
+};
+
+/**
+ * struct binder_lru_page - page object used for binder shrinker
+ * @page_ptr: pointer to physical page in mmap'd space
+ * @lru: entry in binder_alloc_lru
+ * @alloc: binder_alloc for a proc
+ */
+struct binder_lru_page {
+ struct list_head lru;
+ struct page *page_ptr;
+ struct binder_alloc *alloc;
};
/**
@@ -75,8 +89,7 @@ struct binder_buffer {
* @allocated_buffers: rb tree of allocated buffers sorted by address
* @free_async_space: VA space available for async buffers. This is
* initialized at mmap time to 1/2 the full VA space
- * @pages: array of physical page addresses for each
- * page of mmap'd space
+ * @pages: array of binder_lru_page
* @buffer_size: size of address space specified via mmap
* @pid: pid for associated binder_proc (invariant after init)
*
@@ -96,18 +109,27 @@ struct binder_alloc {
struct rb_root free_buffers;
struct rb_root allocated_buffers;
size_t free_async_space;
- struct page **pages;
+ struct binder_lru_page *pages;
size_t buffer_size;
uint32_t buffer_free;
int pid;
};
+#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
+void binder_selftest_alloc(struct binder_alloc *alloc);
+#else
+static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
+#endif
+enum lru_status binder_alloc_free_page(struct list_head *item,
+ struct list_lru_one *lru,
+ spinlock_t *lock, void *cb_arg);
extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
int is_async);
extern void binder_alloc_init(struct binder_alloc *alloc);
+void binder_alloc_shrinker_init(void);
extern void binder_alloc_vma_close(struct binder_alloc *alloc);
extern struct binder_buffer *
binder_alloc_prepare_to_free(struct binder_alloc *alloc,
@@ -120,6 +142,8 @@ extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
extern void binder_alloc_print_allocated(struct seq_file *m,
struct binder_alloc *alloc);
+void binder_alloc_print_pages(struct seq_file *m,
+ struct binder_alloc *alloc);
/**
* binder_alloc_get_free_async_space() - get free space available for async
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
new file mode 100644
index 000000000000..8bd7bcef967d
--- /dev/null
+++ b/drivers/android/binder_alloc_selftest.c
@@ -0,0 +1,310 @@
+/* binder_alloc_selftest.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm_types.h>
+#include <linux/err.h>
+#include "binder_alloc.h"
+
+#define BUFFER_NUM 5
+#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)
+
+static bool binder_selftest_run = true;
+static int binder_selftest_failures;
+static DEFINE_MUTEX(binder_selftest_lock);
+
+/**
+ * enum buf_end_align_type - Page alignment of a buffer
+ * end with regard to the end of the previous buffer.
+ *
+ * In the pictures below, buf2 refers to the buffer we
+ * are aligning. buf1 refers to previous buffer by addr.
+ * Symbol [ means the start of a buffer, ] means the end
+ * of a buffer, and | means page boundaries.
+ */
+enum buf_end_align_type {
+ /**
+ * @SAME_PAGE_UNALIGNED: The end of this buffer is on
+ * the same page as the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 ][ ...
+ * buf1 ]|[ buf2 ][ ...
+ */
+ SAME_PAGE_UNALIGNED = 0,
+ /**
+ * @SAME_PAGE_ALIGNED: When the end of the previous buffer
+ * is not page aligned, the end of this buffer is on the
+ * same page as the end of the previous buffer and is page
+ * aligned. When the previous buffer is page aligned, the
+ * end of this buffer is aligned to the next page boundary.
+ * Examples:
+ * buf1 ][ buf2 ]| ...
+ * buf1 ]|[ buf2 ]| ...
+ */
+ SAME_PAGE_ALIGNED,
+ /**
+ * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 ][ ...
+ */
+ NEXT_PAGE_UNALIGNED,
+ /**
+ * @NEXT_PAGE_ALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ]| ...
+ * buf1 ]|[ buf2 | buf2 ]| ...
+ */
+ NEXT_PAGE_ALIGNED,
+ /**
+ * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
+ * the page that follows the page after the end of the
+ * previous buffer and is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
+ */
+ NEXT_NEXT_UNALIGNED,
+ LOOP_END,
+};
+
+static void pr_err_size_seq(size_t *sizes, int *seq)
+{
+ int i;
+
+ pr_err("alloc sizes: ");
+ for (i = 0; i < BUFFER_NUM; i++)
+ pr_cont("[%zu]", sizes[i]);
+ pr_cont("\n");
+ pr_err("free seq: ");
+ for (i = 0; i < BUFFER_NUM; i++)
+ pr_cont("[%d]", seq[i]);
+ pr_cont("\n");
+}
+
+static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ size_t size)
+{
+ void *page_addr, *end;
+ int page_index;
+
+ end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+ page_addr = buffer->data;
+ for (; page_addr < end; page_addr += PAGE_SIZE) {
+ page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ if (!alloc->pages[page_index].page_ptr ||
+ !list_empty(&alloc->pages[page_index].lru)) {
+ pr_err("expect alloc but is %s at page index %d\n",
+ alloc->pages[page_index].page_ptr ?
+ "lru" : "free", page_index);
+ return false;
+ }
+ }
+ return true;
+}
+
+static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++) {
+ buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
+ if (IS_ERR(buffers[i]) ||
+ !check_buffer_pages_allocated(alloc, buffers[i],
+ sizes[i])) {
+ pr_err_size_seq(sizes, seq);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq, size_t end)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++)
+ binder_alloc_free_buf(alloc, buffers[seq[i]]);
+
+ for (i = 0; i < end / PAGE_SIZE; i++) {
+ /**
+ * Error message on a free page can be false positive
+ * if binder shrinker ran during binder_alloc_free_buf
+ * calls above.
+ */
+ if (list_empty(&alloc->pages[i].lru)) {
+ pr_err_size_seq(sizes, seq);
+ pr_err("expect lru but is %s at page index %d\n",
+ alloc->pages[i].page_ptr ? "alloc" : "free", i);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_free_page(struct binder_alloc *alloc)
+{
+ int i;
+ unsigned long count;
+
+ while ((count = list_lru_count(&binder_alloc_lru))) {
+ list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+ NULL, count);
+ }
+
+ for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
+ if (alloc->pages[i].page_ptr) {
+ pr_err("expect free but is %s at page index %d\n",
+ list_empty(&alloc->pages[i].lru) ?
+ "alloc" : "lru", i);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_alloc_free(struct binder_alloc *alloc,
+ size_t *sizes, int *seq, size_t end)
+{
+ struct binder_buffer *buffers[BUFFER_NUM];
+
+ binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+ binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+
+ /* Allocate from lru. */
+ binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+ if (list_lru_count(&binder_alloc_lru))
+ pr_err("lru list should be empty but is not\n");
+
+ binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+ binder_selftest_free_page(alloc);
+}
+
+static bool is_dup(int *seq, int index, int val)
+{
+ int i;
+
+ for (i = 0; i < index; i++) {
+ if (seq[i] == val)
+ return true;
+ }
+ return false;
+}
+
+/* Generate BUFFER_NUM factorial free orders. */
+static void binder_selftest_free_seq(struct binder_alloc *alloc,
+ size_t *sizes, int *seq,
+ int index, size_t end)
+{
+ int i;
+
+ if (index == BUFFER_NUM) {
+ binder_selftest_alloc_free(alloc, sizes, seq, end);
+ return;
+ }
+ for (i = 0; i < BUFFER_NUM; i++) {
+ if (is_dup(seq, index, i))
+ continue;
+ seq[index] = i;
+ binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
+ }
+}
+
+static void binder_selftest_alloc_size(struct binder_alloc *alloc,
+ size_t *end_offset)
+{
+ int i;
+ int seq[BUFFER_NUM] = {0};
+ size_t front_sizes[BUFFER_NUM];
+ size_t back_sizes[BUFFER_NUM];
+ size_t last_offset, offset = 0;
+
+ for (i = 0; i < BUFFER_NUM; i++) {
+ last_offset = offset;
+ offset = end_offset[i];
+ front_sizes[i] = offset - last_offset;
+ back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
+ }
+ /*
+ * Buffers share the first or last few pages.
+ * Only BUFFER_NUM - 1 buffer sizes are adjustable since
+ * we need one giant buffer before getting to the last page.
+ */
+ back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
+ binder_selftest_free_seq(alloc, front_sizes, seq, 0,
+ end_offset[BUFFER_NUM - 1]);
+ binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
+}
+
+static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
+ size_t *end_offset, int index)
+{
+ int align;
+ size_t end, prev;
+
+ if (index == BUFFER_NUM) {
+ binder_selftest_alloc_size(alloc, end_offset);
+ return;
+ }
+ prev = index == 0 ? 0 : end_offset[index - 1];
+ end = prev;
+
+ BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
+
+ for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
+ if (align % 2)
+ end = ALIGN(end, PAGE_SIZE);
+ else
+ end += BUFFER_MIN_SIZE;
+ end_offset[index] = end;
+ binder_selftest_alloc_offset(alloc, end_offset, index + 1);
+ }
+}
+
+/**
+ * binder_selftest_alloc() - Test alloc and free of buffer pages.
+ * @alloc: Pointer to alloc struct.
+ *
+ * Allocate BUFFER_NUM buffers to cover all page alignment cases,
+ * then free them in all orders possible. Check that pages are
+ * correctly allocated, put onto lru when buffers are freed, and
+ * are freed when binder_alloc_free_page is called.
+ */
+void binder_selftest_alloc(struct binder_alloc *alloc)
+{
+ size_t end_offset[BUFFER_NUM];
+
+ if (!binder_selftest_run)
+ return;
+ mutex_lock(&binder_selftest_lock);
+ if (!binder_selftest_run || !alloc->vma)
+ goto done;
+ pr_info("STARTED\n");
+ binder_selftest_alloc_offset(alloc, end_offset, 0);
+ binder_selftest_run = false;
+ if (binder_selftest_failures > 0)
+ pr_info("%d tests FAILED\n", binder_selftest_failures);
+ else
+ pr_info("PASSED\n");
+
+done:
+ mutex_unlock(&binder_selftest_lock);
+}
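binder_selftest_free_seq() enumerates all BUFFER_NUM! free orders by recursive selection with a duplicate check rather than a swap-based permutation. Reduced to a standalone program (N arbitrary), the generator looks like this:

#include <stdio.h>

#define N 3

static int is_dup(const int *seq, int index, int val)
{
	int i;

	for (i = 0; i < index; i++)
		if (seq[i] == val)
			return 1;
	return 0;
}

/* emit every permutation of 0..N-1, one per line */
static void gen(int *seq, int index)
{
	int i;

	if (index == N) {
		for (i = 0; i < N; i++)
			printf("%d ", seq[i]);
		printf("\n");
		return;
	}
	for (i = 0; i < N; i++) {
		if (is_dup(seq, index, i))
			continue;
		seq[index] = i;
		gen(seq, index + 1);
	}
}

int main(void)
{
	int seq[N];

	gen(seq, 0);
	return 0;
}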
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 7967db16ba5a..76e3b9c8a8a2 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -291,6 +291,61 @@ TRACE_EVENT(binder_update_page_range,
__entry->offset, __entry->size)
);
+DECLARE_EVENT_CLASS(binder_lru_page_class,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index),
+ TP_STRUCT__entry(
+ __field(int, proc)
+ __field(size_t, page_index)
+ ),
+ TP_fast_assign(
+ __entry->proc = alloc->pid;
+ __entry->page_index = page_index;
+ ),
+ TP_printk("proc=%d page_index=%zu",
+ __entry->proc, __entry->page_index)
+);
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
TRACE_EVENT(binder_command,
TP_PROTO(uint32_t cmd),
TP_ARGS(cmd),
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index f951f911786e..a72ae98b4838 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -279,6 +279,13 @@ static int cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = transition_latency;
+ /*
+ * Android: set default parameters for parity between schedutil and
+ * schedfreq
+ */
+ policy->up_transition_delay_us = transition_latency / NSEC_PER_USEC;
+ policy->down_transition_delay_us = 50000; /* 50ms */
+
return 0;
out_free_cpufreq_table:
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index cc91ae832ffb..6fd7b50c5747 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -635,7 +635,8 @@ hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
"enabling oneshot unclaimed register reporting. "
"Please use i915.mmio_debug=N for more information.\n");
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
- i915.mmio_debug = mmio_debug_once--;
+ i915.mmio_debug = mmio_debug_once;
+ mmio_debug_once = false;
}
}
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 025c429050c0..5d8dfe027b30 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -612,7 +612,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
} else {
pr_err("Failed to fill pool (%p)\n", pool);
/* If we have any pages left put them to the pool. */
- list_for_each_entry(p, &pool->list, lru) {
+ list_for_each_entry(p, &new_pages, lru) {
++cpages;
}
list_splice(&new_pages, &pool->list);
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 7ba795b24e75..639d1a9c8793 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -339,8 +339,10 @@ static int ismt_process_desc(const struct ismt_desc *desc,
break;
case I2C_SMBUS_BLOCK_DATA:
case I2C_SMBUS_I2C_BLOCK_DATA:
- memcpy(&data->block[1], dma_buffer, desc->rxbytes);
- data->block[0] = desc->rxbytes;
+ if (desc->rxbytes != dma_buffer[0] + 1)
+ return -EMSGSIZE;
+
+ memcpy(data->block, dma_buffer, desc->rxbytes);
break;
}
return 0;
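For block reads the device reports the payload length in-band as dma_buffer[0], so the fix rejects any descriptor whose claimed receive count disagrees before copying anything, and copies the length byte together with the payload instead of writing it a second time. The shape of the check as a standalone helper (illustrative):

#include <errno.h>
#include <stdint.h>
#include <string.h>

/* dma_buffer[0] holds the device-reported block length;
 * rxbytes is the count the descriptor claims was received. */
static int copy_block(uint8_t *block, const uint8_t *dma_buffer,
		      unsigned int rxbytes)
{
	if (rxbytes != dma_buffer[0] + 1u)
		return -EMSGSIZE;	/* in-band length disagrees */

	memcpy(block, dma_buffer, rxbytes);	/* length byte + payload */
	return 0;
}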
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index f325663c27c5..4b58e8aaf5c5 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -786,10 +786,6 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0);
- i2c->cmd = 0;
- memset(i2c->cmd_buf, 0, BUFSIZE);
- memset(i2c->data_buf, 0, BUFSIZE);
-
i2c->irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
dev_name(&pdev->dev), i2c);
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 9e17ef27a183..6f1dbd52ec91 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -915,8 +915,11 @@ static int __init gic_of_init(struct device_node *node,
gic_len = resource_size(&res);
}
- if (mips_cm_present())
+ if (mips_cm_present()) {
write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
+ /* Ensure GIC region is enabled before trying to access it */
+ __sync();
+ }
gic_present = true;
__gic_init(gic_base, gic_len, cpu_vec, 0, node);
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 257a9eadd595..4ac6764f4897 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -488,7 +488,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
entry += sizeof(__le16);
chan->pa_points_per_curve = 8;
- memset(chan->curve_data, 0, sizeof(*chan->curve_data));
+ memset(chan->curve_data, 0, sizeof(chan->curve_data));
memcpy(chan->curve_data, entry,
sizeof(struct p54_pa_curve_data_sample) *
min((u8)8, curve_data->points_per_channel));
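The p54 change swaps sizeof(*chan->curve_data), the size of one element, for sizeof(chan->curve_data), the size of the whole array, so the memset now clears every curve sample instead of only the first. A toy illustration of the difference (struct layout hypothetical, not p54's):

#include <stdio.h>

struct sample {
	short curve_data[24];
};

int main(void)
{
	struct sample s;

	printf("one element: %zu\n", sizeof(*s.curve_data));	/* 2  */
	printf("whole array: %zu\n", sizeof(s.curve_data));	/* 48 */
	return 0;
}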
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index cd4777954f87..9bee3f11898a 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1567,6 +1567,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
wl->state = WL1251_STATE_OFF;
mutex_init(&wl->mutex);
+ spin_lock_init(&wl->wl_lock);
wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE;
wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE;
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index 1910100638a2..00602abec0ea 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -66,6 +66,9 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
{
static const char * const strings[] = RNC_STATES;
+ if (state >= ARRAY_SIZE(strings))
+ return "UNKNOWN";
+
return strings[state];
}
#undef C
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 0e6aaef9a038..c74f74ab981c 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1054,7 +1054,10 @@ stop_rr_fcf_flogi:
lpfc_sli4_unreg_all_rpis(vport);
}
}
- lpfc_issue_reg_vfi(vport);
+
+ /* Do not register VFI if the driver aborted FLOGI */
+ if (!lpfc_error_lost_link(irsp))
+ lpfc_issue_reg_vfi(vport);
lpfc_nlp_put(ndlp);
goto out;
}
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 29d8c74e85e3..f4170ea2c6b6 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -153,6 +153,7 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
struct sg_device *parentdp; /* owning device */
wait_queue_head_t read_wait; /* queue read until command done */
rwlock_t rq_list_lock; /* protect access to list in req_arr */
+ struct mutex f_mutex; /* protect against changes in this fd */
int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
Sg_scatter_hold reserve; /* buffer held for this file descriptor */
@@ -166,6 +167,7 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
char mmap_called; /* 0 -> mmap() never called on this fd */
+ char res_in_use; /* 1 -> 'reserve' array in use */
struct kref f_ref;
struct execute_work ew;
} Sg_fd;
@@ -209,7 +211,6 @@ static void sg_remove_sfp(struct kref *);
static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
-static int sg_res_in_use(Sg_fd * sfp);
static Sg_device *sg_get_dev(int dev);
static void sg_device_destroy(struct kref *kref);
@@ -625,6 +626,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
}
buf += SZ_SG_HEADER;
__get_user(opcode, buf);
+ mutex_lock(&sfp->f_mutex);
if (sfp->next_cmd_len > 0) {
cmd_size = sfp->next_cmd_len;
sfp->next_cmd_len = 0; /* reset so only this write() effected */
@@ -633,6 +635,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
if ((opcode >= 0xc0) && old_hdr.twelve_byte)
cmd_size = 12;
}
+ mutex_unlock(&sfp->f_mutex);
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
/* Determine buffer size. */
@@ -732,7 +735,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
sg_remove_request(sfp, srp);
return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
}
- if (sg_res_in_use(sfp)) {
+ if (sfp->res_in_use) {
sg_remove_request(sfp, srp);
return -EBUSY; /* reserve buffer already being used */
}
@@ -902,7 +905,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return result;
if (val) {
sfp->low_dma = 1;
- if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
+ if ((0 == sfp->low_dma) && !sfp->res_in_use) {
val = (int) sfp->reserve.bufflen;
sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);
@@ -977,12 +980,18 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return -EINVAL;
val = min_t(int, val,
max_sectors_bytes(sdp->device->request_queue));
+ mutex_lock(&sfp->f_mutex);
if (val != sfp->reserve.bufflen) {
- if (sg_res_in_use(sfp) || sfp->mmap_called)
+ if (sfp->mmap_called ||
+ sfp->res_in_use) {
+ mutex_unlock(&sfp->f_mutex);
return -EBUSY;
+ }
+
sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);
}
+ mutex_unlock(&sfp->f_mutex);
return 0;
case SG_GET_RESERVED_SIZE:
val = min_t(int, sfp->reserve.bufflen,
@@ -1734,13 +1743,22 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
md = &map_data;
if (md) {
- if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
+ mutex_lock(&sfp->f_mutex);
+ if (dxfer_len <= rsv_schp->bufflen &&
+ !sfp->res_in_use) {
+ sfp->res_in_use = 1;
sg_link_reserve(sfp, srp, dxfer_len);
- else {
+ } else if ((hp->flags & SG_FLAG_MMAP_IO) && sfp->res_in_use) {
+ mutex_unlock(&sfp->f_mutex);
+ return -EBUSY;
+ } else {
res = sg_build_indirect(req_schp, sfp, dxfer_len);
- if (res)
+ if (res) {
+ mutex_unlock(&sfp->f_mutex);
return res;
+ }
}
+ mutex_unlock(&sfp->f_mutex);
md->pages = req_schp->pages;
md->page_order = req_schp->page_order;
@@ -2031,6 +2049,8 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
req_schp->sglist_len = 0;
sfp->save_scat_len = 0;
srp->res_used = 0;
+ /* Called without mutex lock to avoid deadlock */
+ sfp->res_in_use = 0;
}
static Sg_request *
@@ -2142,6 +2162,7 @@ sg_add_sfp(Sg_device * sdp)
rwlock_init(&sfp->rq_list_lock);
kref_init(&sfp->f_ref);
+ mutex_init(&sfp->f_mutex);
sfp->timeout = SG_DEFAULT_TIMEOUT;
sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
sfp->force_packid = SG_DEF_FORCE_PACK_ID;
@@ -2217,20 +2238,6 @@ sg_remove_sfp(struct kref *kref)
schedule_work(&sfp->ew.work);
}
-static int
-sg_res_in_use(Sg_fd * sfp)
-{
- const Sg_request *srp;
- unsigned long iflags;
-
- read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (srp = sfp->headrp; srp; srp = srp->nextrp)
- if (srp->res_used)
- break;
- read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- return srp ? 1 : 0;
-}
-
#ifdef CONFIG_SCSI_PROC_FS
static int
sg_idr_max_id(int id, void *p, void *data)
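With the new f_mutex, the reservation state collapses into a res_in_use flag that is tested and set inside one critical section (see sg_start_req above), replacing the removed sg_res_in_use() scan, which left a window where two writers could both observe the reserve buffer as free. A userspace sketch of that check-and-set (names illustrative):

#include <pthread.h>
#include <stdbool.h>

struct fd_state {
	pthread_mutex_t f_mutex;
	bool res_in_use;	/* reserve buffer claimed? */
};

/* true if this caller won the reservation */
static bool try_reserve(struct fd_state *fd)
{
	bool won;

	pthread_mutex_lock(&fd->f_mutex);
	won = !fd->res_in_use;		/* test ... */
	if (won)
		fd->res_in_use = true;	/* ... and set, in one critical section */
	pthread_mutex_unlock(&fd->f_mutex);
	return won;
}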
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 9c62a6f9757a..600c67ef8a03 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -108,7 +108,7 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
},
};
-const u64 const btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
+const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
[BTRFS_RAID_RAID10] = BTRFS_BLOCK_GROUP_RAID10,
[BTRFS_RAID_RAID1] = BTRFS_BLOCK_GROUP_RAID1,
[BTRFS_RAID_DUP] = BTRFS_BLOCK_GROUP_DUP,
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index c6a1ec110c01..22bae2b434e2 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -189,7 +189,7 @@ static int ceph_releasepage(struct page *page, gfp_t g)
/*
* read a single page, without unlocking it.
*/
-static int readpage_nounlock(struct file *filp, struct page *page)
+static int ceph_do_readpage(struct file *filp, struct page *page)
{
struct inode *inode = file_inode(filp);
struct ceph_inode_info *ci = ceph_inode(inode);
@@ -219,7 +219,7 @@ static int readpage_nounlock(struct file *filp, struct page *page)
err = ceph_readpage_from_fscache(inode, page);
if (err == 0)
- goto out;
+ return -EINPROGRESS;
dout("readpage inode %p file %p page %p index %lu\n",
inode, filp, page, page->index);
@@ -249,8 +249,11 @@ out:
static int ceph_readpage(struct file *filp, struct page *page)
{
- int r = readpage_nounlock(filp, page);
- unlock_page(page);
+ int r = ceph_do_readpage(filp, page);
+ if (r != -EINPROGRESS)
+ unlock_page(page);
+ else
+ r = 0;
return r;
}
@@ -1094,7 +1097,7 @@ retry_locked:
goto retry_locked;
r = writepage_nounlock(page, NULL);
if (r < 0)
- goto fail_nosnap;
+ goto fail_unlock;
goto retry_locked;
}
@@ -1122,11 +1125,14 @@ retry_locked:
}
/* we need to read it. */
- r = readpage_nounlock(file, page);
- if (r < 0)
- goto fail_nosnap;
+ r = ceph_do_readpage(file, page);
+ if (r < 0) {
+ if (r == -EINPROGRESS)
+ return -EAGAIN;
+ goto fail_unlock;
+ }
goto retry_locked;
-fail_nosnap:
+fail_unlock:
unlock_page(page);
return r;
}
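The new -EINPROGRESS return threads page-lock ownership through to fscache: when ceph_readpage_from_fscache() accepts the page (returns 0), the read completes asynchronously and the completion callback, not the caller, is responsible for unlocking it. ceph_readpage() therefore maps -EINPROGRESS to success without touching the lock, and the write_begin retry loop returns -EAGAIN instead of unlocking a page it no longer owns.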
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index a4766ded1ba7..ff1cfd7b1083 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -224,13 +224,7 @@ void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
fscache_relinquish_cookie(cookie, 0);
}
-static void ceph_vfs_readpage_complete(struct page *page, void *data, int error)
-{
- if (!error)
- SetPageUptodate(page);
-}
-
-static void ceph_vfs_readpage_complete_unlock(struct page *page, void *data, int error)
+static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error)
{
if (!error)
SetPageUptodate(page);
@@ -259,7 +253,7 @@ int ceph_readpage_from_fscache(struct inode *inode, struct page *page)
return -ENOBUFS;
ret = fscache_read_or_alloc_page(ci->fscache, page,
- ceph_vfs_readpage_complete, NULL,
+ ceph_readpage_from_fscache_complete, NULL,
GFP_KERNEL);
switch (ret) {
@@ -288,7 +282,7 @@ int ceph_readpages_from_fscache(struct inode *inode,
return -ENOBUFS;
ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages,
- ceph_vfs_readpage_complete_unlock,
+ ceph_readpage_from_fscache_complete,
NULL, mapping_gfp_mask(mapping));
switch (ret) {
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index fa8df3fef6fc..297e05c9e2b0 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -194,7 +194,7 @@ check_name(struct dentry *direntry, struct cifs_tcon *tcon)
int i;
if (unlikely(direntry->d_name.len >
- tcon->fsAttrInfo.MaxPathNameComponentLength))
+ le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
return -ENAMETOOLONG;
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
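MaxPathNameComponentLength is carried little-endian on the wire, so comparing the raw field only happens to work on little-endian hosts. A standalone sketch of the conversion the fix applies, with an illustrative helper name (my_le32_to_cpu) standing in for the kernel's le32_to_cpu:

#include <stdint.h>

/* On a big-endian machine, reading these four wire bytes natively
 * turns a wire value of 255 into 0xFF000000, so the -ENAMETOOLONG
 * comparison above would almost never fire without the conversion. */
static uint32_t my_le32_to_cpu(const uint8_t wire[4])
{
	return (uint32_t)wire[0] | ((uint32_t)wire[1] << 8) |
	       ((uint32_t)wire[2] << 16) | ((uint32_t)wire[3] << 24);
}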
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index b8f553b32dda..aacb15bd56fe 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -82,8 +82,8 @@
#define NUMBER_OF_SMB2_COMMANDS 0x0013
-/* BB FIXME - analyze following length BB */
-#define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 2 pad */
+/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */
+#define MAX_SMB2_HDR_SIZE 0x00b0
#define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
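As an arithmetic cross-check on the new constant: 4 (length field) + 52 (transform header) + 64 (SMB2 header) + 56 (create response) = 176 = 0xb0 bytes, matching the updated comment; the old 0x78 (120 bytes) made no allowance for the 52-byte transform header used by SMB3 encryption.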
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 3ab9c68b8bce..066df649a6b0 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -519,8 +519,13 @@ static void ep_remove_wait_queue(struct eppoll_entry *pwq)
wait_queue_head_t *whead;
rcu_read_lock();
- /* If it is cleared by POLLFREE, it should be rcu-safe */
- whead = rcu_dereference(pwq->whead);
+ /*
+ * If it is cleared by POLLFREE, it should be rcu-safe.
+ * If we read NULL we need a barrier paired with
+ * smp_store_release() in ep_poll_callback(), otherwise
+ * we rely on whead->lock.
+ */
+ whead = smp_load_acquire(&pwq->whead);
if (whead)
remove_wait_queue(whead, &pwq->wait);
rcu_read_unlock();
@@ -1004,17 +1009,6 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
struct epitem *epi = ep_item_from_wait(wait);
struct eventpoll *ep = epi->ep;
- if ((unsigned long)key & POLLFREE) {
- ep_pwq_from_wait(wait)->whead = NULL;
- /*
- * whead = NULL above can race with ep_remove_wait_queue()
- * which can do another remove_wait_queue() after us, so we
- * can't use __remove_wait_queue(). whead->lock is held by
- * the caller.
- */
- list_del_init(&wait->task_list);
- }
-
spin_lock_irqsave(&ep->lock, flags);
/*
@@ -1079,6 +1073,23 @@ out_unlock:
if (pwake)
ep_poll_safewake(&ep->poll_wait);
+
+ if ((unsigned long)key & POLLFREE) {
+ /*
+ * If we race with ep_remove_wait_queue() it can miss
+ * ->whead = NULL and do another remove_wait_queue() after
+ * us, so we can't use __remove_wait_queue().
+ */
+ list_del_init(&wait->task_list);
+ /*
+ * ->whead != NULL protects us from the race with ep_free()
+ * or ep_remove(), ep_remove_wait_queue() takes whead->lock
+ * held by the caller. Once we nullify it, nothing protects
+ * ep/epi or even wait.
+ */
+ smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL);
+ }
+
return 1;
}
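The epoll fix is the classic publish/consume ordering: the smp_store_release() in ep_poll_callback() must be the last touch of the wait entry, and the smp_load_acquire() in ep_remove_wait_queue() guarantees that a reader seeing NULL also sees the completed list_del_init(). A minimal C11 sketch of the same pairing, with the kernel helpers replaced by stdatomic equivalents and illustrative names (waiter, writer, reader):

#include <stdatomic.h>

struct waiter {
	_Atomic(void *) whead;
};

/* ~ep_poll_callback(POLLFREE): finish teardown, then publish NULL. */
static void writer(struct waiter *w)
{
	/* ...list_del_init() and the rest of the teardown go here... */
	atomic_store_explicit(&w->whead, NULL, memory_order_release);
}

/* ~ep_remove_wait_queue(): NULL implies the teardown above is fully
 * visible; non-NULL means the wait queue may still be dereferenced. */
static void *reader(struct waiter *w)
{
	return atomic_load_explicit(&w->whead, memory_order_acquire);
}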
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index fc824e2828f3..5d2add1a6c96 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -48,7 +48,11 @@
#define parent_node(node) ((void)(node),0)
#endif
#ifndef cpumask_of_node
-#define cpumask_of_node(node) ((void)node, cpu_online_mask)
+ #ifdef CONFIG_NEED_MULTIPLE_NODES
+ #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask)
+ #else
+ #define cpumask_of_node(node) ((void)node, cpu_online_mask)
+ #endif
#endif
#ifndef pcibus_to_node
#define pcibus_to_node(bus) ((void)(bus), -1)
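Concretely: on a kernel built with multiple fake nodes but no real NUMA backing, cpumask_of_node(1) previously evaluated to cpu_online_mask, so any caller iterating per node counted every online CPU once per node; with the change only node 0 maps to the online mask and all other nodes return cpu_none_mask.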
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 5daa9e78584c..a998cf205cdc 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -119,7 +119,15 @@ struct cpufreq_policy {
bool fast_switch_possible;
bool fast_switch_enabled;
- /* Cached frequency lookup from cpufreq_driver_resolve_freq. */
+ /*
+ * Preferred average time interval between consecutive invocations of
+ * the driver to set the frequency for this policy. To be set by the
+ * scaling driver (0, which is the default, means no preference).
+ */
+ unsigned int up_transition_delay_us;
+ unsigned int down_transition_delay_us;
+
+ /* Cached frequency lookup from cpufreq_driver_resolve_freq. */
unsigned int cached_target_freq;
int cached_resolved_idx;
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 782d4e814e21..4bc4b1b13193 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -310,6 +310,7 @@ static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
{
struct ppa_addr l;
+ l.ppa = 0;
/*
* (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc.
*/
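ppa_addr is a union of a single u64 with several bitfield views, built on the stack, so OR-ing converted fields into it leaves every untouched bit holding stack garbage unless the u64 member is zeroed first. A compact sketch of the failure mode, with an illustrative stand-in union (my_ppa):

#include <stdint.h>

union my_ppa {				/* stand-in for struct ppa_addr */
	uint64_t ppa;
	struct {
		uint64_t blk : 16, pg : 16, rest : 32;
	} g;
};

static union my_ppa to_generic(uint64_t raw)
{
	union my_ppa l;

	l.ppa = 0;	/* without this, l.g.rest keeps stack garbage */
	l.g.blk = raw & 0xffff;
	l.g.pg = (raw >> 16) & 0xffff;
	return l;
}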
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 1656a48d5bee..f92ec9a71af3 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1916,6 +1916,7 @@ static struct cftype files[] = {
{
.name = "memory_pressure",
.read_u64 = cpuset_read_u64,
+ .private = FILE_MEMORY_PRESSURE,
},
{
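Without an explicit .private, the cftype field defaults to 0, so cpuset_read_u64() dispatched reads of memory_pressure through whichever file enum happens to be 0 rather than FILE_MEMORY_PRESSURE; the one-line initializer restores the intended mapping.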
diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
index 7080ae1eb6c1..f850e906564b 100644
--- a/kernel/gcov/base.c
+++ b/kernel/gcov/base.c
@@ -98,6 +98,12 @@ void __gcov_merge_icall_topn(gcov_type *counters, unsigned int n_counters)
}
EXPORT_SYMBOL(__gcov_merge_icall_topn);
+void __gcov_exit(void)
+{
+ /* Unused. */
+}
+EXPORT_SYMBOL(__gcov_exit);
+
/**
* gcov_enable_events - enable event reporting through gcov_event()
*
diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
index e25e92fb44fa..46a18e72bce6 100644
--- a/kernel/gcov/gcc_4_7.c
+++ b/kernel/gcov/gcc_4_7.c
@@ -18,7 +18,9 @@
#include <linux/vmalloc.h>
#include "gcov.h"
-#if __GNUC__ == 5 && __GNUC_MINOR__ >= 1
+#if (__GNUC__ >= 7)
+#define GCOV_COUNTERS 9
+#elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
#define GCOV_COUNTERS 10
#elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9
#define GCOV_COUNTERS 9
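Taken together with the __gcov_exit() stub added to kernel/gcov/base.c above, this covers the GCC 7 format change: GCC 7's profiling runtime expects a __gcov_exit() symbol from instrumented objects and goes back to 9 counter kinds (from 10 in GCC 5/6), which is why GCOV_COUNTERS now has a dedicated >= 7 branch ahead of the 5.1+ one.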
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4ecca604e64b..75e15e154cfc 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3177,20 +3177,20 @@ static void sched_freq_tick_pelt(int cpu)
#ifdef CONFIG_SCHED_WALT
static void sched_freq_tick_walt(int cpu)
{
- unsigned long cpu_utilization = cpu_util(cpu);
+ unsigned long cpu_utilization = cpu_util_freq(cpu);
unsigned long capacity_curr = capacity_curr_of(cpu);
if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
return sched_freq_tick_pelt(cpu);
/*
- * Add a margin to the WALT utilization.
+ * Add a margin to the WALT utilization to check if we will need to
+ * increase frequency.
* NOTE: WALT tracks a single CPU signal for all the scheduling
* classes, thus this margin is going to be added to the DL class as
* well, which is something we do not do in sched_freq_tick_pelt case.
*/
- cpu_utilization = add_capacity_margin(cpu_utilization);
- if (cpu_utilization <= capacity_curr)
+ if (add_capacity_margin(cpu_utilization) <= capacity_curr)
return;
/*
diff --git a/kernel/sched/cpufreq_sched.c b/kernel/sched/cpufreq_sched.c
index f10d9f7d6d07..6ffb23adbcef 100644
--- a/kernel/sched/cpufreq_sched.c
+++ b/kernel/sched/cpufreq_sched.c
@@ -235,6 +235,18 @@ out:
cpufreq_cpu_put(policy);
}
+#ifdef CONFIG_SCHED_WALT
+static inline unsigned long
+requested_capacity(struct sched_capacity_reqs *scr)
+{
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+ return scr->cfs;
+ return scr->cfs + scr->rt;
+}
+#else
+#define requested_capacity(scr) (scr->cfs + scr->rt)
+#endif
+
void update_cpu_capacity_request(int cpu, bool request)
{
unsigned long new_capacity;
@@ -245,7 +257,7 @@ void update_cpu_capacity_request(int cpu, bool request)
scr = &per_cpu(cpu_sched_capacity_reqs, cpu);
- new_capacity = scr->cfs + scr->rt;
+ new_capacity = requested_capacity(scr);
new_capacity = new_capacity * capacity_margin
/ SCHED_CAPACITY_SCALE;
new_capacity += scr->dl;
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index e12309c1b07b..b90f7434e13b 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -64,8 +64,9 @@ struct sugov_cpu {
struct update_util_data update_util;
struct sugov_policy *sg_policy;
- unsigned long iowait_boost;
- unsigned long iowait_boost_max;
+ bool iowait_boost_pending;
+ unsigned int iowait_boost;
+ unsigned int iowait_boost_max;
u64 last_update;
/* The fields below are only needed when sharing a policy. */
@@ -224,30 +225,54 @@ static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
unsigned int flags)
{
if (flags & SCHED_CPUFREQ_IOWAIT) {
- sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
+ if (sg_cpu->iowait_boost_pending)
+ return;
+
+ sg_cpu->iowait_boost_pending = true;
+
+ if (sg_cpu->iowait_boost) {
+ sg_cpu->iowait_boost <<= 1;
+ if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
+ sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
+ } else {
+ sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
+ }
} else if (sg_cpu->iowait_boost) {
s64 delta_ns = time - sg_cpu->last_update;
/* Clear iowait_boost if the CPU appears to have been idle. */
- if (delta_ns > TICK_NSEC)
+ if (delta_ns > TICK_NSEC) {
sg_cpu->iowait_boost = 0;
+ sg_cpu->iowait_boost_pending = false;
+ }
}
}
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
unsigned long *max)
{
- unsigned long boost_util = sg_cpu->iowait_boost;
- unsigned long boost_max = sg_cpu->iowait_boost_max;
+ unsigned int boost_util, boost_max;
- if (!boost_util)
+ if (!sg_cpu->iowait_boost)
return;
+ if (sg_cpu->iowait_boost_pending) {
+ sg_cpu->iowait_boost_pending = false;
+ } else {
+ sg_cpu->iowait_boost >>= 1;
+ if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
+ sg_cpu->iowait_boost = 0;
+ return;
+ }
+ }
+
+ boost_util = sg_cpu->iowait_boost;
+ boost_max = sg_cpu->iowait_boost_max;
+
if (*util * boost_max < *max * boost_util) {
*util = boost_util;
*max = boost_max;
}
- sg_cpu->iowait_boost >>= 1;
}
#ifdef CONFIG_NO_HZ_COMMON
@@ -320,6 +345,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu)
delta_ns = last_freq_update_time - j_sg_cpu->last_update;
if (delta_ns > TICK_NSEC) {
j_sg_cpu->iowait_boost = 0;
+ j_sg_cpu->iowait_boost_pending = false;
continue;
}
if (j_sg_cpu->flags & SCHED_CPUFREQ_DL)
@@ -589,7 +615,6 @@ static int sugov_init(struct cpufreq_policy *policy)
{
struct sugov_policy *sg_policy;
struct sugov_tunables *tunables;
- unsigned int lat;
int ret = 0;
/* State should be equivalent to EXIT */
@@ -628,12 +653,19 @@ static int sugov_init(struct cpufreq_policy *policy)
goto stop_kthread;
}
- tunables->up_rate_limit_us = LATENCY_MULTIPLIER;
- tunables->down_rate_limit_us = LATENCY_MULTIPLIER;
- lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
- if (lat) {
- tunables->up_rate_limit_us *= lat;
- tunables->down_rate_limit_us *= lat;
+ if (policy->up_transition_delay_us && policy->down_transition_delay_us) {
+ tunables->up_rate_limit_us = policy->up_transition_delay_us;
+ tunables->down_rate_limit_us = policy->down_transition_delay_us;
+ } else {
+ unsigned int lat;
+
+ tunables->up_rate_limit_us = LATENCY_MULTIPLIER;
+ tunables->down_rate_limit_us = LATENCY_MULTIPLIER;
+ lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
+ if (lat) {
+ tunables->up_rate_limit_us *= lat;
+ tunables->down_rate_limit_us *= lat;
+ }
}
policy->governor_data = sg_policy;
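The boost no longer jumps straight to iowait_boost_max: it doubles from policy->min on consecutive iowait wakeups and halves on each use once they stop, and the pending flag ensures a freshly granted boost survives its first consumption undecayed. A self-contained sketch of that ramp, with kernel types and locking stripped and illustrative helper names (on_iowait, consume_boost):

static unsigned int boost;	/* current boost value, 0 = inactive */
static int pending;		/* fresh boost not yet consumed */

static void on_iowait(unsigned int pol_min, unsigned int pol_max)
{
	if (pending)
		return;
	pending = 1;
	if (boost) {
		boost <<= 1;		/* double on each iowait wakeup */
		if (boost > pol_max)
			boost = pol_max;
	} else {
		boost = pol_min;	/* first wakeup starts from policy min */
	}
}

static unsigned int consume_boost(unsigned int pol_min)
{
	if (!boost)
		return 0;
	if (pending)
		pending = 0;		/* fresh boost: apply undecayed once */
	else if ((boost >>= 1) < pol_min)
		boost = 0;		/* decayed below the floor: turn off */
	return boost;
}

The sugov_init() change downstream then lets a scaling driver supply its own up/down transition delays via the new cpufreq_policy fields, falling back to the LATENCY_MULTIPLIER heuristic only when it does not.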
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index a105e97ab6bf..905d8cb5a6eb 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -18,6 +18,8 @@
#include <linux/slab.h>
+#include "walt.h"
+
struct dl_bandwidth def_dl_bandwidth;
static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 853064319b0d..aa016919eab8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5809,10 +5809,11 @@ static inline void hrtick_update(struct rq *rq)
#endif
#ifdef CONFIG_SMP
+static bool __cpu_overutilized(int cpu, int delta);
static bool cpu_overutilized(int cpu);
unsigned long boosted_cpu_util(int cpu);
#else
-#define boosted_cpu_util(cpu) cpu_util(cpu)
+#define boosted_cpu_util(cpu) cpu_util_freq(cpu)
#endif
#if defined(CONFIG_SMP) && defined(CONFIG_CPU_FREQ_GOV_SCHED)
@@ -6626,10 +6627,8 @@ end:
*/
static int sched_group_energy(struct energy_env *eenv)
{
- struct sched_domain *sd;
- int cpu, total_energy = 0;
struct cpumask visit_cpus;
- struct sched_group *sg;
+ u64 total_energy = 0;
WARN_ON(!eenv->sg_top->sge);
@@ -6637,8 +6636,8 @@ static int sched_group_energy(struct energy_env *eenv)
while (!cpumask_empty(&visit_cpus)) {
struct sched_group *sg_shared_cap = NULL;
-
- cpu = cpumask_first(&visit_cpus);
+ int cpu = cpumask_first(&visit_cpus);
+ struct sched_domain *sd;
/*
* Is the group utilization affected by cpus outside this
@@ -6650,7 +6649,7 @@ static int sched_group_energy(struct energy_env *eenv)
sg_shared_cap = sd->parent->groups;
for_each_domain(cpu, sd) {
- sg = sd->groups;
+ struct sched_group *sg = sd->groups;
/* Has this sched_domain already been visited? */
if (sd->child && group_first_cpu(sg) != cpu)
@@ -6686,11 +6685,9 @@ static int sched_group_energy(struct energy_env *eenv)
idle_idx = group_idle_state(eenv, sg);
group_util = group_norm_util(eenv, sg);
- sg_busy_energy = (group_util * sg->sge->cap_states[cap_idx].power)
- >> SCHED_CAPACITY_SHIFT;
+ sg_busy_energy = (group_util * sg->sge->cap_states[cap_idx].power);
sg_idle_energy = ((SCHED_LOAD_SCALE-group_util)
- * sg->sge->idle_states[idle_idx].power)
- >> SCHED_CAPACITY_SHIFT;
+ * sg->sge->idle_states[idle_idx].power);
total_energy += sg_busy_energy + sg_idle_energy;
@@ -6715,7 +6712,7 @@ next_cpu:
continue;
}
- eenv->energy = total_energy;
+ eenv->energy = total_energy >> SCHED_CAPACITY_SHIFT;
return 0;
}
@@ -7004,9 +7001,14 @@ static inline bool task_fits_max(struct task_struct *p, int cpu)
return __task_fits(p, cpu, 0);
}
+static bool __cpu_overutilized(int cpu, int delta)
+{
+ return (capacity_of(cpu) * 1024) < ((cpu_util(cpu) + delta) * capacity_margin);
+}
+
static bool cpu_overutilized(int cpu)
{
- return (capacity_of(cpu) * 1024) < (cpu_util(cpu) * capacity_margin);
+ return __cpu_overutilized(cpu, 0);
}
#ifdef CONFIG_SCHED_TUNE
@@ -7085,7 +7087,7 @@ schedtune_task_margin(struct task_struct *task)
unsigned long
boosted_cpu_util(int cpu)
{
- unsigned long util = cpu_util(cpu);
+ unsigned long util = cpu_util_freq(cpu);
long margin = schedtune_cpu_margin(util, cpu);
trace_sched_boost_cpu(cpu, util, margin);
@@ -7729,6 +7731,7 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
}
if (target_cpu != prev_cpu) {
+ int delta = 0;
struct energy_env eenv = {
.util_delta = task_util(p),
.src_cpu = prev_cpu,
@@ -7736,8 +7739,13 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
.task = p,
};
+
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+ delta = task_util(p);
+#endif
/* Not enough spare capacity on previous cpu */
- if (cpu_overutilized(prev_cpu)) {
+ if (__cpu_overutilized(prev_cpu, delta)) {
schedstat_inc(p, se.statistics.nr_wakeups_secb_insuff_cap);
schedstat_inc(this_rq(), eas_stats.secb_insuff_cap);
goto unlock;
@@ -11348,8 +11356,8 @@ static inline int _nohz_kick_needed(struct rq *rq, int cpu, int *type)
return true;
/* Do idle load balance if there is a misfit task */
- if (energy_aware() && rq->misfit_task)
- return 1;
+ if (energy_aware())
+ return rq->misfit_task;
return (rq->nr_running >= 2);
}
@@ -11391,7 +11399,7 @@ static inline bool nohz_kick_needed(struct rq *rq, int *type)
#ifndef CONFIG_SCHED_HMP
rcu_read_lock();
sd = rcu_dereference(per_cpu(sd_busy, cpu));
- if (sd && !energy_aware()) {
+ if (sd) {
sgc = sd->groups->sgc;
nr_busy = atomic_read(&sgc->nr_busy_cpus);
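Two of the fair.c changes are easiest to see with numbers. The rounding fix: with SCHED_CAPACITY_SHIFT = 10, shifting each group's energy before summing truncates up to 1023 units per group, so two groups of 1023 raw units each total 0, while summing first gives (1023 + 1023) >> 10 = 1; since EAS compares small energy deltas between candidate CPUs, the per-term truncation could dominate the decision, hence the single shift applied to total_energy. The capacity check: __cpu_overutilized(cpu, delta) tests capacity_of(cpu) * 1024 < (cpu_util(cpu) + delta) * capacity_margin, and under WALT the waking task's own utilization is passed as delta because a sleeping task's load has already been removed from prev_cpu's WALT signal, unlike PELT's blocked-load accounting.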
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 33bf0c07e757..c53970b5a8f0 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2363,6 +2363,12 @@ static inline unsigned long __cpu_util(int cpu, int delta)
unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
unsigned long capacity = capacity_orig_of(cpu);
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+ util = div64_u64(cpu_rq(cpu)->cumulative_runnable_avg,
+ walt_ravg_window >> SCHED_LOAD_SHIFT);
+#endif
+
delta += util;
if (delta < 0)
return 0;
@@ -2375,6 +2381,19 @@ static inline unsigned long cpu_util(int cpu)
return __cpu_util(cpu, 0);
}
+static inline unsigned long cpu_util_freq(int cpu)
+{
+ unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
+ unsigned long capacity = capacity_orig_of(cpu);
+
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+ util = div64_u64(cpu_rq(cpu)->prev_runnable_sum,
+ walt_ravg_window >> SCHED_LOAD_SHIFT);
+#endif
+ return (util >= capacity) ? capacity : util;
+}
+
#endif
#ifdef CONFIG_CPU_FREQ_GOV_SCHED
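Sanity-checking the WALT scaling shared by __cpu_util() and the new cpu_util_freq(): with the default 20 ms window (walt_ravg_window = 20,000,000 ns) and SCHED_LOAD_SHIFT = 10, the divisor is 20000000 >> 10 = 19531, so a runnable sum filling the whole window yields 20000000 / 19531 ≈ 1024, i.e. full capacity on the load scale. The difference between the helpers is the numerator: cumulative_runnable_avg for task placement versus prev_runnable_sum (only the last complete window) for frequency selection, per the "use cr_avg instead of prev_runnable_sum" patch in the merge list.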
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 92c3aae8e056..28e999554463 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -111,8 +111,10 @@ walt_dec_cumulative_runnable_avg(struct rq *rq,
static void
fixup_cumulative_runnable_avg(struct rq *rq,
- struct task_struct *p, s64 task_load_delta)
+ struct task_struct *p, u64 new_task_load)
{
+ s64 task_load_delta = (s64)new_task_load - task_load(p);
+
rq->cumulative_runnable_avg += task_load_delta;
if ((s64)rq->cumulative_runnable_avg < 0)
panic("cra less than zero: tld: %lld, task_load(p) = %u\n",
@@ -802,11 +804,11 @@ void walt_set_window_start(struct rq *rq)
int cpu = cpu_of(rq);
struct rq *sync_rq = cpu_rq(sync_cpu);
- if (rq->window_start)
+ if (likely(rq->window_start))
return;
if (cpu == sync_cpu) {
- rq->window_start = walt_ktime_clock();
+ rq->window_start = 1;
} else {
raw_spin_unlock(&rq->lock);
double_rq_lock(rq, sync_rq);
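Two notes on the walt.c hunks: fixup_cumulative_runnable_avg() now receives the new task load and derives the delta internally as (s64)new_task_load - task_load(p), so callers can no longer feed it a delta computed against a stale load value; and seeding the sync CPU's window_start with 1 instead of the current ktime puts every CPU's windows on a common near-zero epoch, which appears to be the window mis-alignment the merge list refers to.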
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index c8700399d7fd..e8994c345c53 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3272,9 +3272,15 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
struct xfrm_state *x_new[XFRM_MAX_DEPTH];
struct xfrm_migrate *mp;
+ /* Stage 0 - sanity checks */
if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
goto out;
+ if (dir >= XFRM_POLICY_MAX) {
+ err = -EINVAL;
+ goto out;
+ }
+
/* Stage 1 - find policy */
if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
err = -ENOENT;
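dir arrives from userspace and indexes fixed-size per-direction policy arrays, so the missing bound let a crafted migrate request index past XFRM_POLICY_MAX. A minimal sketch of the bug class, with illustrative names (MY_POLICY_MAX, lookup):

#include <errno.h>

#define MY_POLICY_MAX 3			/* stand-in for XFRM_POLICY_MAX */

static int lookup(const int table[MY_POLICY_MAX], unsigned int dir)
{
	if (dir >= MY_POLICY_MAX)
		return -EINVAL;		/* reject before indexing */
	return table[dir];
}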
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
index 74177189063c..d3125c169684 100644
--- a/sound/pci/au88x0/au88x0_core.c
+++ b/sound/pci/au88x0/au88x0_core.c
@@ -2150,8 +2150,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
stream->resources, en,
VORTEX_RESOURCE_SRC)) < 0) {
memset(stream->resources, 0,
- sizeof(unsigned char) *
- VORTEX_RESOURCE_LAST);
+ sizeof(stream->resources));
return -EBUSY;
}
if (stream->type != VORTEX_PCM_A3D) {
@@ -2161,7 +2160,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
VORTEX_RESOURCE_MIXIN)) < 0) {
memset(stream->resources,
0,
- sizeof(unsigned char) * VORTEX_RESOURCE_LAST);
+ sizeof(stream->resources));
return -EBUSY;
}
}
@@ -2174,8 +2173,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
stream->resources, en,
VORTEX_RESOURCE_A3D)) < 0) {
memset(stream->resources, 0,
- sizeof(unsigned char) *
- VORTEX_RESOURCE_LAST);
+ sizeof(stream->resources));
dev_err(vortex->card->dev,
"out of A3D sources. Sorry\n");
return -EBUSY;
@@ -2289,8 +2287,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
VORTEX_RESOURCE_MIXOUT))
< 0) {
memset(stream->resources, 0,
- sizeof(unsigned char) *
- VORTEX_RESOURCE_LAST);
+ sizeof(stream->resources));
return -EBUSY;
}
if ((src[i] =
@@ -2298,8 +2295,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
stream->resources, en,
VORTEX_RESOURCE_SRC)) < 0) {
memset(stream->resources, 0,
- sizeof(unsigned char) *
- VORTEX_RESOURCE_LAST);
+ sizeof(stream->resources));
return -EBUSY;
}
}
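All five au88x0 memset fixes are the same change: the resources[] elements are wider than one byte, so sizeof(unsigned char) * VORTEX_RESOURCE_LAST cleared only the first VORTEX_RESOURCE_LAST bytes of the array, while sizeof on the array itself always covers element size times element count. A minimal sketch, with an illustrative struct (my_stream, element count 5):

#include <string.h>

struct my_stream {		/* illustrative; elements wider than a byte */
	int resources[5];
};

static void clear(struct my_stream *s)
{
	/* old pattern: memset(s->resources, 0, sizeof(unsigned char) * 5)
	 * clears 5 of the 20 bytes; sizeof on the array clears them all. */
	memset(s->resources, 0, sizeof(s->resources));
}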