path: root/mm/slab.c
author     Srinivasarao P <spathi@codeaurora.org>    2018-01-09 15:59:02 +0530
committer  Srinivasarao P <spathi@codeaurora.org>    2018-01-18 12:45:07 +0530
commit     dd4f1e35fa5f5a452d34e1fcc681472f2011f93d (patch)
tree       8f7c973e9920c4df577c5b7093da541ce12da7d1 /mm/slab.c
parent     33260fbfb3632f876669fa815db1edb6d688ace3 (diff)
parent     2fea0397a8e708952a22918bd89cce28b3087c11 (diff)
Merge android-4.4.106 (2fea039) into msm-4.4
* refs/heads/tmp-2fea039
  Linux 4.4.106
  usb: gadget: ffs: Forbid usb_ep_alloc_request from sleeping
  arm: KVM: Fix VTTBR_BADDR_MASK BUG_ON off-by-one
  Revert "x86/mm/pat: Ensure cpa->pfn only contains page frame numbers"
  Revert "x86/efi: Hoist page table switching code into efi_call_virt()"
  Revert "x86/efi: Build our own page table structures"
  net/packet: fix a race in packet_bind() and packet_notifier()
  packet: fix crash in fanout_demux_rollover()
  sit: update frag_off info
  rds: Fix NULL pointer dereference in __rds_rdma_map
  tipc: fix memory leak in tipc_accept_from_sock()
  more bio_map_user_iov() leak fixes
  s390: always save and restore all registers on context switch
  ipmi: Stop timers before cleaning up the module
  audit: ensure that 'audit=1' actually enables audit for PID 1
  ipvlan: fix ipv6 outbound device
  afs: Connect up the CB.ProbeUuid
  IB/mlx5: Assign send CQ and recv CQ of UMR QP
  IB/mlx4: Increase maximal message size under UD QP
  xfrm: Copy policy family in clone_policy
  jump_label: Invoke jump_label_test() via early_initcall()
  atm: horizon: Fix irq release error
  sctp: use the right sk after waking up from wait_buf sleep
  sctp: do not free asoc when it is already dead in sctp_sendmsg
  sparc64/mm: set fields in deferred pages
  block: wake up all tasks blocked in get_request()
  sunrpc: Fix rpc_task_begin trace point
  NFS: Fix a typo in nfs_rename()
  dynamic-debug-howto: fix optional/omitted ending line number to be LARGE instead of 0
  lib/genalloc.c: make the avail variable an atomic_long_t
  route: update fnhe_expires for redirect when the fnhe exists
  route: also update fnhe_genid when updating a route cache
  mac80211_hwsim: Fix memory leak in hwsim_new_radio_nl()
  kbuild: pkg: use --transform option to prefix paths in tar
  EDAC, i5000, i5400: Fix definition of NRECMEMB register
  EDAC, i5000, i5400: Fix use of MTR_DRAM_WIDTH macro
  powerpc/powernv/ioda2: Gracefully fail if too many TCE levels requested
  drm/amd/amdgpu: fix console deadlock if late init failed
  axonram: Fix gendisk handling
  netfilter: don't track fragmented packets
  zram: set physical queue limits to avoid array out of bounds accesses
  i2c: riic: fix restart condition
  crypto: s5p-sss - Fix completing crypto request in IRQ handler
  ipv6: reorder icmpv6_init() and ip6_mr_init()
  bnx2x: do not rollback VF MAC/VLAN filters we did not configure
  bnx2x: fix possible overrun of VFPF multicast addresses array
  bnx2x: prevent crash when accessing PTP with interface down
  spi_ks8995: fix "BUG: key accdaa28 not in .data!"
  arm64: KVM: Survive unknown traps from guests
  arm: KVM: Survive unknown traps from guests
  KVM: nVMX: reset nested_run_pending if the vCPU is going to be reset
  irqchip/crossbar: Fix incorrect type of register size
  scsi: lpfc: Fix crash during Hardware error recovery on SLI3 adapters
  workqueue: trigger WARN if queue_delayed_work() is called with NULL @wq
  libata: drop WARN from protocol error in ata_sff_qc_issue()
  kvm: nVMX: VMCLEAR should not cause the vCPU to shut down
  USB: gadgetfs: Fix a potential memory leak in 'dev_config()'
  usb: gadget: configs: plug memory leak
  HID: chicony: Add support for another ASUS Zen AiO keyboard
  gpio: altera: Use handle_level_irq when configured as a level_high
  ARM: OMAP2+: Release device node after it is no longer needed.
  ARM: OMAP2+: Fix device node reference counts
  module: set __jump_table alignment to 8
  selftest/powerpc: Fix false failures for skipped tests
  x86/hpet: Prevent might sleep splat on resume
  ARM: OMAP2+: gpmc-onenand: propagate error on initialization failure
  vti6: Don't report path MTU below IPV6_MIN_MTU.
  Revert "s390/kbuild: enable modversions for symbols exported from asm"
  Revert "spi: SPI_FSL_DSPI should depend on HAS_DMA"
  Revert "drm/armada: Fix compile fail"
  mm: drop unused pmdp_huge_get_and_clear_notify()
  thp: fix MADV_DONTNEED vs. numa balancing race
  thp: reduce indentation level in change_huge_pmd()
  scsi: storvsc: Workaround for virtual DVD SCSI version
  ARM: avoid faulting on qemu
  ARM: BUG if jumping to usermode address in kernel mode
  arm64: fpsimd: Prevent registers leaking from dead tasks
  KVM: VMX: remove I/O port 0x80 bypass on Intel hosts
  arm64: KVM: fix VTTBR_BADDR_MASK BUG_ON off-by-one
  media: dvb: i2c transfers over usb cannot be done from stack
  drm/exynos: gem: Drop NONCONTIG flag for buffers allocated without IOMMU
  drm: extra printk() wrapper macros
  kdb: Fix handling of kallsyms_symbol_next() return value
  s390: fix compat system call table
  iommu/vt-d: Fix scatterlist offset handling
  ALSA: usb-audio: Add check return value for usb_string()
  ALSA: usb-audio: Fix out-of-bound error
  ALSA: seq: Remove spurious WARN_ON() at timer check
  ALSA: pcm: prevent UAF in snd_pcm_info
  x86/PCI: Make broadcom_postcore_init() check acpi_disabled
  X.509: reject invalid BIT STRING for subjectPublicKey
  ASN.1: check for error from ASN1_OP_END__ACT actions
  ASN.1: fix out-of-bounds read when parsing indefinite length item
  efi: Move some sysfs files to be read-only by root
  scsi: libsas: align sata_device's rps_resp on a cacheline
  isa: Prevent NULL dereference in isa_bus driver callbacks
  hv: kvp: Avoid reading past allocated blocks from KVP file
  virtio: release virtio index when fail to device_register
  can: usb_8dev: cancel urb on -EPIPE and -EPROTO
  can: esd_usb2: cancel urb on -EPIPE and -EPROTO
  can: ems_usb: cancel urb on -EPIPE and -EPROTO
  can: kvaser_usb: cancel urb on -EPIPE and -EPROTO
  can: kvaser_usb: ratelimit errors if incomplete messages are received
  can: kvaser_usb: Fix comparison bug in kvaser_usb_read_bulk_callback()
  can: kvaser_usb: free buf in error paths
  can: ti_hecc: Fix napi poll return value for repoll
  BACKPORT: irq: Make the irqentry text section unconditional
  UPSTREAM: arch, ftrace: for KASAN put hard/soft IRQ entries into separate sections
  UPSTREAM: x86, kasan, ftrace: Put APIC interrupt handlers into .irqentry.text
  UPSTREAM: kasan: make get_wild_bug_type() static
  UPSTREAM: kasan: separate report parts by empty lines
  UPSTREAM: kasan: improve double-free report format
  UPSTREAM: kasan: print page description after stacks
  UPSTREAM: kasan: improve slab object description
  UPSTREAM: kasan: change report header
  UPSTREAM: kasan: simplify address description logic
  UPSTREAM: kasan: change allocation and freeing stack traces headers
  UPSTREAM: kasan: unify report headers
  UPSTREAM: kasan: introduce helper functions for determining bug type
  BACKPORT: kasan: report only the first error by default
  UPSTREAM: kasan: fix races in quarantine_remove_cache()
  UPSTREAM: kasan: resched in quarantine_remove_cache()
  BACKPORT: kasan, sched/headers: Uninline kasan_enable/disable_current()
  BACKPORT: kasan: drain quarantine of memcg slab objects
  UPSTREAM: kasan: eliminate long stalls during quarantine reduction
  UPSTREAM: kasan: support panic_on_warn
  UPSTREAM: x86/suspend: fix false positive KASAN warning on suspend/resume
  UPSTREAM: kasan: support use-after-scope detection
  UPSTREAM: kasan/tests: add tests for user memory access functions
  UPSTREAM: mm, kasan: add a ksize() test
  UPSTREAM: kasan: test fix: warn if the UAF could not be detected in kmalloc_uaf2
  UPSTREAM: kasan: modify kmalloc_large_oob_right(), add kmalloc_pagealloc_oob_right()
  UPSTREAM: lib/stackdepot: export save/fetch stack for drivers
  UPSTREAM: lib/stackdepot.c: bump stackdepot capacity from 16MB to 128MB
  BACKPORT: kprobes: Unpoison stack in jprobe_return() for KASAN
  UPSTREAM: kasan: remove the unnecessary WARN_ONCE from quarantine.c
  UPSTREAM: kasan: avoid overflowing quarantine size on low memory systems
  UPSTREAM: kasan: improve double-free reports
  BACKPORT: mm: coalesce split strings
  BACKPORT: mm/kasan: get rid of ->state in struct kasan_alloc_meta
  UPSTREAM: mm/kasan: get rid of ->alloc_size in struct kasan_alloc_meta
  UPSTREAM: mm: kasan: remove unused 'reserved' field from struct kasan_alloc_meta
  UPSTREAM: mm/kasan, slub: don't disable interrupts when object leaves quarantine
  UPSTREAM: mm/kasan: don't reduce quarantine in atomic contexts
  UPSTREAM: mm/kasan: fix corruptions and false positive reports
  UPSTREAM: lib/stackdepot.c: use __GFP_NOWARN for stack allocations
  BACKPORT: mm, kasan: switch SLUB to stackdepot, enable memory quarantine for SLUB
  UPSTREAM: kasan/quarantine: fix bugs on qlist_move_cache()
  UPSTREAM: mm: mempool: kasan: don't poot mempool objects in quarantine
  UPSTREAM: kasan: change memory hot-add error messages to info messages
  BACKPORT: mm/kasan: add API to check memory regions
  UPSTREAM: mm/kasan: print name of mem[set,cpy,move]() caller in report
  UPSTREAM: mm: kasan: initial memory quarantine implementation
  UPSTREAM: lib/stackdepot: avoid to return 0 handle
  UPSTREAM: lib/stackdepot.c: allow the stack trace hash to be zero
  UPSTREAM: mm, kasan: fix compilation for CONFIG_SLAB
  BACKPORT: mm, kasan: stackdepot implementation. Enable stackdepot for SLAB
  BACKPORT: mm, kasan: add GFP flags to KASAN API
  UPSTREAM: mm, kasan: SLAB support
  UPSTREAM: mm/slab: align cache size first before determination of OFF_SLAB candidate
  UPSTREAM: mm/slab: use more appropriate condition check for debug_pagealloc
  UPSTREAM: mm/slab: factor out debugging initialization in cache_init_objs()
  UPSTREAM: mm/slab: remove object status buffer for DEBUG_SLAB_LEAK
  UPSTREAM: mm/slab: alternative implementation for DEBUG_SLAB_LEAK
  UPSTREAM: mm/slab: clean up DEBUG_PAGEALLOC processing code
  UPSTREAM: mm/slab: activate debug_pagealloc in SLAB when it is actually enabled
  sched: EAS/WALT: Don't take into account of running task's util
  BACKPORT: schedutil: Reset cached freq if it is not in sync with next_freq
  UPSTREAM: kasan: add functions to clear stack poison

Conflicts:
  arch/arm/include/asm/kvm_arm.h
  arch/arm64/kernel/vmlinux.lds.S
  include/linux/kasan.h
  kernel/softirq.c
  lib/Kconfig
  lib/Kconfig.kasan
  lib/Makefile
  lib/stackdepot.c
  mm/kasan/kasan.c
  sound/usb/mixer.c

Change-Id: If70ced6da5f19be3dd92d10a8d8cd4d5841e5870
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  337
1 file changed, 200 insertions(+), 137 deletions(-)
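
One of the larger conceptual changes in the diff below is the CONFIG_DEBUG_SLAB_LEAK rework: the per-object status buffer (set_obj_status()/get_obj_status()) is dropped in favour of a single per-cache atomic flag, store_user_clean, which the alloc/free paths dirty and which leaks_show() uses to decide whether its scan has to be restarted. The following is a minimal userspace C sketch of that clean/dirty retry pattern, keeping only the flag-helper names from the diff; everything else (struct cache, scan_leaks) is illustrative, not kernel code.

/* Minimal sketch of the clean/dirty flag pattern; compile with any C11 compiler. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cache {
	atomic_int store_user_clean;	/* 1 = no alloc/free since the scan began */
};

static bool is_store_user_clean(struct cache *c)
{
	return atomic_load(&c->store_user_clean) == 1;
}

static void set_store_user_clean(struct cache *c)
{
	atomic_store(&c->store_user_clean, 1);
}

/*
 * Called from the alloc/free paths, as the diff does in slab_get_obj()
 * and cache_free_debugcheck(). Writes only when needed, mirroring the
 * kernel helper, so the flag's cache line is not dirtied on every call.
 */
static void set_store_user_dirty(struct cache *c)
{
	if (is_store_user_clean(c))
		atomic_store(&c->store_user_clean, 0);
}

/*
 * Stand-in for leaks_show(): repeat until a full pass completes with no
 * concurrent alloc/free invalidating the caller information collected.
 */
static void scan_leaks(struct cache *c)
{
	do {
		set_store_user_clean(c);
		/* ... drain cpu caches and walk full/partial slabs here ... */
	} while (!is_store_user_clean(c));
}

int main(void)
{
	struct cache c;

	atomic_init(&c.store_user_clean, 0);
	scan_leaks(&c);
	set_store_user_dirty(&c);	/* what an alloc or free would do */
	printf("clean after dirtying: %d\n", is_store_user_clean(&c));
	return 0;
}
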
diff --git a/mm/slab.c b/mm/slab.c
index 24a615d42d74..8fc762c178bd 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -390,36 +390,26 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
#endif
-#define OBJECT_FREE (0)
-#define OBJECT_ACTIVE (1)
-
#ifdef CONFIG_DEBUG_SLAB_LEAK
-static void set_obj_status(struct page *page, int idx, int val)
+static inline bool is_store_user_clean(struct kmem_cache *cachep)
{
- int freelist_size;
- char *status;
- struct kmem_cache *cachep = page->slab_cache;
-
- freelist_size = cachep->num * sizeof(freelist_idx_t);
- status = (char *)page->freelist + freelist_size;
- status[idx] = val;
+ return atomic_read(&cachep->store_user_clean) == 1;
}
-static inline unsigned int get_obj_status(struct page *page, int idx)
+static inline void set_store_user_clean(struct kmem_cache *cachep)
{
- int freelist_size;
- char *status;
- struct kmem_cache *cachep = page->slab_cache;
-
- freelist_size = cachep->num * sizeof(freelist_idx_t);
- status = (char *)page->freelist + freelist_size;
+ atomic_set(&cachep->store_user_clean, 1);
+}
- return status[idx];
+static inline void set_store_user_dirty(struct kmem_cache *cachep)
+{
+ if (is_store_user_clean(cachep))
+ atomic_set(&cachep->store_user_clean, 0);
}
#else
-static inline void set_obj_status(struct page *page, int idx, int val) {}
+static inline void set_store_user_dirty(struct kmem_cache *cachep) {}
#endif
@@ -480,9 +470,6 @@ static size_t calculate_freelist_size(int nr_objs, size_t align)
size_t freelist_size;
freelist_size = nr_objs * sizeof(freelist_idx_t);
- if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
- freelist_size += nr_objs * sizeof(char);
-
if (align)
freelist_size = ALIGN(freelist_size, align);
@@ -495,10 +482,7 @@ static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
int nr_objs;
size_t remained_size;
size_t freelist_size;
- int extra_space = 0;
- if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
- extra_space = sizeof(char);
/*
* Ignore padding for the initial guess. The padding
* is at most @align-1 bytes, and @buffer_size is at
@@ -507,7 +491,7 @@ static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
* into the memory allocation when taking the padding
* into account.
*/
- nr_objs = slab_size / (buffer_size + idx_size + extra_space);
+ nr_objs = slab_size / (buffer_size + idx_size);
/*
* This calculated number will be either the right
@@ -1670,6 +1654,14 @@ static void kmem_rcu_free(struct rcu_head *head)
}
#if DEBUG
+static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
+{
+ if (debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
+ (cachep->size % PAGE_SIZE) == 0)
+ return true;
+
+ return false;
+}
#ifdef CONFIG_DEBUG_PAGEALLOC
static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
@@ -1703,6 +1695,23 @@ static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
}
*addr++ = 0x87654321;
}
+
+static void slab_kernel_map(struct kmem_cache *cachep, void *objp,
+ int map, unsigned long caller)
+{
+ if (!is_debug_pagealloc_cache(cachep))
+ return;
+
+ if (caller)
+ store_stackinfo(cachep, objp, caller);
+
+ kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map);
+}
+
+#else
+static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp,
+ int map, unsigned long caller) {}
+
#endif
static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
@@ -1733,11 +1742,9 @@ static void dump_line(char *data, int offset, int limit)
if (bad_count == 1) {
error ^= POISON_FREE;
if (!(error & (error - 1))) {
- printk(KERN_ERR "Single bit error detected. Probably "
- "bad RAM.\n");
+ printk(KERN_ERR "Single bit error detected. Probably bad RAM.\n");
#ifdef CONFIG_X86
- printk(KERN_ERR "Run memtest86+ or a similar memory "
- "test tool.\n");
+ printk(KERN_ERR "Run memtest86+ or a similar memory test tool.\n");
#else
printk(KERN_ERR "Run a memory test tool.\n");
#endif
@@ -1781,6 +1788,9 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
int size, i;
int lines = 0;
+ if (is_debug_pagealloc_cache(cachep))
+ return;
+
realobj = (char *)objp + obj_offset(cachep);
size = cachep->object_size;
@@ -1846,24 +1856,14 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
void *objp = index_to_obj(cachep, page, i);
if (cachep->flags & SLAB_POISON) {
-#ifdef CONFIG_DEBUG_PAGEALLOC
- if (cachep->size % PAGE_SIZE == 0 &&
- OFF_SLAB(cachep))
- kernel_map_pages(virt_to_page(objp),
- cachep->size / PAGE_SIZE, 1);
- else
- check_poison_obj(cachep, objp);
-#else
check_poison_obj(cachep, objp);
-#endif
+ slab_kernel_map(cachep, objp, 1, 0);
}
if (cachep->flags & SLAB_RED_ZONE) {
if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
- slab_error(cachep, "start of a freed object "
- "was overwritten");
+ slab_error(cachep, "start of a freed object was overwritten");
if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
- slab_error(cachep, "end of a freed object "
- "was overwritten");
+ slab_error(cachep, "end of a freed object was overwritten");
}
}
}
@@ -1945,16 +1945,13 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
break;
if (flags & CFLGS_OFF_SLAB) {
- size_t freelist_size_per_obj = sizeof(freelist_idx_t);
/*
* Max number of objs-per-slab for caches which
* use off-slab slabs. Needed to avoid a possible
* looping condition in cache_grow().
*/
- if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
- freelist_size_per_obj += sizeof(char);
offslab_limit = size;
- offslab_limit /= freelist_size_per_obj;
+ offslab_limit /= sizeof(freelist_idx_t);
if (num > offslab_limit)
break;
@@ -2179,7 +2176,19 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
else
size += BYTES_PER_WORD;
}
-#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
+#endif
+
+ kasan_cache_create(cachep, &size, &flags);
+
+ size = ALIGN(size, cachep->align);
+ /*
+ * We should restrict the number of objects in a slab to implement
+ * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition.
+ */
+ if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
+ size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
+
+#if DEBUG
/*
* To activate debug pagealloc, off-slab management is necessary
* requirement. In early phase of initialization, small sized slab
@@ -2187,14 +2196,14 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
* to check size >= 256. It guarantees that all necessary small
* sized slab is initialized in current slab initialization sequence.
*/
- if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
+ if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
+ !slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
size >= 256 && cachep->object_size > cache_line_size() &&
- ALIGN(size, cachep->align) < PAGE_SIZE) {
- cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
+ size < PAGE_SIZE) {
+ cachep->obj_offset += PAGE_SIZE - size;
size = PAGE_SIZE;
}
#endif
-#endif
/*
* Determine if the slab management is 'on' or 'off' slab.
@@ -2203,20 +2212,13 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
* SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
*/
if (size >= OFF_SLAB_MIN_SIZE && !slab_early_init &&
- !(flags & SLAB_NOLEAKTRACE))
+ !(flags & SLAB_NOLEAKTRACE)) {
/*
* Size is large, assume best to place the slab management obj
* off-slab (should allow better packing of objs).
*/
flags |= CFLGS_OFF_SLAB;
-
- size = ALIGN(size, cachep->align);
- /*
- * We should restrict the number of objects in a slab to implement
- * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition.
- */
- if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE)
- size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align);
+ }
left_over = calculate_slab_order(cachep, size, cachep->align, flags);
@@ -2237,15 +2239,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
if (flags & CFLGS_OFF_SLAB) {
/* really off slab. No need for manual alignment */
freelist_size = calculate_freelist_size(cachep->num, 0);
-
-#ifdef CONFIG_PAGE_POISONING
- /* If we're going to use the generic kernel_map_pages()
- * poisoning, then it's going to smash the contents of
- * the redzone and userword anyhow, so switch them off.
- */
- if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
- flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
-#endif
}
cachep->colour_off = cache_line_size();
@@ -2261,7 +2254,19 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
cachep->size = size;
cachep->reciprocal_buffer_size = reciprocal_value(size);
- if (flags & CFLGS_OFF_SLAB) {
+#if DEBUG
+ /*
+ * If we're going to use the generic kernel_map_pages()
+ * poisoning, then it's going to smash the contents of
+ * the redzone and userword anyhow, so switch them off.
+ */
+ if (IS_ENABLED(CONFIG_PAGE_POISONING) &&
+ (cachep->flags & SLAB_POISON) &&
+ is_debug_pagealloc_cache(cachep))
+ cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
+#endif
+
+ if (OFF_SLAB(cachep)) {
cachep->freelist_cache = kmalloc_slab(freelist_size, 0u);
/*
* This is a possibility for one of the kmalloc_{dma,}_caches.
@@ -2480,17 +2485,14 @@ static inline void set_free_obj(struct page *page,
((freelist_idx_t *)(page->freelist))[idx] = val;
}
-static void cache_init_objs(struct kmem_cache *cachep,
- struct page *page)
+static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
{
+#if DEBUG
int i;
for (i = 0; i < cachep->num; i++) {
void *objp = index_to_obj(cachep, page, i);
-#if DEBUG
- /* need to poison the objs? */
- if (cachep->flags & SLAB_POISON)
- poison_obj(cachep, objp, POISON_FREE);
+ kasan_init_slab_obj(cachep, objp);
if (cachep->flags & SLAB_STORE_USER)
*dbg_userword(cachep, objp) = NULL;
@@ -2503,26 +2505,46 @@ static void cache_init_objs(struct kmem_cache *cachep,
* cache which they are a constructor for. Otherwise, deadlock.
* They must also be threaded.
*/
- if (cachep->ctor && !(cachep->flags & SLAB_POISON))
+ if (cachep->ctor && !(cachep->flags & SLAB_POISON)) {
+ kasan_unpoison_object_data(cachep,
+ objp + obj_offset(cachep));
cachep->ctor(objp + obj_offset(cachep));
+ kasan_poison_object_data(
+ cachep, objp + obj_offset(cachep));
+ }
if (cachep->flags & SLAB_RED_ZONE) {
if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
- slab_error(cachep, "constructor overwrote the"
- " end of an object");
+ slab_error(cachep, "constructor overwrote the end of an object");
if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
- slab_error(cachep, "constructor overwrote the"
- " start of an object");
+ slab_error(cachep, "constructor overwrote the start of an object");
}
- if ((cachep->size % PAGE_SIZE) == 0 &&
- OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
- kernel_map_pages(virt_to_page(objp),
- cachep->size / PAGE_SIZE, 0);
-#else
- if (cachep->ctor)
- cachep->ctor(objp);
+ /* need to poison the objs? */
+ if (cachep->flags & SLAB_POISON) {
+ poison_obj(cachep, objp, POISON_FREE);
+ slab_kernel_map(cachep, objp, 0, 0);
+ }
+ }
#endif
- set_obj_status(page, i, OBJECT_FREE);
+}
+
+static void cache_init_objs(struct kmem_cache *cachep,
+ struct page *page)
+{
+ int i;
+ void *objp;
+
+ cache_init_objs_debug(cachep, page);
+
+ for (i = 0; i < cachep->num; i++) {
+ /* constructor could break poison info */
+ if (DEBUG == 0 && cachep->ctor) {
+ objp = index_to_obj(cachep, page, i);
+ kasan_unpoison_object_data(cachep, objp);
+ cachep->ctor(objp);
+ kasan_poison_object_data(cachep, objp);
+ }
+
set_free_obj(page, i, i);
}
}
@@ -2548,6 +2570,11 @@ static void *slab_get_obj(struct kmem_cache *cachep, struct page *page,
WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
#endif
+#if DEBUG
+ if (cachep->flags & SLAB_STORE_USER)
+ set_store_user_dirty(cachep);
+#endif
+
return objp;
}
@@ -2564,8 +2591,8 @@ static void slab_put_obj(struct kmem_cache *cachep, struct page *page,
/* Verify double free bug */
for (i = page->active; i < cachep->num; i++) {
if (get_free_obj(page, i) == objnr) {
- printk(KERN_ERR "slab: double free detected in cache "
- "'%s', objp %p\n", cachep->name, objp);
+ printk(KERN_ERR "slab: double free detected in cache '%s', objp %p\n",
+ cachep->name, objp);
BUG();
}
}
@@ -2650,6 +2677,7 @@ static int cache_grow(struct kmem_cache *cachep,
slab_map_pages(cachep, page, freelist);
+ kasan_poison_slab(page);
cache_init_objs(cachep, page);
if (gfpflags_allow_blocking(local_flags))
@@ -2726,27 +2754,19 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
*dbg_redzone1(cachep, objp) = RED_INACTIVE;
*dbg_redzone2(cachep, objp) = RED_INACTIVE;
}
- if (cachep->flags & SLAB_STORE_USER)
+ if (cachep->flags & SLAB_STORE_USER) {
+ set_store_user_dirty(cachep);
*dbg_userword(cachep, objp) = (void *)caller;
+ }
objnr = obj_to_index(cachep, page, objp);
BUG_ON(objnr >= cachep->num);
BUG_ON(objp != index_to_obj(cachep, page, objnr));
- set_obj_status(page, objnr, OBJECT_FREE);
if (cachep->flags & SLAB_POISON) {
-#ifdef CONFIG_DEBUG_PAGEALLOC
- if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
- store_stackinfo(cachep, objp, caller);
- kernel_map_pages(virt_to_page(objp),
- cachep->size / PAGE_SIZE, 0);
- } else {
- poison_obj(cachep, objp, POISON_FREE);
- }
-#else
poison_obj(cachep, objp, POISON_FREE);
-#endif
+ slab_kernel_map(cachep, objp, 0, caller);
}
return objp;
}
@@ -2868,20 +2888,11 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
gfp_t flags, void *objp, unsigned long caller)
{
- struct page *page;
-
if (!objp)
return objp;
if (cachep->flags & SLAB_POISON) {
-#ifdef CONFIG_DEBUG_PAGEALLOC
- if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
- kernel_map_pages(virt_to_page(objp),
- cachep->size / PAGE_SIZE, 1);
- else
- check_poison_obj(cachep, objp);
-#else
check_poison_obj(cachep, objp);
-#endif
+ slab_kernel_map(cachep, objp, 1, 0);
poison_obj(cachep, objp, POISON_INUSE);
}
if (cachep->flags & SLAB_STORE_USER)
@@ -2890,8 +2901,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
if (cachep->flags & SLAB_RED_ZONE) {
if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
- slab_error(cachep, "double free, or memory outside"
- " object was overwritten");
+ slab_error(cachep, "double free, or memory outside object was overwritten");
printk(KERN_ERR
"%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
objp, *dbg_redzone1(cachep, objp),
@@ -2901,8 +2911,6 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
*dbg_redzone2(cachep, objp) = RED_ACTIVE;
}
- page = virt_to_head_page(objp);
- set_obj_status(page, obj_to_index(cachep, page, objp), OBJECT_ACTIVE);
objp += obj_offset(cachep);
if (cachep->ctor && cachep->flags & SLAB_POISON)
cachep->ctor(objp);
@@ -3366,6 +3374,16 @@ free_done:
static inline void __cache_free(struct kmem_cache *cachep, void *objp,
unsigned long caller)
{
+ /* Put the object into the quarantine, don't touch it for now. */
+ if (kasan_slab_free(cachep, objp))
+ return;
+
+ ___cache_free(cachep, objp, caller);
+}
+
+void ___cache_free(struct kmem_cache *cachep, void *objp,
+ unsigned long caller)
+{
struct array_cache *ac = cpu_cache_get(cachep);
check_irq_off();
@@ -3406,6 +3424,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
void *ret = slab_alloc(cachep, flags, _RET_IP_);
+ kasan_slab_alloc(cachep, ret, flags);
trace_kmem_cache_alloc(_RET_IP_, ret,
cachep->object_size, cachep->size, flags);
@@ -3434,6 +3453,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
ret = slab_alloc(cachep, flags, _RET_IP_);
+ kasan_kmalloc(cachep, ret, size, flags);
trace_kmalloc(_RET_IP_, ret,
size, cachep->size, flags);
return ret;
@@ -3457,6 +3477,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
+ kasan_slab_alloc(cachep, ret, flags);
trace_kmem_cache_alloc_node(_RET_IP_, ret,
cachep->object_size, cachep->size,
flags, nodeid);
@@ -3475,6 +3496,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
+ kasan_kmalloc(cachep, ret, size, flags);
trace_kmalloc_node(_RET_IP_, ret,
size, cachep->size,
flags, nodeid);
@@ -3487,11 +3509,15 @@ static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
struct kmem_cache *cachep;
+ void *ret;
cachep = kmalloc_slab(size, flags);
if (unlikely(ZERO_OR_NULL_PTR(cachep)))
return cachep;
- return kmem_cache_alloc_node_trace(cachep, flags, node, size);
+ ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
+ kasan_kmalloc(cachep, ret, size, flags);
+
+ return ret;
}
void *__kmalloc_node(size_t size, gfp_t flags, int node)
@@ -3525,6 +3551,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
return cachep;
ret = slab_alloc(cachep, flags, caller);
+ kasan_kmalloc(cachep, ret, size, flags);
trace_kmalloc(caller, ret,
size, cachep->size, flags);
@@ -3998,8 +4025,7 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
unsigned long node_frees = cachep->node_frees;
unsigned long overflows = cachep->node_overflow;
- seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
- "%4lu %4lu %4lu %4lu %4lu",
+ seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
allocs, high, grown,
reaped, errors, max_freeable, node_allocs,
node_frees, overflows);
@@ -4104,15 +4130,34 @@ static void handle_slab(unsigned long *n, struct kmem_cache *c,
struct page *page)
{
void *p;
- int i;
+ int i, j;
+ unsigned long v;
if (n[0] == n[1])
return;
for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
- if (get_obj_status(page, i) != OBJECT_ACTIVE)
+ bool active = true;
+
+ for (j = page->active; j < c->num; j++) {
+ if (get_free_obj(page, j) == i) {
+ active = false;
+ break;
+ }
+ }
+
+ if (!active)
+ continue;
+
+ /*
+ * probe_kernel_read() is used for DEBUG_PAGEALLOC. page table
+ * mapping is established when actual object allocation and
+ * we could mistakenly access the unmapped object in the cpu
+ * cache.
+ */
+ if (probe_kernel_read(&v, dbg_userword(c, p), sizeof(v)))
continue;
- if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
+ if (!add_caller(n, v))
return;
}
}
@@ -4148,21 +4193,31 @@ static int leaks_show(struct seq_file *m, void *p)
if (!(cachep->flags & SLAB_RED_ZONE))
return 0;
- /* OK, we can do it */
+ /*
+ * Set store_user_clean and start to grab stored user information
+ * for all objects on this cache. If some alloc/free requests comes
+ * during the processing, information would be wrong so restart
+ * whole processing.
+ */
+ do {
+ set_store_user_clean(cachep);
+ drain_cpu_caches(cachep);
- x[1] = 0;
+ x[1] = 0;
- for_each_kmem_cache_node(cachep, node, n) {
+ for_each_kmem_cache_node(cachep, node, n) {
- check_irq_on();
- spin_lock_irq(&n->list_lock);
+ check_irq_on();
+ spin_lock_irq(&n->list_lock);
+
+ list_for_each_entry(page, &n->slabs_full, lru)
+ handle_slab(x, cachep, page);
+ list_for_each_entry(page, &n->slabs_partial, lru)
+ handle_slab(x, cachep, page);
+ spin_unlock_irq(&n->list_lock);
+ }
+ } while (!is_store_user_clean(cachep));
- list_for_each_entry(page, &n->slabs_full, lru)
- handle_slab(x, cachep, page);
- list_for_each_entry(page, &n->slabs_partial, lru)
- handle_slab(x, cachep, page);
- spin_unlock_irq(&n->list_lock);
- }
name = cachep->name;
if (x[0] == x[1]) {
/* Increase the buffer size */
@@ -4272,10 +4327,18 @@ const char *__check_heap_object(const void *ptr, unsigned long n,
*/
size_t ksize(const void *objp)
{
+ size_t size;
+
BUG_ON(!objp);
if (unlikely(objp == ZERO_SIZE_PTR))
return 0;
- return virt_to_cache(objp)->object_size;
+ size = virt_to_cache(objp)->object_size;
+ /* We assume that ksize callers could use the whole allocated area,
+ * so we need to unpoison this area.
+ */
+ kasan_krealloc(objp, size, GFP_NOWAIT);
+
+ return size;
}
EXPORT_SYMBOL(ksize);
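
The most visible behavioural change in the diff above is the split of __cache_free(): kasan_slab_free() may take ownership of the object and park it in KASAN's quarantine, in which case the real release is deferred until the quarantine is drained and ___cache_free() runs. Below is a minimal userspace C sketch of that deferred-free shape; it reuses the function names from the diff, but the bodies are illustrative stand-ins (the real kasan_slab_free() poisons the object and budgets the quarantine by bytes, not by a fixed slot count).

/* Deferred-free sketch; compile with any C11 compiler. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define QUARANTINE_SLOTS 4

struct quarantine {
	void *slot[QUARANTINE_SLOTS];
	int used;
};

static struct quarantine q;	/* illustrative stand-in for KASAN's quarantine */

static void ___cache_free(void *objp)
{
	free(objp);		/* the "real" free */
}

/* Returns true if the object was queued instead of freed. */
static bool kasan_slab_free(void *objp)
{
	if (q.used < QUARANTINE_SLOTS) {
		q.slot[q.used++] = objp;	/* parked; a use-after-free now hits quarantined memory */
		return true;
	}
	return false;
}

static void __cache_free(void *objp)
{
	if (kasan_slab_free(objp))
		return;		/* deferred; the quarantine owns the object now */
	___cache_free(objp);
}

static void quarantine_drain(void)
{
	while (q.used)
		___cache_free(q.slot[--q.used]);
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		__cache_free(malloc(32));	/* first 4 are parked, the last 2 are freed directly */
	quarantine_drain();
	printf("quarantine empty: %d\n", q.used == 0);
	return 0;
}
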