| author | Pekka Enberg <penberg@kernel.org> | 2013-11-11 18:09:00 +0200 |
|---|---|---|
| committer | Pekka Enberg <penberg@kernel.org> | 2013-11-11 18:09:00 +0200 |
| commit | ea982d9ffdfaa22552e9b0b9be94f459afa36102 (patch) | |
| tree | d3f3c3d9e43f4bc616b0c7156b54b3760b48f60e /include | |
| parent | d56791b38e34e480d869d1b88735df16c81aa684 (diff) | |
| parent | 7e00735520ffb00bda3e08c441d0a4dba42913a7 (diff) | |
Merge branch 'slab/struct-page' into slab/next
Diffstat (limited to 'include')
| -rw-r--r-- | include/linux/mm_types.h | 24 |
| -rw-r--r-- | include/linux/slab.h | 9 |
| -rw-r--r-- | include/linux/slab_def.h | 4 |
3 files changed, 26 insertions, 11 deletions
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index faf4b7c1ad12..95bf0c5a7eb9 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -42,18 +42,22 @@ struct page {
 	/* First double word block */
 	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
-	struct address_space *mapping;	/* If low bit clear, points to
-					 * inode address_space, or NULL.
-					 * If page mapped as anonymous
-					 * memory, low bit is set, and
-					 * it points to anon_vma object:
-					 * see PAGE_MAPPING_ANON below.
-					 */
+	union {
+		struct address_space *mapping;	/* If low bit clear, points to
+						 * inode address_space, or NULL.
+						 * If page mapped as anonymous
+						 * memory, low bit is set, and
+						 * it points to anon_vma object:
+						 * see PAGE_MAPPING_ANON below.
+						 */
+		void *s_mem;			/* slab first object */
+	};
+
 	/* Second double word */
 	struct {
 		union {
 			pgoff_t index;		/* Our offset within mapping. */
-			void *freelist;		/* slub/slob first free object */
+			void *freelist;		/* sl[aou]b first free object */
 			bool pfmemalloc;	/* If set by the page allocator,
 						 * ALLOC_NO_WATERMARKS was set
 						 * and the low watermark was not
@@ -109,6 +113,7 @@ struct page {
 				};
 				atomic_t _count;		/* Usage count, see below. */
 			};
+			unsigned int active;	/* SLAB */
 		};
 	};
 
@@ -130,6 +135,9 @@ struct page {
 
 		struct list_head list;	/* slobs list of pages */
 		struct slab *slab_page; /* slab fields */
+		struct rcu_head rcu_head;	/* Used by SLAB
+						 * when destroying via RCU
+						 */
 	};
 
 	/* Remainder is not double word aligned */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 6c5cc0ea8713..caaad51fee1f 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -51,7 +51,14 @@
 *  }
 *  rcu_read_unlock();
 *
- * See also the comment on struct slab_rcu in mm/slab.c.
+ * This is useful if we need to approach a kernel structure obliquely,
+ * from its address obtained without the usual locking. We can lock
+ * the structure to stabilize it and check it's still at the given address,
+ * only if we can be sure that the memory has not been meanwhile reused
+ * for some other kind of object (which our subsystem's lock might corrupt).
+ *
+ * rcu_read_lock before reading the address, then rcu_read_unlock after
+ * taking the spinlock within the structure expected at that address.
 */
 #define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index cd401580bdd3..ca82e8ff89fa 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -41,8 +41,8 @@ struct kmem_cache {
 
 	size_t colour;			/* cache colouring range */
 	unsigned int colour_off;	/* colour offset */
-	struct kmem_cache *slabp_cache;
-	unsigned int slab_size;
+	struct kmem_cache *freelist_cache;
+	unsigned int freelist_size;
 
 	/* constructor func */
 	void (*ctor)(void *obj);
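
The expanded SLAB_DESTROY_BY_RCU comment in the slab.h hunk above describes the lookup discipline only in prose. The sketch below is a hypothetical illustration of that pattern and is not part of this merge: my_obj, my_cache, my_table, my_obj_ctor and my_lookup are invented names, and it assumes the code that frees an object takes the object's embedded spinlock and unpublishes the pointer before calling kmem_cache_free().

```c
/*
 * Hypothetical user of a SLAB_DESTROY_BY_RCU cache: take rcu_read_lock()
 * before reading the published address, take the spinlock embedded in the
 * structure expected at that address, revalidate its identity, and only
 * then drop the RCU read lock.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/init.h>

struct my_obj {
	spinlock_t lock;	/* taken by lookups and by the freer */
	unsigned long key;	/* identity, cleared by the freer under ->lock */
};

static struct kmem_cache *my_cache;
static struct my_obj __rcu *my_table[64];	/* set with rcu_assign_pointer() */

/*
 * With SLAB_DESTROY_BY_RCU the lock must be set up in the cache constructor:
 * the memory stays a struct my_obj across free/re-alloc within a grace
 * period, so re-initializing the lock on every allocation could clobber a
 * lock that a concurrent lookup already holds.
 */
static void my_obj_ctor(void *p)
{
	struct my_obj *obj = p;

	spin_lock_init(&obj->lock);
}

static int __init my_cache_init(void)
{
	my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj), 0,
				     SLAB_DESTROY_BY_RCU, my_obj_ctor);
	return my_cache ? 0 : -ENOMEM;
}

/* Returns the object locked if it still carries @key, otherwise NULL. */
static struct my_obj *my_lookup(unsigned int slot, unsigned long key)
{
	struct my_obj *obj;

	rcu_read_lock();			/* before reading the address */
	obj = rcu_dereference(my_table[slot]);
	if (obj) {
		spin_lock(&obj->lock);		/* stabilize the structure */
		if (obj->key != key) {		/* memory was reused for another my_obj */
			spin_unlock(&obj->lock);
			obj = NULL;
		}
	}
	rcu_read_unlock();			/* after taking the embedded spinlock */

	return obj;
}
```

The key check after spin_lock() is what the comment means by "check it's still at the given address": the deferred freeing only guarantees the memory still holds some my_obj for the duration of the grace period, not that it is still the object the lookup originally found.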
