author    | Lai Jiangshan <laijs@cn.fujitsu.com> | 2011-03-10 15:22:00 +0800
committer | Pekka Enberg <penberg@kernel.org>    | 2011-03-11 18:06:34 +0200
commit    | da9a638c6f8fc0633fa94a334f1c053f5e307177
tree      | 786966087ccd4fd2c97757ce7b722f728c17ca2b /mm/slub.c
parent    | ab9a0f196f2f4f080df54402493ea3dc31b5243e
slub,rcu: don't assume the size of struct rcu_head
The size of struct rcu_head may change. When it becomes larger than struct
page's lru field, overlaying the rcu_head on the lru would pollute the rest
of the page array.

In that situation, reserve space for struct rcu_head at the end of the slab
when the slab is allocated.
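To illustrate the size check the patch keys off, here is a small userspace sketch; fake_page, fake_list_head and fake_rcu_head are made-up stand-ins for the kernel structures, not their real definitions:

```c
#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins for the kernel structures involved. */
struct fake_list_head { struct fake_list_head *next, *prev; };
struct fake_rcu_head  {
	struct fake_rcu_head *next;
	void (*func)(struct fake_rcu_head *);
};

struct fake_page {
	unsigned long flags;
	struct fake_list_head lru;	/* the field the rcu_head is overlaid on */
	void *slab;
};

int main(void)
{
	/*
	 * Mirrors the patch's need_reserve_slab_rcu test: only when the
	 * rcu_head no longer fits inside page->lru must extra bytes be
	 * reserved at the end of the slab instead of reusing the lru.
	 */
	int need_reserve = sizeof(((struct fake_page *)NULL)->lru) <
			   sizeof(struct fake_rcu_head);

	printf("sizeof(lru)=%zu sizeof(rcu_head)=%zu -> reserve? %s\n",
	       sizeof(((struct fake_page *)NULL)->lru),
	       sizeof(struct fake_rcu_head),
	       need_reserve ? "yes" : "no");
	return 0;
}
```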
Changes from v1:
use VM_BUG_ON() instead of BUG_ON()
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 30
1 file changed, 25 insertions(+), 5 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index d3d17677bab5..ebba3eb19369 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1254,21 +1254,38 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	__free_pages(page, order);
 }
 
+#define need_reserve_slab_rcu						\
+	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
+
 static void rcu_free_slab(struct rcu_head *h)
 {
 	struct page *page;
 
-	page = container_of((struct list_head *)h, struct page, lru);
+	if (need_reserve_slab_rcu)
+		page = virt_to_head_page(h);
+	else
+		page = container_of((struct list_head *)h, struct page, lru);
+
 	__free_slab(page->slab, page);
 }
 
 static void free_slab(struct kmem_cache *s, struct page *page)
 {
 	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
-		/*
-		 * RCU free overloads the RCU head over the LRU
-		 */
-		struct rcu_head *head = (void *)&page->lru;
+		struct rcu_head *head;
+
+		if (need_reserve_slab_rcu) {
+			int order = compound_order(page);
+			int offset = (PAGE_SIZE << order) - s->reserved;
+
+			VM_BUG_ON(s->reserved != sizeof(*head));
+			head = page_address(page) + offset;
+		} else {
+			/*
+			 * RCU free overloads the RCU head over the LRU
+			 */
+			head = (void *)&page->lru;
+		}
 
 		call_rcu(head, rcu_free_slab);
 	} else
@@ -2356,6 +2373,9 @@ static int kmem_cache_open(struct kmem_cache *s,
 	s->flags = kmem_cache_flags(size, flags, name, ctor);
 	s->reserved = 0;
 
+	if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
+		s->reserved = sizeof(struct rcu_head);
+
 	if (!calculate_sizes(s, -1))
 		goto error;
 	if (disable_higher_order_debug) {
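As a rough userspace illustration of the free_slab() arithmetic above, the sketch below places a fake rcu_head in the last bytes of a heap buffer standing in for the slab's pages; FAKE_PAGE_SIZE, the order value and all fake_* names are assumptions for the demo, not kernel code:

```c
#include <stdio.h>
#include <stdlib.h>

#define FAKE_PAGE_SIZE 4096UL		/* assumed page size for the demo */

struct fake_rcu_head { void *next; void (*func)(void *); };

int main(void)
{
	int order = 1;				/* pretend the slab is an order-1 compound page */
	size_t slab_bytes = FAKE_PAGE_SIZE << order;
	size_t reserved = sizeof(struct fake_rcu_head);

	/* Stand-in for page_address(page): the start of the slab's memory. */
	unsigned char *slab = malloc(slab_bytes);
	if (!slab)
		return 1;

	/*
	 * Same computation as the reserved branch of free_slab(): the
	 * rcu_head occupies the final 'reserved' bytes of the slab, so it
	 * never overlaps the objects laid out before it.
	 */
	size_t offset = slab_bytes - reserved;
	struct fake_rcu_head *head = (struct fake_rcu_head *)(slab + offset);

	head->next = NULL;
	head->func = NULL;

	printf("slab spans %zu bytes, rcu_head placed at offset %zu (%zu bytes)\n",
	       slab_bytes, offset, reserved);
	free(slab);
	return 0;
}
```

In the kernel path, rcu_free_slab() then recovers the struct page from that interior pointer with virt_to_head_page(), since the rcu_head no longer sits inside struct page and container_of() on the lru would be wrong.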