| author | Jeremy Fitzhardinge <jeremy@goop.org> | 2008-10-28 19:22:34 +1100 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-11-07 10:05:59 +0100 |
| commit | 9b46333406b9cb3397ab538485a4d57c316af0ff (patch) | |
| tree | 02c347d86d128b21b3138cb56c7c544bc4e1b930 | |
| parent | 4bab0ea1d42dd1927af9df6fbf0003fc00617c50 (diff) | |
vmap: cope with vm_unmap_aliases before vmalloc_init()
Xen can end up calling vm_unmap_aliases() before vmalloc_init() has
been called. In this case it's safe to make it a simple no-op.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Linux Memory Management List <linux-mm@kvack.org>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
| -rw-r--r-- | mm/vmalloc.c | 7 |
1 file changed, 7 insertions, 0 deletions
```diff
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 66fad3fc02b1..ba6b0f5f7fac 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -592,6 +592,8 @@ static void free_unmap_vmap_area_addr(unsigned long addr)
 
 #define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
 
+static bool vmap_initialized __read_mostly = false;
+
 struct vmap_block_queue {
 	spinlock_t lock;
 	struct list_head free;
@@ -828,6 +830,9 @@ void vm_unmap_aliases(void)
 	int cpu;
 	int flush = 0;
 
+	if (unlikely(!vmap_initialized))
+		return;
+
 	for_each_possible_cpu(cpu) {
 		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
 		struct vmap_block *vb;
@@ -942,6 +947,8 @@ void __init vmalloc_init(void)
 		INIT_LIST_HEAD(&vbq->dirty);
 		vbq->nr_dirty = 0;
 	}
+
+	vmap_initialized = true;
 }
 
 void unmap_kernel_range(unsigned long addr, unsigned long size)
```
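For context, here is a minimal, self-contained userspace C sketch of the initialization-guard pattern the patch applies: the flushing entry point returns early as a no-op until the init routine has set a module-level flag. This is only an illustration of the idea; the `demo_*` names are hypothetical and are not kernel symbols.

```c
#include <stdbool.h>
#include <stdio.h>

/* Set to true only once demo_init() has finished setting things up,
 * mirroring the vmap_initialized flag introduced by the patch. */
static bool demo_initialized = false;

/* Counterpart of vmalloc_init(): do the real setup, then flip the flag. */
static void demo_init(void)
{
	/* ... allocate and initialize internal structures here ... */
	demo_initialized = true;
}

/* Counterpart of vm_unmap_aliases(): if init has not run yet, there is
 * nothing to flush, so quietly do nothing instead of touching
 * uninitialized state. */
static void demo_flush(void)
{
	if (!demo_initialized)
		return;

	printf("flushing aliases\n");
}

int main(void)
{
	demo_flush();	/* called "too early": safe no-op */
	demo_init();
	demo_flush();	/* now performs the real work */
	return 0;
}
```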