author     John W. Linville <linville@tuxdriver.com>    2006-05-05 16:50:23 -0400
committer  John W. Linville <linville@tuxdriver.com>    2006-05-05 16:50:23 -0400
commit     aad61439e6a00bdb72cb649e11f6e166590c5f66 (patch)
tree       2279f3c2a15f81526d14182c6acb358cafd0b359 /mm
parent     3c304956755fa63ee80ca51ce38078fe1c4e8818 (diff)
parent     d98550e334715b2d9e45f8f0f4e1608720108640 (diff)
Merge branch 'from-linus' into upstream
Diffstat (limited to 'mm')
-rw-r--r--   mm/filemap.c          32
-rw-r--r--   mm/memory_hotplug.c    6
-rw-r--r--   mm/migrate.c          11
-rw-r--r--   mm/page_alloc.c        2
-rw-r--r--   mm/slab.c              5
-rw-r--r--   mm/sparse.c            9
-rw-r--r--   mm/vmscan.c            2
7 files changed, 59 insertions, 8 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 3ef20739e725..fd57442186cb 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -697,6 +697,38 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
 	return ret;
 }
 
+/**
+ * find_get_pages_contig - gang contiguous pagecache lookup
+ * @mapping:	The address_space to search
+ * @index:	The starting page index
+ * @nr_pages:	The maximum number of pages
+ * @pages:	Where the resulting pages are placed
+ *
+ * find_get_pages_contig() works exactly like find_get_pages(), except
+ * that the returned number of pages are guaranteed to be contiguous.
+ *
+ * find_get_pages_contig() returns the number of pages which were found.
+ */
+unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
+			       unsigned int nr_pages, struct page **pages)
+{
+	unsigned int i;
+	unsigned int ret;
+
+	read_lock_irq(&mapping->tree_lock);
+	ret = radix_tree_gang_lookup(&mapping->page_tree,
+				(void **)pages, index, nr_pages);
+	for (i = 0; i < ret; i++) {
+		if (pages[i]->mapping == NULL || pages[i]->index != index)
+			break;
+
+		page_cache_get(pages[i]);
+		index++;
+	}
+	read_unlock_irq(&mapping->tree_lock);
+	return i;
+}
+
 /*
  * Like find_get_pages, except we only return pages which are tagged with
  * `tag'.   We update *index to index the next page for the traversal.
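For context, a minimal usage sketch of the helper added above. The caller function and the batch size of 16 are hypothetical, not part of this commit; the refcounting rule (each returned page was pinned with page_cache_get() and must be released) follows from the code in the hunk.

	/* Hypothetical caller: pull up to 16 pages that are contiguous in
	 * the page cache starting at 'index'. Each returned page carries a
	 * reference taken inside find_get_pages_contig(), so drop it with
	 * page_cache_release() when done. */
	static void example_contig_lookup(struct address_space *mapping,
					  pgoff_t index)
	{
		struct page *pages[16];
		unsigned int i, nr;

		nr = find_get_pages_contig(mapping, index, 16, pages);
		for (i = 0; i < nr; i++) {
			/* ... use pages[i] ... */
			page_cache_release(pages[i]);
		}
	}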
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 1fe76d963ac2..1ae2b2cc3a54 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -69,12 +69,16 @@ int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
 	for (i = 0; i < nr_pages; i += PAGES_PER_SECTION) {
 		err = __add_section(zone, phys_start_pfn + i);
 
-		if (err)
+		/* We want to keep adding the rest of the
+		 * sections if the first ones already exist
+		 */
+		if (err && (err != -EEXIST))
 			break;
 	}
 
 	return err;
 }
+EXPORT_SYMBOL_GPL(__add_pages);
 
 static void grow_zone_span(struct zone *zone,
 		unsigned long start_pfn, unsigned long end_pfn)
diff --git a/mm/migrate.c b/mm/migrate.c
index d444229f2599..1c25040693d2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -439,6 +439,17 @@ redo:
 		goto unlock_both;
 	}
 
+	/* Make sure the dirty bit is up to date */
+	if (try_to_unmap(page, 1) == SWAP_FAIL) {
+		rc = -EPERM;
+		goto unlock_both;
+	}
+
+	if (page_mapcount(page)) {
+		rc = -EAGAIN;
+		goto unlock_both;
+	}
+
 	/*
 	 * Default handling if a filesystem does not provide
 	 * a migration function. We can only migrate clean
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 123c60586740..ea77c999047e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1962,7 +1962,7 @@ static inline void free_zone_pagesets(int cpu)
 	}
 }
 
-static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
+static int pageset_cpuup_callback(struct notifier_block *nfb,
 		unsigned long action,
 		void *hcpu)
 {
diff --git a/mm/slab.c b/mm/slab.c
index e6ef9bd52335..c32af7e7581e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -979,7 +979,8 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 		 * That way we could avoid the overhead of putting the objects
 		 * into the free lists and getting them back later.
 		 */
-		transfer_objects(rl3->shared, ac, ac->limit);
+		if (rl3->shared)
+			transfer_objects(rl3->shared, ac, ac->limit);
 
 		free_block(cachep, ac->entry, ac->avail, node);
 		ac->avail = 0;
@@ -1036,7 +1037,7 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
 
 #endif
 
-static int __devinit cpuup_callback(struct notifier_block *nfb,
+static int cpuup_callback(struct notifier_block *nfb,
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
diff --git a/mm/sparse.c b/mm/sparse.c
index 0a51f36ba3a1..d7c32de99ee8 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -32,7 +32,10 @@ static struct mem_section *sparse_index_alloc(int nid)
 	unsigned long array_size = SECTIONS_PER_ROOT *
 				   sizeof(struct mem_section);
 
-	section = alloc_bootmem_node(NODE_DATA(nid), array_size);
+	if (system_state == SYSTEM_RUNNING)
+		section = kmalloc_node(array_size, GFP_KERNEL, nid);
+	else
+		section = alloc_bootmem_node(NODE_DATA(nid), array_size);
 
 	if (section)
 		memset(section, 0, array_size);
@@ -281,9 +284,9 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
 
 	ret = sparse_init_one_section(ms, section_nr, memmap);
 
-	if (ret <= 0)
-		__kfree_section_memmap(memmap, nr_pages);
 out:
 	pgdat_resize_unlock(pgdat, &flags);
+	if (ret <= 0)
+		__kfree_section_memmap(memmap, nr_pages);
 	return ret;
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index acdf001d6941..4649a63a8cb6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1328,7 +1328,7 @@ repeat:
    not required for correctness.  So if the last cpu in a node goes
    away, we get changed to run anywhere: as the first one comes back,
    restore their cpu bindings. */
-static int __devinit cpu_callback(struct notifier_block *nfb,
+static int cpu_callback(struct notifier_block *nfb,
 				  unsigned long action, void *hcpu)
 {
 	pg_data_t *pgdat;
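The page_alloc.c, slab.c, and vmscan.c hunks each drop a __cpuinit/__devinit annotation from a CPU hotplug notifier callback: in some configurations those section annotations let the function be discarded after boot, which is unsafe for a callback that can fire whenever a CPU is later hot-added or removed. As a minimal sketch of the 2.6-era notifier shape these callbacks share (the names my_cpu_callback and my_cpu_notifier are hypothetical, for illustration only):

	/* Hypothetical notifier: must stay resident after boot, hence no
	 * __devinit/__cpuinit, because it runs on every hotplug event. */
	static int my_cpu_callback(struct notifier_block *nfb,
				   unsigned long action, void *hcpu)
	{
		long cpu = (long)hcpu;

		switch (action) {
		case CPU_UP_PREPARE:
			printk(KERN_INFO "setting up state for cpu %ld\n", cpu);
			break;
		case CPU_DEAD:
			printk(KERN_INFO "tearing down state for cpu %ld\n", cpu);
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block my_cpu_notifier = {
		.notifier_call = my_cpu_callback,
	};

	/* registered once at init: register_cpu_notifier(&my_cpu_notifier); */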