Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig       |  4
-rw-r--r--  mm/mempolicy.c   |  7
-rw-r--r--  mm/mmap.c        |  5
-rw-r--r--  mm/mprotect.c    |  3
-rw-r--r--  mm/page_alloc.c  |  3
-rw-r--r--  mm/slab.c        | 47
-rw-r--r--  mm/swapfile.c    |  1
-rw-r--r--  mm/vmscan.c      |  4
8 files changed, 43 insertions, 31 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 4e9937ac3529..391ffc54d136 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -29,7 +29,7 @@ config FLATMEM_MANUAL
If unsure, choose this option (Flat Memory) over any other.
config DISCONTIGMEM_MANUAL
- bool "Discontigious Memory"
+ bool "Discontiguous Memory"
depends on ARCH_DISCONTIGMEM_ENABLE
help
This option provides enhanced support for discontiguous
@@ -52,7 +52,7 @@ config SPARSEMEM_MANUAL
memory hotplug systems. This is normal.
For many other systems, this will be an alternative to
- "Discontigious Memory". This option provides some potential
+ "Discontiguous Memory". This option provides some potential
performance benefits, along with decreased code complexity,
but it is newer, and more experimental.
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index afa06e184d88..9033f0859aa8 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -333,8 +333,13 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
if (prev && prev->vm_end < vma->vm_start)
return ERR_PTR(-EFAULT);
if ((flags & MPOL_MF_STRICT) && !is_vm_hugetlb_page(vma)) {
+ unsigned long endvma = vma->vm_end;
+ if (endvma > end)
+ endvma = end;
+ if (vma->vm_start > start)
+ start = vma->vm_start;
err = check_pgd_range(vma->vm_mm,
- vma->vm_start, vma->vm_end, nodes);
+ start, endvma, nodes);
if (err) {
first = ERR_PTR(err);
break;
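
The mempolicy.c hunk clamps the page-table walk to the overlap of the requested policy range [start, end) and the current VMA, instead of always walking the whole VMA. A minimal userspace sketch of that clamping arithmetic, with an invented struct range standing in for a VMA's [vm_start, vm_end) span:

    #include <assert.h>

    /* Illustrative stand-in for a VMA's [vm_start, vm_end) span. */
    struct range { unsigned long start, end; };

    /* Clamp the checked span to the intersection of the request and the
     * vma, mirroring what the hunk does before calling check_pgd_range(). */
    static struct range clamp_to_vma(unsigned long start, unsigned long end,
                                     struct range vma)
    {
        struct range r = { start, end };
        if (vma.end < r.end)
            r.end = vma.end;          /* endvma = min(vma->vm_end, end) */
        if (vma.start > r.start)
            r.start = vma.start;      /* start  = max(vma->vm_start, start) */
        return r;
    }

    int main(void)
    {
        struct range vma = { 0x1000, 0x9000 };
        struct range r = clamp_to_vma(0x4000, 0x20000, vma);
        assert(r.start == 0x4000 && r.end == 0x9000);
        return 0;
    }
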
diff --git a/mm/mmap.c b/mm/mmap.c
index 12334aecf8ad..fa11d91242e8 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1640,7 +1640,7 @@ static void unmap_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
/*
* Get rid of page table information in the indicated region.
*
- * Called with the page table lock held.
+ * Called with the mm semaphore held.
*/
static void unmap_region(struct mm_struct *mm,
struct vm_area_struct *vma, struct vm_area_struct *prev,
@@ -1993,6 +1993,9 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
__vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
if (__vma && __vma->vm_start < vma->vm_end)
return -ENOMEM;
+ if ((vma->vm_flags & VM_ACCOUNT) &&
+ security_vm_enough_memory(vma_pages(vma)))
+ return -ENOMEM;
vma_link(mm, vma, prev, rb_link, rb_parent);
return 0;
}
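
With this hunk, insert_vm_struct() charges accountable mappings against the overcommit limit before linking the VMA, and refuses the insert with -ENOMEM if the charge fails. A hedged userspace model of that "charge first, link only on success" ordering; charge_pages() and the flag value are invented for the sketch and only stand in for security_vm_enough_memory() and VM_ACCOUNT:

    #include <errno.h>
    #include <stdio.h>

    #define VM_ACCOUNT 0x1      /* illustrative flag bit, not the kernel value */

    static long committed, limit = 100;

    /* Stand-in for security_vm_enough_memory(): returns 0 on success. */
    static int charge_pages(long pages)
    {
        if (committed + pages > limit)
            return -ENOMEM;
        committed += pages;
        return 0;
    }

    /* Model of the ordering: account before making the mapping visible. */
    static int insert_mapping(unsigned long flags, long pages)
    {
        if ((flags & VM_ACCOUNT) && charge_pages(pages))
            return -ENOMEM;     /* nothing linked, nothing to undo */
        /* ... linking the vma would go here ... */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", insert_mapping(VM_ACCOUNT, 60));   /* 0 */
        printf("%d\n", insert_mapping(VM_ACCOUNT, 60));   /* -ENOMEM */
        return 0;
    }
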
diff --git a/mm/mprotect.c b/mm/mprotect.c
index e9fbd013ad9a..57577f63b305 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -248,7 +248,8 @@ sys_mprotect(unsigned long start, size_t len, unsigned long prot)
newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
- if ((newflags & ~(newflags >> 4)) & 0xf) {
+ /* newflags >> 4 shifts VM_MAY% into the place of VM_% */
+ if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
error = -EACCES;
goto out;
}
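
The replaced magic constant 0xf relies on VM_MAYREAD/VM_MAYWRITE/VM_MAYEXEC sitting exactly four bits above VM_READ/VM_WRITE/VM_EXEC, so newflags >> 4 lines the VM_MAY* bits up with the VM_* bits and the expression rejects any requested permission whose VM_MAY* counterpart is clear. A runnable userspace check of that bit trick (flag values copied from the kernel's 0x0000000f / 0x000000f0 layout):

    #include <assert.h>

    #define VM_READ     0x00000001UL
    #define VM_WRITE    0x00000002UL
    #define VM_EXEC     0x00000004UL
    #define VM_MAYREAD  0x00000010UL   /* == VM_READ  << 4 */
    #define VM_MAYWRITE 0x00000020UL   /* == VM_WRITE << 4 */
    #define VM_MAYEXEC  0x00000040UL   /* == VM_EXEC  << 4 */

    /* Non-zero when a requested VM_* bit lacks its VM_MAY* counterpart. */
    static unsigned long forbidden(unsigned long newflags)
    {
        return (newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC);
    }

    int main(void)
    {
        /* write requested and allowed */
        assert(forbidden(VM_WRITE | VM_MAYWRITE) == 0);
        /* exec requested but VM_MAYEXEC missing -> rejected */
        assert(forbidden(VM_EXEC | VM_MAYREAD) != 0);
        return 0;
    }
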
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c5823c395f71..ae2903339e71 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -22,6 +22,7 @@
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
@@ -117,7 +118,7 @@ static void bad_page(const char *function, struct page *page)
set_page_count(page, 0);
reset_page_mapcount(page);
page->mapping = NULL;
- tainted |= TAINT_BAD_PAGE;
+ add_taint(TAINT_BAD_PAGE);
}
#ifndef CONFIG_HUGETLB_PAGE
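
The switch from the open-coded `tainted |= TAINT_BAD_PAGE` to add_taint() routes all taint updates through a single helper, so there is one place to change how the flag word is modified. A tiny userspace model of hiding a shared flag word behind one setter (the bit value here is illustrative; see the kernel's kernel.h for the real TAINT_BAD_PAGE definition):

    #include <stdio.h>

    #define TAINT_BAD_PAGE (1UL << 5)   /* illustrative; check kernel.h */

    static unsigned long tainted;

    /* Single entry point for setting taint bits, as add_taint() provides. */
    static void add_taint_model(unsigned long flag)
    {
        tainted |= flag;   /* one place to add locking/atomics later */
    }

    int main(void)
    {
        add_taint_model(TAINT_BAD_PAGE);
        printf("tainted=%#lx\n", tainted);
        return 0;
    }
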
diff --git a/mm/slab.c b/mm/slab.c
index 9e876d6dfad9..c9adfce00405 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -308,12 +308,12 @@ struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
#define SIZE_L3 (1 + MAX_NUMNODES)
/*
- * This function may be completely optimized away if
+ * This function must be completely optimized away if
* a constant is passed to it. Mostly the same as
* what is in linux/slab.h except it returns an
* index.
*/
-static inline int index_of(const size_t size)
+static __always_inline int index_of(const size_t size)
{
if (__builtin_constant_p(size)) {
int i = 0;
@@ -329,7 +329,8 @@ static inline int index_of(const size_t size)
extern void __bad_size(void);
__bad_size();
}
- }
+ } else
+ BUG();
return 0;
}
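
index_of() is meant to collapse to a constant at compile time via __builtin_constant_p(), so the hunk forces inlining with __always_inline and turns a non-constant size into a hard BUG() instead of silently returning index 0. A standalone sketch of the same compile-time lookup pattern; the size table is made up (the kernel generates its malloc_sizes[] from kmalloc_sizes.h), abort() stands in for __bad_size()/BUG(), and the example assumes it is built with optimization so the constant propagates through the inline:

    #include <assert.h>
    #include <stdlib.h>

    /* Illustrative size classes, not the kernel's table. */
    static const size_t sizes[] = { 32, 64, 128, 256, 512, 1024 };

    /* Returns the index of the smallest class >= size.  With a constant
     * argument, inlining, and -O2, the compiler folds the loop away. */
    static inline __attribute__((always_inline)) int index_of(size_t size)
    {
        if (__builtin_constant_p(size)) {
            size_t i;
            for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                if (size <= sizes[i])
                    return (int)i;
            abort();              /* stands in for __bad_size() */
        }
        abort();                  /* stands in for BUG(): non-constant size */
    }

    int main(void)
    {
        assert(index_of(100) == 2);   /* 100 bytes -> 128-byte class */
        return 0;
    }
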
@@ -639,7 +640,7 @@ static enum {
static DEFINE_PER_CPU(struct work_struct, reap_work);
-static void free_block(kmem_cache_t* cachep, void** objpp, int len);
+static void free_block(kmem_cache_t* cachep, void** objpp, int len, int node);
static void enable_cpucache (kmem_cache_t *cachep);
static void cache_reap (void *unused);
static int __node_shrink(kmem_cache_t *cachep, int node);
@@ -659,7 +660,7 @@ static inline kmem_cache_t *__find_general_cachep(size_t size,
* kmem_cache_create(), or __kmalloc(), before
* the generic caches are initialized.
*/
- BUG_ON(csizep->cs_cachep == NULL);
+ BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
#endif
while (size > csizep->cs_size)
csizep++;
@@ -804,7 +805,7 @@ static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache
if (ac->avail) {
spin_lock(&rl3->list_lock);
- free_block(cachep, ac->entry, ac->avail);
+ free_block(cachep, ac->entry, ac->avail, node);
ac->avail = 0;
spin_unlock(&rl3->list_lock);
}
@@ -925,7 +926,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
/* Free limit for this kmem_list3 */
l3->free_limit -= cachep->batchcount;
if (nc)
- free_block(cachep, nc->entry, nc->avail);
+ free_block(cachep, nc->entry, nc->avail, node);
if (!cpus_empty(mask)) {
spin_unlock(&l3->list_lock);
@@ -934,7 +935,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
if (l3->shared) {
free_block(cachep, l3->shared->entry,
- l3->shared->avail);
+ l3->shared->avail, node);
kfree(l3->shared);
l3->shared = NULL;
}
@@ -1882,12 +1883,13 @@ static void do_drain(void *arg)
{
kmem_cache_t *cachep = (kmem_cache_t*)arg;
struct array_cache *ac;
+ int node = numa_node_id();
check_irq_off();
ac = ac_data(cachep);
- spin_lock(&cachep->nodelists[numa_node_id()]->list_lock);
- free_block(cachep, ac->entry, ac->avail);
- spin_unlock(&cachep->nodelists[numa_node_id()]->list_lock);
+ spin_lock(&cachep->nodelists[node]->list_lock);
+ free_block(cachep, ac->entry, ac->avail, node);
+ spin_unlock(&cachep->nodelists[node]->list_lock);
ac->avail = 0;
}
@@ -2608,7 +2610,7 @@ done:
/*
* Caller needs to acquire correct kmem_list's list_lock
*/
-static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects)
+static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int node)
{
int i;
struct kmem_list3 *l3;
@@ -2617,14 +2619,12 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects)
void *objp = objpp[i];
struct slab *slabp;
unsigned int objnr;
- int nodeid = 0;
slabp = GET_PAGE_SLAB(virt_to_page(objp));
- nodeid = slabp->nodeid;
- l3 = cachep->nodelists[nodeid];
+ l3 = cachep->nodelists[node];
list_del(&slabp->list);
objnr = (objp - slabp->s_mem) / cachep->objsize;
- check_spinlock_acquired_node(cachep, nodeid);
+ check_spinlock_acquired_node(cachep, node);
check_slabp(cachep, slabp);
@@ -2664,13 +2664,14 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
{
int batchcount;
struct kmem_list3 *l3;
+ int node = numa_node_id();
batchcount = ac->batchcount;
#if DEBUG
BUG_ON(!batchcount || batchcount > ac->avail);
#endif
check_irq_off();
- l3 = cachep->nodelists[numa_node_id()];
+ l3 = cachep->nodelists[node];
spin_lock(&l3->list_lock);
if (l3->shared) {
struct array_cache *shared_array = l3->shared;
@@ -2686,7 +2687,7 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
}
}
- free_block(cachep, ac->entry, batchcount);
+ free_block(cachep, ac->entry, batchcount, node);
free_done:
#if STATS
{
@@ -2751,7 +2752,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
} else {
spin_lock(&(cachep->nodelists[nodeid])->
list_lock);
- free_block(cachep, &objp, 1);
+ free_block(cachep, &objp, 1, nodeid);
spin_unlock(&(cachep->nodelists[nodeid])->
list_lock);
}
@@ -2844,7 +2845,7 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, i
unsigned long save_flags;
void *ptr;
- if (nodeid == numa_node_id() || nodeid == -1)
+ if (nodeid == -1)
return __cache_alloc(cachep, flags);
if (unlikely(!cachep->nodelists[nodeid])) {
@@ -3079,7 +3080,7 @@ static int alloc_kmemlist(kmem_cache_t *cachep)
if ((nc = cachep->nodelists[node]->shared))
free_block(cachep, nc->entry,
- nc->avail);
+ nc->avail, node);
l3->shared = new;
if (!cachep->nodelists[node]->alien) {
@@ -3160,7 +3161,7 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
if (!ccold)
continue;
spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
- free_block(cachep, ccold->entry, ccold->avail);
+ free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
kfree(ccold);
}
@@ -3240,7 +3241,7 @@ static void drain_array_locked(kmem_cache_t *cachep,
if (tofree > ac->avail) {
tofree = (ac->avail+1)/2;
}
- free_block(cachep, ac->entry, tofree);
+ free_block(cachep, ac->entry, tofree, node);
ac->avail -= tofree;
memmove(ac->entry, &(ac->entry[tofree]),
sizeof(void*)*ac->avail);
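
The common thread through the slab.c changes is that free_block() no longer derives a per-object node from slabp->nodeid: every caller already holds exactly one node's list_lock, so that node is now passed in explicitly and check_spinlock_acquired_node() can verify the lock actually matches. A small userspace model of the "caller names the list it has locked" convention; the types, names, and the locked flag are invented for the sketch and only loosely model kmem_list3 and its spinlock:

    #include <assert.h>

    #define MAX_NODES 4

    /* Per-node free list, loosely modelling kmem_list3; 'locked' records
     * whose list_lock the current context holds. */
    struct node_list { int locked; int nfree; };

    static struct node_list nodelists[MAX_NODES];

    /* Caller passes the node whose list_lock it holds, instead of
     * free_block() re-deriving a nodeid from each slab. */
    static void free_block(int node, int nr_objects)
    {
        assert(nodelists[node].locked);   /* check_spinlock_acquired_node() */
        nodelists[node].nfree += nr_objects;
    }

    int main(void)
    {
        int node = 1;                     /* stands in for numa_node_id() */
        nodelists[node].locked = 1;       /* spin_lock(&l3->list_lock) */
        free_block(node, 16);
        nodelists[node].locked = 0;       /* spin_unlock(&l3->list_lock) */
        assert(nodelists[node].nfree == 16);
        return 0;
    }
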
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 0184f510aace..1dcaeda039f4 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1381,6 +1381,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
error = bd_claim(bdev, sys_swapon);
if (error < 0) {
bdev = NULL;
+ error = -EINVAL;
goto bad_swap;
}
p->old_block_size = block_size(bdev);
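
The one-line swapfile.c addition makes the bd_claim() failure path jump to the bad_swap cleanup label with error explicitly set to -EINVAL rather than whatever bd_claim() happened to return. A generic userspace sketch of that "pick a definite errno, then goto one cleanup label" shape; claim_device() and setup() are invented names for illustration:

    #include <errno.h>
    #include <stdio.h>

    /* Invented helper: pretend to claim a device; fails when id != 0. */
    static int claim_device(int id) { return id == 0 ? 0 : -EBUSY; }

    /* One cleanup label, with the reported error chosen before each goto,
     * mirroring the shape of sys_swapon()'s bad_swap path. */
    static int setup(int id)
    {
        int error;

        error = claim_device(id);
        if (error < 0) {
            error = -EINVAL;      /* normalise the reported error */
            goto bad_setup;
        }
        return 0;

    bad_setup:
        /* undo any partial state here */
        return error;
    }

    int main(void)
    {
        printf("%d %d\n", setup(0), setup(1));   /* 0 -EINVAL */
        return 0;
    }
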
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a740778f688d..0ea71e887bb6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1258,9 +1258,9 @@ void wakeup_kswapd(struct zone *zone, int order)
pgdat->kswapd_max_order = order;
if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
return;
- if (!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
+ if (!waitqueue_active(&pgdat->kswapd_wait))
return;
- wake_up_interruptible(&zone->zone_pgdat->kswapd_wait);
+ wake_up_interruptible(&pgdat->kswapd_wait);
}
#ifdef CONFIG_PM