| author | Russell King <rmk+kernel@arm.linux.org.uk> | 2011-04-13 23:32:13 +0100 |
|---|---|---|
| committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2011-04-13 23:32:13 +0100 |
| commit | a84bd2ee81ea1bdbd238cd1c380ec25f50a876c5 (patch) | |
| tree | ba054d4b1a20dadec088fd4d8983fb5939227e46 /mm/memcontrol.c | |
| parent | aec995900fbc8cffa9f0f9e797ef07a0beb2b079 (diff) | |
| parent | 7db6a7fa09884b34d2a5d4e6e4ed58664a5f0cf8 (diff) | |
Merge branch 'fix' of git://git.kernel.org/pub/scm/linux/kernel/git/ycmiao/pxa-linux-2.6 into fixes
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r-- | mm/memcontrol.c | 8
1 file changed, 4 insertions, 4 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1f0b460fe58c..010f9166fa6e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1466,7 +1466,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
 			break;
 		}
 		/*
-		 * We want to do more targetted reclaim.
+		 * We want to do more targeted reclaim.
 		 * excess >> 2 is not to excessive so as to
 		 * reclaim too much, nor too less that we keep
 		 * coming back to reclaim from this cgroup
@@ -2265,7 +2265,7 @@ void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
  * - compound_lock is held when nr_pages > 1
  *
  * This function doesn't do "charge" nor css_get to new cgroup. It should be
- * done by a caller(__mem_cgroup_try_charge would be usefull). If @uncharge is
+ * done by a caller(__mem_cgroup_try_charge would be useful). If @uncharge is
  * true, this function does "uncharge" from old cgroup, but it doesn't if
  * @uncharge is false, so a caller should do "uncharge".
  */
@@ -2318,7 +2318,7 @@ static int mem_cgroup_move_account(struct page *page,
 	 * We charges against "to" which may not have any tasks. Then, "to"
 	 * can be under rmdir(). But in current implementation, caller of
 	 * this function is just force_empty() and move charge, so it's
-	 * garanteed that "to" is never removed. So, we don't check rmdir
+	 * guaranteed that "to" is never removed. So, we don't check rmdir
 	 * status here.
 	 */
 	move_unlock_page_cgroup(pc, &flags);
@@ -2648,7 +2648,7 @@ static void mem_cgroup_do_uncharge(struct mem_cgroup *mem,
 		batch->memcg = mem;
 	/*
 	 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
-	 * In those cases, all pages freed continously can be expected to be in
+	 * In those cases, all pages freed continuously can be expected to be in
 	 * the same cgroup and we have chance to coalesce uncharges.
 	 * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE)
 	 * because we want to do uncharge as soon as possible.
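The final hunk touches a comment in mem_cgroup_do_uncharge() that explains why uncharges are batched: pages freed in a single unmap or truncate pass usually belong to the same cgroup, so their uncharges can be coalesced into one operation instead of many. Below is a minimal user-space sketch of that coalescing pattern; every name in it (memcg, uncharge_batch, batch_uncharge, batch_flush) is invented for illustration and is not the kernel's actual API.

```c
#include <stdio.h>

/* Hypothetical stand-in for a cgroup; the real kernel type is struct mem_cgroup. */
struct memcg { const char *name; };

/* Per-pass batch state: accumulate uncharges against one cgroup at a time. */
struct uncharge_batch {
	struct memcg *memcg;   /* cgroup the batch is currently accumulating for */
	unsigned long pages;   /* pages uncharged so far, not yet flushed */
};

/* Flush the accumulated count to the cgroup in one operation. */
static void batch_flush(struct uncharge_batch *b)
{
	if (b->memcg && b->pages) {
		printf("uncharge %lu pages from %s\n", b->pages, b->memcg->name);
		b->pages = 0;
	}
}

/*
 * Coalesce while successive pages belong to the same cgroup; when the
 * cgroup changes, flush the run and start a new one. This mirrors the
 * idea in the kernel comment: pages freed continuously in one pass can
 * be expected to share a cgroup, so one batched uncharge replaces many
 * individual ones.
 */
static void batch_uncharge(struct uncharge_batch *b, struct memcg *mc)
{
	if (b->memcg != mc) {
		batch_flush(b);
		b->memcg = mc;
	}
	b->pages++;
}

int main(void)
{
	struct memcg a = { "A" }, c = { "B" };
	struct memcg *freed[] = { &a, &a, &a, &c, &c, &a };
	struct uncharge_batch batch = { 0 };

	for (unsigned i = 0; i < sizeof(freed) / sizeof(freed[0]); i++)
		batch_uncharge(&batch, freed[i]);
	batch_flush(&batch);   /* final flush at the end of the free pass */
	return 0;
}
```

Even in this toy, the trade-off the kernel comment describes is visible: batching only pays off while successive frees hit the same cgroup, and it delays the moment memory is actually uncharged, which is why the comment notes that an OOM-killed task (TIF_MEMDIE) uncharges one by one to release memory as soon as possible.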