author		Linus Torvalds <torvalds@linux-foundation.org>		2009-06-11 10:08:33 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>		2009-06-11 10:08:33 -0700
commit		d3d07d941fd80c173b6d690ded00ee5fb8302e06 (patch)
tree		f1a82c956e393df9933c8544bb564ef1735384ee /arch/sh/mm/mmap.c
parent		6cd8e300b49332eb9eeda45816c711c198d31505 (diff)
parent		54ff328b46e58568c4b3350c2fa3223ef862e5a4 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6: (266 commits)
sh: Tie sparseirq in to Kconfig.
sh: Wire up sys_rt_tgsigqueueinfo.
sh: Fix sys_pwritev() syscall table entry for sh32.
sh: Fix sh4a llsc-based cmpxchg()
sh: sh7724: Add JPU support
sh: sh7724: INTC setting update
sh: sh7722 clock framework rewrite
sh: sh7366 clock framework rewrite
sh: sh7343 clock framework rewrite
sh: sh7724 clock framework rewrite V3
sh: sh7723 clock framework rewrite V2
sh: add enable()/disable()/set_rate() to div6 code
sh: add AP325RXA mode pin configuration
sh: add Migo-R mode pin configuration
sh: sh7722 mode pin definitions
sh: sh7724 mode pin comments
sh: sh7723 mode pin V2
sh: rework mode pin code
sh: clock div6 helper code
sh: clock div4 frequency table offset fix
...
Diffstat (limited to 'arch/sh/mm/mmap.c')
-rw-r--r--	arch/sh/mm/mmap.c	136
1 file changed, 132 insertions(+), 4 deletions(-)
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 931f4d003fa0..1b5fdfb4e0c2 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -1,7 +1,7 @@
 /*
  * arch/sh/mm/mmap.c
  *
- * Copyright (C) 2008  Paul Mundt
+ * Copyright (C) 2008 - 2009  Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -21,9 +21,26 @@ EXPORT_SYMBOL(shm_align_mask);
 /*
  * To avoid cache aliases, we map the shared page with same color.
  */
-#define COLOUR_ALIGN(addr, pgoff)				\
-	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
-	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
+static inline unsigned long COLOUR_ALIGN(unsigned long addr,
+					 unsigned long pgoff)
+{
+	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
+	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
+
+	return base + off;
+}
+
+static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
+					      unsigned long pgoff)
+{
+	unsigned long base = addr & ~shm_align_mask;
+	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
+
+	if (base + off <= addr)
+		return base + off;
+
+	return base - off;
+}
 
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	unsigned long len, unsigned long pgoff, unsigned long flags)
@@ -103,6 +120,117 @@ full_search:
 			addr = COLOUR_ALIGN(addr, pgoff);
 	}
 }
+
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+			       const unsigned long len, const unsigned long pgoff,
+			       const unsigned long flags)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long addr = addr0;
+	int do_colour_align;
+
+	if (flags & MAP_FIXED) {
+		/* We do not accept a shared mapping if it would violate
+		 * cache aliasing constraints.
+		 */
+		if ((flags & MAP_SHARED) &&
+		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
+			return -EINVAL;
+		return addr;
+	}
+
+	if (unlikely(len > TASK_SIZE))
+		return -ENOMEM;
+
+	do_colour_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_colour_align = 1;
+
+	/* requesting a specific address */
+	if (addr) {
+		if (do_colour_align)
+			addr = COLOUR_ALIGN(addr, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	/* check if free_area_cache is useful for us */
+	if (len <= mm->cached_hole_size) {
+		mm->cached_hole_size = 0;
+		mm->free_area_cache = mm->mmap_base;
+	}
+
+	/* either no address requested or can't fit in requested address hole */
+	addr = mm->free_area_cache;
+	if (do_colour_align) {
+		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);
+
+		addr = base + len;
+	}
+
+	/* make sure it can fit in the remaining address space */
+	if (likely(addr > len)) {
+		vma = find_vma(mm, addr-len);
+		if (!vma || addr <= vma->vm_start) {
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr-len);
+		}
+	}
+
+	if (unlikely(mm->mmap_base < len))
+		goto bottomup;
+
+	addr = mm->mmap_base-len;
+	if (do_colour_align)
+		addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+
+	do {
+		/*
+		 * Lookup failure means no vma is above this address,
+		 * else if new region fits below vma->vm_start,
+		 * return with success:
+		 */
+		vma = find_vma(mm, addr);
+		if (likely(!vma || addr+len <= vma->vm_start)) {
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr);
+		}
+
+		/* remember the largest hole we saw so far */
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
+
+		/* try just below the current vma->vm_start */
+		addr = vma->vm_start-len;
+		if (do_colour_align)
+			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+	} while (likely(len < vma->vm_start));
+
+bottomup:
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	mm->cached_hole_size = ~0UL;
+	mm->free_area_cache = TASK_UNMAPPED_BASE;
+	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+	/*
+	 * Restore the topdown base:
+	 */
+	mm->free_area_cache = mm->mmap_base;
+	mm->cached_hole_size = ~0UL;
+
+	return addr;
+}
 #endif /* CONFIG_MMU */
 
 /*
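
The core of the new code is the pair of colour-alignment helpers that replace the old COLOUR_ALIGN macro: each picks an address whose low bits inside the alias span match the colour implied by the file offset, so a shared page is only ever mapped at one colour in the virtually indexed cache. A quick way to see what they compute is to run the same arithmetic in userspace. The sketch below is only illustrative: the 4 KiB page size, the 16 KiB shm_align_mask and the sample addr/pgoff are assumptions (the kernel derives the real mask from the probed cache geometry at boot), and colour_align()/colour_align_down() are simply lowercase copies of the functions added in the hunk above.

/* colour_demo.c - standalone sketch of the SH cache-colouring helpers.
 * Assumed values: 4 KiB pages and a 16 KiB alias span; the real kernel
 * computes shm_align_mask from the detected cache at boot time.
 */
#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed: 4 KiB pages */
static unsigned long shm_align_mask = 0x3fff;	/* assumed: 16 KiB alias span */

/* Round addr up to the next address whose alias bits match pgoff's colour. */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return base + off;
}

/* Prefer the matching-colour address at or below addr when one exists in
 * the current alias span, otherwise step back below the span base, exactly
 * as COLOUR_ALIGN_DOWN() does in the hunk above. */
static unsigned long colour_align_down(unsigned long addr, unsigned long pgoff)
{
	unsigned long base = addr & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	if (base + off <= addr)
		return base + off;

	return base - off;
}

int main(void)
{
	unsigned long addr = 0x29553800, pgoff = 3;	/* illustrative inputs */
	unsigned long want = (pgoff << PAGE_SHIFT) & shm_align_mask;
	unsigned long up = colour_align(addr, pgoff);
	unsigned long down = colour_align_down(addr, pgoff);

	printf("requested colour %#lx\n", want);
	printf("up:   %#lx (colour %#lx)\n", up, up & shm_align_mask);
	printf("down: %#lx (colour %#lx)\n", down, down & shm_align_mask);
	return 0;
}

For these inputs both helpers land on the requested colour 0x3000, which is the property the new arch_get_unmapped_area_topdown() relies on: every candidate it probes while walking down from mm->mmap_base has already been rounded onto the same colour as the file offset, so a MAP_SHARED mapping cannot alias itself in the cache.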