author | Mauro Carvalho Chehab <mchehab@redhat.com> | 2012-07-29 21:09:39 -0300
committer | Mauro Carvalho Chehab <mchehab@redhat.com> | 2012-07-29 21:09:39 -0300
commit | 73bcc49959e4e40911dd0dd634bf1b353827df66 (patch)
tree | 6b0c1d440c490a65c51ab5cf5aee7095cb4089d3 /arch/arm/mm
parent | 8447c4d15e357a458c9051ddc84aa6c8b9c27000 (diff)
parent | 28a33cbc24e4256c143dce96c7d93bf423229f92 (diff)
Merge tag 'v3.5'
Linux 3.5
* tag 'v3.5': (1242 commits)
Linux 3.5
Remove SYSTEM_SUSPEND_DISK system state
kdb: Switch to nolock variants of kmsg_dump functions
printk: Implement some unlocked kmsg_dump functions
printk: Remove kdb_syslog_data
kdb: Revive dmesg command
dm raid1: set discard_zeroes_data_unsupported
dm thin: do not send discards to shared blocks
dm raid1: fix crash with mirror recovery and discard
pnfs-obj: Fix __r4w_get_page when offset is beyond i_size
pnfs-obj: don't leak objio_state if ore_write/read fails
ore: Unlock r4w pages in exact reverse order of locking
ore: Remove support of partial IO request (NFS crash)
ore: Fix NFS crash by supporting any unaligned RAID IO
UBIFS: fix a bug in empty space fix-up
cx25821: Remove bad strcpy to read-only char*
HID: hid-multitouch: add support for Zytronic panels
MIPS: PCI: Move fixups from __init to __devinit.
MIPS: Fix bug.h MIPS build regression
MIPS: sync-r4k: remove redundant irq operation
...
Diffstat (limited to 'arch/arm/mm')
-rw-r--r-- | arch/arm/mm/dma-mapping.c | 10
-rw-r--r-- | arch/arm/mm/init.c | 2
-rw-r--r-- | arch/arm/mm/mm.h | 4
-rw-r--r-- | arch/arm/mm/mmu.c | 74
4 files changed, 82 insertions, 8 deletions
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 106c4c0ebccd..655878bcc96d 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -228,7 +228,7 @@ static pte_t **consistent_pte;
 
 #define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M
 
-unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
+static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
 
 void __init init_consistent_dma_size(unsigned long size)
 {
@@ -321,7 +321,7 @@ static struct arm_vmregion_head coherent_head = {
 	.vm_list	= LIST_HEAD_INIT(coherent_head.vm_list),
 };
 
-size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
+static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
 
 static int __init early_coherent_pool(char *p)
 {
@@ -1067,7 +1067,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
 		return NULL;
 
 	while (count) {
-		int j, order = __ffs(count);
+		int j, order = __fls(count);
 
 		pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
 		while (!pages[i] && order)
@@ -1091,7 +1091,7 @@ error:
 	while (--i)
 		if (pages[i])
 			__free_pages(pages[i], 0);
-	if (array_size < PAGE_SIZE)
+	if (array_size <= PAGE_SIZE)
 		kfree(pages);
 	else
 		vfree(pages);
@@ -1106,7 +1106,7 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t s
 	for (i = 0; i < count; i++)
 		if (pages[i])
 			__free_pages(pages[i], 0);
-	if (array_size < PAGE_SIZE)
+	if (array_size <= PAGE_SIZE)
 		kfree(pages);
 	else
 		vfree(pages);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index c21d06c7dd7e..f54d59219764 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -212,7 +212,7 @@ EXPORT_SYMBOL(arm_dma_zone_size);
  * allocations.  This must be the smallest DMA mask in the system,
  * so a successful GFP_DMA allocation will always satisfy this.
  */
-u32 arm_dma_limit;
+phys_addr_t arm_dma_limit;
 
 static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
 	unsigned long dma_size)
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 93dc0c17cdcb..2e8a1efdf7b8 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -62,9 +62,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 #endif
 
 #ifdef CONFIG_ZONE_DMA
-extern u32 arm_dma_limit;
+extern phys_addr_t arm_dma_limit;
 #else
-#define arm_dma_limit ((u32)~0)
+#define arm_dma_limit ((phys_addr_t)~0)
 #endif
 
 extern phys_addr_t arm_lowmem_limit;
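Two of the dma-mapping.c hunks above change behaviour rather than just symbol visibility. The __ffs(count) to __fls(count) switch matters because __ffs() returns the index of the lowest set bit while __fls() returns the highest: the IOMMU buffer allocator wants the largest power-of-two chunk that still fits within the remaining page count, which is what __fls() gives. The two "array_size < PAGE_SIZE" to "<= PAGE_SIZE" switches keep the free paths symmetric with the allocation path, which chooses kzalloc() for sizes up to and including PAGE_SIZE and vzalloc() above that; with the old test, an array of exactly PAGE_SIZE bytes would have been handed to vfree() after coming from kzalloc(). Below is a minimal userspace sketch of the order calculation, for illustration only: the my_ffs()/my_fls() names are hypothetical stand-ins built on compiler builtins, not the kernel helpers themselves.

#include <stdio.h>

/* Hypothetical userspace stand-ins for the kernel bit helpers.
 * my_ffs(x): index of the lowest set bit (x must be non-zero).
 * my_fls(x): index of the highest set bit (x must be non-zero). */
static unsigned long my_ffs(unsigned long x) { return __builtin_ctzl(x); }
static unsigned long my_fls(unsigned long x)
{
        return sizeof(unsigned long) * 8 - 1 - __builtin_clzl(x);
}

int main(void)
{
        unsigned long count = 10;       /* 0b1010: ten pages still to allocate */

        /* The allocator wants the biggest 2^order chunk with 2^order <= count. */
        printf("__ffs order %lu -> chunk of %lu pages\n",
               my_ffs(count), 1UL << my_ffs(count));    /* order 1 -> 2 pages */
        printf("__fls order %lu -> chunk of %lu pages\n",
               my_fls(count), 1UL << my_fls(count));    /* order 3 -> 8 pages */
        return 0;
}

For count = 10 the old code would start with an order-1 (2-page) chunk; the new code starts with a single order-3 (8-page) chunk, taking better advantage of large IOMMU mappings.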
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e5dad60b558b..cf4528d51774 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -791,6 +791,79 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 	}
 }
 
+#ifndef CONFIG_ARM_LPAE
+
+/*
+ * The Linux PMD is made of two consecutive section entries covering 2MB
+ * (see definition in include/asm/pgtable-2level.h).  However a call to
+ * create_mapping() may optimize static mappings by using individual
+ * 1MB section mappings.  This leaves the actual PMD potentially half
+ * initialized if the top or bottom section entry isn't used, leaving it
+ * open to problems if a subsequent ioremap() or vmalloc() tries to use
+ * the virtual space left free by that unused section entry.
+ *
+ * Let's avoid the issue by inserting dummy vm entries covering the unused
+ * PMD halves once the static mappings are in place.
+ */
+
+static void __init pmd_empty_section_gap(unsigned long addr)
+{
+	struct vm_struct *vm;
+
+	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm->addr = (void *)addr;
+	vm->size = SECTION_SIZE;
+	vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+	vm->caller = pmd_empty_section_gap;
+	vm_area_add_early(vm);
+}
+
+static void __init fill_pmd_gaps(void)
+{
+	struct vm_struct *vm;
+	unsigned long addr, next = 0;
+	pmd_t *pmd;
+
+	/* we're still single threaded hence no lock needed here */
+	for (vm = vmlist; vm; vm = vm->next) {
+		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		addr = (unsigned long)vm->addr;
+		if (addr < next)
+			continue;
+
+		/*
+		 * Check if this vm starts on an odd section boundary.
+		 * If so and the first section entry for this PMD is free
+		 * then we block the corresponding virtual address.
+		 */
+		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+			pmd = pmd_off_k(addr);
+			if (pmd_none(*pmd))
+				pmd_empty_section_gap(addr & PMD_MASK);
+		}
+
+		/*
+		 * Then check if this vm ends on an odd section boundary.
+		 * If so and the second section entry for this PMD is empty
+		 * then we block the corresponding virtual address.
+		 */
+		addr += vm->size;
+		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+			pmd = pmd_off_k(addr) + 1;
+			if (pmd_none(*pmd))
+				pmd_empty_section_gap(addr);
+		}
+
+		/* no need to look at any vm entry until we hit the next PMD */
+		next = (addr + PMD_SIZE - 1) & PMD_MASK;
+	}
+}
+
+#else
+#define fill_pmd_gaps() do { } while (0)
+#endif
+
 static void * __initdata vmalloc_min =
 	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
 
@@ -1072,6 +1145,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 */
 	if (mdesc->map_io)
 		mdesc->map_io();
+	fill_pmd_gaps();
 
 	/*
 	 * Finally flush the caches and tlb to ensure that we're in a
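For readers tracing the boundary test in fill_pmd_gaps() above: with ARM's 2-level page tables a PMD covers 2MB and is made of two 1MB section entries, so (addr & ~PMD_MASK) == SECTION_SIZE holds exactly when addr sits on an odd 1MB boundary, i.e. in the upper half of a PMD. A self-contained sketch of that test, with the constants assumed from the 2-level layout described in the comment and purely illustrative addresses:

#include <stdio.h>

/* Constants assumed from ARM's 2-level paging: a PMD covers 2MB,
 * split into two 1MB section entries. */
#define SECTION_SIZE    (1UL << 20)             /* 1MB */
#define PMD_SIZE        (2 * SECTION_SIZE)      /* 2MB */
#define PMD_MASK        (~(PMD_SIZE - 1))

/* True when addr is an odd section boundary, i.e. the upper half of
 * a PMD: a 1MB static mapping starting or ending here leaves the
 * other section entry of that PMD uninitialized. */
static int on_odd_section(unsigned long addr)
{
        return (addr & ~PMD_MASK) == SECTION_SIZE;
}

int main(void)
{
        /* Hypothetical addresses for illustration only. */
        printf("%d\n", on_odd_section(0xe0000000UL));   /* 0: even 2MB boundary */
        printf("%d\n", on_odd_section(0xe0100000UL));   /* 1: odd 1MB boundary */
        printf("%d\n", on_odd_section(0xe0180000UL));   /* 0: not a section boundary */
        return 0;
}

A static mapping that starts or ends on such an odd boundary leaves the other section entry of that PMD unused, and that unused half is exactly what pmd_empty_section_gap() then reserves with a dummy vm entry.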