Diffstat (limited to 'arch/arm/mm')
33 files changed, 278 insertions, 217 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 9b4f29e595a4..b4f92b9a13ac 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -738,7 +738,7 @@ config CPU_ICACHE_DISABLE config CPU_DCACHE_DISABLE bool "Disable D-Cache (C-bit)" - depends on CPU_CP15 + depends on CPU_CP15 && !SMP help Say Y here to disable the processor data cache. Unless you have a reason not to or are unsure, say N. @@ -825,6 +825,20 @@ config KUSER_HELPERS Say N here only if you are absolutely certain that you do not need these helpers; otherwise, the safe option is to say Y. +config VDSO + bool "Enable VDSO for acceleration of some system calls" + depends on AEABI && MMU && CPU_V7 + default y if ARM_ARCH_TIMER + select GENERIC_TIME_VSYSCALL + help + Place in the process address space an ELF shared object + providing fast implementations of gettimeofday and + clock_gettime. Systems that implement the ARM architected + timer will receive maximum benefit. + + You must have glibc 2.22 or later for programs to seamlessly + take advantage of this. + config DMA_CACHE_RWFO bool "Enable read/write for ownership DMA cache maintenance" depends on CPU_V6K && SMP diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 2c0c541c60ca..9769f1eefe3b 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -201,7 +201,7 @@ union offset_union { THUMB( "1: "ins" %1, [%2]\n" ) \ THUMB( " add %2, %2, #1\n" ) \ "2:\n" \ - " .pushsection .fixup,\"ax\"\n" \ + " .pushsection .text.fixup,\"ax\"\n" \ " .align 2\n" \ "3: mov %0, #1\n" \ " b 2b\n" \ @@ -261,7 +261,7 @@ union offset_union { " mov %1, %1, "NEXT_BYTE"\n" \ "2: "ins" %1, [%2]\n" \ "3:\n" \ - " .pushsection .fixup,\"ax\"\n" \ + " .pushsection .text.fixup,\"ax\"\n" \ " .align 2\n" \ "4: mov %0, #1\n" \ " b 3b\n" \ @@ -301,7 +301,7 @@ union offset_union { " mov %1, %1, "NEXT_BYTE"\n" \ "4: "ins" %1, [%2]\n" \ "5:\n" \ - " .pushsection .fixup,\"ax\"\n" \ + " .pushsection .text.fixup,\"ax\"\n" \ " .align 2\n" \ "6: mov %0, #1\n" \ " b 5b\n" \ diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 8f15f70622a6..e309c8f35af5 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -1647,6 +1647,7 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask) struct device_node *np; struct resource res; u32 cache_id, old_aux; + u32 cache_level = 2; np = of_find_matching_node(NULL, l2x0_ids); if (!np) @@ -1679,6 +1680,12 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask) if (!of_property_read_bool(np, "cache-unified")) pr_err("L2C: device tree omits to specify unified cache\n"); + if (of_property_read_u32(np, "cache-level", &cache_level)) + pr_err("L2C: device tree omits to specify cache-level\n"); + + if (cache_level != 2) + pr_err("L2C: device tree specifies invalid cache level\n"); + /* Read back current (default) hardware configuration */ if (data->save) data->save(l2x0_base); diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index b966656d2c2d..a134d8a13d00 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -36,10 +36,10 @@ ENTRY(v7_invalidate_l1) mcr p15, 2, r0, c0, c0, 0 mrc p15, 1, r0, c0, c0, 0 - ldr r1, =0x7fff + movw r1, #0x7fff and r2, r1, r0, lsr #13 - ldr r1, =0x3ff + movw r1, #0x3ff and r3, r1, r0, lsr #3 @ NumWays - 1 add r2, r2, #1 @ NumSets @@ -90,21 +90,20 @@ ENDPROC(v7_flush_icache_all) ENTRY(v7_flush_dcache_louis) dmb @ ensure ordering with previous memory accesses mrc p15, 1, r0, c0, c0, 1 @ read clidr, r0 = clidr - ALT_SMP(ands r3, r0, #(7 << 21)) @ extract LoUIS from clidr 
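The new CONFIG_VDSO option above maps an ELF shared object into each process with fast gettimeofday and clock_gettime implementations. A minimal user-space sketch of the call it accelerates (standard POSIX API, not part of this patch; with CONFIG_VDSO=y, an ARM architected timer and glibc 2.22 or later, it should complete without entering the kernel):

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* Served from the vDSO fast path when available,
	 * falling back to the clock_gettime syscall otherwise. */
	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}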
- ALT_UP(ands r3, r0, #(7 << 27)) @ extract LoUU from clidr +ALT_SMP(mov r3, r0, lsr #20) @ move LoUIS into position +ALT_UP( mov r3, r0, lsr #26) @ move LoUU into position + ands r3, r3, #7 << 1 @ extract LoU*2 field from clidr + bne start_flush_levels @ LoU != 0, start flushing #ifdef CONFIG_ARM_ERRATA_643719 - ALT_SMP(mrceq p15, 0, r2, c0, c0, 0) @ read main ID register - ALT_UP(reteq lr) @ LoUU is zero, so nothing to do - ldreq r1, =0x410fc090 @ ID of ARM Cortex A9 r0p? - biceq r2, r2, #0x0000000f @ clear minor revision number - teqeq r2, r1 @ test for errata affected core and if so... - orreqs r3, #(1 << 21) @ fix LoUIS value (and set flags state to 'ne') +ALT_SMP(mrc p15, 0, r2, c0, c0, 0) @ read main ID register +ALT_UP( ret lr) @ LoUU is zero, so nothing to do + movw r1, #:lower16:(0x410fc090 >> 4) @ ID of ARM Cortex A9 r0p? + movt r1, #:upper16:(0x410fc090 >> 4) + teq r1, r2, lsr #4 @ test for errata affected core and if so... + moveq r3, #1 << 1 @ fix LoUIS value + beq start_flush_levels @ start flushing cache levels #endif - ALT_SMP(mov r3, r3, lsr #20) @ r3 = LoUIS * 2 - ALT_UP(mov r3, r3, lsr #26) @ r3 = LoUU * 2 - reteq lr @ return if level == 0 - mov r10, #0 @ r10 (starting level) = 0 - b flush_levels @ start flushing cache levels + ret lr ENDPROC(v7_flush_dcache_louis) /* @@ -119,9 +118,10 @@ ENDPROC(v7_flush_dcache_louis) ENTRY(v7_flush_dcache_all) dmb @ ensure ordering with previous memory accesses mrc p15, 1, r0, c0, c0, 1 @ read clidr - ands r3, r0, #0x7000000 @ extract loc from clidr - mov r3, r3, lsr #23 @ left align loc bit field + mov r3, r0, lsr #23 @ move LoC into position + ands r3, r3, #7 << 1 @ extract LoC*2 from clidr beq finished @ if loc is 0, then no need to clean +start_flush_levels: mov r10, #0 @ start clean at cache level 0 flush_levels: add r2, r10, r10, lsr #1 @ work out 3x current cache level @@ -140,10 +140,10 @@ flush_levels: #endif and r2, r1, #7 @ extract the length of the cache lines add r2, r2, #4 @ add 4 (line length offset) - ldr r4, =0x3ff + movw r4, #0x3ff ands r4, r4, r1, lsr #3 @ find maximum number on the way size clz r5, r4 @ find bit position of way size increment - ldr r7, =0x7fff + movw r7, #0x7fff ands r7, r7, r1, lsr #13 @ extract max number of the index size loop1: mov r9, r7 @ create working copy of max index diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index c27447653903..09c5fe3d30c2 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -289,11 +289,11 @@ static void __dma_free_buffer(struct page *page, size_t size) static void *__alloc_from_contiguous(struct device *dev, size_t size, pgprot_t prot, struct page **ret_page, - const void *caller); + const void *caller, bool want_vaddr); static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, pgprot_t prot, struct page **ret_page, - const void *caller); + const void *caller, bool want_vaddr); static void * __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, @@ -357,10 +357,10 @@ static int __init atomic_pool_init(void) if (dev_get_cma_area(NULL)) ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot, - &page, atomic_pool_init); + &page, atomic_pool_init, true); else ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot, - &page, atomic_pool_init); + &page, atomic_pool_init, true); if (ptr) { int ret; @@ -467,13 +467,15 @@ static void __dma_remap(struct page *page, size_t size, pgprot_t prot) static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, pgprot_t prot, 
struct page **ret_page, - const void *caller) + const void *caller, bool want_vaddr) { struct page *page; - void *ptr; + void *ptr = NULL; page = __dma_alloc_buffer(dev, size, gfp); if (!page) return NULL; + if (!want_vaddr) + goto out; ptr = __dma_alloc_remap(page, size, gfp, prot, caller); if (!ptr) { @@ -481,6 +483,7 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, return NULL; } + out: *ret_page = page; return ptr; } @@ -523,12 +526,12 @@ static int __free_from_pool(void *start, size_t size) static void *__alloc_from_contiguous(struct device *dev, size_t size, pgprot_t prot, struct page **ret_page, - const void *caller) + const void *caller, bool want_vaddr) { unsigned long order = get_order(size); size_t count = size >> PAGE_SHIFT; struct page *page; - void *ptr; + void *ptr = NULL; page = dma_alloc_from_contiguous(dev, count, order); if (!page) @@ -536,6 +539,9 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size, __dma_clear_buffer(page, size); + if (!want_vaddr) + goto out; + if (PageHighMem(page)) { ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller); if (!ptr) { @@ -546,17 +552,21 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size, __dma_remap(page, size, prot); ptr = page_address(page); } + + out: *ret_page = page; return ptr; } static void __free_from_contiguous(struct device *dev, struct page *page, - void *cpu_addr, size_t size) + void *cpu_addr, size_t size, bool want_vaddr) { - if (PageHighMem(page)) - __dma_free_remap(cpu_addr, size); - else - __dma_remap(page, size, PAGE_KERNEL); + if (want_vaddr) { + if (PageHighMem(page)) + __dma_free_remap(cpu_addr, size); + else + __dma_remap(page, size, PAGE_KERNEL); + } dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); } @@ -574,12 +584,12 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot) #define nommu() 1 -#define __get_dma_pgprot(attrs, prot) __pgprot(0) -#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL +#define __get_dma_pgprot(attrs, prot) __pgprot(0) +#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv) NULL #define __alloc_from_pool(size, ret_page) NULL -#define __alloc_from_contiguous(dev, size, prot, ret, c) NULL +#define __alloc_from_contiguous(dev, size, prot, ret, c, wv) NULL #define __free_from_pool(cpu_addr, size) 0 -#define __free_from_contiguous(dev, page, cpu_addr, size) do { } while (0) +#define __free_from_contiguous(dev, page, cpu_addr, size, wv) do { } while (0) #define __dma_free_remap(cpu_addr, size) do { } while (0) #endif /* CONFIG_MMU */ @@ -599,11 +609,13 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp, static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, - gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller) + gfp_t gfp, pgprot_t prot, bool is_coherent, + struct dma_attrs *attrs, const void *caller) { u64 mask = get_coherent_dma_mask(dev); struct page *page = NULL; void *addr; + bool want_vaddr; #ifdef CONFIG_DMA_API_DEBUG u64 limit = (mask + 1) & ~mask; @@ -631,20 +643,21 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, *handle = DMA_ERROR_CODE; size = PAGE_ALIGN(size); + want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs); if (is_coherent || nommu()) addr = __alloc_simple_buffer(dev, size, gfp, &page); else if (!(gfp & __GFP_WAIT)) addr = __alloc_from_pool(size, &page); else if (!dev_get_cma_area(dev)) - addr = __alloc_remap_buffer(dev, size, gfp, 
prot, &page, caller); + addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr); else - addr = __alloc_from_contiguous(dev, size, prot, &page, caller); + addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr); - if (addr) + if (page) *handle = pfn_to_dma(dev, page_to_pfn(page)); - return addr; + return want_vaddr ? addr : page; } /* @@ -661,7 +674,7 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, return memory; return __dma_alloc(dev, size, handle, gfp, prot, false, - __builtin_return_address(0)); + attrs, __builtin_return_address(0)); } static void *arm_coherent_dma_alloc(struct device *dev, size_t size, @@ -674,7 +687,7 @@ static void *arm_coherent_dma_alloc(struct device *dev, size_t size, return memory; return __dma_alloc(dev, size, handle, gfp, prot, true, - __builtin_return_address(0)); + attrs, __builtin_return_address(0)); } /* @@ -715,6 +728,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, bool is_coherent) { struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); + bool want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs); if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) return; @@ -726,14 +740,15 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, } else if (__free_from_pool(cpu_addr, size)) { return; } else if (!dev_get_cma_area(dev)) { - __dma_free_remap(cpu_addr, size); + if (want_vaddr) + __dma_free_remap(cpu_addr, size); __dma_free_buffer(page, size); } else { /* * Non-atomic allocations cannot be freed with IRQs disabled */ WARN_ON(irqs_disabled()); - __free_from_contiguous(dev, page, cpu_addr, size); + __free_from_contiguous(dev, page, cpu_addr, size, want_vaddr); } } @@ -1135,13 +1150,28 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp |= __GFP_NOWARN | __GFP_HIGHMEM; while (count) { - int j, order = __fls(count); + int j, order; + + for (order = __fls(count); order > 0; --order) { + /* + * We do not want OOM killer to be invoked as long + * as we can fall back to single pages, so we force + * __GFP_NORETRY for orders higher than zero. + */ + pages[i] = alloc_pages(gfp | __GFP_NORETRY, order); + if (pages[i]) + break; + } - pages[i] = alloc_pages(gfp, order); - while (!pages[i] && order) - pages[i] = alloc_pages(gfp, --order); - if (!pages[i]) - goto error; + if (!pages[i]) { + /* + * Fall back to single page allocation. + * Might invoke OOM killer as last resort. 
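The want_vaddr changes to dma-mapping.c above make DMA_ATTR_NO_KERNEL_MAPPING meaningful for CMA and remapped buffers: when the attribute is set, no kernel virtual mapping is created and only the page and DMA handle are returned. A hedged sketch of a caller (the helper name is hypothetical; the attrs API shown is the struct dma_attrs interface of this kernel generation):

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>

/* Hypothetical driver helper: allocate a buffer the CPU never touches,
 * so the kernel mapping that want_vaddr now makes optional is skipped. */
static void *alloc_device_only_buffer(struct device *dev, size_t size,
				      dma_addr_t *dma)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
	/* The return value is an opaque cookie here, not a CPU address. */
	return dma_alloc_attrs(dev, size, dma, GFP_KERNEL, &attrs);
}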
+ */ + pages[i] = alloc_pages(gfp, 0); + if (!pages[i]) + goto error; + } if (order) { split_page(pages[i], order); @@ -1206,7 +1236,7 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot, static dma_addr_t __iommu_create_mapping(struct device *dev, struct page **pages, size_t size) { - struct dma_iommu_mapping *mapping = dev->archdata.mapping; + struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; dma_addr_t dma_addr, iova; int i, ret = DMA_ERROR_CODE; @@ -1242,7 +1272,7 @@ fail: static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size) { - struct dma_iommu_mapping *mapping = dev->archdata.mapping; + struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); /* * add optional in-page offset from iova to size and align @@ -1457,7 +1487,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, enum dma_data_direction dir, struct dma_attrs *attrs, bool is_coherent) { - struct dma_iommu_mapping *mapping = dev->archdata.mapping; + struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); dma_addr_t iova, iova_base; int ret = 0; unsigned int count; @@ -1678,7 +1708,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { - struct dma_iommu_mapping *mapping = dev->archdata.mapping; + struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); dma_addr_t dma_addr; int ret, prot, len = PAGE_ALIGN(size + offset); @@ -1731,7 +1761,7 @@ static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { - struct dma_iommu_mapping *mapping = dev->archdata.mapping; + struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); dma_addr_t iova = handle & PAGE_MASK; int offset = handle & ~PAGE_MASK; int len = PAGE_ALIGN(size + offset); @@ -1756,7 +1786,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { - struct dma_iommu_mapping *mapping = dev->archdata.mapping; + struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); dma_addr_t iova = handle & PAGE_MASK; struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); int offset = handle & ~PAGE_MASK; @@ -1775,7 +1805,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, static void arm_iommu_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { - struct dma_iommu_mapping *mapping = dev->archdata.mapping; + struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); dma_addr_t iova = handle & PAGE_MASK; struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); unsigned int offset = handle & ~PAGE_MASK; @@ -1789,7 +1819,7 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev, static void arm_iommu_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { - struct dma_iommu_mapping *mapping = dev->archdata.mapping; + struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); dma_addr_t iova = handle & PAGE_MASK; struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); unsigned int offset = handle & ~PAGE_MASK; @@ -1950,7 +1980,7 @@ static int __arm_iommu_attach_device(struct device *dev, return err; kref_get(&mapping->kref); 
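The repeated dev->archdata.mapping conversions above go through a to_dma_iommu_mapping() accessor whose definition lives outside this diff; since the attach/detach paths also assign through it, it is presumably a macro along these lines (an assumption, not shown in the patch):

/* Assumed definition (not part of this diff): hides where the IOMMU
 * mapping pointer is stored; must expand to an lvalue because the
 * attach/detach code above assigns through it. */
#define to_dma_iommu_mapping(dev)	((dev)->archdata.mapping)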
- dev->archdata.mapping = mapping; + to_dma_iommu_mapping(dev) = mapping; pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev)); return 0; @@ -1995,7 +2025,7 @@ static void __arm_iommu_detach_device(struct device *dev) iommu_detach_device(mapping->domain, dev); kref_put(&mapping->kref, release_iommu_mapping); - dev->archdata.mapping = NULL; + to_dma_iommu_mapping(dev) = NULL; pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); } @@ -2027,6 +2057,13 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, if (!iommu) return false; + /* + * currently arm_iommu_create_mapping() takes a max of size_t + * for size param. So check this limit for now. + */ + if (size > SIZE_MAX) + return false; + mapping = arm_iommu_create_mapping(dev->bus, dma_base, size); if (IS_ERR(mapping)) { pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n", @@ -2046,7 +2083,7 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, static void arm_teardown_iommu_dma_ops(struct device *dev) { - struct dma_iommu_mapping *mapping = dev->archdata.mapping; + struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); if (!mapping) return; diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 1609b022a72f..be92fa0f2f35 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -86,55 +86,6 @@ static int __init parse_tag_initrd2(const struct tag *tag) __tagtable(ATAG_INITRD2, parse_tag_initrd2); -/* - * This keeps memory configuration data used by a couple memory - * initialization functions, as well as show_mem() for the skipping - * of holes in the memory map. It is populated by arm_add_memory(). - */ -void show_mem(unsigned int filter) -{ - int free = 0, total = 0, reserved = 0; - int shared = 0, cached = 0, slab = 0; - struct memblock_region *reg; - - printk("Mem-info:\n"); - show_free_areas(filter); - - for_each_memblock (memory, reg) { - unsigned int pfn1, pfn2; - struct page *page, *end; - - pfn1 = memblock_region_memory_base_pfn(reg); - pfn2 = memblock_region_memory_end_pfn(reg); - - page = pfn_to_page(pfn1); - end = pfn_to_page(pfn2 - 1) + 1; - - do { - total++; - if (PageReserved(page)) - reserved++; - else if (PageSwapCache(page)) - cached++; - else if (PageSlab(page)) - slab++; - else if (!page_count(page)) - free++; - else - shared += page_count(page) - 1; - pfn1++; - page = pfn_to_page(pfn1); - } while (pfn1 < pfn2); - } - - printk("%d pages of RAM\n", total); - printk("%d free pages\n", free); - printk("%d reserved pages\n", reserved); - printk("%d slab pages\n", slab); - printk("%d pages shared\n", shared); - printk("%d pages swap cached\n", cached); -} - static void __init find_limits(unsigned long *min, unsigned long *max_low, unsigned long *max_high) { @@ -335,6 +286,9 @@ void __init bootmem_init(void) find_limits(&min, &max_low, &max_high); + early_memtest((phys_addr_t)min << PAGE_SHIFT, + (phys_addr_t)max_low << PAGE_SHIFT); + /* * Sparsemem tries to allocate bootmem in memory_present(), * so must be done after the fixed reservations diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index 5e85ed371364..407dc786583a 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -169,14 +169,22 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, return addr; } +unsigned long arch_mmap_rnd(void) +{ + unsigned long rnd; + + /* 8 bits of randomness in 20 address space bits */ + rnd = (unsigned long)get_random_int() % (1 << 8); + + return rnd << PAGE_SHIFT; +} + void 
arch_pick_mmap_layout(struct mm_struct *mm) { unsigned long random_factor = 0UL; - /* 8 bits of randomness in 20 address space bits */ - if ((current->flags & PF_RANDOMIZE) && - !(current->personality & ADDR_NO_RANDOMIZE)) - random_factor = (get_random_int() % (1 << 8)) << PAGE_SHIFT; + if (current->flags & PF_RANDOMIZE) + random_factor = arch_mmap_rnd(); if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index 86ee5d47ce3c..aa0519eed698 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S @@ -507,7 +507,7 @@ cpu_arm1020_name: .align - .section ".proc.info.init", #alloc, #execinstr + .section ".proc.info.init", #alloc .type __arm1020_proc_info,#object __arm1020_proc_info: @@ -519,7 +519,7 @@ __arm1020_proc_info: .long PMD_TYPE_SECT | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ - b __arm1020_setup + initfn __arm1020_setup, __arm1020_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index a6331d78601f..bff4c7f70fd6 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S @@ -465,7 +465,7 @@ arm1020e_crval: .align - .section ".proc.info.init", #alloc, #execinstr + .section ".proc.info.init", #alloc .type __arm1020e_proc_info,#object __arm1020e_proc_info: @@ -479,7 +479,7 @@ __arm1020e_proc_info: PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ - b __arm1020e_setup + initfn __arm1020e_setup, __arm1020e_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_EDSP diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index a126b7a59928..dbb2413fe04d 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S @@ -448,7 +448,7 @@ arm1022_crval: .align - .section ".proc.info.init", #alloc, #execinstr + .section ".proc.info.init", #alloc .type __arm1022_proc_info,#object __arm1022_proc_info: @@ -462,7 +462,7 @@ __arm1022_proc_info: PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ - b __arm1022_setup + initfn __arm1022_setup, __arm1022_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_EDSP diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index fc294067e977..0b37b2cef9d3 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S @@ -442,7 +442,7 @@ arm1026_crval: string cpu_arm1026_name, "ARM1026EJ-S" .align - .section ".proc.info.init", #alloc, #execinstr + .section ".proc.info.init", #alloc .type __arm1026_proc_info,#object __arm1026_proc_info: @@ -456,7 +456,7 @@ __arm1026_proc_info: PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ - b __arm1026_setup + initfn __arm1026_setup, __arm1026_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S index 2baa66b3ac9b..3651cd70e418 100644 --- a/arch/arm/mm/proc-arm720.S +++ b/arch/arm/mm/proc-arm720.S @@ -186,7 +186,7 @@ arm720_crval: * See <asm/procinfo.h> for a definition of this structure. 
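The mmap.c change above factors the randomization into arch_mmap_rnd(): eight random bits shifted by PAGE_SHIFT, i.e. an offset of 0 to 255 pages within a 1 MB window, which is the "8 bits of randomness in 20 address space bits" the comment refers to. A standalone sketch of the same arithmetic (rand32 stands in for the kernel's get_random_int()):

#include <stdint.h>

static unsigned long mmap_rnd(uint32_t rand32)
{
	unsigned long rnd = rand32 % (1 << 8);	/* 8 bits: 0..255 pages */

	return rnd << 12;			/* PAGE_SHIFT == 12 on ARM */
}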
*/ - .section ".proc.info.init", #alloc, #execinstr + .section ".proc.info.init", #alloc .macro arm720_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cpu_flush:req .type __\name\()_proc_info,#object @@ -203,7 +203,7 @@ __\name\()_proc_info: PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ - b \cpu_flush @ cpu_flush + initfn \cpu_flush, __\name\()_proc_info @ cpu_flush .long cpu_arch_name @ arch_name .long cpu_elf_name @ elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB @ elf_hwcap diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S index ac1ea6b3bce4..024fb7732407 100644 --- a/arch/arm/mm/proc-arm740.S +++ b/arch/arm/mm/proc-arm740.S @@ -132,14 +132,14 @@ __arm740_setup: .align - .section ".proc.info.init", #alloc, #execinstr + .section ".proc.info.init", #alloc .type __arm740_proc_info,#object __arm740_proc_info: .long 0x41807400 .long 0xfffffff0 .long 0 .long 0 - b __arm740_setup + initfn __arm740_setup, __arm740_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S index bf6ba4bc30ff..25472d94426d 100644 --- a/arch/arm/mm/proc-arm7tdmi.S +++ b/arch/arm/mm/proc-arm7tdmi.S @@ -76,7 +76,7 @@ __arm7tdmi_setup: .align - .section ".proc.info.init", #alloc, #execinstr + .section ".proc.info.init", #alloc .macro arm7tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, \ extra_hwcaps=0 @@ -86,7 +86,7 @@ __\name\()_proc_info: .long \cpu_mask .long 0 .long 0 - b __arm7tdmi_setup + initfn __arm7tdmi_setup, __\name\()_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_26BIT | ( \extra_hwcaps ) diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 22bf8dde4f84..7a14bd4414c9 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -448,7 +448,7 @@ arm920_crval: .align - .section ".proc.info.init", #alloc, #execinstr + .section ".proc.info.init", #alloc .type __arm920_proc_info,#object __arm920_proc_info: @@ -464,7 +464,7 @@ __arm920_proc_info: PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ - b __arm920_setup + initfn __arm920_setup, __arm920_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index 0c6d5ac5a6d4..edccfcdcd551 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S @@ -426,7 +426,7 @@ arm922_crval: .align - .section ".proc.info.init", #alloc, #execinstr + .section ".proc.info.init", #alloc .type __arm922_proc_info,#object __arm922_proc_info: @@ -442,7 +442,7 @@ __arm922_proc_info: PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ - b __arm922_setup + initfn __arm922_setup, __arm922_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index c32d073282ea..ede8c54ab4aa 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S @@ -494,7 +494,7 @@ arm925_crval: .align - .section ".proc.info.init", #alloc, #execinstr + .section ".proc.info.init", #alloc .macro arm925_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache .type __\name\()_proc_info,#object @@ -510,7 +510,7 @@ __\name\()_proc_info: PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ - b __arm925_setup + initfn __arm925_setup, __\name\()_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB diff --git 
a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 252b2503038d..fb827c633693 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -474,7 +474,7 @@ arm926_crval: .align - .section ".proc.info.init", #alloc, #execinstr + .section ".proc.info.init", #alloc .type __arm926_proc_info,#object __arm926_proc_info: @@ -490,7 +490,7 @@ __arm926_proc_info: PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ - b __arm926_setup + initfn __arm926_setup, __arm926_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index e5212d489377..ee5b66f847c4 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S @@ -297,26 +297,16 @@ __arm940_setup: mcr p15, 0, r0, c6, c0, 1 ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM - ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB) - mov r2, #10 @ 11 is the minimum (4KB) -1: add r2, r2, #1 @ area size *= 2 - mov r1, r1, lsr #1 - bne 1b @ count not zero r-shift - orr r0, r0, r2, lsl #1 @ the area register value - orr r0, r0, #1 @ set enable bit - mcr p15, 0, r0, c6, c1, 0 @ set area 1, RAM - mcr p15, 0, r0, c6, c1, 1 + ldr r7, =CONFIG_DRAM_SIZE >> 12 @ size of RAM (must be >= 4KB) + pr_val r3, r0, r7, #1 + mcr p15, 0, r3, c6, c1, 0 @ set area 1, RAM + mcr p15, 0, r3, c6, c1, 1 ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH - ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB) - mov r2, #10 @ 11 is the minimum (4KB) -1: add r2, r2, #1 @ area size *= 2 - mov r1, r1, lsr #1 - bne 1b @ count not zero r-shift - orr r0, r0, r2, lsl #1 @ the area register value - orr r0, r0, #1 @ set enable bit - mcr p15, 0, r0, c6, c2, 0 @ set area 2, ROM/FLASH - mcr p15, 0, r0, c6, c2, 1 + ldr r7, =CONFIG_FLASH_SIZE @ size of FLASH (must be >= 4KB) + pr_val r3, r0, r6, #1 + mcr p15, 0, r3, c6, c2, 0 @ set area 2, ROM/FLASH + mcr p15, 0, r3, c6, c2, 1 mov r0, #0x06 mcr p15, 0, r0, c2, c0, 0 @ Region 1&2 cacheable @@ -354,14 +344,14 @@ __arm940_setup: .align - .section ".proc.info.init", #alloc, #execinstr + .section ".proc.info.init", #alloc .type __arm940_proc_info,#object __arm940_proc_info: .long 0x41009400 .long 0xff00fff0 .long 0 - b __arm940_setup + initfn __arm940_setup, __arm940_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index b3dd9b2d0b8e..7361837edc31 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S @@ -343,24 +343,14 @@ __arm946_setup: mcr p15, 0, r0, c6, c0, 0 @ set region 0, default ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM - ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB) - mov r2, #10 @ 11 is the minimum (4KB) -1: add r2, r2, #1 @ area size *= 2 - mov r1, r1, lsr #1 - bne 1b @ count not zero r-shift - orr r0, r0, r2, lsl #1 @ the region register value - orr r0, r0, #1 @ set enable bit - mcr p15, 0, r0, c6, c1, 0 @ set region 1, RAM + ldr r7, =CONFIG_DRAM_SIZE @ size of RAM (must be >= 4KB) + pr_val r3, r0, r7, #1 + mcr p15, 0, r3, c6, c1, 0 ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH - ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB) - mov r2, #10 @ 11 is the minimum (4KB) -1: add r2, r2, #1 @ area size *= 2 - mov r1, r1, lsr #1 - bne 1b @ count not zero r-shift - orr r0, r0, r2, lsl #1 @ the region register value - 
orr r0, r0, #1 @ set enable bit - mcr p15, 0, r0, c6, c2, 0 @ set region 2, ROM/FLASH + ldr r7, =CONFIG_FLASH_SIZE @ size of FLASH (must be >= 4KB) + pr_val r3, r0, r7, #1 + mcr p15, 0, r3, c6, c2, 0 mov r0, #0x06 mcr p15, 0, r0, c2, c0, 0 @ region 1,2 d-cacheable @@ -409,14 +399,14 @@ __arm946_setup: .align - .section ".proc.info.init", #alloc, #execinstr + .section ".proc.info.init", #alloc .type __arm946_proc_info,#object __arm946_proc_info: .long 0x41009460 .long 0xff00fff0 .long 0 .long 0 - b __arm946_setup + initfn __arm946_setup, __arm946_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S index 8227322bbb8f..7fac8c612134 100644 --- a/arch/arm/mm/proc-arm9tdmi.S +++ b/arch/arm/mm/proc-arm9tdmi.S @@ -70,7 +70,7 @@ __arm9tdmi_setup: .align - .section ".proc.info.init", #alloc, #execinstr + .section ".proc.info.init", #alloc .macro arm9tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req .type __\name\()_proc_info, #object @@ -79,7 +79,7 @@ __\name\()_proc_info: .long \cpu_mask .long 0 .long 0 - b __arm9tdmi_setup + initfn __arm9tdmi_setup, __\name\()_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S index c494886892ba..4001b73af4ee 100644 --- a/arch/arm/mm/proc-fa526.S +++ b/arch/arm/mm/proc-fa526.S @@ -190,7 +190,7 @@ fa526_cr1_set: .align - .section ".proc.info.init", #alloc, #execinstr + .section ".proc.info.init", #alloc .type __fa526_proc_info,#object __fa526_proc_info: @@ -206,7 +206,7 @@ __fa526_proc_info: PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ - b __fa526_setup + initfn __fa526_setup, __fa526_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index 03a1b75f2e16..e494d6d6acbe 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S @@ -584,7 +584,7 @@ feroceon_crval: .align - .section ".proc.info.init", #alloc, #execinstr + .section ".proc.info.init", #alloc .macro feroceon_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache:req .type __\name\()_proc_info,#object @@ -601,7 +601,8 @@ __\name\()_proc_info: PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ - b __feroceon_setup + initfn __feroceon_setup, __\name\()_proc_info + .long __feroceon_setup .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index 082b9f2f7e90..c671f345266a 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S @@ -331,3 +331,31 @@ ENTRY(\name\()_tlb_fns) .globl \x .equ \x, \y .endm + +.macro initfn, func, base + .long \func - \base +.endm + + /* + * Macro to calculate the log2 size for the protection region + * registers. This calculates rd = log2(size) - 1. tmp must + * not be the same register as rd. + */ +.macro pr_sz, rd, size, tmp + mov \tmp, \size, lsr #12 + mov \rd, #11 +1: movs \tmp, \tmp, lsr #1 + addne \rd, \rd, #1 + bne 1b +.endm + + /* + * Macro to generate a protection region register value + * given a pre-masked address, size, and enable bit. + * Corrupts size. 
+ */ +.macro pr_val, dest, addr, size, enable + pr_sz \dest, \size, \size @ calculate log2(size) - 1 + orr \dest, \addr, \dest, lsl #1 @ mask in the region size + orr \dest, \dest, \enable +.endm diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 53d393455f13..d65edf717bf7 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S @@ -427,7 +427,7 @@ mohawk_crval: .align - .section ".proc.info.init", #alloc, #execinstr + .section ".proc.info.init", #alloc .type __88sv331x_proc_info,#object __88sv331x_proc_info: @@ -443,7 +443,7 @@ __88sv331x_proc_info: PMD_BIT4 | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ - b __mohawk_setup + initfn __mohawk_setup, __88sv331x_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S index 8008a0461cf5..ee2ce496239f 100644 --- a/arch/arm/mm/proc-sa110.S +++ b/arch/arm/mm/proc-sa110.S @@ -199,7 +199,7 @@ sa110_crval: .align - .section ".proc.info.init", #alloc, #execinstr + .section ".proc.info.init", #alloc .type __sa110_proc_info,#object __sa110_proc_info: @@ -213,7 +213,7 @@ __sa110_proc_info: .long PMD_TYPE_SECT | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ - b __sa110_setup + initfn __sa110_setup, __sa110_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 89f97ac648a9..222d5836f666 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S @@ -242,7 +242,7 @@ sa1100_crval: .align - .section ".proc.info.init", #alloc, #execinstr + .section ".proc.info.init", #alloc .macro sa1100_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req .type __\name\()_proc_info,#object @@ -257,7 +257,7 @@ __\name\()_proc_info: .long PMD_TYPE_SECT | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ - b __sa1100_setup + initfn __sa1100_setup, __\name\()_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index d0390f4b3f18..06d890a2342b 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -264,7 +264,7 @@ v6_crval: string cpu_elf_name, "v6" .align - .section ".proc.info.init", #alloc, #execinstr + .section ".proc.info.init", #alloc /* * Match any ARMv6 processor core. 
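The pr_sz/pr_val macros added to proc-macros.S above encode an ARM940/ARM946 protection region register: the pre-masked base address, log2(size) - 1 in bits [5:1], and the enable bit in bit 0. An equivalent C sketch of the encoding (illustration only; the real implementation is the assembly macro):

#include <linux/types.h>
#include <linux/log2.h>

/* C rendering of pr_val: base is already masked to [31:12], size is a
 * power of two of at least 4 KiB (4 KiB encodes as 11, as in pr_sz). */
static inline u32 pr_val(u32 base, u32 size, u32 enable)
{
	return base | ((ilog2(size) - 1) << 1) | enable;
}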
@@ -287,7 +287,7 @@ __v6_proc_info: PMD_SECT_XN | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ - b __v6_setup + initfn __v6_setup, __v6_proc_info .long cpu_arch_name .long cpu_elf_name /* See also feat_v6_fixup() for HWCAP_TLS */ diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S index ed448d8a596b..10405b8d31af 100644 --- a/arch/arm/mm/proc-v7-2level.S +++ b/arch/arm/mm/proc-v7-2level.S @@ -37,15 +37,18 @@ * It is assumed that: * - we are not using split page tables */ -ENTRY(cpu_v7_switch_mm) +ENTRY(cpu_ca8_switch_mm) #ifdef CONFIG_MMU mov r2, #0 - mmid r1, r1 @ get mm->context.id - ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) - ALT_UP(orr r0, r0, #TTB_FLAGS_UP) #ifdef CONFIG_ARM_ERRATA_430973 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB #endif +#endif +ENTRY(cpu_v7_switch_mm) +#ifdef CONFIG_MMU + mmid r1, r1 @ get mm->context.id + ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) + ALT_UP(orr r0, r0, #TTB_FLAGS_UP) #ifdef CONFIG_PID_IN_CONTEXTIDR mrc p15, 0, r2, c13, c0, 1 @ read current context ID lsr r2, r2, #8 @ extract the PID @@ -61,6 +64,7 @@ ENTRY(cpu_v7_switch_mm) #endif bx lr ENDPROC(cpu_v7_switch_mm) +ENDPROC(cpu_ca8_switch_mm) /* * cpu_v7_set_pte_ext(ptep, pte) diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 8b4ee5e81c14..3d1054f11a8a 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -153,6 +153,21 @@ ENDPROC(cpu_v7_do_resume) #endif /* + * Cortex-A8 + */ + globl_equ cpu_ca8_proc_init, cpu_v7_proc_init + globl_equ cpu_ca8_proc_fin, cpu_v7_proc_fin + globl_equ cpu_ca8_reset, cpu_v7_reset + globl_equ cpu_ca8_do_idle, cpu_v7_do_idle + globl_equ cpu_ca8_dcache_clean_area, cpu_v7_dcache_clean_area + globl_equ cpu_ca8_set_pte_ext, cpu_v7_set_pte_ext + globl_equ cpu_ca8_suspend_size, cpu_v7_suspend_size +#ifdef CONFIG_ARM_CPU_SUSPEND + globl_equ cpu_ca8_do_suspend, cpu_v7_do_suspend + globl_equ cpu_ca8_do_resume, cpu_v7_do_resume +#endif + +/* * Cortex-A9 processor functions */ globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init @@ -451,7 +466,10 @@ __v7_setup_stack: @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 +#ifndef CONFIG_ARM_LPAE + define_processor_functions ca8, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 +#endif #ifdef CONFIG_CPU_PJ4B define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 #endif @@ -462,19 +480,19 @@ __v7_setup_stack: string cpu_elf_name, "v7" .align - .section ".proc.info.init", #alloc, #execinstr + .section ".proc.info.init", #alloc /* * Standard v7 proc info content */ -.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions +.macro __v7_proc name, initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags) ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ PMD_SECT_AF | PMD_FLAGS_UP | \mm_mmuflags) .long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ | PMD_SECT_AF | \io_mmuflags - W(b) \initfunc + initfn \initfunc, \name .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \ @@ -494,7 +512,7 @@ __v7_setup_stack: __v7_ca5mp_proc_info: .long 0x410fc050 .long 0xff0ffff0 - __v7_proc __v7_ca5mp_setup + __v7_proc __v7_ca5mp_proc_info, __v7_ca5mp_setup 
.size __v7_ca5mp_proc_info, . - __v7_ca5mp_proc_info /* @@ -504,9 +522,19 @@ __v7_ca5mp_proc_info: __v7_ca9mp_proc_info: .long 0x410fc090 .long 0xff0ffff0 - __v7_proc __v7_ca9mp_setup, proc_fns = ca9mp_processor_functions + __v7_proc __v7_ca9mp_proc_info, __v7_ca9mp_setup, proc_fns = ca9mp_processor_functions .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info + /* + * ARM Ltd. Cortex A8 processor. + */ + .type __v7_ca8_proc_info, #object +__v7_ca8_proc_info: + .long 0x410fc080 + .long 0xff0ffff0 + __v7_proc __v7_ca8_proc_info, __v7_setup, proc_fns = ca8_processor_functions + .size __v7_ca8_proc_info, . - __v7_ca8_proc_info + #endif /* CONFIG_ARM_LPAE */ /* @@ -517,7 +545,7 @@ __v7_ca9mp_proc_info: __v7_pj4b_proc_info: .long 0x560f5800 .long 0xff0fff00 - __v7_proc __v7_pj4b_setup, proc_fns = pj4b_processor_functions + __v7_proc __v7_pj4b_proc_info, __v7_pj4b_setup, proc_fns = pj4b_processor_functions .size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info #endif @@ -528,7 +556,7 @@ __v7_pj4b_proc_info: __v7_cr7mp_proc_info: .long 0x410fc170 .long 0xff0ffff0 - __v7_proc __v7_cr7mp_setup + __v7_proc __v7_cr7mp_proc_info, __v7_cr7mp_setup .size __v7_cr7mp_proc_info, . - __v7_cr7mp_proc_info /* @@ -538,7 +566,7 @@ __v7_cr7mp_proc_info: __v7_ca7mp_proc_info: .long 0x410fc070 .long 0xff0ffff0 - __v7_proc __v7_ca7mp_setup + __v7_proc __v7_ca7mp_proc_info, __v7_ca7mp_setup .size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info /* @@ -548,7 +576,7 @@ __v7_ca7mp_proc_info: __v7_ca12mp_proc_info: .long 0x410fc0d0 .long 0xff0ffff0 - __v7_proc __v7_ca12mp_setup + __v7_proc __v7_ca12mp_proc_info, __v7_ca12mp_setup .size __v7_ca12mp_proc_info, . - __v7_ca12mp_proc_info /* @@ -558,7 +586,7 @@ __v7_ca12mp_proc_info: __v7_ca15mp_proc_info: .long 0x410fc0f0 .long 0xff0ffff0 - __v7_proc __v7_ca15mp_setup + __v7_proc __v7_ca15mp_proc_info, __v7_ca15mp_setup .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info /* @@ -568,7 +596,7 @@ __v7_ca15mp_proc_info: __v7_b15mp_proc_info: .long 0x420f00f0 .long 0xff0ffff0 - __v7_proc __v7_b15mp_setup + __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup .size __v7_b15mp_proc_info, . - __v7_b15mp_proc_info /* @@ -578,7 +606,7 @@ __v7_b15mp_proc_info: __v7_ca17mp_proc_info: .long 0x410fc0e0 .long 0xff0ffff0 - __v7_proc __v7_ca17mp_setup + __v7_proc __v7_ca17mp_proc_info, __v7_ca17mp_setup .size __v7_ca17mp_proc_info, . - __v7_ca17mp_proc_info /* @@ -594,7 +622,7 @@ __krait_proc_info: * do support them. They also don't indicate support for fused multiply * instructions even though they actually do support them. */ - __v7_proc __v7_setup, hwcaps = HWCAP_IDIV | HWCAP_VFPv4 + __v7_proc __krait_proc_info, __v7_setup, hwcaps = HWCAP_IDIV | HWCAP_VFPv4 .size __krait_proc_info, . - __krait_proc_info /* @@ -604,5 +632,5 @@ __krait_proc_info: __v7_proc_info: .long 0x000f0000 @ Required ID value .long 0x000f0000 @ Mask for ID - __v7_proc __v7_setup + __v7_proc __v7_proc_info, __v7_setup .size __v7_proc_info, . - __v7_proc_info diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S index d1e68b553d3b..e08e1f2bab76 100644 --- a/arch/arm/mm/proc-v7m.S +++ b/arch/arm/mm/proc-v7m.S @@ -135,7 +135,7 @@ __v7m_setup_stack_top: string cpu_elf_name "v7m" string cpu_v7m_name "ARMv7-M" - .section ".proc.info.init", #alloc, #execinstr + .section ".proc.info.init", #alloc /* * Match any ARMv7-M processor core. 
@@ -146,7 +146,7 @@ __v7m_proc_info: .long 0x000f0000 @ Mask for ID .long 0 @ proc_info_list.__cpu_mm_mmu_flags .long 0 @ proc_info_list.__cpu_io_mmu_flags - b __v7m_setup @ proc_info_list.__cpu_flush + initfn __v7m_setup, __v7m_proc_info @ proc_info_list.__cpu_flush .long cpu_arch_name .long cpu_elf_name .long HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index f8acdfece036..293dcc2c441f 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -499,7 +499,7 @@ xsc3_crval: .align - .section ".proc.info.init", #alloc, #execinstr + .section ".proc.info.init", #alloc .macro xsc3_proc_info name:req, cpu_val:req, cpu_mask:req .type __\name\()_proc_info,#object @@ -514,7 +514,7 @@ __\name\()_proc_info: .long PMD_TYPE_SECT | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ - b __xsc3_setup + initfn __xsc3_setup, __\name\()_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index afa2b3c4df4a..b6bbfdb6dfdc 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -612,7 +612,7 @@ xscale_crval: .align - .section ".proc.info.init", #alloc, #execinstr + .section ".proc.info.init", #alloc .macro xscale_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache .type __\name\()_proc_info,#object @@ -627,7 +627,7 @@ __\name\()_proc_info: .long PMD_TYPE_SECT | \ PMD_SECT_AP_WRITE | \ PMD_SECT_AP_READ - b __xscale_setup + initfn __xscale_setup, __\name\()_proc_info .long cpu_arch_name .long cpu_elf_name .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP |
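Throughout the proc-*.S files above, the "b __xxx_setup" slot in each proc_info entry is replaced by "initfn __xxx_setup, __xxx_proc_info", which per the proc-macros.S hunk stores the PC-relative offset func - base instead of a branch instruction, so .proc.info.init no longer needs to be executable. A C-style sketch of how a consumer could turn the stored offset back into a callable address (illustrative only; the real consumer is assembly in the boot code, and the struct and field names follow <asm/procinfo.h>):

#include <asm/procinfo.h>

typedef void (*cpu_init_fn)(void);

/* Illustrative: __cpu_flush now holds "func - proc_info", so adding the
 * entry's own address recovers the absolute address of the setup code. */
static cpu_init_fn proc_info_initfn(const struct proc_info_list *info)
{
	return (cpu_init_fn)((unsigned long)info + info->__cpu_flush);
}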