Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig               1
-rw-r--r--  arch/arm/mm/Makefile              2
-rw-r--r--  arch/arm/mm/alignment.c          10
-rw-r--r--  arch/arm/mm/cache-feroceon-l2.c   6
-rw-r--r--  arch/arm/mm/cache-l2x0.c         26
-rw-r--r--  arch/arm/mm/cache-tauros2.c      12
-rw-r--r--  arch/arm/mm/context.c            58
-rw-r--r--  arch/arm/mm/copypage-v6.c         2
-rw-r--r--  arch/arm/mm/dma-mapping.c         1
-rw-r--r--  arch/arm/mm/fault-armv.c          6
-rw-r--r--  arch/arm/mm/fault.c              31
-rw-r--r--  arch/arm/mm/flush.c               2
-rw-r--r--  arch/arm/mm/highmem.c             3
-rw-r--r--  arch/arm/mm/init.c                4
-rw-r--r--  arch/arm/mm/mmu.c                94
-rw-r--r--  arch/arm/mm/pageattr.c           91
-rw-r--r--  arch/arm/mm/proc-v7.S             7
-rw-r--r--  arch/arm/mm/proc-xscale.S         4
18 files changed, 221 insertions, 139 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index ae69809a9e47..7eb94e6fc376 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -798,6 +798,7 @@ config NEED_KUSER_HELPERS
config KUSER_HELPERS
bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
+ depends on MMU
default y
help
Warning: disabling this option may break user programs.
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 91da64de440f..d3afdf9eb65a 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -6,7 +6,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
iomap.o
obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \
- mmap.o pgd.o mmu.o
+ mmap.o pgd.o mmu.o pageattr.o
ifneq ($(CONFIG_MMU),y)
obj-y += nommu.o
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 83792f4324ea..2c0c541c60ca 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -113,7 +113,7 @@ static int safe_usermode(int new_usermode, bool warn)
new_usermode |= UM_FIXUP;
if (warn)
- printk(KERN_WARNING "alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n");
+ pr_warn("alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n");
}
return new_usermode;
@@ -523,7 +523,7 @@ do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *reg
* processor for us.
*/
if (addr != eaddr) {
- printk(KERN_ERR "LDMSTM: PC = %08lx, instr = %08lx, "
+ pr_err("LDMSTM: PC = %08lx, instr = %08lx, "
"addr = %08lx, eaddr = %08lx\n",
instruction_pointer(regs), instr, addr, eaddr);
show_regs(regs);
@@ -567,7 +567,7 @@ fault:
return TYPE_FAULT;
bad:
- printk(KERN_ERR "Alignment trap: not handling ldm with s-bit set\n");
+ pr_err("Alignment trap: not handling ldm with s-bit set\n");
return TYPE_ERROR;
}
@@ -899,13 +899,13 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
return 0;
swp:
- printk(KERN_ERR "Alignment trap: not handling swp instruction\n");
+ pr_err("Alignment trap: not handling swp instruction\n");
bad:
/*
* Oops, we didn't handle the instruction.
*/
- printk(KERN_ERR "Alignment trap: not handling instruction "
+ pr_err("Alignment trap: not handling instruction "
"%0*lx at [<%08lx>]\n",
isize << 1,
isize == 2 ? tinstr : instr, instrptr);
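
The alignment.c hunks above (and the similar hunks in the files that follow) are a mechanical printk(KERN_*) to pr_*() conversion. As a rough illustration only, not part of this patch: the pr_* helpers from <linux/printk.h> expand to printk() with the level string prepended and pick up an optional per-file pr_fmt() prefix, which is why they are preferred for new code. A minimal sketch, with a made-up "demo: " prefix and demo_report() helper:

/* Sketch only -- the prefix and demo_report() are invented for illustration. */
#define pr_fmt(fmt) "demo: " fmt	/* must be defined before the includes */

#include <linux/kernel.h>
#include <linux/printk.h>

static void demo_report(int err)
{
	/* Expands to printk(KERN_ERR "demo: request failed: %d\n", err) */
	pr_err("request failed: %d\n", err);
}
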
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index e028a7f2ebcc..097181e08c25 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -313,7 +313,7 @@ static void __init disable_l2_prefetch(void)
*/
u = read_extra_features();
if (!(u & 0x01000000)) {
- printk(KERN_INFO "Feroceon L2: Disabling L2 prefetch.\n");
+ pr_info("Feroceon L2: Disabling L2 prefetch.\n");
write_extra_features(u | 0x01000000);
}
}
@@ -326,7 +326,7 @@ static void __init enable_l2(void)
if (!(u & 0x00400000)) {
int i, d;
- printk(KERN_INFO "Feroceon L2: Enabling L2\n");
+ pr_info("Feroceon L2: Enabling L2\n");
d = flush_and_disable_dcache();
i = invalidate_and_disable_icache();
@@ -353,7 +353,7 @@ void __init feroceon_l2_init(int __l2_wt_override)
enable_l2();
- printk(KERN_INFO "Feroceon L2: Cache support initialised%s.\n",
+ pr_info("Feroceon L2: Cache support initialised%s.\n",
l2_wt_override ? ", in WT override mode" : "");
}
#ifdef CONFIG_OF
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 55f9d6e0cc88..5e65ca8dea62 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -956,7 +956,7 @@ static u32 cache_id_part_number_from_dt;
* @associativity: variable to return the calculated associativity in
* @max_way_size: the maximum size in bytes for the cache ways
*/
-static void __init l2x0_cache_size_of_parse(const struct device_node *np,
+static int __init l2x0_cache_size_of_parse(const struct device_node *np,
u32 *aux_val, u32 *aux_mask,
u32 *associativity,
u32 max_way_size)
@@ -974,7 +974,7 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np,
of_property_read_u32(np, "cache-line-size", &line_size);
if (!cache_size || !sets)
- return;
+ return -ENODEV;
/* All these l2 caches have the same line = block size actually */
if (!line_size) {
@@ -1009,7 +1009,7 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np,
if (way_size > max_way_size) {
pr_err("L2C OF: set size %dKB is too large\n", way_size);
- return;
+ return -EINVAL;
}
pr_info("L2C OF: override cache size: %d bytes (%dKB)\n",
@@ -1027,7 +1027,7 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np,
if (way_size_bits < 1 || way_size_bits > 6) {
pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n",
way_size);
- return;
+ return -EINVAL;
}
mask |= L2C_AUX_CTRL_WAY_SIZE_MASK;
@@ -1036,6 +1036,8 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np,
*aux_val &= ~mask;
*aux_val |= val;
*aux_mask &= ~mask;
+
+ return 0;
}
static void __init l2x0_of_parse(const struct device_node *np,
@@ -1046,6 +1048,7 @@ static void __init l2x0_of_parse(const struct device_node *np,
u32 dirty = 0;
u32 val = 0, mask = 0;
u32 assoc;
+ int ret;
of_property_read_u32(np, "arm,tag-latency", &tag);
if (tag) {
@@ -1068,7 +1071,10 @@ static void __init l2x0_of_parse(const struct device_node *np,
val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
}
- l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
+ ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
+ if (ret)
+ return;
+
if (assoc > 8) {
pr_err("l2x0 of: cache setting yield too high associativity\n");
pr_err("l2x0 of: %d calculated, max 8\n", assoc);
@@ -1125,6 +1131,7 @@ static void __init l2c310_of_parse(const struct device_node *np,
u32 tag[3] = { 0, 0, 0 };
u32 filter[2] = { 0, 0 };
u32 assoc;
+ int ret;
of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
if (tag[0] && tag[1] && tag[2])
@@ -1152,7 +1159,10 @@ static void __init l2c310_of_parse(const struct device_node *np,
l2x0_base + L310_ADDR_FILTER_START);
}
- l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
+ ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
+ if (ret)
+ return;
+
switch (assoc) {
case 16:
*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
@@ -1164,8 +1174,8 @@ static void __init l2c310_of_parse(const struct device_node *np,
*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
break;
default:
- pr_err("PL310 OF: cache setting yield illegal associativity\n");
- pr_err("PL310 OF: %d calculated, only 8 and 16 legal\n", assoc);
+ pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
+ assoc);
break;
}
}
diff --git a/arch/arm/mm/cache-tauros2.c b/arch/arm/mm/cache-tauros2.c
index b273739e6359..1e373d268c04 100644
--- a/arch/arm/mm/cache-tauros2.c
+++ b/arch/arm/mm/cache-tauros2.c
@@ -185,7 +185,7 @@ static void enable_extra_feature(unsigned int features)
u &= ~0x01000000;
else
u |= 0x01000000;
- printk(KERN_INFO "Tauros2: %s L2 prefetch.\n",
+ pr_info("Tauros2: %s L2 prefetch.\n",
(features & CACHE_TAUROS2_PREFETCH_ON)
? "Enabling" : "Disabling");
@@ -193,7 +193,7 @@ static void enable_extra_feature(unsigned int features)
u |= 0x00100000;
else
u &= ~0x00100000;
- printk(KERN_INFO "Tauros2: %s line fill burt8.\n",
+ pr_info("Tauros2: %s line fill burt8.\n",
(features & CACHE_TAUROS2_LINEFILL_BURST8)
? "Enabling" : "Disabling");
@@ -216,7 +216,7 @@ static void __init tauros2_internal_init(unsigned int features)
*/
feat = read_extra_features();
if (!(feat & 0x00400000)) {
- printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
+ pr_info("Tauros2: Enabling L2 cache.\n");
write_extra_features(feat | 0x00400000);
}
@@ -253,7 +253,7 @@ static void __init tauros2_internal_init(unsigned int features)
*/
actlr = read_actlr();
if (!(actlr & 0x00000002)) {
- printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
+ pr_info("Tauros2: Enabling L2 cache.\n");
write_actlr(actlr | 0x00000002);
}
@@ -262,11 +262,11 @@ static void __init tauros2_internal_init(unsigned int features)
#endif
if (mode == NULL) {
- printk(KERN_CRIT "Tauros2: Unable to detect CPU mode.\n");
+ pr_crit("Tauros2: Unable to detect CPU mode.\n");
return;
}
- printk(KERN_INFO "Tauros2: L2 cache support initialised "
+ pr_info("Tauros2: L2 cache support initialised "
"in %s mode.\n", mode);
}
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 6eb97b3a7481..91892569710f 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -184,36 +184,46 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
u64 asid = atomic64_read(&mm->context.id);
u64 generation = atomic64_read(&asid_generation);
- if (asid != 0 && is_reserved_asid(asid)) {
+ if (asid != 0) {
/*
- * Our current ASID was active during a rollover, we can
- * continue to use it and this was just a false alarm.
+ * If our current ASID was active during a rollover, we
+ * can continue to use it and this was just a false alarm.
*/
- asid = generation | (asid & ~ASID_MASK);
- } else {
+ if (is_reserved_asid(asid))
+ return generation | (asid & ~ASID_MASK);
+
/*
- * Allocate a free ASID. If we can't find one, take a
- * note of the currently active ASIDs and mark the TLBs
- * as requiring flushes. We always count from ASID #1,
- * as we reserve ASID #0 to switch via TTBR0 and to
- * avoid speculative page table walks from hitting in
- * any partial walk caches, which could be populated
- * from overlapping level-1 descriptors used to map both
- * the module area and the userspace stack.
+ * We had a valid ASID in a previous life, so try to re-use
+ * it if possible.
*/
- asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
- if (asid == NUM_USER_ASIDS) {
- generation = atomic64_add_return(ASID_FIRST_VERSION,
- &asid_generation);
- flush_context(cpu);
- asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
- }
- __set_bit(asid, asid_map);
- cur_idx = asid;
- asid |= generation;
- cpumask_clear(mm_cpumask(mm));
+ asid &= ~ASID_MASK;
+ if (!__test_and_set_bit(asid, asid_map))
+ goto bump_gen;
}
+ /*
+ * Allocate a free ASID. If we can't find one, take a note of the
+ * currently active ASIDs and mark the TLBs as requiring flushes.
+ * We always count from ASID #1, as we reserve ASID #0 to switch
+ * via TTBR0 and to avoid speculative page table walks from hitting
+ * in any partial walk caches, which could be populated from
+ * overlapping level-1 descriptors used to map both the module
+ * area and the userspace stack.
+ */
+ asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
+ if (asid == NUM_USER_ASIDS) {
+ generation = atomic64_add_return(ASID_FIRST_VERSION,
+ &asid_generation);
+ flush_context(cpu);
+ asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
+ }
+
+ __set_bit(asid, asid_map);
+ cur_idx = asid;
+
+bump_gen:
+ asid |= generation;
+ cpumask_clear(mm_cpumask(mm));
return asid;
}
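
The rewritten new_context() above lets an mm that already owned an ASID in an earlier generation re-claim the same number after a rollover instead of always scanning for a fresh one. A standalone userspace sketch of that allocation policy follows; asid_map and NUM_USER_ASIDS mirror the kernel names, but the helpers are simplified and everything else is illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_USER_ASIDS	256
#define ASID_MASK	(~(uint64_t)(NUM_USER_ASIDS - 1))	/* generation bits */

static bool asid_map[NUM_USER_ASIDS];		/* ASIDs in use this generation */
static uint64_t generation = NUM_USER_ASIDS;	/* bumped on every rollover */

static uint64_t new_context(uint64_t old)
{
	uint64_t asid = old & ~ASID_MASK;

	/* Re-use the mm's previous ASID if nobody claimed it since the flush. */
	if (old != 0 && !asid_map[asid]) {
		asid_map[asid] = true;
		return generation | asid;
	}

	/* Otherwise allocate a free ASID, counting from #1 (#0 is reserved). */
	for (asid = 1; asid < NUM_USER_ASIDS; asid++) {
		if (!asid_map[asid]) {
			asid_map[asid] = true;
			return generation | asid;
		}
	}
	return 0;	/* the kernel would bump the generation and flush here */
}

int main(void)
{
	uint64_t a = new_context(0);		/* fresh mm: gets ASID #1 */

	/* Simulate a rollover: new generation, allocation bitmap flushed. */
	generation += NUM_USER_ASIDS;
	for (unsigned int i = 0; i < NUM_USER_ASIDS; i++)
		asid_map[i] = false;

	uint64_t b = new_context(a);		/* same mm: re-claims ASID #1 */
	printf("before 0x%llx, after 0x%llx\n",
	       (unsigned long long)a, (unsigned long long)b);
	return 0;
}
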
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index b9bcc9d79176..70423345da26 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -62,7 +62,7 @@ static void discard_old_kernel_data(void *kto)
__asm__("mcrr p15, 0, %1, %0, c6 @ 0xec401f06"
:
: "r" (kto),
- "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
+ "r" ((unsigned long)kto + PAGE_SIZE - 1)
: "cc");
}
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c245d903927f..e8907117861e 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1198,7 +1198,6 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
{
return dma_common_pages_remap(pages, size,
VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
- return NULL;
}
/*
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index ff379ac115df..d9e0d00a6699 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -235,7 +235,7 @@ void __init check_writebuffer_bugs(void)
const char *reason;
unsigned long v = 1;
- printk(KERN_INFO "CPU: Testing write buffer coherency: ");
+ pr_info("CPU: Testing write buffer coherency: ");
page = alloc_page(GFP_KERNEL);
if (page) {
@@ -261,9 +261,9 @@ void __init check_writebuffer_bugs(void)
}
if (v) {
- printk("failed, %s\n", reason);
+ pr_cont("failed, %s\n", reason);
shared_pte_mask = L_PTE_MT_UNCACHED;
} else {
- printk("ok\n");
+ pr_cont("ok\n");
}
}
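
The plain printk() continuations in check_writebuffer_bugs() (and in show_pte() in fault.c below) become pr_cont(): a printk() without an explicit level is no longer reliably appended to the previous line, so fragments that should share one console line must be marked as continuations. A short usage sketch; report_coherency(), "passed" and "reason" are placeholders, not symbols from this patch:

#include <linux/printk.h>
#include <linux/types.h>

static void report_coherency(bool passed, const char *reason)
{
	pr_info("CPU: testing write buffer coherency: ");
	if (passed)
		pr_cont("ok\n");		/* appended to the pr_info() line */
	else
		pr_cont("failed, %s\n", reason);
}
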
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index eb8830a4c5ed..a982dc3190df 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -63,9 +63,9 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
if (!mm)
mm = &init_mm;
- printk(KERN_ALERT "pgd = %p\n", mm->pgd);
+ pr_alert("pgd = %p\n", mm->pgd);
pgd = pgd_offset(mm, addr);
- printk(KERN_ALERT "[%08lx] *pgd=%08llx",
+ pr_alert("[%08lx] *pgd=%08llx",
addr, (long long)pgd_val(*pgd));
do {
@@ -77,31 +77,31 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
break;
if (pgd_bad(*pgd)) {
- printk("(bad)");
+ pr_cont("(bad)");
break;
}
pud = pud_offset(pgd, addr);
if (PTRS_PER_PUD != 1)
- printk(", *pud=%08llx", (long long)pud_val(*pud));
+ pr_cont(", *pud=%08llx", (long long)pud_val(*pud));
if (pud_none(*pud))
break;
if (pud_bad(*pud)) {
- printk("(bad)");
+ pr_cont("(bad)");
break;
}
pmd = pmd_offset(pud, addr);
if (PTRS_PER_PMD != 1)
- printk(", *pmd=%08llx", (long long)pmd_val(*pmd));
+ pr_cont(", *pmd=%08llx", (long long)pmd_val(*pmd));
if (pmd_none(*pmd))
break;
if (pmd_bad(*pmd)) {
- printk("(bad)");
+ pr_cont("(bad)");
break;
}
@@ -110,15 +110,15 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
break;
pte = pte_offset_map(pmd, addr);
- printk(", *pte=%08llx", (long long)pte_val(*pte));
+ pr_cont(", *pte=%08llx", (long long)pte_val(*pte));
#ifndef CONFIG_ARM_LPAE
- printk(", *ppte=%08llx",
+ pr_cont(", *ppte=%08llx",
(long long)pte_val(pte[PTE_HWTABLE_PTRS]));
#endif
pte_unmap(pte);
} while(0);
- printk("\n");
+ pr_cont("\n");
}
#else /* CONFIG_MMU */
void show_pte(struct mm_struct *mm, unsigned long addr)
@@ -142,10 +142,9 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
* No handler, we'll have to terminate things with extreme prejudice.
*/
bust_spinlocks(1);
- printk(KERN_ALERT
- "Unable to handle kernel %s at virtual address %08lx\n",
- (addr < PAGE_SIZE) ? "NULL pointer dereference" :
- "paging request", addr);
+ pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
+ (addr < PAGE_SIZE) ? "NULL pointer dereference" :
+ "paging request", addr);
show_pte(mm, addr);
die("Oops", regs, fsr);
@@ -551,7 +550,7 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
return;
- printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
+ pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
inf->name, fsr, addr);
info.si_signo = inf->sig;
@@ -583,7 +582,7 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
return;
- printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
+ pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
inf->name, ifsr, addr);
info.si_signo = inf->sig;
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 265b836b3bd1..34b66af516ea 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -33,7 +33,7 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
asm( "mcrr p15, 0, %1, %0, c14\n"
" mcr p15, 0, %2, c7, c10, 4"
:
- : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
+ : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
: "cc");
}
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 45aeaaca9052..e17ed00828d7 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -127,8 +127,11 @@ void *kmap_atomic_pfn(unsigned long pfn)
{
unsigned long vaddr;
int idx, type;
+ struct page *page = pfn_to_page(pfn);
pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id();
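
With the lowmem short-circuit added to kmap_atomic_pfn() above, callers no longer need to special-case PFNs that already have a linear mapping. A hedged sketch of such a caller; copy_from_pfn() is hypothetical, not from this patch:

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper: works for both lowmem and highmem PFNs. */
static void copy_from_pfn(unsigned long pfn, void *dst, size_t len)
{
	void *src = kmap_atomic_pfn(pfn);	/* lowmem: plain page_address() */

	memcpy(dst, src, len);
	kunmap_atomic(src);			/* cheap for lowmem addresses */
}
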
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 92bba32d9230..6ca53c338519 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -67,7 +67,7 @@ early_param("initrd", early_initrd);
static int __init parse_tag_initrd(const struct tag *tag)
{
- printk(KERN_WARNING "ATAG_INITRD is deprecated; "
+ pr_warn("ATAG_INITRD is deprecated; "
"please update your bootloader.\n");
phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
phys_initrd_size = tag->u.initrd.size;
@@ -544,7 +544,7 @@ void __init mem_init(void)
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
- printk(KERN_NOTICE "Virtual kernel memory layout:\n"
+ pr_notice("Virtual kernel memory layout:\n"
" vector : 0x%08lx - 0x%08lx (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
" DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9f98cec7fe1e..f86ce1a9f525 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -52,6 +52,8 @@ EXPORT_SYMBOL(empty_zero_page);
*/
pmd_t *top_pmd;
+pmdval_t user_pmd_table = _PAGE_USER_TABLE;
+
#define CPOLICY_UNCACHED 0
#define CPOLICY_BUFFERED 1
#define CPOLICY_WRITETHROUGH 2
@@ -192,7 +194,7 @@ early_param("cachepolicy", early_cachepolicy);
static int __init early_nocache(char *__unused)
{
char *p = "buffered";
- printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
+ pr_warn("nocache is deprecated; use cachepolicy=%s\n", p);
early_cachepolicy(p);
return 0;
}
@@ -201,7 +203,7 @@ early_param("nocache", early_nocache);
static int __init early_nowrite(char *__unused)
{
char *p = "uncached";
- printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
+ pr_warn("nowb is deprecated; use cachepolicy=%s\n", p);
early_cachepolicy(p);
return 0;
}
@@ -354,44 +356,6 @@ const struct mem_type *get_mem_type(unsigned int type)
}
EXPORT_SYMBOL(get_mem_type);
-#define PTE_SET_FN(_name, pteop) \
-static int pte_set_##_name(pte_t *ptep, pgtable_t token, unsigned long addr, \
- void *data) \
-{ \
- pte_t pte = pteop(*ptep); \
-\
- set_pte_ext(ptep, pte, 0); \
- return 0; \
-} \
-
-#define SET_MEMORY_FN(_name, callback) \
-int set_memory_##_name(unsigned long addr, int numpages) \
-{ \
- unsigned long start = addr; \
- unsigned long size = PAGE_SIZE*numpages; \
- unsigned end = start + size; \
-\
- if (start < MODULES_VADDR || start >= MODULES_END) \
- return -EINVAL;\
-\
- if (end < MODULES_VADDR || end >= MODULES_END) \
- return -EINVAL; \
-\
- apply_to_page_range(&init_mm, start, size, callback, NULL); \
- flush_tlb_kernel_range(start, end); \
- return 0;\
-}
-
-PTE_SET_FN(ro, pte_wrprotect)
-PTE_SET_FN(rw, pte_mkwrite)
-PTE_SET_FN(x, pte_mkexec)
-PTE_SET_FN(nx, pte_mknexec)
-
-SET_MEMORY_FN(ro, pte_set_ro)
-SET_MEMORY_FN(rw, pte_set_rw)
-SET_MEMORY_FN(x, pte_set_x)
-SET_MEMORY_FN(nx, pte_set_nx)
-
/*
* Adjust the PMD section entries according to the CPU in use.
*/
@@ -528,14 +492,23 @@ static void __init build_mem_type_table(void)
hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
+#ifndef CONFIG_ARM_LPAE
/*
* We don't use domains on ARMv6 (since this causes problems with
* v6/v7 kernels), so we must use a separate memory type for user
* r/o, kernel r/w to map the vectors page.
*/
-#ifndef CONFIG_ARM_LPAE
if (cpu_arch == CPU_ARCH_ARMv6)
vecs_pgprot |= L_PTE_MT_VECTORS;
+
+ /*
+ * Check whether the CPU supports the PXN bit in the
+ * Short-descriptor translation table format descriptors.
+ */
+ if (cpu_arch == CPU_ARCH_ARMv7 &&
+ (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) == 4) {
+ user_pmd_table |= PMD_PXNTABLE;
+ }
#endif
/*
@@ -605,6 +578,11 @@ static void __init build_mem_type_table(void)
}
kern_pgprot |= PTE_EXT_AF;
vecs_pgprot |= PTE_EXT_AF;
+
+ /*
+ * Set PXN for user mappings
+ */
+ user_pgprot |= PTE_EXT_PXN;
#endif
for (i = 0; i < 16; i++) {
@@ -786,8 +764,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
length = PAGE_ALIGN(md->length);
if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
- printk(KERN_ERR "MM: CPU does not support supersection "
- "mapping for 0x%08llx at 0x%08lx\n",
+ pr_err("MM: CPU does not support supersection mapping for 0x%08llx at 0x%08lx\n",
(long long)__pfn_to_phys((u64)md->pfn), addr);
return;
}
@@ -799,15 +776,13 @@ static void __init create_36bit_mapping(struct map_desc *md,
* of the actual domain assignments in use.
*/
if (type->domain) {
- printk(KERN_ERR "MM: invalid domain in supersection "
- "mapping for 0x%08llx at 0x%08lx\n",
+ pr_err("MM: invalid domain in supersection mapping for 0x%08llx at 0x%08lx\n",
(long long)__pfn_to_phys((u64)md->pfn), addr);
return;
}
if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
- printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
- " at 0x%08lx invalid alignment\n",
+ pr_err("MM: cannot create mapping for 0x%08llx at 0x%08lx invalid alignment\n",
(long long)__pfn_to_phys((u64)md->pfn), addr);
return;
}
@@ -850,18 +825,16 @@ static void __init create_mapping(struct map_desc *md)
pgd_t *pgd;
if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
- printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
- " at 0x%08lx in user region\n",
- (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+ pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
+ (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
return;
}
if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
md->virtual >= PAGE_OFFSET &&
(md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
- printk(KERN_WARNING "BUG: mapping for 0x%08llx"
- " at 0x%08lx out of vmalloc space\n",
- (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+ pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
+ (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
}
type = &mem_types[md->type];
@@ -881,9 +854,8 @@ static void __init create_mapping(struct map_desc *md)
length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
- printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
- "be mapped using pages, ignoring.\n",
- (long long)__pfn_to_phys(md->pfn), addr);
+ pr_warn("BUG: map for 0x%08llx at 0x%08lx can not be mapped using pages, ignoring.\n",
+ (long long)__pfn_to_phys(md->pfn), addr);
return;
}
@@ -1053,15 +1025,13 @@ static int __init early_vmalloc(char *arg)
if (vmalloc_reserve < SZ_16M) {
vmalloc_reserve = SZ_16M;
- printk(KERN_WARNING
- "vmalloc area too small, limiting to %luMB\n",
+ pr_warn("vmalloc area too small, limiting to %luMB\n",
vmalloc_reserve >> 20);
}
if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
- printk(KERN_WARNING
- "vmalloc area is too big, limiting to %luMB\n",
+ pr_warn("vmalloc area is too big, limiting to %luMB\n",
vmalloc_reserve >> 20);
}
@@ -1094,7 +1064,7 @@ void __init sanity_check_meminfo(void)
if (highmem) {
pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
- &block_start, &block_end);
+ &block_start, &block_end);
memblock_remove(reg->base, reg->size);
continue;
}
@@ -1103,7 +1073,7 @@ void __init sanity_check_meminfo(void)
phys_addr_t overlap_size = reg->size - size_limit;
pr_notice("Truncating RAM at %pa-%pa to -%pa",
- &block_start, &block_end, &vmalloc_limit);
+ &block_start, &block_end, &vmalloc_limit);
memblock_remove(vmalloc_limit, overlap_size);
block_end = vmalloc_limit;
}
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
new file mode 100644
index 000000000000..004e35cdcfff
--- /dev/null
+++ b/arch/arm/mm/pageattr.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+struct page_change_data {
+ pgprot_t set_mask;
+ pgprot_t clear_mask;
+};
+
+static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
+ void *data)
+{
+ struct page_change_data *cdata = data;
+ pte_t pte = *ptep;
+
+ pte = clear_pte_bit(pte, cdata->clear_mask);
+ pte = set_pte_bit(pte, cdata->set_mask);
+
+ set_pte_ext(ptep, pte, 0);
+ return 0;
+}
+
+static int change_memory_common(unsigned long addr, int numpages,
+ pgprot_t set_mask, pgprot_t clear_mask)
+{
+ unsigned long start = addr;
+ unsigned long size = PAGE_SIZE*numpages;
+ unsigned long end = start + size;
+ int ret;
+ struct page_change_data data;
+
+ if (!IS_ALIGNED(addr, PAGE_SIZE)) {
+ start &= PAGE_MASK;
+ end = start + size;
+ WARN_ON_ONCE(1);
+ }
+
+ if (!is_module_address(start) || !is_module_address(end - 1))
+ return -EINVAL;
+
+ data.set_mask = set_mask;
+ data.clear_mask = clear_mask;
+
+ ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+ &data);
+
+ flush_tlb_kernel_range(start, end);
+ return ret;
+}
+
+int set_memory_ro(unsigned long addr, int numpages)
+{
+ return change_memory_common(addr, numpages,
+ __pgprot(L_PTE_RDONLY),
+ __pgprot(0));
+}
+
+int set_memory_rw(unsigned long addr, int numpages)
+{
+ return change_memory_common(addr, numpages,
+ __pgprot(0),
+ __pgprot(L_PTE_RDONLY));
+}
+
+int set_memory_nx(unsigned long addr, int numpages)
+{
+ return change_memory_common(addr, numpages,
+ __pgprot(L_PTE_XN),
+ __pgprot(0));
+}
+
+int set_memory_x(unsigned long addr, int numpages)
+{
+ return change_memory_common(addr, numpages,
+ __pgprot(0),
+ __pgprot(L_PTE_XN));
+}
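
change_memory_common() only accepts ranges inside the module area (the is_module_address() checks above), so the natural users are modules changing the permissions of their own page-aligned memory. A hypothetical module sketch, assuming the set_memory_*() prototypes live in <asm/cacheflush.h> as they did for the previous mmu.c implementation:

#include <linux/module.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>	/* assumed home of the set_memory_*() prototypes */

/* Page-aligned data inside the module image, so is_module_address() is true. */
static char locked_buf[PAGE_SIZE] __aligned(PAGE_SIZE) = "read-only after init";

static int __init lockdata_init(void)
{
	return set_memory_ro((unsigned long)locked_buf, 1);
}

static void __exit lockdata_exit(void)
{
	set_memory_rw((unsigned long)locked_buf, 1);
}

module_init(lockdata_init);
module_exit(lockdata_exit);
MODULE_LICENSE("GPL");
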
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index b3a947863ac7..8b4ee5e81c14 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -270,7 +270,6 @@ __v7_pj4b_setup:
/* Auxiliary Debug Modes Control 1 Register */
#define PJ4B_STATIC_BP (1 << 2) /* Enable Static BP */
#define PJ4B_INTER_PARITY (1 << 8) /* Disable Internal Parity Handling */
-#define PJ4B_BCK_OFF_STREX (1 << 5) /* Enable the back off of STREX instr */
#define PJ4B_CLEAN_LINE (1 << 16) /* Disable data transfer for clean line */
/* Auxiliary Debug Modes Control 2 Register */
@@ -293,7 +292,6 @@ __v7_pj4b_setup:
/* Auxiliary Debug Modes Control 1 Register */
mrc p15, 1, r0, c15, c1, 1
orr r0, r0, #PJ4B_CLEAN_LINE
- orr r0, r0, #PJ4B_BCK_OFF_STREX
orr r0, r0, #PJ4B_INTER_PARITY
bic r0, r0, #PJ4B_STATIC_BP
mcr p15, 1, r0, c15, c1, 1
@@ -593,9 +591,10 @@ __krait_proc_info:
/*
* Some Krait processors don't indicate support for SDIV and UDIV
* instructions in the ARM instruction set, even though they actually
- * do support them.
+ * do support them. They also don't indicate support for fused multiply
+ * instructions even though they actually do support them.
*/
- __v7_proc __v7_setup, hwcaps = HWCAP_IDIV
+ __v7_proc __v7_setup, hwcaps = HWCAP_IDIV | HWCAP_VFPv4
.size __krait_proc_info, . - __krait_proc_info
/*
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 23259f104c66..afa2b3c4df4a 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -535,7 +535,7 @@ ENTRY(cpu_xscale_do_suspend)
mrc p15, 0, r5, c15, c1, 0 @ CP access reg
mrc p15, 0, r6, c13, c0, 0 @ PID
mrc p15, 0, r7, c3, c0, 0 @ domain ID
- mrc p15, 0, r8, c1, c1, 0 @ auxiliary control reg
+ mrc p15, 0, r8, c1, c0, 1 @ auxiliary control reg
mrc p15, 0, r9, c1, c0, 0 @ control reg
bic r4, r4, #2 @ clear frequency change bit
stmia r0, {r4 - r9} @ store cp regs
@@ -552,7 +552,7 @@ ENTRY(cpu_xscale_do_resume)
mcr p15, 0, r6, c13, c0, 0 @ PID
mcr p15, 0, r7, c3, c0, 0 @ domain ID
mcr p15, 0, r1, c2, c0, 0 @ translation table base addr
- mcr p15, 0, r8, c1, c1, 0 @ auxiliary control reg
+ mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg
mov r0, r9 @ control register
b cpu_resume_mmu
ENDPROC(cpu_xscale_do_resume)