Diffstat (limited to 'arch/mips/mm')
-rw-r--r--   arch/mips/mm/c-r4k.c     8
-rw-r--r--   arch/mips/mm/init.c     33
-rw-r--r--   arch/mips/mm/tlb-r3k.c   2
-rw-r--r--   arch/mips/mm/tlb-r4k.c  59
-rw-r--r--   arch/mips/mm/tlbex.c    98
5 files changed, 193 insertions, 7 deletions
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index f2e8302fa70f..fbcd8674ff1d 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1230,19 +1230,19 @@ static void probe_pcache(void)
case CPU_R14000:
break;
+ case CPU_74K:
+ case CPU_1074K:
+ alias_74k_erratum(c);
+ /* Fall through. */
case CPU_M14KC:
case CPU_M14KEC:
case CPU_24K:
case CPU_34K:
- case CPU_74K:
case CPU_1004K:
- case CPU_1074K:
case CPU_INTERAPTIV:
case CPU_P5600:
case CPU_PROAPTIV:
case CPU_M5150:
- if ((c->cputype == CPU_74K) || (c->cputype == CPU_1074K))
- alias_74k_erratum(c);
if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
(c->icache.waysize > PAGE_SIZE))
c->icache.flags |= MIPS_CACHE_ALIASES;
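
The net effect in probe_pcache() is that the 74K/1074K alias erratum workaround is now applied via an explicit fall-through rather than by re-testing cputype inside the shared case body. As a sketch (not part of the patch), the switch reads:

	case CPU_74K:
	case CPU_1074K:
		alias_74k_erratum(c);
		/* Fall through to the common I-cache alias check. */
	case CPU_M14KC:
	/* ... remaining shared cases ... */
	case CPU_M5150:
		if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
		    (c->icache.waysize > PAGE_SIZE))
			c->icache.flags |= MIPS_CACHE_ALIASES;
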
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 6e4413330e36..571aab064936 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -325,6 +325,38 @@ static inline void mem_init_free_highmem(void)
#endif
}
+unsigned __weak platform_maar_init(unsigned num_maars)
+{
+ return 0;
+}
+
+static void maar_init(void)
+{
+ unsigned num_maars, used, i;
+
+ if (!cpu_has_maar)
+ return;
+
+ /* Detect the number of MAARs */
+ write_c0_maari(~0);
+ back_to_back_c0_hazard();
+ num_maars = read_c0_maari() + 1;
+
+ /* MAARs should be in pairs */
+ WARN_ON(num_maars % 2);
+
+ /* Configure the required MAARs */
+ used = platform_maar_init(num_maars / 2);
+
+ /* Disable any further MAARs */
+ for (i = (used * 2); i < num_maars; i++) {
+ write_c0_maari(i);
+ back_to_back_c0_hazard();
+ write_c0_maar(0);
+ back_to_back_c0_hazard();
+ }
+}
+
void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
@@ -337,6 +369,7 @@ void __init mem_init(void)
#endif
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+ maar_init();
free_all_bootmem();
setup_zero_pages(); /* Setup zeroed pages. */
mem_init_free_highmem();
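
maar_init() relies on platforms overriding the weak platform_maar_init() hook: the hook receives the number of available MAAR pairs, returns how many it configured, and maar_init() then invalidates the remaining MAARs. A minimal, hypothetical override might look like the sketch below; the RAM bounds and the MIPS_MAAR_ADDR / MIPS_MAAR_S / MIPS_MAAR_V field macros are illustrative assumptions, not part of this patch.

	/* Hypothetical board code: mark the first 256MB of RAM as
	 * speculatable and accessible via one MAAR pair.  Bounds and
	 * MIPS_MAAR_* field macros are assumptions for illustration. */
	unsigned platform_maar_init(unsigned num_pairs)
	{
		phys_addr_t lower = 0;			/* assumed RAM base */
		phys_addr_t upper = 0x0fffffff;		/* assumed RAM end */

		if (num_pairs < 1)
			return 0;

		/* Even MAAR of the pair: upper bound.  The MAAR address
		 * field holds PA[35:16], i.e. the physical address
		 * shifted right by 4. */
		write_c0_maari(0);
		back_to_back_c0_hazard();
		write_c0_maar(((upper >> 4) & MIPS_MAAR_ADDR) | MIPS_MAAR_S | MIPS_MAAR_V);
		back_to_back_c0_hazard();

		/* Odd MAAR of the pair: lower bound. */
		write_c0_maari(1);
		back_to_back_c0_hazard();
		write_c0_maar(((lower >> 4) & MIPS_MAAR_ADDR) | MIPS_MAAR_S | MIPS_MAAR_V);
		back_to_back_c0_hazard();

		return 1;	/* one pair used */
	}
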
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index d657493ef561..4094bbd42adf 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -158,7 +158,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
int cpu = smp_processor_id();
- if (!vma || cpu_context(cpu, vma->vm_mm) != 0) {
+ if (cpu_context(cpu, vma->vm_mm) != 0) {
unsigned long flags;
int oldpid, newpid, idx;
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 3914e27456f2..fa6ebd4bc9e9 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -57,6 +57,7 @@ void local_flush_tlb_all(void)
local_irq_save(flags);
/* Save old context and create impossible VPN2 value */
old_ctx = read_c0_entryhi();
+ htw_stop();
write_c0_entrylo0(0);
write_c0_entrylo1(0);
@@ -90,6 +91,7 @@ void local_flush_tlb_all(void)
}
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
+ htw_start();
flush_itlb();
local_irq_restore(flags);
}
@@ -131,6 +133,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
int oldpid = read_c0_entryhi();
int newpid = cpu_asid(cpu, mm);
+ htw_stop();
while (start < end) {
int idx;
@@ -151,6 +154,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
}
tlbw_use_hazard();
write_c0_entryhi(oldpid);
+ htw_start();
} else {
drop_mmu_context(mm, cpu);
}
@@ -174,6 +178,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
start &= (PAGE_MASK << 1);
end += ((PAGE_SIZE << 1) - 1);
end &= (PAGE_MASK << 1);
+ htw_stop();
while (start < end) {
int idx;
@@ -195,6 +200,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
}
tlbw_use_hazard();
write_c0_entryhi(pid);
+ htw_start();
} else {
local_flush_tlb_all();
}
@@ -214,6 +220,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
page &= (PAGE_MASK << 1);
local_irq_save(flags);
oldpid = read_c0_entryhi();
+ htw_stop();
write_c0_entryhi(page | newpid);
mtc0_tlbw_hazard();
tlb_probe();
@@ -231,6 +238,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
finish:
write_c0_entryhi(oldpid);
+ htw_start();
flush_itlb_vm(vma);
local_irq_restore(flags);
}
@@ -247,6 +255,7 @@ void local_flush_tlb_one(unsigned long page)
local_irq_save(flags);
oldpid = read_c0_entryhi();
+ htw_stop();
page &= (PAGE_MASK << 1);
write_c0_entryhi(page);
mtc0_tlbw_hazard();
@@ -263,6 +272,7 @@ void local_flush_tlb_one(unsigned long page)
tlbw_use_hazard();
}
write_c0_entryhi(oldpid);
+ htw_start();
flush_itlb();
local_irq_restore(flags);
}
@@ -351,6 +361,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
local_irq_save(flags);
/* Save old context and create impossible VPN2 value */
old_ctx = read_c0_entryhi();
+ htw_stop();
old_pagemask = read_c0_pagemask();
wired = read_c0_wired();
write_c0_wired(wired + 1);
@@ -366,6 +377,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
write_c0_entryhi(old_ctx);
tlbw_use_hazard(); /* What is the hazard here? */
+ htw_start();
write_c0_pagemask(old_pagemask);
local_flush_tlb_all();
local_irq_restore(flags);
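
The htw_stop()/htw_start() calls added throughout this file all follow the same bracketing pattern: the hardware page table walker is paused before EntryHi is loaded with a non-live value and restarted only after the saved context has been restored, so the walker never observes a bogus ASID or VPN2. In sketch form:

	old_ctx = read_c0_entryhi();
	htw_stop();
	/* ... write EntryHi/EntryLo/Index and the TLB entries ... */
	write_c0_entryhi(old_ctx);
	htw_start();
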
@@ -391,6 +403,51 @@ int __init has_transparent_hugepage(void)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+/*
+ * Used for loading TLB entries before trap_init() has started, when we
+ * don't actually want to add a wired entry which remains throughout the
+ * lifetime of the system
+ */
+
+int temp_tlb_entry __cpuinitdata;
+
+__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
+ unsigned long entryhi, unsigned long pagemask)
+{
+ int ret = 0;
+ unsigned long flags;
+ unsigned long wired;
+ unsigned long old_pagemask;
+ unsigned long old_ctx;
+
+ local_irq_save(flags);
+ /* Save old context and create impossible VPN2 value */
+ old_ctx = read_c0_entryhi();
+ old_pagemask = read_c0_pagemask();
+ wired = read_c0_wired();
+ if (--temp_tlb_entry < wired) {
+ printk(KERN_WARNING
+ "No TLB space left for add_temporary_entry\n");
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ write_c0_index(temp_tlb_entry);
+ write_c0_pagemask(pagemask);
+ write_c0_entryhi(entryhi);
+ write_c0_entrylo0(entrylo0);
+ write_c0_entrylo1(entrylo1);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ tlbw_use_hazard();
+
+ write_c0_entryhi(old_ctx);
+ write_c0_pagemask(old_pagemask);
+out:
+ local_irq_restore(flags);
+ return ret;
+}
+
static int ntlb;
static int __init set_ntlb(char *str)
{
@@ -431,6 +488,8 @@ static void r4k_tlb_configure(void)
write_c0_pagegrain(pg);
}
+ temp_tlb_entry = current_cpu_data.tlbsize - 1;
+
/* From this point on the ARC firmware is dead. */
local_flush_tlb_all();
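
add_temporary_entry() writes TLB entries at the top of the TLB (indices counting down from tlbsize - 1) so that early boot code can map I/O or memory before trap_init() runs, without permanently consuming a wired slot. A hypothetical early caller could look like the sketch below; the virtual/physical addresses, cache attribute and PM_16M page mask are illustrative assumptions, only add_temporary_entry() itself comes from the patch.

	/* Map a pair of 16MB pages at an assumed virtual window before
	 * trap_init(); addresses and attributes are illustrative only. */
	static int __init map_early_window(void)
	{
		unsigned long pa = 0x10000000;		/* assumed physical base */
		unsigned long va = 0xe0000000;		/* assumed virtual base */
		unsigned long entrylo0, entrylo1, entryhi, pagemask;

		pagemask = PM_16M;			/* pair of 16MB pages */
		entryhi = va & (PAGE_MASK << 1);	/* VPN2, ASID 0 */
		/* EntryLo: PFN from bit 6, C=2 (uncached) in bits 5:3, D|V|G */
		entrylo0 = (pa >> 6) | (2 << 3) | 0x7;
		entrylo1 = ((pa + 0x1000000) >> 6) | (2 << 3) | 0x7;

		return add_temporary_entry(entrylo0, entrylo1, entryhi, pagemask);
	}
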
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index e80e10bafc83..a08dd53a1cc5 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -429,6 +429,7 @@ static void build_r3000_tlb_refill_handler(void)
(unsigned int)(p - tlb_handler));
memcpy((void *)ebase, tlb_handler, 0x80);
+ local_flush_icache_range(ebase, ebase + 0x80);
dump_handler("r3000_tlb_refill", (u32 *)ebase, 32);
}
@@ -1299,6 +1300,7 @@ static void build_r4000_tlb_refill_handler(void)
}
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
uasm_l_tlb_huge_update(&l, p);
+ UASM_i_LW(&p, K0, 0, K1);
build_huge_update_entries(&p, htlb_info.huge_pte, K1);
build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
htlb_info.restore_scratch);
@@ -1415,6 +1417,7 @@ static void build_r4000_tlb_refill_handler(void)
final_len);
memcpy((void *)ebase, final_handler, 0x100);
+ local_flush_icache_range(ebase, ebase + 0x100);
dump_handler("r4000_tlb_refill", (u32 *)ebase, 64);
}
@@ -1919,7 +1922,7 @@ static void build_r4000_tlb_load_handler(void)
if (m4kc_tlbp_war())
build_tlb_probe_entry(&p);
- if (cpu_has_rixi) {
+ if (cpu_has_rixi && !cpu_has_rixiex) {
/*
* If the page is not _PAGE_VALID, RI or XI could not
* have triggered it. Skip the expensive test..
@@ -1986,7 +1989,7 @@ static void build_r4000_tlb_load_handler(void)
build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
build_tlb_probe_entry(&p);
- if (cpu_has_rixi) {
+ if (cpu_has_rixi && !cpu_has_rixiex) {
/*
* If the page is not _PAGE_VALID, RI or XI could not
* have triggered it. Skip the expensive test..
@@ -2194,6 +2197,94 @@ static void flush_tlb_handlers(void)
(unsigned long)tlbmiss_handler_setup_pgd_end);
}
+static void print_htw_config(void)
+{
+ unsigned long config;
+ unsigned int pwctl;
+ const int field = 2 * sizeof(unsigned long);
+
+ config = read_c0_pwfield();
+ pr_debug("PWField (0x%0*lx): GDI: 0x%02lx UDI: 0x%02lx MDI: 0x%02lx PTI: 0x%02lx PTEI: 0x%02lx\n",
+ field, config,
+ (config & MIPS_PWFIELD_GDI_MASK) >> MIPS_PWFIELD_GDI_SHIFT,
+ (config & MIPS_PWFIELD_UDI_MASK) >> MIPS_PWFIELD_UDI_SHIFT,
+ (config & MIPS_PWFIELD_MDI_MASK) >> MIPS_PWFIELD_MDI_SHIFT,
+ (config & MIPS_PWFIELD_PTI_MASK) >> MIPS_PWFIELD_PTI_SHIFT,
+ (config & MIPS_PWFIELD_PTEI_MASK) >> MIPS_PWFIELD_PTEI_SHIFT);
+
+ config = read_c0_pwsize();
+ pr_debug("PWSize (0x%0*lx): GDW: 0x%02lx UDW: 0x%02lx MDW: 0x%02lx PTW: 0x%02lx PTEW: 0x%02lx\n",
+ field, config,
+ (config & MIPS_PWSIZE_GDW_MASK) >> MIPS_PWSIZE_GDW_SHIFT,
+ (config & MIPS_PWSIZE_UDW_MASK) >> MIPS_PWSIZE_UDW_SHIFT,
+ (config & MIPS_PWSIZE_MDW_MASK) >> MIPS_PWSIZE_MDW_SHIFT,
+ (config & MIPS_PWSIZE_PTW_MASK) >> MIPS_PWSIZE_PTW_SHIFT,
+ (config & MIPS_PWSIZE_PTEW_MASK) >> MIPS_PWSIZE_PTEW_SHIFT);
+
+ pwctl = read_c0_pwctl();
+ pr_debug("PWCtl (0x%x): PWEn: 0x%x DPH: 0x%x HugePg: 0x%x Psn: 0x%x\n",
+ pwctl,
+ (pwctl & MIPS_PWCTL_PWEN_MASK) >> MIPS_PWCTL_PWEN_SHIFT,
+ (pwctl & MIPS_PWCTL_DPH_MASK) >> MIPS_PWCTL_DPH_SHIFT,
+ (pwctl & MIPS_PWCTL_HUGEPG_MASK) >> MIPS_PWCTL_HUGEPG_SHIFT,
+ (pwctl & MIPS_PWCTL_PSN_MASK) >> MIPS_PWCTL_PSN_SHIFT);
+}
+
+static void config_htw_params(void)
+{
+ unsigned long pwfield, pwsize, ptei;
+ unsigned int config;
+
+ /*
+ * We are using 2-level page tables, so we only need to
+ * setup GDW and PTW appropriately. UDW and MDW will remain 0.
+ * The default value of GDI/UDI/MDI/PTI is 0xc. It is illegal to
+ * write values less than 0xc in these fields because the entire
+ * write will be dropped. As a result of which, we must preserve
+ * the original reset values and overwrite only what we really want.
+ */
+
+ pwfield = read_c0_pwfield();
+ /* re-initialize the GDI field */
+ pwfield &= ~MIPS_PWFIELD_GDI_MASK;
+ pwfield |= PGDIR_SHIFT << MIPS_PWFIELD_GDI_SHIFT;
+ /* re-initialize the PTI field including the even/odd bit */
+ pwfield &= ~MIPS_PWFIELD_PTI_MASK;
+ pwfield |= PAGE_SHIFT << MIPS_PWFIELD_PTI_SHIFT;
+ /* Set the PTEI right shift */
+ ptei = _PAGE_GLOBAL_SHIFT << MIPS_PWFIELD_PTEI_SHIFT;
+ pwfield |= ptei;
+ write_c0_pwfield(pwfield);
+ /* Check whether the PTEI value is supported */
+ back_to_back_c0_hazard();
+ pwfield = read_c0_pwfield();
+ if (((pwfield & MIPS_PWFIELD_PTEI_MASK) << MIPS_PWFIELD_PTEI_SHIFT)
+ != ptei) {
+ pr_warn("Unsupported PTEI field value: 0x%lx. HTW will not be enabled",
+ ptei);
+ /*
+ * Drop option to avoid HTW being enabled via another path
+ * (eg htw_reset())
+ */
+ current_cpu_data.options &= ~MIPS_CPU_HTW;
+ return;
+ }
+
+ pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT;
+ pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT;
+ write_c0_pwsize(pwsize);
+
+ /* Make sure everything is set before we enable the HTW */
+ back_to_back_c0_hazard();
+
+ /* Enable HTW and disable the rest of the pwctl fields */
+ config = 1 << MIPS_PWCTL_PWEN_SHIFT;
+ write_c0_pwctl(config);
+ pr_info("Hardware Page Table Walker enabled\n");
+
+ print_htw_config();
+}
+
void build_tlb_refill_handler(void)
{
/*
@@ -2258,5 +2349,8 @@ void build_tlb_refill_handler(void)
}
if (cpu_has_local_ebase)
build_r4000_tlb_refill_handler();
+ if (cpu_has_htw)
+ config_htw_params();
+
}
}
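
For reference, the PWField/PWSize programming in config_htw_params() reduces to a handful of shifts once the page table geometry is fixed. The sketch below works the values out for an assumed 32-bit, 4KB-page, two-level layout (PAGE_SHIFT = 12, PGDIR_SHIFT = 22, PTRS_PER_PGD = PTRS_PER_PTE = 1024); the numeric values are illustrative assumptions, only the macro names come from the patch.

	/* Assumed 32-bit, 4KB-page, two-level page table layout. */
	pwfield  = 22 << MIPS_PWFIELD_GDI_SHIFT;	/* PGDIR_SHIFT: VA bits below the PGD index */
	pwfield |= 12 << MIPS_PWFIELD_PTI_SHIFT;	/* PAGE_SHIFT: VA bits below the PTE index */
	pwfield |= _PAGE_GLOBAL_SHIFT << MIPS_PWFIELD_PTEI_SHIFT; /* right-shift applied to each PTE */

	pwsize  = 10 << MIPS_PWSIZE_GDW_SHIFT;		/* ilog2(1024) PGD index bits */
	pwsize |= 10 << MIPS_PWSIZE_PTW_SHIFT;		/* ilog2(1024) PTE index bits */
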