author     David S. Miller <davem@davemloft.net>  2010-01-22 22:45:46 -0800
committer  David S. Miller <davem@davemloft.net>  2010-01-22 22:45:46 -0800
commit     6be325719b3e54624397e413efd4b33a997e55a3 (patch)
tree       57f321a56794cab2222e179b16731e0d76a4a68a /arch/powerpc/mm/pgtable_32.c
parent     26d92f9276a56d55511a427fb70bd70886af647a (diff)
parent     92dcffb916d309aa01778bf8963a6932e4014d07 (diff)
Merge branch 'master' of /home/davem/src/GIT/linux-2.6/
Diffstat (limited to 'arch/powerpc/mm/pgtable_32.c')
-rw-r--r--  arch/powerpc/mm/pgtable_32.c | 38
1 file changed, 32 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index cb96cb2e17cc..573b3bd1c45b 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -26,6 +26,7 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
+#include <linux/lmb.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -191,7 +192,8 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
* Don't allow anybody to remap normal RAM that we're using.
* mem_init() sets high_memory so only do the check after that.
*/
- if (mem_init_done && (p < virt_to_phys(high_memory))) {
+ if (mem_init_done && (p < virt_to_phys(high_memory)) &&
+ !(__allow_ioremap_reserved && lmb_is_region_reserved(p, size))) {
printk("__ioremap(): phys addr 0x%llx is RAM lr %p\n",
(unsigned long long)p, __builtin_return_address(0));
return NULL;
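
The new condition above lets platform code opt in to ioremap()ing RAM that has been set aside in the LMB allocator: the usual "phys addr is RAM" rejection is skipped only when __allow_ioremap_reserved is set and the whole requested range is LMB-reserved. Below is a minimal, hedged sketch of how a board port might use this; it assumes lmb_reserve() from this kernel generation's LMB API, illustrative base/size values, and that platform setup code may set __allow_ioremap_reserved directly, so treat it as a sketch rather than the reference usage.

/*
 * Hedged sketch: platform setup carving a RAM range out of the LMB
 * allocator so a driver can ioremap() it later.  The function names
 * and the base/size values are illustrative, not from this commit.
 */
#include <linux/lmb.h>
#include <asm/io.h>

extern int __allow_ioremap_reserved;	/* assumed visible to platform code */

#define EXAMPLE_IO_BASE	0x0d000000UL	/* illustrative physical base */
#define EXAMPLE_IO_SIZE	0x00004000UL	/* illustrative size */

static void __init example_reserve_io_ram(void)
{
	/* Keep the range away from the page allocator... */
	lmb_reserve(EXAMPLE_IO_BASE, EXAMPLE_IO_SIZE);
	/* ...and let __ioremap_caller() accept reserved RAM (assumes
	 * the flag may be set directly by platform code). */
	__allow_ioremap_reserved = 1;
}

static void __iomem *example_map_io_ram(void)
{
	/* Passes the check above: the range is RAM, but LMB-reserved. */
	return ioremap(EXAMPLE_IO_BASE, EXAMPLE_IO_SIZE);
}
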
@@ -283,18 +285,18 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
}

/*
- * Map in a big chunk of physical memory starting at PAGE_OFFSET.
+ * Map in a chunk of physical memory starting at offset.
*/
-void __init mapin_ram(void)
+void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
unsigned long v, s, f;
phys_addr_t p;
int ktext;

- s = mmu_mapin_ram();
+ s = offset;
v = PAGE_OFFSET + s;
p = memstart_addr + s;
- for (; s < total_lowmem; s += PAGE_SIZE) {
+ for (; s < top; s += PAGE_SIZE) {
ktext = ((char *) v >= _stext && (char *) v < etext);
f = ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL;
map_page(v, p, f);
@@ -307,6 +309,30 @@ void __init mapin_ram(void)
}
}

+void __init mapin_ram(void)
+{
+ unsigned long s, top;
+
+#ifndef CONFIG_WII
+ top = total_lowmem;
+ s = mmu_mapin_ram(top);
+ __mapin_ram_chunk(s, top);
+#else
+ if (!wii_hole_size) {
+ s = mmu_mapin_ram(total_lowmem);
+ __mapin_ram_chunk(s, total_lowmem);
+ } else {
+ top = wii_hole_start;
+ s = mmu_mapin_ram(top);
+ __mapin_ram_chunk(s, top);
+
+ top = lmb_end_of_DRAM();
+ s = wii_mmu_mapin_mem2(top);
+ __mapin_ram_chunk(s, top);
+ }
+#endif
+}
+
/* Scan the real Linux page tables and return a PTE pointer for
* a virtual address in a context.
* Returns true (1) if PTE was found, zero otherwise. The pointer to
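
The refactoring above turns the body of the old mapin_ram() into a reusable __mapin_ram_chunk(offset, top) helper, where offset and top are byte offsets into lowmem (virtual addresses are PAGE_OFFSET + s, physical addresses memstart_addr + s). The Wii needs this because its lowmem is split into two banks (MEM1 and MEM2) separated by a hole described by wii_hole_start/wii_hole_size, so each bank is block-mapped as far as the MMU helpers allow and the remainder is filled in page by page. As a hedged illustration of how the helper generalizes, here is a sketch for a hypothetical board with a single lowmem hole; the hole_start/hole_end parameters and the assumption that nothing above the hole gets block-mapped are illustrative, not part of this commit.

/*
 * Hedged sketch only: reusing __mapin_ram_chunk() for a hypothetical
 * platform whose lowmem has one unbacked hole.  hole_start/hole_end
 * are illustrative offsets into lowmem; the bank above the hole is
 * assumed not to be covered by block mappings, so it is mapped
 * entirely with map_page() via __mapin_ram_chunk().
 */
static void __init example_mapin_ram_with_hole(unsigned long hole_start,
					       unsigned long hole_end)
{
	unsigned long s, top;

	/* Bank below the hole: block-map what the MMU code can,
	 * then page-map the rest up to the start of the hole. */
	top = hole_start;
	s = mmu_mapin_ram(top);
	__mapin_ram_chunk(s, top);

	/* Bank above the hole: page-map everything from the end of
	 * the hole to the end of DRAM. */
	top = lmb_end_of_DRAM();
	s = hole_end;
	__mapin_ram_chunk(s, top);
}
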
@@ -356,7 +382,7 @@ static int __change_page_attr(struct page *page, pgprot_t prot)
return 0;
if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
return -EINVAL;
- set_pte_at(&init_mm, address, kpte, mk_pte(page, prot));
+ __set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
wmb();
#ifdef CONFIG_PPC_STD_MMU
flush_hash_pages(0, address, pmd_val(*kpmd), 1);
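
The last hunk replaces set_pte_at() with the lower-level __set_pte_at() and a trailing percpu argument of 0. The likely reason, stated here as an assumption since the diff does not spell it out, is that the powerpc set_pte_at() wrapper performs extra checking and filtering on the new PTE and is not intended for PTEs that are already valid, while __change_page_attr() deliberately rewrites a live kernel mapping in place and then issues its own barrier and hash/TLB flush. A simplified paraphrase of the assumed relationship between the two calls:

/*
 * Simplified paraphrase, not the actual tree code: set_pte_at() on
 * 32-bit powerpc is assumed to be a checking/filtering wrapper that
 * ends up in __set_pte_at(), whose trailing "percpu" argument is 0
 * for ordinary mappings.  __change_page_attr() now calls the inner
 * helper directly and does the wmb()/flush itself, as shown above.
 */
static inline void sketch_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	/* In the real wrapper, sanity checks and PTE filtering would
	 * happen here; that is the part rewriting an already-present
	 * kernel PTE wants to avoid. */
	__set_pte_at(mm, addr, ptep, pte, 0 /* percpu */);
}
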