author     David S. Miller <davem@davemloft.net>         2006-02-21 22:31:11 -0800
committer  David S. Miller <davem@sunset.davemloft.net>  2006-03-20 01:13:56 -0800
commit     d7744a09504d5ae84edc8289a02254e1f2102410 (patch)
tree       be0f245ee0725f2f066bf87d17d254ce1e7279bf /arch/sparc64
parent     9cc3a1ac9a819cadff05ca37bb7f208013a22035 (diff)
[SPARC64]: Create a separate kernel TSB for 4MB/256MB mappings.
This TSB can hold all of the linear kernel mappings with zero TSB hash conflicts on systems with 16GB or less of RAM. In such cases, on SUN4V, once this TSB has been loaded with all the mappings, we never take a linear kernel mapping TLB miss again; the hypervisor handles them all.

Signed-off-by: David S. Miller <davem@davemloft.net>
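For context, a quick sketch of the arithmetic behind the "16GB or less" claim. This is not part of the patch; it assumes KERNEL_TSB4M_NENTRIES is 4096 (its value in asm-sparc64/tsb.h of this period) and that the TSB is direct-mapped on the bits above the 4MB page offset, which is the index KERN_TSB4M_LOOKUP_TL1 computes:

	#include <stdio.h>

	#define KERNEL_TSB4M_NENTRIES	4096	/* assumed value, see lead-in */
	#define TSB4M_SHIFT		22	/* log2 of the 4MB page size */

	/* Conceptual slot calculation of a direct-mapped 4MB-page TSB. */
	static unsigned long tsb4m_index(unsigned long vaddr)
	{
		return (vaddr >> TSB4M_SHIFT) & (KERNEL_TSB4M_NENTRIES - 1);
	}

	int main(void)
	{
		unsigned long coverage;

		/* 4096 entries x 4MB per entry = 16GB of coverage. */
		coverage = (unsigned long)KERNEL_TSB4M_NENTRIES << TSB4M_SHIFT;
		printf("coverage: %lu GB\n", coverage >> 30);		/* 16 */
		printf("slot of 0:    %lu\n", tsb4m_index(0UL));	/* 0 */
		printf("slot of 4MB:  %lu\n", tsb4m_index(4UL << 20));	/* 1 */
		printf("slot of 16GB: %lu\n", tsb4m_index(16UL << 30)); /* wraps to 0 */
		return 0;
	}

With 4096 slots of 4MB each, the TSB spans exactly 2^34 bytes, and every distinct 4MB linear page below 16GB lands in its own slot, so no entry ever evicts another.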
Diffstat (limited to 'arch/sparc64')
-rw-r--r--  arch/sparc64/kernel/ktlb.S | 15
-rw-r--r--  arch/sparc64/mm/init.c     | 24
2 files changed, 33 insertions(+), 6 deletions(-)
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index ae1dac17bc8d..efcf38b6e284 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -121,6 +121,12 @@ kvmap_dtlb_obp:
nop
.align 32
+kvmap_dtlb_tsb4m_load:
+ KTSB_LOCK_TAG(%g1, %g2, %g7)
+ KTSB_WRITE(%g1, %g5, %g6)
+ ba,pt %xcc, kvmap_dtlb_load
+ nop
+
kvmap_dtlb:
/* %g6: TAG TARGET */
mov TLB_TAG_ACCESS, %g4
@@ -133,6 +139,13 @@ kvmap_dtlb_4v:
brgez,pn %g4, kvmap_dtlb_nonlinear
nop
+ /* Correct TAG_TARGET is already in %g6, check 4mb TSB. */
+ KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
+
+ /* TSB entry address left in %g1, lookup linear PTE.
+ * Must preserve %g1 and %g6 (TAG).
+ */
+kvmap_dtlb_tsb4m_miss:
sethi %hi(kpte_linear_bitmap), %g2
or %g2, %lo(kpte_linear_bitmap), %g2
@@ -163,7 +176,7 @@ kvmap_dtlb_4v:
.globl kvmap_linear_patch
kvmap_linear_patch:
- ba,pt %xcc, kvmap_dtlb_load
+ ba,pt %xcc, kvmap_dtlb_tsb4m_load
xor %g2, %g4, %g5
kvmap_dtlb_vmalloc_addr:
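An aside on the macros used in kvmap_dtlb_tsb4m_load above: KTSB_LOCK_TAG and KTSB_WRITE implement a lock-then-write discipline so that concurrent TLB-miss handlers never observe a half-written entry. A minimal C rendering of that protocol follows; the struct, function name, and lock-bit position are all assumed for illustration rather than taken from the kernel sources:

	#include <stdatomic.h>
	#include <stdint.h>

	struct tsb_entry {
		_Atomic uint64_t tag;
		uint64_t pte;
	};

	#define TSB_TAG_LOCK	(1UL << 47)	/* assumed lock-bit position */

	/* Illustrative name only; mirrors KTSB_LOCK_TAG then KTSB_WRITE. */
	static void ktsb_insert(struct tsb_entry *ent, uint64_t tag, uint64_t pte)
	{
		uint64_t old;

		/* KTSB_LOCK_TAG: spin until we atomically set the lock bit. */
		do {
			old = atomic_load_explicit(&ent->tag,
						   memory_order_relaxed);
		} while ((old & TSB_TAG_LOCK) ||
			 !atomic_compare_exchange_weak_explicit(
				&ent->tag, &old, old | TSB_TAG_LOCK,
				memory_order_acquire, memory_order_relaxed));

		/* KTSB_WRITE: store the PTE first, then the real tag; the
		 * tag store publishes the entry and releases the lock in
		 * one step.
		 */
		ent->pte = pte;
		atomic_store_explicit(&ent->tag, tag, memory_order_release);
	}

Storing the PTE before the tag, with the final tag store doubling as the unlock, is what lets a racing TSB lookup read entries without ever taking the lock itself.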
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index b5869f00d2d1..2a123135b042 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -58,6 +58,9 @@ unsigned long kern_linear_pte_xor[2] __read_mostly;
*/
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
+/* A special kernel TSB for 4MB and 256MB linear mappings. */
+struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
+
#define MAX_BANKS 32
static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
@@ -1086,6 +1089,7 @@ static void __init sun4v_ktsb_init(void)
{
unsigned long ktsb_pa;
+ /* First KTSB for PAGE_SIZE mappings. */
ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
switch (PAGE_SIZE) {
@@ -1117,9 +1121,18 @@ static void __init sun4v_ktsb_init(void)
ktsb_descr[0].tsb_base = ktsb_pa;
ktsb_descr[0].resv = 0;
- /* XXX When we have a kernel large page size TSB, describe
- * XXX it in ktsb_descr[1] here.
- */
+ /* Second KTSB for 4MB/256MB mappings. */
+ ktsb_pa = (kern_base +
+ ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
+
+ ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
+ ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB |
+ HV_PGSZ_MASK_256MB);
+ ktsb_descr[1].assoc = 1;
+ ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
+ ktsb_descr[1].ctx_idx = 0;
+ ktsb_descr[1].tsb_base = ktsb_pa;
+ ktsb_descr[1].resv = 0;
}
void __cpuinit sun4v_ktsb_register(void)
@@ -1132,8 +1145,7 @@ void __cpuinit sun4v_ktsb_register(void)
pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
func = HV_FAST_MMU_TSB_CTX0;
- /* XXX set arg0 to 2 when we use ktsb_descr[1], see above XXX */
- arg0 = 1;
+ arg0 = 2;
arg1 = pa;
__asm__ __volatile__("ta %6"
: "=&r" (func), "=&r" (arg0), "=&r" (arg1)
@@ -1160,7 +1172,9 @@ void __init paging_init(void)
kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
+ /* Invalidate both kernel TSBs. */
memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
+ memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
if (tlb_type == hypervisor)
sun4v_pgprot_init();
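To close the loop on arg0 = 2 in sun4v_ktsb_register(): the HV_FAST_MMU_TSB_CTX0 trap takes the number of TSB descriptors and the real address of a descriptor array, so registering both ktsb_descr[0] and ktsb_descr[1] is a single hypervisor call. Below is a sketch of the descriptor layout those two slots populate; the field names follow the diff above, but the struct definition itself (struct hv_tsb_descr in asm-sparc64/hypervisor.h) should be treated as an assumption here:

	#include <stdint.h>

	struct hv_tsb_descr {
		uint16_t pgsz_idx;	/* index of the smallest page size used */
		uint16_t assoc;		/* associativity; 1 = direct mapped */
		uint32_t num_ttes;	/* number of TSB entries */
		uint32_t ctx_idx;	/* context index; 0 for the kernel TSBs */
		uint32_t pgsz_mask;	/* bitmask of page sizes this TSB holds */
		uint64_t tsb_base;	/* real (physical) address of the TSB */
		uint64_t resv;		/* reserved, must be zero */
	};

	/* After this patch ktsb_descr[] holds two such entries: slot 0 for
	 * PAGE_SIZE mappings backed by swapper_tsb, slot 1 for the
	 * 4MB/256MB linear mappings backed by swapper_4m_tsb, hence
	 * arg0 = 2.
	 */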