From fbc78b07ba53ace155f27491c81a009e541a93ad Mon Sep 17 00:00:00 2001
From: Philippe Gerum
Date: Thu, 12 Feb 2009 12:18:46 +0000
Subject: powerpc/mm: Fix _PAGE_CHG_MASK to protect _PAGE_SPECIAL

Fix _PAGE_CHG_MASK so that pte_modify() does not affect the _PAGE_SPECIAL bit.

Signed-off-by: Philippe Gerum
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/pgtable-4k.h    | 2 +-
 arch/powerpc/include/asm/pgtable-64k.h   | 2 +-
 arch/powerpc/include/asm/pgtable-ppc32.h | 3 ++-
 3 files changed, 4 insertions(+), 3 deletions(-)

(limited to 'arch/powerpc')

diff --git a/arch/powerpc/include/asm/pgtable-4k.h b/arch/powerpc/include/asm/pgtable-4k.h
index 6b18ba9d2d85..1dbca4e7de67 100644
--- a/arch/powerpc/include/asm/pgtable-4k.h
+++ b/arch/powerpc/include/asm/pgtable-4k.h
@@ -60,7 +60,7 @@
 /* It should be preserving the high 48 bits and then specifically */
 /* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
 #define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
-			 _PAGE_HPTEFLAGS)
+			 _PAGE_HPTEFLAGS | _PAGE_SPECIAL)

 /* Bits to mask out from a PMD to get to the PTE page */
 #define PMD_MASKED_BITS		0
diff --git a/arch/powerpc/include/asm/pgtable-64k.h b/arch/powerpc/include/asm/pgtable-64k.h
index 07b0d8f09cb6..7389003349a6 100644
--- a/arch/powerpc/include/asm/pgtable-64k.h
+++ b/arch/powerpc/include/asm/pgtable-64k.h
@@ -114,7 +114,7 @@ static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
  * pgprot changes
  */
 #define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
-			 _PAGE_ACCESSED)
+			 _PAGE_ACCESSED | _PAGE_SPECIAL)

 /* Bits to mask out from a PMD to get to the PTE page */
 #define PMD_MASKED_BITS		0x1ff
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index f69a4d977729..820b5f0a35ce 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -429,7 +429,8 @@ extern int icache_44x_need_flush;
 #define PMD_PAGE_SIZE(pmd)	bad_call_to_PMD_PAGE_SIZE()
 #endif

-#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
+			 _PAGE_SPECIAL)

 #define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
--
cgit v1.2.3
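As background for the change above: pte_modify() keeps only the bits named in _PAGE_CHG_MASK when a new protection is applied, so any PTE bit left out of the mask is silently dropped on the next protection change. The small userspace sketch below illustrates that masking with made-up bit values; it is only an illustration of the mechanism, not the powerpc kernel code.

/* Userspace illustration of the pte_modify() masking; the bit values
 * below are assumptions for the demo, not the real powerpc definitions. */
#include <stdio.h>

#define DEMO_PAGE_MASK      0xfffff000UL
#define DEMO_PAGE_ACCESSED  0x020UL
#define DEMO_PAGE_DIRTY     0x080UL
#define DEMO_PAGE_SPECIAL   0x400UL

#define CHG_MASK_OLD  (DEMO_PAGE_MASK | DEMO_PAGE_ACCESSED | DEMO_PAGE_DIRTY)
#define CHG_MASK_NEW  (CHG_MASK_OLD | DEMO_PAGE_SPECIAL)

/* Same shape as pte_modify(): keep the bits in chg_mask, take the rest
 * from the new protection value. */
static unsigned long demo_pte_modify(unsigned long pte, unsigned long newprot,
                                     unsigned long chg_mask)
{
	return (pte & chg_mask) | newprot;
}

int main(void)
{
	unsigned long pte = 0x12345000UL | DEMO_PAGE_SPECIAL;

	printf("old mask: special bit %s\n",
	       (demo_pte_modify(pte, 0, CHG_MASK_OLD) & DEMO_PAGE_SPECIAL) ? "kept" : "lost");
	printf("new mask: special bit %s\n",
	       (demo_pte_modify(pte, 0, CHG_MASK_NEW) & DEMO_PAGE_SPECIAL) ? "kept" : "lost");
	return 0;
}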
From 06eccea6c34e00c8fedce49229ed35fa84619fb7 Mon Sep 17 00:00:00 2001
From: Dave Hansen
Date: Thu, 12 Feb 2009 12:36:04 +0000
Subject: powerpc/mm: Fix numa reserve bootmem page selection

Fix the powerpc NUMA reserve bootmem page selection logic.

commit 8f64e1f2d1e09267ac926e15090fd505c1c0cbcb (powerpc: Reserve in
bootmem lmb reserved regions that cross NUMA nodes) changed the logic
for how the powerpc LMB reserved regions were converted to bootmem
reserved regions. As the following discussion reports, the new logic
was not correct.

mark_reserved_regions_for_nid() goes through each LMB on the system
that specifies a reserved area. It searches for active regions that
intersect with that LMB and are on the specified node. It attempts to
bootmem-reserve only the area where the active region and the reserved
LMB intersect. We can not reserve things on other nodes as they may not
have bootmem structures allocated, yet.

We base the size of the bootmem reservation on two possible things.
Normally, we just make the reservation start and stop exactly at the
start and end of the LMB.

However, the LMB reservations are not aware of NUMA nodes and on
occasion a single LMB may cross into several adjacent active regions.
Those may even be on different NUMA nodes and will require separate
calls to the bootmem reserve functions. So, the bootmem reservation
must be trimmed to fit inside the current active region.

That's all fine and dandy, but we trim the reservation in a
page-aligned fashion. That's bad because we start the reservation at a
non-page-aligned address: physbase. The reservation may only span 2
bytes, but those bytes may span two pfns and cause a reserve_size of
2*PAGE_SIZE.

Take the case where you reserve 0x2 bytes at 0x0fff and where the
active region ends at 0x1000. You'll jump into that if() statement, but
node_ar.end_pfn=0x1 and start_pfn=0x0. You'll end up with a
reserve_size=0x1000, and then call

  reserve_bootmem_node(node, physbase=0xfff, size=0x1000);

0x1000 may not be on the same node as 0xfff. Oops.

In almost all the VM code, end_ is not inclusive. If you have an
end_pfn of 0x1234, page 0x1234 is not included in the range. Using
PFN_UP instead of the (>> PAGE_SHIFT) will make this consistent with
the other VM code.

We also need to do the math for the reserved size with physbase instead
of start_pfn. node_ar.end_pfn << PAGE_SHIFT is *precisely* the end of
the node. However, (start_pfn << PAGE_SHIFT) is *NOT* precisely the
beginning of the reserved area. That is, of course, physbase. If we
don't use physbase here, the reserve_size can be made too large.

From: Dave Hansen
Tested-by: Geoff Levand
Tested on PS3.
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/mm/numa.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'arch/powerpc')

diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 7393bd76d698..5ac08b8ab654 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -19,6 +19,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -882,7 +883,7 @@ static void mark_reserved_regions_for_nid(int nid)
 		unsigned long physbase = lmb.reserved.region[i].base;
 		unsigned long size = lmb.reserved.region[i].size;
 		unsigned long start_pfn = physbase >> PAGE_SHIFT;
-		unsigned long end_pfn = ((physbase + size) >> PAGE_SHIFT);
+		unsigned long end_pfn = PFN_UP(physbase + size);
 		struct node_active_region node_ar;
 		unsigned long node_end_pfn = node->node_start_pfn +
 					     node->node_spanned_pages;
@@ -908,7 +909,7 @@
 		 */
 		if (end_pfn > node_ar.end_pfn)
 			reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
-					- (start_pfn << PAGE_SHIFT);
+					- physbase;
 		/*
 		 * Only worry about *this* node, others may not
 		 * yet have valid NODE_DATA().
--
cgit v1.2.3
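To make the 0xfff example above concrete, the standalone sketch below replays the two expressions the patch changes, assuming PAGE_SHIFT is 12; it only mimics the arithmetic and is not the kernel code.

/* Standalone replay of the arithmetic from the commit message above.
 * Assumes PAGE_SHIFT = 12; not the kernel implementation. */
#include <stdio.h>

#define PAGE_SHIFT  12
#define PAGE_SIZE   (1UL << PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
	unsigned long physbase = 0xfff;         /* non-page-aligned start of the reserved LMB */
	unsigned long size = 0x2;               /* tiny reservation that crosses a page boundary */
	unsigned long node_ar_end_pfn = 0x1;    /* active region ends at 0x1000 */

	unsigned long start_pfn = physbase >> PAGE_SHIFT;               /* 0x0 */
	unsigned long end_pfn_old = (physbase + size) >> PAGE_SHIFT;    /* 0x1: misses the partial page */
	unsigned long end_pfn_new = PFN_UP(physbase + size);            /* 0x2: rounds up like other VM code */

	/* old trim expression: based at start_pfn << PAGE_SHIFT, balloons to a full page */
	unsigned long reserve_old = (node_ar_end_pfn << PAGE_SHIFT) - (start_pfn << PAGE_SHIFT);
	/* fixed trim expression: based at physbase, covers only the bytes on this node */
	unsigned long reserve_new = (node_ar_end_pfn << PAGE_SHIFT) - physbase;

	printf("end_pfn:      old=%#lx new=%#lx\n", end_pfn_old, end_pfn_new);
	printf("reserve_size: old=%#lx new=%#lx\n", reserve_old, reserve_new);
	return 0;
}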
From 0047656e2a97d4452dd7df9e52591a7afe21a263 Mon Sep 17 00:00:00 2001
From: Geoff Levand
Date: Thu, 12 Feb 2009 12:36:16 +0000
Subject: powerpc/ps3: Move ps3_mm_add_memory to device_initcall

Change the PS3 hotplug memory routine ps3_mm_add_memory() from a
core_initcall to a device_initcall.

core_initcall routines run before the powerpc topology_init() startup
routine, which is a subsys_initcall, resulting in failure of
ps3_mm_add_memory() when CONFIG_NUMA=y. When ps3_mm_add_memory() fails,
the system will boot with just the 128 MiB of boot memory.

Signed-off-by: Geoff Levand
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/platforms/ps3/mm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/powerpc')

diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index 67de6bf3db3d..d281cc0bca71 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -328,7 +328,7 @@ static int __init ps3_mm_add_memory(void)

 	return result;
 }

-core_initcall(ps3_mm_add_memory);
+device_initcall(ps3_mm_add_memory);

 /*============================================================================*/
 /* dma routines */
--
cgit v1.2.3
From 26456dcfb8d8e43b1b64b2a14710694cf7a72f05 Mon Sep 17 00:00:00 2001
From: Michael Neuling
Date: Thu, 12 Feb 2009 19:08:58 +0000
Subject: powerpc/vsx: Fix VSX alignment handler for regs 32-63

Fix the VSX alignment handler for VSX registers 32-63. These are stored
in the VMX part of the thread_struct, not the FPR part.

Signed-off-by: Michael Neuling
CC: stable@kernel.org (2.6.27 & .28 please)
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/align.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

(limited to 'arch/powerpc')

diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 5af4e9b2dbe2..ada06924a423 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -646,11 +646,16 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
 		       unsigned int areg, struct pt_regs *regs,
 		       unsigned int flags, unsigned int length)
 {
-	char *ptr = (char *) &current->thread.TS_FPR(reg);
+	char *ptr;
 	int ret = 0;

 	flush_vsx_to_thread(current);

+	if (reg < 32)
+		ptr = (char *) &current->thread.TS_FPR(reg);
+	else
+		ptr = (char *) &current->thread.vr[reg - 32];
+
 	if (flags & ST)
 		ret = __copy_to_user(addr, ptr, length);
 	else {
--
cgit v1.2.3
From ad8ba2cd44d4d39fb3fe55d5dcc565b19fc3a7fb Mon Sep 17 00:00:00 2001
From: Sheng Yang
Date: Tue, 6 Jan 2009 10:03:02 +0800
Subject: KVM: Add kvm_arch_sync_events to sync with asynchronous events

kvm_arch_sync_events is introduced to quiet down all other events that
may happen concurrently with the VM destroy process, such as the IRQ
handler and the work struct for an assigned device.

Since kvm_arch_sync_events is called at the very beginning of
kvm_destroy_vm(), the state of KVM is still valid at that point and
provides an environment in which to quiet down the other events.

Signed-off-by: Sheng Yang
Signed-off-by: Avi Kivity
---
 arch/powerpc/kvm/powerpc.c | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'arch/powerpc')

diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 2822c8ccfaaf..5f81256287f5 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -125,6 +125,10 @@ static void kvmppc_free_vcpus(struct kvm *kvm)
 	}
 }

+void kvm_arch_sync_events(struct kvm *kvm)
+{
+}
+
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	kvmppc_free_vcpus(kvm);
--
cgit v1.2.3
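As a rough sketch of the teardown ordering the commit message describes, with stand-in types and printf bodies rather than the real virt/kvm code:

/* Stand-in sketch of the destroy-path ordering; not the actual KVM implementation. */
#include <stdio.h>

struct kvm { int id; };   /* placeholder for the real struct kvm */

static void kvm_arch_sync_events(struct kvm *kvm)
{
	/* arch hook: quiet down IRQ handlers / work items for assigned devices
	 * while the VM state is still fully valid (empty on powerpc, per the diff) */
	printf("vm %d: sync events\n", kvm->id);
}

static void kvm_arch_destroy_vm(struct kvm *kvm)
{
	/* powerpc: frees the vcpus, as in the diff above */
	printf("vm %d: arch destroy\n", kvm->id);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	kvm_arch_sync_events(kvm);   /* called at the very beginning of teardown */
	/* ... generic teardown would follow here ... */
	kvm_arch_destroy_vm(kvm);
}

int main(void)
{
	struct kvm vm = { 0 };
	kvm_destroy_vm(&vm);
	return 0;
}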