author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2011-05-24 17:11:57 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-05-25 08:39:15 -0700
commit | ff075d605511784c79cbf0ae73d90e07238267b3 (patch)
tree | 700690d0ae5e4d712ce3ba3b182af8e5deae869b /arch
parent | 7a95a2c80748bb91e0bf4b8d58396542e1319d21 (diff)
um: mmu_gather rework
Fix up the um mmu_gather code to conform to the new API.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/um/include/asm/tlb.h | 29 |
1 file changed, 11 insertions, 18 deletions
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 660caedac9eb..4febacd1a8a1 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -22,9 +22,6 @@ struct mmu_gather {
 	unsigned int		fullmm; /* non-zero means full mm flush */
 };
 
-/* Users of the generic TLB shootdown code must declare this storage space. */
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
 					  unsigned long address)
 {
@@ -47,27 +44,20 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
 	}
 }
 
-/* tlb_gather_mmu
- *	Return a pointer to an initialized struct mmu_gather.
- */
-static inline struct mmu_gather *
-tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+static inline void
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
 {
-	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
-
 	tlb->mm = mm;
 	tlb->fullmm = full_mm_flush;
 
 	init_tlb_gather(tlb);
-
-	return tlb;
 }
 
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			       unsigned long end);
 
 static inline void
-tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	if (!tlb->need_flush)
 		return;
@@ -83,12 +73,10 @@ tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 static inline void
 tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-	tlb_flush_mmu(tlb, start, end);
+	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
-
-	put_cpu_var(mmu_gathers);
 }
 
 /* tlb_remove_page
@@ -96,11 +84,16 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
  *	while handling the additional races in SMP caused by other CPUs
  *	caching valid mappings in their TLBs.
  */
-static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	tlb->need_flush = 1;
 	free_page_and_swap_cache(page);
-	return;
+	return 1; /* avoid calling tlb_flush_mmu */
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	__tlb_remove_page(tlb, page);
 }
 
 /**
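The substance of the change is that callers now supply the mmu_gather themselves (typically on their stack) instead of taking the per-CPU mmu_gathers slot, and __tlb_remove_page() reports whether the gather can keep batching. A minimal caller-side sketch, assuming the post-rework signatures shown above; the unmap_one_page() helper and its arguments are purely illustrative and not part of this patch:

/* Illustrative only: the call pattern of the reworked mmu_gather API. */
static void unmap_one_page(struct mm_struct *mm, struct page *page,
			   unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;			/* caller-owned storage, no per-CPU mmu_gathers */

	tlb_gather_mmu(&tlb, mm, 0);		/* 0 = not a full-mm flush */

	/*
	 * __tlb_remove_page() returns non-zero while the gather can keep
	 * batching; the UML version always returns 1, so this explicit
	 * flush is never taken here and only shows the generic pattern.
	 */
	if (!__tlb_remove_page(&tlb, page))
		tlb_flush_mmu(&tlb);

	tlb_finish_mmu(&tlb, start, end);	/* flush and tear down the gather */
}

Moving the storage to the caller is what lets this header drop the get_cpu_var()/put_cpu_var() pair, which in turn is what the wider series needs to make the gather usable without pinning the task to a CPU.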