author     Linus Torvalds <torvalds@linux-foundation.org>   2014-06-08 11:31:16 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-06-08 11:31:16 -0700
commit     3f17ea6dea8ba5668873afa54628a91aaa3fb1c0
tree       afbeb2accd4c2199ddd705ae943995b143a0af02 /arch/tile
parent     1860e379875dfe7271c649058aeddffe5afd9d0d
parent     1a5700bc2d10cd379a795fd2bb377a190af5acd4
Merge branch 'next' (accumulated 3.16 merge window patches) into master
Now that 3.15 is released, this merges the 'next' branch into 'master',
bringing us to the normal situation where my 'master' branch is the
merge window.
* accumulated work in next: (6809 commits)
ufs: sb mutex merge + mutex_destroy
powerpc: update comments for generic idle conversion
cris: update comments for generic idle conversion
idle: remove cpu_idle() forward declarations
nbd: zero from and len fields in NBD_CMD_DISCONNECT.
mm: convert some level-less printks to pr_*
MAINTAINERS: adi-buildroot-devel is moderated
MAINTAINERS: add linux-api for review of API/ABI changes
mm/kmemleak-test.c: use pr_fmt for logging
fs/dlm/debug_fs.c: replace seq_printf by seq_puts
fs/dlm/lockspace.c: convert simple_str to kstr
fs/dlm/config.c: convert simple_str to kstr
mm: mark remap_file_pages() syscall as deprecated
mm: memcontrol: remove unnecessary memcg argument from soft limit functions
mm: memcontrol: clean up memcg zoneinfo lookup
mm/memblock.c: call kmemleak directly from memblock_(alloc|free)
mm/mempool.c: update the kmemleak stack trace for mempool allocations
lib/radix-tree.c: update the kmemleak stack trace for radix tree allocations
mm: introduce kmemleak_update_trace()
mm/kmemleak.c: use %u to print ->checksum
...
Diffstat (limited to 'arch/tile')
-rw-r--r--   arch/tile/Kconfig                    |   2
-rw-r--r--   arch/tile/include/asm/atomic_32.h    |  10
-rw-r--r--   arch/tile/include/asm/atomic_64.h    |   6
-rw-r--r--   arch/tile/include/asm/barrier.h      |  14
-rw-r--r--   arch/tile/include/asm/bitops.h       |   1
-rw-r--r--   arch/tile/include/asm/bitops_32.h    |   8
-rw-r--r--   arch/tile/include/asm/bitops_64.h    |   4
-rw-r--r--   arch/tile/include/asm/irq.h          |   6
-rw-r--r--   arch/tile/include/asm/thread_info.h  |   3
-rw-r--r--   arch/tile/include/asm/topology.h     |  33
-rw-r--r--   arch/tile/kernel/irq.c               |  40
-rw-r--r--   arch/tile/kernel/pci_gx.c            |  17
-rw-r--r--   arch/tile/kernel/proc.c              |   4
-rw-r--r--   arch/tile/mm/homecache.c             |   2
-rw-r--r--   arch/tile/mm/hugetlbpage.c           |   5
15 files changed, 39 insertions(+), 116 deletions(-)
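Most of the header churn in the diff below replaces tile's private barrier macros (smp_mb__before/after_atomic_dec/inc and smp_mb__before/after_clear_bit) with the generic smp_mb__before_atomic()/smp_mb__after_atomic() pair now defined in barrier.h. As a minimal sketch of the pairing the generic macros are meant for — the struct, flag bit, and helper functions here are hypothetical, not taken from this patch:

/*
 * Sketch only: illustrates the generic barrier pairing the tile headers
 * below now provide.  All names are made up for the example.
 */
#include <linux/atomic.h>
#include <linux/bitops.h>

struct example_obj {
        unsigned long flags;            /* hypothetical state bits */
        atomic_t refcount;              /* hypothetical reference count */
        int dead;
};

#define EXAMPLE_BUSY 0                  /* hypothetical flag bit */

static void example_release(struct example_obj *o)
{
        o->dead = 1;
        /* order the store to ->dead before the reference drop */
        smp_mb__before_atomic();
        atomic_dec(&o->refcount);
}

static void example_unlock(struct example_obj *o)
{
        clear_bit(EXAMPLE_BUSY, &o->flags);
        /* order the bit clear before whatever the caller checks next */
        smp_mb__after_atomic();
}

On 32-bit tile the smp_mb__after_atomic() call compiles away, because the atomic routines already end with an "mf" — exactly what the comment moved into barrier.h explains.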
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 85258ca43ff5..4f3006b600e3 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -125,6 +125,8 @@ config HVC_TILE
 
 config TILEGX
         bool "Building for TILE-Gx (64-bit) processor"
+        select SPARSE_IRQ
+        select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
         select HAVE_FUNCTION_TRACER
         select HAVE_FUNCTION_TRACE_MCOUNT_TEST
         select HAVE_FUNCTION_GRAPH_TRACER
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 1ad4a1f7d42b..1b109fad9fff 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -169,16 +169,6 @@ static inline void atomic64_set(atomic64_t *v, long long n)
 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
 
-/*
- * We need to barrier before modifying the word, since the _atomic_xxx()
- * routines just tns the lock and then read/modify/write of the word.
- * But after the word is updated, the routine issues an "mf" before returning,
- * and since it's a function call, we don't even need a compiler barrier.
- */
-#define smp_mb__before_atomic_dec() smp_mb()
-#define smp_mb__before_atomic_inc() smp_mb()
-#define smp_mb__after_atomic_dec() do { } while (0)
-#define smp_mb__after_atomic_inc() do { } while (0)
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index ad220eed05fc..7b11c5fadd42 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -105,12 +105,6 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
-/* Atomic dec and inc don't implement barrier, so provide them if needed. */
-#define smp_mb__before_atomic_dec() smp_mb()
-#define smp_mb__after_atomic_dec() smp_mb()
-#define smp_mb__before_atomic_inc() smp_mb()
-#define smp_mb__after_atomic_inc() smp_mb()
-
 /* Define this to indicate that cmpxchg is an efficient operation. */
 #define __HAVE_ARCH_CMPXCHG
diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h
index b5a05d050a8f..96a42ae79f4d 100644
--- a/arch/tile/include/asm/barrier.h
+++ b/arch/tile/include/asm/barrier.h
@@ -72,6 +72,20 @@ mb_incoherent(void)
 #define mb() fast_mb()
 #define iob() fast_iob()
 
+#ifndef __tilegx__ /* 32 bit */
+/*
+ * We need to barrier before modifying the word, since the _atomic_xxx()
+ * routines just tns the lock and then read/modify/write of the word.
+ * But after the word is updated, the routine issues an "mf" before returning,
+ * and since it's a function call, we don't even need a compiler barrier.
+ */
+#define smp_mb__before_atomic() smp_mb()
+#define smp_mb__after_atomic() do { } while (0)
+#else /* 64 bit */
+#define smp_mb__before_atomic() smp_mb()
+#define smp_mb__after_atomic() smp_mb()
+#endif
+
 #include <asm-generic/barrier.h>
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h
index d5a206865036..20caa346ac06 100644
--- a/arch/tile/include/asm/bitops.h
+++ b/arch/tile/include/asm/bitops.h
@@ -17,6 +17,7 @@
 #define _ASM_TILE_BITOPS_H
 
 #include <linux/types.h>
+#include <asm/barrier.h>
 
 #ifndef _LINUX_BITOPS_H
 #error only <linux/bitops.h> can be included directly
diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h
index 386865ad2f55..bbf7b666f21d 100644
--- a/arch/tile/include/asm/bitops_32.h
+++ b/arch/tile/include/asm/bitops_32.h
@@ -49,8 +49,8 @@ static inline void set_bit(unsigned nr, volatile unsigned long *addr)
  * restricted to acting on a single-word quantity.
  *
  * clear_bit() may not contain a memory barrier, so if it is used for
- * locking purposes, you should call smp_mb__before_clear_bit() and/or
- * smp_mb__after_clear_bit() to ensure changes are visible on other cpus.
+ * locking purposes, you should call smp_mb__before_atomic() and/or
+ * smp_mb__after_atomic() to ensure changes are visible on other cpus.
  */
 static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
 {
@@ -121,10 +121,6 @@ static inline int test_and_change_bit(unsigned nr,
         return (_atomic_xor(addr, mask) & mask) != 0;
 }
 
-/* See discussion at smp_mb__before_atomic_dec() in <asm/atomic_32.h>. */
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() do {} while (0)
-
 #include <asm-generic/bitops/ext2-atomic.h>
 
 #endif /* _ASM_TILE_BITOPS_32_H */
diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h
index ad34cd056085..bb1a29221fcd 100644
--- a/arch/tile/include/asm/bitops_64.h
+++ b/arch/tile/include/asm/bitops_64.h
@@ -32,10 +32,6 @@ static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
         __insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask);
 }
 
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() smp_mb()
-
-
 static inline void change_bit(unsigned nr, volatile unsigned long *addr)
 {
         unsigned long mask = (1UL << (nr % BITS_PER_LONG));
diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h
index 33cff9a3058b..1fe86911838b 100644
--- a/arch/tile/include/asm/irq.h
+++ b/arch/tile/include/asm/irq.h
@@ -18,10 +18,12 @@
 #include <linux/hardirq.h>
 
 /* The hypervisor interface provides 32 IRQs. */
-#define NR_IRQS 32
+#define NR_IRQS 32
 
 /* IRQ numbers used for linux IPIs. */
-#define IRQ_RESCHEDULE 0
+#define IRQ_RESCHEDULE 0
+/* Interrupts for dynamic allocation start at 1. Let the core allocate irq0 */
+#define NR_IRQS_LEGACY 1
 
 #define irq_canonicalize(irq) (irq)
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index 729aa107f64e..d767ff9f59b9 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -129,6 +129,7 @@ extern void _cpu_idle(void);
 #define TIF_MEMDIE 7 /* OOM killer at work */
 #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
 #define TIF_SYSCALL_TRACEPOINT 9 /* syscall tracepoint instrumentation */
+#define TIF_POLLING_NRFLAG 10 /* idle is polling for TIF_NEED_RESCHED */
 
 #define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
@@ -140,6 +141,7 @@ extern void _cpu_idle(void);
 #define _TIF_MEMDIE (1<<TIF_MEMDIE)
 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
 
 /* Work to do on any return to user space. */
 #define _TIF_ALLWORK_MASK \
@@ -162,7 +164,6 @@ extern void _cpu_idle(void);
 #ifdef __tilegx__
 #define TS_COMPAT 0x0001 /* 32-bit compatibility mode */
 #endif
-#define TS_POLLING 0x0004 /* in idle loop but not sleeping */
 #define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal */
 
 #ifndef __ASSEMBLY__
diff --git a/arch/tile/include/asm/topology.h b/arch/tile/include/asm/topology.h
index d15c0d8d550f..938311844233 100644
--- a/arch/tile/include/asm/topology.h
+++ b/arch/tile/include/asm/topology.h
@@ -44,39 +44,6 @@ static inline const struct cpumask *cpumask_of_node(int node)
 /* For now, use numa node -1 for global allocation. */
 #define pcibus_to_node(bus) ((void)(bus), -1)
 
-/*
- * TILE architecture has many cores integrated in one processor, so we need
- * setup bigger balance_interval for both CPU/NODE scheduling domains to
- * reduce process scheduling costs.
- */
-
-/* sched_domains SD_CPU_INIT for TILE architecture */
-#define SD_CPU_INIT (struct sched_domain) {            \
-        .min_interval = 4,                             \
-        .max_interval = 128,                           \
-        .busy_factor = 64,                             \
-        .imbalance_pct = 125,                          \
-        .cache_nice_tries = 1,                         \
-        .busy_idx = 2,                                 \
-        .idle_idx = 1,                                 \
-        .newidle_idx = 0,                              \
-        .wake_idx = 0,                                 \
-        .forkexec_idx = 0,                             \
-                                                       \
-        .flags = 1*SD_LOAD_BALANCE                     \
-                | 1*SD_BALANCE_NEWIDLE                 \
-                | 1*SD_BALANCE_EXEC                    \
-                | 1*SD_BALANCE_FORK                    \
-                | 0*SD_BALANCE_WAKE                    \
-                | 0*SD_WAKE_AFFINE                     \
-                | 0*SD_SHARE_CPUPOWER                  \
-                | 0*SD_SHARE_PKG_RESOURCES             \
-                | 0*SD_SERIALIZE                       \
-                ,                                      \
-        .last_balance = jiffies,                       \
-        .balance_interval = 32,                        \
-}
-
 /* By definition, we create nodes based on online memory. */
 #define node_has_online_mem(nid) 1
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 906a76bdb31d..637f2ffaa5f5 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -54,13 +54,6 @@ static DEFINE_PER_CPU(unsigned long, irq_disable_mask)
  */
 static DEFINE_PER_CPU(int, irq_depth);
 
-/* State for allocating IRQs on Gx. */
-#if CHIP_HAS_IPI()
-static unsigned long available_irqs = ((1UL << NR_IRQS) - 1) &
-        (~(1UL << IRQ_RESCHEDULE));
-static DEFINE_SPINLOCK(available_irqs_lock);
-#endif
-
 #if CHIP_HAS_IPI()
 /* Use SPRs to manipulate device interrupts. */
 #define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask)
@@ -278,38 +271,11 @@ int arch_show_interrupts(struct seq_file *p, int prec)
         return 0;
 }
 
-/*
- * Generic, controller-independent functions:
- */
-
 #if CHIP_HAS_IPI()
-int create_irq(void)
+int arch_setup_hwirq(unsigned int irq, int node)
 {
-        unsigned long flags;
-        int result;
-
-        spin_lock_irqsave(&available_irqs_lock, flags);
-        if (available_irqs == 0)
-                result = -ENOMEM;
-        else {
-                result = __ffs(available_irqs);
-                available_irqs &= ~(1UL << result);
-                dynamic_irq_init(result);
-        }
-        spin_unlock_irqrestore(&available_irqs_lock, flags);
-
-        return result;
+        return irq >= NR_IRQS ? -EINVAL : 0;
 }
-EXPORT_SYMBOL(create_irq);
 
-void destroy_irq(unsigned int irq)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(&available_irqs_lock, flags);
-        available_irqs |= (1UL << irq);
-        dynamic_irq_cleanup(irq);
-        spin_unlock_irqrestore(&available_irqs_lock, flags);
-}
-EXPORT_SYMBOL(destroy_irq);
+void arch_teardown_hwirq(unsigned int irq) { }
 #endif
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
index 077b7bc437e5..e39f9c542807 100644
--- a/arch/tile/kernel/pci_gx.c
+++ b/arch/tile/kernel/pci_gx.c
@@ -350,10 +350,9 @@ static int tile_init_irqs(struct pci_controller *controller)
                 int cpu;
 
                 /* Ask the kernel to allocate an IRQ. */
-                irq = create_irq();
-                if (irq < 0) {
+                irq = irq_alloc_hwirq(-1);
+                if (!irq) {
                         pr_err("PCI: no free irq vectors, failed for %d\n", i);
-
                         goto free_irqs;
                 }
                 controller->irq_intx_table[i] = irq;
@@ -382,7 +381,7 @@ static int tile_init_irqs(struct pci_controller *controller)
 
 free_irqs:
         for (j = 0; j < i; j++)
-                destroy_irq(controller->irq_intx_table[j]);
+                irq_free_hwirq(controller->irq_intx_table[j]);
 
         return -1;
 }
@@ -1500,9 +1499,9 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
         int irq;
         int ret;
 
-        irq = create_irq();
-        if (irq < 0)
-                return irq;
+        irq = irq_alloc_hwirq(-1);
+        if (!irq)
+                return -ENOSPC;
 
         /*
          * Since we use a 64-bit Mem-Map to accept the MSI write, we fail
@@ -1601,11 +1600,11 @@ hv_msi_config_failure:
         /* Free mem-map */
 msi_mem_map_alloc_failure:
 is_64_failure:
-        destroy_irq(irq);
+        irq_free_hwirq(irq);
         return ret;
 }
 
 void arch_teardown_msi_irq(unsigned int irq)
 {
-        destroy_irq(irq);
+        irq_free_hwirq(irq);
 }
diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c
index 681100c59fda..6829a9508649 100644
--- a/arch/tile/kernel/proc.c
+++ b/arch/tile/kernel/proc.c
@@ -113,7 +113,7 @@ arch_initcall(proc_tile_init);
  * Support /proc/sys/tile directory
  */
 
-static ctl_table unaligned_subtable[] = {
+static struct ctl_table unaligned_subtable[] = {
         {
                 .procname = "enabled",
                 .data = &unaligned_fixup,
@@ -138,7 +138,7 @@ static ctl_table unaligned_subtable[] = {
         {}
 };
 
-static ctl_table unaligned_table[] = {
+static struct ctl_table unaligned_table[] = {
         {
                 .procname = "unaligned_fixup",
                 .mode = 0555,
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 004ba568d93f..33294fdc402e 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -417,7 +417,7 @@ void __homecache_free_pages(struct page *page, unsigned int order)
         if (put_page_testzero(page)) {
                 homecache_change_page_home(page, order, PAGE_HOME_HASH);
                 if (order == 0) {
-                        free_hot_cold_page(page, 0);
+                        free_hot_cold_page(page, false);
                 } else {
                         init_page_count(page);
                         __free_pages(page, order);
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 0cb3bbaa580c..e514899e1100 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -166,11 +166,6 @@ int pud_huge(pud_t pud)
         return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
 }
 
-int pmd_huge_support(void)
-{
-        return 1;
-}
-
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                              pmd_t *pmd, int write)
 {
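For reference, a minimal sketch of the allocation pattern the irq.c and pci_gx.c hunks above switch to, assuming a hypothetical driver (the example_* names are not from this patch). Unlike create_irq(), irq_alloc_hwirq() returns 0 on failure rather than a negative errno, which is why the error checks above change from "irq < 0" to "!irq".

/*
 * Sketch only: mirrors the create_irq()/destroy_irq() ->
 * irq_alloc_hwirq()/irq_free_hwirq() conversion done in pci_gx.c above.
 * The driver state and function names are hypothetical.
 */
#include <linux/irq.h>
#include <linux/errno.h>

static unsigned int example_irq;        /* hypothetical driver state */

static int example_setup_irq(void)
{
        /* node -1: no NUMA locality preference, as in the hunks above */
        example_irq = irq_alloc_hwirq(-1);
        if (!example_irq)               /* 0 means the allocation failed */
                return -ENOSPC;

        /* ... register a handler / irq_chip for example_irq here ... */
        return 0;
}

static void example_teardown_irq(void)
{
        irq_free_hwirq(example_irq);
}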