Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/arch_timer.h          |  19
-rw-r--r--  arch/arm/include/asm/cacheflush.h          |   6
-rw-r--r--  arch/arm/include/asm/cmpxchg.h             |  73
-rw-r--r--  arch/arm/include/asm/cpu.h                 |   1
-rw-r--r--  arch/arm/include/asm/device.h              |   4
-rw-r--r--  arch/arm/include/asm/dma-contiguous.h      |  15
-rw-r--r--  arch/arm/include/asm/dma-iommu.h           |  34
-rw-r--r--  arch/arm/include/asm/dma-mapping.h         | 407
-rw-r--r--  arch/arm/include/asm/glue-df.h             |   8
-rw-r--r--  arch/arm/include/asm/glue-proc.h           |  18
-rw-r--r--  arch/arm/include/asm/hardware/clps7111.h   | 184
-rw-r--r--  arch/arm/include/asm/hardware/cs89712.h    |  49
-rw-r--r--  arch/arm/include/asm/hardware/ep7211.h     |  40
-rw-r--r--  arch/arm/include/asm/hardware/ep7212.h     |  83
-rw-r--r--  arch/arm/include/asm/hardware/it8152.h     |   2
-rw-r--r--  arch/arm/include/asm/hardware/pl080.h      |   2
-rw-r--r--  arch/arm/include/asm/hardware/uengine.h    |  62
-rw-r--r--  arch/arm/include/asm/kvm_para.h            |   1
-rw-r--r--  arch/arm/include/asm/mach/map.h            |   1
-rw-r--r--  arch/arm/include/asm/mach/pci.h            |  17
-rw-r--r--  arch/arm/include/asm/mach/time.h           |   5
-rw-r--r--  arch/arm/include/asm/mmu.h                 |   7
-rw-r--r--  arch/arm/include/asm/mmu_context.h         | 104
-rw-r--r--  arch/arm/include/asm/page.h                |   9
-rw-r--r--  arch/arm/include/asm/pgtable-3level.h      |   2
-rw-r--r--  arch/arm/include/asm/processor.h           |   5
-rw-r--r--  arch/arm/include/asm/ptrace.h              |   5
-rw-r--r--  arch/arm/include/asm/syscall.h             |  93
-rw-r--r--  arch/arm/include/asm/thread_info.h         |   1
-rw-r--r--  arch/arm/include/asm/tlbflush.h            |  21
30 files changed, 393 insertions, 885 deletions
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
new file mode 100644
index 000000000000..ed2e95d46e29
--- /dev/null
+++ b/arch/arm/include/asm/arch_timer.h
@@ -0,0 +1,19 @@
+#ifndef __ASMARM_ARCH_TIMER_H
+#define __ASMARM_ARCH_TIMER_H
+
+#ifdef CONFIG_ARM_ARCH_TIMER
+int arch_timer_of_register(void);
+int arch_timer_sched_clock_init(void);
+#else
+static inline int arch_timer_of_register(void)
+{
+ return -ENXIO;
+}
+
+static inline int arch_timer_sched_clock_init(void)
+{
+ return -ENXIO;
+}
+#endif
+
+#endif
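
The stubs above let platform code call the architected-timer hooks unconditionally and fall back when CONFIG_ARM_ARCH_TIMER is disabled or no timer is present. A minimal sketch of a machine timer-init path consuming this header; board_legacy_timer_init() is a hypothetical SoC fallback:

#include <linux/init.h>
#include <asm/arch_timer.h>

extern void board_legacy_timer_init(void);     /* hypothetical SoC timer */

static void __init board_timer_init(void)
{
        /* Probe the Cortex-A architected timer described in the device tree. */
        if (arch_timer_of_register() == 0) {
                /* Optionally drive sched_clock() from the same counter. */
                arch_timer_sched_clock_init();
                return;
        }

        /* -ENXIO: no arch timer available, fall back to a SoC timer. */
        board_legacy_timer_init();
}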
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index d5d8d5c72682..004c1bc95d2b 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -101,7 +101,7 @@ struct cpu_cache_fns {
void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
void (*coherent_kern_range)(unsigned long, unsigned long);
- void (*coherent_user_range)(unsigned long, unsigned long);
+ int (*coherent_user_range)(unsigned long, unsigned long);
void (*flush_kern_dcache_area)(void *, size_t);
void (*dma_map_area)(const void *, size_t, int);
@@ -142,7 +142,7 @@ extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
-extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
+extern int __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);
/*
@@ -249,7 +249,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
* Harvard caches are synchronised for the user space address range.
* This is used for the ARM private sys_cacheflush system call.
*/
-#define flush_cache_user_range(vma,start,end) \
+#define flush_cache_user_range(start,end) \
__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
/*
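
Because __cpuc_coherent_user_range() now returns int, cache maintenance over a user range can report a fault instead of silently skipping unmapped pages, and flush_cache_user_range() loses its unused vma argument. A hedged sketch of how a caller such as the sys_cacheflush path can propagate that result (the wrapper below is illustrative, not the kernel's actual handler, and the non-zero-means-fault convention is assumed):

#include <linux/errno.h>
#include <asm/cacheflush.h>

static int board_user_cacheflush(unsigned long start, unsigned long end)
{
        /* Clean/invalidate the user range; may fail if a page is unmapped. */
        if (flush_cache_user_range(start, end))
                return -EFAULT;

        return 0;
}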
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index d41d7cbf0ada..7eb18c1d8d6c 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -229,66 +229,19 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
(unsigned long)(n), \
sizeof(*(ptr))))
-#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
-
-/*
- * Note : ARMv7-M (currently unsupported by Linux) does not support
- * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
- * not be allowed to use __cmpxchg64.
- */
-static inline unsigned long long __cmpxchg64(volatile void *ptr,
- unsigned long long old,
- unsigned long long new)
-{
- register unsigned long long oldval asm("r0");
- register unsigned long long __old asm("r2") = old;
- register unsigned long long __new asm("r4") = new;
- unsigned long res;
-
- do {
- asm volatile(
- " @ __cmpxchg8\n"
- " ldrexd %1, %H1, [%2]\n"
- " mov %0, #0\n"
- " teq %1, %3\n"
- " teqeq %H1, %H3\n"
- " strexdeq %0, %4, %H4, [%2]\n"
- : "=&r" (res), "=&r" (oldval)
- : "r" (ptr), "Ir" (__old), "r" (__new)
- : "memory", "cc");
- } while (res);
-
- return oldval;
-}
-
-static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
- unsigned long long old,
- unsigned long long new)
-{
- unsigned long long ret;
-
- smp_mb();
- ret = __cmpxchg64(ptr, old, new);
- smp_mb();
-
- return ret;
-}
-
-#define cmpxchg64(ptr,o,n) \
- ((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
- (unsigned long long)(o), \
- (unsigned long long)(n)))
-
-#define cmpxchg64_local(ptr,o,n) \
- ((__typeof__(*(ptr)))__cmpxchg64((ptr), \
- (unsigned long long)(o), \
- (unsigned long long)(n)))
-
-#else /* min ARCH = ARMv6 */
-
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#endif
+#define cmpxchg64(ptr, o, n) \
+ ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr), \
+ atomic64_t, \
+ counter), \
+ (unsigned long)(o), \
+ (unsigned long)(n)))
+
+#define cmpxchg64_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr), \
+ local64_t, \
+ a), \
+ (unsigned long)(o), \
+ (unsigned long)(n)))
#endif /* __LINUX_ARM_ARCH__ >= 6 */
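
cmpxchg64() is now layered on atomic64_cmpxchg(), using container_of() on the assumption that atomic64_t wraps a single 64-bit counter so the pointer cast stays layout-compatible; callers continue to operate on a plain u64. A minimal sketch of the usual caller-side retry idiom (names are illustrative):

#include <linux/types.h>
#include <asm/cmpxchg.h>

static u64 board_seq;

/* Publish a new 64-bit value, retrying if another CPU updated it first. */
static void board_seq_set(u64 new)
{
        u64 old;

        do {
                old = board_seq;
        } while (cmpxchg64(&board_seq, old, new) != old);
}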
diff --git a/arch/arm/include/asm/cpu.h b/arch/arm/include/asm/cpu.h
index 793968173bef..d797223b39d5 100644
--- a/arch/arm/include/asm/cpu.h
+++ b/arch/arm/include/asm/cpu.h
@@ -16,7 +16,6 @@
struct cpuinfo_arm {
struct cpu cpu;
#ifdef CONFIG_SMP
- struct task_struct *idle;
unsigned int loops_per_jiffy;
#endif
};
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 7aa368003b05..b69c0d3285f8 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -7,12 +7,16 @@
#define ASMARM_DEVICE_H
struct dev_archdata {
+ struct dma_map_ops *dma_ops;
#ifdef CONFIG_DMABOUNCE
struct dmabounce_device_info *dmabounce;
#endif
#ifdef CONFIG_IOMMU_API
void *iommu; /* private IOMMU data */
#endif
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+ struct dma_iommu_mapping *mapping;
+#endif
};
struct omap_device;
diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h
new file mode 100644
index 000000000000..3ed37b4d93da
--- /dev/null
+++ b/arch/arm/include/asm/dma-contiguous.h
@@ -0,0 +1,15 @@
+#ifndef ASMARM_DMA_CONTIGUOUS_H
+#define ASMARM_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_CMA
+
+#include <linux/types.h>
+#include <asm-generic/dma-contiguous.h>
+
+void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
+
+#endif
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
new file mode 100644
index 000000000000..799b09409fad
--- /dev/null
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -0,0 +1,34 @@
+#ifndef ASMARM_DMA_IOMMU_H
+#define ASMARM_DMA_IOMMU_H
+
+#ifdef __KERNEL__
+
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/kmemcheck.h>
+
+struct dma_iommu_mapping {
+ /* iommu specific data */
+ struct iommu_domain *domain;
+
+ void *bitmap;
+ size_t bits;
+ unsigned int order;
+ dma_addr_t base;
+
+ spinlock_t lock;
+ struct kref kref;
+};
+
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
+ int order);
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
+
+int arm_iommu_attach_device(struct device *dev,
+ struct dma_iommu_mapping *mapping);
+
+#endif /* __KERNEL__ */
+#endif
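
arm_iommu_create_mapping() builds an IOVA window (domain, bitmap allocator and lock) that arm_iommu_attach_device() then installs as the device's archdata mapping. A hedged setup sketch for a platform device; the window base and size are placeholders, and error handling assumes ERR_PTR-style returns:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <asm/dma-iommu.h>

static int board_attach_iommu(struct device *dev)
{
        struct dma_iommu_mapping *mapping;

        /* Carve out a 128 MiB IOVA window starting at 0x80000000. */
        mapping = arm_iommu_create_mapping(&platform_bus_type,
                                           0x80000000, 0x08000000, 0);
        if (IS_ERR(mapping))
                return PTR_ERR(mapping);

        /* Route this device's DMA mappings through the IOMMU window. */
        return arm_iommu_attach_device(dev, mapping);
}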
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index cb3b7c981c4b..bbef15d04890 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -5,11 +5,35 @@
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
+#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>
+#define DMA_ERROR_CODE (~0)
+extern struct dma_map_ops arm_dma_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ if (dev && dev->archdata.dma_ops)
+ return dev->archdata.dma_ops;
+ return &arm_dma_ops;
+}
+
+static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+{
+ BUG_ON(!dev);
+ dev->archdata.dma_ops = ops;
+}
+
+#include <asm-generic/dma-mapping-common.h>
+
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+ return get_dma_ops(dev)->set_dma_mask(dev, mask);
+}
+
#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif
@@ -62,68 +86,11 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
#endif
/*
- * The DMA API is built upon the notion of "buffer ownership". A buffer
- * is either exclusively owned by the CPU (and therefore may be accessed
- * by it) or exclusively owned by the DMA device. These helper functions
- * represent the transitions between these two ownership states.
- *
- * Note, however, that on later ARMs, this notion does not work due to
- * speculative prefetches. We model our approach on the assumption that
- * the CPU does do speculative prefetches, which means we clean caches
- * before transfers and delay cache invalidation until transfer completion.
- *
- * Private support functions: these are not part of the API and are
- * liable to change. Drivers must not use these.
- */
-static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
- enum dma_data_direction dir)
-{
- extern void ___dma_single_cpu_to_dev(const void *, size_t,
- enum dma_data_direction);
-
- if (!arch_is_coherent())
- ___dma_single_cpu_to_dev(kaddr, size, dir);
-}
-
-static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
- enum dma_data_direction dir)
-{
- extern void ___dma_single_dev_to_cpu(const void *, size_t,
- enum dma_data_direction);
-
- if (!arch_is_coherent())
- ___dma_single_dev_to_cpu(kaddr, size, dir);
-}
-
-static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
- size_t size, enum dma_data_direction dir)
-{
- extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
- size_t, enum dma_data_direction);
-
- if (!arch_is_coherent())
- ___dma_page_cpu_to_dev(page, off, size, dir);
-}
-
-static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
- size_t size, enum dma_data_direction dir)
-{
- extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
- size_t, enum dma_data_direction);
-
- if (!arch_is_coherent())
- ___dma_page_dev_to_cpu(page, off, size, dir);
-}
-
-extern int dma_supported(struct device *, u64);
-extern int dma_set_mask(struct device *, u64);
-
-/*
* DMA errors are defined by all-bits-set in the DMA address.
*/
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
- return dma_addr == ~0;
+ return dma_addr == DMA_ERROR_CODE;
}
/*
@@ -141,69 +108,118 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
{
}
+extern int dma_supported(struct device *dev, u64 mask);
+
/**
- * dma_alloc_coherent - allocate consistent memory for DMA
+ * arm_dma_alloc - allocate consistent memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @size: required memory size
* @handle: bus-specific DMA address
+ * @attrs: optional attributes that specify mapping properties
*
- * Allocate some uncached, unbuffered memory for a device for
- * performing DMA. This function allocates pages, and will
- * return the CPU-viewed address, and sets @handle to be the
- * device-viewed address.
+ * Allocate some memory for a device for performing DMA. This function
+ * allocates pages, and will return the CPU-viewed address, and sets @handle
+ * to be the device-viewed address.
*/
-extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
+extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, struct dma_attrs *attrs);
+
+#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ void *cpu_addr;
+ BUG_ON(!ops);
+
+ cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
+ debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+ return cpu_addr;
+}
/**
- * dma_free_coherent - free memory allocated by dma_alloc_coherent
+ * arm_dma_free - free memory allocated by arm_dma_alloc
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @size: size of memory originally requested in dma_alloc_coherent
* @cpu_addr: CPU-view address returned from dma_alloc_coherent
* @handle: device-view address returned from dma_alloc_coherent
+ * @attrs: optional attributes that specify mapping properties
*
* Free (and unmap) a DMA buffer previously allocated by
- * dma_alloc_coherent().
+ * arm_dma_alloc().
*
* References to memory and mappings associated with cpu_addr/handle
* during and after this call executing are illegal.
*/
-extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
+extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, struct dma_attrs *attrs);
+
+#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+
+ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+ ops->free(dev, size, cpu_addr, dma_handle, attrs);
+}
/**
- * dma_mmap_coherent - map a coherent DMA allocation into user space
+ * arm_dma_mmap - map a coherent DMA allocation into user space
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @vma: vm_area_struct describing requested user mapping
* @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
* @handle: device-view address returned from dma_alloc_coherent
* @size: size of memory originally requested in dma_alloc_coherent
+ * @attrs: optional attributes that specify mapping properties
*
* Map a coherent DMA buffer previously allocated by dma_alloc_coherent
* into user space. The coherent DMA buffer must not be freed by the
* driver until the user space mapping has been released.
*/
-int dma_mmap_coherent(struct device *, struct vm_area_struct *,
- void *, dma_addr_t, size_t);
+extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs);
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
-/**
- * dma_alloc_writecombine - allocate writecombining memory for DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: required memory size
- * @handle: bus-specific DMA address
- *
- * Allocate some uncached, buffered memory for a device for
- * performing DMA. This function allocates pages, and will
- * return the CPU-viewed address, and sets @handle to be the
- * device-viewed address.
- */
-extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
- gfp_t);
+static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size, struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+ return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
+
+static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
+}
-#define dma_free_writecombine(dev,size,cpu_addr,handle) \
- dma_free_coherent(dev,size,cpu_addr,handle)
+static inline void dma_free_writecombine(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
-int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
- void *, dma_addr_t, size_t);
+static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
/*
* This can be called during boot to increase the size of the consistent
@@ -212,8 +228,6 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
*/
extern void __init init_consistent_dma_size(unsigned long size);
-
-#ifdef CONFIG_DMABOUNCE
/*
* For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
* and utilize bounce buffers as needed to work around limited DMA windows.
@@ -253,222 +267,19 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
*/
extern void dmabounce_unregister_dev(struct device *);
-/*
- * The DMA API, implemented by dmabounce.c. See below for descriptions.
- */
-extern dma_addr_t __dma_map_page(struct device *, struct page *,
- unsigned long, size_t, enum dma_data_direction);
-extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
- enum dma_data_direction);
-
-/*
- * Private functions
- */
-int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
- size_t, enum dma_data_direction);
-int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
- size_t, enum dma_data_direction);
-#else
-static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
- unsigned long offset, size_t size, enum dma_data_direction dir)
-{
- return 1;
-}
-static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
- unsigned long offset, size_t size, enum dma_data_direction dir)
-{
- return 1;
-}
-
-
-static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir)
-{
- __dma_page_cpu_to_dev(page, offset, size, dir);
- return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
-
-static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
-{
- __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
- handle & ~PAGE_MASK, size, dir);
-}
-#endif /* CONFIG_DMABOUNCE */
-
-/**
- * dma_map_single - map a single buffer for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @cpu_addr: CPU direct mapped address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed. The CPU
- * can regain ownership by calling dma_unmap_single() or
- * dma_sync_single_for_cpu().
- */
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
- size_t size, enum dma_data_direction dir)
-{
- unsigned long offset;
- struct page *page;
- dma_addr_t addr;
-
- BUG_ON(!virt_addr_valid(cpu_addr));
- BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
- BUG_ON(!valid_dma_direction(dir));
-
- page = virt_to_page(cpu_addr);
- offset = (unsigned long)cpu_addr & ~PAGE_MASK;
- addr = __dma_map_page(dev, page, offset, size, dir);
- debug_dma_map_page(dev, page, offset, size, dir, addr, true);
-
- return addr;
-}
-
-/**
- * dma_map_page - map a portion of a page for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed. The CPU
- * can regain ownership by calling dma_unmap_page().
- */
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir)
-{
- dma_addr_t addr;
-
- BUG_ON(!valid_dma_direction(dir));
-
- addr = __dma_map_page(dev, page, offset, size, dir);
- debug_dma_map_page(dev, page, offset, size, dir, addr, false);
-
- return addr;
-}
-
-/**
- * dma_unmap_single - unmap a single buffer previously mapped
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_single)
- * @dir: DMA transfer direction (same as passed to dma_map_single)
- *
- * Unmap a single streaming mode DMA translation. The handle and size
- * must match what was provided in the previous dma_map_single() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
-{
- debug_dma_unmap_page(dev, handle, size, dir, true);
- __dma_unmap_page(dev, handle, size, dir);
-}
-
-/**
- * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_page)
- * @dir: DMA transfer direction (same as passed to dma_map_page)
- *
- * Unmap a page streaming mode DMA translation. The handle and size
- * must match what was provided in the previous dma_map_page() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
-{
- debug_dma_unmap_page(dev, handle, size, dir, false);
- __dma_unmap_page(dev, handle, size, dir);
-}
-
-/**
- * dma_sync_single_range_for_cpu
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @offset: offset of region to start sync
- * @size: size of region to sync
- * @dir: DMA transfer direction (same as passed to dma_map_single)
- *
- * Make physical memory consistent for a single streaming mode DMA
- * translation after a transfer.
- *
- * If you perform a dma_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so. At the
- * next point you give the PCI dma address back to the card, you
- * must first the perform a dma_sync_for_device, and then the
- * device again owns the buffer.
- */
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t handle, unsigned long offset, size_t size,
- enum dma_data_direction dir)
-{
- BUG_ON(!valid_dma_direction(dir));
-
- debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
-
- if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
- return;
-
- __dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t handle, unsigned long offset, size_t size,
- enum dma_data_direction dir)
-{
- BUG_ON(!valid_dma_direction(dir));
-
- debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
-
- if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
- return;
-
- __dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
- dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
- dma_sync_single_range_for_device(dev, handle, 0, size, dir);
-}
/*
* The scatter list versions of the above methods.
*/
-extern int dma_map_sg(struct device *, struct scatterlist *, int,
- enum dma_data_direction);
-extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
+extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
+ enum dma_data_direction, struct dma_attrs *attrs);
+extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
+ enum dma_data_direction, struct dma_attrs *attrs);
+extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
enum dma_data_direction);
-extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
+extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
enum dma_data_direction);
-extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
- enum dma_data_direction);
-
#endif /* __KERNEL__ */
#endif
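
With dma_map_ops hanging off dev->archdata, the coherent helpers above become thin wrappers that dispatch through get_dma_ops(), and write-combining is expressed as DMA_ATTR_WRITE_COMBINE on the same path. A short driver-side sketch, assuming a probe/remove pair with a valid struct device (names and sizes are illustrative):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static void *board_buf;
static dma_addr_t board_buf_dma;

static int board_dma_probe(struct device *dev)
{
        /* Coherent memory, routed via get_dma_ops(dev)->alloc(). */
        board_buf = dma_alloc_coherent(dev, PAGE_SIZE, &board_buf_dma,
                                       GFP_KERNEL);
        if (!board_buf)
                return -ENOMEM;

        return 0;
}

static void board_dma_remove(struct device *dev)
{
        /* The same ops table handles the free, with attrs == NULL. */
        dma_free_coherent(dev, PAGE_SIZE, board_buf, board_buf_dma);
}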
diff --git a/arch/arm/include/asm/glue-df.h b/arch/arm/include/asm/glue-df.h
index 354d571e8bcc..8cacbcda76da 100644
--- a/arch/arm/include/asm/glue-df.h
+++ b/arch/arm/include/asm/glue-df.h
@@ -31,14 +31,6 @@
#undef CPU_DABORT_HANDLER
#undef MULTI_DABORT
-#if defined(CONFIG_CPU_ARM610)
-# ifdef CPU_DABORT_HANDLER
-# define MULTI_DABORT 1
-# else
-# define CPU_DABORT_HANDLER cpu_arm6_data_abort
-# endif
-#endif
-
#if defined(CONFIG_CPU_ARM710)
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h
index e2be7f142668..ac1dd54724b6 100644
--- a/arch/arm/include/asm/glue-proc.h
+++ b/arch/arm/include/asm/glue-proc.h
@@ -23,15 +23,6 @@
* CPU_NAME - the prefix for CPU related functions
*/
-#ifdef CONFIG_CPU_ARM610
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm6
-# endif
-#endif
-
#ifdef CONFIG_CPU_ARM7TDMI
# ifdef CPU_NAME
# undef MULTI_CPU
@@ -41,15 +32,6 @@
# endif
#endif
-#ifdef CONFIG_CPU_ARM710
-# ifdef CPU_NAME
-# undef MULTI_CPU
-# define MULTI_CPU
-# else
-# define CPU_NAME cpu_arm7
-# endif
-#endif
-
#ifdef CONFIG_CPU_ARM720T
# ifdef CPU_NAME
# undef MULTI_CPU
diff --git a/arch/arm/include/asm/hardware/clps7111.h b/arch/arm/include/asm/hardware/clps7111.h
deleted file mode 100644
index 44477225aed6..000000000000
--- a/arch/arm/include/asm/hardware/clps7111.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/clps7111.h
- *
- * This file contains the hardware definitions of the CLPS7111 internal
- * registers.
- *
- * Copyright (C) 2000 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef __ASM_HARDWARE_CLPS7111_H
-#define __ASM_HARDWARE_CLPS7111_H
-
-#define CLPS7111_PHYS_BASE (0x80000000)
-
-#ifndef __ASSEMBLY__
-#define clps_readb(off) __raw_readb(CLPS7111_BASE + (off))
-#define clps_readw(off) __raw_readw(CLPS7111_BASE + (off))
-#define clps_readl(off) __raw_readl(CLPS7111_BASE + (off))
-#define clps_writeb(val,off) __raw_writeb(val, CLPS7111_BASE + (off))
-#define clps_writew(val,off) __raw_writew(val, CLPS7111_BASE + (off))
-#define clps_writel(val,off) __raw_writel(val, CLPS7111_BASE + (off))
-#endif
-
-#define PADR (0x0000)
-#define PBDR (0x0001)
-#define PDDR (0x0003)
-#define PADDR (0x0040)
-#define PBDDR (0x0041)
-#define PDDDR (0x0043)
-#define PEDR (0x0080)
-#define PEDDR (0x00c0)
-#define SYSCON1 (0x0100)
-#define SYSFLG1 (0x0140)
-#define MEMCFG1 (0x0180)
-#define MEMCFG2 (0x01c0)
-#define DRFPR (0x0200)
-#define INTSR1 (0x0240)
-#define INTMR1 (0x0280)
-#define LCDCON (0x02c0)
-#define TC1D (0x0300)
-#define TC2D (0x0340)
-#define RTCDR (0x0380)
-#define RTCMR (0x03c0)
-#define PMPCON (0x0400)
-#define CODR (0x0440)
-#define UARTDR1 (0x0480)
-#define UBRLCR1 (0x04c0)
-#define SYNCIO (0x0500)
-#define PALLSW (0x0540)
-#define PALMSW (0x0580)
-#define STFCLR (0x05c0)
-#define BLEOI (0x0600)
-#define MCEOI (0x0640)
-#define TEOI (0x0680)
-#define TC1EOI (0x06c0)
-#define TC2EOI (0x0700)
-#define RTCEOI (0x0740)
-#define UMSEOI (0x0780)
-#define COEOI (0x07c0)
-#define HALT (0x0800)
-#define STDBY (0x0840)
-
-#define FBADDR (0x1000)
-#define SYSCON2 (0x1100)
-#define SYSFLG2 (0x1140)
-#define INTSR2 (0x1240)
-#define INTMR2 (0x1280)
-#define UARTDR2 (0x1480)
-#define UBRLCR2 (0x14c0)
-#define SS2DR (0x1500)
-#define SRXEOF (0x1600)
-#define SS2POP (0x16c0)
-#define KBDEOI (0x1700)
-
-/* common bits: SYSCON1 / SYSCON2 */
-#define SYSCON_UARTEN (1 << 8)
-
-#define SYSCON1_KBDSCAN(x) ((x) & 15)
-#define SYSCON1_KBDSCANMASK (15)
-#define SYSCON1_TC1M (1 << 4)
-#define SYSCON1_TC1S (1 << 5)
-#define SYSCON1_TC2M (1 << 6)
-#define SYSCON1_TC2S (1 << 7)
-#define SYSCON1_UART1EN SYSCON_UARTEN
-#define SYSCON1_BZTOG (1 << 9)
-#define SYSCON1_BZMOD (1 << 10)
-#define SYSCON1_DBGEN (1 << 11)
-#define SYSCON1_LCDEN (1 << 12)
-#define SYSCON1_CDENTX (1 << 13)
-#define SYSCON1_CDENRX (1 << 14)
-#define SYSCON1_SIREN (1 << 15)
-#define SYSCON1_ADCKSEL(x) (((x) & 3) << 16)
-#define SYSCON1_ADCKSEL_MASK (3 << 16)
-#define SYSCON1_EXCKEN (1 << 18)
-#define SYSCON1_WAKEDIS (1 << 19)
-#define SYSCON1_IRTXM (1 << 20)
-
-/* common bits: SYSFLG1 / SYSFLG2 */
-#define SYSFLG_UBUSY (1 << 11)
-#define SYSFLG_URXFE (1 << 22)
-#define SYSFLG_UTXFF (1 << 23)
-
-#define SYSFLG1_MCDR (1 << 0)
-#define SYSFLG1_DCDET (1 << 1)
-#define SYSFLG1_WUDR (1 << 2)
-#define SYSFLG1_WUON (1 << 3)
-#define SYSFLG1_CTS (1 << 8)
-#define SYSFLG1_DSR (1 << 9)
-#define SYSFLG1_DCD (1 << 10)
-#define SYSFLG1_UBUSY SYSFLG_UBUSY
-#define SYSFLG1_NBFLG (1 << 12)
-#define SYSFLG1_RSTFLG (1 << 13)
-#define SYSFLG1_PFFLG (1 << 14)
-#define SYSFLG1_CLDFLG (1 << 15)
-#define SYSFLG1_URXFE SYSFLG_URXFE
-#define SYSFLG1_UTXFF SYSFLG_UTXFF
-#define SYSFLG1_CRXFE (1 << 24)
-#define SYSFLG1_CTXFF (1 << 25)
-#define SYSFLG1_SSIBUSY (1 << 26)
-#define SYSFLG1_ID (1 << 29)
-
-#define SYSFLG2_SSRXOF (1 << 0)
-#define SYSFLG2_RESVAL (1 << 1)
-#define SYSFLG2_RESFRM (1 << 2)
-#define SYSFLG2_SS2RXFE (1 << 3)
-#define SYSFLG2_SS2TXFF (1 << 4)
-#define SYSFLG2_SS2TXUF (1 << 5)
-#define SYSFLG2_CKMODE (1 << 6)
-#define SYSFLG2_UBUSY SYSFLG_UBUSY
-#define SYSFLG2_URXFE SYSFLG_URXFE
-#define SYSFLG2_UTXFF SYSFLG_UTXFF
-
-#define LCDCON_GSEN (1 << 30)
-#define LCDCON_GSMD (1 << 31)
-
-#define SYSCON2_SERSEL (1 << 0)
-#define SYSCON2_KBD6 (1 << 1)
-#define SYSCON2_DRAMZ (1 << 2)
-#define SYSCON2_KBWEN (1 << 3)
-#define SYSCON2_SS2TXEN (1 << 4)
-#define SYSCON2_PCCARD1 (1 << 5)
-#define SYSCON2_PCCARD2 (1 << 6)
-#define SYSCON2_SS2RXEN (1 << 7)
-#define SYSCON2_UART2EN SYSCON_UARTEN
-#define SYSCON2_SS2MAEN (1 << 9)
-#define SYSCON2_OSTB (1 << 12)
-#define SYSCON2_CLKENSL (1 << 13)
-#define SYSCON2_BUZFREQ (1 << 14)
-
-/* common bits: UARTDR1 / UARTDR2 */
-#define UARTDR_FRMERR (1 << 8)
-#define UARTDR_PARERR (1 << 9)
-#define UARTDR_OVERR (1 << 10)
-
-/* common bits: UBRLCR1 / UBRLCR2 */
-#define UBRLCR_BAUD_MASK ((1 << 12) - 1)
-#define UBRLCR_BREAK (1 << 12)
-#define UBRLCR_PRTEN (1 << 13)
-#define UBRLCR_EVENPRT (1 << 14)
-#define UBRLCR_XSTOP (1 << 15)
-#define UBRLCR_FIFOEN (1 << 16)
-#define UBRLCR_WRDLEN5 (0 << 17)
-#define UBRLCR_WRDLEN6 (1 << 17)
-#define UBRLCR_WRDLEN7 (2 << 17)
-#define UBRLCR_WRDLEN8 (3 << 17)
-#define UBRLCR_WRDLEN_MASK (3 << 17)
-
-#define SYNCIO_SMCKEN (1 << 13)
-#define SYNCIO_TXFRMEN (1 << 14)
-
-#endif /* __ASM_HARDWARE_CLPS7111_H */
diff --git a/arch/arm/include/asm/hardware/cs89712.h b/arch/arm/include/asm/hardware/cs89712.h
deleted file mode 100644
index f75626933e94..000000000000
--- a/arch/arm/include/asm/hardware/cs89712.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/cs89712.h
- *
- * This file contains the hardware definitions of the CS89712
- * additional internal registers.
- *
- * Copyright (C) 2001 Thomas Gleixner autronix automation <gleixner@autronix.de>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef __ASM_HARDWARE_CS89712_H
-#define __ASM_HARDWARE_CS89712_H
-
-/*
-* CS89712 additional registers
-*/
-
-#define PCDR 0x0002 /* Port C Data register ---------------------------- */
-#define PCDDR 0x0042 /* Port C Data Direction register ------------------ */
-#define SDCONF 0x2300 /* SDRAM Configuration register ---------------------*/
-#define SDRFPR 0x2340 /* SDRAM Refresh period register --------------------*/
-
-#define SDCONF_ACTIVE (1 << 10)
-#define SDCONF_CLKCTL (1 << 9)
-#define SDCONF_WIDTH_4 (0 << 7)
-#define SDCONF_WIDTH_8 (1 << 7)
-#define SDCONF_WIDTH_16 (2 << 7)
-#define SDCONF_WIDTH_32 (3 << 7)
-#define SDCONF_SIZE_16 (0 << 5)
-#define SDCONF_SIZE_64 (1 << 5)
-#define SDCONF_SIZE_128 (2 << 5)
-#define SDCONF_SIZE_256 (3 << 5)
-#define SDCONF_CASLAT_2 (2)
-#define SDCONF_CASLAT_3 (3)
-
-#endif /* __ASM_HARDWARE_CS89712_H */
diff --git a/arch/arm/include/asm/hardware/ep7211.h b/arch/arm/include/asm/hardware/ep7211.h
deleted file mode 100644
index 654d5f625c49..000000000000
--- a/arch/arm/include/asm/hardware/ep7211.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/ep7211.h
- *
- * This file contains the hardware definitions of the EP7211 internal
- * registers.
- *
- * Copyright (C) 2001 Blue Mug, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef __ASM_HARDWARE_EP7211_H
-#define __ASM_HARDWARE_EP7211_H
-
-#include <asm/hardware/clps7111.h>
-
-/*
- * define EP7211_BASE to be the base address of the region
- * you want to access.
- */
-
-#define EP7211_PHYS_BASE (0x80000000)
-
-/*
- * XXX miket@bluemug.com: need to introduce EP7211 registers (those not
- * present in 7212) here.
- */
-
-#endif /* __ASM_HARDWARE_EP7211_H */
diff --git a/arch/arm/include/asm/hardware/ep7212.h b/arch/arm/include/asm/hardware/ep7212.h
deleted file mode 100644
index 3b43bbeaf1db..000000000000
--- a/arch/arm/include/asm/hardware/ep7212.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/ep7212.h
- *
- * This file contains the hardware definitions of the EP7212 internal
- * registers.
- *
- * Copyright (C) 2000 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef __ASM_HARDWARE_EP7212_H
-#define __ASM_HARDWARE_EP7212_H
-
-/*
- * define EP7212_BASE to be the base address of the region
- * you want to access.
- */
-
-#define EP7212_PHYS_BASE (0x80000000)
-
-#ifndef __ASSEMBLY__
-#define ep_readl(off) __raw_readl(EP7212_BASE + (off))
-#define ep_writel(val,off) __raw_writel(val, EP7212_BASE + (off))
-#endif
-
-/*
- * These registers are specific to the EP7212 only
- */
-#define DAIR 0x2000
-#define DAIR0 0x2040
-#define DAIDR1 0x2080
-#define DAIDR2 0x20c0
-#define DAISR 0x2100
-#define SYSCON3 0x2200
-#define INTSR3 0x2240
-#define INTMR3 0x2280
-#define LEDFLSH 0x22c0
-
-#define DAIR_DAIEN (1 << 16)
-#define DAIR_ECS (1 << 17)
-#define DAIR_LCTM (1 << 19)
-#define DAIR_LCRM (1 << 20)
-#define DAIR_RCTM (1 << 21)
-#define DAIR_RCRM (1 << 22)
-#define DAIR_LBM (1 << 23)
-
-#define DAIDR2_FIFOEN (1 << 15)
-#define DAIDR2_FIFOLEFT (0x0d << 16)
-#define DAIDR2_FIFORIGHT (0x11 << 16)
-
-#define DAISR_RCTS (1 << 0)
-#define DAISR_RCRS (1 << 1)
-#define DAISR_LCTS (1 << 2)
-#define DAISR_LCRS (1 << 3)
-#define DAISR_RCTU (1 << 4)
-#define DAISR_RCRO (1 << 5)
-#define DAISR_LCTU (1 << 6)
-#define DAISR_LCRO (1 << 7)
-#define DAISR_RCNF (1 << 8)
-#define DAISR_RCNE (1 << 9)
-#define DAISR_LCNF (1 << 10)
-#define DAISR_LCNE (1 << 11)
-#define DAISR_FIFO (1 << 12)
-
-#define SYSCON3_ADCCON (1 << 0)
-#define SYSCON3_DAISEL (1 << 3)
-#define SYSCON3_ADCCKNSEN (1 << 4)
-#define SYSCON3_FASTWAKE (1 << 8)
-#define SYSCON3_DAIEN (1 << 9)
-
-#endif /* __ASM_HARDWARE_EP7212_H */
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h
index 73f84fa4f366..d36a73d7c0e8 100644
--- a/arch/arm/include/asm/hardware/it8152.h
+++ b/arch/arm/include/asm/hardware/it8152.h
@@ -110,6 +110,6 @@ extern void it8152_irq_demux(unsigned int irq, struct irq_desc *desc);
extern void it8152_init_irq(void);
extern int it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
extern int it8152_pci_setup(int nr, struct pci_sys_data *sys);
-extern struct pci_bus *it8152_pci_scan_bus(int nr, struct pci_sys_data *sys);
+extern struct pci_ops it8152_ops;
#endif /* __ASM_HARDWARE_IT8152_H */
diff --git a/arch/arm/include/asm/hardware/pl080.h b/arch/arm/include/asm/hardware/pl080.h
index 33c78d7af2e1..4eea2107214b 100644
--- a/arch/arm/include/asm/hardware/pl080.h
+++ b/arch/arm/include/asm/hardware/pl080.h
@@ -102,6 +102,8 @@
#define PL080_WIDTH_16BIT (0x1)
#define PL080_WIDTH_32BIT (0x2)
+#define PL080N_CONFIG_ITPROT (1 << 20)
+#define PL080N_CONFIG_SECPROT (1 << 19)
#define PL080_CONFIG_HALT (1 << 18)
#define PL080_CONFIG_ACTIVE (1 << 17) /* RO */
#define PL080_CONFIG_LOCK (1 << 16)
diff --git a/arch/arm/include/asm/hardware/uengine.h b/arch/arm/include/asm/hardware/uengine.h
deleted file mode 100644
index b442d65c6593..000000000000
--- a/arch/arm/include/asm/hardware/uengine.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Generic library functions for the microengines found on the Intel
- * IXP2000 series of network processors.
- *
- * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
- * Dedicated to Marija Kulikova.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of the
- * License, or (at your option) any later version.
- */
-
-#ifndef __IXP2000_UENGINE_H
-#define __IXP2000_UENGINE_H
-
-extern u32 ixp2000_uengine_mask;
-
-struct ixp2000_uengine_code
-{
- u32 cpu_model_bitmask;
- u8 cpu_min_revision;
- u8 cpu_max_revision;
-
- u32 uengine_parameters;
-
- struct ixp2000_reg_value {
- int reg;
- u32 value;
- } *initial_reg_values;
-
- int num_insns;
- u8 *insns;
-};
-
-u32 ixp2000_uengine_csr_read(int uengine, int offset);
-void ixp2000_uengine_csr_write(int uengine, int offset, u32 value);
-void ixp2000_uengine_reset(u32 uengine_mask);
-void ixp2000_uengine_set_mode(int uengine, u32 mode);
-void ixp2000_uengine_load_microcode(int uengine, u8 *ucode, int insns);
-void ixp2000_uengine_init_context(int uengine, int context, int pc);
-void ixp2000_uengine_start_contexts(int uengine, u8 ctx_mask);
-void ixp2000_uengine_stop_contexts(int uengine, u8 ctx_mask);
-int ixp2000_uengine_load(int uengine, struct ixp2000_uengine_code *c);
-
-#define IXP2000_UENGINE_8_CONTEXTS 0x00000000
-#define IXP2000_UENGINE_4_CONTEXTS 0x80000000
-#define IXP2000_UENGINE_PRN_UPDATE_EVERY 0x40000000
-#define IXP2000_UENGINE_PRN_UPDATE_ON_ACCESS 0x00000000
-#define IXP2000_UENGINE_NN_FROM_SELF 0x00100000
-#define IXP2000_UENGINE_NN_FROM_PREVIOUS 0x00000000
-#define IXP2000_UENGINE_ASSERT_EMPTY_AT_3 0x000c0000
-#define IXP2000_UENGINE_ASSERT_EMPTY_AT_2 0x00080000
-#define IXP2000_UENGINE_ASSERT_EMPTY_AT_1 0x00040000
-#define IXP2000_UENGINE_ASSERT_EMPTY_AT_0 0x00000000
-#define IXP2000_UENGINE_LM_ADDR1_GLOBAL 0x00020000
-#define IXP2000_UENGINE_LM_ADDR1_PER_CONTEXT 0x00000000
-#define IXP2000_UENGINE_LM_ADDR0_GLOBAL 0x00010000
-#define IXP2000_UENGINE_LM_ADDR0_PER_CONTEXT 0x00000000
-
-
-#endif
diff --git a/arch/arm/include/asm/kvm_para.h b/arch/arm/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/arm/include/asm/kvm_para.h
@@ -0,0 +1 @@
+#include <asm-generic/kvm_para.h>
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index b36f3654bf54..a6efcdd6fd25 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -30,6 +30,7 @@ struct map_desc {
#define MT_MEMORY_DTCM 12
#define MT_MEMORY_ITCM 13
#define MT_MEMORY_SO 14
+#define MT_MEMORY_DMA_READY 15
#ifdef CONFIG_MMU
extern void iotable_init(struct map_desc *, int);
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index d943b7d20f11..26c511fddf8f 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -12,13 +12,14 @@
#define __ASM_MACH_PCI_H
struct pci_sys_data;
+struct pci_ops;
struct pci_bus;
struct hw_pci {
#ifdef CONFIG_PCI_DOMAINS
int domain;
#endif
- struct list_head buses;
+ struct pci_ops *ops;
int nr_controllers;
int (*setup)(int nr, struct pci_sys_data *);
struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
@@ -45,16 +46,10 @@ struct pci_sys_data {
u8 (*swizzle)(struct pci_dev *, u8 *);
/* IRQ mapping */
int (*map_irq)(const struct pci_dev *, u8, u8);
- struct hw_pci *hw;
void *private_data; /* platform controller private data */
};
/*
- * This is the standard PCI-PCI bridge swizzling algorithm.
- */
-#define pci_std_swizzle pci_common_swizzle
-
-/*
* Call this with your hw_pci struct to initialise the PCI system.
*/
void pci_common_init(struct hw_pci *);
@@ -62,22 +57,22 @@ void pci_common_init(struct hw_pci *);
/*
* PCI controllers
*/
+extern struct pci_ops iop3xx_ops;
extern int iop3xx_pci_setup(int nr, struct pci_sys_data *);
-extern struct pci_bus *iop3xx_pci_scan_bus(int nr, struct pci_sys_data *);
extern void iop3xx_pci_preinit(void);
extern void iop3xx_pci_preinit_cond(void);
+extern struct pci_ops dc21285_ops;
extern int dc21285_setup(int nr, struct pci_sys_data *);
-extern struct pci_bus *dc21285_scan_bus(int nr, struct pci_sys_data *);
extern void dc21285_preinit(void);
extern void dc21285_postinit(void);
+extern struct pci_ops via82c505_ops;
extern int via82c505_setup(int nr, struct pci_sys_data *);
-extern struct pci_bus *via82c505_scan_bus(int nr, struct pci_sys_data *);
extern void via82c505_init(void *sysdata);
+extern struct pci_ops pci_v3_ops;
extern int pci_v3_setup(int nr, struct pci_sys_data *);
-extern struct pci_bus *pci_v3_scan_bus(int nr, struct pci_sys_data *);
extern void pci_v3_preinit(void);
extern void pci_v3_postinit(void);
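
Each controller now exports a struct pci_ops, and hw_pci carries that ops pointer in place of the per-controller *_scan_bus() helpers. A hedged sketch of how a board file might fill in hw_pci after this change, using the IOP3xx hooks declared above; the map_irq callback is a placeholder:

#include <linux/init.h>
#include <linux/pci.h>
#include <asm/mach/pci.h>

static int __init board_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
        return 0;       /* placeholder: real boards map slot/pin to an IRQ */
}

static struct hw_pci board_pci __initdata = {
        .ops            = &iop3xx_ops,  /* was .scan = iop3xx_pci_scan_bus */
        .nr_controllers = 1,
        .setup          = iop3xx_pci_setup,
        .preinit        = iop3xx_pci_preinit,
        .map_irq        = board_pci_map_irq,
};

static int __init board_pci_init(void)
{
        pci_common_init(&board_pci);
        return 0;
}
subsys_initcall(board_pci_init);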
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index f73c908b7fa0..6ca945f534ab 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -42,4 +42,9 @@ struct sys_timer {
extern void timer_tick(void);
+struct timespec;
+typedef void (*clock_access_fn)(struct timespec *);
+extern int register_persistent_clock(clock_access_fn read_boot,
+ clock_access_fn read_persistent);
+
#endif
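
register_persistent_clock() lets a platform register its RTC-backed accessors with the time code rather than overriding the weak read_persistent_clock()/read_boot_clock() functions. A minimal sketch, assuming a hypothetical SoC RTC helper; only the persistent-clock accessor is registered here:

#include <linux/init.h>
#include <linux/time.h>
#include <asm/mach/time.h>

/* Hypothetical SoC helper: seconds since the epoch, read from the RTC. */
extern unsigned long board_rtc_read_seconds(void);

static void board_read_persistent(struct timespec *ts)
{
        ts->tv_sec  = board_rtc_read_seconds();
        ts->tv_nsec = 0;
}

static void __init board_init_time(void)
{
        /* No separate boot clock on this board, so read_boot stays NULL. */
        register_persistent_clock(NULL, board_read_persistent);
        /* clocksource/clockevent setup would follow here */
}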
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index b8e580a297e4..14965658a923 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -34,11 +34,4 @@ typedef struct {
#endif
-/*
- * switch_mm() may do a full cache flush over the context switch,
- * so enable interrupts over the context switch to avoid high
- * latency.
- */
-#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
-
#endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a0b3cac0547c..0306bc642c0d 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -43,45 +43,104 @@ void __check_kvm_seq(struct mm_struct *mm);
#define ASID_FIRST_VERSION (1 << ASID_BITS)
extern unsigned int cpu_last_asid;
-#ifdef CONFIG_SMP
-DECLARE_PER_CPU(struct mm_struct *, current_mm);
-#endif
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void __new_context(struct mm_struct *mm);
+void cpu_set_reserved_ttbr0(void);
-static inline void check_context(struct mm_struct *mm)
+static inline void switch_new_context(struct mm_struct *mm)
{
- /*
- * This code is executed with interrupts enabled. Therefore,
- * mm->context.id cannot be updated to the latest ASID version
- * on a different CPU (and condition below not triggered)
- * without first getting an IPI to reset the context. The
- * alternative is to take a read_lock on mm->context.id_lock
- * (after changing its type to rwlock_t).
- */
- if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
- __new_context(mm);
+ unsigned long flags;
+
+ __new_context(mm);
+
+ local_irq_save(flags);
+ cpu_switch_mm(mm->pgd, mm);
+ local_irq_restore(flags);
+}
+static inline void check_and_switch_context(struct mm_struct *mm,
+ struct task_struct *tsk)
+{
if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
__check_kvm_seq(mm);
+
+ /*
+ * Required during context switch to avoid speculative page table
+ * walking with the wrong TTBR.
+ */
+ cpu_set_reserved_ttbr0();
+
+ if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
+ /*
+ * The ASID is from the current generation, just switch to the
+ * new pgd. This condition is only true for calls from
+ * context_switch() and interrupts are already disabled.
+ */
+ cpu_switch_mm(mm->pgd, mm);
+ else if (irqs_disabled())
+ /*
+ * Defer the new ASID allocation until after the context
+ * switch critical region since __new_context() cannot be
+ * called with interrupts disabled (it sends IPIs).
+ */
+ set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+ else
+ /*
+ * That is a direct call to switch_mm() or activate_mm() with
+ * interrupts enabled and a new context.
+ */
+ switch_new_context(mm);
}
#define init_new_context(tsk,mm) (__init_new_context(tsk,mm),0)
-#else
-
-static inline void check_context(struct mm_struct *mm)
+#define finish_arch_post_lock_switch \
+ finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
{
+ if (test_and_clear_thread_flag(TIF_SWITCH_MM))
+ switch_new_context(current->mm);
+}
+
+#else /* !CONFIG_CPU_HAS_ASID */
+
#ifdef CONFIG_MMU
+
+static inline void check_and_switch_context(struct mm_struct *mm,
+ struct task_struct *tsk)
+{
if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
__check_kvm_seq(mm);
-#endif
+
+ if (irqs_disabled())
+ /*
+ * cpu_switch_mm() needs to flush the VIVT caches. To avoid
+ * high interrupt latencies, defer the call and continue
+ * running with the old mm. Since we only support UP systems
+ * on non-ASID CPUs, the old mm will remain valid until the
+ * finish_arch_post_lock_switch() call.
+ */
+ set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+ else
+ cpu_switch_mm(mm->pgd, mm);
}
+#define finish_arch_post_lock_switch \
+ finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+ if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
+ struct mm_struct *mm = current->mm;
+ cpu_switch_mm(mm->pgd, mm);
+ }
+}
+
+#endif /* CONFIG_MMU */
+
#define init_new_context(tsk,mm) 0
-#endif
+#endif /* CONFIG_CPU_HAS_ASID */
#define destroy_context(mm) do { } while(0)
@@ -119,12 +178,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
__flush_icache_all();
#endif
if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
-#ifdef CONFIG_SMP
- struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
- *crt_mm = next;
-#endif
- check_context(next);
- cpu_switch_mm(next->pgd, next);
+ check_and_switch_context(next, tsk);
if (cache_is_vivt())
cpumask_clear_cpu(cpu, mm_cpumask(prev));
}
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 5838361c48b3..ecf901902e44 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -34,7 +34,6 @@
* processor(s) we're building for.
*
* We have the following to choose from:
- * v3 - ARMv3
* v4wt - ARMv4 with writethrough cache, without minicache
* v4wb - ARMv4 with writeback cache, without minicache
* v4_mc - ARMv4 with minicache
@@ -44,14 +43,6 @@
#undef _USER
#undef MULTI_USER
-#ifdef CONFIG_CPU_COPY_V3
-# ifdef _USER
-# define MULTI_USER 1
-# else
-# define _USER v3
-# endif
-#endif
-
#ifdef CONFIG_CPU_COPY_V4WT
# ifdef _USER
# define MULTI_USER 1
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 759af70f9a0a..b24903549d1c 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -69,8 +69,6 @@
*/
#define L_PTE_PRESENT (_AT(pteval_t, 3) << 0) /* Valid */
#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
-#define L_PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */
-#define L_PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */
#define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
#define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 5ac8d3d3e025..99afa7498260 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -77,9 +77,6 @@ struct task_struct;
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
-/* Prepare to copy thread state - unlazy all lazy status */
-#define prepare_to_copy(tsk) do { } while (0)
-
unsigned long get_wchan(struct task_struct *p);
#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
@@ -88,8 +85,6 @@ unsigned long get_wchan(struct task_struct *p);
#define cpu_relax() barrier()
#endif
-void cpu_idle_wait(void);
-
/*
* Create a new kernel thread
*/
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 451808ba1211..355ece523f41 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -249,6 +249,11 @@ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
return regs->ARM_sp;
}
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+ return regs->ARM_sp;
+}
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
new file mode 100644
index 000000000000..c334a23ddf75
--- /dev/null
+++ b/arch/arm/include/asm/syscall.h
@@ -0,0 +1,93 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ */
+
+#ifndef _ASM_ARM_SYSCALL_H
+#define _ASM_ARM_SYSCALL_H
+
+#include <linux/err.h>
+
+extern const unsigned long sys_call_table[];
+
+static inline int syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return task_thread_info(task)->syscall;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ regs->ARM_r0 = regs->ARM_ORIG_r0;
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ unsigned long error = regs->ARM_r0;
+ return IS_ERR_VALUE(error) ? error : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->ARM_r0;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ regs->ARM_r0 = (long) error ? error : val;
+}
+
+#define SYSCALL_MAX_ARGS 7
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ unsigned long *args)
+{
+ if (i + n > SYSCALL_MAX_ARGS) {
+ unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
+ unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
+ pr_warning("%s called with max args %d, handling only %d\n",
+ __func__, i + n, SYSCALL_MAX_ARGS);
+ memset(args_bad, 0, n_bad * sizeof(args[0]));
+ n = SYSCALL_MAX_ARGS - i;
+ }
+
+ if (i == 0) {
+ args[0] = regs->ARM_ORIG_r0;
+ args++;
+ i++;
+ n--;
+ }
+
+ memcpy(args, &regs->ARM_r0 + i, n * sizeof(args[0]));
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ const unsigned long *args)
+{
+ if (i + n > SYSCALL_MAX_ARGS) {
+ pr_warning("%s called with max args %d, handling only %d\n",
+ __func__, i + n, SYSCALL_MAX_ARGS);
+ n = SYSCALL_MAX_ARGS - i;
+ }
+
+ if (i == 0) {
+ regs->ARM_ORIG_r0 = args[0];
+ args++;
+ i++;
+ n--;
+ }
+
+ memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0]));
+}
+
+#endif /* _ASM_ARM_SYSCALL_H */
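
These accessors implement the asm-generic/syscall.h contract, with ARM_ORIG_r0 standing in for argument 0 and for return-value rollback. A hedged sketch of a tracer-style caller reading the current syscall's number and arguments (illustrative only, not an existing kernel user):

#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <asm/syscall.h>

static void trace_current_syscall(struct pt_regs *regs)
{
        unsigned long args[6];
        int nr = syscall_get_nr(current, regs);

        /* Copy up to six arguments, starting at argument 0. */
        syscall_get_arguments(current, regs, 0, 6, args);

        pr_info("syscall %d(%lx, %lx, %lx, ...)\n",
                nr, args[0], args[1], args[2]);
}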
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 0f04d84582e1..68388eb4946b 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -153,6 +153,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 20
#define TIF_SECCOMP 21
+#define TIF_SWITCH_MM 22 /* deferred switch_mm */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 85fe61e73202..6e924d3a77eb 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -65,21 +65,6 @@
#define MULTI_TLB 1
#endif
-#define v3_tlb_flags (TLB_V3_FULL | TLB_V3_PAGE)
-
-#ifdef CONFIG_CPU_TLB_V3
-# define v3_possible_flags v3_tlb_flags
-# define v3_always_flags v3_tlb_flags
-# ifdef _TLB
-# define MULTI_TLB 1
-# else
-# define _TLB v3
-# endif
-#else
-# define v3_possible_flags 0
-# define v3_always_flags (-1UL)
-#endif
-
#define v4_tlb_flags (TLB_V4_U_FULL | TLB_V4_U_PAGE)
#ifdef CONFIG_CPU_TLB_V4WT
@@ -298,8 +283,7 @@ extern struct cpu_tlb_fns cpu_tlb;
* implemented the "%?" method, but this has been discontinued due to too
* many people getting it wrong.
*/
-#define possible_tlb_flags (v3_possible_flags | \
- v4_possible_flags | \
+#define possible_tlb_flags (v4_possible_flags | \
v4wbi_possible_flags | \
fr_possible_flags | \
v4wb_possible_flags | \
@@ -307,8 +291,7 @@ extern struct cpu_tlb_fns cpu_tlb;
v6wbi_possible_flags | \
v7wbi_possible_flags)
-#define always_tlb_flags (v3_always_flags & \
- v4_always_flags & \
+#define always_tlb_flags (v4_always_flags & \
v4wbi_always_flags & \
fr_always_flags & \
v4wb_always_flags & \