Diffstat (limited to 'arch/cris')
-rw-r--r--  arch/cris/arch-v10/mm/fault.c               |  3
-rw-r--r--  arch/cris/arch-v10/mm/tlb.c                 | 22
-rw-r--r--  arch/cris/arch-v32/kernel/head.S            |  1
-rw-r--r--  arch/cris/include/arch-v32/arch/spinlock.h  | 62
-rw-r--r--  arch/cris/include/asm/asm-offsets.h         |  1
-rw-r--r--  arch/cris/include/asm/cacheflush.h          |  1
-rw-r--r--  arch/cris/include/asm/elf.h                 |  2
-rw-r--r--  arch/cris/include/asm/socket.h              |  2
-rw-r--r--  arch/cris/kernel/asm-offsets.c              |  1
-rw-r--r--  arch/cris/kernel/irq.c                      |  4
-rw-r--r--  arch/cris/kernel/sys_cris.c                 | 30
-rw-r--r--  arch/cris/kernel/vmlinux.lds.S              |  1
-rw-r--r--  arch/cris/mm/fault.c                        |  2
13 files changed, 42 insertions, 90 deletions
diff --git a/arch/cris/arch-v10/mm/fault.c b/arch/cris/arch-v10/mm/fault.c
index 087a2096f221..ed60588f8467 100644
--- a/arch/cris/arch-v10/mm/fault.c
+++ b/arch/cris/arch-v10/mm/fault.c
@@ -80,8 +80,7 @@ handle_mmu_bus_fault(struct pt_regs *regs)
* do_page_fault may have flushed the TLB so we have to restore
* the MMU registers.
*/
- local_save_flags(flags);
- local_irq_disable();
+ local_irq_save(flags);
pmd = (pmd_t *)(pgd + pgd_index(address));
if (pmd_none(*pmd))
goto exit;
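
The hunk above collapses a save-then-disable pair into the single irqflags helper; local_irq_save() records the current interrupt state and disables interrupts in one step. A minimal sketch of the pattern, assuming only the generic <linux/irqflags.h> API (the MMU register reloads themselves are elided):

#include <linux/irqflags.h>

static void restore_mmu_state_sketch(void)
{
	unsigned long flags;

	/* was: local_save_flags(flags); local_irq_disable(); */
	local_irq_save(flags);		/* save IRQ state, then disable */

	/* ... walk the page tables and reload the MMU/TLB registers ... */

	local_irq_restore(flags);	/* put the saved IRQ state back */
}
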
diff --git a/arch/cris/arch-v10/mm/tlb.c b/arch/cris/arch-v10/mm/tlb.c
index 4a496e4ffacc..21d78c599bab 100644
--- a/arch/cris/arch-v10/mm/tlb.c
+++ b/arch/cris/arch-v10/mm/tlb.c
@@ -134,28 +134,6 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
local_irq_restore(flags);
}
-/* dump the entire TLB for debug purposes */
-
-#if 0
-void
-dump_tlb_all(void)
-{
- int i;
- unsigned long flags;
-
- printk("TLB dump. LO is: pfn | reserved | global | valid | kernel | we |\n");
-
- local_save_flags(flags);
- local_irq_disable();
- for(i = 0; i < NUM_TLB_ENTRIES; i++) {
- *R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
- printk("Entry %d: HI 0x%08lx, LO 0x%08lx\n",
- i, *R_TLB_HI, *R_TLB_LO);
- }
- local_irq_restore(flags);
-}
-#endif
-
/*
* Initialize the context related info for a new mm_struct
* instance.
diff --git a/arch/cris/arch-v32/kernel/head.S b/arch/cris/arch-v32/kernel/head.S
index 3db478eb5155..76266f80a5f1 100644
--- a/arch/cris/arch-v32/kernel/head.S
+++ b/arch/cris/arch-v32/kernel/head.S
@@ -10,7 +10,6 @@
* The macros found in mmu_defs_asm.h uses the ## concatenation operator, so
* -traditional must not be used when assembling this file.
*/
-#include <linux/autoconf.h>
#include <arch/memmap.h>
#include <hwregs/reg_rdwr.h>
#include <hwregs/intr_vect.h>
diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h
index 367a53ea10c5..f171a6600fbc 100644
--- a/arch/cris/include/arch-v32/arch/spinlock.h
+++ b/arch/cris/include/arch-v32/arch/spinlock.h
@@ -9,12 +9,12 @@ extern void cris_spin_unlock(void *l, int val);
extern void cris_spin_lock(void *l);
extern int cris_spin_trylock(void *l);
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
return *(volatile signed char *)(&(x)->slock) <= 0;
}
-static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__asm__ volatile ("move.d %1,%0" \
: "=m" (lock->slock) \
@@ -22,26 +22,26 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
: "memory");
}
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
- while (__raw_spin_is_locked(lock))
+ while (arch_spin_is_locked(lock))
cpu_relax();
}
-static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return cris_spin_trylock((void *)&lock->slock);
}
-static inline void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void arch_spin_lock(arch_spinlock_t *lock)
{
cris_spin_lock((void *)&lock->slock);
}
static inline void
-__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
- __raw_spin_lock(lock);
+ arch_spin_lock(lock);
}
/*
@@ -56,76 +56,76 @@ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
*
*/
-static inline int __raw_read_can_lock(raw_rwlock_t *x)
+static inline int arch_read_can_lock(arch_rwlock_t *x)
{
return (int)(x)->lock > 0;
}
-static inline int __raw_write_can_lock(raw_rwlock_t *x)
+static inline int arch_write_can_lock(arch_rwlock_t *x)
{
return (x)->lock == RW_LOCK_BIAS;
}
-static inline void __raw_read_lock(raw_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
{
- __raw_spin_lock(&rw->slock);
+ arch_spin_lock(&rw->slock);
while (rw->lock == 0);
rw->lock--;
- __raw_spin_unlock(&rw->slock);
+ arch_spin_unlock(&rw->slock);
}
-static inline void __raw_write_lock(raw_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
{
- __raw_spin_lock(&rw->slock);
+ arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
rw->lock = 0;
- __raw_spin_unlock(&rw->slock);
+ arch_spin_unlock(&rw->slock);
}
-static inline void __raw_read_unlock(raw_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
{
- __raw_spin_lock(&rw->slock);
+ arch_spin_lock(&rw->slock);
rw->lock++;
- __raw_spin_unlock(&rw->slock);
+ arch_spin_unlock(&rw->slock);
}
-static inline void __raw_write_unlock(raw_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
{
- __raw_spin_lock(&rw->slock);
+ arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
rw->lock = RW_LOCK_BIAS;
- __raw_spin_unlock(&rw->slock);
+ arch_spin_unlock(&rw->slock);
}
-static inline int __raw_read_trylock(raw_rwlock_t *rw)
+static inline int arch_read_trylock(arch_rwlock_t *rw)
{
int ret = 0;
- __raw_spin_lock(&rw->slock);
+ arch_spin_lock(&rw->slock);
if (rw->lock != 0) {
rw->lock--;
ret = 1;
}
- __raw_spin_unlock(&rw->slock);
+ arch_spin_unlock(&rw->slock);
return ret;
}
-static inline int __raw_write_trylock(raw_rwlock_t *rw)
+static inline int arch_write_trylock(arch_rwlock_t *rw)
{
int ret = 0;
- __raw_spin_lock(&rw->slock);
+ arch_spin_lock(&rw->slock);
if (rw->lock == RW_LOCK_BIAS) {
rw->lock = 0;
ret = 1;
}
- __raw_spin_unlock(&rw->slock);
+ arch_spin_unlock(&rw->slock);
return 1;
}
#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_ARCH_SPINLOCK_H */
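
The renames in this header follow the kernel-wide switch from the __raw_*/raw_*_t locking names to arch_*/arch_*_t, which freed the raw_* names for the non-sleeping lock layer. A minimal sketch of driving the arch-level primitives directly, assuming the post-rename arch_spinlock_t type and the __ARCH_SPIN_LOCK_UNLOCKED initializer; ordinary kernel code keeps using spinlock_t and spin_lock(), which reach this layer indirectly:

#include <linux/spinlock.h>

static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int demo_counter;

static void demo_bump(void)
{
	arch_spin_lock(&demo_lock);	/* on CRISv32 this ends up in cris_spin_lock() */
	demo_counter++;
	arch_spin_unlock(&demo_lock);
}
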
diff --git a/arch/cris/include/asm/asm-offsets.h b/arch/cris/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/arch/cris/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/cris/include/asm/cacheflush.h b/arch/cris/include/asm/cacheflush.h
index cf60e3f69f8d..36795bca605e 100644
--- a/arch/cris/include/asm/cacheflush.h
+++ b/arch/cris/include/asm/cacheflush.h
@@ -12,6 +12,7 @@
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
diff --git a/arch/cris/include/asm/elf.h b/arch/cris/include/asm/elf.h
index 0f51b10b9f4f..8a3d8e2b33c1 100644
--- a/arch/cris/include/asm/elf.h
+++ b/arch/cris/include/asm/elf.h
@@ -64,8 +64,6 @@ typedef unsigned long elf_fpregset_t;
#define EF_CRIS_VARIANT_COMMON_V10_V32 0x00000004
/* End of excerpt from {binutils}/include/elf/cris.h. */
-#define USE_ELF_CORE_DUMP
-
#define ELF_EXEC_PAGESIZE 8192
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/cris/include/asm/socket.h b/arch/cris/include/asm/socket.h
index 45ec49bdb7b1..1a4a61909ca8 100644
--- a/arch/cris/include/asm/socket.h
+++ b/arch/cris/include/asm/socket.h
@@ -62,6 +62,8 @@
#define SO_PROTOCOL 38
#define SO_DOMAIN 39
+#define SO_RXQ_OVFL 40
+
#endif /* _ASM_SOCKET_H */
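
SO_RXQ_OVFL (40) lets a datagram receiver ask the kernel for a running count of packets dropped because the socket receive queue overflowed; the count is delivered as SOL_SOCKET/SO_RXQ_OVFL ancillary data on recvmsg(). A hedged user-space sketch, assuming a libc that exposes the constant (the fallback define mirrors the value added above):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>

#ifndef SO_RXQ_OVFL
#define SO_RXQ_OVFL 40		/* value added by this patch */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int on = 1;
	struct sockaddr_in addr = { .sin_family = AF_INET,
				    .sin_port = htons(9999) };

	/* ask for per-packet drop counts as ancillary data */
	setsockopt(fd, SOL_SOCKET, SO_RXQ_OVFL, &on, sizeof(on));
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));

	char data[2048], cbuf[CMSG_SPACE(sizeof(uint32_t))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
			      .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };

	if (recvmsg(fd, &msg, 0) >= 0) {
		for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c)) {
			if (c->cmsg_level == SOL_SOCKET && c->cmsg_type == SO_RXQ_OVFL) {
				uint32_t dropped;
				memcpy(&dropped, CMSG_DATA(c), sizeof(dropped));
				printf("receive queue drops so far: %u\n", dropped);
			}
		}
	}
	return 0;
}
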
diff --git a/arch/cris/kernel/asm-offsets.c b/arch/cris/kernel/asm-offsets.c
index ddd6fbbe75de..dd7b8e983221 100644
--- a/arch/cris/kernel/asm-offsets.c
+++ b/arch/cris/kernel/asm-offsets.c
@@ -1,6 +1,5 @@
#include <linux/sched.h>
#include <asm/thread_info.h>
-#include <linux/autoconf.h>
/*
* Generate definitions needed by assembly language modules.
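
The explicit include could go because kbuild now injects the generated config header into every compilation unit; asm-offsets.c itself only exists to emit assembler-visible constants. A minimal sketch of that mechanism, assuming the standard kbuild DEFINE()/BLANK() helpers (the offsets shown are illustrative, not the exact CRIS set):

#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/kbuild.h>

int main(void)
{
	/* each DEFINE() plants a "->NAME value" marker in the compiler's
	 * assembly output, which kbuild rewrites into generated/asm-offsets.h */
	DEFINE(TASK_pid, offsetof(struct task_struct, pid));
	BLANK();
	return 0;
}
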
diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c
index 0ca7d9892cc6..b5ce0724a88f 100644
--- a/arch/cris/kernel/irq.c
+++ b/arch/cris/kernel/irq.c
@@ -52,7 +52,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}
return 0;
}
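
The irq_desc lock became a raw spinlock, i.e. one that keeps spinning even on preempt-rt, so its users move to the raw_spin_* API. A minimal sketch of a show_interrupts-style walk under that lock, assuming the 2.6.33-era irq_desc layout and the irq_to_desc() accessor:

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

static void count_irq_actions_sketch(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;
	int n = 0;

	raw_spin_lock_irqsave(&desc->lock, flags);	/* raw lock: never sleeps */
	for (action = desc->action; action; action = action->next)
		n++;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	pr_info("irq %u: %d handler(s) registered\n", irq, n);
}
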
diff --git a/arch/cris/kernel/sys_cris.c b/arch/cris/kernel/sys_cris.c
index 2ad962c7e88e..c2bbb1ac98a9 100644
--- a/arch/cris/kernel/sys_cris.c
+++ b/arch/cris/kernel/sys_cris.c
@@ -26,31 +26,6 @@
#include <asm/uaccess.h>
#include <asm/segment.h>
-/* common code for old and new mmaps */
-static inline long
-do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
- unsigned long flags, unsigned long fd, unsigned long pgoff)
-{
- int error = -EBADF;
- struct file * file = NULL;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- goto out;
- }
-
- down_write(&current->mm->mmap_sem);
- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
- if (file)
- fput(file);
-out:
- return error;
-}
-
asmlinkage unsigned long old_mmap(unsigned long __user *args)
{
unsigned long buffer[6];
@@ -63,7 +38,7 @@ asmlinkage unsigned long old_mmap(unsigned long __user *args)
if (buffer[5] & ~PAGE_MASK) /* verify that offset is on page boundary */
goto out;
- err = do_mmap2(buffer[0], buffer[1], buffer[2], buffer[3],
+ err = sys_mmap_pgoff(buffer[0], buffer[1], buffer[2], buffer[3],
buffer[4], buffer[5] >> PAGE_SHIFT);
out:
return err;
@@ -73,7 +48,8 @@ asmlinkage long
sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
unsigned long flags, unsigned long fd, unsigned long pgoff)
{
- return do_mmap2(addr, len, prot, flags, fd, pgoff);
+ /* bug(?): 8Kb pages here */
+ return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
/*
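
With do_mmap2() gone, both entry points funnel into the common sys_mmap_pgoff(), which takes its offset in pages: old_mmap() checks that the byte offset is page aligned and shifts it right by PAGE_SHIFT, while sys_mmap2() passes the caller's page offset straight through. The "bug(?)" note appears to refer to CRIS's 8 KB pages (PAGE_SHIFT 13, matching ELF_EXEC_PAGESIZE above): mmap2 callers that assume a 4 KB offset unit would be off by a factor of two. A small user-space sketch of the conversion, with the page-size assumption spelled out:

#include <stdio.h>

#define CRIS_PAGE_SHIFT 13	/* assumption: 8 KB pages on CRIS */

int main(void)
{
	unsigned long byte_offset = 5UL << CRIS_PAGE_SHIFT;	/* page-aligned */

	/* old_mmap() rejects offsets that are not page aligned ... */
	if (byte_offset & ((1UL << CRIS_PAGE_SHIFT) - 1)) {
		fprintf(stderr, "offset not on a page boundary\n");
		return 1;
	}

	/* ... then converts bytes to pages before calling sys_mmap_pgoff() */
	unsigned long pgoff = byte_offset >> CRIS_PAGE_SHIFT;
	printf("byte offset %lu -> pgoff %lu\n", byte_offset, pgoff);
	return 0;
}
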
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S
index bbfda67d2907..d49d17d2a14f 100644
--- a/arch/cris/kernel/vmlinux.lds.S
+++ b/arch/cris/kernel/vmlinux.lds.S
@@ -8,7 +8,6 @@
* the kernel has booted.
*/
-#include <linux/autoconf.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/page.h>
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index 4a7cdd9ea1ee..380df1a73a6e 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -209,7 +209,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
/* Are we prepared to handle this kernel fault?
*
* (The kernel has valid exception-points in the source
- * when it acesses user-memory. When it fails in one
+ * when it accesses user-memory. When it fails in one
* of those points, we find it in a table and do a jump
* to some fixup code that loads an appropriate error
* code)
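
The comment fixed above describes the exception-table mechanism: every kernel instruction that may fault on a user access is registered together with a fixup stub, and the fault handler redirects execution to that stub instead of treating the fault as a kernel bug. A minimal sketch of the lookup, assuming the generic search_exception_tables() helper and an assignable instruction_pointer(); the CRIS-specific details may differ:

#include <linux/module.h>	/* search_exception_tables() */
#include <asm/ptrace.h>

static int try_kernel_fixup_sketch(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(instruction_pointer(regs));
	if (fixup) {
		/* resume at the fixup stub, which typically loads -EFAULT
		 * and jumps back past the faulting access */
		instruction_pointer(regs) = fixup->fixup;
		return 1;
	}
	return 0;	/* no fixup entry: a genuine kernel fault */
}
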