author    James Hogan <james.hogan@imgtec.com>  2012-10-09 10:54:51 +0100
committer James Hogan <james.hogan@imgtec.com>  2013-03-02 20:09:49 +0000
commit    26025bbfbba33a9425be1b89eccb4664ea4c17b6 (patch)
tree      f8a7f754525812c4d758b638d9bcade297af67da /arch/metag/kernel
parent    5698c50d9da4ab2f57d98c64ea97675dcaf2a608 (diff)
metag: System Calls
Add metag system call and gateway page interfaces.

The metag architecture port uses the generic system call numbers from
asm-generic/unistd.h, as well as a user gateway page mapped at 0x6ffff000
which contains fast atomic primitives (depending on SMP) and a fast method
of accessing TLS data. System calls use the SWITCH instruction with the
immediate 0x440001 to signal a system call.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Diffstat (limited to 'arch/metag/kernel')
-rw-r--r--  arch/metag/kernel/sys_metag.c    180
-rw-r--r--  arch/metag/kernel/user_gateway.S  97
2 files changed, 277 insertions(+), 0 deletions(-)
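The helpers this patch exposes are reachable from user space at fixed
addresses. As a hedged illustration (not part of the commit), the C sketch
below shows how a user program might call them: the gateway base 0x6ffff000
comes from the commit message and the helper offsets (0 for get_tls, 64 for
cmpxchg) from user_gateway.S below, while the wrapper names and typedefs are
assumptions for illustration only.

/* Hypothetical userspace view of the gateway helpers (illustrative). */
#define USER_GATEWAY_BASE 0x6ffff000UL

/* Reference prototypes, taken from the comments in user_gateway.S. */
typedef void *(*kuser_get_tls_fn)(void);
typedef int (*kuser_cmpxchg_fn)(int oldval, int newval, unsigned long *ptr);

/* Each helper sits at a fixed, ABI-stable offset in the gateway page. */
#define __kuser_get_tls ((kuser_get_tls_fn)(USER_GATEWAY_BASE + 0))
#define __kuser_cmpxchg ((kuser_cmpxchg_fn)(USER_GATEWAY_BASE + 64))

static void *get_tls(void)
{
	return __kuser_get_tls();	/* fast TLS read, no system call */
}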
diff --git a/arch/metag/kernel/sys_metag.c b/arch/metag/kernel/sys_metag.c
new file mode 100644
index 000000000000..efe833a452f7
--- /dev/null
+++ b/arch/metag/kernel/sys_metag.c
@@ -0,0 +1,180 @@
+/*
+ * This file contains assorted system calls that have a
+ * non-standard calling sequence on the Linux/Meta
+ * platform.
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/syscalls.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/unistd.h>
+#include <asm/cacheflush.h>
+#include <asm/core_reg.h>
+#include <asm/global_lock.h>
+#include <asm/switch.h>
+#include <asm/syscall.h>
+#include <asm/syscalls.h>
+#include <asm/user_gateway.h>
+
+#define merge_64(hi, lo) ((((unsigned long long)(hi)) << 32) + \
+ ((lo) & 0xffffffffUL))
+
+int metag_mmap_check(unsigned long addr, unsigned long len,
+ unsigned long flags)
+{
+	/* Don't let anyone place a fixed mapping at the bottom of the
+	 * memory map; that region is reserved and must not be trampled
+	 * on.
+	 */
+ if ((flags & MAP_FIXED) && (addr < TASK_UNMAPPED_BASE))
+ return -EINVAL;
+
+ return 0;
+}
+
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+	/* mmap2 offsets are in 4096-byte units regardless of PAGE_SIZE. */
+ if (pgoff & ((1 << (PAGE_SHIFT - 12)) - 1))
+ return -EINVAL;
+
+ pgoff >>= PAGE_SHIFT - 12;
+
+ return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
+}
+
+asmlinkage int sys_metag_setglobalbit(char __user *addr, int mask)
+{
+ char tmp;
+ int ret = 0;
+ unsigned int flags;
+
+	if ((__force unsigned int)addr < LINCORE_BASE)
+ return -EFAULT;
+
+ __global_lock2(flags);
+
+ metag_data_cache_flush((__force void *)addr, sizeof(mask));
+
+ ret = __get_user(tmp, addr);
+ if (ret)
+ goto out;
+ tmp |= mask;
+ ret = __put_user(tmp, addr);
+
+ metag_data_cache_flush((__force void *)addr, sizeof(mask));
+
+out:
+ __global_unlock2(flags);
+
+ return ret;
+}
+
+#define TXDEFR_FPU_MASK ((0x1f << 16) | 0x1f)
+
+asmlinkage void sys_metag_set_fpu_flags(unsigned int flags)
+{
+ unsigned int temp;
+
+ flags &= TXDEFR_FPU_MASK;
+
+ temp = __core_reg_get(TXDEFR);
+ temp &= ~TXDEFR_FPU_MASK;
+ temp |= flags;
+ __core_reg_set(TXDEFR, temp);
+}
+
+asmlinkage int sys_metag_set_tls(void __user *ptr)
+{
+ current->thread.tls_ptr = ptr;
+ set_gateway_tls(ptr);
+
+ return 0;
+}
+
+asmlinkage void *sys_metag_get_tls(void)
+{
+ return (__force void *)current->thread.tls_ptr;
+}
+
+asmlinkage long sys_truncate64_metag(const char __user *path, unsigned long lo,
+ unsigned long hi)
+{
+ return sys_truncate64(path, merge_64(hi, lo));
+}
+
+asmlinkage long sys_ftruncate64_metag(unsigned int fd, unsigned long lo,
+ unsigned long hi)
+{
+ return sys_ftruncate64(fd, merge_64(hi, lo));
+}
+
+asmlinkage long sys_fadvise64_64_metag(int fd, unsigned long offs_lo,
+ unsigned long offs_hi,
+ unsigned long len_lo,
+ unsigned long len_hi, int advice)
+{
+ return sys_fadvise64_64(fd, merge_64(offs_hi, offs_lo),
+ merge_64(len_hi, len_lo), advice);
+}
+
+asmlinkage long sys_readahead_metag(int fd, unsigned long lo, unsigned long hi,
+ size_t count)
+{
+ return sys_readahead(fd, merge_64(hi, lo), count);
+}
+
+asmlinkage ssize_t sys_pread64_metag(unsigned long fd, char __user *buf,
+ size_t count, unsigned long lo,
+ unsigned long hi)
+{
+ return sys_pread64(fd, buf, count, merge_64(hi, lo));
+}
+
+asmlinkage ssize_t sys_pwrite64_metag(unsigned long fd, char __user *buf,
+ size_t count, unsigned long lo,
+ unsigned long hi)
+{
+ return sys_pwrite64(fd, buf, count, merge_64(hi, lo));
+}
+
+asmlinkage long sys_sync_file_range_metag(int fd, unsigned long offs_lo,
+ unsigned long offs_hi,
+ unsigned long len_lo,
+ unsigned long len_hi,
+ unsigned int flags)
+{
+ return sys_sync_file_range(fd, merge_64(offs_hi, offs_lo),
+ merge_64(len_hi, len_lo), flags);
+}
+
+/* Provide the actual syscall number to call mapping. */
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
+
+/*
+ * We need wrappers for anything with unaligned 64-bit arguments.
+ */
+#define sys_truncate64 sys_truncate64_metag
+#define sys_ftruncate64 sys_ftruncate64_metag
+#define sys_fadvise64_64 sys_fadvise64_64_metag
+#define sys_readahead sys_readahead_metag
+#define sys_pread64 sys_pread64_metag
+#define sys_pwrite64 sys_pwrite64_metag
+#define sys_sync_file_range sys_sync_file_range_metag
+
+/*
+ * Note that we can't include <linux/unistd.h> here since the header
+ * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
+ */
+const void *sys_call_table[__NR_syscalls] = {
+ [0 ... __NR_syscalls-1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
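All of the *_metag wrappers above exist because the generic syscalls take
64-bit arguments that Meta passes as two 32-bit values, low word first,
which merge_64() reassembles. A hedged sketch of the matching user-space
split (metag_pread64 is a hypothetical name; syscall() and __NR_pread64
are the standard interfaces):

#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Split a 64-bit file position into the (lo, hi) pair that
 * sys_pread64_metag() recombines with merge_64(hi, lo). */
ssize_t metag_pread64(int fd, void *buf, size_t count, uint64_t pos)
{
	unsigned long lo = (unsigned long)(pos & 0xffffffffUL);
	unsigned long hi = (unsigned long)(pos >> 32);

	return syscall(__NR_pread64, fd, buf, count, lo, hi);
}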
diff --git a/arch/metag/kernel/user_gateway.S b/arch/metag/kernel/user_gateway.S
new file mode 100644
index 000000000000..7167f3e8db6b
--- /dev/null
+++ b/arch/metag/kernel/user_gateway.S
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2010 Imagination Technologies Ltd.
+ *
+ * This file contains code that can be accessed from userspace and can
+ * access certain kernel data structures without the overhead of a system
+ * call.
+ */
+
+#include <asm/metag_regs.h>
+#include <asm/user_gateway.h>
+
+/*
+ * User helpers.
+ *
+ * These are segments of kernel-provided user code reachable from user
+ * space at a fixed address in kernel memory. They provide user space
+ * with operations that require kernel help because of unimplemented
+ * native features and/or instructions in some Meta CPUs. The idea is
+ * for this code to execute directly in user mode for best efficiency,
+ * but it is too intimate with its kernel counterpart to be left to
+ * user libraries. The kernel reserves the right to change this code
+ * as needed without warning. Only the entry points and their results
+ * are guaranteed to be stable.
+ *
+ * Each segment is 64-byte aligned. This mechanism should be used only
+ * for things that are really small and justified, not abused freely.
+ */
+ .text
+ .global ___user_gateway_start
+___user_gateway_start:
+
+ /* get_tls
+ * Offset: 0
+ * Description: Get the TLS pointer for this process.
+ */
+ .global ___kuser_get_tls
+ .type ___kuser_get_tls,function
+___kuser_get_tls:
+ MOVT D1Ar1,#HI(USER_GATEWAY_PAGE + USER_GATEWAY_TLS)
+ ADD D1Ar1,D1Ar1,#LO(USER_GATEWAY_PAGE + USER_GATEWAY_TLS)
+ MOV D1Ar3,TXENABLE
+ AND D1Ar3,D1Ar3,#(TXENABLE_THREAD_BITS)
+ LSR D1Ar3,D1Ar3,#(TXENABLE_THREAD_S - 2)
+ GETD D0Re0,[D1Ar1+D1Ar3]
+___kuser_get_tls_end: /* Beyond this point the read will complete */
+ MOV PC,D1RtP
+ .size ___kuser_get_tls,.-___kuser_get_tls
+ .global ___kuser_get_tls_end
+
+ /* cmpxchg
+ * Offset: 64
+ * Description: Replace the value at 'ptr' with 'newval' if the current
+ * value is 'oldval'. Return zero if we succeeded,
+ * non-zero otherwise.
+ *
+ * Reference prototype:
+ *
+ * int __kuser_cmpxchg(int oldval, int newval, unsigned long *ptr)
+ *
+ */
+ .balign 64
+ .global ___kuser_cmpxchg
+ .type ___kuser_cmpxchg,function
+___kuser_cmpxchg:
+#ifdef CONFIG_SMP
+ /*
+	 * We must use LNKGET/LNKSET with an SMP kernel because the plain
+	 * GETD/SETD sequence does not provide atomicity across multiple CPUs.
+ */
+0: LNKGETD D0Re0,[D1Ar3]
+ CMP D0Re0,D1Ar1
+ LNKSETDZ [D1Ar3],D0Ar2
+ BNZ 1f
+ DEFR D0Re0,TXSTAT
+ ANDT D0Re0,D0Re0,#HI(0x3f000000)
+ CMPT D0Re0,#HI(0x02000000)
+ BNE 0b
+#ifdef CONFIG_METAG_LNKGET_AROUND_CACHE
+ DCACHE [D1Ar3], D0Re0
+#endif
+1: MOV D0Re0,#1
+ XORZ D0Re0,D0Re0,D0Re0
+ MOV PC,D1RtP
+#else
+ GETD D0Re0,[D1Ar3]
+ CMP D0Re0,D1Ar1
+ SETDZ [D1Ar3],D0Ar2
+___kuser_cmpxchg_end: /* Beyond this point the write will complete */
+ MOV D0Re0,#1
+ XORZ D0Re0,D0Re0,D0Re0
+ MOV PC,D1RtP
+#endif /* CONFIG_SMP */
+ .size ___kuser_cmpxchg,.-___kuser_cmpxchg
+ .global ___kuser_cmpxchg_end
+
+ .global ___user_gateway_end
+___user_gateway_end:
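The cmpxchg helper returns zero only when the conditional store succeeded
(on SMP, when the LNKSET was not defeated by another thread between the
LNKGET and the LNKSET), so callers are expected to retry on failure. A
hedged usage sketch, reusing the hypothetical __kuser_cmpxchg wrapper from
the sketch above:

/* Atomically increment *counter through the gateway helper. A
 * non-zero return means another thread changed the word between
 * our read and the conditional store, so reload and retry. */
static void gateway_atomic_inc(unsigned long *counter)
{
	int old;

	do {
		old = (int)*counter;
	} while (__kuser_cmpxchg(old, old + 1, counter) != 0);
}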