From 3bb447fc8bb6523cb1cec7a0277d831a2b0462b7 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Fri, 27 Jul 2007 12:29:08 +0200
Subject: [S390] Convert to smp_call_function_single.

smp_call_function_single now has the same semantics as s390's
smp_call_function_on. Therefore convert to the *single variant and
get rid of some architecture specific code.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/appldata/appldata_base.c | 12 ++++++------
 arch/s390/kernel/smp.c             | 16 +++++++---------
 arch/s390/kernel/vtime.c           |  2 +-
 3 files changed, 14 insertions(+), 16 deletions(-)
(limited to 'arch')

diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 6ffbab77ae4d..62391fb1f61f 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -173,7 +173,7 @@ int appldata_diag(char record_nr, u16 function, unsigned long buffer,
 /*
  * appldata_mod_vtimer_wrap()
  *
- * wrapper function for mod_virt_timer(), because smp_call_function_on()
+ * wrapper function for mod_virt_timer(), because smp_call_function_single()
  * accepts only one parameter.
  */
 static void __appldata_mod_vtimer_wrap(void *p) {
@@ -208,9 +208,9 @@ __appldata_vtimer_setup(int cmd)
 					num_online_cpus()) * TOD_MICRO;
 		for_each_online_cpu(i) {
 			per_cpu(appldata_timer, i).expires = per_cpu_interval;
-			smp_call_function_on(add_virt_timer_periodic,
-					     &per_cpu(appldata_timer, i),
-					     0, 1, i);
+			smp_call_function_single(i, add_virt_timer_periodic,
+						 &per_cpu(appldata_timer, i),
+						 0, 1);
 		}
 		appldata_timer_active = 1;
 		P_INFO("Monitoring timer started.\n");
@@ -236,8 +236,8 @@ __appldata_vtimer_setup(int cmd)
 			} args;
 			args.timer = &per_cpu(appldata_timer, i);
 			args.expires = per_cpu_interval;
-			smp_call_function_on(__appldata_mod_vtimer_wrap,
-					     &args, 0, 1, i);
+			smp_call_function_single(i, __appldata_mod_vtimer_wrap,
+						 &args, 0, 1);
 		}
 	}
 }
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 182c085ae4dd..aff9f853fc30 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -170,30 +170,28 @@ int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
 EXPORT_SYMBOL(smp_call_function);
 
 /*
- * smp_call_function_on:
+ * smp_call_function_single:
+ * @cpu: the CPU where func should run
  * @func: the function to run; this must be fast and non-blocking
  * @info: an arbitrary pointer to pass to the function
  * @nonatomic: unused
  * @wait: if true, wait (atomically) until function has completed on other CPUs
- * @cpu: the CPU where func should run
  *
  * Run a function on one processor.
  *
  * You must not call this function with disabled interrupts, from a
  * hardware interrupt handler or from a bottom half.
  */
-int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic,
-			 int wait, int cpu)
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+			     int nonatomic, int wait)
 {
-	cpumask_t map = CPU_MASK_NONE;
-
 	preempt_disable();
-	cpu_set(cpu, map);
-	__smp_call_function_map(func, info, nonatomic, wait, map);
+	__smp_call_function_map(func, info, nonatomic, wait,
+				cpumask_of_cpu(cpu));
 	preempt_enable();
 	return 0;
 }
-EXPORT_SYMBOL(smp_call_function_on);
+EXPORT_SYMBOL(smp_call_function_single);
 
 static void do_send_stop(void)
 {
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index b6ed143e8597..84ff78de6bac 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -415,7 +415,7 @@ EXPORT_SYMBOL(add_virt_timer_periodic);
 
 /*
  * If we change a pending timer the function must be called on the CPU
- * where the timer is running on, e.g. by smp_call_function_on()
+ * where the timer is running on, e.g. by smp_call_function_single()
  *
  * The original mod_timer adds the timer if it is not pending. For compatibility
  * we do the same. The timer will be added on the current CPU as a oneshot timer.
--
cgit v1.2.3


From 8da1aecde00b74d63123e6031155bbb1424b338d Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Fri, 27 Jul 2007 12:29:09 +0200
Subject: [S390] Improve __smp_call_function_map.

There is no need to disable bottom halves when holding call_lock.
Doing so could also imply that it is legal to call smp_call_function*
from bh context, which it is not.
Also test if func will be executed locally before disabling and
afterwards enabling interrupts again. It's not necessary to disable
and enable interrupts each time __smp_call_function_map gets called.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/smp.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)
(limited to 'arch')

diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index aff9f853fc30..03674fbe598f 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -120,7 +120,7 @@ static void __smp_call_function_map(void (*func) (void *info), void *info,
 	if (wait)
 		data.finished = CPU_MASK_NONE;
 
-	spin_lock_bh(&call_lock);
+	spin_lock(&call_lock);
 	call_data = &data;
 
 	for_each_cpu_mask(cpu, map)
@@ -129,18 +129,16 @@ static void __smp_call_function_map(void (*func) (void *info), void *info,
 	/* Wait for response */
 	while (!cpus_equal(map, data.started))
 		cpu_relax();
-
 	if (wait)
 		while (!cpus_equal(map, data.finished))
 			cpu_relax();
-
-	spin_unlock_bh(&call_lock);
-
+	spin_unlock(&call_lock);
 out:
-	local_irq_disable();
-	if (local)
+	if (local) {
+		local_irq_disable();
 		func(info);
-	local_irq_enable();
+		local_irq_enable();
+	}
 }
 
 /*
--
cgit v1.2.3


From e62133b4ea0d85888d9883a3e1c396ea8717bc26 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Fri, 27 Jul 2007 12:29:13 +0200
Subject: [S390] Get rid of new section mismatch warnings.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/head.S        | 1 +
 arch/s390/kernel/vmlinux.lds.S | 1 +
 arch/s390/mm/vmem.c            | 6 +++---
 3 files changed, 5 insertions(+), 3 deletions(-)
(limited to 'arch')

diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 8f8c802f1bcf..83477c7dc743 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -35,6 +35,7 @@
 #define ARCH_OFFSET	0
 #endif
 
+.section ".text.head","ax"
 #ifndef CONFIG_IPL
 	.org	0
 	.long	0x00080000,0x80000000+startup	# Just a restart PSW
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 6ab7d4ee13a4..b4622a3889b0 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -21,6 +21,7 @@ SECTIONS
   . = 0x00000000;
   _text = .;		/* Text and read-only data */
   .text : {
+	*(.text.head)
 	TEXT_TEXT
 	SCHED_TEXT
 	LOCK_TEXT
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 92a565190028..fd594d5fe142 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -29,8 +29,8 @@ struct memory_segment {
 
 static LIST_HEAD(mem_segs);
 
-void memmap_init(unsigned long size, int nid, unsigned long zone,
-		 unsigned long start_pfn)
+void __meminit memmap_init(unsigned long size, int nid, unsigned long zone,
+			   unsigned long start_pfn)
 {
 	struct page *start, *end;
 	struct page *map_start, *map_end;
@@ -66,7 +66,7 @@ void memmap_init(unsigned long size, int nid, unsigned long zone,
 	}
 }
 
-static inline void *vmem_alloc_pages(unsigned int order)
+static void __init_refok *vmem_alloc_pages(unsigned int order)
 {
 	if (slab_is_available())
 		return (void *)__get_free_pages(GFP_KERNEL, order);
--
cgit v1.2.3


From 7a8e0c8d9af43c9dfc62a8b2b9cc0484f48f7da4 Mon Sep 17 00:00:00 2001
From: Martin Schwidefsky
Date: Fri, 27 Jul 2007 12:29:16 +0200
Subject: [S390] Wire up sys_fallocate.

This patch implements support for the fallocate system call on the
s390(x) platform. A wrapper is added to address the issue the s390
ABI has with the arguments of this system call.

Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/compat_wrapper.S | 10 ++++++++++
 arch/s390/kernel/sys_s390.c       | 20 ++++++++++++++++++++
 arch/s390/kernel/syscalls.S       |  2 +-
 3 files changed, 31 insertions(+), 1 deletion(-)
(limited to 'arch')

diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index acc415457b45..6ee1bedbd1bf 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1710,3 +1710,13 @@ compat_sys_timerfd_wrapper:
 sys_eventfd_wrapper:
 	llgfr	%r2,%r2			# unsigned int
 	jg	sys_eventfd
+
+	.globl	sys_fallocate_wrapper
+sys_fallocate_wrapper:
+	lgfr	%r2,%r2			# int
+	lgfr	%r3,%r3			# int
+	sllg	%r4,%r4,32		# get high word of 64bit loff_t
+	lr	%r4,%r5			# get low word of 64bit loff_t
+	sllg	%r5,%r6,32		# get high word of 64bit loff_t
+	l	%r5,164(%r15)		# get low word of 64bit loff_t
+	jg	sys_fallocate
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index 1c90c7e99978..13e27bdb96e2 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -265,3 +265,23 @@ s390_fadvise64_64(struct fadvise64_64_args __user *args)
 		return -EFAULT;
 	return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
 }
+
+#ifndef CONFIG_64BIT
+/*
+ * This is a wrapper to call sys_fallocate(). For 31 bit s390 the last
+ * 64 bit argument "len" is split into the upper and lower 32 bits. The
+ * system call wrapper in the user space loads the value to %r6/%r7.
+ * The code in entry.S keeps the values in %r2 - %r6 where they are and
+ * stores %r7 to 96(%r15). But the standard C linkage requires that
+ * the whole 64 bit value for len is stored on the stack and doesn't
+ * use %r6 at all. So s390_fallocate has to convert the arguments from
+ *   %r2: fd, %r3: mode, %r4/%r5: offset, %r6/96(%r15)-99(%r15): len
+ * to
+ *   %r2: fd, %r3: mode, %r4/%r5: offset, 96(%r15)-103(%r15): len
+ */
+asmlinkage long s390_fallocate(int fd, int mode, loff_t offset,
+			       u32 len_high, u32 len_low)
+{
+	return sys_fallocate(fd, mode, offset, ((u64)len_high << 32) | len_low);
+}
+#endif
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 738feb4a0aad..9e26ed9fe4e7 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -322,7 +322,7 @@ NI_SYSCALL			/* 310 sys_move_pages */
 SYSCALL(sys_getcpu,sys_getcpu,sys_getcpu_wrapper)
 SYSCALL(sys_epoll_pwait,sys_epoll_pwait,compat_sys_epoll_pwait_wrapper)
 SYSCALL(sys_utimes,sys_utimes,compat_sys_utimes_wrapper)
-NI_SYSCALL			/* 314 sys_fallocate */
+SYSCALL(s390_fallocate,sys_fallocate,sys_fallocate_wrapper)
 SYSCALL(sys_utimensat,sys_utimensat,compat_sys_utimensat_wrapper)	/* 315 */
 SYSCALL(sys_signalfd,sys_signalfd,compat_sys_signalfd_wrapper)
 SYSCALL(sys_timerfd,sys_timerfd,compat_sys_timerfd_wrapper)
--
cgit v1.2.3


From b771aeac32a320ac52bc227252103d7d7fc48cad Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Fri, 27 Jul 2007 12:29:18 +0200
Subject: [S390] Fix IRQ tracing.

If a machine check is pending and the external or I/O interrupt handler
returns to userspace, io_mcck_pending is going to call s390_handle_mcck.
Before this happens, a call to TRACE_IRQS_ON has already been made, since
we know that we are going back to userspace and hence interrupts will be
enabled. So there was an indication that interrupts are enabled while in
reality they are still disabled.
s390_handle_mcck will do a local_irq_save/restore pair and confuse
lockdep, which later complains about inconsistent IRQ tracing.
To solve this, just call trace_hardirqs_off before calling
s390_handle_mcck and trace_hardirqs_on afterwards.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/kernel/entry.S   | 6 ++++--
 arch/s390/kernel/entry64.S | 6 ++++--
 2 files changed, 8 insertions(+), 4 deletions(-)
(limited to 'arch')

diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index bc7ff3658c3d..f3bceb165321 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -624,9 +624,11 @@ io_work_loop:
 # _TIF_MCCK_PENDING is set, call handler
 #
 io_mcck_pending:
+	TRACE_IRQS_OFF
 	l	%r1,BASED(.Ls390_handle_mcck)
-	la	%r14,BASED(io_work_loop)
-	br	%r1			# TIF bit will be cleared by handler
+	basr	%r14,%r1		# TIF bit will be cleared by handler
+	TRACE_IRQS_ON
+	b	BASED(io_work_loop)
 
 #
 # _TIF_NEED_RESCHED is set, call schedule
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 2a7b1304418b..9c0d5cc8269d 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -611,8 +611,10 @@ io_work_loop:
 # _TIF_MCCK_PENDING is set, call handler
 #
 io_mcck_pending:
-	larl	%r14,io_work_loop
-	jg	s390_handle_mcck	# TIF bit will be cleared by handler
+	TRACE_IRQS_OFF
+	brasl	%r14,s390_handle_mcck	# TIF bit will be cleared by handler
+	TRACE_IRQS_ON
+	j	io_work_loop
 
 #
 # _TIF_NEED_RESCHED is set, call schedule
--
cgit v1.2.3