Diffstat (limited to 'arch')
-rw-r--r-- | arch/s390/include/asm/fpu-internal.h |  2
-rw-r--r-- | arch/s390/include/asm/switch_to.h    |  2
-rw-r--r-- | arch/s390/kernel/asm-offsets.c       |  8
-rw-r--r-- | arch/s390/kernel/compat_signal.c     |  6
-rw-r--r-- | arch/s390/kernel/entry.S             | 36
-rw-r--r-- | arch/s390/kernel/process.c           |  4
-rw-r--r-- | arch/s390/kernel/ptrace.c            | 12
-rw-r--r-- | arch/s390/kernel/signal.c            |  6
-rw-r--r-- | arch/s390/kernel/traps.c             |  6
-rw-r--r-- | arch/s390/kvm/kvm-s390.c             | 10
10 files changed, 45 insertions, 47 deletions
diff --git a/arch/s390/include/asm/fpu-internal.h b/arch/s390/include/asm/fpu-internal.h
index 237f8fcbe46b..55dc2c0fb40a 100644
--- a/arch/s390/include/asm/fpu-internal.h
+++ b/arch/s390/include/asm/fpu-internal.h
@@ -28,7 +28,7 @@ struct fpu {
 	};
 };
 
-void save_fpu_regs(struct fpu *fpu);
+void save_fpu_regs(void);
 
 #define is_vx_fpu(fpu) (!!((fpu)->flags & FPU_USE_VX))
 #define is_vx_task(tsk) (!!((tsk)->thread.fpu.flags & FPU_USE_VX))
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 0a4a3150b7d7..dcadfde32265 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -30,7 +30,7 @@ static inline void restore_access_regs(unsigned int *acrs)
 
 #define switch_to(prev,next,last) do {					\
 	if (prev->mm) {							\
-		save_fpu_regs(&prev->thread.fpu);			\
+		save_fpu_regs();					\
 		save_access_regs(&prev->thread.acrs[0]);		\
 		save_ri_cb(prev->thread.ri_cb);				\
 	} \
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 6bc42c08be09..48c9af7a7683 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -28,16 +28,14 @@ int main(void)
 	DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
 	BLANK();
 	DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp));
-	DEFINE(__THREAD_fpu, offsetof(struct task_struct, thread.fpu));
+	DEFINE(__THREAD_FPU_fpc, offsetof(struct thread_struct, fpu.fpc));
+	DEFINE(__THREAD_FPU_flags, offsetof(struct thread_struct, fpu.flags));
+	DEFINE(__THREAD_FPU_regs, offsetof(struct thread_struct, fpu.regs));
 	DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause));
 	DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address));
 	DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid));
 	DEFINE(__THREAD_trap_tdb, offsetof(struct thread_struct, trap_tdb));
 	BLANK();
-	DEFINE(__FPU_fpc, offsetof(struct fpu, fpc));
-	DEFINE(__FPU_flags, offsetof(struct fpu, flags));
-	DEFINE(__FPU_regs, offsetof(struct fpu, regs));
-	BLANK();
 	DEFINE(__TI_task, offsetof(struct thread_info, task));
 	DEFINE(__TI_flags, offsetof(struct thread_info, flags));
 	DEFINE(__TI_sysc_table, offsetof(struct thread_info, sys_call_table));
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 0b46fd4aa31e..eb4664238613 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -154,7 +154,7 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 static void store_sigregs(void)
 {
 	save_access_regs(current->thread.acrs);
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 }
 
 /* Load registers after signal return */
@@ -286,7 +286,7 @@ COMPAT_SYSCALL_DEFINE0(sigreturn)
 	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
 		goto badframe;
 	set_current_blocked(&set);
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	if (restore_sigregs32(regs, &frame->sregs))
 		goto badframe;
 	if (restore_sigregs_ext32(regs, &frame->sregs_ext))
@@ -309,7 +309,7 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
 	set_current_blocked(&set);
 	if (compat_restore_altstack(&frame->uc.uc_stack))
 		goto badframe;
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
 		goto badframe;
 	if (restore_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 21c1219122af..5a966dea937f 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -183,7 +183,6 @@ ENTRY(sie64a)
 	xc	__SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason
 	tm	__LC_CPU_FLAGS+7,_CIF_FPU	# load guest fp/vx registers ?
 	jno	.Lsie_load_guest_gprs
-	lg	%r12,__LC_THREAD_INFO		# load fp/vx regs save area
 	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
 .Lsie_load_guest_gprs:
 	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
@@ -752,14 +751,16 @@ ENTRY(psw_idle)
  * of the register contents at system call or io return.
  */
 ENTRY(save_fpu_regs)
+	lg	%r2,__LC_CURRENT
+	aghi	%r2,__TASK_thread
 	tm	__LC_CPU_FLAGS+7,_CIF_FPU
 	bor	%r14
-	stfpc	__FPU_fpc(%r2)
+	stfpc	__THREAD_FPU_fpc(%r2)
 .Lsave_fpu_regs_fpc_end:
-	lg	%r3,__FPU_regs(%r2)
+	lg	%r3,__THREAD_FPU_regs(%r2)
 	ltgr	%r3,%r3
 	jz	.Lsave_fpu_regs_done		# no save area -> set CIF_FPU
-	tm	__FPU_flags+3(%r2),FPU_USE_VX
+	tm	__THREAD_FPU_flags+3(%r2),FPU_USE_VX
 	jz	.Lsave_fpu_regs_fp		# no -> store FP regs
 .Lsave_fpu_regs_vx_low:
 	VSTM	%v0,%v15,0,%r3			# vstm 0,15,0(3)
@@ -794,20 +795,19 @@ ENTRY(save_fpu_regs)
  * FP/VX state, the vector-enablement control, CR0.46, is either set or cleared.
  *
  * There are special calling conventions to fit into sysc and io return work:
- *	%r12:	__LC_THREAD_INFO
 *	%r15:	<kernel stack>
 * The function requires:
 *	%r4 and __SF_EMPTY+32(%r15)
 */
 load_fpu_regs:
+	lg	%r4,__LC_CURRENT
+	aghi	%r4,__TASK_thread
 	tm	__LC_CPU_FLAGS+7,_CIF_FPU
 	bnor	%r14
-	lg	%r4,__TI_task(%r12)
-	la	%r4,__THREAD_fpu(%r4)
-	lfpc	__FPU_fpc(%r4)
+	lfpc	__THREAD_FPU_fpc(%r4)
 	stctg	%c0,%c0,__SF_EMPTY+32(%r15)	# store CR0
-	tm	__FPU_flags+3(%r4),FPU_USE_VX	# VX-enabled task ?
-	lg	%r4,__FPU_regs(%r4)		# %r4 <- reg save area
+	tm	__THREAD_FPU_flags+3(%r4),FPU_USE_VX	# VX-enabled task ?
+	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
 	jz	.Lload_fpu_regs_fp_ctl		# -> no VX, load FP regs
 .Lload_fpu_regs_vx_ctl:
 	tm	__SF_EMPTY+32+5(%r15),2		# test VX control
@@ -1190,13 +1190,14 @@ cleanup_critical:
 	jhe	2f
 	clg	%r9,BASED(.Lcleanup_save_fpu_fpc_end)
 	jhe	1f
+	lg	%r2,__LC_CURRENT
 0:	# Store floating-point controls
-	stfpc	__FPU_fpc(%r2)
+	stfpc	__THREAD_FPU_fpc(%r2)
 1:	# Load register save area and check if VX is active
-	lg	%r3,__FPU_regs(%r2)
+	lg	%r3,__THREAD_FPU_regs(%r2)
 	ltgr	%r3,%r3
 	jz	5f				# no save area -> set CIF_FPU
-	tm	__FPU_flags+3(%r2),FPU_USE_VX
+	tm	__THREAD_FPU_flags+3(%r2),FPU_USE_VX
 	jz	4f				# no VX -> store FP regs
 2:	# Store vector registers (V0-V15)
 	VSTM	%v0,%v15,0,%r3			# vstm 0,15,0(3)
@@ -1250,11 +1251,10 @@ cleanup_critical:
 	jhe	5f
 	clg	%r9,BASED(.Lcleanup_load_fpu_regs_vx_ctl)
 	jhe	6f
-	lg	%r4,__TI_task(%r12)
-	la	%r4,__THREAD_fpu(%r4)
-	lfpc	__FPU_fpc(%r4)
-	tm	__FPU_flags+3(%r4),FPU_USE_VX	# VX-enabled task ?
-	lg	%r4,__FPU_regs(%r4)		# %r4 <- reg save area
+	lg	%r4,__LC_CURRENT
+	lfpc	__THREAD_FPU_fpc(%r4)
+	tm	__THREAD_FPU_flags+3(%r4),FPU_USE_VX	# VX-enabled task ?
+	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
 	jz	3f				# -> no VX, load FP regs
 6:	# Set VX-enablement control
 	stctg	%c0,%c0,__SF_EMPTY+32(%r15)	# store CR0
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 9cf0063f920e..f2dac9f0799d 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -104,7 +104,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 	 * The CIF_FPU flag is set in any case to lazy clear or restore a saved
 	 * state when switching to a different task or returning to user space.
 	 */
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	dst->thread.fpu.fpc = current->thread.fpu.fpc;
 	if (is_vx_task(current))
 		convert_vx_to_fp(dst->thread.fpu.fprs,
@@ -196,7 +196,7 @@ asmlinkage void execve_tail(void)
  */
 int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
 {
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	fpregs->fpc = current->thread.fpu.fpc;
 	fpregs->pad = 0;
 	if (is_vx_task(current))
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 8c525880a3ff..8b1c8e33f184 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -943,7 +943,7 @@ static int s390_fpregs_get(struct task_struct *target,
 	_s390_fp_regs fp_regs;
 
 	if (target == current)
-		save_fpu_regs(&target->thread.fpu);
+		save_fpu_regs();
 
 	fp_regs.fpc = target->thread.fpu.fpc;
 	fpregs_store(&fp_regs, &target->thread.fpu);
@@ -961,7 +961,7 @@ static int s390_fpregs_set(struct task_struct *target,
 	freg_t fprs[__NUM_FPRS];
 
 	if (target == current)
-		save_fpu_regs(&target->thread.fpu);
+		save_fpu_regs();
 
 	/* If setting FPC, must validate it first. */
 	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
@@ -1049,7 +1049,7 @@ static int s390_vxrs_low_get(struct task_struct *target,
 		return -ENODEV;
 	if (is_vx_task(target)) {
 		if (target == current)
-			save_fpu_regs(&target->thread.fpu);
+			save_fpu_regs();
 		for (i = 0; i < __NUM_VXRS_LOW; i++)
 			vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
 	} else
@@ -1072,7 +1072,7 @@ static int s390_vxrs_low_set(struct task_struct *target,
 		if (rc)
 			return rc;
 	} else if (target == current)
-		save_fpu_regs(&target->thread.fpu);
+		save_fpu_regs();
 
 	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
 	if (rc == 0)
@@ -1093,7 +1093,7 @@ static int s390_vxrs_high_get(struct task_struct *target,
 		return -ENODEV;
 	if (is_vx_task(target)) {
 		if (target == current)
-			save_fpu_regs(&target->thread.fpu);
+			save_fpu_regs();
 		memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW,
 		       sizeof(vxrs));
 	} else
@@ -1115,7 +1115,7 @@ static int s390_vxrs_high_set(struct task_struct *target,
 		if (rc)
 			return rc;
 	} else if (target == current)
-		save_fpu_regs(&target->thread.fpu);
+		save_fpu_regs();
 
 	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 				target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 2f4c7e2638c9..9549af102d75 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -105,7 +105,7 @@ struct rt_sigframe
 static void store_sigregs(void)
 {
 	save_access_regs(current->thread.acrs);
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 }
 
 /* Load registers after signal return */
@@ -222,7 +222,7 @@ SYSCALL_DEFINE0(sigreturn)
 	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
 		goto badframe;
 	set_current_blocked(&set);
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	if (restore_sigregs(regs, &frame->sregs))
 		goto badframe;
 	if (restore_sigregs_ext(regs, &frame->sregs_ext))
@@ -246,7 +246,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
 	set_current_blocked(&set);
 	if (restore_altstack(&frame->uc.uc_stack))
 		goto badframe;
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	if (restore_sigregs(regs, &frame->uc.uc_mcontext))
 		goto badframe;
 	if (restore_sigregs_ext(regs, &frame->uc.uc_mcontext_ext))
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 76f76932ccb9..9861613fb35a 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -236,7 +236,7 @@ int alloc_vector_registers(struct task_struct *tsk)
 		return -ENOMEM;
 	preempt_disable();
 	if (tsk == current)
-		save_fpu_regs(&tsk->thread.fpu);
+		save_fpu_regs();
 	/* Copy the 16 floating point registers */
 	convert_fp_to_vx(vxrs, tsk->thread.fpu.fprs);
 	fprs = tsk->thread.fpu.fprs;
@@ -257,7 +257,7 @@ void vector_exception(struct pt_regs *regs)
 	}
 
 	/* get vector interrupt code from fpc */
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	vic = (current->thread.fpu.fpc & 0xf00) >> 8;
 	switch (vic) {
 	case 1: /* invalid vector operation */
@@ -295,7 +295,7 @@ void data_exception(struct pt_regs *regs)
 
 	location = get_trap_ip(regs);
 
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	/* Check for vector register enablement */
 	if (MACHINE_HAS_VX && !is_vx_task(current) &&
 	    (current->thread.fpu.fpc & FPC_DXC_MASK) == 0xfe00) {
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index c0cceaf4a92e..1903f0212bd0 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1224,7 +1224,7 @@ static inline void load_fpu_from(struct fpu *from)
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	/* Save host register state */
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	save_fpu_to(&vcpu->arch.host_fpregs);
 
 	if (test_kvm_facility(vcpu->kvm, 129)) {
@@ -1256,7 +1256,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	gmap_disable(vcpu->arch.gmap);
 
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 
 	if (test_kvm_facility(vcpu->kvm, 129))
 		/*
@@ -1671,7 +1671,7 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 		return -EINVAL;
 	memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
 	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	load_fpu_from(&vcpu->arch.guest_fpregs);
 	return 0;
 }
@@ -2241,7 +2241,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 	 * copying in vcpu load/put. Lets update our copies before we save
 	 * it into the save area
 	 */
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	if (test_kvm_facility(vcpu->kvm, 129)) {
 		/*
 		 * If the vector extension is available, the vector registers
@@ -2288,7 +2288,7 @@ int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
 	 *
 	 * Let's update our copies before we save it into the save area.
 	 */
-	save_fpu_regs(&current->thread.fpu);
+	save_fpu_regs();
 	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
 }