Diffstat (limited to 'include/asm-x86')
-rw-r--r--   include/asm-x86/i387.h              |  84
-rw-r--r--   include/asm-x86/processor-flags.h   |   1
-rw-r--r--   include/asm-x86/processor.h         |  20
-rw-r--r--   include/asm-x86/sigcontext.h        |  87
-rw-r--r--   include/asm-x86/sigcontext32.h      |   6
-rw-r--r--   include/asm-x86/thread_info.h       |   1
-rw-r--r--   include/asm-x86/ucontext.h          |   6
-rw-r--r--   include/asm-x86/xcr.h               |  49
-rw-r--r--   include/asm-x86/xsave.h             | 118
9 files changed, 354 insertions, 18 deletions
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index 1ecdc3ed96e4..9ba862a4eac0 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -19,7 +19,9 @@
 #include <asm/sigcontext.h>
 #include <asm/user.h>
 #include <asm/uaccess.h>
+#include <asm/xsave.h>

+extern unsigned int sig_xstate_size;
 extern void fpu_init(void);
 extern void mxcsr_feature_mask_init(void);
 extern int init_fpu(struct task_struct *child);
@@ -31,12 +33,18 @@ extern user_regset_active_fn fpregs_active, xfpregs_active;
 extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get;
 extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set;

+extern struct _fpx_sw_bytes fx_sw_reserved;
 #ifdef CONFIG_IA32_EMULATION
+extern unsigned int sig_xstate_ia32_size;
+extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
 struct _fpstate_ia32;
-extern int save_i387_ia32(struct _fpstate_ia32 __user *buf);
-extern int restore_i387_ia32(struct _fpstate_ia32 __user *buf);
+struct _xstate_ia32;
+extern int save_i387_xstate_ia32(void __user *buf);
+extern int restore_i387_xstate_ia32(void __user *buf);
 #endif

+#define X87_FSW_ES (1 << 7)     /* Exception Summary */
+
 #ifdef CONFIG_X86_64

 /* Ignore delayed exceptions from user space */
@@ -47,7 +55,7 @@ static inline void tolerant_fwait(void)
                      _ASM_EXTABLE(1b, 2b));
 }

-static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
+static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
 {
         int err;

@@ -67,15 +75,31 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
         return err;
 }

-#define X87_FSW_ES (1 << 7)     /* Exception Summary */
+static inline int restore_fpu_checking(struct task_struct *tsk)
+{
+        if (task_thread_info(tsk)->status & TS_XSAVE)
+                return xrstor_checking(&tsk->thread.xstate->xsave);
+        else
+                return fxrstor_checking(&tsk->thread.xstate->fxsave);
+}

 /* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception is
    pending. Clear the x87 state here by setting it to fixed
    values. The kernel data segment can be sometimes 0 and sometimes
    new user value. Both should be ok.
    Use the PDA as safe address because it should be already in L1. */
-static inline void clear_fpu_state(struct i387_fxsave_struct *fx)
+static inline void clear_fpu_state(struct task_struct *tsk)
 {
+        struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
+        struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+
+        /*
+         * xsave header may indicate the init state of the FP.
+         */
+        if ((task_thread_info(tsk)->status & TS_XSAVE) &&
+            !(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
+                return;
+
         if (unlikely(fx->swd & X87_FSW_ES))
                 asm volatile("fnclex");
         alternative_input(ASM_NOP8 ASM_NOP2,
@@ -84,7 +108,7 @@ static inline void clear_fpu_state(struct i387_fxsave_struct *fx)
                      X86_FEATURE_FXSAVE_LEAK);
 }

-static inline int save_i387_checking(struct i387_fxsave_struct __user *fx)
+static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
 {
         int err;

@@ -108,7 +132,7 @@
         return err;
 }

-static inline void __save_init_fpu(struct task_struct *tsk)
+static inline void fxsave(struct task_struct *tsk)
 {
         /* Using "rex64; fxsave %0" is broken because, if the memory operand
            uses any extended registers for addressing, a second REX prefix
@@ -133,7 +157,16 @@
                              : "=m" (tsk->thread.xstate->fxsave)
                              : "cdaSDb" (&tsk->thread.xstate->fxsave));
 #endif
-        clear_fpu_state(&tsk->thread.xstate->fxsave);
+}
+
+static inline void __save_init_fpu(struct task_struct *tsk)
+{
+        if (task_thread_info(tsk)->status & TS_XSAVE)
+                xsave(tsk);
+        else
+                fxsave(tsk);
+
+        clear_fpu_state(tsk);
         task_thread_info(tsk)->status &= ~TS_USEDFPU;
 }

@@ -148,6 +181,10 @@ static inline void tolerant_fwait(void)

 static inline void restore_fpu(struct task_struct *tsk)
 {
+        if (task_thread_info(tsk)->status & TS_XSAVE) {
+                xrstor_checking(&tsk->thread.xstate->xsave);
+                return;
+        }
         /*
          * The "nop" is needed to make the instructions the same
          * length.
@@ -173,6 +210,27 @@
  */
 static inline void __save_init_fpu(struct task_struct *tsk)
 {
+        if (task_thread_info(tsk)->status & TS_XSAVE) {
+                struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
+                struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
+
+                xsave(tsk);
+
+                /*
+                 * xsave header may indicate the init state of the FP.
+                 */
+                if (!(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
+                        goto end;
+
+                if (unlikely(fx->swd & X87_FSW_ES))
+                        asm volatile("fnclex");
+
+                /*
+                 * we can do a simple return here or be paranoid :)
+                 */
+                goto clear_state;
+        }
+
         /* Use more nops than strictly needed in case the compiler
            varies code */
         alternative_input(
@@ -182,6 +240,7 @@
                 X86_FEATURE_FXSR,
                 [fx] "m" (tsk->thread.xstate->fxsave),
                 [fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory");
+clear_state:
         /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
            is pending.  Clear the x87 state here by setting it to fixed
            values. safe_address is a random variable that should be in L1 */
@@ -191,16 +250,17 @@ static inline void __save_init_fpu(struct task_struct *tsk)
                 "fildl %[addr]",        /* set F?P to defined value */
                 X86_FEATURE_FXSAVE_LEAK,
                 [addr] "m" (safe_address));
+end:
         task_thread_info(tsk)->status &= ~TS_USEDFPU;
 }

+#endif  /* CONFIG_X86_64 */
+
 /*
  * Signal frame handlers...
  */
-extern int save_i387(struct _fpstate __user *buf);
-extern int restore_i387(struct _fpstate __user *buf);
-
-#endif  /* CONFIG_X86_64 */
+extern int save_i387_xstate(void __user *buf);
+extern int restore_i387_xstate(void __user *buf);

 static inline void __unlazy_fpu(struct task_struct *tsk)
 {
diff --git a/include/asm-x86/processor-flags.h b/include/asm-x86/processor-flags.h
index 5dd79774f693..dc5f0712f9fa 100644
--- a/include/asm-x86/processor-flags.h
+++ b/include/asm-x86/processor-flags.h
@@ -59,6 +59,7 @@
 #define X86_CR4_OSFXSR  0x00000200 /* enable fast FPU save and restore */
 #define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */
 #define X86_CR4_VMXE    0x00002000 /* enable VMX virtualization */
+#define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */

 /*
  * x86-64 Task Priority Register, CR8
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 10471cfab145..df03c98e38d4 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -326,7 +326,12 @@ struct i387_fxsave_struct {
         /* 16*16 bytes for each XMM-reg = 256 bytes: */
         u32                     xmm_space[64];

-        u32                     padding[24];
+        u32                     padding[12];
+
+        union {
+                u32             padding1[12];
+                u32             sw_reserved[12];
+        };

 } __attribute__((aligned(16)));

@@ -350,10 +355,23 @@
         u32                     entry_eip;
 };

+struct xsave_hdr_struct {
+        u64 xstate_bv;
+        u64 reserved1[2];
+        u64 reserved2[5];
+} __attribute__((packed));
+
+struct xsave_struct {
+        struct i387_fxsave_struct i387;
+        struct xsave_hdr_struct xsave_hdr;
+        /* new processor state extensions will go here */
+} __attribute__ ((packed, aligned (64)));
+
 union thread_xstate {
         struct i387_fsave_struct        fsave;
         struct i387_fxsave_struct       fxsave;
         struct i387_soft_struct         soft;
+        struct xsave_struct             xsave;
 };

 #ifdef CONFIG_X86_64
diff --git a/include/asm-x86/sigcontext.h b/include/asm-x86/sigcontext.h
index 24879c85b291..ee813f4fe5d5 100644
--- a/include/asm-x86/sigcontext.h
+++ b/include/asm-x86/sigcontext.h
@@ -4,6 +4,40 @@
 #include <linux/compiler.h>
 #include <asm/types.h>

+#define FP_XSTATE_MAGIC1        0x46505853U
+#define FP_XSTATE_MAGIC2        0x46505845U
+#define FP_XSTATE_MAGIC2_SIZE   sizeof(FP_XSTATE_MAGIC2)
+
+/*
+ * bytes 464..511 in the current 512byte layout of fxsave/fxrstor frame
+ * are reserved for SW usage. On cpu's supporting xsave/xrstor, these bytes
+ * are used to extended the fpstate pointer in the sigcontext, which now
+ * includes the extended state information along with fpstate information.
+ *
+ * Presence of FP_XSTATE_MAGIC1 at the beginning of this SW reserved
+ * area and FP_XSTATE_MAGIC2 at the end of memory layout
+ * (extended_size - FP_XSTATE_MAGIC2_SIZE) indicates the presence of the
+ * extended state information in the memory layout pointed by the fpstate
+ * pointer in sigcontext.
+ */
+struct _fpx_sw_bytes {
+        __u32 magic1;           /* FP_XSTATE_MAGIC1 */
+        __u32 extended_size;    /* total size of the layout referred by
+                                 * fpstate pointer in the sigcontext.
+                                 */
+        __u64 xstate_bv;
+                                /* feature bit mask (including fp/sse/extended
+                                 * state) that is present in the memory
+                                 * layout.
+                                 */
+        __u32 xstate_size;      /* actual xsave state size, based on the
+                                 * features saved in the layout.
+                                 * 'extended_size' will be greater than
+                                 * 'xstate_size'.
+                                 */
+        __u32 padding[7];       /* for future use. */
+};
+
 #ifdef __i386__
 /*
  * As documented in the iBCS2 standard..
@@ -53,7 +87,13 @@ struct _fpstate {
         unsigned long   reserved;
         struct _fpxreg  _fxsr_st[8];    /* FXSR FPU reg data is ignored */
         struct _xmmreg  _xmm[8];
-        unsigned long   padding[56];
+        unsigned long   padding1[44];
+
+        union {
+                unsigned long   padding2[12];
+                struct _fpx_sw_bytes sw_reserved; /* represents the extended
+                                                   * state info */
+        };
 };

 #define X86_FXSR_MAGIC          0x0000
@@ -79,7 +119,15 @@ struct sigcontext {
         unsigned long flags;
         unsigned long sp_at_signal;
         unsigned short ss, __ssh;
-        struct _fpstate __user *fpstate;
+
+        /*
+         * fpstate is really (struct _fpstate *) or (struct _xstate *)
+         * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved
+         * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end
+         * of extended memory layout. See comments at the defintion of
+         * (struct _fpx_sw_bytes)
+         */
+        void __user *fpstate;           /* zero when no FPU/extended context */
         unsigned long oldmask;
         unsigned long cr2;
 };
@@ -130,7 +178,12 @@ struct _fpstate {
         __u32   mxcsr_mask;
         __u32   st_space[32];   /* 8*16 bytes for each FP-reg */
         __u32   xmm_space[64];  /* 16*16 bytes for each XMM-reg */
-        __u32   reserved2[24];
+        __u32   reserved2[12];
+        union {
+                __u32   reserved3[12];
+                struct _fpx_sw_bytes sw_reserved; /* represents the extended
+                                                   * state information */
+        };
 };

 #ifdef __KERNEL__
@@ -161,7 +214,15 @@ struct sigcontext {
         unsigned long trapno;
         unsigned long oldmask;
         unsigned long cr2;
-        struct _fpstate __user *fpstate;        /* zero when no FPU context */
+
+        /*
+         * fpstate is really (struct _fpstate *) or (struct _xstate *)
+         * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved
+         * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end
+         * of extended memory layout. See comments at the defintion of
+         * (struct _fpx_sw_bytes)
+         */
+        void __user *fpstate;           /* zero when no FPU/extended context */
         unsigned long reserved1[8];
 };
 #else /* __KERNEL__ */
@@ -202,4 +263,22 @@ struct sigcontext {

 #endif /* !__i386__ */

+struct _xsave_hdr {
+        __u64 xstate_bv;
+        __u64 reserved1[2];
+        __u64 reserved2[5];
+};
+
+/*
+ * Extended state pointed by the fpstate pointer in the sigcontext.
+ * In addition to the fpstate, information encoded in the xstate_hdr
+ * indicates the presence of other extended state information
+ * supported by the processor and OS.
+ */
+struct _xstate {
+        struct _fpstate fpstate;
+        struct _xsave_hdr xstate_hdr;
+        /* new processor state extensions go here */
+};
+
 #endif /* ASM_X86__SIGCONTEXT_H */
diff --git a/include/asm-x86/sigcontext32.h b/include/asm-x86/sigcontext32.h
index 4e2ec732dd01..8c347032c2f2 100644
--- a/include/asm-x86/sigcontext32.h
+++ b/include/asm-x86/sigcontext32.h
@@ -40,7 +40,11 @@ struct _fpstate_ia32 {
         __u32   reserved;
         struct _fpxreg  _fxsr_st[8];
         struct _xmmreg  _xmm[8];        /* It's actually 16 */
-        __u32   padding[56];
+        __u32   padding[44];
+        union {
+                __u32 padding2[12];
+                struct _fpx_sw_bytes sw_reserved;
+        };
 };

 struct sigcontext_ia32 {
diff --git a/include/asm-x86/thread_info.h b/include/asm-x86/thread_info.h
index e64be8863b76..30586f2ee558 100644
--- a/include/asm-x86/thread_info.h
+++ b/include/asm-x86/thread_info.h
@@ -239,6 +239,7 @@ static inline struct thread_info *stack_thread_info(void)
 #define TS_POLLING              0x0004  /* true if in idle loop
                                            and not sleeping */
 #define TS_RESTORE_SIGMASK      0x0008  /* restore signal mask in do_signal() */
+#define TS_XSAVE                0x0010  /* Use xsave/xrstor */

 #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)

diff --git a/include/asm-x86/ucontext.h b/include/asm-x86/ucontext.h
index 9948dd328084..89eaa5456a7e 100644
--- a/include/asm-x86/ucontext.h
+++ b/include/asm-x86/ucontext.h
@@ -1,6 +1,12 @@
 #ifndef ASM_X86__UCONTEXT_H
 #define ASM_X86__UCONTEXT_H

+#define UC_FP_XSTATE    0x1     /* indicates the presence of extended state
+                                 * information in the memory layout pointed
+                                 * by the fpstate pointer in the ucontext's
+                                 * sigcontext struct (uc_mcontext).
+                                 */
+
 struct ucontext {
         unsigned long     uc_flags;
         struct ucontext  *uc_link;
diff --git a/include/asm-x86/xcr.h b/include/asm-x86/xcr.h
new file mode 100644
index 000000000000..f2cba4e79a23
--- /dev/null
+++ b/include/asm-x86/xcr.h
@@ -0,0 +1,49 @@
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ *   Copyright 2008 rPath, Inc. - All Rights Reserved
+ *
+ *   This file is part of the Linux kernel, and is made available under
+ *   the terms of the GNU General Public License version 2 or (at your
+ *   option) any later version; incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * asm-x86/xcr.h
+ *
+ * Definitions for the eXtended Control Register instructions
+ */
+
+#ifndef _ASM_X86_XCR_H
+#define _ASM_X86_XCR_H
+
+#define XCR_XFEATURE_ENABLED_MASK       0x00000000
+
+#ifdef __KERNEL__
+# ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+static inline u64 xgetbv(u32 index)
+{
+        u32 eax, edx;
+
+        asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
+                     : "=a" (eax), "=d" (edx)
+                     : "c" (index));
+        return eax + ((u64)edx << 32);
+}
+
+static inline void xsetbv(u32 index, u64 value)
+{
+        u32 eax = value;
+        u32 edx = value >> 32;
+
+        asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
+                     : : "a" (eax), "d" (edx), "c" (index));
+}
+
+# endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_X86_XCR_H */
diff --git a/include/asm-x86/xsave.h b/include/asm-x86/xsave.h
new file mode 100644
index 000000000000..08e9a1ac07a9
--- /dev/null
+++ b/include/asm-x86/xsave.h
@@ -0,0 +1,118 @@
+#ifndef __ASM_X86_XSAVE_H
+#define __ASM_X86_XSAVE_H
+
+#include <linux/types.h>
+#include <asm/processor.h>
+#include <asm/i387.h>
+
+#define XSTATE_FP       0x1
+#define XSTATE_SSE      0x2
+
+#define XSTATE_FPSSE    (XSTATE_FP | XSTATE_SSE)
+
+#define FXSAVE_SIZE     512
+
+/*
+ * These are the features that the OS can handle currently.
+ */
+#define XCNTXT_MASK     (XSTATE_FP | XSTATE_SSE)
+
+#ifdef CONFIG_X86_64
+#define REX_PREFIX      "0x48, "
+#else
+#define REX_PREFIX
+#endif
+
+extern unsigned int xstate_size;
+extern u64 pcntxt_mask;
+extern struct xsave_struct *init_xstate_buf;
+
+extern void xsave_cntxt_init(void);
+extern void xsave_init(void);
+extern int init_fpu(struct task_struct *child);
+extern int check_for_xstate(struct i387_fxsave_struct __user *buf,
+                            void __user *fpstate,
+                            struct _fpx_sw_bytes *sw);
+
+static inline int xrstor_checking(struct xsave_struct *fx)
+{
+        int err;
+
+        asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
+                     "2:\n"
+                     ".section .fixup,\"ax\"\n"
+                     "3:  movl $-1,%[err]\n"
+                     "    jmp  2b\n"
+                     ".previous\n"
+                     _ASM_EXTABLE(1b, 3b)
+                     : [err] "=r" (err)
+                     : "D" (fx), "m" (*fx), "a" (-1), "d" (-1), "0" (0)
+                     : "memory");
+
+        return err;
+}
+
+static inline int xsave_user(struct xsave_struct __user *buf)
+{
+        int err;
+        __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
+                             "2:\n"
+                             ".section .fixup,\"ax\"\n"
+                             "3:  movl $-1,%[err]\n"
+                             "    jmp  2b\n"
+                             ".previous\n"
+                             ".section __ex_table,\"a\"\n"
+                             _ASM_ALIGN "\n"
+                             _ASM_PTR "1b,3b\n"
+                             ".previous"
+                             : [err] "=r" (err)
+                             : "D" (buf), "a" (-1), "d" (-1), "0" (0)
+                             : "memory");
+        if (unlikely(err) && __clear_user(buf, xstate_size))
+                err = -EFAULT;
+        /* No need to clear here because the caller clears USED_MATH */
+        return err;
+}
+
+static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
+{
+        int err;
+        struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
+        u32 lmask = mask;
+        u32 hmask = mask >> 32;
+
+        __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
+                             "2:\n"
+                             ".section .fixup,\"ax\"\n"
+                             "3:  movl $-1,%[err]\n"
+                             "    jmp  2b\n"
+                             ".previous\n"
+                             ".section __ex_table,\"a\"\n"
+                             _ASM_ALIGN "\n"
+                             _ASM_PTR "1b,3b\n"
+                             ".previous"
+                             : [err] "=r" (err)
+                             : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
+                             : "memory");       /* memory required? */
+        return err;
+}
+
+static inline void xrstor_state(struct xsave_struct *fx, u64 mask)
+{
+        u32 lmask = mask;
+        u32 hmask = mask >> 32;
+
+        asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
+                     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
+                     :   "memory");
+}
+
+static inline void xsave(struct task_struct *tsk)
+{
+        /* This, however, we can work around by forcing the compiler to select
+           an addressing mode that doesn't require extended registers. */
+        __asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27"
+                             : : "D" (&(tsk->thread.xstate->xsave)),
+                                 "a" (-1), "d"(-1) :  "memory");
+}
+#endif
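
Note (not part of the patch): the pieces introduced above -- the X86_CR4_OSXSAVE bit, the xgetbv()/xsetbv() helpers keyed by XCR_XFEATURE_ENABLED_MASK, and the XCNTXT_MASK, pcntxt_mask and xstate_size declarations in xsave.h -- are intended to be wired together at boot. The fragment below is a minimal sketch of that enable sequence under stated assumptions: cpuid_count(), read_cr4() and write_cr4() are taken from the kernel of that era, and the real implementation lives in the corresponding arch/x86 C files and may differ in detail.

/* Sketch only: boot-time XSAVE enabling using the definitions above. */
static void __init example_xsave_enable(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* CPUID leaf 0xd, sub-leaf 0: EDX:EAX = xstate features in hardware */
        cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx);
        pcntxt_mask = eax + ((u64)edx << 32);

        /* restrict to the features this kernel knows how to handle */
        pcntxt_mask &= XCNTXT_MASK;

        /* CR4.OSXSAVE must be set before xsetbv()/xgetbv() may execute */
        write_cr4(read_cr4() | X86_CR4_OSXSAVE);

        /* program XCR0 (the XFEATURE_ENABLED_MASK register) */
        xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);

        /* EBX of CPUID.0xD now reports the save-area size for the enabled bits */
        cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx);
        xstate_size = ebx;
}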
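A second illustration (again not from the patch): user space can probe the new sigcontext convention from a signal handler. The sketch assumes the 64-bit layout, where the fpstate pointer points directly at the 512-byte fxsave image whose bytes 464..511 carry struct _fpx_sw_bytes; the glibc field name uc_mcontext.fpregs and the local struct mirroring _fpx_sw_bytes are illustrative assumptions, not definitions from this patch.

/* Hypothetical user-space probe for the FP_XSTATE_MAGIC1/MAGIC2 convention. */
#include <signal.h>
#include <string.h>
#include <ucontext.h>

#define FP_XSTATE_MAGIC1   0x46505853U
#define FP_XSTATE_MAGIC2   0x46505845U
#define UC_FP_XSTATE       0x1

struct fpx_sw_bytes {                  /* mirrors struct _fpx_sw_bytes */
        unsigned int magic1;
        unsigned int extended_size;
        unsigned long long xstate_bv;
        unsigned int xstate_size;
        unsigned int padding[7];
};

static volatile unsigned long long seen_xstate_bv;

static void probe_handler(int sig, siginfo_t *info, void *ctx)
{
        ucontext_t *uc = ctx;
        char *fp = (char *)uc->uc_mcontext.fpregs;     /* the fpstate pointer */
        struct fpx_sw_bytes *sw;
        unsigned int magic2;

        (void)sig;
        (void)info;
        if (!fp || !(uc->uc_flags & UC_FP_XSTATE))
                return;                                /* classic fxsave frame */

        sw = (struct fpx_sw_bytes *)(fp + 464);        /* SW-reserved bytes */
        memcpy(&magic2, fp + sw->extended_size - sizeof(magic2), sizeof(magic2));

        if (sw->magic1 == FP_XSTATE_MAGIC1 && magic2 == FP_XSTATE_MAGIC2)
                seen_xstate_bv = sw->xstate_bv;        /* extended state present */
}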