Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/preempt.h  | 47
-rw-r--r--  include/linux/sched.h    |  7
2 files changed, 47 insertions, 7 deletions
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index eaac52a8fe6a..92e341853e4b 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -10,9 +10,19 @@
 #include <linux/linkage.h>
 #include <linux/list.h>
 
+/*
+ * We use the MSB mostly because it's available; see <linux/preempt_mask.h> for
+ * the other bits -- can't include that header due to inclusion hell.
+ */
+#define PREEMPT_NEED_RESCHED	0x80000000
+
+/*
+ * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
+ * that think a non-zero value indicates we cannot preempt.
+ */
 static __always_inline int preempt_count(void)
 {
-	return current_thread_info()->preempt_count;
+	return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED;
 }
 
 static __always_inline int *preempt_count_ptr(void)
@@ -20,11 +30,40 @@ static __always_inline int *preempt_count_ptr(void)
 	return &current_thread_info()->preempt_count;
 }
 
+/*
+ * We now lose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the
+ * alternative is losing a reschedule. Better schedule too often -- also this
+ * should be a very rare operation.
+ */
 static __always_inline void preempt_count_set(int pc)
 {
 	*preempt_count_ptr() = pc;
 }
 
+/*
+ * We fold the NEED_RESCHED bit into the preempt count such that
+ * preempt_enable() can decrement and test for needing to reschedule with a
+ * single instruction.
+ *
+ * We invert the actual bit, so that when the decrement hits 0 we know we both
+ * need to resched (the bit is cleared) and can resched (no preempt count).
+ */
+
+static __always_inline void set_preempt_need_resched(void)
+{
+	*preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED;
+}
+
+static __always_inline void clear_preempt_need_resched(void)
+{
+	*preempt_count_ptr() |= PREEMPT_NEED_RESCHED;
+}
+
+static __always_inline bool test_preempt_need_resched(void)
+{
+	return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
+}
+
 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
 extern void add_preempt_count(int val);
 extern void sub_preempt_count(int val);
@@ -42,7 +81,7 @@ asmlinkage void preempt_schedule(void);
 
 #define preempt_check_resched() \
 do { \
-	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+	if (unlikely(!*preempt_count_ptr())) \
 		preempt_schedule(); \
 } while (0)
 
@@ -52,7 +91,7 @@ void preempt_schedule_context(void);
 
 #define preempt_check_resched_context() \
 do { \
-	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+	if (unlikely(!*preempt_count_ptr())) \
 		preempt_schedule_context(); \
 } while (0)
 #else
@@ -88,7 +127,6 @@ do { \
 #define preempt_enable() \
 do { \
 	preempt_enable_no_resched(); \
-	barrier(); \
 	preempt_check_resched(); \
 } while (0)
 
@@ -116,7 +154,6 @@ do { \
 #define preempt_enable_notrace() \
 do { \
 	preempt_enable_no_resched_notrace(); \
-	barrier(); \
 	preempt_check_resched_context(); \
 } while (0)
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e783ec52295a..9fa151fb968e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -22,6 +22,7 @@ struct sched_param {
 #include <linux/errno.h>
 #include <linux/nodemask.h>
 #include <linux/mm_types.h>
+#include <linux/preempt.h>
 
 #include <asm/page.h>
 #include <asm/ptrace.h>
@@ -434,7 +435,9 @@ struct task_cputime {
  * We include PREEMPT_ACTIVE to avoid cond_resched() from working
  * before the scheduler is active -- see should_resched().
  */
-#define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE)
+#define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE + PREEMPT_NEED_RESCHED)
+#define PREEMPT_ENABLED	(PREEMPT_NEED_RESCHED)
+#define PREEMPT_DISABLED	(1 + PREEMPT_NEED_RESCHED)
 
 /**
  * struct thread_group_cputimer - thread group interval timer counts
@@ -2408,7 +2411,7 @@ static inline int signal_pending_state(long state, struct task_struct *p)
 
 static inline int need_resched(void)
 {
-	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
+	return unlikely(test_preempt_need_resched());
 }
 
 /*
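
Editor's note: the inverted-bit folding in the patch comments is easiest to see in
isolation. Below is a minimal userspace sketch, not part of the patch: the `count`
variable, the printf, and the main() driver are illustrative stand-ins for the
per-thread preempt_count and preempt_schedule(); only the bit layout and the
set/clear/enable logic mirror the code above.

#include <stdbool.h>
#include <stdio.h>

/* MSB of the count, stored inverted: bit CLEAR means "reschedule needed". */
#define PREEMPT_NEED_RESCHED	0x80000000u

/* Stand-in for the per-thread preempt_count; starts out as PREEMPT_ENABLED. */
static unsigned int count = PREEMPT_NEED_RESCHED;

static void set_preempt_need_resched(void)   { count &= ~PREEMPT_NEED_RESCHED; }
static void clear_preempt_need_resched(void) { count |=  PREEMPT_NEED_RESCHED; }

static bool test_preempt_need_resched(void)
{
	return !(count & PREEMPT_NEED_RESCHED);
}

static void preempt_disable(void) { count++; }

static void preempt_enable(void)
{
	/*
	 * One decrement plus one test-for-zero: the raw word reaches 0 only
	 * when the nesting count is 0 AND the inverted NEED_RESCHED bit is
	 * clear, i.e. we both may and must reschedule.
	 */
	if (--count == 0) {
		printf("preempt_schedule() would run here\n");
		clear_preempt_need_resched();	/* consume the request */
	}
}

int main(void)
{
	preempt_disable();
	set_preempt_need_resched();	/* wakeup arrives while preemption is off */
	preempt_enable();		/* (1 + 0) - 1 == 0 -> reschedules */

	preempt_disable();
	preempt_enable();		/* no request pending: raw word != 0 */
	return 0;
}

Without the inversion, "count == 0" would only say preemption is allowed, and a
separate flag test would still be needed; with it, a single decrement-and-branch
covers both conditions, which is what later per-CPU preempt_count patches exploit
to shrink the preempt_enable() fast path.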