-rw-r--r--  kernel/sched.c       16
-rw-r--r--  kernel/sched_fair.c   6
2 files changed, 0 insertions, 22 deletions
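
Every hunk below follows the same mechanical pattern: the call site already refreshes the per-runqueue clock via update_rq_clock() (or its __update_rq_clock() variant), but still declares a local 'u64 now' and snapshots 'rq->clock' into it without ever reading it again. The patch deletes those dead locals and dead stores while keeping the clock refresh itself. A minimal before/after sketch of the pattern, distilled from the activate_task() hunk (the rest of the function body is elided, and the kernel types are assumed from context):

    /* before: dead local left over from an older calling convention */
    static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
    {
            u64 now;

            update_rq_clock(rq);
            now = rq->clock;        /* stored but never read: dead store */

            if (p->state == TASK_UNINTERRUPTIBLE)
                    rq->nr_uninterruptible--;
            /* ... */
    }

    /* after: keep the clock refresh, drop the unused snapshot */
    static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
    {
            update_rq_clock(rq);

            if (p->state == TASK_UNINTERRUPTIBLE)
                    rq->nr_uninterruptible--;
            /* ... */
    }

Note that the update_rq_clock() call survives in every case: code later in these paths reads rq->clock directly (or calls helpers that do), so the refresh is still needed even though the local snapshot is not.
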
diff --git a/kernel/sched.c b/kernel/sched.c
index 2dc5d2f7b392..b78b9d9ffd1c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -910,10 +910,7 @@ static int effective_prio(struct task_struct *p)
*/
static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
{
- u64 now;
-
update_rq_clock(rq);
- now = rq->clock;
if (p->state == TASK_UNINTERRUPTIBLE)
rq->nr_uninterruptible--;
@@ -927,10 +924,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
*/
static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
{
- u64 now;
-
update_rq_clock(rq);
- now = rq->clock;
if (p->state == TASK_UNINTERRUPTIBLE)
rq->nr_uninterruptible--;
@@ -1647,13 +1641,11 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
unsigned long flags;
struct rq *rq;
int this_cpu;
- u64 now;
rq = task_rq_lock(p, &flags);
BUG_ON(p->state != TASK_RUNNING);
this_cpu = smp_processor_id(); /* parent's CPU */
update_rq_clock(rq);
- now = rq->clock;
p->prio = effective_prio(p);
@@ -1955,11 +1947,9 @@ static void update_cpu_load(struct rq *this_rq)
unsigned long total_load = this_rq->ls.load.weight;
unsigned long this_load = total_load;
struct load_stat *ls = &this_rq->ls;
- u64 now;
int i, scale;
__update_rq_clock(this_rq);
- now = this_rq->clock;
this_rq->nr_load_updates++;
if (unlikely(!(sysctl_sched_features & SCHED_FEAT_PRECISE_CPU_LOAD)))
@@ -3431,7 +3421,6 @@ asmlinkage void __sched schedule(void)
struct task_struct *prev, *next;
long *switch_count;
struct rq *rq;
- u64 now;
int cpu;
need_resched:
@@ -3450,7 +3439,6 @@ need_resched_nonpreemptible:
spin_lock_irq(&rq->lock);
clear_tsk_need_resched(prev);
__update_rq_clock(rq);
- now = rq->clock;
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
@@ -3909,13 +3897,11 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
unsigned long flags;
int oldprio, on_rq;
struct rq *rq;
- u64 now;
BUG_ON(prio < 0 || prio > MAX_PRIO);
rq = task_rq_lock(p, &flags);
update_rq_clock(rq);
- now = rq->clock;
oldprio = p->prio;
on_rq = p->se.on_rq;
@@ -3953,7 +3939,6 @@ void set_user_nice(struct task_struct *p, long nice)
int old_prio, delta, on_rq;
unsigned long flags;
struct rq *rq;
- u64 now;
if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
return;
@@ -3963,7 +3948,6 @@ void set_user_nice(struct task_struct *p, long nice)
*/
rq = task_rq_lock(p, &flags);
update_rq_clock(rq);
- now = rq->clock;
/*
* The RT priorities are set via sched_setscheduler(), but we still
* allow the 'normal' nice value to be set - but as expected
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 4a2cbde1057f..eb7ca49c3260 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -667,10 +667,8 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
struct rq *rq = rq_of(cfs_rq);
struct sched_entity *next;
- u64 now;
__update_rq_clock(rq);
- now = rq->clock;
/*
* Dequeue and enqueue the task to update its
@@ -820,10 +818,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
static void yield_task_fair(struct rq *rq, struct task_struct *p)
{
struct cfs_rq *cfs_rq = task_cfs_rq(p);
- u64 now;
__update_rq_clock(rq);
- now = rq->clock;
/*
* Dequeue and enqueue the task to update its
* position within the tree:
@@ -1062,11 +1058,9 @@ static void set_curr_task_fair(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct sched_entity *se = &curr->se;
- u64 now;
struct cfs_rq *cfs_rq;
update_rq_clock(rq);
- now = rq->clock;
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
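
One reason stores like these linger is that -Wunused-variable cannot see them: the local is assigned, just never read. GCC 4.6 and later add -Wunused-but-set-variable, which flags exactly this shape. A standalone illustration (the struct and function names are made up for the example, not taken from the kernel):

    /* gcc -Wunused-but-set-variable -c dead_store.c */
    struct rq {
            unsigned long long clock;
    };

    void touch_clock(struct rq *rq)
    {
            unsigned long long now;

            now = rq->clock;        /* warning: variable 'now' set but not used */
    }
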