author    Paul Mackerras <paulus@samba.org>  2007-02-14 11:33:39 +1100
committer Paul Mackerras <paulus@samba.org>  2007-02-14 11:33:39 +1100
commit    944b380e0c3dead0e16a747dfd3ce2765afac16d (patch)
tree      d1016c273ebac0b4c02d851a854d9a789df632b5 /arch/powerpc/platforms
parent    fff5f52808be01d16bb7c8b82580155ff19e16b0 (diff)
parent    c7eb734766217b9ddac217cbccae3aedcfa67520 (diff)
Merge branch 'cell-merge' of git+ssh://master.kernel.org/pub/scm/linux/kernel/git/arnd/cell-2.6
Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--  arch/powerpc/platforms/cell/pmu.c            |  14
-rw-r--r--  arch/powerpc/platforms/cell/spufs/context.c  | 125
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c     |   7
-rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c      |  16
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c    | 386
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h    |  48
6 files changed, 379 insertions, 217 deletions
diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c
index d04ae1671e6c..66ca4b5a1dbc 100644
--- a/arch/powerpc/platforms/cell/pmu.c
+++ b/arch/powerpc/platforms/cell/pmu.c
@@ -345,18 +345,12 @@ EXPORT_SYMBOL_GPL(cbe_read_trace_buffer);
* Enabling/disabling interrupts for the entire performance monitoring unit.
*/
-u32 cbe_query_pm_interrupts(u32 cpu)
-{
- return cbe_read_pm(cpu, pm_status);
-}
-EXPORT_SYMBOL_GPL(cbe_query_pm_interrupts);
-
-u32 cbe_clear_pm_interrupts(u32 cpu)
+u32 cbe_get_and_clear_pm_interrupts(u32 cpu)
{
/* Reading pm_status clears the interrupt bits. */
- return cbe_query_pm_interrupts(cpu);
+ return cbe_read_pm(cpu, pm_status);
}
-EXPORT_SYMBOL_GPL(cbe_clear_pm_interrupts);
+EXPORT_SYMBOL_GPL(cbe_get_and_clear_pm_interrupts);
void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask)
{
@@ -371,7 +365,7 @@ EXPORT_SYMBOL_GPL(cbe_enable_pm_interrupts);
void cbe_disable_pm_interrupts(u32 cpu)
{
- cbe_clear_pm_interrupts(cpu);
+ cbe_get_and_clear_pm_interrupts(cpu);
cbe_write_pm(cpu, pm_status, 0);
}
EXPORT_SYMBOL_GPL(cbe_disable_pm_interrupts);
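Note: the pmu.c hunk above folds cbe_query_pm_interrupts() and cbe_clear_pm_interrupts() into a single cbe_get_and_clear_pm_interrupts(), since reading pm_status already clears the pending bits. A minimal sketch of a caller under the new name follows; the handler name and body are illustrative assumptions, not part of the patch.

/* Illustrative only: hypothetical PMU interrupt handler using the renamed helper.
 * Assumes the usual <linux/interrupt.h> and <asm/cell-pmu.h> declarations. */
static irqreturn_t example_cbe_pmu_irq(int irq, void *dev_id)
{
	u32 cpu = smp_processor_id();
	/* A single call now reads pm_status and clears the pending interrupt bits. */
	u32 pm_status = cbe_get_and_clear_pm_interrupts(cpu);

	if (!pm_status)
		return IRQ_NONE;

	/* ... process counter overflow / trace buffer events here ... */
	return IRQ_HANDLED;
}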
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 28c718ca3b51..04ad2e364e97 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -42,7 +42,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
}
spin_lock_init(&ctx->mmio_lock);
kref_init(&ctx->kref);
- init_rwsem(&ctx->state_sema);
+ mutex_init(&ctx->state_mutex);
init_MUTEX(&ctx->run_sema);
init_waitqueue_head(&ctx->ibox_wq);
init_waitqueue_head(&ctx->wbox_wq);
@@ -53,6 +53,10 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
ctx->owner = get_task_mm(current);
if (gang)
spu_gang_add_ctx(gang, ctx);
+ ctx->rt_priority = current->rt_priority;
+ ctx->policy = current->policy;
+ ctx->prio = current->prio;
+ INIT_DELAYED_WORK(&ctx->sched_work, spu_sched_tick);
goto out;
out_free:
kfree(ctx);
@@ -65,9 +69,9 @@ void destroy_spu_context(struct kref *kref)
{
struct spu_context *ctx;
ctx = container_of(kref, struct spu_context, kref);
- down_write(&ctx->state_sema);
+ mutex_lock(&ctx->state_mutex);
spu_deactivate(ctx);
- up_write(&ctx->state_sema);
+ mutex_unlock(&ctx->state_mutex);
spu_fini_csa(&ctx->csa);
if (ctx->gang)
spu_gang_remove_ctx(ctx->gang, ctx);
@@ -96,16 +100,6 @@ void spu_forget(struct spu_context *ctx)
spu_release(ctx);
}
-void spu_acquire(struct spu_context *ctx)
-{
- down_read(&ctx->state_sema);
-}
-
-void spu_release(struct spu_context *ctx)
-{
- up_read(&ctx->state_sema);
-}
-
void spu_unmap_mappings(struct spu_context *ctx)
{
if (ctx->local_store)
@@ -124,83 +118,84 @@ void spu_unmap_mappings(struct spu_context *ctx)
unmap_mapping_range(ctx->psmap, 0, 0x20000, 1);
}
+/**
+ * spu_acquire_exclusive - lock spu context and protect against userspace access
+ * @ctx: spu context to lock
+ *
+ * Note:
+ * Returns 0 and with the context locked on success
+ * Returns negative error and with the context _unlocked_ on failure.
+ */
int spu_acquire_exclusive(struct spu_context *ctx)
{
- int ret = 0;
+ int ret = -EINVAL;
- down_write(&ctx->state_sema);
- /* ctx is about to be freed, can't acquire any more */
- if (!ctx->owner) {
- ret = -EINVAL;
- goto out;
- }
+ spu_acquire(ctx);
+ /*
+ * Context is about to be freed, so we can't acquire it anymore.
+ */
+ if (!ctx->owner)
+ goto out_unlock;
if (ctx->state == SPU_STATE_SAVED) {
ret = spu_activate(ctx, 0);
if (ret)
- goto out;
- ctx->state = SPU_STATE_RUNNABLE;
+ goto out_unlock;
} else {
- /* We need to exclude userspace access to the context. */
+ /*
+ * We need to exclude userspace access to the context.
+ *
+ * To protect against memory access we invalidate all ptes
+ * and make sure the pagefault handlers block on the mutex.
+ */
spu_unmap_mappings(ctx);
}
-out:
- if (ret)
- up_write(&ctx->state_sema);
+ return 0;
+
+ out_unlock:
+ spu_release(ctx);
return ret;
}
-int spu_acquire_runnable(struct spu_context *ctx)
+/**
+ * spu_acquire_runnable - lock spu context and make sure it is in runnable state
+ * @ctx: spu context to lock
+ *
+ * Note:
+ * Returns 0 and with the context locked on success
+ * Returns negative error and with the context _unlocked_ on failure.
+ */
+int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags)
{
- int ret = 0;
-
- down_read(&ctx->state_sema);
- if (ctx->state == SPU_STATE_RUNNABLE) {
- ctx->spu->prio = current->prio;
- return 0;
- }
- up_read(&ctx->state_sema);
-
- down_write(&ctx->state_sema);
- /* ctx is about to be freed, can't acquire any more */
- if (!ctx->owner) {
- ret = -EINVAL;
- goto out;
- }
+ int ret = -EINVAL;
+ spu_acquire(ctx);
if (ctx->state == SPU_STATE_SAVED) {
- ret = spu_activate(ctx, 0);
+ /*
+ * Context is about to be freed, so we can't acquire it anymore.
+ */
+ if (!ctx->owner)
+ goto out_unlock;
+ ret = spu_activate(ctx, flags);
if (ret)
- goto out;
- ctx->state = SPU_STATE_RUNNABLE;
+ goto out_unlock;
}
- downgrade_write(&ctx->state_sema);
- /* On success, we return holding the lock */
-
- return ret;
-out:
- /* Release here, to simplify calling code. */
- up_write(&ctx->state_sema);
+ return 0;
+ out_unlock:
+ spu_release(ctx);
return ret;
}
+/**
+ * spu_acquire_saved - lock spu context and make sure it is in saved state
+ * @ctx: spu context to lock
+ */
void spu_acquire_saved(struct spu_context *ctx)
{
- down_read(&ctx->state_sema);
-
- if (ctx->state == SPU_STATE_SAVED)
- return;
-
- up_read(&ctx->state_sema);
- down_write(&ctx->state_sema);
-
- if (ctx->state == SPU_STATE_RUNNABLE) {
+ spu_acquire(ctx);
+ if (ctx->state != SPU_STATE_SAVED)
spu_deactivate(ctx);
- ctx->state = SPU_STATE_SAVED;
- }
-
- downgrade_write(&ctx->state_sema);
}
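Note: the context.c hunks replace the state_sema rwsem with a plain state_mutex and settle on one locking convention, where the acquire helpers return 0 with the context locked or a negative error with it unlocked. A minimal sketch of a caller written against that convention; the function name is an illustrative assumption, not part of the patch.

/* Illustrative only: a hypothetical caller following the new lock convention. */
static int example_poke_runnable_ctx(struct spu_context *ctx)
{
	int ret;

	ret = spu_acquire_runnable(ctx, 0);
	if (ret)
		return ret;		/* on failure the context is NOT locked */

	/* ctx->state_mutex is held and ctx is runnable here */

	spu_release(ctx);		/* drops ctx->state_mutex */
	return 0;
}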
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index a528020baa18..b00653d69c01 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -103,6 +103,9 @@ static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
offset += vma->vm_pgoff << PAGE_SHIFT;
+ if (offset >= LS_SIZE)
+ return NOPFN_SIGBUS;
+
spu_acquire(ctx);
if (ctx->state == SPU_STATE_SAVED) {
@@ -164,7 +167,7 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
/* error here usually means a signal.. we might want to test
* the error code more precisely though
*/
- ret = spu_acquire_runnable(ctx);
+ ret = spu_acquire_runnable(ctx, 0);
if (ret)
return NOPFN_REFAULT;
@@ -1306,7 +1309,7 @@ static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer,
if (ret)
goto out;
- spu_acquire_runnable(ctx);
+ spu_acquire_runnable(ctx, 0);
if (file->f_flags & O_NONBLOCK) {
ret = ctx->ops->send_mfc_command(ctx, &cmd);
} else {
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 1acc2ffef8c8..353a8fa07ab8 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -133,7 +133,7 @@ out_drop_priv:
spu_mfc_sr1_set(ctx->spu, sr1);
out_unlock:
- spu_release_exclusive(ctx);
+ spu_release(ctx);
out:
return ret;
}
@@ -143,7 +143,7 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
int ret;
unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
- ret = spu_acquire_runnable(ctx);
+ ret = spu_acquire_runnable(ctx, SPU_ACTIVATE_NOWAKE);
if (ret)
return ret;
@@ -155,7 +155,7 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
spu_release(ctx);
ret = spu_setup_isolated(ctx);
if (!ret)
- ret = spu_acquire_runnable(ctx);
+ ret = spu_acquire_runnable(ctx, SPU_ACTIVATE_NOWAKE);
}
/* if userspace has set the runcntrl register (eg, to issue an
@@ -164,8 +164,10 @@ static inline int spu_run_init(struct spu_context *ctx, u32 * npc)
(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
if (runcntl == 0)
runcntl = SPU_RUNCNTL_RUNNABLE;
- } else
+ } else {
+ spu_start_tick(ctx);
ctx->ops->npc_write(ctx, *npc);
+ }
ctx->ops->runcntl_write(ctx, runcntl);
return ret;
@@ -176,6 +178,7 @@ static inline int spu_run_fini(struct spu_context *ctx, u32 * npc,
{
int ret = 0;
+ spu_stop_tick(ctx);
*status = ctx->ops->status_read(ctx);
*npc = ctx->ops->npc_read(ctx);
spu_release(ctx);
@@ -329,8 +332,10 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
}
if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) {
ret = spu_reacquire_runnable(ctx, npc, &status);
- if (ret)
+ if (ret) {
+ spu_stop_tick(ctx);
goto out2;
+ }
continue;
}
ret = spu_process_events(ctx);
@@ -361,4 +366,3 @@ out:
up(&ctx->run_sema);
return ret;
}
-
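Note: run.c now brackets each run with the scheduler tick: spu_run_init() arms it after acquiring the context with SPU_ACTIVATE_NOWAKE, and spu_run_fini() plus the error paths disarm it. A condensed sketch of the intended pairing; the function name is an illustrative assumption, not part of the patch.

/* Illustrative only: condensed pairing of the SCHED_RR tick around one run. */
static int example_run_once(struct spu_context *ctx)
{
	int ret;

	ret = spu_acquire_runnable(ctx, SPU_ACTIVATE_NOWAKE);
	if (ret)
		return ret;

	spu_start_tick(ctx);	/* queues ctx->sched_work after SPU_TIMESLICE (SCHED_RR only) */
	/* ... start the SPU and wait for a stop condition ... */
	spu_stop_tick(ctx);	/* cancel the delayed work before dropping the lock */

	spu_release(ctx);
	return 0;
}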
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index bd6fe4b7a84b..2f25e68b4bac 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -44,17 +44,18 @@
#include <asm/spu_priv1.h>
#include "spufs.h"
-#define SPU_MIN_TIMESLICE (100 * HZ / 1000)
+#define SPU_TIMESLICE (HZ)
-#define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1)
struct spu_prio_array {
- unsigned long bitmap[SPU_BITMAP_SIZE];
- wait_queue_head_t waitq[MAX_PRIO];
+ DECLARE_BITMAP(bitmap, MAX_PRIO);
+ struct list_head runq[MAX_PRIO];
+ spinlock_t runq_lock;
struct list_head active_list[MAX_NUMNODES];
struct mutex active_mutex[MAX_NUMNODES];
};
static struct spu_prio_array *spu_prio;
+static struct workqueue_struct *spu_sched_wq;
static inline int node_allowed(int node)
{
@@ -68,6 +69,64 @@ static inline int node_allowed(int node)
return 1;
}
+void spu_start_tick(struct spu_context *ctx)
+{
+ if (ctx->policy == SCHED_RR)
+ queue_delayed_work(spu_sched_wq, &ctx->sched_work, SPU_TIMESLICE);
+}
+
+void spu_stop_tick(struct spu_context *ctx)
+{
+ if (ctx->policy == SCHED_RR)
+ cancel_delayed_work(&ctx->sched_work);
+}
+
+void spu_sched_tick(struct work_struct *work)
+{
+ struct spu_context *ctx =
+ container_of(work, struct spu_context, sched_work.work);
+ struct spu *spu;
+ int rearm = 1;
+
+ mutex_lock(&ctx->state_mutex);
+ spu = ctx->spu;
+ if (spu) {
+ int best = sched_find_first_bit(spu_prio->bitmap);
+ if (best <= ctx->prio) {
+ spu_deactivate(ctx);
+ rearm = 0;
+ }
+ }
+ mutex_unlock(&ctx->state_mutex);
+
+ if (rearm)
+ spu_start_tick(ctx);
+}
+
+/**
+ * spu_add_to_active_list - add spu to active list
+ * @spu: spu to add to the active list
+ */
+static void spu_add_to_active_list(struct spu *spu)
+{
+ mutex_lock(&spu_prio->active_mutex[spu->node]);
+ list_add_tail(&spu->list, &spu_prio->active_list[spu->node]);
+ mutex_unlock(&spu_prio->active_mutex[spu->node]);
+}
+
+/**
+ * spu_remove_from_active_list - remove spu from active list
+ * @spu: spu to remove from the active list
+ */
+static void spu_remove_from_active_list(struct spu *spu)
+{
+ int node = spu->node;
+
+ mutex_lock(&spu_prio->active_mutex[node]);
+ list_del_init(&spu->list);
+ mutex_unlock(&spu_prio->active_mutex[node]);
+}
+
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;
@@ -94,8 +153,12 @@ int spu_switch_event_unregister(struct notifier_block * n)
return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}
-
-static inline void bind_context(struct spu *spu, struct spu_context *ctx)
+/**
+ * spu_bind_context - bind spu context to physical spu
+ * @spu: physical spu to bind to
+ * @ctx: context to bind
+ */
+static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
spu->number, spu->node);
@@ -104,7 +167,6 @@ static inline void bind_context(struct spu *spu, struct spu_context *ctx)
ctx->spu = spu;
ctx->ops = &spu_hw_ops;
spu->pid = current->pid;
- spu->prio = current->prio;
spu->mm = ctx->owner;
mm_needs_global_tlbie(spu->mm);
spu->ibox_callback = spufs_ibox_callback;
@@ -118,12 +180,21 @@ static inline void bind_context(struct spu *spu, struct spu_context *ctx)
spu->timestamp = jiffies;
spu_cpu_affinity_set(spu, raw_smp_processor_id());
spu_switch_notify(spu, ctx);
+ spu_add_to_active_list(spu);
+ ctx->state = SPU_STATE_RUNNABLE;
}
-static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
+/**
+ * spu_unbind_context - unbind spu context from physical spu
+ * @spu: physical spu to unbind from
+ * @ctx: context to unbind
+ */
+static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
spu->pid, spu->number, spu->node);
+
+ spu_remove_from_active_list(spu);
spu_switch_notify(spu, NULL);
spu_unmap_mappings(ctx);
spu_save(&ctx->csa, spu);
@@ -136,95 +207,98 @@ static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
spu->dma_callback = NULL;
spu->mm = NULL;
spu->pid = 0;
- spu->prio = MAX_PRIO;
ctx->ops = &spu_backing_ops;
ctx->spu = NULL;
spu->flags = 0;
spu->ctx = NULL;
}
-static inline void spu_add_wq(wait_queue_head_t * wq, wait_queue_t * wait,
- int prio)
+/**
+ * spu_add_to_rq - add a context to the runqueue
+ * @ctx: context to add
+ */
+static void spu_add_to_rq(struct spu_context *ctx)
{
- prepare_to_wait_exclusive(wq, wait, TASK_INTERRUPTIBLE);
- set_bit(prio, spu_prio->bitmap);
+ spin_lock(&spu_prio->runq_lock);
+ list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
+ set_bit(ctx->prio, spu_prio->bitmap);
+ spin_unlock(&spu_prio->runq_lock);
}
-static inline void spu_del_wq(wait_queue_head_t * wq, wait_queue_t * wait,
- int prio)
+/**
+ * spu_del_from_rq - remove a context from the runqueue
+ * @ctx: context to remove
+ */
+static void spu_del_from_rq(struct spu_context *ctx)
{
- u64 flags;
-
- __set_current_state(TASK_RUNNING);
-
- spin_lock_irqsave(&wq->lock, flags);
+ spin_lock(&spu_prio->runq_lock);
+ list_del_init(&ctx->rq);
+ if (list_empty(&spu_prio->runq[ctx->prio]))
+ clear_bit(ctx->prio, spu_prio->bitmap);
+ spin_unlock(&spu_prio->runq_lock);
+}
- remove_wait_queue_locked(wq, wait);
- if (list_empty(&wq->task_list))
- clear_bit(prio, spu_prio->bitmap);
+/**
+ * spu_grab_context - remove one context from the runqueue
+ * @prio: priority of the context to be removed
+ *
+ * This function removes one context from the runqueue for priority @prio.
+ * If there is more than one context with the given priority, the first
+ * one on the runqueue will be taken.
+ *
+ * Returns the spu_context it just removed.
+ *
+ * Must be called with spu_prio->runq_lock held.
+ */
+static struct spu_context *spu_grab_context(int prio)
+{
+ struct list_head *rq = &spu_prio->runq[prio];
- spin_unlock_irqrestore(&wq->lock, flags);
+ if (list_empty(rq))
+ return NULL;
+ return list_entry(rq->next, struct spu_context, rq);
}
-static void spu_prio_wait(struct spu_context *ctx, u64 flags)
+static void spu_prio_wait(struct spu_context *ctx)
{
- int prio = current->prio;
- wait_queue_head_t *wq = &spu_prio->waitq[prio];
DEFINE_WAIT(wait);
- if (ctx->spu)
- return;
-
- spu_add_wq(wq, &wait, prio);
-
+ set_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
+ prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
if (!signal_pending(current)) {
- up_write(&ctx->state_sema);
- pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
- current->pid, current->prio);
+ mutex_unlock(&ctx->state_mutex);
schedule();
- down_write(&ctx->state_sema);
+ mutex_lock(&ctx->state_mutex);
}
-
- spu_del_wq(wq, &wait, prio);
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&ctx->stop_wq, &wait);
+ clear_bit(SPU_SCHED_WAKE, &ctx->sched_flags);
}
-static void spu_prio_wakeup(void)
+/**
+ * spu_reschedule - try to find a runnable context for a spu
+ * @spu: spu available
+ *
+ * This function is called whenever a spu becomes idle. It looks for the
+ * most suitable runnable spu context and schedules it for execution.
+ */
+static void spu_reschedule(struct spu *spu)
{
- int best = sched_find_first_bit(spu_prio->bitmap);
- if (best < MAX_PRIO) {
- wait_queue_head_t *wq = &spu_prio->waitq[best];
- wake_up_interruptible_nr(wq, 1);
- }
-}
+ int best;
-static int get_active_spu(struct spu *spu)
-{
- int node = spu->node;
- struct spu *tmp;
- int rc = 0;
+ spu_free(spu);
- mutex_lock(&spu_prio->active_mutex[node]);
- list_for_each_entry(tmp, &spu_prio->active_list[node], list) {
- if (tmp == spu) {
- list_del_init(&spu->list);
- rc = 1;
- break;
- }
+ spin_lock(&spu_prio->runq_lock);
+ best = sched_find_first_bit(spu_prio->bitmap);
+ if (best < MAX_PRIO) {
+ struct spu_context *ctx = spu_grab_context(best);
+ if (ctx && test_bit(SPU_SCHED_WAKE, &ctx->sched_flags))
+ wake_up(&ctx->stop_wq);
}
- mutex_unlock(&spu_prio->active_mutex[node]);
- return rc;
-}
-
-static void put_active_spu(struct spu *spu)
-{
- int node = spu->node;
-
- mutex_lock(&spu_prio->active_mutex[node]);
- list_add_tail(&spu->list, &spu_prio->active_list[node]);
- mutex_unlock(&spu_prio->active_mutex[node]);
+ spin_unlock(&spu_prio->runq_lock);
}
-static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags)
+static struct spu *spu_get_idle(struct spu_context *ctx)
{
struct spu *spu = NULL;
int node = cpu_to_node(raw_smp_processor_id());
@@ -241,87 +315,154 @@ static struct spu *spu_get_idle(struct spu_context *ctx, u64 flags)
return spu;
}
-static inline struct spu *spu_get(struct spu_context *ctx, u64 flags)
+/**
+ * find_victim - find a lower priority context to preempt
+ * @ctx: candidate context for running
+ *
+ * Returns the freed physical spu to run the new context on.
+ */
+static struct spu *find_victim(struct spu_context *ctx)
{
- /* Future: spu_get_idle() if possible,
- * otherwise try to preempt an active
- * context.
+ struct spu_context *victim = NULL;
+ struct spu *spu;
+ int node, n;
+
+ /*
+ * Look for a possible preemption candidate on the local node first.
+ * If there is no candidate look at the other nodes. This isn't
+ * exactly fair, but so far the whole spu scheduler tries to keep
+ * a strong node affinity. We might want to fine-tune this in
+ * the future.
*/
- return spu_get_idle(ctx, flags);
+ restart:
+ node = cpu_to_node(raw_smp_processor_id());
+ for (n = 0; n < MAX_NUMNODES; n++, node++) {
+ node = (node < MAX_NUMNODES) ? node : 0;
+ if (!node_allowed(node))
+ continue;
+
+ mutex_lock(&spu_prio->active_mutex[node]);
+ list_for_each_entry(spu, &spu_prio->active_list[node], list) {
+ struct spu_context *tmp = spu->ctx;
+
+ if (tmp->rt_priority < ctx->rt_priority &&
+ (!victim || tmp->rt_priority < victim->rt_priority))
+ victim = spu->ctx;
+ }
+ mutex_unlock(&spu_prio->active_mutex[node]);
+
+ if (victim) {
+ /*
+ * This nests ctx->state_mutex, but we always lock
+ * higher priority contexts before lower priority
+ * ones, so this is safe until we introduce
+ * priority inheritance schemes.
+ */
+ if (!mutex_trylock(&victim->state_mutex)) {
+ victim = NULL;
+ goto restart;
+ }
+
+ spu = victim->spu;
+ if (!spu) {
+ /*
+ * This race can happen because we've dropped
+ * the active list mutex. Not a problem, just
+ * restart the search.
+ */
+ mutex_unlock(&victim->state_mutex);
+ victim = NULL;
+ goto restart;
+ }
+ spu_unbind_context(spu, victim);
+ mutex_unlock(&victim->state_mutex);
+ return spu;
+ }
+ }
+
+ return NULL;
}
-/* The three externally callable interfaces
- * for the scheduler begin here.
+/**
+ * spu_activate - find a free spu for a context and execute it
+ * @ctx: spu context to schedule
+ * @flags: flags (currently ignored)
*
- * spu_activate - bind a context to SPU, waiting as needed.
- * spu_deactivate - unbind a context from its SPU.
- * spu_yield - yield an SPU if others are waiting.
+ * Tries to find a free spu to run @ctx. If no free spu is available,
+ * add the context to the runqueue so it gets woken up once an spu
+ * is available.
*/
-
-int spu_activate(struct spu_context *ctx, u64 flags)
+int spu_activate(struct spu_context *ctx, unsigned long flags)
{
- struct spu *spu;
- int ret = 0;
- for (;;) {
- if (ctx->spu)
+ if (ctx->spu)
+ return 0;
+
+ do {
+ struct spu *spu;
+
+ spu = spu_get_idle(ctx);
+ /*
+ * If this is a realtime thread we try to get it running by
+ * preempting a lower priority thread.
+ */
+ if (!spu && ctx->rt_priority)
+ spu = find_victim(ctx);
+ if (spu) {
+ spu_bind_context(spu, ctx);
return 0;
- spu = spu_get(ctx, flags);
- if (spu != NULL) {
- if (ctx->spu != NULL) {
- spu_free(spu);
- spu_prio_wakeup();
- break;
- }
- bind_context(spu, ctx);
- put_active_spu(spu);
- break;
}
- spu_prio_wait(ctx, flags);
- if (signal_pending(current)) {
- ret = -ERESTARTSYS;
- spu_prio_wakeup();
- break;
- }
- }
- return ret;
+
+ spu_add_to_rq(ctx);
+ if (!(flags & SPU_ACTIVATE_NOWAKE))
+ spu_prio_wait(ctx);
+ spu_del_from_rq(ctx);
+ } while (!signal_pending(current));
+
+ return -ERESTARTSYS;
}
+/**
+ * spu_deactivate - unbind a context from its physical spu
+ * @ctx: spu context to unbind
+ *
+ * Unbind @ctx from the physical spu it is running on and schedule
+ * the highest priority context to run on the freed physical spu.
+ */
void spu_deactivate(struct spu_context *ctx)
{
- struct spu *spu;
- int needs_idle;
+ struct spu *spu = ctx->spu;
- spu = ctx->spu;
- if (!spu)
- return;
- needs_idle = get_active_spu(spu);
- unbind_context(spu, ctx);
- if (needs_idle) {
- spu_free(spu);
- spu_prio_wakeup();
+ if (spu) {
+ spu_unbind_context(spu, ctx);
+ spu_reschedule(spu);
}
}
+/**
+ * spu_yield - yield a physical spu if others are waiting
+ * @ctx: spu context to yield
+ *
+ * Check if there is a higher priority context waiting and, if so,
+ * unbind @ctx from the physical spu and schedule the highest
+ * priority context to run on the freed physical spu instead.
+ */
void spu_yield(struct spu_context *ctx)
{
struct spu *spu;
int need_yield = 0;
- if (down_write_trylock(&ctx->state_sema)) {
+ if (mutex_trylock(&ctx->state_mutex)) {
if ((spu = ctx->spu) != NULL) {
int best = sched_find_first_bit(spu_prio->bitmap);
if (best < MAX_PRIO) {
pr_debug("%s: yielding SPU %d NODE %d\n",
__FUNCTION__, spu->number, spu->node);
spu_deactivate(ctx);
- ctx->state = SPU_STATE_SAVED;
need_yield = 1;
- } else {
- spu->prio = MAX_PRIO;
}
}
- up_write(&ctx->state_sema);
+ mutex_unlock(&ctx->state_mutex);
}
if (unlikely(need_yield))
yield();
@@ -331,14 +472,19 @@ int __init spu_sched_init(void)
{
int i;
+ spu_sched_wq = create_singlethread_workqueue("spusched");
+ if (!spu_sched_wq)
+ return 1;
+
spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
if (!spu_prio) {
printk(KERN_WARNING "%s: Unable to allocate priority queue.\n",
__FUNCTION__);
+ destroy_workqueue(spu_sched_wq);
return 1;
}
for (i = 0; i < MAX_PRIO; i++) {
- init_waitqueue_head(&spu_prio->waitq[i]);
+ INIT_LIST_HEAD(&spu_prio->runq[i]);
__clear_bit(i, spu_prio->bitmap);
}
__set_bit(MAX_PRIO, spu_prio->bitmap);
@@ -346,6 +492,7 @@ int __init spu_sched_init(void)
mutex_init(&spu_prio->active_mutex[i]);
INIT_LIST_HEAD(&spu_prio->active_list[i]);
}
+ spin_lock_init(&spu_prio->runq_lock);
return 0;
}
@@ -364,4 +511,5 @@ void __exit spu_sched_exit(void)
mutex_unlock(&spu_prio->active_mutex[node]);
}
kfree(spu_prio);
+ destroy_workqueue(spu_sched_wq);
}
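Note: the rewritten sched.c drops the per-priority wait queues in favour of an O(1) run queue: one list per priority plus a bitmap searched with sched_find_first_bit(), all under runq_lock. A minimal sketch of that lookup pattern; the function is an illustrative assumption, spu_reschedule() above is the real consumer.

/* Illustrative only: the bitmap + per-priority list lookup the run queue is built on.
 * Assumes the spu_prio array and struct spu_context from spufs.h. */
static struct spu_context *example_peek_next_ctx(void)
{
	struct spu_context *ctx = NULL;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = sched_find_first_bit(spu_prio->bitmap);	/* lowest numeric prio wins */
	if (best < MAX_PRIO && !list_empty(&spu_prio->runq[best]))
		ctx = list_entry(spu_prio->runq[best].next,
				 struct spu_context, rq);
	spin_unlock(&spu_prio->runq_lock);

	return ctx;
}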
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 0941c56df9b5..0c437891dfd5 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -23,7 +23,7 @@
#define SPUFS_H
#include <linux/kref.h>
-#include <linux/rwsem.h>
+#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
@@ -37,11 +37,13 @@ enum {
};
struct spu_context_ops;
-
-#define SPU_CONTEXT_PREEMPT 0UL
-
struct spu_gang;
+/* ctx->sched_flags */
+enum {
+ SPU_SCHED_WAKE = 0,
+};
+
struct spu_context {
struct spu *spu; /* pointer to a physical SPU */
struct spu_state csa; /* SPU context save area. */
@@ -56,7 +58,7 @@ struct spu_context {
u64 object_id; /* user space pointer for oprofile */
enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
- struct rw_semaphore state_sema;
+ struct mutex state_mutex;
struct semaphore run_sema;
struct mm_struct *owner;
@@ -77,6 +79,14 @@ struct spu_context {
struct list_head gang_list;
struct spu_gang *gang;
+
+ /* scheduler fields */
+ struct list_head rq;
+ struct delayed_work sched_work;
+ unsigned long sched_flags;
+ unsigned long rt_priority;
+ int policy;
+ int prio;
};
struct spu_gang {
@@ -161,6 +171,16 @@ void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx);
void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx);
/* context management */
+static inline void spu_acquire(struct spu_context *ctx)
+{
+ mutex_lock(&ctx->state_mutex);
+}
+
+static inline void spu_release(struct spu_context *ctx)
+{
+ mutex_unlock(&ctx->state_mutex);
+}
+
struct spu_context * alloc_spu_context(struct spu_gang *gang);
void destroy_spu_context(struct kref *kref);
struct spu_context * get_spu_context(struct spu_context *ctx);
@@ -168,20 +188,18 @@ int put_spu_context(struct spu_context *ctx);
void spu_unmap_mappings(struct spu_context *ctx);
void spu_forget(struct spu_context *ctx);
-void spu_acquire(struct spu_context *ctx);
-void spu_release(struct spu_context *ctx);
-int spu_acquire_runnable(struct spu_context *ctx);
+int spu_acquire_runnable(struct spu_context *ctx, unsigned long flags);
void spu_acquire_saved(struct spu_context *ctx);
int spu_acquire_exclusive(struct spu_context *ctx);
-
-static inline void spu_release_exclusive(struct spu_context *ctx)
-{
- up_write(&ctx->state_sema);
-}
-
-int spu_activate(struct spu_context *ctx, u64 flags);
+enum {
+ SPU_ACTIVATE_NOWAKE = 1,
+};
+int spu_activate(struct spu_context *ctx, unsigned long flags);
void spu_deactivate(struct spu_context *ctx);
void spu_yield(struct spu_context *ctx);
+void spu_start_tick(struct spu_context *ctx);
+void spu_stop_tick(struct spu_context *ctx);
+void spu_sched_tick(struct work_struct *work);
int __init spu_sched_init(void);
void __exit spu_sched_exit(void);
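Note: spufs.h turns spu_acquire()/spu_release() into inline mutex wrappers and exports the tick and activate interfaces. As a closing sketch, this is how a spufs attribute that needs the saved register image might use spu_acquire_saved(); the function name and the field read are illustrative assumptions, not part of the patch.

/* Illustrative only: reading a saved-state value through the new helpers. */
static u64 example_saved_reg_get(struct spu_context *ctx)
{
	u64 val;

	spu_acquire_saved(ctx);		/* locks ctx and deactivates it if it was running */
	val = ctx->csa.priv2.spu_lslr_RW;	/* example field from the context save area */
	spu_release(ctx);

	return val;
}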