Diffstat (limited to 'arch/powerpc/platforms/cell')
-rw-r--r--  arch/powerpc/platforms/cell/ras.c              6
-rw-r--r--  arch/powerpc/platforms/cell/smp.c             10
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c     155
-rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c        3
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c     24
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h      1
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sputrace.c  37
7 files changed, 157 insertions, 79 deletions
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index 2a14b052abcd..665af1c4195b 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -21,6 +21,7 @@
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/cell-regs.h>
+#include <asm/kdump.h>
#include "ras.h"
@@ -111,9 +112,8 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order)
int ret = -ENOMEM;
unsigned long addr;
-#ifdef CONFIG_CRASH_DUMP
- rtas_call(ptcal_stop_tok, 1, 1, NULL, nid);
-#endif
+ if (__kdump_flag)
+ rtas_call(ptcal_stop_tok, 1, 1, NULL, nid);
area = kmalloc(sizeof(*area), GFP_KERNEL);
if (!area)
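A minimal userspace sketch of the pattern this hunk adopts: the compile-time #ifdef CONFIG_CRASH_DUMP guard becomes a runtime test of __kdump_flag, so a single kernel image only issues the PTCAL-stop RTAS call when it actually boots as a kdump kernel. kdump_flag, stop_ptcal() and enable_on_node() below are illustrative stand-ins, not the kernel symbols.

#include <stdbool.h>
#include <stdio.h>

static bool kdump_flag;			/* would be set early when booting as a kdump kernel */

static void stop_ptcal(int nid)		/* stands in for the rtas_call() that stops PTCAL */
{
	printf("stopping PTCAL on node %d\n", nid);
}

static void enable_on_node(int nid)
{
	if (kdump_flag)			/* runtime check replaces the old #ifdef */
		stop_ptcal(nid);
	/* ... allocate the PTCAL area and (re)start it here ... */
}

int main(void)
{
	kdump_flag = true;
	enable_on_node(0);
	return 0;
}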
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index efb3964457b1..c0d86e1f56ea 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -54,8 +54,8 @@
#endif
/*
- * The primary thread of each non-boot processor is recorded here before
- * smp init.
+ * The primary thread of each non-boot processor was started from the OF client
+ * interface by prom_hold_cpus and is spinning on secondary_hold_spinloop.
*/
static cpumask_t of_spin_map;
@@ -208,11 +208,7 @@ void __init smp_init_cell(void)
/* Mark threads which are still spinning in hold loops. */
if (cpu_has_feature(CPU_FTR_SMT)) {
for_each_present_cpu(i) {
- if (i % 2 == 0)
- /*
- * Even-numbered logical cpus correspond to
- * primary threads.
- */
+ if (cpu_thread_in_core(i) == 0)
cpu_set(i, of_spin_map);
}
} else {
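The hunk above drops the hard-coded SMT-2 assumption (even logical cpu ids are primary threads) in favour of cpu_thread_in_core(), which keeps working for any thread count. A small sketch of the idea, under the usual assumption that logical cpu ids are numbered linearly within a core; threads_per_core and thread_in_core() are illustrative, not the kernel helpers.

#include <stdio.h>

static int threads_per_core = 4;	/* illustrative: SMT-4; the old code hard-wired 2 */

static int thread_in_core(int cpu)	/* stand-in for cpu_thread_in_core() */
{
	return cpu % threads_per_core;	/* 0 means the primary thread of its core */
}

int main(void)
{
	for (int cpu = 0; cpu < 8; cpu++)
		printf("cpu %d: thread %d%s\n", cpu, thread_in_core(cpu),
		       thread_in_core(cpu) == 0 ? " (primary)" : "");
	return 0;
}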
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 010a51f59796..b73c369cc6f1 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -548,6 +548,11 @@ spufs_regs_read(struct file *file, char __user *buffer,
int ret;
struct spu_context *ctx = file->private_data;
+ /* pre-check for file position: if we'd return EOF, there's no point
+ * causing a deschedule */
+ if (*pos >= sizeof(ctx->csa.lscsa->gprs))
+ return 0;
+
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
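A minimal sketch of the early-EOF check added above: compare the file position against the size of the object before doing any expensive setup (in the patch, before spu_acquire_saved() would force the context off its SPU). The regs[] buffer and sizes below are illustrative.

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static char regs[128 * 16];		/* stand-in for the saved GPR image */

static ssize_t regs_read(char *buf, size_t len, off_t *pos)
{
	if ((size_t)*pos >= sizeof(regs))
		return 0;		/* already at EOF: bail out before any heavy work */
	if (len > sizeof(regs) - (size_t)*pos)
		len = sizeof(regs) - (size_t)*pos;
	memcpy(buf, regs + *pos, len);	/* simple_read_from_buffer() analogue */
	*pos += (off_t)len;
	return (ssize_t)len;
}

int main(void)
{
	char buf[64];
	off_t pos = sizeof(regs);	/* position already past the end */

	printf("read at EOF -> %zd\n", regs_read(buf, sizeof(buf), &pos));
	return 0;
}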
@@ -2426,38 +2431,49 @@ static inline int spufs_switch_log_avail(struct spu_context *ctx)
static int spufs_switch_log_open(struct inode *inode, struct file *file)
{
struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+ int rc;
+
+ rc = spu_acquire(ctx);
+ if (rc)
+ return rc;
- /*
- * We (ab-)use the mapping_lock here because it serves the similar
- * purpose for synchronizing open/close elsewhere. Maybe it should
- * be renamed eventually.
- */
- mutex_lock(&ctx->mapping_lock);
if (ctx->switch_log) {
- spin_lock(&ctx->switch_log->lock);
- ctx->switch_log->head = 0;
- ctx->switch_log->tail = 0;
- spin_unlock(&ctx->switch_log->lock);
- } else {
- /*
- * We allocate the switch log data structures on first open.
- * They will never be free because we assume a context will
- * be traced until it goes away.
- */
- ctx->switch_log = kzalloc(sizeof(struct switch_log) +
- SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
- GFP_KERNEL);
- if (!ctx->switch_log)
- goto out;
- spin_lock_init(&ctx->switch_log->lock);
- init_waitqueue_head(&ctx->switch_log->wait);
+ rc = -EBUSY;
+ goto out;
}
- mutex_unlock(&ctx->mapping_lock);
+
+ ctx->switch_log = kmalloc(sizeof(struct switch_log) +
+ SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
+ GFP_KERNEL);
+
+ if (!ctx->switch_log) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ ctx->switch_log->head = ctx->switch_log->tail = 0;
+ init_waitqueue_head(&ctx->switch_log->wait);
+ rc = 0;
+
+out:
+ spu_release(ctx);
+ return rc;
+}
+
+static int spufs_switch_log_release(struct inode *inode, struct file *file)
+{
+ struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+ int rc;
+
+ rc = spu_acquire(ctx);
+ if (rc)
+ return rc;
+
+ kfree(ctx->switch_log);
+ ctx->switch_log = NULL;
+ spu_release(ctx);
return 0;
- out:
- mutex_unlock(&ctx->mapping_lock);
- return -ENOMEM;
}
static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
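A plain-C sketch (not the spufs code itself) of the open/release pairing introduced above: the switch log is allocated on open under the context lock, a second open fails with -EBUSY while a reader is attached, and release frees the log again. struct ctx and the helper names are illustrative.

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct switch_log { unsigned long head, tail; };

struct ctx {
	pthread_mutex_t lock;		/* plays the role of the context state mutex */
	struct switch_log *switch_log;
};

static int log_open(struct ctx *ctx)
{
	int rc = 0;

	pthread_mutex_lock(&ctx->lock);
	if (ctx->switch_log) {
		rc = -EBUSY;		/* only one reader may hold the log open */
	} else {
		ctx->switch_log = calloc(1, sizeof(*ctx->switch_log));
		if (!ctx->switch_log)
			rc = -ENOMEM;
	}
	pthread_mutex_unlock(&ctx->lock);
	return rc;
}

static int log_release(struct ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	free(ctx->switch_log);		/* the log no longer outlives its reader */
	ctx->switch_log = NULL;
	pthread_mutex_unlock(&ctx->lock);
	return 0;
}

int main(void)
{
	struct ctx ctx = { PTHREAD_MUTEX_INITIALIZER, NULL };

	log_open(&ctx);			/* allocates the log */
	log_open(&ctx);			/* returns -EBUSY */
	log_release(&ctx);		/* frees it again */
	return 0;
}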
@@ -2485,42 +2501,54 @@ static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
if (!buf || len < 0)
return -EINVAL;
+ error = spu_acquire(ctx);
+ if (error)
+ return error;
+
while (cnt < len) {
char tbuf[128];
int width;
- if (file->f_flags & O_NONBLOCK) {
- if (spufs_switch_log_used(ctx) <= 0)
- return cnt ? cnt : -EAGAIN;
- } else {
- /* Wait for data in buffer */
- error = wait_event_interruptible(ctx->switch_log->wait,
- spufs_switch_log_used(ctx) > 0);
- if (error)
+ if (spufs_switch_log_used(ctx) == 0) {
+ if (cnt > 0) {
+ /* If there's data ready to go, we can
+ * just return straight away */
+ break;
+
+ } else if (file->f_flags & O_NONBLOCK) {
+ error = -EAGAIN;
break;
- }
- spin_lock(&ctx->switch_log->lock);
- if (ctx->switch_log->head == ctx->switch_log->tail) {
- /* multiple readers race? */
- spin_unlock(&ctx->switch_log->lock);
- continue;
+ } else {
+ /* spufs_wait will drop the mutex and
+ * re-acquire, but since we're in read(), the
+ * file cannot be _released (and so
+ * ctx->switch_log is stable).
+ */
+ error = spufs_wait(ctx->switch_log->wait,
+ spufs_switch_log_used(ctx) > 0);
+
+ /* On error, spufs_wait returns without the
+ * state mutex held */
+ if (error)
+ return error;
+
+ /* We may have had entries read from underneath
+ * us while we dropped the mutex in spufs_wait,
+ * so re-check */
+ if (spufs_switch_log_used(ctx) == 0)
+ continue;
+ }
}
width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
- if (width < len) {
+ if (width < len)
ctx->switch_log->tail =
(ctx->switch_log->tail + 1) %
SWITCH_LOG_BUFSIZE;
- }
-
- spin_unlock(&ctx->switch_log->lock);
-
- /*
- * If the record is greater than space available return
- * partial buffer (so far)
- */
- if (width >= len)
+ else
+ /* If the record is greater than space available return
+ * partial buffer (so far) */
break;
error = copy_to_user(buf + cnt, tbuf, width);
@@ -2529,6 +2557,8 @@ static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
cnt += width;
}
+ spu_release(ctx);
+
return cnt == 0 ? error : cnt;
}
@@ -2537,29 +2567,41 @@ static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
struct inode *inode = file->f_path.dentry->d_inode;
struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
unsigned int mask = 0;
+ int rc;
poll_wait(file, &ctx->switch_log->wait, wait);
+ rc = spu_acquire(ctx);
+ if (rc)
+ return rc;
+
if (spufs_switch_log_used(ctx) > 0)
mask |= POLLIN;
+ spu_release(ctx);
+
return mask;
}
static const struct file_operations spufs_switch_log_fops = {
- .owner = THIS_MODULE,
- .open = spufs_switch_log_open,
- .read = spufs_switch_log_read,
- .poll = spufs_switch_log_poll,
+ .owner = THIS_MODULE,
+ .open = spufs_switch_log_open,
+ .read = spufs_switch_log_read,
+ .poll = spufs_switch_log_poll,
+ .release = spufs_switch_log_release,
};
+/**
+ * Log a context switch event to a switch log reader.
+ *
+ * Must be called with ctx->state_mutex held.
+ */
void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
u32 type, u32 val)
{
if (!ctx->switch_log)
return;
- spin_lock(&ctx->switch_log->lock);
if (spufs_switch_log_avail(ctx) > 1) {
struct switch_log_entry *p;
@@ -2573,7 +2615,6 @@ void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
ctx->switch_log->head =
(ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
}
- spin_unlock(&ctx->switch_log->lock);
wake_up(&ctx->switch_log->wait);
}
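For reference, a standalone sketch of the head/tail ring buffer these hunks operate on: spu_switch_log_notify() is the producer writing at head, spufs_switch_log_read() the consumer advancing tail, and used/avail are computed modulo the (power-of-two) buffer size. The names and size below are illustrative, not the spufs symbols.

#include <stdio.h>

#define LOG_BUFSIZE 32			/* illustrative; must stay a power of two */

static unsigned long head, tail;	/* both kept in [0, LOG_BUFSIZE) */

static unsigned long log_used(void)
{
	return (head - tail) % LOG_BUFSIZE;	/* entries ready for the reader */
}

static unsigned long log_avail(void)
{
	return LOG_BUFSIZE - log_used();	/* free slots for the producer */
}

static void log_write(void)		/* producer side, cf. spu_switch_log_notify() */
{
	if (log_avail() > 1) {		/* keep one slot spare, as the patch does */
		/* ... fill the entry at buffer[head] ... */
		head = (head + 1) % LOG_BUFSIZE;
	}				/* otherwise the event is dropped */
}

static int log_read(void)		/* consumer side, cf. spufs_switch_log_read() */
{
	if (log_used() == 0)
		return -1;		/* nothing to format yet */
	/* ... format the entry at buffer[tail] ... */
	tail = (tail + 1) % LOG_BUFSIZE;
	return 0;
}

int main(void)
{
	log_write();
	log_write();
	printf("used=%lu avail=%lu\n", log_used(), log_avail());
	while (log_read() == 0)
		;
	return 0;
}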
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index c9bb7cfd3dca..c58bd36b0c5b 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -249,6 +249,7 @@ static int spu_run_fini(struct spu_context *ctx, u32 *npc,
spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
+ spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, *status);
spu_release(ctx);
if (signal_pending(current))
@@ -417,8 +418,6 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
ret = spu_run_fini(ctx, npc, &status);
spu_yield(ctx);
- spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, status);
-
if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
(((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100))
ctx->stats.libassist++;
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 67595bc380dc..2ad914c47493 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -312,6 +312,15 @@ static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
*/
node = cpu_to_node(raw_smp_processor_id());
for (n = 0; n < MAX_NUMNODES; n++, node++) {
+ /*
+ * "available_spus" counts how many spus are not potentially
+ * going to be used by other affinity gangs whose reference
+ * context is already in place. Although this code seeks to
+ * avoid having affinity gangs with a total number of
+ * contexts larger than the number of spus in the node,
+ * this may happen sporadically. In this case, available_spus
+ * becomes negative, which is harmless.
+ */
int available_spus;
node = (node < MAX_NUMNODES) ? node : 0;
@@ -321,12 +330,10 @@ static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
available_spus = 0;
mutex_lock(&cbe_spu_info[node].list_mutex);
list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
- if (spu->ctx && spu->ctx->gang
- && spu->ctx->aff_offset == 0)
- available_spus -=
- (spu->ctx->gang->contexts - 1);
- else
- available_spus++;
+ if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset
+ && spu->ctx->gang->aff_ref_spu)
+ available_spus -= spu->ctx->gang->contexts;
+ available_spus++;
}
if (available_spus < ctx->gang->contexts) {
mutex_unlock(&cbe_spu_info[node].list_mutex);
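A toy walk-through of the new accounting, with illustrative numbers: every SPU in the node contributes +1, and an SPU running the reference context of an already-placed affinity gang additionally subtracts that gang's full size, so 8 SPUs minus a placed gang of 6 contexts leaves available_spus = 2.

#include <stdio.h>

struct toy_spu { int placed_gang_contexts; };	/* >0: runs the reference ctx of a placed gang */

int main(void)
{
	struct toy_spu node[8] = { [2] = { .placed_gang_contexts = 6 } };
	int available_spus = 0, requested = 4;

	for (int i = 0; i < 8; i++) {
		if (node[i].placed_gang_contexts)
			available_spus -= node[i].placed_gang_contexts;
		available_spus++;		/* every spu still counts itself */
	}
	/* 8 SPUs minus the 6 contexts already claimed by the placed gang = 2 */
	printf("available_spus=%d, request for %d contexts %s\n", available_spus,
	       requested, available_spus < requested ? "is rejected" : "fits");
	return 0;
}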
@@ -437,6 +444,11 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
atomic_dec(&cbe_spu_info[spu->node].reserved_spus);
if (ctx->gang)
+ /*
+ * If ctx->gang->aff_sched_count is positive, SPU affinity is
+ * being considered in this gang. Using atomic_dec_if_positive
+ * allows us to skip an explicit check for affinity in this gang.
+ */
atomic_dec_if_positive(&ctx->gang->aff_sched_count);
spu_switch_notify(spu, NULL);
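A non-atomic userspace illustration of the atomic_dec_if_positive() semantics relied on above: the counter is decremented only when the result stays non-negative (the kernel primitive does this atomically and returns the old value minus one), so a gang whose count is already zero is left untouched without a separate affinity check.

#include <stdio.h>

static int dec_if_positive(int *v)	/* non-atomic stand-in for atomic_dec_if_positive() */
{
	int old = *v;

	if (old > 0)
		*v = old - 1;		/* only store when the result is >= 0 */
	return old - 1;			/* mirrors the primitive's return value */
}

int main(void)
{
	int aff_sched_count = 1;

	dec_if_positive(&aff_sched_count);	/* 1 -> 0: affinity was in use */
	dec_if_positive(&aff_sched_count);	/* stays 0: nothing to undo */
	printf("aff_sched_count=%d\n", aff_sched_count);
	return 0;
}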
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 8ae8ef9dfc22..15c62d3ca129 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -65,7 +65,6 @@ enum {
};
struct switch_log {
- spinlock_t lock;
wait_queue_head_t wait;
unsigned long head;
unsigned long tail;
diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.c b/arch/powerpc/platforms/cell/spufs/sputrace.c
index 2ece399f2862..d0b1f3f4d9c8 100644
--- a/arch/powerpc/platforms/cell/spufs/sputrace.c
+++ b/arch/powerpc/platforms/cell/spufs/sputrace.c
@@ -40,6 +40,7 @@ static DECLARE_WAIT_QUEUE_HEAD(sputrace_wait);
static ktime_t sputrace_start;
static unsigned long sputrace_head, sputrace_tail;
static struct sputrace *sputrace_log;
+static int sputrace_logging;
static int sputrace_used(void)
{
@@ -79,6 +80,11 @@ static ssize_t sputrace_read(struct file *file, char __user *buf,
char tbuf[128];
int width;
+ /* If we have data ready to return, don't block waiting
+ * for more */
+ if (cnt > 0 && sputrace_used() == 0)
+ break;
+
error = wait_event_interruptible(sputrace_wait,
sputrace_used() > 0);
if (error)
@@ -109,24 +115,49 @@ static ssize_t sputrace_read(struct file *file, char __user *buf,
static int sputrace_open(struct inode *inode, struct file *file)
{
+ int rc;
+
spin_lock(&sputrace_lock);
+ if (sputrace_logging) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ sputrace_logging = 1;
sputrace_head = sputrace_tail = 0;
sputrace_start = ktime_get();
+ rc = 0;
+
+out:
spin_unlock(&sputrace_lock);
+ return rc;
+}
+static int sputrace_release(struct inode *inode, struct file *file)
+{
+ spin_lock(&sputrace_lock);
+ sputrace_logging = 0;
+ spin_unlock(&sputrace_lock);
return 0;
}
static const struct file_operations sputrace_fops = {
- .owner = THIS_MODULE,
- .open = sputrace_open,
- .read = sputrace_read,
+ .owner = THIS_MODULE,
+ .open = sputrace_open,
+ .read = sputrace_read,
+ .release = sputrace_release,
};
static void sputrace_log_item(const char *name, struct spu_context *ctx,
struct spu *spu)
{
spin_lock(&sputrace_lock);
+
+ if (!sputrace_logging) {
+ spin_unlock(&sputrace_lock);
+ return;
+ }
+
if (sputrace_avail() > 1) {
struct sputrace *t = sputrace_log + sputrace_head;
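A sketch of the gating pattern the sputrace changes introduce: open sets a logging flag under the same lock that protects the ring (failing with -EBUSY if a reader is already attached), release clears it, and the producer path records events only while the flag is set. The names below are illustrative, not the sputrace symbols.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t trace_lock = PTHREAD_MUTEX_INITIALIZER;
static bool logging;			/* plays the role of sputrace_logging */

static int trace_open(void)
{
	int rc = 0;

	pthread_mutex_lock(&trace_lock);
	if (logging)
		rc = -EBUSY;		/* a reader is already attached */
	else
		logging = true;
	pthread_mutex_unlock(&trace_lock);
	return rc;
}

static void trace_release(void)
{
	pthread_mutex_lock(&trace_lock);
	logging = false;		/* producers stop recording immediately */
	pthread_mutex_unlock(&trace_lock);
}

static void trace_log_item(int event)
{
	pthread_mutex_lock(&trace_lock);
	if (logging) {
		/* ... append the event to the ring buffer here ... */
	}
	pthread_mutex_unlock(&trace_lock);
	(void)event;
}

int main(void)
{
	trace_log_item(1);		/* dropped: no reader yet */
	trace_open();
	trace_log_item(2);		/* recorded while the file is open */
	trace_release();
	return 0;
}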