path: root/arch/powerpc
author		Arnd Bergmann	2007-07-20 21:39:49 +0200
committer	Arnd Bergmann	2007-07-20 21:42:18 +0200
commit		cbc23d3e7cb3c9fd3c9fce0bc3f44f687a9517c0 (patch)
tree		4adcbbfa5402e46c816788cd86f992082d2a6f61 /arch/powerpc
parent		c5fc8d2a92461fcabd00dfd678204cba36b93119 (diff)
[CELL] spufs: integration of SPE affinity with the scheduler
This patch makes the scheduler honor affinity information for each context being scheduled. If the context has no affinity information, behaviour is unchanged. If affinity information is present, the context is scheduled to run on the exact SPU recommended by the affinity placement algorithm.

Signed-off-by: Andre Detsch <adetsch@br.ibm.com>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
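
As a minimal illustration (not part of the patch), the flow introduced here looks roughly like the sketch below; pick_spu_for() and spu_get_idle_fallback() are hypothetical names standing in for the surrounding spu_get_idle() logic, while affinity_check() and spu_alloc_spu() are the helpers actually used by this change:

/*
 * Illustrative sketch only: the scheduler consults the affinity
 * placement result before falling back to the per-node scan.
 * pick_spu_for() and spu_get_idle_fallback() are hypothetical names;
 * affinity_check() and spu_alloc_spu() come from this patch.
 */
static struct spu *pick_spu_for(struct spu_context *ctx)
{
	struct spu *spu;

	/* Ask the affinity placement algorithm for a specific SPU. */
	spu = affinity_check(ctx);
	if (spu)
		/* Take exactly that SPU off its node's free list. */
		return spu_alloc_spu(spu);

	/* No affinity information: behaviour is unchanged. */
	return spu_get_idle_fallback(ctx);
}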
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/platforms/cell/spu_base.c	19
-rw-r--r--	arch/powerpc/platforms/cell/spufs/sched.c	4
2 files changed, 23 insertions, 0 deletions
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 75b5af0a7e21..5f399313b472 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -425,6 +425,25 @@ static void spu_init_channels(struct spu *spu)
}
}
+struct spu *spu_alloc_spu(struct spu *req_spu)
+{
+ struct spu *spu, *ret = NULL;
+
+ mutex_lock(&spu_mutex);
+ list_for_each_entry(spu, &cbe_spu_info[req_spu->node].free_spus, list) {
+ if (spu == req_spu) {
+ list_del_init(&spu->list);
+ pr_debug("Got SPU %d %d\n", spu->number, spu->node);
+ spu_init_channels(spu);
+ ret = spu;
+ break;
+ }
+ }
+ mutex_unlock(&spu_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(spu_alloc_spu);
+
struct spu *spu_alloc_node(int node)
{
struct spu *spu = NULL;
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index a9569de4c141..49b8f6867a96 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -507,6 +507,10 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
int node = cpu_to_node(raw_smp_processor_id());
int n;
+ spu = affinity_check(ctx);
+ if (spu)
+ return spu_alloc_spu(spu);
+
for (n = 0; n < MAX_NUMNODES; n++, node++) {
node = (node < MAX_NUMNODES) ? node : 0;
if (!node_allowed(ctx, node))