Cerium changeset 1522:027d99ecb50e (draft)
run example/many_task
author | Yuhi TOMARI <yuhi@cr.ie.u-ryukyu.ac.jp> |
---|---|
date | Tue, 13 Nov 2012 16:16:29 +0900 |
parents | 9ae6eedd3ee3 |
children | d232231e1425 |
files | TaskManager/Gpu/GpuScheduler.cc TaskManager/Gpu/GpuThreads.cc TaskManager/kernel/ppe/CpuThreads.cc TaskManager/kernel/schedule/SchedTask.cc TaskManager/test/GpuRunTest/GpuFunc.h example/many_task/gpu/gpu_task_init.cc example/many_task/sort.cc |
diffstat | 7 files changed, 38 insertions(+), 36 deletions(-) |
line diff
--- a/TaskManager/Gpu/GpuScheduler.cc	Mon Nov 12 12:43:12 2012 +0900
+++ b/TaskManager/Gpu/GpuScheduler.cc	Tue Nov 13 16:16:29 2012 +0900
@@ -143,7 +143,6 @@
         reply[cur] = (memaddr)tasklist->waiter;
-        // usleep(10000);
         clFlush(command_queue[cur]); // flush for queued task
         clFinish(command_queue[cur]); // waiting for queued task
         // pipeline : 1-cur
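The deleted usleep(10000) was only papering over queue completion; the clFlush / clFinish pair that follows already guarantees it. A standalone sketch (not Cerium code) of that flush-then-finish pattern on an OpenCL command queue:

```cpp
// Standalone sketch (not Cerium code): the flush-then-finish pattern the hunk
// above relies on.  clFlush() only submits whatever is queued to the device;
// clFinish() blocks until every submitted command has completed, so no extra
// usleep() is needed to wait for queued tasks.
#include <CL/cl.h>      // <OpenCL/opencl.h> on Mac OS X
#include <cstdio>

int main() {
    cl_platform_id platform;
    cl_device_id   device;
    clGetPlatformIDs(1, &platform, NULL);
    clGetDeviceIDs(platform, CL_DEVICE_TYPE_DEFAULT, 1, &device, NULL);

    cl_int err;
    cl_context       context = clCreateContext(NULL, 1, &device, NULL, NULL, &err);
    cl_command_queue queue   = clCreateCommandQueue(context, device, 0, &err);
    if (err != CL_SUCCESS) { fprintf(stderr, "no command queue\n"); return 1; }

    // ... clEnqueueWriteBuffer / clEnqueueNDRangeKernel calls would go here ...

    clFlush(queue);   // push everything queued so far to the device
    clFinish(queue);  // block until all of it has finished

    clReleaseCommandQueue(queue);
    clReleaseContext(context);
    return 0;
}
```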
--- a/TaskManager/Gpu/GpuThreads.cc	Mon Nov 12 12:43:12 2012 +0900
+++ b/TaskManager/Gpu/GpuThreads.cc	Tue Nov 13 16:16:29 2012 +0900
@@ -30,11 +30,11 @@
 void
 GpuThreads::init()
 {
-    args->scheduler = new GpuScheduler();
+    args->scheduler = new GpuScheduler();
     args->useRefDma = use_refdma;
     pthread_create(&threads[0], NULL, &gpu_thread_run, args);
-
+
 }

 void *
@@ -42,10 +42,10 @@
 {
     gpu_thread_arg_t *argt = (gpu_thread_arg_t *) args;
     Scheduler *g_scheduler = argt->scheduler;
-
+
     TaskManagerImpl *manager = new SpeTaskManagerImpl();
     g_scheduler->init(manager, argt->useRefDma);
-
+
     manager->set_scheduler(g_scheduler);
     argt->wait->sem_v();
--- a/TaskManager/kernel/ppe/CpuThreads.cc	Mon Nov 12 12:43:12 2012 +0900
+++ b/TaskManager/kernel/ppe/CpuThreads.cc	Tue Nov 13 16:16:29 2012 +0900
@@ -10,7 +10,7 @@
 #include "SchedNop.h"
 #include "SpeTaskManagerImpl.h"
 #include "CellScheduler.h"
-
+#include <fcntl.h>

 SchedExternTask(ShowTime);
 SchedExternTask(StartProfile);
@@ -40,7 +40,7 @@
     }

     for (int i = 0; i < cpu_num; i++) {
-        delete args[i].scheduler;
+        delete args[i].scheduler;
     }

     delete [] threads;
@@ -65,7 +65,7 @@
     SchedRegister(ShowTime);
     SchedRegister(StartProfile);

-    argt->wait->sem_v();    // +1 each time another thread finishes its setup
+    argt->wait->sem_v();    // +1 each time another thread finishes its setup

     c_scheduler->run(new SchedNop());
     c_scheduler->finish();
@@ -91,12 +91,12 @@
     }

     for (int i = 0; i < cpu_num; i++) {
-        pthread_create(&threads[i], NULL,
-                       &cpu_thread_run, (void*)&args[i]);
+        pthread_create(&threads[i], NULL,
+                       &cpu_thread_run, (void*)&args[i]);
     }

     for (int i = 0; i < cpu_num; i++) {
-        wait->sem_p();
+        wait->sem_p();
     }
 }
@@ -110,7 +110,7 @@
  */
 int
 CpuThreads::get_mail(int cpuid, int count, memaddr *ret)
-{
+{
 #ifdef __CERIUM_GPU__
     if (is_gpu(cpuid)) return gpu->get_mail(cpuid, count, ret);
 #endif
@@ -129,7 +129,7 @@
     } else {
         return 0;   // return 0 when there is no mail
     }
-
+
 }
 /**
  * Inbound Mailbox
--- a/TaskManager/kernel/schedule/SchedTask.cc	Mon Nov 12 12:43:12 2012 +0900
+++ b/TaskManager/kernel/schedule/SchedTask.cc	Tue Nov 13 16:16:29 2012 +0900
@@ -134,7 +134,7 @@
     if (outListData.bound != dout) free(outListData.bound);
 #ifdef TASK_LIST_MAIL
     if ((cur_index->next() >= list->last()) )
-        connector->mail_write(waiter);
+        connector->mail_write(waiter);
 #else
     connector->mail_write(waiter);
 #endif
@@ -147,28 +147,28 @@
     if (cur_index == 0) {
         // the first one
         SchedTask *nextSched = new SchedTask();
-        nextSched->init(list, &list->tasks[0], scheduler, this->tag^1);
-        return nextSched;
+        nextSched->init(list, &list->tasks[0], scheduler, this->tag^1);
+        return nextSched;
     }
     TaskPtr nextTask = cur_index->next();
     if (nextTask < list->last()) {
-        // the task list still has entries, so prepare the next one
+        // the task list still has entries, so prepare the next one
+
+        TaskPtr nextTask = cur_index->next();
-        TaskPtr nextTask = cur_index->next();
-
         SchedTask *nextSched = new SchedTask();
-        nextSched->init(list, nextTask, scheduler, this->tag^1);
-        return nextSched;
+        nextSched->init(list, nextTask, scheduler, this->tag^1);
+        return nextSched;
     } else {
         memaddr nextList = (memaddr)list->next;
         if (nextList == 0) {
-            // nothing left to do
-
+            // nothing left to do
+
             return new SchedNop2Ready(scheduler);
         } else {
-            // start on the new list
-            int dma_tag_switch = 0;
-            return new SchedTaskList(nextList, scheduler, dma_tag_switch);
+            // start on the new list
+            int dma_tag_switch = 0;
+            return new SchedTaskList(nextList, scheduler, dma_tag_switch);
         }
     }
 }
@@ -388,7 +388,7 @@
     return manager->create_task(cmd, __builtin_return_address(0));
 }

-HTaskPtr
+HTaskPtr
 SchedTask::create_task(int cmd, memaddr r, long rs, memaddr w, long ws)
 {
     return manager->create_task(cmd,r,rs,w,ws, __builtin_return_address(0));
@@ -422,12 +422,12 @@
     manager->set_task_cpu(t, cpu);
 }

-void* SchedTask::allocate(int size)
+void* SchedTask::allocate(int size)
 {
     return manager->allocate(size) ;
 }

-void* SchedTask::allocate(int size,int align)
+void* SchedTask::allocate(int size,int align)
 {
     return manager->allocate(size,align) ;
 }
@@ -437,14 +437,14 @@
     manager->polling();
 }

-Scheduler* SchedTask::get_scheduler()
+Scheduler* SchedTask::get_scheduler()
 {
     return scheduler;
 }

 /* system call */

-int
+int
 SchedTask::printf(const char * format, ...)
 {
     va_list ap;
--- a/TaskManager/test/GpuRunTest/GpuFunc.h	Mon Nov 12 12:43:12 2012 +0900
+++ b/TaskManager/test/GpuRunTest/GpuFunc.h	Tue Nov 13 16:16:29 2012 +0900
@@ -1,6 +1,7 @@
 enum {
 #include "SysTasks.h"
+    mogyo,
     Twice,
     // Func1,
 };
--- a/example/many_task/gpu/gpu_task_init.cc	Mon Nov 12 12:43:12 2012 +0900
+++ b/example/many_task/gpu/gpu_task_init.cc	Tue Nov 13 16:16:29 2012 +0900
@@ -7,6 +7,8 @@
 void
 task_init(void)
 {
-    GpuSchedRegister(QUICK_SORT, "gpu/QuickSort.cl", "quick_sort");
+    int a = SortSimple;
+    int b = QUICK_SORT;
     SchedRegister(SortSimple);
+    GpuSchedRegister(QUICK_SORT, "gpu/QuickSort.cl", "quick_sort");
 }
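The reordering makes SchedRegister(SortSimple) run before GpuSchedRegister binds the QUICK_SORT id to the OpenCL kernel in gpu/QuickSort.cl; the added `int a` / `int b` lines only read the two enum values and look like debug leftovers. For orientation, a hedged sketch of how a task registered here is dispatched from the example side; the HTask calls (create_task, set_param, set_cpu, spawn) are inferred from sort.cc and typical Cerium examples, not from this hunk:

```cpp
// Sketch under assumptions: TaskManager, HTaskPtr, memaddr and the set_param /
// set_cpu calls appear elsewhere in this changeset; spawn() is assumed to be
// the usual Cerium call that hands a created task to the scheduler.
void
spawn_quick_sort(TaskManager *manager, int block_num)
{
    HTaskPtr sort = manager->create_task(QUICK_SORT);  // id registered above
    sort->set_param(0, (memaddr)block_num);            // block length, as in sort.cc
    sort->set_cpu(GPU_0);                              // route to the OpenCL device
    sort->spawn();                                     // in/out data omitted for brevity
}
```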
--- a/example/many_task/sort.cc	Mon Nov 12 12:43:12 2012 +0900
+++ b/example/many_task/sort.cc	Tue Nov 13 16:16:29 2012 +0900
@@ -72,7 +72,7 @@
         if (i<s->split_num-2 && s->bsort[i]) {
             s->fsort[i]->wait_for(s->bsort[i]);
         }
-        s->fsort[i]->set_cpu(spe_cpu);
+        s->fsort[i]->set_cpu(GPU_0);
         s->fsort[i]->set_param(0,(memaddr)block_num);
     }
@@ -87,7 +87,7 @@
         if (i>0 && s->bsort[i-1]) {
             s->fsort[i]->wait_for(s->bsort[i-1]);
         }
-        s->fsort[i]->set_cpu(spe_cpu);
+        s->fsort[i]->set_cpu(GPU_0);
         s->fsort[i]->set_param(0,(memaddr)last_block_num);
     }
@@ -99,7 +99,7 @@
                 (memaddr)&s->data[i*block_num+half_block_num], sizeof(Data)*block_num,
                 (memaddr)&s->data[i*block_num+half_block_num], sizeof(Data)*block_num);
         s->bsort[i]->flip();
-        s->bsort[i]->set_cpu(spe_cpu);
+        s->bsort[i]->set_cpu(GPU_0);
         s->bsort[i]->set_param(0,(memaddr)block_num);
     }
@@ -111,7 +111,7 @@
                 (memaddr)&s->data[i*block_num+half_block_num], sizeof(Data)*last_half_block_num,
                 (memaddr)&s->data[i*block_num+half_block_num], sizeof(Data)*last_half_block_num);
         s->bsort[i]->flip();
-        s->bsort[i]->set_cpu(spe_cpu);
+        s->bsort[i]->set_cpu(GPU_0);
         s->bsort[i]->set_param(0,(memaddr)last_half_block_num);
     }
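All four hunks hard-code GPU_0, so the fsort/bsort tasks always go to the OpenCL device even in a CPU-only run. A hypothetical fragment (not part of this changeset) that would keep the previous spe_cpu routing behind the __CERIUM_GPU__ guard already used in CpuThreads.cc:

```cpp
// Hypothetical alternative, not in the commit: choose the device per build.
#ifdef __CERIUM_GPU__
        s->fsort[i]->set_cpu(GPU_0);    // this commit: always the OpenCL device
#else
        s->fsort[i]->set_cpu(spe_cpu);  // previous behaviour: worker chosen at startup
#endif
```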