changeset 1192:6147dd81b4c7 draft
merge
author:    Daichi TOMA <e085740@ie.u-ryukyu.ac.jp>
date:      Fri, 08 Jul 2011 19:38:09 +0900
parents:   bd4a27ae2524 (diff) 5abf0ce8c71c (current diff)
children:  ae3ca6ee94e5
files:     TaskManager/Makefile.def
diffstat:  35 files changed, 812 insertions(+), 388 deletions(-)
--- a/Renderer/Test/ball_bound.cc	Wed Jul 06 21:05:06 2011 +0900
+++ b/Renderer/Test/ball_bound.cc	Fri Jul 08 19:38:09 2011 +0900
@@ -52,7 +52,7 @@
     }
 }
 
-static int time = 0;
+static int time_val = 0;
 
 static void
 ball_move_idle(SceneGraphPtr node, void *sgroot_, int screen_w, int screen_h)
@@ -62,19 +62,19 @@
 
     if (pad->circle.isPush()) {
         node->set_move_collision(ball_move_idle2, ball_collision_idle);
-        time = 0;
+        time_val = 0;
     }
 
-    time++;
+    time_val++;
 
-    if (time > 90) {
+    if (time_val > 90) {
         float w = (float)random();
         w = fmodf(w, screen_w - ball_radius*2);
         node->xyz[0] = w + ball_radius;
         node->xyz[1] = h0;
         node->set_move_collision(ball_move, ball_collision);
-        time = 0;
+        time_val = 0;
     }
 }
--- a/TaskManager/Cell/CellTaskManagerImpl.cc	Wed Jul 06 21:05:06 2011 +0900
+++ b/TaskManager/Cell/CellTaskManagerImpl.cc	Fri Jul 08 19:38:09 2011 +0900
@@ -11,68 +11,63 @@
 #include "types.h"
 #include "SysFunc.h"
 
-static void send_alloc_reply(CellTaskManagerImpl *tm, int id, SpeThreads *speThreads);
+static void send_alloc_reply(CellTaskManagerImpl *tm, int id,
+        Threads *speThreads);
 
-CellTaskManagerImpl::~CellTaskManagerImpl()
-{
+CellTaskManagerImpl::~CellTaskManagerImpl() {
+    delete speThreads;
+    delete[] speTaskList;
 
-    delete speThreads;
-    delete [] speTaskList;
-
-    delete ppeManager;
+    delete ppeManager;
 }
 
-void
-CellTaskManagerImpl::init(int spuIdle_)
-{
-    spe_running = 0;
-    spuIdle = spuIdle_;
-
-    speThreads = new SpeThreads(machineNum);
-    speThreads->init();
+void CellTaskManagerImpl::init(int spuIdle_) {
+    spe_running = 0;
+    spuIdle = spuIdle_;
 
-    // double buffer for the pipeline of Tasks to be executed
-    speTaskList = new QueueInfo<TaskList>*[machineNum]; // Tasks running on the spe
-    taskListInfo = new QueueInfo<TaskList>*[machineNum]; // Tasks that run next
+    //speThreads = new SpeThreads(machineNum);
+    //speThreads->init();
+
+    // double buffer for the pipeline of Tasks to be executed
+    speTaskList = new QueueInfo<TaskList>*[machineNum]; // Tasks running on the spe
+    taskListInfo = new QueueInfo<TaskList>*[machineNum]; // Tasks that run next
 
-    for (int i = 0; i < machineNum; i++) {
-        taskListInfo[i] = new QueueInfo<TaskList>(taskListPool);
-        speTaskList[i] = new QueueInfo<TaskList>(taskListPool);
-    }
+    for (int i = 0; i < machineNum; i++) {
+        taskListInfo[i] = new QueueInfo<TaskList> (taskListPool);
+        speTaskList[i] = new QueueInfo<TaskList> (taskListPool);
+    }
 
-    // the Manager that handles the PPE side
-    ppeManager = new FifoTaskManagerImpl(machineNum);
-    // most of the TaskQueueInfo is shared
-    MainScheduler *mscheduler = new MainScheduler;
-    set_scheduler(mscheduler);
-    ppeManager->init(mscheduler, this); // the HTaskInfo is shared here
+    // the Manager that handles the PPE side
+    ppeManager = new FifoTaskManagerImpl(machineNum);
+    // most of the TaskQueueInfo is shared
+    MainScheduler *mscheduler = new MainScheduler;
+    set_scheduler(mscheduler);
+    ppeManager->init(mscheduler, this); // the HTaskInfo is shared here
 
-    // list of runnable HTasks, shared with the FifoTaskManager
-    activeTaskQueue = ppeManager->activeTaskQueue;
-    // factory for HTask; any HTaskInfo will do
-    htaskImpl = activeTaskQueue ; // any HTaskInfo
+    // list of runnable HTasks, shared with the FifoTaskManager
+    activeTaskQueue = ppeManager->activeTaskQueue;
+    // factory for HTask; any HTaskInfo will do
+    htaskImpl = activeTaskQueue; // any HTaskInfo
+
-
-    ppeManager->get_scheduler()->set_manager(this);
+    ppeManager->get_scheduler()->set_manager(this);
 
-    // the TaskManager needed when calling Manager->task_create() etc. from inside a Task.
-    // at the moment this only works from the ppe side;
-    // Tasks cannot be created from the spe side
-    schedTaskManager = new SchedTask();
-    schedTaskManager->init(0,0,0,ppeManager->get_scheduler(),0);
-    ppeManager->schedTaskManager = schedTaskManager;
+    // the TaskManager needed when calling Manager->task_create() etc. from inside a Task.
+    // at the moment this only works from the ppe side;
+    // Tasks cannot be created from the spe side
+    schedTaskManager = new SchedTask();
+    schedTaskManager->init(0, 0, 0, ppeManager->get_scheduler(), 0);
+    ppeManager->schedTaskManager = schedTaskManager;
 }
 
-void
-CellTaskManagerImpl::append_activeTask(HTaskPtr task)
-{
-    if (task->cpu_type == CPU_PPE) {
-        ppeManager->append_activeTask(task);
-    } else {
-        activeTaskQueue->addLast(task);
-    }
+void CellTaskManagerImpl::append_activeTask(HTaskPtr task) {
+    if (task->cpu_type == CPU_PPE) {
+        ppeManager->append_activeTask(task);
+    } else {
+        activeTaskQueue->addLast(task);
+    }
 }
 
 // when SPE_ANY was specified
@@ -86,251 +81,231 @@
  * the activeTaskQueue here differs in meaning from FifoTaskManagerImpl's:
  * it holds only the Tasks that are handed to the spe
 */
-void
-CellTaskManagerImpl::set_runTaskList(QueueInfo<HTask> *activeTaskQueue)
-{
-    int speid;
-    HTaskPtr htask = activeTaskQueue->getFirst();
-    while (htask != NULL) {
+void CellTaskManagerImpl::set_runTaskList(QueueInfo<HTask> *activeTaskQueue) {
+    int speid;
+    HTaskPtr htask = activeTaskQueue->getFirst();
+    while (htask != NULL) {
 
-        if (htask->cpu_type == CPU_PPE) {
+        if (htask->cpu_type == CPU_PPE) {
+
+            htask = activeTaskQueue->getNext(htask);
 
-            htask = activeTaskQueue->getNext(htask);
-
-        } else{
-            if (htask->cpu_type == SPE_ANY) {
-                speid = cur_anySpeid++;
-            } else {
-                // the -1 is here because
-                // with htask->cpu_type - CPU_SPE,
-                // SPE0 = 1, SPE1 = 2, ... SPE5 = 6, so
-                // we subtract one to index array-style (SPE0 = arr[0], SPE1 = arr[1])
-                speid = htask->cpu_type - CPU_SPE - 1;
+        } else {
+            if (htask->cpu_type == SPE_ANY) {
+                speid = cur_anySpeid++;
+            } else {
+                // the -1 is here because
+                // with htask->cpu_type - CPU_SPE,
+                // SPE0 = 1, SPE1 = 2, ... SPE5 = 6, so
+                // we subtract one to index array-style (SPE0 = arr[0], SPE1 = arr[1])
+                speid = htask->cpu_type - CPU_SPE - 1;
+            }
+
+            speid %= machineNum;
+            set_taskList(htask, taskListInfo[speid]);
+
+            HTaskPtr next = activeTaskQueue->getNext(htask);
+            activeTaskQueue->remove(htask);
+            htask = next;
+
+        }
     }
-
-            speid %= machineNum;
-            set_taskList(htask, taskListInfo[speid]);
-
-            HTaskPtr next = activeTaskQueue->getNext(htask);
-            activeTaskQueue->remove(htask);
-            htask = next;
-
-        }
-    }
 }
 
-void
-CellTaskManagerImpl::sendTaskList()
-{
-    for (int id = 0; id < machineNum; id++) {
-        mail_check(id);
-        if (!speTaskList[id]->empty()) {
-            continue; // still running
+void CellTaskManagerImpl::sendTaskList() {
+    for (int id = 0; id < machineNum; id++) {
+        mail_check(id);
+        if (!speTaskList[id]->empty()) {
+            continue; // still running
+        }
+        if (!taskListInfo[id]->empty()) {
+            // prepare the TaskList to send to the SPE
+            send_taskList(id);
+            spe_running++;
+        }
     }
-        if (! taskListInfo[id]->empty() ) {
-            // prepare the TaskList to send to the SPE
-            send_taskList(id);
-            spe_running++;
-        }
-    }
 }
 
-void
-CellTaskManagerImpl::poll()
-{
-    set_runTaskList(activeTaskQueue);
-    // send a TaskList to the SPEs waiting for one
-    sendTaskList();
+void CellTaskManagerImpl::poll() {
+    set_runTaskList(activeTaskQueue);
+    // send a TaskList to the SPEs waiting for one
+    sendTaskList();
 }
 
-
-void
-CellTaskManagerImpl::debug_check_spe_idle(QueueInfo<HTask> * activeTaskQueue, int spe_running_)
-{
-    printf("spu_idle! spe_running = %d : activeTaskQueue->length = %d \n"
-           , spe_running_, activeTaskQueue->length());
+void CellTaskManagerImpl::debug_check_spe_idle(
+        QueueInfo<HTask> * activeTaskQueue, int spe_running_) {
+    printf("spu_idle! spe_running = %d : activeTaskQueue->length = %d \n",
+            spe_running_, activeTaskQueue->length());
 	HTaskPtr task = activeTaskQueue->getFirst();
 	int tmp_i = 0;
 	do {
-		printf("task_name = %s ,",ppeManager->get_task_name(task));
+		printf("task_name = %s ,", ppeManager->get_task_name(task));
 		printf("cpu = [%d], count = %d", task->cpu_type, tmp_i);
 		tmp_i++;
 	} while ((task = activeTaskQueue->getNext(task)) != 0);
 	printf("\n");
 }
 
-void
-CellTaskManagerImpl::run()
-{
-    int spu_limit = spuIdle;
-    if (machineNum==0) {
-        ppeManager->run();
-        return;
-    }
-
-    do {
+void CellTaskManagerImpl::run() {
+    int spu_limit = spuIdle;
+    if (machineNum == 0) {
+        ppeManager->run();
+        return;
+    }
+
+    do {
 
-        // PPE side
-        ppeManager->poll();
-        // SPE side
-        do {
-            poll();
-        } while (ppeManager->activeTaskQueue->empty() && spe_running >0 );
-
-        if (spe_running < spu_limit) {
-            debug_check_spe_idle(ppeManager->activeTaskQueue, spe_running);
-        }
-
-    } while (!ppeManager->activeTaskQueue->empty() ||
-             !activeTaskQueue->empty() ||
-             spe_running >0);
-    if (!waitTaskQueue->empty()) {
-        show_dead_lock_info();
-    }
-
+        // PPE side
+        ppeManager->poll();
+        // SPE side
+        do {
+            poll();
+        } while (ppeManager->activeTaskQueue->empty() && spe_running > 0);
+
+        if (spe_running < spu_limit) {
+            debug_check_spe_idle(ppeManager->activeTaskQueue, spe_running);
+        }
+
+    } while (!ppeManager->activeTaskQueue->empty() || !activeTaskQueue->empty()
+            || spe_running > 0);
+    if (!waitTaskQueue->empty()) {
+        show_dead_lock_info();
+    }
+
 }
 
-
-static void
-loop_check(HTask *p,HTask *me, int depth)
-{
-    if (p==me) printf("*%lx ",(long)p); // loop
-    if (depth==0) return;
-    QueueInfo<TaskQueue> *w = p->wait_i;
-    if (w) {
-        for( TaskQueue *q = w->getFirst(); q; q = w->getNext(q)) {
-            loop_check(q->task,me, depth-1);
+static void loop_check(HTask *p, HTask *me, int depth) {
+    if (p == me)
+        printf("*%lx ", (long) p); // loop
+    if (depth == 0)
+        return;
+    QueueInfo<TaskQueue> *w = p->wait_i;
+    if (w) {
+        for (TaskQueue *q = w->getFirst(); q; q = w->getNext(q)) {
+            loop_check(q->task, me, depth - 1);
+        }
     }
-    }
 }
 
-void
-CellTaskManagerImpl::show_dead_lock_info()
-{
-    get_scheduler()-> printf("Dead lock detected\n ppe queue %d\n",
-                             ppeManager->activeTaskQueue->length());
-    // if I recall, the waitQueue is shared...
-    // get_scheduler()-> printf(" wait queue %d\n",ppeManager->waitTaskQueue->length());
-    get_scheduler()-> printf(" wait queue %d\n",waitTaskQueue->length());
-    for( HTask *p = waitTaskQueue->getFirst(); p; p = waitTaskQueue->getNext(p)) {
-        printf("  Waiting task%d %lx",p->command, (long)p);
-        QueueInfo<TaskQueue> *w = p->wait_i;
-        if (w) {
-            for( TaskQueue *q = w->getFirst(); q; q = w->getNext(q)) {
-                printf(" waiting task%d %lx",q->task->command, (long)q->task);
-                if (!waitTaskQueue->find(q->task)) {
-                    printf("!"); // stray task
+void CellTaskManagerImpl::show_dead_lock_info() {
+    get_scheduler()-> printf("Dead lock detected\n ppe queue %d\n",
+            ppeManager->activeTaskQueue->length());
+    // if I recall, the waitQueue is shared...
+    // get_scheduler()-> printf(" wait queue %d\n",ppeManager->waitTaskQueue->length());
+    get_scheduler()-> printf(" wait queue %d\n", waitTaskQueue->length());
+    for (HTask *p = waitTaskQueue->getFirst(); p; p = waitTaskQueue->getNext(p)) {
+        printf("  Waiting task%d %lx", p->command, (long) p);
+        QueueInfo<TaskQueue> *w = p->wait_i;
+        if (w) {
+            for (TaskQueue *q = w->getFirst(); q; q = w->getNext(q)) {
+                printf(" waiting task%d %lx", q->task->command,
+                        (long) q->task);
+                if (!waitTaskQueue->find(q->task)) {
+                    printf("!"); // stray task
+                }
+                loop_check(q->task, p, 10);
+            }
         }
-                loop_check(q->task,p, 10);
-            }
+        printf("\n");
     }
-        printf("\n");
-    }
-    get_scheduler()-> printf(" spe queue %d\n",activeTaskQueue->length());
-    for (int i = 0; i < machineNum; i++) {
-        get_scheduler()-> printf(" spe %d send %d wait %d\n",i,
-                                 speTaskList[i]->length(), taskListInfo[i]->length());
-    }
+    get_scheduler()-> printf(" spe queue %d\n", activeTaskQueue->length());
+    for (int i = 0; i < machineNum; i++) {
+        get_scheduler()-> printf(" spe %d send %d wait %d\n", i,
+                speTaskList[i]->length(), taskListInfo[i]->length());
+    }
 }
 
 /**
  * check mail from the SPEs
 */
-void
-CellTaskManagerImpl::mail_check(int id)
-{
-    memaddr data;
+void CellTaskManagerImpl::mail_check(int id) {
+    memaddr data;
 
-    // mail check from the SPE Scheduler
-    while (speThreads->has_mail(id, 1, &data)) {
-        if (data == (memaddr)MY_SPE_STATUS_READY) {
-            // MY_SPE_STATUS_READY: all Tasks the SPE held have finished
-            // restore the circular list before calling freeAll
-            speTaskList[id]->getLast()->next = speTaskList[id];
-            speTaskList[id]->freeAll();
-            spe_running--;
-//          printf("SPE %d status ready, %d running\n",id, spe_running);
-        } else if (data == (memaddr)MY_SPE_COMMAND_MALLOC) {
-            // MY_SPE_COMMAND_MALLOC: main memory request from the SPE
-            send_alloc_reply(this, id, speThreads);
-        } else if (data > (memaddr)MY_SPE_NOP) {
+    // mail check from the SPE Scheduler
+    while (speThreads->has_mail(id, 1, &data)) {
+        if (data == (memaddr) MY_SPE_STATUS_READY) {
+            // MY_SPE_STATUS_READY: all Tasks the SPE held have finished
+            // restore the circular list before calling freeAll
+            speTaskList[id]->getLast()->next = speTaskList[id];
+            speTaskList[id]->freeAll();
+            spe_running--;
+            // printf("SPE %d status ready, %d running\n",id, spe_running);
+        } else if (data == (memaddr) MY_SPE_COMMAND_MALLOC) {
+            // MY_SPE_COMMAND_MALLOC: main memory request from the SPE
+            send_alloc_reply(this, id, speThreads);
+        } else if (data > (memaddr) MY_SPE_NOP) {
 #ifdef TASK_LIST_MAIL
-            TaskListPtr list = (TaskListPtr)data;
-            check_task_list_finish(schedTaskManager, list, waitTaskQueue);
+            TaskListPtr list = (TaskListPtr)data;
+            check_task_list_finish(schedTaskManager, list, waitTaskQueue);
#else
-            // the finished task (an address, since it lives on the PPE)
-            HTaskPtr task = (HTaskPtr)data;
+            // the finished task (an address, since it lives on the PPE)
+            HTaskPtr task = (HTaskPtr) data;
 #if 0
-            if (task->cpu_type != CPU_SPE) {
-                const char *name = get_task_name(task);
-                if (name != NULL) {
-                    printf("[SPE] ");
-                    printf("Task id : %d, ", task->command);
-                    printf("Task name : %s\n", name);
-                }
-            }
+            if (task->cpu_type != CPU_SPE) {
+                const char *name = get_task_name(task);
+                if (name != NULL) {
+                    printf("[SPE] ");
+                    printf("Task id : %d, ", task->command);
+                    printf("Task name : %s\n", name);
+                }
+            }
 #endif
 #ifndef NOT_CHECK
-
-            if (task != NULL) {
-                // a Task processed on the SPE should come back here;
-                // if its type happens to be PPE...
-                if (task->cpu_type == CPU_PPE) {
-                    printf("attention : PPE task run on SPE\n");
-                    printf("Task id : %d\n", task->command);
-                    const char *name = get_task_name(task);
-                    if (name != NULL) {
-                        printf("Task name : %s\n", name);
-                    }
-                }
-            }
+            if (task != NULL) {
+                // a Task processed on the SPE should come back here;
+                // if its type happens to be PPE...
+                if (task->cpu_type == CPU_PPE) {
+                    printf("attention : PPE task run on SPE\n");
+                    printf("Task id : %d\n", task->command);
+                    const char *name = get_task_name(task);
+                    if (name != NULL) {
+                        printf("Task name : %s\n", name);
+                    }
+                }
+            }
 #endif
-
-            task->post_func(schedTaskManager, task->post_arg1, task->post_arg2);
-            check_task_finish(task, waitTaskQueue);
+
+            task->post_func(schedTaskManager, task->post_arg1, task->post_arg2);
+            check_task_finish(task, waitTaskQueue);
 #endif
+        }
+        // MY_SPE_NOP: a command with no particular meaning
     }
-        // MY_SPE_NOP: a command with no particular meaning
-    }
 }
 
-void
-CellTaskManagerImpl::polling()
-{
-    // may call recursively check_task_list_finish()
-    // we need fifo here
-    for (int i = 0; i < machineNum; i++) {
-        mail_check(i);
-    }
+void CellTaskManagerImpl::polling() {
+    // may call recursively check_task_list_finish()
+    // we need fifo here
+    for (int i = 0; i < machineNum; i++) {
+        mail_check(i);
+    }
 }
 
-static void
-send_alloc_reply(CellTaskManagerImpl *tm, int id, SpeThreads *speThreads)
-{
+static void send_alloc_reply(CellTaskManagerImpl *tm, int id,
+        Threads *speThreads) {
+
+    /**
+     * info[0] = alloc_id; (see CellScheduler::mainMem_alloc)
+     * info[1] = alloc_addr;
+     */
+    memaddr alloc_info[2];
+    long alloc_size;
+    long command;
 
-    /**
-     * info[0] = alloc_id; (see CellScheduler::mainMem_alloc)
-     * info[1] = alloc_addr;
-     */
-    memaddr alloc_info[2];
-    long alloc_size;
-    long command;
-
-    speThreads->get_mail(id, 2, alloc_info);
-    command = (long)alloc_info[0];
-    alloc_size = (long)alloc_info[1];
+    speThreads->get_mail(id, 2, alloc_info);
+    command = (long) alloc_info[0];
+    alloc_size = (long) alloc_info[1];
 
-
-    alloc_info[1] = (memaddr)tm->allocate(alloc_size);
-    //__debug_ppe("[PPE] MALLOCED 0x%lx from [SPE %d]\n", alloc_info[1],id);
-    // nothing is done with this for now. Apparently freeing this allocation
-    // is done by looking at the value the SPE task returns; isn't that
-    // easy to forget?
-    speThreads->add_output_tasklist(command, alloc_info[1], alloc_size);
+    alloc_info[1] = (memaddr) tm->allocate(alloc_size);
+    //__debug_ppe("[PPE] MALLOCED 0x%lx from [SPE %d]\n", alloc_info[1],id);
+    // nothing is done with this for now. Apparently freeing this allocation
+    // is done by looking at the value the SPE task returns; isn't that
+    // easy to forget?
+    speThreads->add_output_tasklist(command, alloc_info[1], alloc_size);
 
-    speThreads->send_mail(id, 2, alloc_info);
+    speThreads->send_mail(id, 2, alloc_info);
 }
 
 /**
@@ -341,52 +316,49 @@
 * swap the speTaskList buffer that has finished executing on the SPE
 * with the taskListInfo buffer that is about to execute
 */
-void
-CellTaskManagerImpl::send_taskList(int id)
-{
-    // speTaskList is the List of ppe Tasks that have finished running.
-    // taskListInfo is the List of Tasks about to run.
-    // swap them and execute
-    QueueInfo<TaskList> *tmp = taskListInfo[id];
-    taskListInfo[id] = speTaskList[id];
-    speTaskList[id] = tmp;
+void CellTaskManagerImpl::send_taskList(int id) {
+    // speTaskList is the List of ppe Tasks that have finished running.
+    // taskListInfo is the List of Tasks about to run.
+    // swap them and execute
+    QueueInfo<TaskList> *tmp = taskListInfo[id];
+    taskListInfo[id] = speTaskList[id];
+    speTaskList[id] = tmp;
 
-    // speTaskList is normally a circular list, but while running it is linear.
-    // it is restored once the spe's Tasks finish.
-    tmp->getLast()->next = 0;
-    TaskListPtr p = tmp->getFirst();
-//  printf("SPE %d task list sending\n",id);
-    speThreads->send_mail(id, 1, (memaddr *)&p);
-//  printf("SPE %d task list sent\n",id);
+    // speTaskList is normally a circular list, but while running it is linear.
+    // it is restored once the spe's Tasks finish.
+    tmp->getLast()->next = 0;
+    TaskListPtr p = tmp->getFirst();
+    // printf("SPE %d task list sending\n",id);
+    speThreads->send_mail(id, 1, (memaddr *) &p);
+    // printf("SPE %d task list sent\n",id);
 }
 
 void CellTaskManagerImpl::show_profile() {
-    for (int id = 0; id < machineNum; id++) {
-        HTaskPtr t = schedTaskManager->create_task(ShowTime,0,0,0,0);
-        t->set_cpu((CPU_TYPE)(id+2));
-        t->spawn();
-    }
+    for (int id = 0; id < machineNum; id++) {
+        HTaskPtr t = schedTaskManager->create_task(ShowTime, 0, 0, 0, 0);
+        t->set_cpu((CPU_TYPE) (id + 2));
+        t->spawn();
+    }
 }
 
 void CellTaskManagerImpl::start_profile() {
-    for (int id = 0; id < machineNum; id++) {
-        HTaskPtr t = schedTaskManager->create_task(StartProfile,0,0,0,0);
-        t->set_cpu((CPU_TYPE)(id+2));
-        t->spawn();
-    }
+    for (int id = 0; id < machineNum; id++) {
+        HTaskPtr t = schedTaskManager->create_task(StartProfile, 0, 0, 0, 0);
+        t->set_cpu((CPU_TYPE) (id + 2));
+        t->spawn();
+    }
 }
 
-void CellTaskManagerImpl::print_arch()
-{
-    printf("CellTaskManager\n");
+void CellTaskManagerImpl::print_arch() {
+    printf("CellTaskManager\n");
 }
 
-
 #ifdef __CERIUM_CELL__
 TaskManagerImpl*
 create_impl(int num)
 {
-    return new CellTaskManagerImpl(num);
+    Threads *cpus = new SpeThreads(num);
+    cpus->init();
+    return new CellTaskManagerImpl(num,cpus);
 }
 #endif // __CERIUM_CELL
--- a/TaskManager/Cell/CellTaskManagerImpl.h	Wed Jul 06 21:05:06 2011 +0900
+++ b/TaskManager/Cell/CellTaskManagerImpl.h	Fri Jul 08 19:38:09 2011 +0900
@@ -3,19 +3,23 @@
 
 #include "TaskManagerImpl.h"
 #include "FifoTaskManagerImpl.h"
+#ifdef __CERIUM_CELL__
 #include "SpeThreads.h"
+#else
+#include "CpuThreads.h"
+#endif
 
 class CellTaskManagerImpl : public TaskManagerImpl {
 public:
     /* constructor */
-    CellTaskManagerImpl(int num) : TaskManagerImpl(num) {}
+    CellTaskManagerImpl(int num, Threads *cpus) : TaskManagerImpl(num) {speThreads = cpus;}
     ~CellTaskManagerImpl();
 
     /* variables */
     QueueInfo<TaskList> **taskListInfo;
     QueueInfo<TaskList> **speTaskList; // running task
-    SpeThreads *speThreads;
+    Threads *speThreads;
     FifoTaskManagerImpl *ppeManager;
     int spe_running;
     int spuIdle;
--- a/TaskManager/Cell/SpeThreads.h	Wed Jul 06 21:05:06 2011 +0900
+++ b/TaskManager/Cell/SpeThreads.h	Fri Jul 08 19:38:09 2011 +0900
@@ -13,7 +13,7 @@
 } thread_arg_t;
 
-class SpeThreads : Threads {
+class SpeThreads : public Threads {
 public:
     /* constructor */
     SpeThreads(int num = 1);
--- a/TaskManager/Cell/spe/SpeTaskManagerImpl.cc	Wed Jul 06 21:05:06 2011 +0900
+++ b/TaskManager/Cell/spe/SpeTaskManagerImpl.cc	Fri Jul 08 19:38:09 2011 +0900
@@ -20,6 +20,7 @@
 
 void SpeTaskManagerImpl::print_arch() { printf("SpeTaskManagerImpl\n"); }
 
 // Odd
+#ifndef __CERIUM_FIFO__
 TaskManagerImpl::TaskManagerImpl(int i) {}
 void TaskManagerImpl::append_activeTask(HTask* p) {}
@@ -33,3 +34,4 @@
 
 void TaskManagerImpl::spawn_task(HTaskPtr) {}
 void TaskManagerImpl::set_task_cpu(HTaskPtr, CPU_TYPE) {}
+#endif
--- a/TaskManager/Fifo/FifoDmaManager.h	Wed Jul 06 21:05:06 2011 +0900
+++ b/TaskManager/Fifo/FifoDmaManager.h	Fri Jul 08 19:38:09 2011 +0900
@@ -4,6 +4,7 @@
 #include "base.h"
 #include "DmaManager.h"
 #include "MailManager.h"
+#include "SemMailManager.h"
 
 class FifoDmaManager : public DmaManager {
 
@@ -16,8 +17,13 @@
     BASE_NEW_DELETE(FifoDmaManager);
 
     FifoDmaManager() {
-	mail_queue1 = new MailManager();
-	mail_queue2 = new MailManager();
+#ifdef __CERIUM_PARALLEL__
+	mail_queue1 = new SemMailManager();
+	mail_queue2 = new SemMailManager();
+#else
+	mail_queue1 = new MailManager();
+	mail_queue2 = new MailManager();
+#endif
     }
 
     ~FifoDmaManager() {
--- a/TaskManager/Fifo/FifoTaskManagerImpl.cc	Wed Jul 06 21:05:06 2011 +0900
+++ b/TaskManager/Fifo/FifoTaskManagerImpl.cc	Fri Jul 08 19:38:09 2011 +0900
@@ -2,6 +2,7 @@
 #include <stdlib.h>
 #include <string.h>
 #include "FifoTaskManagerImpl.h"
+#include "CellTaskManagerImpl.h"
 #include "QueueInfo.h"
 #include "TaskList.h"
 #include "Scheduler.h"
@@ -313,7 +314,13 @@
 TaskManagerImpl*
 create_impl(int num)
 {
+    if (num == 0) {
 	return new FifoTaskManagerImpl(num);
+    } else {
+	Threads *cpus = new CpuThreads(num);
+	cpus->init();
+	return new CellTaskManagerImpl(num,cpus);
+    }
 }
 
 #endif // __CERIUM_FIFO__
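Taken together, the two create_impl() variants (the __CERIUM_CELL__ one at the end of CellTaskManagerImpl.cc above and the FIFO one here) now act as a small factory: CellTaskManagerImpl no longer constructs its own SpeThreads, it simply uses whatever Threads implementation is injected through its constructor. A condensed sketch of the combined selection logic; make_manager is a hypothetical name for illustration, the real entry points are the two create_impl() functions shown in this changeset:

    // Sketch (assumption: condenses the two create_impl() bodies above).
    TaskManagerImpl *make_manager(int num_cpus) {
    #ifdef __CERIUM_CELL__
        Threads *cpus = new SpeThreads(num_cpus);     // real SPEs on Cell/PS3
        cpus->init();
        return new CellTaskManagerImpl(num_cpus, cpus);
    #else
        if (num_cpus == 0)
            return new FifoTaskManagerImpl(num_cpus); // single-threaded PPE-only path
        Threads *cpus = new CpuThreads(num_cpus);     // pthreads standing in for SPEs
        cpus->init();
        return new CellTaskManagerImpl(num_cpus, cpus);
    #endif
    }

The payoff of the injection is that the whole mail/task-list machinery in CellTaskManagerImpl runs unchanged against either backend.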
--- a/TaskManager/Fifo/FifoTaskManagerImpl.h	Wed Jul 06 21:05:06 2011 +0900
+++ b/TaskManager/Fifo/FifoTaskManagerImpl.h	Fri Jul 08 19:38:09 2011 +0900
@@ -4,6 +4,7 @@
 #include "TaskManagerImpl.h"
 #include "MainScheduler.h"
 #include "Scheduler.h"
+#include "CpuThreads.h"
 
 class FifoTaskManagerImpl : public TaskManagerImpl {
 public:
--- a/TaskManager/Makefile	Wed Jul 06 21:05:06 2011 +0900
+++ b/TaskManager/Makefile	Fri Jul 08 19:38:09 2011 +0900
@@ -18,6 +18,9 @@
 cell: FORCE
 	@$(MAKE) -f Makefile.cell
 
+parallel: FORCE
+	@$(MAKE) -f Makefile.parallel
+
 FORCE:
 	-mkdir -p ../include/TaskManager
 	rsync `find . -name Test -prune -or -name '*.h' -print` ../include/TaskManager
--- a/TaskManager/Makefile.def	Wed Jul 06 21:05:06 2011 +0900
+++ b/TaskManager/Makefile.def	Fri Jul 08 19:38:09 2011 +0900
@@ -29,8 +29,8 @@
 
 ABIBIT = 64
 
-OPT = -g -DMAIL_QUEUE -DNOT_CHECK #-DTASK_LIST_MAIL #-DEARLY_TOUCH -DUSE_CACHE
-# OPT = -O9 -DMAIL_QUEUE -DNOT_CHECK #-DTASK_LIST_MAIL #-DEARLY_TOUCH -DUSE_CACHE
+#OPT = -g -DMAIL_QUEUE -DNOT_CHECK #-DTASK_LIST_MAIL #-DEARLY_TOUCH -DUSE_CACHE
+OPT = -O9 -DMAIL_QUEUE -DNOT_CHECK #-DTASK_LIST_MAIL #-DEARLY_TOUCH -DUSE_CACHE
--- a/TaskManager/Makefile.fifo	Wed Jul 06 21:05:06 2011 +0900
+++ b/TaskManager/Makefile.fifo	Fri Jul 08 19:38:09 2011 +0900
@@ -12,7 +12,11 @@
 all: default
 default: $(TARGET)
 
-ALL_OBJS = $(KERN_MAIN_OBJS) $(KERN_PPE_OBJS) $(KERN_SCHED_OBJS) $(KERN_SYSTASK_OBJS) $(IMPL_FIFO_OBJS) $(KERN_MEM_OBJS)
+ALL_OBJS = $(KERN_MAIN_OBJS) $(KERN_PPE_OBJS) $(KERN_SCHED_OBJS) \
+	$(KERN_SYSTASK_OBJS) $(IMPL_FIFO_OBJS) $(KERN_MEM_OBJS) \
+	Cell/spe/SpeTaskManagerImpl.o Cell/CellTaskManagerImpl.o \
+	Cell/spe/ShowTime.o Cell/spe/StartProfile.o
+
 
 Makefile.dep:
 	make -f Makefile.fifo depend
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/TaskManager/Makefile.parallel	Fri Jul 08 19:38:09 2011 +0900
@@ -0,0 +1,33 @@
+include ./Makefile.def
+TARGET = libFifoManager.a
+CFLAGS += -DHAS_POSIX_MEMALIGN
+
+.SUFFIXES: .cc .o
+
+EXTRA_CFLAGS = -D__CERIUM_FIFO__ -D__CERIUM_PARALLEL__
+
+.cc.o:
+	$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(INCLUDE) -c $< -o $@
+
+
+all: default
+default: $(TARGET)
+
+ALL_OBJS = $(KERN_MAIN_OBJS) $(KERN_PPE_OBJS) $(KERN_SCHED_OBJS) \
+	$(KERN_SYSTASK_OBJS) $(IMPL_FIFO_OBJS) $(KERN_MEM_OBJS) \
+	Cell/spe/SpeTaskManagerImpl.o Cell/CellTaskManagerImpl.o \
+	Cell/spe/ShowTime.o Cell/spe/StartProfile.o
+
+Makefile.dep:
+	make -f Makefile.parallel depend
+
+depend:
+	$(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(INCLUDE) $(ALL_OBJS:.o=.cc) -MM > Makefile.dep
+
+$(TARGET): $(ALL_OBJS)
+	ar crus $@ $(ALL_OBJS)
+
+paralleldistclean: parallelclean
+	rm -f $(TARGET)
+
+parallelclean:
\ No newline at end of file
--- a/TaskManager/kernel/ppe/CpuThreads.cc	Wed Jul 06 21:05:06 2011 +0900
+++ b/TaskManager/kernel/ppe/CpuThreads.cc	Fri Jul 08 19:38:09 2011 +0900
@@ -4,12 +4,25 @@
 #include "MainScheduler.h"
 #include "SysFunc.h"
 #include "SchedNop.h"
+#include "SpeTaskManagerImpl.h"
 
-SchedExternTask(ShowTime);
-SchedExternTask(StartProfile);
+//SchedExternTask(ShowTime);
+//SchedExternTask(StartProfile);
 
-CpuThreads::CpuThreads(int num, int start_id) : cpu_num(num), id_offset(start_id) {}
+CpuThreads::CpuThreads(int num, int start_id) : cpu_num(num), id_offset(start_id) {
+
+    threads = new pthread_t[cpu_num];
+    args = new cpu_thread_arg_t[cpu_num];
+    wait = new Sem(0);
+
+    for (int i = 0; i < cpu_num; i++) {
+	args[i].cpuid = i + id_offset;
+	args[i].scheduler = new MainScheduler();
+	args[i].wait = wait;
+    }
+
+}
 
 CpuThreads::~CpuThreads()
 {
@@ -20,7 +33,11 @@
     }
 
     for (int i = 0; i < cpu_num; i++) {
-        pthread_join(threads[i], NULL);
+	pthread_join(threads[i], NULL);
+    }
+
+    for (int i = 0; i < cpu_num; i++) {
+	delete args[i].scheduler;
     }
 
     delete [] threads;
@@ -33,11 +50,16 @@
     cpu_thread_arg_t *argt = (cpu_thread_arg_t *) args;
     Scheduler *c_scheduler = argt->scheduler;
 
-    SchedRegister(ShowTime);
-    SchedRegister(StartProfile);
+    TaskManagerImpl *manager = new SpeTaskManagerImpl();
+    c_scheduler->init(manager);
+    c_scheduler->id = (int)argt->cpuid;
 
-    c_scheduler->init(argt->manager);
-    c_scheduler->id = (int)argt->cpuid;
+    manager->set_scheduler(c_scheduler);
+
+    //SchedRegister(ShowTime);
+    //SchedRegister(StartProfile);
+
+    argt->wait->sem_v(); // +1 each time a thread finishes its setup
 
     c_scheduler->run(new SchedNop());
     c_scheduler->finish();
@@ -46,19 +68,16 @@
 }
 
 void
+//CpuThreads::init()
 CpuThreads::init()
 {
-    threads = new pthread_t[cpu_num];
-    args = new cpu_thread_arg_t[cpu_num];
-
     for (int i = 0; i < cpu_num; i++) {
-        args[i].cpuid = i + id_offset;
-        args[i].scheduler = new MainScheduler();
+	pthread_create(&threads[i], NULL,
+		       &cpu_thread_run, (void*)&args[i]);
     }
 
     for (int i = 0; i < cpu_num; i++) {
-        pthread_create(&threads[i], NULL,
-                       &cpu_thread_run, (void*)&args[i]);
+	wait->sem_p();
     }
 }
 
@@ -73,7 +92,6 @@
 int
 CpuThreads::get_mail(int cpuid, int count, memaddr *ret)
 {
-    // need synchronization
     *ret = args[cpuid-id_offset].scheduler->mail_read_from_host();
     return 1;
 }
@@ -81,8 +99,11 @@
 int
 CpuThreads::has_mail(int cpuid, int count, memaddr *ret)
 {
-    // need synchronization
-    return args[cpuid-id_offset].scheduler->has_mail_from_host();
+    if (args[cpuid-id_offset].scheduler->has_mail_from_host() != 0) {
+	return get_mail(cpuid,count,ret);
+    } else {
+	return 0; // return 0 when there is no mail
+    }
 }
 
/**
@@ -101,11 +122,20 @@
 */
 void
 CpuThreads::send_mail(int cpuid, int num, memaddr *data)
-
 {
-    // need synchronization
     args[cpuid-id_offset].scheduler->mail_write_from_host(*data);
 }
 
+void
+CpuThreads::add_output_tasklist(int command, memaddr buff, int alloc_size)
+{
+    /*
+     * if there is no output TaskList yet, create a new one;
+     * otherwise, add the allocated Task to the TaskList.
+     * run the initialization of the Task matching command;
+     * initialized on the PPE side after the data has been written out to the SPE
+     */
+
+}
 
 /* end */
--- a/TaskManager/kernel/ppe/CpuThreads.h	Wed Jul 06 21:05:06 2011 +0900
+++ b/TaskManager/kernel/ppe/CpuThreads.h	Fri Jul 08 19:38:09 2011 +0900
@@ -5,31 +5,35 @@
 #include "Threads.h"
 #include "TaskManagerImpl.h"
 #include "MainScheduler.h"
+#include "Sem.h"
 
 typedef struct cpu_arg {
     int cpuid;
     // should be syncrhonized
     MainScheduler *scheduler;
     TaskManagerImpl *manager;
+    SemPtr wait;
 } cpu_thread_arg_t;
 
-class CpuThreads : Threads {
+class CpuThreads : public Threads {
 public:
     /* constructor */
     CpuThreads(int num = 1, int start_id = 0);
-    virtual ~CpuThreads();
+    ~CpuThreads();
     static void *cpu_thread_run(void *args);
 
     /* functions */
-    void init();
-    int get_mail(int speid, int count, memaddr *ret); // BLOCKING
-    int has_mail(int speid, int count, memaddr *ret); // NONBLOCK
-    void send_mail(int speid, int num, memaddr *data); // BLOCKING
+    virtual void init();
+    virtual int get_mail(int speid, int count, memaddr *ret); // BLOCKING
+    virtual int has_mail(int speid, int count, memaddr *ret); // NONBLOCK
+    virtual void send_mail(int speid, int num, memaddr *data); // BLOCKING
+    virtual void add_output_tasklist(int command, memaddr buff, int alloc_size);
 
 private:
     /* variables */
     pthread_t *threads;
     cpu_thread_arg_t *args;
+    SemPtr wait; // for waiting at thread creation
     int cpu_num;
    int id_offset;
 };
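The constructor/init() split plus the new wait semaphore implement a startup rendezvous: each worker thread calls sem_v() once its MainScheduler is set up, and init() blocks in sem_p() cpu_num times, so no mail is exchanged with a thread whose scheduler is still uninitialized. A stripped-down sketch of the same pattern using the Sem class added later in this changeset (worker and start_workers are illustrative names, not the Cerium API):

    #include <pthread.h>
    #include "Sem.h"

    struct WorkerArg {
        SemPtr ready;            // shared startup semaphore
    };

    static void *worker(void *a) {
        WorkerArg *arg = (WorkerArg *) a;
        // ... per-thread setup (scheduler init etc.) would happen here ...
        arg->ready->sem_v();     // announce "setup done" (+1)
        // ... main loop would run here ...
        return 0;
    }

    void start_workers(pthread_t *t, WorkerArg *args, int n) {
        SemPtr ready = new Sem(0);
        for (int i = 0; i < n; i++) {
            args[i].ready = ready;
            pthread_create(&t[i], NULL, worker, (void *) &args[i]);
        }
        for (int i = 0; i < n; i++)
            ready->sem_p();      // block until all n workers have posted
        // from here on it is safe to talk to every worker
    }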
--- a/TaskManager/kernel/ppe/HTask.cc	Wed Jul 06 21:05:06 2011 +0900
+++ b/TaskManager/kernel/ppe/HTask.cc	Fri Jul 08 19:38:09 2011 +0900
@@ -77,6 +77,7 @@
     r_size = Task::calc_size(num_param, num_inData, num_outData)*num_task;
     rbuf = (memaddr) mimpl->allocate(r_size);
     //printf("r_size %d\n",r_size);
+    //bzero is expensive, we would like to drop it
     bzero(rbuf,r_size);
     Task *task = (Task*)rbuf;
     task->init(id, num_param,num_inData,num_outData);
--- a/TaskManager/kernel/ppe/MailManager.cc	Wed Jul 06 21:05:06 2011 +0900
+++ b/TaskManager/kernel/ppe/MailManager.cc	Fri Jul 08 19:38:09 2011 +0900
@@ -20,13 +20,13 @@
 MailManager::~MailManager()
 {
     free(queue);
 }
 
-int 
+int
 MailManager::count()
 {
     return (write+size-read)&mask;
 }
 
-void 
+void
 MailManager::extend()
 {
     memaddr *newq = Newq(memaddr,size*2);
--- a/TaskManager/kernel/ppe/MailManager.h	Wed Jul 06 21:05:06 2011 +0900
+++ b/TaskManager/kernel/ppe/MailManager.h	Fri Jul 08 19:38:09 2011 +0900
@@ -8,12 +8,12 @@
 
     /* constructor */
     MailManager(unsigned int qsize = 32) ;
-    ~MailManager();
+    virtual ~MailManager();
 
     /* functions */
-    void send(memaddr data);
-    memaddr recv();
-    int count();
+    virtual void send(memaddr data);
+    virtual memaddr recv();
+    virtual int count();
 
 private:
     /* variables */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/TaskManager/kernel/ppe/Sem.cc	Fri Jul 08 19:38:09 2011 +0900
@@ -0,0 +1,60 @@
+#include "Sem.h"
+#include <pthread.h>
+
+
+Sem::Sem(int value)
+{
+    sem = new sem_t;
+    pthread_mutex_init(&sem->mutex, NULL);
+    pthread_cond_init(&sem->cond, NULL);
+    sem->value = value;
+}
+
+Sem::~Sem()
+{
+    pthread_mutex_destroy(&sem->mutex);
+    pthread_cond_destroy(&sem->cond);
+
+    delete sem;
+}
+
+// P operation
+// acquire a resource
+void
+Sem::sem_p()
+{
+    pthread_mutex_lock(&sem->mutex);
+    // while the semaphore variable is 0, wait for another thread to release a resource
+    while(sem->value == 0) {
+	pthread_cond_wait(&sem->cond, &sem->mutex);
+    }
+    //atomic
+    //sem->value--; // acquire the resource
+    // an atomic operation is needed because count() reads value without the lock
+    __sync_fetch_and_sub(&sem->value,1);
+    pthread_mutex_unlock(&sem->mutex);
+}
+
+// V operation
+// release a resource
+void
+Sem::sem_v()
+{
+    pthread_mutex_lock(&sem->mutex);
+    //atomic
+    //sem->value++; // release the resource
+    __sync_fetch_and_add(&sem->value,1);
+
+    // announce that a resource was released
+    pthread_cond_signal(&sem->cond);
+    pthread_mutex_unlock(&sem->mutex);
+
+}
+
+int
+Sem::count()
+{
+    // just return the value of sem
+    //atomic
+    return sem->value;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/TaskManager/kernel/ppe/Sem.h	Fri Jul 08 19:38:09 2011 +0900
@@ -0,0 +1,27 @@
+#ifndef INCLUDED_SEM
+#define INCLUDED_SEM
+
+#include <pthread.h>
+
+typedef struct sem_t {
+    volatile int value;    // semaphore variable
+    pthread_mutex_t mutex; // lock for the semaphore operations
+    pthread_cond_t cond;   // condition variable for waiting
+} sem_t, *sem_ptr;
+
+class Sem {
+public:
+    /* constructor */
+    Sem(int value);
+    ~Sem();
+    void sem_p();
+    void sem_v();
+    int count();
+    /* variables */
+private:
+    sem_t *sem;
+};
+
+typedef Sem *SemPtr;
+
+#endif /* INCLUDED_SEM */
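Sem is a classic counting semaphore built from a pthread mutex and condition variable; the __sync_fetch_and_add/sub atomics in Sem.cc cover the fact that count() reads value without taking the mutex. A minimal illustration of the P/V discipline (sketch only, not project code):

    #include "Sem.h"

    int main() {
        Sem s(2);      // two resources available
        s.sem_p();     // acquire: count() == 1
        s.sem_p();     // acquire: count() == 0; a third sem_p() would block
        s.sem_v();     // release: count() == 1, wakes one waiter if any
        return s.count() == 1 ? 0 : 1;
    }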
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/TaskManager/kernel/ppe/SemMailManager.cc	Fri Jul 08 19:38:09 2011 +0900
@@ -0,0 +1,64 @@
+#include <stdlib.h>
+#include "SemMailManager.h"
+
+void
+SemMailManager::calc_mask(unsigned int qsize)
+{
+    mask = 1;
+    while(qsize>mask) {
+	mask <<= 1;
+    }
+    size = mask;
+    mask--;
+}
+
+SemMailManager::SemMailManager(unsigned int qsize) {
+    read = write = 0;
+    calc_mask(qsize);
+    queue = Newq(memaddr,size);
+
+    queue_remain = new Sem(size-1); // number of slots still free in the queue
+    queue_count = new Sem(0);       // number of items currently in the queue
+
+}
+
+SemMailManager::~SemMailManager()
+{
+    free(queue);
+    delete queue_remain;
+    delete queue_count;
+}
+
+int
+SemMailManager::count()
+{
+    return queue_count->count();
+}
+
+void
+SemMailManager::send(memaddr data)
+{
+    queue_remain->sem_p(); // resources -1
+
+    queue[write++] = data;
+    // wrap back to 0 once write passes the mask range
+    write &= mask;
+
+    queue_count->sem_v(); // resources +1
+}
+
+memaddr
+SemMailManager::recv()
+{
+    queue_count->sem_p(); // resources -1
+
+    memaddr data;
+    data = queue[read++];
+    read &= mask;
+
+    queue_remain->sem_v(); // resources +1
+
+    return data;
+}
+
+/* end */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/TaskManager/kernel/ppe/SemMailManager.h	Fri Jul 08 19:38:09 2011 +0900
@@ -0,0 +1,37 @@
+#ifndef INCLUDED_SEM_MAIL_MANAGER
+#define INCLUDED_SEM_MAIL_MANAGER
+
+#include <pthread.h>
+#include "MailManager.h"
+#include "types.h"
+#include "Sem.h"
+
+class SemMailManager : public MailManager {
+public:
+    /* constructor */
+    SemMailManager(unsigned int qsize = 32) ;
+
+    ~SemMailManager();
+
+    /* functions */
+    void send(memaddr data);
+    memaddr recv();
+    int count();
+
+private:
+    /* variables */
+    memaddr *queue;
+    SemPtr queue_remain;
+    SemPtr queue_count;
+    unsigned int size;
+    unsigned int read;
+    unsigned int write;
+    unsigned int mask;
+
+    void calc_mask(unsigned int qsize);
+    void extend();
+} ;
+
+typedef SemMailManager *SemMailManagerPtr;
+
+#endif
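SemMailManager is the textbook bounded buffer guarded by two counting semaphores: queue_remain counts free slots (seeded with size-1, keeping one slot in reserve so write can never lap read), and queue_count counts filled slots. send() does P(remain) then V(count), recv() the reverse, so a full queue blocks the sender and an empty queue blocks the receiver, instead of extend()-ing the ring the way the single-threaded MailManager does. A single-threaded sketch of the handoff (with qsize = 2 the ring rounds up to size 2, leaving exactly one usable slot):

    #include "TaskManager/SemMailManager.h"

    int main() {
        SemMailManagerPtr q = new SemMailManager(2);
        q->send((memaddr) 42); // fills the one free slot; a second send() would block
        memaddr m = q->recv(); // drains it again
        delete q;
        return m == (memaddr) 42 ? 0 : 1;
    }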
--- a/TaskManager/kernel/ppe/Threads.h	Wed Jul 06 21:05:06 2011 +0900
+++ b/TaskManager/kernel/ppe/Threads.h	Fri Jul 08 19:38:09 2011 +0900
@@ -2,7 +2,8 @@
 #define INCLUDED_THREADS
 
 #include <pthread.h>
-#include <base.h>
+#include "base.h"
+#include "types.h"
 
 class Threads {
 
@@ -18,6 +19,7 @@
     virtual int get_mail(int speid, int count, memaddr *ret) = 0; // BLOCKING
     virtual int has_mail(int speid, int count, memaddr *ret) = 0; // NONBLOCK
     virtual void send_mail(int speid, int num, memaddr *data) = 0; // BLOCKING
+    virtual void add_output_tasklist(int command, memaddr buff, int alloc_size) = 0;
 
     /* variables */
     pthread_t *threads;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/TaskManager/test/CpuThreadsTest/CpuThreadsTest.cc	Fri Jul 08 19:38:09 2011 +0900
@@ -0,0 +1,51 @@
+#include <stdio.h>
+#include "TaskManager/CpuThreads.h"
+#include "TaskManager/CellTaskManagerImpl.h"
+
+static void
+fail(const char *reason)
+{
+    printf("CpuThreadTest failed %s", reason);
+}
+
+static void
+tester(CpuThreads* c, int num, int size)
+{
+    for(int id = 0; id < num; id++){
+	for(int i = 0; i < size; i++) {
+	    c->send_mail(id, 1, (memaddr *) i); // write into mail_queue1
+	}
+    }
+
+    for(int id = 0; id < num; id++){
+	for(int i = 0; i < size; i++) {
+	    memaddr data;
+	    if (c->has_mail(id, 1, &data) == 0) {
+		fail("early read fail\n");
+		break;
+	    }
+	    if (data != (memaddr)i) {
+		fail("read data fail\n");
+		break;
+	    }
+	}
+    }
+}
+
+static void
+test1() {
+    int num = 1;
+    CpuThreads* c = new CpuThreads(num);
+    c->init();
+    tester(c,num,16);
+    delete c;
+}
+
+int
+main(int ac, const char *av[])
+{
+    test1();
+    printf("CpuThreads succeed\n");
+}
+
+/* end */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/TaskManager/test/CpuThreadsTest/Makefile	Fri Jul 08 19:38:09 2011 +0900
@@ -0,0 +1,15 @@
+include ../../Makefile.def
+
+CPPFLAGS += -g -Wall -I../../../include -m$(ABIBIT)
+
+TARGET=CpuThreadsTest
+
+$(TARGET) :
+
+LIBS += ../../libFifoManager.a
+
+CpuThreadsTest : CpuThreadsTest.o
+	$(CC) $(CFLAGS) -o $@ $? $(LIBS)
+
+clean:
+	rm -rf *.o $(TARGET)
--- a/TaskManager/test/MailManagerTest.cc	Wed Jul 06 21:05:06 2011 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,47 +0,0 @@
-#include <stdio.h>
-#include "TaskManager/MailManager.h"
-
-static void
-fail(const char *reason)
-{
-    printf("MailManagerTest failed %s", reason);
-}
-
-static void
-tester(MailManagerPtr m, int size)
-{
-
-    for(int i=0;i<size;i++) {
-	m->send((memaddr)i);
-    }
-    for(int i=0;i<size;i++) {
-	if (m->count()==0) {
-	    fail("early read fail\n"); break;
-	}
-	if (m->recv()!=(memaddr)i) {
-	    fail("read data fail\n"); break;
-	}
-    }
-}
-
-static void
-test1() {
-    MailManagerPtr m = new MailManager();
-    tester(m,16);
-    tester(m,32);
-    tester(m,48);
-    delete m;
-    MailManagerPtr m1 = new MailManager(40);
-    tester(m1,16);
-    tester(m1,48);
-    delete m1;
-}
-
-int
-main(int ac, const char *av[])
-{
-    test1();
-    printf("MailManagerTest succeed\n");
-}
-
-/* end */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/TaskManager/test/MailManagerTest/MailManagerTest.cc	Fri Jul 08 19:38:09 2011 +0900
@@ -0,0 +1,47 @@
+#include <stdio.h>
+#include "TaskManager/MailManager.h"
+
+static void
+fail(const char *reason)
+{
+    printf("MailManagerTest failed %s", reason);
+}
+
+static void
+tester(MailManagerPtr m, int size)
+{
+
+    for(int i=0;i<size;i++) {
+	m->send((memaddr)i);
+    }
+    for(int i=0;i<size;i++) {
+	if (m->count()==0) {
+	    fail("early read fail\n"); break;
+	}
+	if (m->recv()!=(memaddr)i) {
+	    fail("read data fail\n"); break;
+	}
+    }
+}
+
+static void
+test1() {
+    MailManagerPtr m = new MailManager();
+    tester(m,16);
+    tester(m,32);
+    tester(m,48);
+    delete m;
+    MailManagerPtr m1 = new MailManager(40);
+    tester(m1,16);
+    tester(m1,48);
+    delete m1;
+}
+
+int
+main(int ac, const char *av[])
+{
+    test1();
+    printf("MailManagerTest succeed\n");
+}
+
+/* end */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/TaskManager/test/MailManagerTest/Makefile	Fri Jul 08 19:38:09 2011 +0900
@@ -0,0 +1,15 @@
+include ../../Makefile.def
+
+CPPFLAGS += -g -Wall -I../../../include -m$(ABIBIT)
+
+TARGET=MailManagerTest
+
+$(TARGET) :
+
+LIBS += ../../libFifoManager.a
+
+MailManagerTest : MailManagerTest.o
+	$(CC) $(CFLAGS) -o $@ $? $(LIBS)
+
+clean:
+	rm -rf *.o $(TARGET)
--- a/TaskManager/test/Makefile	Wed Jul 06 21:05:06 2011 +0900
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,15 +0,0 @@
-include ../Makefile.def
-
-CPPFLAGS += -g -Wall -I../../include -m$(ABIBIT)
-
-TARGET=MailManagerTest
-
-$(TARGET) :
-
-LIBS += ../libFifoManager.a
-
-MailManagerTest : MailManagerTest.o
-	$(CC) $(CFLAGS) -o $@ $? $(LIBS)
-
-clean:
-	rm -rf *.o $(TARGET)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/TaskManager/test/SemMailManagerTest/MailManagerTest.cc	Fri Jul 08 19:38:09 2011 +0900
@@ -0,0 +1,85 @@
+#include <stdio.h>
+#include "TaskManager/MailManager.h"
+#include "TaskManager/SemMailManager.h"
+
+typedef struct _thread_arg {
+    MailManagerPtr m;
+    int size;
+} thread_arg_t;
+
+static void
+fail(const char *reason)
+{
+    printf("MailManagerTest failed %s", reason);
+}
+
+static void
+send_func(void *arg)
+{
+    thread_arg_t* targ = (thread_arg_t *)arg;
+    for(int i = 0; i < targ->size; i++) {
+	printf("send %d\n",i);
+	targ->m->send((memaddr)i);
+    }
+    return;
+}
+
+static void
+recv_func(void *arg)
+{
+    thread_arg_t* targ = (thread_arg_t *)arg;
+    for(int i = 0; i < targ->size; i++) {
+	if (targ->m->recv() != (memaddr)i) {
+	    fail("read data fail\n");
+	    break;
+	}
+	printf("\t receive %d\n",i);
+    }
+    return;
+}
+
+static void
+tester(MailManagerPtr m, int size)
+{
+    // create the sender thread
+    thread_arg_t starg;
+    starg.m = m;
+    starg.size = size;
+
+    pthread_t send;
+    pthread_create(&send, NULL, (void* (*)(void*))&send_func, (void*)&starg);
+
+    // create the receiver thread
+    thread_arg_t rtarg;
+    rtarg.m = m;
+    rtarg.size = size;
+
+    pthread_t recv;
+    pthread_create(&recv, NULL, (void* (*)(void*))&recv_func, (void*)&rtarg);
+
+    // wait for both to finish
+    pthread_join(send, NULL);
+    pthread_join(recv, NULL);
+}
+
+static void
+test1() {
+    MailManagerPtr m = new SemMailManager(2);
+    tester(m,16);
+    tester(m,32);
+    tester(m,48);
+    delete m;
+    MailManagerPtr m1 = new SemMailManager(32);
+    tester(m1,16);
+    tester(m1,48);
+    delete m1;
+}
+
+int
+main(int ac, const char *av[])
+{
+    test1();
+    printf("MailManagerTest succeed\n");
+}
+
+/* end */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/TaskManager/test/SemMailManagerTest/Makefile	Fri Jul 08 19:38:09 2011 +0900
@@ -0,0 +1,15 @@
+include ../../Makefile.def
+
+CPPFLAGS += -g -Wall -I../../../include -m$(ABIBIT)
+
+TARGET=MailManagerTest
+
+$(TARGET) :
+
+LIBS += ../../libFifoManager.a
+
+MailManagerTest : MailManagerTest.o
+	$(CC) $(CFLAGS) -o $@ $? $(LIBS)
+
+clean:
+	rm -rf *.o $(TARGET)
--- a/example/Prime/Makefile.def	Wed Jul 06 21:05:06 2011 +0900
+++ b/example/Prime/Makefile.def	Fri Jul 08 19:38:09 2011 +0900
@@ -7,7 +7,7 @@
 # ex linux/ps3
 CERIUM = ../../../Cerium
 
-CC = g++ -m32
+CC = g++ -m64
 CFLAGS = -O9 -Wall
 #CFLAGS = -g -Wall
--- a/example/Prime/ppe/Prime.cc	Wed Jul 06 21:05:06 2011 +0900
+++ b/example/Prime/ppe/Prime.cc	Fri Jul 08 19:38:09 2011 +0900
@@ -9,8 +9,8 @@
 static int
 prime(SchedTask *smanager, void *rbuf, void *wbuf)
 {
-    int start = (int)smanager->get_param(0); /* start of the primality-test range */
-    int end = (int)smanager->get_param(1); /* end of the primality-test range */
+    int start = (long)smanager->get_param(0); /* start of the primality-test range */
+    int end = (long)smanager->get_param(1); /* end of the primality-test range */
     int range = end - start; /* range to test */
 
     /* receive the array that stores the results */
--- a/example/Prime/ppe/PrintTask.cc	Wed Jul 06 21:05:06 2011 +0900
+++ b/example/Prime/ppe/PrintTask.cc	Fri Jul 08 19:38:09 2011 +0900
@@ -8,7 +8,7 @@
 static int
 print(SchedTask *smanager, void *rbuf, void *wbuf)
 {
-    int length = (int)smanager->get_param(0); /* range to print */
+    int length = (long)smanager->get_param(0); /* range to print */
     int *input = (int*)smanager->get_input(rbuf, 0); /* array to print */
 
     /* print the entries whose primality result is 1 */
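The (int) to (long) changes in Prime.cc and PrintTask.cc go with the new 64-bit build flags: get_param() presumably returns a pointer-sized memaddr, and on an LP64 target long stays pointer-sized while int is only 32-bit, so the old casts would truncate (and the narrowing pointer-to-int cast draws a compiler diagnostic under -Wall). A tiny illustration of the failure mode being avoided (hypothetical values, not project code):

    #include <cstdio>

    int main() {
        // On LP64 (as selected by -m64 / ABIBIT=64): long is 64-bit, int is 32-bit.
        unsigned long param = 0x100000000UL; // a value with a bit above 2^32
        int  truncated = (int) param;        // upper 32 bits lost: prints 0
        long preserved = (long) param;       // survives intact
        printf("%d %ld\n", truncated, preserved);
        return 0;
    }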
--- a/example/Simple/Makefile.def	Wed Jul 06 21:05:06 2011 +0900
+++ b/example/Simple/Makefile.def	Fri Jul 08 19:38:09 2011 +0900
@@ -1,6 +1,7 @@
 TARGET = twice
 
 # include/library path
+ABIBIT=64
 
 # ex linux/ps3
 CERIUM = ../../../Cerium
--- a/example/Simple/Makefile.macosx	Wed Jul 06 21:05:06 2011 +0900
+++ b/example/Simple/Makefile.macosx	Fri Jul 08 19:38:09 2011 +0900
@@ -11,8 +11,8 @@
 TASK_SRCS = $(filter-out $(TASK_DIR)/$(TASK_SRCS_EXCLUDE),$(TASK_SRCS_TMP))
 TASK_OBJS = $(TASK_SRCS:.cc=.o)
 
-LIBS += -lFifoManager `sdl-config --libs`
-CC += -m32
+LIBS += -lParallelManager `sdl-config --libs`
+CC += -m$(ABIBIT)
 
 .SUFFIXES: .cc .o