changeset 1585:90c0ad32655f draft
init dim
author    Yuhi TOMARI <yuhi@cr.ie.u-ryukyu.ac.jp>
date      Sun, 31 Mar 2013 20:32:24 +0900
parents   40a3504126a4
children  345139e65ef1
files     TaskManager/Cell/CellTaskManagerImpl.cc
          TaskManager/Cell/CellTaskManagerImpl.h
          TaskManager/Cell/SpeThreads.cc
          TaskManager/Cell/spe/SpeTaskManagerImpl.h
          TaskManager/Fifo/FifoTaskManagerImpl.cc
          TaskManager/Fifo/FifoTaskManagerImpl.h
          TaskManager/Fifo/MainScheduler.h
          TaskManager/kernel/ppe/TaskList.h
          TaskManager/kernel/ppe/TaskManagerImpl.h
          TaskManager/kernel/schedule/SchedTask.cc
          TaskManager/kernel/schedule/Scheduler.cc
          example/word_count/main.cc
diffstat  12 files changed, 116 insertions(+), 93 deletions(-)
--- a/TaskManager/Cell/CellTaskManagerImpl.cc	Sun Mar 31 14:21:42 2013 +0900
+++ b/TaskManager/Cell/CellTaskManagerImpl.cc	Sun Mar 31 20:32:24 2013 +0900
@@ -175,6 +175,11 @@
 }
 
+int
+CellTaskManagerImpl::max_cpu() {
+    return machineNum;
+}
+
 static void loop_check(HTask *p, HTask *me, int depth)
 {
     if (p == me) printf("*%lx ", (long) p); // loop
@@ -378,9 +383,9 @@
 {
 #ifdef __CERIUM_CELL__
     Threads *cpus = new SpeThreads(num);
+
 #elif __CERIUM_GPU__
     int num_gpu = 1;
-    Threads *cpus = new CpuThreads(num, useRefDma,num_gpu);
     num += num_gpu; // for GPU
 #else
--- a/TaskManager/Cell/CellTaskManagerImpl.h	Sun Mar 31 14:21:42 2013 +0900
+++ b/TaskManager/Cell/CellTaskManagerImpl.h	Sun Mar 31 20:32:24 2013 +0900
@@ -43,7 +43,7 @@
     void polling();
     void debug_check_spe_idle(QueueInfo<HTask> * activeTaskQueue, int spe_running_);
     void print_arch();
-
+    int max_cpu();
 private:
     void send_taskList(int id);
     void show_dead_lock_info();
--- a/TaskManager/Cell/SpeThreads.cc	Sun Mar 31 14:21:42 2013 +0900
+++ b/TaskManager/Cell/SpeThreads.cc	Sun Mar 31 20:32:24 2013 +0900
@@ -16,11 +16,11 @@
 }
 
     for (int i = 0; i < cpu_num; i++) {
-	pthread_join(threads[i], NULL);
-	ret = spe_context_destroy(spe_ctx[i]);
-	if (ret) {
-	    perror("[~SpeThreads] spe_context_destroy");
-	}
+        pthread_join(threads[i], NULL);
+        ret = spe_context_destroy(spe_ctx[i]);
+        if (ret) {
+            perror("[~SpeThreads] spe_context_destroy");
+        }
     }
 
     spe_image_close(spe_handle);
@@ -43,20 +43,20 @@
     spe_context_run(arg_t->ctx, &entry, 0, (void*)arg_t->speid, NULL, &stop_info);
     status = ((stop_info.result.spe_exit_code & 0xff) << 8)
-	| (stop_info.result.spe_signal_code & 0xff);
+        | (stop_info.result.spe_signal_code & 0xff);
 
     switch(stop_info.stop_reason) {
     case SPE_EXIT:
-	break;
+        break;
     case SPE_STOP_AND_SIGNAL:
-	printf("[SPE %d] SPE_STOP_AND_SIGNAL stop_info.result.stop_signal_code=%d\n", arg_t->speid, stop_info.result.spe_signal_code);
-	break;
+        printf("[SPE %d] SPE_STOP_AND_SIGNAL stop_info.result.stop_signal_code=%d\n", arg_t->speid, stop_info.result.spe_signal_code);
+        break;
     case SPE_RUNTIME_ERROR:
-	printf("[SPE %d] SPE_RUNTIME_ERROR stop_info.result.spe_runtime_error=%d\n", arg_t->speid, stop_info.result.spe_runtime_error);
-	break;
+        printf("[SPE %d] SPE_RUNTIME_ERROR stop_info.result.spe_runtime_error=%d\n", arg_t->speid, stop_info.result.spe_runtime_error);
+        break;
     case SPE_RUNTIME_EXCEPTION:
-	printf("[SPE %d] SPE_RUNTIME_EXCEPTION stop_info.result.spe_runtime_exception=%d\n", arg_t->speid, stop_info.result.spe_runtime_exception);
-	break;
+        printf("[SPE %d] SPE_RUNTIME_EXCEPTION stop_info.result.spe_runtime_exception=%d\n", arg_t->speid, stop_info.result.spe_runtime_exception);
+        break;
     }
 
     pthread_exit(NULL);
@@ -82,8 +82,8 @@
     spe_handle = spe_image_open(SPE_ELF);
 
     if (spe_handle == NULL) {
-	perror("spe_image_open");
-	exit(EXIT_FAILURE);
+        perror("spe_image_open");
+        exit(EXIT_FAILURE);
     }
 
     spe_ctx = new spe_context_ptr_t[cpu_num];
@@ -91,29 +91,29 @@
     args = new thread_arg_t[cpu_num];
 
     for (int i = 0; i < cpu_num; i++) {
-	args[i].speid = i;
-	spe_ctx[i] = spe_context_create(0, NULL);
-	spe_program_load(spe_ctx[i], spe_handle);
-	args[i].ctx = spe_ctx[i];
+        args[i].speid = i;
+        spe_ctx[i] = spe_context_create(0, NULL);
+        spe_program_load(spe_ctx[i], spe_handle);
+        args[i].ctx = spe_ctx[i];
     }
 
     for (int i = 0; i < cpu_num; i++) {
-	pthread_create(&threads[i], NULL,
-		       &spe_thread_run, (void*)&args[i]);
+        pthread_create(&threads[i], NULL,
+                       &spe_thread_run, (void*)&args[i]);
     }
 }
 
 void
 SpeThreads::spawn_task(int cpu_num, TaskListPtr p) {
     if (p->dim>0) {
-	int dim_count = (x+1)*(y+1)*(z+1);
-	if (cpu_num < dim_count)
-	    p->tasks[0].self->dim_count = cpu_num;
+        int dim_count = (x+1)*(y+1)*(z+1);
+        if (cpu_num < dim_count)
+            p->tasks[0].self->dim_count = cpu_num;
         for (int i = 0; i < cpu_num; i++) {
-	    send_mail(i+SPE_0,1,(memaddr)&p);
-	}
+            send_mail(i+SPE_0,1,(memaddr*)p);
+        }
     } else {
-	send_mail(cpu_num,1,(memaddr)&p);
+        send_mail(cpu_num,1,(memaddr*)p);
     }
 }
 
@@ -135,14 +135,14 @@
 int
 SpeThreads::has_mail(int speid, int count, memaddr *ret)
 {
-/*
- * spe_out_mbox_status return only 1, waiting for multiple length
- * does not work.
- */
+    /*
+     * spe_out_mbox_status return only 1, waiting for multiple length
+     * does not work.
+     */
     if (spe_out_mbox_status(spe_ctx[speid]) >= 1) {
-	return spe_out_mbox_read(spe_ctx[speid], (unsigned int*)ret, count*(sizeof(memaddr)/sizeof(int)));
+        return spe_out_mbox_read(spe_ctx[speid], (unsigned int*)ret, count*(sizeof(memaddr)/sizeof(int)));
     } else {
-	return 0;
+        return 0;
     }
 }
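Note: the spawn_task() change above broadcasts the TaskList to every SPE when p->dim > 0, clamping the task's dim_count to the number of available cores whenever the (x+1)*(y+1)*(z+1) iteration space exceeds them. A minimal standalone sketch of that decomposition follows; the bounds x, y, z, the worker loop, and main() are illustrative, not Cerium code.

    #include <cstdio>
    #include <algorithm>

    // Sketch (assumed, not Cerium code): map flat worker ids onto a
    // 3-dimensional iteration space of (x+1)*(y+1)*(z+1) points,
    // mirroring the dim_count clamping in spawn_task() above.
    int main() {
        const int x = 3, y = 1, z = 0;      // iteration space: 4 * 2 * 1 = 8 points
        const int cpu_num = 6;              // available workers
        const int dim_count = (x + 1) * (y + 1) * (z + 1);
        const int workers = std::min(cpu_num, dim_count);  // the clamp

        // Each worker walks the flattened space with stride `workers`,
        // recovering its 3D coordinates from the flat index.
        for (int id = 0; id < workers; id++) {
            for (int i = id; i < dim_count; i += workers) {
                int xi = i % (x + 1);
                int yi = (i / (x + 1)) % (y + 1);
                int zi = i / ((x + 1) * (y + 1));
                printf("worker %d -> (%d,%d,%d)\n", id, xi, yi, zi);
            }
        }
        return 0;
    }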
--- a/TaskManager/Cell/spe/SpeTaskManagerImpl.h	Sun Mar 31 14:21:42 2013 +0900
+++ b/TaskManager/Cell/spe/SpeTaskManagerImpl.h	Sun Mar 31 20:32:24 2013 +0900
@@ -33,6 +33,7 @@
     void free_htask(HTaskPtr htask) {}
     void print_arch();
     void set_NDRange(void* ndr){}
+    int max_cpu(){return 0;}
 
 #ifdef __CERIUM_GPU__
     SpeTaskManagerImpl(int i);
--- a/TaskManager/Fifo/FifoTaskManagerImpl.cc	Sun Mar 31 14:21:42 2013 +0900
+++ b/TaskManager/Fifo/FifoTaskManagerImpl.cc	Sun Mar 31 20:32:24 2013 +0900
@@ -165,6 +165,11 @@
     }
 }
 
+int
+FifoTaskManagerImpl::max_cpu() {
+    return machineNum;
+}
+
 /**
  * @param [list] list of tasks to execute
  * @return mail from the FifoScheduler
@@ -354,7 +359,7 @@
 #endif
 
     if (num == 0) {
-	return new FifoTaskManagerImpl(num);
+        return new FifoTaskManagerImpl(num);
     } else {
         Threads *cpus = new CpuThreads(num,useRefDma);
         return new CellTaskManagerImpl(num,cpus);
--- a/TaskManager/Fifo/FifoTaskManagerImpl.h	Sun Mar 31 14:21:42 2013 +0900
+++ b/TaskManager/Fifo/FifoTaskManagerImpl.h	Sun Mar 31 20:32:24 2013 +0900
@@ -41,7 +41,7 @@
     void print_arch();
     void set_NDRange(void* ndr);
-
+    int max_cpu(); // call by user
 private:
     void set_runTaskList1(QueueInfo<HTask>* activeTaskQueue);
--- a/TaskManager/Fifo/MainScheduler.h	Sun Mar 31 14:21:42 2013 +0900
+++ b/TaskManager/Fifo/MainScheduler.h	Sun Mar 31 20:32:24 2013 +0900
@@ -5,23 +5,22 @@
 #include "FifoDmaManager.h"
 
 class MainScheduler : public Scheduler {
-protected:
+ protected:
     FifoDmaManager *fifoDmaManager;
-public:
+ public:
     ~MainScheduler(void) {}
     void init_impl(int useRefDma);
     void mainMem_alloc(int id, int size);
-
     void mail_write_from_host(memaddr data) {
-	fifoDmaManager->mail_write_from_host(data);
+        fifoDmaManager->mail_write_from_host(data);
     }
 
     memaddr mail_read_from_host() {
-	return fifoDmaManager->mail_read_from_host();
+        return fifoDmaManager->mail_read_from_host();
    }
 
     int has_mail_from_host() {
-	return fifoDmaManager->has_mail_from_host();
+        return fifoDmaManager->has_mail_from_host();
     }
 };
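Note: MainScheduler simply forwards all host-mail operations to its FifoDmaManager; conceptually the mailbox is a FIFO of address-sized words shared between host and scheduler. A rough sketch of that contract follows; the Mailbox class is an assumption for illustration, not Cerium's actual FifoDmaManager.

    #include <cstdint>
    #include <queue>
    #include <mutex>

    typedef uintptr_t memaddr;  // Cerium's memaddr is an address-sized integer

    // Sketch (assumed): a mailbox as a thread-safe FIFO of memaddr words,
    // matching the write / read / has-mail triple forwarded above.
    class Mailbox {
        std::queue<memaddr> q;
        std::mutex m;
    public:
        void write(memaddr data) { std::lock_guard<std::mutex> g(m); q.push(data); }
        bool has()               { std::lock_guard<std::mutex> g(m); return !q.empty(); }
        memaddr read() {
            std::lock_guard<std::mutex> g(m);
            memaddr d = q.front(); q.pop();
            return d;
        }
    };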
--- a/TaskManager/kernel/ppe/TaskList.h	Sun Mar 31 14:21:42 2013 +0900
+++ b/TaskManager/kernel/ppe/TaskList.h	Sun Mar 31 20:32:24 2013 +0900
@@ -23,7 +23,7 @@
     TaskPtr last() { return (TaskPtr)(((memaddr)tasks)+lastTask); }
     void set_last(Task *t) { lastTask = ((memaddr)t) - ((memaddr)tasks); }
 
-    void init() { lastTask = ((memaddr)&tasks[TASK_MAX_SIZE])-(memaddr)(tasks); waiter=this; }
+    void init() { lastTask = ((memaddr)&tasks[TASK_MAX_SIZE])-(memaddr)(tasks); waiter=this; dim=0;}
 
     void initOnce() { }
     void freeOnce() {}
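Note: TaskList objects are pooled and recycled through QueueInfo, so init() has to reset every per-use field. Without this change, a recycled list could inherit a stale dim from its previous use and be routed down the multi-dimension path. A toy illustration follows; the field names mirror TaskList, but the one-element pool and main() are hypothetical.

    #include <cassert>

    // Sketch: pooled objects keep whatever the previous user left behind,
    // so init() must clear dim on every reuse, as the patch does.
    struct List {
        long lastTask;
        int  dim;                                // 0 = plain list, >0 = multi-dimensional run
        void init() { lastTask = 0; dim = 0; }   // reset all per-use state
    };

    int main() {
        List pool[1];
        pool[0].dim = 3;        // first user ran a 3D task
        // ... list goes back to the free queue ...
        pool[0].init();         // second user must get a clean list
        assert(pool[0].dim == 0);
        return 0;
    }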
--- a/TaskManager/kernel/ppe/TaskManagerImpl.h	Sun Mar 31 14:21:42 2013 +0900
+++ b/TaskManager/kernel/ppe/TaskManagerImpl.h	Sun Mar 31 20:32:24 2013 +0900
@@ -69,6 +69,7 @@
     virtual void set_task_depend(HTaskPtr master, HTaskPtr slave);
     virtual void spawn_task(HTaskPtr);
     virtual void set_task_cpu(HTaskPtr, CPU_TYPE);
+    virtual int max_cpu()=0;
 
     void set_taskList(HTaskPtr htask, QueueInfo<TaskList> * taskList);
     void free_htask(HTaskPtr htask) {
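Note: declaring max_cpu() pure virtual here is what forces every backend in this changeset to implement it: CellTaskManagerImpl and FifoTaskManagerImpl return machineNum, while the SPE-side stub returns 0. A compilable sketch of that shape follows; the class bodies are reduced to this one method and the machineNum value is illustrative.

    #include <cstdio>

    // Sketch of the interface introduced by the patch (names mirror the
    // diff; everything else about these classes is omitted).
    struct TaskManagerImpl {
        virtual ~TaskManagerImpl() {}
        virtual int max_cpu() = 0;          // every backend must report its worker count
    };

    struct FifoTaskManagerImpl : TaskManagerImpl {
        int machineNum;
        FifoTaskManagerImpl(int n) : machineNum(n) {}
        int max_cpu() { return machineNum; }
    };

    struct SpeTaskManagerImpl : TaskManagerImpl {
        int max_cpu() { return 0; }         // SPE side has no nested workers
    };

    int main() {
        FifoTaskManagerImpl fifo(4);
        SpeTaskManagerImpl spe;
        TaskManagerImpl *m = &fifo;
        printf("%d %d\n", m->max_cpu(), spe.max_cpu());  // prints: 4 0
        return 0;
    }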
--- a/TaskManager/kernel/schedule/SchedTask.cc	Sun Mar 31 14:21:42 2013 +0900
+++ b/TaskManager/kernel/schedule/SchedTask.cc	Sun Mar 31 20:32:24 2013 +0900
@@ -113,7 +113,7 @@
     }
     connector->dma_wait((DMA_READ + this->tag));
     void *read = get_input(readbuf, 0);
     void *write = get_output(writebuf, 0);
-    // set param (if exit dim )
+    // set param (if exist dim )
     if (list->dim) {
         multi_dimension(list, read, write,run);
     } else {
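Note: this is the dispatch point the comment fix refers to: once input DMA completes, the scheduler either invokes the task body once, or, when list->dim is set, once per point of the iteration space via multi_dimension(). A simplified sketch of that control flow follows; the real multi_dimension() signature and its per-index buffer handling differ, so treat this purely as an illustration of the branch.

    #include <cstdio>

    // Sketch (assumed, simplified): with dim set, the same task body runs
    // once per point of the flattened iteration space; otherwise it runs
    // a single time. Per-index input/output wiring is omitted.
    typedef void (*RunFn)(void *read, void *write, int index);

    static void body(void *, void *, int index) { printf("run index %d\n", index); }

    static void schedule(int dim, int dim_count, void *r, void *w, RunFn run) {
        if (dim) {
            for (int i = 0; i < dim_count; i++) run(r, w, i);  // multi-dimensional path
        } else {
            run(r, w, 0);                                      // plain path
        }
    }

    int main() {
        schedule(1, 4, 0, 0, body);   // dim set: four invocations
        schedule(0, 0, 0, 0, body);   // plain:   one invocation
        return 0;
    }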
--- a/TaskManager/kernel/schedule/Scheduler.cc	Sun Mar 31 14:21:42 2013 +0900
+++ b/TaskManager/kernel/schedule/Scheduler.cc	Sun Mar 31 20:32:24 2013 +0900
@@ -48,11 +48,11 @@
     task_count = 0;
 #endif
 
-	/*
-	 * ;TODO
-	 * In the multi-core version a Main Scheduler is created for each thread;
-	 * each one wastefully repeats the initialization of the global list.
-	 */
+    /*
+     * ;TODO
+     * In the multi-core version a Main Scheduler is created for each thread;
+     * each one wastefully repeats the initialization of the global list.
+     */
 
     for (int i = 0; i< MAX_TASK_OBJECT; i++) {
         task_list[i].run = null_run;
@@ -135,8 +135,8 @@
 /*
-   Everything from here down should live under memory/
- */
+  Everything from here down should live under memory/
+*/
 
 void*
 Scheduler::global_alloc(int id, int size)
@@ -186,7 +186,7 @@
     code_segment_pool = createMemList(size, count);
     if (table) {
         MemorySegment* here = (MemorySegment*)(
-	    manager->allocate(sizeof(MemorySegment)));
+            manager->allocate(sizeof(MemorySegment)));
         here->data = (void*)(table->vma);
         here->size = size;
         here->address = (memaddr)here;
@@ -198,23 +198,23 @@
 load_task(Scheduler *m, int task_id)
 {
     MemorySegment *s = m->get_segment(
-	task_list[task_id].location,
-	m->code_segment_pool,
-	task_list[task_id].end-task_list[task_id].location);
+        task_list[task_id].location,
+        m->code_segment_pool,
+        task_list[task_id].end-task_list[task_id].location);
     task_list[task_id].segment = s;
     // calcurate call address
     TaskObjectRun run = (TaskObjectRun)(
-	(char*)task_list[task_id].segment->data +
-	task_list[task_id].entry_offset);
+        (char*)task_list[task_id].segment->data +
+        task_list[task_id].entry_offset);
     task_list[task_id].run = run;
 #if 0
     m->printf("loadng task id %d at 0x%x entry 0x%x location 0x%x\n",task_id,
-	(unsigned int)(task_list[task_id].segment->data ),
-	(unsigned int)(
-	    (char*)task_list[task_id].segment->data +
-	    task_list[task_id].entry_offset),
-	task_list[task_id].location);
+        (unsigned int)(task_list[task_id].segment->data ),
+        (unsigned int)(
+            (char*)task_list[task_id].segment->data +
+            task_list[task_id].entry_offset),
+        task_list[task_id].location);
 #endif
 }
@@ -237,8 +237,8 @@
     m->wait_segment(task_list[task_id].segment);
 #if 0
     m->printf("wait load task id %d done. creator = 0x%x entry_offset = 0x%x\n",task_id,
-	(unsigned int)(task_list[task_id].run),
-	task_list[task_id].entry_offset);
+        (unsigned int)(task_list[task_id].run),
+        task_list[task_id].entry_offset);
 #endif
 }
@@ -263,9 +263,9 @@
 extern void
 register_dynamic_task(int cmd,
-	memaddr start, int size,
-	TaskObjectRun run, int entry_offset,
-	const char *str)
+                      memaddr start, int size,
+                      TaskObjectRun run, int entry_offset,
+                      const char *str)
 {
     task_list[cmd].run = run;
     task_list[cmd].location = start;
@@ -281,11 +281,11 @@
 #endif
 
 #if 0
-this->printf("cmd = %d\n",cmd);
-this->printf("locatation = 0x%x\n",start);
-this->printf("end = 0x%x\n",start+size);
-this->printf("size = 0x%x\n",size);
-this->printf("entry = 0x%x\n",entry_offset);
+    this->printf("cmd = %d\n",cmd);
+    this->printf("locatation = 0x%x\n",start);
+    this->printf("end = 0x%x\n",start+size);
+    this->printf("size = 0x%x\n",size);
+    this->printf("entry = 0x%x\n",entry_offset);
 #endif
 }
@@ -299,7 +299,7 @@
   @param [count] number of elements
   @return pointer to the allocated area
 
- */
+*/
 MemList*
 Scheduler::createMemList(int size, int count)
 {
@@ -326,14 +326,14 @@
 /*!
-    Fetch a segment of Main Memory.
+  Fetch a segment of Main Memory.
 
   @param [addr] address in Main Memory
   @param [m] Mem List
   @return pointer to the allocated area
 
-    should live in the memory directory, though...
+  should live in the memory directory, though...
 
- */
+*/
 MemorySegment *
 Scheduler::get_segment(memaddr addr, MemList *m)
 {
@@ -362,16 +362,16 @@
 /*!
-    Fetch a free segment (from the SPE's LS).
-    In the write-only case no dma_load is needed,
-    so it probably ends up looking like this.
+  Fetch a free segment (from the SPE's LS).
+  In the write-only case no dma_load is needed,
+  so it probably ends up looking like this.
 
   @param [addr] address in Main Memory
   @param [m] Mem List
   @return pointer to the allocated area
 
-    should live in the memory directory, though...
+  should live in the memory directory, though...
 
- */
+*/
 MemorySegment *
 Scheduler::get_free_segment(memaddr addr, MemList *m)
@@ -398,7 +398,7 @@
 
   @param [s] MemorySegment to overwrite
   @param [addr] address of the data to overwrite
 
- */
+*/
 void
 Scheduler::overwrite_segment(MemorySegment *s, memaddr addr)
@@ -414,6 +414,18 @@
 }
 
+int
+Scheduler::max_cpu()
+{ // todo
+    return manager->max_cpu();
+}
+
+int
+Scheduler::min_cpu()
+{ // todo
+    return 0;
+}
+
 MemorySegment *
 Scheduler::get_segment(memaddr addr, MemList *m, int size)
 {
@@ -424,7 +436,7 @@
 
     if (s) {
         /* already loaded */
-// this->printf("get_segement loaded %llx 0x%x size 0x%d\n",addr,s->data,size);
+        // this->printf("get_segement loaded %llx 0x%x size 0x%d\n",addr,s->data,size);
         m->moveToFirst(s);
         return s;
     }
@@ -443,7 +455,7 @@
     s->address = addr;
     hash->put(s->address, s);
-// this->printf("get_segement %llx 0x%x size 0x%d\n",addr, s->data,size);
+    // this->printf("get_segement %llx 0x%x size 0x%d\n",addr, s->data,size);
     return s;
 }
@@ -452,16 +464,16 @@
 /*!
-    Write out a segment of Main Memory.
-    The segment must have been obtained via get_segment
-    and must not have been evicted;
-    guaranteeing that is difficult?
+  Write out a segment of Main Memory.
+  The segment must have been obtained via get_segment
+  and must not have been evicted;
+  guaranteeing that is difficult?
 
   @param [addr] address in Main Memory
   @param [m] Mem List
  @return pointer to the allocated area
 
- */
+*/
 void
 Scheduler::put_segment(MemorySegment *s)
 {
@@ -473,11 +485,11 @@
 /*!
-    Wait for a Main Memory segment read or write to finish.
+  Wait for a Main Memory segment read or write to finish.
 
   @param [id] id of the MemorySegment
 
- */
+*/
 void
 Scheduler::wait_segment(MemorySegment *s)
 {
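Note: beyond the re-indentation, two things land in Scheduler.cc: max_cpu()/min_cpu() stubs that delegate to the TaskManagerImpl, and the hunks around get_segment() expose how segments are cached: a hash lookup, moveToFirst() on a hit, a load on a miss. A self-contained sketch of that caching discipline follows; the SegmentCache class is an assumption for illustration, and Cerium's MemList/MemHash details differ.

    #include <cstdio>
    #include <cstdint>
    #include <list>
    #include <unordered_map>

    typedef uintptr_t memaddr;

    // Sketch (assumed): a small software cache keyed by main-memory
    // address. Hits move to the front of an MRU list (moveToFirst);
    // misses evict from the back and would dma_load the segment.
    struct SegmentCache {
        std::list<memaddr> mru;                                    // front = most recent
        std::unordered_map<memaddr, std::list<memaddr>::iterator> where;
        size_t capacity;

        explicit SegmentCache(size_t cap) : capacity(cap) {}

        bool get(memaddr addr) {
            std::unordered_map<memaddr, std::list<memaddr>::iterator>::iterator
                it = where.find(addr);
            if (it != where.end()) {                   // already loaded
                mru.splice(mru.begin(), mru, it->second);  // moveToFirst
                return true;
            }
            if (mru.size() == capacity) {              // evict least recent
                where.erase(mru.back());
                mru.pop_back();
            }
            mru.push_front(addr);                      // dma_load would go here
            where[addr] = mru.begin();
            return false;
        }
    };

    int main() {
        SegmentCache c(2);
        printf("%d %d %d %d\n", c.get(0x100), c.get(0x200), c.get(0x100), c.get(0x300));
        // prints: 0 0 1 0 (the second access to 0x100 hits the cache)
        return 0;
    }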