view TaskManager/kernel/ppe/CpuThreads.cc @ 1886:c3573a5ac6a1 draft

GPU also waits
author Shinji KONO <kono@ie.u-ryukyu.ac.jp>
date Fri, 03 Jan 2014 16:57:34 +0900
parents 392c1a2d699d
children 9085a4692cfd
line wrap: on
line source

#include <stdlib.h>
#include "types.h"
#include "CpuThreads.h"
#ifdef __CERIUM_GPU__
#include "GpuThreads.h"
#endif
#include "MainScheduler.h"
#include "SysFunc.h"
#include "SchedNop.h"
#include "SpeTaskManagerImpl.h"
#include "CellScheduler.h"
#include <fcntl.h>

SchedExternTask(ShowTime);
SchedExternTask(StartProfile);

/**
 * Build a pool of worker threads.
 *
 * id_offset is gpu_num: cpu ids below it belong to the GPU side.
 *
 * @param num       number of CPU worker threads
 * @param i_num     unused in this translation unit
 * @param useRefDma DMA-mode flag forwarded to every scheduler
 * @param start_id  first cpu id owned by this pool
 */
CpuThreads::CpuThreads(int num,int i_num, int useRefDma, int start_id) :
	cpu_num(num), use_refdma(useRefDma), id_offset(start_id) {
    io_num = 2;  // two additional threads are reserved for I/O work
#ifdef __CERIUM_GPU__
    gpu = new GpuThreads(useRefDma);
#endif
    const int total = cpu_num + io_num;
    threads = new pthread_t[total];
    // NOTE(review): args is over-allocated by id_offset slots; only the
    // first (cpu_num + io_num) entries are ever touched, since indexing
    // elsewhere is always (cpuid - id_offset). Confirm the extra slots
    // are intentional.
    args    = new cpu_thread_arg_t[total + id_offset];
    wait    = new Sem(0);  // counted up by each thread once it is ready
}

/**
 * Shut the pool down in three phases: ask every worker to exit,
 * wait for each one to terminate, then release per-thread state.
 * Schedulers are freed only after their threads have been joined.
 */
CpuThreads::~CpuThreads()
{
    memaddr exit_cmd = (memaddr)MY_SPE_COMMAND_EXIT;
    const int nthreads = cpu_num + io_num;

    // Phase 1: deliver the exit command to every worker.
    for (int id = 0; id < nthreads; id++) {
        send_mail(id + id_offset, 1, &exit_cmd);
    }

    // Phase 2: wait until each worker has actually finished.
    for (int id = 0; id < nthreads; id++) {
        pthread_join(threads[id], NULL);
    }

    // Phase 3: now it is safe to tear down their schedulers.
    for (int id = 0; id < nthreads; id++) {
        delete args[id].scheduler;
    }

    delete [] threads;
    delete [] args;
#ifdef __CERIUM_GPU__
    delete gpu;
#endif
}

void *
CpuThreads::cpu_thread_run(void *args)
{
    cpu_thread_arg_t *argt = (cpu_thread_arg_t *) args;
    Scheduler *c_scheduler = argt->scheduler;

    TaskManagerImpl *manager = new SpeTaskManagerImpl();
    c_scheduler->init(manager,argt->useRefDma);
    c_scheduler->id = (int)argt->cpuid;
    c_scheduler->mincpu = (int)argt->id_offset;
    c_scheduler->maxcpu = (int)argt->cpu_num + (int)argt->id_offset;
    manager->set_scheduler(c_scheduler);
    SchedRegister(ShowTime);
    SchedRegister(StartProfile);
    if (argt->cpuid >= argt->cpu_num) {
        // set IO thread priory maximum
	int policy;
	struct sched_param param;
        pthread_getschedparam(pthread_self(), &policy, &param);
        param.sched_priority = 1;
        pthread_setschedparam(pthread_self(), policy, &param);
    }
    
    argt->wait->sem_v();        //準備完了したスレッドができるたびに+1していく

    c_scheduler->run(new SchedNop());
    c_scheduler->finish();

    return NULL;
}


void
CpuThreads::init()
{
#ifdef __CERIUM_GPU__
    gpu->set_wait(wait);
    gpu->init();
    wait->sem_p();
#endif

    for (int i = 0; i < cpu_num+io_num; i++) {
        args[i].cpuid = i + id_offset;
        args[i].scheduler = new MainScheduler();
        args[i].wait = wait;
        args[i].useRefDma = use_refdma;
        args[i].id_offset = id_offset;
        args[i].cpu_num = cpu_num;
    }

    for (int i = 0; i < cpu_num+io_num; i++) {
        pthread_create(&threads[i], NULL,
                       &cpu_thread_run, (void*)&args[i]);
    }

    for (int i = 0; i < cpu_num+io_num; i++) {
        wait->sem_p();
    }
}

/**
 * Install the mail-waiter semaphore on every worker's connector
 * (and on the GPU side when compiled in).
 */
void
CpuThreads::set_mail_waiter(SemPtr w) {
    const int nthreads = cpu_num + io_num;
    for (int id = 0; id < nthreads; id++) {
        args[id].scheduler->connector->set_mail_waiter(w);
    }
#ifdef __CERIUM_GPU__
    gpu->set_mail_waiter(w);
#endif
}

/**
 * Hand a TaskList to the worker identified by id.
 * For multi-dimensional tasks, the dimension count is clamped to the
 * number of available CPUs before delivery.
 *
 * @return 1 always
 */
int
CpuThreads::spawn_task(int id, TaskListPtr p) {
    p->cpu = id - id_offset;
    if (p->dim>0 && id >= SPE_0) {  // should check IO
        int dims = (p->x)*(p->y)*(p->z);
        // Never claim more dimensions than there are CPUs.
        p->self->flag.dim_count = (dims > cpu_num) ? cpu_num : dims;
    }
    send_mail(id, 1, (memaddr*)&p);
    return 1;
}

/**
 * Receive one mail message from the given CPU thread.
 *
 * @param cpuid  cpu id (ids below id_offset are routed to the GPU side)
 * @param count  forwarded to the GPU path; unused for CPU threads
 * @param ret    out: the received mailbox message
 *
 * @return 1 for CPU threads; the GPU path's own result otherwise
 */
int
CpuThreads::get_mail(int cpuid, int count, memaddr *ret)
{
#ifdef __CERIUM_GPU__
    if (is_gpu(cpuid)) return gpu->get_mail(cpuid, count, ret);
#endif
    *ret = args[cpuid - id_offset].scheduler->mail_read_from_host();
    return 1;
}

/**
 * Non-blocking poll: if the worker has pending mail, read it into ret.
 *
 * @return 0 when no mail is pending; otherwise get_mail's result
 */
int
CpuThreads::has_mail(int cpuid, int count, memaddr *ret)
{
#ifdef __CERIUM_GPU__
    if (is_gpu(cpuid)) return gpu->has_mail(cpuid, count, ret);
#endif
    // Return 0 when there is nothing to read.
    if (args[cpuid - id_offset].scheduler->has_mail_from_host() == 0) {
        return 0;
    }
    return get_mail(cpuid, count, ret);
}
/**
 * Inbound Mailbox
 * Mail delivery: Front End -> CPU.
 *
 * Intended to be as close to NONBLOCKING as possible, but the
 * underlying mailbox write may still have to wait for queue space,
 * so in practice this can block (and the blocking mode may busy-wait).
 *
 * Fix: the CPU path previously ignored [num] and delivered only the
 * first message; it now writes all [num] messages, matching both the
 * GPU path and the documented contract. All callers in this file pass
 * num == 1, so their behavior is unchanged.
 *
 * @param [cpuid] target cpu id (ids below id_offset go to the GPU side)
 * @param [num]   the number of messages in [data]
 * @param [data]  array of [num] mailbox messages to send
 */
void
CpuThreads::send_mail(int cpuid, int num, memaddr *data)
{
#ifdef __CERIUM_GPU__
    if (is_gpu(cpuid)){
        gpu->send_mail(cpuid, num, data);
        return;
    }
#endif
    for (int i = 0; i < num; i++) {
        args[cpuid-id_offset].scheduler->mail_write_from_host(data[i]);
    }
}

// Placeholder: output-TaskList bookkeeping is not implemented yet.
void
CpuThreads::add_output_tasklist(int command, memaddr buff, int alloc_size)
{
    /*
     * Intended behavior (not implemented):
     * Create a new output TaskList if none exists; otherwise append
     * the allocated Task to the existing TaskList, then run the Task
     * initialization corresponding to command.
     * Initialization runs on the PPE side after the data has finished
     * being written out to the SPE.
     */
}

        //GPUなら1を返す
int
CpuThreads::is_gpu(int cpuid)
{
    if (cpuid < id_offset) {
        return 1;
    } else {
        return 0;
    }
}

/* end */