CbC_xv6: changeset 35:ad1d3b268e2d
update
| author | mir3636 |
|---|---|
| date | Fri, 22 Feb 2019 16:32:51 +0900 |
| parents | a7144583914c |
| children | d4e5846ddb48 |
| files | src/file.c src/pipe.c src/spinlock.c |
| diffstat | 3 files changed, 78 insertions(+), 1 deletions(-) |
--- a/src/file.c	Thu Feb 14 23:33:57 2019 +0900
+++ b/src/file.c	Fri Feb 22 16:32:51 2019 +0900
@@ -118,7 +118,7 @@
     }
 
     if (f->type == FD_PIPE) {
-        //goto cbc_piperead(f->pipe, addr, n, next);
+        goto cbc_piperead(f->pipe, addr, n, next);
         goto next(-1);
     }
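Note: in CbC (Continuation based C), `goto f(args)` transfers control to the code segment `f` and does not return, so with the call uncommented the following `goto next(-1)` is only the dead fallback on the pipe path. The sketch below approximates this continuation-passing call shape in plain C; the names (`next_t`, `on_read_done`, `sketch_piperead`) are illustrative stand-ins, not code from this repository.

```c
/* Minimal plain-C sketch of the continuation-passing shape used in
 * fileread's FD_PIPE branch.  Ordinary calls and a function pointer
 * stand in for CbC code segments and `goto` jumps. */
#include <stdio.h>

typedef void (*next_t)(int ret);        /* stands in for __code (*next)(int ret) */

static void on_read_done(int ret)       /* hypothetical continuation */
{
    printf("piperead finished with %d\n", ret);
}

static void sketch_piperead(int n, next_t next)
{
    /* ...copy up to n bytes from the pipe... */
    next(n);                            /* CbC: goto next(n); */
}

int main(void)
{
    sketch_piperead(16, on_read_done);  /* CbC: goto cbc_piperead(f->pipe, addr, n, next); */
    return 0;
}
```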
--- a/src/pipe.c	Thu Feb 14 23:33:57 2019 +0900
+++ b/src/pipe.c	Fri Feb 22 16:32:51 2019 +0900
@@ -116,6 +116,34 @@
     return n;
 }
+int cbc_piperead(struct pipe *p, char *addr, int n, __code (*next)(int ret))
+{
+    int i;
+
+    acquire(&p->lock);
+
+    while(p->nread == p->nwrite && p->writeopen){ //DOC: pipe-empty
+        if(proc->killed){
+            release(&p->lock);
+            goto next(-1);
+        }
+
+        sleep(&p->nread, &p->lock); //DOC: piperead-sleep*/
+    }
+
+    for(i = 0; i < n; i++){ //DOC: piperead-copy
+        if(p->nread == p->nwrite) {
+            break;
+        }
+
+        addr[i] = p->data[p->nread++ % PIPESIZE];
+    }
+
+    goto cbc_wakeup(&p->nwrite); //DOC: piperead-wakeup
+    release(&p->lock);
+
+    return i;
+}
 
 int piperead(struct pipe *p, char *addr, int n)
 {
     int i;
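cbc_piperead keeps piperead's wait-and-copy logic but takes a `__code (*next)(int ret)` continuation: the killed-process path leaves with `goto next(-1)`, and after the copy loop control jumps to `cbc_wakeup`, with the trailing `release`/`return i` carried over verbatim from piperead. The plain-C skeleton below is a hedged approximation of that control flow only; the toy types and names are invented here, and locking, sleeping, and the wakeup chain are omitted.

```c
/* Toy skeleton of cbc_piperead's control flow (hypothetical names, no real
 * locking or sleeping).  The two exits mirror `goto next(-1)` and the byte
 * count handed back to the caller's continuation. */
#include <stdio.h>
#include <string.h>

#define PIPESIZE 512                        /* same constant name as xv6 */

struct toy_pipe {                           /* illustrative stand-in for struct pipe */
    char data[PIPESIZE];
    unsigned int nread, nwrite;
    int writeopen, killed;
};

typedef void (*next_t)(int ret);            /* stands in for __code (*next)(int ret) */

static void toy_piperead(struct toy_pipe *p, char *addr, int n, next_t next)
{
    int i;

    while (p->nread == p->nwrite && p->writeopen) {  /* pipe empty */
        if (p->killed) {
            next(-1);                       /* CbC: goto next(-1); */
            return;
        }
        break;                              /* real code sleeps on &p->nread here */
    }
    for (i = 0; i < n; i++) {               /* copy out */
        if (p->nread == p->nwrite)
            break;
        addr[i] = p->data[p->nread++ % PIPESIZE];
    }
    next(i);                                /* hands the count to the continuation */
}

static void report(int ret) { printf("toy_piperead returned %d\n", ret); }

int main(void)
{
    struct toy_pipe p = {0};
    char buf[8];

    memcpy(p.data, "hi", 2);
    p.nwrite = 2;
    toy_piperead(&p, buf, sizeof buf, report);  /* prints 2 */
    return 0;
}
```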
--- a/src/spinlock.c	Thu Feb 14 23:33:57 2019 +0900
+++ b/src/spinlock.c	Fri Feb 22 16:32:51 2019 +0900
@@ -46,7 +46,56 @@
 #endif
 }
 
+void cbc_acquire(struct spinlock *lk, __code (*next)(int ret))
+{
+    pushcli(); // disable interrupts to avoid deadlock.
+    lk->locked = 1; // set the lock status to make the kernel happy
+
+#if 0
+    if(holding(lk))
+        panic("acquire");
+
+    // The xchg is atomic.
+    // It also serializes, so that reads after acquire are not
+    // reordered before it.
+    while(xchg(&lk->locked, 1) != 0)
+        ;
+
+    // Record info about lock acquisition for debugging.
+    lk->cpu = cpu;
+    getcallerpcs(get_fp(), lk->pcs);
+
+#endif
+    goto next();
+}
+
 // Release the lock.
+void cbc_release(struct spinlock *lk, __code (*next)(int ret))
+{
+#if 0
+    if(!holding(lk))
+        panic("release");
+
+    lk->pcs[0] = 0;
+    lk->cpu = 0;
+
+    // The xchg serializes, so that reads before release are
+    // not reordered after it. The 1996 PentiumPro manual (Volume 3,
+    // 7.2) says reads can be carried out speculatively and in
+    // any order, which implies we need to serialize here.
+    // But the 2007 Intel 64 Architecture Memory Ordering White
+    // Paper says that Intel 64 and IA-32 will not move a load
+    // after a store. So lock->locked = 0 would work here.
+    // The xchg being asm volatile ensures gcc emits it after
+    // the above assignments (and after the critical section).
+    xchg(&lk->locked, 0);
+#endif
+
+    lk->locked = 0; // set the lock state to keep the kernel happy
+    popcli();
+    goto next();
+}
+
 void release(struct spinlock *lk)
 {
 #if 0
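cbc_acquire and cbc_release keep xv6's pushcli/popcli interrupt discipline but only set or clear `lk->locked` directly, leaving the xchg-based spin and the debug bookkeeping disabled under `#if 0` just as in this tree's acquire/release, and both finish by jumping to `next`. Below is a hedged plain-C sketch of how a critical section could be bracketed by such continuations; ordinary calls stand in for CbC `goto` jumps and every name is hypothetical.

```c
/* Sketch of chaining acquire -> critical section -> release in
 * continuation style.  The lock is a plain flag; all names are illustrative. */
#include <stdio.h>

struct toy_lock { int locked; };            /* stand-in for struct spinlock */

typedef void (*next_t)(void);               /* stands in for a __code continuation */

static struct toy_lock lk;

static void toy_acquire(struct toy_lock *l, next_t next)
{
    l->locked = 1;                          /* cbc_acquire: pushcli(); lk->locked = 1; */
    next();                                 /* CbC: goto next(); */
}

static void toy_release(struct toy_lock *l, next_t next)
{
    l->locked = 0;                          /* cbc_release: lk->locked = 0; popcli(); */
    next();                                 /* CbC: goto next(); */
}

static void after_unlock(void)
{
    printf("lock released, locked=%d\n", lk.locked);
}

static void critical_section(void)          /* runs between acquire and release */
{
    printf("inside critical section, locked=%d\n", lk.locked);
    toy_release(&lk, after_unlock);         /* CbC: goto cbc_release(lk, next); */
}

int main(void)
{
    toy_acquire(&lk, critical_section);     /* CbC: goto cbc_acquire(lk, next); */
    return 0;
}
```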