#include "types.h"
#include "defs.h"
#include "param.h"
#include "memlayout.h"
#include "mmu.h"
#include "arm.h"
#include "proc.h"
#include "spinlock.h"

#interface "vm.h"

#define __ncode __code
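// Note: __ncode is made a plain alias of __code, so gears declared
// __ncode compile as ordinary CbC code gears; the distinct name is
// presumably used to mark gears that are entered directly rather than
// through the meta level (an assumption based on this file alone).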

//
// Process initialization:
// Process initialization is somewhat tricky.
// 1. We need to fake the kernel stack of a new process as if the process
//    had been interrupted (a trapframe on the stack); this allows us
//    to "return" to the correct user instruction.
// 2. We also need to fake the kernel execution for this new process. When
//    swtch switches to this (new) process, it will switch to its stack,
//    and reload registers with the saved context. We use forkret as the
//    return address (in the lr register). (On x86, it would be the return
//    address pushed on the stack by the process.)
//
// The design of the context switch in xv6 is interesting: after initialization,
// each CPU executes in the scheduler() function. The context switch is not
// between two processes, but between a process and the scheduler. Think of
// the scheduler as the idle process.
//
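// Roughly, one round of scheduling looks like this (a sketch):
//
//   process A: sched() --swtch--> scheduler() --swtch--> process B
//
// Each swtch saves the callee-saved registers of one context and
// restores the other, so control always passes through the per-CPU
// scheduler context between any two processes.
//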
struct {
    struct spinlock lock;
    struct proc proc[NPROC];
} ptable;

static struct proc *initproc;
struct proc *proc;

int nextpid = 1;
extern void forkret(void);
extern void trapret(void);

static void wakeup1(void *chan);

void pinit(void)
{
    initlock(&ptable.lock, "ptable");
}

//PAGEBREAK: 32
// Look in the process table for an UNUSED proc.
// If found, change state to EMBRYO and initialize
// state required to run in the kernel.
// Otherwise return 0.
static struct proc* allocproc(void)
{
    struct proc *p;
    char *sp;

    acquire(&ptable.lock);

    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++) {
        if(p->state == UNUSED) {
            goto found;
        }
    }

    release(&ptable.lock);
    return 0;

found:
    p->state = EMBRYO;
    p->pid = nextpid++;
    release(&ptable.lock);

    // Allocate kernel stack.
    if((p->kstack = alloc_page()) == 0){
        p->state = UNUSED;
        return 0;
    }

    sp = p->kstack + KSTACKSIZE;

    // Leave room for trap frame.
    sp -= sizeof(*p->tf);
    p->tf = (struct trapframe*)sp;

    // Set up new context to start executing at forkret,
    // which returns to trapret.
    sp -= 4;
    *(uint*)sp = (uint)trapret;

    sp -= 4;
    *(uint*)sp = (uint)p->kstack + KSTACKSIZE;

    sp -= sizeof(*p->context);
    p->context = (struct context*)sp;
    memset(p->context, 0, sizeof(*p->context));
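
    // The crafted kernel stack now looks like this (a sketch; exact
    // sizes depend on struct trapframe and struct context):
    //
    //   p->kstack + KSTACKSIZE ->  +----------------------+
    //                              | trapframe (p->tf)    |
    //                              +----------------------+
    //                              | trapret              |  <- lr slot of the
    //                              +----------------------+     skipped push {fp, lr}
    //                              | kstack + KSTACKSIZE  |  <- fp slot
    //                              +----------------------+
    //                              | context (p->context) |
    //   sp ->                      +----------------------+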

    // Skip the push {fp, lr} instruction in the prologue of forkret.
    // This is different from x86, where the hardware pushes the return
    // address before entering the callee. On ARM, the return address is
    // loaded into the lr register and pushed onto the stack by the callee
    // (if and when necessary). We skip that instruction so forkret uses
    // the two words we crafted above instead.
    p->context->lr = (uint)forkret+4;

    return p;
}

void error_init()
{
    panic("failed to craft first process\n");
}

//PAGEBREAK: 32
// Hand-craft the first user process. We link initcode.S into the kernel
// as a binary; the linker generates _binary_initcode_start/_size.
void cbc_init_vmm_dummy(struct Context* cbc_context, struct proc* p, pde_t* pgdir, char* init, uint sz)
{
    // inituvm(p->pgdir, _binary_initcode_start, (int)_binary_initcode_size);

    struct vm* vm = createvm_impl(cbc_context);
    // goto vm->init_vmm(vm, pgdir, init, sz, vm->void_ret);
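    // The commented-out continuation above is expanded by hand below:
    // the arguments are stored into the vm interface slot of cbc_context,
    // the continuation to run afterwards is set to C_vm_void_ret, and
    // control transfers to the init_inituvm code gear via goto meta.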
    Gearef(cbc_context, vm)->vm = (union Data*)vm;
    Gearef(cbc_context, vm)->pgdir = pgdir;
    Gearef(cbc_context, vm)->init = init;
    Gearef(cbc_context, vm)->sz = sz;
    Gearef(cbc_context, vm)->next = C_vm_void_ret;
    goto meta(cbc_context, vm->init_inituvm);
}

void userinit(void)
{
    struct proc* p;
    extern char _binary_initcode_start[], _binary_initcode_size[];

    p = allocproc();
    initContext(&p->cbc_context);

    initproc = p;

    if((p->pgdir = kpt_alloc()) == NULL) {
        panic("userinit: out of memory?");
    }

    cbc_init_vmm_dummy(&p->cbc_context, p, p->pgdir, _binary_initcode_start, (int)_binary_initcode_size);

    p->sz = PTE_SZ;

    // Craft the trapframe as if the process had been interrupted.
    memset(p->tf, 0, sizeof(*p->tf));

    p->tf->r14_svc = (uint)error_init;
    p->tf->spsr = spsr_usr();
    p->tf->sp_usr = PTE_SZ;    // set the user stack
    p->tf->lr_usr = 0;

    // Set the user pc. The actual pc loaded into r15_usr is in
    // p->tf, the trapframe.
    p->tf->pc = 0;             // beginning of initcode.S

    safestrcpy(p->name, "initcode", sizeof(p->name));
    p->cwd = namei("/");

    p->state = RUNNABLE;
}
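
// switchuvm_dummy follows the same hand-expanded pattern as
// cbc_init_vmm_dummy above, this time continuing to the vm
// interface's switchuvm code gear for the given process.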
void switchuvm_dummy(struct Context* cbc_context, struct proc* proc)
{
    struct vm* vm = createvm_impl(cbc_context);
    Gearef(cbc_context, vm)->vm = (union Data*)vm;
    Gearef(cbc_context, vm)->p = proc;
    Gearef(cbc_context, vm)->next = C_vm_void_ret;
    goto meta(cbc_context, vm->switchuvm);
}

// Grow current process's memory by n bytes.
// Return 0 on success, -1 on failure.
int growproc(int n)
{
    uint sz;

    sz = proc->sz;

    if(n > 0){
        if((sz = allocuvm(proc->pgdir, sz, sz + n)) == 0) {
            return -1;
        }
    } else if(n < 0){
        if((sz = deallocuvm(proc->pgdir, sz, sz + n)) == 0) {
            return -1;
        }
    }

    proc->sz = sz;
    //switchuvm(proc);
    switchuvm_dummy(&proc->cbc_context, proc);
    return 0;
}

// Create a new process copying p as the parent.
// Sets up stack to return as if from system call.
// Caller must set state of returned proc to RUNNABLE.
int fork(void)
{
    int i, pid;
    struct proc *np;

    // Allocate process.
    if((np = allocproc()) == 0) {
        return -1;
    }
    initContext(&np->cbc_context);

    // Copy process state from p.
    if((np->pgdir = copyuvm(proc->pgdir, proc->sz)) == 0){
        free_page(np->kstack);
        np->kstack = 0;
        np->state = UNUSED;
        return -1;
    }

    np->sz = proc->sz;
    np->parent = proc;
    // *np->tf = *proc->tf; // This generates a call to memcpy4, which is not in libgcc.a.
    memmove(np->tf, proc->tf, sizeof(*np->tf));

    // Clear r0 so that fork returns 0 in the child.
    np->tf->r0 = 0;

    for(i = 0; i < NOFILE; i++) {
        if(proc->ofile[i]) {
            np->ofile[i] = filedup(proc->ofile[i]);
        }
    }

    np->cwd = idup(proc->cwd);

    pid = np->pid;
    np->state = RUNNABLE;
    safestrcpy(np->name, proc->name, sizeof(proc->name));

    return pid;
}

// Exit the current process. Does not return.
// An exited process remains in the zombie state
// until its parent calls wait() to find out it exited.
void exit(void)
{
    struct proc *p;
    int fd;

    if(proc == initproc) {
        panic("init exiting");
    }

    // Close all open files.
    for(fd = 0; fd < NOFILE; fd++){
        if(proc->ofile[fd]){
            fileclose(proc->ofile[fd]);
            proc->ofile[fd] = 0;
        }
    }

    iput(proc->cwd);
    proc->cwd = 0;

    acquire(&ptable.lock);

    // Parent might be sleeping in wait().
    wakeup1(proc->parent);

    // Pass abandoned children to init.
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
        if(p->parent == proc){
            p->parent = initproc;

            if(p->state == ZOMBIE) {
                wakeup1(initproc);
            }
        }
    }

    // Jump into the scheduler, never to return.
    proc->state = ZOMBIE;
    sched();

    panic("zombie exit");
}

// Wait for a child process to exit and return its pid.
// Return -1 if this process has no children.
int wait(void)
{
    struct proc *p;
    int havekids, pid;

    acquire(&ptable.lock);

    for(;;){
        // Scan through table looking for zombie children.
        havekids = 0;

        for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
            if(p->parent != proc) {
                continue;
            }

            havekids = 1;

            if(p->state == ZOMBIE){
                // Found one.
                pid = p->pid;
                free_page(p->kstack);
                p->kstack = 0;
                freevm(p->pgdir);
                p->state = UNUSED;
                p->pid = 0;
                p->parent = 0;
                p->name[0] = 0;
                p->killed = 0;
                release(&ptable.lock);

                return pid;
            }
        }

        // No point waiting if we don't have any children.
        if(!havekids || proc->killed){
            release(&ptable.lock);
            return -1;
        }

        // Wait for children to exit. (See wakeup1 call in exit.)
        sleep(proc, &ptable.lock); //DOC: wait-sleep
    }
}

//PAGEBREAK: 42
// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
// Scheduler never returns. It loops, doing:
//  - choose a process to run
//  - swtch to start running that process
//  - eventually that process transfers control
//    via swtch back to the scheduler.
void scheduler(void)
{
    struct proc *p;

    for(;;){
        // Enable interrupts on this processor.
        sti();

        // Loop over process table looking for process to run.
        acquire(&ptable.lock);

        for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
            if(p->state != RUNNABLE) {
                continue;
            }

            // Switch to chosen process. It is the process's job
            // to release ptable.lock and then reacquire it
            // before jumping back to us.
            proc = p;
            switchuvm(p);

            p->state = RUNNING;

            swtch(&cpu->scheduler, proc->context);
            // Process is done running for now.
            // It should have changed its p->state before coming back.
            proc = 0;
        }

        release(&ptable.lock);
    }
}
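
// cbc_sched is the CbC counterpart of sched() below: instead of
// returning to its caller after swtch brings the process back, it
// transfers control to the continuation passed in as next.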
__ncode cbc_sched(__code(*next)())
{
    int intena;

    if(!holding(&ptable.lock)) {
        panic("sched ptable.lock");
    }

    if(cpu->ncli != 1) {
        panic("sched locks");
    }

    if(proc->state == RUNNING) {
        panic("sched running");
    }

    if(int_enabled()) {
        panic("sched interruptible");
    }

    intena = cpu->intena;
    swtch(&proc->context, cpu->scheduler);
    cpu->intena = intena;

    goto next();
}

// Enter scheduler. Must hold only ptable.lock
// and have changed proc->state.
void sched(void)
{
    int intena;

    //show_callstk ("sched");

    if(!holding(&ptable.lock)) {
        panic("sched ptable.lock");
    }

    if(cpu->ncli != 1) {
        panic("sched locks");
    }

    if(proc->state == RUNNING) {
        panic("sched running");
    }

    if(int_enabled()) {
        panic("sched interruptible");
    }

    intena = cpu->intena;
    swtch(&proc->context, cpu->scheduler);
    cpu->intena = intena;
}

// Give up the CPU for one scheduling round.
void yield(void)
{
    acquire(&ptable.lock); //DOC: yieldlock
    proc->state = RUNNABLE;
    sched();
    release(&ptable.lock);
}

// A fork child's very first scheduling by scheduler()
// will swtch here. "Return" to user space.
void forkret(void)
{
    static int first = 1;

    // Still holding ptable.lock from scheduler.
    release(&ptable.lock);

    if (first) {
        // Some initialization functions must be run in the context
        // of a regular process (e.g., they call sleep), and thus cannot
        // be run from main().
        first = 0;
        initlog();
    }

    // Return to "caller", actually trapret (see allocproc).
}
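
// The CbC version of sleep is split into two code gears around the
// context switch: cbc_sleep records the channel, the lock, and the
// continuation, then goes to cbc_sched; cbc_sleep1 runs when the
// process is next scheduled and redoes the tail of sleep() below.
// A hypothetical caller (a sketch only; read_cont is not a real code
// gear in this file) would look like:
//
//     goto cbc_sleep(&p->nread, &p->lock, read_cont);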
__ncode cbc_sleep1()
{
    struct spinlock *lk = proc->lk;
    // Tidy up.
    proc->chan = 0;

    // Reacquire original lock.
    if(lk != &ptable.lock){ //DOC: sleeplock2
        release(&ptable.lock);
        acquire(lk);
    }
    goto proc->cbc_next();
}

__ncode cbc_sleep(void *chan, struct spinlock *lk, __code(*next1)())
{
    //show_callstk("sleep");

    if(proc == 0) {
        panic("sleep");
    }

    if(lk == 0) {
        panic("sleep without lk");
    }

    if(lk != &ptable.lock){ //DOC: sleeplock0
        acquire(&ptable.lock); //DOC: sleeplock1
        release(lk);
    }
    proc->chan = chan;
    proc->state = SLEEPING;
    proc->lk = lk;
    proc->cbc_next = next1;

    goto cbc_sched(cbc_sleep1);
}

// Atomically release lock and sleep on chan.
// Reacquires lock when awakened.
void sleep(void *chan, struct spinlock *lk)
{
    //show_callstk("sleep");

    if(proc == 0) {
        panic("sleep");
    }

    if(lk == 0) {
        panic("sleep without lk");
    }

    // Must acquire ptable.lock in order to change p->state and then call
    // sched. Once we hold ptable.lock, we can be guaranteed that we won't
    // miss any wakeup (wakeup runs with ptable.lock locked), so it's okay
    // to release lk.
    if(lk != &ptable.lock){ //DOC: sleeplock0
        acquire(&ptable.lock); //DOC: sleeplock1
        release(lk);
    }

    // Go to sleep.
    proc->chan = chan;
    proc->state = SLEEPING;
    sched();

    // Tidy up.
    proc->chan = 0;

    // Reacquire original lock.
    if(lk != &ptable.lock){ //DOC: sleeplock2
        release(&ptable.lock);
        acquire(lk);
    }
}

//PAGEBREAK!
// Wake up all processes sleeping on chan. The ptable lock must be held.
static void wakeup1(void *chan)
{
    struct proc *p;

    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++) {
        if(p->state == SLEEPING && p->chan == chan) {
            p->state = RUNNABLE;
        }
    }
}
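
// The CbC wakeup pair mirrors cbc_sleep: cbc_wakeup takes the lock and
// stashes the continuation, cbc_wakeup1 does the scan, releases the
// lock, and then jumps to the stored continuation.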
__ncode cbc_wakeup1(void *chan)
{
    struct proc *p;

    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++) {
        if(p->state == SLEEPING && p->chan == chan) {
            p->state = RUNNABLE;
        }
    }

    release(&ptable.lock);
    goto proc->cbc_next();
}

__ncode cbc_wakeup(void *chan, __code(*next1)())
{
    acquire(&ptable.lock);
    proc->cbc_next = next1;
    cbc_wakeup1(chan);
}

// Wake up all processes sleeping on chan.
void wakeup(void *chan)
{
    acquire(&ptable.lock);
    wakeup1(chan);
    release(&ptable.lock);
}

// Kill the process with the given pid. Process won't exit until it returns
// to user space (see trap in trap.c).
int kill(int pid)
{
    struct proc *p;

    acquire(&ptable.lock);

    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
        if(p->pid == pid){
            p->killed = 1;

            // Wake process from sleep if necessary.
            if(p->state == SLEEPING) {
                p->state = RUNNABLE;
            }

            release(&ptable.lock);
            return 0;
        }
    }

    release(&ptable.lock);
    return -1;
}

//PAGEBREAK: 36
// Print a process listing to console. For debugging. Runs when user
// types ^P on console. No lock to avoid wedging a stuck machine further.
void procdump(void)
{
    static char *states[] = {
        [UNUSED]   = "unused",
        [EMBRYO]   = "embryo",
        [SLEEPING] = "sleep ",
        [RUNNABLE] = "runble",
        [RUNNING]  = "run   ",
        [ZOMBIE]   = "zombie"
    };

    struct proc *p;
    char *state;

    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
        if(p->state == UNUSED) {
            continue;
        }

        if(p->state >= 0 && p->state < NELEM(states) && states[p->state]) {
            state = states[p->state];
        } else {
            state = "???";
        }

        // Guard the parent pointer: initproc has no parent.
        cprintf("%d %s %d:%s %d\n", p->pid, state, p->pid, p->name,
                p->parent ? p->parent->pid : 0);
    }

    show_callstk("procdump: \n");
}