#include "types.h"
#include "defs.h"
#include "param.h"
#include "memlayout.h"
#include "mmu.h"
#include "arm.h"
#include "proc.h"
#include "spinlock.h"

#define __ncode __code

//
// Process initialization:
// Process initialization is somewhat tricky.
// 1. We need to fake the kernel stack of a new process as if the process
//    had been interrupted (a trapframe on the stack); this allows us
//    to "return" to the correct user instruction.
// 2. We also need to fake the kernel execution for this new process. When
//    swtch switches to this (new) process, it will switch to its stack,
//    and reload registers with the saved context. We use forkret as the
//    return address (in the lr register). (On x86, it would be the return
//    address pushed on the stack by the call instruction.)
//
// The design of context switching in xv6 is interesting: after initialization,
// each CPU executes in the scheduler() function. A context switch never goes
// directly from one process to another; it always passes through the
// scheduler. Think of the scheduler as the idle process.
//
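// A rough sketch of one scheduling round (the register save/restore itself
// lives in swtch.S, not in this file):
//
//     scheduler():  swtch(&cpu->scheduler, p->context);    // enter process p
//     ... p runs, and eventually calls sched() via yield/sleep/exit ...
//     sched():      swtch(&proc->context, cpu->scheduler);  // back to scheduler
//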
struct {
    struct spinlock lock;
    struct proc proc[NPROC];
} ptable;

static struct proc *initproc;
struct proc *proc;

int nextpid = 1;
extern void forkret(void);
extern void trapret(void);

static void wakeup1(void *chan);

void pinit(void)
{
    initlock(&ptable.lock, "ptable");
}

//PAGEBREAK: 32
// Look in the process table for an UNUSED proc.
// If found, change state to EMBRYO and initialize
// state required to run in the kernel.
// Otherwise return 0.
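//
// Layout of the kernel stack crafted below (addresses decrease downward):
//
//     p->kstack + KSTACKSIZE      <- top of the kernel stack page
//     struct trapframe            <- p->tf
//     (uint) trapret
//     (uint) p->kstack + KSTACKSIZE
//     struct context              <- p->context, with lr = forkret+4
//
// This is a sketch of the intent, assuming gcc compiles forkret with the
// usual "push {fp, lr}" prologue and "pop {fp, pc}" epilogue (which is what
// the forkret+4 trick relies on): the skipped prologue leaves the two single
// words in place, so forkret's epilogue pops the stack-top value into fp and
// the address of trapret into pc.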
static struct proc* allocproc(void)
{
    struct proc *p;
    char *sp;

    acquire(&ptable.lock);

    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++) {
        if(p->state == UNUSED) {
            goto found;
        }
    }

    release(&ptable.lock);
    return 0;

found:
    p->state = EMBRYO;
    p->pid = nextpid++;
    release(&ptable.lock);

    // Allocate kernel stack.
    if((p->kstack = alloc_page ()) == 0){
        p->state = UNUSED;
        return 0;
    }

    sp = p->kstack + KSTACKSIZE;

    // Leave room for trap frame.
    sp -= sizeof (*p->tf);
    p->tf = (struct trapframe*)sp;

    // Set up new context to start executing at forkret,
    // which returns to trapret.
    sp -= 4;
    *(uint*)sp = (uint)trapret;

    sp -= 4;
    *(uint*)sp = (uint)p->kstack + KSTACKSIZE;

    sp -= sizeof (*p->context);
    p->context = (struct context*)sp;
    memset(p->context, 0, sizeof(*p->context));

    // Skip the push {fp, lr} instruction in the prologue of forkret.
    // This is different from x86, where the hardware pushes the return
    // address before executing the callee. On ARM, the return address is
    // loaded into the lr register and pushed onto the stack by the callee
    // (if and when necessary). We skip that instruction so that forkret
    // uses the stack crafted here instead.
    p->context->lr = (uint)forkret+4;

    return p;
}

void error_init ()
{
    panic ("failed to craft first process\n");
}

//PAGEBREAK: 32
// Hand-craft the first user process. We link initcode.S into the kernel
// as a binary; the linker generates _binary_initcode_start/_size.
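//
// The first process's address space, as set up below: inituvm copies initcode
// to virtual address 0, the process size is a single page (PTE_SZ), the user
// stack pointer starts at the top of that same page, and the saved user pc is
// 0, so the first user-mode instruction executed is the start of initcode.S.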
void userinit(void)
{
    struct proc *p;
    extern char _binary_initcode_start[], _binary_initcode_size[];

    p = allocproc();
    initproc = p;

    if((p->pgdir = kpt_alloc()) == NULL) {
        panic("userinit: out of memory?");
    }

    inituvm(p->pgdir, _binary_initcode_start, (int)_binary_initcode_size);

    p->sz = PTE_SZ;

    // Craft the trapframe as if the process had entered the kernel through
    // a trap, so trapret can "return" it to user mode.
    memset(p->tf, 0, sizeof(*p->tf));

    p->tf->r14_svc = (uint)error_init;
    p->tf->spsr = spsr_usr ();
    p->tf->sp_usr = PTE_SZ;    // set the user stack
    p->tf->lr_usr = 0;

    // Set the user pc. The actual pc loaded into r15_usr is in
    // p->tf, the trapframe.
    p->tf->pc = 0;             // beginning of initcode.S

    safestrcpy(p->name, "initcode", sizeof(p->name));
    p->cwd = namei("/");

    p->state = RUNNABLE;
}

// Grow current process's memory by n bytes.
// Return 0 on success, -1 on failure.
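// allocuvm/deallocuvm return the new process size, or 0 on failure, so sz is
// only committed to proc->sz once the resize succeeded. switchuvm(proc) is
// then called so the hardware picks up the updated page table (its exact
// effect is defined in the VM code, not here).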
int growproc(int n)
{
    uint sz;

    sz = proc->sz;

    if(n > 0){
        if((sz = allocuvm(proc->pgdir, sz, sz + n)) == 0) {
            return -1;
        }

    } else if(n < 0){
        if((sz = deallocuvm(proc->pgdir, sz, sz + n)) == 0) {
            return -1;
        }
    }

    proc->sz = sz;
    switchuvm(proc);

    return 0;
}

// Create a new process copying the current process as the parent.
// Sets up the child's kernel stack to return as if from a system call.
// The child is marked RUNNABLE before fork returns.
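//
// Illustrative user-level view of fork() (not part of this file): the parent
// sees the child's pid as the return value, while the child resumes from the
// copied trapframe with r0 cleared to 0 below.
//
//     int pid = fork();
//     if(pid == 0) {
//         // child: fork() returned 0
//     } else if(pid > 0) {
//         // parent: fork() returned the child's pid
//     }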
int fork(void)
{
    int i, pid;
    struct proc *np;

    // Allocate process.
    if((np = allocproc()) == 0) {
        return -1;
    }

    // Copy process state from the parent.
    if((np->pgdir = copyuvm(proc->pgdir, proc->sz)) == 0){
        free_page(np->kstack);
        np->kstack = 0;
        np->state = UNUSED;
        return -1;
    }

    np->sz = proc->sz;
    np->parent = proc;

    // *np->tf = *proc->tf; // This generates a call to memcpy4, which is not in libgcc.a.
    memmove(np->tf, proc->tf, sizeof(*np->tf));

    // Clear r0 so that fork returns 0 in the child.
    np->tf->r0 = 0;

    for(i = 0; i < NOFILE; i++) {
        if(proc->ofile[i]) {
            np->ofile[i] = filedup(proc->ofile[i]);
        }
    }

    np->cwd = idup(proc->cwd);

    pid = np->pid;
    np->state = RUNNABLE;
    safestrcpy(np->name, proc->name, sizeof(proc->name));

    return pid;
}

// Exit the current process. Does not return.
// An exited process remains in the zombie state
// until its parent calls wait() to find out it exited.
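//
// The handshake with wait(): exit() marks the process ZOMBIE and wakes its
// parent while still holding ptable.lock. The exiting process cannot free its
// own kernel stack because it is still running on it, so wait() (in the
// parent) frees the kernel stack and page directory and only then marks the
// slot UNUSED for reuse.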
void exit(void)
{
    struct proc *p;
    int fd;

    if(proc == initproc) {
        panic("init exiting");
    }

    // Close all open files.
    for(fd = 0; fd < NOFILE; fd++){
        if(proc->ofile[fd]){
            fileclose(proc->ofile[fd]);
            proc->ofile[fd] = 0;
        }
    }

    iput(proc->cwd);
    proc->cwd = 0;

    acquire(&ptable.lock);

    // Parent might be sleeping in wait().
    wakeup1(proc->parent);

    // Pass abandoned children to init.
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
        if(p->parent == proc){
            p->parent = initproc;

            if(p->state == ZOMBIE) {
                wakeup1(initproc);
            }
        }
    }

    // Jump into the scheduler, never to return.
    proc->state = ZOMBIE;
    sched();

    panic("zombie exit");
}

// Wait for a child process to exit and return its pid.
// Return -1 if this process has no children.
int wait(void)
{
    struct proc *p;
    int havekids, pid;

    acquire(&ptable.lock);

    for(;;){
        // Scan through table looking for zombie children.
        havekids = 0;

        for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
            if(p->parent != proc) {
                continue;
            }

            havekids = 1;

            if(p->state == ZOMBIE){
                // Found one.
                pid = p->pid;
                free_page(p->kstack);
                p->kstack = 0;
                freevm(p->pgdir);
                p->state = UNUSED;
                p->pid = 0;
                p->parent = 0;
                p->name[0] = 0;
                p->killed = 0;
                release(&ptable.lock);

                return pid;
            }
        }

        // No point waiting if we don't have any children.
        if(!havekids || proc->killed){
            release(&ptable.lock);
            return -1;
        }

        // Wait for children to exit. (See the wakeup1 call in exit.)
        sleep(proc, &ptable.lock); //DOC: wait-sleep
    }
}

//PAGEBREAK: 42
// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
// Scheduler never returns. It loops, doing:
//  - choose a process to run
//  - swtch to start running that process
//  - eventually that process transfers control
//    via swtch back to the scheduler.
void scheduler(void)
{
    struct proc *p;

    for(;;){
        // Enable interrupts on this processor.
        sti();

        // Loop over process table looking for process to run.
        acquire(&ptable.lock);

        for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
            if(p->state != RUNNABLE) {
                continue;
            }

            // Switch to chosen process. It is the process's job
            // to release ptable.lock and then reacquire it
            // before jumping back to us.
            proc = p;
            switchuvm(p);

            p->state = RUNNING;

            swtch(&cpu->scheduler, proc->context);
            // Process is done running for now.
            // It should have changed its p->state before coming back.
            proc = 0;
        }

        release(&ptable.lock);
    }
}

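// The cbc_* functions below are Continuation based C (CbC) variants of
// sched/sleep/wakeup. __ncode (an alias for __code, see the #define at the
// top) marks a CbC code segment, and "goto segment(...)" transfers control to
// that segment without returning; instead of returning to a caller, each
// segment ends by jumping to an explicitly passed continuation such as
// proc->cbc_next.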
__ncode cbc_sched(__code(*next)())
{
    int intena;

    if(!holding(&ptable.lock)) {
        panic("sched ptable.lock");
    }

    if(cpu->ncli != 1) {
        panic("sched locks");
    }

    if(proc->state == RUNNING) {
        panic("sched running");
    }

    if(int_enabled ()) {
        panic("sched interruptible");
    }

    intena = cpu->intena;
    swtch(&proc->context, cpu->scheduler);
    cpu->intena = intena;

    goto next();
}

// Enter scheduler. Must hold only ptable.lock
// and have changed proc->state.
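//
// Note on intena below: cpu->intena records whether interrupts were enabled
// before the outermost cli on this CPU (in stock xv6 it is maintained by
// pushcli/popcli in spinlock.c; cpu->ncli is the nesting depth checked in
// sched). It is saved across swtch and restored afterwards so this kernel
// thread's interrupt-enable bookkeeping survives the time the CPU spends in
// the scheduler and other processes.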
void sched(void)
{
    int intena;

    //show_callstk ("sched");

    if(!holding(&ptable.lock)) {
        panic("sched ptable.lock");
    }

    if(cpu->ncli != 1) {
        panic("sched locks");
    }

    if(proc->state == RUNNING) {
        panic("sched running");
    }

    if(int_enabled ()) {
        panic("sched interruptible");
    }

    intena = cpu->intena;
    swtch(&proc->context, cpu->scheduler);
    cpu->intena = intena;
}

// Give up the CPU for one scheduling round.
void yield(void)
{
    acquire(&ptable.lock); //DOC: yieldlock
    proc->state = RUNNABLE;
    sched();
    release(&ptable.lock);
}

// A fork child's very first scheduling by scheduler()
// will swtch here. "Return" to user space.
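// The return path is the one crafted in allocproc: the child's context has
// lr = forkret+4, skipping this function's own "push {fp, lr}", so the
// epilogue pops the trapret address that allocproc placed on the kernel
// stack. "Returning" from forkret therefore enters trapret, which restores
// the trapframe and drops to user mode (see the stack sketch above allocproc).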
void forkret(void)
{
    static int first = 1;

    // Still holding ptable.lock from scheduler.
    release(&ptable.lock);

    if (first) {
        // Some initialization functions must be run in the context
        // of a regular process (e.g., they call sleep), and thus cannot
        // be run from main().
        first = 0;
        initlog();
    }

    // Return to "caller", actually trapret (see allocproc).
}

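// cbc_sleep/cbc_sleep1 are the CbC counterparts of sleep(): cbc_sleep saves
// the sleep channel, the caller's lock, and the continuation in the proc
// structure, marks the process SLEEPING, and transfers to cbc_sched; when the
// process is scheduled again, cbc_sleep1 runs as the continuation, reacquires
// the original lock, and jumps to the saved cbc_next.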
__ncode cbc_sleep1()
{
    struct spinlock *lk = proc->lk;
    // Tidy up.
    proc->chan = 0;

    // Reacquire original lock.
    if(lk != &ptable.lock){ //DOC: sleeplock2
        release(&ptable.lock);
        acquire(lk);
    }
    goto proc->cbc_next();
}

__ncode cbc_sleep(void *chan, struct spinlock *lk, __code(*next1)())
{
    //show_callstk("sleep");

    if(proc == 0) {
        panic("sleep");
    }

    if(lk == 0) {
        panic("sleep without lk");
    }

    if(lk != &ptable.lock){ //DOC: sleeplock0
        acquire(&ptable.lock); //DOC: sleeplock1
        release(lk);
    }
    proc->chan = chan;
    proc->state = SLEEPING;
    proc->lk = lk;
    proc->cbc_next = next1;

    goto cbc_sched(cbc_sleep1);
}

// Atomically release lock and sleep on chan.
// Reacquires lock when awakened.
void sleep(void *chan, struct spinlock *lk)
{
    //show_callstk("sleep");

    if(proc == 0) {
        panic("sleep");
    }

    if(lk == 0) {
        panic("sleep without lk");
    }

    // Must acquire ptable.lock in order to change p->state and then call
    // sched. Once we hold ptable.lock, we can be guaranteed that we won't
    // miss any wakeup (wakeup runs with ptable.lock locked), so it's okay
    // to release lk.
    if(lk != &ptable.lock){ //DOC: sleeplock0
        acquire(&ptable.lock); //DOC: sleeplock1
        release(lk);
    }

    // Go to sleep.
    proc->chan = chan;
    proc->state = SLEEPING;
    sched();

    // Tidy up.
    proc->chan = 0;

    // Reacquire original lock.
    if(lk != &ptable.lock){ //DOC: sleeplock2
        release(&ptable.lock);
        acquire(lk);
    }
}

//PAGEBREAK!
// Wake up all processes sleeping on chan. The ptable lock must be held.
static void wakeup1(void *chan)
{
    struct proc *p;

    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++) {
        if(p->state == SLEEPING && p->chan == chan) {
            p->state = RUNNABLE;
        }
    }
}

__ncode cbc_wakeup1(void *chan)
{
    struct proc *p;

    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++) {
        if(p->state == SLEEPING && p->chan == chan) {
            p->state = RUNNABLE;
        }
    }

    release(&ptable.lock);
    goto proc->cbc_next();
}

__ncode cbc_wakeup(void *chan, __code(*next1)())
{
    acquire(&ptable.lock);
    proc->cbc_next = next1;
    cbc_wakeup1(chan);
}

// Wake up all processes sleeping on chan.
void wakeup(void *chan)
{
    acquire(&ptable.lock);
    wakeup1(chan);
    release(&ptable.lock);
}

// Kill the process with the given pid. Process won't exit until it returns
// to user space (see trap in trap.c).
int kill(int pid)
{
    struct proc *p;

    acquire(&ptable.lock);

    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
        if(p->pid == pid){
            p->killed = 1;

            // Wake process from sleep if necessary.
            if(p->state == SLEEPING) {
                p->state = RUNNABLE;
            }

            release(&ptable.lock);
            return 0;
        }
    }

    release(&ptable.lock);
    return -1;
}

//PAGEBREAK: 36
// Print a process listing to console. For debugging. Runs when user
// types ^P on console. No lock to avoid wedging a stuck machine further.
void procdump(void)
{
    static char *states[] = {
        [UNUSED]   = "unused",
        [EMBRYO]   = "embryo",
        [SLEEPING] = "sleep ",
        [RUNNABLE] = "runble",
        [RUNNING]  = "run   ",
        [ZOMBIE]   = "zombie"
    };

    struct proc *p;
    char *state;

    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
        if(p->state == UNUSED) {
            continue;
        }

        if(p->state >= 0 && p->state < NELEM(states) && states[p->state]) {
            state = states[p->state];
        } else {
            state = "???";
        }

        // initproc has no parent; print 0 for its parent pid to avoid a
        // null dereference.
        cprintf("%d %s %d:%s %d\n", p->pid, state, p->pid, p->name,
                p->parent ? p->parent->pid : 0);
    }

    show_callstk("procdump: \n");
}