#include "types.h"
#include "defs.h"
#include "param.h"
#include "memlayout.h"
#include "mmu.h"
#include "arm.h"
#include "proc.h"
#include "spinlock.h"
#interface "vm.h"

#define __ncode __code

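// A note on the CbC (Continuation based C) constructs used in this file
// (a summary under our reading of CbC, not part of the original xv6):
// a function declared __code (aliased to __ncode above) is a "code gear"
// that never returns; "goto f(...)" is a tail call that hands control to
// the next code gear without growing the stack. Gearef() and
// "goto meta(...)" are Gears OS conventions: Gearef() accesses an
// interface slot in the per-process cbc_context, and meta() dispatches
// to a code gear, with enum ids such as C_vm_void_ret naming
// continuations.
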
//
// Process initialization:
// Process initialization is somewhat tricky.
// 1. We need to fake the kernel stack of a new process as if the process
//    had been interrupted (with a trapframe on the stack); this allows us
//    to "return" to the correct user instruction.
// 2. We also need to fake the kernel execution for this new process. When
//    swtch switches to this (new) process, it will switch to its stack
//    and reload registers with the saved context. We use forkret as the
//    return address (in the lr register). (On x86, it would be the return
//    address pushed on the stack by the caller.)
//
// The design of context switch in xv6 is interesting: after initialization,
// each CPU executes in the scheduler() function. A context switch is not
// between two processes, but between a process and the scheduler. Think of
// the scheduler as the idle process.
//
struct {
    struct spinlock lock;
    struct proc proc[NPROC];
} ptable;

static struct proc *initproc;
struct proc *proc;

int nextpid = 1;
extern void forkret(void);
extern void trapret(void);

static void wakeup1(void *chan);

void pinit(void)
{
    initlock(&ptable.lock, "ptable");
}

//PAGEBREAK: 32
// Look in the process table for an UNUSED proc.
// If found, change state to EMBRYO and initialize
// state required to run in the kernel.
// Otherwise return 0.
static struct proc* allocproc(void)
{
    struct proc *p;
    char *sp;

    acquire(&ptable.lock);

    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++) {
        if(p->state == UNUSED) {
            goto found;
        }
    }

    release(&ptable.lock);
    return 0;

found:
    p->state = EMBRYO;
    p->pid = nextpid++;
    release(&ptable.lock);

    // Allocate kernel stack.
    if((p->kstack = alloc_page()) == 0){
        p->state = UNUSED;
        return 0;
    }

    sp = p->kstack + KSTACKSIZE;

    // Leave room for trap frame.
    sp -= sizeof(*p->tf);
    p->tf = (struct trapframe*)sp;

    // Set up new context to start executing at forkret,
    // which returns to trapret.
    sp -= 4;
    *(uint*)sp = (uint)trapret;

    sp -= 4;
    *(uint*)sp = (uint)p->kstack + KSTACKSIZE;

    sp -= sizeof(*p->context);
    p->context = (struct context*)sp;
    memset(p->context, 0, sizeof(*p->context));

    // Skip the push {fp, lr} instruction in the prologue of forkret.
    // This is different from x86, where the hardware pushes the return
    // address before executing the callee. On ARM, the return address is
    // loaded into the lr register and pushed onto the stack by the callee
    // (if and when necessary). We skip that push so that forkret's
    // epilogue pops the values we crafted on the stack above.
    p->context->lr = (uint)forkret+4;

    return p;
}

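// Kernel stack crafted by allocproc() (grows downward; a sketch of the
// layout, reconstructed from the code above):
//
//   p->kstack + KSTACKSIZE -> +--------------------------+
//                             | trapframe (p->tf)        |
//                             +--------------------------+
//                             | trapret                  |  popped into pc
//                             +--------------------------+
//                             | p->kstack + KSTACKSIZE   |  popped into fp
//                             +--------------------------+
//                             | context (p->context)     |  lr = forkret+4
//   initial sp in swtch ----> +--------------------------+
//
// swtch() loads p->context and "returns" to forkret+4; because the
// push {fp, lr} prologue was skipped, forkret's pop {fp, pc} epilogue
// picks up the two words above and jumps to trapret.
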
void error_init()
{
    panic("failed to craft first process\n");
}

//PAGEBREAK: 32
// Hand-craft the first user process. We link initcode.S into the kernel
// as a binary; the linker generates the _binary_initcode_start/_size
// symbols.
void dummy(struct proc *p, char _binary_initcode_start[], char _binary_initcode_size[])
{
    // inituvm(p->pgdir, _binary_initcode_start, (int)_binary_initcode_size);
    goto cbc_init_vmm_dummy(&p->cbc_context, p, p->pgdir, _binary_initcode_start, (int)_binary_initcode_size);
}

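// The code gear below replaces the commented-out inituvm() call: it
// stores its arguments into the vm interface slot of cbc_context via
// Gearef(), sets C_vm_void_ret as the continuation, and dispatches to
// the init_inituvm code gear through goto meta().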
__ncode cbc_init_vmm_dummy(struct Context* cbc_context, struct proc* p, pde_t* pgdir, char* init, uint sz) {//:skip
    struct vm* vm = createvm_impl(cbc_context);
    // goto vm->init_vmm(vm, pgdir, init, sz, vm->void_ret);
    Gearef(cbc_context, vm)->vm = (union Data*) vm;
    Gearef(cbc_context, vm)->pgdir = pgdir;
    Gearef(cbc_context, vm)->init = init;
    Gearef(cbc_context, vm)->sz = sz;
    Gearef(cbc_context, vm)->next = C_vm_void_ret;
    goto meta(cbc_context, vm->init_inituvm);
}

void userinit(void)
{
    struct proc* p;
    extern char _binary_initcode_start[], _binary_initcode_size[];

    p = allocproc();
    initContext(&p->cbc_context);

    initproc = p;

    if((p->pgdir = kpt_alloc()) == NULL) {
        panic("userinit: out of memory?");
    }

    dummy(p, _binary_initcode_start, _binary_initcode_size);

    p->sz = PTE_SZ;

    // Craft the trapframe as if the process had entered the kernel
    // through a trap (see the comment at the top of this file).
    memset(p->tf, 0, sizeof(*p->tf));

    p->tf->r14_svc = (uint)error_init;
    p->tf->spsr = spsr_usr();
    p->tf->sp_usr = PTE_SZ;    // set the user stack
    p->tf->lr_usr = 0;

    // set the user pc. The actual pc loaded into r15_usr is in
    // p->tf, the trapframe.
    p->tf->pc = 0;             // beginning of initcode.S

    safestrcpy(p->name, "initcode", sizeof(p->name));
    p->cwd = namei("/");

    p->state = RUNNABLE;
}

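// switchuvm_dummy bridges from ordinary C into the vm interface:
// growproc() below calls it in place of a direct switchuvm() so that
// reloading the address space goes through the switchuvm code gear.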
void switchuvm_dummy(struct proc* proc)
{
    goto cbc_switchuvm_dummy(&proc->cbc_context, proc);
}

__ncode cbc_switchuvm_dummy(struct Context* cbc_context, struct proc* proc)
{
    struct vm* vm = createvm_impl(cbc_context);
    //Gearef(cbc_context, vm)->vm = (union Data*) vm;
    Gearef(cbc_context, vm)->p = proc;
    Gearef(cbc_context, vm)->next = C_vm_void_ret;
    goto meta(cbc_context, vm->switchuvm);
}

// Grow current process's memory by n bytes.
// Return 0 on success, -1 on failure.
int growproc(int n)
{
    uint sz;

    sz = proc->sz;

    if(n > 0){
        if((sz = allocuvm(proc->pgdir, sz, sz + n)) == 0) {
            return -1;
        }
    } else if(n < 0){
        if((sz = deallocuvm(proc->pgdir, sz, sz + n)) == 0) {
            return -1;
        }
    }

    proc->sz = sz;
    // switchuvm(proc);
    switchuvm_dummy(proc);

    return 0;
}

// Create a new process copying p as the parent.
// Sets up stack to return as if from system call.
// Caller must set state of returned proc to RUNNABLE.
int fork(void)
{
    int i, pid;
    struct proc *np;

    // Allocate process.
    if((np = allocproc()) == 0) {
        return -1;
    }
    initContext(&np->cbc_context);

    // Copy process state from p.
    if((np->pgdir = copyuvm(proc->pgdir, proc->sz)) == 0){
        free_page(np->kstack);
        np->kstack = 0;
        np->state = UNUSED;
        return -1;
    }

    np->sz = proc->sz;
    np->parent = proc;
    // *np->tf = *proc->tf;   // This generates a call to memcpy4, which is not in libgcc.a.
    memmove(np->tf, proc->tf, sizeof(*np->tf));

    // Clear r0 so that fork returns 0 in the child.
    np->tf->r0 = 0;

    for(i = 0; i < NOFILE; i++) {
        if(proc->ofile[i]) {
            np->ofile[i] = filedup(proc->ofile[i]);
        }
    }

    np->cwd = idup(proc->cwd);

    pid = np->pid;
    np->state = RUNNABLE;
    safestrcpy(np->name, proc->name, sizeof(proc->name));

    return pid;
}

// Exit the current process. Does not return.
// An exited process remains in the zombie state
// until its parent calls wait() to find out it exited.
void exit(void)
{
    struct proc *p;
    int fd;

    if(proc == initproc) {
        panic("init exiting");
    }

    // Close all open files.
    for(fd = 0; fd < NOFILE; fd++){
        if(proc->ofile[fd]){
            fileclose(proc->ofile[fd]);
            proc->ofile[fd] = 0;
        }
    }

    iput(proc->cwd);
    proc->cwd = 0;

    acquire(&ptable.lock);

    // Parent might be sleeping in wait().
    wakeup1(proc->parent);

    // Pass abandoned children to init.
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
        if(p->parent == proc){
            p->parent = initproc;

            if(p->state == ZOMBIE) {
                wakeup1(initproc);
            }
        }
    }

    // Jump into the scheduler, never to return.
    proc->state = ZOMBIE;
    sched();

    panic("zombie exit");
}

// Wait for a child process to exit and return its pid.
// Return -1 if this process has no children.
int wait(void)
{
    struct proc *p;
    int havekids, pid;

    acquire(&ptable.lock);

    for(;;){
        // Scan through table looking for zombie children.
        havekids = 0;

        for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
            if(p->parent != proc) {
                continue;
            }

            havekids = 1;

            if(p->state == ZOMBIE){
                // Found one.
                pid = p->pid;
                free_page(p->kstack);
                p->kstack = 0;
                freevm(p->pgdir);
                p->state = UNUSED;
                p->pid = 0;
                p->parent = 0;
                p->name[0] = 0;
                p->killed = 0;
                release(&ptable.lock);

                return pid;
            }
        }

        // No point waiting if we don't have any children.
        if(!havekids || proc->killed){
            release(&ptable.lock);
            return -1;
        }

        // Wait for children to exit. (See wakeup1 call in proc_exit.)
        sleep(proc, &ptable.lock); //DOC: wait-sleep
    }
}

//PAGEBREAK: 42
// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
// Scheduler never returns. It loops, doing:
//  - choose a process to run
//  - swtch to start running that process
//  - eventually that process transfers control
//    via swtch back to the scheduler.
void scheduler(void)
{
    struct proc *p;

    for(;;){
        // Enable interrupts on this processor.
        sti();

        // Loop over process table looking for process to run.
        acquire(&ptable.lock);

        for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
            if(p->state != RUNNABLE) {
                continue;
            }

            // Switch to chosen process. It is the process's job
            // to release ptable.lock and then reacquire it
            // before jumping back to us.
            proc = p;
            switchuvm(p);

            p->state = RUNNING;

            swtch(&cpu->scheduler, proc->context);

            // Process is done running for now.
            // It should have changed its p->state before coming back.
            proc = 0;
        }

        release(&ptable.lock);
    }
}

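// cbc_sched is the code-gear counterpart of sched() below: the sanity
// checks and the swtch() to the per-CPU scheduler context are the same,
// but when this process is next scheduled, control continues into the
// `next` code gear instead of returning to a caller.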
__ncode cbc_sched(__code(*next)())
{
    int intena;

    if(!holding(&ptable.lock)) {
        panic("sched ptable.lock");
    }

    if(cpu->ncli != 1) {
        panic("sched locks");
    }

    if(proc->state == RUNNING) {
        panic("sched running");
    }

    if(int_enabled()) {
        panic("sched interruptible");
    }

    intena = cpu->intena;
    swtch(&proc->context, cpu->scheduler);
    cpu->intena = intena;

    goto next();
}

// Enter scheduler. Must hold only ptable.lock
// and have changed proc->state.
void sched(void)
{
    int intena;

    //show_callstk("sched");

    if(!holding(&ptable.lock)) {
        panic("sched ptable.lock");
    }

    if(cpu->ncli != 1) {
        panic("sched locks");
    }

    if(proc->state == RUNNING) {
        panic("sched running");
    }

    if(int_enabled()) {
        panic("sched interruptible");
    }

    intena = cpu->intena;
    swtch(&proc->context, cpu->scheduler);
    cpu->intena = intena;
}

// Give up the CPU for one scheduling round.
void yield(void)
{
    acquire(&ptable.lock); //DOC: yieldlock
    proc->state = RUNNABLE;
    sched();
    release(&ptable.lock);
}

// A fork child's very first scheduling by scheduler()
// will swtch here. "Return" to user space.
void forkret(void)
{
    static int first = 1;

    // Still holding ptable.lock from scheduler.
    release(&ptable.lock);

    if (first) {
        // Some initialization functions must be run in the context
        // of a regular process (e.g., they call sleep), and thus cannot
        // be run from main().
        first = 0;
        initlog();
    }

    // Return to "caller", actually trapret (see allocproc).
}

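// cbc_sleep/cbc_sleep1 split sleep() around the context switch: cbc_sleep
// records the channel, the lock, and the caller's continuation in the
// proc structure, then enters the scheduler via cbc_sched; once the
// process is woken, cbc_sleep1 runs as the continuation, reacquires the
// original lock, and jumps to the saved cbc_next.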
__ncode cbc_sleep1()
{
    struct spinlock *lk = proc->lk;

    // Tidy up.
    proc->chan = 0;

    // Reacquire original lock.
    if(lk != &ptable.lock){ //DOC: sleeplock2
        release(&ptable.lock);
        acquire(lk);
    }

    goto proc->cbc_next();
}

__ncode cbc_sleep(void *chan, struct spinlock *lk, __code(*next1)())
{
    //show_callstk("sleep");

    if(proc == 0) {
        panic("sleep");
    }

    if(lk == 0) {
        panic("sleep without lk");
    }

    if(lk != &ptable.lock){ //DOC: sleeplock0
        acquire(&ptable.lock); //DOC: sleeplock1
        release(lk);
    }

    proc->chan = chan;
    proc->state = SLEEPING;
    proc->lk = lk;
    proc->cbc_next = next1;

    goto cbc_sched(cbc_sleep1);
}

// Atomically release lock and sleep on chan.
// Reacquires lock when awakened.
void sleep(void *chan, struct spinlock *lk)
{
    //show_callstk("sleep");

    if(proc == 0) {
        panic("sleep");
    }

    if(lk == 0) {
        panic("sleep without lk");
    }

    // Must acquire ptable.lock in order to change p->state and then call
    // sched. Once we hold ptable.lock, we can be guaranteed that we won't
    // miss any wakeup (wakeup runs with ptable.lock locked), so it's okay
    // to release lk.
    if(lk != &ptable.lock){ //DOC: sleeplock0
        acquire(&ptable.lock); //DOC: sleeplock1
        release(lk);
    }

    // Go to sleep.
    proc->chan = chan;
    proc->state = SLEEPING;
    sched();

    // Tidy up.
    proc->chan = 0;

    // Reacquire original lock.
    if(lk != &ptable.lock){ //DOC: sleeplock2
        release(&ptable.lock);
        acquire(lk);
    }
}

//PAGEBREAK!
// Wake up all processes sleeping on chan. The ptable lock must be held.
static void wakeup1(void *chan)
{
    struct proc *p;

    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++) {
        if(p->state == SLEEPING && p->chan == chan) {
            p->state = RUNNABLE;
        }
    }
}

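// cbc_wakeup/cbc_wakeup1 are the code-gear form of wakeup(): cbc_wakeup
// takes the ptable lock and saves the caller's continuation, and
// cbc_wakeup1 makes every process sleeping on chan runnable, drops the
// lock, and jumps to the saved continuation.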
__ncode cbc_wakeup1(void *chan)
{
    struct proc *p;

    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++) {
        if(p->state == SLEEPING && p->chan == chan) {
            p->state = RUNNABLE;
        }
    }

    release(&ptable.lock);
    goto proc->cbc_next();
}

__ncode cbc_wakeup(void *chan, __code(*next1)())
{
    acquire(&ptable.lock);
    proc->cbc_next = next1;
    cbc_wakeup1(chan);
}

// Wake up all processes sleeping on chan.
void wakeup(void *chan)
{
    acquire(&ptable.lock);
    wakeup1(chan);
    release(&ptable.lock);
}

// Kill the process with the given pid. Process won't exit until it returns
// to user space (see trap in trap.c).
int kill(int pid)
{
    struct proc *p;

    acquire(&ptable.lock);

    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
        if(p->pid == pid){
            p->killed = 1;

            // Wake process from sleep if necessary.
            if(p->state == SLEEPING) {
                p->state = RUNNABLE;
            }

            release(&ptable.lock);
            return 0;
        }
    }

    release(&ptable.lock);
    return -1;
}

//PAGEBREAK: 36
// Print a process listing to console. For debugging. Runs when user
// types ^P on console. No lock to avoid wedging a stuck machine further.
void procdump(void)
{
    static char *states[] = {
        [UNUSED]   = "unused",
        [EMBRYO]   = "embryo",
        [SLEEPING] = "sleep ",
        [RUNNABLE] = "runble",
        [RUNNING]  = "run   ",
        [ZOMBIE]   = "zombie"
    };

    struct proc *p;
    char *state;

    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
        if(p->state == UNUSED) {
            continue;
        }

        if(p->state >= 0 && p->state < NELEM(states) && states[p->state]) {
            state = states[p->state];
        } else {
            state = "???";
        }

        // Guard against initproc, whose parent pointer is 0.
        cprintf("%d %s %d:%s %d\n", p->pid, state, p->pid, p->name,
                p->parent ? p->parent->pid : 0);
    }

    show_callstk("procdump: \n");
}