#include "types.h"
#include "defs.h"
#include "param.h"
#include "memlayout.h"
#include "mmu.h"
#include "arm.h"
#include "proc.h"
#include "spinlock.h"
#interface "vm.h"

#define __ncode __code

//
// Process initialization:
// Process initialization is somewhat tricky.
// 1. We need to fake the kernel stack of a new process as if the process
//    had been interrupted (a trapframe on the stack); this allows us
//    to "return" to the correct user instruction.
// 2. We also need to fake the kernel execution for this new process. When
//    swtch switches to this (new) process, it will switch to its stack,
//    and reload registers with the saved context. We use forkret as the
//    return address (in the lr register). (On x86, it would be the return
//    address pushed on the stack by the process.)
//
// The design of context switch in xv6 is interesting: after initialization,
// each CPU executes in the scheduler() function. A context switch is not
// between two processes, but between a process and the scheduler. Think of
// the scheduler as the idle process.
//
struct {
    struct spinlock lock;
    struct proc proc[NPROC];
} ptable;

static struct proc *initproc;
struct proc *proc;

int nextpid = 1;
extern void forkret(void);
extern void trapret(void);

static void wakeup1(void *chan);

void pinit(void)
{
    initlock(&ptable.lock, "ptable");
}

//PAGEBREAK: 32
// Look in the process table for an UNUSED proc.
// If found, change state to EMBRYO and initialize
// state required to run in the kernel.
// Otherwise return 0.
static struct proc* allocproc(void)
{
    struct proc *p;
    char *sp;

    acquire(&ptable.lock);

    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++) {
        if(p->state == UNUSED) {
            goto found;
        }
    }

    release(&ptable.lock);
    return 0;

found:
    p->state = EMBRYO;
    p->pid = nextpid++;
    release(&ptable.lock);

    // Allocate kernel stack.
    if((p->kstack = alloc_page()) == 0){
        p->state = UNUSED;
        return 0;
    }

    sp = p->kstack + KSTACKSIZE;

    // Leave room for trap frame.
    sp -= sizeof(*p->tf);
    p->tf = (struct trapframe*)sp;

    // Set up new context to start executing at forkret,
    // which returns to trapret.
    sp -= 4;
    *(uint*)sp = (uint)trapret;

    sp -= 4;
    *(uint*)sp = (uint)p->kstack + KSTACKSIZE;

    sp -= sizeof(*p->context);
    p->context = (struct context*)sp;
    memset(p->context, 0, sizeof(*p->context));

    // Skip the push {fp, lr} instruction in the prologue of forkret.
    // This differs from x86, where the hardware pushes the return
    // address before entering the callee. On ARM, the return address is
    // loaded into the lr register and pushed to the stack by the callee
    // (if and when necessary). We skip that instruction so that forkret
    // uses the return address we crafted here.
    p->context->lr = (uint)forkret+4;

    return p;
}
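
// For reference, the kernel stack that allocproc crafts above looks
// roughly like this (a sketch derived from the code, highest address
// first; the stack grows downward):
//
//   p->kstack + KSTACKSIZE -> +------------------------------+
//                             | struct trapframe             | <- p->tf
//                             | (uint) trapret               |    forkret "returns" here
//                             | (uint) kstack + KSTACKSIZE   |    saved stack top
//                             | struct context               | <- p->context (lr = forkret+4)
//   p->kstack             -> +------------------------------+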

void error_init()
{
    panic("failed to craft first process\n");
}

//PAGEBREAK: 32
// Hand-craft the first user process. We link initcode.S into the kernel
// as a binary; the linker will generate __binary_initcode_start/_size.
void dummy(struct proc *p, char _binary_initcode_start[], char _binary_initcode_size[])
{
    // inituvm(p->pgdir, _binary_initcode_start, (int)_binary_initcode_size);
    goto cbc_init_vmm_dummy(&p->cbc_context, p, p->pgdir, _binary_initcode_start, (int)_binary_initcode_size);
}

__ncode cbc_init_vmm_dummy(struct Context* cbc_context, struct proc* p, pde_t* pgdir, char* init, uint sz){//:skip
    struct vm* vm = createvm_impl(cbc_context);
    // goto vm->init_vmm(vm, pgdir, init, sz, vm->void_ret);
    Gearef(cbc_context, vm)->vm = (union Data*) vm;
    Gearef(cbc_context, vm)->pgdir = pgdir;
    Gearef(cbc_context, vm)->init = init;
    Gearef(cbc_context, vm)->sz = sz;
    Gearef(cbc_context, vm)->next = C_vm_void_ret;
    goto meta(cbc_context, vm->init_inituvm);
}
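
// Note: the Gearef stores above hand-expand the interface call kept in
// the comment (goto vm->init_vmm(...)). The arguments are written into
// the context's vm interface slot, the continuation is named by its
// enum (C_vm_void_ret), and goto meta() dispatches to the implementation.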

void userinit(void)
{
    struct proc* p;
    extern char _binary_initcode_start[], _binary_initcode_size[];

    p = allocproc();
    initContext(&p->cbc_context);

    initproc = p;

    if((p->pgdir = kpt_alloc()) == NULL) {
        panic("userinit: out of memory?");
    }

    dummy(p, _binary_initcode_start, _binary_initcode_size);

    p->sz = PTE_SZ;

    // Craft the trapframe as if the process had just been interrupted.
    memset(p->tf, 0, sizeof(*p->tf));

    p->tf->r14_svc = (uint)error_init;
    p->tf->spsr = spsr_usr();
    p->tf->sp_usr = PTE_SZ;     // set the user stack
    p->tf->lr_usr = 0;

    // Set the user pc. The actual pc loaded into r15_usr is in
    // p->tf, the trapframe.
    p->tf->pc = 0;              // beginning of initcode.S

    safestrcpy(p->name, "initcode", sizeof(p->name));
    p->cwd = namei("/");

    p->state = RUNNABLE;
}
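
// A sketch of what happens next: when the scheduler first picks this
// process, it "returns" through forkret and trapret to pc 0, the start
// of initcode.S, which in stock xv6 issues the exec system call to run
// /init.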

void switchuvm_dummy(struct proc* proc)
{
    goto cbc_switchuvm_dummy(&proc->cbc_context, proc);
}

__ncode cbc_switchuvm_dummy(struct Context* cbc_context, struct proc* proc){
    struct vm* vm = createvm_impl(cbc_context);
    Gearef(cbc_context, vm)->vm = (union Data*) vm;
    goto meta(cbc_context, vm->switchuvm);
}

// Grow current process's memory by n bytes.
// Return 0 on success, -1 on failure.
int growproc(int n)
{
    uint sz;

    sz = proc->sz;

    if(n > 0){
        if((sz = allocuvm(proc->pgdir, sz, sz + n)) == 0) {
            return -1;
        }
    } else if(n < 0){
        if((sz = deallocuvm(proc->pgdir, sz, sz + n)) == 0) {
            return -1;
        }
    }

    proc->sz = sz;
    // switchuvm(proc);
    switchuvm_dummy(proc);

    return 0;
}
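
// Usage sketch (cf. sys_sbrk in stock xv6, not part of this file): the
// sbrk system call grows the heap with growproc and returns the old
// break:
//
//   addr = proc->sz;
//   if(growproc(n) < 0)
//       return -1;
//   return addr;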

// Create a new process copying p as the parent.
// Sets up stack to return as if from system call.
// Caller must set state of returned proc to RUNNABLE.
int fork(void)
{
    int i, pid;
    struct proc *np;

    // Allocate process.
    if((np = allocproc()) == 0) {
        return -1;
    }
    initContext(&np->cbc_context);

    // Copy process state from p.
    if((np->pgdir = copyuvm(proc->pgdir, proc->sz)) == 0){
        free_page(np->kstack);
        np->kstack = 0;
        np->state = UNUSED;
        return -1;
    }

    np->sz = proc->sz;
    np->parent = proc;
    // *np->tf = *proc->tf; // This generates a memcpy4 call, which is not in libgcc.a.
    memmove(np->tf, proc->tf, sizeof(*np->tf));

    // Clear r0 so that fork returns 0 in the child.
    np->tf->r0 = 0;

    for(i = 0; i < NOFILE; i++) {
        if(proc->ofile[i]) {
            np->ofile[i] = filedup(proc->ofile[i]);
        }
    }

    np->cwd = idup(proc->cwd);

    pid = np->pid;
    np->state = RUNNABLE;
    safestrcpy(np->name, proc->name, sizeof(proc->name));

    return pid;
}
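
// Usage sketch: fork returns twice -- the child's pid in the parent and
// 0 in the child (r0 is cleared above):
//
//   int pid = fork();
//   if(pid == 0) {
//       // child
//   } else if(pid > 0) {
//       // parent
//   }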

// Exit the current process. Does not return.
// An exited process remains in the zombie state
// until its parent calls wait() to find out it exited.
void exit(void)
{
    struct proc *p;
    int fd;

    if(proc == initproc) {
        panic("init exiting");
    }

    // Close all open files.
    for(fd = 0; fd < NOFILE; fd++){
        if(proc->ofile[fd]){
            fileclose(proc->ofile[fd]);
            proc->ofile[fd] = 0;
        }
    }

    iput(proc->cwd);
    proc->cwd = 0;

    acquire(&ptable.lock);

    // Parent might be sleeping in wait().
    wakeup1(proc->parent);

    // Pass abandoned children to init.
    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
        if(p->parent == proc){
            p->parent = initproc;

            if(p->state == ZOMBIE) {
                wakeup1(initproc);
            }
        }
    }

    // Jump into the scheduler, never to return.
    proc->state = ZOMBIE;
    sched();

    panic("zombie exit");
}

// Wait for a child process to exit and return its pid.
// Return -1 if this process has no children.
int wait(void)
{
    struct proc *p;
    int havekids, pid;

    acquire(&ptable.lock);

    for(;;){
        // Scan through table looking for zombie children.
        havekids = 0;

        for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
            if(p->parent != proc) {
                continue;
            }

            havekids = 1;

            if(p->state == ZOMBIE){
                // Found one.
                pid = p->pid;
                free_page(p->kstack);
                p->kstack = 0;
                freevm(p->pgdir);
                p->state = UNUSED;
                p->pid = 0;
                p->parent = 0;
                p->name[0] = 0;
                p->killed = 0;
                release(&ptable.lock);

                return pid;
            }
        }

        // No point waiting if we don't have any children.
        if(!havekids || proc->killed){
            release(&ptable.lock);
            return -1;
        }

        // Wait for children to exit. (See the wakeup1 call in exit.)
        sleep(proc, &ptable.lock);  //DOC: wait-sleep
    }
}
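
// Note on the handshake: the parent sleeps on its own struct proc
// pointer as the channel (sleep(proc, ...)), and exit signals exactly
// that channel via wakeup1(proc->parent), so only the right parent is
// woken.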

//PAGEBREAK: 42
// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
// Scheduler never returns. It loops, doing:
//  - choose a process to run
//  - swtch to start running that process
//  - eventually that process transfers control
//    via swtch back to the scheduler.
void scheduler(void)
{
    struct proc *p;

    for(;;){
        // Enable interrupts on this processor.
        sti();

        // Loop over process table looking for process to run.
        acquire(&ptable.lock);

        for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
            if(p->state != RUNNABLE) {
                continue;
            }

            // Switch to chosen process. It is the process's job
            // to release ptable.lock and then reacquire it
            // before jumping back to us.
            proc = p;
            switchuvm(p);

            p->state = RUNNING;

            swtch(&cpu->scheduler, proc->context);

            // Process is done running for now.
            // It should have changed its p->state before coming back.
            proc = 0;
        }

        release(&ptable.lock);
    }
}

__ncode cbc_sched(__code(*next)())
{
    int intena;

    if(!holding(&ptable.lock)) {
        panic("sched ptable.lock");
    }

    if(cpu->ncli != 1) {
        panic("sched locks");
    }

    if(proc->state == RUNNING) {
        panic("sched running");
    }

    if(int_enabled()) {
        panic("sched interruptible");
    }

    intena = cpu->intena;
    swtch(&proc->context, cpu->scheduler);
    cpu->intena = intena;

    goto next();
}
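
// cbc_sched is the continuation-based twin of sched() below: instead of
// returning to its caller after swtch, it jumps to the code gear passed
// as next, so the rest of the computation is handed over explicitly.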

// Enter scheduler. Must hold only ptable.lock
// and have changed proc->state.
void sched(void)
{
    int intena;

    //show_callstk("sched");

    if(!holding(&ptable.lock)) {
        panic("sched ptable.lock");
    }

    if(cpu->ncli != 1) {
        panic("sched locks");
    }

    if(proc->state == RUNNING) {
        panic("sched running");
    }

    if(int_enabled()) {
        panic("sched interruptible");
    }

    intena = cpu->intena;
    swtch(&proc->context, cpu->scheduler);
    cpu->intena = intena;
}

// Give up the CPU for one scheduling round.
void yield(void)
{
    acquire(&ptable.lock);  //DOC: yieldlock
    proc->state = RUNNABLE;
    sched();
    release(&ptable.lock);
}

// A fork child's very first scheduling by scheduler()
// will swtch here. "Return" to user space.
void forkret(void)
{
    static int first = 1;

    // Still holding ptable.lock from scheduler.
    release(&ptable.lock);

    if (first) {
        // Some initialization functions must be run in the context
        // of a regular process (e.g., they call sleep), and thus cannot
        // be run from main().
        first = 0;
        initlog();
    }

    // Return to "caller", actually trapret (see allocproc).
}

__ncode cbc_sleep1()
{
    struct spinlock *lk = proc->lk;

    // Tidy up.
    proc->chan = 0;

    // Reacquire original lock.
    if(lk != &ptable.lock){     //DOC: sleeplock2
        release(&ptable.lock);
        acquire(lk);
    }

    goto proc->cbc_next();
}

__ncode cbc_sleep(void *chan, struct spinlock *lk, __code(*next1)())
{
    //show_callstk("sleep");

    if(proc == 0) {
        panic("sleep");
    }

    if(lk == 0) {
        panic("sleep without lk");
    }

    if(lk != &ptable.lock){     //DOC: sleeplock0
        acquire(&ptable.lock);  //DOC: sleeplock1
        release(lk);
    }

    proc->chan = chan;
    proc->state = SLEEPING;
    proc->lk = lk;
    proc->cbc_next = next1;

    goto cbc_sched(cbc_sleep1);
}
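
// cbc_sleep splits sleep() below at the context-switch boundary: the
// code before swtch becomes cbc_sleep, the tidy-up after it becomes the
// cbc_sleep1 continuation, and the state they share (lk, cbc_next) is
// stashed in struct proc rather than on the C stack.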

// Atomically release lock and sleep on chan.
// Reacquires lock when awakened.
void sleep(void *chan, struct spinlock *lk)
{
    //show_callstk("sleep");

    if(proc == 0) {
        panic("sleep");
    }

    if(lk == 0) {
        panic("sleep without lk");
    }

    // Must acquire ptable.lock in order to change p->state and then call
    // sched. Once we hold ptable.lock, we can be guaranteed that we won't
    // miss any wakeup (wakeup runs with ptable.lock locked), so it's okay
    // to release lk.
    if(lk != &ptable.lock){     //DOC: sleeplock0
        acquire(&ptable.lock);  //DOC: sleeplock1
        release(lk);
    }

    // Go to sleep.
    proc->chan = chan;
    proc->state = SLEEPING;
    sched();

    // Tidy up.
    proc->chan = 0;

    // Reacquire original lock.
    if(lk != &ptable.lock){     //DOC: sleeplock2
        release(&ptable.lock);
        acquire(lk);
    }
}

//PAGEBREAK!
// Wake up all processes sleeping on chan. The ptable lock must be held.
static void wakeup1(void *chan)
{
    struct proc *p;

    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++) {
        if(p->state == SLEEPING && p->chan == chan) {
            p->state = RUNNABLE;
        }
    }
}

__ncode cbc_wakeup1(void *chan)
{
    struct proc *p;

    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++) {
        if(p->state == SLEEPING && p->chan == chan) {
            p->state = RUNNABLE;
        }
    }

    release(&ptable.lock);
    goto proc->cbc_next();
}

__ncode cbc_wakeup(void *chan, __code(*next1)())
{
    acquire(&ptable.lock);
    proc->cbc_next = next1;
    cbc_wakeup1(chan);
}
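
// Paired with cbc_sleep: cbc_wakeup stores the caller's continuation in
// proc->cbc_next before handing control to cbc_wakeup1, which releases
// ptable.lock and jumps to that continuation instead of returning.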

// Wake up all processes sleeping on chan.
void wakeup(void *chan)
{
    acquire(&ptable.lock);
    wakeup1(chan);
    release(&ptable.lock);
}

// Kill the process with the given pid. Process won't exit until it returns
// to user space (see trap in trap.c).
int kill(int pid)
{
    struct proc *p;

    acquire(&ptable.lock);

    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
        if(p->pid == pid){
            p->killed = 1;

            // Wake process from sleep if necessary.
            if(p->state == SLEEPING) {
                p->state = RUNNABLE;
            }

            release(&ptable.lock);
            return 0;
        }
    }

    release(&ptable.lock);
    return -1;
}

//PAGEBREAK: 36
// Print a process listing to console. For debugging. Runs when user
// types ^P on console. No lock to avoid wedging a stuck machine further.
void procdump(void)
{
    static char *states[] = {
        [UNUSED]   = "unused",
        [EMBRYO]   = "embryo",
        [SLEEPING] = "sleep ",
        [RUNNABLE] = "runble",
        [RUNNING]  = "run   ",
        [ZOMBIE]   = "zombie"
    };

    struct proc *p;
    char *state;

    for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){
        if(p->state == UNUSED) {
            continue;
        }

        if(p->state >= 0 && p->state < NELEM(states) && states[p->state]) {
            state = states[p->state];
        } else {
            state = "???";
        }

        cprintf("%d %s %d:%s %d\n", p->pid, state, p->pid, p->name, p->parent->pid);
    }

    show_callstk("procdump: \n");
}