root/kernel/proc.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. proc_mapstacks
  2. procinit
  3. cpuid
  4. mycpu
  5. myproc
  6. allocpid
  7. allocproc
  8. freeproc
  9. proc_pagetable
  10. proc_freepagetable
  11. userinit
  12. growproc
  13. fork
  14. reparent
  15. exit
  16. wait
  17. scheduler
  18. sched
  19. yield
  20. forkret
  21. sleep
  22. wakeup
  23. kill
  24. setkilled
  25. killed
  26. either_copyout
  27. either_copyin
  28. procdump

   1 #include "types.h"
   2 #include "param.h"
   3 #include "memlayout.h"
   4 #include "riscv.h"
   5 #include "spinlock.h"
   6 #include "proc.h"
   7 #include "defs.h"
   8 
   9 struct cpu cpus[NCPU];
  10 
  11 struct proc proc[NPROC];
  12 
  13 struct proc *initproc;
  14 
  15 int nextpid = 1;
  16 struct spinlock pid_lock;
  17 
  18 extern void forkret(void);
  19 static void freeproc(struct proc *p);
  20 
  21 extern char trampoline[]; // trampoline.S
  22 
  23 // helps ensure that wakeups of wait()ing
  24 // parents are not lost. helps obey the
  25 // memory model when using p->parent.
  26 // must be acquired before any p->lock.
  27 struct spinlock wait_lock;
  28 
  29 // Allocate a page for each process's kernel stack.
  30 // Map it high in memory, followed by an invalid
  31 // guard page.
  32 void
  33 proc_mapstacks(pagetable_t kpgtbl)
  34 {
  35   struct proc *p;
  36   
  37   for(p = proc; p < &proc[NPROC]; p++) {
  38     char *pa = kalloc();
  39     if(pa == 0)
  40       panic("kalloc");
  41     uint64 va = KSTACK((int) (p - proc));
  42     kvmmap(kpgtbl, va, (uint64)pa, PGSIZE, PTE_R | PTE_W);
  43   }
  44 }
  45 
  46 // initialize the proc table.
  47 void
  48 procinit(void)
  49 {
  50   struct proc *p;
  51   
  52   initlock(&pid_lock, "nextpid");
  53   initlock(&wait_lock, "wait_lock");
  54   for(p = proc; p < &proc[NPROC]; p++) {
  55       initlock(&p->lock, "proc");
  56       p->state = UNUSED;
  57       p->kstack = KSTACK((int) (p - proc));
  58   }
  59 }
  60 
  61 // Must be called with interrupts disabled,
  62 // to prevent race with process being moved
  63 // to a different CPU.
  64 int
  65 cpuid()
  66 {
  67   int id = r_tp();
  68   return id;
  69 }
  70 
  71 // Return this CPU's cpu struct.
  72 // Interrupts must be disabled.
  73 struct cpu*
  74 mycpu(void)
  75 {
  76   int id = cpuid();
  77   struct cpu *c = &cpus[id];
  78   return c;
  79 }
  80 
  81 // Return the current struct proc *, or zero if none.
  82 struct proc*
  83 myproc(void)
  84 {
  85   push_off();
  86   struct cpu *c = mycpu();
  87   struct proc *p = c->proc;
  88   pop_off();
  89   return p;
  90 }
  91 
  92 int
  93 allocpid()
  94 {
  95   int pid;
  96   
  97   acquire(&pid_lock);
  98   pid = nextpid;
  99   nextpid = nextpid + 1;
 100   release(&pid_lock);
 101 
 102   return pid;
 103 }
 104 
 105 // Look in the process table for an UNUSED proc.
 106 // If found, initialize state required to run in the kernel,
 107 // and return with p->lock held.
 108 // If there are no free procs, or a memory allocation fails, return 0.
 109 static struct proc*
 110 allocproc(void)
 111 {
 112   struct proc *p;
 113 
 114   for(p = proc; p < &proc[NPROC]; p++) {
 115     acquire(&p->lock);
 116     if(p->state == UNUSED) {
 117       goto found;
 118     } else {
 119       release(&p->lock);
 120     }
 121   }
 122   return 0;
 123 
 124 found:
 125   p->pid = allocpid();
 126   p->state = USED;
 127 
 128   // Allocate a trapframe page.
 129   if((p->trapframe = (struct trapframe *)kalloc()) == 0){
 130     freeproc(p);
 131     release(&p->lock);
 132     return 0;
 133   }
 134 
 135   // An empty user page table.
 136   p->pagetable = proc_pagetable(p);
 137   if(p->pagetable == 0){
 138     freeproc(p);
 139     release(&p->lock);
 140     return 0;
 141   }
 142 
 143   // Set up new context to start executing at forkret,
 144   // which returns to user space.
 145   memset(&p->context, 0, sizeof(p->context));
 146   p->context.ra = (uint64)forkret;
 147   p->context.sp = p->kstack + PGSIZE;
 148 
 149   return p;
 150 }
 151 
 152 // free a proc structure and the data hanging from it,
 153 // including user pages.
 154 // p->lock must be held.
 155 static void
 156 freeproc(struct proc *p)
 157 {
 158   if(p->trapframe)
 159     kfree((void*)p->trapframe);
 160   p->trapframe = 0;
 161   if(p->pagetable)
 162     proc_freepagetable(p->pagetable, p->sz);
 163   p->pagetable = 0;
 164   p->sz = 0;
 165   p->pid = 0;
 166   p->parent = 0;
 167   p->name[0] = 0;
 168   p->chan = 0;
 169   p->killed = 0;
 170   p->xstate = 0;
 171   p->state = UNUSED;
 172 }
 173 
 174 // Create a user page table for a given process, with no user memory,
 175 // but with trampoline and trapframe pages.
 176 pagetable_t
 177 proc_pagetable(struct proc *p)
 178 {
 179   pagetable_t pagetable;
 180 
 181   // An empty page table.
 182   pagetable = uvmcreate();
 183   if(pagetable == 0)
 184     return 0;
 185 
 186   // map the trampoline code (for system call return)
 187   // at the highest user virtual address.
 188   // only the supervisor uses it, on the way
 189   // to/from user space, so not PTE_U.
 190   if(mappages(pagetable, TRAMPOLINE, PGSIZE,
 191               (uint64)trampoline, PTE_R | PTE_X) < 0){
 192     uvmfree(pagetable, 0);
 193     return 0;
 194   }
 195 
 196   // map the trapframe page just below the trampoline page, for
 197   // trampoline.S.
 198   if(mappages(pagetable, TRAPFRAME, PGSIZE,
 199               (uint64)(p->trapframe), PTE_R | PTE_W) < 0){
 200     uvmunmap(pagetable, TRAMPOLINE, 1, 0);
 201     uvmfree(pagetable, 0);
 202     return 0;
 203   }
 204 
 205   return pagetable;
 206 }
 207 
// Free a process's page table, and free the
// physical memory it refers to.
// The trampoline and trapframe mappings are removed without
// freeing their physical pages (do_free == 0): the trampoline
// is shared kernel text, and the trapframe page is freed
// separately by freeproc().
void
proc_freepagetable(pagetable_t pagetable, uint64 sz)
{
  uvmunmap(pagetable, TRAMPOLINE, 1, 0);
  uvmunmap(pagetable, TRAPFRAME, 1, 0);
  uvmfree(pagetable, sz);
}
 217 
// a user program that calls exec("/init")
// assembled from ../user/initcode.S
// od -t xC ../user/initcode
// Loaded into the first user page by userinit(), so it must
// stay smaller than a page (see uvmfirst call there).
uchar initcode[] = {
  0x17, 0x05, 0x00, 0x00, 0x13, 0x05, 0x45, 0x02,
  0x97, 0x05, 0x00, 0x00, 0x93, 0x85, 0x35, 0x02,
  0x93, 0x08, 0x70, 0x00, 0x73, 0x00, 0x00, 0x00,
  0x93, 0x08, 0x20, 0x00, 0x73, 0x00, 0x00, 0x00,
  0xef, 0xf0, 0x9f, 0xff, 0x2f, 0x69, 0x6e, 0x69,
  0x74, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x00,
  0x00, 0x00, 0x00, 0x00
};
 230 
// Set up first user process.
// Runs once at boot; assumes allocproc() succeeds since the
// table is empty at this point.
void
userinit(void)
{
  struct proc *p;

  // allocproc() returns with p->lock held; released below once
  // the process is fully initialized and marked RUNNABLE.
  p = allocproc();
  initproc = p;
  
  // allocate one user page and copy initcode's instructions
  // and data into it.
  uvmfirst(p->pagetable, initcode, sizeof(initcode));
  p->sz = PGSIZE;

  // prepare for the very first "return" from kernel to user.
  p->trapframe->epc = 0;      // user program counter
  p->trapframe->sp = PGSIZE;  // user stack pointer

  safestrcpy(p->name, "initcode", sizeof(p->name));
  p->cwd = namei("/");

  p->state = RUNNABLE;

  release(&p->lock);
}
 256 
 257 // Grow or shrink user memory by n bytes.
 258 // Return 0 on success, -1 on failure.
 259 int
 260 growproc(int n)
 261 {
 262   uint64 sz;
 263   struct proc *p = myproc();
 264 
 265   sz = p->sz;
 266   if(n > 0){
 267     if((sz = uvmalloc(p->pagetable, sz, sz + n, PTE_W)) == 0) {
 268       return -1;
 269     }
 270   } else if(n < 0){
 271     sz = uvmdealloc(p->pagetable, sz, sz + n);
 272   }
 273   p->sz = sz;
 274   return 0;
 275 }
 276 
// Create a new process, copying the parent.
// Sets up child kernel stack to return as if from fork() system call.
// Returns the child's pid in the parent, or -1 if allocation fails.
int
fork(void)
{
  int i, pid;
  struct proc *np;
  struct proc *p = myproc();

  // Allocate process. On success, np->lock is held.
  if((np = allocproc()) == 0){
    return -1;
  }

  // Copy user memory from parent to child.
  if(uvmcopy(p->pagetable, np->pagetable, p->sz) < 0){
    freeproc(np);
    release(&np->lock);
    return -1;
  }
  np->sz = p->sz;

  // copy saved user registers.
  *(np->trapframe) = *(p->trapframe);

  // Cause fork to return 0 in the child.
  np->trapframe->a0 = 0;

  // increment reference counts on open file descriptors.
  for(i = 0; i < NOFILE; i++)
    if(p->ofile[i])
      np->ofile[i] = filedup(p->ofile[i]);
  np->cwd = idup(p->cwd);

  safestrcpy(np->name, p->name, sizeof(p->name));

  pid = np->pid;

  // Drop np->lock before taking wait_lock: wait_lock must be
  // acquired before any p->lock (see the wait_lock comment above).
  release(&np->lock);

  // p->parent is protected by wait_lock.
  acquire(&wait_lock);
  np->parent = p;
  release(&wait_lock);

  // Mark the child runnable only after it is fully set up,
  // so the scheduler cannot run a half-built process.
  acquire(&np->lock);
  np->state = RUNNABLE;
  release(&np->lock);

  return pid;
}
 327 
 328 // Pass p's abandoned children to init.
 329 // Caller must hold wait_lock.
 330 void
 331 reparent(struct proc *p)
 332 {
 333   struct proc *pp;
 334 
 335   for(pp = proc; pp < &proc[NPROC]; pp++){
 336     if(pp->parent == p){
 337       pp->parent = initproc;
 338       wakeup(initproc);
 339     }
 340   }
 341 }
 342 
// Exit the current process.  Does not return.
// An exited process remains in the zombie state
// until its parent calls wait().
void
exit(int status)
{
  struct proc *p = myproc();

  if(p == initproc)
    panic("init exiting");

  // Close all open files.
  for(int fd = 0; fd < NOFILE; fd++){
    if(p->ofile[fd]){
      struct file *f = p->ofile[fd];
      fileclose(f);
      p->ofile[fd] = 0;
    }
  }

  // Drop the reference to the current working directory.
  begin_op();
  iput(p->cwd);
  end_op();
  p->cwd = 0;

  // Take wait_lock first (wait_lock before any p->lock); it also
  // ensures the parent's wait() cannot miss the wakeup below.
  acquire(&wait_lock);

  // Give any children to init.
  reparent(p);

  // Parent might be sleeping in wait().
  wakeup(p->parent);
  
  acquire(&p->lock);

  p->xstate = status;
  p->state = ZOMBIE;

  release(&wait_lock);

  // Jump into the scheduler, never to return.
  // p->lock is still held; the scheduler releases it, which keeps
  // the parent from freeing this proc before the switch completes.
  sched();
  panic("zombie exit");
}
 387 
// Wait for a child process to exit and return its pid.
// Return -1 if this process has no children.
// If addr is non-zero, the child's exit status is copied out
// to that user address.
int
wait(uint64 addr)
{
  struct proc *pp;
  int havekids, pid;
  struct proc *p = myproc();

  // wait_lock protects pp->parent and pairs with exit()'s wakeup
  // so a child's exit cannot slip between our scan and sleep().
  acquire(&wait_lock);

  for(;;){
    // Scan through table looking for exited children.
    havekids = 0;
    for(pp = proc; pp < &proc[NPROC]; pp++){
      if(pp->parent == p){
        // make sure the child isn't still in exit() or swtch().
        acquire(&pp->lock);

        havekids = 1;
        if(pp->state == ZOMBIE){
          // Found one.
          pid = pp->pid;
          if(addr != 0 && copyout(p->pagetable, addr, (char *)&pp->xstate,
                                  sizeof(pp->xstate)) < 0) {
            release(&pp->lock);
            release(&wait_lock);
            return -1;
          }
          // Reclaim the child's slot; requires pp->lock.
          freeproc(pp);
          release(&pp->lock);
          release(&wait_lock);
          return pid;
        }
        release(&pp->lock);
      }
    }

    // No point waiting if we don't have any children.
    if(!havekids || killed(p)){
      release(&wait_lock);
      return -1;
    }
    
    // Wait for a child to exit; sleep() releases wait_lock
    // atomically and reacquires it before returning.
    sleep(p, &wait_lock);  //DOC: wait-sleep
  }
}
 436 
// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
// Scheduler never returns.  It loops, doing:
//  - choose a process to run.
//  - swtch to start running that process.
//  - eventually that process transfers control
//    via swtch back to the scheduler.
void
scheduler(void)
{
  struct proc *p;
  struct cpu *c = mycpu();
  
  c->proc = 0;
  for(;;){
    // Avoid deadlock by ensuring that devices can interrupt.
    intr_on();

    for(p = proc; p < &proc[NPROC]; p++) {
      acquire(&p->lock);
      if(p->state == RUNNABLE) {
        // Switch to chosen process.  It is the process's job
        // to release its lock and then reacquire it
        // before jumping back to us.
        // p->lock is held across swtch: the lock hand-off keeps
        // another CPU from running the same process.
        p->state = RUNNING;
        c->proc = p;
        swtch(&c->context, &p->context);

        // Process is done running for now.
        // It should have changed its p->state before coming back.
        c->proc = 0;
      }
      release(&p->lock);
    }
  }
}
 473 
// Switch to scheduler.  Must hold only p->lock
// and have changed proc->state. Saves and restores
// intena because intena is a property of this
// kernel thread, not this CPU. It should
// be proc->intena and proc->noff, but that would
// break in the few places where a lock is held but
// there's no process.
void
sched(void)
{
  int intena;
  struct proc *p = myproc();

  // Sanity checks: exactly one lock held (p->lock), state already
  // changed away from RUNNING, interrupts off.
  if(!holding(&p->lock))
    panic("sched p->lock");
  if(mycpu()->noff != 1)
    panic("sched locks");
  if(p->state == RUNNING)
    panic("sched running");
  if(intr_get())
    panic("sched interruptible");

  intena = mycpu()->intena;
  swtch(&p->context, &mycpu()->context);
  // Back on this thread's stack, possibly on a different CPU,
  // so mycpu() is re-read rather than cached.
  mycpu()->intena = intena;
}
 500 
 501 // Give up the CPU for one scheduling round.
 502 void
 503 yield(void)
 504 {
 505   struct proc *p = myproc();
 506   acquire(&p->lock);
 507   p->state = RUNNABLE;
 508   sched();
 509   release(&p->lock);
 510 }
 511 
// A fork child's very first scheduling by scheduler()
// will swtch to forkret.
void
forkret(void)
{
  static int first = 1;

  // Still holding p->lock from scheduler; release it as a normal
  // process would after returning from swtch.
  release(&myproc()->lock);

  if (first) {
    // File system initialization must be run in the context of a
    // regular process (e.g., because it calls sleep), and thus cannot
    // be run from main().
    first = 0;
    fsinit(ROOTDEV);
  }

  // Return to user space via the trampoline.
  usertrapret();
}
 532 
// Atomically release lock and sleep on chan.
// Reacquires lock when awakened.
void
sleep(void *chan, struct spinlock *lk)
{
  struct proc *p = myproc();
  
  // Must acquire p->lock in order to
  // change p->state and then call sched.
  // Once we hold p->lock, we can be
  // guaranteed that we won't miss any wakeup
  // (wakeup locks p->lock),
  // so it's okay to release lk.

  acquire(&p->lock);  //DOC: sleeplock1
  release(lk);

  // Go to sleep.
  p->chan = chan;
  p->state = SLEEPING;

  // sched() switches away with p->lock held; we return here
  // after wakeup() has set the state back to RUNNABLE.
  sched();

  // Tidy up.
  p->chan = 0;

  // Reacquire original lock (after dropping p->lock, to
  // respect the lk-before-p->lock ordering callers use).
  release(&p->lock);
  acquire(lk);
}
 563 
 564 // Wake up all processes sleeping on chan.
 565 // Must be called without any p->lock.
 566 void
 567 wakeup(void *chan)
 568 {
 569   struct proc *p;
 570 
 571   for(p = proc; p < &proc[NPROC]; p++) {
 572     if(p != myproc()){
 573       acquire(&p->lock);
 574       if(p->state == SLEEPING && p->chan == chan) {
 575         p->state = RUNNABLE;
 576       }
 577       release(&p->lock);
 578     }
 579   }
 580 }
 581 
 582 // Kill the process with the given pid.
 583 // The victim won't exit until it tries to return
 584 // to user space (see usertrap() in trap.c).
 585 int
 586 kill(int pid)
 587 {
 588   struct proc *p;
 589 
 590   for(p = proc; p < &proc[NPROC]; p++){
 591     acquire(&p->lock);
 592     if(p->pid == pid){
 593       p->killed = 1;
 594       if(p->state == SLEEPING){
 595         // Wake process from sleep().
 596         p->state = RUNNABLE;
 597       }
 598       release(&p->lock);
 599       return 0;
 600     }
 601     release(&p->lock);
 602   }
 603   return -1;
 604 }
 605 
// Mark p as killed, under p->lock so the flag is visible
// to killed() on other CPUs.
void
setkilled(struct proc *p)
{
  acquire(&p->lock);
  p->killed = 1;
  release(&p->lock);
}
 613 
 614 int
 615 killed(struct proc *p)
 616 {
 617   int k;
 618   
 619   acquire(&p->lock);
 620   k = p->killed;
 621   release(&p->lock);
 622   return k;
 623 }
 624 
 625 // Copy to either a user address, or kernel address,
 626 // depending on usr_dst.
 627 // Returns 0 on success, -1 on error.
 628 int
 629 either_copyout(int user_dst, uint64 dst, void *src, uint64 len)
 630 {
 631   struct proc *p = myproc();
 632   if(user_dst){
 633     return copyout(p->pagetable, dst, src, len);
 634   } else {
 635     memmove((char *)dst, src, len);
 636     return 0;
 637   }
 638 }
 639 
 640 // Copy from either a user address, or kernel address,
 641 // depending on usr_src.
 642 // Returns 0 on success, -1 on error.
 643 int
 644 either_copyin(void *dst, int user_src, uint64 src, uint64 len)
 645 {
 646   struct proc *p = myproc();
 647   if(user_src){
 648     return copyin(p->pagetable, dst, src, len);
 649   } else {
 650     memmove(dst, (char*)src, len);
 651     return 0;
 652   }
 653 }
 654 
 655 // Print a process listing to console.  For debugging.
 656 // Runs when user types ^P on console.
 657 // No lock to avoid wedging a stuck machine further.
 658 void
 659 procdump(void)
 660 {
 661   static char *states[] = {
 662   [UNUSED]    "unused",
 663   [USED]      "used",
 664   [SLEEPING]  "sleep ",
 665   [RUNNABLE]  "runble",
 666   [RUNNING]   "run   ",
 667   [ZOMBIE]    "zombie"
 668   };
 669   struct proc *p;
 670   char *state;
 671 
 672   printf("\n");
 673   for(p = proc; p < &proc[NPROC]; p++){
 674     if(p->state == UNUSED)
 675       continue;
 676     if(p->state >= 0 && p->state < NELEM(states) && states[p->state])
 677       state = states[p->state];
 678     else
 679       state = "???";
 680     printf("%d %s %s", p->pid, state, p->name);
 681     printf("\n");
 682   }
 683 }

/* [<][>][^][v][top][bottom][index][help] */