root/kernel/spinlock.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions:
  1. initlock
  2. acquire
  3. release
  4. holding
  5. push_off
  6. pop_off

   1 // Mutual exclusion spin locks.
   2 
   3 #include "types.h"
   4 #include "param.h"
   5 #include "memlayout.h"
   6 #include "spinlock.h"
   7 #include "riscv.h"
   8 #include "proc.h"
   9 #include "defs.h"
  10 
  11 void
  12 initlock(struct spinlock *lk, char *name)
  13 {
  14   lk->name = name;
  15   lk->locked = 0;
  16   lk->cpu = 0;
  17 }
  18 
// Acquire the lock.
// Loops (spins) until the lock is acquired.
// Interrupts remain disabled (via push_off) until the matching release().
void
acquire(struct spinlock *lk)
{
  push_off(); // disable interrupts to avoid deadlock.
  // Re-acquiring a lock this CPU already holds would spin forever.
  if(holding(lk))
    panic("acquire");

  // On RISC-V, sync_lock_test_and_set turns into an atomic swap:
  //   a5 = 1
  //   s1 = &lk->locked
  //   amoswap.w.aq a5, a5, (s1)
  // The swap returns the previous value of lk->locked; 0 means we
  // were the one to set it, i.e. we now hold the lock.
  while(__sync_lock_test_and_set(&lk->locked, 1) != 0)
    ;

  // Tell the C compiler and the processor to not move loads or stores
  // past this point, to ensure that the critical section's memory
  // references happen strictly after the lock is acquired.
  // On RISC-V, this emits a fence instruction.
  __sync_synchronize();

  // Record info about lock acquisition for holding() and debugging.
  // Safe to write only now that the lock is held.
  lk->cpu = mycpu();
}
  44 
// Release the lock.
// Must be called by the CPU that acquired it; re-enables interrupts
// (via pop_off) if this drops the outermost push_off.
void
release(struct spinlock *lk)
{
  if(!holding(lk))
    panic("release");

  // Clear the owner while the lock is still held, so a concurrent
  // holding() on another CPU cannot see a stale owner after release.
  lk->cpu = 0;

  // Tell the C compiler and the CPU to not move loads or stores
  // past this point, to ensure that all the stores in the critical
  // section are visible to other CPUs before the lock is released,
  // and that loads in the critical section occur strictly before
  // the lock is released.
  // On RISC-V, this emits a fence instruction.
  __sync_synchronize();

  // Release the lock, equivalent to lk->locked = 0.
  // This code doesn't use a C assignment, since the C standard
  // implies that an assignment might be implemented with
  // multiple store instructions.
  // On RISC-V, sync_lock_release turns into an atomic swap:
  //   s1 = &lk->locked
  //   amoswap.w zero, zero, (s1)
  __sync_lock_release(&lk->locked);

  pop_off();
}
  73 
  74 // Check whether this cpu is holding the lock.
  75 // Interrupts must be off.
  76 int
  77 holding(struct spinlock *lk)
  78 {
  79   int r;
  80   r = (lk->locked && lk->cpu == mycpu());
  81   return r;
  82 }
  83 
  84 // push_off/pop_off are like intr_off()/intr_on() except that they are matched:
  85 // it takes two pop_off()s to undo two push_off()s.  Also, if interrupts
  86 // are initially off, then push_off, pop_off leaves them off.
  87 
  88 void
  89 push_off(void)
  90 {
  91   int old = intr_get();
  92 
  93   intr_off();
  94   if(mycpu()->noff == 0)
  95     mycpu()->intena = old;
  96   mycpu()->noff += 1;
  97 }
  98 
  99 void
 100 pop_off(void)
 101 {
 102   struct cpu *c = mycpu();
 103   if(intr_get())
 104     panic("pop_off - interruptible");
 105   if(c->noff < 1)
 106     panic("pop_off");
 107   c->noff -= 1;
 108   if(c->noff == 0 && c->intena)
 109     intr_on();
 110 }

/* [<][>][^][v][top][bottom][index][help] */