root/kernel/riscv.h


DEFINITIONS

This source file includes the following definitions:
  1. r_mhartid
  2. r_mstatus
  3. w_mstatus
  4. w_mepc
  5. r_sstatus
  6. w_sstatus
  7. r_sip
  8. w_sip
  9. r_sie
  10. w_sie
  11. r_mie
  12. w_mie
  13. w_sepc
  14. r_sepc
  15. r_medeleg
  16. w_medeleg
  17. r_mideleg
  18. w_mideleg
  19. w_stvec
  20. r_stvec
  21. w_mtvec
  22. w_pmpcfg0
  23. w_pmpaddr0
  24. w_satp
  25. r_satp
  26. w_mscratch
  27. r_scause
  28. r_stval
  29. w_mcounteren
  30. r_mcounteren
  31. r_time
  32. intr_on
  33. intr_off
  34. intr_get
  35. r_sp
  36. r_tp
  37. w_tp
  38. r_ra
  39. sfence_vma

#ifndef __ASSEMBLER__

// which hart (core) is this?
static inline uint64
r_mhartid()
{
  uint64 x;
  asm volatile("csrr %0, mhartid" : "=r" (x) );
  return x;
}

// Machine Status Register, mstatus

#define MSTATUS_MPP_MASK (3L << 11) // previous mode.
#define MSTATUS_MPP_M (3L << 11)
#define MSTATUS_MPP_S (1L << 11)
#define MSTATUS_MPP_U (0L << 11)
#define MSTATUS_MIE (1L << 3)    // machine-mode interrupt enable.

static inline uint64
r_mstatus()
{
  uint64 x;
  asm volatile("csrr %0, mstatus" : "=r" (x) );
  return x;
}

static inline void
w_mstatus(uint64 x)
{
  asm volatile("csrw mstatus, %0" : : "r" (x));
}
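
// Example sketch (hypothetical helper, not defined by xv6): machine-mode
// startup code can use the accessors above to arrange that a later mret
// lands in supervisor mode; xv6's start() does something along these lines.
static inline void
example_set_mret_to_supervisor(void)
{
  uint64 x = r_mstatus();
  x &= ~MSTATUS_MPP_MASK;   // clear the saved previous-privilege field
  x |= MSTATUS_MPP_S;       // mret will now return to supervisor mode
  w_mstatus(x);
}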

// machine exception program counter, holds the
// instruction address to which a return from
// exception will go.
static inline void
w_mepc(uint64 x)
{
  asm volatile("csrw mepc, %0" : : "r" (x));
}

// Supervisor Status Register, sstatus

#define SSTATUS_SPP (1L << 8)  // Previous mode, 1=Supervisor, 0=User
#define SSTATUS_SPIE (1L << 5) // Supervisor Previous Interrupt Enable
#define SSTATUS_UPIE (1L << 4) // User Previous Interrupt Enable
#define SSTATUS_SIE (1L << 1)  // Supervisor Interrupt Enable
#define SSTATUS_UIE (1L << 0)  // User Interrupt Enable

static inline uint64
r_sstatus()
{
  uint64 x;
  asm volatile("csrr %0, sstatus" : "=r" (x) );
  return x;
}

static inline void
w_sstatus(uint64 x)
{
  asm volatile("csrw sstatus, %0" : : "r" (x));
}

// Supervisor Interrupt Pending
static inline uint64
r_sip()
{
  uint64 x;
  asm volatile("csrr %0, sip" : "=r" (x) );
  return x;
}

static inline void
w_sip(uint64 x)
{
  asm volatile("csrw sip, %0" : : "r" (x));
}

// Supervisor Interrupt Enable
#define SIE_SEIE (1L << 9) // external
#define SIE_STIE (1L << 5) // timer
#define SIE_SSIE (1L << 1) // software
static inline uint64
r_sie()
{
  uint64 x;
  asm volatile("csrr %0, sie" : "=r" (x) );
  return x;
}

static inline void
w_sie(uint64 x)
{
  asm volatile("csrw sie, %0" : : "r" (x));
}
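
// Example sketch (hypothetical helper, not defined by xv6): supervisor-mode
// initialization code can turn on all three supervisor interrupt sources
// with the accessors above.
static inline void
example_enable_s_interrupts(void)
{
  w_sie(r_sie() | SIE_SEIE | SIE_STIE | SIE_SSIE);
}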

// Machine-mode Interrupt Enable
#define MIE_MEIE (1L << 11) // external
#define MIE_MTIE (1L << 7)  // timer
#define MIE_MSIE (1L << 3)  // software
static inline uint64
r_mie()
{
  uint64 x;
  asm volatile("csrr %0, mie" : "=r" (x) );
  return x;
}

static inline void
w_mie(uint64 x)
{
  asm volatile("csrw mie, %0" : : "r" (x));
}

// supervisor exception program counter, holds the
// instruction address to which a return from
// exception will go.
static inline void
w_sepc(uint64 x)
{
  asm volatile("csrw sepc, %0" : : "r" (x));
}

static inline uint64
r_sepc()
{
  uint64 x;
  asm volatile("csrr %0, sepc" : "=r" (x) );
  return x;
}

// Machine Exception Delegation
static inline uint64
r_medeleg()
{
  uint64 x;
  asm volatile("csrr %0, medeleg" : "=r" (x) );
  return x;
}

static inline void
w_medeleg(uint64 x)
{
  asm volatile("csrw medeleg, %0" : : "r" (x));
}

// Machine Interrupt Delegation
static inline uint64
r_mideleg()
{
  uint64 x;
  asm volatile("csrr %0, mideleg" : "=r" (x) );
  return x;
}

static inline void
w_mideleg(uint64 x)
{
  asm volatile("csrw mideleg, %0" : : "r" (x));
}

// Supervisor Trap-Vector Base Address
// low two bits are mode.
static inline void
w_stvec(uint64 x)
{
  asm volatile("csrw stvec, %0" : : "r" (x));
}

static inline uint64
r_stvec()
{
  uint64 x;
  asm volatile("csrr %0, stvec" : "=r" (x) );
  return x;
}

// Machine-mode interrupt vector
static inline void
w_mtvec(uint64 x)
{
  asm volatile("csrw mtvec, %0" : : "r" (x));
}

// Physical Memory Protection
static inline void
w_pmpcfg0(uint64 x)
{
  asm volatile("csrw pmpcfg0, %0" : : "r" (x));
}

static inline void
w_pmpaddr0(uint64 x)
{
  asm volatile("csrw pmpaddr0, %0" : : "r" (x));
}

// use riscv's sv39 page table scheme.
#define SATP_SV39 (8L << 60)

#define MAKE_SATP(pagetable) (SATP_SV39 | (((uint64)pagetable) >> 12))
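// For example (address purely illustrative): a root page table at physical
// address 0x87fff000 gives
//   MAKE_SATP(0x87fff000) = (8L << 60) | (0x87fff000 >> 12)
//                         = 0x8000000000087fff
// i.e. the Sv39 mode in satp[63:60] and the table's physical page number
// in the low bits.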

// supervisor address translation and protection;
// holds the address of the page table.
static inline void
w_satp(uint64 x)
{
  asm volatile("csrw satp, %0" : : "r" (x));
}

static inline uint64
r_satp()
{
  uint64 x;
  asm volatile("csrr %0, satp" : "=r" (x) );
  return x;
}

// Machine-mode Scratch register, mscratch
static inline void
w_mscratch(uint64 x)
{
  asm volatile("csrw mscratch, %0" : : "r" (x));
}

// Supervisor Trap Cause
static inline uint64
r_scause()
{
  uint64 x;
  asm volatile("csrr %0, scause" : "=r" (x) );
  return x;
}

// Supervisor Trap Value
static inline uint64
r_stval()
{
  uint64 x;
  asm volatile("csrr %0, stval" : "=r" (x) );
  return x;
}

// Machine-mode Counter-Enable
static inline void
w_mcounteren(uint64 x)
{
  asm volatile("csrw mcounteren, %0" : : "r" (x));
}

static inline uint64
r_mcounteren()
{
  uint64 x;
  asm volatile("csrr %0, mcounteren" : "=r" (x) );
  return x;
}

// read the time CSR, a read-only shadow of the memory-mapped mtime timer
static inline uint64
r_time()
{
  uint64 x;
  asm volatile("csrr %0, time" : "=r" (x) );
  return x;
}

// enable device interrupts
static inline void
intr_on()
{
  w_sstatus(r_sstatus() | SSTATUS_SIE);
}

// disable device interrupts
static inline void
intr_off()
{
  w_sstatus(r_sstatus() & ~SSTATUS_SIE);
}

// are device interrupts enabled?
static inline int
intr_get()
{
  uint64 x = r_sstatus();
  return (x & SSTATUS_SIE) != 0;
}
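
// Example sketch (hypothetical helper, not defined by xv6): code that must
// not be interrupted can save the current state, disable interrupts, and
// restore the saved state afterward; xv6's push_off()/pop_off() in
// spinlock.c build on this idea.
static inline void
example_without_interrupts(void (*fn)(void))
{
  int old = intr_get();
  intr_off();
  fn();           // run the critical work with device interrupts off
  if(old)
    intr_on();
}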

// read the stack pointer, sp.
static inline uint64
r_sp()
{
  uint64 x;
  asm volatile("mv %0, sp" : "=r" (x) );
  return x;
}

// read and write tp, the thread pointer, which xv6 uses to hold
// this core's hartid (core number), the index into cpus[].
static inline uint64
r_tp()
{
  uint64 x;
  asm volatile("mv %0, tp" : "=r" (x) );
  return x;
}

static inline void
w_tp(uint64 x)
{
  asm volatile("mv tp, %0" : : "r" (x));
}
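
// For example, xv6's mycpu() (in proc.c) uses r_tp() roughly as
//   struct cpu *c = &cpus[r_tp()];   // cpus[] is declared elsewhere
// and must be called with interrupts disabled, so a timer interrupt cannot
// move the process to a different CPU between reading tp and using the result.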

// read the return address register, ra.
static inline uint64
r_ra()
{
  uint64 x;
  asm volatile("mv %0, ra" : "=r" (x) );
  return x;
}

// flush the TLB.
static inline void
sfence_vma()
{
  // the zero, zero means flush all TLB entries.
  asm volatile("sfence.vma zero, zero");
}
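
// Example sketch (hypothetical helper, not defined by xv6): switching to a
// new page table pairs w_satp() with sfence_vma() so the hart stops using
// stale translations; xv6's kvminithart() follows this pattern.
static inline void
example_install_pagetable(uint64 *pagetable)
{
  sfence_vma();                    // wait for any prior page-table updates
  w_satp(MAKE_SATP(pagetable));    // point satp at the new root page table
  sfence_vma();                    // discard cached translations
}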

typedef uint64 pte_t;
typedef uint64 *pagetable_t; // 512 PTEs

#endif // __ASSEMBLER__

#define PGSIZE 4096 // bytes per page
#define PGSHIFT 12  // bits of offset within a page

#define PGROUNDUP(sz)  (((sz)+PGSIZE-1) & ~(PGSIZE-1))
#define PGROUNDDOWN(a) (((a)) & ~(PGSIZE-1))
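// For example: PGROUNDUP(4097) = 8192 and PGROUNDDOWN(4097) = 4096;
// a page-aligned address is left unchanged by both macros.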

#define PTE_V (1L << 0) // valid
#define PTE_R (1L << 1) // readable
#define PTE_W (1L << 2) // writable
#define PTE_X (1L << 3) // executable
#define PTE_U (1L << 4) // user can access

// shift a physical address to the right place for a PTE.
#define PA2PTE(pa) ((((uint64)pa) >> 12) << 10)

#define PTE2PA(pte) (((pte) >> 10) << 12)

#define PTE_FLAGS(pte) ((pte) & 0x3FF)
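// For example (address purely illustrative): mapping physical address
// 0x80001000 as a valid, readable and writable page gives the PTE
//   PA2PTE(0x80001000) | PTE_R | PTE_W | PTE_V = (0x80001 << 10) | 0x7
//                                              = 0x20000407
// and PTE2PA() recovers 0x80001000 from it, while PTE_FLAGS() yields 0x7.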

// extract the three 9-bit page table indices from a virtual address.
#define PXMASK          0x1FF // 9 bits
#define PXSHIFT(level)  (PGSHIFT+(9*(level)))
#define PX(level, va) ((((uint64) (va)) >> PXSHIFT(level)) & PXMASK)
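// For example (address purely illustrative): for va = 0x3fc0a01234,
//   PX(2, va) = (va >> 30) & 0x1FF = 0xff
//   PX(1, va) = (va >> 21) & 0x1FF = 0x5
//   PX(0, va) = (va >> 12) & 0x1FF = 0x1
// selecting the slot in the level-2, level-1, and level-0 page tables.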

// one beyond the highest possible virtual address.
// MAXVA is actually one bit less than the max allowed by
// Sv39, to avoid having to sign-extend virtual addresses
// that have the high bit set.
#define MAXVA (1L << (9 + 9 + 9 + 12 - 1))
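// i.e. MAXVA = 1L << 38 = 0x4000000000, a 256-gigabyte virtual address space.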
