va 155 kernel/exec.c loadseg(pagetable_t pagetable, uint64 va, struct inode *ip, uint offset, uint sz)
va 161 kernel/exec.c pa = walkaddr(pagetable, va + i);
va 41 kernel/proc.c uint64 va = KSTACK((int) (p - proc));
va 42 kernel/proc.c kvmmap(kpgtbl, va, (uint64)pa, PGSIZE, PTE_R | PTE_W);
va 374 kernel/riscv.h #define PX(level, va) ((((uint64) (va)) >> PXSHIFT(level)) & PXMASK)
va 58 kernel/vm.c kvmmap(pagetable_t kpgtbl, uint64 va, uint64 pa, uint64 sz, int perm)
va 60 kernel/vm.c if(mappages(kpgtbl, va, sz, pa, perm) != 0)
va 98 kernel/vm.c walk(pagetable_t pagetable, uint64 va, int alloc)
va 100 kernel/vm.c if(va >= MAXVA)
va 104 kernel/vm.c pte_t *pte = &pagetable[PX(level, va)];
va 114 kernel/vm.c return &pagetable[PX(0, va)];
va 121 kernel/vm.c walkaddr(pagetable_t pagetable, uint64 va)
va 126 kernel/vm.c if(va >= MAXVA)
va 129 kernel/vm.c pte = walk(pagetable, va, 0);
va 146 kernel/vm.c mappages(pagetable_t pagetable, uint64 va, uint64 size, uint64 pa, int perm)
va 151 kernel/vm.c if((va % PGSIZE) != 0)
va 160 kernel/vm.c a = va;
va 161 kernel/vm.c last = va + size - PGSIZE;
va 193 kernel/vm.c uvmunmap(pagetable_t pagetable, uint64 va, uint64 npages, int do_free)
va 198 kernel/vm.c if((va % PGSIZE) != 0)
va 201 kernel/vm.c for(a = va; a < va + npages*PGSIZE; a += PGSIZE){
va 329 kernel/vm.c uvmclear(pagetable_t pagetable, uint64 va)
va 333 kernel/vm.c pte = walk(pagetable, va, 0);
va 453 kernel/vm.c vmfault(pagetable_t pagetable, uint64 va, int read)
va 458 kernel/vm.c if (va >= p->sz)
va 460 kernel/vm.c va = PGROUNDDOWN(va);
va 461 kernel/vm.c if(ismapped(pagetable, va)) {
va 468 kernel/vm.c if (mappages(p->pagetable, va, PGSIZE, mem, PTE_W|PTE_U|PTE_R) != 0) {
va 476 kernel/vm.c ismapped(pagetable_t pagetable, uint64 va)
va 478 kernel/vm.c pte_t *pte = walk(pagetable, va, 0);
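Most of the entries above revolve around how walk() (kernel/vm.c, line 98) uses the PX macro (kernel/riscv.h, line 374) to pick a page-table index out of a virtual address at each level of the Sv39 walk. The following is a minimal standalone sketch of that decomposition, reproducing the PGSHIFT, PXSHIFT, and PXMASK definitions as they appear in xv6's kernel/riscv.h; the example address is hypothetical, and the program is illustrative rather than part of the kernel.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t uint64;

    #define PGSHIFT 12                                // bits of offset within a page
    #define PXMASK 0x1FF                              // 9 bits of index per level
    #define PXSHIFT(level) (PGSHIFT + (9 * (level)))
    #define PX(level, va) ((((uint64)(va)) >> PXSHIFT(level)) & PXMASK)

    int main(void)
    {
      uint64 va = 0x3FFFFFE000ULL;                    // hypothetical Sv39 address, below MAXVA

      // walk() indexes from level 2 (the root page table) down to level 0 (the leaf).
      for (int level = 2; level >= 0; level--)
        printf("level %d index: %llu\n", level, (unsigned long long)PX(level, va));

      // The low PGSHIFT bits are the offset within the 4096-byte page.
      printf("page offset: %llu\n", (unsigned long long)(va & ((1ULL << PGSHIFT) - 1)));
      return 0;
    }

Each nine-bit index selects one of the 512 PTEs in a page-table page, which is why walk() can write pte = &pagetable[PX(level, va)] at every level of the descent.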