log                48 kernel/log.c   struct log log;
log                59 kernel/log.c     initlock(&log.lock, "log");
log                60 kernel/log.c     log.start = sb->logstart;
log                61 kernel/log.c     log.dev = dev;
log                71 kernel/log.c     for (tail = 0; tail < log.lh.n; tail++) {
log                73 kernel/log.c         printf("recovering tail %d dst %d\n", tail, log.lh.block[tail]);
log                75 kernel/log.c       struct buf *lbuf = bread(log.dev, log.start+tail+1); // read log block
log                76 kernel/log.c       struct buf *dbuf = bread(log.dev, log.lh.block[tail]); // read dst
log                90 kernel/log.c     struct buf *buf = bread(log.dev, log.start);
log                93 kernel/log.c     log.lh.n = lh->n;
log                94 kernel/log.c     for (i = 0; i < log.lh.n; i++) {
log                95 kernel/log.c       log.lh.block[i] = lh->block[i];
log               106 kernel/log.c     struct buf *buf = bread(log.dev, log.start);
log               109 kernel/log.c     hb->n = log.lh.n;
log               110 kernel/log.c     for (i = 0; i < log.lh.n; i++) {
log               111 kernel/log.c       hb->block[i] = log.lh.block[i];
log               122 kernel/log.c     log.lh.n = 0;
log               130 kernel/log.c     acquire(&log.lock);
log               132 kernel/log.c       if(log.committing){
log               133 kernel/log.c         sleep(&log, &log.lock);
log               134 kernel/log.c       } else if(log.lh.n + (log.outstanding+1)*MAXOPBLOCKS > LOGBLOCKS){
log               136 kernel/log.c         sleep(&log, &log.lock);
log               138 kernel/log.c         log.outstanding += 1;
log               139 kernel/log.c         release(&log.lock);
log               152 kernel/log.c     acquire(&log.lock);
log               153 kernel/log.c     log.outstanding -= 1;
log               154 kernel/log.c     if(log.committing)
log               156 kernel/log.c     if(log.outstanding == 0){
log               158 kernel/log.c       log.committing = 1;
log               163 kernel/log.c       wakeup(&log);
log               165 kernel/log.c     release(&log.lock);
log               171 kernel/log.c       acquire(&log.lock);
log               172 kernel/log.c       log.committing = 0;
log               173 kernel/log.c       wakeup(&log);
log               174 kernel/log.c       release(&log.lock);
log               184 kernel/log.c     for (tail = 0; tail < log.lh.n; tail++) {
log               185 kernel/log.c       struct buf *to = bread(log.dev, log.start+tail+1); // log block
log               186 kernel/log.c       struct buf *from = bread(log.dev, log.lh.block[tail]); // cache block
log               197 kernel/log.c     if (log.lh.n > 0) {
log               201 kernel/log.c       log.lh.n = 0;
log               220 kernel/log.c     acquire(&log.lock);
log               221 kernel/log.c     if (log.lh.n >= LOGBLOCKS)
log               223 kernel/log.c     if (log.outstanding < 1)
log               226 kernel/log.c     for (i = 0; i < log.lh.n; i++) {
log               227 kernel/log.c       if (log.lh.block[i] == b->blockno)   // log absorption
log               230 kernel/log.c     log.lh.block[i] = b->blockno;
log               231 kernel/log.c     if (i == log.lh.n) {  // Add new block to log?
log               233 kernel/log.c       log.lh.n++;
log               235 kernel/log.c     release(&log.lock);
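
The lines above trace the logging layer's lifecycle: initlog() reads the on-disk header, recover_from_log() reinstalls any committed blocks, begin_op()/end_op() bracket a transaction, and log_write() records which cached blocks belong to it. As a reading aid, here is a minimal sketch of how filesystem code typically drives that API. It assumes the standard xv6-riscv kernel headers; example_update, dev, and blockno are placeholders for illustration and are not part of kernel/log.c.

    #include "types.h"
    #include "riscv.h"
    #include "defs.h"
    #include "param.h"
    #include "spinlock.h"
    #include "sleeplock.h"
    #include "fs.h"
    #include "buf.h"

    // Hypothetical helper showing the usual transaction pattern: every block
    // modified between begin_op() and end_op() is announced to the log with
    // log_write() instead of being written straight to disk.
    void
    example_update(uint dev, uint blockno)   // placeholder device and block
    {
      struct buf *bp;

      begin_op();                 // reserve log space; sleeps if the log is full or committing
      bp = bread(dev, blockno);   // read the block through the buffer cache
      bp->data[0] = 0xff;         // modify only the cached copy
      log_write(bp);              // record blockno in log.lh.block[] (duplicates are absorbed)
      brelse(bp);                 // release the buffer; the data stays cached until commit
      end_op();                   // the last outstanding op triggers commit()
    }

The point of the pattern is that the disk never sees a partially applied operation: commit() first copies the modified cache blocks into the on-disk log and writes the header (the commit point), and only then installs them at their home locations, which is exactly what install_trans() replays after a crash.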