def _after(self, t, rv):
    """Post-build hook for target t.

    Runs sanity checks and the first-stage completion handler, commits
    state, and — no matter what — invokes the second-stage cleanup
    handler with the (possibly updated) result code.
    """
    result = rv
    try:
        state.check_sane()
        result = self._after1(t, result)
        state.commit()
    finally:
        # Cleanup always runs, even if check_sane/_after1/commit raised;
        # result keeps the original rv in that case.
        self._after2(result)
def _start_unlocked(self, dirty):
    """Launch an out-of-band 'redo-unlocked' for some sub-objects.

    Used when we can't yet tell whether the target needs rebuilding
    (children look dirty but checksums may prove them clean).
    redo-unlocked does all the sf updating itself, so we skip _after1;
    we keep holding our lock to avoid a race — hence "unlocked": the
    child deliberately takes no lock of its own.
    """
    cmd = ['redo-unlocked', self.sf.name]
    cmd.extend(d.name for d in dirty)
    log('(%s)\n' % _nice(self.t))
    state.commit()

    def run():
        # Child side: normalize cwd/depth, then replace ourselves.
        os.chdir(vars.BASE)
        os.environ['REDO_DEPTH'] = vars.DEPTH + ' '
        # python ignores SIGPIPE; restore the default before exec.
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
        os.execvp(cmd[0], cmd)
        assert (0)  # returns only if there's an exception

    def after(t, rv):
        return self._after2(rv)

    jwack.start_job(self.t, run, after)
def _start_unlocked(self, dirty):
    """Launch an out-of-band 'redo-unlocked' for some sub-objects.

    This happens when we're not quite sure if t needs to be built or
    not (because some children look dirty, but might turn out to be
    clean thanks to checksums).  We have to call redo-unlocked to
    figure it all out.

    Note: redo-unlocked will handle all the updating of sf, so we
    don't have to do it here, nor call _after1.  However, we have to
    hold onto the lock because otherwise we would introduce a race
    condition; that's why it's called redo-unlocked, because it
    doesn't grab a lock.
    """
    argv = ['redo-unlocked', self.sf.name] + [d.name for d in dirty]
    log('(%s)\n' % _nice(self.t))
    state.commit()

    def run():
        os.chdir(vars.BASE)
        os.environ['REDO_DEPTH'] = vars.DEPTH + ' '
        # FIX: python sets SIGPIPE to SIG_IGN at startup, and ignored
        # signal dispositions are inherited across execvp(), so without
        # this the exec'd redo-unlocked would never die on a broken
        # pipe.  The sibling implementation of this method already does
        # this reset; this one was missing it.
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
        os.execvp(argv[0], argv)
        assert (0)  # returns only if there's an exception

    def after(t, rv):
        return self._after2(rv)

    jwack.start_job(self.t, run, after)
def _start_oob(self, dirty):
    """Launch an out-of-band 'redo-oob' for some sub-objects.

    This happens when we're not quite sure if t needs to be built or
    not (because some children look dirty, but might turn out to be
    clean thanks to checksums).  We have to call redo-oob to figure it
    all out.

    Note: redo-oob will handle all the updating of sf, so we don't
    have to do it here, nor call _after1.
    """
    argv = ['redo-oob', self.sf.name] + [d.name for d in dirty]
    log('(%s)\n' % _nice(self.t))
    state.commit()

    def run():
        os.chdir(vars.BASE)
        os.environ['REDO_DEPTH'] = vars.DEPTH + ' '
        # FIX: python sets SIGPIPE to SIG_IGN at startup, and ignored
        # signal dispositions are inherited across execvp(), so without
        # this the exec'd redo-oob would never die on a broken pipe.
        # Matches the reset already done by _start_unlocked elsewhere
        # in this file.
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
        os.execvp(argv[0], argv)
        assert(0)  # returns only if there's an exception

    def after(t, rv):
        return self._after2(rv)

    jwack.start_job(self.t, run, after)
def _start_do(self):
    """Decide how to (re)build target t and kick off its .do script.

    Handles three early-exit cases before actually running anything:
    a file modified outside redo (override), a pre-existing source
    file we never generated (static), and "no rule to make it".
    Otherwise it prepares temp output files and argv for the dofile
    and hands the job to jwack.
    """
    assert(self.lock.owned)
    t = self.t
    sf = self.sf
    newstamp = sf.read_stamp()
    # Override detection: a file we generated, which didn't fail last
    # run, still exists, but whose stamp no longer matches what we
    # recorded (or was already flagged as an override).
    if (sf.is_generated and
            not sf.failed_runid and
            newstamp != state.STAMP_MISSING and
            (sf.stamp != newstamp or sf.is_override)):
        state.warn_override(_nice(t))
        sf.set_override()
        sf.set_checked()
        sf.save()
        return self._after2(0)
    # NOTE(review): 't + /.' exists only if t is a directory — so this
    # branch matches "t exists and is not a directory"; confirm.
    if (os.path.exists(t) and not os.path.exists(t + '/.') and
            not sf.is_generated):
        # an existing source file that was not generated by us.
        # This step is mentioned by djb in his notes.
        # For example, a rule called default.c.do could be used to try
        # to produce hello.c, but we don't want that to happen if
        # hello.c was created by the end user.
        # FIXME: always refuse to redo any file that was modified outside
        # of redo?  That would make it easy for someone to override a
        # file temporarily, and could be undone by deleting the file.
        debug2("-- static (%r)\n" % t)
        sf.set_static()
        sf.save()
        return self._after2(0)
    sf.zap_deps1()
    (dodir, dofile, basedir, basename, ext) = _find_do_file(sf)
    if not dofile:
        if os.path.exists(t):
            # No rule, but the file exists: treat it as static source.
            sf.set_static()
            sf.save()
            return self._after2(0)
        else:
            err('no rule to make %r\n' % t)
            return self._after2(1)
    # Fresh temp files for stdout capture ($3-style output).
    unlink(self.tmpname1)
    unlink(self.tmpname2)
    ffd = os.open(self.tmpname1, os.O_CREAT|os.O_RDWR|os.O_EXCL, 0666)
    close_on_exec(ffd, True)
    self.f = os.fdopen(ffd, 'w+')
    # this will run in the dofile's directory, so use only basenames here
    argv = ['sh', '-e',
            dofile,
            basename,  # target name (no extension)
            ext,  # extension (if any), including leading dot
            os.path.join(basedir, os.path.basename(self.tmpname2))  # temp output file name
            ]
    if vars.VERBOSE: argv[1] += 'v'
    if vars.XTRACE: argv[1] += 'x'
    if vars.VERBOSE or vars.XTRACE: log_('\n')
    # Honor a '#!/...' shebang in the dofile instead of forcing 'sh -e'.
    # NOTE(review): split(' ') leaves empty strings if the shebang has
    # multiple consecutive spaces — presumably tolerated upstream.
    firstline = open(os.path.join(dodir, dofile)).readline().strip()
    if firstline.startswith('#!/'):
        argv[0:2] = firstline[2:].split(' ')
    log('%s\n' % _nice(t))
    self.dodir = dodir
    self.basename = basename
    self.ext = ext
    self.argv = argv
    sf.is_generated = True
    sf.save()
    # The dofile itself becomes a static dependency.
    dof = state.File(name=os.path.join(dodir, dofile))
    dof.set_static()
    dof.save()
    state.commit()
    jwack.start_job(t, self._do_subproc, self._after)
def main(targets, shouldbuildfunc):
    """Build each target, parallelizing via jwack job tokens.

    First pass starts every target we can lock immediately; contended
    targets are deferred.  Second pass waits on the deferred ones one
    at a time.  Returns a process exit code (0 ok, nonzero failure).
    """
    retcode = [0]  # a list so that it can be reassigned from done()
    if vars.SHUFFLE:
        import random
        random.shuffle(targets)
    locked = []

    def done(t, rv):
        if rv:
            retcode[0] = 1

    # In the first cycle, we just build as much as we can without worrying
    # about any lock contention.  If someone else has it locked, we move on.
    seen = {}
    for t in targets:
        if t in seen:
            continue
        seen[t] = 1
        # Flush state before potentially blocking on a job token.
        if not jwack.has_token():
            state.commit()
        jwack.get_token(t)
        if retcode[0] and not vars.KEEP_GOING:
            break
        if not state.check_sane():
            err('.redo directory disappeared; cannot continue.\n')
            retcode[0] = 205
            break
        f = state.File(name=t)
        lock = state.Lock(f.id)
        if vars.UNLOCKED:
            # Caller already holds the lock (redo-unlocked mode).
            lock.owned = True
        else:
            lock.trylock()
        if not lock.owned:
            if vars.DEBUG_LOCKS:
                log('%s (locked...)\n' % _nice(t))
            # Someone else is building it; revisit in the second pass.
            locked.append((f.id,t))
        else:
            BuildJob(t, f, lock, shouldbuildfunc, done).start()
        del lock
    # Now we've built all the "easy" ones.  Go back and just wait on the
    # remaining ones one by one.  There's no reason to do it any more
    # efficiently, because if these targets were previously locked, that
    # means someone else was building them; thus, we probably won't need to
    # do anything.  The only exception is if we're invoked as redo instead
    # of redo-ifchange; then we have to redo it even if someone else already
    # did.  But that should be rare.
    while locked or jwack.running():
        state.commit()
        jwack.wait_all()
        # at this point, we don't have any children holding any tokens, so
        # it's okay to block below.
        if retcode[0] and not vars.KEEP_GOING:
            break
        if locked:
            if not state.check_sane():
                err('.redo directory disappeared; cannot continue.\n')
                retcode[0] = 205
                break
            fid,t = locked.pop(0)
            lock = state.Lock(fid)
            lock.trylock()
            while not lock.owned:
                if vars.DEBUG_LOCKS:
                    warn('%s (WAITING)\n' % _nice(t))
                # this sequence looks a little silly, but the idea is to
                # give up our personal token while we wait for the lock to
                # be released; but we should never run get_token() while
                # holding a lock, or we could cause deadlocks.
                jwack.release_mine()
                lock.waitlock()
                lock.unlock()
                jwack.get_token(t)
                lock.trylock()
            assert(lock.owned)
            if vars.DEBUG_LOCKS:
                log('%s (...unlocked!)\n' % _nice(t))
            if state.File(name=t).is_failed():
                # The other builder already tried and failed.
                err('%s: failed in another thread\n' % _nice(t))
                retcode[0] = 2
                lock.unlock()
            else:
                BuildJob(t, state.File(id=fid), lock, shouldbuildfunc, done).start()
    state.commit()
    return retcode[0]
# Compute a sha1 checksum of stdin and record it as the stamp for the
# current target (if any).  If the checksum differs from the recorded
# one, the target is marked changed; otherwise merely checked.
hasher = hashlib.sha1()
chunk = os.read(0, 4096)
while chunk:
    hasher.update(chunk)
    chunk = os.read(0, 4096)
csum = hasher.hexdigest()
if not vars.TARGET:
    # Not invoked while building a target: nothing to stamp.
    sys.exit(0)
me = os.path.join(vars.STARTDIR, os.path.join(vars.PWD, vars.TARGET))
f = state.File(name=me)
changed = (csum != f.csum)
debug2('%s: old = %s\n' % (f.name, f.csum))
debug2('%s: sum = %s (%s)\n' % (f.name, csum,
                                changed and 'changed' or 'unchanged'))
f.is_generated = True
f.is_override = False
f.failed_runid = None
if changed:
    f.set_changed()  # update_stamp might not do this if the mtime is identical
    f.csum = csum
else:
    # unchanged
    f.set_checked()
f.save()
state.commit()
# NOTE(review): the first five statements below contain a bare
# 'return', so they are the tail of a function whose 'def' line is not
# in this chunk — presumably should_build(t), referenced further down;
# confirm against the full file.
f = state.File(name=t)
if f.is_failed():
    raise builder.ImmediateReturn(32)
dirty = deps.isdirty(f, depth = '', max_changed = vars.RUNID)
# If the only dirty thing is f itself, collapse to the DIRTY sentinel.
return dirty==[f] and deps.DIRTY or dirty


# --- redo-ifchange script body: register dependencies on the parent
# --- target (if any), then build all command-line targets.
rv = 202
try:
    if vars.TARGET and not vars.UNLOCKED:
        # We were invoked from within a .do script: our targets become
        # dependencies of that parent target.
        me = os.path.join(vars.STARTDIR, os.path.join(vars.PWD, vars.TARGET))
        f = state.File(name=me)
        debug2('TARGET: %r %r %r\n' % (vars.STARTDIR, vars.PWD, vars.TARGET))
    else:
        f = me = None
        debug2('redo-ifchange: not adding depends.\n')
    try:
        targets = sys.argv[1:]
        if f:
            for t in targets:
                # 'm' presumably marks a "modified-time" style dep —
                # confirm against state.File.add_dep.
                f.add_dep('m', t)
            f.save()
        rv = builder.main(targets, should_build)
    finally:
        # Always hand job-server tokens back, even on failure.
        jwack.force_return_tokens()
except KeyboardInterrupt:
    sys.exit(200)
state.commit()
sys.exit(rv)
def _start_do(self):
    """Decide how to (re)build target t and kick off its .do script.

    Handles three early-exit cases before actually running anything:
    a file modified outside redo (override), a pre-existing source
    file we never generated (static), and "no rule to make it".
    Otherwise it prepares temp output files and argv for the dofile
    and hands the job to jwack.
    """
    assert (self.lock.owned)
    t = self.t
    sf = self.sf
    newstamp = sf.read_stamp()
    # Override detection: a generated file that still exists but whose
    # stamp no longer matches what we recorded (or was already flagged
    # as overridden).
    if (sf.is_generated and
            newstamp != state.STAMP_MISSING and
            (sf.stamp != newstamp or sf.is_override)):
        state.warn_override(_nice(t))
        if not sf.is_override:
            # First time we notice: show old vs. new stamp for debugging.
            warn('%s - old: %r\n' % (_nice(t), sf.stamp))
            warn('%s - new: %r\n' % (_nice(t), newstamp))
        sf.set_override()
        sf.set_checked()
        sf.save()
        return self._after2(0)
    # NOTE(review): isdir(t + '/.') is true only when t is a directory,
    # so this matches "t exists and is not a directory"; confirm.
    if (os.path.exists(t) and not os.path.isdir(t + '/.') and
            not sf.is_generated):
        # an existing source file that was not generated by us.
        # This step is mentioned by djb in his notes.
        # For example, a rule called default.c.do could be used to try
        # to produce hello.c, but we don't want that to happen if
        # hello.c was created by the end user.
        debug2("-- static (%r)\n" % t)
        sf.set_static()
        sf.save()
        return self._after2(0)
    sf.zap_deps1()
    (dodir, dofile, basedir, basename, ext) = paths.find_do_file(sf)
    if not dofile:
        if os.path.exists(t):
            # No rule, but the file exists: treat it as static source.
            sf.set_static()
            sf.save()
            return self._after2(0)
        else:
            err('no rule to make %r\n' % t)
            return self._after2(1)
    # Fresh temp files for output capture.
    unlink(self.tmpname1)
    unlink(self.tmpname2)
    ffd = os.open(self.tmpname1, os.O_CREAT | os.O_RDWR | os.O_EXCL, 0666)
    close_on_exec(ffd, True)
    self.f = os.fdopen(ffd, 'w+')
    # this will run in the dofile's directory, so use only basenames here
    arg1 = basename + ext  # target name (including extension)
    arg2 = basename  # target name (without extension)
    argv = [
        'sh', '-e',
        dofile,
        arg1,
        arg2,
        # temp output file name
        state.relpath(os.path.abspath(self.tmpname2), dodir),
    ]
    if vars.VERBOSE: argv[1] += 'v'
    if vars.XTRACE: argv[1] += 'x'
    if vars.VERBOSE or vars.XTRACE: log_('\n')
    # Honor a '#!/...' shebang in the dofile instead of forcing 'sh -e'.
    firstline = open(os.path.join(dodir, dofile)).readline().strip()
    if firstline.startswith('#!/'):
        argv[0:2] = firstline[2:].split(' ')
    log('%s\n' % _nice(t))
    self.dodir = dodir
    self.basename = basename
    self.ext = ext
    self.argv = argv
    sf.is_generated = True
    sf.save()
    # The dofile itself becomes a static dependency.
    dof = state.File(name=os.path.join(dodir, dofile))
    dof.set_static()
    dof.save()
    state.commit()
    jwack.start_job(t, self._do_subproc, self._after)
def main(targets, shouldbuildfunc):
    """Build each target, parallelizing via jwack job tokens.

    First pass starts every target we can lock immediately; contended
    targets are deferred.  Second pass waits on the deferred ones one
    at a time, with exponential backoff and cyclic-dependency
    detection.  Returns a process exit code (0 ok, nonzero failure).
    """
    retcode = [0]  # a list so that it can be reassigned from done()
    if vars.SHUFFLE:
        import random
        random.shuffle(targets)
    locked = []

    def done(t, rv):
        if rv:
            retcode[0] = 1

    # In the first cycle, we just build as much as we can without worrying
    # about any lock contention.  If someone else has it locked, we move on.
    seen = {}
    lock = None
    for t in targets:
        if not t:
            err('cannot build the empty target ("").\n')
            retcode[0] = 204
            break
        assert (state.is_flushed())
        if t in seen:
            continue
        seen[t] = 1
        # Flush state before potentially blocking on a job token.
        if not jwack.has_token():
            state.commit()
        jwack.get_token(t)
        if retcode[0] and not vars.KEEP_GOING:
            break
        if not state.check_sane():
            err('.redo directory disappeared; cannot continue.\n')
            retcode[0] = 205
            break
        f = state.File(name=t)
        lock = state.Lock(f.id)
        if vars.UNLOCKED:
            # Caller already holds the lock (redo-unlocked mode).
            lock.owned = True
        else:
            lock.trylock()
        if not lock.owned:
            if vars.DEBUG_LOCKS:
                log('%s (locked...)\n' % _nice(t))
            # Someone else is building it; revisit in the second pass.
            locked.append((f.id, t))
        else:
            # We had to create f before we had a lock, because we need f.id
            # to make the lock.  But someone may have updated the state
            # between then and now.
            # FIXME: separate obtaining the fid from creating the File.
            # FIXME: maybe integrate locking into the File object?
            f.refresh()
            BuildJob(t, f, lock, shouldbuildfunc, done).start()
        state.commit()
        assert (state.is_flushed())
        # Drop our reference to the lock before the next iteration.
        lock = None
        del lock
    # Now we've built all the "easy" ones.  Go back and just wait on the
    # remaining ones one by one.  There's no reason to do it any more
    # efficiently, because if these targets were previously locked, that
    # means someone else was building them; thus, we probably won't need to
    # do anything.  The only exception is if we're invoked as redo instead
    # of redo-ifchange; then we have to redo it even if someone else already
    # did.  But that should be rare.
    while locked or jwack.running():
        state.commit()
        jwack.wait_all()
        # at this point, we don't have any children holding any tokens, so
        # it's okay to block below.
        if retcode[0] and not vars.KEEP_GOING:
            break
        if locked:
            if not state.check_sane():
                err('.redo directory disappeared; cannot continue.\n')
                retcode[0] = 205
                break
            fid, t = locked.pop(0)
            lock = state.Lock(fid)
            backoff = 0.01
            lock.trylock()
            while not lock.owned:
                # Don't spin with 100% CPU while we fight for the lock.
                import random
                time.sleep(random.random() * min(backoff, 1.0))
                backoff *= 2
                if vars.DEBUG_LOCKS:
                    warn('%s (WAITING)\n' % _nice(t))
                # this sequence looks a little silly, but the idea is to
                # give up our personal token while we wait for the lock to
                # be released; but we should never run get_token() while
                # holding a lock, or we could cause deadlocks.
                jwack.release_mine()
                try:
                    lock.waitlock()
                except state.CyclicDependencyError:
                    err('cyclic dependency while building %s\n' % _nice(t))
                    jwack.get_token(t)
                    retcode[0] = 208
                    return retcode[0]
                lock.unlock()
                jwack.get_token(t)
                lock.trylock()
            assert (lock.owned)
            if vars.DEBUG_LOCKS:
                log('%s (...unlocked!)\n' % _nice(t))
            if state.File(name=t).is_failed():
                # The other builder already tried and failed.
                err('%s: failed in another thread\n' % _nice(t))
                retcode[0] = 2
                lock.unlock()
            else:
                BuildJob(t, state.File(id=fid), lock, shouldbuildfunc, done).start()
            lock = None
    state.commit()
    return retcode[0]
def main(targets, shouldbuildfunc):
    """Build each target, parallelizing via the jobs token pool.

    First pass starts every target we can lock immediately; contended
    targets are deferred.  Second pass waits on the deferred ones one
    at a time, aborting if the contended target is held by one of our
    own parents (a dependency cycle).  Returns a process exit code.
    """
    retcode = [0]  # a list so that it can be reassigned from done()
    if vars.SHUFFLE:
        import random
        random.shuffle(targets)
    locked = []

    def done(t, rv):
        if rv:
            retcode[0] = 1

    # In the first cycle, we just build as much as we can without worrying
    # about any lock contention.  If someone else has it locked, we move on.
    seen = {}
    lock = None
    for t in targets:
        if t in seen:
            continue
        seen[t] = 1
        # Flush state before potentially blocking on a job token.
        if not jobs.has_token():
            state.commit()
        jobs.get_token(t)
        if retcode[0] and not vars.KEEP_GOING:
            break
        if not state.check_sane():
            err('.redo directory disappeared; cannot continue.\n')
            retcode[0] = 205
            break
        f = state.File(name=t)
        lock = state.Lock(f.id)
        if vars.UNLOCKED:
            # Caller already holds the lock (redo-unlocked mode).
            lock.owned = True
        else:
            lock.trylock()
        if not lock.owned:
            if vars.DEBUG_LOCKS:
                log('%s (locked...)\n' % _nice(t))
            # Someone else is building it; revisit in the second pass.
            locked.append((f.id, t))
        else:
            BuildJob(t, f, lock, shouldbuildfunc, done).start()
        del lock
    # Now we've built all the "easy" ones.  Go back and just wait on the
    # remaining ones one by one.  There's no reason to do it any more
    # efficiently, because if these targets were previously locked, that
    # means someone else was building them; thus, we probably won't need to
    # do anything.  The only exception is if we're invoked as redo instead
    # of redo-ifchange; then we have to redo it even if someone else already
    # did.  But that should be rare.
    while locked or jobs.running():
        state.commit()
        jobs.wait_all()
        # at this point, we don't have any children holding any tokens, so
        # it's okay to block below.
        if retcode[0] and not vars.KEEP_GOING:
            break
        if locked:
            if not state.check_sane():
                err('.redo directory disappeared; cannot continue.\n')
                retcode[0] = 205
                break
            fid, t = locked.pop(0)
            # targets_seen presumably tracks the chain of targets our
            # parent redo processes are building — confirm its contract.
            target_list = targets_seen.get()
            nice_t = _nice(t)
            if nice_t in target_list:
                # Target locked by parent: cyclic dependence
                err('encountered a dependence cycle:\n')
                _print_cycle(target_list, nice_t)
                retcode[0] = 209
                break
            lock = state.Lock(fid)
            lock.trylock()
            while not lock.owned:
                if vars.DEBUG_LOCKS:
                    warn('%s (WAITING)\n' % _nice(t))
                # this sequence looks a little silly, but the idea is to
                # give up our personal token while we wait for the lock to
                # be released; but we should never run get_token() while
                # holding a lock, or we could cause deadlocks.
                jobs.put_token()
                lock.waitlock()
                lock.unlock()
                jobs.get_token(t)
                lock.trylock()
            assert (lock.owned)
            if vars.DEBUG_LOCKS:
                log('%s (...unlocked!)\n' % _nice(t))
            if state.File(name=t).is_failed():
                # The other builder already tried and failed.
                err('%s: failed in another thread\n' % _nice(t))
                retcode[0] = 2
                lock.unlock()
            else:
                BuildJob(t, state.File(id=fid), lock, shouldbuildfunc, done).start()
    state.commit()
    return retcode[0]