Example #1
import vars, state, deps, builder  # modules used below; not shown in the excerpt

def should_build(t):
    f = state.File(name=t)
    if f.is_failed():
        raise builder.ImmediateReturn(32)
    dirty = deps.isdirty(f, depth='', max_changed=vars.RUNID,
                         already_checked=[])
    # dirty == [f] means only the target itself needs rebuilding, so report
    # DIRTY; anything else (CLEAN, DIRTY, or a list of other targets to
    # build first) is passed through unchanged.
    return f.is_generated, dirty == [f] and deps.DIRTY or dirty
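
For orientation, this predicate is not called directly; it is handed to builder.main(), which invokes it once per target (the call sites appear in Examples #3 and #5 below). A minimal sketch of that wiring; the argument handling here is an assumption, not part of the excerpt:

import sys
import builder

targets = sys.argv[1:]
# builder.main() consults should_build() for each target and returns a
# process-style exit code.
rv = builder.main(targets, should_build)
sys.exit(rv)
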
Example #2
import vars, state, deps, builder  # modules used below; not shown in the excerpt

def should_build(t):
    f = state.File(name=t)
    if f.is_failed():
        raise builder.ImmediateReturn(32)
    status = deps.isdirty(f, depth='', max_changed=vars.RUNID)
    if status == [f]:
        return deps.DIRTY
    else:
        # FIXME: This is really confusing!
        # The status can be any one of deps.CLEAN or deps.DIRTY or
        # a list of targets to build with redo-unlocked.
        return status
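
Given the FIXME above, here is a small sketch of how a caller might branch on the three documented cases; rebuild() and build_unlocked() are placeholders for illustration, not redo APIs:

status = should_build(t)
if status == deps.CLEAN:
    pass                        # nothing to do for this target
elif status == deps.DIRTY:
    rebuild(t)                  # placeholder: run the target's .do file
else:
    # a list of targets that must first be built with redo-unlocked
    for dep in status:
        build_unlocked(dep)     # placeholder helper
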
Example #3
File: redo.py Project: tonyg/redo
        os.environ['REDO_COLOR'] = '0'

import vars_init
vars_init.init(targets)

import vars, state, builder, jwack
from logs import warn, err

try:
    if vars_init.is_toplevel:
        builder.start_stdin_log_reader(status=opt.status, details=opt.details,
            pretty=opt.pretty, color=opt.color,
            debug_locks=opt.debug_locks, debug_pids=opt.debug_pids)
    for t in targets:
        if os.path.exists(t):
            f = state.File(name=t)
            if not f.is_generated:
                warn('%s: exists and not marked as generated; not redoing.\n'
                     % f.nicename())
    state.rollback()
    
    j = atoi(opt.jobs or 1)
    if j < 1 or j > 1000:
        err('invalid --jobs value: %r\n' % opt.jobs)
    jwack.setup(j)
    try:
        assert(state.is_flushed())
        retcode = builder.main(targets, lambda t: (True, True))
        assert(state.is_flushed())
    finally:
        try:
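
The excerpt is cut off mid-cleanup, but the notable call is builder.main(targets, lambda t: (True, True)): plain redo treats every named target as generated and always dirty, whereas redo-ifchange passes the should_build() predicate from the other examples. Side by side (the tuple interpretation is inferred from Example #1, not stated here):

# plain redo: rebuild every target unconditionally
retcode = builder.main(targets, lambda t: (True, True))

# redo-ifchange: consult the dependency database first (see Example #5)
rv = builder.main(targets, should_build)
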
Example #4
#!/usr/bin/env python
import sys, os
import state
from log import err

if len(sys.argv[1:]) < 2:
    err('%s: at least 2 arguments expected.\n' % sys.argv[0])
    sys.exit(1)

target = sys.argv[1]
deps = sys.argv[2:]

for d in deps:
    assert(d != target)

me = state.File(name=target)

# Build the known dependencies of our primary target.  This *does* require
# grabbing locks.
os.environ['REDO_NO_OOB'] = '1'
argv = ['redo-ifchange'] + deps
rv = os.spawnvp(os.P_WAIT, argv[0], argv)
if rv:
    sys.exit(rv)

# We know our caller already owns the lock on target, so we don't have to
# acquire another one; tell redo-ifchange about that.  Also, REDO_NO_OOB
# persists from up above, because we don't want to do OOB now either.
# (Actually it's most important for the primary target, since it's the one
# who initiated the OOB in the first place.)
os.environ['REDO_UNLOCKED'] = '1'
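
The excerpt stops here, but the comments imply the final step: re-run redo-ifchange on the primary target itself, now that REDO_UNLOCKED tells it the lock is already held by our caller. A sketch of that elided tail, assumed rather than copied from the source:

# REDO_UNLOCKED and REDO_NO_OOB are both still set in os.environ here.
argv = ['redo-ifchange', target]
rv = os.spawnvp(os.P_WAIT, argv[0], argv)
sys.exit(rv)
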
Example #5
from log import debug, debug2, err

def should_build(t):
    f = state.File(name=t)
    if f.is_failed():
        raise builder.ImmediateReturn(32)
    dirty = deps.isdirty(f, depth='', max_changed=vars.RUNID)
    # same collapsing as in the other variants: [f] becomes deps.DIRTY,
    # everything else (CLEAN, DIRTY, or a list of targets) passes through.
    return dirty == [f] and deps.DIRTY or dirty


rv = 202
try:
    if vars.TARGET and not vars.UNLOCKED:
        me = os.path.join(vars.STARTDIR, 
                          os.path.join(vars.PWD, vars.TARGET))
        f = state.File(name=me)
        debug2('TARGET: %r %r %r\n' % (vars.STARTDIR, vars.PWD, vars.TARGET))
    else:
        f = me = None
        debug2('redo-ifchange: not adding depends.\n')
    try:
        targets = sys.argv[1:]
        if f:
            for t in targets:
                f.add_dep('m', t)
            f.save()
        rv = builder.main(targets, should_build)
    finally:
        jwack.force_return_tokens()
except KeyboardInterrupt:
    sys.exit(200)
Example #6
 def _start_do(self):
     assert (self.lock.owned)
     t = self.t
     sf = self.sf
     newstamp = sf.read_stamp()
     if (sf.is_generated and newstamp != state.STAMP_MISSING
             and (sf.stamp != newstamp or sf.is_override)):
         state.warn_override(_nice(t))
         if not sf.is_override:
             warn('%s - old: %r\n' % (_nice(t), sf.stamp))
             warn('%s - new: %r\n' % (_nice(t), newstamp))
             sf.set_override()
         sf.set_checked()
         sf.save()
         return self._after2(0)
     if (os.path.exists(t) and not os.path.isdir(t + '/.')
             and not sf.is_generated):
         # an existing source file that was not generated by us.
         # This step is mentioned by djb in his notes.
         # For example, a rule called default.c.do could be used to try
         # to produce hello.c, but we don't want that to happen if
         # hello.c was created by the end user.
         debug2("-- static (%r)\n" % t)
         sf.set_static()
         sf.save()
         return self._after2(0)
     sf.zap_deps1()
     (dodir, dofile, basedir, basename, ext) = paths.find_do_file(sf)
     if not dofile:
         if os.path.exists(t):
             sf.set_static()
             sf.save()
             return self._after2(0)
         else:
             err('no rule to make %r\n' % t)
             return self._after2(1)
     unlink(self.tmpname1)
     unlink(self.tmpname2)
     ffd = os.open(self.tmpname1, os.O_CREAT | os.O_RDWR | os.O_EXCL, 0666)
     close_on_exec(ffd, True)
     self.f = os.fdopen(ffd, 'w+')
     # this will run in the dofile's directory, so use only basenames here
     arg1 = basename + ext  # target name (including extension)
     arg2 = basename  # target name (without extension)
     argv = [
         'sh',
         '-e',
         dofile,
         arg1,
         arg2,
         # temp output file name
         state.relpath(os.path.abspath(self.tmpname2), dodir),
     ]
     if vars.VERBOSE: argv[1] += 'v'
     if vars.XTRACE: argv[1] += 'x'
     if vars.VERBOSE or vars.XTRACE: log_('\n')
     firstline = open(os.path.join(dodir, dofile)).readline().strip()
     if firstline.startswith('#!/'):
         argv[0:2] = firstline[2:].split(' ')
     log('%s\n' % _nice(t))
     self.dodir = dodir
     self.basename = basename
     self.ext = ext
     self.argv = argv
     sf.is_generated = True
     sf.save()
     dof = state.File(name=os.path.join(dodir, dofile))
     dof.set_static()
     dof.save()
     state.commit()
     jwack.start_job(t, self._do_subproc, self._after)
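
To make the argv construction concrete, here is roughly what it yields for the default.c.do case mentioned in the comment, assuming hello.c is the target and the .do file sits in the same directory (all names here are illustrative, not taken from the source):

# paths.find_do_file() would give dofile='default.c.do', basename='hello',
# ext='.c', so the child shell is started as:
argv = [
    'sh', '-e', 'default.c.do',
    'hello.c',              # $1: target name, including extension
    'hello',                # $2: target name with the matched extension stripped
    'hello.c.redo2.tmp',    # $3: temporary output file (exact name is illustrative)
]
# with vars.VERBOSE / vars.XTRACE set, 'sh -e' becomes 'sh -ev' / 'sh -ex'.
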
Example #7
def main(targets, shouldbuildfunc):
    retcode = [0]  # a list so that it can be reassigned from done()
    if vars.SHUFFLE:
        import random
        random.shuffle(targets)

    locked = []

    def done(t, rv):
        if rv:
            retcode[0] = 1

    # In the first cycle, we just build as much as we can without worrying
    # about any lock contention.  If someone else has it locked, we move on.
    seen = {}
    lock = None
    for t in targets:
        if not t:
            err('cannot build the empty target ("").\n')
            retcode[0] = 204
            break
        assert (state.is_flushed())
        if t in seen:
            continue
        seen[t] = 1
        if not jwack.has_token():
            state.commit()
        jwack.get_token(t)
        if retcode[0] and not vars.KEEP_GOING:
            break
        if not state.check_sane():
            err('.redo directory disappeared; cannot continue.\n')
            retcode[0] = 205
            break
        f = state.File(name=t)
        lock = state.Lock(f.id)
        if vars.UNLOCKED:
            lock.owned = True
        else:
            lock.trylock()
        if not lock.owned:
            if vars.DEBUG_LOCKS:
                log('%s (locked...)\n' % _nice(t))
            locked.append((f.id, t))
        else:
            # We had to create f before we had a lock, because we need f.id
            # to make the lock.  But someone may have updated the state
            # between then and now.
            # FIXME: separate obtaining the fid from creating the File.
            # FIXME: maybe integrate locking into the File object?
            f.refresh()
            BuildJob(t, f, lock, shouldbuildfunc, done).start()
        state.commit()
        assert (state.is_flushed())
        lock = None

    del lock

    # Now we've built all the "easy" ones.  Go back and just wait on the
    # remaining ones one by one.  There's no reason to do it any more
    # efficiently, because if these targets were previously locked, that
    # means someone else was building them; thus, we probably won't need to
    # do anything.  The only exception is if we're invoked as redo instead
    # of redo-ifchange; then we have to redo it even if someone else already
    # did.  But that should be rare.
    while locked or jwack.running():
        state.commit()
        jwack.wait_all()
        # at this point, we don't have any children holding any tokens, so
        # it's okay to block below.
        if retcode[0] and not vars.KEEP_GOING:
            break
        if locked:
            if not state.check_sane():
                err('.redo directory disappeared; cannot continue.\n')
                retcode[0] = 205
                break
            fid, t = locked.pop(0)
            lock = state.Lock(fid)
            backoff = 0.01
            lock.trylock()
            while not lock.owned:
                # Don't spin with 100% CPU while we fight for the lock.
                import random
                time.sleep(random.random() * min(backoff, 1.0))
                backoff *= 2
                if vars.DEBUG_LOCKS:
                    warn('%s (WAITING)\n' % _nice(t))
                # this sequence looks a little silly, but the idea is to
                # give up our personal token while we wait for the lock to
                # be released; but we should never run get_token() while
                # holding a lock, or we could cause deadlocks.
                jwack.release_mine()
                try:
                    lock.waitlock()
                except state.CyclicDependencyError:
                    err('cyclic dependency while building %s\n' % _nice(t))
                    jwack.get_token(t)
                    retcode[0] = 208
                    return retcode[0]
                lock.unlock()
                jwack.get_token(t)
                lock.trylock()
            assert (lock.owned)
            if vars.DEBUG_LOCKS:
                log('%s (...unlocked!)\n' % _nice(t))
            if state.File(name=t).is_failed():
                err('%s: failed in another thread\n' % _nice(t))
                retcode[0] = 2
                lock.unlock()
            else:
                BuildJob(t, state.File(id=fid), lock, shouldbuildfunc,
                         done).start()
            lock = None
    state.commit()
    return retcode[0]
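
A small Python note on the code above: retcode is a one-element list because this is Python 2, where an inner function such as done() cannot rebind a plain local of the enclosing function (there is no nonlocal). A generic illustration of the pattern, unrelated to redo itself:

def run_all(jobs):
    failed = [0]                 # mutable cell shared with the closure below
    def done(rv):
        if rv:
            failed[0] = 1        # mutating the list works; 'failed = 1' would not
    for job in jobs:
        done(job())
    return failed[0]
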
Example #8
def main(targets, shouldbuildfunc):
    retcode = [0]  # a list so that it can be reassigned from done()
    if vars.SHUFFLE:
        import random
        random.shuffle(targets)

    locked = []

    def done(t, rv):
        if rv:
            retcode[0] = 1

    # In the first cycle, we just build as much as we can without worrying
    # about any lock contention.  If someone else has it locked, we move on.
    seen = {}
    lock = None
    for t in targets:
        if t in seen:
            continue
        seen[t] = 1
        if not jobs.has_token():
            state.commit()
        jobs.get_token(t)
        if retcode[0] and not vars.KEEP_GOING:
            break
        if not state.check_sane():
            err('.redo directory disappeared; cannot continue.\n')
            retcode[0] = 205
            break
        f = state.File(name=t)
        lock = state.Lock(f.id)
        if vars.UNLOCKED:
            lock.owned = True
        else:
            lock.trylock()
        if not lock.owned:
            if vars.DEBUG_LOCKS:
                log('%s (locked...)\n' % _nice(t))
            locked.append((f.id, t))
        else:
            BuildJob(t, f, lock, shouldbuildfunc, done).start()

    del lock

    # Now we've built all the "easy" ones.  Go back and just wait on the
    # remaining ones one by one.  There's no reason to do it any more
    # efficiently, because if these targets were previously locked, that
    # means someone else was building them; thus, we probably won't need to
    # do anything.  The only exception is if we're invoked as redo instead
    # of redo-ifchange; then we have to redo it even if someone else already
    # did.  But that should be rare.
    while locked or jobs.running():
        state.commit()
        jobs.wait_all()
        # at this point, we don't have any children holding any tokens, so
        # it's okay to block below.
        if retcode[0] and not vars.KEEP_GOING:
            break
        if locked:
            if not state.check_sane():
                err('.redo directory disappeared; cannot continue.\n')
                retcode[0] = 205
                break
            fid, t = locked.pop(0)
            target_list = targets_seen.get()
            nice_t = _nice(t)
            if nice_t in target_list:
                # Target locked by parent: cyclic dependence
                err('encountered a dependence cycle:\n')
                _print_cycle(target_list, nice_t)
                retcode[0] = 209
                break
            lock = state.Lock(fid)
            lock.trylock()
            while not lock.owned:
                if vars.DEBUG_LOCKS:
                    warn('%s (WAITING)\n' % _nice(t))
                # this sequence looks a little silly, but the idea is to
                # give up our personal token while we wait for the lock to
                # be released; but we should never run get_token() while
                # holding a lock, or we could cause deadlocks.
                jobs.put_token()
                lock.waitlock()
                lock.unlock()
                jobs.get_token(t)
                lock.trylock()
            assert (lock.owned)
            if vars.DEBUG_LOCKS:
                log('%s (...unlocked!)\n' % _nice(t))
            if state.File(name=t).is_failed():
                err('%s: failed in another thread\n' % _nice(t))
                retcode[0] = 2
                lock.unlock()
            else:
                BuildJob(t, state.File(id=fid), lock, shouldbuildfunc,
                         done).start()
    state.commit()
    return retcode[0]
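
Example #8 is an alternative version of the same loop; the main difference is cycle handling. Instead of catching state.CyclicDependencyError from waitlock() as in Example #7, it asks a shared targets_seen registry (not shown in the excerpt) whether an ancestor is already building the locked target, and reports the cycle via _print_cycle(). That helper is not included above; the following is a purely hypothetical sketch of what it might do, using err from the log module:

def _print_cycle(target_list, nice_t):
    # hypothetical: print the chain from the first occurrence of the target
    # back around to itself, e.g.  a -> b -> c -> a
    start = target_list.index(nice_t)
    for name in target_list[start:]:
        err('  %s ->\n' % name)
    err('  %s\n' % nice_t)
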
Example #9
def catlog(t):
    global total_lines, status
    if t in already:
        return
    if t != '-':
        depth.append(t)
    _fix_depth()
    already.add(t)
    mydir = os.path.dirname(t)
    if t == '-':
        f = sys.stdin
        fid = None
        loglock = None
        logname = None
    else:
        try:
            sf = state.File(name=t, allow_add=False)
        except KeyError:
            sys.stderr.write('redo-log: [%s] %r: not known to redo.\n' % (
                os.getcwd(),
                t,
            ))
            sys.exit(24)
        fid = sf.id
        del sf
        state.rollback()
        logname = state.logname(fid)
        loglock = state.Lock(fid + state.LOG_LOCK_MAGIC)
        loglock.waitlock(shared=True)
        f = None
    delay = 0.01
    was_locked = is_locked(fid)
    line_head = ''
    width = _tty_width()
    while 1:
        if not f:
            try:
                f = open(logname)
            except IOError, e:
                if e.errno == errno.ENOENT:
                    # ignore files without logs
                    pass
                else:
                    raise
        if f:
            # Note: normally includes trailing \n.
            # In 'follow' mode, might get a line with no trailing \n
            # (eg. when ./configure is halfway through a test), which we
            # deal with below.
            line = f.readline()
        else:
            line = None
        if not line and (not opt.follow or not was_locked):
            # file not locked, and no new lines: done
            break
        if not line:
            was_locked = is_locked(fid)
            if opt.follow:
                # Don't display status line for extremely short-lived runs
                if opt.status and time.time() - start_time > 1.0:
                    width = _tty_width()
                    head = 'redo %s ' % ('{:,}'.format(total_lines))
                    tail = ''
                    for n in reversed(depth):
                        remain = width - len(head) - len(tail)
                        # always leave room for a final '... ' prefix
                        if remain < len(n) + 4 + 1 or remain <= 4:
                            if len(n) < 6 or remain < 6 + 1 + 4:
                                tail = '... %s' % tail
                            else:
                                start = len(n) - (remain - 3 - 1)
                                tail = '...%s %s' % (n[start:], tail)
                            break
                        elif n != '-':
                            tail = n + ' ' + tail
                    status = head + tail
                    if len(status) > width:
                        sys.stderr.write('\nOVERSIZE STATUS (%d):\n%r\n' %
                                         (len(status), status))
                    assert (len(status) <= width)
                    sys.stdout.flush()
                    sys.stderr.write('\r%-*.*s\r' % (width, width, status))
                time.sleep(min(delay, 1.0))
                delay += 0.01
            continue
        total_lines += 1
        delay = 0.01
        if not line.endswith('\n'):
            line_head += line
            continue
        if line_head:
            line = line_head + line
            line_head = ''
        if status:
            sys.stdout.flush()
            sys.stderr.write('\r%-*.*s\r' % (width, width, ''))
            status = None
        g = re.match(REDO_LINE_RE, line)
        if g:
            # FIXME: print prefix if @@REDO is not at start of line.
            #   logs.PrettyLog does it, but only if we actually call .write().
            words, text = g.groups()
            kind, pid, when = words.split(':')[0:3]
            pid = atoi(pid)
            relname = _rel(topdir, mydir, text)
            fixname = os.path.normpath(os.path.join(mydir, text))
            if kind == 'unchanged':
                if opt.unchanged:
                    if opt.debug_locks:
                        logs.meta(kind, relname, pid=pid)
                    elif fixname not in already:
                        logs.meta('do', relname, pid=pid)
                    if opt.recursive:
                        if loglock: loglock.unlock()
                        catlog(os.path.join(mydir, text))
                        if loglock: loglock.waitlock(shared=True)
                    already.add(fixname)
            elif kind in ('do', 'waiting', 'locked', 'unlocked'):
                if opt.debug_locks:
                    logs.meta(kind, relname, pid=pid)
                    logs.write(line.rstrip())
                elif fixname not in already:
                    logs.meta('do', relname, pid=pid)
                if opt.recursive:
                    assert text
                    if loglock: loglock.unlock()
                    catlog(os.path.join(mydir, text))
                    if loglock: loglock.waitlock(shared=True)
                already.add(fixname)
            elif kind == 'done':
                rv, name = text.split(' ', 1)
                logs.meta(kind, rv + ' ' + _rel(topdir, mydir, name))
            else:
                logs.write(line.rstrip())
        else:
            if opt.details:
                logs.write(line.rstrip())
Example #10
0
#!/usr/bin/env python2
import sys, os
import vars, state
from log import err


try:
    me = os.path.join(vars.STARTDIR, 
                      os.path.join(vars.PWD, vars.TARGET))
    f = state.File(name=me)
    f.add_dep('m', state.ALWAYS)
    always = state.File(name=state.ALWAYS)
    always.stamp = state.STAMP_MISSING
    always.set_changed()
    always.save()
    state.commit()
except KeyboardInterrupt:
    sys.exit(200)
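
Example #10 is the complete redo-always helper: it makes the current target (vars.TARGET) depend on the special state.ALWAYS entry, whose stamp is forced to STAMP_MISSING and marked changed, so a later redo-ifchange of that target should always see it as dirty. It is normally run from a .do script; to invoke it from Python instead, it could be spawned the same way Example #4 spawns redo-ifchange (a usage sketch, not taken from the source):

import os, sys

argv = ['redo-always']
rv = os.spawnvp(os.P_WAIT, argv[0], argv)
if rv:
    sys.exit(rv)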