Ejemplo n.º 1
0
def start_job(reason, jobfunc, donefunc):
    """Acquire a token, fork a child that runs jobfunc, register donefunc.

    The child exits with jobfunc's return value (201 if jobfunc never
    completed).  The parent keeps the pipe's read end, marked
    close-on-exec, so the child's exit can be detected later.
    """
    global _mytokens
    assert _mytokens <= 1
    get_token(reason)
    assert _mytokens >= 1
    assert _mytokens == 1
    _mytokens -= 1
    rfd, wfd = _make_pipe(50)
    pid = os.fork()
    if pid == 0:
        # Child: run the job and hard-exit with its status.
        os.close(rfd)
        status = 201
        try:
            try:
                status = jobfunc() or 0
                _debug('jobfunc completed (%r, %r)\n' % (jobfunc, status))
            except Exception:
                import traceback
                traceback.print_exc()
        finally:
            _debug('exit: %d\n' % status)
            os._exit(status)
    # Parent: watch the read end of the pipe for the child's completion.
    close_on_exec(rfd, True)
    os.close(wfd)
    _waitfds[rfd] = Job(reason, pid, donefunc)
Ejemplo n.º 2
0
def start_job(reason, jobfunc, donefunc):
    """Fork a child to run jobfunc; donefunc fires when the child is reaped.

    Destroys our single token first, because the subprocess implicitly
    starts with one of its own.
    """
    assert state.is_flushed()
    global _mytokens
    assert _mytokens <= 1
    assert _mytokens == 1
    # Subprocesses always start with 1 token, so we have to destroy ours
    # in order for the universe to stay in balance.
    _destroy_tokens(1)
    rfd, wfd = _make_pipe(50)
    child = os.fork()
    if child == 0:
        # Child: run the job, report, then exit with its status.
        os.close(rfd)
        status = 201
        try:
            try:
                status = jobfunc() or 0
                _debug('jobfunc completed (%r, %r)\n' % (jobfunc, status))
            except Exception:
                import traceback
                traceback.print_exc()
        finally:
            _debug('exit: %d\n' % status)
            os._exit(status)
    # Parent: keep only the (close-on-exec) read end for completion tracking.
    close_on_exec(rfd, True)
    os.close(wfd)
    _waitfds[rfd] = Job(reason, child, donefunc)
Ejemplo n.º 3
0
def start_job(name, jobfunc, donefunc):
    """
    Start a job
    @param name The name of the job
    @param jobfunc The function representing the job
    @param donefunc The function to call when the job is done
    """
    global _has_token
    get_token(name)
    assert _has_token
    _has_token = False
    read_end, write_end = _make_pipe(50)
    # Fork a child process to run the job
    pid = os.fork()
    if pid == 0:
        # Child: execute the job and exit with its result code.
        os.close(read_end)
        result = 201
        try:
            result = jobfunc() or 0
            _debug('jobfunc completed (%r, %r)\n' % (jobfunc, result))
        except Exception:
            import traceback
            traceback.print_exc()
        finally:
            _debug('exit: %d\n' % result)
            os._exit(result)
    # Parent: track the child through the read end of the pipe.
    close_on_exec(read_end, True)
    os.close(write_end)
    # Record the pending completion so wait()/wait_all() can find it.
    _completion_map[read_end] = Completion(name, pid, donefunc)
Ejemplo n.º 4
0
def start_job(reason, jobfunc, donefunc):
    """Acquire a token and fork a child that executes jobfunc.

    The parent registers donefunc against the pipe's read fd so the
    completion can be noticed later; the child exits with jobfunc's
    return value (or 201 if it never got that far).
    """
    assert state.is_flushed()
    global _mytokens
    assert _mytokens <= 1
    get_token(reason)
    assert _mytokens >= 1
    assert _mytokens == 1
    _mytokens -= 1
    rfd, wfd = _make_pipe(50)
    child = os.fork()
    if child == 0:
        # Child process: run the job, then hard-exit with its status.
        os.close(rfd)
        status = 201
        try:
            try:
                status = jobfunc() or 0
                _debug('jobfunc completed (%r, %r)\n' % (jobfunc, status))
            except Exception:
                import traceback
                traceback.print_exc()
        finally:
            _debug('exit: %d\n' % status)
            os._exit(status)
    # Parent process: keep only the read end, marked close-on-exec.
    close_on_exec(rfd, True)
    os.close(wfd)
    _waitfds[rfd] = Job(reason, child, donefunc)
Ejemplo n.º 5
0
Archivo: state.py Proyecto: reckbo/redo
 def _open_lock(self):
     if not self.lockfile:
         try: os.makedirs(os.path.dirname(self.name))
         except: pass
         self.lockfile = os.open(self.name, os.O_RDWR | os.O_CREAT, 0666)
         self.close_on_del = True
         close_on_exec(self.lockfile, True)
Ejemplo n.º 6
0
def start_job(reason, jobfunc, donefunc):
    """
    Start a job.

    jobfunc:  executed in the child process
    donefunc: executed in the parent process during a wait or wait_all call
    """
    global _mytokens
    assert(_mytokens <= 1)
    get_token(reason)
    assert(_mytokens >= 1)
    assert(_mytokens == 1)
    _mytokens -= 1
    r,w = _make_pipe(50)
    pid = os.fork()
    if pid == 0:
        # child
        os.close(r)
        # BUGFIX: default to '' so an unset REDO_JWACK doesn't turn into
        # the literal string "None" in the environment value.
        os.environ['REDO_JWACK'] = "%s,%d" % (os.environ.get('REDO_JWACK', ''), w)
        rv = 201
        try:
            try:
                rv = jobfunc() or 0
                _debug('jobfunc completed (%r, %r)\n' % (jobfunc,rv))
            except Exception:
                import traceback
                traceback.print_exc()
        finally:
            _debug('exit: %d\n' % rv)
            os._exit(rv)
    # parent: keep the (close-on-exec) read end to detect child exit
    close_on_exec(r, True)
    os.close(w)
    pd = Job(reason, pid, donefunc)
    _waitfds[r] = pd
Ejemplo n.º 7
0
    def build(self):
        """Exec the .do script for this target in the current (child) process.

        Builds the shell (or shebang-derived) argv, exports the REDO_*
        environment, chdirs into the dofile's directory, redirects output
        through the logger, and finally execs the script.  Never returns:
        it either execs or hard-exits with 127 after an exception.
        """
        debug3('running build job for %r\n', self.target.name)

        (dodir, dofile, basedir, basename, ext) = (
            self.dodir, self.dofile, self.dobasedir, self.dobasename, self.doext)

        # this will run in the dofile's directory, so use only basenames here
        if vars.OLD_ARGS:
            arg1 = basename  # target name (no extension)
            arg2 = ext       # extension (if any), including leading dot
        else:
            arg1 = basename + ext  # target name (including extension)
            arg2 = basename        # target name (without extension)
        argv = ['sh', '-e',
                dofile,
                arg1,
                arg2,
                # temp output file name
                os.path.relpath(self.tmpname_arg3, dodir),
                ]
        if vars.VERBOSE: argv[1] += 'v'
        if vars.XTRACE: argv[1] += 'x'
        if vars.VERBOSE or vars.XTRACE: log_e('\n')

        # '#!.../name' means: find the interpreter relative to the dofile.
        firstline = open(os.path.join(dodir, dofile)).readline().strip()
        if firstline.startswith('#!.../'):
            _, _, interp_argv = firstline.partition("/")
            interp_argv = interp_argv.split(' ')
            interpreter = _find_interpreter(self.dodir, interp_argv[0])
            if not interpreter:
                err('%s unable to find interpreter %s.\n', self.dofile, interp_argv[0])
                os._exit(208)
            self.target.add_dep(state.File(interpreter))
            argv[0:2] = [interpreter] + interp_argv[1:]
        elif firstline.startswith('#!/'):
            # ordinary shebang: replace "sh -e" with the named interpreter
            argv[0:2] = firstline[2:].split(' ')
        log('%s\n', self.target.printable_name())
        log_cmd("redo", self.target.name + "\n")

        try:
            dn = dodir
            os.environ['REDO_PWD'] = os.path.join(vars.PWD, dn)
            os.environ['REDO_TARGET'] = basename + ext
            os.environ['REDO_DEPTH'] = vars.DEPTH + '  '
            if dn:
                os.chdir(dn)
            l = logger.Logger(self.log_fd, self.tmp_sout_fd)
            l.fork()
            os.close(self.tmp_sout_fd)
            # fd 1 must survive the exec so the script can write stdout
            close_on_exec(1, False)
            if vars.VERBOSE or vars.XTRACE: log_e('* %s\n' % ' '.join(argv))
            os.execvp(argv[0], argv)
        except:
            import traceback
            sys.stderr.write(traceback.format_exc())
            err('internal exception - see above\n')
            raise
        finally:
            # returns only if there's an exception (exec in other case)
            os._exit(127)
Ejemplo n.º 8
0
Archivo: state.py Proyecto: emaste/redo
 def __init__(self, fid):
     """Open (creating if needed) the per-file lock file for id *fid*.

     The fd stays open for this object's lifetime and is marked
     close-on-exec so children don't inherit it.
     """
     self.owned = False
     self.fid = fid
     self.lockfile = os.open(os.path.join(vars.BASE, '.redo/lock.%d' % fid),
                             os.O_RDWR | os.O_CREAT, 0666)
     close_on_exec(self.lockfile, True)
     # only one Lock object per fid may exist in this process
     assert (_locks.get(fid, 0) == 0)
     _locks[fid] = 1
Ejemplo n.º 9
0
Archivo: state.py Proyecto: mk-fg/redo
 def __init__(self, fid):
     """Open (creating if needed) the per-file lock file for id *fid*.

     The fd stays open for this object's lifetime and is marked
     close-on-exec so children don't inherit it.
     """
     self.owned = False
     self.fid = fid
     self.lockfile = os.open(os.path.join(vars.BASE, '.redo/lock.%d' % fid),
                             os.O_RDWR | os.O_CREAT, 0666)
     close_on_exec(self.lockfile, True)
     # only one Lock object per fid may exist in this process
     assert(_locks.get(fid,0) == 0)
     _locks[fid] = 1
Ejemplo n.º 10
0
    def prepare(self):
        """Run pre-build checks and set up temp files for self.target.

        Returns 0 when the build should be skipped (user-modified target,
        static source, or no-longer-generated file), 1 on a hard error
        (no rule and target missing), or None when the dofile, output
        directory and temp fds are ready and the build should proceed.
        """
        assert self.target.dolock().owned == state.LOCK_EX
        self.target.build_starting()
        self.before_t = _try_stat(self.target.name)

        newstamp = self.target.read_stamp()
        if newstamp.is_override_or_missing(self.target):
            if newstamp.is_missing():
                # was marked generated, but is now deleted
                debug3('oldstamp=%r newstamp=%r\n', self.target.stamp, newstamp)
                self.target.forget()
                self.target.refresh()
            elif vars.OVERWRITE:
                warn('%s: you modified it; overwrite\n', self.target.printable_name())
            else:
                warn('%s: you modified it; skipping\n', self.target.printable_name())
                return 0
        if self.target.exists_not_dir() and not self.target.is_generated:
            # an existing source file that was not generated by us.
            # This step is mentioned by djb in his notes.
            # For example, a rule called default.c.do could be used to try
            # to produce hello.c, but we don't want that to happen if
            # hello.c was created in advance by the end user.
            if vars.OVERWRITE:
                warn('%s: exists and not marked as generated; overwrite.\n',
                     self.target.printable_name())
            else:
                warn('%s: exists and not marked as generated; not redoing.\n',
                     self.target.printable_name())
                debug2('-- static (%r)\n', self.target.name)
                return 0

        (self.dodir, self.dofile, self.dobasedir, self.dobasename, self.doext) = _find_do_file(self.target)
        if not self.dofile:
            if newstamp.is_missing():
                err('no rule to make %r\n', self.target.name)
                return 1
            else:
                self.target.forget()
                debug2('-- forget (%r)\n', self.target.name)
                return 0  # no longer a generated target, but exists, so ok

        self.outdir = self._mkoutdir()
        # name connected to stdout
        self.tmpname_sout = self.target.tmpfilename('out.tmp')
        # name provided as $3
        self.tmpname_arg3 = os.path.join(self.outdir, self.target.basename())
        # name for the log file
        unlink(self.tmpname_sout)
        unlink(self.tmpname_arg3)
        self.log_fd = logger.open_log(self.target, truncate=True)
        # O_EXCL: the unlink above guarantees a fresh file; fail if raced
        self.tmp_sout_fd = os.open(self.tmpname_sout, os.O_CREAT|os.O_RDWR|os.O_EXCL, 0666)
        close_on_exec(self.tmp_sout_fd, True)
        self.tmp_sout_f = os.fdopen(self.tmp_sout_fd, 'w+')

        return None
Ejemplo n.º 11
0
 def __init__(self, name=None, f=None):
     """Wrap a lock file: open *name* (creating it) or adopt an fd *f*.

     A file we open ourselves is marked close-on-exec and will be closed
     when this object is deleted; an adopted fd is left untouched.
     """
     self.owned = False
     self.name  = name
     if not f:
         self.lockfile = os.open(self.name, os.O_RDWR | os.O_CREAT, 0666)
         close_on_exec(self.lockfile, True)
         self.close_on_del = True
     else:
         self.lockfile = f
         self.close_on_del = False
     # fcntl lock modes used by the lock()/unlock() operations
     self.shared = fcntl.LOCK_SH
     self.exclusive = fcntl.LOCK_EX
Ejemplo n.º 12
0
 def _do_subproc(self):
     """Child-process body: set REDO_* env, chdir, wire stdout, exec argv.

     Runs in the forked child and never returns: execvp replaces the
     process image (or raises, which would trip the final assert).
     """
     # careful: REDO_PWD was the PWD relative to the STARTPATH at the time
     # we *started* building the current target; but that target ran
     # redo-ifchange, and it might have done it from a different directory
     # than we started it in.  So os.getcwd() might be != REDO_PWD right
     # now.
     dn = self.dodir
     newp = os.path.realpath(dn)
     os.environ['REDO_PWD'] = state.relpath(newp, vars.STARTDIR)
     os.environ['REDO_TARGET'] = self.basename + self.ext
     os.environ['REDO_DEPTH'] = vars.DEPTH + '  '
     if dn:
         os.chdir(dn)
     # the dofile's stdout goes to our temp output file
     os.dup2(self.f.fileno(), 1)
     os.close(self.f.fileno())
     close_on_exec(1, False)
     if vars.VERBOSE or vars.XTRACE: log_('* %s\n' % ' '.join(self.argv))
     os.execvp(self.argv[0], self.argv)
     # unreachable: execvp only comes back by raising an exception
     assert (0)
Ejemplo n.º 13
0
 def _do_subproc(self):
     """Child-process body: set REDO_* env, chdir, wire stdout, exec argv.

     Runs in the forked child and never returns: execvp replaces the
     process image (or raises, which would trip the final assert).
     """
     # careful: REDO_PWD was the PWD relative to the STARTPATH at the time
     # we *started* building the current target; but that target ran
     # redo-ifchange, and it might have done it from a different directory
     # than we started it in.  So os.getcwd() might be != REDO_PWD right
     # now.
     dn = self.dodir
     newp = os.path.realpath(dn)
     os.environ['REDO_PWD'] = state.relpath(newp, vars.STARTDIR)
     os.environ['REDO_TARGET'] = self.basename + self.ext
     os.environ['REDO_DEPTH'] = vars.DEPTH + '  '
     if dn:
         os.chdir(dn)
     # the dofile's stdout goes to our temp output file
     os.dup2(self.f.fileno(), 1)
     os.close(self.f.fileno())
     close_on_exec(1, False)
     if vars.VERBOSE or vars.XTRACE: log_('* %s\n' % ' '.join(self.argv))
     os.execvp(self.argv[0], self.argv)
     # unreachable: execvp only comes back by raising an exception
     assert(0)
Ejemplo n.º 14
0
def cleanup_on_exec():
    """Close file descriptors.

    Marks our internal fds and every fd listed in REDO_JWACK as
    close-on-exec, then scrubs REDO_JWACK and MAKEFLAGS from the
    environment we pass on.
    """
    fds = _find_fds()
    if fds:
        a, b = fds
        close_on_exec(a, True)
        close_on_exec(b, True)
    # REDO_JWACK is a comma-separated fd list; non-numeric entries are
    # skipped via atoi's None fallback.
    for waitfd in os.environ.get('REDO_JWACK', '').split(','):
        waitfd = atoi(waitfd, None)
        if waitfd is not None: close_on_exec(waitfd, True)
    # BUGFIX: pop() instead of del -- a plain del raised KeyError when
    # REDO_JWACK was never set (which the .get() default above allows).
    os.environ.pop('REDO_JWACK', None)
    os.environ['MAKEFLAGS'] = cleanup_makeflags(os.getenv('MAKEFLAGS'))
Ejemplo n.º 15
0
    def fork(self):
        """Spawn the three logger children (std/err/log) and rewire our fds.

        With logging disabled this only (optionally) restores the saved
        stdout and returns.  Otherwise, after the forks, the parent's
        fd 1 and fd 2 feed the std/err pipes, and REDO_LOGFD is
        published so descendant processes can write to the log stream.
        """
        if not vars.LOG:
            if vars.OLD_STDOUT or vars.WARN_STDOUT:
                os.dup2(self.stdoutfd, 1)
            return
        # child #1: pumps the "std" stream (and the real stdout)
        pid = os.fork()
        if pid == 0:
            os.close(self.fd_std_out)
            os.close(self.fd_err_out)
            os.close(self.fd_log_out)
            os.close(self.fd_err_in)
            os.close(self.fd_log_in)
            sysout = sys.stdout
            if vars.OLD_STDOUT: sysout = None
            self._main(os.fdopen(self.fd_std_in), "std", sysout, self.stdoutfd)
            os._exit(0)
        # child #2: pumps the "err" stream to stderr
        pid2 = os.fork()
        if pid2 == 0:
            os.close(self.fd_std_out)
            os.close(self.fd_err_out)
            os.close(self.fd_log_out)
            os.close(self.fd_std_in)
            os.close(self.fd_log_in)
            os.close(self.stdoutfd)
            self._main(os.fdopen(self.fd_err_in), "err", sys.stderr)
            os._exit(0)
        # child #3: pumps the "log" stream into LOGFILE
        pid3 = os.fork()
        if pid3 == 0:
            os.close(self.fd_std_out)
            os.close(self.fd_err_out)
            os.close(self.fd_log_out)
            os.close(self.fd_std_in)
            os.close(self.fd_err_in)
            os.close(self.stdoutfd)
            self._main(os.fdopen(self.fd_log_in), "log", LOGFILE)
            os._exit(0)
        # parent: our stdout/stderr now feed the std/err children
        os.dup2(self.fd_std_out, 1)
        os.dup2(self.fd_err_out, 2)
        os.close(self.fd_std_out)
        os.close(self.fd_err_out)
        os.close(self.fd_std_in)
        os.close(self.fd_err_in)
        os.close(self.fd_log_in)
        # the log pipe's write end must survive exec for our children
        close_on_exec(self.fd_log_out, False)
        close_on_exec(1, False)
        close_on_exec(2, False)

        if vars.LOGFD: os.close(vars.LOGFD)
        os.environ["REDO_LOGFD"] = str(self.fd_log_out)
        vars.reinit()

        os.close(self.logfd)
Ejemplo n.º 16
0
 def _start_do(self):
     """Figure out how to build target self.t and kick off the build job.

     Either short-circuits through self._after2() (override, static
     source, or no rule) or records the chosen dofile/argv and hands
     self._do_subproc to jwack.start_job() for execution in a child.
     """
     assert(self.lock.owned)
     t = self.t
     sf = self.sf
     newstamp = sf.read_stamp()
     # a generated file whose stamp changed (and which didn't fail last
     # run) was modified by the user: record the override, don't rebuild
     if (sf.is_generated and
         not sf.failed_runid and
         newstamp != state.STAMP_MISSING and 
         (sf.stamp != newstamp or sf.is_override)):
             state.warn_override(_nice(t))
             sf.set_override()
             sf.set_checked()
             sf.save()
             return self._after2(0)
     if (os.path.exists(t) and not os.path.exists(t + '/.')
          and not sf.is_generated):
         # an existing source file that was not generated by us.
         # This step is mentioned by djb in his notes.
         # For example, a rule called default.c.do could be used to try
         # to produce hello.c, but we don't want that to happen if
         # hello.c was created by the end user.
         # FIXME: always refuse to redo any file that was modified outside
         # of redo?  That would make it easy for someone to override a
         # file temporarily, and could be undone by deleting the file.
         debug2("-- static (%r)\n" % t)
         sf.set_static()
         sf.save()
         return self._after2(0)
     sf.zap_deps1()
     (dodir, dofile, basedir, basename, ext) = _find_do_file(sf)
     if not dofile:
         if os.path.exists(t):
             sf.set_static()
             sf.save()
             return self._after2(0)
         else:
             err('no rule to make %r\n' % t)
             return self._after2(1)
     unlink(self.tmpname1)
     unlink(self.tmpname2)
     # O_EXCL: the unlink above guarantees a fresh file; fail if raced
     ffd = os.open(self.tmpname1, os.O_CREAT|os.O_RDWR|os.O_EXCL, 0666)
     close_on_exec(ffd, True)
     self.f = os.fdopen(ffd, 'w+')
     # this will run in the dofile's directory, so use only basenames here
     argv = ['sh', '-e',
             dofile,
             basename, # target name (no extension)
             ext,  # extension (if any), including leading dot
             os.path.join(basedir, os.path.basename(self.tmpname2))  # temp output file name
             ]
     if vars.VERBOSE: argv[1] += 'v'
     if vars.XTRACE: argv[1] += 'x'
     if vars.VERBOSE or vars.XTRACE: log_('\n')
     # honor a shebang line in the dofile instead of "sh -e"
     firstline = open(os.path.join(dodir, dofile)).readline().strip()
     if firstline.startswith('#!/'):
         argv[0:2] = firstline[2:].split(' ')
     log('%s\n' % _nice(t))
     self.dodir = dodir
     self.basename = basename
     self.ext = ext
     self.argv = argv
     sf.is_generated = True
     sf.save()
     dof = state.File(name=os.path.join(dodir, dofile))
     dof.set_static()
     dof.save()
     state.commit()
     jwack.start_job(t, self._do_subproc, self._after)
Ejemplo n.º 17
0
def cleanup_on_exec():
    """Scrub redo state before exec'ing an unrelated program.

    Marks the log fd and stdio close-on-exec, restores the original
    stdio fds saved in REDO_STDIO (if any), and removes every REDO_*
    variable so the exec'ed program starts with a clean environment.
    """
    if LOGFD:
        close_on_exec(LOGFD, True)

    close_on_exec(0, True)
    close_on_exec(1, True)
    close_on_exec(2, True)
    STDIO = os.environ.get('REDO_STDIO')
    if STDIO:
        # best effort: a malformed REDO_STDIO (ValueError) or dead fds
        # (OSError) shouldn't abort the exec.  The previous bare
        # "except:" also swallowed KeyboardInterrupt/SystemExit.
        try:
            a, b, c = [int(x) for x in STDIO.split(',')]
            os.dup2(a, 0)
            os.dup2(b, 1)
            os.dup2(c, 2)
            close_on_exec(a, True)
            close_on_exec(b, True)
            close_on_exec(c, True)
        except (ValueError, OSError):
            pass

    for env in ['REDO', 'REDO_STARTDIR', 'REDO_PWD', 'REDO_TARGET',
        'REDO_DEPTH', 'REDO_OVERWRITE', 'REDO_DEBUG', 'REDO_DEBUG_PIDS',
        'REDO_DEBUG_LOCKS', 'REDO_OLD_ARGS', 'REDO_OLD_STDOUT',
        'REDO_WARN_STDOUT', 'REDO_ONLY_LOG', 'REDO_VERBOSE', 'REDO_XTRACE',
        'REDO_KEEP_GOING', 'REDO_SHUFFLE', 'REDO_LOGFD', 'REDO_RUNID_FILE',
        'REDO_STDIO']:
        # pop() is the idiomatic "delete if present"
        os.environ.pop(env, None)
Ejemplo n.º 18
0
Archivo: state.py Proyecto: tonyg/redo
    if _db:
        return _db

    dbdir = '%s/.redo' % vars.BASE
    dbfile = '%s/db.sqlite3' % dbdir
    try:
        os.mkdir(dbdir)
    except OSError, e:
        if e.errno == errno.EEXIST:
            pass  # if it exists, that's okay
        else:
            raise

    _lockfile = os.open(os.path.join(vars.BASE, '.redo/locks'),
                        os.O_RDWR | os.O_CREAT, 0666)
    close_on_exec(_lockfile, True)

    must_create = not os.path.exists(dbfile)
    if not must_create:
        _db = _connect(dbfile)
        try:
            row = _db.cursor().execute("select version from Schema").fetchone()
        except sqlite3.OperationalError:
            row = None
        ver = row and row[0] or None
        if ver != SCHEMA_VER:
            # Don't use err() here because this might happen before
            # redo-log spawns.
            sys.stderr.write('redo: %s: found v%s (expected v%s)\n' %
                             (dbfile, ver, SCHEMA_VER))
            sys.stderr.write(
Ejemplo n.º 19
0
 def _start_do(self):
     """Determine how to build target self.t and launch the build job.

     Short-circuits through self._after2() for user overrides, static
     sources, or missing rules; otherwise prepares argv/temp files and
     schedules self._do_subproc via jwack.start_job().
     """
     assert (self.lock.owned)
     t = self.t
     sf = self.sf
     newstamp = sf.read_stamp()
     # a generated file whose stamp changed was edited by the user:
     # record the override instead of clobbering their changes
     if (sf.is_generated and newstamp != state.STAMP_MISSING
             and (sf.stamp != newstamp or sf.is_override)):
         state.warn_override(_nice(t))
         if not sf.is_override:
             warn('%s - old: %r\n' % (_nice(t), sf.stamp))
             warn('%s - new: %r\n' % (_nice(t), newstamp))
             sf.set_override()
         sf.set_checked()
         sf.save()
         return self._after2(0)
     if (os.path.exists(t) and not os.path.isdir(t + '/.')
             and not sf.is_generated):
         # an existing source file that was not generated by us.
         # This step is mentioned by djb in his notes.
         # For example, a rule called default.c.do could be used to try
         # to produce hello.c, but we don't want that to happen if
         # hello.c was created by the end user.
         debug2("-- static (%r)\n" % t)
         sf.set_static()
         sf.save()
         return self._after2(0)
     sf.zap_deps1()
     (dodir, dofile, basedir, basename, ext) = paths.find_do_file(sf)
     if not dofile:
         if os.path.exists(t):
             sf.set_static()
             sf.save()
             return self._after2(0)
         else:
             err('no rule to make %r\n' % t)
             return self._after2(1)
     unlink(self.tmpname1)
     unlink(self.tmpname2)
     # O_EXCL: the unlink above guarantees a fresh file; fail if raced
     ffd = os.open(self.tmpname1, os.O_CREAT | os.O_RDWR | os.O_EXCL, 0666)
     close_on_exec(ffd, True)
     self.f = os.fdopen(ffd, 'w+')
     # this will run in the dofile's directory, so use only basenames here
     arg1 = basename + ext  # target name (including extension)
     arg2 = basename  # target name (without extension)
     argv = [
         'sh',
         '-e',
         dofile,
         arg1,
         arg2,
         # temp output file name
         state.relpath(os.path.abspath(self.tmpname2), dodir),
     ]
     if vars.VERBOSE: argv[1] += 'v'
     if vars.XTRACE: argv[1] += 'x'
     if vars.VERBOSE or vars.XTRACE: log_('\n')
     # honor a shebang line in the dofile instead of "sh -e"
     firstline = open(os.path.join(dodir, dofile)).readline().strip()
     if firstline.startswith('#!/'):
         argv[0:2] = firstline[2:].split(' ')
     log('%s\n' % _nice(t))
     self.dodir = dodir
     self.basename = basename
     self.ext = ext
     self.argv = argv
     sf.is_generated = True
     sf.save()
     dof = state.File(name=os.path.join(dodir, dofile))
     dof.set_static()
     dof.save()
     state.commit()
     jwack.start_job(t, self._do_subproc, self._after)