Example #1
File: context.py    Project: simpkins/eden
@contextlib.contextmanager
def annotatecontext(repo, path, opts=defaultopts, rebuild=False):
    """context needed to perform (fast) annotate on a file

    an annotatecontext of a single file consists of two structures: the
    linelog and the revmap. this function takes care of locking. only 1
    process is allowed to write that file's linelog and revmap at a time.

    when something goes wrong, this function will assume the linelog and the
    revmap are in a bad state, and remove them from disk.

    use this function in the following way:

        with annotatecontext(...) as actx:
            actx. ....
    """
    helper = pathhelper(repo, path, opts)
    util.makedirs(helper.dirname)
    revmappath = helper.revmappath
    linelogpath = helper.linelogpath
    actx = None
    try:
        with helper.lock():
            actx = _annotatecontext(repo, path, linelogpath, revmappath, opts)
            if rebuild:
                actx.rebuild()
            yield actx
    except Exception:
        if actx is not None:
            actx.rebuild()
        repo.ui.debug("fastannotate: %s: cache broken and deleted\n" % path)
        raise
    finally:
        if actx is not None:
            actx.close()
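A minimal usage sketch of the context manager above; the repository object and the file path are placeholders, and the actual annotate calls are elided because their API is not shown in this snippet:

# hypothetical caller: exclusive access to the file's cache while the block
# runs; the linelog/revmap are removed from disk if anything inside raises
with annotatecontext(repo, "fastannotate/context.py") as actx:
    pass  # run (fast) annotate queries against actx here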
Example #2
def writesigtracethread(path, interval):
    dir = os.path.dirname(path)
    util.makedirs(dir)
    while True:
        time.sleep(interval)
        # Keep 10 minutes of sigtraces.
        util.gcdir(dir, 60 * 10)
        writesigtrace(path)
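The loop above never returns, so the caller is expected to run it on its own thread. A sketch of that wiring, with an illustrative path and interval (the thread setup here is an assumption, not taken from the project):

import threading

# daemon=True so the tracer thread never keeps the process alive on exit
tracer = threading.Thread(
    target=writesigtracethread,
    args=("/tmp/sigtraces/trace.log", 60),  # write a trace every 60 seconds
    name="sigtracethread",
)
tracer.daemon = True
tracer.start()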
Example #3
File: context.py    Project: simpkins/eden
    @contextlib.contextmanager
    def _lockflock(self):
        """the same as 'lock' but use flock instead of lockmod.lock, to avoid
        creating temporary symlinks."""
        import fcntl

        lockpath = self.linelogpath
        util.makedirs(os.path.dirname(lockpath))
        lockfd = os.open(lockpath, os.O_RDONLY | os.O_CREAT, 0o664)
        fcntl.flock(lockfd, fcntl.LOCK_EX)
        try:
            yield
        finally:
            fcntl.flock(lockfd, fcntl.LOCK_UN)
            os.close(lockfd)
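Because the method is a generator (with the contextlib.contextmanager decorator shown above), callers drive it with `with`; a short hypothetical usage:

# inside a method of the same class; the work done under the lock is elided
with self._lockflock():
    pass  # e.g. rewrite the linelog file while no other process holds it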
Example #4
def _iscreatedbyfb(path):
    """Returns True if path was created by FB.

    This function is very slow. So it uses ~/.cache/testutil/authordb/ as cache.
    """
    cachepath = os.path.expanduser("~/.cache/testutil/authordb/%s" %
                                   hashlib.sha1(path).hexdigest())
    if not os.path.exists(cachepath):
        util.makedirs(os.path.dirname(cachepath))
        lines = sorted(
            subprocess.check_output(
                ["hg", "log", "-f", "-T{author|email}\n", path]).splitlines())
        result = all(l.endswith("@fb.com") for l in lines)
        open(cachepath, "w").write(repr(result))
    return ast.literal_eval(util.readfile(cachepath))
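The caching scheme here (a file named after the sha1 of the key, holding a repr()'d Python literal) can be restated as a small self-contained helper. This is an illustrative sketch, not code from the project, and it byte-encodes the key so it also runs on Python 3:

import ast
import hashlib
import os

def cachedresult(key, compute, cachedir="~/.cache/testutil/authordb"):
    """Memoize compute(key) on disk, keyed by the sha1 of key."""
    cachedir = os.path.expanduser(cachedir)
    cachepath = os.path.join(cachedir, hashlib.sha1(key.encode()).hexdigest())
    if not os.path.exists(cachepath):
        os.makedirs(cachedir, exist_ok=True)  # stand-in for util.makedirs
        with open(cachepath, "w") as f:
            f.write(repr(compute(key)))       # store a Python literal
    with open(cachepath) as f:
        return ast.literal_eval(f.read())     # read it back verbatim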
Example #5
@contextlib.contextmanager
def flock(lockpath, description, timeout=-1):
    """A flock based lock object. Currently it is always non-blocking.

    Note that since it is flock based, you can accidentally take it multiple
    times within one process and the first one to be released will release all
    of them. So the caller needs to be careful to not create more than one
    instance per lock.
    """

    # best effort lightweight lock
    try:
        import fcntl

        fcntl.flock  # reference the attribute so flock is known to be usable
    except ImportError:
        # fallback to Mercurial lock
        vfs = vfsmod.vfs(os.path.dirname(lockpath))
        with lockmod.lock(vfs, os.path.basename(lockpath), timeout=timeout):
            yield
        return
    # make sure lock file exists
    util.makedirs(os.path.dirname(lockpath))
    with open(lockpath, "a"):
        pass
    lockfd = os.open(lockpath, os.O_RDWR, 0o664)
    start = time.time()
    while True:
        try:
            fcntl.flock(lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            break
        except IOError as ex:
            if ex.errno == errno.EAGAIN:
                if timeout != -1 and time.time() - start > timeout:
                    raise error.LockHeld(errno.EAGAIN, lockpath, description,
                                         "")
                else:
                    time.sleep(0.05)
                    continue
            raise

    try:
        yield
    finally:
        fcntl.flock(lockfd, fcntl.LOCK_UN)
        os.close(lockfd)
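A usage sketch, assuming the generator is exposed through contextlib.contextmanager as shown above (the lock path and description are placeholders):

# only one process at a time enters the block; error.LockHeld is raised if
# the lock is still busy after 5 seconds
with flock("/repo/.hg/backup.lock", "background backup", timeout=5):
    pass  # critical section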
Example #6
File: dirsync.py    Project: zerkella/eden
def applytomirrors(repo, status, sourcepath, mirrors, action):
    """Applies the changes that are in the sourcepath to all the mirrors."""
    mirroredfiles = set()

    # Detect which mirror this file comes from
    sourcemirror = None
    for mirror in mirrors:
        if sourcepath.startswith(mirror):
            sourcemirror = mirror
            break
    if not sourcemirror:
        raise error.Abort(
            _("unable to detect source mirror of '%s'") % (sourcepath, ))

    relpath = sourcepath[len(sourcemirror):]

    # Apply the change to each mirror one by one
    allchanges = set(status.modified + status.removed + status.added)
    for mirror in mirrors:
        if mirror == sourcemirror:
            continue

        mirrorpath = mirror + relpath
        mirroredfiles.add(mirrorpath)
        if mirrorpath in allchanges:
            wctx = repo[None]
            if (sourcepath not in wctx and mirrorpath not in wctx
                    and sourcepath in status.removed
                    and mirrorpath in status.removed):
                if repo.ui.verbose:
                    repo.ui.status(
                        _("not mirroring remove of '%s' to '%s';"
                          " it is already removed\n") %
                        (sourcepath, mirrorpath))
                continue

            if wctx[sourcepath].data() == wctx[mirrorpath].data():
                if repo.ui.verbose:
                    repo.ui.status(
                        _("not mirroring '%s' to '%s'; it already "
                          "matches\n") % (sourcepath, mirrorpath))
                continue
            raise error.Abort(
                _("path '%s' needs to be mirrored to '%s', but "
                  "the target already has pending changes") %
                (sourcepath, mirrorpath))

        fullsource = repo.wjoin(sourcepath)
        fulltarget = repo.wjoin(mirrorpath)

        dirstate = repo.dirstate
        if action == "m" or action == "a":
            mirrorpathdir, unused = util.split(mirrorpath)
            util.makedirs(repo.wjoin(mirrorpathdir))

            util.copyfile(fullsource, fulltarget)
            if dirstate[mirrorpath] in "?r":
                dirstate.add(mirrorpath)

            if action == "a":
                # For adds, detect copy data as well
                copysource = dirstate.copied(sourcepath)
                if copysource and copysource.startswith(sourcemirror):
                    mirrorcopysource = mirror + copysource[len(sourcemirror):]
                    dirstate.copy(mirrorcopysource, mirrorpath)
                    repo.ui.status(
                        _("mirrored copy '%s -> %s' to '%s -> %s'\n") %
                        (copysource, sourcepath, mirrorcopysource, mirrorpath))
                else:
                    repo.ui.status(
                        _("mirrored adding '%s' to '%s'\n") %
                        (sourcepath, mirrorpath))
            else:
                repo.ui.status(
                    _("mirrored changes in '%s' to '%s'\n") %
                    (sourcepath, mirrorpath))
        elif action == "r":
            try:
                util.unlink(fulltarget)
            except OSError as e:
                if e.errno == errno.ENOENT:
                    repo.ui.status(
                        _("not mirroring remove of '%s' to '%s'; it "
                          "is already removed\n") % (sourcepath, mirrorpath))
                else:
                    raise
            else:
                dirstate.remove(mirrorpath)
                repo.ui.status(
                    _("mirrored remove of '%s' to '%s'\n") %
                    (sourcepath, mirrorpath))

    return mirroredfiles
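A worked example of the prefix matching and path rewriting at the top of the function (the mirror roots and file name are invented for illustration):

# mirrors      = ["projectA/common/", "projectB/common/"]
# sourcepath   = "projectA/common/src/util.c"
# sourcemirror = "projectA/common/"   (first mirror whose prefix matches)
# relpath      = "src/util.c"         (sourcepath minus the mirror prefix)
# mirrorpath   = "projectB/common/src/util.c" for every other mirror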
Example #7
def mkdir(*args):
    for path in args:
        if path.startswith("-"):
            continue
        util.makedirs(path)
Example #8
def backgroundbackup(repo, command=None, dest=None):
    """start background backup"""
    ui = repo.ui
    if command is not None:
        background_cmd = command
    elif workspace.currentworkspace(repo):
        background_cmd = ["hg", "cloud", "sync"]
    else:
        background_cmd = ["hg", "cloud", "backup"]
    infinitepush_bgssh = ui.config("infinitepush", "bgssh")
    if infinitepush_bgssh:
        background_cmd += ["--config", "ui.ssh=%s" % infinitepush_bgssh]

    # developer config: infinitepushbackup.bgdebuglocks
    if ui.configbool("infinitepushbackup", "bgdebuglocks"):
        background_cmd += ["--config", "devel.debug-lockers=true"]

    # developer config: infinitepushbackup.bgdebug
    if ui.configbool("infinitepushbackup", "bgdebug", False):
        background_cmd.append("--debug")

    if dest:
        background_cmd += ["--dest", dest]

    logfile = None
    logdir = ui.config("infinitepushbackup", "logdir")
    if logdir:
        # make newly created files and dirs non-writable
        oldumask = os.umask(0o022)
        try:
            try:
                # the user name from the machine
                username = util.getuser()
            except Exception:
                username = "******"

            if not _checkcommonlogdir(logdir):
                raise WrongPermissionsException(logdir)

            userlogdir = os.path.join(logdir, username)
            util.makedirs(userlogdir)

            if not _checkuserlogdir(userlogdir):
                raise WrongPermissionsException(userlogdir)

            reponame = os.path.basename(repo.sharedroot)
            _removeoldlogfiles(userlogdir, reponame)
            logfile = getlogfilename(logdir, username, reponame)
        except (OSError, IOError) as e:
            ui.debug("background backup log is disabled: %s\n" % e)
        except WrongPermissionsException as e:
            ui.debug(
                (
                    "%s directory has incorrect permission, "
                    + "background backup logging will be disabled\n"
                )
                % e.logdir
            )
        finally:
            os.umask(oldumask)

    if not logfile:
        logfile = os.devnull

    with open(logfile, "a") as f:
        timestamp = util.datestr(util.makedate(), "%Y-%m-%d %H:%M:%S %z")
        fullcmd = " ".join(util.shellquote(arg) for arg in background_cmd)
        f.write("\n%s starting: %s\n" % (timestamp, fullcmd))

    Stdio = bindings.process.Stdio
    out = Stdio.open(logfile, append=True, create=True)
    bindings.process.Command.new(background_cmd[0]).args(
        background_cmd[1:]
    ).avoidinherithandles().newsession().stdin(Stdio.null()).stdout(out).stderr(
        out
    ).spawn()
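The final spawn goes through the Rust `bindings.process` API; as a rough, hedged stdlib approximation of the same detached, log-appending launch (this is not the code the project uses):

import subprocess

with open(logfile, "ab") as out:
    subprocess.Popen(
        background_cmd,
        stdin=subprocess.DEVNULL,   # Stdio.null()
        stdout=out,                 # append to the same log file
        stderr=out,
        start_new_session=True,     # roughly newsession()
        close_fds=True,             # roughly avoidinherithandles()
    )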
Example #9
    def __init__(self, dirname):
        util.makedirs(dirname)
        super(linkrevdbreadwrite, self).__init__(dirname)