Example #1
def getsyncingobsmarkers(repo):
    """Transfers any pending obsmarkers, and returns all syncing obsmarkers.

    The caller must hold the backup lock.
    """
    # Move any new obsmarkers from the pending file to the syncing file
    with lockmod.lock(repo.svfs,
                      _obsmarkerslockname,
                      timeout=_obsmarkerslocktimeout):
        if repo.svfs.exists(_obsmarkerspending):
            with repo.svfs.open(_obsmarkerspending) as f:
                _version, markers = obsolete._readmarkers(f.read())
            with repo.sharedvfs.open(_obsmarkerssyncing, "ab") as f:
                offset = f.tell()
                # offset == 0: new file - add the version header
                data = b"".join(
                    obsolete.encodemarkers(markers, offset == 0,
                                           obsolete._fm1version))
                f.write(data)
            repo.svfs.unlink(_obsmarkerspending)

    # Load the syncing obsmarkers
    markers = []
    if repo.sharedvfs.exists(_obsmarkerssyncing):
        with repo.sharedvfs.open(_obsmarkerssyncing) as f:
            _version, markers = obsolete._readmarkers(f.read())
    return markers
Example #2
    def testrustonlymode(self):
        with self.ui.configoverride({("devel", "lockmode"): "rust_only"}):
            with lock.lock(self.vfs, "foo", ui=self.ui):
                self.assertLocked("foo")
                self.assertLegacyLock("foo", False)

            self.assertNotLocked("foo")
Example #3
    def jlock(self, vfs):
        """Create a lock for the journal file"""
        if self._currentlock(self._lockref) is not None:
            raise error.Abort(_("journal lock does not support nesting"))
        desc = _("journal of %s") % vfs.base
        try:
            l = lock.lock(vfs, "namejournal.lock", 0, desc=desc)
        except error.LockHeld as inst:
            self.ui.warn(
                _("waiting for lock on %s held by %r\n") % (desc, inst.lockinfo)
            )
            # default to 600 seconds timeout
            l = lock.lock(
                vfs, "namejournal.lock", self.ui.configint("ui", "timeout"), desc=desc
            )
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        self._lockref = weakref.ref(l)
        return l
Example #4
    def testrecursivelock(self):
        state = teststate(self, tempfile.mkdtemp(dir=os.getcwd()))
        lock = state.makelock()
        state.assertacquirecalled(True)

        state.resetacquirefn()
        lock.lock()
        # recursive lock should not call acquirefn again
        state.assertacquirecalled(False)

        lock.release()  # brings lock refcount down from 2 to 1
        state.assertreleasecalled(False)
        state.assertpostreleasecalled(False)
        state.assertlockexists(True)

        lock.release()  # releases the lock
        state.assertreleasecalled(True)
        state.assertpostreleasecalled(True)
        state.assertlockexists(False)
Example #5
def addpendingobsmarkers(repo, markers):
    with lockmod.lock(repo.svfs,
                      _obsmarkerslockname,
                      timeout=_obsmarkerslocktimeout):
        with repo.svfs.open(_obsmarkerspending, "ab") as f:
            offset = f.tell()
            # offset == 0: new file - add the version header
            data = b"".join(
                obsolete.encodemarkers(markers, offset == 0,
                                       obsolete._fm1version))
            f.write(data)
Example #6
def waitbackup(ui, repo, timeout):
    """wait for backup operations to complete"""
    try:
        if timeout:
            timeout = int(timeout)
        else:
            timeout = -1
    except ValueError:
        raise error.Abort("timeout should be integer")

    try:
        with lockmod.lock(repo.sharedvfs, backuplock.lockfilename, timeout=timeout):
            pass
    except error.LockHeld as e:
        if e.errno == errno.ETIMEDOUT:
            raise error.Abort(_("timeout while waiting for backup"))
        raise
Example #7
@contextlib.contextmanager
def flock(lockpath, description, timeout=-1):
    """A flock(2) based lock context manager. The flock call itself is made
    non-blocking; acquisition is retried by polling until ``timeout`` expires
    (or forever when ``timeout`` is -1).

    Note that since it is flock based, you can accidentally take it multiple
    times within one process and the first one to be released will release all
    of them. So the caller needs to be careful to not create more than one
    instance per lock.
    """

    # best effort lightweight lock
    try:
        import fcntl

        fcntl.flock
    except ImportError:
        # fallback to Mercurial lock
        vfs = vfsmod.vfs(os.path.dirname(lockpath))
        with lockmod.lock(vfs, os.path.basename(lockpath), timeout=timeout):
            yield
        return
    # make sure lock file exists
    util.makedirs(os.path.dirname(lockpath))
    with open(lockpath, "a"):
        pass
    lockfd = os.open(lockpath, os.O_RDWR, 0o664)
    start = time.time()
    while True:
        try:
            fcntl.flock(lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            break
        except IOError as ex:
            if ex.errno == errno.EAGAIN:
                if timeout != -1 and time.time() - start > timeout:
                    raise error.LockHeld(errno.EAGAIN, lockpath, description,
                                         "")
                else:
                    time.sleep(0.05)
                    continue
            raise

    try:
        yield
    finally:
        fcntl.flock(lockfd, fcntl.LOCK_UN)
        os.close(lockfd)
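A hedged usage sketch for the context manager above: the lock path and description are made up, and flock is assumed to be importable from the module that defines it. If another process holds the lock, acquisition is retried for up to ten seconds before error.LockHeld is raised.

lockpath = "/repo/.hg/backup.flock"  # illustrative path, not a real layout
with flock(lockpath, "background backup", timeout=10):
    pass  # critical section: work that must not run concurrently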
Example #8
def safelog(repo, command):
    """boilerplate for log command

    input:
        repo: mercurial.localrepo
        command: list of strings, first is string of command run
    output: bool
        True if changes have been recorded, False otherwise
    """
    changes = False
    if repo is not None:  # some hg commands don't require repo
        # undolog specific lock
        # allows running command during other commands when
        # otherwise legal.  Could cause weird undolog states,
        # which gap handling generally covers.
        try:
            try:
                repo.localvfs.makedirs("undolog")
            except OSError:
                repo.ui.debug("can't make undolog folder in .hg\n")
                return changes
            with lockmod.lock(repo.localvfs,
                              "undolog/lock",
                              desc="undolog",
                              timeout=2):
                repo.ui.log("undologlock", "lock acquired\n")
                tr = lighttransaction(repo)
                with tr:
                    changes = log(repo.filtered("visible"), command, tr)
                    if changes and not ("undo" == command[0]
                                        or "redo" == command[0]):
                        _delundoredo(repo)
        except error.LockUnavailable:  # no write permissions
            repo.ui.debug("undolog lacks write permission\n")
        except error.LockHeld:  # timeout, not fatal: don't abort actual command
            # This shouldn't happen too often as it would
            # create gaps in the undo log
            repo.ui.debug("undolog lock timeout\n")
            _logtoscuba(repo.ui, "undolog lock timeout")
    return changes
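A hypothetical call site following the docstring's contract above: pass the repository and the command as a list of strings with the command name first, and use the boolean result to tell whether the undolog recorded anything. The rebase arguments are made up.

recorded = safelog(repo, ["rebase", "-s", "abc123", "-d", "main"])
if not recorded:
    repo.ui.debug("undolog did not record this command\n")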
Example #9
@contextlib.contextmanager
def lock(repo):
    # First speculatively try to lock so that we immediately print info about
    # the lock if it is locked.
    if repo.ui.interactive():
        try:
            with trylock(repo) as lock:
                yield lock
                return
        except error.LockHeld:
            pass

    # Now just wait for the lock.  Wait up to 120 seconds, because cloud sync
    # can take a while.
    with lockmod.lock(
            repo.sharedvfs,
            lockfilename,
            timeout=120,
            ui=repo.ui,
            showspinner=True,
            spinnermsg=_("waiting for background process to complete"),
    ) as lock:
        yield lock
Example #10
def getsyncingobsmarkers(repo):
    """Transfers any pending obsmarkers, and returns all syncing obsmarkers.

    The caller must hold the backup lock.
    """
    if not obsolete.isenabled(repo, obsolete.createmarkersopt):
        return []

    # Move any new obsmarkers from the pending file to the syncing file
    with lockmod.lock(repo.svfs, _obsmarkerslockname, timeout=_obsmarkerslocktimeout):
        if repo.svfs.exists(_obsmarkerspending):
            with repo.svfs.open(_obsmarkerspending) as f:
                _version, markers = obsolete._readmarkers(f.read())
            with repo.sharedvfs.open(_obsmarkerssyncing, "ab") as f:
                offset = f.tell()
                # offset == 0: new file - add the version header
                data = b"".join(
                    obsolete.encodemarkers(markers, offset == 0, obsolete._fm1version)
                )
                f.write(data)
            repo.svfs.unlink(_obsmarkerspending)

    # Load the syncing obsmarkers
    markers = []
    if repo.sharedvfs.exists(_obsmarkerssyncing):
        with repo.sharedvfs.open(_obsmarkerssyncing) as f:
            _version, markers = obsolete._readmarkers(f.read())

    # developer config: commitcloud.maxsendmarkers
    # set to -1 to disable this completely
    maxsendmarkers = repo.ui.configint("commitcloud", "maxsendmarkers", 500)
    if maxsendmarkers >= 0 and len(markers) > maxsendmarkers:
        # Sending too many markers is unlikely to work.  Just send the most recent
        # ones.
        markers = markers[-maxsendmarkers:]
    return markers
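Reading Examples #5 and #10 together gives the intended handoff: an ordinary command appends freshly created markers to the pending file, and the backup process later reads them back while holding the backup lock, as the docstring requires. A hedged sketch; newmarkers and the backuplock module name (see Example #9) are assumptions, not part of the code above.

# Writer side: queue markers for the next background backup.
addpendingobsmarkers(repo, newmarkers)

# Backup side: the backup lock must be held while reading the syncing markers.
with backuplock.lock(repo):
    markers = getsyncingobsmarkers(repo)
    # at most commitcloud.maxsendmarkers of these are returned for sending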
Example #11
    def lock(self):
        return lockmod.lock(
            self._repo.localvfs, self._vfspath + ".lock", ui=self._repo.ui
        )
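The object returned by lockmod.lock is itself a context manager, so a caller of this helper can scope its critical section with a with statement. A hedged sketch; store stands in for whichever object defines the lock method above.

with store.lock():
    pass  # read or rewrite the file at store._vfspath while the lock is held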