Example #1
def bisectmsg(repo, ui):
    msg = _("To mark the changeset good:    hg bisect --good\n"
            "To mark the changeset bad:     hg bisect --bad\n"
            "To abort:                      hg bisect --reset\n")

    state = hbisect.load_state(repo)
    bisectstatus = _(
        """Current bisect state: {} good commit(s), {} bad commit(s), {} skip commit(s)"""
    ).format(len(state["good"]), len(state["bad"]), len(state["skip"]))
    ui.write_err(prefixlines(bisectstatus))

    if len(state["good"]) > 0 and len(state["bad"]) > 0:
        try:
            nodes, commitsremaining, searching, badnode, goodnode = hbisect.bisect(
                repo, state)
            searchesremaining = (int(math.ceil(math.log(commitsremaining, 2)))
                                 if commitsremaining > 0 else 0)
            bisectstatus = _("""
Current Tracker: bad commit     current        good commit
                 {}...{}...{}
Commits remaining:           {}
Estimated bisects remaining: {}
""").format(
                nodeutil.short(badnode),
                nodeutil.short(nodes[0]),
                nodeutil.short(goodnode),
                commitsremaining,
                searchesremaining,
            )

            ui.write_err(prefixlines(bisectstatus))
        except Abort:
            # ignore the output if bisect() fails
            pass
    ui.warn(prefixlines(msg))
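
The "Estimated bisects remaining" figure is just the depth of a binary search over the remaining commits. A minimal standalone sketch of that estimate (the helper name is hypothetical):

import math

def estimated_bisect_steps(commitsremaining):
    # Bisection halves the candidate range each round, so the number of
    # rounds left is the base-2 logarithm of the remaining commits,
    # rounded up.
    if commitsremaining <= 0:
        return 0
    return int(math.ceil(math.log(commitsremaining, 2)))

assert estimated_bisect_steps(1) == 0
assert estimated_bisect_steps(8) == 3
assert estimated_bisect_steps(100) == 7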
Example #2
def debugremotefilelog(ui, path, **opts):
    decompress = opts.get("decompress")

    size, firstnode, mapping = parsefileblob(path, decompress)
    filename = None
    filenamepath = os.path.join(os.path.dirname(path), "filename")
    if os.path.exists(filenamepath):
        with open(filenamepath, "rb") as f:
            filename = f.read()

    ui.status(_("size: %s bytes\n") % size)
    ui.status(_("path: %s \n") % path)
    ui.status(_("key: %s \n") % (short(firstnode)))
    if filename is not None:
        ui.status(_("filename: %s \n") % filename)
    ui.status(_("\n"))
    ui.status(
        _("%12s => %12s %13s %13s %12s\n") %
        ("node", "p1", "p2", "linknode", "copyfrom"))

    queue = [firstnode]
    while queue:
        node = queue.pop(0)
        p1, p2, linknode, copyfrom = mapping[node]
        ui.status(
            _("%s => %s  %s  %s  %s\n") %
            (short(node), short(p1), short(p2), short(linknode), copyfrom))
        if p1 != nullid:
            queue.append(p1)
        if p2 != nullid:
            queue.append(p2)
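
The loop above is a breadth-first walk of the file's ancestry, following p1/p2 until it reaches the null parents. A toy run with a hand-built mapping in the shape shown above (node -> (p1, p2, linknode, copyfrom); the node and linknode values are made up):

nullid = b"\0" * 20  # assumed 20-byte null node, as in Mercurial

mapping = {
    b"C" * 20: (b"B" * 20, nullid, b"l3", None),
    b"B" * 20: (b"A" * 20, nullid, b"l2", None),
    b"A" * 20: (nullid, nullid, b"l1", None),
}

order = []
queue = [b"C" * 20]
while queue:
    node = queue.pop(0)
    order.append(node)
    p1, p2, linknode, copyfrom = mapping[node]
    if p1 != nullid:
        queue.append(p1)
    if p2 != nullid:
        queue.append(p2)

assert order == [b"C" * 20, b"B" * 20, b"A" * 20]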
Example #3
def _sanitycheck(ui, nodes, bases):
    """
    Does some basic sanity checking on a packfile with ``nodes`` and ``bases``
    (a mapping of node -> deltabase):

    - Each deltabase must itself be a node elsewhere in the pack
    - There must be no cycles
    """
    failures = 0
    for node in nodes:
        seen = set()
        current = node
        deltabase = bases[current]

        while deltabase != nullid:
            if deltabase not in nodes:
                ui.warn(("Bad entry: %s has an unknown deltabase (%s)\n" %
                         (short(node), short(deltabase))))
                failures += 1
                break

            if deltabase in seen:
                ui.warn(("Bad entry: %s has a cycle (at %s)\n" %
                         (short(node), short(deltabase))))
                failures += 1
                break

            current = deltabase
            seen.add(current)
            deltabase = bases[current]
        # The chain starting at ``node`` has now been checked, so memoize its
        # base as nullid to avoid traversing it again.
        bases[node] = nullid
    return failures
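
A hypothetical invocation of the check above, with a stub ui and a deliberate delta cycle between two nodes:

nullid = b"\0" * 20  # assumed null node constant

class fakeui:
    def warn(self, msg):
        print(msg, end="")

nodes = {b"a" * 20, b"b" * 20, b"c" * 20}
bases = {
    b"a" * 20: b"b" * 20,  # a's delta is based on b ...
    b"b" * 20: b"a" * 20,  # ... and b's on a: a cycle
    b"c" * 20: nullid,     # c is a full text, which is fine
}
assert _sanitycheck(fakeui(), nodes, bases) >= 1  # the cycle is reported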
Example #4
def _printupdatednode(repo, oldnode, newnodes):
    # newnodes is an iterable; only report the rewrite when oldnode was
    # replaced by exactly one new node.
    if len(newnodes) == 1:
        newnode = newnodes[0]
        firstline = encoding.trim(repo[newnode].description().split("\n")[0],
                                  50, "...")
        repo.ui.status(
            _('%s -> %s "%s"\n') % (short(oldnode), short(newnode), firstline))
Example #5
def _logchange(self, oldheads, newheads):
    newheads = set(newheads)
    oldheads = set(oldheads)
    addedheads = newheads - oldheads
    removedheads = oldheads - newheads
    util.log(
        "visibility",
        "removed %s heads [%s]; added %s heads [%s]\n",
        len(removedheads),
        ", ".join(node.short(n) for n in removedheads),
        len(addedheads),
        ", ".join(node.short(n) for n in addedheads),
    )
Example #6
def progresspulling(repo, heads):
    if len(heads) == 1:
        msg = "pulling %s" % nodemod.short(heads[0])
    else:
        msg = "pulling %d new heads" % len(heads)
    hexheads = [nodemod.hex(head) for head in heads]
    progress(repo, msg, pulling=hexheads)
Example #7
def sigwalk(repo):
    """
    walk over every signature, yielding pairs of
    ((node, version, sig), (filename, linenumber))
    """
    def parsefile(fileiter, context):
        ln = 1
        for l in fileiter:
            if not l:
                continue
            yield (l.split(" ", 2), (context, ln))
            ln += 1

    # read the heads
    fl = repo.file(".hgsigs")
    for r in reversed(fl.heads()):
        fn = ".hgsigs|%s" % hgnode.short(r)
        for item in parsefile(fl.read(r).splitlines(), fn):
            yield item
    try:
        # read local signatures
        fn = "localsigs"
        for item in parsefile(repo.localvfs(fn), fn):
            yield item
    except IOError:
        pass
Example #8
def _logheads(self, op, **opts):
    util.log(
        "visibility", "%s %d heads: %s%s\n", op, len(self.heads),
        ", ".join(
            node.short(h)
            for h in reversed(self.heads[-self.LOGHEADLIMIT:])),
        ", ..." if len(self.heads) > self.LOGHEADLIMIT else "", **opts)
Example #9
def _checkoutlinelogwithedits(self):
    """() -> [str]. prompt all lines for edit"""
    alllines = self.linelog.getalllines()
    # header
    editortext = (
        _(
            'HG: editing %s\nHG: "y" means the line to the right '
            "exists in the changeset to the top\nHG:\n"
        )
        % self.fctxs[-1].path()
    )
    # [(idx, fctx)]. hide the dummy emptyfilecontext
    visiblefctxs = [
        (i, f)
        for i, f in enumerate(self.fctxs)
        if not isinstance(f, emptyfilecontext)
    ]
    for i, (j, f) in enumerate(visiblefctxs):
        editortext += _("HG: %s/%s %s %s\n") % (
            "|" * i,
            "-" * (len(visiblefctxs) - i + 1),
            node.short(f.node()),
            f.description().split("\n", 1)[0],
        )
    editortext += _("HG: %s\n") % ("|" * len(visiblefctxs))
    # figure out the lifetime of a line; this is relatively inefficient,
    # but probably fine
    lineset = defaultdict(lambda: set())  # {(llrev, linenum): {llrev}}
    for i, f in visiblefctxs:
        self.linelog.annotate((i + 1) * 2)
        for l in self.linelog.annotateresult:
            lineset[l].add(i)
    # append lines
    for l in alllines:
        editortext += "    %s : %s" % (
            "".join([("y" if i in lineset[l] else " ") for i, _f in visiblefctxs]),
            decodeutf8(self._getline(l)),
        )
    # run editor
    editedtext = self.ui.edit(editortext, "", action="absorb")
    if not editedtext:
        raise error.Abort(_("empty editor text"))
    # parse edited result
    contents = [b"" for i in self.fctxs]
    leftpadpos = 4
    colonpos = leftpadpos + len(visiblefctxs) + 1
    for l in editedtext.splitlines(True):
        if l.startswith("HG:"):
            continue
        if l[colonpos - 1 : colonpos + 2] != " : ":
            raise error.Abort(_("malformed line: %s") % l)
        linecontent = encodeutf8(l[colonpos + 2 :])
        for i, ch in enumerate(l[leftpadpos : colonpos - 1]):
            if ch == "y":
                contents[visiblefctxs[i][0]] += linecontent
    # chunkstats is hard to calculate if anything changes, therefore
    # set them to just a simple value (1, 1).
    if editedtext != editortext:
        self.chunkstats = [1, 1]
    return contents
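
The parser at the end relies on a fixed column layout: four spaces of padding, one "y"/" " column per visible filectx, then " : " before the line body. A small check of that arithmetic with three visible filectxs:

leftpadpos = 4
visiblecount = 3  # stand-in for len(visiblefctxs)
colonpos = leftpadpos + visiblecount + 1

# One editor line as the code above would emit it: padding, the per-fctx
# "y"/" " columns, then " : " and the file line itself.
line = "    " + "yy " + " : " + "hello\n"
assert line[colonpos - 1 : colonpos + 2] == " : "
assert line[leftpadpos : colonpos - 1] == "yy "
assert line[colonpos + 2 :] == "hello\n"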
Example #10
def _verifynodeconstraints(self, prev, expected, seen):
    if self.node in expected:
        msg = _('%s "%s" changeset was an edited list candidate')
        raise error.ParseError(
            msg % (self.verb, node.short(self.node)),
            hint=_("graft must only use unlisted changesets"),
        )
Example #11
def _smartlog(ui, repo, *pats, **opts):
    masterfallback = "interestingmaster()"

    masterstring = (opts.get("master") or ui.config("smartlog", "master")
                    or masterfallback)

    masterrev = repo.anyrevs([masterstring], user=True).first()
    revs = getrevs(ui, repo, masterstring, **opts)

    if -1 in revs:
        revs.remove(-1)

    if len(revs) == 0:
        return

    # Print it!
    template = opts.get("template") or ""
    revdag, reserved = getdag(ui, repo, sorted(revs), masterrev, template)
    displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True)
    ui.pager("smartlog")
    cmdutil.displaygraph(ui, repo, revdag, displayer, reserved=reserved)

    try:
        with open(repo.localvfs.join("completionhints"), "w+") as f:
            for rev in revdag:
                commit_hash = rev[2].node()
                f.write(nodemod.short(commit_hash) + "\n")
    except IOError:
        # No write access. No big deal.
        pass
Example #12
def progressbackingup(repo, nodes):
    if len(nodes) == 1:
        msg = "backing up %s" % nodemod.short(nodes[0])
    else:
        msg = "backing up %d commits" % len(nodes)
    hexnodes = [nodemod.hex(node) for node in nodes]
    progress(repo, msg, backingup=hexnodes)
Example #13
def editmessages(repo, revs):
    """Invoke editor to edit messages in batch. Return {node: new message}"""
    nodebanners = []
    editortext = ""

    for rev in revs:
        ctx = repo[rev]
        message = ctx.description()
        short = nodemod.short(ctx.node())
        bannerstart = cmdutil.hgprefix(_("Begin of commit %s") % short)
        bannerend = cmdutil.hgprefix(_("End of commit %s") % short)
        nodebanners.append((ctx.node(), bannerstart, bannerend))
        if editortext:
            editortext += cmdutil.hgprefix("-" * 77) + "\n"
        else:
            editortext += (cmdutil.hgprefix(
                _("Editing %s commits in batch. Do not change lines starting with 'HG:'."
                  ) % len(revs)) + "\n")

        editortext += "%s\n%s\n%s\n" % (bannerstart, message, bannerend)

    result = {}
    ui = repo.ui
    newtext = ui.edit(editortext,
                      ui.username(),
                      action="metaedit",
                      repopath=repo.path)
    for node, bannerstart, bannerend in nodebanners:
        if bannerstart in newtext and bannerend in newtext:
            newmessage = newtext.split(bannerstart, 1)[1].split(bannerend,
                                                                1)[0]
            result[node] = newmessage

    return result
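
The banner scheme round-trips because each message is recovered by splitting on its start and end markers. A plain-string sketch of that parsing (banner text shortened for readability):

bannerstart = "HG: Begin of commit 1234567890ab"
bannerend = "HG: End of commit 1234567890ab"
editedtext = "HG: Editing 1 commits in batch...\n%s\nnew message\n%s\n" % (
    bannerstart,
    bannerend,
)

# The same extraction as above: everything between the two banners.
newmessage = editedtext.split(bannerstart, 1)[1].split(bannerend, 1)[0]
assert newmessage.strip() == "new message"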
Example #14
def _commitworkingcopychanges(ui, repo, opts, tmpwctx):
    """Temporarily commit working copy changes before moving unshelve commit"""
    # Store pending changes in a commit and remember added in case a shelve
    # contains unknown files that are part of the pending change
    s = repo.status()
    addedbefore = frozenset(s.added)
    if not (s.modified or s.added or s.removed):
        return tmpwctx, addedbefore
    ui.status(
        _(
            "temporarily committing pending changes "
            "(restore with 'hg unshelve --abort')\n"
        )
    )
    commitfunc = getcommitfunc(extra=None, interactive=False, editor=False)
    tempopts = {}
    tempopts["message"] = "pending changes temporary commit"
    tempopts["date"] = opts.get("date")
    with ui.configoverride({("ui", "quiet"): True}):
        node = cmdutil.commit(ui, repo, commitfunc, [], tempopts)
    tmpwctx = repo[node]
    ui.debug(
        "temporary working copy commit: %s:%s\n" % (tmpwctx.rev(), nodemod.short(node))
    )
    return tmpwctx, addedbefore
Example #15
def _smartlog(ui, repo, *pats, **opts):
    if opts.get("rev"):
        masterfallback = "null"
    else:
        masterfallback = "interestingmaster()"

    masterstring = (opts.get("master") or ui.config("smartlog", "master")
                    or masterfallback)

    masterrev = repo.anyrevs([masterstring], user=True).first()
    revs = getrevs(ui, repo, masterstring, **opts)

    if -1 in revs:
        revs.remove(-1)

    if len(revs) == 0:
        return

    # Print it!
    revdag, reserved = getdag(ui, repo.unfiltered(),
                              sorted(revs, reverse=True), masterrev)
    displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True)
    ui.pager("smartlog")
    if ui.config("experimental", "graph.renderer") == "legacy":
        overrides = {}
        if ui.config("experimental", "graphstyle.grandparent", "2.") == "|":
            overrides[("experimental", "graphstyle.grandparent")] = "2."
        with ui.configoverride(overrides, "smartlog"):
            if reserved:
                for prev in reserved:
                    addfakerev(revdag, prev)
            cmdutil.displaygraph(ui, repo, revdag, displayer,
                                 graphmod.asciiedges, None, None)
    else:
        cmdutil.rustdisplaygraph(ui,
                                 repo,
                                 revdag,
                                 displayer,
                                 reserved=reserved)

    try:
        with open(repo.localvfs.join("completionhints"), "w+") as f:
            for rev in revdag:
                commit_hash = rev[2].node()
                # Skip fakectxt nodes
                if commit_hash != "...":
                    f.write(nodemod.short(commit_hash) + "\n")
    except IOError:
        # No write access. No big deal.
        pass

    global hiddenchanges
    if hiddenchanges:
        ui.warn(
            _("hiding %s old heads without bookmarks\n") % hiddenchanges,
            notice=_("note"),
        )
        ui.warn(_("(use --all to see them)\n"))
Example #16
def _findprevtarget(ui, repo, n=None, bookmark=False, newest=False):
    """Get the revision n levels down the stack from the current revision.
    If newest is True, the newest parent is always chosen when a changeset
    has multiple parents; otherwise, an exception is raised.
    """
    ctx = repo["."]

    # The caller must specify a stopping condition -- either a number
    # of steps to walk or a bookmark to search for.
    if not n and not bookmark:
        raise error.Abort(_("no stop condition specified"))

    for i in count(0):
        # Loop until we're gone the desired number of steps, or we reach a
        # node with a bookmark if the bookmark option was specified.
        if bookmark:
            if i > 0 and ctx.bookmarks():
                break
        elif i >= n:
            break

        parents = ctx.parents()

        # Is this the root of the current branch?
        if not parents or parents[0].rev() == nullrev:
            if ctx.rev() == repo["."].rev():
                raise error.Abort(_("current changeset has no parents"))
            ui.status(_("reached root changeset\n"))
            break

        # Are there multiple parents?
        if len(parents) > 1 and not newest:
            ui.status(
                _("changeset %s has multiple parents, namely:\n") % short(ctx.node())
            )
            parents = _showchangesets(
                ui, repo, contexts=parents, indices=ui.interactive()
            )
            if ui.interactive():
                ctx = _choosenode(ui, parents)
            else:
                raise error.Abort(
                    _("ambiguous previous changeset"),
                    hint=_(
                        "use the --newest flag to always "
                        "pick the newest parent at each step"
                    ),
                )
        else:
            # Get the parent with the highest revision number.
            ctx = max(parents, key=lambda x: x.rev())

    return ctx.node()
Example #17
def snapshot(ui, repo, files, node, tmproot):
    """snapshot files as of some revision
    if not using snapshot, -I/-X does not work and recursive diff
    in tools like kdiff3 and meld displays too many files."""
    dirname = os.path.basename(repo.root)
    if dirname == "":
        dirname = "root"
    if node is not None:
        dirname = "%s.%s" % (dirname, short(node))
    base = os.path.join(tmproot, dirname)
    os.mkdir(base)
    fnsandstat = []

    if node is not None:
        ui.note(
            _("making snapshot of %d files from rev %s\n") %
            (len(files), short(node)))
    else:
        ui.note(
            _("making snapshot of %d files from working directory\n") %
            (len(files)))

    if files:
        repo.ui.setconfig("ui", "archivemeta", False)

        archival.archive(repo,
                         base,
                         node,
                         "files",
                         matchfn=scmutil.matchfiles(repo, files))

        for fn in sorted(files):
            wfn = util.pconvert(fn)
            ui.note("  %s\n" % wfn)

            if node is None:
                dest = os.path.join(base, wfn)

                fnsandstat.append((dest, repo.wjoin(fn), util.lstat(dest)))
    return dirname, fnsandstat
Example #18
def sigcheck(ui, repo, rev):
    """verify all the signatures there may be for a particular revision"""
    mygpg = newgpg(ui)
    rev = repo.lookup(rev)
    hexrev = hgnode.hex(rev)
    keys = []

    for data, context in sigwalk(repo):
        node, version, sig = data
        if node == hexrev:
            k = getkeys(ui, repo, mygpg, data, context)
            if k:
                keys.extend(k)

    if not keys:
        ui.write(_("no valid signature for %s\n") % hgnode.short(rev))
        return

    # print summary
    ui.write(_("%s is signed by:\n") % hgnode.short(rev))
    for key in keys:
        ui.write(" %s\n" % keystr(ui, key))
Example #19
def forbidnewline(ui, repo, hooktype, node, newline, **kwargs):
    halt = False
    seen = set()
    # we try to walk changesets in reverse order from newest to
    # oldest, so that if we see a file multiple times, we take the
    # newest version as canonical. this prevents us from blocking a
    # changegroup that contains an unacceptable commit followed later
    # by a commit that fixes the problem.
    tip = repo["tip"]
    for rev in range(len(repo) - 1, repo[node].rev() - 1, -1):
        c = repo[rev]
        for f in c.files():
            if f in seen or f not in tip or f not in c:
                continue
            seen.add(f)
            data = c[f].data()
            if not util.binary(data) and newline in data:
                if not halt:
                    ui.warn(
                        _(
                            "attempt to commit or push text file(s) "
                            "using %s line endings\n"
                        )
                        % newlinestr[newline]
                    )
                ui.warn(_("in %s: %s\n") % (short(c.node()), f))
                halt = True
    if halt and hooktype == "pretxnchangegroup":
        crlf = newlinestr[newline].lower()
        filter = filterstr[newline]
        ui.warn(
            _(
                "\nTo prevent this mistake in your local repository,\n"
                "add to Mercurial.ini or .hg/hgrc:\n"
                "\n"
                "[hooks]\n"
                "pretxncommit.%s = python:hgext.win32text.forbid%s\n"
                "\n"
                "and also consider adding:\n"
                "\n"
                "[extensions]\n"
                "win32text =\n"
                "[encode]\n"
                "** = %sencode:\n"
                "[decode]\n"
                "** = %sdecode:\n"
            )
            % (crlf, crlf, filter, filter)
        )
    return halt
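
Walking newest to oldest with a seen set means a file that was later fixed never trips the hook on its older, bad revision. A toy model of that dedup:

changesets = [               # index == revision number, oldest -> newest
    {"f.txt": "bad\r\n"},    # rev 0 introduces CRLF line endings
    {"f.txt": "fixed\n"},    # rev 1 fixes them
]
seen = set()
offenders = []
for rev in range(len(changesets) - 1, -1, -1):  # newest to oldest
    for f, data in changesets[rev].items():
        if f in seen:
            continue  # a newer version of this file is canonical
        seen.add(f)
        if "\r\n" in data:
            offenders.append((rev, f))
assert offenders == []  # the newest version is clean, so nothing is blocked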
Example #20
def _filterederror(orig, repo, rev):
    # If the number is beyond the changelog, it's a short hash that
    # just happened to be a number.
    intrev = None
    try:
        intrev = int(rev)
    except ValueError:
        pass
    if intrev is not None and intrev < len(repo):
        node = repo.unfiltered()[rev].node()
        shorthash = short(node)
        msg = msgfmt.format(shorthash)
        hint = hintfmt and hintfmt.format(shorthash)
        return error.FilteredRepoLookupError(msg, hint=hint)
    return orig(repo, rev)
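
The guard at the top exists because a short hash such as "123456" also parses as an integer; it is treated as a revision number only when it falls inside the changelog. A standalone sketch of that test (the helper name is hypothetical):

def could_be_revnum(rev, repolen):
    # A rev string is a plausible local revision number only if it parses
    # as an int and falls within the changelog's length.
    try:
        return int(rev) < repolen
    except ValueError:
        return False

assert could_be_revnum("42", 100)            # a real revision number
assert not could_be_revnum("123456", 100)    # beyond the changelog: a hash
assert not could_be_revnum("deadbeef", 100)  # not a number at all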
Example #21
def pointersfromctx(ctx):
    """return a dict {path: pointer} for given single changectx"""
    result = util.sortdict()
    for f in ctx.files():
        if f not in ctx:
            continue
        fctx = ctx[f]
        if not _islfs(fctx.filelog(), fctx.filenode()):
            continue
        try:
            result[f] = pointer.deserialize(fctx.rawdata())
        except pointer.InvalidPointer as ex:
            raise error.Abort(
                _("lfs: corrupted pointer (%s@%s): %s\n") %
                (f, short(ctx.node()), ex))
    return result
Example #22
    def apply(self):
        """apply self.fixups. update self.linelog, self.finalcontents.

        call this only once, before getfinalcontent(), after diffwith().
        """
        # the following is unnecessary, as it's done by "diffwith":
        #   self.linelog.annotate(self.linelog.maxrev)
        for rev, a1, a2, b1, b2 in reversed(self.fixups):
            blines = self.targetlines[b1:b2]
            if self.ui.debugflag:
                idx = (max(rev - 1, 0)) // 2
                self.ui.write(
                    _("%s: chunk %d:%d -> %d lines\n") %
                    (node.short(self.fctxs[idx].node()), a1, a2, len(blines)))
            self.linelog.replacelines(rev, a1, a2, b1, b2)
        if self.opts.get("edit_lines", False):
            self.finalcontents = self._checkoutlinelogwithedits()
        else:
            self.finalcontents = self._checkoutlinelog()
Example #23
def savepinnednodes(repo, newpin, newunpin, fullargs):
    # take a narrowed lock so it does not affect repo lock
    with extutil.flock(repo.svfs.join("obsinhibit.lock"), "save pinned nodes"):
        orignodes = loadpinnednodes(repo)
        nodes = set(orignodes)
        nodes |= set(newpin)
        nodes -= set(newunpin)
        with util.atomictempfile(repo.svfs.join("obsinhibit")) as f:
            f.write("".join(nodes))

        desc = lambda s: [short(n) for n in s]
        repo.ui.log(
            "pinnednodes",
            "pinnednodes: %r newpin=%r newunpin=%r "
            "before=%r after=%r\n",
            fullargs,
            desc(newpin),
            desc(newunpin),
            desc(orignodes),
            desc(nodes),
        )
Example #24
def _maybeupdateworkingcopy(repo, currentnode):
    ui = repo.ui

    if repo["."].node() != currentnode:
        return 0

    successors = list(repo.nodes("successors(%n) - obsolete()", currentnode))

    if len(successors) == 0:
        return 0

    if len(successors) == 1:
        destination = successors[0]
        if destination not in repo or destination == currentnode:
            return 0
        ui.status(
            _("current revision %s has been moved remotely to %s\n")
            % (nodemod.short(currentnode), nodemod.short(destination)),
            component="commitcloud",
        )
        if ui.configbool("commitcloud", "updateonmove"):
            if repo[destination].mutable():
                backuplock.progress(
                    repo,
                    "updating %s from %s to %s"
                    % (
                        repo.wvfs.base,
                        nodemod.short(currentnode),
                        nodemod.short(destination),
                    ),
                )
                ui.status(_("updating to %s\n") % nodemod.short(destination))
                with repo.wlock(), repo.lock(), repo.transaction("sync-checkout"):
                    return hg.updatetotally(
                        ui, repo, destination, destination, updatecheck="noconflict"
                    )
        else:
            hintutil.trigger("commitcloud-update-on-move")
    else:
        ui.status(
            _(
                "current revision %s has been replaced remotely with multiple revisions\n"
                "(run 'hg update HASH' to go to the desired revision)\n"
            )
            % nodemod.short(currentnode),
            component="commitcloud",
        )
    return 0
Example #25
def _applycloudchanges(repo, remotepath, lastsyncstate, cloudrefs, maxage, state, tr):
    # Pull all the new heads and any bookmark hashes we don't have. We need to
    # filter cloudrefs before pull as pull doesn't check if a rev is present
    # locally.
    unfi = repo
    newheads = [head for head in cloudrefs.heads if head not in unfi]
    if maxage is not None and maxage >= 0:
        mindate = time.time() - maxage * 86400
        omittedheads = [
            head
            for head in newheads
            if head in cloudrefs.headdates and cloudrefs.headdates[head] < mindate
        ]
        if omittedheads:
            repo.ui.status(_("omitting heads that are older than %d days:\n") % maxage)
            for head in omittedheads:
                headdatestr = util.datestr(util.makedate(cloudrefs.headdates[head]))
                repo.ui.status(_("  %s from %s\n") % (head[:12], headdatestr))
        newheads = [head for head in newheads if head not in omittedheads]
    else:
        omittedheads = []
    omittedbookmarks = []
    omittedremotebookmarks = []

    newvisibleheads = None
    if visibility.tracking(repo):
        localheads = _getheads(repo)
        localheadsset = set(localheads)
        cloudheads = [head for head in cloudrefs.heads if head not in omittedheads]
        cloudheadsset = set(cloudheads)
        if localheadsset != cloudheadsset:
            oldvisibleheads = [
                head
                for head in lastsyncstate.heads
                if head not in lastsyncstate.omittedheads
            ]
            newvisibleheads = util.removeduplicates(
                oldvisibleheads + cloudheads + localheads
            )
            toremove = {
                head
                for head in oldvisibleheads
                if head not in localheadsset or head not in cloudheadsset
            }
            newvisibleheads = [head for head in newvisibleheads if head not in toremove]

    remotebookmarknewnodes = set()
    remotebookmarkupdates = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        (remotebookmarkupdates, remotebookmarknewnodes) = _processremotebookmarks(
            repo, cloudrefs.remotebookmarks, lastsyncstate
        )

    try:
        snapshot = extensions.find("snapshot")
    except KeyError:
        snapshot = None
        addedsnapshots = []
        removedsnapshots = []
        newsnapshots = lastsyncstate.snapshots
    else:
        addedsnapshots = [
            s for s in cloudrefs.snapshots if s not in lastsyncstate.snapshots
        ]
        removedsnapshots = [
            s for s in lastsyncstate.snapshots if s not in cloudrefs.snapshots
        ]
        newsnapshots = cloudrefs.snapshots
        newheads += addedsnapshots

    if remotebookmarknewnodes or newheads:
        # Partition the heads into groups we can pull together.
        headgroups = _partitionheads(
            list(remotebookmarknewnodes) + newheads, cloudrefs.headdates
        )
        _pullheadgroups(repo, remotepath, headgroups)

    omittedbookmarks.extend(
        _mergebookmarks(repo, tr, cloudrefs.bookmarks, lastsyncstate)
    )

    newremotebookmarks = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        newremotebookmarks, omittedremotebookmarks = _updateremotebookmarks(
            repo, tr, remotebookmarkupdates
        )

    if snapshot:
        with repo.lock(), repo.transaction("sync-snapshots") as tr:
            repo.snapshotlist.update(
                tr, addnodes=addedsnapshots, removenodes=removedsnapshots
            )

    _mergeobsmarkers(repo, tr, cloudrefs.obsmarkers)

    if newvisibleheads is not None:
        visibility.setvisibleheads(repo, [nodemod.bin(n) for n in newvisibleheads])

    # Obsmarker sharing is unreliable.  Some of the commits that should now
    # be visible might be hidden still, and some commits that should be
    # hidden might still be visible.  Create local obsmarkers to resolve
    # this.
    if obsolete.isenabled(repo, obsolete.createmarkersopt) and not repo.ui.configbool(
        "mutation", "proxy-obsstore"
    ):
        unfi = repo
        # Commits that are only visible in the cloud are commits that are
        # ancestors of the cloud heads but are hidden locally.
        cloudvisibleonly = list(
            unfi.set(
                "not public() & ::%ls & hidden()",
                [head for head in cloudrefs.heads if head not in omittedheads],
            )
        )
        # Commits that are only hidden in the cloud are commits that are
        # ancestors of the previous cloud heads that are not ancestors of the
        # current cloud heads, but have not been hidden or obsoleted locally.
        cloudhiddenonly = list(
            unfi.set(
                "(not public() & ::%ls) - (not public() & ::%ls) - hidden() - obsolete()",
                [
                    head
                    for head in lastsyncstate.heads
                    if head not in lastsyncstate.omittedheads
                ],
                [head for head in cloudrefs.heads if head not in omittedheads],
            )
        )
        if cloudvisibleonly or cloudhiddenonly:
            msg = _(
                "detected obsmarker inconsistency (fixing by obsoleting [%s] and reviving [%s])\n"
            ) % (
                ", ".join([nodemod.short(ctx.node()) for ctx in cloudhiddenonly]),
                ", ".join([nodemod.short(ctx.node()) for ctx in cloudvisibleonly]),
            )
            repo.ui.log("commitcloud_sync", msg)
            repo.ui.warn(msg)
            repo._commitcloudskippendingobsmarkers = True
            with repo.lock():
                obsolete.createmarkers(repo, [(ctx, ()) for ctx in cloudhiddenonly])
                obsolete.revive(cloudvisibleonly)
            repo._commitcloudskippendingobsmarkers = False

    # We have now synced the repo to the cloud version.  Store this.
    logsyncop(
        repo,
        "from_cloud",
        cloudrefs.version,
        lastsyncstate.heads,
        cloudrefs.heads,
        lastsyncstate.bookmarks,
        cloudrefs.bookmarks,
        lastsyncstate.remotebookmarks,
        newremotebookmarks,
        lastsyncstate.snapshots,
        newsnapshots,
    )
    lastsyncstate.update(
        tr,
        newversion=cloudrefs.version,
        newheads=cloudrefs.heads,
        newbookmarks=cloudrefs.bookmarks,
        newremotebookmarks=newremotebookmarks,
        newmaxage=maxage,
        newomittedheads=omittedheads,
        newomittedbookmarks=omittedbookmarks,
        newomittedremotebookmarks=omittedremotebookmarks,
        newsnapshots=newsnapshots,
    )

    # Also update backup state.  These new heads are already backed up,
    # otherwise the server wouldn't have told us about them.
    state.update([nodemod.bin(head) for head in newheads], tr)
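
The newvisibleheads computation merges the three views (last sync, cloud, local) and then drops any head that either side has since hidden. A toy run, assuming util.removeduplicates keeps first occurrences (dict.fromkeys stands in for it here):

oldvisibleheads = ["a", "b"]   # visible at the last sync
cloudheads = ["b", "c"]        # visible in the cloud now
localheads = ["b", "d"]        # visible locally now

merged = list(dict.fromkeys(oldvisibleheads + cloudheads + localheads))
toremove = {
    head
    for head in oldvisibleheads
    if head not in set(localheads) or head not in set(cloudheads)
}
newvisibleheads = [head for head in merged if head not in toremove]

# "a" was hidden on at least one side since the last sync, so it stays
# hidden; "c" and "d" are newly visible in the cloud and locally.
assert newvisibleheads == ["b", "c", "d"]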
Example #26
def _sync(
    repo,
    cloudrefs=None,
    full=False,
    cloudversion=None,
    cloudworkspace=None,
    connect_opts=None,
    dest=None,
):
    ui = repo.ui
    start = util.timer()

    startnode = repo["."].node()

    if full:
        maxage = None
    else:
        maxage = ui.configint("commitcloud", "max_sync_age", None)

    # Work out which repo and workspace we are synchronizing with.
    reponame = ccutil.getreponame(repo)
    workspacename = workspace.currentworkspace(repo)
    if workspacename is None:
        raise ccerror.WorkspaceError(ui, _("undefined workspace"))

    # External services may know the workspacename to trigger the sync
    if cloudworkspace and workspacename != cloudworkspace:
        ui.status(_("current workspace is different than the workspace to sync\n"))
        return (1, None)

    # Connect to the commit cloud service.
    tokenlocator = token.TokenLocator(ui)
    serv = service.get(ui, tokenlocator.token)

    ui.status(
        _("synchronizing '%s' with '%s'\n") % (reponame, workspacename),
        component="commitcloud",
    )
    backuplock.progress(repo, "starting synchronizing with '%s'" % workspacename)

    # Work out what version to fetch updates from.
    lastsyncstate = syncstate.SyncState(repo, workspacename)
    fetchversion = lastsyncstate.version
    if maxage != lastsyncstate.maxage:
        # We are doing a full sync, or maxage has changed since the last sync,
        # so get a fresh copy of the full state.
        fetchversion = 0

    # External services may already know the version number.  Check if we're
    # already up-to-date.
    if cloudversion is not None and cloudversion <= lastsyncstate.version:
        ui.status(
            _("this version has been already synchronized\n"), component="commitcloud"
        )
        # It's possible that we have two cloud syncs for the same repo - one for
        # the edenfs backing repo and another for the edenfs checkout. If the
        # edenfs backing repo sync runs first, it will sync all the commits and
        # bookmarks, but it won't move the working copy of the checkout.
        # The line below makes sure that the working copy is updated.
        return _maybeupdateworkingcopy(repo, startnode), None

    backupsnapshots = False
    try:
        extensions.find("snapshot")
        backupsnapshots = True
    except KeyError:
        pass

    origheads = _getheads(repo)
    origbookmarks = _getbookmarks(repo)

    readonly = not origheads and not origbookmarks
    remotepath = (
        ccutil.getremotereadpath(repo, dest)
        if readonly
        else ccutil.getremotepath(repo, dest)
    )
    getconnection = lambda: repo.connectionpool.get(
        remotepath, connect_opts, reason="cloudsync"
    )

    # Back up all local commits that are not already backed up.
    # Load the backup state under the repo lock to ensure a consistent view.
    with repo.lock():
        state = backupstate.BackupState(repo, remotepath)
    backedup, failed = backup._backup(
        repo, state, remotepath, getconnection, backupsnapshots=backupsnapshots
    )

    # Now that commits are backed up, check that visibleheads are enabled
    # locally, and only sync if visibleheads is enabled.
    # developer config: commitcloud.requirevisibleheads
    if repo.ui.configbool("commitcloud", "requirevisibleheads", True):
        if not visibility.enabled(repo):
            raise error.Abort(_("commit cloud sync requires new-style visibility"))

    # On cloud rejoin we already know what the cloudrefs are.  Otherwise,
    # fetch them from the commit cloud service.
    if cloudrefs is None:
        cloudrefs = serv.getreferences(reponame, workspacename, fetchversion)

    with repo.ui.configoverride(
        {("treemanifest", "prefetchdraftparents"): False}, "cloudsync"
    ), repo.wlock(), repo.lock():

        if origheads != _getheads(repo) or origbookmarks != _getbookmarks(repo):
            # Another transaction changed the repository while we were backing
            # up commits. This may have introduced new commits that also need
            # backing up.  That transaction should have started its own sync
            # process, so give up on this sync, and let the later one perform
            # the sync.
            raise ccerror.SynchronizationError(ui, _("repo changed while backing up"))

        synced = False
        attempt = 0
        while not synced:

            if attempt >= 3:
                raise ccerror.SynchronizationError(
                    ui, _("failed to sync after %s attempts") % attempt
                )
            attempt += 1

            with repo.transaction("cloudsync") as tr:

                # Apply any changes from the cloud to the local repo.
                if cloudrefs.version != fetchversion:
                    _applycloudchanges(
                        repo, remotepath, lastsyncstate, cloudrefs, maxage, state, tr
                    )
                elif (
                    _isremotebookmarkssyncenabled(repo.ui)
                    and not lastsyncstate.remotebookmarks
                ):
                    # We're up-to-date, but didn't sync remote bookmarks last time.
                    # Sync them now.
                    cloudrefs = serv.getreferences(reponame, workspacename, 0)
                    _forcesyncremotebookmarks(
                        repo, cloudrefs, lastsyncstate, remotepath, tr
                    )

                # Check if any omissions are now included in the repo
                _checkomissions(repo, remotepath, lastsyncstate, tr)

            # We committed the transaction so that data downloaded from the cloud is
            # committed.  Start a new transaction for uploading the local changes.
            with repo.transaction("cloudsync") as tr:

                # Send updates to the cloud.  If this fails then we have lost the race
                # to update the server and must start again.
                synced, cloudrefs = _submitlocalchanges(
                    repo, reponame, workspacename, lastsyncstate, failed, serv, tr
                )

    # Update the backup bookmarks with any changes we have made by syncing.
    backupbookmarks.pushbackupbookmarks(repo, remotepath, getconnection, state)

    backuplock.progresscomplete(repo)

    if failed:
        failedset = set(repo.nodes("%ld::", failed))
        if len(failedset) == 1:
            repo.ui.warn(
                _("failed to synchronize %s\n") % nodemod.short(failedset.pop()),
                component="commitcloud",
            )
        else:
            repo.ui.warn(
                _("failed to synchronize %d commits\n") % len(failedset),
                component="commitcloud",
            )
    else:
        ui.status(_("commits synchronized\n"), component="commitcloud")

    elapsed = util.timer() - start
    ui.status(_("finished in %0.2f sec\n") % elapsed)

    # Check that Scm Service is running and a subscription exists
    subscription.check(repo)

    return _maybeupdateworkingcopy(repo, startnode), synced and not failed
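
The sync loop is optimistic concurrency: apply the cloud's changes, try to submit ours, and start over if another client updated the server version first, giving up after three attempts. The same shape in isolation (all names hypothetical):

def sync_with_retries(submit, max_attempts=3):
    # submit() returns True once our update wins the race, False if the
    # server version moved underneath us and we must retry.
    attempt = 0
    synced = False
    while not synced:
        if attempt >= max_attempts:
            raise RuntimeError("failed to sync after %s attempts" % attempt)
        attempt += 1
        synced = submit()
    return attempt

# Lose the race once, then win on the second attempt.
attempts = iter([False, True])
assert sync_with_retries(lambda: next(attempts)) == 2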
Example #27
File: hide.py Project: leszfb/eden
def hide(ui, repo, *revs, **opts):
    """hide commits and their descendants

    Mark the specified commits as hidden. Hidden commits are not included in
    the output of most Mercurial commands, including :hg:`log` and
    :hg:`smartlog`. Any descendants of the specified commits will also be
    hidden.

    Hidden commits are not deleted. They will remain in the repo indefinitely
    and are still accessible by their hashes. However, :hg:`hide` will delete
    any bookmarks pointing to hidden commits.

    Use the :hg:`unhide` command to make hidden commits visible again. See
    :hg:`help unhide` for more information.

    To view hidden commits, run :hg:`journal`.

    When you hide the current commit, the most recent visible ancestor is
    checked out.

    To hide obsolete stacks (stacks that have a newer version), run
    :hg:`hide --cleanup`. This command is equivalent to:

    :hg:`hide 'obsolete() - ancestors(draft() & not obsolete())'`

    --cleanup skips obsolete commits with non-obsolete descendants.
    """
    if opts.get("cleanup") and len(opts.get("rev") + list(revs)) != 0:
        raise error.Abort(_("--rev and --cleanup are incompatible"))
    elif opts.get("cleanup"):
        # hides all the draft, obsolete commits that
        # don't have non-obsolete descendants
        revs = ["obsolete() - (draft() & ::(draft() & not obsolete()))"]
    else:
        revs = list(revs) + opts.pop("rev", [])

    with repo.wlock(), repo.lock(), repo.transaction("hide") as tr:
        revs = repo.revs("(%ld)::", scmutil.revrange(repo, revs))

        bookmarks = set(opts.get("bookmark", ()))
        if bookmarks:
            revs += bookmarksmod.reachablerevs(repo, bookmarks)
            if not revs:
                # No revs are reachable exclusively from these bookmarks, just
                # delete the bookmarks.
                if not ui.quiet:
                    for bookmark in sorted(bookmarks):
                        ui.status(
                            _("removing bookmark '%s' (was at: %s)\n")
                            % (bookmark, short(repo._bookmarks[bookmark]))
                        )
                bookmarksmod.delete(repo, tr, bookmarks)
                ui.status(
                    _n(
                        "%i bookmark removed\n",
                        "%i bookmarks removed\n",
                        len(bookmarks),
                    )
                    % len(bookmarks)
                )
                return 0

        if not revs:
            raise error.Abort(_("nothing to hide"))

        hidectxs = [repo[r] for r in revs]

        # revs to be hidden
        for ctx in hidectxs:
            if not ctx.mutable():
                raise error.Abort(
                    _("cannot hide immutable changeset: %s") % ctx,
                    hint="see 'hg help phases' for details",
                )
            if not ui.quiet:
                ui.status(
                    _('hiding commit %s "%s"\n')
                    % (ctx, ctx.description().split("\n")[0][:50])
                )

        wdp = repo["."]
        newnode = wdp

        while newnode in hidectxs:
            newnode = newnode.parents()[0]

        if newnode.node() != wdp.node():
            cmdutil.bailifchanged(repo, merge=False)
            hg.update(repo, newnode, False)
            ui.status(
                _("working directory now at %s\n") % ui.label(str(newnode), "node")
            )

        # create markers
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            obsolete.createmarkers(repo, [(r, []) for r in hidectxs], operation="hide")
        visibility.remove(repo, [c.node() for c in hidectxs])
        ui.status(
            _n("%i changeset hidden\n", "%i changesets hidden\n", len(hidectxs))
            % len(hidectxs)
        )

        # remove bookmarks pointing to hidden changesets
        hnodes = [r.node() for r in hidectxs]
        deletebookmarks = set(bookmarks)
        for bookmark, node in sorted(bookmarksmod.listbinbookmarks(repo)):
            if node in hnodes:
                deletebookmarks.add(bookmark)
        if deletebookmarks:
            for bookmark in sorted(deletebookmarks):
                if not ui.quiet:
                    ui.status(
                        _('removing bookmark "%s" (was at: %s)\n')
                        % (bookmark, short(repo._bookmarks[bookmark]))
                    )
            bookmarksmod.delete(repo, tr, deletebookmarks)
            ui.status(
                _n(
                    "%i bookmark removed\n",
                    "%i bookmarks removed\n",
                    len(deletebookmarks),
                )
                % len(deletebookmarks)
            )
        hintutil.trigger("undo")
Example #28
def fold(ui, repo, *revs, **opts):
    """combine multiple commits into a single commit

    With --from, folds all the revisions linearly between the current revision
    and the specified revision.

    With --exact, folds only the specified revisions while ignoring the revision
    currently checked out. The given revisions must form a linear unbroken
    chain.

    .. container:: verbose

     Some examples:

     - Fold from the current revision to its parent::

         hg fold --from .^

     - Fold all draft revisions into the current revision::

         hg fold --from 'draft()'

       See :hg:`help phases` for more about draft revisions and
       :hg:`help revsets` for more about the `draft()` keyword

     - Fold revisions between 3 and 6 into the current revision::

         hg fold --from 3::6

     - Fold revisions 3 and 4::

         hg fold "3 + 4" --exact

     - Only fold revisions linearly between foo and @::

         hg fold foo::@ --exact
    """
    revs = list(revs)
    revs.extend(opts["rev"])
    if not revs:
        raise error.Abort(_("no revisions specified"))

    revs = scmutil.revrange(repo, revs)

    if opts.get("no_rebase"):
        torebase = ()
    else:
        torebase = repo.revs("descendants(%ld) - (%ld)", revs, revs)

    if opts["from"] and opts["exact"]:
        raise error.Abort(_("cannot use both --from and --exact"))
    elif opts["from"]:
        # Try to extend given revision starting from the working directory
        extrevs = repo.revs("(%ld::.) or (.::%ld)", revs, revs)
        discardedrevs = [r for r in revs if r not in extrevs]
        if discardedrevs:
            msg = _("cannot fold non-linear revisions")
            hint = _("given revisions are unrelated to parent of working"
                     " directory")
            raise error.Abort(msg, hint=hint)
        revs = extrevs
    elif opts["exact"]:
        # Nothing to do; "revs" is already set correctly
        pass
    else:
        raise error.Abort(_("must specify either --from or --exact"))

    if not revs:
        raise error.Abort(
            _("specified revisions evaluate to an empty set"),
            hint=_("use different revision arguments"),
        )
    elif len(revs) == 1:
        ui.write_err(_("single revision specified, nothing to fold\n"))
        return 1

    with repo.wlock(), repo.lock(), ui.formatter("fold", opts) as fm:
        fm.startitem()
        root, head = _foldcheck(repo, revs)

        with repo.transaction("fold") as tr:
            commitopts = opts.copy()
            allctx = [repo[r] for r in revs]
            targetphase = max(c.phase() for c in allctx)

            if (commitopts.get("message") or commitopts.get("logfile")
                    or commitopts.get("reuse_message")):
                commitopts["edit"] = False
            else:
                msgs = ["HG: This is a fold of %d changesets." % len(allctx)]
                msgs += [
                    "HG: Commit message of %s.\n\n%s\n" %
                    (node.short(c.node()), c.description()) for c in allctx
                ]
                commitopts["message"] = "\n".join(msgs)
                commitopts["edit"] = True

            newid, unusedvariable = common.rewrite(
                repo,
                root,
                allctx,
                head,
                [root.p1().node(), root.p2().node()],
                commitopts=commitopts,
                mutop="fold",
            )
            phases.retractboundary(repo, tr, targetphase, [newid])

            replacements = {ctx.node(): (newid, ) for ctx in allctx}
            nodechanges = {
                fm.hexfunc(ctx.node()): [fm.hexfunc(newid)]
                for ctx in allctx
            }
            fm.data(nodechanges=fm.formatdict(nodechanges))
            scmutil.cleanupnodes(repo, replacements, "fold")
            fm.condwrite(not ui.quiet, "count", "%i changesets folded\n",
                         len(revs))
            if repo["."].rev() in revs:
                hg.update(repo, newid)

            if torebase:
                common.restackonce(ui, repo, repo[newid].rev())
Example #29
def _dosign(ui, repo, *revs, **opts):
    mygpg = newgpg(ui, **opts)
    sigver = "0"
    sigmessage = ""

    date = opts.get("date")
    if date:
        opts["date"] = util.parsedate(date)

    if revs:
        nodes = [repo.lookup(n) for n in revs]
    else:
        nodes = [
            node for node in repo.dirstate.parents() if node != hgnode.nullid
        ]
        if len(nodes) > 1:
            raise error.Abort(
                _("uncommitted merge - please provide a "
                  "specific revision"))
        if not nodes:
            nodes = [repo.changelog.tip()]

    for n in nodes:
        hexnode = hgnode.hex(n)
        ui.write(_("signing %s\n") % (hgnode.short(n)))
        # build data
        data = node2txt(repo, n, sigver)
        sig = mygpg.sign(data)
        if not sig:
            raise error.Abort(_("error while signing"))
        sig = binascii.b2a_base64(sig)
        sig = sig.replace("\n", "")
        sigmessage += "%s %s %s\n" % (hexnode, sigver, sig)

    # write it
    if opts["local"]:
        repo.localvfs.append("localsigs", sigmessage)
        return

    if not opts["force"]:
        msigs = match.exact(repo.root, "", [".hgsigs"])
        if any(repo.status(match=msigs, unknown=True, ignored=True)):
            raise error.Abort(
                _("working copy of .hgsigs is changed "),
                hint=_("please commit .hgsigs manually"),
            )

    sigsfile = repo.wvfs(".hgsigs", "ab")
    sigsfile.write(sigmessage)
    sigsfile.close()

    if ".hgsigs" not in repo.dirstate:
        with repo.lock(), repo.transaction("add-signatures"):
            repo[None].add([".hgsigs"])

    if opts["no_commit"]:
        return

    message = opts["message"]
    if not message:
        # we don't translate commit messages
        message = "\n".join([
            "Added signature for changeset %s" % hgnode.short(n) for n in nodes
        ])
    try:
        editor = cmdutil.getcommiteditor(editform="gpg.sign", **opts)
        repo.commit(message,
                    opts["user"],
                    opts["date"],
                    match=msigs,
                    editor=editor)
    except ValueError as inst:
        raise error.Abort(str(inst))
Example #30
def _ctx2str(self, ctx):
    if self.ui.debugflag:
        return ctx.hex()
    else:
        return node.short(ctx.node())