Example #1
def sigs(ui, repo):
    """list signed changesets"""
    mygpg = newgpg(ui)
    revs = {}

    for data, context in sigwalk(repo):
        node, version, sig = data
        fn, ln = context
        try:
            n = repo.lookup(node)
        except KeyError:
            ui.warn(_("%s:%d node does not exist\n") % (fn, ln))
            continue
        r = repo.changelog.rev(n)
        keys = getkeys(ui, repo, mygpg, data, context)
        if not keys:
            continue
        revs.setdefault(r, [])
        revs[r].extend(keys)
    for rev in sorted(revs, reverse=True):
        for k in revs[rev]:
            r = "%5d:%s" % (rev, hgnode.hex(repo.changelog.node(rev)))
            ui.write("%-30s %s\n" % (keystr(ui, k), r))
Example #2
def search(repo, diffid):
    """Perform a GraphQL query first. If it fails, fallback to local search.

    Returns (node, None) or (None, graphql_response) tuple.
    """

    repo.ui.debug("[diffrev] Starting graphql call\n")
    if repo.ui.configbool("phrevset", "graphqlonly"):
        return (None, graphqlgetdiff(repo, diffid))

    try:
        return (None, graphqlgetdiff(repo, diffid))
    except Exception as ex:
        repo.ui.warn(_("cannot resolve D%s via GraphQL: %s\n") % (diffid, ex))
        repo.ui.warn(_("falling back to search commits locally\n"))
        repo.ui.debug("[diffrev] Starting log walk\n")
        node = localgetdiff(repo, diffid)
        if node is None:
            # walked the entire repo and couldn't find the diff
            raise error.Abort("Could not find diff D%s in changelog" % diffid)
        repo.ui.debug("[diffrev] Parallel log walk completed with %s\n" %
                      hex(node))
        return (node, None)
Example #3
def snapshotcreate(ui, repo, *args, **opts):
    """creates a snapshot of the working copy
    """
    def removesnapshotfiles(ui, repo, metadata):
        match = scmutil.match(repo[None])
        files, dirs, error = repo.dirstate._fs.purge(match, [], True, True,
                                                     False, False)
        for m in error:
            ui.warn(_("warning: %s\n") % m)
        tr = repo.currenttransaction()
        if tr:
            for f in metadata.localvfsfiles:
                tr.removefilegenerator(f.path)
        for f in metadata.localvfsfiles:
            try:
                repo.localvfs.unlinkpath(f.path, ignoremissing=True)
            except OSError:
                ui.warn(_("%s cannot be removed") % f.path)

    with repo.wlock(), repo.lock():
        result = createsnapshotcommit(ui, repo, opts)
        if not result:
            ui.status(_("nothing changed\n"))
            return
        node, metadata = result
        node = nodemod.hex(node)
        with repo.transaction("update-snapshot-list") as tr:
            repo.snapshotlist.update(tr, addnodes=[node])
        ui.status(_("snapshot %s created\n") % (node))
        if opts.get("clean"):
            try:
                # We want to bring the working copy to the p1 state
                rev = repo[None].p1()
                hg.updatetotally(ui, repo, rev, rev, clean=True)
                removesnapshotfiles(ui, repo, metadata)
            except (KeyboardInterrupt, Exception) as exc:
                ui.warn(_("failed to clean the working copy: %s\n") % exc)
Example #4
def buildtemprevlog(repo, file):
    # get filename key
    filekey = hashlib.sha1(file).hexdigest()
    filedir = os.path.join(repo.path, "store/data", filekey)

    # sort all entries based on linkrev
    fctxs = []
    for filenode in os.listdir(filedir):
        if "_old" not in filenode:
            fctxs.append(repo.filectx(file, fileid=bin(filenode)))

    fctxs = sorted(fctxs, key=lambda x: x.linkrev())

    # add to revlog
    temppath = repo.sjoin("data/temprevlog.i")
    if os.path.exists(temppath):
        os.remove(temppath)
    r = filelog.filelog(repo.svfs, "temprevlog")

    class faket(object):
        def add(self, a, b, c):
            pass

    t = faket()
    for fctx in fctxs:
        if fctx.node() not in repo:
            continue

        p = fctx.filelog().parents(fctx.filenode())
        meta = {}
        if fctx.renamed():
            meta["copy"] = fctx.renamed()[0]
            meta["copyrev"] = hex(fctx.renamed()[1])

        r.add(fctx.data(), meta, t, fctx.linkrev(), p[0], p[1])

    return r
Example #5
    def _lookup(repo, proto, key):
        localkey = encoding.tolocal(key)

        if isinstance(localkey,
                      str) and repo._scratchbranchmatcher.match(localkey):
            scratchnode = repo.bundlestore.index.getnode(localkey)
            if scratchnode:
                return "%s %s\n" % (1, scratchnode)
            else:
                return "%s %s\n" % (0,
                                    "scratch branch %s not found" % localkey)
        else:
            try:
                r = nodemod.hex(repo.lookup(localkey))
                return "%s %s\n" % (1, r)
            except Exception as inst:
                try:
                    node = repo.bundlestore.index.getnodebyprefix(localkey)
                    if node:
                        return "%s %s\n" % (1, node)
                    else:
                        return "%s %s\n" % (0, str(inst))
                except Exception as inst:
                    return "%s %s\n" % (0, str(inst))
Example #6
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """

        if node == nullid:
            return True

        # If it appears to be a redacted file, do a full comparison. Normally
        # we'd do a flags comparison, but the flags coming from Mononoke in the
        # tests don't seem to include the redacted flag.
        if text == constants.REDACTED_MESSAGE:
            return self.read(node) != text

        # remotefilectx.cmp uses the size as a shortcircuit. Unfortunately the
        # size comparison is expensive for lfs files, since reading the size
        # from the store currently also involves reading the content.
        #
        # The content comparison is expensive as well, since we have to load
        # the content from the store and from disk. Let's just check the
        # node instead.
        p1, p2, linknode, copyfrom = self.repo.fileslog.metadatastore.getnodeinfo(
            self.filename, node
        )

        if copyfrom or text.startswith(b"\1\n"):
            meta = {}
            if copyfrom:
                meta["copy"] = copyfrom
                meta["copyrev"] = hex(p1)
                p1 = nullid
            text = filelog.packmeta(meta, text)

        newnode = revlog.hash(text, p1, p2)
        return node != newnode
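For context on the node comparison above, a minimal sketch of what revlog.hash is expected to compute: the classic Mercurial file node is the SHA-1 of the two parent nodes in sorted order followed by the (possibly metadata-prefixed) text. The helper below is illustrative only, not part of the original class.

import hashlib

def nodehash(text, p1, p2):
    # sha1(min(p1, p2) + max(p1, p2) + text)
    a, b = sorted([p1, p2])
    s = hashlib.sha1(a)
    s.update(b)
    s.update(text)
    return s.digest()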
Example #7
 def append(self, hsh, sidebranch=False, path=None, flush=False):
     """add a binary hg hash and return the mapped linelog revision.
     if flush is True, incrementally update the file.
     """
     if hsh in self._hsh2rev:
         raise error.CorruptedFileError("%r is in revmap already" %
                                        hex(hsh))
     if len(hsh) != _hshlen:
         raise hgerror.ProgrammingError("hsh must be %d-char long" %
                                        _hshlen)
     idx = len(self._rev2hsh)
     flag = 0
     if sidebranch:
         flag |= sidebranchflag
     if path is not None and path != self._renamepaths[-1]:
         flag |= renameflag
         self._renamerevs.append(idx)
         self._renamepaths.append(path)
     self._rev2hsh.append(hsh)
     self._rev2flag.append(flag)
     self._hsh2rev[hsh] = idx
     if flush:
         self.flush()
     return idx
Example #8
def _submitlocalchanges(repo, reponame, workspacename, lastsyncstate, failed, serv, tr):
    localheads = _getheads(repo)
    localbookmarks = _getbookmarks(repo)
    localremotebookmarks = _getremotebookmarks(repo)
    localsnapshots = _getsnapshots(repo, lastsyncstate)
    obsmarkers = obsmarkersmod.getsyncingobsmarkers(repo)

    # If any commits failed to back up, exclude them.  Revert any bookmark changes
    # that point to failed commits.
    if failed:
        localheads = [
            nodemod.hex(head)
            for head in repo.nodes("heads(draft() & ::%ls - %ld::)", localheads, failed)
        ]
        failedset = set(repo.nodes("draft() & %ld::", failed))
        for name, bookmarknode in list(localbookmarks.items()):
            if nodemod.bin(bookmarknode) in failedset:
                if name in lastsyncstate.bookmarks:
                    localbookmarks[name] = lastsyncstate.bookmarks[name]
                else:
                    del localbookmarks[name]

    # Work out what we should have synced locally (and haven't deliberately
    # omitted)
    omittedheads = set(lastsyncstate.omittedheads)
    omittedbookmarks = set(lastsyncstate.omittedbookmarks)
    omittedremotebookmarks = set(lastsyncstate.omittedremotebookmarks)
    localsyncedheads = [
        head for head in lastsyncstate.heads if head not in omittedheads
    ]
    localsyncedbookmarks = {
        name: node
        for name, node in lastsyncstate.bookmarks.items()
        if name not in omittedbookmarks
    }
    localsyncedremotebookmarks = {
        name: node
        for name, node in lastsyncstate.remotebookmarks.items()
        if name not in omittedremotebookmarks
    }

    remotebookmarkschanged = (
        _isremotebookmarkssyncenabled(repo.ui)
        and localremotebookmarks != localsyncedremotebookmarks
    )

    localsnapshotsset = set(localsnapshots)

    if (
        set(localheads) == set(localsyncedheads)
        and localbookmarks == localsyncedbookmarks
        and not remotebookmarkschanged
        and lastsyncstate.version != 0
        and not obsmarkers
        and localsnapshotsset == set(lastsyncstate.snapshots)
    ):
        # Nothing to send.
        return True, None

    # The local repo has changed.  We must send these changes to the
    # cloud.

    # Work out the new cloud heads and bookmarks by merging in the
    # omitted items.  We need to preserve the ordering of the cloud
    # heads so that smartlogs generally match.
    localandomittedheads = set(localheads).union(lastsyncstate.omittedheads)
    newcloudheads = util.removeduplicates(
        [head for head in lastsyncstate.heads if head in localandomittedheads]
        + localheads
    )
    newcloudbookmarks = {
        name: localbookmarks.get(name, lastsyncstate.bookmarks.get(name))
        for name in set(localbookmarks.keys()).union(lastsyncstate.omittedbookmarks)
    }

    # Work out what the new omitted heads and bookmarks are.
    newomittedheads = list(set(newcloudheads).difference(localheads))
    newomittedbookmarks = list(
        set(newcloudbookmarks.keys()).difference(localbookmarks.keys())
    )

    oldremotebookmarks = []
    newremotebookmarks = {}
    newomittedremotebookmarks = []
    if _isremotebookmarkssyncenabled(repo.ui):
        # do not need to submit local remote bookmarks if the feature is not enabled
        oldremotebookmarks = lastsyncstate.remotebookmarks.keys()
        newremotebookmarks = {
            name: localremotebookmarks.get(
                name, lastsyncstate.remotebookmarks.get(name)
            )
            for name in set(localremotebookmarks.keys()).union(
                lastsyncstate.omittedremotebookmarks
            )
        }
        newomittedremotebookmarks = list(
            set(newremotebookmarks.keys()).difference(localremotebookmarks.keys())
        )

    backuplock.progress(repo, "finishing synchronizing with '%s'" % workspacename)
    synced, cloudrefs = serv.updatereferences(
        reponame,
        workspacename,
        lastsyncstate.version,
        lastsyncstate.heads,
        newcloudheads,
        lastsyncstate.bookmarks.keys(),
        newcloudbookmarks,
        obsmarkers,
        oldremotebookmarks,
        newremotebookmarks,
        lastsyncstate.snapshots,
        localsnapshots,
        logopts={"metalogroot": hex(repo.svfs.metalog.root())},
    )
    if synced:
        logsyncop(
            repo,
            "to_cloud",
            cloudrefs.version,
            lastsyncstate.heads,
            newcloudheads,
            lastsyncstate.bookmarks,
            newcloudbookmarks,
            oldremotebookmarks,
            newremotebookmarks,
            lastsyncstate.snapshots,
            localsnapshots,
        )
        lastsyncstate.update(
            tr,
            newversion=cloudrefs.version,
            newheads=newcloudheads,
            newbookmarks=newcloudbookmarks,
            newremotebookmarks=newremotebookmarks,
            newomittedheads=newomittedheads,
            newomittedbookmarks=newomittedbookmarks,
            newomittedremotebookmarks=newomittedremotebookmarks,
            newsnapshots=localsnapshots,
        )
        obsmarkersmod.clearsyncingobsmarkers(repo)

    return synced, cloudrefs
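The head merge above leans on util.removeduplicates to keep the first occurrence of each head so that the cloud ordering is preserved. Assuming those semantics, an order-preserving stand-in would be the following hypothetical helper:

def removeduplicates(items):
    # keep the first occurrence of each item, preserving the original order
    seen = set()
    result = []
    for item in items:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result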
Example #9
        def log(self, event, *msg, **opts):
            global lastui
            super(blackboxui, self).log(event, *msg, **opts)

            if not "*" in self.track and not event in self.track:
                return

            if not msg or not msg[0]:
                return

            if self._bbvfs:
                ui = self
            else:
                # certain ui instances exist outside the context of
                # a repo, so just default to the last blackbox that
                # was seen.
                ui = lastui()

            if not ui:
                return
            vfs = ui._bbvfs
            if not vfs:
                return

            repo = getattr(ui, "_bbrepo", lambda: None)()
            if not lastui() or repo:
                lastui = weakref.ref(ui)
            if getattr(ui, "_bbinlog", False):
                # recursion and failure guard
                return
            ui._bbinlog = True
            default = self.configdate("devel", "default-date")
            date = util.datestr(default, "%Y/%m/%d %H:%M:%S")
            user = util.getuser()
            pid = "%d" % util.getpid()
            if len(msg) == 1:
                # Don't even try to format the string if there is only one
                # argument.
                formattedmsg = msg[0]
            else:
                try:
                    formattedmsg = msg[0] % msg[1:]
                except TypeError:
                    # If fails with `TypeError: not enough arguments for format
                    # string`, concatenate the arguments gracefully.
                    formattedmsg = " ".join(msg)
            rev = "(unknown)"
            changed = ""
            # Only log the current commit if the changelog has already been
            # loaded.
            if repo and "changelog" in repo.__dict__:
                try:
                    ctx = repo[None]
                    parents = ctx.parents()
                    rev = "+".join([hex(p.node()) for p in parents])
                except Exception:
                    # This can happen if the dirstate file is sufficiently
                    # corrupt that we can't extract the parents. In that case,
                    # just don't set the rev.
                    pass
                if ui.configbool("blackbox", "dirty") and ctx.dirty(
                        missing=True, merge=False, branch=False):
                    changed = "+"
            if ui.configbool("blackbox", "logsource"):
                src = " [%s]" % event
            else:
                src = ""
            requestid = ui.environ.get("HGREQUESTID") or ""
            if requestid:
                src += "[%s]" % requestid
            try:
                fmt = "%s %s @%s%s (%s)%s> %s"
                args = (date, user, rev, changed, pid, src, formattedmsg)
                with _openlogfile(ui, vfs) as fp:
                    line = fmt % args
                    if not line.endswith("\n"):
                        line += "\n"
                    fp.write(encodeutf8(line))
            except (IOError, OSError) as err:
                self.debug("warning: cannot write to blackbox.log: %s\n" %
                           err.strerror)
                # do not restore _bbinlog intentionally to avoid failed
                # logging again
            else:
                ui._bbinlog = False
Example #10
    def prefetch(self,
                 fileids,
                 force=False,
                 fetchdata=True,
                 fetchhistory=True):
        """downloads the given file versions to the cache
        """
        repo = self.repo
        idstocheck = set()
        for file, id in fileids:
            # hack
            # - we don't use .hgtags
            # - workingctx produces ids with length 42,
            #   which we skip since they aren't in any cache
            if file == ".hgtags" or len(
                    id) == 42 or not repo.shallowmatch(file):
                continue

            idstocheck.add((file, bin(id)))

        batchlfsdownloads = self.ui.configbool("remotefilelog",
                                               "_batchlfsdownloads", True)
        dolfsprefetch = self.ui.configbool("remotefilelog", "dolfsprefetch",
                                           True)

        idstocheck = list(idstocheck)
        if repo.fileslog._ruststore:
            if not force:
                contentstore = repo.fileslog.contentstore
                metadatastore = repo.fileslog.metadatastore
            else:
                contentstore, metadatastore = repo.fileslog.makesharedonlyruststore(
                    repo)

            if fetchdata:
                contentstore.prefetch(idstocheck)
            if fetchhistory:
                metadatastore.prefetch(idstocheck)

            if batchlfsdownloads and dolfsprefetch:
                self._lfsprefetch(fileids)

            if force:
                # Yay, since the shared-only stores and the regular ones aren't
                # shared, we need to commit data to force the stores to be
                # rebuilt. Forced prefetch are very rare and thus it is most
                # likely OK to do this.
                contentstore = None
                metadatastore = None
                repo.commitpending()

            return

        datastore = self.datastore
        historystore = self.historystore
        if force:
            datastore = unioncontentstore(*repo.fileslog.shareddatastores)
            historystore = unionmetadatastore(
                *repo.fileslog.sharedhistorystores)

        perftrace.tracevalue("Keys", len(idstocheck))
        missingids = set()
        if fetchdata:
            missingids.update(datastore.getmissing(idstocheck))
            perftrace.tracevalue("Missing Data", len(missingids))
        if fetchhistory:
            missinghistory = historystore.getmissing(idstocheck)
            missingids.update(missinghistory)
            perftrace.tracevalue("Missing History", len(missinghistory))

        # partition missing nodes into nullid and not-nullid so we can
        # warn about this filtering potentially shadowing bugs.
        nullids = len([None for unused, id in missingids if id == nullid])
        if nullids:
            missingids = [(f, id) for f, id in missingids if id != nullid]
            repo.ui.develwarn(
                ("remotefilelog not fetching %d null revs"
                 " - this is likely hiding bugs" % nullids),
                config="remotefilelog-ext",
            )
        if missingids:
            global fetches, fetched, fetchcost
            fetches += 1

            missingids = [(file, hex(id)) for file, id in missingids]

            fetched += len(missingids)

            start = time.time()
            with self.ui.timesection("fetchingfiles"):
                self.request(missingids, fetchdata, fetchhistory)
            fetchcost += time.time() - start
            if not batchlfsdownloads and dolfsprefetch:
                self._lfsprefetch(fileids)
        if batchlfsdownloads and dolfsprefetch:
            self._lfsprefetch(fileids)
Example #11
def unamend(ui, repo, **opts):
    """undo the last amend operation on the current commit

    Reverse the effects of an :hg:`amend` operation. Hides the current commit
    and checks out the previous version of the commit. :hg:`unamend` does not
    revert the state of the working copy, so changes that were added to the
    commit in the last amend operation become pending changes in the working
    copy.

    :hg:`unamend` cannot be run on amended commits that have children. In
    other words, you cannot unamend an amended commit in the middle of a
    stack.

    .. note::

        Running :hg:`unamend` is similar to running :hg:`undo --keep`
        immediately after :hg:`amend`. However, unlike :hg:`undo`, which can
        only undo an amend if it was the last operation you performed,
        :hg:`unamend` can unamend any draft amended commit in the graph that
        does not have children.

    .. container:: verbose

      Although :hg:`unamend` is typically used to reverse the effects of
      :hg:`amend`, it actually rolls back the current commit to its previous
      version, regardless of whether the changes resulted from an :hg:`amend`
      operation or from another operation, such as :hg:`rebase`.
    """
    unfi = repo

    # identify the commit from which to unamend
    curctx = repo["."]

    # identify the commit to which to unamend
    if mutation.enabled(repo):
        prednodes = curctx.mutationpredecessors()
        if not prednodes:
            prednodes = []
    else:
        prednodes = [marker.prednode() for marker in predecessormarkers(curctx)]

    if len(prednodes) != 1:
        e = _("changeset must have one predecessor, found %i predecessors")
        raise error.Abort(e % len(prednodes))
    prednode = prednodes[0]

    if prednode not in unfi:
        # Trigger autopull.
        autopull.trypull(unfi, [nodemod.hex(prednode)])

    predctx = unfi[prednode]

    if curctx.children():
        raise error.Abort(_("cannot unamend in the middle of a stack"))

    with repo.wlock(), repo.lock():
        ctxbookmarks = curctx.bookmarks()
        changedfiles = []
        wctx = repo[None]
        wm = wctx.manifest()
        cm = predctx.manifest()
        dirstate = repo.dirstate
        diff = cm.diff(wm)
        changedfiles.extend(pycompat.iterkeys(diff))

        tr = repo.transaction("unamend")
        with dirstate.parentchange():
            dirstate.rebuild(prednode, cm, changedfiles)
            # we want added and removed files to be shown
            # properly, not with ? and ! prefixes
            for filename, data in pycompat.iteritems(diff):
                if data[0][0] is None:
                    dirstate.add(filename)
                if data[1][0] is None:
                    dirstate.remove(filename)
        changes = []
        for book in ctxbookmarks:
            changes.append((book, prednode))
        repo._bookmarks.applychanges(repo, tr, changes)
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            obsolete.createmarkers(repo, [(curctx, (predctx,))])
        visibility.remove(repo, [curctx.node()])
        visibility.add(repo, [predctx.node()])
        tr.close()
Example #12
 def hex(self):
     return nodemod.hex(self._nodeinfo.node)
Example #13
 def getheads(self):
     return [nodemod.hex(h) for h in self._heads if self.keep(h)]
Example #14
def upload(repo, revs, force=False):
    """Upload draft commits using EdenApi Uploads

    Commits that have already been uploaded will be skipped.
    If no revision is specified, uploads all visible commits.

    Returns list of uploaded heads (as nodes) and list of failed commits (as nodes).
    """
    ui = repo.ui

    if revs is None:
        heads = [ctx.node() for ctx in repo.set("heads(not public())")]
    else:
        heads = [
            ctx.node() for ctx in repo.set(
                "heads((not public() & ::%ld))",
                revs,
            )
        ]
    if not heads:
        ui.status(_("nothing to upload\n"), component="commitcloud")
        return [], []

    # Check what heads have been already uploaded and what heads are missing
    missingheads = heads if force else edenapi_upload._filtercommits(
        repo, heads)

    if not missingheads:
        ui.status(_("nothing to upload\n"), component="commitcloud")
        return heads, []

    # Print the heads missing on the server
    _maxoutput = 20
    for counter, node in enumerate(missingheads):
        if counter == _maxoutput:
            left = len(missingheads) - counter
            repo.ui.status(
                _n(
                    "  and %d more head...\n",
                    "  and %d more heads...\n",
                    left,
                ) % left)
            break
        ui.status(
            _("head '%s' hasn't been uploaded yet\n") % nodemod.hex(node)[:12],
            component="commitcloud",
        )

    draftrevs = repo.changelog.torevset(
        repo.dageval(lambda: ancestors(missingheads) & draft()))

    # If the only draft revs are the missing heads then we can skip the
    # known checks, as we know they are all missing.
    skipknowncheck = len(draftrevs) == len(missingheads)
    newuploaded, failed = edenapi_upload.uploadhgchangesets(
        repo, draftrevs, force, skipknowncheck)

    failednodes = {repo[r].node() for r in failed}

    # Uploaded heads are the requested heads that did not fail, plus the heads of the 'newuploaded' revs.

    # Example (5e4faf031 must be included in uploadedheads):
    #  o  4bb40f883 (failed)
    #  │
    #  @  5e4faf031 (uploaded)

    uploadedheads = list(
        repo.nodes("heads(%ld) + %ln - heads(%ln)", newuploaded, heads,
                   failednodes))

    return uploadedheads, failednodes
Example #15
def node2txt(repo, node, ver):
    """map a manifest into some text"""
    if ver == "0":
        return "%s\n" % hgnode.hex(node)
    else:
        raise error.Abort(_("unknown signature version"))
Example #16
def addchangegroupfiles(orig, repo, source, revmap, trp, expectedfiles, *args):
    if not requirement in repo.requirements:
        return orig(repo, source, revmap, trp, expectedfiles, *args)

    newfiles = 0
    visited = set()
    revisiondatas = {}
    queue = []

    # Normal Mercurial processes each file one at a time, adding all
    # the new revisions for that file at once. In remotefilelog a file
    # revision may depend on a different file's revision (in the case
    # of a rename/copy), so we must lay all revisions down across all
    # files in topological order.

    # read all the file chunks but don't add them
    with progress.bar(repo.ui, _("files"), total=expectedfiles) as prog:
        while True:
            chunkdata = source.filelogheader()
            if not chunkdata:
                break
            f = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % f)
            prog.value += 1

            if not repo.shallowmatch(f):
                fl = repo.file(f)
                deltas = source.deltaiter()
                fl.addgroup(deltas, revmap, trp)
                continue

            chain = None
            while True:
                # returns: (node, p1, p2, cs, deltabase, delta, flags) or None
                revisiondata = source.deltachunk(chain)
                if not revisiondata:
                    break

                chain = revisiondata[0]

                revisiondatas[(f, chain)] = revisiondata
                queue.append((f, chain))

                if f not in visited:
                    newfiles += 1
                    visited.add(f)

            if chain is None:
                raise error.Abort(_("received file revlog group is empty"))

    processed = set()

    def available(f, node, depf, depnode):
        if depnode != nullid and (depf, depnode) not in processed:
            if not (depf, depnode) in revisiondatas:
                # It's not in the changegroup, assume it's already
                # in the repo
                return True
            # re-add self to queue
            queue.insert(0, (f, node))
            # add dependency in front
            queue.insert(0, (depf, depnode))
            return False
        return True

    skipcount = 0

    # Prefetch the non-bundled revisions that we will need
    prefetchfiles = []
    for f, node in queue:
        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
        dependents = [revisiondata[1], revisiondata[2], revisiondata[4]]

        for dependent in dependents:
            if dependent == nullid or (f, dependent) in revisiondatas:
                continue
            prefetchfiles.append((f, hex(dependent)))

    repo.fileservice.prefetch(prefetchfiles)

    # Get rawtext by applying delta chains.
    @util.lrucachefunc
    def reconstruct(f, node):
        revisiondata = revisiondatas.get((f, node), None)
        if revisiondata is None:
            # Read from repo.
            return repo.file(f).revision(node, raw=False)
        else:
            # Apply delta-chain.
            # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
            deltabase, delta, flags = revisiondata[4:]
            if deltabase == nullid:
                base = ""
            else:
                if flags:
                    # LFS (flags != 0) should always use nullid as deltabase.
                    raise error.Abort("unexpected deltabase")
                base = reconstruct(f, deltabase)
            rawtext = mdiff.patch(base, delta)
            if isinstance(rawtext, pycompat.buffer):  # noqa
                rawtext = bytes(rawtext)
            return rawtext

    # Apply the revisions in topological order such that a revision
    # is only written once its deltabase and parents have been written.
    maxskipcount = len(queue) + 1
    while queue:
        f, node = queue.pop(0)
        if (f, node) in processed:
            continue

        skipcount += 1
        if skipcount > maxskipcount:
            raise error.Abort(_("circular node dependency on ancestormap"))

        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
        node, p1, p2, linknode, deltabase, delta, flags = revisiondata

        # Deltas are always against flags=0 rawtext (see revdiff and its
        # callers), if deltabase is not nullid.
        if flags and deltabase != nullid:
            raise error.Abort("unexpected deltabase")

        rawtext = reconstruct(f, node)
        meta, text = shallowutil.parsemeta(rawtext, flags)
        if "copy" in meta:
            copyfrom = meta["copy"]
            copynode = bin(meta["copyrev"])
            if not available(f, node, copyfrom, copynode):
                continue

        if any(not available(f, node, f, p) for p in [p1, p2] if p != nullid):
            continue

        # Use addrawrevision so if it's already LFS, take it as-is, do not
        # re-calculate the LFS object.
        fl = repo.file(f)
        fl.addrawrevision(rawtext,
                          trp,
                          linknode,
                          p1,
                          p2,
                          node=node,
                          flags=flags)
        processed.add((f, node))
        skipcount = 0

    return len(revisiondatas), newfiles
Example #17
    def putcommit(self, files, copies, parents, commit, source, revmap, full,
                  cleanp2):
        files = dict(files)

        def getfilectx(repo, memctx, f):
            if p2ctx and f in p2files and f not in copies:
                self.ui.debug("reusing %s from p2\n" % f)
                try:
                    return p2ctx[f]
                except error.ManifestLookupError:
                    # If the file doesn't exist in p2, then we're syncing a
                    # delete, so just return None.
                    return None
            try:
                v = files[f]
            except KeyError:
                return None
            data, mode = source.getfile(f, v)
            if data is None:
                return None
            return context.memfilectx(self.repo, memctx, f, data, "l" in mode,
                                      "x" in mode, copies.get(f))

        pl = []
        for p in parents:
            if p not in pl:
                pl.append(p)
        parents = pl
        nparents = len(parents)
        if self.filemapmode and nparents == 1:
            m1node = self.repo.changelog.read(nodemod.bin(parents[0]))[0]
            parent = parents[0]

        if len(parents) < 2:
            parents.append(nodemod.nullid)
        if len(parents) < 2:
            parents.append(nodemod.nullid)
        p2 = parents.pop(0)

        text = commit.desc

        sha1s = re.findall(sha1re, text)
        for sha1 in sha1s:
            try:
                oldrev = source.lookuprev(sha1)
                newrev = revmap.get(oldrev)
                if newrev is not None:
                    text = text.replace(sha1, newrev[:len(sha1)])
            except Exception:
                # Don't crash if we find a bad sha in the message
                continue

        extra = commit.extra.copy()

        sourcename = self.repo.ui.config("convert", "hg.sourcename")
        if sourcename:
            extra["convert_source"] = sourcename

        for label in (
                "source",
                "transplant_source",
                "rebase_source",
                "intermediate-source",
        ):
            node = extra.get(label)

            if node is None:
                continue

            # Only transplant stores its reference in binary
            if label == "transplant_source":
                node = nodemod.hex(node)

            newrev = revmap.get(node)
            if newrev is not None:
                if label == "transplant_source":
                    newrev = nodemod.bin(newrev)

                extra[label] = newrev

        if self.branchnames and commit.branch:
            extra["branch"] = commit.branch
        if commit.rev and commit.saverev:
            extra["convert_revision"] = commit.rev

        while parents:
            p1 = p2
            p2 = parents.pop(0)
            p1ctx = self.repo[p1]
            p2ctx = None
            if p2 != nodemod.nullid:
                p2ctx = self.repo[p2]
            fileset = set(files)
            if full:
                fileset.update(self.repo[p1])
                fileset.update(self.repo[p2])

            if p2ctx:
                p2files = set(cleanp2)
                for file in self._calculatemergedfiles(source, p1ctx, p2ctx):
                    p2files.add(file)
                    fileset.add(file)

            ctx = context.memctx(
                self.repo,
                (p1, p2),
                text,
                fileset,
                getfilectx,
                commit.author,
                commit.date,
                extra,
            )

            # We won't know if the conversion changes the node until after the
            # commit, so copy the source's phase for now.
            self.repo.ui.setconfig("phases", "new-commit",
                                   phases.phasenames[commit.phase], "convert")

            with self.repo.transaction("convert") as tr:
                node = nodemod.hex(self.repo.commitctx(ctx))

                # If the node value has changed, but the phase is lower than
                # draft, set it back to draft since it hasn't been exposed
                # anywhere.
                if commit.rev != node:
                    ctx = self.repo[node]
                    if ctx.phase() < phases.draft:
                        phases.registernew(self.repo, tr, phases.draft,
                                           [ctx.node()])

            text = "(octopus merge fixup)\n"
            p2 = node

        if self.filemapmode and nparents == 1:
            mfl = self.repo.manifestlog
            mnode = self.repo.changelog.read(nodemod.bin(p2))[0]
            closed = "close" in commit.extra
            if not closed and not mfl[m1node].read().diff(mfl[mnode].read()):
                self.ui.status(_("filtering out empty revision\n"))
                self.repo.rollback(force=True)
                return parent
        return p2
Example #18
def pushbackupbookmarks(repo, remotepath, getconnection, backupstate):
    """
    Push a backup bundle to the server that updates the infinitepush backup
    bookmarks.
    """
    unfi = repo

    # Create backup bookmarks for the heads and bookmarks of the user.  We
    # need to include only commits that have been successfully backed up, so
    # that we can be sure they are available on the server.
    clrev = unfi.changelog.rev
    ancestors = unfi.changelog.ancestors(
        [clrev(head) for head in backupstate.heads], inclusive=True)
    # Get the heads of visible draft commits that are already backed up,
    # including commits made visible by bookmarks.
    #
    # For historical compatibility, we ignore obsolete and secret commits
    # as they are normally excluded from backup bookmarks.
    with perftrace.trace("Compute Heads"):
        revset = "heads((draft() & ::((draft() - obsolete() - hidden()) + bookmark())) & (draft() & ::%ln))"
        heads = [
            nodemod.hex(head) for head in unfi.nodes(revset, backupstate.heads)
        ]
    # Get the bookmarks that point to ancestors of backed up draft commits or
    # to commits that are public.
    with perftrace.trace("Compute Bookmarks"):
        bookmarks = {}
        for name, node in pycompat.iteritems(repo._bookmarks):
            ctx = repo[node]
            if ctx.rev() in ancestors or ctx.phase() == phases.public:
                bookmarks[name] = ctx.hex()

    infinitepushbookmarks = {}
    prefix = _backupbookmarkprefix(repo)
    localstate = _readlocalbackupstate(repo, remotepath)

    if localstate is None:
        # If there is nothing to backup, don't push any backup bookmarks yet.
        # The user may wish to restore the previous backup.
        if not heads and not bookmarks:
            return

        # Delete all server bookmarks and replace them with the full set.  The
        # server knows to do deletes before adds, and deletes are done by glob
        # pattern (see infinitepush.bundleparts.bundle2scratchbookmarks).
        infinitepushbookmarks["/".join((prefix, "heads", "*"))] = ""
        infinitepushbookmarks["/".join((prefix, "bookmarks", "*"))] = ""
        oldheads = set()
        oldbookmarks = {}
    else:
        # Generate a delta update based on the local state.
        oldheads, oldbookmarks = localstate

        if set(oldheads) == set(heads) and oldbookmarks == bookmarks:
            return

        for oldhead in oldheads:
            if oldhead not in heads:
                infinitepushbookmarks["/".join(
                    (prefix, "heads", oldhead))] = ""
        for oldbookmark in oldbookmarks:
            if oldbookmark not in bookmarks:
                infinitepushbookmarks["/".join(
                    (prefix, "bookmarks", _escapebookmark(oldbookmark)))] = ""

    for bookmark, hexnode in bookmarks.items():
        if bookmark not in oldbookmarks or hexnode != oldbookmarks[bookmark]:
            name = "/".join((prefix, "bookmarks", _escapebookmark(bookmark)))
            infinitepushbookmarks[name] = hexnode
    for hexhead in heads:
        if hexhead not in oldheads:
            name = "/".join((prefix, "heads", hexhead))
            infinitepushbookmarks[name] = hexhead

    if not infinitepushbookmarks:
        return

    # developer config: infinitepushbackup.backupbookmarklimit
    backupbookmarklimit = repo.ui.configint("infinitepushbackup",
                                            "backupbookmarklimit", 1000)
    if len(infinitepushbookmarks) > backupbookmarklimit:
        repo.ui.warn(
            _("not pushing backup bookmarks for %s as there are too many (%s > %s)\n"
              ) % (prefix, len(infinitepushbookmarks), backupbookmarklimit),
            notice=_("warning"),
            component="commitcloud",
        )
        return

    # Push a bundle containing the new bookmarks to the server.
    with perftrace.trace(
            "Push Backup Bookmark Bundle"), getconnection() as conn:
        dependencies.infinitepush.pushbackupbundle(repo.ui, repo, conn.peer,
                                                   None, infinitepushbookmarks)

    # Store the new local backup state.
    _writelocalbackupstate(repo, remotepath, heads, bookmarks)
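To illustrate the backup bookmark names assembled above (the prefix shown is hypothetical): heads are recorded under "<prefix>/heads/<hexnode>", local bookmarks under "<prefix>/bookmarks/<escaped name>", and an empty value asks the server to delete whatever the name (or "*" glob) matches.

prefix = "infinitepush/backups/someuser/somehost/repo"  # assumed shape only
hexhead = "0123456789abcdef0123456789abcdef01234567"
infinitepushbookmarks = {
    "/".join((prefix, "heads", "*")): "",                # delete all old head bookmarks
    "/".join((prefix, "heads", hexhead)): hexhead,       # record one backed-up head
    "/".join((prefix, "bookmarks", "master")): hexhead,  # record a local bookmark
}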
Example #19
 def lookuprev(self, rev):
     try:
         return nodemod.hex(self.repo.lookup(rev))
     except (error.RepoError, error.LookupError):
         return None
Example #20
def _loadfileblob(repo, path, node):
    usesimplecache = repo.ui.configbool("remotefilelog", "simplecacheserverstore")
    cachepath = repo.ui.config("remotefilelog", "servercachepath")
    if cachepath and usesimplecache:
        raise error.Abort(
            "remotefilelog.servercachepath and remotefilelog.simplecacheserverstore can't be both enabled"
        )

    key = os.path.join(path, hex(node))

    # simplecache store for remotefilelogcache
    if usesimplecache:
        try:
            simplecache = extensions.find("simplecache")
        except KeyError:
            raise error.Abort(
                "simplecache extension must be enabled with remotefilelog.simplecacheserverstore enabled"
            )

        # this function doesn't raise exceptions
        text = simplecache.cacheget(key, trivialserializer, repo.ui)
        if text:
            return text
        else:
            text = readvalue(repo, path, node)
            # this function doesn't raise exceptions
            simplecache.cacheset(key, text, trivialserializer, repo.ui)
            return text

    # on disk store for remotefilelogcache
    if not cachepath:
        cachepath = os.path.join(repo.path, "remotefilelogcache")

    filecachepath = os.path.join(cachepath, key)
    if not os.path.exists(filecachepath) or os.path.getsize(filecachepath) == 0:
        text = readvalue(repo, path, node)
        # everything should be user & group read/writable
        oldumask = os.umask(0o002)
        try:
            dirname = os.path.dirname(filecachepath)
            if not os.path.exists(dirname):
                try:
                    os.makedirs(dirname)
                except OSError as ex:
                    if ex.errno != errno.EEXIST:
                        raise
            f = None
            try:
                f = util.atomictempfile(filecachepath, "w")
                f.write(text)
            except (IOError, OSError):
                # Don't abort if the user only has permission to read,
                # and not write.
                pass
            finally:
                if f:
                    f.close()
        finally:
            os.umask(oldumask)
    else:
        with util.posixfile(filecachepath, "r") as f:
            text = f.read()
    return text
Example #21
 def _write(self, fp):
     fp.write(encodeutf8("%s\n" % FORMAT_VERSION))
     for h in self.heads:
         fp.write(encodeutf8("%s\n" % (node.hex(h), )))
     self.dirty = False
     self._logheads("wrote", visibility_newheadcount=len(self.heads))
Example #22
def getflogheads(repo, proto, path):
    """A server api for requesting a filelog's heads"""
    flog = repo.file(path)
    heads = flog.heads()
    return "\n".join((hex(head) for head in heads if head != nullid))
Example #23
 def add(self, filename, node, p1, p2, linknode, copyfrom):
     raise RuntimeError("cannot add to historypackstore (%s:%s)" %
                        (filename, hex(node)))
Example #24
    def streamer():
        # type: () -> Iterable[bytes]
        """Request format:

        [<filerequest>,...]\0\0
        filerequest = <filename len: 2 byte><filename><count: 4 byte>
                      [<node: 20 byte>,...]

        Response format:
        [<fileresponse>,...]<10 null bytes>
        fileresponse = <filename len: 2 byte><filename><history><deltas>
        history = <count: 4 byte>[<history entry>,...]
        historyentry = <node: 20 byte><p1: 20 byte><p2: 20 byte>
                       <linknode: 20 byte><copyfrom len: 2 byte><copyfrom>
        deltas = <count: 4 byte>[<delta entry>,...]
        deltaentry = <node: 20 byte><deltabase: 20 byte>
                     <delta len: 8 byte><delta>
                     <metadata>

        if version == 1:
            metadata = <nothing>
        elif version == 2:
            metadata = <meta len: 4 bytes><metadata-list>
            metadata-list = [<metadata-item>, ...]
            metadata-item = <metadata-key: 1 byte>
                            <metadata-value len: 2 byte unsigned>
                            <metadata-value>
        """
        files = _receivepackrequest(proto.fin)

        args = []
        responselen = 0
        starttime = time.time()

        invalidatelinkrev = "invalidatelinkrev" in repo.storerequirements

        # Sort the files by name, so we provide deterministic results
        for filename, nodes in sorted(pycompat.iteritems(files)):
            filename = pycompat.decodeutf8(filename)
            args.append([filename, [hex(n) for n in nodes]])
            fl = repo.file(filename)

            # Compute history
            history = []
            for rev in fl.ancestors(list(fl.rev(n) for n in nodes), inclusive=True):
                x, x, x, x, linkrev, p1, p2, node = fl.index[rev]
                copyfrom = ""
                p1node = fl.node(p1)
                p2node = fl.node(p2)
                if invalidatelinkrev:
                    linknode = nullid
                else:
                    linknode = repo.changelog.node(linkrev)
                if p1node == nullid:
                    copydata = fl.renamed(node)
                    if copydata:
                        copyfrom, copynode = copydata
                        p1node = copynode

                history.append((node, p1node, p2node, linknode, copyfrom))

            # Scan and send deltas
            chain = _getdeltachain(fl, nodes, version)

            for chunk in wirepack.sendpackpart(
                filename, history, chain, version=version
            ):
                responselen += len(chunk)
                yield chunk

        close = wirepack.closepart()
        responselen += len(close)
        yield close
        proto.fout.flush()

        if repo.ui.configbool("wireproto", "loggetpack"):
            _logwireprotorequest(
                repo,
                "getpackv1" if version == 1 else "getpackv2",
                starttime,
                responselen,
                args,
            )
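The docstring of streamer above doubles as a wire-format spec. As a reading aid, a client-side encoder for a single filerequest might look like the sketch below; the helper name and the use of network byte order are assumptions, not code from the extension.

import struct

def packfilerequest(filename, binnodes):
    # <filename len: 2 byte><filename><count: 4 byte>[<node: 20 byte>,...]
    name = filename.encode("utf-8")
    parts = [struct.pack("!H", len(name)), name, struct.pack("!I", len(binnodes))]
    parts.extend(binnodes)  # each node is a 20-byte binary hash
    return b"".join(parts)

# A full request is one or more filerequests followed by the "\0\0" terminator:
# request = packfilerequest("foo/bar.py", [b"\x11" * 20]) + b"\0\0"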
Example #25
def diffidtonode(repo, diffid):
    """Return node that matches a given Differential ID or None.

    The node might exist or not exist in the repo.
    This function does not raise.
    """

    repo_callsigns = repo.ui.configlist("phrevset", "callsign")
    if not repo_callsigns:
        msg = _("phrevset.callsign is not set - doing a linear search\n")
        hint = _("This will be slow if the diff was not committed recently\n")
        repo.ui.warn(msg)
        repo.ui.warn(hint)
        node = localgetdiff(repo, diffid)
        if node is None:
            repo.ui.warn(_("Could not find diff D%s in changelog\n") % diffid)
        return node

    node, resp = search(repo, diffid)

    if node is not None:
        # The log walk found the diff, nothing more to do
        return node

    if resp is None:
        # The graphql query finished but didn't return anything
        return None

    vcs = resp.get("source_control_system")
    localreponame = repo.ui.config("remotefilelog", "reponame")
    diffreponame = None
    repository = resp.get("repository")
    if repository is not None:
        diffreponame = repository.get("scm_name")
    if diffreponame in repo.ui.configlist("phrevset", "aliases"):
        diffreponame = localreponame

    if not util.istest() and (diffreponame != localreponame):
        raise error.Abort(
            "D%s is for repo '%s', not this repo ('%s')"
            % (diffid, diffreponame, localreponame)
        )

    repo.ui.debug("[diffrev] VCS is %s\n" % vcs)

    if vcs == "git":
        gitrev = parsedesc(repo, resp, ignoreparsefailure=False)
        repo.ui.debug("[diffrev] GIT rev is %s\n" % gitrev)

        peerpath = repo.ui.expandpath("default")
        remoterepo = hg.peer(repo, {}, peerpath)
        remoterev = remoterepo.lookup("_gitlookup_git_%s" % gitrev)

        repo.ui.debug("[diffrev] HG rev is %s\n" % hex(remoterev))
        if not remoterev:
            repo.ui.debug("[diffrev] Falling back to linear search\n")
            node = localgetdiff(repo, diffid)
            if node is None:
                repo.ui.warn(_("Could not find diff D%s in changelog\n") % diffid)

            return node

        return remoterev

    elif vcs == "hg":
        rev = parsedesc(repo, resp, ignoreparsefailure=True)
        if rev:
            # The response from phabricator contains a changeset ID.
            # Convert it back to a node.
            try:
                return repo[rev].node()
            except error.RepoLookupError:
                # TODO: 's/svnrev/globalrev' after turning off Subversion
                # servers. We will know about this when we remove the `svnrev`
                # revset.
                #
                # Unfortunately the rev can also be a svnrev/globalrev :(.
                if rev.isdigit():
                    try:
                        return list(repo.nodes("svnrev(%s)" % rev))[0]
                    except (IndexError, error.RepoLookupError):
                        pass

                if len(rev) == len(nullhex):
                    return bin(rev)
                else:
                    return None

        # commit is still local, get its hash

        try:
            props = resp["phabricator_version_properties"]["edges"]
            commits = {}
            for prop in props:
                if prop["node"]["property_name"] == "local:commits":
                    commits = json.loads(prop["node"]["property_value"])
            hexnodes = [c["commit"] for c in commits.values()]
        except (AttributeError, IndexError, KeyError):
            hexnodes = []

        # find a better alternative of the commit hash specified in
        # graphql response by looking up successors.
        for hexnode in hexnodes:
            if len(hexnode) != len(nullhex):
                continue

            node = bin(hexnode)
            unfi = repo
            if node in unfi:
                # Find a successor.
                successors = list(
                    unfi.nodes("last(successors(%n)-%n-obsolete())", node, node)
                )
                if successors:
                    return successors[0]
            return node

        # local:commits is empty
        return None

    else:
        if not vcs:
            msg = (
                "D%s does not have an associated version control system\n"
                "You can view the diff at https:///our.internmc.facebook.com/intern/diff/D%s\n"
            )
            repo.ui.warn(msg % (diffid, diffid))

            return None
        else:
            repo.ui.warn(
                _("Conduit returned unknown sourceControlSystem: '%s'\n") % vcs
            )

            return None
Example #26
def _applycloudchanges(repo, remotepath, lastsyncstate, cloudrefs, maxage,
                       state, tr):
    # Pull all the new heads and any bookmark hashes we don't have. We need to
    # filter cloudrefs before pull as pull doesn't check if a rev is present
    # locally.
    newheads = [
        nodemod.hex(n) for n in repo.changelog.filternodes(
            [nodemod.bin(h) for h in cloudrefs.heads], inverse=True)
    ]
    if maxage is not None and maxage >= 0:
        mindate = time.time() - maxage * 86400
        omittedheads = [
            head for head in newheads if head in cloudrefs.headdates
            and cloudrefs.headdates[head] < mindate
        ]
        if omittedheads:
            omittedheadslen = len(omittedheads)
            repo.ui.status(
                _n(
                    "omitting %d head that is older than %d days:\n",
                    "omitting %d heads that are older than %d days:\n",
                    omittedheadslen,
                ) % (omittedheadslen, maxage))
            counter = 0
            for head in reversed(omittedheads):
                if counter == _maxomittedheadsoutput:
                    remaining = len(omittedheads) - counter
                    repo.ui.status(
                        _n("  and %d older head\n", "  and %d older heads\n",
                           remaining) % remaining)
                    break
                headdatestr = util.datestr(
                    util.makedate(cloudrefs.headdates[head]))
                repo.ui.status(_("  %s from %s\n") % (head[:12], headdatestr))
                counter = counter + 1

        omittedheads = set(omittedheads)
        newheads = [head for head in newheads if head not in omittedheads]
    else:
        omittedheads = set()
    omittedbookmarks = []
    omittedremotebookmarks = []

    newvisibleheads = None
    if visibility.tracking(repo):
        localheads = _getheads(repo)
        localheadsset = set(localheads)
        cloudheads = [
            head for head in cloudrefs.heads if head not in omittedheads
        ]
        cloudheadsset = set(cloudheads)
        if localheadsset != cloudheadsset:
            oldvisibleheads = [
                head for head in lastsyncstate.heads
                if head not in lastsyncstate.omittedheads
            ]
            newvisibleheads = util.removeduplicates(oldvisibleheads +
                                                    cloudheads + localheads)
            toremove = {
                head
                for head in oldvisibleheads
                if head not in localheadsset or head not in cloudheadsset
            }
            newvisibleheads = [
                head for head in newvisibleheads if head not in toremove
            ]

    remotebookmarknewnodes = set()
    remotebookmarkupdates = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        (remotebookmarkupdates,
         remotebookmarknewnodes) = _processremotebookmarks(
             repo, cloudrefs.remotebookmarks, lastsyncstate)

    if remotebookmarknewnodes or newheads:
        # Partition the heads into groups we can pull together.
        headgroups = _partitionheads(repo.ui,
                                     list(remotebookmarknewnodes) + newheads,
                                     cloudrefs.headdates)
        _pullheadgroups(repo, remotepath, headgroups)

    omittedbookmarks.extend(
        _mergebookmarks(repo, tr, cloudrefs.bookmarks, lastsyncstate,
                        omittedheads, maxage))

    newremotebookmarks = {}
    if _isremotebookmarkssyncenabled(repo.ui):
        omittedremotebookmarks = _updateremotebookmarks(
            repo, tr, remotebookmarkupdates)
        newremotebookmarks = cloudrefs.remotebookmarks

    if newvisibleheads is not None:
        visibility.setvisibleheads(repo,
                                   [nodemod.bin(n) for n in newvisibleheads])

    # We have now synced the repo to the cloud version.  Store this.
    logsyncop(
        repo,
        "from_cloud",
        cloudrefs.version,
        lastsyncstate.heads,
        cloudrefs.heads,
        lastsyncstate.bookmarks,
        cloudrefs.bookmarks,
        lastsyncstate.remotebookmarks,
        newremotebookmarks,
    )
    lastsyncstate.update(
        tr,
        newversion=cloudrefs.version,
        newheads=cloudrefs.heads,
        newbookmarks=cloudrefs.bookmarks,
        newremotebookmarks=newremotebookmarks,
        newmaxage=maxage,
        newomittedheads=list(omittedheads),
        newomittedbookmarks=omittedbookmarks,
        newomittedremotebookmarks=omittedremotebookmarks,
    )

    # Also update backup state.  These new heads are already backed up,
    # otherwise the server wouldn't have told us about them.
    state.update([nodemod.bin(head) for head in newheads], tr)
Example #27
def _getbookmarks(repo):
    return {n: nodemod.hex(v) for n, v in repo._bookmarks.items()}
Example #28
def _checkomissions(repo, remotepath, lastsyncstate, tr, maxage):
    """check omissions are still not available locally

    Check that the commits that have been deliberately omitted are still not
    available locally.  If they are now available (e.g. because the user pulled
    them manually), then remove the tracking of those heads being omitted, and
    restore any bookmarks that can now be restored.
    """
    unfi = repo
    lastomittedheads = set(lastsyncstate.omittedheads)
    lastomittedbookmarks = set(lastsyncstate.omittedbookmarks)
    lastomittedremotebookmarks = set(lastsyncstate.omittedremotebookmarks)
    omittedheads = set()
    omittedbookmarks = set()
    omittedremotebookmarks = set()
    changes = []
    remotechanges = {}
    mindate = (time.time() - maxage * 86400) if maxage is not None else 0
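    # Any previously omitted head that the changelog can now resolve is
    # available locally and no longer needs to be tracked as omitted.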
    foundheads = repo.changelog.filternodes(
        [nodemod.bin(n) for n in lastomittedheads], local=True)
    foundheads = {nodemod.hex(n) for n in foundheads}
    omittedheads = lastomittedheads - foundheads

    lastbookmarknodes = [
        lastsyncstate.bookmarks[name] for name in lastomittedbookmarks
        if name in lastsyncstate.bookmarks
    ]
    lastremotebookmarknodes = [
        lastsyncstate.remotebookmarks[name]
        for name in lastomittedremotebookmarks
        if name in lastsyncstate.remotebookmarks
    ]
    foundbookmarkslocalnodes = {
        nodemod.hex(n)
        for n in repo.changelog.filternodes(
            [
                nodemod.bin(n)
                for n in lastbookmarknodes + lastremotebookmarknodes
            ],
            local=True,
        )
    }

    for name in lastomittedbookmarks:
        # the bookmark might have been removed from the cloud workspace
        # by someone else
        if name not in lastsyncstate.bookmarks:
            continue
        node = lastsyncstate.bookmarks[name]
        if node in foundbookmarkslocalnodes:
            if unfi[node].mutable() or (unfi[node].date()[0] >= mindate):
                changes.append((name, nodemod.bin(node)))
            else:
                omittedbookmarks.add(name)
        else:
            omittedbookmarks.add(name)

    for name in lastomittedremotebookmarks:
        # the remote bookmark might have been removed from the cloud
        # workspace by someone else
        if name not in lastsyncstate.remotebookmarks:
            continue
        node = lastsyncstate.remotebookmarks[name]
        if node in foundbookmarkslocalnodes:
            remotechanges[name] = node
        else:
            omittedremotebookmarks.add(name)
    if (omittedheads != lastomittedheads
            or omittedbookmarks != lastomittedbookmarks
            or omittedremotebookmarks != lastomittedremotebookmarks):
        lastsyncstate.update(
            tr,
            newomittedheads=list(omittedheads),
            newomittedbookmarks=list(omittedbookmarks),
            newomittedremotebookmarks=list(omittedremotebookmarks),
        )
    if changes or remotechanges:
        with repo.wlock(), repo.lock(), repo.transaction("cloudsync") as tr:
            if changes:
                repo._bookmarks.applychanges(repo, tr, changes)
            if remotechanges:
                remotebookmarks = _getremotebookmarks(repo)
                remotebookmarks.update(remotechanges)
                repo._remotenames.applychanges({"bookmarks": remotebookmarks})
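
The bookmark half of the check applies one rule: a previously omitted bookmark is restored only if its target commit is now present locally and is either mutable or newer than the maxage cutoff. Below is a hedged sketch of that predicate; the helper name is illustrative, and maxage is in days, as in the sync state.

import time


def canrestorebookmark(unfi, node, maxage=None):
    """Return True if an omitted bookmark pointing at ``node`` (a hex hash
    that is now resolvable locally) should be restored.

    Mutable commits are always restored; public commits only when their
    commit date falls inside the maxage window.
    """
    mindate = (time.time() - maxage * 86400) if maxage is not None else 0
    ctx = unfi[node]
    return ctx.mutable() or ctx.date()[0] >= mindate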
Example #29
def encodeheads(heads):
    return encodeutf8("%s\n%s" % (FORMAT_VERSION, "".join("%s\n" % node.hex(h)
                                                          for h in heads)))
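
The encoded form is a version line followed by one hex node per line. A matching decoder is not shown in this example; below is a minimal sketch of what one could look like, assuming the same node module and FORMAT_VERSION constant that encodeheads uses (decodeheads itself is a hypothetical name).

def decodeheads(data):
    """Parse the bytes produced by encodeheads: a version line followed by
    one hex node per line."""
    lines = data.decode("utf-8").splitlines()
    if not lines or lines[0] != "%s" % FORMAT_VERSION:
        raise ValueError("unexpected heads file format")
    return [node.bin(h) for h in lines[1:] if h]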
Example #30
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):

        if self._repo.ui.configbool("remotefilelog", "server"):
            caps = self._bundlecaps or []
            if requirement in caps:
                # only send files that don't match the specified patterns
                includepattern = None
                excludepattern = None
                for cap in caps:
                    if cap.startswith("includepattern="):
                        includepattern = cap[len("includepattern="):].split(
                            "\0")
                    elif cap.startswith("excludepattern="):
                        excludepattern = cap[len("excludepattern="):].split(
                            "\0")

                m = match.always(self._repo.root, "")
                if includepattern or excludepattern:
                    m = match.match(self._repo.root, "", None, includepattern,
                                    excludepattern)
                changedfiles = [f for f in changedfiles if not m(f)]

        if requirement in self._repo.requirements:
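            # Shallow repo (remotefilelog client): work out which file
            # revisions actually need to be sent and prefetch the data
            # required to build their deltas.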
            repo = self._repo
            if isinstance(repo, bundlerepo.bundlerepository):
                # If the bundle contains filelogs, we can't pull from it, since
                # bundlerepo is heavily tied to revlogs.  Require the user to
                # run unbundle instead.
                # Force load the filelog data.
                bundlerepo.bundlerepository.file(repo, "foo")
                if repo._cgfilespos:
                    raise error.Abort(
                        "cannot pull from full bundles",
                        hint="use `hg unbundle` instead",
                    )
                return []
            filestosend = self.shouldaddfilegroups(source)
            if filestosend == NoFiles:
                changedfiles = [
                    f for f in changedfiles if not repo.shallowmatch(f)
                ]
            else:
                files = []

                phasecache = repo._phasecache
                cl = repo.changelog

                # Prefetch the revisions being bundled
                for i, fname in enumerate(sorted(changedfiles)):
                    filerevlog = repo.file(fname)
                    linkrevnodes = linknodes(filerevlog, fname)
                    # Normally we'd prune the linkrevnodes first,
                    # but that would perform the server fetches one by one.
                    for fnode, cnode in list(pycompat.iteritems(linkrevnodes)):
                        # Adjust linknodes so remote file revisions aren't sent
                        if filestosend == LocalFiles:
                            if phasecache.phase(
                                    repo, cl.rev(cnode)
                            ) == phases.public and repo.shallowmatch(fname):
                                del linkrevnodes[fnode]
                            else:
                                files.append((fname, hex(fnode)))
                        else:
                            files.append((fname, hex(fnode)))

                repo.fileservice.prefetch(files)

                # Prefetch the revisions that are going to be diffed against
                prevfiles = []
                for fname, fnode in files:
                    if repo.shallowmatch(fname):
                        fnode = bin(fnode)
                        filerevlog = repo.file(fname)
                        p1, p2, linknode, copyfrom = filerevlog.getnodeinfo(
                            fnode)
                        if p1 != nullid:
                            prevfiles.append((copyfrom or fname, hex(p1)))

                repo.fileservice.prefetch(prevfiles)

        return super(shallowcg1packer,
                     self).generatefiles(changedfiles, linknodes, commonrevs,
                                         source)
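
The server-side branch at the top pulls the client's include and exclude patterns out of the bundle capabilities, where they travel as NUL-separated lists. Below is a small standalone sketch of that parsing step; the helper name is illustrative, not part of the extension.

def parsepatterncaps(bundlecaps):
    """Extract include/exclude patterns from bundle capabilities.

    Patterns arrive NUL-separated in caps named ``includepattern=`` and
    ``excludepattern=``; missing caps yield None.
    """
    includepattern = None
    excludepattern = None
    for cap in bundlecaps or []:
        if cap.startswith("includepattern="):
            includepattern = cap[len("includepattern="):].split("\0")
        elif cap.startswith("excludepattern="):
            excludepattern = cap[len("excludepattern="):].split("\0")
    return includepattern, excludepattern

If either list is present, the caller builds a narrower matcher with match.match() instead of match.always(), exactly as generatefiles does above.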