Example #1
File: journal.py Project: simpkins/eden
    def fromstorage(cls, line: bytes) -> "journalentry":
        split = pycompat.decodeutf8(line).split("\n")
        if len(split) != 7:
            raise ValueError("incorrect journalentry '%s'" % line)

        (
            time,
            user,
            command,
            namespace,
            name,
            oldhashes,
            newhashes,
        ) = split
        timestamp, tz = time.split()
        timestamp, tz = float(timestamp), int(tz)
        oldhashes = tuple(node.bin(hash) for hash in oldhashes.split(","))
        newhashes = tuple(node.bin(hash) for hash in newhashes.split(","))
        return cls((timestamp, tz), user, command, namespace, name, oldhashes,
                   newhashes)
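
The storage format parsed above is seven newline-separated UTF-8 fields, with the timestamp and timezone packed into the first field and the hash lists comma-separated. As a hedged illustration of that layout, here is a minimal sketch of the inverse serializer; the name tostorage and the attribute names on entry are assumptions for illustration, not the project's confirmed API.

    def tostorage(entry) -> bytes:
        # Hypothetical inverse of fromstorage(); field names are assumed.
        time = "%s %d" % (entry.timestamp, entry.tz)
        oldhashes = ",".join(node.hex(h) for h in entry.oldhashes)
        newhashes = ",".join(node.hex(h) for h in entry.newhashes)
        fields = (time, entry.user, entry.command, entry.namespace,
                  entry.name, oldhashes, newhashes)
        return pycompat.encodeutf8("\n".join(fields))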
Example #2
    def testLoadingBookmarks(self):
        bmdir = self.makeTempDir()
        bmstore1 = bookmarkstore.bookmarkstore(bmdir)
        bmstore1.update("test", node.bin("1" * 40))
        bmstore1.flush()

        bmstore2 = bookmarkstore.bookmarkstore(bmdir)
        self.assertEqual(
            b"\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11",
            bmstore2.lookup_bookmark("test"),
        )
Example #3
    def testMalformedBookmarks(self):
        bmdir = self.makeTempDir()
        bmstore = bookmarkstore.bookmarkstore(bmdir)
        bmstore.update("test", node.bin("1" * 40))
        bmstore.flush()

        def truncateFilesInDir(d):
            for f in os.listdir(d):
                with open(os.path.join(d, f), "w"):
                    pass

        truncateFilesInDir(bmdir)
        self.assertRaises(IOError, bookmarkstore.bookmarkstore, bmdir)
Example #4
File: fastlog.py Project: zerkella/eden
    def generate(self, path):
        start = str(self.rev)
        reponame = self.reponame
        revfn = self.changelog.rev
        skip = 0

        while True:
            if self.stopped():
                break

            results = None
            todo = self.gettodo()
            try:
                client = graphql.Client(repo=self.repo)
                results = client.scmquery_log(reponame,
                                              self.scm,
                                              start,
                                              file_paths=[path],
                                              skip=skip,
                                              number=todo)
            except Exception as e:
                if self.ui.config("fastlog", "debug"):
                    self.ui.traceback(force=True)
                self.queue.put((self.id, False, str(e)))
                self.stop()
                return

            if results is None:
                self.queue.put((self.id, False, "Unknown error"))
                self.stop()
                return

            for commit in results:
                try:
                    hash = commit["hash"]
                    if len(hash) != 40:
                        raise ValueError("Received invalid hash %s" % hash)
                    rev = revfn(node.bin(hash))
                    if rev is None:
                        raise KeyError("Hash %s not in local repo" % hash)
                except Exception as e:
                    if self.ui.config("fastlog", "debug"):
                        self.ui.traceback(force=True)
                    self.queue.put((self.id, False, str(e)))
                else:
                    yield rev

            skip += todo
            if len(results) < todo:
                self.finishpath(path)
                return
Example #5
    def _makereferences(self, data):
        """Makes a References object from JSON data

        JSON data must represent json serialization of
        //scm/commitcloud/if/CommitCloudService.thrift
        struct ReferencesData

        Result represents struct References from this module
        """
        version = data["version"]
        newheads = [h for h in data.get("heads", [])]
        newbookmarks = {n: v for n, v in data.get("bookmarks", {}).items()}
        newobsmarkers = [
            (
                nodemod.bin(m["pred"]),
                tuple(nodemod.bin(s) for s in m["succs"]),
                m["flags"],
                tuple((k, v) for k, v in json.loads(m["meta"])),
                (m["date"], m["tz"]),
                tuple(nodemod.bin(p) for p in m["predparents"]),
            )
            for m in data.get("new_obsmarkers_data", [])
        ]
        headdates = {h: d for h, d in data.get("head_dates", {}).items()}
        newremotebookmarks = self._decoderemotebookmarks(
            data.get("remote_bookmarks", [])
        )
        newsnapshots = [s for s in data.get("snapshots", [])]

        return References(
            version,
            newheads,
            newbookmarks,
            newobsmarkers,
            headdates,
            newremotebookmarks,
            newsnapshots,
        )
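
For illustration only, the rough shape of the JSON payload that _makereferences consumes can be inferred from the lookups above; every value below is a placeholder, not real data.

sampledata = {
    "version": 42,
    "heads": ["<40-char hex node>"],
    "bookmarks": {"master": "<40-char hex node>"},
    "new_obsmarkers_data": [
        {
            "pred": "<hex node>",
            "succs": ["<hex node>"],
            "flags": 0,
            "meta": '[["user", "alice"]]',  # JSON-encoded key/value pairs
            "date": 0.0,
            "tz": 0,
            "predparents": ["<hex node>"],
        }
    ],
    "head_dates": {"<hex node>": 0},
    "remote_bookmarks": [],
    "snapshots": [],
}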
Example #6
    def testAddingBookmarksToSameNode(self):
        bmdir = self.makeTempDir()
        bmstore = bookmarkstore.bookmarkstore(bmdir)

        testnode = node.bin("2" * 40)
        self.assertIsNone(bmstore.lookup_node(testnode))

        bmstore.update("test", testnode)
        bmstore.update("test2", testnode)

        self.assertEquals(["test2", "test"], bmstore.lookup_node(testnode))

        bmstore.remove("test2")
        self.assertEquals(["test"], bmstore.lookup_node(testnode))
Example #7
    def usecloudnode(cloudnode, localnode):
        """returns True if cloudnode should be a new state for the remote bookmark

        Both cloudnode and localnode are public commits."""
        unfi = repo
        if localnode not in unfi:
            # we somehow don't have the localnode in the repo; we probably
            # want to fetch it
            return False
        if cloudnode not in unfi:
            # we don't have cloudnode in the repo, assume that cloudnode is newer
            # than the local
            return True
        if repo.changelog.isancestor(nodemod.bin(localnode), nodemod.bin(cloudnode)):
            # cloudnode is a descendant of the localnode; assume the remote
            # bookmark should move forward to the newer node
            #
            # Note: if the remote bookmark was reverted back to an older
            # revision on the server, and the current repo in fact has a newer
            # working copy, then we'll end up in the wrong state by moving the
            # bookmark forward. It will be fixed by the next pull and sync
            # operations.
            return True
        return False
Example #8
def _unshelverestorecommit(ui, repo, basename):
    """Recreate commit in the repository during the unshelve"""
    with ui.configoverride({("ui", "quiet"): True}):
        md = shelvedfile(repo, basename, "oshelve").readobsshelveinfo()
        shelvenode = nodemod.bin(md["node"])
        try:
            shelvectx = repo[shelvenode]
        except error.RepoLookupError:
            m = _(
                "shelved node %s not found in repo\nIf you think this shelve "
                "should exist, try running '@prog@ import --no-commit .hg/shelved/%s.patch' "
                "from the root of the repository.")
            raise error.Abort(m % (md["node"], basename))
    return repo, shelvectx
Example #9
    def prefetch(self,
                 fileids,
                 force=False,
                 fetchdata=True,
                 fetchhistory=True):
        """downloads the given file versions to the cache
        """
        repo = self.repo
        idstocheck = set()
        for file, id in fileids:
            # hack
            # - we don't use .hgtags
            # - workingctx produces ids with length 42,
            #   which we skip since they aren't in any cache
            if file == ".hgtags" or len(
                    id) == 42 or not repo.shallowmatch(file):
                continue

            idstocheck.add((file, bin(id)))

        batchlfsdownloads = self.ui.configbool("remotefilelog",
                                               "_batchlfsdownloads", True)
        dolfsprefetch = self.ui.configbool("remotefilelog", "dolfsprefetch",
                                           True)

        idstocheck = list(idstocheck)
        if not force:
            contentstore = repo.fileslog.contentstore
            metadatastore = repo.fileslog.metadatastore
        else:
            contentstore, metadatastore = repo.fileslog.makesharedonlyruststore(
                repo)

        if fetchdata:
            contentstore.prefetch(idstocheck)
        if fetchhistory:
            metadatastore.prefetch(idstocheck)

        if batchlfsdownloads and dolfsprefetch:
            self._lfsprefetch(fileids)

        if force:
            # Yay, since the shared-only stores and the regular ones aren't
            # shared, we need to commit data to force the stores to be
            # rebuilt. Forced prefetch are very rare and thus it is most
            # likely OK to do this.
            contentstore = None
            metadatastore = None
            repo.commitpending()
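
A hypothetical call site for the method above, assuming the service is reachable as repo.fileservice and that fileids is a list of (path, hex fileid) pairs as implied by the "for file, id in fileids" loop; the path and id values are placeholders.

repo.fileservice.prefetch(
    [("some/dir/file.py", "ab" * 20)],  # placeholder path and 40-char hex id
    fetchdata=True,
    fetchhistory=False,
)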
Example #10
def _getannotate(repo, proto, path, lastnode):
    # Older fastannotate sent binary nodes. Newer fastannotate sends hex.
    if len(lastnode) == 40:
        lastnode = bin(lastnode)

    # output:
    #   FILE := vfspath + '\0' + str(size) + '\0' + content
    #   OUTPUT := '' | FILE + OUTPUT
    result = b""
    buildondemand = repo.ui.configbool("fastannotate", "serverbuildondemand",
                                       True)
    with context.annotatecontext(repo, path) as actx:
        if buildondemand:
            # update before responding to the client
            master = _getmaster(repo.ui)
            try:
                if not actx.isuptodate(master):
                    actx.annotate(master, master)
            except Exception:
                # non-fast-forward move or corrupted; rebuild automatically.
                actx.rebuild()
                try:
                    actx.annotate(master, master)
                except Exception:
                    actx.rebuild()  # delete files
            finally:
                # although the "with" context will also do a close/flush, we
                # need to do it early so we can send the correct response to
                # the client.
                actx.close()
        # send back the full content of revmap and linelog, in the future we
        # may want to do some rsync-like fancy updating.
        # the lastnode check is not necessary if the client and the server
        # agree where the main branch is.
        if actx.lastnode != lastnode:
            for p in [actx.revmappath, actx.linelogpath]:
                if not os.path.exists(p):
                    continue
                content = b""
                with open(p, "rb") as f:
                    content = f.read()
                vfsbaselen = len(repo.localvfs.base + "/")
                relpath = p[vfsbaselen:]
                result += b"%s\0%s\0%s" % (
                    pycompat.encodeutf8(relpath),
                    pycompat.encodeutf8(str(len(content))),
                    content,
                )
    return result
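
The comment above defines the wire format: OUTPUT is a sequence of FILE records, each being vfspath + '\0' + str(size) + '\0' + content. A hedged client-side sketch of parsing that stream follows; parsefiles is a hypothetical helper, not part of fastannotate.

def parsefiles(data: bytes) -> dict:
    files = {}
    pos = 0
    while pos < len(data):
        # vfspath and size are NUL-terminated UTF-8 fields
        pathend = data.index(b"\0", pos)
        vfspath = data[pos:pathend].decode("utf-8")
        sizeend = data.index(b"\0", pathend + 1)
        size = int(data[pathend + 1:sizeend])
        # content is exactly `size` raw bytes, with no terminator
        files[vfspath] = data[sizeend + 1:sizeend + 1 + size]
        pos = sizeend + 1 + size
    return files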
Example #11
def getfile(repo, proto, file, node):
    """A server api for requesting a particular version of a file. Can be used
    in batches to request many files at once. The return protocol is:
    <errorcode>\0<data/errormsg> where <errorcode> is 0 for success or
    non-zero for an error.

    data is a compressed blob with revlog flag and ancestors information. See
    createfileblob for its content.
    """
    if shallowrepo.requirement in repo.requirements:
        return "1\0" + _("cannot fetch remote files from shallow repo")
    node = bin(node.strip())
    if node == nullid:
        return "0\0"
    return "0\0" + _loadfileblob(repo, file, node)
Example #12
def _unshelverestorecommit(ui, repo, basename, obsshelve):
    """Recreate commit in the repository during the unshelve"""
    with ui.configoverride({("ui", "quiet"): True}):
        if obsshelve:
            md = shelvedfile(repo, basename, "oshelve").readobsshelveinfo()
            shelvenode = nodemod.bin(md["node"])
            try:
                shelvectx = repo[shelvenode]
            except error.RepoLookupError:
                m = _("shelved node %s not found in repo")
                raise error.Abort(m % md["node"])
        else:
            shelvedfile(repo, basename, "hg").applybundle()
            shelvectx = repo["tip"]
    return repo, shelvectx
Example #13
        def _findtags(self):
            (tags, tagtypes) = super(hgrepo, self)._findtags()

            for tag, rev in self.githandler.tags.iteritems():
                if isinstance(tag, unicode):
                    tag = tag.encode("utf-8")
                tags[tag] = bin(rev)
                tagtypes[tag] = "git"
            for tag, rev in self.githandler.remote_refs.iteritems():
                if isinstance(tag, unicode):
                    tag = tag.encode("utf-8")
                tags[tag] = rev
                tagtypes[tag] = "git-remote"
            tags.update(self.githandler.remote_refs)
            return (tags, tagtypes)
Example #14
    def parents(self, n):
        gitrev = self.repo.revmap.get(n)
        if gitrev is None:
            # we've reached a revision we have
            return self.base.parents(n)
        commit = self.repo.handler.git.get_object(_maybehex(n))

        if not commit.parents:
            return [nullid, nullid]

        def gitorhg(n):
            hn = self.repo.handler.map_hg_get(hex(n))
            if hn is not None:
                return bin(hn)
            return n

        # currently ignores octopus merges; only the first two parents are used
        p1 = gitorhg(bin(commit.parents[0]))
        if len(commit.parents) > 1:
            p2 = gitorhg(bin(commit.parents[1]))
        else:
            p2 = nullid

        return [p1, p2]
Example #15
def _summarizefileconflicts(self, path, workingctx):
    # 'd' = driver-resolved
    # 'r' = marked resolved
    # 'pr', 'pu' = path conflicts
    if self[path] in ("d", "r", "pr", "pu"):
        return None

    stateentry = self._state[path]
    localnode = bin(stateentry[1])
    ancestorfile = stateentry[3]
    ancestornode = bin(stateentry[4])
    otherfile = stateentry[5]
    othernode = bin(stateentry[6])
    otherctx = self._repo[self._other]
    extras = self.extras(path)
    anccommitnode = extras.get("ancestorlinknode")
    ancestorctx = self._repo[anccommitnode] if anccommitnode else None
    workingctx = self._filectxorabsent(localnode, workingctx, path)
    otherctx = self._filectxorabsent(othernode, otherctx, otherfile)
    basectx = self._repo.filectx(
        ancestorfile, fileid=ancestornode, changeid=ancestorctx
    )

    return _summarize(self._repo, workingctx, otherctx, basectx)
Example #16
File: hg.py Project: xmonader/eden
    def putbookmarks(self, updatedbookmark):
        if not len(updatedbookmark):
            return
        wlock = lock = tr = None
        try:
            wlock = self.repo.wlock()
            lock = self.repo.lock()
            tr = self.repo.transaction("bookmark")
            self.ui.status(_("updating bookmarks\n"))
            destmarks = self.repo._bookmarks
            changes = [(bookmark, nodemod.bin(updatedbookmark[bookmark]))
                       for bookmark in updatedbookmark]
            destmarks.applychanges(self.repo, tr, changes)
            tr.close()
        finally:
            lockmod.release(lock, wlock, tr)
Example #17
def _scmquerylookupglobalrev(orig, repo, rev):
    reponame = repo.ui.config("fbscmquery", "reponame")
    if reponame:
        try:
            client = graphql.Client(repo=repo)
            hghash = str(
                client.getmirroredrev(reponame, "GLOBAL_REV", reponame, "hg", str(rev))
            )
            matchedrevs = []
            if hghash:
                matchedrevs.append(bin(hghash))
            return matchedrevs
        except Exception:
            pass

    return orig(repo, rev)
Example #18
File: shelve.py Project: simpkins/eden
def shelved(repo, subset, x):
    """Shelved changes"""
    # list files with shelves
    shelves = [
        shelvedfile(repo, filename, "oshelve") for filename in listshelvesfiles(repo)
    ]
    # filter valid files
    shelves = filter(lambda f: f.exists(), shelves)
    # read node from each file
    nodes = [nodemod.bin(shelve.readobsshelveinfo()["node"]) for shelve in shelves]
    # filter out revisions that are not in the repo
    # local=True because shelved commits cannot be public and only public
    # commits can be lazy so we avoid remote lookups.
    nodes = repo.changelog.filternodes(nodes, local=True)
    # returns intersection with shelved commits (including hidden)
    return subset & repo.revs("%ln", nodes)
Example #19
    def testAddingBookmarks(self):
        bmdir = self.makeTempDir()
        bmstore = bookmarkstore.bookmarkstore(bmdir)
        self.assertIsNone(bmstore.lookup_bookmark("not_real"))

        bmstore.update("test", node.nullid)
        self.assertEqual(
            "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
            bmstore.lookup_bookmark("test"),
        )

        bmstore.update("test", node.bin("1" * 40))
        self.assertEqual(
            "\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11",
            bmstore.lookup_bookmark("test"),
        )

        bmstore.remove("test")
Example #20
def shelved(repo, subset, x):
    """Shelved changes"""
    # list files with shelves
    shelves = [
        shelvedfile(repo, filename, "oshelve")
        for filename in listshelvesfiles(repo)
    ]
    # filter valid files
    shelves = filter(lambda f: f.exists(), shelves)
    # read node from each file
    nodes = [
        nodemod.bin(shelve.readobsshelveinfo()["node"]) for shelve in shelves
    ]
    # filter out revisions that are not in the repo
    nodes = filter(lambda x: x in repo, nodes)
    # convert to full hash
    nodes = [nodemod.hex(repo[x].node()) for x in nodes]
    # returns intersection with shelved commits (including hidden)
    return subset & repo.revs("%ls", nodes)
Example #21
    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
        ancestormap=None,
    ):
        if fileid == nullrev:
            fileid = nullid
        if fileid and len(fileid) == 40:
            fileid = bin(fileid)
        super(remotefilectx, self).__init__(repo, path, changeid, fileid,
                                            filelog, changectx)
        self._ancestormap = ancestormap
        self._descendantrevfastpath = repo.ui.configbool(
            "remotefilelog", "descendantrevfastpath")
Example #22
    def addrawrevision(
        self,
        rawtext,
        transaction,
        linknode,
        p1,
        p2,
        node,
        flags,
        cachedelta=None,
        _metatuple=None,
    ):
        if _metatuple:
            # _metatuple: used by "addrevision" internally by remotefilelog
            # meta was parsed confidently
            #
            # NOTE: meta is the "filelog" meta, which contains "copyrev"
            # information. It's *incompatible* with datapack meta, which is
            # about file size and revlog flags.
            meta, metaoffset = _metatuple
        else:
            # Not from self.addrevision, but something else (repo._filecommit)
            # calls addrawrevision directly. remotefilelog needs to get the
            # copy metadata via parsing it.
            meta, unused = shallowutil.parsemeta(rawtext, flags)

        dpack, hpack = self.repo.fileslog.getmutablelocalpacks()

        dpackmeta = {constants.METAKEYFLAG: flags}
        dpack.add(self.filename,
                  node,
                  revlog.nullid,
                  rawtext,
                  metadata=dpackmeta)

        copyfrom = ""
        realp1node = p1
        if meta and "copy" in meta:
            copyfrom = meta["copy"]
            realp1node = bin(meta["copyrev"])
        hpack.add(self.filename, node, realp1node, p2, linknode, copyfrom)

        return node
Example #23
def cloudremote(repo, subset, x):
    """pull missing known changesets from the remote store

    Currently only for obsoleted commits, can be extended for any commit.
    """

    args = revset.getargs(x, 1, 50,
                          _("cloudremote takes from 1 up to 50 hex revs"))
    args = [n[1] for n in args]

    try:
        hexnodespulled = missingcloudrevspull(
            repo, [nodemod.bin(nodehex) for nodehex in args])
        return subset & repo.unfiltered().revs("%ls", hexnodespulled)
    except Exception as e:
        repo.ui.status(
            _("unable to pull all changesets from the remote store\n%s\n") % e,
            component="commitcloud",
        )
    return smartset.baseset([])
Example #24
    def _fetchpackfiles(self, fileids, fetchdata, fetchhistory):
        """Requests the given file revisions from the server in a pack files
        format.

        See `remotefilelogserver.getpack` for the file format.
        """

        # Try fetching packs via HTTP first; fall back to SSH on error.
        if edenapi.enabled(self.ui):
            try:
                self._httpfetchpacks(fileids, fetchdata, fetchhistory)
                return
            except Exception as e:
                self.ui.warn(_("encountered error during HTTPS fetching;"))
                self.ui.warn(_(" falling back to SSH\n"))
                edenapi.logexception(self.ui, e)
                self.ui.metrics.gauge("edenapi_fallbacks", 1)

        dpack, hpack = self.repo.fileslog.getmutablesharedpacks()
        fileids = [(filename, bin(node)) for filename, node in fileids]
        self.getpackclient.prefetch(dpack, hpack, fileids)
Example #25
    def __init__(self, vfs):
        self.vfs = vfs
        self._invisiblerevs = None
        try:
            lines = self.vfs("visibleheads").readlines()
            if lines and lines[0].strip() != FORMAT_VERSION:
                raise error.Abort("invalid visibleheads file format %r" % lines[0])
            self.heads = [node.bin(head.strip()) for head in lines[1:]]
            self.dirty = False
            self._logheads("read", visibility_headcount=len(self.heads))
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
            self.heads = []
            self.dirty = True
        self._allheads = bindings.nodemap.nodeset(vfs.join("allheads"))
        if self.heads:
            # Populate allheads with heads
            add = self._allheads.add
            for head in self.heads:
                add(head)
Example #26
def _checkomissions(repo, remotepath, lastsyncstate, tr):
    """check omissions are still not available locally

    Check that the commits that have been deliberately omitted are still not
    available locally.  If they are now available (e.g. because the user pulled
    them manually), then remove the tracking of those heads being omitted, and
    restore any bookmarks that can now be restored.
    """
    unfi = repo.unfiltered()
    lastomittedheads = set(lastsyncstate.omittedheads)
    lastomittedbookmarks = set(lastsyncstate.omittedbookmarks)
    omittedheads = set()
    omittedbookmarks = set()
    changes = []
    for head in lastomittedheads:
        if head not in repo:
            omittedheads.add(head)
    for name in lastomittedbookmarks:
        # bookmark might be removed from cloud workspace by someone else
        if name not in lastsyncstate.bookmarks:
            continue
        node = lastsyncstate.bookmarks[name]
        if node in unfi:
            changes.append((name, nodemod.bin(node)))
        else:
            omittedbookmarks.add(name)
    if omittedheads != lastomittedheads or omittedbookmarks != lastomittedbookmarks:
        lastsyncstate.update(
            tr,
            lastsyncstate.version,
            lastsyncstate.heads,
            lastsyncstate.bookmarks,
            list(omittedheads),
            list(omittedbookmarks),
            lastsyncstate.maxage,
            lastsyncstate.remotebookmarks,
        )
    if changes:
        with repo.wlock(), repo.lock(), repo.transaction("cloudsync") as tr:
            repo._bookmarks.applychanges(repo, tr, changes)
Example #27
def buildtemprevlog(repo, file):
    # get filename key
    filekey = hashlib.sha1(file).hexdigest()
    filedir = os.path.join(repo.path, "store/data", filekey)

    # sort all entries based on linkrev
    fctxs = []
    for filenode in os.listdir(filedir):
        if "_old" not in filenode:
            fctxs.append(repo.filectx(file, fileid=bin(filenode)))

    fctxs = sorted(fctxs, key=lambda x: x.linkrev())

    # add to revlog
    temppath = repo.sjoin("data/temprevlog.i")
    if os.path.exists(temppath):
        os.remove(temppath)
    r = filelog.filelog(repo.svfs, "temprevlog")

    class faket(object):
        def add(self, a, b, c):
            pass

    t = faket()
    for fctx in fctxs:
        if fctx.node() not in repo:
            continue

        p = fctx.filelog().parents(fctx.filenode())
        meta = {}
        if fctx.renamed():
            meta["copy"] = fctx.renamed()[0]
            meta["copyrev"] = hex(fctx.renamed()[1])

        r.add(fctx.data(), meta, t, fctx.linkrev(), p[0], p[1])

    return r
Example #28
    def _lfsprefetch(self, fileids):
        if not _lfsmod or not util.safehasattr(self.repo.svfs, "lfslocalblobstore"):
            return
        if not _lfsmod.wrapper.candownload(self.repo):
            return
        pointers = []
        filenames = {}
        store = self.repo.svfs.lfslocalblobstore
        for file, id in fileids:
            node = bin(id)
            rlog = self.repo.file(file)
            if rlog.flags(node) & revlog.REVIDX_EXTSTORED:
                text = rlog.revision(node, raw=True)
                p = _lfsmod.pointer.deserialize(text)
                oid = p.oid()
                if not store.has(oid):
                    pointers.append(p)
                    filenames[oid] = file
        if len(pointers) > 0:
            perftrace.tracevalue("Missing", len(pointers))
            self.repo.svfs.lfsremoteblobstore.readbatch(
                pointers, store, objectnames=filenames
            )
            assert all(store.has(p.oid()) for p in pointers)
Example #29
def gitnode(repo, subset, x):
    """``gitnode(id)``
    Return the hg revision corresponding to a given git rev."""
    l = revset.getargs(x, 1, 1, _("id requires one argument"))
    n = revset.getstring(l[0], _("id requires a string"))

    reponame = repo.ui.config("fbscmquery", "reponame")
    if not reponame:
        # We don't know who we are, so we can't ask for a translation
        return subset.filter(lambda r: False)
    backingrepos = repo.ui.configlist("fbscmquery",
                                      "backingrepos",
                                      default=[reponame])

    lasterror = None
    hghash = None
    for backingrepo in backingrepos:
        try:
            client = graphql.Client(repo=repo)
            hghash = client.getmirroredrev(backingrepo, "git", reponame, "hg",
                                           n)
            if hghash != "":
                break
        except Exception as ex:
            lasterror = ex

    if not hghash:
        if lasterror:
            repo.ui.warn(("Could not translate revision {0}: {1}\n".format(
                n, lasterror)))
        else:
            repo.ui.warn(_x("Could not translate revision {0}\n".format(n)))
        return subset.filter(lambda r: False)

    rn = repo[node.bin(hghash)].rev()
    return subset & smartset.baseset([rn])
Example #30
def decodeheads(data):
    lines = decodeutf8(data).splitlines()
    if lines and lines[0].strip() != FORMAT_VERSION:
        raise error.Abort("invalid visibleheads file format %r" % lines[0])
    return [node.bin(head.strip()) for head in lines[1:]]
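
For context, a matching encoder under the same assumptions decodeheads makes (a FORMAT_VERSION line followed by one hex head per line) could look like the sketch below; the name encodeheads and the encodeutf8 helper are assumptions, not confirmed API.

def encodeheads(heads):
    lines = [FORMAT_VERSION] + [node.hex(head) for head in heads]
    return encodeutf8("".join(line + "\n" for line in lines))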