Code example #1
File: stackpush.py Project: ahornby/eden
    def check(self, ctx):
        """Check if push onto ctx can be done

        Raise ConflictsError if there are conflicts.
        """
        mctx = ctx.manifestctx()
        conflicts = []
        for path, expected in pycompat.iteritems(self.fileconditions):
            try:
                actual = mctx.find(path)
            except KeyError:
                actual = None
            if actual != expected:
                conflicts.append(path)
        if conflicts:
            raise ConflictsError(conflicts)
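
Every example on this page iterates a dict through pycompat.iteritems, a Python 2/3 compatibility shim for the dict.iteritems()/dict.items() rename. A minimal sketch of what such a shim typically looks like (illustrative only, not the actual eden pycompat module):

import sys

if sys.version_info[0] >= 3:
    def iteritems(d):
        # Python 3: items() already returns a lazy view object.
        return iter(d.items())
else:
    def iteritems(d):
        # Python 2: iteritems() avoids materializing a list of pairs.
        return d.iteritems()
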
Code example #2
File: __init__.py Project: mitrandir77/eden
def checkunknownfiles(orig, repo, wctx, mctx, force, actions, *args,
                      **kwargs):
    if shallowrepo.requirement in repo.requirements:
        files = []
        sparsematch = repo.maybesparsematch(mctx.rev())
        for f, (m, actionargs, msg) in pycompat.iteritems(actions):
            if sparsematch and not sparsematch(f):
                continue
            if m in ("c", "dc", "cm"):
                files.append((f, hex(mctx.filenode(f))))
            elif m == "dg":
                f2 = actionargs[0]
                files.append((f2, hex(mctx.filenode(f2))))
        # batch fetch the needed files from the server
        repo.fileservice.prefetch(files, fetchhistory=False)
    return orig(repo, wctx, mctx, force, actions, *args, **kwargs)
Code example #3
File: pointer.py Project: zerkella/eden
def validate(self):
    """raise InvalidPointer on error. return self if there is no error"""
    requiredcount = 0
    for k, v in pycompat.iteritems(self):
        if k in self._requiredre:
            if not self._requiredre[k].match(v):
                raise InvalidPointer(_("unexpected value: %s=%r") % (k, v))
            requiredcount += 1
        elif not self._keyre.match(k):
            raise InvalidPointer(_("unexpected key: %s") % k)
        if not self._valuere.match(v):
            raise InvalidPointer(_("unexpected value: %s=%r") % (k, v))
    if len(self._requiredre) != requiredcount:
        miss = sorted(set(self._requiredre.keys()).difference(self.keys()))
        raise InvalidPointer(_("missing keys: %s") % ", ".join(miss))
    return self
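
validate() checks every key against _keyre, every value against _valuere, and required keys against per-key patterns in _requiredre. A hedged illustration of what those class attributes might look like (hypothetical patterns loosely following the Git LFS pointer format; the actual class definition may differ):

import re

class PointerSketch(dict):
    # Hypothetical: keys are lowercase alphanumerics plus "." or "-",
    # values are single lines, and required keys get stricter patterns.
    _keyre = re.compile(r"[a-z0-9.-]+$")
    _valuere = re.compile(r"[^\n]*$")
    _requiredre = {
        "size": re.compile(r"\d+$"),
        "oid": re.compile(r"sha256:[0-9a-f]{64}$"),
        "version": re.compile(r"https://git-lfs\.github\.com/spec/v\d+$"),
    }
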
Code example #4
File: remotenames.py Project: mitrandir77/eden
def precachedistance(repo):
    """
    Calculate and cache the distance between bookmarks and what they
    track, plus the distance from the tipmost head on the current
    topological branch. This can be an expensive operation, especially
    in repositories with a high commit rate, so it can be turned off in
    your hgrc:

        [remotenames]
        precachedistance = False
        precachecurrent = False
    """
    # when working between multiple local repos which do not all have
    # remotenames enabled, do this work only for those with it enabled
    if not util.safehasattr(repo, "_remotenames"):
        return

    # to avoid stale namespaces, let's reload
    repo._remotenames.clearnames()

    wlock = repo.wlock()
    try:
        invalidatedistancecache(repo)

        distances = {}
        if repo.ui.configbool("remotenames", "precachedistance"):
            for bmark, tracked in pycompat.iteritems(_readtracking(repo)):
                distance = calculatenamedistance(repo, bmark, tracked)
                if distance != (None, None):
                    distances[bmark] = distance
            writedistancecache(repo, distances)

        if repo.ui.configbool("remotenames", "precachecurrent"):
            # are we on a 'branch' but not at the head?
            # i.e. is there a bookmark that we are heading towards?
            revs = list(repo.revs("limit(.:: and bookmark() - ., 1)"))
            if revs:
                # if we are here then we have one or more bookmarks
                # and we'll pick the first one for now
                bmark = repo[revs[0]].bookmarks()[0]
                distance = len(repo.revs("only(%d, .)", revs[0]))
                cachevfs = shareawarecachevfs(repo)
                cachevfs.writeutf8("distance.current",
                                   "%s %d" % (bmark, distance))

    finally:
        wlock.release()
Code example #5
File: bundleparts.py Project: leszfb/eden
def bundle2scratchbookmarks(op, part):
    """Handler deletes bookmarks first then adds new bookmarks."""
    index = op.repo.bundlestore.index
    decodedbookmarks = bookmarks.decodebookmarks(part)
    toinsert = {}
    todelete = []
    for bookmark, node in pycompat.iteritems(decodedbookmarks):
        if node:
            toinsert[bookmark] = node
        else:
            todelete.append(bookmark)
    log = server._getorcreateinfinitepushlogger(op)
    with server.logservicecall(log, constants.scratchbookmarksparttype), index:
        if todelete:
            index.deletebookmarks(todelete)
        if toinsert:
            index.addmanybookmarks(toinsert, True)
Code example #6
File: git2hg.py Project: x414e54/eden
    def get_heads(refs):
        todo = []
        seenheads = set()
        for ref, sha in pycompat.iteritems(refs):
            # refs could contain refs on the server that we haven't pulled down
            # the objects for; also make sure it's a sha and not a symref
            if ref != "HEAD" and sha in git_object_store:
                obj = git_object_store[sha]
                while isinstance(obj, Tag):
                    obj_type, sha = obj.object
                    obj = git_object_store[sha]
                if isinstance(obj, Commit) and sha not in seenheads:
                    seenheads.add(sha)
                    todo.append(sha)

        todo.sort(key=commitdate, reverse=True)
        return todo
Code example #7
    def testLargePack(self):
        """Test creating and reading from a large pack with over X entries.
        This causes it to use a 2^16 fanout table instead."""
        revisions = []
        blobs = {}
        total = SMALLFANOUTCUTOFF + 1
        for i in xrange(total):
            filename = "filename-%s" % i
            content = pycompat.encodeutf8(filename)
            node = self.getHash(content)
            blobs[(filename, node)] = content
            revisions.append((filename, node, nullid, content))

        pack = self.createPack(revisions)

        for (filename, node), content in pycompat.iteritems(blobs):
            actualcontent = pack.getdeltachain(filename, node)[0][4]
            self.assertEqual(actualcontent, content)
Code example #8
    def logmanybookmarksforreplay(self, bookmarks, isbackup):
        """Log the contents of the ``bookmarks`` dict for replay."""

        if isbackup:
            # We don't replay backup bookmarks.
            return

        if not self.replaybookmarks:
            return

        data = [(bookmark, node, hashlib.sha1(bookmark).hexdigest(),
                 self.reponame)
                for (bookmark, node) in pycompat.iteritems(bookmarks)]

        self.sqlcursor.executemany(
            "INSERT INTO replaybookmarksqueue(bookmark, node, bookmark_hash, reponame) "
            "VALUES (%s, %s, %s, %s)",
            data,
        )
Code example #9
def saveremotebookmarks(repo, newbookmarks, remote):
    remotenamesext = extensions.find("remotenames")
    remotepath = remotenamesext.activepath(repo.ui, remote)
    bookmarks = {}
    remotenames = remotenamesext.readremotenames(repo)
    for hexnode, nametype, remote, rname in remotenames:
        if remote != remotepath:
            continue
        if nametype == "bookmarks":
            if rname in newbookmarks:
                # It's possible that a normal bookmark matches the scratch
                # branch pattern. In this case just use the current bookmark
                # node.
                del newbookmarks[rname]
            bookmarks[rname] = hexnode

    for bookmark, hexnode in pycompat.iteritems(newbookmarks):
        bookmarks[bookmark] = hexnode
    remotenamesext.saveremotenames(repo, {remotepath: bookmarks})
Code example #10
def _drawendinglines(orig, lines, extra, edgemap, seen):
    # if we are going to have only one single column, draw the missing '|'s
    # and restore everything to normal. see comment in 'ascii' below for an
    # example of what will be changed. note: we do not respect 'graphstyle'
    # but always draw '|' here, for simplicity.
    if len(seen) == 1 or any(l[0:2] != [" ", " "] for l in lines):
        # draw '|' from bottom to top in the 1st column to connect to
        # something, like a '/' in the 2nd column, or a '+' in the 1st column.
        for line in reversed(lines):
            if line[0:2] != [" ", " "]:
                break
            line[0] = "|"
        # undo the wrapfunction
        extensions.unwrapfunction(graphmod, "_drawendinglines", _drawendinglines)
        # restore the space to '|'
        for k, v in pycompat.iteritems(edgemap):
            if v == " ":
                edgemap[k] = "|"
    orig(lines, extra, edgemap, seen)
Code example #11
def buildpackmeta(metadict: "Mapping[str, int]") -> bytes:
    """like _buildpackmeta, but typechecks metadict and normalize it.

    This means, METAKEYSIZE and METAKEYSIZE should have integers as values,
    and METAKEYFLAG will be dropped if its value is 0.
    """
    newmeta = {}
    for k, v in pycompat.iteritems((metadict or {})):
        expectedtype = _metaitemtypes.get(k, (bytes, ))
        if not isinstance(v, expectedtype):
            raise error.ProgrammingError("packmeta: wrong type of key %s" % k)
        # normalize int to binary buffer
        if int in expectedtype:
            # optimization: remove flag if it's 0 to save space
            if k == constants.METAKEYFLAG and v == 0:
                continue
            v = int2bin(v)
        newmeta[k] = v
    return _buildpackmeta(newmeta)
Code example #12
def _cmdline(self, cmd, *args, **kwargs):
    cmdline = [self.command, cmd] + list(args)
    for k, v in pycompat.iteritems(kwargs):
        if len(k) == 1:
            cmdline.append("-" + k)
        else:
            cmdline.append("--" + k.replace("_", "-"))
        try:
            if len(k) == 1:
                cmdline.append("" + v)
            else:
                cmdline[-1] += "=" + v
        except TypeError:
            pass
    cmdline = [util.shellquote(arg) for arg in cmdline]
    if not self.ui.debugflag:
        cmdline += ["2>", os.devnull]
    cmdline = " ".join(cmdline)
    return cmdline
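
A standalone illustration of the kwargs-to-flags mapping above (hypothetical values; util.shellquote and the stderr redirect are omitted). The TypeError branch is what turns non-string values such as True into bare flags:

def kwargs_to_flags(**kwargs):
    # Mirrors _cmdline: "-k value" for single-letter keys,
    # "--long-key=value" otherwise; underscores become dashes.
    out = []
    for k, v in kwargs.items():
        if len(k) == 1:
            out.append("-" + k)
        else:
            out.append("--" + k.replace("_", "-"))
        try:
            if len(k) == 1:
                out.append("" + v)
            else:
                out[-1] += "=" + v
        except TypeError:
            pass  # non-string value: keep the bare flag
    return out

print(kwargs_to_flags(r="tip", ignore_case=True))
# ['-r', 'tip', '--ignore-case']
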
Code example #13
File: subversion.py Project: leszfb/eden
    def _getchanges(self, rev, full):
        (paths, parents) = self.paths[rev]
        copies = {}
        if parents:
            files, self.removed, copies = self.expandpaths(rev, paths, parents)
        if full or not parents:
            # Perform a full checkout on roots
            uuid, module, revnum = revsplit(rev)
            entries = svn.client.ls(self.baseurl + quote(module),
                                    optrev(revnum), True, self.ctx)
            files = [
                n for n, e in pycompat.iteritems(entries)
                if e.kind == svn.core.svn_node_file
            ]
            self.removed = set()

        files.sort()
        files = zip(files, [rev] * len(files))
        return (files, copies)
Code example #14
File: hg.py Project: xmonader/eden
    def setbranch(self, branch, pbranches):
        if not self.clonebranches:
            return

        setbranch = branch != self.lastbranch
        self.lastbranch = branch
        if not branch:
            branch = "default"
        pbranches = [(b[0], b[1] and b[1] or "default") for b in pbranches]
        if pbranches:
            pbranch = pbranches[0][1]
        else:
            pbranch = "default"

        branchpath = os.path.join(self.path, branch)
        if setbranch:
            self.after()
            try:
                self.repo = hg.repository(self.ui, branchpath)
            except Exception:
                self.repo = hg.repository(self.ui, branchpath, create=True)
            self.before()

        # pbranches may bring revisions from other branches (merge parents)
        # Make sure we have them, or pull them.
        missings = {}
        for b in pbranches:
            try:
                self.repo.lookup(b[0])
            except Exception:
                missings.setdefault(b[1], []).append(b[0])

        if missings:
            self.after()
            for pbranch, heads in sorted(pycompat.iteritems(missings)):
                pbranchpath = os.path.join(self.path, pbranch)
                prepo = hg.peer(self.ui, {}, pbranchpath)
                self.ui.note(
                    _("pulling from %s into %s\n") % (pbranch, branch))
                exchange.pull(self.repo, prepo,
                              [prepo.lookup(h) for h in heads])
            self.before()
Code example #15
def localrepolistkeys(orig, self, namespace, patterns=None):
    """Wrapper of localrepo.listkeys()"""

    if namespace == "bookmarks" and patterns:
        index = self.bundlestore.index
        # Using sortdict instead of a dictionary to ensure that bookmarks are
        # restored in the same order after a pullbackup. See T24417531
        results = util.sortdict()
        bookmarks = orig(self, namespace)
        for pattern in patterns:
            results.update(index.getbookmarks(pattern))
            if pattern.endswith("*"):
                pattern = "re:^" + pattern[:-1] + ".*"
            kind, pat, matcher = util.stringmatcher(pattern)
            for bookmark, node in pycompat.iteritems(bookmarks):
                if matcher(bookmark):
                    results[bookmark] = node
        return results
    else:
        return orig(self, namespace)
Code example #16
    def _sendpackrequest(self, remote, fileids):
        """Formats and writes the given fileids to the remote as part of a
        getpackv1 call.
        """
        # Sort the requests by name, so we receive requests in batches by name
        grouped = {}
        for filename, node in fileids:
            grouped.setdefault(filename, set()).add(node)

        # Issue request
        pipeo = shallowutil.trygetattr(remote, ("_pipeo", "pipeo"))
        for filename, nodes in pycompat.iteritems(grouped):
            filenamelen = struct.pack(constants.FILENAMESTRUCT, len(filename))
            countlen = struct.pack(constants.PACKREQUESTCOUNTSTRUCT, len(nodes))
            rawnodes = "".join(n for n in nodes)

            pipeo.write("%s%s%s%s" % (filenamelen, filename, countlen, rawnodes))
            pipeo.flush()
        pipeo.write(struct.pack(constants.FILENAMESTRUCT, 0))
        pipeo.flush()
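
The wire format written above is, per file: a packed filename length, the filename bytes, a packed node count, then the concatenated raw node hashes, terminated by a zero-length filename. A hedged decoding sketch (assuming FILENAMESTRUCT is "!H", PACKREQUESTCOUNTSTRUCT is "!I", and 20-byte binary nodes; check constants.py before relying on these):

import struct

def read_pack_request(read):
    # Parse one getpackv1-style request from a read(n) callable.
    while True:
        (namelen,) = struct.unpack("!H", read(2))  # assumed FILENAMESTRUCT
        if namelen == 0:
            break  # zero-length filename ends the request
        filename = read(namelen)
        (count,) = struct.unpack("!I", read(4))  # assumed PACKREQUESTCOUNTSTRUCT
        yield filename, [read(20) for _ in range(count)]  # assumed 20-byte nodes
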
Code example #17
    def testPackMany(self):
        """Pack many related and unrelated ancestors.
        """
        # Build a random pack file
        allentries = {}
        ancestorcounts = {}
        revisions = []
        random.seed(0)
        for i in range(100):
            filename = "filename-%s" % i
            entries = []
            p2 = nullid
            linknode = nullid
            for j in range(random.randint(1, 100)):
                node = self.getFakeHash()
                p1 = nullid
                if len(entries) > 0:
                    p1 = entries[random.randint(0, len(entries) - 1)]
                entries.append(node)
                revisions.append((filename, node, p1, p2, linknode, None))
                allentries[(filename, node)] = (p1, p2, linknode)
                if p1 == nullid:
                    ancestorcounts[(filename, node)] = 1
                else:
                    newcount = ancestorcounts[(filename, p1)] + 1
                    ancestorcounts[(filename, node)] = newcount

        # Must add file entries in reverse topological order
        revisions = list(reversed(revisions))
        pack = self.createPack(revisions)
        store = unionmetadatastore(pack)

        # Verify the pack contents
        for (filename, node), (p1, p2,
                               lastnode) in pycompat.iteritems(allentries):
            ap1, ap2, alinknode, acopyfrom = store.getnodeinfo(filename, node)
            ep1, ep2, elinknode = allentries[(filename, node)]
            self.assertEqual(ap1, ep1)
            self.assertEqual(ap2, ep2)
            self.assertEqual(alinknode, elinknode)
            self.assertEqual(acopyfrom, None)
Code example #18
File: hg.py Project: xmonader/eden
    def _calculatemergedfiles(self, source, p1ctx, p2ctx):
        """Calculates the files from p2 that we need to pull in when merging p1
        and p2, given that the merge is coming from the given source.

        This prevents us from losing files that only exist in the target p2 and
        that don't come from the source repo (like if you're merging multiple
        repositories together).
        """
        anc = [p1ctx.ancestor(p2ctx)]
        # Calculate what files are coming from p2
        actions, diverge, rename = mergemod.calculateupdates(
            self.repo,
            p1ctx,
            p2ctx,
            anc,
            True,  # branchmerge
            True,  # force
            False,  # acceptremote
            False,  # followcopies
        )

        for file, (action, info, msg) in pycompat.iteritems(actions):
            if source.targetfilebelongstosource(file):
                # If the file belongs to the source repo, ignore the p2
                # since it will be covered by the existing fileset.
                continue

            # If the file requires actual merging, abort. We don't have enough
            # context to resolve merges correctly.
            if action in ["m", "dm", "cd", "dc"]:
                raise error.Abort(
                    _("unable to convert merge commit "
                      "since target parents do not merge cleanly (file "
                      "%s, parents %s and %s)") % (file, p1ctx, p2ctx))
            elif action == "k":
                # 'keep' means nothing changed from p1
                continue
            else:
                # Any other change means we want to take the p2 version
                yield file
Code example #19
File: __init__.py Project: simpkins/eden
def checkunknownfiles(orig, repo, wctx, mctx, force, actions, *args,
                      **kwargs):
    if shallowrepo.requirement in repo.requirements:
        files = []
        sparsematch = repo.maybesparsematch(mctx.rev())
        for f, (m, actionargs, msg) in pycompat.iteritems(actions):
            if sparsematch and not sparsematch(f):
                continue
            if m in ("c", "dc", "cm"):
                files.append((f, hex(mctx.filenode(f))))
            elif m == "dg":
                f2 = actionargs[0]
                files.append((f2, hex(mctx.filenode(f2))))
        # We need history for the files so we can compute the sha(p1, p2,
        # text) for the files on disk. This will unfortunately fetch all the
        # history for the files, which is excessive. In the future we should
        # change this to fetch the sha256 and size, then we can avoid p1, p2
        # entirely.
        repo.fileservice.prefetch(files,
                                  fetchdata=False,
                                  fetchhistory=True)
    return orig(repo, wctx, mctx, force, actions, *args, **kwargs)
Code example #20
File: shallowutil.py Project: mitrandir77/eden
def _buildpackmeta(metadict):
    # type: (Mapping[str, bytes]) -> bytes
    """reverse of _parsepackmeta, dict -> bytes (<metadata-list>)

    The dict contains raw content - keys are strings and values are bytes.
    Upper-level callers may want to serialize other types (like integers)
    to strings before calling this function.

    Raises ProgrammingError when a metadata key is illegal, or ValueError
    if the length limit is exceeded.
    """
    metabuf = b""
    for k, v in sorted(pycompat.iteritems((metadict or {}))):
        if len(k) != 1:
            raise error.ProgrammingError("packmeta: illegal key: %s" % k)
        if len(v) > 0xFFFE:
            raise ValueError("metadata value is too long: 0x%x > 0xfffe" % len(v))
        metabuf += encodeutf8(k)
        metabuf += struct.pack("!H", len(v))
        metabuf += v
    # len(metabuf) is guaranteed representable in 4 bytes, because there are
    # only 256 keys, and for each value, len(value) <= 0xfffe.
    return metabuf
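
Concretely, each entry is one key byte, a big-endian 16-bit length, then the value bytes, so {"s": b"1234"} serializes to b"s\x00\x041234". A hedged sketch of the reverse direction the docstring mentions (illustrative, not the actual _parsepackmeta):

import struct

def parsepackmeta_sketch(metabuf):
    # Decode the <metadata-list> layout: 1-byte key, "!H" length, value.
    metadict = {}
    offset = 0
    while offset < len(metabuf):
        key = metabuf[offset:offset + 1].decode("utf-8")
        (vlen,) = struct.unpack_from("!H", metabuf, offset + 1)
        metadict[key] = metabuf[offset + 3:offset + 3 + vlen]
        offset += 3 + vlen
    return metadict
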
Code example #21
File: filemap.py Project: zerkella/eden
    def istargetfile(self, filename):
        """Return true if the given target filename is covered as a destination
        of the filemap. This is useful for identifying what parts of the target
        repo belong to the source repo and what parts don't."""
        if self.targetprefixes is None:
            self.targetprefixes = set()
            for before, after in pycompat.iteritems(self.rename):
                self.targetprefixes.add(after)

        # If "." is a target, then all target files are considered from the
        # source.
        if not self.targetprefixes or "." in self.targetprefixes:
            return True

        filename = normalize(filename)
        for pre, suf in rpairs(filename):
            # This check is imperfect since it doesn't account for the
            # include/exclude list, but it should work in filemaps that don't
            # apply include/exclude to the same source directories they are
            # renaming.
            if pre in self.targetprefixes:
                return True
        return False
Code example #22
File: sigtrace.py Project: pombredanne/eden-1
def printstacks(sig, currentframe):
    content = ""
    for tid, frame in pycompat.iteritems(sys._current_frames()):
        content += "Thread %s:\n%s\n" % (tid, util.smarttraceback(frame))

    path = pathformat % {"time": time.time(), "pid": os.getpid()}
    with open(path, "w") as f:
        f.write(content)

    # Also print to stderr
    sys.stderr.write(content)
    sys.stderr.write("\nStacktrace written to %s\n" % path)
    sys.stderr.flush()

    # Calculate the tracing data (can take a while) and write it.
    content = "Tracing Data:\n%s\n" % util.tracer.ascii()
    with open(path, "a") as f:
        f.write("\n")
        f.write(content)

    sys.stderr.write(content)
    sys.stderr.write("\nTracing data written to %s\n" % path)
    sys.stderr.flush()
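
The (sig, currentframe) signature matches Python's signal-handler contract, so wiring it up looks roughly like this (hypothetical signal choice and pathformat value; the real extension presumably makes both configurable):

import os
import signal

# Hypothetical module-level template; printstacks expects a pathformat
# with %(time)s and %(pid)s placeholders.
pathformat = os.path.join("/tmp", "trace-%(pid)s-%(time)s.log")

# Dump all thread stacks and tracing data on SIGUSR1.
signal.signal(signal.SIGUSR1, printstacks)
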
Code example #23
File: sync.py Project: simpkins/eden
def _updateremotebookmarks(repo, tr, updates):
    """updates the remote bookmarks to point their new nodes"""
    oldremotebookmarks = _getremotebookmarks(repo)
    protectednames = _getprotectedremotebookmarks(repo)
    newremotebookmarks = {}
    omittedremotebookmarks = []
    unfi = repo

    # Filter out any deletions of default names.  These are protected and shouldn't
    # be deleted if this is the default remote
    for remotename, node in pycompat.iteritems(updates):
        remote, name = bookmarks.splitremotename(remotename)
        if node == nodemod.nullhex and remotename in protectednames:
            newremotebookmarks[remotename] = oldremotebookmarks.get(
                remotename, nodemod.nullhex)
        elif node != nodemod.nullhex and node not in unfi:
            omittedremotebookmarks.append(name)
            newremotebookmarks[remotename] = nodemod.nullhex
        else:
            newremotebookmarks[remotename] = node
    repo._remotenames.applychanges({"bookmarks": newremotebookmarks})

    return omittedremotebookmarks
Code example #24
File: git2hg.py Project: leszfb/eden
    def get_heads(refs):
        todo = []
        seenheads = set()
        for ref, sha in pycompat.iteritems(refs):
            # refs could contain refs on the server that we haven't pulled down
            # the objects for; also make sure it's a sha and not a symref
            assert isinstance(sha, bytes), "expected bytes, actual %s %s" % (
                sha.__class__,
                sha,
            )
            if ref != "HEAD" and len(sha) == 40 and sha in git_object_store:
                obj = git_object_store[sha]
                while isinstance(obj, Tag):
                    obj_type, sha = obj.object
                    obj = git_object_store[sha]
                if isinstance(obj, Commit) and sha not in seenheads:
                    seenheads.add(sha)
                    todo.append(sha)

        todo.sort(key=commitdate, reverse=True)

        # We convert to utf8 after the sort, since commitdate expects byte shas
        return [pycompat.decodeutf8(s) for s in todo]
Code example #25
def _generateoutputparts(
    head, cgversion, bundlecaps, bundlerepo, bundleroots, bundlefile
):
    """generates bundle that will be send to the user

    returns tuple with raw bundle string and bundle type
    """
    parts = []
    if not _needsrebundling(head, bundlerepo):
        with util.posixfile(bundlefile, "rb") as f:
            unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
            if isinstance(unbundler, changegroup.cg1unpacker):
                part = bundle2.bundlepart("changegroup", data=unbundler._stream.read())
                part.addparam("version", "01")
                parts.append(part)
            elif isinstance(unbundler, bundle2.unbundle20):
                haschangegroup = False
                for part in unbundler.iterparts():
                    if part.type == "changegroup":
                        haschangegroup = True
                    newpart = bundle2.bundlepart(part.type, data=part.read())
                    for key, value in pycompat.iteritems(part.params):
                        newpart.addparam(key, value)
                    parts.append(newpart)

                if not haschangegroup:
                    raise error.Abort(
                        "unexpected bundle without changegroup part, "
                        + "head: %s" % hex(head),
                        hint="report to administrator",
                    )
            else:
                raise error.Abort("unknown bundle type")
    else:
        parts = _rebundle(bundlerepo, bundleroots, head, cgversion, bundlecaps)

    return parts
Code example #26
    def __init__(self, ui, repotype, path, revs=None):
        """
        raises common.NoRepo if the directory doesn't exist or isn't a Google repo
        """

        super(repo_source, self).__init__(ui, repotype, path, revs=revs)

        self._fullmergeenabled = self.ui.configbool(self.CONFIG_NAMESPACE,
                                                    self.CONFIG_FULL_MERGE,
                                                    default=True)
        self._difftreecacheenabled = self.ui.configbool(
            self.CONFIG_NAMESPACE,
            self.CONFIG_DIFFTREE_CACHE_ENABLED,
            default=True)
        self._dirredenabled = self.ui.configbool(self.CONFIG_NAMESPACE,
                                                 self.CONFIG_DIRRED_ENABLED,
                                                 default=True)
        self._branchincludelist = self.ui.configlist(
            self.CONFIG_NAMESPACE, self.CONFIG_BRANCH_WHITELIST, default=None)

        self.srcencoding = "utf-8"  # TODO: Read from git source projects
        self.pprinter = pprint.PrettyPrinter()
        self.repo = repo(ui, path, branchincludelist=self._branchincludelist)
        self.repocommandline = repo_commandline(ui, path)
        self.gitcommandline = common.commandline(ui, "git")

        self.pathprojectindex = self.repo._buildprojectmap()
        self.projectpathindex = {
            project: path
            for path, project in pycompat.iteritems(self.pathprojectindex)
        }
        self.commitprojectindex = self._buildcommitprojectmap()
        self.objecthashprojectindex = {}
        self.filecache = {}
        self._difftreecache = {}
        self._filemodecache = {}
Code example #27
File: protocol.py Project: xmonader/eden
def clientfetch(repo, paths, lastnodemap=None, peer=None):
    """download annotate cache from the server for paths"""
    if not paths:
        return

    if peer is None:
        with annotatepeer(repo) as peer:
            return clientfetch(repo, paths, lastnodemap, peer)

    if lastnodemap is None:
        lastnodemap = {}

    ui = repo.ui
    batcher = peer.iterbatch()
    ui.debug("fastannotate: requesting %d files\n" % len(paths))
    for p in paths:
        batcher.getannotate(p, lastnodemap.get(p))
    # Note: This is the only place that fastannotate sends a request via SSH.
    # The SSH stream should not be in the remotefilelog "getfiles" loop.
    batcher.submit()
    results = list(batcher.results())

    ui.debug("fastannotate: server returned\n")
    for result in results:
        for path, content in pycompat.iteritems(result):
            # ignore malicious paths
            if not path.startswith("fastannotate/") or "/../" in (path + "/"):
                ui.debug("fastannotate: ignored malicious path %s\n" % path)
                continue
            if ui.debugflag:
                ui.debug(
                    "fastannotate: writing %d bytes to %s\n" % (len(content), path)
                )
            repo.localvfs.makedirs(os.path.dirname(path))
            with repo.localvfs(path, "wb") as f:
                f.write(content)
Code example #28
File: __init__.py Project: quark-zju/eden
    def _willbecomenoop(memworkingcopy, ctx, pctx=None):
        """({path: content}, ctx, ctx) -> bool. test if a commit will be noop

        if it will become an empty commit (does not change anything, after the
        memworkingcopy overrides), return True. otherwise return False.
        """
        if not pctx:
            parents = ctx.parents()
            if len(parents) != 1:
                return False
            pctx = parents[0]
        # ctx changes more files (not a subset of memworkingcopy)
        if not set(ctx.files()).issubset(set(pycompat.iterkeys(memworkingcopy))):
            return False
        for path, content in pycompat.iteritems(memworkingcopy):
            if path not in pctx or path not in ctx:
                return False
            fctx = ctx[path]
            pfctx = pctx[path]
            if pfctx.flags() != fctx.flags():
                return False
            if pfctx.data() != content:
                return False
        return True
Code example #29
File: copytrace.py Project: simpkins/eden
def _getamendcopies(repo, dest, ancestor):
    db, error = opendbm(repo, "r")
    if db is None:
        return {}
    try:
        ctx = dest
        count = 0
        limit = repo.ui.configint("copytrace", "amendcopytracecommitlimit")

        # Search for the ancestor commit that has amend copytrace data.  This
        # will be the most recent amend commit if we are rebasing onto an
        # amend commit.  If we reach the common ancestor or a public commit,
        # then there is no amend copytrace data to be found.
        while ctx.node() not in db:
            ctx = ctx.p1()
            count += 1
            if ctx == ancestor or count > limit or ctx.phase() == phases.public:
                return {}

        # Load the amend copytrace data from this commit.
        encoded = json.loads(db[ctx.node()])
        return dict(
            (
                codecs.decode(k.encode("utf8"), "base64").decode("utf8"),
                codecs.decode(v.encode("utf8"), "base64").decode("utf8"),
            )
            for (k, v) in pycompat.iteritems(encoded)
        )
    except Exception:
        repo.ui.log("copytrace", "Failed to load amend copytrace for %s" % dest.hex())
        return {}
    finally:
        try:
            db.close()
        except error:
            pass
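
The JSON payload decoded above base64-encodes both keys and values; a hedged sketch of the matching encoder (hypothetical name, mirroring the decode step; the real writer lives elsewhere in copytrace.py):

import codecs
import json

def encodeamendcopies_sketch(copies):
    # Mirror image of the decode above: base64-encode both keys and
    # values, then store the mapping as JSON.
    return json.dumps({
        codecs.encode(k.encode("utf8"), "base64").decode("utf8"):
            codecs.encode(v.encode("utf8"), "base64").decode("utf8")
        for k, v in copies.items()
    })
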
Code example #30
File: __init__.py Project: ahornby/eden
def printchunkstats(self):
    """print things like '1 of 2 chunk(s) applied'"""
    ui = self.ui
    chunkstats = self.chunkstats
    if ui.verbose:
        # chunkstats for each file
        for path, stat in pycompat.iteritems(chunkstats):
            if stat[0]:
                ui.write(
                    _n(
                        "%s: %d of %d chunk applied\n",
                        "%s: %d of %d chunks applied\n",
                        stat[1],
                    ) % (path, stat[0], stat[1]))
    elif not ui.quiet:
        # a summary for all files
        stats = chunkstats.values()
        applied, total = (sum(s[i] for s in stats) for i in (0, 1))
        if applied == 0:
            ui.write(_("nothing applied\n"))
        else:
            ui.write(
                _n("%d of %d chunk applied\n", "%d of %d chunks applied\n",
                   total) % (applied, total))