Example #1
File: dirsync.py Project: ahornby/eden
def getconfigs(wctx):
    """returns {name: [path]}.
    [path] under a same name are synced. name is not useful.
    """
    # read from .hgdirsync in repo
    filename = ".hgdirsync"
    try:
        content = pycompat.decodeutf8(wctx[filename].data())
    except (error.ManifestLookupError, IOError, AttributeError, KeyError):
        content = ""
    cfg = config.config()
    if content:
        cfg.parse(filename, "[dirsync]\n%s" % content, ["dirsync"])

    maps = util.sortdict()
    repo = wctx.repo()
    for key, value in repo.ui.configitems("dirsync") + cfg.items("dirsync"):
        if "." not in key:
            continue
        name, disambig = key.split(".", 1)
        # Normalize paths to have / at the end. For easy concatenation later.
        if value[-1] != "/":
            value = value + "/"
        if name not in maps:
            maps[name] = []
        maps[name].append(value)
    return maps
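The grouping and trailing-slash normalization above are easy to exercise in isolation. Below is a minimal sketch under two assumptions: collections.OrderedDict stands in for util.sortdict (both preserve insertion order), and the config keys and paths are hypothetical:

from collections import OrderedDict

def group_dirsync_items(items):
    # items: (key, value) pairs from a [dirsync] section, where keys look
    # like "<name>.<disambiguator>" and values are directory paths.
    maps = OrderedDict()  # stand-in for util.sortdict
    for key, value in items:
        if "." not in key:
            continue  # keys without a name part are skipped, as above
        name, _disambig = key.split(".", 1)
        # Normalize paths to end with "/" for easy concatenation later.
        if not value.endswith("/"):
            value += "/"
        maps.setdefault(name, []).append(value)
    return maps

# Hypothetical config: two directories synced under the name "projx".
print(dict(group_dirsync_items([("projx.dir1", "a/b"), ("projx.dir2", "c/d/")])))
# {'projx': ['a/b/', 'c/d/']}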
Example #2
def extractpointers(repo, revs):
    """return a list of lfs pointers added by given revs"""
    ui = repo.ui
    if ui.debugflag:
        ui.write(_("lfs: computing set of blobs to upload\n"))
    pointers = util.sortdict()
    for r in revs:
        ctx = repo[r]
        for p in pointersfromctx(ctx).values():
            pointers[p.oid()] = p
    return pointers.values()
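Keying the result by blob OID is what deduplicates uploads: a blob referenced by several files or revisions is recorded only once, and sortdict keeps the first-seen order. A standalone sketch of that step, with OrderedDict as a stand-in for util.sortdict and a hypothetical Pointer class:

from collections import OrderedDict

class Pointer:
    # hypothetical stand-in for an LFS pointer object
    def __init__(self, oid):
        self._oid = oid

    def oid(self):
        return self._oid

pointers = OrderedDict()  # stand-in for util.sortdict
for p in [Pointer("aa"), Pointer("bb"), Pointer("aa")]:
    pointers[p.oid()] = p  # duplicates overwrite; first-seen order is kept

print(list(pointers))  # ['aa', 'bb'] -- each blob appears exactly once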
Example #3
File: __init__.py Project: x414e54/eden
def debuglfsdownload(ui, repo, *pats, **opts):
    """calculate the LFS download size when updating between REV1 and REV2

    If --no-sparse is provided, this operation would ignore any sparse
    profile that might be present and report data for the full checkout.

    With -v also prints which files are to be downloaded and the size of
    each file."""
    revs = opts.get("rev")

    node1, node2 = scmutil.revpair(repo, revs)
    match = lambda s: True
    if not opts.get("sparse"):
        ui.debug("will ignore sparse profile in this repo\n")
    else:
        if not util.safehasattr(repo, "sparsematch"):
            raise error.Abort(
                _("--ignore-sparse makes no sense in a non-sparse repository")
            )
        match = repo.sparsematch(node2)

    with ui.configoverride({("remotefilelog", "dolfsprefetch"): False}):
        ctx1, ctx2 = repo[node1], repo[node2]
        mfdiff = ctx2.manifest().diff(ctx1.manifest())
        lfsflogs = util.sortdict()  # LFS filelogs
        for fname in mfdiff:
            if not match(fname):
                continue
            flog = repo.file(fname)
            try:
                node = ctx2.filenode(fname)
            except error.ManifestLookupError:
                continue
            if wrapper._islfs(flog, node=node):
                lfsflogs[fname] = flog

        totalsize = 0
        presentsize = 0
        for fname, flog in lfsflogs.items():
            rawtext = flog.revision(ctx2.filenode(fname), raw=True)
            p = pointer.deserialize(rawtext)
            present = repo.svfs.lfslocalblobstore.has(p.oid())
            lfssize = int(p["size"])
            ui.note(_("%s: %i (present=%r)\n") % (fname, lfssize, present))
            totalsize += lfssize
            presentsize += lfssize if present else 0
        ui.status(
            _("Total size: %i, to download: %i, already exists: %r\n")
            % (totalsize, totalsize - presentsize, presentsize)
        )
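The accounting in the final loop is simple arithmetic: every matched LFS file adds its pointer size to totalsize, files whose blobs are already in the local store also add to presentsize, and the amount left to download is the difference. A standalone sketch with hypothetical (name, size, present) triples:

# hypothetical data: (filename, LFS size in bytes, blob already present?)
files = [("big.bin", 1000, True), ("huge.bin", 5000, False)]

totalsize = sum(size for _name, size, _present in files)
presentsize = sum(size for _name, size, present in files if present)
print("Total size: %i, to download: %i, already exists: %i"
      % (totalsize, totalsize - presentsize, presentsize))
# Total size: 6000, to download: 5000, already exists: 1000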
Example #4
def pointersfromctx(ctx):
    """return a dict {path: pointer} for given single changectx"""
    result = util.sortdict()
    for f in ctx.files():
        if f not in ctx:
            continue
        fctx = ctx[f]
        if not _islfs(fctx.filelog(), fctx.filenode()):
            continue
        try:
            result[f] = pointer.deserialize(fctx.rawdata())
        except pointer.InvalidPointer as ex:
            raise error.Abort(
                _("lfs: corrupted pointer (%s@%s): %s\n") %
                (f, short(ctx.node()), ex))
    return result
Example #5
File: dirsync.py Project: zerkella/eden
def getconfigs(repo):
    # read from wvfs/.hgdirsync
    filename = ".hgdirsync"
    content = repo.wvfs.tryreadutf8(filename)
    cfg = config.config()
    if content:
        cfg.parse(filename, "[dirsync]\n%s" % content, ["dirsync"])

    maps = util.sortdict()
    for key, value in repo.ui.configitems("dirsync") + cfg.items("dirsync"):
        if "." not in key:
            continue
        name, disambig = key.split(".", 1)
        # Normalize paths to have / at the end. For easy concatenation later.
        if value[-1] != "/":
            value = value + "/"
        if name not in maps:
            maps[name] = []
        maps[name].append(value)
    return maps
Example #6
def localrepolistkeys(orig, self, namespace, patterns=None):
    """Wrapper of localrepo.listkeys()"""

    if namespace == "bookmarks" and patterns:
        index = self.bundlestore.index
        # Using sortdict instead of a dictionary to ensure that bookmarks are
        # restored in the same order after a pullbackup. See T24417531
        results = util.sortdict()
        bookmarks = orig(self, namespace)
        for pattern in patterns:
            results.update(index.getbookmarks(pattern))
            if pattern.endswith("*"):
                pattern = "re:^" + pattern[:-1] + ".*"
            kind, pat, matcher = util.stringmatcher(pattern)
            for bookmark, node in pycompat.iteritems(bookmarks):
                if matcher(bookmark):
                    results[bookmark] = node
        return results
    else:
        return orig(self, namespace)
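The interesting step is the glob handling: a trailing "*" is rewritten into an anchored regular expression before matching the local bookmarks. A minimal sketch of that rewrite using the re module directly (util.stringmatcher is Mercurial-internal; like the code above, this does not regex-escape the prefix):

import re

def match_bookmarks(pattern, bookmarks):
    # bookmarks: {name: node}; pattern is an exact name or a trailing-"*" glob
    if pattern.endswith("*"):
        matcher = re.compile("^" + pattern[:-1] + ".*").match
    else:
        matcher = lambda name: name == pattern
    return {name: node for name, node in bookmarks.items() if matcher(name)}

print(match_bookmarks("feature/*", {"feature/x": "n1", "stable": "n2"}))
# {'feature/x': 'n1'}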
Example #7
def getbookmarks(self, query):
    """Get all bookmarks that match the pattern."""
    if not self._connected:
        self.sqlconnect()
    self.log.info("QUERY BOOKMARKS reponame: %r query: %r" %
                  (self.reponame, query))
    query = _convertbookmarkpattern(query)
    self.sqlcursor.execute(
        "SELECT bookmark, node from bookmarkstonode WHERE "
        "reponame = %s AND bookmark LIKE %s "
        # Bookmarks have to be restored in their order of creation.
        # See T24417531
        "ORDER BY time ASC",
        params=(self.reponame, query),
    )
    result = self.sqlcursor.fetchall()
    bookmarks = util.sortdict()
    for row in result:
        if len(row) != 2:
            self.log.info("Bad row returned: %s" % row)
            continue
        bookmarks[row[0]] = row[1]
    return bookmarks
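ORDER BY time ASC plus an insertion-ordered dict is what carries creation order from the database to the caller. The row-validation loop can be sketched standalone, with OrderedDict standing in for util.sortdict and hypothetical rows:

from collections import OrderedDict

# hypothetical fetchall() result; the one-element row is malformed
rows = [("master", "abc123"), ("bad-row",), ("release", "def456")]

bookmarks = OrderedDict()  # stand-in for util.sortdict
for row in rows:
    if len(row) != 2:
        continue  # malformed rows are logged and skipped above
    bookmarks[row[0]] = row[1]

print(list(bookmarks.items()))
# [('master', 'abc123'), ('release', 'def456')] -- SQL row order preserved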
Example #8
def cloudlistbackups(ui, repo, dest=None, **opts):
    """list backups that are available on the server"""

    remotepath = ccutil.getremotepath(repo, dest)
    getconnection = lambda: repo.connectionpool.get(remotepath, opts)

    sourceusername = opts.get("user")
    if not sourceusername:
        sourceusername = util.shortuser(repo.ui.username())
    backupinfo = backupbookmarks.downloadbackupbookmarks(
        repo, remotepath, getconnection, sourceusername)

    if opts.get("json"):
        jsondict = util.sortdict()
        for hostname, reporoot in backupinfo.keys():
            jsondict.setdefault(hostname, []).append(reporoot)
        ui.write("%s\n" % json.dumps(jsondict, indent=4))
    elif not backupinfo:
        ui.write(_("no backups available for %s\n") % sourceusername)
    else:
        backupbookmarks.printbackupbookmarks(ui,
                                             sourceusername,
                                             backupinfo,
                                             all=bool(opts.get("all")))
Example #9
def downloadbackupbookmarks(
    repo,
    remotepath,
    getconnection,
    sourceusername,
    sourcehostname=None,
    sourcereporoot=None,
):
    """download backup bookmarks from the server

    Returns an ordered dict mapping:
      (hostname, reporoot) => {"heads": [NODE, ...], "bookmarks": {NAME: NODE, ...}}

    Sqlindex returns backups in order of insertion; the dict is then reversed
    so that hostnames and reporoots are in most-recently-used order and the
    freshest backups come first. Within each backup, the order of insertion
    is preserved.

    Fileindex returns backups in lexicographic order, since the fileindex
    doesn't support maintaining the order of insertion.
    """

    pattern = "infinitepush/backups/%s" % sourceusername
    if sourcehostname:
        pattern += "/%s" % sourcehostname
        if sourcereporoot:
            pattern += sourcereporoot
    pattern += "/*"

    with getconnection() as conn:
        if "listkeyspatterns" not in conn.peer.capabilities():
            raise error.Abort(
                "'listkeyspatterns' command is not supported for the server %s"
                % conn.peer.url())
        bookmarks = conn.peer.listkeyspatterns("bookmarks", patterns=[pattern])

    backupinfo = util.sortdict()
    for name, hexnode in pycompat.iteritems(bookmarks):
        match = _backupbookmarkre.match(name)
        if match:
        if match:
            username, hostname, reporoot, type, name = match.groups()

            if sourcereporoot and sourcereporoot != reporoot:
                continue
            if sourcehostname and sourcehostname != hostname:
                continue
            entry = backupinfo.setdefault((hostname, reporoot), {})
            if type == "heads":
                entry.setdefault("heads", []).append(hexnode)
            elif type == "bookmarks":
                entry.setdefault("bookmarks",
                                 {})[_unescapebookmark(name)] = hexnode
        else:
            repo.ui.warn(
                _("backup bookmark format unrecognised: '%s' -> %s\n") %
                (name, hexnode))

    # Reverse to make MRU order
    backupinfomru = util.sortdict()
    for key, value in reversed(backupinfo.items()):
        backupinfomru[key] = value

    return backupinfomru
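The final loop converts insertion order (oldest first, as the SQL index returns it) into most-recently-used order simply by rebuilding the dict from reversed items. Standalone, with OrderedDict as a stand-in for util.sortdict and hypothetical entries:

from collections import OrderedDict

# hypothetical backups, oldest first
backupinfo = OrderedDict([("host1", "older backup"), ("host2", "newer backup")])

backupinfomru = OrderedDict()
for key, value in reversed(list(backupinfo.items())):
    backupinfomru[key] = value

print(list(backupinfomru))  # ['host2', 'host1'] -- freshest backup first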
Example #10
def _http_bookmark_fetch(repo, names):
    """fetch the given bookmarks from the server via the EdenAPI HTTP client,
    dropping names the server reports as absent (node is None)"""
    (bookmarks, _stats) = repo.edenapi.bookmarks(repo.name, names)
    return util.sortdict(
        (bm, n) for (bm, n) in bookmarks.items() if n is not None)
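Building the sortdict from a generator keeps the server-provided order while dropping bookmarks the server reported as absent. The same filtering with OrderedDict as a stand-in and a hypothetical response:

from collections import OrderedDict

bookmarks = {"master": "abc123", "gone": None}  # hypothetical server response

result = OrderedDict((bm, n) for bm, n in bookmarks.items() if n is not None)
print(dict(result))  # {'master': 'abc123'}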