Example #1
def progresstest(ui, loops, total, **opts):
    loops = int(loops)
    total = int(total)
    if total == -1:
        total = None
    nested = opts.get("nested", None)
    useunicode = opts.get("unicode", False)
    if useunicode:
        topic = pycompat.ensurestr("\u3042\u3044\u3046\u3048")
    else:
        topic = "progress test"
    with progress.bar(ui, topic, "cycles", total) as prog:
        for i in range(loops + 1):
            if useunicode:
                prog.value = (i, unicodeloopitems[i % len(unicodeloopitems)])
            else:
                prog.value = (i, "loop %s" % i)
            syncrender()
            if nested:
                nestedtotal = 5 if i % 6 == 5 else 2
                with progress.bar(ui, "nested progress",
                                  total=nestedtotal) as nestedprog:
                    for j in range(nestedtotal + 1):
                        nestedprog.value = (j, "nest %s" % j)
                        syncrender()
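
All of these examples drive the same context-manager API: a bar is created with a ui object, a topic, an optional unit label and total, and the caller advances it by assigning either a plain count or a (count, current item) tuple to prog.value; bars may also nest, as the example above shows. The short sketch below only distills that shared pattern; copyfiles and copyfn are hypothetical names, and progress is the same module used throughout these examples.

def copyfiles(ui, files, copyfn):
    # Hypothetical illustration of the shared pattern, not code from the repo.
    # copyfn is whatever per-file work the caller wants to track.
    with progress.bar(ui, "copying", "files", len(files)) as prog:
        for i, f in enumerate(files, 1):
            copyfn(f)
            # A (count, item) tuple reports both the position and the current
            # item; assigning a bare integer updates only the position.
            prog.value = (i, f)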
Example #2
def progresstest(ui, loops, total, **opts):
    loops = int(loops)
    total = int(total)
    if total == -1:
        total = None
    nested = opts.get("nested", None)
    useunicode = opts.get("unicode", False)
    if useunicode:
        topic = u"\u3042\u3044\u3046\u3048".encode("utf-8")
    else:
        topic = "progress test"
    with progress.bar(ui, topic, "cycles", total) as prog:
        for i in range(loops + 1):
            if useunicode:
                prog.value = (i, unicodeloopitems[i % len(unicodeloopitems)])
            else:
                prog.value = (i, "loop %s" % i)
            progress._engine.pump(_faketime.increment())
            if nested:
                nestedtotal = 5 if i % 6 == 5 else 2
                with progress.bar(ui, "nested progress",
                                  total=nestedtotal) as nestedprog:
                    for j in range(nestedtotal + 1):
                        nestedprog.value = (j, "nest %s" % j)
                        progress._engine.pump(_faketime.increment())
Example #3
def receivepack(ui, fh, dpack, hpack, version=1):
    # type: (UI, IO[bytes], mutabledatastore, mutablehistorystore, int) -> Tuple[List[Tuple[bytes, bytes]], List[Tuple[bytes, bytes]]]
    receiveddata = []
    receivedhistory = []

    size = 0
    with progress.bar(ui, _("receiving pack")) as prog:
        while True:
            filename = readpath(fh)
            count = 0

            # Store the history for later sorting
            for value in readhistory(fh):
                node, p1, p2, linknode, copyfrom = value
                hpack.add(filename, node, p1, p2, linknode, copyfrom)
                receivedhistory.append((filename, node))
                count += 1
                size += len(filename) + len(node) + sum(
                    len(x or "") for x in value)

            for node, deltabase, delta, metadata in readdeltas(
                    fh, version=version):
                dpack.add(filename, node, deltabase, delta, metadata=metadata)
                receiveddata.append((filename, node))
                count += 1
                size += len(filename) + len(node) + len(deltabase) + len(delta)

            if count == 0 and filename == "":
                break
            prog.value += 1
    perftrace.tracebytes("Received Pack Size", size)

    return receiveddata, receivedhistory
Example #4
def downloadtest(description, bytecount):
    pipeo.write(b"download %i\n" % bytecount)
    pipeo.flush()
    l = pipei.readline()
    if not l or not l.startswith(b"download bytes"):
        raise error.Abort("invalid response from server: %r" % l)
    bytecount = int(l.split()[2])
    with progress.bar(ui,
                      description,
                      total=bytecount,
                      formatfunc=util.bytecount) as prog:
        starttime = util.timer()
        remaining = bytecount
        while remaining > 0:
            data = pipei.read(min(remaining, BLOCK_SIZE))
            if not data:
                raise error.Abort(
                    "premature end of speed-test download stream")
            remaining -= len(data)
            prog.value = bytecount - remaining
        l = pipei.readline()
        if not l or not l.startswith(b"download complete"):
            raise error.Abort("invalid response from server: %r" % l)
        endtime = util.timer()
    return endtime - starttime
Example #5
    def _marklookupsclean(self):
        dirstate = self.dirstate
        normal = dirstate.normal
        newdmap = dirstate._map
        cleanlookups = self.cleanlookups
        self.cleanlookups = []

        repo = dirstate._repo
        p1 = dirstate.p1()
        wctx = repo[None]
        pctx = repo[p1]

        with progress.bar(self.ui, _("marking clean"), _("files"),
                          len(cleanlookups)) as prog:
            count = 0
            for f in cleanlookups:
                count += 1
                prog.value = (count, f)
                # Only make something clean if it's already in a
                # normal state. Things in other states, like 'm'
                # merge state, should not be marked clean.
                entry = newdmap[f]
                if (entry[0] == "n" and f not in newdmap.copymap
                        and entry[2] != -2):
                    # It may have been a while since we added the
                    # file to cleanlookups, so double check that
                    # it's still clean.
                    if self._compareondisk(f, wctx, pctx) is False:
                        normal(f)
Example #6
        def _prefetch(self, revs, base=None, matcher=None):
            mfl = self.manifestlog

            # Copy the skip set to start large and avoid constant resizing,
            # and since it's likely to be very similar to the prefetch set.
            files = set()
            basemf = self[base or nullid].manifest()
            with progress.bar(self.ui, _("prefetching"),
                              total=len(revs)) as prog:
                for rev in sorted(revs):
                    ctx = self[rev]
                    if matcher is None:
                        matcher = self.maybesparsematch(rev)

                    mfctx = ctx.manifestctx()
                    mf = mfctx.read()

                    for path, (new, old) in mf.diff(basemf, matcher).items():
                        if new[0]:
                            files.add((path, new[0]))
                    prog.value += 1

            if files:
                results = [(path, hex(fnode)) for (path, fnode) in files]
                self.fileservice.prefetch(results)
Example #7
    def walktree(self, heads):
        """Return a mapping that identifies the uncommitted parents of every
        uncommitted changeset."""
        visit = heads
        known = set()
        parents = {}
        numcommits = self.source.numcommits()
        with progress.bar(self.ui, _("scanning"), _("revisions"),
                          numcommits) as prog:
            while visit:
                n = visit.pop(0)
                if n in known:
                    continue
                if n in self.map:
                    m = self.map[n]
                    if m == SKIPREV or self.dest.hascommitfrommap(m):
                        continue
                known.add(n)
                prog.value = len(known)
                commit = self.cachecommit(n)
                parents[n] = []
                for p in commit.parents:
                    parents[n].append(p)
                    visit.append(p)

        return parents
Example #8
def debuggettrees(ui, repo, **opts):
    edenapi.bailifdisabled(ui)

    keys = []
    for line in sys.stdin.readlines():
        parts = line.split()
        (node, path) = parts if len(parts) > 1 else (parts[0], "")
        keys.append((path, node))

    dpack, __ = repo.manifestlog.getmutablesharedpacks()

    msg = _("fetching %d trees") % len(keys)
    with progress.bar(
        ui, msg, start=0, unit=_("bytes"), formatfunc=util.bytecount
    ) as prog:

        def progcb(dl, dlt, ul, ult):
            if dl > 0:
                prog._total = dlt
                prog.value = dl

        stats = repo.edenapi.get_trees(keys, dpack, progcb)

    ui.write(stats.to_str() + "\n")

    packpath, __ = repo.manifestlog._mutablesharedpacks.commit()
    ui.write(_("wrote datapack: %s\n") % packpath)
Example #9
def debuggethistory(ui, repo, **opts):
    edenapi.bailifdisabled(ui)

    input = (line.split() for line in sys.stdin.readlines())
    keys = [(path, node) for node, path in input]
    depth = opts.get("depth") or None

    __, hpack = repo.fileslog.getmutablesharedpacks()

    msg = _("fetching history for %d files") % len(keys)
    with progress.bar(
        ui, msg, start=0, unit=_("bytes"), formatfunc=util.bytecount
    ) as prog:

        def progcb(dl, dlt, ul, ult):
            if dl > 0:
                prog._total = dlt
                prog.value = dl

        stats = repo.edenapi.get_history(keys, hpack, depth, progcb)

    ui.write(stats.to_str() + "\n")

    __, packpath = repo.fileslog._mutablesharedpacks.commit()
    ui.write(_("wrote historypack: %s\n") % packpath)
Example #10
def updateglobalrevmeta(ui, repo, *args, **opts):
    """Reads globalrevs from the latest hg commits and adds them to the
    globalrev-hg mapping."""
    with repo.wlock(), repo.lock():
        unfi = repo
        clnode = unfi.changelog.node
        clrevision = unfi.changelog.changelogrevision
        globalrevmap = _globalrevmap(unfi)

        lastrev = globalrevmap.lastrev
        repolen = len(unfi)
        with progress.bar(ui, _("indexing"), _("revs"), repolen - lastrev) as prog:

            def addtoglobalrevmap(grev, node):
                if grev:
                    globalrevmap.add(int(grev), node)

            for rev in range(lastrev, repolen):  # noqa: F821
                hgnode = clnode(rev)
                commitdata = clrevision(rev)
                extra = commitdata.extra

                svnrev = _getsvnrev(extra)
                addtoglobalrevmap(svnrev, hgnode)

                globalrev = _getglobalrev(ui, extra)
                if globalrev != svnrev:
                    addtoglobalrevmap(globalrev, hgnode)

                prog.value += 1

        globalrevmap.lastrev = repolen
        globalrevmap.save()
Example #11
def progresstest(ui, total, **opts):
    total = int(total)

    waitforfile(opts.get("waitfile"))

    with progress.bar(ui, "eating", "apples", total) as bar:
        for i in range(1, total + 1):
            bar.value = i
            waitforfile(opts.get("waitfile"))
Example #12
def debugverifylinkrevcache(ui, repo, *pats, **opts):
    """read the linkrevs from the database and verify if they are correct"""
    # restore to the original _adjustlinkrev implementation
    c = context.basefilectx
    extensions.unwrapfunction(c, "_adjustlinkrev", _adjustlinkrev)

    paths = {}  # {id: name}
    nodes = {}  # {id: name}

    repo = repo.unfiltered()
    idx = repo.unfiltered().changelog.index

    db = repo._linkrevcache
    paths = dict(db._getdb(db._pathdbname))
    nodes = dict(db._getdb(db._nodedbname))
    pathsrev = dict(
        (v, pycompat.decodeutf8(k)) for k, v in pycompat.iteritems(paths))
    nodesrev = dict((v, k) for k, v in pycompat.iteritems(nodes))
    lrevs = dict(db._getdb(db._linkrevdbname))

    readfilelog = ui.configbool("linkrevcache", "readfilelog", True)

    total = len(lrevs)
    with progress.bar(ui, _("verifying"), total=total) as prog:
        for i, (k, v) in enumerate(pycompat.iteritems(lrevs)):
            prog.value = i
            pathid, nodeid = k.split(b"\0")
            path = pathsrev[pathid]
            fnode = nodesrev[nodeid]
            linkrevs = _str2intlist(pycompat.decodeutf8(v))
            linkrevs.sort()

            for linkrev in linkrevs:
                fctx = repo[linkrev][path]
                introrev = fctx.introrev()
                fctx.linkrev()
                if readfilelog:
                    flinkrev = fctx.linkrev()
                else:
                    flinkrev = None
                if introrev == linkrev:
                    continue
                if introrev in idx.commonancestorsheads(
                        introrev, linkrev) and (introrev in linkrevs
                                                or introrev == flinkrev):
                    adjective = _("unnecessary")
                else:
                    adjective = _("incorrect")
                ui.warn(
                    _("%s linkrev %s for %s @ %s (expected: %s)\n") %
                    (adjective, linkrev, path, node.hex(fnode), introrev))

    ui.write(_("%d entries verified\n") % total)
Example #13
def countrate(ui, repo, amap, *pats, **opts):
    """Calculate stats"""
    opts = pycompat.byteskwargs(opts)
    if opts.get("dateformat"):

        def getkey(ctx):
            t, tz = ctx.date()
            date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
            return date.strftime(opts["dateformat"])

    else:
        tmpl = opts.get("oldtemplate") or opts.get("template")
        tmpl = cmdutil.makelogtemplater(ui, repo, tmpl)

        def getkey(ctx):
            ui.pushbuffer()
            tmpl.show(ctx)
            return ui.popbuffer()

    rate = {}
    df = False
    if opts.get("date"):
        df = util.matchdate(opts["date"])

    prog = progress.bar(ui, _("analyzing"), _("revisions"), len(repo))
    m = scmutil.match(repo[None], pats, opts)

    def prep(ctx, fns):
        rev = ctx.rev()
        if df and not df(ctx.date()[0]):  # doesn't match date format
            return

        key = getkey(ctx).strip()
        key = amap.get(key, key)  # alias remap
        if opts.get("changesets"):
            rate[key] = (rate.get(key, (0, ))[0] + 1, 0)
        else:
            parents = ctx.parents()
            if len(parents) > 1:
                ui.note(_("revision %d is a merge, ignoring...\n") % (rev, ))
                return

            ctx1 = parents[0]
            lines = changedlines(ui, repo, ctx1, ctx, fns)
            rate[key] = [r + l for r, l in zip(rate.get(key, (0, 0)), lines)]

        prog.value += 1

    with prog:
        for ctx in cmdutil.walkchangerevs(repo, m, opts, prep):
            continue

    return rate
Example #14
def debugbuildannotatecache(ui, repo, *pats, **opts):
    """incrementally build fastannotate cache up to REV for specified files

    If REV is not specified, use the config 'fastannotate.mainbranch'.

    If fastannotate.client is True, download the annotate cache from the
    server. Otherwise, build the annotate cache locally.

    The annotate cache will be built using the default diff and follow
    options and lives in '.hg/fastannotate/default'.
    """
    rev = opts.get("REV") or ui.config("fastannotate", "mainbranch")
    if not rev:
        raise error.Abort(
            _("you need to provide a revision"),
            hint=_("set fastannotate.mainbranch or use --rev"),
        )
    if ui.configbool("fastannotate", "unfilteredrepo", True):
        repo = repo.unfiltered()
    ctx = scmutil.revsingle(repo, rev)
    m = scmutil.match(ctx, pats, opts)
    paths = list(ctx.walk(m))
    if util.safehasattr(repo, "prefetchfastannotate"):
        # client
        if opts.get("REV"):
            raise error.Abort(_("--rev cannot be used for client"))
        repo.prefetchfastannotate(paths)
    else:
        # server, or full repo
        with progress.bar(ui, _("building"), total=len(paths)) as prog:
            for i, path in enumerate(paths):
                prog.value = i
                with facontext.annotatecontext(repo, path) as actx:
                    try:
                        if actx.isuptodate(rev):
                            continue
                        actx.annotate(rev, rev)
                    except (faerror.CannotReuseError, faerror.CorruptedFileError):
                        # the cache is broken (could happen with renaming so the
                        # file history gets invalidated). rebuild and try again.
                        ui.debug("fastannotate: %s: rebuilding broken cache\n" % path)
                        actx.rebuild()
                        try:
                            actx.annotate(rev, rev)
                        except Exception as ex:
                            # possibly a bug, but should not stop us from
                            # building cache for other files.
                            ui.warn(
                                _("fastannotate: %s: failed to " "build cache: %r\n")
                                % (path, ex)
                            )
Example #15
    def _processlookups(self, lookups):
        repo = self.dirstate._repo
        p1 = self.dirstate.p1()

        if util.safehasattr(repo, "fileservice"):
            p1mf = repo[p1].manifest()
            lookupmatcher = matchmod.exact(repo.root, repo.root, lookups)
            # We fetch history because we know _compareondisk() uses
            # filelog.cmp() which computes the sha(p1, p2, text), which requires
            # the history of the file. Later we'll move to comparing content
            # hashes, and we can prefetch those hashes instead.
            # Note, this may be slow for files with long histories.
            repo.fileservice.prefetch(
                list((f, hex(p1mf[f])) for f in p1mf.matches(lookupmatcher)),
                fetchdata=False,
                fetchhistory=True,
            )

        wctx = repo[None]
        pctx = repo[p1]

        with progress.bar(self.ui, _("checking changes"), _("files"),
                          len(lookups)) as prog:
            # Sort so we get deterministic ordering. This is important for tests.
            count = 0
            for fn in sorted(lookups):
                count += 1
                prog.value = (count, fn)
                changed = self._compareondisk(fn, wctx, pctx)
                if changed is None:
                    # File no longer exists
                    if self.dtolog > 0:
                        self.dtolog -= 1
                        self.ui.log("status",
                                    "R %s: checked in filesystem" % fn)
                    yield (fn, False)
                elif changed is True:
                    # File exists and is modified
                    if self.mtolog > 0:
                        self.mtolog -= 1
                        self.ui.log("status",
                                    "M %s: checked in filesystem" % fn)
                    yield (fn, True)
                else:
                    # File exists and is clean
                    if self.ftolog > 0:
                        self.ftolog -= 1
                        self.ui.log("status",
                                    "C %s: checked in filesystem" % fn)
                    self.cleanlookups.append(fn)
Example #16
def quickchecklog(ui, log, name, knownbroken):
    """
    knownbroken: a set of known broken *changelog* revisions

    returns (rev, linkrev) of the first bad entry
    returns (None, None) if nothing is bad
    """
    lookback = 10
    rev = max(0, len(log) - lookback)
    numchecked = 0
    seengood = False
    with progress.bar(ui, _("checking %s") % name) as prog:
        while rev < len(log):
            numchecked += 1
            prog.value = (numchecked, rev)
            (startflags, clen, ulen, baserev, linkrev, p1, p2,
             node) = log.index[rev]
            if linkrev in knownbroken:
                ui.write(
                    _("%s: marked corrupted at rev %d (linkrev=%d)\n") %
                    (name, rev, linkrev))
                return rev, linkrev
            try:
                log.revision(rev, raw=True)
                if rev != 0:
                    if (startflags == 0 or linkrev == 0
                            or (p1 == 0 and p2 == 0) or clen == 0 or ulen == 0
                            or node == nullid):
                        # In theory this is not 100% correct, but those fields
                        # being 0 almost always indicates corruption in practice.
                        raise ValueError("suspected bad revision data")
                seengood = True
                rev += 1
            except Exception:  #  RevlogError, mpatchError, ValueError, etc
                if rev == 0:
                    msg = _("all %s entries appear corrupt!") % (name, )
                    raise error.RevlogError(msg)
                if not seengood:
                    # If the earliest rev we looked at is bad, look back farther
                    lookback *= 2
                    rev = max(0, len(log) - lookback)
                    continue
                ui.write(
                    _("%s: corrupted at rev %d (linkrev=%d)\n") %
                    (name, rev, linkrev))
                return rev, linkrev
    ui.write(_("%s looks okay\n") % name)
    return None, None
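
The docstring's contract above, (rev, linkrev) of the first bad entry or (None, None) when the log looks fine, implies the following caller shape. This is only a hypothetical sketch of how a repair routine might consume the result; it is not taken from the repository.

badrev, badlinkrev = quickchecklog(ui, repo.changelog, "changelog", set())
if badrev is not None:
    # Hypothetical handling: everything from the first bad entry onward is
    # suspect, so a repair routine would act on revs >= badrev.
    ui.warn(_("changelog is corrupt starting at rev %d (linkrev %d)\n")
            % (badrev, badlinkrev))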
Example #17
File: wirepack.py Project: simpkins/eden
def receivepack(
    ui: "UI",
    fh: "IO[bytes]",
    dpack: "mutabledatastore",
    hpack: "mutablehistorystore",
    version: int = 1,
) -> "Tuple[List[Tuple[bytes, bytes]], List[Tuple[bytes, bytes]]]":
    receiveddata = []
    receivedhistory = []

    size = 0
    start = time.time()
    with progress.bar(ui, _("receiving pack")) as prog:
        while True:
            filename = readpath(fh)
            count = 0

            # Store the history for later sorting
            for value in readhistory(fh):
                node, p1, p2, linknode, copyfrom = value
                hpack.add(filename, node, p1, p2, linknode, copyfrom)
                receivedhistory.append((filename, node))
                count += 1
                size += len(filename) + len(node) + sum(
                    len(x or "") for x in value)

            for node, deltabase, delta, metadata in readdeltas(
                    fh, version=version):
                dpack.add(filename, node, deltabase, delta, metadata=metadata)
                receiveddata.append((filename, node))
                count += 1
                size += len(filename) + len(node) + len(deltabase) + len(delta)

            if count == 0 and filename == "":
                break
            prog.value += 1
    perftrace.tracebytes("Received Pack Size", size)
    duration = time.time() - start
    megabytes = float(size) / 1024 / 1024
    if ui.configbool("remotefilelog", "debug-fetches") and (
            duration > 1 or len(receiveddata) > 100 or megabytes > 1):
        ui.warn(
            _("Receive pack: %s entries, %.2f MB, %.2f seconds (%0.2f MBps)\n")
            % (len(receiveddata), megabytes, duration, megabytes / duration))

    return receiveddata, receivedhistory
Example #18
def _pullheadgroups(repo, remotepath, headgroups):
    backuplock.progresspulling(
        repo, [nodemod.bin(node) for newheads in headgroups for node in newheads]
    )
    with progress.bar(
        repo.ui, _("pulling from commit cloud"), total=len(headgroups)
    ) as prog:
        for index, headgroup in enumerate(headgroups):
            headgroupstr = " ".join([head[:12] for head in headgroup])
            url = repo.ui.paths.getpath(remotepath).url
            repo.ui.status(_("pulling %s from %s\n") % (headgroupstr, url))
            prog.value = (index, headgroupstr)
            repo.pull(
                remotepath,
                headnodes=[nodemod.bin(hexnode) for hexnode in headgroup],
                quiet=False,
            )
            repo.connectionpool.close()
Example #19
    def convert(self, sortmode):
        try:
            self.source.before()
            self.dest.before()
            self.source.setrevmap(self.map)
            self.ui.status(_("scanning source...\n"))
            heads = self.source.getheads()
            parents = self.walktree(heads)
            self.mergesplicemap(parents, self.splicemap)
            self.ui.status(_("sorting...\n"))
            t = self.toposort(parents, sortmode)
            num = len(t)
            c = None

            self.ui.status(_("converting...\n"))
            with progress.bar(self.ui, _("converting"), _("revisions"),
                              len(t)) as prog:
                for i, c in enumerate(t):
                    num -= 1
                    desc = self.commitcache[c].desc
                    if "\n" in desc:
                        desc = desc.splitlines()[0]
                    # convert log message to local encoding without using
                    # tolocal() because the encoding.encoding convert()
                    # uses is 'utf-8'
                    self.ui.status("%d %s\n" % (num, recode(desc)))
                    self.ui.note(_("source: %s\n") % recode(c))
                    prog.value = i
                    self.copy(c)

            bookmarks = self.source.getbookmarks()
            cbookmarks = {}
            for k in bookmarks:
                v = bookmarks[k]
                if self.map.get(v, SKIPREV) != SKIPREV:
                    cbookmarks[k] = self.map[v]

            if c and cbookmarks:
                self.dest.putbookmarks(cbookmarks)

            self.writeauthormap()
        finally:
            self.cleanup()
Example #20
    def _httpfetchhistory(self, fileids, hpack, depth=None):
        """Fetch file history over HTTPS using the Eden API"""
        n = len(fileids)
        msg = (_n(
            "fetching history for %d file over HTTPS",
            "fetching history for %d files over HTTPS",
            n,
        ) % n)

        if self.ui.interactive() and edenapi.debug(self.ui):
            self.ui.warn(("%s\n") % msg)

        self.ui.metrics.gauge("http_gethistory_revs", n)
        self.ui.metrics.gauge("http_gethistory_calls", 1)

        with progress.bar(self.ui,
                          msg,
                          start=0,
                          unit=_("bytes"),
                          formatfunc=util.bytecount) as prog:

            def progcallback(dl, dlt, ul, ult):
                if dl > 0:
                    prog._total = dlt
                    prog.value = dl

            stats = self.repo.edenapi.get_history(fileids, hpack, depth,
                                                  progcallback)

        if self.ui.interactive() and edenapi.debug(self.ui):
            self.ui.warn(_("%s\n") % stats.to_str())

        self.ui.metrics.gauge("http_gethistory_time_ms",
                              stats.time_in_millis())
        self.ui.metrics.gauge("http_gethistory_latency_ms",
                              stats.latency_in_millis())
        self.ui.metrics.gauge("http_gethistory_bytes_downloaded",
                              stats.downloaded())
        self.ui.metrics.gauge("http_gethistory_bytes_uploaded",
                              stats.uploaded())
        self.ui.metrics.gauge("http_gethistory_requests", stats.requests())
Example #21
    def _linkrev(self):
        if self._filenode == nullid:
            return nullrev

        p1, p2, linknode, copyfrom = self.getnodeinfo()
        rev = self._repo.changelog.nodemap.get(linknode)
        if rev is not None:
            return rev

        # Search all commits for the appropriate linkrev (slow, but uncommon)
        repo = self._repo
        path = self._path
        fileid = self._filenode
        cl = repo.changelog
        mfl = repo.manifestlog

        with repo.ui.timesection("scanlinkrev"), repo.ui.configoverride({
            ("treemanifest", "fetchdepth"):
                1
        }), perftrace.trace("Scanning for Linkrev"), progress.bar(
                repo.ui,
                _("scanning for linkrev of %s") % path) as prog:
            perftrace.tracevalue("Path", path)
            allrevs = repo.revs("_all()")
            allrevs.sort(reverse=True)
            for i, rev in enumerate(allrevs):
                prog.value = i
                node = cl.node(rev)
                data = cl.read(
                    node)  # get changeset data (we avoid object creation)
                if path in data[3]:  # checking the 'files' field.
                    # The file has been touched, check if the hash is what we're
                    # looking for.
                    if fileid == mfl[data[0]].read().get(path):
                        perftrace.tracevalue("Distance", i)
                        return rev

        # Couldn't find the linkrev. This should generally not happen, and will
        # likely cause a crash.
        return None
Example #22
def bytesprogresstest(ui):
    values = [
        0,
        10,
        250,
        999,
        1000,
        1024,
        22000,
        1048576,
        1474560,
        123456789,
        555555555,
        1000000000,
        1111111111,
    ]
    with progress.bar(ui,
                      "bytes progress test",
                      "bytes",
                      max(values),
                      formatfunc=util.bytecount) as prog:
        for value in values:
            prog.value = (value, "%s bytes" % value)
            progress._engine.pump(_faketime.increment())
Example #23
def bytesprogresstest(ui):
    values = [
        0,
        10,
        250,
        999,
        1000,
        1024,
        22000,
        1048576,
        1474560,
        123456789,
        555555555,
        1000000000,
        1111111111,
    ]
    with progress.bar(ui,
                      "bytes progress test",
                      "bytes",
                      max(values),
                      formatfunc=util.bytecount) as prog:
        for value in values:
            prog.value = (value, "%s bytes" % value)
            syncrender()
Example #24
    def _batch(self, pointers, localstore, action, objectnames=None):
        if action not in ["upload", "download"]:
            raise error.ProgrammingError("invalid Git-LFS action: %s" % action)

        response = self._batchrequest(pointers, action)
        objects = self._extractobjects(response, pointers, action)
        total = sum(x.get("size", 0) for x in objects)
        perftrace.tracebytes("Size", total)
        sizes = {}
        for obj in objects:
            sizes[obj.get("oid")] = obj.get("size", 0)
        topic = {
            "upload": _("lfs uploading"),
            "download": _("lfs downloading")
        }[action]
        if self.ui.verbose and len(objects) > 1:
            self.ui.write(
                _("lfs: need to transfer %d objects (%s)\n") %
                (len(objects), util.bytecount(total)))

        def transfer(chunk):
            for obj in chunk:
                objsize = obj.get("size", 0)
                if self.ui.verbose:
                    if action == "download":
                        msg = _("lfs: downloading %s (%s)\n")
                    elif action == "upload":
                        msg = _("lfs: uploading %s (%s)\n")
                    self.ui.write(msg %
                                  (obj.get("oid"), util.bytecount(objsize)))
                retry = self.retry
                while True:
                    try:
                        yield 0, obj.get("oid")
                        self._basictransfer(obj, action, localstore)
                        yield 1, obj.get("oid")
                        break
                    except Exception as ex:
                        if retry > 0:
                            if self.ui.verbose:
                                self.ui.write(
                                    _("lfs: failed: %r (remaining retry %d)\n")
                                    % (ex, retry))
                            retry -= 1
                            continue
                        raise

        starttimestamp = util.timer()
        if action == "download":
            oids = worker.worker(
                self.ui,
                0.1,
                transfer,
                (),
                sorted(objects, key=lambda o: o.get("oid")),
                preferthreads=True,
                callsite="blobstore",
            )
        else:
            oids = transfer(objects)

        transferred = 0
        with progress.bar(self.ui,
                          topic,
                          _("bytes"),
                          total=total,
                          formatfunc=util.bytecount) as prog:
            for count, oid in oids:
                if count != 0:
                    transferred += sizes[oid]
                    if self.ui.verbose:
                        self.ui.write(_("lfs: processed: %s\n") % oid)
                if objectnames is not None:
                    prog.value = (transferred, objectnames.get(oid, ""))
                else:
                    prog.value = transferred

        currenttimestamp = util.timer()
        self._metrics["lfs_%s_size" % action] += total
        self._metrics["lfs_%s_time" % action] += (currenttimestamp - max(
            self._timestamp["latest_%s_timestamp" % action],
            starttimestamp)) * 1000
        self._timestamp["latest_%s_timestamp" % action] = currenttimestamp
Example #25
    def requestpacks(self, fileids, fetchdata, fetchhistory):
        self.remotecache.reconnect()

        perftrace.traceflag("packs")
        cache = self.remotecache
        fileslog = self.repo.fileslog

        total = len(fileids)
        totalfetches = 0
        if fetchdata:
            totalfetches += total
        if fetchhistory:
            totalfetches += total
        with progress.bar(self.ui,
                          _("fetching from memcache"),
                          total=totalfetches) as prog:
            # generate `get` keys and make data request
            getkeys = [file + "\0" + node for file, node in fileids]
            if fetchdata:
                cache.getdatapack(getkeys)
            if fetchhistory:
                cache.gethistorypack(getkeys)

            # receive both data and history
            misses = []
            try:
                allmisses = set()
                if fetchdata:
                    allmisses.update(cache.receive(prog))
                    fileslog.contentstore.markforrefresh()
                if fetchhistory:
                    allmisses.update(cache.receive(prog))
                    fileslog.metadatastore.markforrefresh()

                misses = [key.split("\0") for key in allmisses]
                perftrace.tracevalue("Memcache Misses", len(misses))
            except CacheConnectionError:
                misses = fileids
                self.ui.warn(
                    _("warning: cache connection closed early - " +
                      "falling back to server\n"))

            global fetchmisses
            missedfiles = len(misses)
            fetchmisses += missedfiles

            fromcache = total - missedfiles
            self.ui.log(
                "remotefilelog",
                "remote cache hit rate is %r of %r\n",
                fromcache,
                total,
                hit=fromcache,
                total=total,
            )

        oldumask = os.umask(0o002)
        try:
            # receive cache misses from master
            if missedfiles > 0:
                self._fetchpackfiles(misses, fetchdata, fetchhistory)
        finally:
            os.umask(oldumask)
Example #26
def addchangegroupfiles(orig, repo, source, revmap, trp, expectedfiles, *args):
    if requirement not in repo.requirements:
        return orig(repo, source, revmap, trp, expectedfiles, *args)

    newfiles = 0
    visited = set()
    revisiondatas = {}
    queue = []

    # Normal Mercurial processes each file one at a time, adding all
    # the new revisions for that file at once. In remotefilelog a file
    # revision may depend on a different file's revision (in the case
    # of a rename/copy), so we must lay all revisions down across all
    # files in topological order. (A self-contained sketch of this
    # requeueing technique follows this example.)

    # read all the file chunks but don't add them
    with progress.bar(repo.ui, _("files"), total=expectedfiles) as prog:
        while True:
            chunkdata = source.filelogheader()
            if not chunkdata:
                break
            f = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % f)
            prog.value += 1

            if not repo.shallowmatch(f):
                fl = repo.file(f)
                deltas = source.deltaiter()
                fl.addgroup(deltas, revmap, trp)
                continue

            chain = None
            while True:
                # returns: (node, p1, p2, cs, deltabase, delta, flags) or None
                revisiondata = source.deltachunk(chain)
                if not revisiondata:
                    break

                chain = revisiondata[0]

                revisiondatas[(f, chain)] = revisiondata
                queue.append((f, chain))

                if f not in visited:
                    newfiles += 1
                    visited.add(f)

            if chain is None:
                raise error.Abort(_("received file revlog group is empty"))

    processed = set()

    def available(f, node, depf, depnode):
        if depnode != nullid and (depf, depnode) not in processed:
            if (depf, depnode) not in revisiondatas:
                # It's not in the changegroup, assume it's already
                # in the repo
                return True
            # re-add self to queue
            queue.insert(0, (f, node))
            # add dependency in front
            queue.insert(0, (depf, depnode))
            return False
        return True

    skipcount = 0

    # Prefetch the non-bundled revisions that we will need
    prefetchfiles = []
    for f, node in queue:
        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
        dependents = [revisiondata[1], revisiondata[2], revisiondata[4]]

        for dependent in dependents:
            if dependent == nullid or (f, dependent) in revisiondatas:
                continue
            prefetchfiles.append((f, hex(dependent)))

    repo.fileservice.prefetch(prefetchfiles)

    # Get rawtext by applying delta chains.
    @util.lrucachefunc
    def reconstruct(f, node):
        revisiondata = revisiondatas.get((f, node), None)
        if revisiondata is None:
            # Read from repo.
            return repo.file(f).revision(node, raw=False)
        else:
            # Apply delta-chain.
            # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
            deltabase, delta, flags = revisiondata[4:]
            if deltabase == nullid:
                base = ""
            else:
                if flags:
                    # LFS (flags != 0) should always use nullid as deltabase.
                    raise error.Abort("unexpected deltabase")
                base = reconstruct(f, deltabase)
            rawtext = mdiff.patch(base, delta)
            if isinstance(rawtext, pycompat.buffer):  # noqa
                rawtext = bytes(rawtext)
            return rawtext

    # Apply the revisions in topological order such that a revision
    # is only written once its deltabase and parents have been written.
    maxskipcount = len(queue) + 1
    while queue:
        f, node = queue.pop(0)
        if (f, node) in processed:
            continue

        skipcount += 1
        if skipcount > maxskipcount:
            raise error.Abort(_("circular node dependency on ancestormap"))

        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
        node, p1, p2, linknode, deltabase, delta, flags = revisiondata

        # Deltas are always against flags=0 rawtext (see revdiff and its
        # callers), if deltabase is not nullid.
        if flags and deltabase != nullid:
            raise error.Abort("unexpected deltabase")

        rawtext = reconstruct(f, node)
        meta, text = shallowutil.parsemeta(rawtext, flags)
        if "copy" in meta:
            copyfrom = meta["copy"]
            copynode = bin(meta["copyrev"])
            if not available(f, node, copyfrom, copynode):
                continue

        if any(not available(f, node, f, p) for p in [p1, p2] if p != nullid):
            continue

        # Use addrawrevision so if it's already LFS, take it as-is, do not
        # re-calculate the LFS object.
        fl = repo.file(f)
        fl.addrawrevision(rawtext,
                          trp,
                          linknode,
                          p1,
                          p2,
                          node=node,
                          flags=flags)
        processed.add((f, node))
        skipcount = 0

    return len(revisiondatas), newfiles
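
The comment near the top of addchangegroupfiles explains the ordering constraint: a file revision in the changegroup may depend on a different file's revision (renames/copies), so revisions are laid down in topological order by popping a queue and, via available(), re-queuing an item behind any dependency that is part of the changegroup but not yet written, with a skip counter guarding against cycles. The sketch below isolates that requeueing technique in a self-contained form; every name in it is illustrative and none of it is taken from the repository.

def process_in_dependency_order(items, deps, apply):
    # items: keys to process; deps: key -> iterable of dependency keys;
    # apply: callback invoked once per key after its in-set dependencies.
    itemset = set(items)
    queue = list(items)
    processed = set()
    skipcount = 0
    maxskip = len(queue) + 1
    while queue:
        key = queue.pop(0)
        if key in processed:
            continue
        # Only defer on dependencies that are themselves in the work set,
        # mirroring available(): anything else is assumed to exist already.
        pending = [d for d in deps.get(key, ())
                   if d in itemset and d not in processed]
        if pending:
            skipcount += 1
            if skipcount > maxskip:
                raise RuntimeError("circular dependency detected")
            queue.insert(0, key)        # re-add self to the queue
            for d in pending:
                queue.insert(0, d)      # put each dependency in front
            continue
        apply(key)
        processed.add(key)
        skipcount = 0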
Example #27
    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
                          fnodes, source):
        # type: (Sequence[int], Mapping[bytes, int], bool, Any, MutableMapping[str, Any], Any) -> Iterable[bytes]
        """
        - `commonrevs` is the set of known commits on both sides
        - `clrevorder` is a mapping from cl node to rev number, used for
                       determining which commit is newer.
        - `mfs` is the potential manifest nodes to send,
                with maps to their linknodes
                { manifest root node -> link node }
        - `fnodes` is a mapping of { filepath -> { node -> clnode } }
                If fastpathlinkrev is false, we are responsible for populating
                fnodes.
        - `args` and `kwargs` are extra arguments that will be passed to the
                core generatemanifests method, whose length depends on the
                version of core Hg.
        """
        # If we're not using the fastpath, then all the trees will be necessary
        # so we can inspect which files changed and need to be sent. So let's
        # bulk fetch the trees up front.
        repo = self._repo

        if self._cansendflat(mfs.keys()):
            # In this code path, generating the manifests populates fnodes for
            # us.
            chunks = super(shallowcg1packer,
                           self).generatemanifests(commonrevs, clrevorder,
                                                   fastpathlinkrev, mfs,
                                                   fnodes, source)
            for chunk in chunks:
                yield chunk
        else:
            # If not using the fast path, we need to discover what files to send
            if not fastpathlinkrev:
                localmfstore = None
                if len(repo.manifestlog.localdatastores) > 0:
                    localmfstore = repo.manifestlog.localdatastores[0]
                sharedmfstore = None
                if len(repo.manifestlog.shareddatastores) > 0:
                    sharedmfstore = contentstore.unioncontentstore(
                        *repo.manifestlog.shareddatastores)

                def containslocalfiles(mfnode):
                    # If this is a local tree, then it contains local files.
                    if localmfstore and not localmfstore.getmissing(
                        [("", mfnode)]):
                        return True

                    # If not a local tree, and it doesn't exist in the store,
                    # then it is to be generated and may contain local files.
                    # This can happen while serving an infinitepush bundle that
                    # contains flat manifests. It will need to generate trees
                    # for that manifest.
                    if (repo.svfs.treemanifestserver and sharedmfstore
                            and sharedmfstore.getmissing([("", mfnode)])):
                        return True

                    return False

                # If we're sending files, we need to process the manifests
                filestosend = self.shouldaddfilegroups(source)
                if filestosend is not NoFiles:
                    mflog = repo.manifestlog
                    with progress.bar(repo.ui, _("manifests"),
                                      total=len(mfs)) as prog:
                        for mfnode, clnode in pycompat.iteritems(mfs):
                            prog.value += 1
                            if filestosend == LocalFiles and not containslocalfiles(
                                    mfnode):
                                continue

                            try:
                                mfctx = mflog[mfnode]
                                p1node = mfctx.parents[0]
                                p1ctx = mflog[p1node]
                            except LookupError:
                                if not repo.svfs.treemanifestserver or treeonly(
                                        repo):
                                    raise
                                # If we can't find the flat version, look for trees
                                tmfl = mflog.treemanifestlog
                                mfctx = tmfl[mfnode]
                                p1node = tmfl[mfnode].parents[0]
                                p1ctx = tmfl[p1node]

                            diff = pycompat.iteritems(p1ctx.read().diff(
                                mfctx.read()))
                            for filename, ((anode, aflag), (bnode,
                                                            bflag)) in diff:
                                if bnode is not None:
                                    fclnodes = fnodes.setdefault(filename, {})
                                    fclnode = fclnodes.setdefault(
                                        bnode, clnode)
                                    if clrevorder[clnode] < clrevorder[fclnode]:
                                        fclnodes[bnode] = clnode

            yield self.close()
Example #28
def verify(ui, repo, hgctx):
    """verify that a Mercurial rev matches the corresponding Git rev

    Given a Mercurial revision that has a corresponding Git revision in the map,
    this attempts to answer whether that revision has the same contents as the
    corresponding Git revision.

    """
    handler = repo.githandler

    gitsha = handler.map_git_get(hgctx.hex())
    if not gitsha:
        # TODO deal better with commits in the middle of octopus merges
        raise hgutil.Abort(
            _("no git commit found for rev %s") % hgctx,
            hint=_("if this is an octopus merge, "
                   "verify against the last rev"),
        )

    try:
        gitcommit = handler.git.get_object(pycompat.encodeutf8(gitsha))
    except KeyError:
        raise hgutil.Abort(
            _("git equivalent %s for rev %s not found!") % (gitsha, hgctx))
    if not isinstance(gitcommit, Commit):
        raise hgutil.Abort(
            _("git equivalent %s for rev %s is not a commit!") %
            (gitsha, hgctx))

    ui.status(_("verifying rev %s against git commit %s\n") % (hgctx, gitsha))
    failed = False

    # TODO check commit message and other metadata

    dirkind = stat.S_IFDIR

    hgfiles = set(hgctx)
    gitfiles = set()

    i = 0
    with progress.bar(ui, _("verify"), total=len(hgfiles)) as prog:
        for gitfile, dummy in diff_tree.walk_trees(handler.git.object_store,
                                                   gitcommit.tree, None):
            if gitfile.mode == dirkind:
                continue
            # TODO deal with submodules
            if gitfile.mode == S_IFGITLINK:
                continue
            prog.value = i
            i += 1
            gitfilepath = pycompat.decodeutf8(gitfile.path)
            gitfiles.add(gitfilepath)

            try:
                fctx = hgctx[gitfilepath]
            except error.LookupError:
                # we'll deal with this at the end
                continue

            hgflags = fctx.flags()
            gitflags = handler.convert_git_int_mode(gitfile.mode)
            if hgflags != gitflags:
                ui.write(
                    _("file has different flags: %s (hg '%s', git '%s')\n") %
                    (gitfilepath, hgflags, gitflags))
                failed = True
            if fctx.data() != handler.git[gitfile.sha].data:
                ui.write(_("difference in: %s\n") % gitfilepath)
                failed = True

    if hgfiles != gitfiles:
        failed = True
        missing = gitfiles - hgfiles
        for f in sorted(missing):
            ui.write(_("file found in git but not hg: %s\n") % f)
        unexpected = hgfiles - gitfiles
        for f in sorted(unexpected):
            ui.write(_("file found in hg but not git: %s\n") % f)

    if failed:
        return 1
    else:
        return 0
Example #29
    def expandpaths(self, rev, paths, parents):
        changed, removed = set(), set()
        copies = {}

        new_module, revnum = revsplit(rev)[1:]
        if new_module != self.module:
            self.module = new_module
            self.reparent(self.module)

        with progress.bar(self.ui, _("scanning paths"), _("paths"), len(paths)) as prog:
            for i, (path, ent) in enumerate(paths):
                prog.value = (i, path)
                entrypath = self.getrelpath(path)

                kind = self._checkpath(entrypath, revnum)
                if kind == svn.core.svn_node_file:
                    changed.add(self.recode(entrypath))
                    if not ent.copyfrom_path or not parents:
                        continue
                    # Copy sources not in parent revisions cannot be
                    # represented, ignore their origin for now
                    pmodule, prevnum = revsplit(parents[0])[1:]
                    if ent.copyfrom_rev < prevnum:
                        continue
                    copyfrom_path = self.getrelpath(ent.copyfrom_path, pmodule)
                    if not copyfrom_path:
                        continue
                    self.ui.debug(
                        "copied to %s from %s@%s\n"
                        % (entrypath, copyfrom_path, ent.copyfrom_rev)
                    )
                    copies[self.recode(entrypath)] = self.recode(copyfrom_path)
                elif kind == 0:  # gone, but had better be a deleted *file*
                    self.ui.debug("gone from %s\n" % ent.copyfrom_rev)
                    pmodule, prevnum = revsplit(parents[0])[1:]
                    parentpath = pmodule + "/" + entrypath
                    fromkind = self._checkpath(entrypath, prevnum, pmodule)

                    if fromkind == svn.core.svn_node_file:
                        removed.add(self.recode(entrypath))
                    elif fromkind == svn.core.svn_node_dir:
                        oroot = parentpath.strip("/")
                        nroot = path.strip("/")
                        children = self._iterfiles(oroot, prevnum)
                        for childpath in children:
                            childpath = childpath.replace(oroot, nroot)
                            childpath = self.getrelpath("/" + childpath, pmodule)
                            if childpath:
                                removed.add(self.recode(childpath))
                    else:
                        self.ui.debug(
                            "unknown path in revision %d: %s\n" % (revnum, path)
                        )
                elif kind == svn.core.svn_node_dir:
                    if ent.action == "M":
                        # If the directory just had a prop change,
                        # then we shouldn't need to look for its children.
                        continue
                    if ent.action == "R" and parents:
                        # If a directory is replacing a file, mark the previous
                        # file as deleted
                        pmodule, prevnum = revsplit(parents[0])[1:]
                        pkind = self._checkpath(entrypath, prevnum, pmodule)
                        if pkind == svn.core.svn_node_file:
                            removed.add(self.recode(entrypath))
                        elif pkind == svn.core.svn_node_dir:
                            # We do not know what files were kept or removed,
                            # mark them all as changed.
                            for childpath in self._iterfiles(pmodule, prevnum):
                                childpath = self.getrelpath("/" + childpath)
                                if childpath:
                                    changed.add(self.recode(childpath))

                    for childpath in self._iterfiles(path, revnum):
                        childpath = self.getrelpath("/" + childpath)
                        if childpath:
                            changed.add(self.recode(childpath))

                    # Handle directory copies
                    if not ent.copyfrom_path or not parents:
                        continue
                    # Copy sources not in parent revisions cannot be
                    # represented, ignore their origin for now
                    pmodule, prevnum = revsplit(parents[0])[1:]
                    if ent.copyfrom_rev < prevnum:
                        continue
                    copyfrompath = self.getrelpath(ent.copyfrom_path, pmodule)
                    if not copyfrompath:
                        continue
                    self.ui.debug(
                        "mark %s came from %s:%d\n"
                        % (path, copyfrompath, ent.copyfrom_rev)
                    )
                    children = self._iterfiles(ent.copyfrom_path, ent.copyfrom_rev)
                    for childpath in children:
                        childpath = self.getrelpath("/" + childpath, pmodule)
                        if not childpath:
                            continue
                        copytopath = path + childpath[len(copyfrompath) :]
                        copytopath = self.getrelpath(copytopath)
                        copies[self.recode(copytopath)] = self.recode(childpath)

        changed.update(removed)
        return (list(changed), removed, copies)
Example #30
        def _prefetch(self,
                      revs,
                      base=None,
                      pats=None,
                      opts=None,
                      matcher=None):
            fallbackpath = self.fallbackpath
            if fallbackpath:
                # If we know a rev is on the server, we should fetch the server
                # version of those files, since our local file versions might
                # become obsolete if the local commits are stripped.
                with progress.spinner(self.ui,
                                      _("finding outgoing revisions")):
                    localrevs = self.revs("outgoing(%s)", fallbackpath)
                if base is not None and base != nullrev:
                    serverbase = list(
                        self.revs("first(reverse(::%s) - %ld)", base,
                                  localrevs))
                    if serverbase:
                        base = serverbase[0]
            else:
                localrevs = self

            mfl = self.manifestlog
            if base is not None:
                mfdict = mfl[self[base].manifestnode()].read()
                skip = set(mfdict.iteritems())
            else:
                skip = set()

            # Copy the skip set to start large and avoid constant resizing,
            # and since it's likely to be very similar to the prefetch set.
            files = skip.copy()
            serverfiles = skip.copy()
            visited = set()
            visited.add(nullid)
            with progress.bar(self.ui, _("prefetching"),
                              total=len(revs)) as prog:
                for rev in sorted(revs):
                    ctx = self[rev]
                    if pats:
                        m = scmutil.match(ctx, pats, opts)
                    if matcher is None:
                        matcher = self.maybesparsematch(rev)

                    mfnode = ctx.manifestnode()
                    mfctx = mfl[mfnode]

                    # Decompressing manifests is expensive.
                    # When possible, only read the deltas.
                    p1, p2 = mfctx.parents
                    if p1 in visited and p2 in visited:
                        mfdict = mfctx.readnew()
                    else:
                        mfdict = mfctx.read()

                    diff = mfdict.iteritems()
                    if pats:
                        diff = (pf for pf in diff if m(pf[0]))
                    if matcher:
                        diff = (pf for pf in diff if matcher(pf[0]))
                    if rev not in localrevs:
                        serverfiles.update(diff)
                    else:
                        files.update(diff)

                    visited.add(mfctx.node())
                    prog.value += 1

            files.difference_update(skip)
            serverfiles.difference_update(skip)

            # Fetch files known to be on the server
            if serverfiles:
                results = [(path, hex(fnode)) for (path, fnode) in serverfiles]
                self.fileservice.prefetch(results, force=True)

            # Fetch files that may or may not be on the server
            if files:
                results = [(path, hex(fnode)) for (path, fnode) in files]
                self.fileservice.prefetch(results)