def perfbdiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between the requested revision and its
    delta parent.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    if opts["alldata"]:
        opts["changelog"] = True

    if opts.get("changelog") or opts.get("manifest"):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError("perfbdiff", "invalid arguments")

    textpairs = []

    r = cmdutil.openrevlog(repo, "perfbdiff", file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts["alldata"]:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = repo.manifestlog._revlog.revision(ctx.manifestnode())
            for pctx in ctx.parents():
                pman = repo.manifestlog._revlog.revision(pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for pair in textpairs:
            mdiff.textdiff(*pair)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
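
# Usage sketch (assumption: this function is registered as the "perfbdiff"
# command via a @command decorator elsewhere in the module; the flags mirror
# the opts read above):
#
#   hg perfbdiff FILE REV             # bdiff REV of FILE against its delta parent
#   hg perfbdiff --count N FILE REV   # same, for N revisions starting at REV
#   hg perfbdiff --alldata REV        # treat REV as a changeset; bdiff its
#                                     # manifest and filelog changes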
def cmd(ui, repo, csid=None, **opts):
    # Check whether the given snapshot matches the current working copy.
    if csid is None:
        raise error.CommandError("snapshot isworkingcopy", _("missing snapshot id"))

    snapshot = repo.edenapi.fetchsnapshot(
        {
            "cs_id": bytes.fromhex(csid),
        },
    )

    maxuntrackedsize = parsemaxuntracked(opts)
    iswc, reason = _isworkingcopy(ui, repo, snapshot, maxuntrackedsize)

    if iswc:
        if not ui.plain():
            ui.status(_("snapshot is the working copy\n"))
    else:
        raise error.Abort(_("snapshot is not the working copy: {}").format(reason))
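
# Usage sketch (assumption: registered as the "snapshot isworkingcopy"
# subcommand, matching the CommandError name above; error.Abort makes the
# command exit non-zero when the snapshot differs from the working copy):
#
#   hg snapshot isworkingcopy <snapshot-id>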
def show(ui, repo, csid=None, **opts):
    # Display the contents of the given snapshot, as a patch or a diffstat.
    if csid is None:
        raise error.CommandError("snapshot show", _("missing snapshot id"))

    try:
        snapshot = repo.edenapi.fetchsnapshot(
            {
                "cs_id": bytes.fromhex(csid),
            },
        )
    except Exception:
        raise error.Abort(_("snapshot doesn't exist"))
    else:
        ctx = _snapshot2ctx(repo, snapshot)
        match = scmutil.matchall(repo)
        printeropt = {"patch": not opts["stat"], "stat": opts["stat"]}
        buffered = False
        if opts["json"] is True:
            displayer = jsonchangeset(ui, repo, match, printeropt, buffered)
        else:
            ui.status(_("snapshot: {}\n").format(csid))
            displayer = changeset_printer(ui, repo, match, printeropt, buffered)
        displayer.show(ctx)
        displayer.close()
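
# Usage sketch (assumption: registered as the "snapshot show" subcommand,
# matching the CommandError name above; --stat and --json mirror the opts
# read in the body):
#
#   hg snapshot show <snapshot-id>          # full patch output
#   hg snapshot show --stat <snapshot-id>   # diffstat only
#   hg snapshot show --json <snapshot-id>   # machine-readable changeset output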
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Obtain the raw chunks for that delta chain
    3. Decompress each raw chunk
    4. Apply binary patches to obtain fulltext
    5. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    if opts.get("changelog") or opts.get("manifest"):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError("perfrevlogrevision", "invalid arguments")

    r = cmdutil.openrevlog(repo, "perfrevlogrevision", file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        offset = start(chain[0])

        chunks = []
        ladd = chunks.append

        for rev in chain:
            chunkstart = start(rev)
            if inline:
                chunkstart += (rev + 1) * iosize
            chunklength = length(rev)
            ladd(buffer(data, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        segmentforrevs(chain[0], chain[-1])

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    chain = r._deltachain(rev)[0]
    data = segmentforrevs(chain[0], chain[-1])[1]
    rawchunks = getrawchunks(data, chain)
    bins = r._chunks(chain)
    text = str(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), "full"),
        (lambda: dodeltachain(rev), "deltachain"),
        (lambda: doread(chain), "read"),
        (lambda: dorawchunks(data, chain), "rawchunks"),
        (lambda: dodecompress(rawchunks), "decompress"),
        (lambda: dopatch(text, bins), "patch"),
        (lambda: dohash(text), "hash"),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
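
# Illustrative sketch only, not part of this module: a toy version of steps
# 1-4 from the docstring above. Mercurial's real deltas are binary bdiff
# patches applied by mdiff.patches(); here a "patch" is just a list of
# (start, end, replacement) edits, so the chain-walking idea stands alone.
def _applychain(base, chain):
    text = base
    for edits in chain:
        # Apply edits right-to-left so earlier offsets stay valid.
        for start, end, repl in sorted(edits, reverse=True):
            text = text[:start] + repl + text[end:]
    return text

# e.g. _applychain("hello world", [[(0, 5, "goodbye")], [(8, 13, "moon")]])
# == "goodbye moon"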