def repo_bugids(ui, repo):
    """Return a dict mapping each bugid mentioned in a changeset
    description to the first revision in which it appears.

    Walks every revision (0:tip), handling several Mercurial versions:
    the modern walkchangerevs() signature is tried first, then the
    pre-1.4 phased-iterator form on failure.
    """
    def addbugids(bugids, ctx):
        # Scan each description line for a bug reference; keep only the
        # first rev seen per bugid.
        for ln in ctx.description().splitlines():
            m = bug_check.match(ln)
            if m:
                b = int(m.group(1))
                if b not in bugids:  # idiom fix: was 'not b in bugids'
                    bugids[b] = ctx.rev()

    # Should cache this, eventually
    bugids = { }                    # bugid -> rev
    opts = { 'rev' : ['0:tip'] }
    ui.debug("Gathering bugids ...\n")
    try:
        nop = lambda c, fns: None
        # 'revctxs' rather than 'iter': avoid shadowing the builtin.
        revctxs = cmdutil.walkchangerevs(repo, _matchall(repo), opts, nop)
        for ctx in revctxs:
            addbugids(bugids, ctx)
    except (AttributeError, TypeError):
        # AttributeError: matchall does not exist in hg < 1.1
        # TypeError: walkchangerevs args differ in hg <= 1.3.1
        get = util.cachefunc(lambda r: repo.changectx(r).changeset())
        changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, [], get, opts)
        for st, rev, fns in changeiter:
            if st == 'add':
                node = repo.changelog.node(rev)
                addbugids(bugids, context.changectx(repo, node))
    return bugids
def _get_history_1_4(self, repo, pats, opts, limit):
    """Yield (path, display_rev, change_kind) history tuples using the
    Mercurial >= 1.4 walkchangerevs() API.

    For file history a one-entry lookahead is kept so that a path change
    between consecutive entries can be reported as Changeset.COPY, and
    the oldest entry (when within *limit*) as Changeset.ADD.
    """
    matcher = cmdutil.match(repo, pats, opts)
    if self.isfile:
        # Maps rev -> the (single) matched filename for that rev, filled
        # by the prep callback during the walk.
        fncache = {}
        def prep(ctx, fns):
            if self.isfile:
                fncache[ctx.rev()] = fns[0]
    else:
        def prep(ctx, fns):
            pass
    # keep one lookahead entry so that we can detect renames
    path = self.path
    entry = None
    count = 1
    for ctx in cmdutil.walkchangerevs(repo, matcher, opts, prep):
        if self.isfile and entry:
            path = fncache[ctx.rev()]
            if path != entry[0]:
                # Path differs from the newer entry: report as a copy/rename.
                entry = entry[0:2] + (Changeset.COPY,)
        if entry:
            count += 1
            yield entry
        entry = (path, self.repos.hg_display(ctx.node()), Changeset.EDIT)
    if entry:
        if count < limit:
            # Oldest visible entry within the limit is the file's addition.
            entry = entry[0:2] + (Changeset.ADD,)
        yield entry
def impl_hg_tree(repo, cid, path, names, *args):
    """Map each name in *names* to the hex node of the first changeset
    matching *path* at revision *cid*.

    The walk yields the same first changeset regardless of name, so it
    is run once instead of once per name (the original re-walked inside
    the loop). Uses next() instead of the Python-2-only .next().
    """
    m = cmdutil.match(repo, pats=[path], default=path)
    rev_iter = cmdutil.walkchangerevs(repo, m, {'rev': cid}, lambda c, f: None)
    hexnode = next(rev_iter).hex()
    return dict((name, hexnode) for name in names)
def _get_history_1_3(self, repo, pats, opts, limit):
    """Yield (path, display_rev, change_kind) history tuples using the
    pre-1.4 phased walkchangerevs() API ('add'/'iter' states).

    Mirrors _get_history_1_4: a one-entry lookahead detects renames
    (Changeset.COPY) and marks the oldest in-limit entry Changeset.ADD.
    """
    if self.repos.version_info > (1, 3, 999):
        # hg >= 1.4: repo[r] is a changectx; older versions need the
        # raw changeset tuple.
        changefn = lambda r: repo[r]
    else:
        changefn = lambda r: repo[r].changeset()
    get = cachefunc(changefn)
    if self.isfile:
        # rev -> matched filename, populated during the 'add' phase.
        fncache = {}
    chgiter, matchfn = cmdutil.walkchangerevs(self.repos.ui, repo, pats,
                                              get, opts)
    # keep one lookahead entry so that we can detect renames
    path = self.path
    entry = None
    count = 1
    for st, rev, fns in chgiter:
        if st == 'add' and self.isfile:
            fncache[rev] = fns[0]
        elif st == 'iter':
            if self.isfile and entry:
                path = fncache[rev]
                if path != entry[0]:
                    # Path changed between entries: report a copy/rename.
                    entry = entry[0:2] + (Changeset.COPY,)
            if entry:
                count += 1
                yield entry
            n = repo.changelog.node(rev)
            entry = (path, self.repos.hg_display(n), Changeset.EDIT)
    if entry:
        if count < limit:
            entry = entry[0:2] + (Changeset.ADD,)
        yield entry
def _walkctxs(self, base, head, follow=False, pick=None):
    '''Generate changectxs between BASE and HEAD.

    Walk changesets between BASE and HEAD (in the order implied by
    their relation), following a given branch if FOLLOW is a true
    value, yielding changectxs where PICK (if specified) returns a
    true value.

    PICK is a function of one argument, a changectx.'''
    opts = {'rev': ['%s:%s' % (base.rev(), head.rev())],
            'follow': follow}
    changectx = self.repo.changectx
    getcset = util.cachefunc(lambda r: changectx(r).changeset())
    #
    # See the docstring of mercurial.cmdutil.walkchangerevs() for
    # the phased approach to the iterator returned. The important
    # part to note is that the 'add' phase gathers nodes, which
    # the 'iter' phase then iterates through.
    #
    changeiter = cmdutil.walkchangerevs(self.ui, self.repo, [], getcset,
                                        opts)[0]
    # rev -> changectx for revisions accepted by PICK during 'add'.
    matched = {}
    for st, rev, fns in changeiter:
        if st == 'add':
            ctx = changectx(rev)
            if not pick or pick(ctx):
                matched[rev] = ctx
        elif st == 'iter':
            if rev in matched:
                yield matched[rev]
def tipmost_shared(head, outnodes):
    '''Return the tipmost node on the same branch as head that is not in
    outnodes.

    We walk from head to the bottom of the workspace (revision 0)
    collecting nodes not in outnodes during the add phase and return
    the first node we see in the iter phase that was previously
    collected.

    See the docstring of mercurial.cmdutil.walkchangerevs() for the
    phased approach to the iterator returned. The important part to
    note is that the 'add' phase gathers nodes, which the 'iter'
    phase then iterates through.'''
    # NOTE(review): 'self' is not a parameter here — this function
    # presumably closes over an enclosing method's 'self'; confirm.
    get = util.cachefunc(lambda r: self.repo.changectx(r).changeset())
    changeiter = cmdutil.walkchangerevs(self.repo.ui, self.repo, [], get,
                                        {'rev': ['%s:0' % head],
                                         'follow': True})[0]
    seen = []
    for st, rev, fns in changeiter:
        n = self.repo.changelog.node(rev)
        if st == 'add':
            if n not in outnodes:
                seen.append(n)
        elif st == 'iter':
            # First collected node reached in iteration order wins.
            if n in seen:
                return rev
    return None
def bases(self):
    '''Find the bases that in combination define the "old" side of a
    recommitted set of changes, based on AL'''
    get = util.cachefunc(lambda r: self.ws.repo.changectx(r).changeset())
    # We don't rebuild the AL So the AL local tip is the old tip
    revrange = "%s:%s" % (self.base.rev(), self.localtip.rev())
    changeiter = cmdutil.walkchangerevs(self.ws.repo.ui, self.ws.repo, [],
                                        get, {'rev': [revrange]})[0]
    hold = []
    ret = []
    alrevs = [x.rev() for x in self.revs]
    for st, rev, fns in changeiter:
        n = self.ws.repo.changelog.node(rev)
        if st == 'add':
            # Collect nodes belonging to the AL revision set.
            if rev in alrevs:
                hold.append(n)
        elif st == 'iter':
            if n not in hold:
                continue
            p = self.ws.repo.changelog.parents(n)
            # Skip merges (second parent present).
            if p[1] != node.nullid:
                continue
            # A base is a collected node whose first parent is outside
            # the AL set.
            if self.ws.repo.changectx(p[0]).rev() not in alrevs:
                ret.append(n)
    return ret
def countrate(ui, repo, amap, *pats, **opts):
    """Calculate stats"""
    # Build the grouping-key function: either a formatted commit date or
    # a rendered template (default: author email), remapped via amap.
    if opts.get('dateformat'):
        def getkey(ctx):
            t, tz = ctx.date()
            date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
            return date.strftime(opts['dateformat'])
    else:
        tmpl = opts.get('template', '{author|email}')
        tmpl = maketemplater(ui, repo, tmpl)
        def getkey(ctx):
            ui.pushbuffer()
            tmpl.show(ctx)
            return ui.popbuffer()

    count = pct = 0
    rate = {}
    df = False
    if opts.get('date'):
        df = util.matchdate(opts['date'])

    get = util.cachefunc(lambda r: repo[r].changeset())
    # Pre-1.4 walkchangerevs(): returns a phased (state, rev, fns) iterator.
    changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, pats, get, opts)
    for st, rev, fns in changeiter:
        if not st == 'add':
            continue
        if df and not df(get(rev)[2][0]): # doesn't match date format
            continue

        ctx = repo[rev]
        key = getkey(ctx)
        key = amap.get(key, key) # alias remap
        if opts.get('changesets'):
            # Counting changesets only.
            rate[key] = rate.get(key, 0) + 1
        else:
            parents = ctx.parents()
            if len(parents) > 1:
                ui.note(_('Revision %d is a merge, ignoring...\n') % (rev, ))
                continue

            # Counting changed lines vs the single parent.
            ctx1 = parents[0]
            lines = changedlines(ui, repo, ctx1, ctx, fns)
            rate[key] = rate.get(key, 0) + lines

        if opts.get('progress'):
            count += 1
            newpct = int(100.0 * count / max(len(repo), 1))
            if pct < newpct:
                pct = newpct
                ui.write("\r" + _("generating stats: %d%%") % pct)
                sys.stdout.flush()

    if opts.get('progress'):
        # Clear the progress line.
        ui.write("\r")
        sys.stdout.flush()

    return rate
def versions(self, item):
    """Yield a Version object for every changeset that touches *item*."""
    repo = self._local_repo
    exact_matcher = match.exact(repo.root, repo.getcwd(), [item])
    walk_opts = {'rev': None}
    changesets = walkchangerevs(repo, exact_matcher, walk_opts,
                                lambda ctx, fns: ctx)
    for ctx in changesets:
        yield Version(ctx)
def impl_hg_tree(repo, cid, path, names, *args):
    """Map each name in *names* to the hex node of the first changeset
    matching *path* at revision *cid*.

    The walk yields the same first changeset for every name, so it is
    run once instead of once per name (the original re-walked inside
    the loop). Uses next() instead of the Python-2-only .next().
    """
    m = cmdutil.match(repo, pats=[path], default=path)
    rev_iter = cmdutil.walkchangerevs(
        repo, m, {'rev': cid}, lambda c, f: None)
    hexnode = next(rev_iter).hex()
    return dict((name, hexnode) for name in names)
def countrate(ui, repo, amap, *pats, **opts):
    """Calculate stats"""
    # Modern (bytes-keyed) Mercurial API: opts keys are bytes after
    # byteskwargs, and walkchangerevs takes a prep callback.
    opts = pycompat.byteskwargs(opts)
    if opts.get(b'dateformat'):
        def getkey(ctx):
            t, tz = ctx.date()
            date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
            return encoding.strtolocal(
                date.strftime(encoding.strfromlocal(opts[b'dateformat']))
            )
    else:
        tmpl = opts.get(b'oldtemplate') or opts.get(b'template')
        tmpl = logcmdutil.maketemplater(ui, repo, tmpl)
        def getkey(ctx):
            ui.pushbuffer()
            tmpl.show(ctx)
            return ui.popbuffer()

    progress = ui.makeprogress(
        _(b'analyzing'), unit=_(b'revisions'), total=len(repo)
    )
    rate = {}
    df = False
    if opts.get(b'date'):
        df = dateutil.matchdate(opts[b'date'])

    m = scmutil.match(repo[None], pats, opts)

    def prep(ctx, fns):
        # Accumulates into 'rate'; called once per walked changeset.
        rev = ctx.rev()
        if df and not df(ctx.date()[0]):  # doesn't match date format
            return

        key = getkey(ctx).strip()
        key = amap.get(key, key)  # alias remap
        if opts.get(b'changesets'):
            rate[key] = (rate.get(key, (0,))[0] + 1, 0)
        else:
            parents = ctx.parents()
            if len(parents) > 1:
                ui.note(_(b'revision %d is a merge, ignoring...\n') % (rev,))
                return

            ctx1 = parents[0]
            lines = changedlines(ui, repo, ctx1, ctx, fns)
            # Element-wise (added, removed) accumulation.
            rate[key] = [r + l for r, l in zip(rate.get(key, (0, 0)), lines)]

        progress.increment()

    # The walk drives prep(); the yielded ctxs themselves are unused.
    for ctx in cmdutil.walkchangerevs(repo, m, opts, prep):
        continue

    progress.complete()

    return rate
def countrate(ui, repo, amap, *pats, **opts):
    """Calculate stats"""
    if opts.get('dateformat'):
        def getkey(ctx):
            t, tz = ctx.date()
            date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
            return date.strftime(opts['dateformat'])
    else:
        tmpl = opts.get('template', '{author|email}')
        tmpl = maketemplater(ui, repo, tmpl)
        def getkey(ctx):
            ui.pushbuffer()
            tmpl.show(ctx)
            return ui.popbuffer()

    # Mutable dict so the nested prep() can update progress counters.
    state = {'count': 0, 'pct': 0}
    rate = {}
    df = False
    if opts.get('date'):
        df = util.matchdate(opts['date'])

    m = cmdutil.match(repo, pats, opts)
    def prep(ctx, fns):
        rev = ctx.rev()
        if df and not df(ctx.date()[0]): # doesn't match date format
            return

        key = getkey(ctx)
        key = amap.get(key, key) # alias remap
        if opts.get('changesets'):
            rate[key] = (rate.get(key, (0,))[0] + 1, 0)
        else:
            parents = ctx.parents()
            if len(parents) > 1:
                ui.note(_('Revision %d is a merge, ignoring...\n') % (rev,))
                return

            ctx1 = parents[0]
            lines = changedlines(ui, repo, ctx1, ctx, fns)
            # Element-wise (added, removed) accumulation.
            rate[key] = [r + l for r, l in zip(rate.get(key, (0, 0)), lines)]

        if opts.get('progress'):
            state['count'] += 1
            newpct = int(100.0 * state['count'] / max(len(repo), 1))
            if state['pct'] < newpct:
                state['pct'] = newpct
                ui.write("\r" + _("generating stats: %d%%") % state['pct'])
                sys.stdout.flush()

    # The walk drives prep(); the yielded ctxs themselves are unused.
    for ctx in cmdutil.walkchangerevs(repo, m, opts, prep):
        continue

    if opts.get('progress'):
        ui.write("\r")
        sys.stdout.flush()

    return rate
def countrate(ui, repo, amap, *pats, **opts):
    """Calculate stats"""
    # Grouping key: formatted commit date, or a rendered template
    # (default: author email), remapped via amap.
    if opts.get('dateformat'):
        def getkey(ctx):
            t, tz = ctx.date()
            date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
            return date.strftime(opts['dateformat'])
    else:
        tmpl = opts.get('template', '{author|email}')
        tmpl = maketemplater(ui, repo, tmpl)
        def getkey(ctx):
            ui.pushbuffer()
            tmpl.show(ctx)
            return ui.popbuffer()

    count = pct = 0
    rate = {}
    df = False
    if opts.get('date'):
        df = util.matchdate(opts['date'])

    get = util.cachefunc(lambda r: repo[r].changeset())
    # Pre-1.4 walkchangerevs(): phased (state, rev, fns) iterator.
    changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, pats, get, opts)
    for st, rev, fns in changeiter:
        if not st == 'add':
            continue
        if df and not df(get(rev)[2][0]): # doesn't match date format
            continue

        ctx = repo[rev]
        key = getkey(ctx)
        key = amap.get(key, key) # alias remap
        if opts.get('changesets'):
            rate[key] = rate.get(key, 0) + 1
        else:
            parents = ctx.parents()
            if len(parents) > 1:
                ui.note(_('Revision %d is a merge, ignoring...\n') % (rev,))
                continue

            ctx1 = parents[0]
            lines = changedlines(ui, repo, ctx1, ctx, fns)
            rate[key] = rate.get(key, 0) + lines

        if opts.get('progress'):
            count += 1
            newpct = int(100.0 * count / max(len(repo), 1))
            if pct < newpct:
                pct = newpct
                ui.write("\r" + _("generating stats: %d%%") % pct)
                sys.stdout.flush()

    if opts.get('progress'):
        # Clear the progress line.
        ui.write("\r")
        sys.stdout.flush()

    return rate
def jcheck(ui, repo, **opts):
    """check changesets against JDK standards"""
    ui.debug("jcheck repo=%s opts=%s\n" % (repo.path, opts))

    repocompat(repo)
    if not repo.local():
        raise error_Abort("repository '%s' is not local" % repo.path)

    if not os.path.exists(os.path.join(repo.root, ".jcheck")):
        ui.status("jcheck not enabled (no .jcheck in repository root)\n")
        return Pass

    if len(opts["rev"]) == 0:
        opts["rev"] = ["tip"]

    # dict.get() replaces the removed-in-Py3 has_key(); same semantics
    # (falsy when absent, the stored value otherwise).
    strict = opts.get("strict", False)
    lax = opts.get("lax", False)
    if strict:
        lax = False

    ch = checker(ui, repo, strict, lax)
    ch.check_repo()

    try:
        nop = lambda c, fns: None
        # 'ctxs' rather than 'iter': avoid shadowing the builtin.
        ctxs = cmdutil.walkchangerevs(repo, _matchall(repo), opts, nop)
        for ctx in ctxs:
            ch.check(ctx.rev(), ctx.node())
    except (AttributeError, TypeError):
        # AttributeError: matchall does not exist in hg < 1.1
        # TypeError: walkchangerevs args differ in hg <= 1.3.1
        get = util.cachefunc(lambda r: repo.changectx(r).changeset())
        changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, [], get, opts)
        if ui.debugflag:
            displayer = cmdutil.show_changeset(ui, repo, opts, True, matchfn)
        for st, rev, fns in changeiter:
            if st == 'add':
                node = repo.changelog.node(rev)
                if ui.debugflag:
                    displayer.show(rev, node, copies=False)
                ch.check(rev, node)
            elif st == 'iter':
                if ui.debugflag:
                    displayer.flush(rev)

    if ch.rv == Fail:
        ui.status("\n")
    return ch.rv
def countrate(ui, repo, amap, *pats, **opts):
    """Calculate stats"""
    if opts.get('dateformat'):
        def getkey(ctx):
            t, tz = ctx.date()
            date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
            return date.strftime(opts['dateformat'])
    else:
        tmpl = opts.get('oldtemplate') or opts.get('template')
        tmpl = maketemplater(ui, repo, tmpl)
        def getkey(ctx):
            ui.pushbuffer()
            tmpl.show(ctx)
            return ui.popbuffer()

    # Mutable dict so the nested prep() can bump the progress counter.
    state = {'count': 0}
    rate = {}
    df = False
    if opts.get('date'):
        df = util.matchdate(opts['date'])

    m = scmutil.match(repo[None], pats, opts)
    def prep(ctx, fns):
        rev = ctx.rev()
        if df and not df(ctx.date()[0]): # doesn't match date format
            return

        key = getkey(ctx).strip()
        key = amap.get(key, key) # alias remap
        if opts.get('changesets'):
            rate[key] = (rate.get(key, (0, ))[0] + 1, 0)
        else:
            parents = ctx.parents()
            if len(parents) > 1:
                ui.note(_('revision %d is a merge, ignoring...\n') % (rev, ))
                return

            ctx1 = parents[0]
            lines = changedlines(ui, repo, ctx1, ctx, fns)
            # Element-wise (added, removed) accumulation.
            rate[key] = [r + l for r, l in zip(rate.get(key, (0, 0)), lines)]

        state['count'] += 1
        ui.progress(_('analyzing'), state['count'], total=len(repo),
                    unit=_('revisions'))

    # The walk drives prep(); the yielded ctxs themselves are unused.
    for ctx in cmdutil.walkchangerevs(repo, m, opts, prep):
        continue

    ui.progress(_('analyzing'), None)

    return rate
def gather_metrics(ui, repo, *pats, **opts):
    """Walk the matched changesets and build a dict of per-changeset
    metrics keyed by changeset hex id (committer, date, message,
    parsed commit-message fields, changed files/lines)."""
    # This is my code to gather what we need for metrics
    state = {'count': 0}
    metrics = {}
    df = False
    if opts.get('date'):
        df = util.matchdate(opts['date'])

    m = scmutil.match(repo[None], pats, opts)

    def walker(ctx, fns):
        #import pdb
        #pdb.set_trace()
        # Verify that this change is inside our date
        # These are passed into the match and walker functions via the opts param
        # Not sure this is needed, but churn had it, so keeping for now
        # TODO: Experiment with whether this extra check is necessary
        if df and not df(ctx.date()[0]):
            return

        # Create the chgset's object in our tracker
        chgsetID = ctx.hex()
        metrics[chgsetID] = {}
        metrics[chgsetID]['is_merge'] = len(ctx.parents()) > 1
        ctx1 = ctx.parents()[0]
        metrics[chgsetID]['parents'] = ctx.parents()[0].hex()
        user = ctx.user()
        metrics[chgsetID]['committer'] = user
        # Extract the address between the first '<' and '>' of the user
        # string; assumes "Name <email>" form — TODO confirm.
        metrics[chgsetID]['committer_email'] = user[user.find('<') + 1:user.find('>')]
        metrics[chgsetID]['committer_name'] = user.split('<')[0].strip()
        t, tz = ctx.date()
        d = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
        metrics[chgsetID]['datestamp'] = d.strftime('%Y-%m-%dT%H:%M:%SZ')
        # If we have a robot committer, don't bother parsing the commit message
        metrics[chgsetID]['msg'] = ctx.description()
        if '*****@*****.**' not in user:
            metrics[chgsetID]['bug'], metrics[chgsetID]['is_backout'], \
                metrics[chgsetID]['backout_rev'], metrics[chgsetID]['reviewer'], \
                metrics[chgsetID]['approver'] = parse_commit_msg(ctx.description())
        metrics[chgsetID]['files'] = get_lines_and_files(
            ui, repo, ctx1, ctx, fns)
        state['count'] += 1
        ui.progress(_('analyzing'), state['count'], total=len(repo))

    # The walk drives walker(); the yielded ctxs themselves are unused.
    for ctx in cmdutil.walkchangerevs(repo, m, opts, walker):
        continue

    ui.progress(_('analyzing'), None)

    return metrics
def countrate(ui, repo, amap, *pats, **opts):
    """Calculate stats"""
    if opts.get("dateformat"):
        def getkey(ctx):
            t, tz = ctx.date()
            date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
            return date.strftime(opts["dateformat"])
    else:
        tmpl = opts.get("template", "{author|email}")
        tmpl = maketemplater(ui, repo, tmpl)
        def getkey(ctx):
            ui.pushbuffer()
            tmpl.show(ctx)
            return ui.popbuffer()

    # Mutable dict so the nested prep() can bump the progress counter.
    state = {"count": 0}
    rate = {}
    df = False
    if opts.get("date"):
        df = util.matchdate(opts["date"])

    m = scmutil.match(repo[None], pats, opts)
    def prep(ctx, fns):
        rev = ctx.rev()
        if df and not df(ctx.date()[0]): # doesn't match date format
            return

        key = getkey(ctx).strip()
        key = amap.get(key, key) # alias remap
        if opts.get("changesets"):
            rate[key] = (rate.get(key, (0,))[0] + 1, 0)
        else:
            parents = ctx.parents()
            if len(parents) > 1:
                ui.note(_("Revision %d is a merge, ignoring...\n") % (rev,))
                return

            ctx1 = parents[0]
            lines = changedlines(ui, repo, ctx1, ctx, fns)
            # Element-wise (added, removed) accumulation.
            rate[key] = [r + l for r, l in zip(rate.get(key, (0, 0)), lines)]

        state["count"] += 1
        ui.progress(_("analyzing"), state["count"], total=len(repo))

    # The walk drives prep(); the yielded ctxs themselves are unused.
    for ctx in cmdutil.walkchangerevs(repo, m, opts, prep):
        continue

    ui.progress(_("analyzing"), None)

    return rate
def gather_metrics(ui, repo, *pats, **opts):
    """Walk the matched changesets and build a dict of per-changeset
    metrics keyed by changeset hex id (committer, date, message,
    parsed commit-message fields, changed files/lines)."""
    # This is my code to gather what we need for metrics
    state = {'count': 0}
    metrics = {}
    df = False
    if opts.get('date'):
        df = util.matchdate(opts['date'])

    m = scmutil.match(repo[None], pats, opts)

    def walker(ctx, fns):
        #import pdb
        #pdb.set_trace()
        # Verify that this change is inside our date
        # These are passed into the match and walker functions via the opts param
        # Not sure this is needed, but churn had it, so keeping for now
        # TODO: Experiment with whether this extra check is necessary
        if df and not df(ctx.date()[0]):
            return

        # Create the chgset's object in our tracker
        chgsetID = ctx.hex()
        metrics[chgsetID] = {}
        metrics[chgsetID]['is_merge'] = len(ctx.parents()) > 1
        ctx1 = ctx.parents()[0]
        metrics[chgsetID]['parents'] = ctx.parents()[0].hex()
        user = ctx.user()
        metrics[chgsetID]['committer'] = user
        # Extract the address between the first '<' and '>' of the user
        # string; assumes "Name <email>" form — TODO confirm.
        metrics[chgsetID]['committer_email'] = user[user.find('<')+1:user.find('>')]
        metrics[chgsetID]['committer_name'] = user.split('<')[0].strip()
        t, tz = ctx.date()
        d = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
        metrics[chgsetID]['datestamp'] = d.strftime('%Y-%m-%dT%H:%M:%SZ')
        # If we have a robot committer, don't bother parsing the commit message
        metrics[chgsetID]['msg'] = ctx.description()
        if '*****@*****.**' not in user:
            metrics[chgsetID]['bug'], metrics[chgsetID]['is_backout'], \
                metrics[chgsetID]['backout_rev'], metrics[chgsetID]['reviewer'], \
                metrics[chgsetID]['approver'] = parse_commit_msg(ctx.description())
        metrics[chgsetID]['files'] = get_lines_and_files(ui, repo, ctx1, ctx, fns)
        state['count'] += 1
        ui.progress(_('analyzing'), state['count'], total=len(repo))

    # The walk drives walker(); the yielded ctxs themselves are unused.
    for ctx in cmdutil.walkchangerevs(repo, m, opts, walker):
        continue

    ui.progress(_('analyzing'), None)

    return metrics
def countrate(ui, repo, amap, *pats, **opts):
    """Calculate stats"""
    if opts.get('dateformat'):
        def getkey(ctx):
            t, tz = ctx.date()
            date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
            return date.strftime(opts['dateformat'])
    else:
        tmpl = opts.get('template', '{author|email}')
        tmpl = maketemplater(ui, repo, tmpl)
        def getkey(ctx):
            ui.pushbuffer()
            tmpl.show(ctx)
            return ui.popbuffer()

    # Mutable dict so the nested prep() can bump the progress counter.
    state = {'count': 0}
    rate = {}
    df = False
    if opts.get('date'):
        df = util.matchdate(opts['date'])

    m = cmdutil.match(repo, pats, opts)
    def prep(ctx, fns):
        rev = ctx.rev()
        if df and not df(ctx.date()[0]): # doesn't match date format
            return

        key = getkey(ctx)
        key = amap.get(key, key) # alias remap
        key = key.strip() # ignore leading and trailing spaces
        if opts.get('changesets'):
            rate[key] = (rate.get(key, (0,))[0] + 1, 0)
        else:
            parents = ctx.parents()
            if len(parents) > 1:
                ui.note(_('Revision %d is a merge, ignoring...\n') % (rev,))
                return

            ctx1 = parents[0]
            lines = changedlines(ui, repo, ctx1, ctx, fns)
            # Element-wise (added, removed) accumulation.
            rate[key] = [r + l for r, l in zip(rate.get(key, (0, 0)), lines)]

        state['count'] += 1
        ui.progress(_('analyzing'), state['count'], total=len(repo))

    # The walk drives prep(); the yielded ctxs themselves are unused.
    for ctx in cmdutil.walkchangerevs(repo, m, opts, prep):
        continue

    ui.progress(_('analyzing'), None)

    return rate
def get_history(self, limit=None): newer = None # 'newer' is the previously seen history tuple older = None # 'older' is the currently examined history tuple log = self.repos.repo.changelog # directory history if self.isdir: if not self.path: # special case for the root for r in xrange(log.rev(self.n), -1, -1): yield ('', self.repos.hg_display(log.node(r)), r and Changeset.EDIT or Changeset.ADD) return # Code compatibility for ''walkchangerevs'': # In Mercurial 0.7, it had 5 arguments, but # [hg 1d7d0c07e8f3] removed the 3rd argument ('cwd'). args = (self.repos.ui, self.repos.repo) if walkchangerevs.func_code.co_argcount == 5: args = args + (None, ) args = args + (['path:%s' % self.path], { 'rev': ['%s:0' % hex(self.n)] }) wcr = walkchangerevs(*args) matches = {} for st, rev, fns in wcr[0]: if st == 'window': matches.clear() elif st == 'add': matches[rev] = 1 elif st == 'iter': if matches[rev]: yield (self.path, self.repos.hg_display(log.node(rev)), Changeset.EDIT) return # file history file_n = self.manifest[self.path] file = self.repos.repo.file(self.path) # FIXME: COPY currently unsupported for file_rev in xrange(file.rev(file_n), -1, -1): rev = log.node(file.linkrev(file.node(file_rev))) older = (self.path, self.repos.hg_display(rev), Changeset.ADD) if newer: change = newer[0] == older[0] and Changeset.EDIT or \ Changeset.COPY newer = (newer[0], newer[1], change) yield newer newer = older if newer: yield newer
def downloadlfiles(ui, repo, rev=None):
    """Download largefiles for the changesets in *rev* into the cache.

    Returns (totalsuccess, totalmissing): counts of largefiles cached
    and of largefiles that could not be downloaded.
    """
    # 'matchfn' rather than 'match': don't shadow the mercurial 'match'
    # module name (the sibling implementation already uses 'matchfn').
    matchfn = scmutil.match(repo[None], [repo.wjoin(lfutil.shortname)], {})

    def prepare(ctx, fns):
        pass

    totalsuccess = 0
    totalmissing = 0
    if rev != []: # walkchangerevs on empty list would return all revs
        for ctx in cmdutil.walkchangerevs(repo, matchfn, {'rev' : rev},
                                          prepare):
            success, missing = cachelfiles(ui, repo, ctx.node())
            totalsuccess += len(success)
            totalmissing += len(missing)
    ui.status(_("%d additional largefiles cached\n") % totalsuccess)
    if totalmissing > 0:
        ui.status(_("%d largefiles failed to download\n") % totalmissing)
    return totalsuccess, totalmissing
def walk_changesets(repo, start, end, path_filter):
    """Yield {'msg', 'author'} dicts for changesets between *end* and
    *start*, supporting both GitPython and Mercurial repo objects.

    NOTE(review): str(...).decode('utf-8') is Python-2-only; this code
    assumes a Python 2 runtime.
    """
    if isinstance(repo, git.Repo):
        paths = [path_filter] if path_filter else []
        # Git: newest-to-oldest over the end..start range.
        for commit in repo.iter_commits(rev='%s..%s' % (end, start),
                                        paths=paths):
            yield {'msg': commit.message,
                   'author': '%s <%s>' % (commit.author.name,
                                          commit.author.email)}
    else:
        # Mercurial: restrict by path pattern when a filter is given.
        pats = () if not path_filter else ['path:%s' % path_filter]
        opts = {'rev': [start + ':' + end]}
        matchfn = match.match(repo.root, repo.getcwd(), pats)
        def prep(ctx, fns):
            pass
        for rev in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
            yield {'msg': rev.description(),
                   'author': str(rev.user()).decode('utf-8')}
def downloadlfiles(ui, repo, rev=None):
    """Fetch largefiles for the given revisions into the local cache.

    Returns a (cached, failed) pair of counts.
    """
    matchfn = scmutil.match(repo[None], [repo.wjoin(lfutil.shortname)], {})

    def prepare(ctx, fns):
        pass

    cached = 0
    failed = 0
    # Guard: walkchangerevs on an empty rev list would walk every rev.
    if rev != []:
        walk = cmdutil.walkchangerevs(repo, matchfn, {'rev' : rev}, prepare)
        for ctx in walk:
            success, missing = cachelfiles(ui, repo, ctx.node())
            cached += len(success)
            failed += len(missing)
    ui.status(_("%d additional largefiles cached\n") % cached)
    if failed > 0:
        ui.status(_("%d largefiles failed to download\n") % failed)
    return cached, failed
def filtered_log_generator(repo, pats, opts): """Fill view model iteratively repo - Mercurial repository object pats - list of file names or patterns opts - command line options for log command """ # Log searches: pattern, keyword, date, etc df = False if opts["date"]: df = util.matchdate(opts["date"]) stack = [] get = util.cachefunc(lambda r: repo.changectx(r).changeset()) changeiter, matchfn = cmdutil.walkchangerevs(repo.ui, repo, pats, get, opts) for st, rev, fns in changeiter: if st == "iter": if stack: yield stack.pop() continue if st != "add": continue parents = __get_parents(repo, rev) if opts["no_merges"] and len(parents) == 2: continue if opts["only_merges"] and len(parents) != 2: continue if df: changes = get(rev) if not df(changes[2][0]): continue # TODO: add copies/renames later if opts["keyword"]: changes = get(rev) miss = 0 for k in [kw.lower() for kw in opts["keyword"]]: if not (k in changes[1].lower() or k in changes[4].lower() or k in " ".join(changes[3]).lower()): miss = 1 break if miss: continue stack.append((rev, (0, 0), [], parents))
def countrate(ui, repo, *pats, **opts):
    """Calculate stats"""
    def getkey(ctx):
        t, tz = ctx.date()
        return int((float(t) - tz) / 86400) * 86400 # bin by day

    # Mutable dict so the nested prep() can bump the progress counter.
    state = {'count': 0}
    rate = {}
    df = False
    if opts.get('date'):
        df = util.matchdate(opts['date'])

    m = scmutil.match(repo[None], pats, opts)
    def prep(ctx, fns):
        rev = ctx.rev()
        if df and not df(ctx.date()[0]): # doesn't match date format
            return

        key = getkey(ctx)
        if opts.get('changesets'):
            rate[key] = rate.get(key, 0) + 1
        else:
            parents = ctx.parents()
            if len(parents) > 1:
                ui.note(_('Revision %d is a merge, ignoring...\n') % (rev,))
                return

            ctx1 = parents[0]
            lines = changedlines(ui, repo, ctx1, ctx, fns)
            rate[key] = rate.get(key, 0) + lines

        state['count'] += 1
        ui.progress(_('analyzing'), state['count'], total=len(repo))

    # The walk drives prep(); the yielded ctxs themselves are unused.
    for ctx in cmdutil.walkchangerevs(repo, m, opts, prep):
        continue

    ui.progress(_('analyzing'), None)

    return rate
def _walkctxs(self, base, head, follow=False, pick=None):
    '''Generate changectxs between BASE and HEAD.

    Walk changesets between BASE and HEAD (in the order implied by
    their relation), following a given branch if FOLLOW is a true
    value, yielding changectxs where PICK (if specified) returns a
    true value.

    PICK is a function of one argument, a changectx.'''
    accepted = {}

    def record(ctx, fns):
        # Evaluate PICK once per changeset during the walk's prep phase.
        accepted[ctx.rev()] = pick(ctx) if pick else True

    walk_opts = {'rev': ['%s:%s' % (base.rev(), head.rev())],
                 'follow': follow}
    everything = cmdutil.matchall(self.repo)
    for ctx in cmdutil.walkchangerevs(self.repo, everything, walk_opts,
                                      record):
        if accepted[ctx.rev()]:
            yield ctx
def filtered_log_generator(repo, pats, opts):
    '''Fill view model iteratively

    repo - Mercurial repository object
    pats - list of file names or patterns
    opts - command line options for log command
    '''
    # Revisions that pass every filter, collected by prep() during the walk.
    matching_revs = []
    only_branch = opts.get('branch', None)
    df = False
    if opts['date']:
        df = util.matchdate(opts['date'])

    def prep(ctx, fns):
        if only_branch and ctx.branch() != only_branch:
            return
        if opts['no_merges'] and len(ctx.parents()) == 2:
            return
        if opts['only_merges'] and len(ctx.parents()) != 2:
            return
        if df and not df(ctx.date()[0]):
            return
        if opts['user'] and not [k for k in opts['user'] if k in ctx.user()]:
            return
        if opts['keyword']:
            # Each keyword must match user, description, or file list;
            # the for/else runs only when no keyword matched.
            for k in [kw.lower() for kw in opts['keyword']]:
                if (k in ctx.user().lower() or
                        k in ctx.description().lower() or
                        k in " ".join(ctx.files()).lower()):
                    break
            else:
                return
        matching_revs.append(ctx.rev())

    m = match.match(repo.root, repo.root, pats)
    for ctx in cmdutil.walkchangerevs(repo, m, opts, prep):
        if ctx.rev() in matching_revs:
            yield (ctx.rev(), (0,0), [], None)
def _walkctxs(self, base, head, follow=False, pick=None):
    '''Generate changectxs between BASE and HEAD.

    Walk changesets between BASE and HEAD (in the order implied by
    their relation), following a given branch if FOLLOW is a true
    value, yielding changectxs where PICK (if specified) returns a
    true value.

    PICK is a function of one argument, a changectx.'''
    opts = { 'rev': ['%s:%s' % (base.rev(), head.rev())],
             'follow': follow }
    changectx = self.repo.changectx
    getcset = util.cachefunc(lambda r: changectx(r).changeset())
    #
    # See the docstring of mercurial.cmdutil.walkchangerevs() for
    # the phased approach to the iterator returned. The important
    # part to note is that the 'add' phase gathers nodes, which
    # the 'iter' phase then iterates through.
    #
    changeiter = cmdutil.walkchangerevs(self.ui, self.repo, [], getcset,
                                        opts)[0]
    # rev -> changectx for revisions accepted by PICK during 'add'.
    matched = {}
    for st, rev, fns in changeiter:
        if st == 'add':
            ctx = changectx(rev)
            if not pick or pick(ctx):
                matched[rev] = ctx
        elif st == 'iter':
            if rev in matched:
                yield matched[rev]
def _walkctxs(self, base, head, follow=False, pick=None):
    '''Generate changectxs between BASE and HEAD.

    Walk changesets between BASE and HEAD (in the order implied by
    their relation), following a given branch if FOLLOW is a true
    value, yielding changectxs where PICK (if specified) returns a
    true value.

    PICK is a function of one argument, a changectx.'''
    # rev -> whether PICK accepted it; filled by prep() during the walk.
    chosen = {}

    def prep(ctx, fns):
        chosen[ctx.rev()] = not pick or pick(ctx)

    opts = { 'rev': ['%s:%s' % (base.rev(), head.rev())],
             'follow': follow }
    matcher = cmdutil.matchall(self.repo)
    for ctx in cmdutil.walkchangerevs(self.repo, matcher, opts, prep):
        if chosen[ctx.rev()]:
            yield ctx
if len(changedFiles) == 0: ui.write(" (none)\n") else: for changedFile in changedFiles: ui.write(" %s\n" % changedFile) # Expand files out to their current full paths matchfn = scmutil.match(repo[None], changedFiles, default='relglob') exactFiles = repo.walk(matchfn) if len(exactFiles) == 0: return matchfn = scmutil.match(repo[None], exactFiles, default='path') left = opts['limit'] opts['rev'] = None for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, lambda a,b: None): if left == 0: break left -= 1 yield repo[ctx.rev()] fileRe = re.compile(r"^\+\+\+ (?:b/)?([^\s]*)", re.MULTILINE) suckerRe = re.compile(r"[^s-]r=(\w+)") supersuckerRe = re.compile(r"sr=(\w+)") def reviewers(ui, repo, patchfile=None, **opts): '''Suggest a reviewer for a patch Scan through the last LIMIT commits to find candidate reviewers for a patch (or set of files).
def walk(repo, revs):
    '''Returns revisions in repo specified by the string revs'''
    matcher = match.always(repo.root, None)
    walk_opts = {'rev': [revs.encode('ascii', 'ignore')]}
    nop = lambda *args: None
    return cmdutil.walkchangerevs(repo, matcher, walk_opts, nop)
def impl_hg_node(repo, cid, path, *args):
    """Return the hex node of the first changeset matching *path* at
    revision *cid*.

    Uses the next() builtin instead of the Python-2-only .next()
    iterator method, so the code also runs under Python 3.
    """
    m = cmdutil.match(repo, pats=[path], default=path)
    rev_iter = cmdutil.walkchangerevs(repo, m, {'rev': cid},
                                      lambda c, f: None)
    return next(rev_iter).hex()
def graphviz(ui, repo, *limits, **opts):
    '''generate DOT language source to visualize changeset tree.

    "limit-spec" should be formatted in one of styles shown below:
        1. Start,End
        2. Start,
        3. ,End

    With "revision" option, "Start" and "End" should be ones which are
    acceptable for "-r" option of other Mercurial commands.

    With "datetime" option, they should be in dateTime format of XML
    Schema, which is known as "YYYY-MM-DDThh:mm:ss".

    With "interval" option, they should be specified as interval from
    invocation time(= "now") in second.

    Both "Start" and "End" are treated as "inclusive".
    '''
    ################
    # Predicate helpers: each accept_by_* returns a "lambda rev, node"
    # that decides whether a revision is rendered.
    def between_datetime(node, lower_dt, upper_dt):
        date = datetimestr(repo.changelog.read(node)[2])
        return ((lower_dt <= date) and (date <= upper_dt))

    def accept_by_dt(lower_dt, upper_dt):
        if (not dtregexp.match(lower_dt)):
            raise util.Abort(_('illegal datetime format: %s') % (lower_dt))
        if (not dtregexp.match(upper_dt)):
            raise util.Abort(_('illegal datetime format: %s') % (upper_dt))
        ui.note(('# visualize revision between "%s" - "%s"\n') %
                (lower_dt, upper_dt))
        return lambda rev, node: between_datetime(node, lower_dt, upper_dt)

    def accept_by_interval(now, lower_interval, upper_interval):
        lower_dt = datetimestr((now - lower_interval, None))
        upper_dt = datetimestr((now - upper_interval, None))
        ui.note(('# visualize revision between "%s" - "%s"\n') %
                (lower_dt, upper_dt))
        return lambda rev, node: between_datetime(node, lower_dt, upper_dt)

    def accept_by_rev(lower_rev, upper_rev):
        ui.note(('# visualize revision between "%d" - "%d"\n') %
                (lower_rev, upper_rev))
        return lambda rev, node: ((lower_rev <= rev) and (rev <= upper_rev))

    def getrev(revname):
        return repo.changelog.rev(repo.lookup(revname))

    ui.note('# %s\n' % opts)

    ################
    # check limit type: -r / -d / -i are mutually exclusive
    types = 0
    types += (opts['revision'] and 1 or 0)
    types += (opts['datetime'] and 1 or 0)
    types += (opts['interval'] and 1 or 0)
    if 1 < types:
        raise util.Abort(_('-r, -d and -i are mutually exclusive'))

    ################
    # check urlbase options:
    if opts['urlbase']:
        ui.note(('# "-u %s" is specified\n') % (opts['urlbase']))
    urlbase = opts['urlbase'] or ''

    ################
    # check aliases option: one "alias=actual" mapping per file line
    aliases = {}  # (key, val) => (username, alias)
    getalias = lambda name: aliases.get(name, name)
    if opts['aliases']:
        ui.note(('# "-a %s" is specified\n') % opts['aliases'])
        if os.path.isfile(opts['aliases']):
            f = open(opts['aliases'], "r")
            try:
                for line in f.readlines():
                    line = line.strip()
                    # FIX: skip blank lines — the original indexed
                    # line[0] and raised IndexError on an empty line.
                    if not line or line.startswith('#'):
                        continue
                    # FIX: split on the first '=' only, so an "actual"
                    # value containing '=' no longer raises ValueError.
                    alias, actual = line.split('=', 1)
                    aliases[actual] = alias
                    ui.note(('# use "%s" as alias for "%s"\n')
                            % (alias, actual))
            finally:
                # FIX: close the file even if parsing aborts.
                f.close()
        else:
            ui.warn(('%s: file not found, so skip reading aliases\n')
                    % (opts['aliases']))

    ################
    # check format:
    ui.note(('# "%s" is recognized as label format\n')
            % (opts['format_label']))
    ui.note(('# "%s" is recognized as tooltip format\n')
            % (opts['format_tooltip']))

    ################
    # check attribute renderring hook: hooks may add extra DOT
    # attributes per node / edge / cluster.
    node_hook = lambda repo, rev, node, changes, alias: None
    if opts['node_attr_hook']:
        hookname = opts['node_attr_hook']
        ui.note(('# try to use "%s" as node attribute hook\n')
                % (escapeNL(hookname)))
        node_hook = get_hook(hookname)
    edge_hook = lambda repo, parent, child, first_parent, alias: None
    # (parentrev, parentnode, parentchanges) = parent
    # (childrev, childnode, childchanges) = child
    if opts['edge_attr_hook']:
        hookname = opts['edge_attr_hook']
        ui.note(('# try to use "%s" as edge attribute hook\n')
                % (escapeNL(hookname)))
        edge_hook = get_hook(hookname)
    cluster_hook = lambda repo, branch, date: None
    if opts['cluster_attr_hook']:
        hookname = opts['cluster_attr_hook']
        ui.note(('# try to use "%s" as cluster attribute hook\n')
                % (escapeNL(hookname)))
        cluster_hook = get_hook(hookname)

    ################
    # check limit specificaitons: a revision is accepted if ANY spec
    # accepts it.
    accepts = []  # list of "lambda: rev, node:"
    def acceptable(rev, node):
        for accept in accepts:
            if accept(rev, node):
                return True
        return False
    if (0 == len(limits)):
        # all revisions will be visualized
        accepts.append(lambda rev, node: True)
    now = long(time.time())
    for limit in limits:
        fields = limit.split(',')
        if ((2 != len(fields)) or
            ((fields[0] == "") and (fields[1] == ""))):
            raise util.Abort(_('invalid limit specification: "%s"')
                             % (limit))
        if opts['datetime']:
            accepts.append(
                accept_by_dt(getval(fields[0], "0001-01-01T00:00:00"),
                             getval(fields[1], "9999-12-31T23:59:59")))
        elif opts['interval']:
            accepts.append(
                accept_by_interval(now, int(getval(fields[0], now)),
                                   int(getval(fields[1], '0'))))
        else:  # default type is revision
            accepts.append(
                accept_by_rev(getrev(getval(fields[0], "1")),
                              getrev(getval(fields[1], "-1"))))

    ################
    # gather target revisions (pre-1.4 walkchangerevs signature):
    revmap = {}  # (key, val) => (rev, (node, changes))
    get = util.cachefunc(lambda r: repo.changectx(r).changeset())
    changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, pats=None,
                                                 change=get,
                                                 opts={'rev': []})
    for st, rev, b in changeiter:
        if (st == 'iter'):
            node = repo.lookup(rev)
            if acceptable(rev, node):
                revmap[rev] = (node, repo.changelog.read(node))

    ################
    # render source code for DOT language: optionally cluster nodes by
    # branch and by date.
    ui.write(("digraph {\n"))
    branches = {}
    branchindex = 0
    group_by_branch = opts['group_by_branch']
    group_by_date = opts['group_by_date']
    for revision, (node, changes) in revmap.items():
        branch = (group_by_branch and changes[5].get("branch")
                  or 'default')
        if not branches.has_key(branch):
            branches[branch] = ({}, branchindex)
            branchindex += 1
        branchcluster, index = branches[branch]
        date = (group_by_date and datestr(changes[2]) or '')
        datecluster = branchcluster.get(date)
        if not datecluster:
            datecluster = []
            branchcluster[date] = datecluster
        datecluster.append(revision)
    for branch, (branchcluster, index) in branches.items():
        if branch != 'default':
            ui.write(('subgraph cluster_%d {\n') % (index))
        for date, revs in branchcluster.items():
            if '' != date:
                ui.write(('subgraph cluster_%s_%s \n{') % (index, date))
                hooked = cluster_hook(repo, branch, date) or ''
                ui.write(('%s\n') % (hooked))
            for revision in revs:
                node, changes = revmap[revision]
                # %-format keyword map for label/tooltip templates.
                keywordmap = {'u': getalias(changes[1]),
                              'U': changes[1],
                              'r': str(revision),
                              's': short(node),
                              'h': hex(node),
                              'd': datetimestr(changes[2]),
                              't': get_1stline(changes[4])}
                attr = []
                attr.append(('URL = "%s/rev/%s"')
                            % (urlbase, short(node)))
                attr.append(('label = "%s"')
                            % (escapeDQ(opts['format_label'] % keywordmap)))
                attr.append(('tooltip = "%s"')
                            % (escapeDQ(opts['format_tooltip'] % keywordmap)))
                hooked = node_hook(repo, revision, node, changes, getalias)
                if hooked:
                    attr.append(hooked)
                ui.write(('%d [ %s ]\n')
                         % (revision, string.join(attr, ', ')))
            if '' != date:
                ui.write(('}\n'))
        if branch != 'default':
            hooked = cluster_hook(repo, branch, '') or ''
            ui.write(('%s\n') % (hooked))
            ui.write(('}\n'))
    if opts['show_tags']:
        tagindex = 0
        for tag, node in repo.tagslist():
            revision = repo.changelog.rev(node)
            if revmap.has_key(revision):
                ui.write(('tag_%d [ label="%s" shape=plaintext ]\n'
                          % (tagindex, escapeDQ(tag))))
                ui.write(('tag_%d -> %s [ arrowhead=none, arrowtail=dot ]'
                          % (tagindex, revision)))
                tagindex += 1
    # Emit parent -> child edges, but only between rendered revisions.
    for revision, (node, changes) in revmap.items():
        child = (revision, node, changes)
        first_parent = True
        for parentnode in repo.changelog.parents(node):
            parentrev = repo.changelog.rev(parentnode)
            if revmap.has_key(parentrev):
                pnode, pchanges = revmap[parentrev]
                parent = (parentrev, pnode, pchanges)
                hooked = edge_hook(repo, parent, child, first_parent,
                                   getalias)
                ui.write(('%d -> %d [ %s ] \n')
                         % (parentrev, revision, (hooked or ' ')))
                first_parent = False
    ui.write(("}\n"))
def walk(base, exclude_pattern, start_revision, mode, archive_base, project):
    """walk all revisions contained in the repository"""
    repo = hg.repository(ui.ui(), base)
    c = repo[None]
    # Refuse to run with a dirty working copy: hg.clean() below would
    # discard local edits.
    if c.modified() or c.added() or c.removed():
        raise util.Abort(_("uncommitted local changes"))
    pats = ()
    # last_revision = get_last_revision(url)
    # if(start_revision and last_revision == 0):
    last_revision = start_revision
    # else:
    #     last_revision += 1
    opts = {'rev': [str(last_revision) + ':'], 'date': '', 'user': ''}

    def create_metrics(in_file_names):
        # create the metrics from the current revision
        context = {}
        context['base'] = base
        context['in_file_names'] = in_file_names
        context['include_metrics'] = [('sloc', 'SLOCMetric'),
                                      ('mccabe', 'McCabeMetric')]
        context['quiet'] = True
        context['verbose'] = False
        # context['format'] = 'CSV'
        res = metrics.process(context)
        return metrics.format(res, 'CSV')

    def process_revision(revision):
        # change to revision and create metrics
        print 'Processing revision : %s' % revision
        # change repository to revision
        hg.clean(repo, revision)
        # collect files to process
        exclude = re.compile('|'.join([translate(ep)
                                       for ep in exclude_pattern]))
        files = [os.path.relpath(os.path.join(dp, name), base)
                 for (dp, dn, fn) in os.walk(base)
                 for name in fn
                 if not exclude.match(os.path.relpath(os.path.join(dp, name),
                                                      base))]
        print 'Number of files to process : %d' % len(files)
        return create_metrics(files)
        #post_metrics(revision_metrics, revision, url)

    def write_archive(archive_name, data):
        """write the metrics to archive."""
        tar = tarfile.open(archive_name, 'w:gz')
        # create a file record
        output = StringIO.StringIO(data)
        info = tar.tarinfo()
        info.name = 'metrics.txt'
        # info.uname = 'pat'
        # info.gname = 'users'
        info.size = output.len
        # add the file to the tar and close it
        tar.addfile(info, output)
        tar.close()

    # from commands.py log:
    matchfn = scmutil.match(repo[None], pats, opts)
    #limit = cmdutil.loglimit(opts)
    #count = 0
    #endrev = None
    #if opts.get('copies') and opts.get('rev'):
    #    endrev = max(cmdutil.revrange(repo, opts.get('rev'))) + 1
    df = False
    if opts["date"]:
        df = util.matchdate(opts["date"])

    def prep(ctx, fns):
        # Filter callback for walkchangerevs — returning early means the
        # changeset is still yielded (this prep has no veto here; it
        # mirrors the filtering skeleton from Mercurial's commands.py log).
        rev = ctx.rev()
        parents = [p for p in repo.changelog.parentrevs(rev)
                   if p != nullrev]
        if opts.get('no_merges') and len(parents) == 2:
            return
        if opts.get('only_merges') and len(parents) != 2:
            return
        if opts.get('only_branch') and ctx.branch() not in opts['only_branch']:
            return
        if df and not df(ctx.date()[0]):
            return
        if opts['user'] and not [k for k in opts['user'] if k in ctx.user()]:
            return
        if opts.get('keyword'):
            for k in [kw.lower() for kw in opts['keyword']]:
                if (k in ctx.user().lower() or
                    k in ctx.description().lower() or
                    k in " ".join(ctx.files()).lower()):
                    break
            else:
                return

    # NOTE(review): 'revisions' is never used below — dead local.
    revisions = []
    rev_last_date = -1
    for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
        rev_date = util.datestr(ctx.date(), '%Y%m%d')
        # In 'daily' mode only the first changeset of each calendar day
        # is processed.
        if(mode == 'daily' and rev_last_date == rev_date):
            #print '%s Skipped rev %s' % (rev_date, ctx.rev())
            pass
        else:
            result = process_revision(ctx.rev())
            datestamp = util.datestr(ctx.date(), '%y%m%d%H')
            if not os.path.exists(os.path.join(archive_base, datestamp)):
                # create the directories
                os.makedirs(os.path.join(archive_base, datestamp))
            archive_name = os.path.join(archive_base, datestamp,
                                        '%s-%s-metrics.tgz'
                                        % (project, datestamp))
            write_archive(archive_name, result)
            rev_last_date = rev_date
def graphviz(ui, repo, *limits, **opts):
    '''generate DOT language source to visualize changeset tree.

    "limit-spec" should be formatted in one of styles shown below:
        1. Start,End
        2. Start,
        3. ,End

    With "revision" option, "Start" and "End" should be ones which are
    acceptable for "-r" option of other Mercurial commands.

    With "datetime" option, they should be in dateTime format of XML
    Schema, which is known as "YYYY-MM-DDThh:mm:ss".

    With "interval" option, they should be specified as interval from
    invocation time(= "now") in second.

    Both "Start" and "End" are treated as "inclusive".
    '''
    # NOTE(review): this is a near-duplicate of the other graphviz()
    # in this file; presumably one should be removed — confirm owner.
    ################
    # Predicate helpers returning "lambda rev, node" acceptance tests.
    def between_datetime(node, lower_dt, upper_dt):
        date = datetimestr(repo.changelog.read(node)[2])
        return ((lower_dt <= date) and (date <= upper_dt))

    def accept_by_dt(lower_dt, upper_dt):
        if (not dtregexp.match(lower_dt)):
            raise util.Abort(_('illegal datetime format: %s') % (lower_dt))
        if (not dtregexp.match(upper_dt)):
            raise util.Abort(_('illegal datetime format: %s') % (upper_dt))
        ui.note(('# visualize revision between "%s" - "%s"\n') %
                (lower_dt, upper_dt))
        return lambda rev, node: between_datetime(node, lower_dt, upper_dt)

    def accept_by_interval(now, lower_interval, upper_interval):
        lower_dt = datetimestr((now - lower_interval, None))
        upper_dt = datetimestr((now - upper_interval, None))
        ui.note(('# visualize revision between "%s" - "%s"\n') %
                (lower_dt, upper_dt))
        return lambda rev, node: between_datetime(node, lower_dt, upper_dt)

    def accept_by_rev(lower_rev, upper_rev):
        ui.note(('# visualize revision between "%d" - "%d"\n') %
                (lower_rev, upper_rev))
        return lambda rev, node: ((lower_rev <= rev) and (rev <= upper_rev))

    def getrev(revname):
        return repo.changelog.rev(repo.lookup(revname))

    ui.note('# %s\n' % opts)

    ################
    # check limit type: -r / -d / -i are mutually exclusive
    types = 0
    types += (opts['revision'] and 1 or 0)
    types += (opts['datetime'] and 1 or 0)
    types += (opts['interval'] and 1 or 0)
    if 1 < types:
        raise util.Abort(_('-r, -d and -i are mutually exclusive'))

    ################
    # check urlbase options:
    if opts['urlbase']:
        ui.note(('# "-u %s" is specified\n') % (opts['urlbase']))
    urlbase = opts['urlbase'] or ''

    ################
    # check aliases option: one "alias=actual" mapping per file line.
    aliases = {}  # (key, val) => (username, alias)
    getalias = lambda name: aliases.get(name, name)
    if opts['aliases']:
        ui.note(('# "-a %s" is specified\n') % opts['aliases'])
        if os.path.isfile(opts['aliases']):
            f = open(opts['aliases'], "r")
            for line in f.readlines():
                line = line.strip()
                # NOTE(review): line[0] raises IndexError on blank
                # lines, and split('=') raises on multiple '=' — this
                # parser assumes a well-formed aliases file; confirm.
                if '#' != line[0]:
                    alias, actual = line.split('=')
                    aliases[actual] = alias
                    ui.note(('# use "%s" as alias for "%s"\n')
                            % (alias, actual))
            f.close()
        else:
            ui.warn(('%s: file not found, so skip reading aliases\n')
                    % (opts['aliases']))

    ################
    # check format:
    ui.note(('# "%s" is recognized as label format\n')
            % (opts['format_label']))
    ui.note(('# "%s" is recognized as tooltip format\n')
            % (opts['format_tooltip']))

    ################
    # check attribute renderring hook: hooks may add extra DOT
    # attributes per node / edge / cluster.
    node_hook = lambda repo, rev, node, changes, alias: None
    if opts['node_attr_hook']:
        hookname = opts['node_attr_hook']
        ui.note(('# try to use "%s" as node attribute hook\n')
                % (escapeNL(hookname)))
        node_hook = get_hook(hookname)
    edge_hook = lambda repo, parent, child, first_parent, alias: None
    # (parentrev, parentnode, parentchanges) = parent
    # (childrev, childnode, childchanges) = child
    if opts['edge_attr_hook']:
        hookname = opts['edge_attr_hook']
        ui.note(('# try to use "%s" as edge attribute hook\n')
                % (escapeNL(hookname)))
        edge_hook = get_hook(hookname)
    cluster_hook = lambda repo, branch, date: None
    if opts['cluster_attr_hook']:
        hookname = opts['cluster_attr_hook']
        ui.note(('# try to use "%s" as cluster attribute hook\n')
                % (escapeNL(hookname)))
        cluster_hook = get_hook(hookname)

    ################
    # check limit specificaitons: a revision is rendered if ANY spec
    # accepts it.
    accepts = []  # list of "lambda: rev, node:"
    def acceptable(rev, node):
        for accept in accepts:
            if accept(rev, node):
                return True
        return False
    if (0 == len(limits)):
        # all revisions will be visualized
        accepts.append(lambda rev, node: True)
    now = long(time.time())
    for limit in limits:
        fields = limit.split(',')
        if ((2 != len(fields)) or
            ((fields[0] == "") and (fields[1] == ""))):
            raise util.Abort(_('invalid limit specification: "%s"')
                             % (limit))
        if opts['datetime']:
            accepts.append(accept_by_dt(getval(fields[0],
                                               "0001-01-01T00:00:00"),
                                        getval(fields[1],
                                               "9999-12-31T23:59:59")))
        elif opts['interval']:
            accepts.append(accept_by_interval(now,
                                              int(getval(fields[0], now)),
                                              int(getval(fields[1], '0'))))
        else:  # default type is revision
            accepts.append(accept_by_rev(getrev(getval(fields[0], "1")),
                                         getrev(getval(fields[1], "-1"))))

    ################
    # gather target revisions (pre-1.4 walkchangerevs signature):
    revmap = {}  # (key, val) => (rev, (node, changes))
    get = util.cachefunc(lambda r: repo.changectx(r).changeset())
    changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, pats=None,
                                                 change=get,
                                                 opts={ 'rev': [] })
    for st, rev, b in changeiter:
        if (st == 'iter'):
            node = repo.lookup(rev);
            if acceptable(rev, node):
                revmap[rev] = (node, repo.changelog.read(node))

    ################
    # render source code for DOT language: optionally cluster by branch
    # and by date.
    ui.write(("digraph {\n"))
    branches = { }
    branchindex = 0
    group_by_branch = opts['group_by_branch']
    group_by_date = opts['group_by_date']
    for revision, (node, changes) in revmap.items():
        branch = (group_by_branch and changes[5].get("branch")
                  or 'default')
        if not branches.has_key(branch):
            branches[branch] = ({ }, branchindex)
            branchindex += 1
        branchcluster, index = branches[branch]
        date = (group_by_date and datestr(changes[2]) or '')
        datecluster = branchcluster.get(date)
        if not datecluster:
            datecluster = [ ]
            branchcluster[date] = datecluster
        datecluster.append(revision)
    for branch, (branchcluster, index) in branches.items():
        if branch != 'default':
            ui.write(('subgraph cluster_%d {\n') % (index))
        for date, revs in branchcluster.items():
            if '' != date:
                ui.write(('subgraph cluster_%s_%s \n{') % (index, date))
                hooked = cluster_hook(repo, branch, date) or ''
                ui.write(('%s\n') % (hooked))
            for revision in revs:
                node, changes = revmap[revision]
                # %-format keyword map for label/tooltip templates.
                keywordmap = { 'u': getalias(changes[1]),
                               'U': changes[1],
                               'r': str(revision),
                               's': short(node),
                               'h': hex(node),
                               'd': datetimestr(changes[2]),
                               't': get_1stline(changes[4]) }
                attr = []
                attr.append(('URL = "%s/rev/%s"')
                            % (urlbase, short(node)))
                attr.append(('label = "%s"')
                            % (escapeDQ(opts['format_label'] % keywordmap)))
                attr.append(('tooltip = "%s"')
                            % (escapeDQ(opts['format_tooltip'] % keywordmap)))
                hooked = node_hook(repo, revision, node, changes, getalias)
                if hooked:
                    attr.append(hooked)
                ui.write(('%d [ %s ]\n')
                         % (revision, string.join(attr, ', ')))
            if '' != date:
                ui.write(('}\n'))
        if branch != 'default':
            hooked = cluster_hook(repo, branch, '') or ''
            ui.write(('%s\n') % (hooked))
            ui.write(('}\n'))
    if opts['show_tags']:
        tagindex = 0
        for tag, node in repo.tagslist():
            revision = repo.changelog.rev(node)
            if revmap.has_key(revision):
                ui.write(('tag_%d [ label="%s" shape=plaintext ]\n'
                          % (tagindex, escapeDQ(tag))))
                ui.write(('tag_%d -> %s [ arrowhead=none, arrowtail=dot ]'
                          % (tagindex, revision)))
                tagindex += 1
    # Emit parent -> child edges between rendered revisions only.
    for revision, (node, changes) in revmap.items():
        child = (revision, node, changes)
        first_parent = True
        for parentnode in repo.changelog.parents(node):
            parentrev = repo.changelog.rev(parentnode)
            if revmap.has_key(parentrev):
                pnode, pchanges = revmap[parentrev]
                parent = (parentrev, pnode, pchanges)
                hooked = edge_hook(repo, parent, child, first_parent,
                                   getalias)
                ui.write(('%d -> %d [ %s ] \n')
                         % (parentrev, revision, (hooked or ' ')))
                first_parent = False
    ui.write(("}\n"))
# NOTE(review): fragment — the enclosing generator's `def` line is not
# visible in this chunk; the indentation below reflects its body only.
if len(changedFiles) == 0:
    ui.write(" (none)\n")
else:
    for changedFile in changedFiles:
        ui.write(" %s\n" % changedFile)
# Expand files out to their current full paths
matchfn = scmutil.match(repo[None], changedFiles, default='relglob')
exactFiles = repo.walk(matchfn)
if len(exactFiles) == 0:
    return
matchfn = scmutil.match(repo[None], exactFiles, default='path')
# Yield at most opts['limit'] changesets touching the matched files.
left = opts['limit']
opts['rev'] = None
for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, lambda a, b: None):
    if left == 0:
        break
    left -= 1
    yield repo[ctx.rev()]

# Patterns used to pull file names and reviewer annotations out of a
# unified diff / commit message ("r=name", "sr=name").
fileRe = re.compile(r"^\+\+\+ (?:b/)?([^\s]*)", re.MULTILINE)
suckerRe = re.compile(r"[^s-]r=(\w+)")
supersuckerRe = re.compile(r"sr=(\w+)")

# NOTE(review): this decorator is truncated in this chunk — the rest of
# the option list and the decorated function lie outside the visible
# source.
@command('reviewers',
    [('f', 'file', [], 'see reviewers for FILE', 'FILE'),
     ('r', 'rev', [], 'see reviewers for revisions', 'REVS'),
     ('l', 'limit', 200, 'how many revisions back to scan', 'LIMIT'),
def show_hunk(ui, repo, *pats, **opts):
    # Walk changesets matching PATS/OPTS and display them through a
    # changeset_printer, honoring the log-style filter options
    # (merges, branch, user, keyword, ...), up to opts' limit.
    matchfn = scmutil.match(repo[None], pats, opts)
    limit = cmdutil.loglimit(opts)
    count = 0
    # Copy-tracing is disabled; the commented block mirrors the
    # commands.py log implementation it was derived from.
    getrenamed, endrev = None, None
    # if opts.get('copies'):
    #     if opts.get('rev'):
    #         endrev = max(scmutil.revrange(repo, opts.get('rev'))) + 1
    #     getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
    df = False
    # if opts["date"]:
    #     df = util.matchdate(opts["date"])
    branches = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in branches]
    patch = scmutil.matchall(repo)
    line = opts['line']
    # NOTE(review): changeset_printer here takes extra (False, line)
    # arguments — presumably a project-local subclass; confirm.
    displayer = changeset_printer(ui, repo, patch, opts, False, line)

    def prep(ctx, fns):
        # walkchangerevs preparation callback: apply the log-style
        # filters, then hand accepted changesets to the displayer.
        rev = ctx.rev()
        parents = [p for p in repo.changelog.parentrevs(rev)
                   if p != nullrev]
        if opts.get('no_merges') and len(parents) == 2:
            return
        if opts.get('only_merges') and len(parents) != 2:
            return
        if opts.get('branch') and ctx.branch() not in opts['branch']:
            return
        if not opts.get('hidden') and ctx.hidden():
            return
        if df and not df(ctx.date()[0]):
            return
        lower = encoding.lower
        if opts.get('user'):
            luser = lower(ctx.user())
            for k in [lower(x) for x in opts['user']]:
                if (k in luser):
                    break
            else:
                return
        if opts.get('keyword'):
            luser = lower(ctx.user())
            ldesc = lower(ctx.description())
            lfiles = lower(" ".join(ctx.files()))
            for k in [lower(x) for x in opts['keyword']]:
                if (k in luser or k in ldesc or k in lfiles):
                    break
            else:
                return
        copies = None
        # Dead branch while getrenamed stays None (see above).
        if getrenamed is not None and rev:
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, rev)
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if opts.get('patch') or opts.get('stat'):
            if opts.get('follow') or opts.get('follow_first'):
                # note: this might be wrong when following through merges
                revmatchfn = scmutil.match(repo[None], fns, default='path')
            else:
                revmatchfn = matchfn
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)

    for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
        if count == limit:
            break
        # flush() presumably returns whether anything was displayed for
        # this rev — only those count toward the limit; confirm against
        # the displayer implementation.
        if displayer.flush(ctx.rev()):
            count += 1
    displayer.close()