def pushlog_html(web):
    """WebCommand for producing the HTML view of the pushlog."""
    request = web.req
    query = pushlog_setup(web.repo, request)

    # Assemble the template keyword arguments one at a time.
    tmpl_args = {}
    tmpl_args[b'changenav'] = templateutil.mappinggenerator(
        pushlog_changenav, args=(query, ))
    tmpl_args[b'rev'] = 0
    tmpl_args[b'entries'] = templateutil.mappinggenerator(
        pushlog_changelist, args=(web, query, False))
    tmpl_args[b'latestentry'] = templateutil.mappinggenerator(
        pushlog_changelist, args=(web, query, True))
    tmpl_args[b'startdate'] = request.qsparams.get(b'startdate', b'1 week ago')
    tmpl_args[b'enddate'] = request.qsparams.get(b'enddate', b'now')
    tmpl_args[b'querydescription'] = query.description()
    tmpl_args[b'archives'] = web.archivelist(b"tip")

    # Template engines expect native-str keyword names.
    return web.sendtemplate(b'pushlog', **pycompat.strkwargs(tmpl_args))
def exchangepush(orig, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=(), opargs=None, **kwargs):
    """Route pushes to git repositories through the git handler.

    When ``remote`` is a gitrepo, build a pushoperation locally and let the
    githandler perform the push; otherwise defer to the wrapped ``orig``.
    Returns the pushoperation (git path) or whatever ``orig`` returns.
    """
    if isinstance(remote, gitrepo):
        pushop = exchange.pushoperation(
            repo,
            remote,
            force,
            revs,
            newbranch,
            bookmarks,
            **pycompat.strkwargs(opargs or {}),
        )
        # The git handler does the actual transfer; record its result the
        # way exchange.push would (cgresult on the pushoperation).
        pushop.cgresult = repo.githandler.push(remote.path, revs, bookmarks,
                                               force)
        return pushop
    else:
        # BUG FIX: the original hard-coded ``opargs=None`` here, silently
        # discarding any opargs the caller supplied. Forward it instead,
        # matching what the git branch above does.
        return orig(
            repo,
            remote,
            force,
            revs,
            newbranch,
            bookmarks=bookmarks,
            opargs=opargs,
            **kwargs,
        )
def changesetentry(orig, web, ctx):
    """Wraps webutil.changesetentry to provide extra metadata."""
    # Normalize the wrapped result to bytes keys, enrich it, then convert
    # back to native-str keys for the template layer.
    entry = pycompat.byteskwargs(orig(web, ctx))
    addmetadata(web.repo, ctx, entry)
    return pycompat.strkwargs(entry)
def _pull(orig, ui, repo, source="default", **opts): opts = pycompat.byteskwargs(opts) # Copy paste from `pull` command source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch')) scratchbookmarks = {} unfi = repo.unfiltered() unknownnodes = [] for rev in opts.get('rev', []): if rev not in unfi: unknownnodes.append(rev) if opts.get('bookmark'): bookmarks = [] revs = opts.get('rev') or [] for bookmark in opts.get('bookmark'): if _scratchbranchmatcher(bookmark): # rev is not known yet # it will be fetched with listkeyspatterns next scratchbookmarks[bookmark] = 'REVTOFETCH' else: bookmarks.append(bookmark) if scratchbookmarks: other = hg.peer(repo, opts, source) fetchedbookmarks = other.listkeyspatterns( 'bookmarks', patterns=scratchbookmarks) for bookmark in scratchbookmarks: if bookmark not in fetchedbookmarks: raise error.Abort('remote bookmark %s not found!' % bookmark) scratchbookmarks[bookmark] = fetchedbookmarks[bookmark] revs.append(fetchedbookmarks[bookmark]) opts['bookmark'] = bookmarks opts['rev'] = revs if scratchbookmarks or unknownnodes: # Set anyincoming to True extensions.wrapfunction(discovery, 'findcommonincoming', _findcommonincoming) try: # Remote scratch bookmarks will be deleted because remotenames doesn't # know about them. Let's save it before pull and restore after remotescratchbookmarks = _readscratchremotebookmarks(ui, repo, source) result = orig(ui, repo, source, **pycompat.strkwargs(opts)) # TODO(stash): race condition is possible # if scratch bookmarks was updated right after orig. # But that's unlikely and shouldn't be harmful. if common.isremotebooksenabled(ui): remotescratchbookmarks.update(scratchbookmarks) _saveremotebookmarks(repo, remotescratchbookmarks, source) else: _savelocalbookmarks(repo, scratchbookmarks) return result finally: if scratchbookmarks: extensions.unwrapfunction(discovery, 'findcommonincoming')
def _push(orig, ui, repo, *dests, **opts):
    """Wrap the push command to support infinitepush (scratch) pushes.

    A push to a scratch bookmark is redirected to the bundlestore: the
    bookmark is stripped from ``opts`` and stored server-side instead of
    being applied, and ``exchange._localphasemove`` is temporarily wrapped
    so local phases are not moved for scratch pushes.
    """
    opts = pycompat.byteskwargs(opts)
    bookmark = opts.get(b'bookmark')
    # we only support pushing one infinitepush bookmark at once
    if len(bookmark) == 1:
        bookmark = bookmark[0]
    else:
        bookmark = b''

    oldphasemove = None
    overrides = {(experimental, configbookmark): bookmark}

    with ui.configoverride(overrides, b'infinitepush'):
        scratchpush = opts.get(b'bundle_store')
        if _scratchbranchmatcher(bookmark):
            scratchpush = True
            # bundle2 can be sent back after push (for example, bundle2
            # containing `pushkey` part to update bookmarks)
            ui.setconfig(experimental, b'bundle2.pushback', True)

        if scratchpush:
            # this is an infinitepush, we don't want the bookmark to be applied
            # rather that should be stored in the bundlestore
            opts[b'bookmark'] = []
            ui.setconfig(experimental, configscratchpush, True)
            oldphasemove = extensions.wrapfunction(
                exchange, b'_localphasemove', _phasemove
            )

        # Infinitepush cannot fan a push out to several destinations.
        paths = list(urlutil.get_push_paths(repo, ui, dests))
        if len(paths) > 1:
            msg = _(b'cannot push to multiple path with infinitepush')
            raise error.Abort(msg)

        path = paths[0]
        destpath = path.pushloc or path.loc
        # Remote scratch bookmarks will be deleted because remotenames doesn't
        # know about them. Let's save it before push and restore after
        remotescratchbookmarks = _readscratchremotebookmarks(ui, repo,
                                                             destpath)
        result = orig(ui, repo, *dests, **pycompat.strkwargs(opts))
        if common.isremotebooksenabled(ui):
            if bookmark and scratchpush:
                other = hg.peer(repo, opts, destpath)
                try:
                    # Refresh the scratch bookmark's node from the remote
                    # after the push completed.
                    fetchedbookmarks = other.listkeyspatterns(
                        b'bookmarks', patterns=[bookmark]
                    )
                    remotescratchbookmarks.update(fetchedbookmarks)
                finally:
                    other.close()
            _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
    # Restore the original phase-move behavior if we wrapped it.
    if oldphasemove:
        exchange._localphasemove = oldphasemove
    return result
def _push(orig, ui, repo, dest=None, *args, **opts):
    """Wrap the push command to support infinitepush (scratch) pushes.

    Older single-destination variant: resolves ``dest`` via ``ui.paths``
    rather than ``urlutil.get_push_paths``. A push to a scratch bookmark is
    diverted to the bundlestore and local phase moves are suppressed.
    """
    opts = pycompat.byteskwargs(opts)
    bookmark = opts.get(b'bookmark')
    # we only support pushing one infinitepush bookmark at once
    if len(bookmark) == 1:
        bookmark = bookmark[0]
    else:
        bookmark = b''
    oldphasemove = None
    overrides = {(experimental, configbookmark): bookmark}
    with ui.configoverride(overrides, b'infinitepush'):
        scratchpush = opts.get(b'bundle_store')
        if _scratchbranchmatcher(bookmark):
            scratchpush = True
            # bundle2 can be sent back after push (for example, bundle2
            # containing `pushkey` part to update bookmarks)
            ui.setconfig(experimental, b'bundle2.pushback', True)
        if scratchpush:
            # this is an infinitepush, we don't want the bookmark to be applied
            # rather that should be stored in the bundlestore
            opts[b'bookmark'] = []
            ui.setconfig(experimental, configscratchpush, True)
            oldphasemove = extensions.wrapfunction(exchange,
                                                  b'_localphasemove',
                                                  _phasemove)
        # Copy-paste from `push` command
        path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
        if not path:
            raise error.Abort(
                _(b'default repository not configured!'),
                hint=_(b"see 'hg help config.paths'"),
            )
        destpath = path.pushloc or path.loc
        # Remote scratch bookmarks will be deleted because remotenames doesn't
        # know about them. Let's save it before push and restore after
        remotescratchbookmarks = _readscratchremotebookmarks(
            ui, repo, destpath)
        result = orig(ui, repo, dest, *args, **pycompat.strkwargs(opts))
        if common.isremotebooksenabled(ui):
            if bookmark and scratchpush:
                # Refresh the scratch bookmark's node from the remote after
                # the push completed.
                other = hg.peer(repo, opts, destpath)
                fetchedbookmarks = other.listkeyspatterns(
                    b'bookmarks', patterns=[bookmark])
                remotescratchbookmarks.update(fetchedbookmarks)
            _saveremotebookmarks(repo, remotescratchbookmarks, destpath)
    # Restore the original phase-move behavior if we wrapped it.
    if oldphasemove:
        exchange._localphasemove = oldphasemove
    return result
def dosplit(ui, repo, tr, ctx, opts): committed = [] # [ctx] # Set working parent to ctx.p1(), and keep working copy as ctx's content if ctx.node() != repo.dirstate.p1(): hg.clean(repo, ctx.node(), show_stats=False) with repo.dirstate.parentchange(): scmutil.movedirstate(repo, ctx.p1()) # Any modified, added, removed, deleted result means split is incomplete def incomplete(repo): st = repo.status() return any((st.modified, st.added, st.removed, st.deleted)) # Main split loop while incomplete(repo): if committed: header = _(b'HG: Splitting %s. So far it has been split into:\n' ) % short(ctx.node()) # We don't want color codes in the commit message template, so # disable the label() template function while we render it. with ui.configoverride({(b'templatealias', b'label(l,x)'): b"x"}, b'split'): for c in committed: summary = cmdutil.format_changeset_summary(ui, c, b'split') header += _(b'HG: - %s\n') % summary header += _( b'HG: Write commit message for the next split changeset.\n') else: header = _(b'HG: Splitting %s. Write commit message for the ' b'first split changeset.\n') % short(ctx.node()) opts.update({ b'edit': True, b'interactive': True, b'message': header + ctx.description(), }) commands.commit(ui, repo, **pycompat.strkwargs(opts)) newctx = repo[b'.'] committed.append(newctx) if not committed: raise error.InputError(_(b'cannot split an empty revision')) scmutil.cleanupnodes( repo, {ctx.node(): [c.node() for c in committed]}, operation=b'split', fixphase=True, ) return committed[-1]
def pushes(web):
    """WebCommand to return a data structure containing pushes."""
    request = web.req
    query = pushlog_setup(web.repo, request)
    payload = pushes_worker(query, web.repo, b'full' in request.qsparams)

    # Map the negotiated format version to its template.
    template = {1: b'pushes1', 2: b'pushes2'}.get(query.formatversion)
    if template is None:
        raise ErrorResponse(500, b'unexpected formatversion')

    return web.sendtemplate(template, **pycompat.strkwargs(payload))
def amend(ui, repo, *pats, **opts):
    """amend the working copy parent with all or specified outstanding changes

    Similar to :hg:`commit --amend`, but reuse the commit message without
    invoking editor, unless ``--edit`` was set.

    See :hg:`help commit` for more details.
    """
    opts = pycompat.byteskwargs(opts)

    # Notes are stored in a fixed-width field; reject oversized ones early.
    if len(opts['note']) > 255:
        raise error.Abort(_("cannot store a note of more than 255 bytes"))

    with repo.wlock(), repo.lock():
        if not opts.get('logfile'):
            # Reuse the parent commit's message unless one was supplied.
            fallback = repo['.'].description()
            opts['message'] = opts.get('message') or fallback
        opts['amend'] = True
        return commands._docommit(ui, repo, *pats,
                                  **pycompat.strkwargs(opts))
def amend(ui, repo, *pats, **opts):
    """amend the working copy parent with all or specified outstanding changes

    Similar to :hg:`commit --amend`, but reuse the commit message without
    invoking editor, unless ``--edit`` was set.

    See :hg:`help commit` for more details.
    """
    opts = pycompat.byteskwargs(opts)
    # Delegate note-length validation to the shared helper.
    cmdutil.checknotesize(ui, opts)

    with repo.wlock(), repo.lock():
        if not opts.get(b'logfile'):
            # Reuse the parent commit's message unless one was supplied.
            fallback = repo[b'.'].description()
            opts[b'message'] = opts.get(b'message') or fallback
        opts[b'amend'] = True
        return commands._docommit(ui, repo, *pats,
                                  **pycompat.strkwargs(opts))
def commitfunc(ui, repo, message, match, opts):
    """Commit callback used while creating a shelve.

    NOTE(review): this is a closure — ``editor``, ``shelveuser`` and
    ``extra`` come from the enclosing scope; confirm against the enclosing
    function before reusing this in isolation.
    """
    # mq would reject committing on top of applied patches, so temporarily
    # disable its check while we create the shelve commit.
    hasmq = util.safehasattr(repo, 'mq')
    if hasmq:
        saved, repo.mq.checkapplied = repo.mq.checkapplied, False
    # Shelve commits are created secret so they don't get exchanged.
    overrides = {('phases', 'new-commit'): phases.secret}
    try:
        editor_ = False
        if editor:
            editor_ = cmdutil.getcommiteditor(editform='shelve.shelve',
                                              **pycompat.strkwargs(opts))
        with repo.ui.configoverride(overrides):
            return repo.commit(message, shelveuser, opts.get('date'),
                               match, editor=editor_, extra=extra)
    finally:
        # Restore mq's original check even if the commit raised.
        if hasmq:
            repo.mq.checkapplied = saved
def dosplit(ui, repo, tr, ctx, opts): committed = [] # [ctx] # Set working parent to ctx.p1(), and keep working copy as ctx's content # NOTE: if we can have "update without touching working copy" API, the # revert step could be cheaper. hg.clean(repo, ctx.p1().node(), show_stats=False) parents = repo.changelog.parents(ctx.node()) ui.pushbuffer() cmdutil.revert(ui, repo, ctx, parents) ui.popbuffer() # discard "reverting ..." messages # Any modified, added, removed, deleted result means split is incomplete incomplete = lambda repo: any(repo.status()[:4]) # Main split loop while incomplete(repo): if committed: header = (_('HG: Splitting %s. So far it has been split into:\n') % short(ctx.node())) for c in committed: firstline = c.description().split('\n', 1)[0] header += _('HG: - %s: %s\n') % (short(c.node()), firstline) header += _('HG: Write commit message for the next split ' 'changeset.\n') else: header = _('HG: Splitting %s. Write commit message for the ' 'first split changeset.\n') % short(ctx.node()) opts.update({ 'edit': True, 'interactive': True, 'message': header + ctx.description(), }) commands.commit(ui, repo, **pycompat.strkwargs(opts)) newctx = repo['.'] committed.append(newctx) if not committed: raise error.Abort(_('cannot split an empty revision')) scmutil.cleanupnodes(repo, {ctx.node(): [c.node() for c in committed]}, operation='split', fixphase=True) return committed[-1]
def clonenarrowcmd(orig, ui, repo, *args, **opts):
    """Wraps clone command, so 'hg clone' first wraps localrepo.clone()."""
    opts = pycompat.byteskwargs(opts)
    wrappedextraprepare = util.nullcontextmanager()
    opts_narrow = opts['narrow']
    if opts_narrow:
        def pullbundle2extraprepare_widen(orig, pullop, kwargs):
            # Create narrow spec patterns from clone flags
            includepats = narrowspec.parsepatterns(opts['include'])
            excludepats = narrowspec.parsepatterns(opts['exclude'])
            # If necessary, ask the server to expand the narrowspec.
            includepats, excludepats = expandpull(pullop, includepats,
                                                  excludepats)
            if not includepats and excludepats:
                # If nothing was included, we assume the user meant to include
                # everything, except what they asked to exclude.
                includepats = {'path:.'}
            pullop.repo.setnarrowpats(includepats, excludepats)
            # This will populate 'includepats' etc with the values from the
            # narrowspec we just saved.
            orig(pullop, kwargs)
            # Forward an optional shallow-clone depth to the bundle2 request.
            if opts.get('depth'):
                kwargs['depth'] = opts['depth']
        wrappedextraprepare = extensions.wrappedfunction(
            exchange, '_pullbundle2extraprepare',
            pullbundle2extraprepare_widen)

    def pullnarrow(orig, repo, *args, **kwargs):
        # Mark the destination repo as narrow before the first pull so the
        # requirement is persisted.
        if opts_narrow:
            repo.requirements.add(changegroup.NARROW_REQUIREMENT)
            repo._writerequirements()
        return orig(repo, *args, **kwargs)

    wrappedpull = extensions.wrappedfunction(exchange, 'pull', pullnarrow)
    with wrappedextraprepare, wrappedpull:
        return orig(ui, repo, *args, **pycompat.strkwargs(opts))
def dosplit(ui, repo, tr, ctx, opts): committed = [] # [ctx] # Set working parent to ctx.p1(), and keep working copy as ctx's content if ctx.node() != repo.dirstate.p1(): hg.clean(repo, ctx.node(), show_stats=False) with repo.dirstate.parentchange(): scmutil.movedirstate(repo, ctx.p1()) # Any modified, added, removed, deleted result means split is incomplete incomplete = lambda repo: any(repo.status()[:4]) # Main split loop while incomplete(repo): if committed: header = _(b'HG: Splitting %s. So far it has been split into:\n' ) % short(ctx.node()) for c in committed: firstline = c.description().split(b'\n', 1)[0] header += _(b'HG: - %s: %s\n') % (short(c.node()), firstline) header += _( b'HG: Write commit message for the next split changeset.\n') else: header = _(b'HG: Splitting %s. Write commit message for the ' b'first split changeset.\n') % short(ctx.node()) opts.update({ b'edit': True, b'interactive': True, b'message': header + ctx.description(), }) commands.commit(ui, repo, **pycompat.strkwargs(opts)) newctx = repo[b'.'] committed.append(newctx) if not committed: raise error.Abort(_(b'cannot split an empty revision')) scmutil.cleanupnodes( repo, {ctx.node(): [c.node() for c in committed]}, operation=b'split', fixphase=True, ) return committed[-1]
def clonenarrowcmd(orig, ui, repo, *args, **opts):
    """Wraps clone command, so 'hg clone' first wraps localrepo.clone()."""
    opts = pycompat.byteskwargs(opts)
    wrappedextraprepare = util.nullcontextmanager()
    narrowspecfile = opts[b'narrowspec']

    if narrowspecfile:
        # Read include/exclude patterns from the user-supplied file and
        # fold them into the clone options.
        filepath = os.path.join(encoding.getcwd(), narrowspecfile)
        ui.status(_(b"reading narrowspec from '%s'\n") % filepath)
        try:
            fdata = util.readfile(filepath)
        except IOError as inst:
            raise error.Abort(
                _(b"cannot read narrowspecs from '%s': %s")
                % (filepath, encoding.strtolocal(inst.strerror)))

        includes, excludes, profiles = sparse.parseconfig(ui, fdata,
                                                          b'narrow')
        if profiles:
            # Profiles are a sparse-only concept; narrow does not support
            # %include directives.
            raise error.Abort(
                _(b"cannot specify other files using '%include' in"
                  b" narrowspec"))

        narrowspec.validatepatterns(includes)
        narrowspec.validatepatterns(excludes)

        # narrowspec is passed so we should assume that user wants narrow clone
        opts[b'narrow'] = True
        opts[b'include'].extend(includes)
        opts[b'exclude'].extend(excludes)

    if opts[b'narrow']:
        def pullbundle2extraprepare_widen(orig, pullop, kwargs):
            orig(pullop, kwargs)

            # Forward an optional shallow-clone depth to the bundle2 request.
            if opts.get(b'depth'):
                kwargs[b'depth'] = opts[b'depth']
        wrappedextraprepare = extensions.wrappedfunction(
            exchange, b'_pullbundle2extraprepare',
            pullbundle2extraprepare_widen)

    with wrappedextraprepare:
        return orig(ui, repo, *args, **pycompat.strkwargs(opts))
def mvcheck(orig, ui, repo, *pats, **opts):
    """Hook to check for moves at commit time"""
    opts = pycompat.byteskwargs(opts)
    renames = None
    automv_disabled = opts.pop('no_automv', False)
    if not automv_disabled:
        threshold = ui.configint('automv', 'similarity')
        if not 0 <= threshold <= 100:
            raise error.Abort(
                _('automv.similarity must be between 0 and 100'))
        if threshold > 0:
            # Look for add/remove pairs similar enough to count as renames.
            match = scmutil.match(repo[None], pats, opts)
            added, removed = _interestingfiles(repo, match)
            renames = _findrenames(repo, match, added, removed,
                                   threshold / 100.0)

    with repo.wlock():
        if renames is not None:
            # Record the detected renames before delegating to the real
            # commit.
            scmutil._markchanges(repo, (), (), renames)

    return orig(ui, repo, *pats, **pycompat.strkwargs(opts))
def pushlog_feed(web):
    """WebCommand for producing the ATOM feed of the pushlog."""
    req = web.req

    req.qsparams[b'style'] = b'atom'
    # Need to reset the templater instance to use the new style.
    web.tmpl = web.templater(req)

    query = pushlog_setup(web.repo, req)

    # Feed timestamp: newest push's time, or "now" when there are no pushes.
    if query.entries:
        dt = pycompat.bytestr(isotime(query.entries[0][2]))
    else:
        dt = datetime.utcnow().isoformat().split('.', 1)[0]
        dt = pycompat.bytestr(dt)
    dt += b'Z'

    url = req.apppath or b'/'
    if not url.endswith(b'/'):
        url += b'/'

    # Skip entries whose node is no longer resolvable in the repo
    # (e.g. obsolete/stripped changesets).
    queryentries = ((pushid, user, date, node)
                    for (pushid, user, date, node) in query.entries
                    if scmutil.isrevsymbol(web.repo, node))

    data = {
        b'urlbase': query.urlbase,
        b'url': url,
        b'repo': query.reponame,
        b'date': dt,
        b'entries': templateutil.mappinggenerator(
            feedentrygenerator,
            args=(queryentries, web.repo, url, query.urlbase)),
    }

    web.res.headers[b'Content-Type'] = ATOM_MIMETYPE
    return web.sendtemplate(b'pushlog', **pycompat.strkwargs(data))
def commit(self, *args, **kwargs):
    """Commit, rewriting metadata while a manual servo sync is in flight.

    When ``self.manualsync`` is set, the commit message and user are taken
    from ``self.manualsync_commit`` and provenance keys are recorded in the
    commit's extras. Returns the committed changectx in that case;
    otherwise defers entirely to the superclass and returns its result.
    """
    if not self.manualsync:
        return super(servosyncrepo, self).commit(*args, **kwargs)

    kwargs = pycompat.byteskwargs(kwargs)

    # Override some of the commit meta data.
    msg = self.manualsync_commit[b'desc']
    user = self.manualsync_commit[b'user']

    # This method has many keyword arguments that mercurial
    # occasionally passes positionally, meaning they end up
    # in *args, instead of **kwargs. This can be problematic as
    # naively modifying the value in **kwargs will result in
    # the argument being passed twice, which is an error.
    # Protect against this by stripping the values out of
    # *args and **kwargs, passing them positionally ourselves.
    for key in (b'text', b'user'):
        if args:
            args = args[1:]
        if key in kwargs:
            del kwargs[key]

    # Record where this commit came from in the changeset extras.
    kwargs[b'extra'] = kwargs[b'extra'] if b'extra' in kwargs else {}
    kwargs[b'extra'][SOURCE_KEY] = encoding.tolocal(LINEAR_REPO_URL)
    kwargs[b'extra'][REVISION_KEY] = self.manualsync_commit[b'node']

    # TODO: Verify that the file changes being committed are only
    # under the servo/ directory.
    ret = super(servosyncrepo, self).commit(
        msg, user, *args, **pycompat.strkwargs(kwargs))

    # BUG FIX: the original read ``repo[ret]``, but no ``repo`` name is in
    # scope in this method — the new commit lives in this repository, so
    # look it up on ``self``.
    ctx = self[ret]

    if any(not f.startswith(b'servo/') for f in ctx.files()):
        self.ui.warn(
            _(b'warning: this commit touches files outside the servo '
              b'directory and would be rejected by the server\n'))

    return ctx
def reportpackmetrics(ui, prefix, *stores):
    """Aggregate pack metrics from ``stores`` and log them via ``ui.log``.

    Each store's ``getmetrics()`` dicts are summed, every key is prefixed
    with ``prefix + '_'``, and the result is emitted to the
    ``<prefix>_packsizes`` log channel.
    """
    # Renamed from ``dict`` — the original shadowed the ``dict`` builtin.
    metrics = [s.getmetrics() for s in stores]
    combined = prefixkeys(sumdicts(*metrics), prefix + '_')
    ui.log(prefix + "_packsizes", "\n", **pycompat.strkwargs(combined))
def _docreatecmd(ui, repo, pats, opts):
    """Create a shelve from the working directory changes.

    Commits the changes onto an uncommitted transaction, stores the result
    as a shelve bundle, then restores the working directory to the parent.
    Returns 1 when there is nothing to shelve, otherwise None.
    """
    wctx = repo[None]
    parents = wctx.parents()
    if len(parents) > 1:
        raise error.Abort(_('cannot shelve while merging'))
    parent = parents[0]
    origbranch = wctx.branch()

    # Default shelve description: first line of the parent's message.
    if parent.node() != nodemod.nullid:
        desc = "changes to: %s" % parent.description().split('\n', 1)[0]
    else:
        desc = '(changes in empty repository)'

    if not opts.get('message'):
        opts['message'] = desc

    lock = tr = activebookmark = None
    try:
        lock = repo.lock()

        # use an uncommitted transaction to generate the bundle to avoid
        # pull races. ensure we don't print the abort message to stderr.
        tr = repo.transaction('commit', report=lambda x: None)

        interactive = opts.get('interactive', False)
        includeunknown = (opts.get('unknown', False) and
                          not opts.get('addremove', False))

        name = getshelvename(repo, parent, opts)
        activebookmark = _backupactivebookmark(repo)
        extra = {}
        if includeunknown:
            _includeunknownfiles(repo, pats, opts, extra)

        if _iswctxonnewbranch(repo) and not _isbareshelve(pats, opts):
            # In non-bare shelve we don't store newly created branch
            # at bundled commit
            repo.dirstate.setbranch(repo['.'].branch())

        commitfunc = getcommitfunc(extra, interactive, editor=True)
        if not interactive:
            node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
        else:
            node = cmdutil.dorecord(ui, repo, commitfunc, None, False,
                                    cmdutil.recordfilter, *pats,
                                    **pycompat.strkwargs(opts))
        if not node:
            _nothingtoshelvemessaging(ui, repo, pats, opts)
            return 1

        _shelvecreatedcommit(repo, node, name)

        if ui.formatted():
            desc = stringutil.ellipsis(desc, ui.termwidth())
        ui.status(_('shelved as %s\n') % name)

        # Return the working directory to the pre-shelve parent/branch.
        hg.update(repo, parent.node())
        if (origbranch != repo['.'].branch()
                and not _isbareshelve(pats, opts)):
            repo.dirstate.setbranch(origbranch)

        _finishshelve(repo)
    finally:
        _restoreactivebookmark(repo, activebookmark)
        lockmod.release(tr, lock)
def changesetentry(orig, web, ctx):
    """Add metadata for an individual changeset in hgweb."""
    d = orig(web, ctx)
    d = pycompat.byteskwargs(d)

    repo = web.repo
    db = db_for_repo(repo)
    if not db:
        # No release database available; return the entry untouched.
        return pycompat.strkwargs(d)

    releases = release_info_for_changeset(db, repo, ctx)

    # Releases made from this exact changeset count as both "here" and
    # "first release with".
    if releases[b'this']:
        d[b'firefox_releases_here'] = []
        d[b'firefox_releases_first'] = []

        for config, build in sorted(releases[b'this'].items()):
            build[b'anchor'] = releasedb.build_anchor(build)

            # Set links to previous and future releases.
            if config in releases[b'previous']:
                build[b'previousnode'] = releases[b'previous'][config][
                    b'revision']

            d[b'firefox_releases_here'].append(build)
            d[b'firefox_releases_first'].append(build)

    # First future release per configuration (deduplicated against "here").
    if releases[b'future']:
        d.setdefault(b'firefox_releases_first', [])

        for config, build in sorted(releases[b'future'].items()):
            build[b'anchor'] = releasedb.build_anchor(build)

            if build not in d[b'firefox_releases_first']:
                d[b'firefox_releases_first'].append(build)

    # Most recent release per configuration preceding this changeset.
    if releases[b'previous']:
        d[b'firefox_releases_last'] = []

        for config, build in sorted(releases[b'previous'].items()):
            build[b'anchor'] = releasedb.build_anchor(build)

            d[b'firefox_releases_last'].append(build)

    # Used so we don't display "first release with" and "last release without".
    # We omit displaying in this scenario because we're not confident in the
    # data and don't want to take chances with inaccurate data.
    if b'firefox_releases_first' in d and b'firefox_releases_last' in d:
        d[b'have_first_and_last_firefox_releases'] = True

    # Do some template fixes
    # TODO build via a generator
    if b'firefox_releases_first' in d:
        d[b'firefox_releases_first'] = templateutil.mappinglist(
            d[b'firefox_releases_first'])

    if b'firefox_releases_last' in d:
        d[b'firefox_releases_last'] = templateutil.mappinglist(
            d[b'firefox_releases_last'])

    if b'firefox_releases_here' in d:
        d[b'firefox_releases_here'] = templateutil.mappinglist(
            d[b'firefox_releases_here'])

    return pycompat.strkwargs(d)
def _dosign(ui, repo, *revs, **opts):
    """Sign the given revisions (or the working parent) with GPG.

    Builds a signature line per node, then either appends it to
    ``.hg/localsigs`` (``--local``) or writes it to ``.hgsigs`` and commits
    that file unless ``--no-commit`` was given.

    Raises Abort on an uncommitted merge, a signing failure, or a dirty
    ``.hgsigs`` (without ``--force``).
    """
    mygpg = newgpg(ui, **opts)
    opts = pycompat.byteskwargs(opts)
    sigver = b"0"
    sigmessage = b""

    date = opts.get(b'date')
    if date:
        opts[b'date'] = dateutil.parsedate(date)

    if revs:
        nodes = [repo.lookup(n) for n in revs]
    else:
        nodes = [
            node for node in repo.dirstate.parents() if node != hgnode.nullid
        ]
    if len(nodes) > 1:
        raise error.Abort(
            _(b'uncommitted merge - please provide a specific revision'))
    if not nodes:
        nodes = [repo.changelog.tip()]

    for n in nodes:
        hexnode = hgnode.hex(n)
        ui.write(
            _(b"signing %d:%s\n") % (repo.changelog.rev(n), hgnode.short(n)))
        # build data
        data = node2txt(repo, n, sigver)
        sig = mygpg.sign(data)
        if not sig:
            raise error.Abort(_(b"error while signing"))
        sig = binascii.b2a_base64(sig)
        sig = sig.replace(b"\n", b"")
        sigmessage += b"%s %s %s\n" % (hexnode, sigver, sig)

    # write it
    if opts[b'local']:
        repo.vfs.append(b"localsigs", sigmessage)
        return

    # BUG FIX: ``msigs`` was previously assigned only inside the
    # ``if not opts[b"force"]`` branch but is used unconditionally in the
    # commit call below, so ``--force`` raised NameError. Define it before
    # the branch.
    msigs = match.exact([b'.hgsigs'])

    if not opts[b"force"]:
        if any(repo.status(match=msigs, unknown=True, ignored=True)):
            raise error.Abort(
                _(b"working copy of .hgsigs is changed "),
                hint=_(b"please commit .hgsigs manually"),
            )

    sigsfile = repo.wvfs(b".hgsigs", b"ab")
    sigsfile.write(sigmessage)
    sigsfile.close()

    if b'.hgsigs' not in repo.dirstate:
        repo[None].add([b".hgsigs"])

    if opts[b"no_commit"]:
        return

    message = opts[b'message']
    if not message:
        # we don't translate commit messages
        message = b"\n".join([
            b"Added signature for changeset %s" % hgnode.short(n)
            for n in nodes
        ])
    try:
        editor = cmdutil.getcommiteditor(editform=b'gpg.sign',
                                         **pycompat.strkwargs(opts))
        repo.commit(message, opts[b'user'], opts[b'date'], match=msigs,
                    editor=editor)
    except ValueError as inst:
        raise error.Abort(pycompat.bytestr(inst))
def _forceprefetch(self, repo, path, fnode, revs, commonlogkwargs):
    """Force-prefetch the remotefilelog blob for ``path``/``fnode`` and
    retry the linknode lookup.

    Returns the verified linknode, or None when prefetching did not yield
    one (including on any exception). Timing and outcome are always logged
    to the 'linkrevfixup' channel.

    Why this exists (see the original discussion for full detail): when a
    server-side rewriting extension such as pushrebase rewrites a pushed
    commit, the client's cached remotefilelog blob keeps the linkrev of the
    local (now non-mainline) commit. As long as the file is untouched, the
    client never refetches the blob with the corrected linkrev, so later
    operations (amends with moves, log, diff) can walk far back through
    history looking for a valid linkrev. To mitigate that, we force one
    re-download of the blob the first time we see a public commit while
    traversing; since the union store consults the remote store first, this
    usually surfaces the corrected linkrev. If the server's linkrev is
    still unsuitable we fall back to the slow path. A possible future
    improvement is using obsolescence markers to pick a more likely-correct
    linkrev.
    """
    logmsg = ''
    start = time.time()
    try:
        repo.fileservice.prefetch([(path, hex(fnode))], force=True)

        # Now that we've downloaded a new blob from the server,
        # we need to rebuild the ancestor map to recompute the
        # linknodes.
        self._ancestormap = None
        linknode = self.ancestormap()[fnode][2]  # 2 is linknode
        if self._verifylinknode(revs, linknode):
            logmsg = 'remotefilelog prefetching succeeded'
            return linknode
        logmsg = 'remotefilelog prefetching not found'
        return None
    except Exception as e:
        logmsg = 'remotefilelog prefetching failed (%s)' % e
        return None
    finally:
        # Always record how long the fixup attempt took, even on failure.
        elapsed = time.time() - start
        repo.ui.log('linkrevfixup', logmsg, elapsed=elapsed * 1000,
                    **pycompat.strkwargs(commonlogkwargs))
def getcommiteditor():
    """Return a commit editor configured for the transplant edit form.

    NOTE(review): this is a closure — ``repo`` and ``opts`` come from the
    enclosing scope; confirm against the enclosing function before reusing
    in isolation.
    """
    editform = cmdutil.mergeeditform(repo[None], b'transplant')
    return cmdutil.getcommiteditor(editform=editform,
                                   **pycompat.strkwargs(opts))
def automationrelevancewebcommand(web):
    """WebCommand returning JSON-friendly data for automation-relevant
    changesets of the requested node.

    Requires a ``node`` query-string parameter; responds with an error
    template when it is missing.
    """
    req = web.req

    if b'node' not in req.qsparams:
        # TRACKING hg48
        # Error-template signature changed in hg 4.8.
        if util.versiontuple(n=2) >= (4, 8):
            return web.sendtemplate(b'error',
                                    error=b"missing parameter 'node'")
        else:
            return web.sendtemplate(
                b'error', error={b'error': b"missing parameter 'node'"})

    repo = web.repo

    # Keys pruned from each entry; they are either uninteresting or not
    # representable in the json template (e.g. "changelogtag").
    deletefields = {
        b'bookmarks',
        b'branch',
        b'branches',
        b'changelogtag',
        b'child',
        b'ctx',
        b'inbranch',
        b'instabilities',
        b'obsolete',
        b'parent',
        b'succsandmarkers',
        b'tags',
        b'whyunstable',
    }

    csets = []

    # Query an unfiltered repo because sometimes automation wants to run against
    # changesets that have since become hidden. The response exposes whether the
    # requested node is visible, so consumers can make intelligent decisions
    # about what to do if the changeset isn't visible.
    urepo = repo.unfiltered()

    revs = list(urepo.revs(b'automationrelevant(%r)', req.qsparams[b'node']))

    # The pushlog extensions wraps webutil.commonentry and the way it is called
    # means pushlog opens a SQLite connection on every call. This is inefficient.
    # So we pre load and cache data for pushlog entries we care about.
    cl = urepo.changelog
    nodes = [cl.node(rev) for rev in revs]

    with repo.unfiltered().pushlog.cache_data_for_nodes(nodes):
        for rev in revs:
            ctx = urepo[rev]
            entry = webutil.changelistentry(web, ctx)

            if req.qsparams.get(b'backouts'):
                backout_node = get_backoutbynode(b'hgmo', repo, ctx)
                if backout_node is not None:
                    entry[b'backedoutby'] = backout_node

            # The pushnodes list is redundant with data from other changesets.
            # The amount of redundant data for pushes containing N>100
            # changesets can add up to megabytes in size.
            try:
                del entry[b'pushnodes']
            except KeyError:
                pass

            # Some items in changelistentry are generators, which json.dumps()
            # can't handle. So we expand them.
            entrycopy = copy.copy(entry)
            for k, v in entrycopy.items():
                # "files" is a generator that attempts to call a template.
                # Don't even bother and just repopulate it.
                if k == b'files':
                    entry[b'files'] = sorted(ctx.files())
                elif k == b'allparents':
                    # TRACKING hg48
                    # generic template keyword args needed (context, mapping)
                    # they are not actually used, so `None, None` is sufficient
                    if util.versiontuple(n=2) >= (4, 8):
                        iterator = v(None, None).itermaps(ctx)
                    else:
                        iterator = v().itermaps(ctx)

                    entry[b'parents'] = [p[b'node'] for p in iterator]
                    del entry[b'allparents']
                # These aren't interesting to us, so prune them. The
                # original impetus for this was because "changelogtag"
                # isn't part of the json template and adding it is non-trivial.
                elif k in deletefields:
                    del entry[k]
                elif isinstance(v, types.GeneratorType):
                    entry[k] = list(v)

            csets.append(entry)

    # Advertise whether the requested revision is visible (non-obsolete).
    if csets:
        visible = csets[-1][b'node'] in repo
    else:
        visible = None

    data = {
        b'changesets': templateutil.mappinglist(csets),
        b'visible': visible,
    }

    return web.sendtemplate(b'automationrelevance',
                            **pycompat.strkwargs(data))
def email(ui, repo, *revs, **opts):
    """send changesets by email

    By default, diffs are sent in the format generated by
    :hg:`export`, one per message. The series starts with a "[PATCH 0
    of N]" introduction, which describes the series as a whole.

    Each patch email has a Subject line of "[PATCH M of N] ...", using
    the first line of the changeset description as the subject text.
    The message contains two or three parts. First, the changeset
    description.

    With the -d/--diffstat option, if the diffstat program is
    installed, the result of running diffstat on the patch is inserted.

    Finally, the patch itself, as generated by :hg:`export`.

    With the -d/--diffstat or --confirm options, you will be presented
    with a final summary of all messages and asked for confirmation before
    the messages are sent.

    By default the patch is included as text in the email body for
    easy reviewing. Using the -a/--attach option will instead create
    an attachment for the patch. With -i/--inline an inline attachment
    will be created. You can include a patch both as text in the email
    body and as a regular or an inline attachment by combining the
    -a/--attach or -i/--inline with the --body option.

    With -B/--bookmark changesets reachable by the given bookmark are
    selected.

    With -o/--outgoing, emails will be generated for patches not found
    in the destination repository (or only those which are ancestors
    of the specified revisions if any are provided)

    With -b/--bundle, changesets are selected as for --outgoing, but a
    single email containing a binary Mercurial bundle as an attachment
    will be sent. Use the ``patchbomb.bundletype`` config option to
    control the bundle type as with :hg:`bundle --type`.

    With -m/--mbox, instead of previewing each patchbomb message in a
    pager or sending the messages directly, it will create a UNIX
    mailbox file with the patch emails. This mailbox file can be
    previewed with any mail user agent which supports UNIX mbox
    files.

    With -n/--test, all steps will run, but mail will not be sent.
    You will be prompted for an email recipient address, a subject and
    an introductory message describing the patches of your patchbomb.
    Then when all is done, patchbomb messages are displayed.

    In case email sending fails, you will find a backup of your series
    introductory message in ``.hg/last-email.txt``.

    The default behavior of this command can be customized through
    configuration. (See :hg:`help patchbomb` for details)

    Examples::

      hg email -r 3000          # send patch 3000 only
      hg email -r 3000 -r 3001  # send patches 3000 and 3001
      hg email -r 3000:3005     # send patches 3000 through 3005
      hg email 3000             # send patch 3000 (deprecated)

      hg email -o               # send all patches not in default
      hg email -o DEST          # send all patches not in DEST
      hg email -o -r 3000       # send all ancestors of 3000 not in default
      hg email -o -r 3000 DEST  # send all ancestors of 3000 not in DEST

      hg email -B feature       # send all ancestors of feature bookmark

      hg email -b               # send bundle of all patches not in default
      hg email -b DEST          # send bundle of all patches not in DEST
      hg email -b -r 3000       # bundle of all ancestors of 3000 not in default
      hg email -b -r 3000 DEST  # bundle of all ancestors of 3000 not in DEST

      hg email -o -m mbox &&    # generate an mbox file...
        mutt -R -f mbox         # ... and view it with mutt
      hg email -o -m mbox &&    # generate an mbox file ...
        formail -s sendmail \\   # ... and use formail to send from the mbox
          -bm -t < mbox         # ... using sendmail

    Before using this command, you will need to enable email in your
    hgrc. See the [email] section in hgrc(5) for details.
    """
    # Normalize str-keyed **opts to the bytes keys used internally.
    opts = pycompat.byteskwargs(opts)

    _charsets = mail._charsets(ui)

    bundle = opts.get(b'bundle')
    date = opts.get(b'date')
    mbox = opts.get(b'mbox')
    outgoing = opts.get(b'outgoing')
    rev = opts.get(b'rev')
    bookmark = opts.get(b'bookmark')

    if not (opts.get(b'test') or mbox):
        # really sending
        mail.validateconfig(ui)

    if not (revs or rev or outgoing or bundle or bookmark):
        raise error.Abort(
            _(b'specify at least one changeset with -B, -r or -o'))

    if outgoing and bundle:
        raise error.Abort(
            _(b"--outgoing mode always on with --bundle;"
              b" do not re-specify --outgoing"))
    cmdutil.check_at_most_one_arg(opts, b'rev', b'bookmark')

    if outgoing or bundle:
        # In outgoing/bundle mode positional args name the destination
        # repository rather than revisions.
        if len(revs) > 1:
            raise error.Abort(_(b"too many destinations"))
        if revs:
            dest = revs[0]
        else:
            dest = None
        revs = []

    if rev:
        if revs:
            raise error.Abort(
                _(b'use only one form to specify the revision'))
        revs = rev
    elif bookmark:
        if bookmark not in repo._bookmarks:
            raise error.Abort(_(b"bookmark '%s' not found") % bookmark)
        revs = scmutil.bookmarkrevs(repo, bookmark)

    revs = scmutil.revrange(repo, revs)
    if outgoing:
        revs = _getoutgoing(repo, dest, revs)
    if bundle:
        opts[b'revs'] = [b"%d" % r for r in revs]

    # check if revision exist on the public destination
    publicurl = repo.ui.config(b'patchbomb', b'publicurl')
    if publicurl:
        repo.ui.debug(b'checking that revision exist in the public repo\n')
        try:
            publicpeer = hg.peer(repo, {}, publicurl)
        except error.RepoError:
            repo.ui.write_err(
                _(b'unable to access public repo: %s\n') % publicurl)
            raise
        if not publicpeer.capable(b'known'):
            repo.ui.debug(
                b'skipping existence checks: public repo too old\n')
        else:
            out = [repo[r] for r in revs]
            known = publicpeer.known(h.node() for h in out)
            missing = []
            for idx, h in enumerate(out):
                if not known[idx]:
                    missing.append(h)
            if missing:
                if len(missing) > 1:
                    msg = _(b'public "%s" is missing %s and %i others')
                    msg %= (publicurl, missing[0], len(missing) - 1)
                else:
                    msg = _(b'public url %s is missing %s')
                    msg %= (publicurl, missing[0])
                missingrevs = [ctx.rev() for ctx in missing]
                revhint = b' '.join(
                    b'-r %s' % h
                    for h in repo.set(b'heads(%ld)', missingrevs))
                hint = _(b"use 'hg push %s %s'") % (publicurl, revhint)
                raise error.Abort(msg, hint=hint)

    # start
    if date:
        start_time = dateutil.parsedate(date)
    else:
        start_time = dateutil.makedate()

    def genmsgid(id):
        # NOTE: reads start_time at call time, so each message's id
        # reflects the (incrementing) timestamp of its iteration.
        return _msgid(id[:20], int(start_time[0]))

    # deprecated config: patchbomb.from
    sender = (opts.get(b'from') or ui.config(b'email', b'from') or
              ui.config(b'patchbomb', b'from') or
              prompt(ui, b'From', ui.username()))

    if bundle:
        stropts = pycompat.strkwargs(opts)
        bundledata = _getbundle(repo, dest, **stropts)
        bundleopts = stropts.copy()
        bundleopts.pop('bundle', None)  # already processed
        msgs = _getbundlemsgs(repo, sender, bundledata, **bundleopts)
    else:
        msgs = _getpatchmsgs(repo, sender, revs,
                             **pycompat.strkwargs(opts))

    showaddrs = []

    def getaddrs(header, ask=False, default=None):
        # Resolve an address header from: command line, then [email]/
        # [patchbomb] config, then (optionally) an interactive prompt.
        configkey = header.lower()
        opt = header.replace(b'-', b'_').lower()
        addrs = opts.get(opt)
        if addrs:
            showaddrs.append(b'%s: %s' % (header, b', '.join(addrs)))
            return mail.addrlistencode(ui, addrs, _charsets,
                                       opts.get(b'test'))

        # not on the command line: fallback to config and then maybe ask
        addr = ui.config(b'email', configkey) or ui.config(
            b'patchbomb', configkey)
        if not addr:
            specified = ui.hasconfig(b'email', configkey) or ui.hasconfig(
                b'patchbomb', configkey)
            if not specified and ask:
                addr = prompt(ui, header, default=default)
        if addr:
            showaddrs.append(b'%s: %s' % (header, addr))
            return mail.addrlistencode(ui, [addr], _charsets,
                                       opts.get(b'test'))
        elif default:
            return mail.addrlistencode(ui, [default], _charsets,
                                       opts.get(b'test'))
        return []

    to = getaddrs(b'To', ask=True)
    if not to:
        # we can get here in non-interactive mode
        raise error.Abort(_(b'no recipient addresses provided'))
    cc = getaddrs(b'Cc', ask=True, default=b'')
    bcc = getaddrs(b'Bcc')
    replyto = getaddrs(b'Reply-To')

    confirm = ui.configbool(b'patchbomb', b'confirm')
    confirm |= bool(opts.get(b'diffstat') or opts.get(b'confirm'))

    if confirm:
        ui.write(_(b'\nFinal summary:\n\n'),
                 label=b'patchbomb.finalsummary')
        ui.write((b'From: %s\n' % sender), label=b'patchbomb.from')
        for addr in showaddrs:
            ui.write(b'%s\n' % addr, label=b'patchbomb.to')
        for m, subj, ds in msgs:
            ui.write((b'Subject: %s\n' % subj),
                     label=b'patchbomb.subject')
            if ds:
                ui.write(ds, label=b'patchbomb.diffstats')
        ui.write(b'\n')
        if ui.promptchoice(
                _(b'are you sure you want to send (yn)?$$ &Yes $$ &No')):
            raise error.Abort(_(b'patchbomb canceled'))

    ui.write(b'\n')

    parent = opts.get(b'in_reply_to') or None
    # angle brackets may be omitted, they're not semantically part of the msg-id
    if parent is not None:
        parent = encoding.strfromlocal(parent)
        if not parent.startswith('<'):
            parent = '<' + parent
        if not parent.endswith('>'):
            parent += '>'

    sender_addr = eutil.parseaddr(encoding.strfromlocal(sender))[1]
    sender = mail.addressencode(ui, sender, _charsets, opts.get(b'test'))
    sendmail = None
    firstpatch = None
    progress = ui.makeprogress(_(b'sending'), unit=_(b'emails'),
                               total=len(msgs))
    for i, (m, subj, ds) in enumerate(msgs):
        try:
            # Derive the Message-Id from the node; the whole series shares
            # the first patch's id via X-Mercurial-Series-Id.
            m['Message-Id'] = genmsgid(m['X-Mercurial-Node'])
            if not firstpatch:
                firstpatch = m['Message-Id']
            m['X-Mercurial-Series-Id'] = firstpatch
        except TypeError:
            # No X-Mercurial-Node header (e.g. the intro message).
            m['Message-Id'] = genmsgid('patchbomb')
        if parent:
            m['In-Reply-To'] = parent
            m['References'] = parent
        if not parent or 'X-Mercurial-Node' not in m:
            # Thread subsequent patches under this message.
            parent = m['Message-Id']

        m['User-Agent'] = ('Mercurial-patchbomb/%s'
                           % util.version().decode())
        m['Date'] = eutil.formatdate(start_time[0], localtime=True)

        # Bump the timestamp by one second per message so mail clients
        # sort the series in send order.
        start_time = (start_time[0] + 1, start_time[1])
        m['From'] = sender
        m['To'] = ', '.join(to)
        if cc:
            m['Cc'] = ', '.join(cc)
        if bcc:
            m['Bcc'] = ', '.join(bcc)
        if replyto:
            m['Reply-To'] = ', '.join(replyto)
        if opts.get(b'test'):
            # Preview mode: render to the pager instead of sending.
            ui.status(_(b'displaying '), subj, b' ...\n')
            ui.pager(b'email')
            generator = mail.Generator(ui, mangle_from_=False)
            try:
                generator.flatten(m, False)
                ui.write(b'\n')
            except IOError as inst:
                # Ignore a closed pager; re-raise anything else.
                if inst.errno != errno.EPIPE:
                    raise
        else:
            # Connect lazily on first send (mbox or SMTP/sendmail).
            if not sendmail:
                sendmail = mail.connect(ui, mbox=mbox)
            ui.status(_(b'sending '), subj, b' ...\n')
            progress.update(i, item=subj)
            if not mbox:
                # Exim does not remove the Bcc field
                del m['Bcc']
            fp = stringio()
            generator = mail.Generator(fp, mangle_from_=False)
            generator.flatten(m, False)
            alldests = to + bcc + cc
            sendmail(sender_addr, alldests, fp.getvalue())

    progress.complete()
def fix(ui, repo, *pats, **opts):
    """rewrite file content in changesets or working directory

    Runs any configured tools to fix the content of files. Only affects files
    with changes, unless file arguments are provided. Only affects changed lines
    of files, unless the --whole flag is used. Some tools may always affect the
    whole file regardless of --whole.

    If --working-dir is used, files with uncommitted changes in the working copy
    will be fixed. Note that no backup are made.

    If revisions are specified with --source, those revisions and their
    descendants will be checked, and they may be replaced with new revisions
    that have fixed file content. By automatically including the descendants,
    no merging, rebasing, or evolution will be required. If an ancestor of the
    working copy is included, then the working copy itself will also be fixed,
    and the working copy will be updated to the fixed parent.

    When determining what lines of each file to fix at each revision, the whole
    set of revisions being fixed is considered, so that fixes to earlier
    revisions are not forgotten in later ones. The --base flag can be used to
    override this default behavior, though it is not usually desirable to do so.
    """
    # Normalize str-keyed **opts to the bytes keys used internally.
    opts = pycompat.byteskwargs(opts)
    cmdutil.check_at_most_one_arg(opts, b'all', b'source', b'rev')
    cmdutil.check_incompatible_arguments(
        opts, b'working_dir', [b'all', b'source']
    )

    # Hold both repo locks and a single transaction for the entire run so
    # all revision replacements land (or roll back) together.
    with repo.wlock(), repo.lock(), repo.transaction(b'fix'):
        revstofix = getrevstofix(ui, repo, opts)
        basectxs = getbasectxs(repo, opts, revstofix)
        workqueue, numitems = getworkqueue(
            ui, repo, pats, opts, revstofix, basectxs
        )
        basepaths = getbasepaths(repo, opts, workqueue, basectxs)
        fixers = getfixers(ui)

        # Rather than letting each worker independently fetch the files
        # (which also would add complications for shared/keepalive
        # connections), prefetch them all first.
        _prefetchfiles(repo, workqueue, basepaths)

        # There are no data dependencies between the workers fixing each file
        # revision, so we can use all available parallelism.
        def getfixes(items):
            # Worker body: yield one (rev, path, metadata, newdata) result
            # per queue item.
            for rev, path in items:
                ctx = repo[rev]
                olddata = ctx[path].data()
                metadata, newdata = fixfile(
                    ui, repo, opts, fixers, ctx, path, basepaths,
                    basectxs[rev]
                )
                # Don't waste memory/time passing unchanged content back, but
                # produce one result per item either way.
                yield (
                    rev,
                    path,
                    metadata,
                    newdata if newdata != olddata else None,
                )

        results = worker.worker(
            ui, 1.0, getfixes, tuple(), workqueue, threadsafe=False
        )

        # We have to hold on to the data for each successor revision in memory
        # until all its parents are committed. We ensure this by committing and
        # freeing memory for the revisions in some topological order. This
        # leaves a little bit of memory efficiency on the table, but also makes
        # the tests deterministic. It might also be considered a feature since
        # it makes the results more easily reproducible.
        filedata = collections.defaultdict(dict)
        aggregatemetadata = collections.defaultdict(list)
        replacements = {}
        wdirwritten = False
        # Revisions are committed in ascending order by popping from the
        # end of this descending-sorted list.
        commitorder = sorted(revstofix, reverse=True)
        with ui.makeprogress(
            topic=_(b'fixing'), unit=_(b'files'),
            total=sum(numitems.values())
        ) as progress:
            for rev, path, filerevmetadata, newdata in results:
                progress.increment(item=path)
                for fixername, fixermetadata in filerevmetadata.items():
                    aggregatemetadata[fixername].append(fixermetadata)
                if newdata is not None:
                    filedata[rev][path] = newdata
                    hookargs = {
                        b'rev': rev,
                        b'path': path,
                        b'metadata': filerevmetadata,
                    }
                    # Best-effort notification per fixed file
                    # (throw=False: hook failures don't abort the run).
                    repo.hook(
                        b'postfixfile',
                        throw=False,
                        **pycompat.strkwargs(hookargs)
                    )
                numitems[rev] -= 1

                # Apply the fixes for this and any other revisions that are
                # ready and sitting at the front of the queue. Using a loop here
                # prevents the queue from being blocked by the first revision to
                # be ready out of order.
                while commitorder and not numitems[commitorder[-1]]:
                    rev = commitorder.pop()
                    ctx = repo[rev]
                    if rev == wdirrev:
                        writeworkingdir(repo, ctx, filedata[rev],
                                        replacements)
                        wdirwritten = bool(filedata[rev])
                    else:
                        replacerev(ui, repo, ctx, filedata[rev],
                                   replacements)
                    # Free the fixed content as soon as it is committed.
                    del filedata[rev]

        cleanup(repo, replacements, wdirwritten)
        hookargs = {
            b'replacements': replacements,
            b'wdirwritten': wdirwritten,
            b'metadata': aggregatemetadata,
        }
        repo.hook(b'postfix', throw=True, **pycompat.strkwargs(hookargs))