def partialcommit(orig, ui, repo, *pats, **opts):
    """Wrapper around commit that supports committing only selected hunks.

    When opts['partials'] names a patch file, that patch is applied to an
    in-memory patch.filestore attached to the repo as repo._filestore; the
    (elsewhere-wrapped) workingfilectx methods then read file data from that
    store instead of the working copy for the files the patch touches.
    The partials patch file is always deleted before returning.
    """
    patchfilename = opts.get('partials', None)
    if patchfilename:
        # attach a patch.filestore to this repo prior to calling commit()
        # the wrapped workingfilectx methods will see this filestore and use
        # the patched file data rather than the working copy data (for only the
        # files modified by the patch)
        fp = open(patchfilename, 'rb')
        store = patch.filestore()
        try:
            try:
                # patch files in tmp directory
                patch.patchrepo(ui, repo, repo['.'], store, fp, 1, None)
                # remember every path held by the store so the dirstate can
                # be refreshed for exactly those files after the commit
                store.keys = set(store.files.keys() + store.data.keys())
                repo._filestore = store
            except patch.PatchError, e:
                raise util.Abort(str(e))
        finally:
            fp.close()
    try:
        ret = orig(ui, repo, *pats, **opts)
        # only present when the partials path above ran
        if hasattr(repo, '_filestore'):
            store.close()
            wlock = repo.wlock()
            try:
                # mark partially committed files for 'needing lookup' in
                # the dirstate. The next status call will find them as M
                for f in store.keys:
                    repo.dirstate.normallookup(f)
            finally:
                wlock.release()
        return ret
    finally:
        # the partials patch file is temporary; remove it on every path
        if patchfilename:
            os.unlink(patchfilename)
def need_backup(self): '''Compare backup of committed changes to workspace''' if os.path.exists(self.bu.backupfile('nodes')): f = None try: try: f = open(self.bu.backupfile('nodes')) bnodes = set( [line.rstrip('\r\n') for line in f.readlines()]) f.close() except EnvironmentError, e: raise util.Abort("couldn't open backup node list: %s" % e) finally: if f and not f.closed: f.close() else: bnodes = set() outnodes = set(self._outgoing_nodes(self.ws.parent())) if outnodes != bnodes: return True return False
def pullrebase(orig, ui, repo, *args, **opts):
    'Call rebase after pull if the latter has been invoked with --rebase'
    if opts.get('rebase'):
        if opts.get('update'):
            # --rebase subsumes the post-pull update; drop the flag
            del opts['update']
            ui.debug('--update and --rebase are not compatible, ignoring '
                     'the update flag\n')
        # remember where the active bookmark was so it can be moved later
        movemarkfrom = repo['.'].node()
        cmdutil.bailifchanged(repo)
        revsprepull = len(repo)
        # temporarily disable postincoming so pull does not update/report;
        # rebase (or the explicit update below) handles that instead
        origpostincoming = commands.postincoming
        def _dummy(*args, **kwargs):
            pass
        commands.postincoming = _dummy
        try:
            orig(ui, repo, *args, **opts)
        finally:
            # always restore the module-level hook, even if pull raised
            commands.postincoming = origpostincoming
        revspostpull = len(repo)
        if revspostpull > revsprepull:
            # something was pulled; rebase local work onto it
            rebase(ui, repo, **opts)
            branch = repo[None].branch()
            dest = repo[branch].rev()
            if dest != repo['.'].rev():
                # there was nothing to rebase we force an update
                hg.update(repo, dest)
                if bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                    ui.status(_("updating bookmark %s\n")
                              % repo._bookmarkcurrent)
    else:
        if opts.get('tool'):
            raise util.Abort(_('--tool can only be used with --rebase'))
        orig(ui, repo, *args, **opts)
def revset_me(repo, subset, x):
    """``me()`` Changesets that you are involved in. """
    if x:
        raise ParseError(_('me() does not take any arguments'))

    username = repo.ui.config('ui', 'username')
    if not username:
        raise util.Abort(_('"[ui] username" must be set to use me()'))

    nick = get_ircnick(repo.ui)
    kind, pattern, matcher = revset._substringmatcher(encoding.lower(username))

    def involved(rev):
        # match either on the committer field or on the reviewer list
        ctx = repo[rev]
        return (matcher(encoding.lower(ctx.user()))
                or nick in parse_reviewers(ctx.description()))

    return subset.filter(involved)
def check_format(ui, repo, hooktype, node, parent1, parent2, **kwargs):
    """pretxncommit hook: validate formatting of modified/added files.

    Returns True to make the hook fail (abort the commit), False to let the
    commit proceed — the Mercurial hook convention for pretxncommit.
    """
    if hooktype != 'pretxncommit':
        raise AttributeError, \
            "This hook is only meant for pretxncommit, not %s" % hooktype

    modified, added, removed, deleted, unknown, ignore, clean = repo.status()

    verbose = 0
    stats = ValidationStats()
    # only files being introduced or changed by this commit are checked
    for f in modified + added:
        validate(f, stats, verbose, None)

    if stats:
        stats.dump()
        # NOTE(review): three-argument ui.prompt(msg, pattern, default) is an
        # old API form — confirm against the Mercurial version in use
        result = ui.prompt("invalid formatting\n(i)gnore or (a)bort?",
                           "^[ia]$", "a")
        if result.startswith('i'):
            pass
        elif result.startswith('a'):
            return True
        else:
            raise util.Abort(_("Invalid response: '%s'") % result)

    return False
def prompt(query):
    """Ask the user what to do with a change during interactive shelving.

    Returns 'y' or 'n'. resp_all / resp_file are single-element lists used
    as mutable cells by this closure: once a 'remaining files' or
    'remaining changes in this file' answer is given, it is replayed for
    subsequent prompts without asking again.
    """
    if resp_all[0] is not None:
        return resp_all[0]
    if resp_file[0] is not None:
        return resp_file[0]
    while True:
        resps = _('[Ynsfdaq?]')
        choices = (_('&Yes, shelve this change'),
                   _('&No, skip this change'),
                   _('&Skip remaining changes to this file'),
                   _('Shelve remaining changes to this &file'),
                   _('&Done, skip remaining changes and files'),
                   _('Shelve &all changes to all remaining files'),
                   _('&Quit, shelving no changes'),
                   _('&?'))
        r = ui.promptchoice("%s %s " % (query, resps), choices)
        if r == 7:
            # '?': print the help text embedded in shelve's docstring,
            # then re-prompt
            c = shelve.__doc__.find('y - shelve this change')
            for l in shelve.__doc__[c:].splitlines():
                if l:
                    ui.write(_(l.strip()) + '\n')
            continue
        elif r == 0:  # yes
            ret = 'y'
        elif r == 1:  # no
            ret = 'n'
        elif r == 2:  # Skip
            ret = resp_file[0] = 'n'
        elif r == 3:  # file (shelve remaining)
            ret = resp_file[0] = 'y'
        elif r == 4:  # done, skip remaining
            ret = resp_all[0] = 'n'
        elif r == 5:  # all
            ret = resp_all[0] = 'y'
        elif r == 6:  # quit
            raise util.Abort(_('user quit'))
        return ret
def message(ui, repo, ctx, ha, opts):
    """histedit 'mess' action: re-apply changeset ha on top of ctx.

    Writes the diff of ha (against its first parent) to a temp file and
    applies it to the working directory updated to ctx. On any failure the
    user is told to fix up and continue; the temp patch file is always
    removed.
    """
    oldctx = repo[ha]
    hg.update(repo, ctx.node())
    fd, patchfile = tempfile.mkstemp(prefix='hg-histedit-')
    fp = os.fdopen(fd, 'w')
    # git-style diff with whitespace preserved so the patch applies exactly
    diffopts = patch.diffopts(ui, opts)
    diffopts.git = True
    diffopts.ignorews = False
    diffopts.ignorewsamount = False
    diffopts.ignoreblanklines = False
    gen = patch.diff(repo, oldctx.parents()[0].node(), ha, opts=diffopts)
    for chunk in gen:
        fp.write(chunk)
    fp.close()
    try:
        files = set()
        try:
            applypatch(ui, repo, patchfile, files=files, eolmode=None)
        finally:
            os.unlink(patchfile)
    except Exception, inst:
        # NOTE(review): the original exception 'inst' is swallowed here and
        # not included in the Abort message — intentional per upstream, but
        # it hides the underlying patch failure from the user
        raise util.Abort(_('Fix up the change and run '
                           'hg histedit --continue'))
def load(cls, repo):
    """Reconstruct a saved shelve state object from its state file.

    The file layout written by the matching save routine is, line by line:
    version, name, working ctx hex, pending ctx hex, parent hashes,
    strip-node hashes.
    """
    fp = repo.opener(cls._filename)
    try:
        version = int(fp.readline().strip())
        if version != cls._version:
            raise util.Abort(_('this version of shelve is incompatible '
                               'with the version used in this repo'))
        name = fp.readline().strip()
        wctx = fp.readline().strip()
        pendingctx = fp.readline().strip()
        # remaining two lines hold whitespace-separated hex node lists
        parents = [bin(h) for h in fp.readline().split()]
        stripnodes = [bin(h) for h in fp.readline().split()]
    finally:
        fp.close()

    state = cls()
    state.name = name
    state.wctx = repo[bin(wctx)]
    state.pendingctx = repo[bin(pendingctx)]
    state.parents = parents
    state.stripnodes = stripnodes
    return state
def parselog(self, fp):
    """Parse a changeset log file into (node, user, date, message, parents).

    Header lines start with '# '; the first non-header line begins the
    commit message, and everything after it is message text verbatim.
    """
    node = revlog.nullid
    user = None
    date = None
    parents = []
    body = []
    inbody = False

    for line in fp.read().splitlines():
        if inbody:
            body.append(line)
            continue
        if line.startswith('# User '):
            user = line[7:]
        elif line.startswith('# Date '):
            date = line[7:]
        elif line.startswith('# Node ID '):
            node = revlog.bin(line[10:])
        elif line.startswith('# Parent '):
            # may appear twice for merge changesets
            parents.append(revlog.bin(line[9:]))
        elif not line.startswith('# '):
            # first non-header line: the message starts here
            inbody = True
            body.append(line)

    if user is None or date is None:
        raise util.Abort(_("filter corrupted changeset (no user or date)"))

    return (node, user, date, '\n'.join(body), parents)
def getreviewcaps(remote):
    """Obtain a set of review capabilities from the server.

    Returns empty set if no capabilities are defined (and the server
    presumably isn't a review repo).

    As a side effect, this function also validates that the client fulfills
    the advertised minimum requirements set by the server and aborts if not.
    """
    needed = remote.capable('mozreviewrequires')
    if isinstance(needed, str):
        needed = set(needed.split(','))
        # anything the server demands that this client lacks is fatal
        missing = needed - clientcapabilities
        if missing:
            raise util.Abort(
                _('reviewboard client extension is too old to speak to this '
                  'server'),
                hint=_('upgrade your extension by running `hg -R %s pull -u`')
                % os.path.normpath(os.path.join(OUR_DIR, '..', '..')))

    advertised = remote.capable('mozreview')
    if isinstance(advertised, bool):
        # capable() returns a bool when the capability carries no payload
        advertised = ''
    return set(advertised.split(','))
def usercachepath(ui, hash):
    """Return the per-user largefiles cache path for the given hash.

    An explicitly configured usercache wins; otherwise the platform's
    conventional cache directory is used. Falls back to a falsy value when
    no suitable environment variable is set, and aborts on an unknown OS.
    """
    path = ui.configpath(longname, 'usercache', None)
    if path:
        return os.path.join(path, hash)

    if os.name == 'nt':
        # prefer LOCALAPPDATA, fall back to roaming APPDATA
        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
        if appdata:
            return os.path.join(appdata, longname, hash)
        return path

    if platform.system() == 'Darwin':
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, 'Library', 'Caches', longname, hash)
        return path

    if os.name == 'posix':
        cachehome = os.getenv('XDG_CACHE_HOME')
        if cachehome:
            return os.path.join(cachehome, longname, hash)
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, '.cache', longname, hash)
        return None

    raise util.Abort(_('unknown operating system: %s\n') % os.name)
def _walkstreamfiles(orig, repo):
    """Wrapper for walking store files during stream clone.

    When serving a shallow remote, yields loose file data from the store's
    data directory plus any revlogs not covered by the shallow match, then
    the top-level store files. Refuses to stream a shallow repo to a full
    clone; otherwise defers to the wrapped implementation.
    """
    if state.shallowremote:
        # if we are shallow ourselves, stream our local commits
        if shallowrepo.requirement in repo.requirements:
            striplen = len(repo.store.path) + 1
            readdir = repo.store.rawvfs.readdir
            visit = [os.path.join(repo.store.path, 'data')]
            while visit:
                p = visit.pop()
                for f, kind, st in readdir(p, stat=True):
                    fp = p + '/' + f
                    if kind == stat.S_IFREG:
                        # skip revlog index/data files; only loose blobs
                        if not fp.endswith('.i') and not fp.endswith('.d'):
                            n = util.pconvert(fp[striplen:])
                            yield (store.decodedir(n), n, st.st_size)
                    if kind == stat.S_IFDIR:
                        visit.append(fp)

        # Return .d and .i files that do not match the shallow pattern.
        # BUG FIX: the original assigned to a local named 'match', which
        # shadowed the match module on the right-hand side and raised
        # UnboundLocalError whenever state.match was unset; it then called
        # state.match(f) directly, which would be None in that case. Bind a
        # distinct name and use it consistently.
        matcher = state.match or match.always(repo.root, '')
        for (u, e, s) in repo.store.datafiles():
            f = u[5:-2]  # trim data/... and .i/.d
            if not matcher(f):
                yield (u, e, s)

        for x in repo.store.topfiles():
            yield x
    elif shallowrepo.requirement in repo.requirements:
        # don't allow cloning from a shallow repo to a full repo
        # since it would require fetching every version of every
        # file in order to create the revlogs.
        raise util.Abort(_("Cannot clone from a shallow repo "
                           "to a full repo."))
    else:
        for x in orig(repo):
            yield x
def sslkwargs(ui, host):
    """Build the keyword arguments used to wrap a socket with TLS for host.

    Host fingerprint pinning skips CA configuration entirely. Otherwise a
    configured web.cacerts bundle is validated, with a fallback to the
    bundled dummy cert on plain Apple Python to enable the system CA store.
    """
    kwargs = {
        'ssl_version': PROTOCOL_TLSv1,
    }

    # a pinned fingerprint replaces CA-based verification
    if ui.config('hostfingerprints', host):
        return kwargs

    cacerts = ui.config('web', 'cacerts')
    if cacerts:
        cacerts = util.expandpath(cacerts)
        if not os.path.exists(cacerts):
            raise util.Abort(_('could not find web.cacerts: %s') % cacerts)
    elif cacerts is None and _plainapplepython():
        dummycert = os.path.join(os.path.dirname(__file__), 'dummycert.pem')
        if os.path.exists(dummycert):
            ui.debug('using %s to enable OS X system CA\n' % dummycert)
            ui.setconfig('web', 'cacerts', dummycert, 'dummy')
            cacerts = dummycert

    if cacerts:
        kwargs['ca_certs'] = cacerts
        kwargs['cert_reqs'] = CERT_REQUIRED

    return kwargs
def parse_trust_args(params, opts):
    """ INTERNAL: Helper function to parse --hash and --fmsid. """
    hashes = opts.get('hash', [])
    if hashes == []:
        raise util.Abort("Use --hash to set the USK hash.")
    if len(hashes) != 1:
        raise util.Abort("Only one --hash value is allowed.")
    if not is_hex_string(hashes[0]):
        raise util.Abort("[%s] doesn't look like a USK hash." % hashes[0])

    fmsids = opts.get('fmsid', [])
    if fmsids == []:
        raise util.Abort("Use --fmsid to set the FMS id.")
    if len(fmsids) != 1:
        raise util.Abort("Only one --fmsid value is allowed.")
    if not is_fms_id(fmsids[0]):
        raise util.Abort("[%s] doesn't look like an FMS id." % fmsids[0])

    # both values validated; record them for the FMSREAD request
    params['FMSREAD_HASH'] = hashes[0]
    params['FMSREAD_FMSID'] = fmsids[0]
def mtnstdioreadpacket(self):
    """Read one monotone stdio packet from self.mtnreadfp.

    Packet wire format: '<commandnbr>:<stream>:<length>:<payload>', where
    stream is one of 'mewptl'. Returns (commandnbr, stream, length, payload)
    and aborts on any framing error.
    """
    read = None
    commandnbr = ''
    # accumulate up to and including the first ':' separator
    while read != ':':
        read = self.mtnreadfp.read(1)
        if not read:
            raise util.Abort(_('bad mtn packet - no end of commandnbr'))
        commandnbr += read
    commandnbr = commandnbr[:-1]  # drop the trailing ':'

    stream = self.mtnreadfp.read(1)
    if stream not in 'mewptl':
        raise util.Abort(_('bad mtn packet - bad stream type %s') % stream)

    read = self.mtnreadfp.read(1)
    if read != ':':
        raise util.Abort(_('bad mtn packet - no divider before size'))

    read = None
    lengthstr = ''
    while read != ':':
        read = self.mtnreadfp.read(1)
        if not read:
            raise util.Abort(_('bad mtn packet - no end of packet size'))
        lengthstr += read
    try:
        length = long(lengthstr[:-1])
    except ValueError:
        # BUG FIX: long() raises ValueError (not TypeError) on a
        # non-numeric string, so the original except clause could never
        # trigger and a malformed size escaped as an unhandled exception
        raise util.Abort(_('bad mtn packet - bad packet size %s')
                         % lengthstr)

    read = self.mtnreadfp.read(length)
    if len(read) != length:
        raise util.Abort(_("bad mtn packet - unable to read full packet "
                           "read %s of %s") % (len(read), length))
    return (commandnbr, stream, length, read)
def fetch(ui, repo, source='default', **opts):
    '''pull changes from a remote repository, merge new changes if needed.

    This finds all changes from the repository at the specified path
    or URL and adds them to the local repository.

    If the pulled changes add a new branch head, the head is
    automatically merged, and the result of the merge is committed.
    Otherwise, the working directory is updated to include the new
    changes.

    When a merge occurs, the newly pulled changes are assumed to be
    "authoritative". The head of the new changes is used as the first
    parent, with local changes as the second. To switch the merge
    order, use --switch-parent.

    See 'hg help dates' for a list of formats valid for -d/--date.
    '''
    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)

    # fetch only operates from the tip of the current branch with a clean,
    # non-merge working directory; check all of that up front
    parent, p2 = repo.dirstate.parents()
    branch = repo.dirstate.branch()
    branchnode = repo.branchtags().get(branch)
    if parent != branchnode:
        raise util.Abort(_('working dir not at branch tip '
                           '(use "hg update" to check out branch tip)'))
    if p2 != nullid:
        raise util.Abort(_('outstanding uncommitted merge'))
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        mod, add, rem, del_ = repo.status()[:4]
        if mod or add or rem:
            raise util.Abort(_('outstanding uncommitted changes'))
        if del_:
            raise util.Abort(_('working directory is missing some files'))
        # only heads without children count as real branch heads here
        bheads = repo.branchheads(branch)
        bheads = [head for head in bheads
                  if len(repo[head].children()) == 0]
        if len(bheads) > 1:
            raise util.Abort(_('multiple heads in this branch '
                               '(use "hg heads ." and "hg merge" to merge)'))

        other = hg.repository(cmdutil.remoteui(repo, opts),
                              ui.expandpath(source))
        ui.status(_('pulling from %s\n') %
                  url.hidepassword(ui.expandpath(source)))
        revs = None
        if opts['rev']:
            try:
                revs = [other.lookup(rev) for rev in opts['rev']]
            except error.CapabilityError:
                err = _("Other repository doesn't support revision lookup, "
                        "so a rev cannot be specified.")
                raise util.Abort(err)

        # Are there any changes at all?
        modheads = repo.pull(other, heads=revs)
        if modheads == 0:
            return 0

        # Is this a simple fast-forward along the current branch?
        newheads = repo.branchheads(branch)
        newheads = [head for head in newheads
                    if len(repo[head].children()) == 0]
        newchildren = repo.changelog.nodesbetween([parent], newheads)[2]
        if len(newheads) == 1:
            if newchildren[0] != parent:
                return hg.clean(repo, newchildren[0])
            else:
                return

        # Are there more than one additional branch heads?
        newchildren = [n for n in newchildren if n != parent]
        newparent = parent
        if newchildren:
            # advance to the pulled child of the old parent before merging
            newparent = newchildren[0]
            hg.clean(repo, newparent)
        newheads = [n for n in newheads if n != newparent]
        if len(newheads) > 1:
            ui.status(_('not merging with %d other new branch heads '
                        '(use "hg heads ." and "hg merge" to merge them)\n')
                      % (len(newheads) - 1))
            return

        # Otherwise, let's merge.
        err = False
        if newheads:
            # By default, we consider the repository we're pulling
            # *from* as authoritative, so we merge our changes into
            # theirs.
            if opts['switch_parent']:
                firstparent, secondparent = newparent, newheads[0]
            else:
                firstparent, secondparent = newheads[0], newparent
            ui.status(_('updating to %d:%s\n') %
                      (repo.changelog.rev(firstparent),
                       short(firstparent)))
            hg.clean(repo, firstparent)
            ui.status(_('merging with %d:%s\n') %
                      (repo.changelog.rev(secondparent),
                       short(secondparent)))
            err = hg.merge(repo, secondparent, remind=False)

        if not err:
            # we don't translate commit messages
            message = (cmdutil.logmessage(opts) or
                       ('Automated merge with %s' %
                        url.removeauth(other.url())))
            editor = cmdutil.commiteditor
            if opts.get('force_editor') or opts.get('edit'):
                editor = cmdutil.commitforceeditor
            n = repo.commit(message, opts['user'], opts['date'],
                            editor=editor)
            ui.status(_('new changeset %d:%s merges remote changes '
                        'with local\n') % (repo.changelog.rev(n),
                                           short(n)))
    finally:
        release(lock, wlock)
def collapse(ui, repo, **opts):
    """collapse multiple revisions into one

    Collapse combines multiple consecutive changesets into a single
    changeset, preserving any descendants of the final changeset. The
    commit messages for the collapsed changesets are concatenated and
    may be edited before the collapse is completed.
    """
    # require Mercurial >= 2.0 (compare [major, minor] lists)
    hg_vsn = re.match(r"[0-9.]+", util.version()).group(0)
    vsn_list = [int(x) for x in hg_vsn.split(".")]
    if vsn_list < [2, 0]:
        raise util.Abort(_('Mercurial version to low (%s), '
                           'you need at least 2.0') % hg_vsn)

    # scmutil.revrange replaced cmdutil.revrange in newer Mercurial
    try:
        from mercurial import scmutil
        rng = scmutil.revrange(repo, opts['rev'])
    except ImportError:
        rng = cmdutil.revrange(repo, opts['rev'])

    # run collapse in the repository root
    olddir = os.getcwd()
    os.chdir(repo.root)
    try:
        if opts['movelog']:
            movelog = open(opts['movelog'], 'a')
        else:
            movelog = False

        if opts['timedelta']:
            timedelta = float(opts['timedelta'])
        else:
            timedelta = float('inf')

        if not opts['auto']:
            # manual mode: an explicit revision range is mandatory and the
            # auto-only options are rejected
            if not rng:
                raise util.Abort(_('no revisions specified'))
            if opts['timedelta']:
                raise util.Abort(_('-t or --timedelta only valid with '
                                   '--auto'))
            if opts['userchange']:
                raise util.Abort(_('-u or --userchange only valid with '
                                   '--auto'))
            # FIXME add more options that don't work
            # FIXME FIXME: rework ui to make the distinction between auto
            # and not unnecessary. Integrate revsets (event disjoint)
            first = rng[0]
            last = rng[-1]
            revs = inbetween(repo, first, last)

            if not revs:
                raise util.Abort(_('revision %s is not an ancestor '
                                   'of revision %s\n') % (first, last))
            elif len(revs) == 1:
                raise util.Abort(_('only one revision specified'))
            do_collapse(ui, repo, first, last, revs, movelog, timedelta,
                        opts)
        else:  # auto mode
            if len(rng) == 0:
                start = 0
            elif len(rng) == 1:
                start = rng[0]
            else:
                # BUG FIX: the Abort was previously constructed but never
                # raised, which left 'start' unbound and crashed below with
                # a NameError instead of a clean error message
                raise util.Abort(_('multiple revisions specified with '
                                   'auto mode'))

            count = 0
            # collapse one chunk, or keep going while --repeat is set
            while count < 1 or opts['repeat']:
                if opts['usefirst']:
                    revs = find_first_chunk(ui, repo, start, timedelta, opts)
                else:
                    revs = find_last_chunk(ui, repo, start, timedelta, opts)

                if not revs:
                    if count == 0:
                        raise util.Abort(_('no revision chunk found\n'))
                    else:
                        break

                first = min(revs)
                last = max(revs)
                assert len(revs) > 1
                do_collapse(ui, repo, first, last, revs, movelog, timedelta,
                            opts)
                count += 1
    finally:
        os.chdir(olddir)
def do_collapse(ui, repo, first, last, revs, movelog, timedelta, opts):
    """Collapse revs (first..last) into one changeset and relocate
    descendants.

    Validates ownership and graph shape first, then builds the collapsed
    changeset, moves descendants onto it, fixes .hgtags, and finally strips
    the original revisions unless --keep was given. On failure the working
    directory is restored and any partially-built changeset is stripped.
    """
    ui.debug(_('Collapsing revisions %s\n') % revs)

    if opts['debugdelay']:
        debug_delay = float(opts['debugdelay'])
    else:
        debug_delay = False

    # sanity-check every revision being collapsed:
    # - must belong to the invoking user (unless --force)
    # - no child/parent may escape the collapsed set mid-range
    for r in revs:
        if repo[r].user() != ui.username() and not opts['force']:
            raise util.Abort(_('revision %s does not belong to %s\n') %
                             (r, ui.username()))
        if r != last:
            children = repo[r].children()
            if len(children) > 1:
                for c in children:
                    if not c.rev() in revs:
                        raise util.Abort(_('revision %s has child %s not '
                                           'being collapsed, please '
                                           'rebase\n') % (r, c.rev()))
        if r != first:
            parents = repo[r].parents()
            if len(parents) > 1:
                for p in parents:
                    if not p.rev() in revs:
                        raise util.Abort(_('revision %s has parent %s not '
                                           'being collapsed.') %
                                         (r, p.rev()))

    if len(repo[first].parents()) > 1:
        raise util.Abort(_('start revision %s has multiple parents, '
                           'won\'t collapse.') % first)

    # bail_if_changed was renamed to bailifchanged; support both
    try:
        cmdutil.bailifchanged(repo)
    except AttributeError:
        cmdutil.bail_if_changed(repo)

    parent = repo[first].parents()[0]
    # everything after 'last' has to be re-parented onto the new changeset
    tomove = list(repo.changelog.descendants([last]))

    head_hgtags = get_hgtags_from_heads(ui, repo, last)
    if '.hgtags' in parent:
        parent_hgtags = parent['.hgtags'].data()
    else:
        parent_hgtags = False

    movemap = dict.fromkeys(tomove, nullrev)
    ui.debug(_('will move revisions: %s\n') % tomove)

    tagsmap = dict()
    if opts['noop']:
        ui.status(_('noop: not collapsing\n'))
    else:
        origparent = repo['.'].rev()
        collapsed = None

        try:
            branch = repo[last].branch()
            collapsed = makecollapsed(ui, repo, parent, revs, branch,
                                      tagsmap, parent_hgtags, movelog, opts)
            movemap[max(revs)] = collapsed
            movedescendants(ui, repo, collapsed, tomove, movemap, tagsmap,
                            parent_hgtags, movelog, debug_delay)
            fix_hgtags(ui, repo, head_hgtags, tagsmap)
        except:
            # roll back: restore the working dir and strip the partially
            # built collapsed changeset, then re-raise whatever happened
            merge.update(repo, repo[origparent].rev(), False, True, False)
            if collapsed:
                repair.strip(ui, repo, collapsed.node(), "strip")
            raise

        if not opts['keep']:
            ui.debug(_('stripping revision %d\n') % first)
            repair.strip(ui, repo, repo[first].node(), "strip")

        ui.status(_('collapse completed\n'))
def transplant(ui, repo, *revs, **opts):
    '''transplant changesets from another branch

    Selected changesets will be applied on top of the current working
    directory with the log of the original changeset. If --log is
    specified, log messages will have a comment appended of the form:

    (transplanted from CHANGESETHASH)

    You can rewrite the changelog message with the --filter option.
    Its argument will be invoked with the current changelog message
    as $1 and the patch as $2.

    If --source is specified, selects changesets from the named
    repository. If --branch is specified, selects changesets from the
    branch holding the named revision, up to that revision. If --all
    is specified, all changesets on the branch will be transplanted,
    otherwise you will be prompted to select the changesets you want.

    hg transplant --branch REVISION --all will rebase the selected branch
    (up to the named revision) onto your current working directory.

    You can optionally mark selected transplanted changesets as
    merge changesets. You will not be prompted to transplant any
    ancestors of a merged transplant, and you can merge descendants
    of them normally instead of transplanting them.

    If no merges or revisions are provided, hg transplant will start
    an interactive changeset browser.

    If a changeset application fails, you can fix the merge by hand and
    then resume where you left off by calling hg transplant --continue.
    '''
    def getoneitem(opts, item, errmsg):
        # return the single value of a list-valued option, aborting if the
        # user supplied more than one
        val = opts.get(item)
        if val:
            if len(val) > 1:
                raise util.Abort(errmsg)
            else:
                return val[0]

    def getremotechanges(repo, url):
        # open the source repo; if it is remote, snapshot the incoming
        # changesets into a local bundle repository so they can be walked
        sourcerepo = ui.expandpath(url)
        source = hg.repository(ui, sourcerepo)
        incoming = repo.findincoming(source, force=True)
        if not incoming:
            return (source, None, None)

        bundle = None
        if not source.local():
            cg = source.changegroup(incoming, 'incoming')
            bundle = changegroup.writebundle(cg, None, 'HG10UN')
            source = bundlerepo.bundlerepository(ui, repo.root, bundle)

        return (source, incoming, bundle)

    def incwalk(repo, incoming, branches, match=util.always):
        # yield matching nodes reachable from the incoming roots
        if not branches:
            branches = None
        for node in repo.changelog.nodesbetween(incoming, branches)[0]:
            if match(node):
                yield node

    def transplantwalk(repo, root, branches, match=util.always):
        # yield matching nodes between the ancestors of root and the
        # requested branch heads (all heads by default)
        if not branches:
            branches = repo.heads()
        ancestors = []
        for branch in branches:
            ancestors.append(repo.changelog.ancestor(root, branch))
        for node in repo.changelog.nodesbetween(ancestors, branches)[0]:
            if match(node):
                yield node

    def checkopts(opts, revs):
        # validate mutually-exclusive / required option combinations
        if opts.get('continue'):
            if filter(lambda opt: opts.get(opt), ('branch', 'all', 'merge')):
                raise util.Abort(_('--continue is incompatible with '
                                   'branch, all or merge'))
            return
        if not (opts.get('source') or revs or
                opts.get('merge') or opts.get('branch')):
            raise util.Abort(_('no source URL, branch tag or revision '
                               'list provided'))
        if opts.get('all'):
            if not opts.get('branch'):
                raise util.Abort(_('--all requires a branch revision'))
            if revs:
                raise util.Abort(_('--all is incompatible with a '
                                   'revision list'))

    checkopts(opts, revs)

    if not opts.get('log'):
        opts['log'] = ui.config('transplant', 'log')
    if not opts.get('filter'):
        opts['filter'] = ui.config('transplant', 'filter')

    tp = transplanter(ui, repo)

    p1, p2 = repo.dirstate.parents()
    if p1 == revlog.nullid:
        raise util.Abort(_('no revision checked out'))
    if not opts.get('continue'):
        # a fresh transplant needs a clean, non-merge working directory
        if p2 != revlog.nullid:
            raise util.Abort(_('outstanding uncommitted merges'))
        m, a, r, d = repo.status()[:4]
        if m or a or r or d:
            raise util.Abort(_('outstanding local changes'))

    bundle = None
    source = opts.get('source')
    if source:
        (source, incoming, bundle) = getremotechanges(repo, source)
    else:
        source = repo

    try:
        if opts.get('continue'):
            tp.resume(repo, source, opts)
            return

        tf = tp.transplantfilter(repo, source, p1)
        if opts.get('prune'):
            prune = [source.lookup(r)
                     for r in cmdutil.revrange(source, opts.get('prune'))]
            matchfn = lambda x: tf(x) and x not in prune
        else:
            matchfn = tf
        branches = map(source.lookup, opts.get('branch', ()))
        merges = map(source.lookup, opts.get('merge', ()))
        # revmap: source revision number -> node to transplant
        revmap = {}
        if revs:
            for r in cmdutil.revrange(source, revs):
                revmap[int(r)] = source.lookup(r)
        elif opts.get('all') or not merges:
            if source != repo:
                alltransplants = incwalk(source, incoming, branches,
                                         match=matchfn)
            else:
                alltransplants = transplantwalk(source, p1, branches,
                                                match=matchfn)
            if opts.get('all'):
                revs = alltransplants
            else:
                # interactive selection
                revs, newmerges = browserevs(ui, source, alltransplants,
                                             opts)
                merges.extend(newmerges)
        for r in revs:
            revmap[source.changelog.rev(r)] = r
        for r in merges:
            revmap[source.changelog.rev(r)] = r

        revs = revmap.keys()
        revs.sort()
        pulls = []

        tp.apply(repo, source, revmap, merges, opts)
    finally:
        # clean up the temporary bundle repo created for a remote source
        if bundle:
            source.close()
            os.unlink(bundle)
def sendrpc(self, msg):
    """Deliver msg to the configured CIA hub via XML-RPC, aborting on
    anything other than a success response."""
    server = xmlrpclib.Server(self.ciaurl)
    result = server.hub.deliver(msg)
    # the hub signals success with True or the literal string 'queued.'
    delivered = result is True or result == 'queued.'
    if not delivered:
        raise util.Abort(_('%s returned an error: %s')
                         % (self.ciaurl, result))
def genid(repo=None, fakeidpath=None): """Generate a unique identifier. Unique identifiers are treated as a black box. But under the hood, they consist of a time component and a random component. Each identifier is up to 64 bits. The first 32 bits are random. The final 32 bits are integer seconds since midnight UTC on January 1, 2000. We don't use UNIX epoch because dates from the 70's aren't interesting to us. The birthday paradox says we only need sqrt() attempts before we generate a collision. So for 32 bits, we need 2^16 or 65,536 generations on average before there is a collision. We estimate there will be a commit every 10s for the Firefox repo. The chance of a collision should be very small. Base62 is used as the encoding mechanism because it is safe for both URLs and revsets. We could get base66 for URLs, but the characters -~. could conflict with revsets. """ # Provide a backdoor to generate deterministic IDs. This is used for # testing purposes because tests want constant output. And since # commit IDs go into the commit and are part of the SHA-1, they need # to be deterministic. if repo and repo.ui.configbool('reviewboard', 'fakeids', False): fakeidpath = repo.vfs.join('genid') if fakeidpath: try: with open(fakeidpath, 'rb') as fh: data = fh.read() except IOError as e: if e.errno != errno.ENOENT: raise data = None if data: n = int(data) else: n = 0 seconds = EPOCH rnd = n with open(fakeidpath, 'wb') as fh: fh.write(str(n + 1)) else: now = int(time.time()) # May 5, 2015 sometime. if now < 1430860700: raise util.Abort('your system clock is wrong; fix your system ' 'clock') seconds = now - EPOCH rnd = random.SystemRandom().getrandbits(32) value = (rnd << 32) + seconds chars = [] while value > 0: quot, remain = divmod(value, 62) chars.append(BASE62_CHARS[remain]) value = quot return ''.join(reversed(chars))
def clone(self, remote, heads=[], stream=False):
    """Clone from remote, preferring a server-advertised pre-generated
    bundle over a normal clone when possible.

    Falls back to the superclass clone when bundle clone is unsupported,
    unavailable, or (optionally) when fetching the bundle fails.

    NOTE(review): the mutable default heads=[] is shared across calls —
    harmless as long as it is never mutated, but worth confirming.
    """
    supported = True

    if (exchange and hasattr(exchange, '_maybeapplyclonebundle')
            and remote.capable('clonebundles')):
        # client + server both support the built-in clonebundles feature;
        # this extension is redundant
        supported = False
        self.ui.warn(_('(mercurial client has built-in support for '
                       'bundle clone features; the "bundleclone" '
                       'extension can likely safely be removed)\n'))

        if not self.ui.configbool('experimental', 'clonebundles', False):
            self.ui.warn(_('(but the experimental.clonebundles config '
                           'flag is not enabled: enable it before '
                           'disabling bundleclone or cloning from '
                           'pre-generated bundles may not work)\n'))
            # We assume that presence of the bundleclone extension
            # means they want clonebundles enabled. Otherwise, why do
            # they have bundleclone enabled? So silently enable it.
            # NOTE(review): bare 'ui' here (and in the testing backdoors
            # below) looks like it should be 'self.ui' — confirm
            ui.setconfig('experimental', 'clonebundles', True)
    elif not remote.capable('bundles'):
        supported = False
        self.ui.debug(_('bundle clone not supported\n'))
    elif heads:
        supported = False
        self.ui.debug(_('cannot perform bundle clone if heads '
                        'requested\n'))
    elif stream:
        supported = False
        self.ui.debug(_('ignoring bundle clone because stream was '
                        'requested\n'))

    if not supported:
        return super(bundleclonerepo, self).clone(remote, heads=heads,
                                                  stream=stream)

    # ask the server for its bundle manifest
    result = remote._call('bundles')

    if not result:
        self.ui.note(_('no bundles available; using normal clone\n'))
        return super(bundleclonerepo, self).clone(remote, heads=heads,
                                                  stream=stream)

    pyver = sys.version_info
    pyver = (pyver[0], pyver[1], pyver[2])

    hgver = util.version()
    # Discard bit after '+'.
    hgver = hgver.split('+')[0]
    try:
        hgver = tuple([int(i) for i in hgver.split('.')[0:2]])
    except ValueError:
        hgver = (0, 0)

    # Testing backdoors.
    if ui.config('bundleclone', 'fakepyver'):
        pyver = ui.configlist('bundleclone', 'fakepyver')
        pyver = tuple(int(v) for v in pyver)

    if ui.config('bundleclone', 'fakehgver'):
        hgver = ui.configlist('bundleclone', 'fakehgver')
        hgver = tuple(int(v) for v in hgver[0:2])

    entries = []
    snifilteredfrompython = False
    snifilteredfromhg = False

    # each manifest line is "<url> key=value key=value ..."
    for line in result.splitlines():
        fields = line.split()
        url = fields[0]
        attrs = {}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            attrs[urllib.unquote(key)] = urllib.unquote(value)

        # Filter out SNI entries if we don't support SNI.
        if attrs.get('requiresni') == 'true':
            skip = False
            if pyver < (2, 7, 9):
                # Take this opportunity to inform people they are using an
                # old, insecure Python.
                if not snifilteredfrompython:
                    self.ui.warn(_('(your Python is older than 2.7.9 '
                                   'and does not support modern and '
                                   'secure SSL/TLS; please consider '
                                   'upgrading your Python to a secure '
                                   'version)\n'))
                    snifilteredfrompython = True
                skip = True

            if hgver < (3, 3):
                if not snifilteredfromhg:
                    # NOTE(review): "you Mercurial" is a typo in this
                    # user-facing string; left untouched here since this
                    # edit changes documentation only
                    self.ui.warn(_('(you Mercurial is old and does '
                                   'not support modern and secure '
                                   'SSL/TLS; please consider '
                                   'upgrading your Mercurial to 3.3+ '
                                   'which supports modern and secure '
                                   'SSL/TLS)\n'))
                    snifilteredfromhg = True
                skip = True

            if skip:
                self.ui.warn(_('(ignoring URL on server that requires '
                               'SNI)\n'))
                continue

        entries.append((url, attrs))

    if not entries:
        # Don't fall back to normal clone because we don't want mass
        # fallback in the wild to barage servers expecting bundle
        # offload.
        raise util.Abort(_('no appropriate bundles available'),
                         hint=_('you may wish to complain to the '
                                'server operator'))

    # The configuration is allowed to define lists of preferred
    # attributes and values. If this is present, sort results according
    # to that preference. Otherwise, use manifest order and select the
    # first entry.
    prefers = self.ui.configlist('bundleclone', 'prefers', default=[])
    if prefers:
        prefers = [p.split('=', 1) for p in prefers]

        def compareentry(a, b):
            aattrs = a[1]
            battrs = b[1]

            # Itereate over local preferences.
            for pkey, pvalue in prefers:
                avalue = aattrs.get(pkey)
                bvalue = battrs.get(pkey)

                # Special case for b is missing attribute and a matches
                # exactly.
                if avalue is not None and bvalue is None \
                        and avalue == pvalue:
                    return -1

                # Special case for a missing attribute and b matches
                # exactly.
                if bvalue is not None and avalue is None \
                        and bvalue == pvalue:
                    return 1

                # We can't compare unless the attribute is defined on
                # both entries.
                if avalue is None or bvalue is None:
                    continue

                # Same values should fall back to next attribute.
                if avalue == bvalue:
                    continue

                # Exact matches come first.
                if avalue == pvalue:
                    return -1
                if bvalue == pvalue:
                    return 1

                # Fall back to next attribute.
                continue

            # Entries could not be sorted based on attributes. This
            # says they are equal, which will fall back to index order,
            # which is what we want.
            return 0

        entries = sorted(entries, cmp=compareentry)

    url, attrs = entries[0]

    if not url:
        self.ui.note(_('invalid bundle manifest; using normal clone\n'))
        return super(bundleclonerepo, self).clone(remote, heads=heads,
                                                  stream=stream)

    # NOTE(review): the %-formatting is inside _() here, unlike the other
    # status calls — the URL becomes part of the translatable string
    self.ui.status(_('downloading bundle %s\n' % url))

    try:
        fh = hgurl.open(self.ui, url)
        # Stream clone data is not changegroup data. Handle it
        # specially.
        if 'stream' in attrs:
            reqs = set(attrs['stream'].split(','))
            l = fh.readline()
            filecount, bytecount = map(int, l.split(' ', 1))
            self.ui.status(_('streaming all changes\n'))
            consumev1(self, fh, filecount, bytecount)
        else:
            if exchange:
                cg = exchange.readbundle(self.ui, fh, 'stream')
            else:
                cg = changegroup.readbundle(fh, 'stream')

            # Mercurial 3.6 introduced cgNunpacker.apply().
            # Before that, there was changegroup.addchangegroup().
            # Before that, there was localrepository.addchangegroup().
            if hasattr(cg, 'apply'):
                cg.apply(self, 'bundleclone', url)
            elif hasattr(changegroup, 'addchangegroup'):
                changegroup.addchangegroup(self, cg, 'bundleclone', url)
            else:
                self.addchangegroup(cg, 'bundleclone', url)

        self.ui.status(_('finishing applying bundle; pulling\n'))
        # Maintain compatibility with Mercurial 2.x.
        if exchange:
            return exchange.pull(self, remote, heads=heads)
        else:
            return self.pull(remote, heads=heads)
    except (urllib2.HTTPError, urllib2.URLError) as e:
        if isinstance(e, urllib2.HTTPError):
            msg = _('HTTP error fetching bundle: %s') % str(e)
        else:
            msg = _('error fetching bundle: %s') % e.reason

        # Don't fall back to regular clone unless explicitly told to.
        if not self.ui.configbool('bundleclone', 'fallbackonerror',
                                  False):
            raise util.Abort(msg, hint=_('consider contacting the '
                                         'server operator if this error '
                                         'persists'))

        self.ui.warn(msg + '\n')
        self.ui.warn(_('falling back to normal clone\n'))

        return super(bundleclonerepo, self).clone(remote, heads=heads,
                                                  stream=stream)
    def _connect(self):
        # Establish a connection to the CVS server named by self.cvsroot and
        # perform the initial protocol handshake.  Three connection types are
        # supported:
        #   :pserver: - direct socket connection with pserver password auth,
        #   :local:   - spawn a local "cvs server" process,
        #   rsh/:ext: - spawn cvs over ssh (or $CVS_RSH).
        # On return, self.writep/self.readp are file-like handles speaking
        # the CVS client/server protocol and self.realroot holds the
        # server-side repository root.
        root = self.cvsroot
        conntype = None
        user, host = None, None
        cmd = ['cvs', 'server']

        self.ui.status(_("connecting to %s\n") % root)

        if root.startswith(":pserver:"):
            root = root[9:]
            # Split ":pserver:user:passwd@host:port/path" into its parts;
            # every component except the host/path is optional.
            m = re.match(r'(?:(.*?)(?::(.*?))?@)?([^:\/]*)(?::(\d*))?(.*)',
                         root)
            if m:
                conntype = "pserver"
                user, passw, serv, port, root = m.groups()
                if not user:
                    # NOTE(review): "******" looks like a scrubbed/redacted
                    # placeholder (upstream Mercurial uses "anonymous" here)
                    # -- verify against the original source.
                    user = "******"
                if not port:
                    # 2401 is the standard CVS pserver port.
                    port = 2401
                else:
                    port = int(port)
                # Two root formats appear in ~/.cvspass: format1 includes the
                # explicit port (used by "/1"-prefixed entries), format0 does
                # not.
                format0 = ":pserver:%s@%s:%s" % (user, serv, root)
                format1 = ":pserver:%s@%s:%d%s" % (user, serv, port, root)

                if not passw:
                    # No password in the URL: fall back to the scrambled
                    # password stored by "cvs login" in ~/.cvspass.  "A" is
                    # the scrambled form of the empty password.
                    passw = "A"
                    cvspass = os.path.expanduser("~/.cvspass")
                    try:
                        pf = open(cvspass)
                        for line in pf.read().splitlines():
                            part1, part2 = line.split(' ', 1)
                            # /1 :pserver:[email protected]:2401/cvsroot/foo
                            # Ah<Z
                            if part1 == '/1':
                                part1, part2 = part2.split(' ', 1)
                                format = format1
                            # :pserver:[email protected]:/cvsroot/foo Ah<Z
                            else:
                                format = format0
                            if part1 == format:
                                passw = part2
                                break
                        pf.close()
                    except IOError as inst:
                        # A missing ~/.cvspass is fine; anything else is
                        # re-raised with the filename attached for a better
                        # error message.
                        if inst.errno != errno.ENOENT:
                            if not getattr(inst, 'filename', None):
                                inst.filename = cvspass
                            raise

                sck = socket.socket()
                sck.connect((serv, port))
                sck.send("\n".join(["BEGIN AUTH REQUEST", root, user, passw,
                                    "END AUTH REQUEST", ""]))
                # The server answers the auth request with the literal
                # "I LOVE YOU\n" on success.
                if sck.recv(128) != "I LOVE YOU\n":
                    raise util.Abort(_("CVS pserver authentication failed"))

                self.writep = self.readp = sck.makefile('r+')

        if not conntype and root.startswith(":local:"):
            conntype = "local"
            root = root[7:]

        if not conntype:
            # :ext:user@host/home/user/path/to/cvsroot
            if root.startswith(":ext:"):
                root = root[5:]
            m = re.match(r'(?:([^@:/]+)@)?([^:/]+):?(.*)', root)
            # Do not take Windows path "c:\foo\bar" for a connection strings
            if os.path.isdir(root) or not m:
                conntype = "local"
            else:
                conntype = "rsh"
                user, host, root = m.group(1), m.group(2), m.group(3)

        if conntype != "pserver":
            # local and rsh both talk to a spawned "cvs server" process over
            # pipes; rsh additionally wraps the command in ssh/$CVS_RSH.
            if conntype == "rsh":
                rsh = os.environ.get("CVS_RSH") or "ssh"
                if user:
                    cmd = [rsh, '-l', user, host] + cmd
                else:
                    cmd = [rsh, host] + cmd

            # popen2 does not support argument lists under Windows
            cmd = [util.shellquote(arg) for arg in cmd]
            cmd = util.quotecommand(' '.join(cmd))
            self.writep, self.readp = util.popen2(cmd)

        self.realroot = root

        # Protocol handshake: announce the root, the responses we accept,
        # and ask the server which requests it supports.
        self.writep.write("Root %s\n" % root)
        self.writep.write("Valid-responses ok error Valid-requests Mode"
                          " M Mbinary E Checked-in Created Updated"
                          " Merged Removed\n")
        self.writep.write("valid-requests\n")
        self.writep.flush()
        r = self.readp.readline()
        if not r.startswith("Valid-requests"):
            raise util.Abort(_('unexpected response from CVS server '
                               '(expected "Valid-requests", but got %r)')
                             % r)
        if "UseUnchanged" in r:
            # Newer servers want the client to opt in to UseUnchanged mode.
            self.writep.write("UseUnchanged\n")
            self.writep.flush()
            r = self.readp.readline()
def getchanges(self, rev, full): if full: raise util.Abort(_("convert from cvs do not support --full")) self._parse() return sorted(self.files[rev].iteritems()), {}, set()
def heads(self, *args, **opts): """ Whenever this function is hit, we abort. The traceback is useful for figuring out where to intercept the functionality. """ raise hgutil.Abort('command unavailable for Subversion repositories')
def replacechangesets(repo, oldnodes, createfn, backuptopic="replacing"):
    """Replace changesets with new versions.

    This is a generic function used to perform history rewriting.

    Given an iterable of input nodes, a function will be called which is
    expected to produce a new changeset to replace the input node. The
    function signature should be:

        def createfn(repo, ctx, revmap, copyfilectxfn):

    It is passed a repo, the changectx being rewritten, a map of old to new
    revisions that have been changed so far, and a function that can be used
    as the memctx callback for obtaining memfilectx when no file
    modifications are to be performed (a common pattern).

    The function should return an *uncommitted* memctx holding the new
    changeset info.

    We currently restrict that the createfn callback must return a new
    changeset and that no file changes may occur. Restricting file changes
    satisfies the requirements this function was invented for and keeps the
    implementation simple.

    After the memctx is obtained, it is committed. Children changesets are
    rebased automatically after all changesets have been rewritten.

    After the old to new mapping is obtained, bookmarks are moved and old
    changesets are made obsolete or stripped, depending on what is
    appropriate for the repo configuration.

    This function handles locking the repository and performing as many
    actions in a transaction as possible.

    Before any changes are made, we verify the state of the repo is
    sufficient for transformation to occur and abort otherwise.

    Returns a dict mapping old node -> new node.
    """
    if not oldnodes:
        return {}

    # Work on the unfiltered repo so hidden ancestors are addressable.
    repo = repo.unfiltered()

    # Validate function called properly.
    for node in oldnodes:
        if len(node) != 20:
            raise util.Abort("replacechangesets expects 20 byte nodes")

    uoldrevs = [repo[node].rev() for node in oldnodes]
    oldrevs = sorted(uoldrevs)
    if oldrevs != uoldrevs:
        raise util.Abort("must pass oldnodes in changelog order")

    # We may perform stripping and stripping inside a nested transaction
    # is a recipe for disaster.
    # currenttransaction was added in 3.3. Copy the implementation until we
    # drop 3.2 compatibility.
    if hasattr(repo, "currenttransaction"):
        intrans = repo.currenttransaction()
    else:
        if repo._transref and repo._transref().running():
            intrans = True
        else:
            intrans = False

    if intrans:
        raise util.Abort(
            "cannot call replacechangesets when a transaction " "is active"
        )

    # The revisions impacted by the current operation. This is essentially
    # all non-hidden children. We don't operate on hidden changesets because
    # there is no point - they are hidden and deemed not important.
    impactedrevs = list(repo.filtered("visible").revs("%ld::", oldrevs))

    # If we'll need to update the working directory, don't do anything if
    # there are uncommitted changes, as this could cause a giant mess (merge
    # conflicts, etc). Note the comparison against impacted revs, as children
    # of rewritten changesets will be rebased below.
    dirstaterev = repo[repo.dirstate.p1()].rev()
    if dirstaterev in impactedrevs:
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    # Detect whether obsolescence markers are enabled; the API moved across
    # Mercurial versions.
    obsenabled = False
    if hasattr(obsolete, "isenabled"):
        obsenabled = obsolete.isenabled(repo, "createmarkers")
    else:
        obsenabled = obsolete._enabled

    def adjustphase(repo, tr, phase, node):
        # transaction argument added in Mercurial 3.2.
        try:
            phases.advanceboundary(repo, tr, phase, [node])
            phases.retractboundary(repo, tr, phase, [node])
        except TypeError:
            phases.advanceboundary(repo, phase, [node])
            phases.retractboundary(repo, phase, [node])

    nodemap = {}
    wlock, lock, tr = None, None, None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction("replacechangesets")

        # Create the new changesets.
        revmap = OrderedDict()
        for oldnode in oldnodes:
            oldctx = repo[oldnode]

            # Copy revmap out of paranoia.
            newctx = createfn(repo, oldctx, dict(revmap),
                              preservefilectx(oldctx))
            if not isinstance(newctx, context.memctx):
                raise util.Abort("createfn must return a context.memctx")
            if oldctx == newctx:
                raise util.Abort("createfn must create a new changeset")

            newnode = newctx.commit()

            # Needed so .manifestnode() works, which memctx doesn't have.
            newctx = repo[newnode]

            # This makes the implementation significantly simpler as we don't
            # need to worry about merges when we do auto rebasing later.
            if oldctx.manifestnode() != newctx.manifestnode():
                raise util.Abort("we do not allow replacements to modify files")

            revmap[oldctx.rev()] = newctx.rev()
            nodemap[oldnode] = newnode

            # Do phase adjustment ourselves because we want callbacks to be
            # as dumb as possible.
            adjustphase(repo, tr, oldctx.phase(), newctx.node())

        # Children of rewritten changesets are impacted as well. Rebase as
        # needed.
        for rev in impactedrevs:
            # It was handled by createfn() or by this loop already.
            if rev in revmap:
                continue

            oldctx = repo[rev]
            if oldctx.p1().rev() not in revmap:
                raise util.Abort(
                    "unknown parent of child commit: %s" % oldctx.hex(),
                    hint="please report this as a bug",
                )

            parents = newparents(repo, oldctx, revmap)
            mctx = context.memctx(
                repo,
                parents,
                oldctx.description(),
                oldctx.files(),
                preservefilectx(oldctx),
                user=oldctx.user(),
                date=oldctx.date(),
                extra=oldctx.extra(),
            )
            status = oldctx.p1().status(oldctx)
            # The lambdas close over the per-iteration ``status`` binding;
            # this is safe only because commit() is called below, before
            # ``status`` is rebound on the next iteration.
            mctx.modified = lambda: status[0]
            mctx.added = lambda: status[1]
            mctx.removed = lambda: status[2]

            newnode = mctx.commit()
            revmap[rev] = repo[newnode].rev()
            nodemap[oldctx.node()] = newnode

            # Retain phase.
            adjustphase(repo, tr, oldctx.phase(), newnode)

            ph = repo.ui.config("phases", "new-commit")
            try:
                repo.ui.setconfig("phases", "new-commit", oldctx.phase(),
                                  "rewriting")
                # NOTE(review): mctx.commit() was already called a few lines
                # above and its result recorded; committing the same memctx a
                # second time (and re-assigning newnode/revmap) looks
                # redundant -- verify whether this block is intentional.
                newnode = mctx.commit()
                revmap[rev] = repo[newnode].rev()
            finally:
                repo.ui.setconfig("phases", "new-commit", ph)

        # Move bookmarks to new nodes.
        bmchanges = []
        oldactivebookmark = activebookmark(repo)

        for oldrev, newrev in revmap.items():
            oldnode = repo[oldrev].node()
            for mark, bmnode in repo._bookmarks.items():
                if bmnode == oldnode:
                    bmchanges.append((mark, repo[newrev].node()))

        if bmchanges:
            # TODO unconditionally call applychanges() when support for
            # Mercurial 4.1 is dropped.
            if util.safehasattr(repo._bookmarks, "applychanges"):
                repo._bookmarks.applychanges(repo, tr, bmchanges)
            else:
                for mark, newnode in bmchanges:
                    repo._bookmarks[mark] = newnode
                repo._bookmarks.recordchange(tr)

        # Update references to rewritten MQ patches.
        if hasattr(repo, "mq"):
            q = repo.mq
            for e in q.applied:
                if e.node in nodemap:
                    e.node = nodemap[e.node]
                    q.applieddirty = True

            # This no-ops if nothing is dirty.
            q.savedirty()

        # If obsolescence is enabled, obsolete the old changesets.
        if obsenabled:
            markers = []
            for oldrev, newrev in revmap.items():
                if repo[oldrev] != repo[newrev]:
                    markers.append((repo[oldrev], (repo[newrev],)))
            if markers:
                obsolete.createmarkers(repo, markers)

        # Move the working directory to the new node, if applicable.
        wdirrev = repo["."].rev()
        if wdirrev in revmap:
            hg.updaterepo(repo, repo[revmap[wdirrev]].node(), True)

        # The active bookmark is tracked by its symbolic name, not its
        # changeset. Since we didn't do anything that should change the
        # active bookmark, we shouldn't need to adjust it.
        if activebookmark(repo) != oldactivebookmark:
            raise util.Abort(
                "active bookmark changed; " "this should not occur!",
                hint="please file a bug",
            )

        tr.close()

        # Unless obsolescence is enabled, strip any obsolete changesets.
        # Stripping happens outside the transaction (after tr.close()).
        if not obsenabled:
            stripnodes = []
            for oldrev, newrev in revmap.items():
                if repo[oldrev] != repo[newrev]:
                    stripnodes.append(repo[oldrev].node())
            if stripnodes:
                repair.strip(repo.ui, repo, stripnodes, topic=backuptopic)
    finally:
        if tr:
            tr.release()
        lockmod.release(wlock, lock)

    return nodemap
if len(ids) == 1: return True else: return False def insertNewVersionIntoDatabase(self): self.executeDatabaseQuery( 'insert into versions (value, product_id) values (%s, (select id from products where name=%s ))', (self.tag, self.product)) self.connection.commit() def hook(ui, repo, hooktype, node=None, **kwargs): try: import MySQLdb as mysql global MySQLdb MySQLdb = mysql except ImportError, err: raise util.Abort('MySQL driver not installed: %s' % err) if node is None: raise util.Abort('Only hooks that have changesetid' 's can be used.') try: bzClient = BugzillaClient(ui, repo, node) if bzClient.commitContainsTag(): bzClient.insertTagIntoDatabase() except MySQLdb.MySQLError, err: raise util.Abort('Database error: %s' % err[1])
def convertsource(ui, path, type, rev):
    """Instantiate a source converter for *path*.

    ``type`` optionally forces a specific source repository type; ``rev``
    is passed through to the source converter.  Returns a
    ``(source, sortmode)`` tuple, or raises util.Abort when no registered
    converter accepts the path.  Collected NoRepo/MissingTool failures are
    echoed (unless quiet) before aborting, to help diagnose why each
    candidate was rejected.
    """
    exceptions = []
    if type and type not in [s[0] for s in source_converters]:
        raise util.Abort(_('%s: invalid source repository type') % type)
    for name, source, sortmode in source_converters:
        try:
            if not type or name == type:
                return source(ui, path, rev), sortmode
        # Deprecated "except X, e" form normalized to "except X as e" for
        # consistency with the rest of the file (and Python 3 syntax).
        except (NoRepo, MissingTool) as inst:
            exceptions.append(inst)
    if not ui.quiet:
        for inst in exceptions:
            ui.write("%s\n" % inst)
    raise util.Abort(_('%s: missing or unsupported repository') % path)


def convertsink(ui, path, type):
    """Instantiate a sink (destination) converter for *path*.

    ``type`` optionally forces a specific destination repository type.
    Returns the sink instance.  A NoRepo from a candidate is merely noted
    and the next candidate is tried; a MissingTool aborts immediately.
    Raises util.Abort when no converter handles the path.
    """
    if type and type not in [s[0] for s in sink_converters]:
        raise util.Abort(_('%s: invalid destination repository type') % type)
    for name, sink in sink_converters:
        try:
            if not type or name == type:
                return sink(ui, path)
        except NoRepo as inst:
            ui.note(_("convert: %s\n") % inst)
        except MissingTool as inst:
            raise util.Abort('%s\n' % inst)
    raise util.Abort(_('%s: unknown repository type') % path)
def patchbomb(ui, repo, *revs, **opts):
    '''send changesets by email

    By default, diffs are sent in the format generated by
    :hg:`export`, one per message. The series starts with a "[PATCH 0
    of N]" introduction, which describes the series as a whole.

    Each patch email has a Subject line of "[PATCH M of N] ...", using
    the first line of the changeset description as the subject text.
    The message contains two or three parts. First, the changeset
    description.

    With the -d/--diffstat option, if the diffstat program is
    installed, the result of running diffstat on the patch is inserted.

    Finally, the patch itself, as generated by :hg:`export`.

    With the -d/--diffstat or -c/--confirm options, you will be presented
    with a final summary of all messages and asked for confirmation before
    the messages are sent.

    By default the patch is included as text in the email body for
    easy reviewing. Using the -a/--attach option will instead create
    an attachment for the patch. With -i/--inline an inline attachment
    will be created.

    With -o/--outgoing, emails will be generated for patches not found
    in the destination repository (or only those which are ancestors
    of the specified revisions if any are provided)

    With -b/--bundle, changesets are selected as for --outgoing, but a
    single email containing a binary Mercurial bundle as an attachment
    will be sent.

    With -m/--mbox, instead of previewing each patchbomb message in a
    pager or sending the messages directly, it will create a UNIX
    mailbox file with the patch emails. This mailbox file can be
    previewed with any mail user agent which supports UNIX mbox
    files.

    With -n/--test, all steps will run, but mail will not be sent.
    You will be prompted for an email recipient address, a subject and
    an introductory message describing the patches of your patchbomb.
    Then when all is done, patchbomb messages are displayed. If the
    PAGER environment variable is set, your pager will be fired up once
    for each patchbomb message, so you can verify everything is alright.

    In case email sending fails, you will find a backup of your series
    introductory message in ``.hg/last-email.txt``.

    Examples::

      hg email -r 3000          # send patch 3000 only
      hg email -r 3000 -r 3001  # send patches 3000 and 3001
      hg email -r 3000:3005     # send patches 3000 through 3005
      hg email 3000             # send patch 3000 (deprecated)

      hg email -o               # send all patches not in default
      hg email -o DEST          # send all patches not in DEST
      hg email -o -r 3000       # send all ancestors of 3000 not in default
      hg email -o -r 3000 DEST  # send all ancestors of 3000 not in DEST

      hg email -b               # send bundle of all patches not in default
      hg email -b DEST          # send bundle of all patches not in DEST
      hg email -b -r 3000       # bundle of all ancestors of 3000 not in default
      hg email -b -r 3000 DEST  # bundle of all ancestors of 3000 not in DEST

      hg email -o -m mbox &&    # generate an mbox file...
        mutt -R -f mbox         # ... and view it with mutt

      hg email -o -m mbox &&    # generate an mbox file ...
        formail -s sendmail \\  # ... and use formail to send from the mbox
          -bm -t < mbox         # ... using sendmail

    Before using this command, you will need to enable email in your
    hgrc. See the [email] section in hgrc(5) for details.
    '''
    # Overall flow: resolve what to send (patches / outgoing revs / bundle),
    # build one email per patch (plus optional intro message), resolve the
    # addressing headers, optionally show a confirmation summary, then either
    # display each message (--test) or hand them to the configured mailer.
    _charsets = mail._charsets(ui)

    bundle = opts.get('bundle')
    date = opts.get('date')
    mbox = opts.get('mbox')
    outgoing = opts.get('outgoing')
    rev = opts.get('rev')
    # internal option used by pbranches
    patches = opts.get('patches')

    def getoutgoing(dest, revs):
        '''Return the revisions present locally but not in dest'''
        dest = ui.expandpath(dest or 'default-push', dest or 'default')
        dest, branches = hg.parseurl(dest)
        revs, checkout = hg.addbranchrevs(repo, repo, branches, revs)
        other = hg.peer(repo, opts, dest)
        ui.status(_('comparing with %s\n') % util.hidepassword(dest))
        common, _anyinc, _heads = discovery.findcommonincoming(repo, other)
        nodes = revs and map(repo.lookup, revs) or revs
        o = repo.changelog.findmissing(common, heads=nodes)
        if not o:
            ui.status(_("no changes found\n"))
            return []
        return [str(repo.changelog.rev(r)) for r in o]

    def getpatches(revs):
        # Yield each revision's export output as a list of lines.
        for r in scmutil.revrange(repo, revs):
            output = cStringIO.StringIO()
            cmdutil.export(repo, [r], fp=output,
                           opts=patch.diffopts(ui, opts))
            yield output.getvalue().split('\n')

    def getbundle(dest):
        # Write a bundle of the outgoing changesets to a temp file, return
        # its raw bytes, and always clean up the temp dir.
        tmpdir = tempfile.mkdtemp(prefix='hg-email-bundle-')
        tmpfn = os.path.join(tmpdir, 'bundle')
        try:
            commands.bundle(ui, repo, tmpfn, dest, **opts)
            fp = open(tmpfn, 'rb')
            data = fp.read()
            fp.close()
            return data
        finally:
            try:
                os.unlink(tmpfn)
            except:
                pass
            os.rmdir(tmpdir)

    if not (opts.get('test') or mbox):
        # really sending
        mail.validateconfig(ui)

    if not (revs or rev or outgoing or bundle or patches):
        raise util.Abort(_('specify at least one changeset with -r or -o'))

    if outgoing and bundle:
        raise util.Abort(_("--outgoing mode always on with --bundle;"
                           " do not re-specify --outgoing"))

    if outgoing or bundle:
        # In these modes the positional argument is a destination, not a
        # revision list.
        if len(revs) > 1:
            raise util.Abort(_("too many destinations"))
        dest = revs and revs[0] or None
        revs = []

    if rev:
        if revs:
            raise util.Abort(_('use only one form to specify the revision'))
        revs = rev

    if outgoing:
        revs = getoutgoing(dest, rev)
    if bundle:
        opts['revs'] = revs

    # start
    if date:
        start_time = util.parsedate(date)
    else:
        start_time = util.makedate()

    def genmsgid(id):
        # Message-Id built from the node prefix, the send timestamp and the
        # local FQDN.
        return '<%s.%s@%s>' % (id[:20], int(start_time[0]),
                               socket.getfqdn())

    def getdescription(body, sender):
        # Intro body either comes from --desc or from an interactive edit;
        # the edited version is saved so it survives a sendmail failure.
        if opts.get('desc'):
            body = open(opts.get('desc')).read()
        else:
            ui.write(_('\nWrite the introductory message for the '
                       'patch series.\n\n'))
            body = ui.edit(body, sender)
            # Save series description in case sendmail fails
            msgfile = repo.opener('last-email.txt', 'wb')
            msgfile.write(body)
            msgfile.close()
        return body

    def getpatchmsgs(patches, patchnames=None):
        msgs = []

        ui.write(_('This patch series consists of %d patches.\n\n')
                 % len(patches))

        # build the intro message, or skip it if the user declines
        if introwanted(opts, len(patches)):
            msg = makeintro(patches)
            if msg:
                msgs.append(msg)

        # are we going to send more than one message?
        numbered = len(msgs) + len(patches) > 1

        # now generate the actual patch messages
        name = None
        for i, p in enumerate(patches):
            if patchnames:
                name = patchnames[i]
            msg = makepatch(ui, repo, p, opts, _charsets, i + 1,
                            len(patches), numbered, name)
            msgs.append(msg)

        return msgs

    def makeintro(patches):
        # "[PATCH 0 of N<flags>]" subject prefix; field width matches N so
        # the series sorts correctly.
        tlen = len(str(len(patches)))

        flag = opts.get('flag') or ''
        if flag:
            flag = ' ' + ' '.join(flag)
        prefix = '[PATCH %0*d of %d%s]' % (tlen, 0, len(patches), flag)

        subj = (opts.get('subject') or
                prompt(ui, 'Subject: ', rest=prefix, default=''))
        if not subj:
            return None  # skip intro if the user doesn't bother

        subj = prefix + ' ' + subj

        body = ''
        if opts.get('diffstat'):
            # generate a cumulative diffstat of the whole patch series
            diffstat = patch.diffstat(sum(patches, []))
            body = '\n' + diffstat
        else:
            diffstat = None

        body = getdescription(body, sender)
        msg = mail.mimeencode(ui, body, _charsets, opts.get('test'))
        msg['Subject'] = mail.headencode(ui, subj, _charsets,
                                         opts.get('test'))
        return (msg, subj, diffstat)

    def getbundlemsgs(bundle):
        # A single multipart message carrying the binary bundle as an
        # attachment.
        subj = (opts.get('subject')
                or prompt(ui, 'Subject:', 'A bundle for your repository'))

        body = getdescription('', sender)
        msg = email.MIMEMultipart.MIMEMultipart()
        if body:
            msg.attach(mail.mimeencode(ui, body, _charsets,
                                       opts.get('test')))
        datapart = email.MIMEBase.MIMEBase('application',
                                           'x-mercurial-bundle')
        datapart.set_payload(bundle)
        bundlename = '%s.hg' % opts.get('bundlename', 'bundle')
        datapart.add_header('Content-Disposition', 'attachment',
                            filename=bundlename)
        email.Encoders.encode_base64(datapart)
        msg.attach(datapart)
        msg['Subject'] = mail.headencode(ui, subj, _charsets,
                                         opts.get('test'))
        return [(msg, subj, None)]

    sender = (opts.get('from') or ui.config('email', 'from') or
              ui.config('patchbomb', 'from') or
              prompt(ui, 'From', ui.username()))

    if patches:
        msgs = getpatchmsgs(patches, opts.get('patchnames'))
    elif bundle:
        msgs = getbundlemsgs(getbundle(dest))
    else:
        msgs = getpatchmsgs(list(getpatches(revs)))

    showaddrs = []

    def getaddrs(header, ask=False, default=None):
        # Resolve an address header from (in order): command line option,
        # [email]/[patchbomb] config, interactive prompt (when ask=True).
        configkey = header.lower()
        opt = header.replace('-', '_').lower()
        addrs = opts.get(opt)
        if addrs:
            showaddrs.append('%s: %s' % (header, ', '.join(addrs)))
            return mail.addrlistencode(ui, addrs, _charsets,
                                       opts.get('test'))

        # not on the command line: fallback to config and then maybe ask
        addr = (ui.config('email', configkey) or
                ui.config('patchbomb', configkey) or
                '')
        if not addr and ask:
            addr = prompt(ui, header, default=default)
        if addr:
            showaddrs.append('%s: %s' % (header, addr))
            return mail.addrlistencode(ui, [addr], _charsets,
                                       opts.get('test'))
        else:
            return default

    to = getaddrs('To', ask=True)
    if not to:
        # we can get here in non-interactive mode
        raise util.Abort(_('no recipient addresses provided'))
    cc = getaddrs('Cc', ask=True, default='') or []
    bcc = getaddrs('Bcc') or []
    replyto = getaddrs('Reply-To')

    if opts.get('diffstat') or opts.get('confirm'):
        # Show a summary and give the user a last chance to bail out.
        ui.write(_('\nFinal summary:\n\n'))
        ui.write('From: %s\n' % sender)
        for addr in showaddrs:
            ui.write('%s\n' % addr)
        for m, subj, ds in msgs:
            ui.write('Subject: %s\n' % subj)
            if ds:
                ui.write(ds)
        ui.write('\n')
        if ui.promptchoice(_('are you sure you want to send (yn)?'),
                           (_('&Yes'), _('&No'))):
            raise util.Abort(_('patchbomb canceled'))

    ui.write('\n')

    parent = opts.get('in_reply_to') or None
    # angle brackets may be omitted, they're not semantically part of the msg-id
    if parent is not None:
        if not parent.startswith('<'):
            parent = '<' + parent
        if not parent.endswith('>'):
            parent += '>'

    first = True

    sender_addr = email.Utils.parseaddr(sender)[1]
    sender = mail.addressencode(ui, sender, _charsets, opts.get('test'))
    sendmail = None
    for i, (m, subj, ds) in enumerate(msgs):
        try:
            m['Message-Id'] = genmsgid(m['X-Mercurial-Node'])
        except TypeError:
            # Intro/bundle messages carry no X-Mercurial-Node header.
            m['Message-Id'] = genmsgid('patchbomb')
        if parent:
            m['In-Reply-To'] = parent
            m['References'] = parent
        if first:
            # Thread every following message under the first one sent.
            parent = m['Message-Id']
            first = False

        m['User-Agent'] = 'Mercurial-patchbomb/%s' % util.version()
        m['Date'] = email.Utils.formatdate(start_time[0], localtime=True)

        # Bump the timestamp so messages sort in send order.
        start_time = (start_time[0] + 1, start_time[1])
        m['From'] = sender
        m['To'] = ', '.join(to)
        if cc:
            m['Cc'] = ', '.join(cc)
        if bcc:
            m['Bcc'] = ', '.join(bcc)
        if replyto:
            m['Reply-To'] = ', '.join(replyto)
        if opts.get('test'):
            # --test: display each message (through $PAGER when available)
            # instead of sending it.
            ui.status(_('Displaying '), subj, ' ...\n')
            ui.flush()
            if 'PAGER' in os.environ and not ui.plain():
                fp = util.popen(os.environ['PAGER'], 'w')
            else:
                fp = ui
            generator = email.Generator.Generator(fp, mangle_from_=False)
            try:
                generator.flatten(m, 0)
                fp.write('\n')
            except IOError, inst:
                # A closed pager (EPIPE) is not an error.
                if inst.errno != errno.EPIPE:
                    raise
            if fp is not ui:
                fp.close()
        else:
            if not sendmail:
                sendmail = mail.connect(ui, mbox=mbox)
            ui.status(_('Sending '), subj, ' ...\n')
            ui.progress(_('sending'), i, item=subj, total=len(msgs))
            if not mbox:
                # Exim does not remove the Bcc field
                del m['Bcc']
            fp = cStringIO.StringIO()
            generator = email.Generator.Generator(fp, mangle_from_=False)
            generator.flatten(m, 0)
            sendmail(sender_addr, to + bcc + cc, fp.getvalue())
def toposort(self, parents, sortmode): '''Return an ordering such that every uncommitted changeset is preceeded by all its uncommitted ancestors.''' def mapchildren(parents): """Return a (children, roots) tuple where 'children' maps parent revision identifiers to children ones, and 'roots' is the list of revisions without parents. 'parents' must be a mapping of revision identifier to its parents ones. """ visit = parents.keys() seen = set() children = {} roots = [] while visit: n = visit.pop(0) if n in seen: continue seen.add(n) # Ensure that nodes without parents are present in the # 'children' mapping. children.setdefault(n, []) hasparent = False for p in parents[n]: if not p in self.map: visit.append(p) hasparent = True children.setdefault(p, []).append(n) if not hasparent: roots.append(n) return children, roots # Sort functions are supposed to take a list of revisions which # can be converted immediately and pick one def makebranchsorter(): """If the previously converted revision has a child in the eligible revisions list, pick it. Return the list head otherwise. Branch sort attempts to minimize branch switching, which is harmful for Mercurial backend compression. 
""" prev = [None] def picknext(nodes): next = nodes[0] for n in nodes: if prev[0] in parents[n]: next = n break prev[0] = next return next return picknext def makesourcesorter(): """Source specific sort.""" keyfn = lambda n: self.commitcache[n].sortkey def picknext(nodes): return sorted(nodes, key=keyfn)[0] return picknext def makedatesorter(): """Sort revisions by date.""" dates = {} def getdate(n): if n not in dates: dates[n] = util.parsedate(self.commitcache[n].date) return dates[n] def picknext(nodes): return min([(getdate(n), n) for n in nodes])[1] return picknext if sortmode == 'branchsort': picknext = makebranchsorter() elif sortmode == 'datesort': picknext = makedatesorter() elif sortmode == 'sourcesort': picknext = makesourcesorter() else: raise util.Abort(_('unknown sort mode: %s') % sortmode) children, actives = mapchildren(parents) s = [] pendings = {} while actives: n = picknext(actives) actives.remove(n) s.append(n) # Update dependents list for c in children.get(n, []): if c not in pendings: pendings[c] = [p for p in parents[c] if p not in self.map] try: pendings[c].remove(n) except ValueError: raise util.Abort( _('cycle detected between %s and %s') % (recode(c), recode(n))) if not pendings[c]: # Parents are converted, node is eligible actives.insert(0, c) pendings[c] = None if len(s) != len(parents): raise util.Abort(_("not all revisions were sorted")) return s