def bundle_to_json(fh):
    """
    Convert the received HG10xx data stream (a mercurial 1.0 bundle created
    using hg push from the command line) to a JSON document.

    :param fh: readable binary stream positioned at the start of the bundle.
    :returns: JSON string (pretty-printed) of the unpacked changegroups.
    """
    # See http://www.wstein.org/home/wstein/www/home/was/patches/hg_json
    hg_unbundle10_obj = readbundle(fh, None)
    # list() is the idiomatic form of [x for x in iterable].
    groups = list(unpack_groups(hg_unbundle10_obj))
    return json.dumps(groups, indent=4)
def bundle_to_json(fh):
    """
    Convert the received HG10xx data stream (a mercurial 1.0 bundle created
    using hg push from the command line) to a JSON document.

    :param fh: readable binary stream positioned at the start of the bundle.
    :returns: JSON string (pretty-printed) of the unpacked changegroups.
    """
    # See http://www.wstein.org/home/wstein/www/home/was/patches/hg_json
    hg_unbundle10_obj = readbundle(fh, None)
    # list() is the idiomatic form of [x for x in iterable].
    groups = list(unpack_groups(hg_unbundle10_obj))
    return json.dumps(groups, indent=4)
def restore(self):
    '''Restore committed changes from backup'''
    bfile = self.bu.backupfile('bundle')

    if os.path.exists(bfile):
        f = None
        try:
            try:
                f = open(bfile, 'r')
                bundle = changegroup.readbundle(f, bfile)
                self.ws.repo.addchangegroup(bundle, 'strip',
                                            'bundle:%s' % bfile)
            except EnvironmentError as e:
                raise util.Abort("couldn't restore committed changes: %s\n"
                                 "   %s" % (bfile, e))
            except HgLookupError as e:
                raise CdmNodeMissing("couldn't restore committed changes",
                                     e.name)
        finally:
            # Fix: the bundle file was never closed (resource leak); close
            # it like the sibling restore() variant in this file does.
            if f and not f.closed:
                f.close()
def restore(self):
    '''Restore committed changes from backup'''
    bfile = self.bu.backupfile('bundle')

    if os.path.exists(bfile):
        f = None
        try:
            try:
                f = open(bfile, 'r')
                bundle = changegroup.readbundle(f, bfile)
                self.ws.repo.addchangegroup(bundle, 'strip',
                                            'bundle:%s' % bfile)
            except EnvironmentError as e:
                # I/O or OS failure while reading/applying the bundle.
                raise util.Abort("couldn't restore committed changes: %s\n"
                                 "   %s" % (bfile, e))
        finally:
            # Always close the bundle file, even when aborting.
            if f and not f.closed:
                f.close()
def restore(self):
    '''Restore committed changes from backup'''
    if not self.bu.exists('bundle'):
        return

    bpath = self.bu.backupfile('bundle')
    f = None
    try:
        try:
            f = self.bu.open('bundle')
            bundle = changegroup.readbundle(f, bpath)
            self.ws.repo.addchangegroup(bundle, 'strip',
                                        'bundle:%s' % bpath)
        except EnvironmentError as e:
            raise util.Abort("couldn't restore committed changes: %s\n"
                             "   %s" % (bpath, e))
        except error.LookupError as e:
            # A node referenced by the bundle is missing locally.
            raise CdmNodeMissing("couldn't restore committed changes",
                                 e.name)
    finally:
        # Fix: the bundle file handle was never closed (resource leak);
        # close it like the sibling restore() variant in this file does.
        if f and not f.closed:
            f.close()
def to_json(ifilename, ofilename, compression='none'):
    """
    Given an HG10xx file (Mercurial 1.0 bundle) ``ifilename``, convert it to
    JSON and dump it to ``ofilename``.

    Reads from stdin when ``ifilename`` is falsy and writes to stdout when
    ``ofilename`` is falsy.  ``compression`` is currently unused (kept for
    interface compatibility).
    """
    if ifilename:
        ifile = open(ifilename, 'rb')
    else:
        ifile = sys.stdin
    try:
        fh = readbundle(ifile, ifilename)
        oobj = [group for group in unpack_groups(fh)]
        # this is unimplemented in Mercurial 1.8.4, unfortunately (?)
        #fh.close()
    finally:
        # Fix: close the input file if we opened it (it was never closed).
        if ifilename:
            ifile.close()

    if ofilename:
        ofile = open(ofilename, 'w')
    else:
        ofile = sys.stdout
    try:
        json.dump(oobj, ofile, indent=4)
    finally:
        # Fix: only close the output stream we opened ourselves; the
        # original unconditionally closed sys.stdout.
        if ofilename:
            ofile.close()
def clone(self, remote, heads=[], stream=False):
    """Clone from ``remote``, preferring a server-advertised pre-generated
    bundle over the normal clone protocol when possible.

    Falls back to the regular clone path when bundle clone is unsupported,
    no bundles are available, specific heads were requested, or a stream
    clone was requested.
    """
    # NOTE(review): heads=[] is a mutable default argument; it is only
    # read here, so behavior is unaffected, but callers must not mutate it.
    supported = True

    if (exchange and hasattr(exchange, '_maybeapplyclonebundle')
            and remote.capable('clonebundles')):
        supported = False
        self.ui.warn(_('(mercurial client has built-in support for '
                       'bundle clone features; the "bundleclone" '
                       'extension can likely safely be removed)\n'))

        if not self.ui.configbool('experimental', 'clonebundles', False):
            self.ui.warn(_('(but the experimental.clonebundles config '
                           'flag is not enabled: enable it before '
                           'disabling bundleclone or cloning from '
                           'pre-generated bundles may not work)\n'))
            # We assume that presence of the bundleclone extension
            # means they want clonebundles enabled. Otherwise, why do
            # they have bundleclone enabled? So silently enable it.
            # NOTE(review): bare ``ui`` (not ``self.ui``) -- presumably a
            # closure variable from the enclosing reposetup(); confirm.
            ui.setconfig('experimental', 'clonebundles', True)
    elif not remote.capable('bundles'):
        supported = False
        self.ui.debug(_('bundle clone not supported\n'))
    elif heads:
        supported = False
        self.ui.debug(_('cannot perform bundle clone if heads requested\n'))
    elif stream:
        supported = False
        self.ui.debug(_('ignoring bundle clone because stream was '
                        'requested\n'))

    if not supported:
        return super(bundleclonerepo, self).clone(remote, heads=heads,
                                                  stream=stream)

    result = remote._call('bundles')

    if not result:
        self.ui.note(_('no bundles available; using normal clone\n'))
        return super(bundleclonerepo, self).clone(remote, heads=heads,
                                                  stream=stream)

    pyver = sys.version_info
    pyver = (pyver[0], pyver[1], pyver[2])

    hgver = util.version()
    # Discard bit after '+'.
    hgver = hgver.split('+')[0]
    try:
        hgver = tuple([int(i) for i in hgver.split('.')[0:2]])
    except ValueError:
        hgver = (0, 0)

    # Testing backdoors.
    if ui.config('bundleclone', 'fakepyver'):
        pyver = ui.configlist('bundleclone', 'fakepyver')
        pyver = tuple(int(v) for v in pyver)

    if ui.config('bundleclone', 'fakehgver'):
        hgver = ui.configlist('bundleclone', 'fakehgver')
        hgver = tuple(int(v) for v in hgver[0:2])

    entries = []
    snifilteredfrompython = False
    snifilteredfromhg = False

    # Manifest format: one entry per line, "URL key=value key=value ...".
    for line in result.splitlines():
        fields = line.split()
        url = fields[0]

        attrs = {}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            attrs[urllib.unquote(key)] = urllib.unquote(value)

        # Filter out SNI entries if we don't support SNI.
        if attrs.get('requiresni') == 'true':
            skip = False
            if pyver < (2, 7, 9):
                # Take this opportunity to inform people they are using an
                # old, insecure Python.
                if not snifilteredfrompython:
                    self.ui.warn(_('(your Python is older than 2.7.9 '
                                   'and does not support modern and '
                                   'secure SSL/TLS; please consider '
                                   'upgrading your Python to a secure '
                                   'version)\n'))
                    snifilteredfrompython = True
                skip = True

            if hgver < (3, 3):
                if not snifilteredfromhg:
                    # Fixed message typo: "you Mercurial" -> "your".
                    self.ui.warn(_('(your Mercurial is old and does '
                                   'not support modern and secure '
                                   'SSL/TLS; please consider '
                                   'upgrading your Mercurial to 3.3+ '
                                   'which supports modern and secure '
                                   'SSL/TLS)\n'))
                    snifilteredfromhg = True
                skip = True

            if skip:
                self.ui.warn(_('(ignoring URL on server that requires '
                               'SNI)\n'))
                continue

        entries.append((url, attrs))

    if not entries:
        # Don't fall back to normal clone because we don't want mass
        # fallback in the wild to barage servers expecting bundle
        # offload.
        raise util.Abort(_('no appropriate bundles available'),
                         hint=_('you may wish to complain to the '
                                'server operator'))

    # The configuration is allowed to define lists of preferred
    # attributes and values. If this is present, sort results according
    # to that preference. Otherwise, use manifest order and select the
    # first entry.
    prefers = self.ui.configlist('bundleclone', 'prefers', default=[])
    if prefers:
        prefers = [p.split('=', 1) for p in prefers]

        def compareentry(a, b):
            # cmp-style comparator: prefer the entry matching the earliest
            # configured (key, value) preference.
            aattrs = a[1]
            battrs = b[1]

            # Iterate over local preferences.
            for pkey, pvalue in prefers:
                avalue = aattrs.get(pkey)
                bvalue = battrs.get(pkey)

                # Special case for b is missing attribute and a matches
                # exactly.
                if avalue is not None and bvalue is None and avalue == pvalue:
                    return -1

                # Special case for a missing attribute and b matches
                # exactly.
                if bvalue is not None and avalue is None and bvalue == pvalue:
                    return 1

                # We can't compare unless the attribute is defined on
                # both entries.
                if avalue is None or bvalue is None:
                    continue

                # Same values should fall back to next attribute.
                if avalue == bvalue:
                    continue

                # Exact matches come first.
                if avalue == pvalue:
                    return -1
                if bvalue == pvalue:
                    return 1

                # Fall back to next attribute.
                continue

            # Entries could not be sorted based on attributes. This
            # says they are equal, which will fall back to index order,
            # which is what we want.
            return 0

        entries = sorted(entries, cmp=compareentry)

    url, attrs = entries[0]

    if not url:
        self.ui.note(_('invalid bundle manifest; using normal clone\n'))
        return super(bundleclonerepo, self).clone(remote, heads=heads,
                                                  stream=stream)

    # Fixed i18n bug: translate the format string, then interpolate
    # (was _('... %s\n' % url), which defeats translation lookup).
    self.ui.status(_('downloading bundle %s\n') % url)

    try:
        fh = hgurl.open(self.ui, url)
        # Stream clone data is not changegroup data. Handle it
        # specially.
        if 'stream' in attrs:
            # NOTE(review): ``reqs`` is computed but unused in this
            # variant -- retained for fidelity; confirm against upstream.
            reqs = set(attrs['stream'].split(','))
            l = fh.readline()
            filecount, bytecount = map(int, l.split(' ', 1))
            self.ui.status(_('streaming all changes\n'))
            consumev1(self, fh, filecount, bytecount)
        else:
            if exchange:
                cg = exchange.readbundle(self.ui, fh, 'stream')
            else:
                cg = changegroup.readbundle(fh, 'stream')

            # Mercurial 3.6 introduced cgNunpacker.apply().
            # Before that, there was changegroup.addchangegroup().
            # Before that, there was localrepository.addchangegroup().
            if hasattr(cg, 'apply'):
                cg.apply(self, 'bundleclone', url)
            elif hasattr(changegroup, 'addchangegroup'):
                changegroup.addchangegroup(self, cg, 'bundleclone', url)
            else:
                self.addchangegroup(cg, 'bundleclone', url)

        self.ui.status(_('finishing applying bundle; pulling\n'))
        # Maintain compatibility with Mercurial 2.x.
        if exchange:
            return exchange.pull(self, remote, heads=heads)
        else:
            return self.pull(remote, heads=heads)
    except (urllib2.HTTPError, urllib2.URLError) as e:
        if isinstance(e, urllib2.HTTPError):
            msg = _('HTTP error fetching bundle: %s') % str(e)
        else:
            msg = _('error fetching bundle: %s') % e.reason

        # Don't fall back to regular clone unless explicitly told to.
        if not self.ui.configbool('bundleclone', 'fallbackonerror', False):
            raise util.Abort(msg, hint=_('consider contacting the '
                                         'server operator if this error '
                                         'persists'))

        self.ui.warn(msg + '\n')
        self.ui.warn(_('falling back to normal clone\n'))

        return super(bundleclonerepo, self).clone(remote, heads=heads,
                                                  stream=stream)
def clone(self, remote, heads=[], stream=False):
    """Clone from ``remote``, preferring a server-advertised pre-generated
    bundle over the normal clone protocol when possible.

    Falls back to the regular clone path when bundle clone is unsupported,
    no bundles are available, specific heads were requested, or a stream
    clone was requested.
    """
    # NOTE(review): heads=[] is a mutable default argument; it is only
    # read here, so behavior is unaffected, but callers must not mutate it.
    supported = True

    if (exchange and hasattr(exchange, '_maybeapplyclonebundle')
            and remote.capable('clonebundles')):
        supported = False
        self.ui.warn(_('(mercurial client has built-in support for '
                       'bundle clone features; the "bundleclone" '
                       'extension can likely safely be removed)\n'))

        if not self.ui.configbool('experimental', 'clonebundles', False):
            self.ui.warn(_('(but the experimental.clonebundles config '
                           'flag is not enabled: enable it before '
                           'disabling bundleclone or cloning from '
                           'pre-generated bundles may not work)\n'))
            # We assume that presence of the bundleclone extension
            # means they want clonebundles enabled. Otherwise, why do
            # they have bundleclone enabled? So silently enable it.
            # NOTE(review): bare ``ui`` (not ``self.ui``) -- presumably a
            # closure variable from the enclosing reposetup(); confirm.
            ui.setconfig('experimental', 'clonebundles', True)
    elif not remote.capable('bundles'):
        supported = False
        self.ui.debug(_('bundle clone not supported\n'))
    elif heads:
        supported = False
        self.ui.debug(_('cannot perform bundle clone if heads requested\n'))
    elif stream:
        supported = False
        self.ui.debug(_('ignoring bundle clone because stream was '
                        'requested\n'))

    if not supported:
        return super(bundleclonerepo, self).clone(remote, heads=heads,
                                                  stream=stream)

    result = remote._call('bundles')

    if not result:
        self.ui.note(_('no bundles available; using normal clone\n'))
        return super(bundleclonerepo, self).clone(remote, heads=heads,
                                                  stream=stream)

    pyver = sys.version_info
    pyver = (pyver[0], pyver[1], pyver[2])

    hgver = util.version()
    # Discard bit after '+'.
    hgver = hgver.split('+')[0]
    try:
        hgver = tuple([int(i) for i in hgver.split('.')[0:2]])
    except ValueError:
        hgver = (0, 0)

    # Testing backdoors.
    if ui.config('bundleclone', 'fakepyver'):
        pyver = ui.configlist('bundleclone', 'fakepyver')
        pyver = tuple(int(v) for v in pyver)

    if ui.config('bundleclone', 'fakehgver'):
        hgver = ui.configlist('bundleclone', 'fakehgver')
        hgver = tuple(int(v) for v in hgver[0:2])

    entries = []
    snifilteredfrompython = False
    snifilteredfromhg = False

    # Manifest format: one entry per line, "URL key=value key=value ...".
    for line in result.splitlines():
        fields = line.split()
        url = fields[0]

        attrs = {}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            attrs[urllib.unquote(key)] = urllib.unquote(value)

        # Filter out SNI entries if we don't support SNI.
        if attrs.get('requiresni') == 'true':
            skip = False
            if pyver < (2, 7, 9):
                # Take this opportunity to inform people they are using an
                # old, insecure Python.
                if not snifilteredfrompython:
                    self.ui.warn(_('(your Python is older than 2.7.9 '
                                   'and does not support modern and '
                                   'secure SSL/TLS; please consider '
                                   'upgrading your Python to a secure '
                                   'version)\n'))
                    snifilteredfrompython = True
                skip = True

            if hgver < (3, 3):
                if not snifilteredfromhg:
                    # Fixed message typo: "you Mercurial" -> "your".
                    self.ui.warn(_('(your Mercurial is old and does '
                                   'not support modern and secure '
                                   'SSL/TLS; please consider '
                                   'upgrading your Mercurial to 3.3+ '
                                   'which supports modern and secure '
                                   'SSL/TLS)\n'))
                    snifilteredfromhg = True
                skip = True

            if skip:
                self.ui.warn(_('(ignoring URL on server that requires '
                               'SNI)\n'))
                continue

        entries.append((url, attrs))

    if not entries:
        # Don't fall back to normal clone because we don't want mass
        # fallback in the wild to barage servers expecting bundle
        # offload.
        raise util.Abort(_('no appropriate bundles available'),
                         hint=_('you may wish to complain to the '
                                'server operator'))

    # The configuration is allowed to define lists of preferred
    # attributes and values. If this is present, sort results according
    # to that preference. Otherwise, use manifest order and select the
    # first entry.
    prefers = self.ui.configlist('bundleclone', 'prefers', default=[])
    if prefers:
        prefers = [p.split('=', 1) for p in prefers]

        def compareentry(a, b):
            # cmp-style comparator: prefer the entry matching the earliest
            # configured (key, value) preference.
            aattrs = a[1]
            battrs = b[1]

            # Iterate over local preferences.
            for pkey, pvalue in prefers:
                avalue = aattrs.get(pkey)
                bvalue = battrs.get(pkey)

                # Special case for b is missing attribute and a matches
                # exactly.
                if avalue is not None and bvalue is None and avalue == pvalue:
                    return -1

                # Special case for a missing attribute and b matches
                # exactly.
                if bvalue is not None and avalue is None and bvalue == pvalue:
                    return 1

                # We can't compare unless the attribute is defined on
                # both entries.
                if avalue is None or bvalue is None:
                    continue

                # Same values should fall back to next attribute.
                if avalue == bvalue:
                    continue

                # Exact matches come first.
                if avalue == pvalue:
                    return -1
                if bvalue == pvalue:
                    return 1

                # Fall back to next attribute.
                continue

            # Entries could not be sorted based on attributes. This
            # says they are equal, which will fall back to index order,
            # which is what we want.
            return 0

        entries = sorted(entries, cmp=compareentry)

    url, attrs = entries[0]

    if not url:
        self.ui.note(_('invalid bundle manifest; using normal clone\n'))
        return super(bundleclonerepo, self).clone(remote, heads=heads,
                                                  stream=stream)

    # Fixed i18n bug: translate the format string, then interpolate
    # (was _('... %s\n' % url), which defeats translation lookup).
    self.ui.status(_('downloading bundle %s\n') % url)

    try:
        fh = hgurl.open(self.ui, url)
        # Stream clone data is not changegroup data. Handle it
        # specially.
        if 'stream' in attrs:
            # NOTE(review): ``reqs`` is computed but unused in this
            # variant -- retained for fidelity; confirm against upstream.
            reqs = set(attrs['stream'].split(','))
            l = fh.readline()
            filecount, bytecount = map(int, l.split(' ', 1))
            self.ui.status(_('streaming all changes\n'))
            consumev1(self, fh, filecount, bytecount)
        else:
            if exchange:
                cg = exchange.readbundle(self.ui, fh, 'stream')
            else:
                cg = changegroup.readbundle(fh, 'stream')

            # Mercurial 3.6 introduced cgNunpacker.apply().
            # Before that, there was changegroup.addchangegroup().
            # Before that, there was localrepository.addchangegroup().
            if hasattr(cg, 'apply'):
                cg.apply(self, 'bundleclone', url)
            elif hasattr(changegroup, 'addchangegroup'):
                changegroup.addchangegroup(self, cg, 'bundleclone', url)
            else:
                self.addchangegroup(cg, 'bundleclone', url)

        self.ui.status(_('finishing applying bundle; pulling\n'))
        # Maintain compatibility with Mercurial 2.x.
        if exchange:
            return exchange.pull(self, remote, heads=heads)
        else:
            return self.pull(remote, heads=heads)
    except (urllib2.HTTPError, urllib2.URLError) as e:
        if isinstance(e, urllib2.HTTPError):
            msg = _('HTTP error fetching bundle: %s') % str(e)
        else:
            msg = _('error fetching bundle: %s') % e.reason

        # Don't fall back to regular clone unless explicitly told to.
        if not self.ui.configbool('bundleclone', 'fallbackonerror', False):
            raise util.Abort(msg, hint=_('consider contacting the '
                                         'server operator if this error '
                                         'persists'))

        self.ui.warn(msg + '\n')
        self.ui.warn(_('falling back to normal clone\n'))

        return super(bundleclonerepo, self).clone(remote, heads=heads,
                                                  stream=stream)
def strip(ui, repo, nodelist, backup="all"):
    """Remove ``nodelist`` (and their descendants) from the repository.

    ``backup`` controls bundling of the removed changesets: "all" writes a
    full backup bundle; "strip" keeps the partial (rescue) bundle on disk.
    Revisions above the strip point that are NOT descendants of it are
    bundled and re-applied after truncation.
    """
    cl = repo.changelog
    # TODO delete the undo files, and handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    keeppartialbundle = backup == 'strip'

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    for rev in striplist:
        for desc in cl.descendants(rev):
            tostrip.add(desc)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # compute heads
    saveheads = set(saverevs)
    for r in xrange(striprev + 1, len(cl)):
        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(*saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    if backup == "all":
        # NOTE: ``node`` here is the variable leaked from the striplist
        # comprehension above (Python 2 comprehension scoping), i.e. the
        # last entry of nodelist.
        backupfile = _bundle(repo, stripbases, cl.heads(), node, 'backup')
        repo.ui.status(_("saved backup bundle to %s\n") % backupfile)
    if saveheads or savebases:
        # do not compress partial bundle if we remove it from disk later
        chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                            compress=keeppartialbundle)

    mfst = repo.manifest

    tr = repo.transaction("strip")
    offset = len(tr.entries)

    try:
        tr.startgroup()
        cl.strip(striprev, tr)
        mfst.strip(striprev, tr)
        for fn in files:
            repo.file(fn).strip(striprev, tr)
        tr.endgroup()

        try:
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                repo.sopener(file, 'a').truncate(troffset)
            tr.close()
        except:  # re-raises
            tr.abort()
            raise

        if saveheads or savebases:
            ui.note(_("adding branch\n"))
            f = open(chgrpfile, "rb")
            gen = changegroup.readbundle(f, chgrpfile)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
            if not keeppartialbundle:
                os.unlink(chgrpfile)

        for m in updatebm:
            bm[m] = repo['.'].node()
        bookmarks.write(repo)
    except:  # re-raises
        if backupfile:
            ui.warn(_("strip failed, full bundle stored in '%s'\n")
                    % backupfile)
        elif saveheads:
            ui.warn(_("strip failed, partial bundle stored in '%s'\n")
                    % chgrpfile)
        raise

    repo.destroyed()

    # remove potential unknown phase
    # XXX using to_strip data would be faster
    phases.filterunknown(repo)
def strip(ui, repo, nodelist, backup="all", topic='backup'):
    """Remove ``nodelist`` (and their descendants) from the repository.

    ``backup`` controls bundling of the removed changesets ("all" writes a
    full backup bundle under ``topic``; "strip" keeps the partial bundle on
    disk).  Bookmarks pointing at stripped revisions are moved to
    ``newbmtarget``.
    """
    repo = repo.unfiltered()
    repo.destroying()

    cl = repo.changelog
    # TODO handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    keeppartialbundle = backup == 'strip'

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    for rev in striplist:
        for desc in cl.descendants([rev]):
            tostrip.add(desc)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # compute heads
    saveheads = set(saverevs)
    for r in xrange(striprev + 1, len(cl)):
        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
    # but is much faster
    newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
    if newbmtarget:
        newbmtarget = repo[newbmtarget[0]].node()
    else:
        newbmtarget = '.'

    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    if backup == "all":
        # NOTE: ``node`` here is the variable leaked from the striplist
        # comprehension above (Python 2 comprehension scoping), i.e. the
        # last entry of nodelist.
        backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
        repo.ui.status(_("saved backup bundle to %s\n") % backupfile)
        repo.ui.log("backupbundle", "saved backup bundle to %s\n",
                    backupfile)
    if saveheads or savebases:
        # do not compress partial bundle if we remove it from disk later
        chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                            compress=keeppartialbundle)

    mfst = repo.manifest

    tr = repo.transaction("strip")
    offset = len(tr.entries)

    try:
        tr.startgroup()
        cl.strip(striprev, tr)
        mfst.strip(striprev, tr)
        for fn in files:
            repo.file(fn).strip(striprev, tr)
        tr.endgroup()

        try:
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                repo.sopener(file, 'a').truncate(troffset)
            tr.close()
        except:  # re-raises
            tr.abort()
            raise

        if saveheads or savebases:
            ui.note(_("adding branch\n"))
            f = open(chgrpfile, "rb")
            gen = changegroup.readbundle(f, chgrpfile)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
            if not keeppartialbundle:
                os.unlink(chgrpfile)

        # remove undo files
        for undofile in repo.undofiles():
            try:
                os.unlink(undofile)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    ui.warn(_('error removing %s: %s\n')
                            % (undofile, str(e)))

        for m in updatebm:
            bm[m] = repo[newbmtarget].node()
        bm.write()
    except:  # re-raises
        # NOTE(review): the source text was truncated before this handler;
        # it is reconstructed from the sibling strip() variant in this file
        # so the outer ``try`` is syntactically complete -- confirm against
        # upstream Mercurial repair.py.
        if backupfile:
            ui.warn(_("strip failed, full bundle stored in '%s'\n")
                    % backupfile)
        elif saveheads:
            ui.warn(_("strip failed, partial bundle stored in '%s'\n")
                    % chgrpfile)
        raise

    repo.destroyed()
repo.mq.checkapplied = saved tempopts = {} tempopts['message'] = "pending changes temporary commit" tempopts['addremove'] = True oldquiet = ui.quiet try: ui.quiet = True node = cmdutil.commit(ui, repo, commitfunc, None, tempopts) finally: ui.quiet = oldquiet tmpwctx = repo[node] try: fp = shelvedfile(repo, basename, 'hg').opener() gen = changegroup.readbundle(fp, fp.name) repo.addchangegroup(gen, 'unshelve', 'bundle:' + fp.name) nodes = [ctx.node() for ctx in repo.set('%d:', oldtiprev)] phases.retractboundary(repo, phases.secret, nodes) finally: fp.close() shelvectx = repo['tip'] # If the shelve is not immediately on top of the commit # we'll be merging with, rebase it to be on top. if tmpwctx.node() != shelvectx.parents()[0].node(): try: rebase.rebase(ui, repo, **{ 'rev' : [shelvectx.rev()], 'dest' : str(tmpwctx.rev()),
def clone(self, remote, heads=[], stream=False):
    """Clone from ``remote``, preferring a server-advertised pre-generated
    bundle over the normal clone protocol when possible.

    Falls back to the regular clone path when the server lacks the
    'bundles' capability, specific heads were requested, or a stream clone
    was requested.
    """
    # NOTE(review): heads=[] is a mutable default argument; it is only
    # read here, so behavior is unaffected, but callers must not mutate it.
    supported = True

    if not remote.capable('bundles'):
        supported = False
        self.ui.debug(_('bundle clone not supported\n'))
    elif heads:
        supported = False
        self.ui.debug(_('cannot perform bundle clone if heads requested\n'))
    elif stream:
        supported = False
        self.ui.debug(_('ignoring bundle clone because stream was '
                        'requested\n'))

    if not supported:
        return super(bundleclonerepo, self).clone(remote, heads=heads,
                                                  stream=stream)

    result = remote._call('bundles')

    if not result:
        self.ui.note(_('no bundles available; using normal clone\n'))
        return super(bundleclonerepo, self).clone(remote, heads=heads,
                                                  stream=stream)

    pyver = sys.version_info
    pyver = (pyver[0], pyver[1], pyver[2])

    # Testing backdoor.
    # NOTE(review): bare ``ui`` (not ``self.ui``) -- presumably a closure
    # variable from the enclosing reposetup(); confirm.
    if ui.config('bundleclone', 'fakepyver'):
        pyver = ui.configlist('bundleclone', 'fakepyver')
        pyver = tuple(int(v) for v in pyver)

    entries = []
    snifiltered = False

    # Manifest format: one entry per line, "URL key=value key=value ...".
    for line in result.splitlines():
        fields = line.split()
        url = fields[0]

        attrs = {}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            attrs[urllib.unquote(key)] = urllib.unquote(value)

        # Filter out SNI entries if we don't support SNI.
        if attrs.get('requiresni') == 'true' and pyver < (2, 7, 9):
            # Take this opportunity to inform people they are using an
            # old, insecure Python.
            if not snifiltered:
                self.ui.warn(_('(ignoring URL on server that requires '
                               'SNI)\n'))
                self.ui.warn(_('(your Python is older than 2.7.9 and '
                               'does not support modern and secure '
                               'SSL/TLS; please consider upgrading '
                               'your Python to a secure version)\n'))
                snifiltered = True
            continue

        entries.append((url, attrs))

    if not entries:
        # Don't fall back to normal clone because we don't want mass
        # fallback in the wild to barage servers expecting bundle
        # offload.
        raise util.Abort(_('no appropriate bundles available'),
                         hint=_('you may wish to complain to the '
                                'server operator'))

    # The configuration is allowed to define lists of preferred
    # attributes and values. If this is present, sort results according
    # to that preference. Otherwise, use manifest order and select the
    # first entry.
    prefers = self.ui.configlist('bundleclone', 'prefers', default=[])
    if prefers:
        prefers = [p.split('=', 1) for p in prefers]

        def compareentry(a, b):
            # cmp-style comparator: prefer the entry matching the earliest
            # configured (key, value) preference.
            aattrs = a[1]
            battrs = b[1]

            # Iterate over local preferences.
            for pkey, pvalue in prefers:
                avalue = aattrs.get(pkey)
                bvalue = battrs.get(pkey)

                # Special case for b is missing attribute and a matches
                # exactly.
                if avalue is not None and bvalue is None and avalue == pvalue:
                    return -1

                # Special case for a missing attribute and b matches
                # exactly.
                if bvalue is not None and avalue is None and bvalue == pvalue:
                    return 1

                # We can't compare unless the attribute is defined on
                # both entries.
                if avalue is None or bvalue is None:
                    continue

                # Same values should fall back to next attribute.
                if avalue == bvalue:
                    continue

                # Exact matches come first.
                if avalue == pvalue:
                    return -1
                if bvalue == pvalue:
                    return 1

                # Fall back to next attribute.
                continue

            # Entries could not be sorted based on attributes. This
            # says they are equal, which will fall back to index order,
            # which is what we want.
            return 0

        entries = sorted(entries, cmp=compareentry)

    url, attrs = entries[0]

    if not url:
        self.ui.note(_('invalid bundle manifest; using normal clone\n'))
        return super(bundleclonerepo, self).clone(remote, heads=heads,
                                                  stream=stream)

    # Fixed i18n bug: translate the format string, then interpolate
    # (was _('... %s\n' % url), which defeats translation lookup).
    self.ui.status(_('downloading bundle %s\n') % url)

    try:
        fh = hgurl.open(self.ui, url)
        # Stream clone data is not changegroup data. Handle it
        # specially.
        if 'stream' in attrs:
            reqs = set(attrs['stream'].split(','))
            applystreamclone(self, reqs, fh)
        else:
            if exchange:
                cg = exchange.readbundle(self.ui, fh, 'stream')
            else:
                cg = changegroup.readbundle(fh, 'stream')

            # changegroup.addchangegroup() moved between Mercurial
            # versions; fall back to the repo method when absent.
            if hasattr(changegroup, 'addchangegroup'):
                changegroup.addchangegroup(self, cg, 'bundleclone', url)
            else:
                self.addchangegroup(cg, 'bundleclone', url)

        self.ui.status(_('finishing applying bundle; pulling\n'))
        # Maintain compatibility with Mercurial 2.x.
        if exchange:
            return exchange.pull(self, remote, heads=heads)
        else:
            return self.pull(remote, heads=heads)
    except (urllib2.HTTPError, urllib2.URLError) as e:
        if isinstance(e, urllib2.HTTPError):
            msg = _('HTTP error fetching bundle: %s') % str(e)
        else:
            msg = _('error fetching bundle: %s') % e.reason

        # Don't fall back to regular clone unless explicitly told to.
        if not self.ui.configbool('bundleclone', 'fallbackonerror', False):
            raise util.Abort(msg, hint=_('consider contacting the '
                                         'server operator if this error '
                                         'persists'))

        self.ui.warn(msg + '\n')
        self.ui.warn(_('falling back to normal clone\n'))

        return super(bundleclonerepo, self).clone(remote, heads=heads,
                                                  stream=stream)
def strip(ui, repo, nodelist, backup="all"):
    """Remove ``nodelist`` (and their descendants) from the repository.

    ``backup`` controls bundling of the removed changesets: "all" writes a
    full backup bundle; "strip" keeps the partial (rescue) bundle on disk.
    Revisions above the strip point that are NOT descendants of it are
    bundled and re-applied after truncation.
    """
    cl = repo.changelog
    # TODO delete the undo files, and handle undo of merge sets
    if isinstance(nodelist, str):
        nodelist = [nodelist]
    striplist = [cl.rev(node) for node in nodelist]
    striprev = min(striplist)

    keeppartialbundle = backup == 'strip'

    # Some revisions with rev > striprev may not be descendants of striprev.
    # We have to find these revisions and put them in a bundle, so that
    # we can restore them after the truncations.
    # To create the bundle we use repo.changegroupsubset which requires
    # the list of heads and bases of the set of interesting revisions.
    # (head = revision in the set that has no descendant in the set;
    #  base = revision in the set that has no ancestor in the set)
    tostrip = set(striplist)
    for rev in striplist:
        for desc in cl.descendants(rev):
            tostrip.add(desc)

    files = _collectfiles(repo, striprev)
    saverevs = _collectbrokencsets(repo, files, striprev)

    # compute heads
    saveheads = set(saverevs)
    for r in xrange(striprev + 1, len(cl)):
        if r not in tostrip:
            saverevs.add(r)
            saveheads.difference_update(cl.parentrevs(r))
            saveheads.add(r)
    saveheads = [cl.node(r) for r in saveheads]

    # compute base nodes
    if saverevs:
        descendants = set(cl.descendants(*saverevs))
        saverevs.difference_update(descendants)
    savebases = [cl.node(r) for r in saverevs]
    stripbases = [cl.node(r) for r in tostrip]

    bm = repo._bookmarks
    updatebm = []
    for m in bm:
        rev = repo[bm[m]].rev()
        if rev in tostrip:
            updatebm.append(m)

    # create a changegroup for all the branches we need to keep
    backupfile = None
    if backup == "all":
        # NOTE: ``node`` here is the variable leaked from the striplist
        # comprehension above (Python 2 comprehension scoping), i.e. the
        # last entry of nodelist.
        backupfile = _bundle(repo, stripbases, cl.heads(), node, 'backup')
        repo.ui.status(_("saved backup bundle to %s\n") % backupfile)
    if saveheads or savebases:
        # do not compress partial bundle if we remove it from disk later
        chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
                            compress=keeppartialbundle)

    mfst = repo.manifest

    tr = repo.transaction("strip")
    offset = len(tr.entries)

    try:
        tr.startgroup()
        cl.strip(striprev, tr)
        mfst.strip(striprev, tr)
        for fn in files:
            repo.file(fn).strip(striprev, tr)
        tr.endgroup()

        try:
            for i in xrange(offset, len(tr.entries)):
                file, troffset, ignore = tr.entries[i]
                repo.sopener(file, 'a').truncate(troffset)
            tr.close()
        except:  # re-raises
            tr.abort()
            raise

        if saveheads or savebases:
            ui.note(_("adding branch\n"))
            f = open(chgrpfile, "rb")
            gen = changegroup.readbundle(f, chgrpfile)
            if not repo.ui.verbose:
                # silence internal shuffling chatter
                repo.ui.pushbuffer()
            repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True)
            if not repo.ui.verbose:
                repo.ui.popbuffer()
            f.close()
            if not keeppartialbundle:
                os.unlink(chgrpfile)

        for m in updatebm:
            bm[m] = repo['.'].node()
        bookmarks.write(repo)
    except:  # re-raises
        if backupfile:
            ui.warn(_("strip failed, full bundle stored in '%s'\n")
                    % backupfile)
        elif saveheads:
            ui.warn(_("strip failed, partial bundle stored in '%s'\n")
                    % chgrpfile)
        raise

    repo.destroyed()

    # remove potential unknown phase
    # XXX using to_strip data would be faster
    phases.filterunknown(repo)