def _generateoutputparts(head, bundlerepo, bundleroots, bundlefile):
    '''Generate the list of bundle2 parts to send to the user for ``head``.

    Returns a list of bundle2 part objects.  (NOTE(review): the previous
    docstring claimed "tuple with raw bundle string and bundle type",
    which does not match the code — every path returns ``parts``, a list.)
    '''
    parts = []
    if not _needsrebundling(head, bundlerepo):
        # Fast path: the stored bundle already matches what was requested,
        # so forward its contents as-is.
        with util.posixfile(bundlefile, "rb") as f:
            unbundler = exchange.readbundle(bundlerepo.ui, f, bundlefile)
            if isinstance(unbundler, changegroup.cg1unpacker):
                # Legacy cg1 bundle: wrap its raw stream in a single
                # version-01 changegroup part.
                part = bundle2.bundlepart('changegroup',
                                          data=unbundler._stream.read())
                part.addparam('version', '01')
                parts.append(part)
            elif isinstance(unbundler, bundle2.unbundle20):
                haschangegroup = False
                for part in unbundler.iterparts():
                    if part.type == 'changegroup':
                        haschangegroup = True
                    # Re-wrap each stored part, copying its params, into the
                    # outgoing bundle.
                    newpart = bundle2.bundlepart(part.type, data=part.read())
                    for key, value in part.params.iteritems():
                        newpart.addparam(key, value)
                    parts.append(newpart)

                # A bundle without a changegroup cannot serve the requested
                # head — treat it as server-side corruption.
                if not haschangegroup:
                    raise error.Abort(
                        'unexpected bundle without changegroup part, ' +
                        'head: %s' % hex(head),
                        hint='report to administrator')
            else:
                raise error.Abort('unknown bundle type')
    else:
        # Stored bundle contains more revisions than requested: rebuild a
        # narrower changegroup covering only what the user asked for.
        parts = _rebundle(bundlerepo, bundleroots, head)

    return parts
def bundle2scratchbranch(op, part):
    '''unbundle a bundle2 part containing a changegroup to store

    Re-wraps the incoming changegroup in a fresh bundle2 bundle, writes it
    to a temporary file, and hands it to ``storebundle``.  The temporary
    file is always removed afterwards.  Returns 1 (part handled).
    '''
    bundler = bundle2.bundle20(op.repo.ui)
    cgversion = part.params.get('cgversion', '01')
    cgpart = bundle2.bundlepart('changegroup', data=part.read())
    cgpart.addparam('version', cgversion)
    bundler.addpart(cgpart)
    buf = util.chunkbuffer(bundler.getchunks())

    fd, bundlefile = tempfile.mkstemp()
    try:
        # os.fdopen takes ownership of fd; the `with` block guarantees the
        # file is closed even when the write fails.  (The previous nested
        # try/finally called fp.close() in the finally clause, which raised
        # NameError — masking the original error — if fdopen itself failed.)
        with os.fdopen(fd, r'wb') as fp:
            fp.write(buf.read())
        storebundle(op, part.params, bundlefile)
    finally:
        # mkstemp leaves the file on disk; best-effort cleanup, tolerating
        # the file already being gone.
        try:
            os.unlink(bundlefile)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

    return 1
def narrow_widen(repo, proto, oldincludes, oldexcludes, newincludes,
                 newexcludes, commonheads, cgversion, known, ellipses):
    """wireprotocol command to send data when a narrow clone is widen. We will
    be sending a changegroup here.

    The current set of arguments which are required:
    oldincludes: the old includes of the narrow copy
    oldexcludes: the old excludes of the narrow copy
    newincludes: the new includes of the narrow copy
    newexcludes: the new excludes of the narrow copy
    commonheads: list of heads which are common between the server and client
    cgversion(maybe): the changegroup version to produce
    known: list of nodes which are known on the client (used in ellipses cases)
    ellipses: whether to send ellipses data or not

    Returns a ``wireprototypes.streamres`` carrying either the widening
    bundle or, on ``error.Abort``, an ``error:abort`` part for the client.
    """
    preferuncompressed = False
    try:
        oldincludes = wireprototypes.decodelist(oldincludes)
        newincludes = wireprototypes.decodelist(newincludes)
        oldexcludes = wireprototypes.decodelist(oldexcludes)
        newexcludes = wireprototypes.decodelist(newexcludes)
        # validate the patterns
        narrowspec.validatepatterns(set(oldincludes))
        narrowspec.validatepatterns(set(newincludes))
        narrowspec.validatepatterns(set(oldexcludes))
        narrowspec.validatepatterns(set(newexcludes))

        common = wireprototypes.decodelist(commonheads)
        # Decode the client's known nodes when provided.  (Fix: the previous
        # code assigned `known = None` *before* testing `if known:`, so the
        # wire argument was silently discarded and never decoded.)
        known = wireprototypes.decodelist(known) if known else None
        # The wire value is a string; '0' means "no ellipses".
        if ellipses == '0':
            ellipses = False
        else:
            ellipses = bool(ellipses)

        newmatch = narrowspec.match(repo.root, include=newincludes,
                                    exclude=newexcludes)
        oldmatch = narrowspec.match(repo.root, include=oldincludes,
                                    exclude=oldexcludes)

        bundler = bundle2.widen_bundle(repo, oldmatch, newmatch, common,
                                       known, cgversion, ellipses)
    except error.Abort as exc:
        # Report the failure to the client inside the bundle itself.
        bundler = bundle2.bundle20(repo.ui)
        manargs = [('message', pycompat.bytestr(exc))]
        advargs = []
        if exc.hint is not None:
            advargs.append(('hint', exc.hint))
        bundler.addpart(bundle2.bundlepart('error:abort', manargs, advargs))
        # Error bundles are tiny; compression is not worth it.
        preferuncompressed = True

    chunks = bundler.getchunks()
    return wireprototypes.streamres(gen=chunks,
                                    prefer_uncompressed=preferuncompressed)
def storetobundlestore(orig, repo, op, unbundler):
    """stores the incoming bundle coming from push command to the bundlestore
    instead of applying on the revlogs

    Each incoming part is copied into a fresh bundle (reply parts are
    processed in place so the client still gets its responses), the bundle
    is written to a temporary file, handed to ``storebundle``, and the
    temporary file is removed on a best-effort basis.
    """
    repo.ui.status(_(b"storing changesets on the bundlestore\n"))
    bundler = bundle2.bundle20(repo.ui)

    # processing each part and storing it in bundler
    with bundle2.partiterator(repo, op, unbundler) as parts:
        for part in parts:
            bundlepart = None
            if part.type == b'replycaps':
                # This configures the current operation to allow reply parts.
                bundle2._processpart(op, part)
            else:
                # Copy the part (and its params) verbatim into the new bundle.
                bundlepart = bundle2.bundlepart(part.type, data=part.read())
                for key, value in pycompat.iteritems(part.params):
                    bundlepart.addparam(key, value)

                # Certain parts require a response
                if part.type in (b'pushkey', b'changegroup'):
                    if op.reply is not None:
                        rpart = op.reply.newpart(b'reply:%s' % part.type)
                        rpart.addparam(
                            b'in-reply-to', b'%d' % part.id, mandatory=False
                        )
                        rpart.addparam(b'return', b'1', mandatory=False)

            op.records.add(
                part.type,
                {
                    b'return': 1,
                },
            )
            if bundlepart:
                bundler.addpart(bundlepart)

    # storing the bundle in the bundlestore
    buf = util.chunkbuffer(bundler.getchunks())
    fd, bundlefile = pycompat.mkstemp()
    try:
        # os.fdopen takes ownership of fd; `with` guarantees the file is
        # closed even when the write fails.  (The previous nested try/finally
        # called fp.close() in its finally clause, which raised NameError —
        # masking the original error — if fdopen itself failed.)
        with os.fdopen(fd, 'wb') as fp:
            fp.write(buf.read())
        storebundle(op, {}, bundlefile)
    finally:
        try:
            os.unlink(bundlefile)
        except Exception:
            # we would rather see the original exception
            pass
def getscratchbranchparts(repo, peer, outgoing, ui, bookmark):
    """Build the bundle2 parts for pushing scratch commits to the server.

    Aborts when there is nothing to push or the peer lacks scratch-branch
    support; otherwise returns a one-element list holding the mandatory
    scratch-branch part with the changegroup as payload.
    """
    if not outgoing.missing:
        raise error.Abort(_(b'no commits to push'))
    if scratchbranchparttype not in bundle2.bundle2caps(peer):
        raise error.Abort(
            _(b'no server support for %r') % scratchbranchparttype
        )

    _validaterevset(
        repo, revsetlang.formatspec(b'%ln', outgoing.missing), bookmark
    )

    versions = changegroup.supportedoutgoingversions(repo)
    # Explicitly avoid using '01' changegroup version in infinitepush to
    # support general delta
    versions.discard(b'01')
    cgversion = min(versions)
    _handlelfs(repo, outgoing.missing)
    cg = changegroup.makestream(repo, outgoing, cgversion, b'push')

    params = {b'cgversion': cgversion}
    if bookmark:
        params[b'bookmark'] = bookmark
        # 'prevbooknode' is necessary for pushkey reply part
        prevnode = b''
        marks = repo._bookmarks
        if bookmark in marks:
            prevnode = hex(marks[bookmark])
        params[b'bookprevnode'] = prevnode
    # Do not send pushback bundle2 part with bookmarks if remotenames extension
    # is enabled. It will be handled manually in `_push()`
    if not isremotebooksenabled(ui):
        params[b'pushbackbookmarks'] = b'1'

    # .upper() marks this as a mandatory part: server will abort if there's no
    # handler
    scratchpart = bundle2.bundlepart(
        scratchbranchparttype.upper(),
        advisoryparams=pycompat.iteritems(params),
        data=cg,
    )
    return [scratchpart]
def getrebasepart(repo, peer, outgoing, onto, newhead=False):
    """Create the mandatory bundle2 part that asks the server to rebase.

    Aborts when there is nothing to rebase or the peer lacks rebase
    support; otherwise returns the part carrying the raw changegroup.
    """
    if not outgoing.missing:
        raise util.Abort(_('no commits to rebase'))
    if rebaseparttype not in bundle2.bundle2caps(peer):
        raise util.Abort(_('no server support for %r') % rebaseparttype)

    validaterevset(repo, revset.formatspec('%ln', outgoing.missing))

    rawcg = changegroup.getlocalchangegroupraw(repo, 'push', outgoing)
    # .upper() marks this as a mandatory part: server will abort if there's no
    # handler
    mandatory = [('onto', onto), ('newhead', repr(newhead))]
    return bundle2.bundlepart(rebaseparttype.upper(),
                              mandatoryparams=mandatory,
                              data=rawcg)
def _rebundle(bundlerepo, bundleroots, unknownhead):
    '''Regenerate a changegroup covering only the requested head.

    A stored bundle may include more revisions than the user requested
    (for example descendants of the requested head).  Build a fresh
    outgoing set rooted at ``bundleroots`` and capped at ``unknownhead``
    so that only the wanted revisions are emitted, and return it as a
    single-element list of bundle2 parts.
    '''
    version = '02'
    outgoing = discovery.outgoing(bundlerepo, commonheads=bundleroots,
                                  missingheads=[unknownhead])
    stream = changegroup.makestream(bundlerepo, outgoing, version, 'pull')
    payload = util.chunkbuffer(stream).read()

    cgpart = bundle2.bundlepart('changegroup', data=payload)
    cgpart.addparam('version', version)
    return [cgpart]
def createrebasepart(repo, peer, outgoing, onto, newhead):
    """Build the mandatory rebase part for the outgoing changesets.

    Also advertises which obsmarker formats this client can consume, so
    the server may reply with markers without full obsmarker exchange
    being enabled.
    """
    if not outgoing.missing:
        raise error.Abort(_('no changesets to rebase'))
    if rebaseparttype not in bundle2.bundle2caps(peer):
        raise error.Abort(_('no server support for %r') % rebaseparttype)

    validaterevset(repo, revsetlang.formatspec('%ln', outgoing.missing))

    cg = changegroup.makestream(repo, outgoing, '01', 'push')

    # Explicitly notify the server what obsmarker versions the client supports
    # so the client could receive marker from the server.
    #
    # The core mercurial logic will do the right thing (enable obsmarker
    # capabilities in the pushback bundle) if obsmarker exchange is enabled
    # client-side.
    #
    # But we want the marker without enabling marker exchange, and our server
    # could reply a marker without exchange or even obsstore enabled. So we
    # bypass the "standard" way of capabilities check by sending the supported
    # versions directly in our own part. Note: do not enable "exchange" because
    # it has an unwanted side effect: pushing markers from client to server.
    #
    # "createmarkers" is all we need to be able to write a new marker.
    obsmarkerversions = ''
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        obsmarkerversions = '\0'.join(str(v) for v in obsolete.formats)

    mandatory = [('onto', onto), ('newhead', repr(newhead))]
    advisory = [
        # advisory: (old) server could ignore this without error
        ('obsmarkerversions', obsmarkerversions),
    ]
    # .upper() marks this as a mandatory part: server will abort if there's no
    # handler
    return bundle2.bundlepart(
        rebaseparttype.upper(),
        mandatoryparams=mandatory,
        advisoryparams=advisory,
        data=cg)
def getrebasepart(repo, peer, outgoing, onto, newhead):
    """Create the mandatory bundle2 rebase part for ``outgoing.missing``.

    Aborts when there is nothing to rebase or the peer lacks rebase
    support; otherwise returns the part carrying the raw changegroup.
    """
    if not outgoing.missing:
        raise error.Abort(_('no commits to rebase'))
    if rebaseparttype not in bundle2.bundle2caps(peer):
        raise error.Abort(_('no server support for %r') % rebaseparttype)

    validaterevset(repo, revset.formatspec('%ln', outgoing.missing))

    rawcg = changegroup.getlocalchangegroupraw(repo, 'push', outgoing)
    # .upper() marks this as a mandatory part: server will abort if there's no
    # handler
    mandatory = [('onto', onto), ('newhead', repr(newhead))]
    return bundle2.bundlepart(
        rebaseparttype.upper(),
        mandatoryparams=mandatory,
        data=rawcg)
def _getbundlegitmetapart(bundler, repo, source, bundlecaps=None, **kwargs):
    '''send git metadata via bundle2

    Adds one 'b2x:fb:gitmeta' part per metadata file to ``bundler``.
    Does nothing unless the client advertised the 'fb_gitmeta' capability.
    '''
    # Fix: bundlecaps defaults to None, so the unguarded membership test
    # raised TypeError for callers that omitted it.  Guard first.
    if not bundlecaps or 'fb_gitmeta' not in bundlecaps:
        return

    filestooverwrite = gitmetafiles
    # Exclude the git-hg map file if the config indicates that the server
    # should only be serving the missing map data. _getbundle2partsgenerator
    # will serve the missing map data in this case.
    if repo.ui.configbool('gitlookup', 'onlymapdelta', False):
        filestooverwrite = filestooverwrite - set([gitmapfile])

    for fname in sorted(filestooverwrite):
        f = _getfile(repo, fname)
        if not f:
            # missing/unreadable metadata file: skip rather than abort
            continue
        part = bundle2.bundlepart('b2x:fb:gitmeta',
                                  [('filename', fname)],
                                  data=f.read())
        bundler.addpart(part)
def _getbundlegithgmappart(bundler, repo, source, bundlecaps=None, **kwargs):
    '''send missing git to hg map data via bundle2

    Adds a single 'b2x:fb:gitmeta:githgmap' part containing the map lines
    for commits the client is missing.  Does nothing unless the client
    advertised 'fb_gitmeta' and the server is configured for map deltas.
    '''
    # Fix: bundlecaps defaults to None, so the unguarded membership test
    # raised TypeError for callers that omitted it.  Guard first.
    if not bundlecaps or 'fb_gitmeta' not in bundlecaps:
        return

    # Do nothing if the config indicates serving the complete git-hg map
    # file. _getbundlegitmetapart will handle serving the complete file in
    # this case.
    if not repo.ui.configbool('gitlookup', 'onlymapdelta', False):
        return

    mapfile = _getfile(repo, gitmapfile)
    if not mapfile:
        return

    commonheads = kwargs['common']
    # If there are missing heads, we will sync everything.
    if _isheadmissing(repo, commonheads):
        commonheads = []

    # No common heads means the client needs the full map.
    needfullsync = (len(commonheads) == 0)
    heads = repo.heads()
    newheads = set(hex(head) for head in heads)
    missingcommits = repo.changelog.findmissing(commonheads, heads)
    missinghashes = set(hex(commit) for commit in missingcommits)
    missinglines = _getmissinglines(mapfile, missinghashes)

    payload = _githgmappayload(needfullsync, newheads, missinglines)
    serializedpayload = payload.tojson()
    part = bundle2.bundlepart(
        'b2x:fb:gitmeta:githgmap',
        [('filename', gitmapfile)],
        data=serializedpayload)
    bundler.addpart(part)
def processparts(orig, repo, op, unbundler):
    """Route an infinitepush push into the bundle store instead of revlogs.

    Wraps bundle2 part processing: non-infinitepush pushes fall through to
    ``orig``; infinitepush pushes have their parts copied into a new bundle
    which is written to a temp file and handed to ``storebundle``.
    """
    # make sure we don't wrap processparts in case of `hg unbundle`
    if op.source == 'unbundle':
        return orig(repo, op, unbundler)

    # this server routes each push to bundle store
    if repo.ui.configbool('infinitepush', 'pushtobundlestore'):
        return storetobundlestore(orig, repo, op, unbundler)

    # Only intercept bundles the client explicitly marked as infinitepush.
    if unbundler.params.get('infinitepush') != 'True':
        return orig(repo, op, unbundler)

    handleallparts = repo.ui.configbool('infinitepush', 'storeallparts')

    bundler = bundle2.bundle20(repo.ui)
    # cgparams doubles as the "did we see a scratch changegroup" flag.
    cgparams = None
    with bundle2.partiterator(repo, op, unbundler) as parts:
        for part in parts:
            bundlepart = None
            if part.type == 'replycaps':
                # This configures the current operation to allow reply parts.
                bundle2._processpart(op, part)
            elif part.type == bundleparts.scratchbranchparttype:
                # Scratch branch parts need to be converted to normal
                # changegroup parts, and the extra parameters stored for later
                # when we upload to the store. Eventually those parameters will
                # be put on the actual bundle instead of this part, then we can
                # send a vanilla changegroup instead of the scratchbranch part.
                cgversion = part.params.get('cgversion', '01')
                bundlepart = bundle2.bundlepart('changegroup',
                                                data=part.read())
                bundlepart.addparam('version', cgversion)
                cgparams = part.params

                # If we're not dumping all parts into the new bundle, we need to
                # alert the future pushkey and phase-heads handler to skip
                # the part.
                if not handleallparts:
                    op.records.add(scratchbranchparttype + '_skippushkey',
                                   True)
                    op.records.add(scratchbranchparttype + '_skipphaseheads',
                                   True)
            else:
                if handleallparts:
                    # Ideally we would not process any parts, and instead just
                    # forward them to the bundle for storage, but since this
                    # differs from previous behavior, we need to put it behind a
                    # config flag for incremental rollout.
                    bundlepart = bundle2.bundlepart(part.type,
                                                    data=part.read())
                    for key, value in part.params.iteritems():
                        bundlepart.addparam(key, value)

                    # Certain parts require a response
                    if part.type == 'pushkey':
                        if op.reply is not None:
                            rpart = op.reply.newpart('reply:pushkey')
                            rpart.addparam('in-reply-to', str(part.id),
                                           mandatory=False)
                            rpart.addparam('return', '1', mandatory=False)
                else:
                    # Not storing all parts: process this one normally.
                    bundle2._processpart(op, part)

            if handleallparts:
                # Record a synthetic success for every part we stored.
                op.records.add(part.type, {
                    'return': 1,
                })
            if bundlepart:
                bundler.addpart(bundlepart)

    # If commits were sent, store them
    if cgparams:
        buf = util.chunkbuffer(bundler.getchunks())
        fd, bundlefile = tempfile.mkstemp()
        try:
            try:
                # NOTE(review): if os.fdopen fails, fp is unbound and the
                # finally clause's fp.close() raises NameError, masking the
                # original error — consider `with os.fdopen(...)` here.
                fp = os.fdopen(fd, r'wb')
                fp.write(buf.read())
            finally:
                fp.close()
            storebundle(op, cgparams, bundlefile)
        finally:
            try:
                os.unlink(bundlefile)
            except Exception:
                # we would rather see the original exception
                pass