def commonheadspartgen(pushop, bundler):
    """Attach a part listing the outgoing common heads to the bundle.

    The part is only added when the remote advertises pushrebase support;
    otherwise the push silently falls back to the normal push path.
    """
    remotecaps = bundle2.bundle2caps(pushop.remote)
    if rebaseparttype in remotecaps:
        payload = ''.join(pushop.outgoing.commonheads)
        bundler.newpart(commonheadsparttype, data=payload)
def partgen(pushop, bundler):
    """Add infinitepush (scratch-branch) parts to an outgoing bundle.

    Returns a bundle2 reply handler when parts were added, otherwise
    returns None and leaves the push untouched.
    """
    ui = pushop.ui
    bookmark = ui.config(experimental, configbookmark)
    scratchpush = ui.configbool(experimental, configscratchpush)

    # Bail out if changesets were already handled, or this is not a
    # scratch push at all.
    if 'changesets' in pushop.stepsdone:
        return
    if not scratchpush:
        return
    # The remote must understand the scratch-branch part type.
    if scratchbranchparttype not in bundle2.bundle2caps(pushop.remote):
        return

    pushop.stepsdone.add('changesets')
    if not pushop.outgoing.missing:
        ui.status(_('no changes found\n'))
        pushop.cgresult = 0
        return

    # This parameter tells the server that the following bundle is an
    # infinitepush. This lets it switch the part processing to our
    # infinitepush code path.
    bundler.addparam("infinitepush", "True")

    for part in bundleparts.getscratchbranchparts(pushop.repo,
                                                  pushop.remote,
                                                  pushop.outgoing,
                                                  ui,
                                                  bookmark):
        bundler.addpart(part)

    def handlereply(op):
        # server either succeeds or aborts; no code to read
        pushop.cgresult = 1

    return handlereply
def getscratchbranchparts(repo, peer, outgoing, ui, bookmark):
    """Return the bundle2 parts for pushing a scratch branch.

    Builds one mandatory part of type ``scratchbranchparttype`` containing a
    changegroup of ``outgoing.missing``, with advisory parameters describing
    the changegroup version and the optional scratch *bookmark*.

    Raises error.Abort when there is nothing to push or the peer does not
    advertise scratch-branch support.
    """
    if not outgoing.missing:
        raise error.Abort(_(b'no commits to push'))

    if scratchbranchparttype not in bundle2.bundle2caps(peer):
        raise error.Abort(
            _(b'no server support for %r') % scratchbranchparttype
        )

    _validaterevset(
        repo, revsetlang.formatspec(b'%ln', outgoing.missing), bookmark
    )

    supportedversions = changegroup.supportedoutgoingversions(repo)
    # Explicitly avoid using '01' changegroup version in infinitepush to
    # support general delta
    supportedversions.discard(b'01')
    # Lowest remaining version keeps the widest compatibility.
    cgversion = min(supportedversions)
    _handlelfs(repo, outgoing.missing)
    cg = changegroup.makestream(repo, outgoing, cgversion, b'push')

    params = {}
    params[b'cgversion'] = cgversion
    if bookmark:
        params[b'bookmark'] = bookmark
        # 'prevbooknode' is necessary for pushkey reply part
        params[b'bookprevnode'] = b''
        bookmarks = repo._bookmarks
        if bookmark in bookmarks:
            params[b'bookprevnode'] = hex(bookmarks[bookmark])

    # Do not send pushback bundle2 part with bookmarks if remotenames extension
    # is enabled. It will be handled manually in `_push()`
    if not isremotebooksenabled(ui):
        params[b'pushbackbookmarks'] = b'1'

    parts = []

    # .upper() marks this as a mandatory part: server will abort if there's no
    # handler
    parts.append(
        bundle2.bundlepart(
            scratchbranchparttype.upper(),
            advisoryparams=pycompat.iteritems(params),
            data=cg,
        )
    )

    return parts
def rebasepartgen(pushop, bundler):
    """Add pushrebase part(s) for pushop's outgoing changesets to bundler.

    Runs only when an --onto destination is configured and changesets have
    not already been handled by an earlier part generator.  Returns a
    bundle2 reply handler when parts were added, otherwise None.
    """
    onto = pushop.ui.config(experimental, configonto)
    if 'changesets' in pushop.stepsdone or not onto:
        return

    if (rebaseparttype not in bundle2.bundle2caps(pushop.remote) and
            checkremotenames()):
        # Server doesn't support pushrebase, but --to is valid in remotenames as
        # well, so just let it through.
        return

    pushop.stepsdone.add('changesets')
    if not pushop.outgoing.missing:
        # It's important that this text match the text found in upstream
        # Mercurial, since some tools rely on this string to know if a push
        # succeeded despite not pushing commits.
        pushop.ui.status(_('no changes found\n'))
        pushop.cgresult = 0
        return

    # Force push means no rebasing, so let's just take the existing parent.
    if pushop.force:
        onto = donotrebasemarker

    rebaseparts = getrebaseparts(pushop.repo,
                                 pushop.remote,
                                 pushop.outgoing,
                                 onto,
                                 pushop.newbranch)

    for part in rebaseparts:
        bundler.addpart(part)

    # Tell the server which manifests to load before taking the lock.
    # This helps shorten the duration of the lock, which increases our potential
    # commit rate.
    missing = pushop.outgoing.missing
    roots = pushop.repo.set('parents(%ln) - %ln', missing, missing)
    preloadnodes = [hex(r.manifestnode()) for r in roots]
    bundler.addparam("preloadmanifests", ','.join(preloadnodes))

    def handlereply(op):
        # server either succeeds or aborts; no code to read
        pushop.cgresult = 1

    return handlereply
def getrebasepart(repo, peer, outgoing, onto, newhead=False):
    """Build the mandatory pushrebase bundle2 part for the outgoing commits.

    *onto* names the destination the server should rebase onto; *newhead*
    tells the server whether creating a new head is acceptable.

    Raises error.Abort when there is nothing to push or the peer does not
    advertise pushrebase support.
    """
    if not outgoing.missing:
        # FIX: use error.Abort — util.Abort is a long-deprecated alias
        # (removed in modern Mercurial); sibling functions in this file
        # already raise error.Abort.
        raise error.Abort(_('no commits to rebase'))

    if rebaseparttype not in bundle2.bundle2caps(peer):
        raise error.Abort(_('no server support for %r') % rebaseparttype)

    validaterevset(repo, revset.formatspec('%ln', outgoing.missing))

    cg = changegroup.getlocalchangegroupraw(repo, 'push', outgoing)

    # .upper() marks this as a mandatory part: server will abort if there's no
    # handler
    return bundle2.bundlepart(rebaseparttype.upper(),
                              mandatoryparams={
                                  'onto': onto,
                                  'newhead': repr(newhead),
                              }.items(),
                              data=cg)
def createrebasepart(repo, peer, outgoing, onto, newhead):
    """Create the mandatory pushrebase part carrying outgoing.missing.

    Aborts when there is nothing to rebase or the peer lacks pushrebase
    support.  The returned part also advertises, as an advisory parameter,
    which obsmarker formats the client can read back.
    """
    if not outgoing.missing:
        raise error.Abort(_('no changesets to rebase'))
    if rebaseparttype not in bundle2.bundle2caps(peer):
        raise error.Abort(_('no server support for %r') % rebaseparttype)

    validaterevset(repo, revsetlang.formatspec('%ln', outgoing.missing))
    cg = changegroup.makestream(repo, outgoing, '01', 'push')

    # Explicitly notify the server what obsmarker versions the client supports
    # so the client could receive marker from the server.
    #
    # The core mercurial logic will do the right thing (enable obsmarker
    # capabilities in the pushback bundle) if obsmarker exchange is enabled
    # client-side.
    #
    # But we want the marker without enabling marker exchange, and our server
    # could reply a marker without exchange or even obsstore enabled. So we
    # bypass the "standard" way of capabilities check by sending the supported
    # versions directly in our own part. Note: do not enable "exchange" because
    # it has an unwanted side effect: pushing markers from client to server.
    #
    # "createmarkers" is all we need to be able to write a new marker.
    markersenabled = obsolete.isenabled(repo, obsolete.createmarkersopt)
    obsmarkerversions = (
        '\0'.join(str(v) for v in obsolete.formats) if markersenabled else '')

    mandatory = {
        'onto': onto,
        'newhead': repr(newhead),
    }
    advisory = {
        # advisory: (old) server could ignore this without error
        'obsmarkerversions': obsmarkerversions,
    }

    # .upper() marks this as a mandatory part: server will abort if there's no
    # handler
    return bundle2.bundlepart(rebaseparttype.upper(),
                              mandatoryparams=mandatory.items(),
                              advisoryparams=advisory.items(),
                              data=cg)
def getrebasepart(repo, peer, outgoing, onto, newhead):
    """Return the mandatory pushrebase part for the commits in *outgoing*.

    Aborts when nothing is outgoing or the peer does not advertise the
    pushrebase capability.
    """
    if not outgoing.missing:
        raise error.Abort(_('no commits to rebase'))

    peercaps = bundle2.bundle2caps(peer)
    if rebaseparttype not in peercaps:
        raise error.Abort(_('no server support for %r') % rebaseparttype)

    validaterevset(repo, revset.formatspec('%ln', outgoing.missing))
    cg = changegroup.getlocalchangegroupraw(repo, 'push', outgoing)

    params = {
        'onto': onto,
        'newhead': repr(newhead),
    }
    # Upper-casing the part type marks it mandatory: the server aborts if it
    # has no handler for it.
    return bundle2.bundlepart(rebaseparttype.upper(),
                              mandatoryparams=params.items(),
                              data=cg)
def push(repo, store, what, repo_heads, repo_branches, dry_run=False):
    """Push local mercurial changesets to the remote *repo*.

    *what* maps push targets to (ref, force)-style values; *repo_heads* and
    *repo_branches* describe the remote's current state.  Returns a gitdag
    of the pushed commits, or an empty tuple when nothing was (or would be)
    pushed.
    """
    def heads():
        # Negated refs for every remote head we already know about.
        for sha1 in store.heads(repo_branches):
            yield '^%s' % store.changeset_ref(sha1)

    def local_bases():
        # Boundary commits ('-'-prefixed by rev-list) between what we want
        # to push and the known remote heads.
        h = chain(heads(), (w for w in what if w))
        for c, t, p in GitHgHelper.rev_list('--topo-order', '--full-history',
                                            '--boundary', *h):
            if c[0] != '-':
                continue
            yield store.hg_changeset(c[1:])

        for w in what:
            rev = store.hg_changeset(w)
            if rev:
                yield rev

    common = findcommon(repo, store, set(local_bases()))
    logging.info('common: %s', common)

    def revs():
        for sha1 in common:
            yield '^%s' % store.changeset_ref(sha1)

    revs = chain(revs(), (w for w in what if w))
    # Entries are (commit, parents) pairs, in topological push order.
    push_commits = list((c, p) for c, t, p in GitHgHelper.rev_list(
        '--topo-order', '--full-history', '--parents', '--reverse', *revs))

    pushed = False
    if push_commits:
        # BUG FIX: the entries are (commit, parents) 2-tuples, so iterating
        # the list directly and testing len(p) == 40 compared the length of
        # a tuple (always 2) and the new-root check could never trigger.
        # A root commit is one with an empty parents field.
        has_root = any(not p for c, p in push_commits)
        force = all(v[1] for v in what.values())
        if has_root and repo_heads:
            if not force:
                raise Exception('Cannot push a new root')
            else:
                # logging.warning: warn() is a deprecated alias.
                logging.warning('Pushing a new root')
        if force:
            repo_heads = ['force']
        else:
            if not repo_heads:
                repo_heads = [NULL_NODE_ID]
            repo_heads = [unhexlify(h) for h in repo_heads]
    if push_commits and not dry_run:
        if repo.local():
            repo.local().ui.setconfig('server', 'validate', True)
        b2caps = bundle2caps(repo) if unbundle20 else {}
        logging.getLogger('bundle2').debug('%r', b2caps)
        if b2caps:
            b2caps['replycaps'] = encodecaps({'error': ['abort']})
        cg = create_bundle(store, push_commits, b2caps)
        if not isinstance(repo, HelperRepo):
            cg = util.chunkbuffer(cg)
            if not b2caps:
                cg = cg1unpacker(cg, 'UN')
        reply = repo.unbundle(cg, repo_heads, '')
        if unbundle20 and isinstance(reply, unbundle20):
            parts = iter(reply.iterparts())
            for part in parts:
                logging.getLogger('bundle2').debug('part: %s', part.type)
                logging.getLogger('bundle2').debug('params: %r', part.params)
                if part.type == 'output':
                    sys.stderr.write(part.read())
                elif part.type == 'reply:changegroup':
                    # TODO: should check params['in-reply-to']
                    reply = int(part.params['return'])
                elif part.type == 'error:abort':
                    raise error.Abort(part.params['message'],
                                      hint=part.params.get('hint'))
                else:
                    logging.getLogger('bundle2').warning(
                        'ignoring bundle2 part: %s', part.type)
        pushed = reply != 0
    return gitdag(push_commits) if pushed or dry_run else ()
def push(repo, store, what, repo_heads, repo_branches):
    """Push local mercurial changesets to the remote *repo*.

    *what* maps push targets to (ref, force)-style values; *repo_heads* and
    *repo_branches* describe the remote's current state.  Returns a gitdag
    of the pushed commits, or an empty tuple when nothing was pushed.
    """
    store.init_fast_import()

    def heads():
        # Negated refs for every remote head we already know about.
        for sha1 in store.heads(repo_branches):
            yield '^%s' % store.changeset_ref(sha1)

    def local_bases():
        # Boundary commits ('-'-prefixed by rev-list) between what we want
        # to push and the known remote heads.
        for c in Git.iter('rev-list', '--stdin', '--topo-order',
                          '--full-history', '--boundary',
                          *(w for w in what if w), stdin=heads()):
            if c[0] != '-':
                continue
            yield store.hg_changeset(c[1:])

        for w in what:
            rev = store.hg_changeset(w)
            if rev:
                yield rev

    common = findcommon(repo, store, set(local_bases()))
    logging.info('common: %s' % common)

    def revs():
        for sha1 in common:
            yield '^%s' % store.changeset_ref(sha1)

    # One output line per commit: "<sha1> <parent1> <parent2> ...",
    # in topological push order.
    push_commits = list(Git.iter('rev-list', '--stdin', '--topo-order',
                                 '--full-history', '--parents', '--reverse',
                                 *(w for w in what if w), stdin=revs()))

    pushed = False
    if push_commits:
        # A line that is exactly one 40-char sha1 has no parents listed,
        # i.e. the commit is a new root.
        has_root = any(len(p) == 40 for p in push_commits)
        force = all(v[1] for v in what.values())
        if has_root and repo_heads:
            if not force:
                raise Exception('Cannot push a new root')
            else:
                logging.warn('Pushing a new root')
        if force:
            repo_heads = ['force']
        else:
            if not repo_heads:
                repo_heads = [NULL_NODE_ID]
            repo_heads = [unhexlify(h) for h in repo_heads]
        if repo.local():
            repo.local().ui.setconfig('server', 'validate', True)
        b2caps = bundle2caps(repo) if unbundle20 else {}
        # NOTE(review): replycaps is only requested over http(s) or for
        # non-helper repos — presumably the helper transport cannot handle
        # reply bundles; confirm against the transport implementation.
        if b2caps and (repo.url().startswith(('http://', 'https://'))
                       or not isinstance(repo, HelperRepo)):
            b2caps['replycaps'] = True
        cg = create_bundle(store, push_commits, b2caps)
        if not isinstance(repo, HelperRepo):
            cg = util.chunkbuffer(cg)
            if not b2caps:
                cg = cg1unpacker(cg, 'UN')
        reply = repo.unbundle(cg, repo_heads, '')
        if unbundle20 and isinstance(reply, unbundle20):
            parts = iter(reply.iterparts())
            for part in parts:
                if part.type == 'output':
                    # Relay server output to the user.
                    sys.stderr.write(part.read())
                elif part.type == 'reply:changegroup':
                    # TODO: should check params['in-reply-to']
                    reply = int(part.params['return'])
                else:
                    logging.getLogger('bundle2').warning(
                        'ignoring bundle2 part: %s', part.type)
        pushed = reply != 0
    return gitdag(push_commits) if pushed else ()