Example 1
    def createindex(self, nodelocations, indexoffset):
        fileindexformat = self.INDEXFORMAT
        fileindexlength = self.INDEXENTRYLENGTH
        nodeindexformat = self.NODEINDEXFORMAT
        nodeindexlength = self.NODEINDEXENTRYLENGTH

        files = (
            (hashutil.sha1(filename).digest(), filename, offset, size)
            for filename, (offset, size) in pycompat.iteritems(self.files)
        )
        files = sorted(files)

        # node index is after file index size, file index, and node index size
        indexlensize = struct.calcsize(b'!Q')
        nodeindexoffset = (
            indexoffset
            + indexlensize
            + (len(files) * fileindexlength)
            + indexlensize
        )

        fileindexentries = []
        nodeindexentries = []
        nodecount = 0
        for namehash, filename, offset, size in files:
            # File section index
            nodelocations = self.entrylocations[filename]

            nodeindexsize = len(nodelocations) * nodeindexlength

            rawentry = struct.pack(
                fileindexformat,
                namehash,
                offset,
                size,
                nodeindexoffset,
                nodeindexsize,
            )
            # Node index
            nodeindexentries.append(
                struct.pack(constants.FILENAMESTRUCT, len(filename)) + filename
            )
            nodeindexoffset += constants.FILENAMESIZE + len(filename)

            for node, location in sorted(pycompat.iteritems(nodelocations)):
                nodeindexentries.append(
                    struct.pack(nodeindexformat, node, location)
                )
                nodecount += 1

            nodeindexoffset += len(nodelocations) * nodeindexlength

            fileindexentries.append(rawentry)

        nodecountraw = struct.pack(b'!Q', nodecount)
        return (
            b''.join(fileindexentries)
            + nodecountraw
            + b''.join(nodeindexentries)
        )
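
For orientation, a minimal sketch of reading one such file-index entry back. The '!20sQQQQ' layout and the constant names below are assumptions standing in for the class-level INDEXFORMAT and INDEXENTRYLENGTH, which are defined elsewhere:

import struct

# Assumed entry layout: 20-byte sha1 of the filename, then four
# big-endian u64s: offset, size, nodeindexoffset, nodeindexsize.
FILEINDEXFORMAT = b'!20sQQQQ'                       # hypothetical INDEXFORMAT
FILEINDEXLENGTH = struct.calcsize(FILEINDEXFORMAT)  # 52 bytes

def readfileindexentry(buf, i):
    # unpack the i-th fixed-width entry from the raw file index
    return struct.unpack_from(FILEINDEXFORMAT, buf, i * FILEINDEXLENGTH)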
Example 2
def keepset(repo, keyfn, lastkeepkeys=None):
    """Computes a keepset which is not garbage collected.
    'keyfn' is a function that maps filename, node to a unique key.
    'lastkeepkeys' is an optional argument and if provided the keepset
    function updates lastkeepkeys with more keys and returns the result.
    """
    if not lastkeepkeys:
        keepkeys = set()
    else:
        keepkeys = lastkeepkeys

    # We want to keep:
    # 1. Working copy parent
    # 2. Draft commits
    # 3. Parents of draft commits
    # 4. Pullprefetch and bgprefetchrevs revsets if specified
    revs = [b'.', b'draft()', b'parents(draft())']
    prefetchrevs = repo.ui.config(b'remotefilelog', b'pullprefetch', None)
    if prefetchrevs:
        revs.append(b'(%s)' % prefetchrevs)
    prefetchrevs = repo.ui.config(b'remotefilelog', b'bgprefetchrevs', None)
    if prefetchrevs:
        revs.append(b'(%s)' % prefetchrevs)
    revs = b'+'.join(revs)

    revs = [b'sort((%s), "topo")' % revs]
    keep = scmutil.revrange(repo, revs)

    processed = set()
    lastmanifest = None

    # process the commits in toposorted order starting from the oldest
    for r in reversed(keep._list):
        if repo[r].p1().rev() in processed:
            # if the direct parent has already been processed
            # then we only need to process the delta
            m = repo[r].manifestctx().readdelta()
        else:
            # otherwise take the manifest and diff it
            # with the previous manifest if one exists
            if lastmanifest:
                m = repo[r].manifest().diff(lastmanifest)
            else:
                m = repo[r].manifest()
        lastmanifest = repo[r].manifest()
        processed.add(r)

        # populate keepkeys with keys from the current manifest
        if type(m) is dict:
            # m is a result of diff of two manifests and is a dictionary that
            # maps filename to ((newnode, newflag), (oldnode, oldflag)) tuple
            for filename, diff in pycompat.iteritems(m):
                if diff[0][0] is not None:
                    keepkeys.add(keyfn(filename, diff[0][0]))
        else:
            # m is a manifest object
            for filename, filenode in pycompat.iteritems(m):
                keepkeys.add(keyfn(filename, filenode))

    return keepkeys
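
A hedged usage sketch: keyfn only needs to map (filename, node) to a hashable key, and repo stands for any local repository object; the key format and the incremental second call are illustrative, not prescribed:

from mercurial.node import hex

# hypothetical key function: filename plus the hex of the file node
keyfn = lambda filename, node: filename + b':' + hex(node)

keepkeys = keepset(repo, keyfn)            # compute a fresh keepset
keepkeys = keepset(repo, keyfn, keepkeys)  # later: extend the same set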
Example 3
def writetostore(self, text, sidedata):
    # hg filelog metadata (includes rename, etc)
    hgmeta, offset = storageutil.parsemeta(text)
    if offset and offset > 0:
        # lfs blob does not contain hg filelog metadata
        text = text[offset:]

    # git-lfs only supports sha256
    oid = hex(hashlib.sha256(text).digest())
    self.opener.lfslocalblobstore.write(oid, text)

    # replace contents with metadata
    longoid = b'sha256:%s' % oid
    metadata = pointer.gitlfspointer(oid=longoid, size=b'%d' % len(text))

    # by default, we expect the content to be binary. however, LFS could also
    # be used for non-binary content. add a special entry for non-binary data.
    # this will be used by filectx.isbinary().
    if not stringutil.binary(text):
        # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
        metadata[b'x-is-binary'] = b'0'

    # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
    if hgmeta is not None:
        for k, v in pycompat.iteritems(hgmeta):
            metadata[b'x-hg-%s' % k] = v

    rawtext = metadata.serialize()
    return (rawtext, False)
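
The oid computed above is nothing more than the hex sha256 of the metadata-stripped text; as a standalone check:

import hashlib

def lfsoid(text):
    # git-lfs object ids are the lowercase hex sha256 of the blob
    return hashlib.sha256(text).hexdigest()

assert lfsoid(b'hello\n') == (
    '5891b5b522d5df086d0ff0b110fbd9d21bb4fc7163af34d08286a2e846f6be03')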
Example 4
def parseoptions(ui, cmdoptions, args):
    cmdoptions = list(cmdoptions)
    opts = {}
    args = list(args)
    while True:
        try:
            args = fancyopts.fancyopts(list(args), cmdoptions, opts, True)
            break
        except getopt.GetoptError as ex:
            if "requires argument" in ex.msg:
                raise
            if ('--' + ex.opt) in ex.msg:
                flag = b'--' + pycompat.bytestr(ex.opt)
            elif ('-' + ex.opt) in ex.msg:
                flag = b'-' + pycompat.bytestr(ex.opt)
            else:
                raise error.Abort(
                    _(b"unknown option %s") % pycompat.bytestr(ex.opt))
            try:
                args.remove(flag)
            except Exception:
                msg = _(b"unknown option '%s' packed with other options")
                hint = _(b"please try passing the option as its own flag: -%s")
                raise error.Abort(
                    msg % pycompat.bytestr(ex.opt),
                    hint=hint % pycompat.bytestr(ex.opt),
                )

            ui.warn(_(b"ignoring unknown option %s\n") % flag)

    args = list([convert(x) for x in args])
    opts = dict([(k, convert(v)) if isinstance(v, bytes) else (k, v)
                 for k, v in pycompat.iteritems(opts)])

    return args, opts
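
The retry loop can be reproduced with plain stdlib getopt; a simplified sketch (the option tables are illustrative, and unlike the original it does not special-case flags packed together like -ab):

import getopt

def parseknown(args, shortopts, longopts):
    # parse args, dropping unknown flags one at a time instead of aborting
    args = list(args)
    while True:
        try:
            return getopt.gnu_getopt(args, shortopts, longopts)
        except getopt.GetoptError as ex:
            if 'requires argument' in ex.msg:
                raise
            flag = ('--' if len(ex.opt) > 1 else '-') + ex.opt
            args.remove(flag)  # ValueError here would mean a packed flag

opts, rest = parseknown(['-v', '--unknown', 'file'], 'v', ['verbose'])
assert (opts, rest) == ([('-v', '')], ['file'])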
Example 5
 def nodetobmarks(self):
     if not self._nodetobmarks:
         bmarktonodes = self.bmarktonodes()
         self._nodetobmarks = {}
         for name, node in pycompat.iteritems(bmarktonodes):
             self._nodetobmarks.setdefault(node[0], []).append(name)
     return self._nodetobmarks
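
This helper, like nodetobranch and nodetohoists further down, uses the same inversion pattern throughout: flip a name-to-node mapping into node-to-names via setdefault. In isolation:

def invert(mapping):
    # turn {name: node} into {node: [name, ...]}
    inverted = {}
    for name, node in mapping.items():
        inverted.setdefault(node, []).append(name)
    return inverted

assert invert({b'book1': b'n1', b'book2': b'n1'}) == {
    b'n1': [b'book1', b'book2']}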
Example 6
def gcserver(ui, repo):
    if not repo.ui.configbool(b"remotefilelog", b"server"):
        return

    neededfiles = set()
    heads = repo.revs(b"heads(tip~25000:) - null")

    cachepath = repo.vfs.join(b"remotefilelogcache")
    for head in heads:
        mf = repo[head].manifest()
        for filename, filenode in pycompat.iteritems(mf):
            filecachepath = os.path.join(cachepath, filename, hex(filenode))
            neededfiles.add(filecachepath)

    # delete unneeded older files
    days = repo.ui.configint(b"remotefilelog", b"serverexpiration")
    expiration = time.time() - (days * 24 * 60 * 60)

    progress = ui.makeprogress(_(b"removing old server cache"), unit=b"files")
    progress.update(0)
    for root, dirs, files in os.walk(cachepath):
        for file in files:
            filepath = os.path.join(root, file)
            progress.increment()
            if filepath in neededfiles:
                continue

            stat = os.stat(filepath)
            if stat.st_mtime < expiration:
                os.remove(filepath)

    progress.complete()
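
The expiration test reduces to mtime arithmetic; the same check as a standalone helper (days being the serverexpiration config value):

import os
import time

def isexpired(path, days):
    # expired = last modified more than `days` days ago
    cutoff = time.time() - days * 24 * 60 * 60
    return os.stat(path).st_mtime < cutoff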
Example 7
 def getchanges(self, rev, full):
     ctx = self._changectx(rev)
     parents = self._parents(ctx)
     if full or not parents:
         files = copyfiles = ctx.manifest()
     if parents:
         if self._changescache[0] == rev:
             ma, r = self._changescache[1]
         else:
             ma, r = self._changedfiles(parents[0], ctx)
         if not full:
             files = ma + r
         copyfiles = ma
     # _getcopies() is also run for roots and before filtering so missing
     # revlogs are detected early
     copies = self._getcopies(ctx, parents, copyfiles)
     cleanp2 = set()
     if len(parents) == 2:
         d = parents[1].manifest().diff(ctx.manifest(), clean=True)
         for f, value in pycompat.iteritems(d):
             if value is None:
                 cleanp2.add(f)
     changes = [(f, rev) for f in files if f not in self.ignored]
     changes.sort()
     return changes, copies, cleanp2
Example 8
    def _willbecomenoop(memworkingcopy, ctx, pctx=None):
        """({path: content}, ctx, ctx) -> bool. test if a commit will be noop

        if it will become an empty commit (does not change anything, after the
        memworkingcopy overrides), return True. otherwise return False.
        """
        if not pctx:
            parents = ctx.parents()
            if len(parents) != 1:
                return False
            pctx = parents[0]
        if ctx.branch() != pctx.branch():
            return False
        if ctx.extra().get(b'close'):
            return False
        # ctx changes more files (not a subset of memworkingcopy)
        if not set(ctx.files()).issubset(set(memworkingcopy)):
            return False
        for path, content in pycompat.iteritems(memworkingcopy):
            if path not in pctx or path not in ctx:
                return False
            fctx = ctx[path]
            pfctx = pctx[path]
            if pfctx.flags() != fctx.flags():
                return False
            if pfctx.data() != content:
                return False
        return True
Example 9
def overlaydiffcontext(ctx, chunks):
    """(ctx, [crecord.uihunk]) -> memctx

    return a memctx with some [1] patches (chunks) applied to ctx.
    [1]: modifications are handled. renames, mode changes, etc. are ignored.
    """
    # sadly the applying-patch logic is hardly reusable, and messy:
    # 1. the core logic "_applydiff" is too heavy - it writes .rej files, it
    #    needs a file stream of a patch and will re-parse it, while we have
    #    structured hunk objects at hand.
    # 2. a lot of different implementations about "chunk" (patch.hunk,
    #    patch.recordhunk, crecord.uihunk)
    # as we only care about applying changes to modified files, no mode
    # change, no binary diff, and no renames, it's probably okay to
    # re-invent the logic using much simpler code here.
    memworkingcopy = {}  # {path: content}
    patchmap = defaultdict(lambda: [])  # {path: [(a1, a2, [bline])]}
    for path, info in map(_parsechunk, chunks):
        if not path or not info:
            continue
        patchmap[path].append(info)
    for path, patches in pycompat.iteritems(patchmap):
        if path not in ctx or not patches:
            continue
        patches.sort(reverse=True)
        lines = mdiff.splitnewlines(ctx[path].data())
        for a1, a2, blines in patches:
            lines[a1:a2] = blines
        memworkingcopy[path] = b''.join(lines)
    return overlaycontext(memworkingcopy, ctx)
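
The reverse sort before splicing is what keeps the (a1, a2) indices valid: applying hunks bottom-up leaves the lines in front of each remaining hunk untouched. The splice loop in isolation:

def applyhunks(lines, patches):
    # splice (a1, a2, blines) hunks into a line list, bottom-up
    for a1, a2, blines in sorted(patches, reverse=True):
        lines[a1:a2] = blines
    return lines

# replace line 0 and delete line 2, regardless of input order
assert applyhunks([b'a\n', b'b\n', b'c\n'],
                  [(0, 1, [b'A\n']), (2, 3, [])]) == [b'A\n', b'b\n']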
Example 10
    def repackhistory(self, ledger, target):
        ui = self.repo.ui

        byfile = {}
        for entry in pycompat.itervalues(ledger.entries):
            if entry.historysource:
                byfile.setdefault(entry.filename, {})[entry.node] = entry

        progress = ui.makeprogress(
            _(b"repacking history"), unit=self.unit, total=len(byfile)
        )
        for filename, entries in sorted(pycompat.iteritems(byfile)):
            ancestors = {}
            nodes = list(node for node in entries)

            for node in nodes:
                if node in ancestors:
                    continue
                ancestors.update(
                    self.history.getancestors(filename, node, known=ancestors)
                )

            # Order the nodes children first
            orderednodes = reversed(self._toposort(ancestors))

            # Write to the pack
            dontprocess = set()
            for node in orderednodes:
                p1, p2, linknode, copyfrom = ancestors[node]

                # If the node is marked dontprocess, but it's also in the
                # explicit entries set, that means the node exists both in this
                # file and in another file that was copied to this file.
                # Usually this happens if the file was copied to another file,
                # then the copy was deleted, then reintroduced without copy
                # metadata. The original add and the new add have the same hash
                # since the content is identical and the parents are null.
                if node in dontprocess and node not in entries:
                    # If copyfrom == filename, it means the copy history
                    # went to some other file, then came back to this one, so we
                    # should continue processing it.
                    if p1 != self.repo.nullid and copyfrom != filename:
                        dontprocess.add(p1)
                    if p2 != self.repo.nullid:
                        dontprocess.add(p2)
                    continue

                if copyfrom:
                    dontprocess.add(p1)

                target.add(filename, node, p1, p2, linknode, copyfrom)

                if node in entries:
                    entries[node].historyrepacked = True

            progress.increment()

        progress.complete()
        target.close(ledger=ledger)
Example 11
    def iteritems(self):
        """Iterate over (name, node) tuples"""

        if not self.loaded:
            self._load()

        for k, vtup in pycompat.iteritems(self.potentialentries):
            yield (k, [bin(vtup[0])])
Example 12
 def nodetobranch(self):
     if not self._nodetobranch:
         branchtonodes = self.branchtonodes()
         self._nodetobranch = {}
         for name, nodes in pycompat.iteritems(branchtonodes):
             for node in nodes:
                 self._nodetobranch.setdefault(node, []).append(name)
     return self._nodetobranch
Example 13
File: fix.py Project: CJX32/my_blog
def fixfile(ui, repo, opts, fixers, fixctx, path, basectxs):
    """Run any configured fixers that should affect the file in this context

    Returns the file content that results from applying the fixers in some order
    starting with the file's content in the fixctx. Fixers that support line
    ranges will affect lines that have changed relative to any of the basectxs
    (i.e. they will only avoid lines that are common to all basectxs).

    A fixer tool's stdout will become the file's new content if and only if it
    exits with code zero. The fixer tool's working directory is the repository's
    root.
    """
    metadata = {}
    newdata = fixctx[path].data()
    for fixername, fixer in pycompat.iteritems(fixers):
        if fixer.affects(opts, fixctx, path):
            ranges = lineranges(opts, path, basectxs, fixctx, newdata)
            command = fixer.command(ui, path, ranges)
            if command is None:
                continue
            ui.debug(b'subprocess: %s\n' % (command, ))
            proc = subprocess.Popen(
                procutil.tonativestr(command),
                shell=True,
                cwd=procutil.tonativestr(repo.root),
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            stdout, stderr = proc.communicate(newdata)
            if stderr:
                showstderr(ui, fixctx.rev(), fixername, stderr)
            newerdata = stdout
            if fixer.shouldoutputmetadata():
                try:
                    metadatajson, newerdata = stdout.split(b'\0', 1)
                    metadata[fixername] = pycompat.json_loads(metadatajson)
                except ValueError:
                    ui.warn(
                        _(b'ignored invalid output from fixer tool: %s\n') %
                        (fixername, ))
                    continue
            else:
                metadata[fixername] = None
            if proc.returncode == 0:
                newdata = newerdata
            else:
                if not stderr:
                    message = _(b'exited with status %d\n') % (
                        proc.returncode, )
                    showstderr(ui, fixctx.rev(), fixername, message)
                checktoolfailureaction(
                    ui,
                    _(b'no fixes will be applied'),
                    hint=_(b'use --config fix.failure=continue to apply any '
                           b'successful fixes anyway'),
                )
    return metadata, newdata
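
Stripped of the fixer plumbing, the core is a pipe-through-subprocess pattern: feed the content on stdin and keep stdout only on exit status zero. A minimal sketch (tr stands in for a fixer tool; assumes a Unix shell):

import subprocess

def pipethrough(command, data):
    # run a shell command over `data`; keep its output only on success
    proc = subprocess.Popen(command, shell=True,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    stdout, _ = proc.communicate(data)
    return stdout if proc.returncode == 0 else data

assert pipethrough('tr a-z A-Z', b'fix me\n') == b'FIX ME\n'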
Example 14
def _savelocalbookmarks(repo, bookmarks):
    if not bookmarks:
        return
    with repo.wlock(), repo.lock(), repo.transaction(b'bookmark') as tr:
        changes = []
        for scratchbook, node in pycompat.iteritems(bookmarks):
            changectx = repo[node]
            changes.append((scratchbook, changectx.node()))
        repo._bookmarks.applychanges(repo, tr, changes)
Example 15
 def _cleanupoldcommits(self):
     replacements = {
         k: ([v] if v is not None else [])
         for k, v in pycompat.iteritems(self.replacemap)
     }
     if replacements:
         scmutil.cleanupnodes(
             self.repo, replacements, operation=b'absorb', fixphase=True
         )
Example 16
 def gettags(self):
     bytetags = {}
     for branch in self._bzrbranches():
         if not branch.supports_tags():
             return {}
         tagdict = branch.tags.get_tag_dict()
         for name, rev in pycompat.iteritems(tagdict):
             bytetags[self.recode(name)] = rev
     return bytetags
Example 17
 def __init__(self, ui, repo=None):
     if repo:
         sections = util.sortdict(DEFAULT_SECTIONS)
         custom_sections = getcustomadmonitions(repo)
         if custom_sections:
             sections.update(custom_sections)
         self._sections = list(pycompat.iteritems(sections))
     else:
         self._sections = list(DEFAULT_SECTIONS)
Example 18
 def nodetohoists(self, hoist):
     if not self._nodetohoists:
         marktonodes = self.bmarktonodes()
         self._nodetohoists = {}
         hoist += b'/'
         for name, node in pycompat.iteritems(marktonodes):
             if name.startswith(hoist):
                 name = name[len(hoist):]
                 self._nodetohoists.setdefault(node[0], []).append(name)
     return self._nodetohoists
Example 19
 def hoisttonodes(self, hoist):
     if not self._hoisttonodes:
         marktonodes = self.bmarktonodes()
         self._hoisttonodes = {}
         hoist += b'/'
         for name, node in pycompat.iteritems(marktonodes):
             if name.startswith(hoist):
                 name = name[len(hoist):]
                 self._hoisttonodes[name] = node
     return self._hoisttonodes
Example 20
def recordbookmarks(orig, store, fp):
    """Records all bookmark changes in the journal."""
    repo = store._repo
    if util.safehasattr(repo, 'journal'):
        oldmarks = bookmarks.bmstore(repo)
        for mark, value in pycompat.iteritems(store):
            oldvalue = oldmarks.get(mark, repo.nullid)
            if value != oldvalue:
                repo.journal.record(bookmarktype, mark, oldvalue, value)
    return orig(store, fp)
Example 21
    def puttags(self, tags):
        tagparent = self.repo.branchtip(self.tagsbranch, ignoremissing=True)
        tagparent = tagparent or self.repo.nullid

        oldlines = set()
        for branch, heads in pycompat.iteritems(self.repo.branchmap()):
            for h in heads:
                if b'.hgtags' in self.repo[h]:
                    oldlines.update(
                        set(self.repo[h][b'.hgtags'].data().splitlines(True)))
        oldlines = sorted(list(oldlines))

        newlines = sorted([(b"%s %s\n" % (tags[tag], tag)) for tag in tags])
        if newlines == oldlines:
            return None, None

        # if the old and new tags match, then there is nothing to update
        oldtags = set()
        newtags = set()
        for line in oldlines:
            s = line.strip().split(b' ', 1)
            if len(s) != 2:
                continue
            oldtags.add(s[1])
        for line in newlines:
            s = line.strip().split(b' ', 1)
            if len(s) != 2:
                continue
            if s[1] not in oldtags:
                newtags.add(s[1].strip())

        if not newtags:
            return None, None

        data = b"".join(newlines)

        def getfilectx(repo, memctx, f):
            return context.memfilectx(repo, memctx, f, data, False, False,
                                      None)

        self.ui.status(_(b"updating tags\n"))
        date = b"%d 0" % int(time.mktime(time.gmtime()))
        extra = {b'branch': self.tagsbranch}
        ctx = context.memctx(
            self.repo,
            (tagparent, None),
            b"update tags",
            [b".hgtags"],
            getfilectx,
            b"convert-repo",
            date,
            extra,
        )
        node = self.repo.commitctx(ctx)
        return hex(node), hex(tagparent)
Example 22
    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""

        if a == nullid or b == nullid:
            return nullid

        revmap, parentfunc = self._buildrevgraph(a, b)
        nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)}

        ancs = ancestor.commonancestorsheads(parentfunc, revmap[a], revmap[b])
        return map(nodemap.__getitem__, ancs)
Example 23
 def _changedfiles(self, ctx1, ctx2):
     ma, r = [], []
     maappend = ma.append
     rappend = r.append
     d = ctx1.manifest().diff(ctx2.manifest())
     for f, ((node1, flag1), (node2, flag2)) in pycompat.iteritems(d):
         if node2 is None:
             rappend(f)
         else:
             maappend(f)
     return ma, r
Example 24
def parsepackmeta(metabuf):
    """like _parsepackmeta, but convert fields to desired types automatically.

    This means, METAKEYFLAG and METAKEYSIZE fields will be converted to
    integers.
    """
    metadict = _parsepackmeta(metabuf)
    for k, v in pycompat.iteritems(metadict):
        if k in _metaitemtypes and int in _metaitemtypes[k]:
            metadict[k] = bin2int(v)
    return metadict
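
bin2int is not shown in this excerpt; for big-endian byte strings it would amount to int.from_bytes, sketched here under that assumption:

def bin2int(buf):
    # interpret a big-endian byte string as an unsigned integer
    return int.from_bytes(buf, 'big')

assert bin2int(b'\x01\x00') == 256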
Example 25
def sumdicts(*dicts):
    """Adds all the values of *dicts together into one dictionary. This assumes
    the values in *dicts are all summable.

    e.g. [{'a': 4, 'b': 2}, {'b': 3, 'c': 1}] -> {'a': 4, 'b': 5, 'c': 1}
    """
    result = collections.defaultdict(lambda: 0)
    for dict in dicts:
        for k, v in pycompat.iteritems(dict):
            result[k] += v
    return result
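
Usage matches the docstring (the result is a defaultdict, which compares equal to a plain dict):

assert sumdicts({'a': 4, 'b': 2}, {'b': 3, 'c': 1}) == {
    'a': 4, 'b': 5, 'c': 1}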
Example 26
def _maybeaddpushbackpart(op, bookmark, newnode, oldnode, params):
    if params.get(b'pushbackbookmarks'):
        if op.reply and b'pushback' in op.reply.capabilities:
            params = {
                b'namespace': b'bookmarks',
                b'key': bookmark,
                b'new': newnode,
                b'old': oldnode,
            }
            op.reply.newpart(b'pushkey',
                             mandatoryparams=pycompat.iteritems(params))
Example 27
    def ancestor(self, a, b):
        if a == nullid or b == nullid:
            return nullid

        revmap, parentfunc = self._buildrevgraph(a, b)
        nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)}

        ancs = ancestor.ancestors(parentfunc, revmap[a], revmap[b])
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(nodemap.__getitem__, ancs))
        return nullid
Example 28
def storetobundlestore(orig, repo, op, unbundler):
    """stores the incoming bundle coming from push command to the bundlestore
    instead of applying on the revlogs"""

    repo.ui.status(_(b"storing changesets on the bundlestore\n"))
    bundler = bundle2.bundle20(repo.ui)

    # processing each part and storing it in bundler
    with bundle2.partiterator(repo, op, unbundler) as parts:
        for part in parts:
            bundlepart = None
            if part.type == b'replycaps':
                # This configures the current operation to allow reply parts.
                bundle2._processpart(op, part)
            else:
                bundlepart = bundle2.bundlepart(part.type, data=part.read())
                for key, value in pycompat.iteritems(part.params):
                    bundlepart.addparam(key, value)

                # Certain parts require a response
                if part.type in (b'pushkey', b'changegroup'):
                    if op.reply is not None:
                        rpart = op.reply.newpart(b'reply:%s' % part.type)
                        rpart.addparam(
                            b'in-reply-to', b'%d' % part.id, mandatory=False
                        )
                        rpart.addparam(b'return', b'1', mandatory=False)

            op.records.add(
                part.type,
                {
                    b'return': 1,
                },
            )
            if bundlepart:
                bundler.addpart(bundlepart)

    # storing the bundle in the bundlestore
    buf = util.chunkbuffer(bundler.getchunks())
    fd, bundlefile = pycompat.mkstemp()
    try:
        try:
            fp = os.fdopen(fd, 'wb')
            fp.write(buf.read())
        finally:
            fp.close()
        storebundle(op, {}, bundlefile)
    finally:
        try:
            os.unlink(bundlefile)
        except Exception:
            # we would rather see the original exception
            pass
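
The surrounding mkstemp dance is the usual write-then-hand-off-then-always-unlink pattern; generically:

import os
import tempfile

def withtempfile(data, consume):
    # write data to a temp file, pass the path on, always clean up
    fd, path = tempfile.mkstemp()
    try:
        with os.fdopen(fd, 'wb') as fp:
            fp.write(data)
        consume(path)
    finally:
        try:
            os.unlink(path)
        except OSError:
            pass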
Example 29
def view(ui, repo, *etc, **opts):
    """start interactive history viewer"""
    opts = pycompat.byteskwargs(opts)
    os.chdir(repo.root)
    optstr = b' '.join(
        [b'--%s %s' % (k, v) for k, v in pycompat.iteritems(opts) if v])
    if repo.filtername is None:
        optstr += b' --hidden'  # leading space keeps it separate from the last option

    cmd = ui.config(b"hgk", b"path") + b" %s %s" % (optstr, b" ".join(etc))
    ui.debug(b"running %s\n" % cmd)
    ui.system(cmd, blockedtag=b'hgk_view')
Example 30
def getscratchbranchparts(repo, peer, outgoing, ui, bookmark):
    if not outgoing.missing:
        raise error.Abort(_(b'no commits to push'))

    if scratchbranchparttype not in bundle2.bundle2caps(peer):
        raise error.Abort(
            _(b'no server support for %r') % scratchbranchparttype
        )

    _validaterevset(
        repo, revsetlang.formatspec(b'%ln', outgoing.missing), bookmark
    )

    supportedversions = changegroup.supportedoutgoingversions(repo)
    # Explicitly avoid using '01' changegroup version in infinitepush to
    # support general delta
    supportedversions.discard(b'01')
    cgversion = min(supportedversions)
    _handlelfs(repo, outgoing.missing)
    cg = changegroup.makestream(repo, outgoing, cgversion, b'push')

    params = {}
    params[b'cgversion'] = cgversion
    if bookmark:
        params[b'bookmark'] = bookmark
        # 'prevbooknode' is necessary for pushkey reply part
        params[b'bookprevnode'] = b''
        bookmarks = repo._bookmarks
        if bookmark in bookmarks:
            params[b'bookprevnode'] = hex(bookmarks[bookmark])

    # Do not send pushback bundle2 part with bookmarks if remotenames extension
    # is enabled. It will be handled manually in `_push()`
    if not isremotebooksenabled(ui):
        params[b'pushbackbookmarks'] = b'1'

    parts = []

    # .upper() marks this as a mandatory part: server will abort if there's no
    #  handler
    parts.append(
        bundle2.bundlepart(
            scratchbranchparttype.upper(),
            advisoryparams=pycompat.iteritems(params),
            data=cg,
        )
    )

    return parts