Esempio n. 1
0
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future.

    Assembles parts from the registered generators, skips the push when
    only the reply-capability part was produced, and sends the stream via
    the remote's ``unbundle``.
    """
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # pushback replies are only allowed when a transaction manager exists
    # and the experimental option is enabled
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('b2x:replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            # the generator returned a callback to run on the server reply
            replyhandlers.append(ret)
    # do not push if nothing to push (the replycaps part alone is not
    # actual content)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError as exc:
        # modern 'as' form: the 'except E, e' comma syntax was deprecated
        # in Python 2.6 and removed in Python 3
        raise util.Abort('missing support for %s' % exc)
Esempio n. 2
0
 def _readheader(self):
     """read the header and setup the object

     Parses, in stream order: the part type, the part id, the mandatory
     flag, the mandatory/advisory parameters, and finally wires up the
     payload stream.  Marks the part as initialized when done.
     """
     typesize = self._unpackheader(_fparttypesize)[0]
     self.type = self._fromheader(typesize)
     self.ui.debug('part type: "%s"\n' % self.type)
     self.id = self._unpackheader(_fpartid)[0]
     self.ui.debug('part id: "%s"\n' % self.id)
     # extract mandatory bit from type: any upper-case letter in the
     # advertised type marks the part as mandatory
     self.mandatory = (self.type != self.type.lower())
     self.type = self.type.lower()
     ## reading parameters
     # param count
     mancount, advcount = self._unpackheader(_fpartparamcount)
     self.ui.debug('part parameters: %i\n' % (mancount + advcount))
     # param size
     fparamsizes = _makefpartparamsizes(mancount + advcount)
     paramsizes = self._unpackheader(fparamsizes)
     # pair up (key size, value size) couples again
     # note: the slicing below relies on zip() returning a list (Python 2)
     paramsizes = zip(paramsizes[::2], paramsizes[1::2])
     # split mandatory from advisory
     mansizes = paramsizes[:mancount]
     advsizes = paramsizes[mancount:]
     # retrieve param value
     manparams = []
     for key, value in mansizes:
         manparams.append((self._fromheader(key), self._fromheader(value)))
     advparams = []
     for key, value in advsizes:
         advparams.append((self._fromheader(key), self._fromheader(value)))
     self._initparams(manparams, advparams)
     ## part payload
     self._payloadstream = util.chunkbuffer(self._payloadchunks())
     # we read the data, tell it
     self._initialized = True
Esempio n. 3
0
def unbundle(header, fh):
    """Return a stream of uncompressed bundle data read from *fh*.

    *header* selects the handling: "HG10UN" passes *fh* through untouched;
    a non-"HG" header means an old client sent an uncompressed bundle and
    the probed header bytes must be replayed in front of the data;
    "HG10GZ" and "HG10BZ" decompress on the fly.  Any other "HG*" header
    aborts.
    """
    if header == "HG10UN":
        # already uncompressed: hand the stream back as-is
        return fh
    elif not header.startswith("HG"):
        # old client with uncompressed bundle: the bytes consumed while
        # probing for a header belong to the payload, so replay them first
        def generator(f):
            yield header
            for chunk in f:
                yield chunk

    elif header == "HG10GZ":

        def generator(f):
            zd = zlib.decompressobj()
            for chunk in f:
                yield zd.decompress(chunk)

    elif header == "HG10BZ":

        def generator(f):
            # the stream is stored without its leading "BZ" magic, so feed
            # that to the decompressor manually before the real data
            zd = bz2.BZ2Decompressor()
            zd.decompress("BZ")
            for chunk in util.filechunkiter(f, 4096):
                yield zd.decompress(chunk)

    else:
        # an unknown "HG*" header used to fall through and crash with a
        # NameError on 'generator'; abort explicitly instead (this mirrors
        # decompressor() elsewhere in the file)
        raise util.Abort("unknown bundle compression '%s'" % header)
    return util.chunkbuffer(generator(fh))
Esempio n. 4
0
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """Assemble and return a full bundle for *source*.

    Produces a plain HG10 changegroup unless the client advertised 'HG2X'
    in *bundlecaps*, in which case the changegroup (and any extra parts)
    is wrapped in a bundle20.  The bundle2 side is an early implementation
    and will become lazy in the future.
    """
    # the changegroup payload is always computed first
    cg = changegroup.getbundle(repo, source, heads=heads, common=common,
                               bundlecaps=bundlecaps)
    # no bundle2 support advertised: serve the raw changegroup
    if bundlecaps is None or 'HG2X' not in bundlecaps:
        return cg
    # decode the client's bundle2 capabilities
    prefix = 'bundle2='
    clientcaps = {}
    for cap in bundlecaps:
        if not cap.startswith(prefix):
            continue
        blob = urllib.unquote(cap[len(prefix):])
        clientcaps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, clientcaps)
    if cg:
        bundler.addpart(bundle2.bundlepart('b2x:changegroup',
                                           data=cg.getchunks()))
    _getbundleextrapart(bundler, repo, source, heads=heads, common=common,
                        bundlecaps=bundlecaps, **kwargs)
    return util.chunkbuffer(bundler.getchunks())
Esempio n. 5
0
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future.

    Builds a bundle20 carrying reply capabilities, an optional heads check,
    any extra parts, and the changegroup, then sends it via ``unbundle``.
    """
    # Send known head to the server for race detection.
    capsblob = urllib.unquote(pushop.remote.capable('bundle2-exp'))
    caps = bundle2.decodecaps(capsblob)
    bundler = bundle2.bundle20(pushop.ui, caps)
    # create reply capability
    capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
    bundler.addpart(bundle2.bundlepart('b2x:replycaps', data=capsblob))
    if not pushop.force:
        # ask the server to abort if its heads changed since discovery
        part = bundle2.bundlepart('B2X:CHECK:HEADS',
                                  data=iter(pushop.remoteheads))
        bundler.addpart(part)
    extrainfo = _pushbundle2extraparts(pushop, bundler)
    # add the changegroup bundle
    cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
    cgpart = bundle2.bundlepart('B2X:CHANGEGROUP', data=cg.getchunks())
    bundler.addpart(cgpart)
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except bundle2.UnknownPartError as exc:
        # modern 'as' form: the 'except E, e' comma syntax was deprecated
        # in Python 2.6 and removed in Python 3
        raise util.Abort('missing support for %s' % exc)
Esempio n. 6
0
 def _readheader(self):
     """read the header and setup the object

     Reads the part type, id, mandatory flag and parameters from the
     underlying stream, then hooks up the payload stream and marks the
     part initialized.
     """
     typesize = self._unpackheader(_fparttypesize)[0]
     self.type = self._fromheader(typesize)
     self.ui.debug('part type: "%s"\n' % self.type)
     self.id = self._unpackheader(_fpartid)[0]
     self.ui.debug('part id: "%s"\n' % self.id)
     # extract mandatory bit from type (upper-case type means mandatory)
     self.mandatory = (self.type != self.type.lower())
     self.type = self.type.lower()
     ## reading parameters
     # param count
     mancount, advcount = self._unpackheader(_fpartparamcount)
     self.ui.debug('part parameters: %i\n' % (mancount + advcount))
     # param size
     fparamsizes = _makefpartparamsizes(mancount + advcount)
     paramsizes = self._unpackheader(fparamsizes)
     # make it a list of (key size, value size) couples again
     # note: slicing below relies on zip() returning a list (Python 2)
     paramsizes = zip(paramsizes[::2], paramsizes[1::2])
     # split mandatory from advisory
     mansizes = paramsizes[:mancount]
     advsizes = paramsizes[mancount:]
     # retrieve param value
     manparams = []
     for key, value in mansizes:
         manparams.append((self._fromheader(key), self._fromheader(value)))
     advparams = []
     for key, value in advsizes:
         advparams.append((self._fromheader(key), self._fromheader(value)))
     self._initparams(manparams, advparams)
     ## part payload
     self._payloadstream = util.chunkbuffer(self._payloadchunks())
     # we read the data, tell it
     self._initialized = True
Esempio n. 7
0
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future.

    Builds a bundle20 carrying reply capabilities, an optional heads check,
    any extra parts, and the changegroup, then sends it via ``unbundle``.
    """
    # Send known head to the server for race detection.
    capsblob = urllib.unquote(pushop.remote.capable('bundle2-exp'))
    caps = bundle2.decodecaps(capsblob)
    bundler = bundle2.bundle20(pushop.ui, caps)
    # create reply capability
    capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
    bundler.addpart(bundle2.bundlepart('b2x:replycaps', data=capsblob))
    if not pushop.force:
        # ask the server to abort if its heads changed since discovery
        part = bundle2.bundlepart('B2X:CHECK:HEADS',
                                  data=iter(pushop.remoteheads))
        bundler.addpart(part)
    extrainfo = _pushbundle2extraparts(pushop, bundler)
    # add the changegroup bundle
    cg = changegroup.getlocalbundle(pushop.repo, 'push', pushop.outgoing)
    cgpart = bundle2.bundlepart('B2X:CHANGEGROUP', data=cg.getchunks())
    bundler.addpart(cgpart)
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except bundle2.UnknownPartError as exc:
        # modern 'as' form: the 'except E, e' comma syntax was deprecated
        # in Python 2.6 and removed in Python 3
        raise util.Abort('missing support for %s' % exc)
Esempio n. 8
0
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """Assemble and return a full bundle for *source*.

    Emits a legacy bundle10 changegroup unless the client advertised
    'HG2Y' in *bundlecaps*, in which case a bundle20 is assembled from the
    registered part-generating functions.
    """
    # ---- legacy bundle10 path ----
    if bundlecaps is None or 'HG2Y' not in bundlecaps:
        # a bundle10 can only carry a changegroup, nothing else
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(
                _('request for bundle10 must include changegroup'))
        if kwargs:
            raise ValueError(
                _('unsupported getbundle arguments: %s') %
                ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common,
                                          bundlecaps=bundlecaps)

    # ---- bundle20 path ----
    # decode the client's advertised bundle2 capabilities
    marker = 'bundle2='
    clientcaps = {}
    for cap in bundlecaps:
        if cap.startswith(marker):
            blob = urllib.unquote(cap[len(marker):])
            clientcaps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, clientcaps)

    for partname in getbundle2partsorder:
        genpart = getbundle2partsmapping[partname]
        # heads/common travel with the other optional arguments
        kwargs['heads'] = heads
        kwargs['common'] = common
        genpart(bundler, repo, source, bundlecaps=bundlecaps,
                b2caps=clientcaps, **kwargs)

    return util.chunkbuffer(bundler.getchunks())
Esempio n. 9
0
    def _readheader(self):
        """read the header and setup the object

        Parses the part type, id, mandatory flag and parameters from the
        underlying stream, then builds the lazy payload stream and marks
        the part initialized.
        """
        typesize = self._unpackheader(_fparttypesize)[0]
        self.type = self._fromheader(typesize)
        self.ui.debug('part type: "%s"\n' % self.type)
        self.id = self._unpackheader(_fpartid)[0]
        self.ui.debug('part id: "%s"\n' % self.id)
        # extract mandatory bit from type (upper-case type means mandatory)
        self.mandatory = (self.type != self.type.lower())
        self.type = self.type.lower()
        ## reading parameters
        # param count
        mancount, advcount = self._unpackheader(_fpartparamcount)
        self.ui.debug('part parameters: %i\n' % (mancount + advcount))
        # param size
        fparamsizes = _makefpartparamsizes(mancount + advcount)
        paramsizes = self._unpackheader(fparamsizes)
        # make it a list of (key size, value size) couples again
        # note: slicing below relies on zip() returning a list (Python 2)
        paramsizes = zip(paramsizes[::2], paramsizes[1::2])
        # split mandatory from advisory
        mansizes = paramsizes[:mancount]
        advsizes = paramsizes[mancount:]
        # retrieve param value
        manparams = []
        for key, value in mansizes:
            manparams.append((self._fromheader(key), self._fromheader(value)))
        advparams = []
        for key, value in advsizes:
            advparams.append((self._fromheader(key), self._fromheader(value)))
        self._initparams(manparams, advparams)

        ## part payload
        def payloadchunks():
            # payload is a sequence of <size><data> frames; a zero size
            # terminates the stream
            payloadsize = self._unpack(_fpayloadsize)[0]
            self.ui.debug('payload chunk size: %i\n' % payloadsize)
            while payloadsize:
                if payloadsize == flaginterrupt:
                    # interruption detection, the handler will now read a
                    # single part and process it.
                    interrupthandler(self.ui, self._fp)()
                elif payloadsize < 0:
                    msg = 'negative payload chunk size: %i' % payloadsize
                    raise error.BundleValueError(msg)
                else:
                    yield self._readexact(payloadsize)
                payloadsize = self._unpack(_fpayloadsize)[0]
                self.ui.debug('payload chunk size: %i\n' % payloadsize)

        self._payloadstream = util.chunkbuffer(payloadchunks())
        # we read the data, tell it
        self._initialized = True
Esempio n. 10
0
    def _payloadchunks(self):
        """Yield the chunks making up this part's payload.

        Handles the two supported data sources: an iterator/generator of
        chunks, or a fixed-size bytes object.  Only fixed-size data is
        officially supported for now; this will be improved.
        """
        if util.safehasattr(self.data, 'next'):
            # iterator source: rebuffer it into evenly sized pieces
            rebuffered = util.chunkbuffer(self.data)
            while True:
                piece = rebuffered.read(preferedchunksize)
                if not piece:
                    break
                yield piece
        elif len(self.data):
            # fixed non-empty payload: emit it as a single chunk
            yield self.data
Esempio n. 11
0
    def _payloadchunks(self):
        """yield chunks of the part payload

        Exists to handle the different methods to provide data to a part."""
        # we only support fixed size data now.
        # This will be improved in the future.
        if util.safehasattr(self.data, 'next'):
            # data is an iterator/generator: drain it through a chunkbuffer
            # so chunks come out in preferedchunksize pieces
            buff = util.chunkbuffer(self.data)
            chunk = buff.read(preferedchunksize)
            while chunk:
                yield chunk
                chunk = buff.read(preferedchunksize)
        elif len(self.data):
            # plain non-empty payload: emit it whole
            yield self.data
Esempio n. 12
0
 def _readheader(self):
     """read the header and setup the object

     Parses the part type, id, mandatory flag and parameters from the
     underlying stream, then builds the lazy payload stream and marks the
     part initialized.
     """
     typesize = self._unpackheader(_fparttypesize)[0]
     self.type = self._fromheader(typesize)
     self.ui.debug('part type: "%s"\n' % self.type)
     self.id = self._unpackheader(_fpartid)[0]
     self.ui.debug('part id: "%s"\n' % self.id)
     # extract mandatory bit from type (upper-case type means mandatory)
     self.mandatory = (self.type != self.type.lower())
     self.type = self.type.lower()
     ## reading parameters
     # param count
     mancount, advcount = self._unpackheader(_fpartparamcount)
     self.ui.debug('part parameters: %i\n' % (mancount + advcount))
     # param size
     fparamsizes = _makefpartparamsizes(mancount + advcount)
     paramsizes = self._unpackheader(fparamsizes)
     # make it a list of (key size, value size) couples again
     # note: slicing below relies on zip() returning a list (Python 2)
     paramsizes = zip(paramsizes[::2], paramsizes[1::2])
     # split mandatory from advisory
     mansizes = paramsizes[:mancount]
     advsizes = paramsizes[mancount:]
     # retrieve param value
     manparams = []
     for key, value in mansizes:
         manparams.append((self._fromheader(key), self._fromheader(value)))
     advparams = []
     for key, value in advsizes:
         advparams.append((self._fromheader(key), self._fromheader(value)))
     self._initparams(manparams, advparams)
     ## part payload
     def payloadchunks():
         # payload is a sequence of <size><data> frames; a zero size
         # terminates the stream
         payloadsize = self._unpack(_fpayloadsize)[0]
         self.ui.debug('payload chunk size: %i\n' % payloadsize)
         while payloadsize:
             if payloadsize == flaginterrupt:
                 # interruption detection, the handler will now read a
                 # single part and process it.
                 interrupthandler(self.ui, self._fp)()
             elif payloadsize < 0:
                 msg = 'negative payload chunk size: %i' %  payloadsize
                 raise error.BundleValueError(msg)
             else:
                 yield self._readexact(payloadsize)
             payloadsize = self._unpack(_fpayloadsize)[0]
             self.ui.debug('payload chunk size: %i\n' % payloadsize)
     self._payloadstream = util.chunkbuffer(payloadchunks())
     # we read the data, tell it
     self._initialized = True
Esempio n. 13
0
def getsubset(repo, outgoing, bundler, source, fastpath=False):
    """Build and return an unbundle10 over the changesets in *outgoing*."""
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    heads.sort()
    # Take the fast path when told to, or when all (unfiltered) heads have
    # been requested -- in that case we know all linkrevs will be pulled by
    # the client anyway.
    if fastpath:
        fastpathlinkrev = True
    else:
        fastpathlinkrev = (repo.filtername is None
                           and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
    return unbundle10(util.chunkbuffer(gengroup), 'UN')
Esempio n. 14
0
def getsubset(repo, outgoing, bundler, source, fastpath=False):
    """Build and return an unbundle10 over the changesets in *outgoing*."""
    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know all linkrevs will
    # be pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
            repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
    return unbundle10(util.chunkbuffer(gengroup), 'UN')
Esempio n. 15
0
def decompressor(fh, alg):
    """Return a file-like object yielding *fh* decompressed per *alg*.

    Supported algorithms: 'UN' (none), 'GZ' (zlib), 'BZ' (bzip2 stored
    without its leading "BZ" magic).  Anything else aborts.
    """
    if alg == 'UN':
        # no compression: pass the stream through untouched
        return fh
    if alg == 'GZ':
        def generator(f):
            zd = zlib.decompressobj()
            for chunk in util.filechunkiter(f):
                yield zd.decompress(chunk)
    elif alg == 'BZ':
        def generator(f):
            # prime the decompressor with the stripped "BZ" magic first
            zd = bz2.BZ2Decompressor()
            zd.decompress("BZ")
            for chunk in util.filechunkiter(f, 4096):
                yield zd.decompress(chunk)
    else:
        raise util.Abort("unknown bundle compression '%s'" % alg)
    return util.chunkbuffer(generator(fh))
Esempio n. 16
0
def getbundle(repo,
              source,
              heads=None,
              common=None,
              bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kinds of parts)

    Could be a bundle HG10 or a bundle HG2X depending on the bundlecaps
    passed. For now, the bundle can contain only a changegroup, but this
    will change when more part types become available for bundle2.

    This is different from changegroup.getbundle, which only returns an
    HG10 changegroup bundle. They may eventually get reunited in the future
    when we have a clearer idea of the API we want to use to query
    different data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    # build changegroup bundle here.
    cg = changegroup.getbundle(repo,
                               source,
                               heads=heads,
                               common=common,
                               bundlecaps=bundlecaps)
    # without advertised bundle2 support, serve the raw changegroup
    if bundlecaps is None or 'HG2X' not in bundlecaps:
        return cg
    # very crude first implementation,
    # the bundle API will change and the generation will be done lazily.
    b2caps = {}
    for bcaps in bundlecaps:
        # capabilities are url-quoted behind a 'bundle2=' prefix
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)
    if cg:
        part = bundle2.bundlepart('b2x:changegroup', data=cg.getchunks())
        bundler.addpart(part)
    _getbundleextrapart(bundler,
                        repo,
                        source,
                        heads=heads,
                        common=common,
                        bundlecaps=bundlecaps,
                        **kwargs)
    return util.chunkbuffer(bundler.getchunks())
Esempio n. 17
0
def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """Assemble and return a full bundle for *source*.

    Returns a legacy bundle10 changegroup unless 'HG2Y' appears in
    *bundlecaps*; otherwise builds a bundle20 from the registered part
    generators.
    """
    if bundlecaps is None or 'HG2Y' not in bundlecaps:
        # legacy path: a bundle10 can only carry a changegroup
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))
        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20: decode the client's advertised bundle2 capabilities
    token = 'bundle2='
    clientcaps = {}
    for cap in bundlecaps:
        if cap.startswith(token):
            clientcaps.update(
                bundle2.decodecaps(urllib.unquote(cap[len(token):])))
    bundler = bundle2.bundle20(repo.ui, clientcaps)

    # heads/common travel with the other optional arguments
    kwargs['heads'] = heads
    kwargs['common'] = common

    for partname in getbundle2partsorder:
        getbundle2partsmapping[partname](
            bundler, repo, source, bundlecaps=bundlecaps, b2caps=clientcaps,
            **kwargs)

    return util.chunkbuffer(bundler.getchunks())
Esempio n. 18
0
def decompressor(fh, alg):
    """Return a file-like object that decompresses *fh* according to *alg*.

    'UN' means uncompressed (stream returned as-is); 'GZ' is zlib; 'BZ' is
    bzip2 stored without its leading "BZ" magic.  Unknown values abort.
    """
    if alg == 'UN':
        return fh
    elif alg == 'GZ':

        def generator(f):
            zd = zlib.decompressobj()
            for chunk in util.filechunkiter(f):
                yield zd.decompress(chunk)
    elif alg == 'BZ':

        def generator(f):
            # prime the decompressor with the stripped "BZ" magic first
            zd = bz2.BZ2Decompressor()
            zd.decompress("BZ")
            for chunk in util.filechunkiter(f, 4096):
                yield zd.decompress(chunk)
    else:
        raise util.Abort("unknown bundle compression '%s'" % alg)
    return util.chunkbuffer(generator(fh))
Esempio n. 19
0
def unbundle(header, fh):
    """Return a stream of uncompressed bundle data read from *fh*.

    *header* selects the handling: 'HG10UN' passes *fh* through untouched;
    a non-'HG' header means an old client sent an uncompressed bundle and
    the probed header bytes must be replayed in front of the data;
    'HG10GZ' and 'HG10BZ' decompress on the fly.  Any other 'HG*' header
    aborts.
    """
    if header == 'HG10UN':
        return fh
    elif not header.startswith('HG'):
        # old client with uncompressed bundle: replay the probed bytes
        def generator(f):
            yield header
            for chunk in f:
                yield chunk
    elif header == 'HG10GZ':
        def generator(f):
            zd = zlib.decompressobj()
            for chunk in f:
                yield zd.decompress(chunk)
    elif header == 'HG10BZ':
        def generator(f):
            # the stream lacks its "BZ" magic; feed it manually first
            zd = bz2.BZ2Decompressor()
            zd.decompress("BZ")
            for chunk in util.filechunkiter(f, 4096):
                yield zd.decompress(chunk)
    else:
        # an unknown 'HG*' header used to fall through and crash with a
        # NameError on 'generator'; abort explicitly instead (this mirrors
        # decompressor() elsewhere in the file)
        raise util.Abort("unknown bundle compression '%s'" % header)
    return util.chunkbuffer(generator(fh))
Esempio n. 20
0
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future.

    Assembles parts from the registered generators, skips the push when
    only the reply-capability part was produced, and sends the stream via
    the remote's ``unbundle``.
    """
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    # create reply capability
    capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
    bundler.newpart('b2x:replycaps', data=capsblob)
    replyhandlers = []
    for partgen in bundle2partsgenerators:
        ret = partgen(pushop, bundler)
        if callable(ret):
            # the generator returned a callback to run on the server reply
            replyhandlers.append(ret)
    # do not push if nothing to push (the replycaps part alone is not
    # actual content)
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        reply = pushop.remote.unbundle(stream, ['force'], 'push')
    except error.BundleValueError as exc:
        # modern 'as' form: the 'except E, e' comma syntax was deprecated
        # in Python 2.6 and removed in Python 3
        raise util.Abort('missing support for %s' % exc)
Esempio n. 21
0
    def seek(self, offset, whence=0):
        """Seek within the part payload (file-like ``seek`` interface).

        whence: 0 = absolute, 1 = relative to the current position,
        2 = relative to the end of the payload.
        NOTE(review): for whence == 2 the target is computed as
        ``end - offset`` rather than the conventional ``end + offset`` --
        confirm callers pass a positive "distance from end" here.
        """
        if whence == 0:
            newpos = offset
        elif whence == 1:
            newpos = self._pos + offset
        elif whence == 2:
            if not self.consumed:
                # the chunk index only covers data read so far; consume the
                # rest so the end offset is known
                self.read()
            newpos = self._chunkindex[-1][0] - offset
        else:
            raise ValueError('Unknown whence value: %r' % (whence,))

        if newpos > self._chunkindex[-1][0] and not self.consumed:
            # target lies beyond the indexed region: read everything in
            self.read()
        if not 0 <= newpos <= self._chunkindex[-1][0]:
            raise ValueError('Offset out of range')

        if self._pos != newpos:
            # restart payload iteration at the chunk containing newpos and
            # discard bytes up to the exact target offset
            chunk, internaloffset = self._findchunk(newpos)
            self._payloadstream = util.chunkbuffer(self._payloadchunks(chunk))
            adjust = self.read(internaloffset)
            if len(adjust) != internaloffset:
                raise util.Abort(_('Seek failed\n'))
            self._pos = newpos
Esempio n. 22
0
    def seek(self, offset, whence=0):
        """Seek within the part payload (file-like ``seek`` interface).

        whence: 0 = absolute, 1 = relative to the current position,
        2 = relative to the end of the payload.
        NOTE(review): for whence == 2 the target is computed as
        ``end - offset`` rather than the conventional ``end + offset`` --
        confirm callers pass a positive "distance from end" here.
        """
        if whence == 0:
            newpos = offset
        elif whence == 1:
            newpos = self._pos + offset
        elif whence == 2:
            if not self.consumed:
                # consume the rest so the end offset is known
                self.read()
            newpos = self._chunkindex[-1][0] - offset
        else:
            raise ValueError('Unknown whence value: %r' % (whence,))

        if newpos > self._chunkindex[-1][0] and not self.consumed:
            # target lies beyond the indexed region: read everything in
            self.read()
        if not 0 <= newpos <= self._chunkindex[-1][0]:
            raise ValueError('Offset out of range')

        if self._pos != newpos:
            # restart payload iteration at the chunk containing newpos and
            # discard bytes up to the exact target offset
            chunk, internaloffset = self._findchunk(newpos)
            self._payloadstream = util.chunkbuffer(self._payloadchunks(chunk))
            adjust = self.read(internaloffset)
            if len(adjust) != internaloffset:
                raise util.Abort(_('Seek failed\n'))
            self._pos = newpos
Esempio n. 23
0
def getsubset(repo, outgoing, bundler, source, fastpath=False, version='01'):
    """Return an unbundler over the changegroup for *outgoing*.

    ``packermap[version][1]`` is the unbundler class matching the requested
    changegroup *version*; the payload itself is uncompressed ('UN').
    """
    raw = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    unbundlercls = packermap[version][1]
    return unbundlercls(util.chunkbuffer(raw), 'UN')
Esempio n. 24
0
 def _decompress(self, stream):
     """Wrap *stream* in a chunkbuffer fed through zgenerator.

     zgenerator presumably yields decompressed chunks (name suggests
     zlib) -- confirm against its definition.
     """
     return util.chunkbuffer(zgenerator(stream))
Esempio n. 25
0
def getsubset(repo, outgoing, bundler, source, fastpath=False, version='01'):
    """Return an unbundler over the changegroup for *outgoing*.

    ``packermap[version][1]`` is the unbundler class matching the requested
    changegroup *version*; the payload itself is uncompressed ('UN').
    """
    gengroup = getsubsetraw(repo, outgoing, bundler, source, fastpath)
    return packermap[version][1](util.chunkbuffer(gengroup), 'UN')
Esempio n. 26
0
 def changegroupsubset(self, bases, heads, source):
     """Request from the remote the changegroup between *bases* and *heads*.

     Requires the 'changegroupsubset' capability; the response stream is
     wrapped through zgenerator into a chunkbuffer.
     """
     self.requirecap('changegroupsubset', _('look up remote changes'))
     basearg = " ".join(map(hex, bases))
     headarg = " ".join(map(hex, heads))
     resp = self.do_cmd("changegroupsubset", bases=basearg, heads=headarg)
     return util.chunkbuffer(zgenerator(resp))
Esempio n. 27
0
 def changegroup(self, nodes, kind):
     """Request from the remote a changegroup rooted at *nodes*.

     *kind* is accepted for interface compatibility but unused here; the
     response stream is wrapped through zgenerator into a chunkbuffer.
     """
     roots = " ".join([hex(node) for node in nodes])
     resp = self.do_cmd("changegroup", roots=roots)
     return util.chunkbuffer(zgenerator(resp))
Esempio n. 28
0
def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    unfi = pushop.repo.unfiltered()
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.ret is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote support phase
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        # Get the list of all revs draft on remote by public here.
        # XXX Beware that revset break if droots is not strictly
        # XXX root we may want to ensure it is but it is costly
        outdated = unfi.set('heads((%ln::%ln) and public())',
                            droots, cheads)

        b2caps = bundle2.bundle2caps(pushop.remote)
        if 'b2x:pushkey' in b2caps:
            # server supports bundle2, let's do a batched push through it
            #
            # This will eventually be unified with the changesets bundle2 push
            bundler = bundle2.bundle20(pushop.ui, b2caps)
            capsblob = bundle2.encodecaps(pushop.repo.bundle2caps)
            bundler.newpart('b2x:replycaps', data=capsblob)
            part2node = []
            enc = pushkey.encode
            for newremotehead in outdated:
                part = bundler.newpart('b2x:pushkey')
                part.addparam('namespace', enc('phases'))
                part.addparam('key', enc(newremotehead.hex()))
                part.addparam('old', enc(str(phases.draft)))
                part.addparam('new', enc(str(phases.public)))
                part2node.append((part.id, newremotehead))
            stream = util.chunkbuffer(bundler.getchunks())
            try:
                reply = pushop.remote.unbundle(stream, ['force'], 'push')
                op = bundle2.processbundle(pushop.repo, reply)
            except error.BundleValueError, exc:
                raise util.Abort('missing support for %s' % exc)
            for partid, node in part2node:
                partrep = op.records.getreplies(partid)
                results = partrep['pushkey']
                assert len(results) <= 1
                msg = None
                if not results:
                    msg = _('server ignored update of %s to public!\n') % node
                elif not int(results[0]['return']):
                    msg = _('updating %s to public failed!\n') % node
                if msg is not None:
                    pushop.ui.warn(msg)

        else:
Esempio n. 29
0
 def _callcompressable(self, cmd, **args):
     """Issue *cmd* over the wire and return the response stream wrapped
     through zgenerator into a chunkbuffer."""
     raw = self._callstream(cmd, **args)
     return util.chunkbuffer(zgenerator(raw))
Esempio n. 30
0
 def changegroup(self, nodes, kind):
     """Request from the remote a changegroup rooted at *nodes*.

     *kind* is accepted for interface compatibility but unused here; the
     response stream is wrapped through zgenerator into a chunkbuffer.
     """
     n = " ".join(map(hex, nodes))
     f = self.do_cmd("changegroup", roots=n)
     return util.chunkbuffer(zgenerator(f))
Esempio n. 31
0
 def changegroupsubset(self, bases, heads, source):
     """Request from the remote the changegroup between *bases* and *heads*.

     Requires the 'changegroupsubset' capability; the response stream is
     wrapped through zgenerator into a chunkbuffer.
     """
     self.requirecap('changegroupsubset', _('look up remote changes'))
     baselst = " ".join([hex(n) for n in bases])
     headlst = " ".join([hex(n) for n in heads])
     f = self.do_cmd("changegroupsubset", bases=baselst, heads=headlst)
     return util.chunkbuffer(zgenerator(f))
Esempio n. 32
0
 def _decompress(self, stream):
     """Wrap *stream* in a chunkbuffer fed through zgenerator.

     zgenerator presumably yields decompressed chunks (name suggests
     zlib) -- confirm against its definition.
     """
     return util.chunkbuffer(zgenerator(stream))