Example #1
    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        nodes = []

        transaction.addbackup(self._indexpath)

        for node, p1, p2, linknode, deltabase, delta, flags in deltas:
            linkrev = linkmapper(linknode)
            flags = flags or revlog.REVIDX_DEFAULT_FLAGS

            nodes.append(node)

            if node in self._indexbynode:
                continue

            # Need to resolve the fulltext from the delta base.
            if deltabase == nullid:
                text = mdiff.patch(b'', delta)
            else:
                text = mdiff.patch(self.revision(deltabase), delta)

            self._addrawrevision(node, text, transaction, linkrev, p1, p2,
                                 flags)

            if addrevisioncb:
                addrevisioncb(self, node)

        return nodes
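
The `deltas` argument unpacked in the loop above is an iterable of 7-tuples. Below is a minimal sketch of a caller, assuming `store` exposes this `addgroup()` and that a `transaction` object comes from the surrounding code; `somenode` and `somelinknode` are hypothetical 20-byte ids, and the delta is a full snapshot against the null revision built with `mdiff.trivialdiffheader()`:

from mercurial import mdiff
from mercurial.node import nullid

somenode = b'\x11' * 20      # hypothetical node id
somelinknode = b'\x22' * 20  # hypothetical linknode

text = b'hello world\n'
# A full-snapshot delta against the empty base: the header says "replace
# bytes [0, 0) with len(text) bytes" and the payload is the text itself.
delta = mdiff.trivialdiffheader(len(text)) + text

# One entry per revision: (node, p1, p2, linknode, deltabase, delta, flags).
deltas = [(somenode, nullid, nullid, somelinknode, nullid, delta, 0)]

# linkmapper translates a linknode into a linkrev; a constant stub here.
nodes = store.addgroup(deltas, lambda linknode: 0, transaction)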
Example #2
    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None,
                 maybemissingparents=False):
        if maybemissingparents:
            raise error.Abort(_('simple store does not support missing parents '
                                'write mode'))

        nodes = []

        transaction.addbackup(self._indexpath)

        for node, p1, p2, linknode, deltabase, delta, flags in deltas:
            linkrev = linkmapper(linknode)
            flags = flags or revlog.REVIDX_DEFAULT_FLAGS

            nodes.append(node)

            if node in self._indexbynode:
                continue

            # Need to resolve the fulltext from the delta base.
            if deltabase == nullid:
                text = mdiff.patch(b'', delta)
            else:
                text = mdiff.patch(self.revision(deltabase), delta)

            self._addrawrevision(node, text, transaction, linkrev, p1, p2,
                                 flags)

            if addrevisioncb:
                addrevisioncb(self, node)

        return nodes
Example #3
    def apply_delta_b(self, src, delta):
        """Apply a Mercurial bdiff delta to ``src``.

        :param src: original fulltext the delta was computed against
        :param delta: binary delta in Mercurial's bdiff format
        :return: the patched fulltext
        """
        return patch(src, delta)
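
Since `apply_delta_b()` is just `mdiff.patch()`, a quick round trip shows the contract: a delta produced by `mdiff.textdiff()` applied to the original text reproduces the target (a sketch against the public `mercurial.mdiff` API):

from mercurial import mdiff

old = b'line one\nline two\n'
new = b'line one\nline 2\nline three\n'

# textdiff() computes a binary bdiff delta from old to new...
delta = mdiff.textdiff(old, new)

# ...and patch() applies that delta back onto the original text.
assert mdiff.patch(old, delta) == new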
def addchangegroupfiles(orig, repo, source, revmap, trp, pr, needfiles):
    if requirement not in repo.requirements:
        return orig(repo, source, revmap, trp, pr, needfiles)

    files = 0
    visited = set()
    revisiondatas = {}
    queue = []

    # Normal Mercurial processes each file one at a time, adding all
    # the new revisions for that file at once. In remotefilelog a file
    # revision may depend on a different file's revision (in the case
    # of a rename/copy), so we must lay all revisions down across all
    # files in topological order.

    # read all the file chunks but don't add them
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            break
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        pr()

        if not repo.shallowmatch(f):
            fl = repo.file(f)
            fl.addgroup(source, revmap, trp)
            continue

        chain = None
        while True:
            revisiondata = source.deltachunk(chain)
            if not revisiondata:
                break

            chain = revisiondata['node']

            revisiondatas[(f, chain)] = revisiondata
            queue.append((f, chain))

            if f not in visited:
                files += 1
                visited.add(f)

        if chain is None:
            raise util.Abort(_("received file revlog group is empty"))

    processed = set()
    def available(f, node, depf, depnode):
        if depnode != nullid and (depf, depnode) not in processed:
            if (depf, depnode) not in revisiondatas:
                # It's not in the changegroup, assume it's already
                # in the repo
                return True
            # re-add self to queue
            queue.insert(0, (f, node))
            # add dependency in front
            queue.insert(0, (depf, depnode))
            return False
        return True

    skipcount = 0

    # Apply the revisions in topological order such that a revision
    # is only written once its deltabase and parents have been written.
    while queue:
        f, node = queue.pop(0)
        if (f, node) in processed:
            continue

        skipcount += 1
        if skipcount > len(queue) + 1:
            raise util.Abort(_("circular node dependency"))

        fl = repo.file(f)

        revisiondata = revisiondatas[(f, node)]
        p1 = revisiondata['p1']
        p2 = revisiondata['p2']
        linknode = revisiondata['cs']
        deltabase = revisiondata['deltabase']
        delta = revisiondata['delta']

        if not available(f, node, f, deltabase):
            continue

        base = fl.revision(deltabase)
        text = mdiff.patch(base, delta)
        # mdiff.patch may return a buffer object on Python 2; coerce to str
        if isinstance(text, buffer):
            text = str(text)

        meta, text = remotefilelog._parsemeta(text)
        if 'copy' in meta:
            copyfrom = meta['copy']
            copynode = bin(meta['copyrev'])
            copyfl = repo.file(copyfrom)
            if not available(f, node, copyfrom, copynode):
                continue

        for p in [p1, p2]:
            if p != nullid:
                if not available(f, node, f, p):
                    continue

        fl.add(text, meta, trp, linknode, p1, p2)
        processed.add((f, node))
        skipcount = 0

    repo.ui.progress(_('files'), None)

    return len(revisiondatas), files
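
The queue discipline used above is easier to see in isolation. Below is a simplified, dependency-free variant of the same idea (hypothetical names throughout): an item whose dependencies are unprocessed is retried after the rest of the queue, and skipcount flags a cycle once a full pass completes without progress. The real loop differs in that it re-queues at the front so a dependency is retried immediately.

def replay(items, deps):
    """Process items so each lands only after its dependencies.

    items: list of node names; deps: dict mapping node -> dependency list.
    """
    queue = list(items)
    processed = set()
    skipcount = 0

    while queue:
        node = queue.pop(0)
        if node in processed:
            continue

        missing = [d for d in deps.get(node, []) if d not in processed]
        if missing:
            skipcount += 1
            if skipcount > len(queue) + 1:
                raise RuntimeError('circular node dependency')
            queue.append(node)  # retry once the rest of the queue has run
            continue

        processed.add(node)  # "write" the revision
        skipcount = 0        # progress was made; reset the cycle guard

    return processed

assert replay(['b', 'a'], {'b': ['a']}) == {'a', 'b'}
# replay(['a', 'b'], {'a': ['b'], 'b': ['a']}) raises RuntimeError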
Example #5
    def addgroup(
        self,
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        duplicaterevisioncb=None,
        maybemissingparents=False,
    ):
        empty = True

        for node, p1, p2, linknode, deltabase, delta, wireflags in deltas:
            storeflags = 0

            if wireflags & repository.REVISION_FLAG_CENSORED:
                storeflags |= FLAG_CENSORED

            if wireflags & ~repository.REVISION_FLAG_CENSORED:
                raise SQLiteStoreError(b'unhandled revision flag')

            if maybemissingparents:
                if p1 != nullid and not self.hasnode(p1):
                    p1 = nullid
                    storeflags |= FLAG_MISSING_P1

                if p2 != nullid and not self.hasnode(p2):
                    p2 = nullid
                    storeflags |= FLAG_MISSING_P2

            baserev = self.rev(deltabase)

            # If base is censored, delta must be full replacement in a single
            # patch operation.
            if baserev != nullrev and self.iscensored(baserev):
                hlen = struct.calcsize(b'>lll')
                oldlen = len(self.rawdata(deltabase, _verifyhash=False))
                newlen = len(delta) - hlen

                if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                    raise error.CensoredBaseError(self._path, deltabase)

            if not (storeflags & FLAG_CENSORED) and storageutil.deltaiscensored(
                delta, baserev, lambda x: len(self.rawdata(x))
            ):
                storeflags |= FLAG_CENSORED

            linkrev = linkmapper(linknode)

            if node in self._revisions:
                # Possibly reset parents to make them proper.
                entry = self._revisions[node]

                if entry.flags & FLAG_MISSING_P1 and p1 != nullid:
                    entry.p1node = p1
                    entry.p1rev = self._nodetorev[p1]
                    entry.flags &= ~FLAG_MISSING_P1

                    self._db.execute(
                        'UPDATE fileindex SET p1rev=?, flags=? WHERE id=?',
                        (self._nodetorev[p1], entry.flags, entry.rid),
                    )

                if entry.flags & FLAG_MISSING_P2 and p2 != nullid:
                    entry.p2node = p2
                    entry.p2rev = self._nodetorev[p2]
                    entry.flags &= ~FLAG_MISSING_P2

                    self._db.execute(
                        'UPDATE fileindex SET p2rev=?, flags=? WHERE id=?',
                        (self._nodetorev[p2], entry.flags, entry.rid),
                    )

                if duplicaterevisioncb:
                    duplicaterevisioncb(self, node)
                empty = False
                continue

            if deltabase == nullid:
                text = mdiff.patch(b'', delta)
                storedelta = None
            else:
                text = None
                storedelta = (deltabase, delta)

            self._addrawrevision(
                node,
                text,
                transaction,
                linkrev,
                p1,
                p2,
                storedelta=storedelta,
                flags=storeflags,
            )

            if addrevisioncb:
                addrevisioncb(self, node)
            empty = False

        return not empty
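
The censored-base branch above requires the incoming delta to be a full replacement: a single hunk covering the whole censored fulltext. `mdiff.replacediffheader()` builds exactly that header, so a compliant delta can be constructed as follows (a sketch with hypothetical `old`/`new` texts):

import struct

from mercurial import mdiff

old = b'censored placeholder'
new = b'replacement fulltext\n'

# One hunk replacing bytes [0, len(old)) with the entire new text.
delta = mdiff.replacediffheader(len(old), len(new)) + new

hlen = struct.calcsize(b'>lll')
assert delta[:hlen] == mdiff.replacediffheader(len(old), len(new))
assert mdiff.patch(old, delta) == new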
Example #6
def addchangegroupfiles(orig, repo, source, revmap, trp, expectedfiles, *args):
    if not shallowutil.isenabled(repo):
        return orig(repo, source, revmap, trp, expectedfiles, *args)

    newfiles = 0
    visited = set()
    revisiondatas = {}
    queue = []

    # Normal Mercurial processes each file one at a time, adding all
    # the new revisions for that file at once. In remotefilelog a file
    # revision may depend on a different file's revision (in the case
    # of a rename/copy), so we must lay all revisions down across all
    # files in topological order.

    # read all the file chunks but don't add them
    progress = repo.ui.makeprogress(_('files'), total=expectedfiles)
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            break
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        progress.increment()

        if not repo.shallowmatch(f):
            fl = repo.file(f)
            deltas = source.deltaiter()
            fl.addgroup(deltas, revmap, trp)
            continue

        chain = None
        while True:
            # returns: (node, p1, p2, cs, deltabase, delta, flags) or None
            revisiondata = source.deltachunk(chain)
            if not revisiondata:
                break

            chain = revisiondata[0]

            revisiondatas[(f, chain)] = revisiondata
            queue.append((f, chain))

            if f not in visited:
                newfiles += 1
                visited.add(f)

        if chain is None:
            raise error.Abort(_("received file revlog group is empty"))

    processed = set()

    def available(f, node, depf, depnode):
        if depnode != nullid and (depf, depnode) not in processed:
            if (depf, depnode) not in revisiondatas:
                # It's not in the changegroup, assume it's already
                # in the repo
                return True
            # re-add self to queue
            queue.insert(0, (f, node))
            # add dependency in front
            queue.insert(0, (depf, depnode))
            return False
        return True

    skipcount = 0

    # Prefetch the non-bundled revisions that we will need
    prefetchfiles = []
    for f, node in queue:
        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
        dependents = [revisiondata[1], revisiondata[2], revisiondata[4]]

        for dependent in dependents:
            if dependent == nullid or (f, dependent) in revisiondatas:
                continue
            prefetchfiles.append((f, hex(dependent)))

    repo.fileservice.prefetch(prefetchfiles)

    # Apply the revisions in topological order such that a revision
    # is only written once its deltabase and parents have been written.
    while queue:
        f, node = queue.pop(0)
        if (f, node) in processed:
            continue

        skipcount += 1
        if skipcount > len(queue) + 1:
            raise error.Abort(_("circular node dependency"))

        fl = repo.file(f)

        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
        node, p1, p2, linknode, deltabase, delta, flags = revisiondata

        if not available(f, node, f, deltabase):
            continue

        base = fl.revision(deltabase, raw=True)
        text = mdiff.patch(base, delta)
        if not isinstance(text, bytes):
            text = bytes(text)

        meta, text = shallowutil.parsemeta(text)
        if 'copy' in meta:
            copyfrom = meta['copy']
            copynode = bin(meta['copyrev'])
            if not available(f, node, copyfrom, copynode):
                continue

        for p in [p1, p2]:
            if p != nullid:
                if not available(f, node, f, p):
                    continue

        fl.add(text, meta, trp, linknode, p1, p2)
        processed.add((f, node))
        skipcount = 0

    progress.complete()

    return len(revisiondatas), newfiles
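
`shallowutil.parsemeta()` splits the copy metadata off the fulltext. The header follows the standard filelog convention: the text opens with `\x01\n`, carries `key: value` lines, and closes with another `\x01\n`. An illustrative reimplementation of that parsing, not the actual helper:

def parsemeta_sketch(text):
    """Split a filelog-style metadata header off text.

    Returns (meta, body); meta is an empty dict when no header is present.
    """
    if not text.startswith(b'\x01\n'):
        return {}, text
    end = text.index(b'\x01\n', 2)
    meta = {}
    for line in text[2:end].split(b'\n'):
        if line:
            key, value = line.split(b': ', 1)
            meta[key] = value
    return meta, text[end + 2:]

raw = (b'\x01\ncopy: old/path.py\ncopyrev: ' + b'ab' * 20 + b'\n\x01\n'
       b'file contents\n')
meta, body = parsemeta_sketch(raw)
assert meta[b'copy'] == b'old/path.py'
assert body == b'file contents\n'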
Example #7
def addchangegroupfiles(orig, repo, source, revmap, trp, pr, needfiles):
    if requirement not in repo.requirements:
        return orig(repo, source, revmap, trp, pr, needfiles)

    files = 0
    visited = set()
    revisiondatas = {}
    queue = []

    # Normal Mercurial processes each file one at a time, adding all
    # the new revisions for that file at once. In remotefilelog a file
    # revision may depend on a different file's revision (in the case
    # of a rename/copy), so we must lay all revisions down across all
    # files in topological order.

    # read all the file chunks but don't add them
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            break
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        pr()

        if not repo.shallowmatch(f):
            fl = repo.file(f)
            fl.addgroup(source, revmap, trp)
            continue

        chain = None
        while True:
            revisiondata = source.deltachunk(chain)
            if not revisiondata:
                break

            chain = revisiondata['node']

            revisiondatas[(f, chain)] = revisiondata
            queue.append((f, chain))

            if f not in visited:
                files += 1
                visited.add(f)

        if chain is None:
            raise util.Abort(_("received file revlog group is empty"))

    processed = set()

    def available(f, node, depf, depnode):
        if depnode != nullid and (depf, depnode) not in processed:
            if (depf, depnode) not in revisiondatas:
                # It's not in the changegroup, assume it's already
                # in the repo
                return True
            # re-add self to queue
            queue.insert(0, (f, node))
            # add dependency in front
            queue.insert(0, (depf, depnode))
            return False
        return True

    skipcount = 0

    # Apply the revisions in topological order such that a revision
    # is only written once its deltabase and parents have been written.
    while queue:
        f, node = queue.pop(0)
        if (f, node) in processed:
            continue

        skipcount += 1
        if skipcount > len(queue) + 1:
            raise util.Abort(_("circular node dependency"))

        fl = repo.file(f)

        revisiondata = revisiondatas[(f, node)]
        p1 = revisiondata['p1']
        p2 = revisiondata['p2']
        linknode = revisiondata['cs']
        deltabase = revisiondata['deltabase']
        delta = revisiondata['delta']

        if not available(f, node, f, deltabase):
            continue

        base = fl.revision(deltabase)
        text = mdiff.patch(base, delta)
        # mdiff.patch may return a buffer object on Python 2; coerce to str
        if isinstance(text, buffer):
            text = str(text)

        meta, text = remotefilelog._parsemeta(text)
        if 'copy' in meta:
            copyfrom = meta['copy']
            copynode = bin(meta['copyrev'])
            copyfl = repo.file(copyfrom)
            if not available(f, node, copyfrom, copynode):
                continue

        for p in [p1, p2]:
            if p != nullid:
                if not available(f, node, f, p):
                    continue

        fl.add(text, meta, trp, linknode, p1, p2)
        processed.add((f, node))
        skipcount = 0

    repo.ui.progress(_('files'), None)

    return len(revisiondatas), files
Example #8
def addchangegroupfiles(orig, repo, source, revmap, trp, expectedfiles, *args):
    if requirement not in repo.requirements:
        return orig(repo, source, revmap, trp, expectedfiles, *args)

    files = 0
    newfiles = 0
    visited = set()
    revisiondatas = {}
    queue = []

    # Normal Mercurial processes each file one at a time, adding all
    # the new revisions for that file at once. In remotefilelog a file
    # revision may depend on a different file's revision (in the case
    # of a rename/copy), so we must lay all revisions down across all
    # files in topological order.

    # read all the file chunks but don't add them
    while True:
        chunkdata = source.filelogheader()
        if not chunkdata:
            break
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        repo.ui.progress(_('files'), files, total=expectedfiles)

        if not repo.shallowmatch(f):
            fl = repo.file(f)
            deltas = source.deltaiter()
            fl.addgroup(deltas, revmap, trp)
            continue

        chain = None
        while True:
            # returns: (node, p1, p2, cs, deltabase, delta, flags) or None
            revisiondata = source.deltachunk(chain)
            if not revisiondata:
                break

            chain = revisiondata[0]

            revisiondatas[(f, chain)] = revisiondata
            queue.append((f, chain))

            if f not in visited:
                newfiles += 1
                visited.add(f)

        if chain is None:
            raise error.Abort(_("received file revlog group is empty"))

    processed = set()
    def available(f, node, depf, depnode):
        if depnode != nullid and (depf, depnode) not in processed:
            if (depf, depnode) not in revisiondatas:
                # It's not in the changegroup, assume it's already
                # in the repo
                return True
            # re-add self to queue
            queue.insert(0, (f, node))
            # add dependency in front
            queue.insert(0, (depf, depnode))
            return False
        return True

    skipcount = 0

    # Prefetch the non-bundled revisions that we will need
    prefetchfiles = []
    for f, node in queue:
        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
        dependents = [revisiondata[1], revisiondata[2], revisiondata[4]]

        for dependent in dependents:
            if dependent == nullid or (f, dependent) in revisiondatas:
                continue
            prefetchfiles.append((f, hex(dependent)))

    repo.fileservice.prefetch(prefetchfiles)

    # Apply the revisions in topological order such that a revision
    # is only written once its deltabase and parents have been written.
    while queue:
        f, node = queue.pop(0)
        if (f, node) in processed:
            continue

        skipcount += 1
        if skipcount > len(queue) + 1:
            raise error.Abort(_("circular node dependency"))

        fl = repo.file(f)

        revisiondata = revisiondatas[(f, node)]
        # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
        node, p1, p2, linknode, deltabase, delta, flags = revisiondata

        if not available(f, node, f, deltabase):
            continue

        base = fl.revision(deltabase, raw=True)
        text = mdiff.patch(base, delta)
        # mdiff.patch may return a buffer object on Python 2; coerce to str
        if isinstance(text, buffer):
            text = str(text)

        meta, text = shallowutil.parsemeta(text)
        if 'copy' in meta:
            copyfrom = meta['copy']
            copynode = bin(meta['copyrev'])
            if not available(f, node, copyfrom, copynode):
                continue

        for p in [p1, p2]:
            if p != nullid:
                if not available(f, node, f, p):
                    continue

        fl.add(text, meta, trp, linknode, p1, p2)
        processed.add((f, node))
        skipcount = 0

    repo.ui.progress(_('files'), None)

    return len(revisiondatas), newfiles
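
Apart from the buffer/bytes handling, the main drift across these variants is the progress API: Examples #3, #7, and #8 use the legacy `ui.progress()` calls, while Example #6 uses the `ui.makeprogress()` helper that superseded them. Side by side, as small sketches over a `ui` object:

from mercurial.i18n import _

def report_legacy(ui, pos, total):
    # Legacy style (Examples #3, #7, #8): positional update, None to finish.
    ui.progress(_('files'), pos, total=total)
    ui.progress(_('files'), None)

def report_modern(ui, total):
    # Style used in Example #6: a progress object with its own lifecycle.
    progress = ui.makeprogress(_('files'), total=total)
    for _item in range(total):
        progress.increment()
    progress.complete()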