Example #1
def buildfileblobheader(size, flags, version=None):
    """return the header of a remotefilelog blob.

    see remotefilelogserver.createfileblob for the format.
    approximately the reverse of parsesizeflags.

    version could be 0 or 1, or None (auto decide).
    """
    # choose v0 if flags is empty, otherwise v1
    if version is None:
        version = int(bool(flags))
    if version == 1:
        header = b'v1\n%s%d\n%s%d' % (
            constants.METAKEYSIZE,
            size,
            constants.METAKEYFLAG,
            flags,
        )
    elif version == 0:
        if flags:
            raise error.ProgrammingError(b'fileblob v0 does not support flag')
        header = b'%d' % size
    else:
        raise error.ProgrammingError(b'unknown fileblob version %d' % version)
    return header
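For orientation, a minimal sketch of the two header layouts this produces, assuming remotefilelog's single-byte metadata keys METAKEYSIZE = b's' and METAKEYFLAG = b'f' (an assumption, not shown in the snippet):

size, flags = 1234, 2
v0 = b'%d' % size                                   # b'1234'
v1 = b'v1\n%s%d\n%s%d' % (b's', size, b'f', flags)  # version line, then one line per key
assert v1 == b'v1\ns1234\nf2'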
Example #2
def shlexer(data=None, filepath=None, wordchars=None, whitespace=None):
    if data is None:
        if pycompat.ispy3:
            data = open(filepath, 'r', encoding=r'latin1')
        else:
            data = open(filepath, 'r')
    else:
        if filepath is not None:
            raise error.ProgrammingError(
                'shlexer only accepts data or filepath, not both')
        if pycompat.ispy3:
            data = data.decode('latin1')
    l = shlex.shlex(data, infile=filepath, posix=True)
    if whitespace is not None:
        l.whitespace_split = True
        if pycompat.ispy3:
            l.whitespace += whitespace.decode('latin1')
        else:
            l.whitespace += whitespace
    if wordchars is not None:
        if pycompat.ispy3:
            l.wordchars += wordchars.decode('latin1')
        else:
            l.wordchars += wordchars
    if pycompat.ispy3:
        return _shlexpy3proxy(l)
    return l
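The latin1 decode/encode dance above exists because shlex on Python 3 only accepts str while Mercurial passes bytes around, and latin1 maps every byte 1:1 to a code point. A standalone sketch of that round trip (input illustrative):

import shlex

data = b'key = "two words" # trailing comment'.decode('latin1')
lexer = shlex.shlex(data, posix=True)
lexer.whitespace_split = True
tokens = [tok.encode('latin1') for tok in lexer]
assert tokens == [b'key', b'=', b'two words']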
Example #3
    def flags(self, node):
        if isinstance(node, int):
            raise error.ProgrammingError(
                b'remotefilelog does not accept integer rev for flags'
            )
        store = self.repo.contentstore
        return store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
Example #4
def wrappeddispatch(orig, repo, proto, command):
    """Wraps wireprotov1server.dispatch() to record command requests."""
    # TRACKING hg46
    # For historical reasons, SSH and HTTP use different log events. With
    # the unification of the dispatch code in 4.6, we could likely unify these.
    # Keep in mind this function is only called on 4.6+: 4.5 has a different
    # code path completely.

    if isinstance(proto, wireprotoserver.httpv1protocolhandler):
        logevent(repo.ui, repo._serverlog, 'BEGIN_PROTOCOL', command)
    elif isinstance(proto, wireprotoserver.sshv1protocolhandler):
        logevent(repo.ui, repo._serverlog, 'BEGIN_SSH_COMMAND', command)

        startusage = resource.getrusage(resource.RUSAGE_SELF)

        repo._serverlog.update({
            'requestid': str(uuid.uuid1()),
            'startcpu': startusage.ru_utime + startusage.ru_stime,
            'starttime': time.time(),
            'ui': weakref.ref(repo.ui),
        })
    else:
        raise error.ProgrammingError('unhandled protocol handler: %r' % proto)

    return orig(repo, proto, command)
Example #5
def wrappeddispatch(orig, repo, proto, command):
    """Wraps wireprotov1server.dispatch() to record command requests."""
    # TRACKING hg46
    # For historical reasons, SSH and HTTP use different log events. With
    # the unification of the dispatch code in 4.6, we could likely unify these.
    # Keep in mind this function is only called on 4.6+: 4.5 has a different
    # code path completely.

    if isinstance(proto, wireprotoserver.httpv1protocolhandler):
        logevent(repo.ui, repo._serverlog, 'BEGIN_PROTOCOL', command)
    elif isinstance(proto, wireprotoserver.sshv1protocolhandler):
        logevent(repo.ui, repo._serverlog, 'BEGIN_SSH_COMMAND', command)

        startusage = resource.getrusage(resource.RUSAGE_SELF)

        repo._serverlog.update({
            'requestid': pycompat.bytestr(uuid.uuid1()),
            'startcpu': startusage.ru_utime + startusage.ru_stime,
            'starttime': time.time(),
            'ui': weakref.ref(repo.ui),
        })
    else:
        raise error.ProgrammingError(b'unhandled protocol handler: %r' % proto)

    # If the return type is a `pushres`, `_sshv1respondbytes` will be called twice.
    # We only want to log a completed SSH event on the second call, so flip the
    # `ignorecall` flag here.
    res = orig(repo, proto, command)
    if isinstance(res, wireprototypes.pushres):
        repo._serverlog['ignorecall'] = True

    return res
Example #6
    def node(self, rev):
        # This is a hack.
        if isinstance(rev, int):
            raise error.ProgrammingError(
                b'remotefilelog does not convert integer rev to node'
            )
        return rev
Example #7
    def _batch(self, pointers, localstore, action):
        if action not in ['upload', 'download']:
            raise error.ProgrammingError('invalid Git-LFS action: %s' % action)

        response = self._batchrequest(pointers, action)
        objects = self._extractobjects(response, pointers, action)
        total = sum(x.get('size', 0) for x in objects)
        sizes = {}
        for obj in objects:
            sizes[obj.get('oid')] = obj.get('size', 0)
        topic = {
            'upload': _('lfs uploading'),
            'download': _('lfs downloading')
        }[action]
        if len(objects) > 1:
            self.ui.note(
                _('lfs: need to transfer %d objects (%s)\n') %
                (len(objects), util.bytecount(total)))
        self.ui.progress(topic, 0, total=total)

        def transfer(chunk):
            for obj in chunk:
                objsize = obj.get('size', 0)
                if self.ui.verbose:
                    if action == 'download':
                        msg = _('lfs: downloading %s (%s)\n')
                    elif action == 'upload':
                        msg = _('lfs: uploading %s (%s)\n')
                    self.ui.note(msg %
                                 (obj.get('oid'), util.bytecount(objsize)))
                retry = self.retry
                while True:
                    try:
                        self._basictransfer(obj, action, localstore)
                        yield 1, obj.get('oid')
                        break
                    except socket.error as ex:
                        if retry > 0:
                            self.ui.note(
                                _('lfs: failed: %r (remaining retry %d)\n') %
                                (ex, retry))
                            retry -= 1
                            continue
                        raise

        # Until https multiplexing gets sorted out
        if self.ui.configbool('experimental', 'lfs.worker-enable'):
            oids = worker.worker(self.ui, 0.1, transfer, (),
                                 sorted(objects, key=lambda o: o.get('oid')))
        else:
            oids = transfer(sorted(objects, key=lambda o: o.get('oid')))

        processed = 0
        for _one, oid in oids:
            processed += sizes[oid]
            self.ui.progress(topic, processed, total=total)
            self.ui.note(_('lfs: processed: %s\n') % oid)
        self.ui.progress(topic, pos=None, total=total)
Example #8
    def wrapped_advanceboundary(orig, repo, tr, targetphase, nodes):
        if targetphase in moves:
            raise error.ProgrammingError('already handled phase %r' %
                                         targetphase)

        if targetphase not in supported_phases:
            raise error.Abort(_('only draft and public phases are supported'))

        moves[targetphase] = nodes

        return orig(repo, tr, targetphase, nodes)
Example #9
    def getllrev(f):
        """(fctx) -> int"""
        # f should not be a linelog revision
        if isinstance(f, int):
            raise error.ProgrammingError('f should not be an int')
        # f is a fctx, allocate linelog rev on demand
        hsh = f.node()
        rev = revmap.hsh2rev(hsh)
        if rev is None:
            rev = revmap.append(hsh, sidebranch=True, path=f.path())
        return rev
Example #10
def extsetup(ui):
    # It's easier for tests to define the server behavior via environment
    # variables than config options. This is because `hg serve --stdio`
    # has to be invoked with a certain form for security reasons and
    # `dummyssh` can't just add `--config` flags to the command line.
    servermode = ui.environ.get(b'SSHSERVERMODE')

    if servermode == b'banner':
        wireprotoserver.sshserver = bannerserver
    elif servermode == b'no-hello':
        wireprotoserver.sshserver = prehelloserver
    elif servermode:
        raise error.ProgrammingError(b'unknown server mode: %s' % servermode)

    peermode = ui.config(b'sshpeer', b'mode')

    if peermode == b'extra-handshake-commands':
        extensions.wrapfunction(sshpeer, '_performhandshake', performhandshake)
    elif peermode:
        raise error.ProgrammingError(b'unknown peer mode: %s' % peermode)
Example #11
    def rev(self, node):
        validatenode(node)

        try:
            self._indexbynode[node]
        except KeyError:
            raise error.LookupError(node, self._indexpath, _('no node'))

        for rev, entry in self._indexbyrev.items():
            if entry[b'node'] == node:
                return rev

        raise error.ProgrammingError('this should not occur')
Example #12
    def changelogadd(orig, self, *args):
        oldlen = len(self)
        node = orig(self, *args)
        newlen = len(self)
        if oldlen != newlen:
            for oldargs in pendingfilecommits:
                log, rt, tr, link, p1, p2, n, fl, c, m = oldargs
                linknode = self.node(link)
                if linknode == node:
                    log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m)
                else:
                    raise error.ProgrammingError(
                        'pending multiple integer revisions are not supported')
        else:
            # "link" is actually wrong here (it is set to len(changelog))
            # if changelog remains unchanged, skip writing file revisions
            # but still do a sanity check about pending multiple revisions
            if len(set(x[3] for x in pendingfilecommits)) > 1:
                raise error.ProgrammingError(
                    'pending multiple integer revisions are not supported')
        del pendingfilecommits[:]
        return node
Example #13
    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=repository.CG_DELTAMODE_STD,
        sidedata_helpers=None,
    ):
        if nodesorder not in (b'nodes', b'storage', b'linear', None):
            raise error.ProgrammingError(
                b'unhandled value for nodesorder: %s' % nodesorder)

        nodes = [n for n in nodes if n != sha1nodeconstants.nullid]

        if not nodes:
            return

        # TODO perform in a single query.
        res = self._db.execute(
            'SELECT revnum, deltaid FROM fileindex '
            'WHERE pathid=? '
            '    AND node in (%s)' % (','.join(['?'] * len(nodes))),
            tuple([self._pathid] + nodes),
        )

        deltabases = {}

        for rev, deltaid in res:
            res = self._db.execute(
                'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?',
                (self._pathid, deltaid),
            )
            deltabases[rev] = res.fetchone()[0]

        # TODO define revdifffn so we can use delta from storage.
        for delta in storageutil.emitrevisions(
                self,
                nodes,
                nodesorder,
                sqliterevisiondelta,
                deltaparentfn=deltabases.__getitem__,
                revisiondata=revisiondata,
                assumehaveparentrevisions=assumehaveparentrevisions,
                deltamode=deltamode,
                sidedata_helpers=sidedata_helpers,
        ):

            yield delta
Example #14
def buildpackmeta(metadict):
    """like _buildpackmeta, but typechecks metadict and normalize it.

    This means, METAKEYSIZE and METAKEYSIZE should have integers as values,
    and METAKEYFLAG will be dropped if its value is 0.
    """
    newmeta = {}
    for k, v in (metadict or {}).iteritems():
        expectedtype = _metaitemtypes.get(k, (bytes,))
        if not isinstance(v, expectedtype):
            raise error.ProgrammingError('packmeta: wrong type of key %s' % k)
        # normalize int to binary buffer
        if int in expectedtype:
            # optimization: remove flag if it's 0 to save space
            if k == constants.METAKEYFLAG and v == 0:
                continue
            v = int2bin(v)
        newmeta[k] = v
    return _buildpackmeta(newmeta)
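A self-contained sketch of the normalization the docstring describes, with b's' / b'f' standing in for METAKEYSIZE / METAKEYFLAG and a decimal encoding standing in for int2bin (both stand-ins are assumptions for illustration):

def normalize(metadict):
    out = {}
    for k, v in (metadict or {}).items():
        if k == b'f' and v == 0:   # a zero flag is dropped to save space
            continue
        if isinstance(v, int):     # stand-in for int2bin(v)
            v = b'%d' % v
        out[k] = v
    return out

assert normalize({b's': 10, b'f': 0}) == {b's': b'10'}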
Example #15
    def emitrevisions(self,
                      nodes,
                      nodesorder=None,
                      revisiondata=False,
                      assumehaveparentrevisions=False,
                      deltaprevious=False):
        if nodesorder not in ('nodes', 'storage', 'linear', None):
            raise error.ProgrammingError('unhandled value for nodesorder: %s' %
                                         nodesorder)

        nodes = [n for n in nodes if n != nullid]

        if not nodes:
            return

        # TODO perform in a single query.
        res = self._db.execute(
            r'SELECT revnum, deltaid FROM fileindex '
            r'WHERE pathid=? '
            r'    AND node in (%s)' % (r','.join([r'?'] * len(nodes))),
            tuple([self._pathid] + nodes))

        deltabases = {}

        for rev, deltaid in res:
            res = self._db.execute(
                r'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?',
                (self._pathid, deltaid))
            deltabases[rev] = res.fetchone()[0]

        # TODO define revdifffn so we can use delta from storage.
        for delta in storageutil.emitrevisions(
                self,
                nodes,
                nodesorder,
                sqliterevisiondelta,
                deltaparentfn=deltabases.__getitem__,
                revisiondata=revisiondata,
                assumehaveparentrevisions=assumehaveparentrevisions,
                deltaprevious=deltaprevious):

            yield delta
Example #16
def performhandshake(orig, ui, stdin, stdout, stderr):
    """Wrapped version of sshpeer._performhandshake to send extra commands."""
    mode = ui.config(b'sshpeer', b'handshake-mode')
    if mode == b'pre-no-args':
        ui.debug(b'sending no-args command\n')
        stdin.write(b'no-args\n')
        stdin.flush()
        return orig(ui, stdin, stdout, stderr)
    elif mode == b'pre-multiple-no-args':
        ui.debug(b'sending unknown1 command\n')
        stdin.write(b'unknown1\n')
        ui.debug(b'sending unknown2 command\n')
        stdin.write(b'unknown2\n')
        ui.debug(b'sending unknown3 command\n')
        stdin.write(b'unknown3\n')
        stdin.flush()
        return orig(ui, stdin, stdout, stderr)
    else:
        raise error.ProgrammingError(b'unknown HANDSHAKECOMMANDMODE: %s' %
                                     mode)
Example #17
    def walk(tree):
        op = tree[0]
        if op == b'symbol':
            drev = _parsedrev(tree[1])
            if drev:
                return smartset.baseset([drev])
            elif tree[1] in _knownstatusnames:
                drevs = [r for r in validids
                         if _getstatusname(prefetched[r]) == tree[1]]
                return smartset.baseset(drevs)
            else:
                raise error.Abort(_(b'unknown symbol: %s') % tree[1])
        elif op in {b'and_', b'add', b'sub'}:
            assert len(tree) == 3
            # operator attribute names are str, so decode the bytes op first
            return getattr(operator, op.decode('utf-8'))(
                walk(tree[1]), walk(tree[2]))
        elif op == b'group':
            return walk(tree[1])
        elif op == b'ancestors':
            return getstack(walk(tree[1]))
        else:
            raise error.ProgrammingError(b'illegal tree: %r' % tree)
Example #18
def _buildpackmeta(metadict):
    """reverse of _parsepackmeta, dict -> bytes (<metadata-list>)

    The dict contains raw content - both keys and values are strings.
    Upper-level code may want to serialize other types (like integers)
    to strings before calling this function.

    Raises ProgrammingError when a metadata key is illegal, or ValueError
    if the length limit is exceeded.
    """
    metabuf = ''
    for k, v in sorted((metadict or {}).iteritems()):
        if len(k) != 1:
            raise error.ProgrammingError('packmeta: illegal key: %s' % k)
        if len(v) > 0xfffe:
            raise ValueError('metadata value is too long: 0x%x > 0xfffe'
                             % len(v))
        metabuf += k
        metabuf += struct.pack('!H', len(v))
        metabuf += v
    # len(metabuf) is guaranteed representable in 4 bytes, because there are
    # only 256 keys, and for each value, len(value) <= 0xfffe.
    return metabuf
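A worked example of one <metadata-list> entry as packed above: a one-byte key, a big-endian 16-bit length, then the raw value.

import struct

k, v = b's', b'1234'
entry = k + struct.pack('!H', len(v)) + v
assert entry == b's\x00\x041234'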
Example #19
def _find_nearest_ancestor_introducing_node(
    db, gitrepo, file_path, walk_start, filenode
):
    """Find the nearest ancestor that introduces a file node.

    Args:
      db: a handle to our sqlite database.
      gitrepo: A pygit2.Repository instance.
      file_path: the path of a file in the repo
      walk_start: a pygit2.Oid that is a commit where we should start walking
                  for our nearest ancestor.
      filenode: the hexlified node id of the file we are looking for.

    Returns:
      A hexlified SHA that is the commit ID of the next-nearest parent.
    """
    assert isinstance(file_path, str), 'file_path must be str, got %r' % type(
        file_path
    )
    assert isinstance(filenode, str), 'filenode must be str, got %r' % type(
        filenode
    )
    parent_options = {
        row[0].decode('ascii')
        for row in db.execute(
            'SELECT node FROM changedfiles '
            'WHERE filename = ? AND filenode = ?',
            (file_path, filenode),
        )
    }
    inner_walker = gitrepo.walk(walk_start, _OUR_ORDER)
    for w in inner_walker:
        if w.id.hex in parent_options:
            return w.id.hex
    raise error.ProgrammingError(
        'Unable to find introducing commit for %s node %s from %s'
        % (file_path, filenode, walk_start)
    )
Example #20
    def append(self, hsh, sidebranch=False, path=None, flush=False):
        """add a binary hg hash and return the mapped linelog revision.
        if flush is True, incrementally update the file.
        """
        if hsh in self._hsh2rev:
            raise error.CorruptedFileError(b'%r is in revmap already' %
                                           hex(hsh))
        if len(hsh) != _hshlen:
            raise hgerror.ProgrammingError(b'hsh must be %d-char long' %
                                           _hshlen)
        idx = len(self._rev2hsh)
        flag = 0
        if sidebranch:
            flag |= sidebranchflag
        if path is not None and path != self._renamepaths[-1]:
            flag |= renameflag
            self._renamerevs.append(idx)
            self._renamepaths.append(path)
        self._rev2hsh.append(hsh)
        self._rev2flag.append(flag)
        self._hsh2rev[hsh] = idx
        if flush:
            self.flush()
        return idx
Example #21
    def _batch(self, pointers, localstore, action):
        if action not in [b'upload', b'download']:
            raise error.ProgrammingError(b'invalid Git-LFS action: %s' % action)

        response = self._batchrequest(pointers, action)
        objects = self._extractobjects(response, pointers, action)
        total = sum(x.get(b'size', 0) for x in objects)
        sizes = {}
        for obj in objects:
            sizes[obj.get(b'oid')] = obj.get(b'size', 0)
        topic = {
            b'upload': _(b'lfs uploading'),
            b'download': _(b'lfs downloading'),
        }[action]
        if len(objects) > 1:
            self.ui.note(
                _(b'lfs: need to transfer %d objects (%s)\n')
                % (len(objects), util.bytecount(total))
            )

        def transfer(chunk):
            for obj in chunk:
                objsize = obj.get(b'size', 0)
                if self.ui.verbose:
                    if action == b'download':
                        msg = _(b'lfs: downloading %s (%s)\n')
                    elif action == b'upload':
                        msg = _(b'lfs: uploading %s (%s)\n')
                    self.ui.note(
                        msg % (obj.get(b'oid'), util.bytecount(objsize))
                    )
                retry = self.retry
                while True:
                    try:
                        self._basictransfer(obj, action, localstore)
                        yield 1, obj.get(b'oid')
                        break
                    except socket.error as ex:
                        if retry > 0:
                            self.ui.note(
                                _(b'lfs: failed: %r (remaining retry %d)\n')
                                % (stringutil.forcebytestr(ex), retry)
                            )
                            retry -= 1
                            continue
                        raise

        # Until https multiplexing gets sorted out
        if self.ui.configbool(b'experimental', b'lfs.worker-enable'):
            oids = worker.worker(
                self.ui,
                0.1,
                transfer,
                (),
                sorted(objects, key=lambda o: o.get(b'oid')),
            )
        else:
            oids = transfer(sorted(objects, key=lambda o: o.get(b'oid')))

        with self.ui.makeprogress(topic, total=total) as progress:
            progress.update(0)
            processed = 0
            blobs = 0
            for _one, oid in oids:
                processed += sizes[oid]
                blobs += 1
                progress.update(processed)
                self.ui.note(_(b'lfs: processed: %s\n') % oid)

        if blobs > 0:
            if action == b'upload':
                self.ui.status(
                    _(b'lfs: uploaded %d files (%s)\n')
                    % (blobs, util.bytecount(processed))
                )
            elif action == b'download':
                self.ui.status(
                    _(b'lfs: downloaded %d files (%s)\n')
                    % (blobs, util.bytecount(processed))
                )
Example #22
    def _addrawrevision(
        self,
        node,
        revisiondata,
        transaction,
        linkrev,
        p1,
        p2,
        storedelta=None,
        flags=0,
    ):
        if self._pathid is None:
            res = self._db.execute(
                'INSERT INTO filepath (path) VALUES (?)', (self._path,)
            )
            self._pathid = res.lastrowid

        # For simplicity, always store a delta against p1.
        # TODO we need a lot more logic here to make behavior reasonable.

        if storedelta:
            deltabase, delta = storedelta

            if isinstance(deltabase, int):
                deltabase = self.node(deltabase)

        else:
            assert revisiondata is not None
            deltabase = p1

            if deltabase == nullid:
                delta = revisiondata
            else:
                delta = mdiff.textdiff(
                    self.revision(self.rev(deltabase)), revisiondata
                )

        # File index stores a pointer to its delta and the parent delta.
        # The parent delta is stored via a pointer to the fileindex PK.
        if deltabase == nullid:
            baseid = None
        else:
            baseid = self._revisions[deltabase].rid

        # Deltas are stored with a hash of their content. This allows
        # us to de-duplicate. The table is configured to ignore conflicts
        # and it is faster to just insert and silently noop than to look
        # first.
        deltahash = hashutil.sha1(delta).digest()

        if self._compengine == b'zstd':
            deltablob = self._cctx.compress(delta)
            compression = COMPRESSION_ZSTD
        elif self._compengine == b'zlib':
            deltablob = zlib.compress(delta)
            compression = COMPRESSION_ZLIB
        elif self._compengine == b'none':
            deltablob = delta
            compression = COMPRESSION_NONE
        else:
            raise error.ProgrammingError(
                b'unhandled compression engine: %s' % self._compengine
            )

        # Don't store compressed data if it isn't practical.
        if len(deltablob) >= len(delta):
            deltablob = delta
            compression = COMPRESSION_NONE

        deltaid = insertdelta(self._db, compression, deltahash, deltablob)

        rev = len(self)

        if p1 == nullid:
            p1rev = nullrev
        else:
            p1rev = self._nodetorev[p1]

        if p2 == nullid:
            p2rev = nullrev
        else:
            p2rev = self._nodetorev[p2]

        rid = self._db.execute(
            'INSERT INTO fileindex ('
            '    pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
            '    deltaid, deltabaseid) '
            '    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
            (
                self._pathid,
                rev,
                node,
                p1rev,
                p2rev,
                linkrev,
                flags,
                deltaid,
                baseid,
            ),
        ).lastrowid

        entry = revisionentry(
            rid=rid,
            rev=rev,
            node=node,
            p1rev=p1rev,
            p2rev=p2rev,
            p1node=p1,
            p2node=p2,
            linkrev=linkrev,
            flags=flags,
        )

        self._nodetorev[node] = rev
        self._revtonode[rev] = node
        self._revisions[node] = entry

        return node
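The de-duplication trick described in the comments above (deltas keyed by a hash of their content, with conflicting inserts ignored) can be sketched without SQLite; hashlib and a dict stand in for hashutil and the delta table:

import hashlib

deltas = {}  # sha1 digest -> rowid, mimicking the ignore-conflicts table

def insertdelta_sketch(blob):
    key = hashlib.sha1(blob).digest()
    # a second insert of the same content is a silent no-op
    return deltas.setdefault(key, len(deltas) + 1)

assert insertdelta_sketch(b'delta') == insertdelta_sketch(b'delta')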
Example #23
    def censorrevision(self, tr, censornode, tombstone=b''):
        tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

        # This restriction is cargo culted from revlogs and makes no sense for
        # SQLite, since columns can be resized at will.
        if len(tombstone) > len(self.rawdata(censornode)):
            raise error.Abort(
                _(b'censor tombstone must be no longer than censored data')
            )

        # We need to replace the censored revision's data with the tombstone.
        # But replacing that data will have implications for delta chains that
        # reference it.
        #
        # While "better," more complex strategies are possible, we do something
        # simple: we find delta chain children of the censored revision and we
        # replace those incremental deltas with fulltexts of their corresponding
        # revision. Then we delete the now-unreferenced delta and original
        # revision and insert a replacement.

        # Find the delta to be censored.
        censoreddeltaid = self._db.execute(
            'SELECT deltaid FROM fileindex WHERE id=?',
            (self._revisions[censornode].rid,),
        ).fetchone()[0]

        # Find all its delta chain children.
        # TODO once we support storing deltas for !files, we'll need to look
        # for those delta chains too.
        rows = list(
            self._db.execute(
                'SELECT id, pathid, node FROM fileindex '
                'WHERE deltabaseid=? OR deltaid=?',
                (censoreddeltaid, censoreddeltaid),
            )
        )

        for row in rows:
            rid, pathid, node = row

            fulltext = resolvedeltachain(
                self._db, pathid, node, {}, {-1: None}, zstddctx=self._dctx
            )

            deltahash = hashutil.sha1(fulltext).digest()

            if self._compengine == b'zstd':
                deltablob = self._cctx.compress(fulltext)
                compression = COMPRESSION_ZSTD
            elif self._compengine == b'zlib':
                deltablob = zlib.compress(fulltext)
                compression = COMPRESSION_ZLIB
            elif self._compengine == b'none':
                deltablob = fulltext
                compression = COMPRESSION_NONE
            else:
                raise error.ProgrammingError(
                    b'unhandled compression engine: %s' % self._compengine
                )

            if len(deltablob) >= len(fulltext):
                deltablob = fulltext
                compression = COMPRESSION_NONE

            deltaid = insertdelta(self._db, compression, deltahash, deltablob)

            self._db.execute(
                'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
                'WHERE id=?',
                (deltaid, rid),
            )

        # Now create the tombstone delta and replace the delta on the censored
        # node.
        deltahash = hashutil.sha1(tombstone).digest()
        tombstonedeltaid = insertdelta(
            self._db, COMPRESSION_NONE, deltahash, tombstone
        )

        flags = self._revisions[censornode].flags
        flags |= FLAG_CENSORED

        self._db.execute(
            'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL '
            'WHERE pathid=? AND node=?',
            (flags, tombstonedeltaid, self._pathid, censornode),
        )

        self._db.execute('DELETE FROM delta WHERE id=?', (censoreddeltaid,))

        self._refreshindex()
        self._revisioncache.clear()
Example #24
    def join(self, path):
        """split the path at first two characters, like: XX/XXXXX..."""
        if not _lfsre.match(path):
            raise error.ProgrammingError('unexpected lfs path: %s' % path)
        return super(lfsvfs, self).join(path[0:2], path[2:])
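The sharding itself is plain slicing; a minimal sketch with an illustrative 64-character oid:

oid = '4d7a2f' + 'a' * 58
sharded = '%s/%s' % (oid[0:2], oid[2:])
assert sharded.startswith('4d/7a2f')   # XX/XXXXX... as the docstring says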
Example #25
def _index_repo(
    gitrepo,
    db,
    logfn=lambda x: None,
    progress_factory=lambda *args, **kwargs: None,
):
    # Identify all references so we can tell the walker to visit all of them.
    all_refs = gitrepo.listall_references()
    possible_heads = set()
    prog = progress_factory(b'refs')
    for pos, ref in enumerate(all_refs):
        if prog is not None:
            prog.update(pos)
        if not (
            ref.startswith('refs/heads/')  # local branch
            or ref.startswith('refs/tags/')  # tag
            or ref.startswith('refs/remotes/')  # remote branch
            or ref.startswith('refs/hg/')  # from this extension
        ):
            continue
        try:
            start = gitrepo.lookup_reference(ref).peel(pygit2.GIT_OBJ_COMMIT)
        except ValueError:
            # No commit to be found, so we don't care for hg's purposes.
            continue
        possible_heads.add(start.id)
    # Optimization: if the list of heads hasn't changed, don't
    # reindex the changelog. This doesn't matter on small
    # repositories, but on even moderately deep histories (eg cpython)
    # this is a very important performance win.
    #
    # TODO: we should figure out how to incrementally index history
    # (preferably by detecting rewinds!) so that we don't have to do a
    # full changelog walk every time a new commit is created.
    cache_heads = {
        pycompat.sysstr(x[0])
        for x in db.execute('SELECT node FROM possible_heads')
    }
    walker = None
    cur_cache_heads = {h.hex for h in possible_heads}
    if cur_cache_heads == cache_heads:
        return
    logfn(b'heads mismatch, rebuilding dagcache\n')
    for start in possible_heads:
        if walker is None:
            walker = gitrepo.walk(start, _OUR_ORDER)
        else:
            walker.push(start)

    # Empty out the existing changelog. Even for large-ish histories
    # we can do the top-level "walk all the commits" dance very
    # quickly as long as we don't need to figure out the changed files
    # list.
    db.execute('DELETE FROM changelog')
    if prog is not None:
        prog.complete()
    prog = progress_factory(b'commits')
    # This walker is sure to visit all the revisions in history, but
    # only once.
    for pos, commit in enumerate(walker):
        if prog is not None:
            prog.update(pos)
        p1 = p2 = nullhex
        if len(commit.parents) > 2:
            raise error.ProgrammingError(
                (
                    b"git support can't handle octopus merges, "
                    b"found a commit with %d parents :("
                )
                % len(commit.parents)
            )
        if commit.parents:
            p1 = commit.parents[0].id.hex
        if len(commit.parents) == 2:
            p2 = commit.parents[1].id.hex
        db.execute(
            'INSERT INTO changelog (rev, node, p1, p2) VALUES(?, ?, ?, ?)',
            (pos, commit.id.hex, p1, p2),
        )

        num_changedfiles = db.execute(
            "SELECT COUNT(*) from changedfiles WHERE node = ?",
            (commit.id.hex,),
        ).fetchone()[0]
        if not num_changedfiles:
            files = {}
            # I *think* we only need to check p1 for changed files
            # (and therefore linkrevs), because any node that would
            # actually have this commit as a linkrev would be
            # completely new in this rev.
            p1 = commit.parents[0].id.hex if commit.parents else None
            if p1 is not None:
                patchgen = gitrepo.diff(p1, commit.id.hex, flags=_DIFF_FLAGS)
            else:
                patchgen = commit.tree.diff_to_tree(
                    swap=True, flags=_DIFF_FLAGS
                )
            new_files = (p.delta.new_file for p in patchgen)
            files = {
                nf.path: nf.id.hex for nf in new_files if nf.id.raw != nullid
            }
            for p, n in files.items():
                # We intentionally set NULLs for any file parentage
                # information so it'll get demand-computed later. We
                # used to do it right here, and it was _very_ slow.
                db.execute(
                    'INSERT INTO changedfiles ('
                    'node, filename, filenode, p1node, p1filenode, p2node, '
                    'p2filenode) VALUES(?, ?, ?, ?, ?, ?, ?)',
                    (commit.id.hex, p, n, None, None, None, None),
                )
    db.execute('DELETE FROM heads')
    db.execute('DELETE FROM possible_heads')
    for hid in possible_heads:
        h = hid.hex
        db.execute('INSERT INTO possible_heads (node) VALUES(?)', (h,))
        haschild = db.execute(
            'SELECT COUNT(*) FROM changelog WHERE p1 = ? OR p2 = ?', (h, h)
        ).fetchone()[0]
        if not haschild:
            db.execute('INSERT INTO heads (node) VALUES(?)', (h,))

    db.commit()
    if prog is not None:
        prog.complete()
Example #26
def fill_in_filelog(gitrepo, db, startcommit, path, startfilenode):
    """Given a starting commit and path, fill in a filelog's parent pointers.

    Args:
      gitrepo: a pygit2.Repository
      db: a handle to our sqlite database
      startcommit: a hexlified node id for the commit to start at
      path: the path of the file whose parent pointers we should fill in.
      startfilenode: the hexlified node id of the file at startcommit

    TODO: make filenode optional
    """
    assert isinstance(
        startcommit, str
    ), 'startcommit must be str, got %r' % type(startcommit)
    assert isinstance(
        startfilenode, str
    ), 'startfilenode must be str, got %r' % type(startfilenode)
    visit = collections.deque([(startcommit, startfilenode)])
    while visit:
        cnode, filenode = visit.popleft()
        commit = gitrepo[cnode]
        parents = []
        for parent in commit.parents:
            t = parent.tree
            for comp in path.split('/'):
                try:
                    t = gitrepo[t[comp].id]
                except KeyError:
                    break
            else:
                introducer = _find_nearest_ancestor_introducing_node(
                    db, gitrepo, path, parent.id, t.id.hex
                )
                parents.append((introducer, t.id.hex))
        p1node = p1fnode = p2node = p2fnode = gitutil.nullgit
        for par, parfnode in parents:
            found = int(
                db.execute(
                    'SELECT COUNT(*) FROM changedfiles WHERE '
                    'node = ? AND filename = ? AND filenode = ? AND '
                    'p1node NOT NULL',
                    (par, path, parfnode),
                ).fetchone()[0]
            )
            if found == 0:
                assert par is not None
                visit.append((par, parfnode))
        if parents:
            p1node, p1fnode = parents[0]
        if len(parents) == 2:
            p2node, p2fnode = parents[1]
        if len(parents) > 2:
            raise error.ProgrammingError(
                b"git support can't handle octopus merges"
            )
        db.execute(
            'UPDATE changedfiles SET '
            'p1node = ?, p1filenode = ?, p2node = ?, p2filenode = ? '
            'WHERE node = ? AND filename = ? AND filenode = ?',
            (p1node, p1fnode, p2node, p2fnode, commit.id.hex, path, filenode),
        )
    db.commit()