Example #1
    def chlogwalk():
        count = len(repo)
        i = count
        l = [0] * 100
        chunk = 100
        while True:
            if chunk > i:
                chunk = i
                i = 0
            else:
                i -= chunk

            for x in pycompat.xrange(chunk):
                if i + x >= count:
                    l[chunk - x:] = [0] * (chunk - x)
                    break
                if full is not None:
                    if (i + x) in repo:
                        l[x] = repo[i + x]
                        l[x].changeset() # force reading
                else:
                    if (i + x) in repo:
                        l[x] = 1
            for x in pycompat.xrange(chunk - 1, -1, -1):
                if l[x] != 0:
                    yield (i + x, full is not None and l[x] or None)
            if i == 0:
                break
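The same chunked reverse-walk pattern, as a minimal self-contained sketch over a plain list (walk_chunks is an illustrative name, not part of Mercurial's API):

    def walk_chunks(items, chunksize=100):
        """Yield (index, item) from newest to oldest, chunksize at a time."""
        i = len(items)
        while i > 0:
            chunk = min(chunksize, i)
            i -= chunk
            # read one chunk front-to-back, then emit it back-to-front
            window = items[i:i + chunk]
            for x in range(chunk - 1, -1, -1):
                yield i + x, window[x]

    # list(walk_chunks(list('abcdefg'), chunksize=3))
    # -> [(6, 'g'), (5, 'f'), (4, 'e'), (3, 'd'), (2, 'c'), (1, 'b'), (0, 'a')]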
Example #2
    def markledger(self, ledger, options=None):
        if options and options.get(constants.OPTION_PACKSONLY):
            return
        treename = b''
        rl = revlog.revlog(self._svfs, b'00manifesttree.i')
        startlinkrev = self._repackstartlinkrev
        endlinkrev = self._repackendlinkrev
        for rev in pycompat.xrange(len(rl) - 1, -1, -1):
            linkrev = rl.linkrev(rev)
            if linkrev < startlinkrev:
                break
            if linkrev > endlinkrev:
                continue
            node = rl.node(rev)
            ledger.markdataentry(self, treename, node)
            ledger.markhistoryentry(self, treename, node)

        for path, encoded, size in self._store.datafiles():
            if path[:5] != b'meta/' or path[-2:] != b'.i':
                continue

            treename = path[5 : -len(b'/00manifest.i')]

            rl = revlog.revlog(self._svfs, path)
            for rev in pycompat.xrange(len(rl) - 1, -1, -1):
                linkrev = rl.linkrev(rev)
                if linkrev < startlinkrev:
                    break
                if linkrev > endlinkrev:
                    continue
                node = rl.node(rev)
                ledger.markdataentry(self, treename, node)
                ledger.markhistoryentry(self, treename, node)
Example #3
    def _showchanges(self, fm, alines, blines, chunk, fixups):
        def trim(line):
            if line.endswith(b'\n'):
                line = line[:-1]
            return line

        # this is not optimized for perf but _showchanges only gets executed
        # with an extra command-line flag.
        a1, a2, b1, b2 = chunk
        aidxs, bidxs = [0] * (a2 - a1), [0] * (b2 - b1)
        for idx, fa1, fa2, fb1, fb2 in fixups:
            for i in pycompat.xrange(fa1, fa2):
                aidxs[i - a1] = (max(idx, 1) - 1) // 2
            for i in pycompat.xrange(fb1, fb2):
                bidxs[i - b1] = (max(idx, 1) - 1) // 2

        fm.startitem()
        fm.write(
            b'hunk',
            b'        %s\n',
            b'@@ -%d,%d +%d,%d @@' % (a1, a2 - a1, b1, b2 - b1),
            label=b'diff.hunk',
        )
        fm.data(path=self.path, linetype=b'hunk')

        def writeline(idx, diffchar, line, linetype, linelabel):
            fm.startitem()
            node = b''
            if idx:
                ctx = self.fctxs[idx]
                fm.context(fctx=ctx)
                node = ctx.hex()
                self.ctxaffected.add(ctx.changectx())
            fm.write(b'node', b'%-7.7s ', node, label=b'absorb.node')
            fm.write(
                b'diffchar ' + linetype,
                b'%s%s\n',
                diffchar,
                line,
                label=linelabel,
            )
            fm.data(path=self.path, linetype=linetype)

        for i in pycompat.xrange(a1, a2):
            writeline(
                aidxs[i - a1],
                b'-',
                trim(alines[i]),
                b'deleted',
                b'diff.deleted',
            )
        for i in pycompat.xrange(b1, b2):
            writeline(
                bidxs[i - b1],
                b'+',
                trim(blines[i]),
                b'inserted',
                b'diff.inserted',
            )
Example #4
 def flush(self):
     """write the state down to the file"""
     if not self.path:
         return
     if self._lastmaxrev == -1:  # write the entire file
         with open(self.path, b'wb') as f:
             f.write(self.HEADER)
             for i in pycompat.xrange(1, len(self._rev2hsh)):
                 self._writerev(i, f)
     else:  # append incrementally
         with open(self.path, b'ab') as f:
             for i in pycompat.xrange(self._lastmaxrev + 1,
                                      len(self._rev2hsh)):
                 self._writerev(i, f)
     self._lastmaxrev = self.maxrev
Example #5
    def iterentries(self):
        # Start at 1 to skip the header
        offset = 1
        while offset < self.datasize:
            data = self._data
            # <2 byte len> + <filename>
            filenamelen = struct.unpack(
                '!H', data[offset:offset + constants.FILENAMESIZE])[0]
            offset += constants.FILENAMESIZE
            filename = data[offset:offset + filenamelen]
            offset += filenamelen

            revcount = struct.unpack('!I',
                                     data[offset:offset + ENTRYCOUNTSIZE])[0]
            offset += ENTRYCOUNTSIZE

            for i in pycompat.xrange(revcount):
                entry = struct.unpack(PACKFORMAT,
                                      data[offset:offset + PACKENTRYLENGTH])
                offset += PACKENTRYLENGTH

                copyfrom = data[offset:offset + entry[ANC_COPYFROM]]
                offset += entry[ANC_COPYFROM]

                yield (filename, entry[ANC_NODE], entry[ANC_P1NODE],
                       entry[ANC_P2NODE], entry[ANC_LINKNODE], copyfrom)

                self._pagedin += PACKENTRYLENGTH

            # If we've read a lot of data from the mmap, free some memory.
            self.freememory()
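For reference, a self-contained sketch of the same length-prefixed record layout ('!H' is a big-endian 16-bit length, '!I' a 32-bit count; pack_record and iter_records are invented for the demo):

    import struct

    def pack_record(name, payload):
        # <2-byte name length><name><4-byte payload length><payload>
        return (struct.pack('!H', len(name)) + name +
                struct.pack('!I', len(payload)) + payload)

    def iter_records(data):
        offset = 0
        while offset < len(data):
            (namelen,) = struct.unpack('!H', data[offset:offset + 2])
            offset += 2
            name = data[offset:offset + namelen]
            offset += namelen
            (paylen,) = struct.unpack('!I', data[offset:offset + 4])
            offset += 4
            yield name, data[offset:offset + paylen]
            offset += paylen

    blob = pack_record(b'foo', b'abc') + pack_record(b'bar', b'xy')
    assert list(iter_records(blob)) == [(b'foo', b'abc'), (b'bar', b'xy')]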
Example #6
    def write(self, annotatedresult, lines=None, existinglines=None):
        if annotatedresult:
            self._writecomma()

        # map() returns a one-shot iterator on Python 3, which would break
        # the vs[i] indexing below; pycompat.maplist materializes the values
        pieces = [(name, pycompat.maplist(f, annotatedresult))
                  for f, sep, name, enc in self.funcmap]
        if lines is not None:
            pieces.append(('line', lines))
        pieces.sort()

        seps = [','] * len(pieces[:-1]) + ['']

        result = ''
        lasti = len(annotatedresult) - 1
        for i in pycompat.xrange(len(annotatedresult)):
            result += '\n {\n'
            for j, p in enumerate(pieces):
                k, vs = p
                result += (
                    '  "%s": %s%s\n' %
                    (k, templatefilters.json(vs[i], paranoid=False), seps[j]))
            result += ' }%s' % ('' if i == lasti else ',')
        if lasti >= 0:
            self.needcomma = True

        self.ui.write(result)
Example #7
def _checkhook(ui, repo, node, headsonly):
    # Get revisions to check and touched files at the same time
    ensureenabled(ui)
    files = set()
    revs = set()
    for rev in pycompat.xrange(repo[node].rev(), len(repo)):
        revs.add(rev)
        if headsonly:
            ctx = repo[rev]
            files.update(ctx.files())
            for pctx in ctx.parents():
                revs.discard(pctx.rev())
    failed = []
    for rev in revs:
        ctx = repo[rev]
        eol = parseeol(ui, repo, [ctx.node()])
        if eol:
            failed.extend(eol.checkrev(repo, ctx, files))

    if failed:
        eols = {b'to-lf': b'CRLF', b'to-crlf': b'LF'}
        msgs = []
        for f, target, node in sorted(failed):
            msgs.append(
                _(b"  %s in %s should not have %s line endings") %
                (f, node, eols[target]))
        raise errormod.Abort(
            _(b"end-of-line check failed:\n") + b"\n".join(msgs))
Example #8
    def _resolvefilenames(self, hashes):
        """Given a list of filename hashes that are present in the
        remotefilelog store, return a mapping from filename->hash.

        This is useful when converting remotefilelog blobs into other storage
        formats.
        """
        if not hashes:
            return {}

        filenames = {}
        missingfilename = set(hashes)

        # Start with a full manifest, since it'll cover the majority of files
        for filename in self.repo['tip'].manifest():
            sha = hashlib.sha1(filename).digest()
            if sha in missingfilename:
                filenames[filename] = sha
                missingfilename.discard(sha)

        # Scan the changelog until we've found every file name
        cl = self.repo.unfiltered().changelog
        for rev in pycompat.xrange(len(cl) - 1, -1, -1):
            if not missingfilename:
                break
            files = cl.readfiles(cl.node(rev))
            for filename in files:
                sha = hashlib.sha1(filename).digest()
                if sha in missingfilename:
                    filenames[filename] = sha
                    missingfilename.discard(sha)

        return filenames
Example #9
def _openlogfile(ui, vfs):
    def rotate(oldpath, newpath):
        try:
            vfs.unlink(newpath)
        except OSError as err:
            if err.errno != errno.ENOENT:
                ui.debug("warning: cannot remove '%s': %s\n" %
                         (newpath, err.strerror))
        try:
            if newpath:
                vfs.rename(oldpath, newpath)
        except OSError as err:
            if err.errno != errno.ENOENT:
                ui.debug("warning: cannot rename '%s' to '%s': %s\n" %
                         (newpath, oldpath, err.strerror))

    maxsize = ui.configbytes('blackbox', 'maxsize')
    name = 'blackbox.log'
    if maxsize > 0:
        try:
            st = vfs.stat(name)
        except OSError:
            pass
        else:
            if st.st_size >= maxsize:
                path = vfs.join(name)
                maxfiles = ui.configint('blackbox', 'maxfiles')
                for i in pycompat.xrange(maxfiles - 1, 1, -1):
                    rotate(oldpath='%s.%d' % (path, i - 1),
                           newpath='%s.%d' % (path, i))
                rotate(oldpath=path, newpath=maxfiles > 0 and path + '.1')
    return vfs(name, 'a')
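The rotation itself is the classic numbered-logfile shuffle. A standalone sketch with plain os calls (rotate_logs is an invented helper, not the blackbox API):

    import os

    def rotate_logs(path, maxfiles):
        """Shift path.1 .. path.(maxfiles-1) up by one, then path -> path.1."""
        for i in range(maxfiles - 1, 1, -1):
            old, new = '%s.%d' % (path, i - 1), '%s.%d' % (path, i)
            if os.path.exists(old):
                os.replace(old, new)  # os.replace overwrites the target
        if maxfiles > 0 and os.path.exists(path):
            os.replace(path, path + '.1')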
Example #10
def _computeincrementalpack(files, opts):
    """Given a set of pack files along with the configuration options, this
    function computes the list of files that should be packed as part of an
    incremental repack.

    It tries to strike a balance between keeping incremental repacks cheap (i.e.
    packing small things when possible, and rolling the packs up to the big ones
    over time).
    """

    limits = list(
        sorted((util.sizetoint(s) for s in opts[b'generations']), reverse=True)
    )
    limits.append(0)

    # Group the packs by generation (i.e. by size)
    generations = []
    for i in pycompat.xrange(len(limits)):
        generations.append([])

    sizes = {}
    for prefix, mode, stat in files:
        size = stat.st_size
        if size > opts[b'repackmaxpacksize']:
            continue

        sizes[prefix] = size
        for i, limit in enumerate(limits):
            if size > limit:
                generations[i].append(prefix)
                break

    # Steps for picking what packs to repack:
    # 1. Pick the largest generation with > gencountlimit pack files.
    # 2. Take the smallest three packs.
    # 3. While total-size-of-packs < repacksizelimit: add another pack

    # Find the largest generation with more than gencountlimit packs
    genpacks = []
    for i, limit in enumerate(limits):
        if len(generations[i]) > opts[b'gencountlimit']:
            # Sort to be smallest last, for easy popping later
            genpacks.extend(
                sorted(generations[i], reverse=True, key=lambda x: sizes[x])
            )
            break

    # Take as many packs from the generation as we can
    chosenpacks = genpacks[-3:]
    genpacks = genpacks[:-3]
    repacksize = sum(sizes[n] for n in chosenpacks)
    while (
        repacksize < opts[b'repacksizelimit']
        and genpacks
        and len(chosenpacks) < opts[b'maxrepackpacks']
    ):
        chosenpacks.append(genpacks.pop())
        repacksize += sizes[chosenpacks[-1]]

    return chosenpacks
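A toy run of the bucketing step may make the generation logic easier to follow (the sizes and limits below are made up):

    limits = [1024 * 1024, 1024, 0]           # descending size thresholds
    sizes = {'a': 5 * 1024 * 1024, 'b': 2048, 'c': 512, 'd': 100}

    generations = [[] for _ in limits]
    for name, size in sizes.items():
        for i, limit in enumerate(limits):
            if size > limit:                  # first (largest) bucket exceeded
                generations[i].append(name)
                break

    assert generations == [['a'], ['b'], ['c', 'd']]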
Example #11
def _txnhook(ui, repo, hooktype, node, source, user, **kwargs):
    # deprecated config: acl.config
    cfg = ui.config('acl', 'config')
    if cfg:
        ui.readconfig(cfg, sections=['acl.groups', 'acl.allow.branches',
            'acl.deny.branches', 'acl.allow', 'acl.deny'])

    allowbranches = buildmatch(ui, None, user, 'acl.allow.branches')
    denybranches = buildmatch(ui, None, user, 'acl.deny.branches')
    allow = buildmatch(ui, repo, user, 'acl.allow')
    deny = buildmatch(ui, repo, user, 'acl.deny')

    for rev in pycompat.xrange(repo[node].rev(), len(repo)):
        ctx = repo[rev]
        branch = ctx.branch()
        if denybranches and denybranches(branch):
            raise error.Abort(_('acl: user "%s" denied on branch "%s"'
                               ' (changeset "%s")')
                               % (user, branch, ctx))
        if allowbranches and not allowbranches(branch):
            raise error.Abort(_('acl: user "%s" not allowed on branch "%s"'
                               ' (changeset "%s")')
                               % (user, branch, ctx))
        ui.debug('acl: branch access granted: "%s" on branch "%s"\n'
        % (ctx, branch))

        for f in ctx.files():
            if deny and deny(f):
                raise error.Abort(_('acl: user "%s" denied on "%s"'
                ' (changeset "%s")') % (user, f, ctx))
            if allow and not allow(f):
                raise error.Abort(_('acl: user "%s" not allowed on "%s"'
                ' (changeset "%s")') % (user, f, ctx))
        ui.debug('acl: path access granted: "%s"\n' % ctx)
Example #12
def convertedges(line):
    line = ' %s ' % line
    pretty = []
    for idx in pycompat.xrange(len(line) - 2):
        pretty.append(
            prettyedge(line[idx:idx + 1], line[idx + 1:idx + 2],
                       line[idx + 2:idx + 3]))
    return ''.join(pretty)
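The pad-then-slide idiom generalizes to any neighbour scan; a small sketch (windows3 is an invented name):

    def windows3(line):
        """Yield (left, char, right) for each character, space-padded."""
        padded = ' %s ' % line
        for idx in range(len(padded) - 2):
            yield padded[idx], padded[idx + 1], padded[idx + 2]

    assert list(windows3('ab')) == [(' ', 'a', 'b'), ('a', 'b', ' ')]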
Example #13
    def _findsection(self, name):
        params = self.params
        namehash = hashlib.sha1(name).digest()
        fanoutkey = struct.unpack(params.fanoutstruct,
                                  namehash[:params.fanoutprefix])[0]
        fanout = self._fanouttable

        start = fanout[fanoutkey] + params.indexstart
        indexend = self._indexend

        for i in pycompat.xrange(fanoutkey + 1, params.fanoutcount):
            end = fanout[i] + params.indexstart
            if end != start:
                break
        else:
            end = indexend

        entry = self._bisect(namehash, start, end, self.INDEXENTRYLENGTH)
        if not entry:
            raise KeyError(name)

        rawentry = struct.unpack(self.INDEXFORMAT, entry)
        x, offset, size, nodeindexoffset, nodeindexsize = rawentry
        rawnamelen = self._index[nodeindexoffset:nodeindexoffset +
                                 constants.FILENAMESIZE]
        actualnamelen = struct.unpack(b'!H', rawnamelen)[0]
        nodeindexoffset += constants.FILENAMESIZE
        actualname = self._index[nodeindexoffset:nodeindexoffset +
                                 actualnamelen]
        if actualname != name:
            raise KeyError(b"found file name %s when looking for %s" %
                           (actualname, name))
        nodeindexoffset += actualnamelen

        filenamelength = struct.unpack(
            b'!H', self._data[offset:offset + constants.FILENAMESIZE])[0]
        offset += constants.FILENAMESIZE

        actualname = self._data[offset:offset + filenamelength]
        offset += filenamelength

        if name != actualname:
            raise KeyError(b"found file name %s when looking for %s" %
                           (actualname, name))

        # Skip entry list size
        offset += ENTRYCOUNTSIZE

        nodelistoffset = offset
        nodelistsize = (size - constants.FILENAMESIZE - filenamelength -
                        ENTRYCOUNTSIZE)
        return (
            name,
            nodelistoffset,
            nodelistsize,
            nodeindexoffset,
            nodeindexsize,
        )
Example #14
    def is_reachable(ar, reachable, sha):
        if len(ar) == 0:
            return 1
        mask = 0
        for i in pycompat.xrange(len(ar)):
            if sha in reachable[i]:
                mask |= 1 << i

        return mask
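The bitmask accumulation in isolation, with invented data (bit i of the result records membership in reachable[i]):

    reachable = [{'a', 'b'}, {'b'}, {'c'}]
    sha = 'b'
    mask = 0
    for i, group in enumerate(reachable):
        if sha in group:
            mask |= 1 << i          # set bit i when group i contains sha
    assert mask == 0b011            # found in groups 0 and 1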
Example #15
 def _fanouttable(self):
     params = self.params
     rawfanout = self._index[FANOUTSTART:FANOUTSTART + params.fanoutsize]
     fanouttable = []
     for i in pycompat.xrange(0, params.fanoutcount):
         loc = i * 4
         fanoutentry = struct.unpack(b'!I', rawfanout[loc:loc + 4])[0]
         fanouttable.append(fanoutentry)
     return fanouttable
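Unpacking such a table of big-endian uint32s can be demonstrated directly (values invented):

    import struct

    rawfanout = struct.pack('!4I', 0, 10, 10, 42)
    table = [struct.unpack('!I', rawfanout[loc:loc + 4])[0]
             for loc in range(0, 16, 4)]
    assert table == [0, 10, 10, 42]
    # equivalently, all at once: list(struct.unpack('!4I', rawfanout))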
Example #16
 def _checkoutlinelog(self):
     """() -> [str]. check out file contents from linelog"""
     contents = []
     for i in pycompat.xrange(len(self.contents)):
         rev = (i + 1) * 2
         self.linelog.annotate(rev)
         content = b''.join(map(self._getline, self.linelog.annotateresult))
         contents.append(content)
     return contents
Example #17
def _rebaserestoredcommit(ui, repo, opts, tr, oldtiprev, basename, pctx,
                          tmpwctx, shelvectx, branchtorestore, activebookmark):
    """Rebase restored commit from its original location to a destination"""
    # If the shelve is not immediately on top of the commit
    # we'll be merging with, rebase it to be on top.
    if tmpwctx.node() == shelvectx.parents()[0].node():
        return shelvectx

    overrides = {
        ('ui', 'forcemerge'): opts.get('tool', ''),
        ('phases', 'new-commit'): phases.secret,
    }
    with repo.ui.configoverride(overrides, 'unshelve'):
        ui.status(_('rebasing shelved changes\n'))
        stats = merge.graft(repo,
                            shelvectx,
                            shelvectx.p1(),
                            labels=['shelve', 'working-copy'],
                            keepconflictparent=True)
        if stats.unresolvedcount:
            tr.close()

            nodestoremove = [
                repo.changelog.node(rev)
                for rev in pycompat.xrange(oldtiprev, len(repo))
            ]
            shelvedstate.save(repo, basename, pctx,
                              tmpwctx, nodestoremove, branchtorestore,
                              opts.get('keep'), activebookmark)
            raise error.InterventionRequired(
                _("unresolved conflicts (see 'hg resolve', then "
                  "'hg unshelve --continue')"))

        with repo.dirstate.parentchange():
            repo.setparents(tmpwctx.node(), nodemod.nullid)
            newnode = repo.commit(text=shelvectx.description(),
                                  extra=shelvectx.extra(),
                                  user=shelvectx.user(),
                                  date=shelvectx.date())

        if newnode is None:
            # If it ended up being a no-op commit, then the normal
            # merge state clean-up path doesn't happen, so do it
            # here. Fix issue5494
            merge.mergestate.clean(repo)
            shelvectx = tmpwctx
            msg = _('note: unshelved changes already existed '
                    'in the working copy\n')
            ui.status(msg)
        else:
            shelvectx = repo[newnode]
            hg.updaterepo(repo, tmpwctx.node(), False)

    return shelvectx
Example #18
 def shortest(self, node, minlength=1):
     nodehex = hex(node)
     for attempt in pycompat.xrange(minlength, len(nodehex) + 1):
         candidate = nodehex[:attempt]
         matches = int(
             self._db.execute(
                 'SELECT COUNT(*) FROM changelog WHERE node LIKE ?',
                 (pycompat.sysstr(candidate + b'%'), ),
             ).fetchone()[0])
         if matches == 1:
             return candidate
     return nodehex
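A runnable sketch of the same shortest-unique-prefix query against a throwaway sqlite3 table (the table and sample hashes are invented):

    import sqlite3

    db = sqlite3.connect(':memory:')
    db.execute('CREATE TABLE changelog (node TEXT)')
    db.executemany('INSERT INTO changelog VALUES (?)',
                   [('abc123',), ('abd456',), ('ffe789',)])

    def shortest(nodehex, minlength=1):
        for attempt in range(minlength, len(nodehex) + 1):
            candidate = nodehex[:attempt]
            (matches,) = db.execute(
                'SELECT COUNT(*) FROM changelog WHERE node LIKE ?',
                (candidate + '%',)).fetchone()
            if matches == 1:
                return candidate
        return nodehex

    assert shortest('abc123') == 'abc'   # 'a' and 'ab' also match 'abd456'
    assert shortest('ffe789') == 'f'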
Example #19
def forbidnewline(ui, repo, hooktype, node, newline, **kwargs):
    halt = False
    seen = set()
    # we try to walk changesets in reverse order from newest to
    # oldest, so that if we see a file multiple times, we take the
    # newest version as canonical. this prevents us from blocking a
    # changegroup that contains an unacceptable commit followed later
    # by a commit that fixes the problem.
    tip = repo[b'tip']
    for rev in pycompat.xrange(
        repo.changelog.tiprev(), repo[node].rev() - 1, -1
    ):
        c = repo[rev]
        for f in c.files():
            if f in seen or f not in tip or f not in c:
                continue
            seen.add(f)
            data = c[f].data()
            if not stringutil.binary(data) and newline in data:
                if not halt:
                    ui.warn(
                        _(
                            b'attempt to commit or push text file(s) '
                            b'using %s line endings\n'
                        )
                        % newlinestr[newline]
                    )
                ui.warn(_(b'in %s: %s\n') % (short(c.node()), f))
                halt = True
    if halt and hooktype == b'pretxnchangegroup':
        crlf = newlinestr[newline].lower()
        filter = filterstr[newline]
        ui.warn(
            _(
                b'\nTo prevent this mistake in your local repository,\n'
                b'add to Mercurial.ini or .hg/hgrc:\n'
                b'\n'
                b'[hooks]\n'
                b'pretxncommit.%s = python:hgext.win32text.forbid%s\n'
                b'\n'
                b'and also consider adding:\n'
                b'\n'
                b'[extensions]\n'
                b'win32text =\n'
                b'[encode]\n'
                b'** = %sencode:\n'
                b'[decode]\n'
                b'** = %sdecode:\n'
            )
            % (crlf, crlf, filter, filter)
        )
    return halt
Example #20
 def wrapped(self, *args, **kwargs):
     retrylog = self.retrylog or noop
     funcname = fn.__name__
     lasterror = None
     for i in pycompat.xrange(self.numattempts):
         if i > 0:
             retrylog('re-attempting (n=%d) %s\n' % (i, funcname))
             self.markforrefresh()
         try:
             return fn(self, *args, **kwargs)
         except KeyError as err:
             # a bare "raise" after the loop is a RuntimeError on Python 3
             # (the exception is cleared when this block exits), so remember
             # the failure and re-raise it below
             lasterror = err
     # retries exhausted
     retrylog('retries exhausted in %s, raising KeyError\n' % funcname)
     raise lasterror
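The same retry shape as a standalone decorator, for comparison (retry_on_keyerror is illustrative; the markforrefresh hook is omitted):

    import functools

    def retry_on_keyerror(attempts):
        def decorator(fn):
            @functools.wraps(fn)
            def wrapped(*args, **kwargs):
                lasterror = None
                for _ in range(attempts):   # assumes attempts >= 1
                    try:
                        return fn(*args, **kwargs)
                    except KeyError as err:
                        lasterror = err     # re-raised once retries run out
                raise lasterror
            return wrapped
        return decorator

    @retry_on_keyerror(3)
    def flaky(d, k):
        return d[k]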
Example #21
 def _buildlinelog(self):
     """calculate the initial linelog based on self.content{,line}s.
     this is similar to running a partial "annotate".
     """
     llog = linelog.linelog()
     a, alines = b'', []
     for i in pycompat.xrange(len(self.contents)):
         b, blines = self.contents[i], self.contentlines[i]
         llrev = i * 2 + 1
         chunks = self._alldiffchunks(a, b, alines, blines)
         for a1, a2, b1, b2 in reversed(list(chunks)):
             llog.replacelines(llrev, a1, a2, b1, b2)
         a, alines = b, blines
     return llog
Example #22
    def _analysediffchunk(self, chunk, annotated):
        """analyse a different chunk and return new fixups found

        return [] if no lines from the chunk can be safely applied.

        the chunk (or lines) cannot be safely applied, if, for example:
          - the modified (deleted) lines belong to a public changeset
            (self.fctxs[0])
          - the chunk is a pure insertion and the adjacent lines (at most 2
            lines) belong to different non-public changesets, or do not belong
            to any non-public changesets.
          - the chunk is modifying lines from different changesets.
            in this case, if the number of lines deleted equals the number
            of lines added, assume it's a simple 1:1 map (could be wrong).
            otherwise, give up.
          - the chunk is modifying lines from a single non-public changeset,
            but other revisions touch the area as well. i.e. the lines are
            not continuous as seen from the linelog.
        """
        a1, a2, b1, b2 = chunk
        # find involved indexes from annotate result
        involved = annotated[a1:a2]
        if not involved and annotated:  # a1 == a2 and a is not empty
            # pure insertion, check nearby lines. ignore lines belonging
            # to the public (first) changeset (i.e. annotated[i][0] == 1)
            nearbylinenums = {a2, max(0, a1 - 1)}
            involved = [
                annotated[i] for i in nearbylinenums if annotated[i][0] != 1
            ]
        involvedrevs = list(set(r for r, l in involved))
        newfixups = []
        if len(involvedrevs) == 1 and self._iscontinuous(a1, a2 - 1, True):
            # chunk belongs to a single revision
            rev = involvedrevs[0]
            if rev > 1:
                fixuprev = rev + 1
                newfixups.append((fixuprev, a1, a2, b1, b2))
        elif a2 - a1 == b2 - b1 or b1 == b2:
            # 1:1 line mapping, or chunk was deleted
            for i in pycompat.xrange(a1, a2):
                rev, linenum = annotated[i]
                if rev > 1:
                    if b1 == b2:  # deletion, simply remove that single line
                        nb1 = nb2 = 0
                    else:  # 1:1 line mapping, change the corresponding rev
                        nb1 = b1 + i - a1
                        nb2 = nb1 + 1
                    fixuprev = rev + 1
                    newfixups.append((fixuprev, i, i + 1, nb1, nb2))
        return self._optimizefixups(newfixups)
Example #23
    def _find(self, node):
        params = self.params
        fanoutkey = struct.unpack(params.fanoutstruct,
                                  node[:params.fanoutprefix])[0]
        fanout = self._fanouttable

        start = fanout[fanoutkey] + params.indexstart
        indexend = self._indexend

        # Scan forward to find the first non-same entry, which is the upper
        # bound.
        for i in pycompat.xrange(fanoutkey + 1, params.fanoutcount):
            end = fanout[i] + params.indexstart
            if end != start:
                break
        else:
            end = indexend

        # Bisect between start and end to find node
        index = self._index
        startnode = index[start:start + NODELENGTH]
        endnode = index[end:end + NODELENGTH]
        entrylen = self.INDEXENTRYLENGTH
        if startnode == node:
            entry = index[start:start + entrylen]
        elif endnode == node:
            entry = index[end:end + entrylen]
        else:
            while start < end - entrylen:
                mid = start + (end - start) // 2  # floor division; "/" yields a float on Python 3
                mid = mid - ((mid - params.indexstart) % entrylen)
                midnode = index[mid:mid + NODELENGTH]
                if midnode == node:
                    entry = index[mid:mid + entrylen]
                    break
                if node > midnode:
                    start = mid
                    startnode = midnode
                elif node < midnode:
                    end = mid
                    endnode = midnode
            else:
                return None

        return struct.unpack(self.INDEXFORMAT, entry)
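The boundary-snapped bisection can be shown in isolation over a buffer of sorted fixed-width keys (KEYLEN and the sample index are made up):

    KEYLEN = 4

    def find_entry(index, key):
        """Binary-search a buffer of sorted, fixed-width KEYLEN records."""
        start, end = 0, len(index) - KEYLEN
        while start <= end:
            mid = start + (end - start) // 2
            mid -= mid % KEYLEN              # snap to a record boundary
            candidate = index[mid:mid + KEYLEN]
            if candidate == key:
                return mid
            if key > candidate:
                start = mid + KEYLEN
            else:
                end = mid - KEYLEN
        return None

    index = b'aaaabbbbccccdddd'
    assert find_entry(index, b'cccc') == 8
    assert find_entry(index, b'zzzz') is None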
Example #24
    def write(self, annotatedresult, lines=None, existinglines=None):
        """(annotateresult, [str], set([rev, linenum])) -> None. write output.
        annotateresult can be [(node, linenum, path)], or [(node, linenum)]
        """
        pieces = []  # [[str]]
        maxwidths = []  # [int]

        # calculate padding
        for f, sep, name, enc in self.funcmap:
            l = [enc(f(x)) for x in annotatedresult]
            pieces.append(l)
            if name in [b'node', b'date']:  # node and date have fixed sizes
                l = l[:1]
            widths = pycompat.maplist(encoding.colwidth, set(l))
            maxwidth = max(widths) if widths else 0
            maxwidths.append(maxwidth)

        # buffered output
        result = b''
        for i in pycompat.xrange(len(annotatedresult)):
            for j, p in enumerate(pieces):
                sep = self.funcmap[j][1]
                padding = b' ' * (maxwidths[j] - len(p[i]))
                result += sep + padding + p[i]
            if lines:
                if existinglines is None:
                    result += b': ' + lines[i]
                else:  # extra formatting showing whether a line exists
                    key = (annotatedresult[i][0], annotatedresult[i][1])
                    if key in existinglines:
                        result += b':  ' + lines[i]
                    else:
                        result += b': ' + self.ui.label(
                            b'-' + lines[i], b'diff.deleted'
                        )

            if result[-1:] != b'\n':
                result += b'\n'

        self.ui.write(result)
Example #25
    def testLargePack(self):
        """Test creating and reading from a large pack with over X entries.
        This causes it to use a 2^16 fanout table instead."""
        total = basepack.SMALLFANOUTCUTOFF + 1
        revisions = []
        for i in pycompat.xrange(total):
            filename = b"foo-%d" % i
            node = self.getFakeHash()
            p1 = self.getFakeHash()
            p2 = self.getFakeHash()
            linknode = self.getFakeHash()
            revisions.append((filename, node, p1, p2, linknode, None))

        pack = self.createPack(revisions)
        self.assertEquals(pack.params.fanoutprefix, basepack.LARGEFANOUTPREFIX)

        for filename, node, p1, p2, linknode, copyfrom in revisions:
            actual = pack.getancestors(filename, node)[node]
            self.assertEquals(p1, actual[0])
            self.assertEquals(p2, actual[1])
            self.assertEquals(linknode, actual[2])
            self.assertEquals(copyfrom, actual[3])
Example #26
def shallowgroup(cls, self, nodelist, rlog, lookup, units=None, reorder=None):
    if not isinstance(rlog, remotefilelog.remotefilelog):
        for c in super(cls, self).group(nodelist, rlog, lookup, units=units):
            yield c
        return

    if len(nodelist) == 0:
        yield self.close()
        return

    nodelist = shallowutil.sortnodes(nodelist, rlog.parents)

    # add the parent of the first rev
    p = rlog.parents(nodelist[0])[0]
    nodelist.insert(0, p)

    # build deltas
    for i in pycompat.xrange(len(nodelist) - 1):
        prev, curr = nodelist[i], nodelist[i + 1]
        linknode = lookup(curr)
        for c in self.nodechunk(rlog, curr, prev, linknode):
            yield c

    yield self.close()
Example #27
def _decorate(fctx):
    text = fctx.data()
    linecount = text.count('\n')
    if text and not text.endswith('\n'):
        linecount += 1
    return ([(fctx, i) for i in pycompat.xrange(linecount)], text)
Example #28
    def _resolvelines(self, annotateresult, revmap, linelog):
        """(annotateresult) -> [line]. designed for annotatealllines.
        this is probably the most inefficient code in the whole fastannotate
        directory. but we have made a decision that the linelog does not
        store line contents. so getting them requires random accesses to
        the revlog data, since they can be many, it can be very slow.
        """
        # [llrev]
        revs = [revmap.hsh2rev(l[0]) for l in annotateresult]
        result = [None] * len(annotateresult)
        # {(rev, linenum): [lineindex]}
        key2idxs = collections.defaultdict(list)
        for i in pycompat.xrange(len(result)):
            key2idxs[(revs[i], annotateresult[i][1])].append(i)
        while key2idxs:
            # find an unresolved line and its linelog rev to annotate
            hsh = None
            try:
                for (rev, _linenum), idxs in pycompat.iteritems(key2idxs):
                    if revmap.rev2flag(rev) & revmapmod.sidebranchflag:
                        continue
                    hsh = annotateresult[idxs[0]][0]
                    break
            except StopIteration:  # no more unresolved lines
                return result
            if hsh is None:
                # the remaining key2idxs are not in main branch, resolving them
                # using the hard way...
                revlines = {}
                for (rev, linenum), idxs in pycompat.iteritems(key2idxs):
                    if rev not in revlines:
                        hsh = annotateresult[idxs[0]][0]
                        if self.ui.debugflag:
                            self.ui.debug('fastannotate: reading %s line #%d '
                                          'to resolve lines %r\n' %
                                          (node.short(hsh), linenum, idxs))
                        fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
                        lines = mdiff.splitnewlines(fctx.data())
                        revlines[rev] = lines
                    for idx in idxs:
                        result[idx] = revlines[rev][linenum]
                assert all(x is not None for x in result)
                return result

            # run the annotate and the lines should match to the file content
            self.ui.debug('fastannotate: annotate %s to resolve lines\n' %
                          node.short(hsh))
            linelog.annotate(rev)
            fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
            annotated = linelog.annotateresult
            lines = mdiff.splitnewlines(fctx.data())
            if len(lines) != len(annotated):
                raise faerror.CorruptedFileError('unexpected annotated lines')
            # resolve lines from the annotate result
            for i, line in enumerate(lines):
                k = annotated[i]
                if k in key2idxs:
                    for idx in key2idxs[k]:
                        result[idx] = line
                    del key2idxs[k]
        return result
Example #29
def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
    '''Convert log into changesets.'''

    ui.status(_('creating changesets\n'))

    # try to order commitids by date
    mindate = {}
    for e in log:
        if e.commitid:
            if e.commitid not in mindate:
                mindate[e.commitid] = e.date
            else:
                mindate[e.commitid] = min(e.date, mindate[e.commitid])

    # Merge changesets
    log.sort(key=lambda x: (mindate.get(x.commitid, (-1, 0)),
                            x.commitid or '', x.comment,
                            x.author, x.branch or '', x.date, x.branchpoints))

    changesets = []
    files = set()
    c = None
    for i, e in enumerate(log):

        # Check if log entry belongs to the current changeset or not.

        # Since CVS is file-centric, two different file revisions with
        # different branchpoints should be treated as belonging to two
        # different changesets (and the ordering is important and not
        # honoured by cvsps at this point).
        #
        # Consider the following case:
        # foo 1.1 branchpoints: [MYBRANCH]
        # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
        #
        # Here foo is part only of MYBRANCH, but not MYBRANCH2, e.g. a
        # later version of foo may be in MYBRANCH2, so foo should be the
        # first changeset and bar the next and MYBRANCH and MYBRANCH2
        # should both start off of the bar changeset. No provisions are
        # made to ensure that this is, in fact, what happens.
        if not (c and e.branchpoints == c.branchpoints and
                (# cvs commitids
                 (e.commitid is not None and e.commitid == c.commitid) or
                 (# no commitids, use fuzzy commit detection
                  (e.commitid is None or c.commitid is None) and
                   e.comment == c.comment and
                   e.author == c.author and
                   e.branch == c.branch and
                   ((c.date[0] + c.date[1]) <=
                    (e.date[0] + e.date[1]) <=
                    (c.date[0] + c.date[1]) + fuzz) and
                   e.file not in files))):
            c = changeset(comment=e.comment, author=e.author,
                          branch=e.branch, date=e.date,
                          entries=[], mergepoint=e.mergepoint,
                          branchpoints=e.branchpoints, commitid=e.commitid)
            changesets.append(c)

            files = set()
            if len(changesets) % 100 == 0:
                t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
                ui.status(stringutil.ellipsis(t, 80) + '\n')

        c.entries.append(e)
        files.add(e.file)
        c.date = e.date       # changeset date is date of latest commit in it

    # Mark synthetic changesets

    for c in changesets:
        # Synthetic revisions always get their own changeset, because
        # the log message includes the filename.  E.g. if you add file3
        # and file4 on a branch, you get four log entries and three
        # changesets:
        #   "File file3 was added on branch ..." (synthetic, 1 entry)
        #   "File file4 was added on branch ..." (synthetic, 1 entry)
        #   "Add file3 and file4 to fix ..."     (real, 2 entries)
        # Hence the check for 1 entry here.
        c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic

    # Sort files in each changeset

    def entitycompare(l, r):
        'Mimic cvsps sorting order'
        l = l.file.split('/')
        r = r.file.split('/')
        nl = len(l)
        nr = len(r)
        n = min(nl, nr)
        for i in range(n):
            if i + 1 == nl and nl < nr:
                return -1
            elif i + 1 == nr and nl > nr:
                return +1
            elif l[i] < r[i]:
                return -1
            elif l[i] > r[i]:
                return +1
        return 0

    for c in changesets:
        c.entries.sort(key=functools.cmp_to_key(entitycompare))

    # Sort changesets by date

    odd = set()
    def cscmp(l, r):
        d = sum(l.date) - sum(r.date)
        if d:
            return d

        # detect vendor branches and initial commits on a branch
        le = {}
        for e in l.entries:
            le[e.rcs] = e.revision
        re = {}
        for e in r.entries:
            re[e.rcs] = e.revision

        d = 0
        for e in l.entries:
            if re.get(e.rcs, None) == e.parent:
                assert not d
                d = 1
                break

        for e in r.entries:
            if le.get(e.rcs, None) == e.parent:
                if d:
                    odd.add((l, r))
                d = -1
                break
        # By this point, the changesets are sufficiently compared that
        # we don't really care about ordering. However, this leaves
        # some race conditions in the tests, so we compare on the
        # number of files modified, the files contained in each
        # changeset, and the branchpoints in the change to ensure test
        # output remains stable.

        # recommended replacement for cmp from
        # https://docs.python.org/3.0/whatsnew/3.0.html
        c = lambda x, y: (x > y) - (x < y)
        # Sort bigger changes first.
        if not d:
            d = c(len(l.entries), len(r.entries))
        # Try sorting by filename in the change.
        if not d:
            d = c([e.file for e in l.entries], [e.file for e in r.entries])
        # Try and put changes without a branch point before ones with
        # a branch point.
        if not d:
            d = c(len(l.branchpoints), len(r.branchpoints))
        return d

    changesets.sort(key=functools.cmp_to_key(cscmp))

    # Collect tags

    globaltags = {}
    for c in changesets:
        for e in c.entries:
            for tag in e.tags:
                # remember which is the latest changeset to have this tag
                globaltags[tag] = c

    for c in changesets:
        tags = set()
        for e in c.entries:
            tags.update(e.tags)
        # remember tags only if this is the latest changeset to have it
        c.tags = sorted(tag for tag in tags if globaltags[tag] is c)

    # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
    # by inserting dummy changesets with two parents, and handle
    # {{mergefrombranch BRANCHNAME}} by setting two parents.

    if mergeto is None:
        mergeto = br'{{mergetobranch ([-\w]+)}}'
    if mergeto:
        mergeto = re.compile(mergeto)

    if mergefrom is None:
        mergefrom = br'{{mergefrombranch ([-\w]+)}}'
    if mergefrom:
        mergefrom = re.compile(mergefrom)

    versions = {}    # changeset index where we saw any particular file version
    branches = {}    # changeset index where we saw a branch
    n = len(changesets)
    i = 0
    while i < n:
        c = changesets[i]

        for f in c.entries:
            versions[(f.rcs, f.revision)] = i

        p = None
        if c.branch in branches:
            p = branches[c.branch]
        else:
            # first changeset on a new branch
            # the parent is a changeset with the branch in its
            # branchpoints such that it is the latest possible
            # commit without any intervening, unrelated commits.

            for candidate in pycompat.xrange(i):
                if c.branch not in changesets[candidate].branchpoints:
                    if p is not None:
                        break
                    continue
                p = candidate

        c.parents = []
        if p is not None:
            p = changesets[p]

            # Ensure no changeset has a synthetic changeset as a parent.
            while p.synthetic:
                assert len(p.parents) <= 1, \
                       _('synthetic changeset cannot have multiple parents')
                if p.parents:
                    p = p.parents[0]
                else:
                    p = None
                    break

            if p is not None:
                c.parents.append(p)

        if c.mergepoint:
            if c.mergepoint == 'HEAD':
                c.mergepoint = None
            c.parents.append(changesets[branches[c.mergepoint]])

        if mergefrom:
            m = mergefrom.search(c.comment)
            if m:
                m = m.group(1)
                if m == 'HEAD':
                    m = None
                try:
                    candidate = changesets[branches[m]]
                except KeyError:
                    ui.warn(_("warning: CVS commit message references "
                              "non-existent branch %r:\n%s\n")
                            % (pycompat.bytestr(m), c.comment))
                if m in branches and c.branch != m and not candidate.synthetic:
                    c.parents.append(candidate)

        if mergeto:
            m = mergeto.search(c.comment)
            if m:
                if m.groups():
                    m = m.group(1)
                    if m == 'HEAD':
                        m = None
                else:
                    m = None   # if no group found then merge to HEAD
                if m in branches and c.branch != m:
                    # insert empty changeset for merge
                    cc = changeset(
                        author=c.author, branch=m, date=c.date,
                        comment='convert-repo: CVS merge from branch %s'
                        % c.branch,
                        entries=[], tags=[],
                        parents=[changesets[branches[m]], c])
                    changesets.insert(i + 1, cc)
                    branches[m] = i + 1

                    # adjust our loop counters now we have inserted a new entry
                    n += 1
                    i += 2
                    continue

        branches[c.branch] = i
        i += 1

    # Drop synthetic changesets (safe now that we have ensured no other
    # changesets can have them as parents).
    i = 0
    while i < len(changesets):
        if changesets[i].synthetic:
            del changesets[i]
        else:
            i += 1

    # Number changesets

    for i, c in enumerate(changesets):
        c.id = i + 1

    if odd:
        for l, r in odd:
            if l.id is not None and r.id is not None:
                ui.warn(_('changeset %d is both before and after %d\n')
                        % (l.id, r.id))

    ui.status(_('%d changeset entries\n') % len(changesets))

    hook.hook(ui, None, "cvschangesets", True, changesets=changesets)

    return changesets
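The cmp-style comparators above depend on functools.cmp_to_key; a minimal illustration of the pattern:

    import functools

    def cmp(x, y):                 # Python 3 replacement for the old builtin
        return (x > y) - (x < y)

    data = ['bb', 'a', 'ccc']
    data.sort(key=functools.cmp_to_key(lambda l, r: cmp(len(l), len(r))))
    assert data == ['a', 'bb', 'ccc']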
Example #30
 def __iter__(self):
     return iter(pycompat.xrange(len(self._revisions)))