def annotate(**map):
        """Template generator: yield one mapping per annotated line.

        NOTE(review): this is a closure lifted out of hgweb's annotate
        web command -- it relies on names from an enclosing scope
        (``fctx``, ``parity``, ``binary``, ``hex``, ``mimetypes``);
        confirm against the original caller.  ``**map`` also shadows
        the builtin, but is required by the templater calling
        convention.
        """
        last = None
        # Binary files are rendered as a single synthetic line naming
        # the guessed MIME type instead of real annotation data.
        if binary(fctx.data()):
            mt = (mimetypes.guess_type(fctx.path())[0]
                  or 'application/octet-stream')
            lines = enumerate([((fctx.filectx(fctx.filerev()), 1),
                                '(binary:%s)' % mt)])
        else:
            lines = enumerate(fctx.annotate(follow=True, linenumber=True))
        for lineno, ((f, targetline), l) in lines:
            fnode = f.filenode()

            # NOTE(review): ``last`` is updated but never read afterwards;
            # looks like leftover from logic that grouped consecutive
            # lines introduced by the same filenode.
            if last != fnode:
                last = fnode

            yield {
                "parity": parity.next(),
                "node": hex(f.node()),
                "rev": f.rev(),
                "author": f.user(),
                "desc": f.description(),
                "file": f.path(),
                "targetline": targetline,
                "line": l,
                "lineid": "l%d" % (lineno + 1),
                "linenumber": "% 6d" % (lineno + 1)
            }
 def shrinklines(self, fname, lines):
     '''Returns lines with keyword substitutions removed.'''
     # Files outside the keyword pattern, and binary files, pass
     # through untouched.
     if not self.match(fname):
         return lines
     text = ''.join(lines)
     if util.binary(text):
         return lines
     return _shrinktext(text, self.re_kwexp.sub).splitlines(True)
def _filerevision(web, tmpl, fctx):
    """Render the "filerevision" template for a file context."""
    path = fctx.path()
    data = fctx.data()
    parity = paritygen(web.stripecount)

    # Binary files get a one-line placeholder instead of their content.
    if binary(data):
        mimetype = (mimetypes.guess_type(path)[0]
                    or 'application/octet-stream')
        data = '(binary:%s)' % mimetype

    def lines():
        # Lazily emit one mapping per source line for the templater.
        for i, content in enumerate(data.splitlines(True)):
            yield {
                "line": content,
                "lineid": "l%d" % (i + 1),
                "linenumber": "% 6d" % (i + 1),
                "parity": parity.next()
            }

    return tmpl("filerevision",
                file=path,
                path=webutil.up(path),
                text=lines(),
                rev=fctx.rev(),
                node=hex(fctx.node()),
                author=fctx.user(),
                date=fctx.date(),
                desc=fctx.description(),
                branch=webutil.nodebranchnodefault(fctx),
                parent=webutil.parents(fctx),
                child=webutil.children(fctx),
                rename=webutil.renamelink(fctx),
                permissions=fctx.manifest().flags(path))
# Example 4
def tolf(s, params, ui, **kwargs):
    """Filter to convert to LF EOLs."""
    # Leave binary data untouched, and -- when only-consistent is set --
    # also leave files with mixed EOL styles alone.
    if util.binary(s) or (
            ui.configbool('eol', 'only-consistent', True)
            and inconsistenteol(s)):
        return s
    return eolre.sub('\n', s)
# Example 5
 def commitctx(self, ctx, error=False):
     """Refuse to commit text files with an inconsistent EOL style.

     Checks every added/modified file matched by the eol config;
     binary files are skipped so that a blanket "** = native" rule
     still works.  Raises util.Abort on mixed newline styles,
     otherwise delegates to the parent class.
     """
     for f in sorted(ctx.added() + ctx.modified()):
         if not self._eolfile(f):
             continue
         data = ctx[f].data()
         if util.binary(data):
             # We should not abort here, since the user should
             # be able to say "** = native" to automatically
             # have all non-binary files taken care of.
             continue
         if inconsistenteol(data):
             # Bug fix: apply '% f' OUTSIDE _().  The adjacent string
             # literals concatenate at compile time, so the original
             # code formatted first and then looked up the already-
             # formatted string in the translation catalog, which can
             # never match.
             raise util.Abort(_("inconsistent newline style "
                                "in %s\n") % f)
     return super(eolrepo, self).commitctx(ctx, error)
def forbidnewline(ui, repo, hooktype, node, newline, **kwargs):
    """Hook: reject incoming text files that contain *newline*.

    Walks the incoming changesets, warns about every offending file,
    and returns True (halting the transaction) when any non-binary
    file contains the forbidden EOL byte sequence.
    """
    halt = False
    seen = set()
    # we try to walk changesets in reverse order from newest to
    # oldest, so that if we see a file multiple times, we take the
    # newest version as canonical. this prevents us from blocking a
    # changegroup that contains an unacceptable commit followed later
    # by a commit that fixes the problem.
    tip = repo['tip']
    for rev in xrange(len(repo) - 1, repo[node].rev() - 1, -1):
        c = repo[rev]
        for f in c.files():
            # Skip files already judged, files deleted by tip, and
            # files not actually present in this changeset.
            if f in seen or f not in tip or f not in c:
                continue
            seen.add(f)
            data = c[f].data()
            if not util.binary(data) and newline in data:
                # Print the general warning only once, then one line
                # per offending file.
                if not halt:
                    ui.warn(
                        _('Attempt to commit or push text file(s) '
                          'using %s line endings\n') % newlinestr[newline])
                ui.warn(_('in %s: %s\n') % (short(c.node()), f))
                halt = True
    # For server-side rejections, suggest hook and extension config the
    # user can add locally to catch this at commit time instead.
    if halt and hooktype == 'pretxnchangegroup':
        crlf = newlinestr[newline].lower()
        filter = filterstr[newline]
        ui.warn(
            _('\nTo prevent this mistake in your local repository,\n'
              'add to Mercurial.ini or .hg/hgrc:\n'
              '\n'
              '[hooks]\n'
              'pretxncommit.%s = python:hgext.win32text.forbid%s\n'
              '\n'
              'and also consider adding:\n'
              '\n'
              '[extensions]\n'
              'win32text =\n'
              '[encode]\n'
              '** = %sencode:\n'
              '[decode]\n'
              '** = %sdecode:\n') % (crlf, crlf, filter, filter))
    return halt
# Example 7
def pygmentize(field, fctx, style, tmpl):
    """Colorize the lines of *field* in *tmpl* using Pygments."""
    # Inject the syntax-highlighting stylesheet link into the header
    # template, once.
    header = tmpl.load('header')
    if SYNTAX_CSS not in header:
        tmpl.cache['header'] = header + SYNTAX_CSS

    text = fctx.data()
    if util.binary(text):
        return

    # Pygments is best used with Unicode strings:
    # <http://pygments.org/docs/unicode/>
    text = text.decode(encoding.encoding, 'replace')

    # Guess a lexer from the filename first, then from the content,
    # falling back to plain text.  The whole buffer is formatted in
    # one pass so multi-line strings highlight correctly.
    try:
        lexer = guess_lexer_for_filename(fctx.path(), text[:1024])
    except (ClassNotFound, ValueError):
        try:
            lexer = guess_lexer(text[:1024])
        except (ClassNotFound, ValueError):
            lexer = TextLexer()

    html = highlight(text, lexer, HtmlFormatter(style=style))
    # Strip the wrapping <div><pre> ... </pre></div> emitted by Pygments.
    html = html[:html.find('\n</pre>')]
    html = html[html.find('<pre>') + 5:]
    lineiter = (line.encode(encoding.encoding, 'replace')
                for line in html.splitlines())

    # Each expansion of the 'colorize' filter consumes one colorized line.
    tmpl.filters['colorize'] = lambda x: lineiter.next()

    tmpl.cache[field] = tmpl.cache[field].replace('line|escape',
                                                  'line|colorize')
 def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
     '''Overwrites selected files expanding/shrinking keywords.'''
     # Filter candidates down to tracked keyword files, except for
     # kw_copy which passes pre-filtered candidates.
     if self.restrict or lookup or self.record:  # exclude kw_copy
         candidates = self.iskwfile(candidates, ctx)
     if not candidates:
         return
     kwcmd = self.restrict and lookup  # kwexpand/kwshrink
     # The manifest is only needed when reading file data by node
     # (restrict) or when resolving the exact filectx for expansion.
     if self.restrict or expand and lookup:
         mf = ctx.manifest()
     fctx = ctx
     # Choose the substitution regexp: raw keywords when restricted or
     # re-expanding, already-expanded keywords otherwise.
     subn = (self.restrict
             or rekw) and self.re_kw.subn or self.re_kwexp.subn
     msg = (expand and _('overwriting %s expanding keywords\n')
            or _('overwriting %s shrinking keywords\n'))
     for f in candidates:
         # restrict: read the committed revision; otherwise read the
         # working-directory copy.
         if self.restrict:
             data = self.repo.file(f).read(mf[f])
         else:
             data = self.repo.wread(f)
         if util.binary(data):
             continue
         if expand:
             if lookup:
                 fctx = self.repo.filectx(f, fileid=mf[f]).changectx()
             data, found = self.substitute(data, f, fctx, subn)
         elif self.restrict:
             # Only checking whether a keyword is present; data is
             # written back unchanged below.
             found = self.re_kw.search(data)
         else:
             data, found = _shrinktext(data, subn)
         if found:
             self.ui.note(msg % f)
             self.repo.wwrite(f, data, ctx.flags(f))
             # Keep dirstate in sync so the overwrite does not show up
             # as a modification.
             if kwcmd:
                 self.repo.dirstate.normal(f)
             elif self.record:
                 self.repo.dirstate.normallookup(f)
 def shrink(self, fname, text):
     '''Returns text with all keyword substitutions removed.'''
     # Unmatched and binary files pass through unchanged.
     if not self.match(fname) or util.binary(text):
         return text
     return _shrinktext(text, self.re_kwexp.sub)
 def expand(self, path, node, data):
     '''Returns data with keywords expanded.'''
     # No expansion when restricted, for unmatched paths, or for
     # binary content.
     if self.restrict or not self.match(path) or util.binary(data):
         return data
     ctx = self.repo.filectx(path, fileid=node).changectx()
     return self.substitute(data, path, ctx, self.re_kw.sub)
    # NOTE(review): fragment of hgweb's 'file' web command -- the
    # enclosing def is outside this view.
    try:
        fctx = webutil.filectx(web.repo, req)
    except error.LookupError, inst:
        # The path is not a file; try rendering it as a directory
        # listing, and re-raise the original lookup error if that
        # fails too.
        try:
            content = manifest(web, req, tmpl)
            req.respond(HTTP_OK, web.ctype)
            return content
        except ErrorResponse:
            raise inst

    path = fctx.path()
    text = fctx.data()
    # Guess the MIME type from the filename; fall back to content
    # sniffing to pick octet-stream vs. plain text.
    mt = mimetypes.guess_type(path)[0]
    if mt is None:
        mt = binary(text) and 'application/octet-stream' or 'text/plain'
    if mt.startswith('text/'):
        mt += '; charset="%s"' % encoding.encoding

    req.respond(HTTP_OK, mt, path, len(text))
    return [text]


def _filerevision(web, tmpl, fctx):
    # NOTE(review): appears to be a truncated duplicate of the full
    # _filerevision defined earlier in this file -- the lines()
    # generator and the tmpl(...) return are missing here, so parity
    # is assigned but unused.
    f = fctx.path()
    text = fctx.data()
    parity = paritygen(web.stripecount)

    if binary(text):
        # Replace binary content with a one-line MIME-type placeholder.
        mt = mimetypes.guess_type(f)[0] or 'application/octet-stream'
        text = '(binary:%s)' % mt
def macdecode(s, cmd, **kwargs):
    """Decode Mac EOLs, passing binary data through untouched."""
    if util.binary(s):
        return s
    return macdumbdecode(s, cmd, **kwargs)
def cleverencode(s, cmd):
    """Encode EOLs, passing binary data through untouched."""
    return s if util.binary(s) else dumbencode(s, cmd)
def cleverdecode(s, cmd, **kwargs):
    """Decode EOLs, passing binary data through untouched."""
    return s if util.binary(s) else dumbdecode(s, cmd, **kwargs)
def macencode(s, cmd):
    """Encode Mac EOLs, passing binary data through untouched."""
    if util.binary(s):
        return s
    return macdumbencode(s, cmd)