def __init__(self, base, a, b):
     basetext = '\n'.join([i.strip('\n') for i in base] + [''])
     atext = '\n'.join([i.strip('\n') for i in a] + [''])
     btext = '\n'.join([i.strip('\n') for i in b] + [''])
     if util.binary(basetext) or util.binary(atext) or util.binary(btext):
         raise error.Abort("don't know how to merge binary files")
     simplemerge.Merge3Text.__init__(self, basetext, atext, btext,
                                     base, a, b)
def rawfile(web, req, tmpl):
    guessmime = web.configbool('web', 'guessmime', False)

    path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
    if not path:
        content = manifest(web, req, tmpl)
        req.respond(HTTP_OK, web.ctype)
        return content

    try:
        fctx = webutil.filectx(web.repo, req)
    except error.LookupError as inst:
        try:
            content = manifest(web, req, tmpl)
            req.respond(HTTP_OK, web.ctype)
            return content
        except ErrorResponse:
            raise inst

    path = fctx.path()
    text = fctx.data()
    mt = 'application/binary'
    if guessmime:
        mt = mimetypes.guess_type(path)[0]
        if mt is None:
            if util.binary(text):
                mt = 'application/binary'
            else:
                mt = 'text/plain'
    if mt.startswith('text/'):
        mt += '; charset="%s"' % encoding.encoding

    req.respond(HTTP_OK, mt, path, body=text)
    return []
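
The interesting branch in rawfile is the MIME fallback. Below is a self-contained sketch of that decision; the hard-coded UTF-8 charset and the inline NUL-byte test are assumptions standing in for encoding.encoding and util.binary, which are defined elsewhere in Mercurial.

import mimetypes

def guess_content_type(path, data, guessmime=True):
    mt = 'application/binary'              # default when guessing is disabled
    if guessmime:
        mt = mimetypes.guess_type(path)[0]
        if mt is None:
            mt = 'application/binary' if '\0' in data else 'text/plain'
    if mt.startswith('text/'):
        mt += '; charset="UTF-8"'          # rawfile uses encoding.encoding here
    return mt

# guess_content_type('notes.txt', 'hello\n') -> 'text/plain; charset="UTF-8"'
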
Example #3
def tolf(s, params, ui, **kwargs):
    """Filter to convert to LF EOLs."""
    if util.binary(s):
        return s
    if ui.configbool('eol', 'only-consistent', True) and inconsistenteol(s):
        return s
    return eolre.sub('\n', s)
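
eolre and inconsistenteol belong to the eol extension and are not shown in this snippet. A hypothetical minimal version of both, just to make clear what tolf operates on:

import re

# a line feed preceded by any number of carriage returns
eolre = re.compile('\r*\n')

def inconsistenteol(data):
    """True when data mixes CRLF and bare LF line endings."""
    return '\r\n' in data and re.search('(?<!\r)\n', data) is not None
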
Example #4
 def overwrite(self, node=None, expand=True, files=None):
     '''Overwrites selected files expanding/shrinking keywords.'''
     ctx = self.repo.changectx(node)
     mf = ctx.manifest()
     if node is not None:     # commit
         files = [f for f in ctx.files() if f in mf]
         notify = self.ui.debug
     else:                    # kwexpand/kwshrink
         notify = self.ui.note
     candidates = [f for f in files if self.iskwfile(f, mf.linkf)]
     if candidates:
         self.restrict = True # do not expand when reading
         candidates.sort()
         action = expand and 'expanding' or 'shrinking'
         for f in candidates:
             fp = self.repo.file(f)
             data = fp.read(mf[f])
             if util.binary(data):
                 continue
             if expand:
                 changenode = node or self.getnode(f, mf[f])
                 data, found = self.substitute(data, f, changenode,
                                               self.re_kw.subn)
             else:
                 found = self.re_kw.search(data)
             if found:
                 notify(_('overwriting %s %s keywords\n') % (f, action))
                 self.repo.wwrite(f, data, mf.flags(f))
                 self.repo.dirstate.normal(f)
         self.restrict = False
Example #5
 def filelines(f):
     if binary(f.data()):
         mt = mimetypes.guess_type(f.path())[0]
         if not mt:
             mt = 'application/octet-stream'
         return [_('(binary file %s, hash: %s)') % (mt, hex(f.filenode()))]
     return f.data().splitlines()
Example #6
def pygmentize(self, tmpl, fctx, field):
    # append a <link ...> to the syntax highlighting css
    old_header = ''.join(tmpl('header'))
    if SYNTAX_CSS not in old_header:
        new_header = old_header + SYNTAX_CSS
        tmpl.cache['header'] = new_header

    text = fctx.data()
    if util.binary(text):
        return

    style = self.config("web", "pygments_style", "colorful")
    # To get multi-line strings right, we can't format line-by-line
    try:
        lexer = guess_lexer_for_filename(fctx.path(), text,
                                         encoding=util._encoding)
    except (ClassNotFound, ValueError):
        try:
            lexer = guess_lexer(text, encoding=util._encoding)
        except (ClassNotFound, ValueError):
            lexer = TextLexer(encoding=util._encoding)

    formatter = HtmlFormatter(style=style, encoding=util._encoding)

    colorized = highlight(text, lexer, formatter)
    # strip wrapping div
    colorized = colorized[:colorized.find('\n</pre>')]
    colorized = colorized[colorized.find('<pre>')+5:]
    coloriter = iter(colorized.splitlines())

    filters['colorize'] = lambda x: coloriter.next()

    oldl = tmpl.cache[field]
    newl = oldl.replace('line|escape', 'line|colorize')
    tmpl.cache[field] = newl
Example #7
 def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
     '''Overwrites selected files expanding/shrinking keywords.'''
     if self.restrict or lookup or self.record: # exclude kw_copy
         candidates = self.iskwfile(candidates, ctx)
     if not candidates:
         return
     kwcmd = self.restrict and lookup # kwexpand/kwshrink
     if self.restrict or expand and lookup:
         mf = ctx.manifest()
     lctx = ctx
     re_kw = (self.restrict or rekw) and self.rekw or self.rekwexp
     msg = (expand and _('overwriting %s expanding keywords\n')
            or _('overwriting %s shrinking keywords\n'))
     for f in candidates:
         if self.restrict:
             data = self.repo.file(f).read(mf[f])
         else:
             data = self.repo.wread(f)
         if util.binary(data):
             continue
         if expand:
             if lookup:
                 lctx = self.linkctx(f, mf[f])
             data, found = self.substitute(data, f, lctx, re_kw.subn)
         elif self.restrict:
             found = re_kw.search(data)
         else:
             data, found = _shrinktext(data, re_kw.subn)
         if found:
             self.ui.note(msg % f)
             self.repo.wwrite(f, data, ctx.flags(f))
             if kwcmd:
                 self.repo.dirstate.normal(f)
             elif self.record:
                 self.repo.dirstate.normallookup(f)
Example #8
 def overwrite(self, node, expand, files):
     '''Overwrites selected files expanding/shrinking keywords.'''
     ctx = self.repo[node]
     mf = ctx.manifest()
     if node is not None:     # commit
         files = [f for f in ctx.files() if f in mf]
         notify = self.ui.debug
     else:                    # kwexpand/kwshrink
         notify = self.ui.note
     candidates = [f for f in files if self.iskwfile(f, ctx.flags)]
     if candidates:
         self.restrict = True # do not expand when reading
         msg = (expand and _('overwriting %s expanding keywords\n')
                or _('overwriting %s shrinking keywords\n'))
         for f in candidates:
             fp = self.repo.file(f)
             data = fp.read(mf[f])
             if util.binary(data):
                 continue
             if expand:
                 if node is None:
                     ctx = self.repo.filectx(f, fileid=mf[f]).changectx()
                 data, found = self.substitute(data, f, ctx,
                                               self.re_kw.subn)
             else:
                 found = self.re_kw.search(data)
             if found:
                 notify(msg % f)
                 self.repo.wwrite(f, data, mf.flags(f))
                 if node is None:
                     self.repo.dirstate.normal(f)
         self.restrict = False
Example #9
 def shrinklines(self, fname, lines):
     '''Returns lines with keyword substitutions removed.'''
     if self.match(fname):
         text = ''.join(lines)
         if not util.binary(text):
             return _shrinktext(text, self.rekwexp.sub).splitlines(True)
     return lines
Example #10
    def annotate(**map):
        last = None
        if binary(fctx.data()):
            mt = (mimetypes.guess_type(fctx.path())[0]
                  or 'application/octet-stream')
            lines = enumerate([((fctx.filectx(fctx.filerev()), 1),
                                '(binary:%s)' % mt)])
        else:
            lines = enumerate(fctx.annotate(follow=True, linenumber=True))
        for lineno, ((f, targetline), l) in lines:
            fnode = f.filenode()

            if last != fnode:
                last = fnode

            yield {"parity": parity.next(),
                   "node": hex(f.node()),
                   "rev": f.rev(),
                   "author": f.user(),
                   "desc": f.description(),
                   "file": f.path(),
                   "targetline": targetline,
                   "line": l,
                   "lineid": "l%d" % (lineno + 1),
                   "linenumber": "% 6d" % (lineno + 1)}
Example #11
def _filerevision(web, tmpl, fctx):
    f = fctx.path()
    text = fctx.data()
    parity = paritygen(web.stripecount)

    if binary(text):
        mt = mimetypes.guess_type(f)[0] or 'application/octet-stream'
        text = '(binary:%s)' % mt

    def lines():
        for lineno, t in enumerate(text.splitlines(True)):
            yield {"line": t,
                   "lineid": "l%d" % (lineno + 1),
                   "linenumber": "% 6d" % (lineno + 1),
                   "parity": parity.next()}

    return tmpl("filerevision",
                file=f,
                path=webutil.up(f),
                text=lines(),
                rev=fctx.rev(),
                node=hex(fctx.node()),
                author=fctx.user(),
                date=fctx.date(),
                desc=fctx.description(),
                branch=webutil.nodebranchnodefault(fctx),
                parent=webutil.parents(fctx),
                child=webutil.children(fctx),
                rename=webutil.renamelink(fctx),
                permissions=fctx.manifest().flags(f))
Example #12
def tocrlf(s, params, ui, **kwargs):
    """Filter to convert to CRLF EOLs."""
    if util.binary(s):
        return s
    if ui.configbool("eol", "only-consistent", True) and inconsistenteol(s):
        return s
    if ui.configbool("eol", "fix-trailing-newline", False) and s and s[-1] != "\n":
        s = s + "\n"
    return eolre.sub("\r\n", s)
Example #13
def tocrlf(s, params, ui, **kwargs):
    """Filter to convert to CRLF EOLs."""
    if util.binary(s):
        return s
    if ui.configbool('eol', 'only-consistent', True) and inconsistenteol(s):
        return s
    if ui.configbool('eol', 'fix-trailing-newline', False) and s and s[-1] != '\n':
        s = s + '\n'
    return eolre.sub('\r\n', s)
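
Together with tolf above, the two filters are just regex substitutions in opposite directions. A standalone round trip, using the same assumed eolre pattern as in the earlier sketch:

import re

eolre = re.compile('\r*\n')

def tolf(s):
    return eolre.sub('\n', s)

def tocrlf(s):
    return eolre.sub('\r\n', s)

assert tocrlf('a\nb\r\nc\n') == 'a\r\nb\r\nc\r\n'
assert tolf('a\r\nb\nc\r\n') == 'a\nb\nc\n'
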
Example #14
def pygmentize(field, fctx, style, tmpl, guessfilenameonly=False):

    # append a <link ...> to the syntax highlighting css
    old_header = tmpl.load('header')
    if SYNTAX_CSS not in old_header:
        new_header = old_header + SYNTAX_CSS
        tmpl.cache['header'] = new_header

    text = fctx.data()
    if util.binary(text):
        return

    # str.splitlines() != unicode.splitlines() because "reasons"
    for c in "\x0c\x1c\x1d\x1e":
        if c in text:
            text = text.replace(c, '')

    # Pygments is best used with Unicode strings:
    # <http://pygments.org/docs/unicode/>
    text = text.decode(encoding.encoding, 'replace')

    # To get multi-line strings right, we can't format line-by-line
    try:
        lexer = guess_lexer_for_filename(fctx.path(), text[:1024],
                                         stripnl=False)
    except (ClassNotFound, ValueError):
        # guess_lexer will return a lexer if *any* lexer matches. There is
        # no way to specify a minimum match score. This can give a high rate of
        # false positives on files with an unknown filename pattern.
        if guessfilenameonly:
            return

        try:
            lexer = guess_lexer(text[:1024], stripnl=False)
        except (ClassNotFound, ValueError):
            # Don't highlight unknown files
            return

    # Don't highlight text files
    if isinstance(lexer, TextLexer):
        return

    formatter = HtmlFormatter(nowrap=True, style=style)

    colorized = highlight(text, lexer, formatter)
    coloriter = (s.encode(encoding.encoding, 'replace')
                 for s in colorized.splitlines())

    tmpl.filters['colorize'] = lambda x: coloriter.next()

    oldl = tmpl.cache[field]
    newl = oldl.replace('line|escape', 'line|colorize')
    tmpl.cache[field] = newl
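
Outside the hgweb template machinery, the Pygments calls used here can be exercised on their own. The sketch below is plain Pygments usage with the same lexer-guessing fallback, not the extension itself:

from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import TextLexer, guess_lexer_for_filename
from pygments.util import ClassNotFound

def colorize(path, text, style='colorful'):
    """Return nowrap HTML markup for text, falling back to a plain-text lexer."""
    try:
        lexer = guess_lexer_for_filename(path, text, stripnl=False)
    except (ClassNotFound, ValueError):
        lexer = TextLexer(stripnl=False)
    return highlight(text, lexer, HtmlFormatter(nowrap=True, style=style))

# colorize('hello.py', 'print("hi")\n') returns the tokens wrapped in <span> tags
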
Example #15
    def write(path):
        fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                         pathname=os.path.join(prefix, path))
        data = ctx[path].data()
        if not opts.get('text') and util.binary(data):
            fp.write("%s: binary file\n" % path)
            return

        for (num, line) in enumerate(data.split("\n"), start=1):
            line = line + "\n"
            fp.write("%s:%s: %s" % (path, num, line))
        fp.close()
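
The per-line numbering at the end is independent of the repository plumbing; a tiny sketch of just that loop:

def numbered(path, data):
    """Yield 'path:lineno: line' strings, 1-indexed, like the loop above."""
    for num, line in enumerate(data.split('\n'), start=1):
        yield '%s:%s: %s\n' % (path, num, line)

# list(numbered('a.txt', 'x\ny')) == ['a.txt:1: x\n', 'a.txt:2: y\n']
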
Example #16
 def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
     '''Overwrites selected files expanding/shrinking keywords.'''
     if self.restrict or lookup or self.postcommit: # exclude kw_copy
         candidates = self.iskwfile(candidates, ctx)
     if not candidates:
         return
     kwcmd = self.restrict and lookup # kwexpand/kwshrink
     if self.restrict or expand and lookup:
         mf = ctx.manifest()
     if self.restrict or rekw:
         re_kw = self.rekw
     else:
         re_kw = self.rekwexp
     if expand:
         msg = _('overwriting %s expanding keywords\n')
     else:
         msg = _('overwriting %s shrinking keywords\n')
     for f in candidates:
         if self.restrict:
             data = self.repo.file(f).read(mf[f])
         else:
             data = self.repo.wread(f)
         if util.binary(data):
             continue
         if expand:
             parents = ctx.parents()
             if lookup:
                 ctx = self.linkctx(f, mf[f])
             elif self.restrict and len(parents) > 1:
                 # merge commit
                 # in case of conflict f is in modified state during
                 # merge, even if f does not differ from f in parent
                 for p in parents:
                     if f in p and not p[f].cmp(ctx[f]):
                         ctx = p[f].changectx()
                         break
             data, found = self.substitute(data, f, ctx, re_kw.subn)
         elif self.restrict:
             found = re_kw.search(data)
         else:
             data, found = _shrinktext(data, re_kw.subn)
         if found:
             self.ui.note(msg % f)
             fp = self.repo.wvfs(f, "wb", atomictemp=True)
             fp.write(data)
             fp.close()
             if kwcmd:
                 self.repo.dirstate.normal(f)
             elif self.postcommit:
                 self.repo.dirstate.normallookup(f)
Example #17
 def commitctx(self, ctx, error=False):
     for f in sorted(ctx.added() + ctx.modified()):
         if not self._eolfile(f):
             continue
         data = ctx[f].data()
         if util.binary(data):
             # We should not abort here, since the user should
             # be able to say "** = native" to automatically
             # have all non-binary files taken care of.
             continue
         if inconsistenteol(data):
             raise util.Abort(_("inconsistent newline style "
                                "in %s\n" % f))
     return super(eolrepo, self).commitctx(ctx, error)
Example #18
def forbidnewline(ui, repo, hooktype, node, newline, **kwargs):
    halt = False
    seen = set()
    # we try to walk changesets in reverse order from newest to
    # oldest, so that if we see a file multiple times, we take the
    # newest version as canonical. this prevents us from blocking a
    # changegroup that contains an unacceptable commit followed later
    # by a commit that fixes the problem.
    tip = repo["tip"]
    for rev in xrange(len(repo) - 1, repo[node].rev() - 1, -1):
        c = repo[rev]
        for f in c.files():
            if f in seen or f not in tip or f not in c:
                continue
            seen.add(f)
            data = c[f].data()
            if not util.binary(data) and newline in data:
                if not halt:
                    ui.warn(
                        _("Attempt to commit or push text file(s) " "using %s line endings\n") % newlinestr[newline]
                    )
                ui.warn(_("in %s: %s\n") % (short(c.node()), f))
                halt = True
    if halt and hooktype == "pretxnchangegroup":
        crlf = newlinestr[newline].lower()
        filter = filterstr[newline]
        ui.warn(
            _(
                "\nTo prevent this mistake in your local repository,\n"
                "add to Mercurial.ini or .hg/hgrc:\n"
                "\n"
                "[hooks]\n"
                "pretxncommit.%s = python:hgext.win32text.forbid%s\n"
                "\n"
                "and also consider adding:\n"
                "\n"
                "[extensions]\n"
                "hgext.win32text =\n"
                "[encode]\n"
                "** = %sencode:\n"
                "[decode]\n"
                "** = %sdecode:\n"
            )
            % (crlf, crlf, filter, filter)
        )
    return halt
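
newlinestr and filterstr are module-level tables in win32text that the hook indexes by the forbidden byte sequence; a hypothetical minimal shape for them:

# hypothetical stand-ins; the real win32text module defines the actual tables
newlinestr = {'\r\n': 'CRLF', '\r': 'CR'}
filterstr = {'\r\n': 'clever', '\r': 'mac'}

# the hook then reports e.g. "CRLF line endings" and suggests the matching
# cleverencode:/cleverdecode: filters in the generated hgrc hint
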
Example #19
def pygmentize(field, fctx, style, tmpl):

    # append a <link ...> to the syntax highlighting css
    old_header = tmpl.load('header')
    if SYNTAX_CSS not in old_header:
        new_header = old_header + SYNTAX_CSS
        tmpl.cache['header'] = new_header

    text = fctx.data()
    if util.binary(text):
        return

    # str.splitlines() != unicode.splitlines() because "reasons"
    for c in "\x0c\x1c\x1d\x1e":
        if c in text:
            text = text.replace(c, '')

    # Pygments is best used with Unicode strings:
    # <http://pygments.org/docs/unicode/>
    text = text.decode(encoding.encoding, 'replace')

    # To get multi-line strings right, we can't format line-by-line
    try:
        lexer = guess_lexer_for_filename(fctx.path(), text[:1024],
                                         stripnl=False)
    except (ClassNotFound, ValueError):
        try:
            lexer = guess_lexer(text[:1024], stripnl=False)
        except (ClassNotFound, ValueError):
            lexer = TextLexer(stripnl=False)

    formatter = HtmlFormatter(style=style)

    colorized = highlight(text, lexer, formatter)
    # strip wrapping div
    colorized = colorized[:colorized.find('\n</pre>')]
    colorized = colorized[colorized.find('<pre>') + 5:]
    coloriter = (s.encode(encoding.encoding, 'replace')
                 for s in colorized.splitlines())

    tmpl.filters['colorize'] = lambda x: coloriter.next()

    oldl = tmpl.cache[field]
    newl = oldl.replace('line|escape', 'line|colorize')
    tmpl.cache[field] = newl
Example #20
 def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
     '''Overwrites selected files expanding/shrinking keywords.'''
     if self.restrict or lookup or self.postcommit: # exclude kw_copy
         candidates = self.iskwfile(candidates, ctx)
     if not candidates:
         return
     kwcmd = self.restrict and lookup # kwexpand/kwshrink
     if self.restrict or expand and lookup:
         mf = ctx.manifest()
     if self.restrict or rekw:
         re_kw = self.rekw
     else:
         re_kw = self.rekwexp
     if expand:
         msg = _('overwriting %s expanding keywords\n')
     else:
         msg = _('overwriting %s shrinking keywords\n')
     for f in candidates:
         if self.restrict:
             data = self.repo.file(f).read(mf[f])
         else:
             data = self.repo.wread(f)
         if util.binary(data):
             continue
         if expand:
             if lookup:
                 ctx = self.linkctx(f, mf[f])
             data, found = self.substitute(data, f, ctx, re_kw.subn)
         elif self.restrict:
             found = re_kw.search(data)
         else:
             data, found = _shrinktext(data, re_kw.subn)
         if found:
             self.ui.note(msg % f)
             fp = self.repo.wopener(f, "wb", atomictemp=True)
             fp.write(data)
             fp.close()
             if kwcmd:
                 self.repo.dirstate.normal(f)
             elif self.postcommit:
                 self.repo.dirstate.normallookup(f)
Example #21
 def diffwith(self, targetctx, match=None, showchanges=False):
     """diff and prepare fixups. update self.fixupmap, self.paths"""
     # only care about modified files
     self.status = self.stack[-1].status(targetctx, match)
     self.paths = []
     # but if --edit-lines is used, the user may want to edit files
     # even if they are not modified
     editopt = self.opts.get('edit_lines')
     if not self.status.modified and editopt and match:
         interestingpaths = match.files()
     else:
         interestingpaths = self.status.modified
     # prepare the filefixupstate
     seenfctxs = set()
     # sorting is necessary to eliminate ambiguity for the "double move"
     # case: "hg cp A B; hg cp A C; hg rm A", then only "B" can affect "A".
     for path in sorted(interestingpaths):
         if self.ui.debugflag:
             self.ui.write(_('calculating fixups for %s\n') % path)
         targetfctx = targetctx[path]
         fctxs, ctx2fctx = getfilestack(self.stack, path, seenfctxs)
         # ignore symbolic links or binary, or unchanged files
         if any(f.islink() or util.binary(f.data())
                for f in [targetfctx] + fctxs
                if not isinstance(f, emptyfilecontext)):
             continue
         if targetfctx.data() == fctxs[-1].data() and not editopt:
             continue
         seenfctxs.update(fctxs[1:])
         self.fctxmap[path] = ctx2fctx
         fstate = filefixupstate(fctxs, ui=self.ui, opts=self.opts)
         if showchanges:
             colorpath = self.ui.label(path, 'absorb.path')
             header = 'showing changes for ' + colorpath
             self.ui.write(header + '\n')
         fstate.diffwith(targetfctx, showchanges=showchanges)
         self.fixupmap[path] = fstate
         self.paths.append(path)
Example #22
 def shrink(self, fname, text):
     '''Returns text with all keyword substitutions removed.'''
     if self.match(fname) and not util.binary(text):
         return _shrinktext(text, self.rekwexp.sub)
     return text
Example #23
 def expand(self, path, node, data):
     '''Returns data with keywords expanded.'''
     if not self.restrict and self.match(path) and not util.binary(data):
         ctx = self.linkctx(path, node)
         return self.substitute(data, path, ctx, self.rekw.sub)
     return data
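
The rekw/rekwexp patterns and _shrinktext live elsewhere in the keyword extension. A hypothetical, much-reduced pair showing the expand/shrink round trip they implement for RCS-style keywords:

import re

# unexpanded '$Keyword$' versus expanded '$Keyword: value $' (illustrative only)
rekw = re.compile(r'\$(Id|Header|Date|Author)\$')
rekwexp = re.compile(r'\$(Id|Header|Date|Author): [^$\n]* \$')

def expand(data):
    return rekw.sub(lambda m: '$%s: demo $' % m.group(1), data)

def shrink(data):
    return rekwexp.sub(lambda m: '$%s$' % m.group(1), data)

line = 'rev = "$Id$"\n'
assert expand(line) == 'rev = "$Id: demo $"\n'
assert shrink(expand(line)) == line
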
Example #24
def macencode(s, cmd):
    if not util.binary(s):
        return macdumbencode(s, cmd)
    return s
Example #25
def cleverencode(s, cmd):
    if not util.binary(s):
        return dumbencode(s, cmd)
    return s
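
dumbencode and dumbdecode are the unconditional converters in win32text; cleverencode only skips them for binary data. A hypothetical minimal pair under that assumption:

import re

def dumbdecode(s):
    # checkout direction: give the working copy CRLF line endings
    return re.sub('(?<!\r)\n', '\r\n', s)

def dumbencode(s):
    # checkin direction: store plain LF in the repository
    return s.replace('\r\n', '\n')

def cleverencode(s):
    return s if s and '\0' in s else dumbencode(s)

assert dumbdecode('a\nb\r\n') == 'a\r\nb\r\n'
assert dumbencode('a\r\nb\n') == 'a\nb\n'
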
Example #26
            raise util.Abort(
                _('Cannot change to basedir "%s": %s') % (base, error))

        for f in files:
            file = self.selected[f]
            pathname, filename = os.path.split(os.path.normpath(file.path()))

            try:
                ftp.cwd(base + '/' + pathname)
            except ftplib.error_perm, error:
                self.ftp_mkd(ftp, pathname, base)

            try:
                filedata = StringIO(file.data())

                if util.binary(file.data()):
                    ftp.storbinary('STOR %s' % filename, filedata)
                else:
                    ftp.storlines('STOR %s' % filename, filedata)

                if self.chmod_file:
                    try:
                        ftp.voidcmd('SITE CHMOD %s %s' %
                                    (self.chmod_file, filename))
                    except ftplib.all_errors, error:
                        self.ui.warn(
                            _('Cannot CHMOD file "%s": %s') % (file, error))

                filedata.close()
            except ftplib.all_errors, error:
                raise util.Abort(
Example #27
 def expand(self, path, node, data):
     '''Returns data with keywords expanded.'''
     if not self.restrict and self.match(path) and not util.binary(data):
         ctx = self.linkctx(path, node)
         return self.substitute(data, path, ctx, self.rekw.sub)
     return data
Example #28
        fctx = webutil.filectx(web.repo, req)
    except error.LookupError, inst:
        try:
            content = manifest(web, req, tmpl)
            req.respond(HTTP_OK, web.ctype)
            return content
        except ErrorResponse:
            raise inst

    path = fctx.path()
    text = fctx.data()
    mt = 'application/binary'
    if guessmime:
        mt = mimetypes.guess_type(path)[0]
        if mt is None:
            if util.binary(text):
                mt = 'application/binary'
            else:
                mt = 'text/plain'
    if mt.startswith('text/'):
        mt += '; charset="%s"' % encoding.encoding

    req.respond(HTTP_OK, mt, path, body=text)
    return []


def _filerevision(web, tmpl, fctx):
    f = fctx.path()
    text = fctx.data()
    parity = paritygen(web.stripecount)
        fctx = webutil.filectx(web.repo, req)
    except error.LookupError, inst:
        try:
            content = manifest(web, req, tmpl)
            req.respond(HTTP_OK, web.ctype)
            return content
        except ErrorResponse:
            raise inst

    path = fctx.path()
    text = fctx.data()
    mt = 'application/binary'
    if guessmime:
        mt = mimetypes.guess_type(path)[0]
        if mt is None:
            mt = binary(text) and 'application/binary' or 'text/plain'
    if mt.startswith('text/'):
        mt += '; charset="%s"' % encoding.encoding

    req.respond(HTTP_OK, mt, path, len(text))
    return [text]


def _filerevision(web, tmpl, fctx):
    f = fctx.path()
    text = fctx.data()
    parity = paritygen(web.stripecount)

    if binary(text):
        mt = mimetypes.guess_type(f)[0] or 'application/octet-stream'
        text = '(binary:%s)' % mt
Example #30
 def expand(self, path, node, data):
     '''Returns data with keywords expanded.'''
     if not self.restrict and self.match(path) and not util.binary(data):
         ctx = self.repo.filectx(path, fileid=node).changectx()
         return self.substitute(data, path, ctx, self.re_kw.sub)
     return data
Example #31
def blame_trail(origfn, ui, repo, *pats, **opts):
    trail_line = opts['trail']
    if trail_line == -1:
        return origfn(ui, repo, *pats, **opts)

    if not ui.interactive():
        raise util.Abort(_("blame trail must be used in interactive mode.\n"
            "If you're using the pager extension, use --pager off or create"
            " a non paged alias for blametrail."))

    # have to use line_number for trail
    opts['line_number'] = True

    if opts.get('follow'):
        # --follow is deprecated and now just an alias for -f/--file
        # to mimic the behavior of Mercurial before version 1.5
        opts['file'] = True

    datefunc = ui.quiet and util.shortdate or util.datestr
    getdate = util.cachefunc(lambda x: datefunc(x[0].date()))

    if not pats:
        raise util.Abort(_('at least one filename or pattern is required'))

    hexfn = ui.debugflag and hex or short

    opmap = [('user', ' ', lambda x: ui.shortuser(x[0].user())),
             ('number', ' ', lambda x: str(x[0].rev())),
             ('changeset', ' ', lambda x: hexfn(x[0].node())),
             ('date', ' ', getdate),
             ('file', ' ', lambda x: x[0].path()),
             ('line_number', ':', lambda x: str(x[1])),
            ]

    if (not opts.get('user') and not opts.get('changeset')
        and not opts.get('date') and not opts.get('file')):
        opts['number'] = True

    linenumber = opts.get('line_number') is not None
    if linenumber and (not opts.get('changeset')) and (not opts.get('number')):
        raise util.Abort(_('at least one of -n/-c is required for -l'))

    funcmap = [(func, sep) for op, sep, func in opmap if opts.get(op)]
    funcmap[0] = (funcmap[0][0], '')  # no separator in front of first column

    def bad(x, y):
        raise util.Abort("%s: %s" % (x, y))

    ctx = scmutil.revsingle(repo, opts.get('rev'))
    m = scmutil.match(ctx, pats, opts)
    m.bad = bad
    follow = not opts.get('no_follow')
    diffopts = patch.diffopts(ui, opts, section='annotate')

    files = list(ctx.walk(m))
    assert len(files) == 1
    # todo what fails this assertion? original code assumed more than one file.

    fctx = ctx[files[0]]

    if not opts.get('text') and util.binary(fctx.data()):
        ui.write(_("%s: binary file\n") % ((pats and m.rel(abs)) or abs))
        return

    lines = fctx.annotate(follow=follow, linenumber=linenumber,
                          diffopts=diffopts)

    metadata, line_contents = lines[trail_line - 1]
    original_rev = metadata[0].rev()
    original_line = metadata[1]

    context = opts['context']
    if context != -1:
        line_s = trail_line - 1 - context
        line_e = trail_line - 1 + context + 1
        display_lines = lines[line_s:line_e]

        print "lines %i±%i:" % (trail_line, context)

        for ((l, lineno), line) in display_lines:
            print "%s: %s: %s" % (l.rev(), lineno, line),

        print

    rev = original_rev
    line = original_line

    # print the summary of the diff
    mercurial.commands.log(ui, repo, *pats, rev=[rev], follow=True, date=None)

    # now look at just the hunk with this line
    show_hunk(ui, repo, *pats, patch=True, rev=[rev], follow=True, date=None, line=line)

    ctx = scmutil.revsingle(repo, rev)
    parents = ctx.parents()
    assert len(parents) == 1
    parent = parents[0].rev()

    ui.write("parent is %s\n" % parent)

    line = ui.prompt("Line number for next iteration", None)
    if line:
        opts['trail'] = int(line)
        opts['rev'] = str(parent)

        # recurse until we overflow the stack or run out of history :)
        # santa(parent, line, context, filename)
        blame_trail(origfn, ui, repo, *pats, **opts)
Example #32
def macdecode(s, cmd, **kwargs):
    if not util.binary(s):
        return macdumbdecode(s, cmd, **kwargs)
    return s
Example #33
        return content

    try:
        fctx = web.filectx(req)
    except revlog.LookupError, inst:
        try:
            content = web.manifest(tmpl, web.changectx(req), path)
            req.respond(HTTP_OK, web.ctype)
            return content
        except ErrorResponse:
            raise inst

    path = fctx.path()
    text = fctx.data()
    mt = mimetypes.guess_type(path)[0]
    if mt is None or util.binary(text):
        mt = mt or 'application/octet-stream'

    req.respond(HTTP_OK, mt, path, len(text))
    return [text]

def file(web, req, tmpl):
    path = web.cleanpath(req.form.get('file', [''])[0])
    if not path:
        return web.manifest(tmpl, web.changectx(req), path)
    try:
        return web.filerevision(tmpl, web.filectx(req))
    except revlog.LookupError, inst:
        try:
            return web.manifest(tmpl, web.changectx(req), path)
        except ErrorResponse:
Example #34
def cleverdecode(s, cmd, **kwargs):
    if not util.binary(s):
        return dumbdecode(s, cmd, **kwargs)
    return s
Example #35
def commit(ui, repo, rev_ctx, meta, base_revision, svn):
    """Build and send a commit from Mercurial to Subversion.
    """
    file_data = {}
    parent = rev_ctx.parents()[0]
    parent_branch = rev_ctx.parents()[0].branch()
    branch_path = meta.layoutobj.remotename(parent_branch)

    extchanges = svnexternals.diff(svnexternals.parse(ui, parent),
                                   svnexternals.parse(ui, rev_ctx))
    addeddirs, deleteddirs = _getdirchanges(svn, branch_path, parent, rev_ctx,
                                            rev_ctx.files(), extchanges)
    deleteddirs = set(deleteddirs)

    props = {}
    copies = {}
    for file in rev_ctx.files():
        if file in util.ignoredfiles:
            continue
        new_data = base_data = ''
        action = ''
        if file in rev_ctx:
            fctx = rev_ctx.filectx(file)
            new_data = fctx.data()

            if 'x' in fctx.flags():
                props.setdefault(file, {})['svn:executable'] = '*'
            if 'l' in fctx.flags():
                props.setdefault(file, {})['svn:special'] = '*'
            isbinary = hgutil.binary(new_data)
            if isbinary:
                props.setdefault(
                    file, {})['svn:mime-type'] = 'application/octet-stream'

            if file not in parent:
                renamed = fctx.renamed()
                if renamed:
                    # TODO current model (and perhaps svn model) does not support
                    # this kind of renames: a -> b, b -> c
                    copies[file] = renamed[0]
                    base_data = parent[renamed[0]].data()
                    if 'l' in parent[renamed[0]].flags():
                        base_data = 'link ' + base_data
                else:
                    autoprops = svn.autoprops_config.properties(file)
                    if autoprops:
                        props.setdefault(file, {}).update(autoprops)

                action = 'add'
                dirname = '/'.join(file.split('/')[:-1] + [''])
            else:
                base_data = parent.filectx(file).data()
                if ('x' in parent.filectx(file).flags()
                        and 'x' not in rev_ctx.filectx(file).flags()):
                    props.setdefault(file, {})['svn:executable'] = None
                if 'l' in parent.filectx(file).flags():
                    base_data = 'link ' + base_data
                    if 'l' not in rev_ctx.filectx(file).flags():
                        props.setdefault(file, {})['svn:special'] = None
                if hgutil.binary(base_data) and not isbinary:
                    props.setdefault(file, {})['svn:mime-type'] = None
                action = 'modify'
        else:
            pos = file.rfind('/')
            if pos >= 0:
                if file[:pos] in deleteddirs:
                    # This file will be removed when its directory is removed
                    continue
            action = 'delete'
        file_data[file] = base_data, new_data, action

    def svnpath(p):
        return ('%s/%s' % (branch_path, p)).strip('/')

    changeddirs = []
    for d, v1, v2 in extchanges:
        props.setdefault(svnpath(d), {})['svn:externals'] = v2
        if d not in deleteddirs and d not in addeddirs:
            changeddirs.append(svnpath(d))

    # Now we are done with files, we can prune deleted directories
    # against themselves: ignore a/b if a/ is already removed
    deleteddirs2 = list(deleteddirs)
    deleteddirs2.sort(reverse=True)
    for d in deleteddirs2:
        pos = d.rfind('/')
        if pos >= 0 and d[:pos] in deleteddirs:
            deleteddirs.remove(d)

    newcopies = {}
    for source, dest in copies.iteritems():
        newcopies[svnpath(source)] = (svnpath(dest), base_revision)

    new_target_files = [svnpath(f) for f in file_data]
    for tf, ntf in zip(file_data, new_target_files):
        if tf in file_data and tf != ntf:
            file_data[ntf] = file_data[tf]
            if tf in props:
                props[ntf] = props.pop(tf)
            del file_data[tf]

    addeddirs = [svnpath(d) for d in addeddirs]
    deleteddirs = [svnpath(d) for d in deleteddirs]
    new_target_files += addeddirs + deleteddirs + changeddirs
    if not new_target_files:
        raise NoFilesException()
    try:
        return svn.commit(new_target_files, rev_ctx.description(),
                          file_data, base_revision, set(addeddirs),
                          set(deleteddirs), props, newcopies)
    except svnwrap.SubversionException, e:
        ui.traceback()

        if len(e.args) > 0 and e.args[1] in (svnwrap.ERR_FS_TXN_OUT_OF_DATE,
                                             svnwrap.ERR_FS_CONFLICT,
                                             svnwrap.ERR_FS_ALREADY_EXISTS):
            raise hgutil.Abort('Outgoing changesets parent is not at '
                               'subversion HEAD\n'
                               '(pull again and rebase on a newer revision)')
        elif len(e.args) > 0 and e.args[1] == svnwrap.ERR_REPOS_HOOK_FAILURE:
            # Special handling for svn hooks blocking error
            raise hgutil.Abort(e.args[0])
        else:
            raise
Example #36
def cleverencode(s, cmd):
    if not util.binary(s):
        return dumbencode(s, cmd)
    return s
Example #37
    try:
        fctx = webutil.filectx(web.repo, req)
    except error.LookupError, inst:
        try:
            content = manifest(web, req, tmpl)
            req.respond(HTTP_OK, web.ctype)
            return content
        except ErrorResponse:
            raise inst

    path = fctx.path()
    text = fctx.data()
    mt = mimetypes.guess_type(path)[0]
    if mt is None:
        mt = binary(text) and 'application/octet-stream' or 'text/plain'

    req.respond(HTTP_OK, mt, path, len(text))
    return [text]

def _filerevision(web, tmpl, fctx):
    f = fctx.path()
    text = fctx.data()
    parity = paritygen(web.stripecount)

    if binary(text):
        mt = mimetypes.guess_type(f)[0] or 'application/octet-stream'
        text = '(binary:%s)' % mt

    def lines():
        for lineno, t in enumerate(text.splitlines(True)):
Example #38
def macdecode(s, cmd, **kwargs):
    if not util.binary(s):
        return macdumbdecode(s, cmd, **kwargs)
    return s
Example #39
 def expand(self, path, node, data):
     '''Returns data with keywords expanded.'''
     if not self.restrict and self.matcher(path) and not util.binary(data):
         changenode = self.getnode(path, node)
         return self.substitute(data, path, changenode, self.re_kw.sub)
     return data
Example #40
def cleverdecode(s, cmd, **kwargs):
    if not util.binary(s):
        return dumbdecode(s, cmd, **kwargs)
    return s
Example #41
        numberOfFiles = len(files)
        pos = 0
        for f in files:
            file = self.selected[f]
            pathname, filename = os.path.split( os.path.normpath(file.path()) )

            try:
                ftp.cwd(base + '/' + pathname)
            except ftplib.error_perm, error:
                self.ftp_mkd(ftp, pathname, base)

            try:
                filedata = StringIO(file.data())

                if util.binary(file.data()):
                    ftp.storbinary('STOR %s' % filename, filedata)
                else:
                    ftp.storlines('STOR %s' % filename, filedata)

                if self.chmod_file:
                    try:
                        ftp.voidcmd('SITE CHMOD %s %s' %
                                (self.chmod_file, filename))
                    except ftplib.all_errors, error:
                        self.ui.warn(_('Cannot CHMOD file "%s": %s') %
                                (file, error))

                filedata.close()
                pos += 1
                self.ui.progress(_('uploading'), pos, unit=_('files'), total=numberOfFiles)
Example #42
    try:
        fctx = webutil.filectx(web.repo, req)
    except error.LookupError, inst:
        try:
            content = manifest(web, req, tmpl)
            req.respond(HTTP_OK, web.ctype)
            return content
        except ErrorResponse:
            raise inst

    path = fctx.path()
    text = fctx.data()
    mt = mimetypes.guess_type(path)[0]
    if mt is None:
        mt = binary(text) and 'application/octet-stream' or 'text/plain'

    req.respond(HTTP_OK, mt, path, len(text))
    return [text]


def _filerevision(web, tmpl, fctx):
    f = fctx.path()
    text = fctx.data()
    parity = paritygen(web.stripecount)

    if binary(text):
        mt = mimetypes.guess_type(f)[0] or 'application/octet-stream'
        text = '(binary:%s)' % mt

    def lines():
Example #43
def macencode(s, cmd):
    if not util.binary(s):
        return macdumbencode(s, cmd)
    return s
 def expand(self, path, node, data):
     '''Returns data with keywords expanded.'''
     if not self.restrict and self.match(path) and not util.binary(data):
         ctx = self.repo.filectx(path, fileid=node).changectx()
         return self.substitute(data, path, ctx, self.re_kw.sub)
     return data