def __init__(self, base, a, b):
    """Set up a three-way text merge, refusing binary inputs.

    Each input is a sequence of byte lines; they are normalized into
    newline-terminated fulltexts before being handed to Merge3Text.
    """
    fulltexts = []
    for lines in (base, a, b):
        normalized = [line.strip(b'\n') for line in lines]
        normalized.append(b'')  # force a trailing newline on join
        fulltexts.append(b'\n'.join(normalized))
    basetext, atext, btext = fulltexts
    if any(stringutil.binary(t) for t in fulltexts):
        raise error.Abort(b"don't know how to merge binary files")
    simplemerge.Merge3Text.__init__(self, basetext, atext, btext, base, a, b)
def expand(self, path, node, data):
    '''Returns data with keywords expanded.'''
    # Only expand when not restricted, the path matches the configured
    # patterns, and the data is not binary; otherwise pass data through.
    expandable = (
        not self.restrict
        and self.match(path)
        and not stringutil.binary(data)
    )
    if not expandable:
        return data
    ctx = self.linkctx(path, node)
    return self.substitute(data, path, ctx, self.rekw.sub)
def writetostore(self, text, sidedata):
    """Store ``text`` as an LFS blob and return a serialized pointer.

    Returns a ``(rawtext, False)`` pair where rawtext is the pointer
    payload that replaces the content in the revlog.
    """
    # hg filelog metadata (rename info etc.) must not end up in the blob
    hgmeta, offset = storageutil.parsemeta(text)
    if offset and offset > 0:
        text = text[offset:]

    # git-lfs only supports sha256 oids
    oid = hex(hashlib.sha256(text).digest())
    self.opener.lfslocalblobstore.write(oid, text)

    # replace the contents with pointer metadata
    longoid = b'sha256:%s' % oid
    metadata = pointer.gitlfspointer(oid=longoid, size=b'%d' % len(text))

    # LFS content is expected to be binary by default, but non-binary
    # content is possible too; record that so filectx.isbinary() can
    # answer without fetching the blob. This key is not hg filelog
    # metadata (it does not affect the commit hash), hence no "x-hg-"
    # prefix.
    if not stringutil.binary(text):
        metadata[b'x-is-binary'] = b'0'

    # carry the hg filelog metadata along under an "x-hg-" prefix
    if hgmeta is not None:
        for key, value in pycompat.iteritems(hgmeta):
            metadata[b'x-hg-%s' % key] = value

    return (metadata.serialize(), False)
def shrinklines(self, fname, lines):
    '''Returns lines with keyword substitutions removed.

    ``lines`` is a sequence of byte strings; they are returned
    unchanged when ``fname`` does not match or the content is binary.
    '''
    if self.match(fname):
        # The lines are bytes (stringutil.binary and _shrinktext operate
        # on bytes, cf. expand()/shrink()); joining with a str separator
        # would raise TypeError on Python 3, so join with b''.
        text = b''.join(lines)
        if not stringutil.binary(text):
            return _shrinktext(text, self.rekwexp.sub).splitlines(True)
    return lines
def tocrlf(s, params, ui, **kwargs):
    """Filter to convert to CRLF EOLs."""
    # binary content passes through untouched
    if stringutil.binary(s):
        return s
    # with only-consistent set, leave mixed-EOL files alone
    if ui.configbool(b'eol', b'only-consistent') and inconsistenteol(s):
        return s
    wants_newline = (
        ui.configbool(b'eol', b'fix-trailing-newline')
        and s
        and not s.endswith(b'\n')
    )
    if wants_newline:
        s = s + b'\n'
    return util.tocrlf(s)
def tolf(s, params, ui, **kwargs):
    """Filter to convert to LF EOLs."""
    if stringutil.binary(s):
        return s
    # config section/keys must be bytes like in tocrlf(); str keys do
    # not match Mercurial's bytes-based config on Python 3
    if ui.configbool(b'eol', b'only-consistent') and inconsistenteol(s):
        return s
    # s is bytes: s[-1] yields an int on Python 3, so comparing it to a
    # str '\n' was always True and doubled an existing trailing newline.
    # Use endswith(b'\n') as tocrlf() does.
    if (
        ui.configbool(b'eol', b'fix-trailing-newline')
        and s
        and not s.endswith(b'\n')
    ):
        s = s + b'\n'
    return util.tolf(s)
def pygmentize(field, fctx, style, tmpl, guessfilenameonly=False):
    """Rewrite the ``field`` template so file lines are syntax-colored.

    Injects the highlight CSS link into the cached header template and
    installs a ``colorize`` filter fed by a Pygments-highlighted copy of
    the file data. Returns early (no highlighting) for binary files and
    files no lexer can be found for.
    """
    # append a <link ...> to the syntax highlighting css.
    # Template cache keys are bytes, matching the bytes literals used
    # throughout this file.
    tmpl.load(b'header')
    old_header = tmpl.cache[b'header']
    if SYNTAX_CSS not in old_header:
        new_header = old_header + SYNTAX_CSS
        tmpl.cache[b'header'] = new_header

    text = fctx.data()
    if stringutil.binary(text):
        return

    # str.splitlines() != unicode.splitlines() because "reasons":
    # remove the exotic line separators up front. ``text`` is bytes, so
    # the patterns must be bytes too — mixing str patterns with bytes
    # data raises TypeError on Python 3.
    for sep in (b"\x0c", b"\x1c", b"\x1d", b"\x1e"):
        if sep in text:
            text = text.replace(sep, b'')

    # Pygments is best used with Unicode strings:
    # <http://pygments.org/docs/unicode/>
    text = text.decode(encoding.encoding, 'replace')

    # To get multi-line strings right, we can't format line-by-line
    try:
        lexer = guess_lexer_for_filename(fctx.path(), text[:1024],
                                         stripnl=False)
    except (ClassNotFound, ValueError):
        # guess_lexer will return a lexer if *any* lexer matches. There is
        # no way to specify a minimum match score. This can give a high rate of
        # false positives on files with an unknown filename pattern.
        if guessfilenameonly:
            return

        try:
            lexer = guess_lexer(text[:1024], stripnl=False)
        except (ClassNotFound, ValueError):
            # Don't highlight unknown files
            return

    # Don't highlight text files
    if isinstance(lexer, TextLexer):
        return

    formatter = HtmlFormatter(nowrap=True, style=style)
    colorized = highlight(text, lexer, formatter)
    coloriter = (
        s.encode(encoding.encoding, 'replace')
        for s in colorized.splitlines()
    )

    tmpl._filters[b'colorize'] = lambda x: next(coloriter)

    oldl = tmpl.cache[field]
    # the cached template is bytes, so the search/replace patterns are too
    newl = oldl.replace(b'line|escape', b'line|colorize')
    tmpl.cache[field] = newl
def forbidnewline(ui, repo, hooktype, node, newline, **kwargs):
    # Hook body shared by the forbidcr/forbidlf wrappers: scan the
    # incoming changesets for text files that contain the forbidden
    # ``newline`` byte sequence, warn about each offender, and return
    # True (veto) if any were found.
    halt = False
    seen = set()
    # we try to walk changesets in reverse order from newest to
    # oldest, so that if we see a file multiple times, we take the
    # newest version as canonical. this prevents us from blocking a
    # changegroup that contains an unacceptable commit followed later
    # by a commit that fixes the problem.
    tip = repo[b'tip']
    for rev in pycompat.xrange(
        repo.changelog.tiprev(), repo[node].rev() - 1, -1
    ):
        c = repo[rev]
        for f in c.files():
            # skip files already decided (newest revision wins), files
            # no longer present at tip, or not present in this changeset
            if f in seen or f not in tip or f not in c:
                continue
            seen.add(f)
            data = c[f].data()
            if not stringutil.binary(data) and newline in data:
                # print the banner only once, then one line per offender
                if not halt:
                    ui.warn(
                        _(
                            b'attempt to commit or push text file(s) '
                            b'using %s line endings\n'
                        )
                        % newlinestr[newline]
                    )
                ui.warn(_(b'in %s: %s\n') % (short(c.node()), f))
                halt = True
    # for incoming changegroups, suggest the local-repo hook/filter
    # configuration that would have prevented this push
    if halt and hooktype == b'pretxnchangegroup':
        crlf = newlinestr[newline].lower()
        filter = filterstr[newline]
        ui.warn(
            _(
                b'\nTo prevent this mistake in your local repository,\n'
                b'add to Mercurial.ini or .hg/hgrc:\n'
                b'\n'
                b'[hooks]\n'
                b'pretxncommit.%s = python:hgext.win32text.forbid%s\n'
                b'\n'
                b'and also consider adding:\n'
                b'\n'
                b'[extensions]\n'
                b'win32text =\n'
                b'[encode]\n'
                b'** = %sencode:\n'
                b'[decode]\n'
                b'** = %sdecode:\n'
            )
            % (crlf, crlf, filter, filter)
        )
    return halt
def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
    '''Overwrites selected files expanding/shrinking keywords.

    ``candidates`` are filenames (bytes); ``expand`` selects expansion
    vs. shrinking; ``lookup`` marks kwexpand/kwshrink invocations.
    '''
    if self.restrict or lookup or self.postcommit:  # exclude kw_copy
        candidates = self.iskwfile(candidates, ctx)
    if not candidates:
        return
    kwcmd = self.restrict and lookup  # kwexpand/kwshrink
    if self.restrict or expand and lookup:
        mf = ctx.manifest()
    if self.restrict or rekw:
        re_kw = self.rekw
    else:
        re_kw = self.rekwexp
    if expand:
        # bytes literals: the message is formatted with a bytes filename
        # and passed to ui.note(); a str literal would render the repr of
        # the bytes filename on Python 3
        msg = _(b'overwriting %s expanding keywords\n')
    else:
        msg = _(b'overwriting %s shrinking keywords\n')
    for f in candidates:
        if self.restrict:
            data = self.repo.file(f).read(mf[f])
        else:
            data = self.repo.wread(f)
        if stringutil.binary(data):
            continue
        if expand:
            parents = ctx.parents()
            if lookup:
                ctx = self.linkctx(f, mf[f])
            elif self.restrict and len(parents) > 1:
                # merge commit
                # in case of conflict f is in modified state during
                # merge, even if f does not differ from f in parent
                for p in parents:
                    if f in p and not p[f].cmp(ctx[f]):
                        ctx = p[f].changectx()
                        break
            data, found = self.substitute(data, f, ctx, re_kw.subn)
        elif self.restrict:
            found = re_kw.search(data)
        else:
            data, found = _shrinktext(data, re_kw.subn)
        if found:
            self.ui.note(msg % f)
            # bytes mode string, consistent with Mercurial's vfs usage
            fp = self.repo.wvfs(f, b"wb", atomictemp=True)
            fp.write(data)
            fp.close()
            if kwcmd:
                self.repo.dirstate.normal(f)
            elif self.postcommit:
                self.repo.dirstate.normallookup(f)
def commitctx(self, ctx, error=False, origctx=None):
    """Abort the commit if any matched text file has mixed EOL styles."""
    for path in sorted(ctx.added() + ctx.modified()):
        if not self._eolmatch(path):
            continue
        fctx = ctx[path]
        if fctx is None:
            continue
        data = fctx.data()
        if stringutil.binary(data):
            # We should not abort here, since the user should
            # be able to say "** = native" to automatically
            # have all non-binary files taken care of.
            continue
        if inconsistenteol(data):
            raise errormod.Abort(
                _(b"inconsistent newline style in %s\n") % path
            )
    return super(eolrepo, self).commitctx(ctx, error, origctx)
def diffwith(self, targetctx, match=None, fm=None): """diff and prepare fixups. update self.fixupmap, self.paths""" # only care about modified files self.status = self.stack[-1].status(targetctx, match) self.paths = [] # but if --edit-lines is used, the user may want to edit files # even if they are not modified editopt = self.opts.get(b'edit_lines') if not self.status.modified and editopt and match: interestingpaths = match.files() else: interestingpaths = self.status.modified # prepare the filefixupstate seenfctxs = set() # sorting is necessary to eliminate ambiguity for the "double move" # case: "hg cp A B; hg cp A C; hg rm A", then only "B" can affect "A". for path in sorted(interestingpaths): self.ui.debug(b'calculating fixups for %s\n' % path) targetfctx = targetctx[path] fctxs, ctx2fctx = getfilestack(self.stack, path, seenfctxs) # ignore symbolic links or binary, or unchanged files if any( f.islink() or stringutil.binary(f.data()) for f in [targetfctx] + fctxs if not isinstance(f, emptyfilecontext) ): continue if targetfctx.data() == fctxs[-1].data() and not editopt: continue seenfctxs.update(fctxs[1:]) self.fctxmap[path] = ctx2fctx fstate = filefixupstate(fctxs, path, ui=self.ui, opts=self.opts) if fm is not None: fm.startitem() fm.plain(b'showing changes for ') fm.write(b'path', b'%s\n', path, label=b'absorb.path') fm.data(linetype=b'path') fstate.diffwith(targetfctx, fm) self.fixupmap[path] = fstate self.paths.append(path) self.ctxaffected.update(fstate.ctxaffected)
def cleverdecode(s, cmd, **kwargs):
    """Decode filter: pass binary data through, otherwise dumbdecode."""
    if stringutil.binary(s):
        return s
    return dumbdecode(s, cmd, **kwargs)
def shrink(self, fname, text):
    '''Returns text with all keyword substitutions removed.'''
    # only shrink matching, non-binary files; everything else passes
    # through unchanged
    if not self.match(fname):
        return text
    if stringutil.binary(text):
        return text
    return _shrinktext(text, self.rekwexp.sub)
def isbinary(self):
    """Report whether this file's current data looks binary."""
    data = self.data()
    return stringutil.binary(data)
def cleverencode(s, cmd):
    """Encode filter: pass binary data through, otherwise dumbencode."""
    if stringutil.binary(s):
        return s
    return dumbencode(s, cmd)
def macdecode(s, cmd, **kwargs):
    """Decode filter: pass binary data through, otherwise macdumbdecode."""
    if stringutil.binary(s):
        return s
    return macdumbdecode(s, cmd, **kwargs)
def macencode(s, cmd):
    """Encode filter: pass binary data through, otherwise macdumbencode."""
    if stringutil.binary(s):
        return s
    return macdumbencode(s, cmd)