def __init__(self, base, a, b):
    basetext = "\n".join([i.strip("\n") for i in base] + [""])
    atext = "\n".join([i.strip("\n") for i in a] + [""])
    btext = "\n".join([i.strip("\n") for i in b] + [""])
    if util.binary(basetext) or util.binary(atext) or util.binary(btext):
        raise error.Abort("don't know how to merge binary files")
    simplemerge.Merge3Text.__init__(self, basetext, atext, btext, base, a, b)

def writetostore(self, text):
    # hg filelog metadata (includes rename, etc)
    hgmeta, offset = filelog.parsemeta(text)
    if offset and offset > 0:
        # lfs blob does not contain hg filelog metadata
        text = text[offset:]

    # git-lfs only supports sha256
    oid = hashlib.sha256(text).hexdigest()

    self.opener.lfslocalblobstore.write(oid, text)

    # replace contents with metadata
    longoid = "sha256:%s" % oid
    metadata = pointer.gitlfspointer(oid=longoid, size=str(len(text)))

    # by default, we expect the content to be binary. however, LFS could also
    # be used for non-binary content. add a special entry for non-binary data.
    # this will be used by filectx.isbinary().
    if not util.binary(text):
        # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
        metadata["x-is-binary"] = "0"

    # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
    if hgmeta is not None:
        for k, v in pycompat.iteritems(hgmeta):
            metadata["x-hg-%s" % k] = v

    rawtext = metadata.serialize()
    return (rawtext, False)

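# A minimal, self-contained sketch (not part of the extension) of the pointer
# text that metadata.serialize() is assumed to produce above: the standard
# git-lfs pointer layout ("version", "oid", "size" lines), with any extra
# "x-is-binary" / "x-hg-*" keys appended as additional key-value lines.
# sketch_pointer() is a hypothetical helper for illustration only.
import hashlib

def sketch_pointer(text, extra=None):
    oid = hashlib.sha256(text).hexdigest()
    lines = [
        b"version https://git-lfs.github.com/spec/v1",
        b"oid sha256:" + oid.encode("ascii"),
        b"size %d" % len(text),
    ]
    for k, v in sorted((extra or {}).items()):
        # extra keys and values are expected to be bytes
        lines.append(b"%s %s" % (k, v))
    return b"\n".join(lines) + b"\n"
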
def tocrlf(s, params, ui, **kwargs):
    """Filter to convert to CRLF EOLs."""
    if util.binary(s):
        return s
    if ui.configbool("eol", "only-consistent") and inconsistenteol(s):
        return s
    if ui.configbool("eol", "fix-trailing-newline") and s and s[-1] != "\n":
        s = s + "\n"
    return util.tocrlf(s)

def tocrlf(s, params, ui, **kwargs):
    """Filter to convert to CRLF EOLs."""
    if util.binary(s):
        return s
    if ui.configbool("eol", "only-consistent") and inconsistenteol(s):
        return s
    if ui.configbool("eol", "fix-trailing-newline") and s and s[-1:] != b"\n":
        s = s + b"\n"
    return pycompat.encodeutf8(util.tocrlf(pycompat.decodeutf8(s)))

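# Hedged sketch of the EOL rewrite that util.tocrlf() is assumed to perform in
# the two filters above (upstream Mercurial implements it as a regex
# substitution of "\r*\n" with "\r\n"); tocrlf_sketch() is illustrative only,
# not the real helper.
import re

_eolre_sketch = re.compile(br"\r*\n")

def tocrlf_sketch(s):
    # collapse any run of CRs followed by an LF into a single CRLF
    return _eolre_sketch.sub(b"\r\n", s)
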
def pygmentize(field, fctx, style, tmpl, guessfilenameonly=False):

    # append a <link ...> to the syntax highlighting css
    old_header = tmpl.load("header")
    if SYNTAX_CSS not in old_header:
        new_header = old_header + SYNTAX_CSS
        tmpl.cache["header"] = new_header

    text = fctx.data()
    if util.binary(text):
        return

    # str.splitlines() != unicode.splitlines() because "reasons"
    for c in "\x0c\x1c\x1d\x1e":
        if c in text:
            text = text.replace(c, "")

    # Pygments is best used with Unicode strings:
    # <http://pygments.org/docs/unicode/>
    text = text.decode(encoding.encoding, "replace")

    # To get multi-line strings right, we can't format line-by-line
    try:
        lexer = guess_lexer_for_filename(fctx.path(), text[:1024], stripnl=False)
    except (ClassNotFound, ValueError):
        # guess_lexer will return a lexer if *any* lexer matches. There is
        # no way to specify a minimum match score. This can give a high rate of
        # false positives on files with an unknown filename pattern.
        if guessfilenameonly:
            return

        try:
            lexer = guess_lexer(text[:1024], stripnl=False)
        except (ClassNotFound, ValueError):
            # Don't highlight unknown files
            return

    # Don't highlight text files
    if isinstance(lexer, TextLexer):
        return

    formatter = HtmlFormatter(nowrap=True, style=style)

    colorized = highlight(text, lexer, formatter)
    coloriter = (
        s.encode(encoding.encoding, "replace") for s in colorized.splitlines()
    )

    tmpl.filters["colorize"] = lambda x: next(coloriter)

    oldl = tmpl.cache[field]
    newl = oldl.replace("line|escape", "line|colorize")
    tmpl.cache[field] = newl

def write(path):
    fp = cmdutil.makefileobj(
        repo, opts.get("output"), ctx.node(), pathname=os.path.join(prefix, path)
    )
    data = ctx[path].data()
    if not opts.get("text") and util.binary(data):
        fp.write("%s: binary file\n" % path)
        return

    for (num, line) in enumerate(data.split("\n"), start=1):
        line = line + "\n"
        fp.write("%s:%s: %s" % (path, num, line))
    fp.close()

def forbidnewline(ui, repo, hooktype, node, newline, **kwargs):
    halt = False
    seen = set()
    # we try to walk changesets in reverse order from newest to
    # oldest, so that if we see a file multiple times, we take the
    # newest version as canonical. this prevents us from blocking a
    # changegroup that contains an unacceptable commit followed later
    # by a commit that fixes the problem.
    tip = repo["tip"]
    for rev in range(len(repo) - 1, repo[node].rev() - 1, -1):
        c = repo[rev]
        for f in c.files():
            if f in seen or f not in tip or f not in c:
                continue
            seen.add(f)
            data = c[f].data()
            if not util.binary(data) and newline in data:
                if not halt:
                    ui.warn(
                        _(
                            "attempt to commit or push text file(s) "
                            "using %s line endings\n"
                        )
                        % newlinestr[newline]
                    )
                ui.warn(_("in %s: %s\n") % (short(c.node()), f))
                halt = True
    if halt and hooktype == "pretxnchangegroup":
        crlf = newlinestr[newline].lower()
        filter = filterstr[newline]
        ui.warn(
            _(
                "\nTo prevent this mistake in your local repository,\n"
                "add to Mercurial.ini or .hg/hgrc:\n"
                "\n"
                "[hooks]\n"
                "pretxncommit.%s = python:hgext.win32text.forbid%s\n"
                "\n"
                "and also consider adding:\n"
                "\n"
                "[extensions]\n"
                "win32text =\n"
                "[encode]\n"
                "** = %sencode:\n"
                "[decode]\n"
                "** = %sdecode:\n"
            )
            % (crlf, crlf, filter, filter)
        )
    return halt

def commitctx(self, ctx, error=False):
    for f in sorted(ctx.added() + ctx.modified()):
        if not self._eolmatch(f):
            continue
        fctx = ctx[f]
        if fctx is None:
            continue
        data = fctx.data()
        if util.binary(data):
            # We should not abort here, since the user should
            # be able to say "** = native" to automatically
            # have all non-binary files taken care of.
            continue
        if inconsistenteol(data):
            raise errormod.Abort(_("inconsistent newline style in %s\n") % f)
    return super(eolrepo, self).commitctx(ctx, error)

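# Hedged sketch of the inconsistenteol() check used above and in the tocrlf
# filters: data is considered mixed when it contains CRLF line endings
# alongside at least one bare LF (a "\n" not preceded by "\r"). This mirrors
# what the eol extension is assumed to do; inconsistenteol_sketch() is
# illustrative only.
import re

_singlelf_sketch = re.compile(br"(^|[^\r])\n")

def inconsistenteol_sketch(data):
    return b"\r\n" in data and bool(_singlelf_sketch.search(data))
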
def diffwith(self, targetctx, match=None, fm=None):
    """diff and prepare fixups. update self.fixupmap, self.paths"""
    # only care about modified files
    self.status = self.stack[-1].status(targetctx, match)
    self.paths = []
    # but if --edit-lines is used, the user may want to edit files
    # even if they are not modified
    editopt = self.opts.get("edit_lines")
    if not self.status.modified and editopt and match:
        interestingpaths = match.files()
    else:
        interestingpaths = self.status.modified
    # prepare the filefixupstate
    seenfctxs = set()
    # sorting is necessary to eliminate ambiguity for the "double move"
    # case: "hg cp A B; hg cp A C; hg rm A", then only "B" can affect "A".
    for path in sorted(interestingpaths):
        if self.ui.debugflag:
            self.ui.write(_("calculating fixups for %s\n") % path)
        targetfctx = targetctx[path]
        fctxs, ctx2fctx = getfilestack(self.stack, path, seenfctxs)
        # ignore symbolic links or binary, or unchanged files
        if any(
            f.islink() or util.binary(f.data())
            for f in [targetfctx] + fctxs
            if not isinstance(f, emptyfilecontext)
        ):
            continue
        if targetfctx.data() == fctxs[-1].data() and not editopt:
            continue
        seenfctxs.update(fctxs[1:])
        self.fctxmap[path] = ctx2fctx
        fstate = filefixupstate(fctxs, path, ui=self.ui, opts=self.opts)
        if fm is not None:
            fm.startitem()
            fm.plain("showing changes for ")
            fm.write("path", "%s\n", path, label="absorb.path")
            fm.data(linetype="path")
        fstate.diffwith(targetfctx, fm)
        self.fixupmap[path] = fstate
        self.paths.append(path)
        self.ctxaffected.update(fstate.ctxaffected)

def isbinary(self):
    return util.binary(self.data())

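# Hedged sketch of the heuristic util.binary() is assumed to apply at all of
# the call sites above: non-empty data containing a NUL byte is treated as
# binary (upstream Mercurial uses this NUL-byte check); binary_sketch() is
# illustrative only, not the real helper.
def binary_sketch(data):
    return bool(data and b"\0" in data)
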
def macencode(s, cmd):
    if not util.binary(s):
        return macdumbencode(s, cmd)
    return s


def macdecode(s, cmd, **kwargs):
    if not util.binary(s):
        return macdumbdecode(s, cmd, **kwargs)
    return s


def cleverencode(s, cmd):
    if not util.binary(s):
        return dumbencode(s, cmd)
    return s


def cleverdecode(s, cmd, **kwargs):
    if not util.binary(s):
        return dumbdecode(s, cmd, **kwargs)
    return s