def fmt_wrap (text, indent=""):
    """
    Wrap text paragraph by paragraph at column C{wcol}.

    Paragraphs are taken as blocks separated by blank lines; each is
    wrapped independently with the given indent, then the wrapped
    paragraphs are rejoined with blank lines between them.
    """
    wrapped_paras = []
    for chunk in text.split("\n\n"):
        wlines = wrap_text(chunk, wcol=wcol, flead=indent, lead=indent,
                           endl="")
        wrapped_paras.append(cjoin(wlines, "\n"))
    return cjoin(wrapped_paras, "\n\n")
def help (self, subcmds=None, wcol=None, stream=sys.stdout):
    """
    Formatted help for subcommands.

    @param subcmds: subcommand names (all subcommands if C{None})
    @type subcmds: list of strings
    @param wcol: column to wrap text at (<= 0 for no wrapping,
        C{None} for automatic according to output stream)
    @type wcol: int
    @param stream: intended output stream for the text
    @type stream: file

    @return: formatted help
    @rtype: string
    """

    if subcmds is None:
        subcmds = self._scviews.keys()
    # Sort a copy rather than calling subcmds.sort(): the original
    # sorted the caller's list in place, a surprising side effect.
    # This also accepts any iterable of names, not just a list.
    subcmds = sorted(subcmds)

    fmts = []
    for subcmd in subcmds:
        scview = self._scviews.get(subcmd, None)
        if scview is None:
            raise SubcmdError(
                _("@info",
                  "Trying to get help for an unknown subcommand '%(cmd)s'.",
                  cmd=subcmd))
        fmts.append(scview.help(wcol, stream))
        fmts.append("")

    return cjoin(fmts, "\n")
def to_string(self, wrapf=wrap_field, force=False, colorize=0):
    """
    The string-representation of the message.

    Passes the arguments to L{to_lines} and joins the resulting list.

    @see: L{to_lines}
    """
    lines = self.to_lines(wrapf, force, colorize)
    return cjoin(lines)
def listcmd (self, subcmds=None, wcol=None, stream=sys.stdout):
    """
    Formatted listing of subcommands with short descriptions.

    @param subcmds: subcommand names (all subcommands if C{None})
    @type subcmds: list of strings
    @param wcol: column to wrap text at (<= 0 for no wrapping,
        C{None} for automatic according to output stream)
    @type wcol: int
    @param stream: intended output stream for the text
    @type stream: file

    @return: formatted listing
    @rtype: string
    """

    if subcmds is None:
        subcmds = self._scviews.keys()
    # Sort a copy rather than mutating the caller's list in place.
    subcmds = sorted(subcmds)

    # The + [0] guards max() against an empty listing
    # (max() on an empty sequence raises ValueError).
    maxsclen = max([len(x) for x in subcmds] + [0])

    ndsep = _("@item:intext splitter between a subcommand name "
              "and its description",
              " - ")

    flead = " " * 2
    lead = flead + " " * (maxsclen + 3)
    if wcol is None:
        wcol = (term_width(stream=stream) or 80) - 1

    fmts = []
    for subcmd in subcmds:
        scview = self._scviews.get(subcmd, None)
        if scview is None:
            raise SubcmdError(
                _("@info",
                  "Trying to include an unknown subcommand '%(cmd)s' "
                  "into listing.",
                  cmd=subcmd))
        # Compute the padded name unconditionally; the original assigned
        # it only when a description existed, yet referenced it in the
        # no-description branch (NameError or a stale value there).
        name = cinterp("%%-%ds" % maxsclen, subcmd)
        desc = scview.shdesc()
        if desc:
            s = name + ndsep + desc
        else:
            s = name
        lines = wrap_text(s, wcol=wcol, flead=flead, lead=lead, endl="")
        fmts.extend(lines)

    return cjoin(fmts, "\n")
def help (self, wcol=None, stream=sys.stdout): """ Formatted help for the subcommand. @param wcol: column to wrap text at (<= 0 for no wrapping, C{None} for automatic according to output stream) @type wcol: int @param stream: intended output stream for the text @type stream: file @return: formatted help @rtype: string """ # Split parameters into mandatory and optional. m_params = [] o_params = [] for param in self._ordered: if self._mandatorys[param]: m_params.append(param) else: o_params.append(param) # Format output. if wcol is None: wcol = (term_width(stream=stream) or 80) - 1 def fmt_wrap (text, indent=""): paras = text.split("\n\n") fmtparas = [] for para in paras: lines = wrap_text(para, wcol=wcol, flead=indent, lead=indent, endl="") fmtparas.append(cjoin(lines, "\n")) return cjoin(fmtparas, "\n\n") def fmt_par (param, indent=""): s = "" s += indent + " " + param ptype = self._ptypes[param] if ptype is bool: s += " "*1 +_("@item:intext indicator that the parameter " "is a flag", "[flag]") else: metavar = self._metavars[param] if metavar is None: metavar = _("@item:intext default placehodler for " "the parameter argument", "ARG") s += cinterp(":%s", metavar) defval = self._defvals[param] admvals = self._admvals[param] if ptype is not bool and defval is not None and str(defval): cpos = len(s) - s.rfind("\n") - 1 s += " "*1 + _("@item:intext default value for the argument", "[default %(arg)s=%(val)s]", arg=metavar, val=defval) if admvals is not None: s += "\n" + (" " * cpos) if ptype is not bool and admvals is not None: s += " "*1 + _("@item:intext admissible argument values", "[%(arg)s is one of: %(vallist)s]", arg=metavar, vallist=format_item_list(admvals)) s += "\n" desc = self._descs[param] if desc: fmt_desc = fmt_wrap(desc, indent + " ") s += fmt_desc ## Wrap current parameter with empty lines if ## the description spanned several lines. 
#if "\n\n" in fmt_desc: #s = "\n" + s + "\n" s += "\n" # empty line after description return s ls = [] ls += [" " + self._subcmd] ls += [" " + "=" * len(ls[-1].strip())] ls += [""] desc = self._desc if not desc: desc = _("@info", "No description available.") ls += [fmt_wrap(desc, " ")] if m_params: ls += [""] ls += [" " + _("@info", "Mandatory parameters:")] ls += [""] for param in m_params: ls += [fmt_par(param, " ")] if o_params: ls += [""] ls += [" " + _("@info", "Optional parameters:")] ls += [""] for param in o_params: ls += [fmt_par(param, " ")] return cjoin(ls, "\n").strip("\n")
def process (self, msg, cat):
    """
    Check grammar of each msgstr of the message via a LanguageTool server.

    Each msgstr is run through the configured precheck filters, sent to
    the server with an HTTP GET request, and any errors found in the XML
    response are reported (and optionally forwarded to Lokalize).
    Obsolete messages are skipped.
    """

    if msg.obsolete:
        return

    try:
        for msgstr in msg.msgstr:
            # Apply precheck filters.
            for pfilter, pfname in self.pfilters:
                try: # try as type F1A hook
                    msgstr = pfilter(msgstr)
                except TypeError:
                    try: # try as type F3* hook
                        msgstr = pfilter(msgstr, msg, cat)
                    except TypeError:
                        raise SieveError(
                            _("@info",
                              "Cannot execute filter '%(filt)s'.",
                              filt=pfname))

            # Query the LanguageTool server for this text.
            self.connection.request(
                "GET",
                _REQUEST % (self.lang,
                            urlencode({"text":msgstr.encode("UTF-8")})))
            response=self.connection.getresponse()
            if response:
                responseData=response.read()
                # Cheap substring test before paying for XML parsing;
                # the response contains "error" elements on findings.
                if "error" in responseData:
                    dom=parseString(responseData)
                    for error in dom.getElementsByTagName("error"):
                        if error.getAttribute("ruleId") in self.disabledRules:
                            continue
                        self.nmatch+=1
                        report("-"*(len(msgstr)+8))
                        report(_("@info",
                                 "<bold>%(file)s:%(line)d(#%(entry)d)</bold>",
                                 file=cat.filename, line=msg.refline,
                                 entry=msg.refentry))
                        #TODO: create a report function in the right place
                        #TODO: color in red part of context that make the mistake
                        report(_("@info",
                                 "<bold>Context:</bold> %(snippet)s",
                                 snippet=error.getAttribute("context")))
                        report(_("@info",
                                 "(%(rule)s) <bold><red>==></red></bold> %(note)s",
                                 rule=error.getAttribute("ruleId"),
                                 note=error.getAttribute("msg")))
                        report("")
                        if self.lokalize:
                            repls = [_("@label", "Grammar errors:")]
                            repls.append(_(
                                "@info",
                                "<bold>%(file)s:%(line)d(#%(entry)d)</bold>",
                                file=cat.filename, line=msg.refline,
                                entry=msg.refentry
                            ))
                            repls.append(_(
                                "@info",
                                "(%(rule)s) <bold><red>==></red></bold> %(note)s",
                                rule=error.getAttribute("ruleId"),
                                note=error.getAttribute("msg")
                            ))
                            report_msg_to_lokalize(msg, cat,
                                                   cjoin(repls, "\n"))
    except socket.error:
        raise SieveError(_("@info",
                           "Cannot connect to LanguageTool server. "
                           "Did you start it?"))
def _renew_lines_bymod(self, mod, wrapf=wrap_field, force=False,
                       colorize=0):
    """
    Rebuild cached wrapped-line representations of message fields.

    A field's line cache is regenerated when C{force} is set, when the
    field is flagged as modified in C{mod}, or when its cache is empty.
    Finally all cached field lines are marshalled, in canonical PO
    order, into C{self._lines_all}.

    @param mod: per-field modification flags, keyed by field name
    @param wrapf: function used to wrap field text into lines
    @param force: regenerate all caches regardless of modification flags
    @param colorize: 0 for no markup, >= 1 to embolden field names,
        >= 2 to also color comment lines
    """

    # Comment prefixes differ for obsolete messages.
    prefix = {}
    if self.obsolete:
        prefix["curr"] = "#~ "
        prefix["prev"] = "#~| "
    else:
        prefix["curr"] = ""
        prefix["prev"] = "#| "

    if force or mod["manual_comment"] or not self._lines_manual_comment:
        self._lines_manual_comment = []
        for manc in self.manual_comment:
            ls = wrap_comment_unwrap("", manc)
            if colorize >= 2:
                ls = [ColorString("<grey>%s</grey>") % x for x in ls]
            self._lines_manual_comment.extend(ls)

    if force or mod["auto_comment"] or not self._lines_auto_comment:
        self._lines_auto_comment = []
        for autoc in self.auto_comment:
            ls = wrap_comment_unwrap(".", autoc)
            if colorize >= 2:
                ls = [ColorString("<blue>%s</blue>") % x for x in ls]
            self._lines_auto_comment.extend(ls)

    if force or mod["source"] or not self._lines_source:
        self._lines_source = []
        srcrefs = []
        # Source references render as "file:line", or just "file"
        # when no valid line number is recorded.
        for src in self.source:
            if src[1] > 0:
                srcrefs.append(src[0] + ":" + str(src[1]))
            else:
                srcrefs.append(src[0])
        if srcrefs:
            ls = wrap_comment(":", cjoin(srcrefs, " "))
            if colorize >= 2:
                ls = [ColorString("<blue>%s</blue>") % x for x in ls]
            self._lines_source = ls

    if force or mod["flag"] or not self._lines_flag:
        self._lines_flag = []
        # Rearange so that fuzzy is first, if present.
        flst = []
        for fl in self.flag:
            if fl == u"fuzzy":
                if colorize >= 1:
                    fl = ColorString("<underline>%s</underline>") % fl
                flst.insert(0, fl)
            else:
                flst.append(fl)
        if flst:
            ls = wrap_comment(",", cjoin(flst, ", "))
            if colorize >= 2:
                ls = [ColorString("<blue>%s</blue>") % x for x in ls]
            self._lines_flag = ls

    # Single-string fields (msgctxt, msgid, ... and *_previous variants)
    # share one generic regeneration path, caches accessed via __dict__.
    for att in _Message_single_fields:
        att_lins = "_lines_" + att
        if force or mod[att] or not self.__dict__[att_lins]:
            # modcount of this string > 0 or lines not cached or forced
            self.__dict__[att_lins] = []
            msgsth = getattr(self, att)
            if msgsth is not None or att in _Message_mandatory_fields:
                if msgsth is None:
                    msgsth = u""
                # *_previous fields are emitted under the base field
                # name with the "previous" comment prefix.
                if att.endswith("_previous"):
                    fname = att[:-len("_previous")]
                    pstat = "prev"
                else:
                    fname = att
                    pstat = "curr"
                if colorize >= 1:
                    fname = ColorString("<bold>%s</bold>") % fname
                self.__dict__[att_lins] = wrapf(fname, _escape(msgsth),
                                                prefix[pstat])

    # msgstr must be renewed if the plurality of the message changed.
    new_plurality = (getattr(self, "_lines_msgstr", [])
                     and ((self.msgid_plural is None
                           and "msgstr[" in self._lines_msgstr[0])
                          or (self.msgid_plural is not None
                              and "msgstr[" not in self._lines_msgstr[0])))

    if force or mod["msgstr"] or not self._lines_msgstr or new_plurality:
        self._lines_msgstr = []
        msgstr = self.msgstr or [u""]
        if self.msgid_plural is None:
            fname = "msgstr"
            if colorize >= 1:
                fname = ColorString("<bold>%s</bold>") % fname
            self._lines_msgstr.extend(
                wrapf(fname, _escape(msgstr[0]), prefix["curr"]))
        else:
            # Plural messages get one indexed msgstr[i] per form.
            for i in range(len(msgstr)):
                fname = "msgstr[%d]" % i
                if colorize >= 1:
                    fname = ColorString("<bold>%s</bold>") % fname
                self._lines_msgstr.extend(
                    wrapf(fname, _escape(msgstr[i]), prefix["curr"]))

    # Marshal the lines into proper order.
    self._lines_all = []
    lins = self._lines_all

    lins.extend(self._lines_manual_comment)
    lins.extend(self._lines_auto_comment)
    if not self.obsolete: # no source for an obsolete message
        lins.extend(self._lines_source)
    lins.extend(self._lines_flag)

    # Actually, it might make sense regardless...
    ## Old originals makes sense only for a message with a fuzzy flag.
    #if self.fuzzy:
    lins.extend(self._lines_msgctxt_previous)
    lins.extend(self._lines_msgid_previous)
    lins.extend(self._lines_msgid_plural_previous)

    lins.extend(self._lines_msgctxt)
    lins.extend(self._lines_msgid)
    lins.extend(self._lines_msgid_plural)
    lins.extend(self._lines_msgstr)

    # NOTE: extend() on a string iterates its characters; for the
    # single-character u"\n" this appends exactly one newline entry.
    # Would raise IndexError if _lines_all were empty — presumably
    # msgstr lines are always present; TODO confirm.
    if self._lines_all[-1] != "\n":
        lins.extend(u"\n")
def _resolve_single_uiref (uitext, uicats, hookcl_f3c, hookcl_v3c, fdiralt):
    """
    Resolve one UI reference string against the given UI catalogs.

    The reference may carry an explicit msgctxt (split off by the
    context separator), and trailing argument specifications (split off
    by the argument separator) whose values are substituted into the
    resolved text. A reference starting with a doubled context separator
    is returned verbatim.

    @param uitext: the UI reference text to resolve
    @param uicats: UI catalogs to search for the referenced message
    @param hookcl_f3c: filtering hook applied to argument values
    @param hookcl_v3c: validation hook applied to argument values,
        returning error spans
    @param fdiralt: mapping of alternative to normal placeholder
        directive heads

    @return: resolved text and list of error messages
    @rtype: (string, [string*])
    """

    errmsgs = []

    # Determine context separator in the reference.
    # If the arcane one is not present, use normal.
    ctxsep = _uiref_ctxsep2
    if ctxsep not in uitext:
        ctxsep = _uiref_ctxsep

    # Return verbatim if requested (starts with two context separators).
    if uitext.startswith(ctxsep * 2):
        return uitext[len(ctxsep) * 2:], errmsgs

    # Split into msgctxt and msgid.
    has_msgctxt = False
    msgctxt = None
    msgid = uitext
    if ctxsep in uitext:
        lst = uitext.split(ctxsep)
        if len(lst) > 2:
            rep = "..." + ctxsep + ctxsep.join(lst[2:])
            errmsgs.append(_("@info \"tail\" is the trailing remainder of "
                             "a UI reference string after parsing",
                             "Superfluous tail '%(str)s' in "
                             "UI reference '%(ref)s'.",
                             str=rep, ref=uitext))
        msgctxt, msgid = lst[:2]
        if not msgctxt:
            # FIXME: What about context with existing, but empty context?
            msgctxt = None
        has_msgctxt = True
        # msgctxt may be None while has_msgctxt is True.
        # This distinction is important when deciding between two msgids,
        # one having no context and one having a context.

    # Split any arguments from msgid.
    args = []
    argsep = _uiref_argsep2
    if _uiref_argsep2 not in msgid:
        argsep = _uiref_argsep
    if argsep in msgid:
        lst = msgid.split(argsep)
        msgid = lst[0]
        args_raw = lst[1:]
        for arg_raw in args_raw:
            alst = arg_raw.split(_uiref_argplsep)
            if len(alst) == 2:
                # A leading marker requests single (first-occurrence
                # only) placeholder replacement.
                single = False
                if alst[0].startswith(_uiref_argsrepl):
                    alst[0] = alst[0][1:]
                    single = True
                # Normalize alternative placeholder directive heads,
                # both in the placeholder and in the msgid itself.
                for fdalt, fdnorm in fdiralt.items():
                    if alst[0].startswith(fdalt):
                        plhold = alst[0].replace(fdalt, fdnorm, 1)
                        if single:
                            msgid = msgid.replace(alst[0], plhold, 1)
                        else:
                            msgid = msgid.replace(alst[0], plhold)
                        alst[0] = plhold
                # Argument itself may contain UI references.
                local_errspans = hookcl_v3c(alst[1])
                if local_errspans:
                    errmsgs.extend([x[-1] for x in local_errspans])
                else:
                    alst[1] = hookcl_f3c(alst[1])
                alst.append(single)
                args.append(alst)
            else:
                errmsgs.append(_("@info",
                                 "Invalid argument specification '%(arg)s' "
                                 "in UI reference '%(ref)s'.",
                                 arg=arg_raw, ref=uitext))

    # Try to find unambiguous match to msgctxt/msgid.
    rmsg = None
    rcat = None
    for uicat in uicats:
        if has_msgctxt:
            msgs = uicat.select_by_key(msgctxt, msgid)
            if not msgs:
                # Also try as if the context were regular expression.
                msgs = uicat.select_by_key_match(msgctxt, msgid,
                                                 exctxt=False, exid=True,
                                                 case=False)
        else:
            msgs = uicat.select_by_msgid(msgid)
        if len(msgs) == 1:
            rmsg = msgs[0]
            rcat = uicat
            break

    # If unambiguous match found.
    if rmsg is not None:
        # If the message is translated, use its translation,
        # otherwise use original and report.
        if rmsg.translated:
            ruitext = rmsg.msgstr[0]
        else:
            ruitext = msgid
            errmsgs.append(_("@info",
                             "UI reference '%(ref)s' not translated "
                             "at %(file)s:%(line)d(#%(entry)d).",
                             ref=uitext, file=rcat.filename,
                             line=rmsg.refline, entry=rmsg.refentry))

    # If no unambiguous match found, collect all the approximate ones,
    # report and use the original UI text.
    else:
        ruitext = msgid
        approx = []
        for uicat in uicats:
            nmsgs = uicat.select_by_msgid_fuzzy(msgid)
            for nmsg in nmsgs:
                if nmsg.translated:
                    approx1 = _("@item condensed display of text and "
                                "its translation; they should stand out "
                                "well, hence the {{...}} wrapping",
                                "{{%(text)s}}={{%(translation)s}} "
                                "at %(file)s:%(line)d(#%(entry)d)",
                                text=_to_uiref(nmsg),
                                translation=nmsg.msgstr[0],
                                file=uicat.filename, line=nmsg.refline,
                                entry=nmsg.refentry)
                else:
                    approx1 = _("@item condensed display of text without "
                                "translation; it should stand out "
                                "well, hence the {{...}} wrapping",
                                "{{%(text)s}}=(untranslated) "
                                "at %(file)s:%(line)d(#%(entry)d)",
                                text=_to_uiref(nmsg),
                                file=uicat.filename, line=nmsg.refline,
                                entry=nmsg.refentry)
                approx.append(approx1)
        if approx:
            errmsgs.append(_("@info",
                             "UI reference '%(ref)s' cannot be resolved; "
                             "close matches:\n"
                             "%(matches)s",
                             ref=uitext, matches=cjoin(approx, "\n")))
        else:
            errmsgs.append(_("@info",
                             "UI reference '%(ref)s' cannot be resolved.",
                             ref=uitext))

    # Strip scripted part if any.
    p = ruitext.find(_ts_fence)
    if p >= 0:
        ruitext = ruitext[:p]

    # Replace any provided arguments.
    for plhold, value, single in args:
        if plhold in ruitext:
            if single:
                ruitext = ruitext.replace(plhold, value, 1)
            else:
                ruitext = ruitext.replace(plhold, value)
        else:
            errmsgs.append(_("@info",
                             "Placeholder '%(plhold)s' not found in resolved "
                             "UI reference text '%(text)s' "
                             "to reference '%(ref)s'.",
                             plhold=plhold, text=ruitext, ref=uitext))

    return ruitext, errmsgs
def multi_rule_error(msg, cat, rspec, showmsg=True, predelim=False):
    """
    Print formated rule error messages on screen.

    Like L{rule_error}, but reports multiple failed rules at once.
    Contents of the matched message is shown only once for all rules,
    with all highlights embedded, and all rule information following.
    This holds unless there are several different filtered messages,
    when rule failures are reported in groups by filtered message.

    @param msg: the message matched by rules
    @type msg: Message
    @param cat: the catalog in which the message resides
    @type cat: Catalog
    @param rspec: specification of failed rules. This is a list in which
        each element can be one of:
          - rule
          - tuple of rule and highlight specification (see
            L{report_msg_content} for details on highlight specifications).
            Highlight can be None.
          - tuple of rule, highlight, and filtered message which
            the rule really matched.
            Highlight and filtered message can be None.
    @type rspec: [(Rule|(Rule, highlight)|(Rule, highlight, Message))*]
    @param showmsg: whether to show contents of message
        (both original and filtered if given)
    @type showmsg: bool
    @param predelim: whether to also print delimiter before the first error
    @type predelim: bool
    """

    # Expand elements in rule specification to full lengths.
    rspec_mod = []
    for el in rspec:
        if not isinstance(el, tuple):
            el = (el,)
        el_mod = el + tuple(None for i in range(3 - len(el)))
        rspec_mod.append(el_mod)
    rspec = rspec_mod

    # Split into groups by distinct filtered messages,
    # or make one dummy group if content display not requested.
    if showmsg:
        rspec_groups = []
        for rule, hl, fmsg in rspec:
            # Find an existing group with this filtered message.
            # A dedicated variable is used for the match; the previous
            # code reused the loop variable, so a non-matching fmsg
            # silently got appended to the *last* group instead of
            # opening a new one.
            group = None
            for ofmsg, ohls in rspec_groups:
                if fmsg == ofmsg: # check for apparent equality
                    group = ohls
                    break
            if group is None:
                group = []
                rspec_groups.append((fmsg, group))
            group.append((rule, hl))
    else:
        rlhls = []
        rspec_groups = [(None, rlhls)]
        for rule, hl, fmsg in rspec:
            rlhls.append((rule, hl))

    # Report each rule group.
    for fmsg, rlhls in rspec_groups:
        rinfos = []
        highlight = []
        for rule, hl in rlhls:
            rinfos.append(_("@info",
                            "rule %(rule)s <bold><red>==></red></bold> "
                            "<bold>%(msg)s</bold>",
                            rule=rule.displayName, msg=rule.hint))
            highlight.extend(hl)
        # Defensive default; rinfos is non-empty for any normal group.
        note = None
        if len(rinfos) > 1:
            note = cjoin([""] + rinfos, "\n")
        elif rinfos:
            note = rinfos[0]
        if showmsg:
            delim = "-" * 40
            if predelim:
                report(delim)
            report_msg_content(msg, cat,
                               highlight=highlight,
                               fmsg=fmsg, showfmsg=(fmsg is not None),
                               note=note, delim=delim)
        else:
            report_on_msg(note, msg, cat)
            report_on_msg_hl(highlight, msg, cat, fmsg)
def report_msg_content(msg, cat, wrapf=None, force=False,
                       note=None, delim=None, highlight=None,
                       showmsg=True, fmsg=None, showfmsg=False,
                       subsrc=None, file=sys.stdout):
    """
    Report the content of a PO message.

    Provides the message reference, consisting of the catalog name and
    the message position within it, the message contents,
    and any notes on particular segments.

    Parts of the message can be highlighted using colors.
    Parameter C{highlight} provides the highlighting specification, as
    list of tuples where each tuple consists of: name of the message
    element to highlight, element index (used when the element is a list
    of values), list of spans, and optionally the filtered text of the
    element value. For example, to highlight spans C{(5, 10)} and
    C{(15, 25)} in the C{msgid}, and C{(30, 40)} in C{msgstr}, the
    highlighting specification would be::

        [("msgid", 0, [(5, 10), (15, 25)]), ("msgstr", 0, [(30, 40)])]

    Names of the elements that can presently be highlighted are:
    C{"msgctxt"}, C{"msgid"}, C{"msgid_plural"}, C{"msgstr"},
    C{"manual_comment"}, C{"auto_comment"}, C{"source"}, C{"flag"}.
    For unique fields the element index is not used, but 0 should be
    given for consistency (may be enforced later).

    Span tuples can have a third element, following the indices, which
    is the note about why the particular span is highlighted; there may
    be more elements after the note, and these are all ignored. If start
    or end index of a span is not an integer, then the note is taken as
    relating to the complete field.

    Sometimes the match to which the spans correspond has been made on a
    filtered value of the message field (e.g. after accelerator markers
    or tags have been removed). In that case, the filtered text can be
    given as the fourth element of the tuple, after the list of spans,
    and the function will try to fit spans from filtered onto original
    text.

    More globally, if the complete highlight is relative to a modified,
    filtered version of the message, this message can be given as
    C{fmsg} parameter.

    The display of content can be controlled by C{showmsg} parameter;
    if it is C{False}, only the message reference and span notes are
    shown. Similarly for the C{showfmsg} parameter, which controls the
    display of the content of filtered message (if given by C{fmsg}).
    To show the filtered message may be useful for debugging filtering
    in cases when it is not straightforward, or it is user-defined.

    @param msg: the message to report the content for
    @type msg: L{Message_base}
    @param cat: the catalog where the message lives
    @type cat: L{Catalog} or C{None}
    @param wrapf: the function used for wrapping message fields in
        output. See L{to_lines()<message.Message_base.to_lines>} method
        of message classes for details. If not given, it will be taken
        from the catalog (see L{Catalog.wrapf<catalog.Catalog.wrapf>}).
    @type wrapf: (string)->[string...]
    @param force: whether to force reformatting of cached message content
    @type force: bool
    @param note: note about why the content is being reported
    @type note: string
    @param delim: text to print on the line following the message
    @type delim: C{None} or string
    @param highlight: highlighting specification of message elements
    @type highlight: (see description)
    @param showmsg: show content of the message
    @type showmsg: bool
    @param fmsg: filtered message
    @type fmsg: L{Message_base}
    @param showfmsg: show content of the filtered message, if any
    @type showfmsg: bool
    @param subsrc: more detailed source of the message
    @type subsrc: C{None} or string
    @param file: output stream
    @type file: file
    """

    rsegs = []

    wrapf = wrapf or cat.wrapf()

    notes_data = []
    if highlight:
        msg = Message(msg) # must work on copy, highlight modifies it
        ffmsg = fmsg or msg # use original message as filtered if not given

        # Unify spans for same parts, to have single coloring pass per part
        # (otherwise markup can get corrupted).
        highlightd = {}
        for hspec in highlight:
            name, item, spans = hspec[:3]
            pkey = (name, item)
            phspec = highlightd.get(pkey)
            if phspec is None:
                # Make needed copies in order not to modify
                # the original highlight when adding stuff later.
                highlightd[pkey] = list(hspec)
                highlightd[pkey][2] = list(spans)
            else:
                phspec[2].extend(spans)
                # Take filtered text if available and not already taken.
                if len(hspec) > 3 and len(phspec) <= 3:
                    phspec.append(hspec[3])
        highlight = highlightd.values()

        for hspec in highlight:
            name, item, spans = hspec[:3]

            # Highlight one field value; also records adapted spans
            # for later per-span note reporting.
            def hl(text, ftext):
                if len(hspec) > 3:
                    # Override filtered text from filtered message
                    # by filtered text from the highlight spec.
                    ftext = hspec[3]
                aspans = adapt_spans(text, ftext, spans, merge=False)
                notes_data.append((text, name, item, aspans))
                text = _highlight_spans(text, spans, "red", ftext=ftext)
                return text

            if name == "msgctxt":
                if msg.msgctxt or ffmsg.msgctxt:
                    msg.msgctxt = hl(msg.msgctxt or u"",
                                     ffmsg.msgctxt or u"")
            elif name == "msgid":
                msg.msgid = hl(msg.msgid, ffmsg.msgid)
            elif name == "msgid_plural":
                msg.msgid_plural = hl(msg.msgid_plural or u"",
                                      ffmsg.msgid_plural or u"")
            elif name == "msgstr":
                msg.msgstr[item] = hl(msg.msgstr[item],
                                      ffmsg.msgstr[item])
            elif name == "manual_comment":
                msg.manual_comment[item] = hl(msg.manual_comment[item],
                                              ffmsg.manual_comment[item])
            elif name == "auto_comment":
                msg.auto_comment[item] = hl(msg.auto_comment[item],
                                            ffmsg.auto_comment[item])
            elif name == "source":
                msg.source[item] = Monpair(
                    (hl(msg.source[item][0], ffmsg.source[item][0]),
                     msg.source[item][1]))
            elif name == "flag":
                pass # FIXME: How to do this?
            else:
                warning(_("@info",
                          "Unknown field '%(field)s' "
                          "in highlighting specification.",
                          field=name))

    # Report the message.
    msegs = []
    if cat is not None:
        msegs += [_msg_pos_fmt(cat.filename, msg.refline, msg.refentry)
                  + "\n"]
    if showmsg:
        msgstr = msg.to_string(wrapf=wrapf, force=force, colorize=1)
        msegs += [msgstr.rstrip() + "\n"]
    if msegs:
        rsegs.append(cjoin(msegs).rstrip())

    # Report notes.
    if note is not None: # global
        notestr = _("@info",
                    "<bold>[note]</bold> %(msg)s",
                    msg=note)
        rsegs.append(notestr)
    if notes_data: # span notes
        note_ord = 1
        for text, name, item, spans in notes_data:
            if msg.msgid_plural is not None and name == "msgstr":
                name = "%s_%d" % (name, item)
            for span in spans:
                if len(span) < 3:
                    continue
                start, end, snote = span
                if isinstance(start, int) and isinstance(end, int):
                    seglen = end - start
                    if seglen > 0:
                        segtext = text[start:end]
                        if len(segtext) > 30:
                            segtext = _("@item:intext shortened longer text",
                                        "%(snippet)s...",
                                        snippet=segtext[:27])
                        posinfo = "%s:%d:\"%s\"" % (name, start,
                                                    escape(segtext))
                    else:
                        posinfo = "%s:%d" % (name, start)
                else:
                    # Non-integer indices: note relates to whole field.
                    posinfo = "%s" % name
                posinfo = ColorString("<green>%s</green>") % posinfo

                rsegs.append(_("@info",
                               "[%(pos)s]: %(msg)s",
                               pos=posinfo, msg=snote))
                note_ord += 1

    # Report the filtered message, if given and requested.
    if fmsg and showfmsg:
        fmtnote = (ColorString("<green>%s</green>")
                   % _("@info", ">>> Filtered message was:"))
        rsegs.append(fmtnote)
        fmsgstr = fmsg.to_string(wrapf=wrapf, force=force, colorize=1)
        mstr = fmsgstr.rstrip() + "\n"
        rsegs.append(mstr.rstrip())

    if delim:
        rsegs.append(delim)

    rtext = cjoin(rsegs, "\n").rstrip()
    report(rtext, subsrc=subsrc, file=file)
def report_msg_to_lokalize(msg, cat, report=None):
    """
    Open catalog in Lokalize and jump to message.

    Lokalize is a CAT tool for KDE 4, U{http://userbase.kde.org/Lokalize}.
    This function opens the catalog in Lokalize (if not already open)
    and jumps to the given message within it.

    If the message is obsolete, it will be ignored.

    @param msg: the message which should be jumped to in Lokalize
    @type msg: L{Message_base}
    @param cat: the catalog in which the message resides
    @type cat: L{Catalog}
    @param report: simple text or highlight specification
    @type report: string or L{highlight<report_msg_content>}
    """

    dbus = _get_module("dbus",
                       _("@info",
                         "Communication with Lokalize not possible. "
                         "Try installing the '%(pkg)s' package.",
                         pkg="python-dbus"))
    if not dbus:
        return

    if msg.obsolete:
        return

    # If report is a highlight specification,
    # flatten it into lines of notes by spans.
    if isinstance(report, list):
        notes = []
        for hspec in report:
            for span in hspec[2]:
                if len(span) > 2:
                    notes.append(span[2])
        report = cjoin(notes, "\n")

    # Whole D-Bus interaction is best-effort: any failure to talk to
    # Lokalize is silently ignored (hence the bare excepts).
    try:
        try:
            # Connection state is cached in module globals across calls.
            globals()['lokalizeobj']
        except:
            bus = dbus.SessionBus()
            lokalize_dbus_instances = lambda: filter(
                lambda name: name.startswith('org.kde.lokalize'),
                bus.list_names())
            for lokalize_dbus_instance in lokalize_dbus_instances():
                try:
                    globals()['lokalizeinst'] = lokalize_dbus_instance
                    globals()['lokalizeobj'] = bus.get_object(
                        globals()['lokalizeinst'], '/ThisIsWhatYouWant')
                    globals()['openFileInEditor'] = \
                        globals()['lokalizeobj'].get_dbus_method(
                            'openFileInEditor',
                            'org.kde.Lokalize.MainWindow')
                    globals()['visitedcats'] = {}
                except:
                    pass
        # No running Lokalize instance was found.
        if 'openFileInEditor' not in globals():
            return

        index = globals()['openFileInEditor'](
            os.path.abspath(cat.filename))
        editorobj = dbus.SessionBus().get_object(
            globals()['lokalizeinst'],
            '/ThisIsWhatYouWant/Editor/%d' % index)

        if cat.filename not in globals()['visitedcats']:
            globals()['visitedcats'][cat.filename] = 1

            # On first visit of a catalog, hide all entries and then
            # unhide the one of interest — NOTE(review): nesting of the
            # filtering calls under this first-visit branch is assumed;
            # confirm against upstream.
            setEntriesFilteredOut = editorobj.get_dbus_method(
                'setEntriesFilteredOut', 'org.kde.Lokalize.Editor')
            setEntriesFilteredOut(True)

            setEntryFilteredOut = editorobj.get_dbus_method(
                'setEntryFilteredOut', 'org.kde.Lokalize.Editor')
            setEntryFilteredOut(msg.refentry - 1, False)

        # Lokalize entries are zero-based, refentry is one-based.
        gotoEntry = editorobj.get_dbus_method(
            'gotoEntry', 'org.kde.Lokalize.Editor')
        gotoEntry(msg.refentry - 1)

        if report:
            addTemporaryEntryNote = editorobj.get_dbus_method(
                'addTemporaryEntryNote', 'org.kde.Lokalize.Editor')
            addTemporaryEntryNote(msg.refentry - 1,
                                  report.resolve(ctype="none"))
    except:
        return
class Sieve (object):
    """
    Collect property map entries from catalog messages.

    Entries are parsed out of manual comments starting with the
    configured property-map (C{pmhead}) or syntagma-derivator
    (C{sdhead}) prefixes, validated, and accumulated in C{self.entries}.
    """

    def __init__ (self, params):

        # This sieve neither syncs nor requires monitored catalogs.
        self.caller_sync = False
        self.caller_monitored = False

        # Optional property constraints used to validate entries.
        self.propcons = None
        if params.propcons:
            self.propcons = self._read_propcons(params.propcons)

        self.p = params

        if not params.pmhead:
            raise SieveError(_("@info",
                               "Prefix which starts property map entries "
                               "in comments cannot be empty."))
        if not params.sdhead:
            raise SieveError(_("@info",
                               "Prefix which starts syntagma derivator entries "
                               "in comments cannot be empty."))

        # Collected entries.
        # Each element is a tuple of the form:
        # (ekeys, props, psep, kvsep, msg, cat)
        self.entries = []

        # Syntagma derivator, for synder entries.
        self.synder = Synder()
        self.sdord = 0

    def process (self, msg, cat):
        """
        Parse property map and synder entries from one message's
        manual comments; append a parsed entry to C{self.entries}.
        Untranslated, obsolete and plural messages are skipped.
        """

        if not msg.translated or msg.obsolete:
            return
        if msg.msgid_plural is not None:
            return

        # Parse property map entries from the message.
        psep, kvsep = None, None
        ekeys = set()
        props = {}
        for i in range(len(msg.manual_comment)):
            ind = i + 1
            manc = (msg.manual_comment[i]).strip()
            if manc.startswith(self.p.pmhead):
                # Parse and check consistency of separators.
                # The first two characters after the prefix declare the
                # key-value and property separators for this entry.
                espec = manc[len(self.p.pmhead):].lstrip()
                lkvsep, lpsep = espec[:2]
                if lkvsep.isalnum() or lpsep.isalnum():
                    warning_on_msg(_("@info",
                                     "An alphanumeric separator is used for "
                                     "property map entry in comment "
                                     "no. %(ord)d.", ord=ind), msg, cat)
                    return
                if not psep:
                    psep, kvsep = lpsep, lkvsep
                elif (psep, kvsep) != (lpsep, lkvsep):
                    warning_on_msg(_("@info",
                                     "Inconsistent separators for "
                                     "continued property map entry in comment "
                                     "no. %(ord)d.", ord=ind), msg, cat)
                    return

                # Remove leading and trailing separators.
                respec = espec[2:]
                if respec.endswith(psep + psep):
                    respec = respec[:-2]
                elif respec.endswith(psep):
                    respec = respec[:-1]
                else:
                    warning_on_msg(_("@info",
                                     "Missing terminating separator for "
                                     "property map entry in comment "
                                     "no. %(ord)d.", ord=ind), msg, cat)
                    return

                # Parse entry keys and key-value pairs.
                for elspec in respec.split(psep):
                    if kvsep in elspec:
                        pkey, pval = elspec.split(kvsep, 1)
                        props[pkey] = pval
                    else:
                        # A bare element (no key-value separator) is an
                        # additional entry key, if allowed.
                        ekey = elspec
                        if not self.p.extrakeys:
                            warning_on_msg(_("@info",
                                             "Additional entry key '%(key)s' "
                                             "is defined but not allowed for "
                                             "property map entry in comment "
                                             "no. %(ord)d.",
                                             key=ekey, ord=ind), msg, cat)
                            return
                        ekeys.add(ekey)

            elif manc.startswith(self.p.sdhead):
                # Derive properties through the syntagma derivator.
                sddef = manc[len(self.p.sdhead):].lstrip()
                sdkey = str(self.sdord)
                sdexpr = sdkey + ":" + sddef
                if self.p.derivs:
                    sdexpr = ">" + self.p.derivs + "\n" + sdexpr
                try:
                    self.synder.import_string(sdexpr)
                    cprops = self.synder.props(sdkey)
                except Exception, e:
                    errmsg = str_to_unicode(str(e))
                    warning_on_msg(_("@info",
                                     "Invalid derivation '%(deriv)s':\n"
                                     "%(msg)s", deriv=sddef, msg=errmsg),
                                   msg, cat)
                    return

                # Pick separators not occurring anywhere in the derived
                # keys and values, or check previously chosen ones.
                jumble = "".join(["".join(x) for x in cprops.items()])
                if not psep:
                    psep = self._pick_sep(jumble, u"/|¦")
                    kvsep = self._pick_sep(jumble, u"=:→")
                    if not psep or not kvsep:
                        warning_on_msg(_("@info",
                                         "No known separator are applicable "
                                         "to keys and values derived from "
                                         "'%(deriv)s'.", deriv=sddef),
                                       msg, cat)
                        return
                else:
                    if psep in jumble or kvsep in jumble:
                        warning_on_msg(_("@info",
                                         "Previously selected separators "
                                         "are not applicable to "
                                         "keys and values derived from "
                                         "'%(deriv)s'.", deriv=sddef),
                                       msg, cat)
                        return

                props.update(cprops)

        if not props:
            if ekeys:
                warning_on_msg(_("@info",
                                 "Some additional entry keys "
                                 "are defined for property map entry, "
                                 "but there are no properties."),
                               msg, cat)
            return
        props = sorted(props.items()) # no need for dictionary any more

        # Add default keys.
        ekeys.add(msg.msgid)
        ekeys.add(msg.msgstr[0])

        # Validate entry if requested.
        if self.propcons:
            errs = self._validate_props(props, msg, cat, self.propcons)
            if errs:
                problems = cjoin([" " + x for x in errs], "\n")
                warning_on_msg(_("@info",
                                 "Property map entry fails validation:\n"
                                 "%(msgs)s", msgs=problems), msg, cat)
                return

        # Entry parsed.
        ekeys = sorted(ekeys)
        props = sorted(props)
        self.entries.append((ekeys, props, psep, kvsep, msg, cat))
def process(self, msg, cat):
    """
    Spell-check all translations of a message with Aspell.

    Misspelled words are either collected (when C{self.unknownWords} is
    set), written as XML error records, or reported directly; optionally
    all failures are also sent to Lokalize at the end.

    @param msg: message to check (only translated messages are processed)
    @param cat: catalog which contains the message
    """

    if not msg.translated:
        return

    id = 0 # Count msgstr plural forms
    failedSuggs = [] # pairs of wrong words and suggestions

    for msgstr in msg.msgstr:
        # Skip message with context in the ignoredContext list
        skip = False
        for context in self.ignoredContext:
            if context in (msg.msgctxt or u"").lower():
                skip = True
                break
            for comment in msg.auto_comment:
                if context in comment.lower():
                    skip = True
                    break
            if skip:
                break
        # NOTE: this break leaves the msgstr loop, i.e. an ignored
        # context skips the whole message, not just one plural form.
        if skip:
            break

        # Skip message if explicitly requested.
        if flag_no_check_spell in manc_parse_flag_list(msg, "|"):
            continue

        # Apply precheck filters.
        # Filters may be of two hook signatures; probe by TypeError.
        for pfilter, pfname in self.pfilters:
            try: # try as type F1A hook
                msgstr = pfilter(msgstr)
            except TypeError:
                try: # try as type F3* hook
                    msgstr = pfilter(msgstr, msg, cat)
                except TypeError:
                    raise SieveError(
                        _("@info",
                          "Cannot execute filter '%(filt)s'.",
                          filt=pfname))

        # Split text into words.
        if not self.simsp:
            words = proper_words(msgstr, True, cat.accelerator(), msg.format)
        else:
            # NOTE: Temporary, remove when proper_words becomes smarter.
            words = msgstr.split()

        # Eliminate from checking words matching the skip regex.
        if self.skipRx:
            words = [x for x in words if not self.skipRx.search(x)]

        # Eliminate from checking words explicitly listed as good.
        locally_ignored = manc_parse_list(msg, elist_well_spelled, ",")
        words = [x for x in words if x not in locally_ignored]

        for word in words:
            # Encode word for Aspell.
            # NOTE(review): encoding may fail for words outside the
            # selected charset; handled by the except clause below.
            encodedWord = word.encode(self.encoding)
            spell = self.aspell.check(encodedWord)
            if spell is False:
                try:
                    self.nmatch += 1
                    if self.unknownWords is not None:
                        # Collection-only mode: just remember the word.
                        self.unknownWords.add(word)
                    else:
                        encodedSuggestions = self.aspell.suggest(encodedWord)
                        suggestions = [i.decode(self.encoding)
                                       for i in encodedSuggestions]
                        failedSuggs.append((word, suggestions))
                        if self.xmlFile:
                            xmlError = spell_xml_error(msg, cat, word,
                                                       suggestions, id)
                            self.xmlFile.writelines(xmlError)
                        else:
                            spell_error(msg, cat, word, suggestions)
                except UnicodeEncodeError:
                    warning(_("@info",
                              "Cannot encode word '%(word)s' in "
                              "selected encoding '%(enc)s'.",
                              word=word, enc=self.encoding))
        id += 1 # Increase msgstr id count

    # Optionally forward all failures of this message to Lokalize.
    if failedSuggs and self.lokalize:
        repls = [_("@label", "Spelling errors:")]
        for word, suggs in failedSuggs:
            if suggs:
                fmtsuggs = format_item_list(suggs)
                repls.append(_("@item",
                               "%(word)s (suggestions: %(wordlist)s)",
                               word=word, wordlist=fmtsuggs))
            else:
                repls.append("%s" % (word))
        report_msg_to_lokalize(msg, cat, cjoin(repls, "\n"))
def process (self, msg, cat):
    """
    Apply all loaded validation rules to one translated message.

    Handles per-file XML output with an on-disk result cache: when a
    newer-than-PO cache file exists it is replayed instead of
    re-checking the catalog. Rules can be locally skipped/applied via
    manual comments ('skip-rule:', 'apply-rule:', 'switch-rule:').

    @param msg: message to check (only translated messages are processed)
    @param cat: catalog which contains the message
    """

    # Apply rules only on translated messages.
    if not msg.translated:
        return

    # Apply rules only to messages from selected branches.
    if self.branches:
        msg_branches = parse_summit_branches(msg)
        if not set.intersection(self.branches, msg_branches):
            return

    filename = basename(cat.filename)

    # New file handling
    if self.xmlFile and self.filename != filename:
        newFile = True
        self.cached = False # Reset flag
        # Cache file name encodes the absolute catalog path.
        self.cachePath = join(_CACHEDIR,
                              abspath(cat.filename).replace("/", _MARSHALL))
        if self.cacheFile:
            self.cacheFile.close()
        if self.filename != "":
            # close previous
            self.xmlFile.write("</po>\n")
        self.filename = filename
    else:
        newFile = False

    # Current file loaded from cache on previous message. Close and return
    if self.cached:
        # No need to analyze message, return immediately
        if self.cacheFile:
            self.cacheFile = None # Indicate cache has been used and flushed into xmlFile
        return

    # Does cache exist for this file ?
    if self.xmlFile and newFile and exists(self.cachePath):
        poDate = None
        for headerName, headerValue in cat.header.field:
            if headerName == "PO-Revision-Date":
                poDate = headerValue
                break

        if poDate:
            #Truncate daylight information
            poDate = poDate.rstrip("GMT")
            poDate = poDate[0:poDate.find("+")]
            #Convert in sec since epoch time format
            poDate = mktime(strptime(poDate, '%Y-%m-%d %H:%M'))
            # os.stat(...)[8] is st_mtime: replay cache only if it is
            # newer than the PO revision date.
            if os.stat(self.cachePath)[8] > poDate:
                if self.ruleinfo:
                    report(_("@info:progress", "Using cache."))
                self.xmlFile.writelines(
                    open(self.cachePath, "r", "utf-8").readlines())
                self.cached = True

    # No cache available, create it for next time
    if self.xmlFile and newFile and not self.cached:
        if self.ruleinfo:
            report(_("@info", "No cache available, processing file."))
        self.cacheFile = open(self.cachePath, "w", "utf-8")

    # Handle start/end of files for XML output (not needed for text output)
    if self.xmlFile and newFile:
        # open new po
        if self.cached:
            # We can return now, cache is used, no need to process catalog
            return
        else:
            poTag = '<po name="%s">\n' % filename
            self.xmlFile.write(poTag) # Write to result
            self.cacheFile.write(poTag) # Write to cache

    # Collect explicitly ignored rules by ID for this message.
    locally_ignored = manc_parse_list(msg, "skip-rule:", ",")

    # Collect explicitly applied rules by ID for this message.
    locally_applied = manc_parse_list(msg, "apply-rule:", ",")

    # Collect ignored/applied rules by switching comment.
    swprefix = "switch-rule:"
    swsep = ">"
    for cmnt in msg.manual_comment:
        if cmnt.strip().startswith(swprefix):
            p1 = cmnt.find(swprefix) + len(swprefix)
            p2 = cmnt.find(swsep, p1)
            if p2 < 0:
                raise SieveMessageError(
                    _("@info",
                      "Separator character '%(sep)s' missing in "
                      "'%(prefix)s' comment.",
                      sep=swsep, prefix=swprefix))
            els1 = [x.strip() for x in cmnt[p1:p2].split(",")]
            els2 = [x.strip() for x in cmnt[p2 + len(swsep):].split(",")]
            locally_ignored.extend(x for x in els1 if x)
            locally_applied.extend(x for x in els2 if x)

    # NOTE: It would be nice to warn if an explicitly applied rule
    # is not defined, but this is not generally possible because
    # different rule files may be loaded for different runs.

    # Prepare filtered messages for checking.
    envSet = set(self.envs)
    msgByFilter = {}
    for mfilter in self.ruleFilters:
        if mfilter is not None:
            msgf = MessageUnsafe(msg)
            mfilter(msgf, cat, envSet)
        else:
            msgf = msg
        msgByFilter[mfilter] = msgf

    # Now the sieve itself. Check message with every rules
    failedRules = []
    for rule in self.rules:
        if rule.disabled:
            continue
        if rule.environ and rule.environ not in envSet:
            continue
        if rule.ident in locally_ignored:
            continue
        if rule.manual and not rule.ident in locally_applied:
            continue
        msgf = msgByFilter[rule.mfilter]
        try:
            spans = rule.process(msgf, cat, envs=envSet, nofilter=True)
        except TimedOutException:
            warning(_("@info:progress",
                      "Rule '%(rule)s' timed out, skipping it.",
                      rule=rule.rawPattern))
            continue
        if spans:
            self.nmatch += 1
            if self.xmlFile:
                # FIXME: rule_xml_error is actually broken,
                # as it considers matching to always be on msgstr
                # Multiple span are now supported as well as msgstr index
                # Now, write to XML file if defined
                rspans = [x[:2] for x in spans[0][2]]
                pluid = spans[0][1]
                xmlError = rule_xml_error(msg, cat, rule, rspans, pluid)
                self.xmlFile.writelines(xmlError)
                if not self.cached:
                    # Write result in cache
                    self.cacheFile.writelines(xmlError)
            if not self.showfmsg:
                # Drop the filtered message from the failure record
                # when it should not be shown.
                msgf = None
            failedRules.append((rule, spans, msgf))

    if failedRules:
        if not self.byrule:
            multi_rule_error(msg, cat, failedRules, self.showmsg,
                             predelim=self._first_error)
            self._first_error = False
        else:
            # Postpone reporting: group failures by rule identifier.
            for rule, spans, msgf in failedRules:
                if rule.ident not in self.postFailedMessages:
                    self.postFailedMessages[rule.ident] = []
                self.postFailedMessages[rule.ident].append(
                    (msg, cat, ((rule, spans, msgf))))

        if self.mark:
            msg.flag.add(_flag_mark)

        if self.lokalize:
            repls = [_("@label", "Failed rules:")]
            for rule, hl, msgf in failedRules:
                repls.append(_("@item",
                               "rule %(rule)s ==> %(msg)s",
                               rule=rule.displayName, msg=rule.hint))
                # Each highlight item may carry spans with an optional
                # third element (the matched text) — show those.
                for part, item, spans, fval in hl:
                    repls.extend([u"↳ %s" % x[2]
                                  for x in spans if len(x) > 2])
            report_msg_to_lokalize(msg, cat, cjoin(repls, "\n"))
def process(self, msg, cat): if not msg.translated: return failed_w_suggs = [] for msgstr in msg.msgstr: # Skip message if explicitly requested. if flag_no_check_spell in manc_parse_flag_list(msg, "|"): continue # Apply precheck filters. for pfilter, pfname in self.pfilters: try: # try as type F1A hook msgstr = pfilter(msgstr) except TypeError: try: # try as type F3* hook msgstr = pfilter(msgstr, msg, cat) except TypeError: raise SieveError( _("@info", "Cannot execute filter '%(filt)s'.", filt=pfname)) # Split text into words. # TODO: See to use markup types somehow. words = proper_words(msgstr, True, cat.accelerator(), msg.format) # Eliminate from checking words matching the skip regex. if self.skip_rx: words = [x for x in words if not self.skip_rx.search(x)] # Eliminate from checking words explicitly listed as good. locally_ignored = manc_parse_list(msg, elist_well_spelled, ",") words = [x for x in words if x not in locally_ignored] for word in words: if not self.checker.check(word): failed = True self.unknown_words.add(word) if not self.words_only or self.lokalize: suggs = self.checker.suggest(word) incmp = False if len(suggs) > 5: # do not put out too many words suggs = suggs[:5] incmp = True failed_w_suggs.append((word, suggs)) if not self.words_only: if suggs: fsuggs = format_item_list(suggs, incmp=incmp) report_on_msg( _("@info", "Unknown word '%(word)s' " "(suggestions: %(wordlist)s).", word=word, wordlist=fsuggs), msg, cat) else: report_on_msg( _("@info", "Unknown word '%(word)s'.", word=word), msg, cat) if self.lokalize and failed_w_suggs: repls = [_("@label", "Spelling errors:")] for word, suggs in failed_w_suggs: if suggs: fmtsuggs = format_item_list(suggs, incmp=incmp) repls.append( _("@item", "%(word)s (suggestions: %(wordlist)s)", word=word, wordlist=fmtsuggs)) else: repls.append("%s" % (word)) report_msg_to_lokalize(msg, cat, cjoin(repls, "\n"))
def tabulate(data, coln=None, rown=None, dfmt=None, space=" ", none="",
             rotated=False, colorize=False, indent="",
             colnra=False, rownra=False, colw=0):
    """
    Tabulate data in plain text.

    All data fields can have missing trailing entries. They will be set to
    C{None} according to table extents.

    Examples:

        >>> print T.tabulate(data=((1, 4), (2, ), (3, 6)),
        ...                  coln=("c1", "c2", "c3"), rown=("r1", "r2"),
        ...                  space="  ", none="-")
        -   c1  c2  c3
        r1   1   2   3
        r2   4   -   6

    @param data: column entries (cells) by column
    @type data: [[string*]*]
    @param coln: column names
    @type coln: [string*]
    @param rown: row names
    @type rown: [string*]
    @param dfmt: format strings per column (e.g. C{"%+.2f"} for floats)
    @type dfmt: [string*]
    @param space: fill-in for spacing between cells
    @type space: string
    @param none: fill-in for displaying empty cells (i.e. C{None}-valued)
    @type none: string
    @param rotated: whether the table should be transposed
    @type rotated: bool
    @param colorize: whether the table should have color highlighting
    @type colorize: bool
    @param indent: indent string for the whole table
    @type indent: string
    @param colnra: right align column names
    @type colnra: bool
    @param rownra: right align row names
    @type rownra: bool
    @param colw: minimal column width
    @type colw: integer

    @returns: plain text representation of the table (no trailing newline)
    @rtype: string/L{ColorString<colors.ColorString>}
    """

    # Make local copies, to be able to extend to table extents.
    _data = []
    for col in data:
        _data.append(list(col))
    _coln = None
    if coln:
        _coln = list(coln)
    _rown = None
    if rown:
        _rown = list(rown)
    _dfmt = None
    if dfmt:
        _dfmt = list(dfmt)

    # Calculate maximum row and column number.
    # ...look at data:
    nrows = 0
    ncols = 0
    for col in _data:
        if nrows < len(col):
            nrows = len(col)
        ncols += 1
    # ...look at column and row names:
    if _coln is not None:
        if ncols < len(_coln):
            ncols = len(_coln)
    if _rown is not None:
        if nrows < len(_rown):
            nrows = len(_rown)

    # Index offsets due to column/row names.
    # ro/co are 1 when a header row/column is present, else 0; all
    # sdata indexing below is shifted by these offsets.
    ro = 0
    if _coln is not None:
        ro = 1
    co = 0
    if _rown is not None:
        co = 1

    # Extend all missing table fields.
    # ...add columns:
    for c in range(len(_data), ncols):
        _data.append([])
    # ...add rows:
    for col in _data:
        for r in range(len(col), nrows):
            col.append(None)
    # ...add column names:
    if _coln is not None:
        if _rown is not None:
            _coln.insert(0, none) # header corner
        for c in range(len(_coln), ncols + co):
            _coln.append(None)
    # ...add row names:
    if _rown is not None:
        if _coln is not None:
            _rown.insert(0, none) # header corner
        for r in range(len(_rown), nrows + ro):
            _rown.append(None)
    # ...add formats:
    if _dfmt is None:
        _dfmt = []
    if _rown is not None:
        _dfmt.insert(0, u"%s") # header corner
    for c in range(len(_dfmt), ncols + co):
        _dfmt.append(u"%s")

    # Stringize data.
    # ...nice fat deep assembly of empty stringized table:
    sdata = [[u"" for i in range(nrows + ro)] for j in range(ncols + co)]
    # ...table body:
    for c in range(ncols):
        for r in range(nrows):
            if _data[c][r] is not None:
                sdata[c + co][r + ro] = _dfmt[c + co] % (_data[c][r], )
            else:
                sdata[c + co][r + ro] = none
    # ...column names:
    if _coln is not None:
        for c in range(ncols + co):
            if _coln[c] is not None:
                sdata[c][0] = u"%s" % (_coln[c], )
    # ...row names:
    if _rown is not None:
        for r in range(nrows + ro):
            if _rown[r] is not None:
                sdata[0][r] = u"%s" % (_rown[r], )

    # Rotate needed data for output.
    if rotated:
        _coln, _rown = _rown, _coln
        ncols, nrows = nrows, ncols
        co, ro = ro, co
        sdata_r = [[u"" for i in range(nrows + ro)]
                   for j in range(ncols + co)]
        for c in range(ncols + co):
            for r in range(nrows + ro):
                sdata_r[c][r] = sdata[r][c]
        sdata = sdata_r

    # Calculate maximum lengths per screen column.
    maxlen = [colw] * (ncols + co)
    for c in range(ncols + co):
        for r in range(nrows + ro):
            l = len(sdata[c][r])
            if maxlen[c] < l:
                maxlen[c] = l

    # Reformat strings to maximum length per column.
    for c in range(co, ncols + co):
        lfmt = u"%" + str(maxlen[c]) + "s"
        for r in range(ro, nrows + ro):
            sdata[c][r] = lfmt % (sdata[c][r], )
        # ...but column names aligned as requested:
        if _coln is not None:
            if colnra:
                lfmt = u"%" + str(maxlen[c]) + "s"
            else:
                lfmt = u"%-" + str(maxlen[c]) + "s"
            sdata[c][0] = lfmt % (sdata[c][0], )
            if colorize:
                sdata[c][0] = ColorString("<purple>%s</purple>") % sdata[c][0]
    # ...but row names aligned as requested:
    if _rown is not None:
        if rownra:
            lfmt = u"%" + str(maxlen[0]) + "s"
        else:
            lfmt = u"%-" + str(maxlen[0]) + "s"
        for r in range(nrows + ro):
            sdata[0][r] = lfmt % (sdata[0][r], )
            if colorize:
                sdata[0][r] = ColorString("<blue>%s</blue>") % sdata[0][r]

    # Assemble the table.
    lines = []
    for r in range(nrows + ro):
        cells = []
        for c in range(ncols + co):
            cells.append(sdata[c][r])
        lines.append(indent + cjoin(cells, space))

    return cjoin(lines, "\n")
def main ():
    """
    Command-line entry point for poediff: create embedded diffs of
    PO files/directories, or of version-controlled paths, optionally
    estimating translation update effort instead.
    """

    locale.setlocale(locale.LC_ALL, "")

    # Get defaults for command line options from global config.
    cfgsec = pology_config.section("poediff")
    def_do_merge = cfgsec.boolean("merge", True)

    # Setup options and parse the command line.
    usage = _("@info command usage",
        "%(cmd)s [OPTIONS] FILE1 FILE2\n"
        "%(cmd)s [OPTIONS] DIR1 DIR2\n"
        "%(cmd)s -c VCS [OPTIONS] [PATHS...]",
        cmd="%prog")
    desc = _("@info command description",
        "Create embedded diffs of PO files.")
    ver = _("@info command version",
        u"%(cmd)s (Pology) %(version)s\n"
        u"Copyright © 2009, 2010 "
        u"Chusslove Illich (Часлав Илић) <%(email)s>",
        cmd="%prog", version=version(), email="*****@*****.**")

    showvcs = list(set(available_vcs()).difference(["none"]))
    showvcs.sort()

    opars = ColorOptionParser(usage=usage, description=desc, version=ver)
    opars.add_option(
        "-b", "--skip-obsolete",
        action="store_true", dest="skip_obsolete", default=False,
        help=_("@info command line option description",
               "Do not diff obsolete messages."))
    opars.add_option(
        "-c", "--vcs",
        metavar=_("@info command line value placeholder", "VCS"),
        dest="version_control",
        help=_("@info command line option description",
               "Paths are under version control by given VCS; "
               "can be one of: %(vcslist)s.",
               vcslist=format_item_list(showvcs)))
    opars.add_option(
        "--list-options",
        action="store_true", dest="list_options", default=False,
        help=_("@info command line option description",
               "List the names of available options."))
    opars.add_option(
        "--list-vcs",
        action="store_true", dest="list_vcs", default=False,
        help=_("@info command line option description",
               "List the keywords of known version control systems."))
    opars.add_option(
        "-n", "--no-merge",
        action="store_false", dest="do_merge", default=def_do_merge,
        help=_("@info command line option description",
               "Do not try to indirectly pair messages by merging catalogs."))
    opars.add_option(
        "-o", "--output",
        metavar=_("@info command line value placeholder", "POFILE"),
        dest="output",
        help=_("@info command line option description",
               "Output diff catalog to a file instead of stdout."))
    opars.add_option(
        "-p", "--paired-only",
        action="store_true", dest="paired_only", default=False,
        help=_("@info command line option description",
               "When two directories are diffed, ignore catalogs which "
               "are not present in both directories."))
    opars.add_option(
        "-q", "--quiet",
        action="store_true", dest="quiet", default=False,
        help=_("@info command line option description",
               "Do not display any progress info."))
    opars.add_option(
        "-Q", "--quick",
        action="store_true", dest="quick", default=False,
        help=_("@info command line option description",
               "Equivalent to %(opt)s.",
               opt="-bns"))
    opars.add_option(
        "-r", "--revision",
        metavar=_("@info command line value placeholder", "REV1[:REV2]"),
        dest="revision",
        help=_("@info command line option description",
               "Revision from which to diff to current working copy, "
               "or from first to second revision (if VCS is given)."))
    opars.add_option(
        "-s", "--strip-headers",
        action="store_true", dest="strip_headers", default=False,
        help=_("@info command line option description",
               "Do not diff headers and do not write out the top header "
               "(resulting output cannot be used as patch)."))
    opars.add_option(
        "-U", "--update-effort",
        action="store_true", dest="update_effort", default=False,
        help=_("@info command line option description",
               # FIX: second placeholder was %(opt1)s twice, leaving the
               # supplied opt2 argument unused.
               "Instead of outputting the diff, calculate and output "
               "an estimate of the effort that was needed to update "
               "the translation from old to new paths. "
               "Ignores %(opt1)s and %(opt2)s options.",
               opt1="-b", opt2="-n"))
    add_cmdopt_colors(opars)

    (op, free_args) = opars.parse_args(str_to_unicode(sys.argv[1:]))

    if op.list_options:
        report(list_options(opars))
        sys.exit(0)
    if op.list_vcs:
        report("\n".join(showvcs))
        sys.exit(0)

    # Could use some speedup.
    try:
        import psyco
        psyco.full()
    except ImportError:
        pass

    set_coloring_globals(ctype=op.coloring_type, outdep=(not op.raw_colors))

    if op.quick:
        op.do_merge = False
        op.skip_obsolete = True
        op.strip_headers = True

    # Create VCS.
    vcs = None
    if op.version_control:
        if op.version_control not in available_vcs(flat=True):
            error_wcl(_("@info",
                        "Unknown VCS '%(vcs)s' selected.",
                        vcs=op.version_control))
        vcs = make_vcs(op.version_control)

    # Sanity checks on paths.
    paths = free_args
    if not vcs:
        if len(paths) != 2:
            error_wcl(_("@info",
                        "Exactly two paths are needed for diffing."))
        for path in paths:
            if not os.path.exists(path):
                # FIX: was an unlocalized ad-hoc message, inconsistent
                # with the identical check in the VCS branch below.
                error_wcl(_("@info",
                            "Path '%(path)s' does not exist.",
                            path=path))
        p1, p2 = paths
        if (not (   (os.path.isfile(p1) and (os.path.isfile(p2)))
                 or (os.path.isdir(p1) and (os.path.isdir(p2))))
        ):
            error_wcl(_("@info",
                        "Both paths must be either files or directories."))
    else:
        # Default to current working dir if no paths given.
        paths = paths or ["."]
        for path in paths:
            if not os.path.exists(path):
                error_wcl(_("@info",
                            "Path '%(path)s' does not exist.",
                            path=path))
            if not vcs.is_versioned(path):
                error_wcl(_("@info",
                            "Path '%(path)s' is not under version control.",
                            path=path))

    # Collect and pair PO files in given paths.
    # Each pair specification is in the form of
    # ((path1, path2), (vpath1, vpath2))
    # where path* are the real paths, and vpath* the visual paths to be
    # presented in diff output.
    if not vcs:
        fpairs = collect_file_pairs(paths[0], paths[1], op.paired_only)
        pspecs = [(x, x) for x in fpairs]
    else:
        lst = op.revision.split(":", 1) if op.revision else []
        if len(lst) > 2:
            # FIX: keyword argument was revspec= while the format string
            # expects %(revlist)s, causing a formatting failure.
            error_wcl(_("@info",
                        "Too many revisions given: %(revlist)s.",
                        revlist=format_item_list(lst)))
        elif len(lst) == 2:
            revs = lst # diff between revisions
        elif len(lst) == 1:
            revs = [lst[0], None] # diff from revision to working copy
        else:
            revs = ["", None] # diff from head to working copy

        # Replace original paths with modified/added catalogs.
        paths_nc = []
        for path in paths:
            # FIX: inner loop variable no longer shadows the outer path.
            for cpath in vcs.to_commit(path):
                if cpath.endswith(".po") or cpath.endswith(".pot"):
                    paths_nc.append(cpath)
        paths = paths_nc
        paths.sort()

        pspecs = collect_pspecs_from_vcs(vcs, paths, revs, op.paired_only)

    if not op.update_effort:
        ecat, ndiffed = diff_pairs(pspecs, op.do_merge,
                                   colorize=(not op.output),
                                   shdr=op.strip_headers,
                                   noobs=op.skip_obsolete,
                                   quiet=op.quiet)
        if ndiffed > 0:
            hmsgctxt = ecat.header.get_field_value(EDST.hmsgctxt_field)
            lines = []
            msgs = list(ecat)
            if not op.strip_headers:
                msgs.insert(0, ecat.header.to_msg())
            for msg in msgs:
                if op.strip_headers and msg.msgctxt == hmsgctxt:
                    # Replace the full header-diff message by a short
                    # comment separator when headers are stripped.
                    sepl = []
                    sepl += [msg.manual_comment[0]]
                    sepl += msg.msgid.split("\n")[:2]
                    lines.extend(["# %s\n" % x for x in sepl])
                    lines.append("\n")
                else:
                    lines.extend(msg.to_lines(force=True,
                                              wrapf=ecat.wrapf()))
            diffstr = cjoin(lines)[:-1] # remove last newline
            if op.output:
                # FIX: do not shadow builtin 'file'; close on failure too.
                fout = open(op.output, "w")
                try:
                    fout.write(diffstr.encode(ecat.encoding()))
                finally:
                    fout.close()
            else:
                report(diffstr)
    else:
        updeff = pairs_update_effort(pspecs, quiet=op.quiet)
        ls = []
        for kw, desc, val, fmtval in updeff:
            ls.append(_("@info",
                        "%(quantity)s: %(value)s",
                        quantity=desc, value=fmtval))
        report("\n".join(ls))

    # Clean up.
    cleanup_tmppaths()
def _bar_stats(self, counts, title, count, summed, dlabel, dcolumn):
    """
    Report one statistics line as counts plus a text bar graph.

    The bar is absolute (cells proportional to a fixed per-cell count)
    or relative (fixed nominal width), per C{self.p.absolute}.

    @param counts: all (title, count, summed) triples, used to compute
        display padding and absolute scaling
    @param title: title of this line (currently unused here)
    @param count: count table for this line, indexed by category key
        ('trn'/'fuz'/'unt'/'tot') then by display column
    @param summed: whether this line is a summed (total) line
    @param dlabel: label to print after the counts
    @param dcolumn: which count column to display (0 or 1; presumably
        messages vs. words — confirm against caller)
    """

    # Count categories to display and chars/colors associated to them.
    # Note: Use only characters from Latin1.
    tspecs = (("trn", u"×", "green"),
              ("fuz", u"¤", "blue"),
              ("unt", u"·", "red"))

    # Find out maximum counts overall.
    maxcounts = dict(trn=0, fuz=0, unt=0, tot=0)
    maxcounts_jumbled = maxcounts.copy()
    for otitle, ocount, osummed in counts:
        # If absolute bars, compare counts only for non-summed counts.
        if self.p.absolute and osummed:
            continue

        # Count both messages and words, for the number display padding.
        for tkey in maxcounts_jumbled:
            for dcol in (0, 1):
                c = ocount[tkey][dcol]
                if maxcounts_jumbled[tkey] < c:
                    maxcounts_jumbled[tkey] = c

        for tkey in maxcounts:
            c = ocount[tkey][dcolumn]
            if maxcounts[tkey] < c:
                maxcounts[tkey] = c

    # Character widths of maximum count categories.
    maxcountscw = {}
    for tkey, tval in maxcounts.iteritems():
        maxcountscw[tkey] = len(str(tval))
    maxcountscw_jumbled = {}
    for tkey, tval in maxcounts_jumbled.iteritems():
        maxcountscw_jumbled[tkey] = len(str(tval))

    # Formatted counts by disjunct categories.
    fmt_counts = []
    for tkey, tchar, tcol in tspecs:
        cstr = str(count[tkey][dcolumn])
        if cstr == "0":
            # Show zero counts as a dash.
            cstr = "-"
        cfmt = ("%%%ds" % maxcountscw_jumbled[tkey]) % cstr
        if tcol is not None:
            fmt_counts.append(
                (ColorString("<%s>%%s</%s>") % (tcol, tcol)) % cfmt)
        else:
            fmt_counts.append(cfmt)
    fmt_counts = cjoin(fmt_counts, "/")

    # Maximum and nominal bar widths in characters.
    # TODO: Make parameters.
    if self.inline:
        nombarcw = 20
        maxbarcw = 50
    else:
        nombarcw = 40
        maxbarcw = 80

    def roundnear(x):
        # Round to nearest integer.
        return int(round(x, 0))

    def roundup(x):
        # Round up any strictly fractional value.
        ix = int(x)
        if x - ix > 1e-16:
            ix += 1
        return ix

    # Compute number of cells per category.
    n_cells = {}
    if self.p.absolute:
        # Absolute bar.
        # Pick the smallest count-per-cell step that fits the largest
        # total into the maximum bar width.
        n_per_cell = 0
        for npc in (1, 2, 5,
                    10, 20, 50,
                    100, 200, 500,
                    1000, 2000, 5000,
                    10000, 20000, 50000,
                    100000, 200000, 500000):
            if npc * maxbarcw > maxcounts["tot"]:
                n_per_cell = npc
                break
        if not n_per_cell:
            warning(_("@info",
                      "Count too large, cannot display bar graph."))
            return
        # Round up fuzzy/untranslated so small non-zero counts remain
        # visible; round the total to nearest.
        for tkey, roundf in (("fuz", roundup), ("unt", roundup),
                             ("tot", roundnear)):
            c = count[tkey][dcolumn]
            n_cells[tkey] = roundf(float(c) / n_per_cell)

        # Correct the situation when there are no cells.
        if n_cells["tot"] < 1:
            n_cells["tot"] = 1

        # Correct the situation when the sum of cells fuzzy+untranslated
        # goes over the total; give priority to untranslated when reducing.
        while n_cells["fuz"] + n_cells["unt"] > n_cells["tot"]:
            if n_cells["fuz"] >= n_cells["unt"]:
                n_cells["fuz"] -= 1
            else:
                n_cells["unt"] -= 1

        n_cells["trn"] = n_cells["tot"] - n_cells["fuz"] - n_cells["unt"]

    else:
        # Relative bar.
        if count["tot"][dcolumn] > 0:
            n_per_cell = float(nombarcw) / count["tot"][dcolumn]
        else:
            n_per_cell = 0
        for tkey in ("fuz", "unt"):
            c = count[tkey][dcolumn]
            n_cells[tkey] = roundup(c * n_per_cell)

        # When there are almost none translated, it may have happened that
        # the sum of cells fuzzy+untranslated is over nominal; reduce.
        while n_cells["fuz"] + n_cells["unt"] > nombarcw:
            if n_cells["fuz"] >= n_cells["unt"]:
                n_cells["fuz"] -= 1
            else:
                n_cells["unt"] -= 1

        n_cells["trn"] = nombarcw - n_cells["fuz"] - n_cells["unt"]

    # Create the bar.
    fmt_bar = []
    for tkey, tchar, tcol in tspecs:
        bar = tchar * n_cells[tkey]
        if tcol is not None:
            bar = (ColorString("<%s>%%s</%s>") % (tcol, tcol)) % bar
        fmt_bar.append(bar)
    fmt_bar = cjoin(fmt_bar)

    # Assemble final output.
    if not self.p.absolute or not summed:
        if count["tot"][dcolumn] == 0:
            # Suppress the bar entirely for empty lines.
            fmt_bar = ""
        report(cinterp("%s %s |%s|", fmt_counts, dlabel, fmt_bar))
    else:
        # Absolute summed lines show counts only, no bar.
        report(cinterp("%s %s", fmt_counts, dlabel))