def __init__ (self, params):
    """
    Set up the property-map extraction sieve from command-line parameters.

    Raises SieveError if either comment-prefix parameter is empty.
    """
    # Indicators to the caller: catalogs need not be synced nor monitored.
    self.caller_sync = False
    self.caller_monitored = False

    # Optional property-constraint file; parsed once up front.
    self.propcons = None
    if params.propcons:
        self.propcons = self._read_propcons(params.propcons)

    self.p = params

    # Both entry prefixes must be non-empty, otherwise entries in comments
    # could never be recognized.
    if not params.pmhead:
        raise SieveError(_("@info",
                           "Prefix which starts property map entries "
                           "in comments cannot be empty."))
    if not params.sdhead:
        raise SieveError(_("@info",
                           "Prefix which starts syntagma derivator entries "
                           "in comments cannot be empty."))

    # Collected entries.
    # Each element is a tuple of the form:
    # (ekeys, props, psep, kvsep, msg, cat)
    self.entries = []

    # Syntagma derivator, for synder entries.
    self.synder = Synder()
    self.sdord = 0
def __init__(self, params):
    """
    Parse the alternatives-resolution specification from parameters.

    ``params.alt`` is a comma-separated spec where the element ending in
    "t" gives the total number of alternatives per directive, and the
    plain integer gives the 1-based index of the alternative to select.

    Raises SieveError on malformed, incomplete, or out-of-range specs.
    """
    self.total = None
    self.select = None
    try:
        for spec in params.alt.split(","):
            if spec.endswith("t"):
                self.total = int(spec[:-1])
            else:
                self.select = int(spec)
    # Fix: was a bare ``except:`` which also swallowed e.g.
    # KeyboardInterrupt; only conversion/attribute failures are expected.
    except (ValueError, AttributeError):
        raise SieveError(
            _("@info",
              "Malformed specification for "
              "resolution of alternatives '%(spec)s'.",
              spec=params.alt))
    if self.total is None:
        raise SieveError(
            _("@info",
              "Number of alternatives per directive not given."))
    if self.select is None:
        raise SieveError(
            _("@info",
              "Index of selected alternative not given."))
    # Fix: condition was ``< 1`` which contradicted the error message;
    # a directive needs at least two alternatives to be meaningful.
    if self.total < 2:
        raise SieveError(
            _("@info",
              "Number of alternatives specified as %(num)d, "
              "but must be greater than 1.",
              num=self.total))
    if self.select < 1 or self.select > self.total:
        raise SieveError(
            _("@info",
              "Selected alternative no. %(ord)d is out of range.",
              ord=self.select))
    # Running count of resolved alternatives, reported at finalization.
    self.nresolved = 0
def process_header(self, hdr, cat):
    """
    Apply text filters to the catalog header and report validation spans.

    Increments ``self.nmod`` if any filter modified the header.
    """
    mcount = hdr.modcount
    for tfilter, tfname in self.tfilters:
        try:
            res = tfilter(hdr, cat)
        except TypeError:
            raise SieveError(
                _("@info",
                  "Cannot execute filter '%(filt)s'.",
                  filt=tfname))
        # Process result based on hook type.
        if isinstance(res, list):
            # Validation hook.
            # TODO: Better span reporting on headers.
            for part in res:
                for span in part[2]:
                    if len(span) >= 3:
                        errmsg = span[2]
                        # Fix: the tuple was passed as a second positional
                        # argument instead of %-formatting the message.
                        report("%s:header: %s" % (cat.filename, errmsg))
        else:
            # Side-effect hook, nothing to do.
            # TODO: Perhaps report returned number?
            pass
    if mcount < hdr.modcount:
        self.nmod += 1
def __init__ (self, params):
    """
    Configure the check sieve: which checks run and how results are shown.

    Raises SieveError if any requested check name is not known.
    """
    self.strict = params.strict
    self.showmsg = params.showmsg
    self.lokalize = params.lokalize

    # None means "run all known checks".
    self.selected_checks = None
    if params.check is not None:
        bad_names = [x for x in params.check if x not in _known_checks]
        if bad_names:
            raise SieveError(
                _("@info",
                  "Unknown checks selected: %(chklist)s.",
                  chklist=format_item_list(bad_names)))
        self.selected_checks = set(params.check)

    # Indicators to the caller:
    self.caller_sync = False # no need to sync catalogs to the caller
    self.caller_monitored = False # no need for monitored messages

    self.nproblems = 0
def make_match_group (names, negatable=False, orlinked=False):
    # Build the list of matchers for the given parameter names; each name
    # may also appear negated (prefixed with "n") when negatable is set.
    # NOTE(review): this chunk appears truncated — no value is returned
    # here, while an otherwise identical helper elsewhere in the file ends
    # with a combined matcher expression and a return; confirm against the
    # complete source before relying on this copy.
    names_negs = [(x, False) for x in names]
    if negatable:
        names_negs.extend([(x, True) for x in names])
    matchers = []
    for name, neg in names_negs:
        nname = name
        if neg:
            nname = "n" + name
        # Reads the matching values from the enclosing ``params``.
        values = getattr(params, nname)
        if values is None: # parameter not given
            continue
        if not isinstance(values, list):
            values = [values]
        for value in values:
            try:
                if name == "fexpr":
                    m = make_msg_matcher(value, params)
                else:
                    m = make_matcher(name, value, [], params, neg)
            except ExprError, e:
                raise SieveError(str_to_unicode(str(e)))
            matchers.append(m)
def __init__(self, params):
    """
    Set up context-separator conversion; the separator must be non-empty.
    """
    csep = unescape(params.sep)
    if not csep:
        raise SieveError(
            _("@info",
              "Context separator cannot be empty string."))
    self.csep = csep
    # Number of conversions performed, for final reporting.
    self.nconv = 0
def process_header(self, hdr, cat):
    """
    Prepare the Aspell checker for this catalog's language/environment.

    NOTE(review): this chunk appears to be the first part of a longer
    method — its continuation (the non-Aspell fallback and ignored-context
    loading) is not shown here; confirm against the complete source.
    """
    # Check if the catalog itself states the language, and if yes,
    # create the language-dependent stuff if not already created
    # for this language.
    clang = self.lang or cat.language()
    if not clang:
        raise SieveCatalogError(
            _("@info",
              "Cannot determine language for catalog '%(file)s'.",
              file=cat.filename))
    cenvs = self.envs or cat.environment() or []
    ckey = (clang, tuple(cenvs))
    if ckey not in self.aspells:
        # New language.
        self.aspellOptions["lang"] = clang.encode(self.loc_encoding)

        # Get Pology's internal personal dictonary for this langenv.
        if ckey not in self.personalDicts: # may be in but None
            self.personalDicts[ckey] = self._get_personal_dict(
                clang, cenvs)
        if self.personalDicts[ckey]:
            self.aspellOptions["personal-path"] = self.personalDicts[
                ckey].encode(self.loc_encoding)
        else:
            self.aspellOptions.pop("personal-path", None) # remove previous

        if not self.suponly:
            # Create Aspell object.
            import pology.external.pyaspell as A
            try:
                self.aspells[ckey] = A.Aspell(self.aspellOptions.items())
            except A.AspellConfigError, e:
                raise SieveError(
                    _("@info",
                      "Aspell configuration error:\n%(msg)s",
                      msg=e))
            except A.AspellError, e:
                raise SieveError(
                    _("@info",
                      "Cannot initialize Aspell:\n%(msg)s",
                      msg=e))
def parse_span(spanspec):
    """
    Parse a span specification "start:end" into a 2-tuple of ints.

    Either side may be empty, yielding None for that bound.
    Raises SieveError on malformed input.
    """
    lst = spanspec is not None and spanspec.split(":") or ("", "")
    if len(lst) != 2:
        raise SieveError(
            _("@info",
              "Wrong number of elements in span "
              "specification '%(spec)s'.",
              # Fix: error previously reported self.p.lspan regardless
              # of which span parameter was actually being parsed.
              spec=spanspec))
    nlst = []
    for el in lst:
        if not el:
            nlst.append(None)
        else:
            try:
                nlst.append(int(el))
            # Fix: was a bare ``except:``; int() failure is ValueError.
            except ValueError:
                raise SieveError(
                    _("@info",
                      "Not an integer number in span "
                      "specification '%(spec)s'.",
                      spec=spanspec))
    return tuple(nlst)
def __init__(self, params): self.p = params # Collect user and project configuration. prjsect = "project-" + params.proj if not config.has_section(prjsect): raise SieveError( _("@info", "Project '%(id)s' is not defined in user configuration.", id=params.proj)) self.prjcfg = config.section(prjsect) prjcfg = config.section(prjsect) usrcfg = config.section("user") # Collect project data. self.name = prjcfg.string("name") or usrcfg.string("name") if not self.name: warning( _("@info", "Field '%(field)s' is not set in " "project or user configuration.", field="name")) self.email = prjcfg.string("email") or usrcfg.string("email") if not self.email: warning( _("@info", "Field '%(field)s' is not set in " "project or user configuration.", field="email")) self.langteam = prjcfg.string("language-team") if not self.langteam: warning( _("@info", "Field '%(field)s' is not set in " "project configuration.", field="language-team")) self.teamemail = prjcfg.string("team-email") # ok not to be present self.langcode = prjcfg.string("language") or usrcfg.string("language") if not self.langcode: warning( _("@info", "Field '%(field)s' is not set in " "project configuration.", field="language")) self.encoding = (prjcfg.string("encoding") or usrcfg.string("encoding") or u"UTF-8") self.plforms = (prjcfg.string("plural-forms") or usrcfg.string("plural-forms")) if not self.plforms: warning( _("@info", "Field '%(field)s' is not set in " "project configuration.", field="plural-forms")) self.poeditor = (prjcfg.string("po-editor") or usrcfg.string("po-editor")) # ok not to be present
def __init__(self, params):
    """
    Parse replacement quote pairs for single and double quotes.

    For each kind, either a two-character string (``single``/``double``)
    or a comma-separated pair of strings (``longsingle``/``longdouble``)
    may be given, but not both.  Raises SieveError on conflicts or
    malformed specifications.
    """
    self.nrepl_single = 0
    self.nrepl_double = 0

    # Pair of single quotes.
    self.singles = ()
    if params.single is not None and params.longsingle is not None:
        raise SieveError(
            _(
                "@info",
                "Both single- and multi-character replacement of "
                "single quotes issued."))
    if params.single is not None:
        quotes = params.single
        if len(quotes) != 2:
            raise SieveError(
                _("@info",
                  "Invalid specification of single quotes (%(quotes)s), "
                  "expected two characters.",
                  quotes=quotes))
        self.singles = (quotes[0], quotes[1])
    elif params.longsingle is not None:
        quotes = split_escaped(params.longsingle, ",")
        if len(quotes) != 2:
            raise SieveError(
                _("@info",
                  "Invalid specification of single quotes (%(quotes)s), "
                  "expected two strings.",
                  quotes=quotes))
        self.singles = (quotes[0], quotes[1])

    # Pair of double quotes.
    self.doubles = ()
    if params.double is not None and params.longdouble is not None:
        raise SieveError(
            _(
                "@info",
                "Both single- and multi-character replacement of "
                "double quotes issued."))
    if params.double is not None:
        quotes = params.double
        if len(quotes) != 2:
            raise SieveError(
                _("@info",
                  "Invalid specification of double quotes (%(quotes)s), "
                  "expected two characters.",
                  quotes=quotes))
        self.doubles = (quotes[0], quotes[1])
    elif params.longdouble is not None:
        quotes = split_escaped(params.longdouble, ",")
        if len(quotes) != 2:
            # NOTE(review): this message quotes the value as '...' while
            # the three sibling messages use (...) — likely unintended
            # inconsistency, left as-is to preserve behavior.
            raise SieveError(
                _("@info",
                  "Invalid specification of double quotes '%(quotes)s', "
                  "expected two strings.",
                  quotes=quotes))
        self.doubles = (quotes[0], quotes[1])
def __init__(self, params):
    """
    Configure resolution of aggregate messages.

    At most one resolution criterion may be selected; with none given,
    the most frequent variant is chosen.
    """
    # Mutually exclusive criteria (currently only ``first``; kept as a
    # list so future criteria just get appended here).
    exclusive_picks = [params.first]
    # Fix: was ``> 2``, which could never trigger — the message says only
    # ONE criterion may be given, so more than one is the error.
    if sum(exclusive_picks) > 1:
        raise SieveError(
            _(
                "@info",
                "Only one resolution criterion for "
                "aggregate messages can be given."))
    if params.first:
        self.selvar = _selvar_first
    else:
        self.selvar = _selvar_frequent
    self.unfuzzy = params.unfuzzy
    self.keepsrc = params.keepsrc
    # Counters for final reporting.
    self.nresolved = 0
    self.nresolvedhdr = 0
def process_header(self, hdr, cat):
    """
    Select or build the spell checker for this catalog's langenv,
    and apply explicitly forced accelerator/markup settings.
    """
    # Check if the catalog itself states the language, and if yes,
    # create the language-dependent stuff if not already created
    # for this langenv.
    clang = self.lang or cat.language()
    if not clang:
        raise SieveCatalogError(
            _("@info",
              "Cannot determine language for catalog '%(file)s'.",
              file=cat.filename))
    cenvs = self.envs or cat.environment() or []
    ckey = (clang, tuple(cenvs))
    if ckey not in self.checkers:
        # Get Pology's internal word list for this langenv.
        # NOTE(review): the membership test uses ``clang`` while storage
        # is keyed by ``ckey`` — looks like a key mismatch; confirm
        # intended caching behavior against the complete source.
        if clang not in self.word_lists: # may be in but None
            self.word_lists[ckey] = _compose_word_list(clang, cenvs)
        # Create spell-checker object.
        # With suponly, pass no language so only supplemental word lists
        # are used by the provider.
        clang_mod = (self.suponly and [None] or [clang])[0]
        checker = _create_checker(self.providers, clang_mod,
                                  self.word_lists[ckey])
        if not checker:
            raise SieveError(
                _("@info",
                  "No spelling dictionary for language '%(lang)s' and "
                  "provider '%(prov)s'.",
                  lang=clang, prov=self.providers))
        self.checkers[ckey] = checker

    # Get language-dependent stuff.
    self.checker = self.checkers[ckey]

    # Force explicitly given accelerators and markup.
    if self.accel is not None:
        cat.set_accelerator(self.accel)
    if self.markup is not None:
        cat.set_markup(self.markup)
def process(self, msg, cat):
    """
    Run all text filters over each msgstr of the message.

    Filters are probed first as single-argument hooks, then as
    three-argument hooks; their return type decides how the result
    is applied (modification, validation, or side-effect only).
    """
    mcount = msg.modcount
    for i in range(len(msg.msgstr)):
        for tfilter, tfname in self.tfilters:
            try: # try as type *1A hook
                res = tfilter(msg.msgstr[i])
            except TypeError:
                try: # try as type *3* hook
                    res = tfilter(msg.msgstr[i], msg, cat)
                except TypeError:
                    raise SieveError(
                        _("@info",
                          "Cannot execute filter '%(filt)s'.",
                          filt=tfname))
            # Process result based on hook type.
            if isinstance(res, basestring):
                # Modification hook.
                msg.msgstr[i] = res
            elif isinstance(res, list):
                # Validation hook.
                if res:
                    report_msg_content(msg, cat,
                                       highlight=[("msgstr", i, res)],
                                       delim=("-" * 20))
            else:
                # Side-effect hook, nothing to do.
                # TODO: Perhaps report returned number?
                pass
    if mcount < msg.modcount:
        self.nmod += 1
        if self.p.showmsg:
            report_msg_content(msg, cat, delim=("-" * 20))
def _loadRules (self, lang, envs):
    """
    Load validation rules for the given language and environments,
    apply explicit inclusion/exclusion choices, optionally report
    loading statistics, and collect the distinct message filters
    defined by the active rules.

    Returns a (rules, ruleFilters) tuple.
    """
    # Load rules.
    rules = loadRules(lang, envs,
                      self.envOnly, self.customRuleFiles, self.stat,
                      self.ruleinfo)

    # Perhaps retain only those rules explicitly requested
    # in the command line, by their identifiers.
    selectedRules = set()
    srules = set()
    if self.ruleChoice:
        requestedRules = set([x.strip() for x in self.ruleChoice])
        foundRules = set()
        for rule in rules:
            if rule.ident in requestedRules:
                srules.add(rule)
                foundRules.add(rule.ident)
                # Explicit selection overrides a disabled flag.
                rule.disabled = False
        if foundRules != requestedRules:
            missingRules = list(requestedRules - foundRules)
            fmtMissingRules = format_item_list(sorted(missingRules))
            raise SieveError(_("@info",
                               "Some explicitly selected rules "
                               "are missing: %(rulelist)s.",
                               rulelist=fmtMissingRules))
        selectedRules.update(foundRules)
    if self.ruleChoiceRx:
        # Select by regex match on rule identifiers.
        identRxs = [re.compile(x, re.U) for x in self.ruleChoiceRx]
        for rule in rules:
            if (rule.ident
                and reduce(lambda s, x: s or x.search(rule.ident),
                           identRxs, False)
            ):
                srules.add(rule)
                selectedRules.add(rule.ident)
    if self.ruleChoice or self.ruleChoiceRx:
        rules = list(srules)

    # Inverse (exclusion) choices, applied to whatever survived above.
    selectedRulesInv = set()
    srules = set(rules)
    if self.ruleChoiceInv:
        requestedRules = set([x.strip() for x in self.ruleChoiceInv])
        foundRules = set()
        for rule in rules:
            if rule.ident in requestedRules:
                if rule in srules:
                    srules.remove(rule)
                foundRules.add(rule.ident)
        if foundRules != requestedRules:
            missingRules = list(requestedRules - foundRules)
            fmtMissingRules = format_item_list(sorted(missingRules))
            raise SieveError(_("@info",
                               "Some explicitly excluded rules "
                               "are missing: %(rulelist)s.",
                               rulelist=fmtMissingRules))
        selectedRulesInv.update(foundRules)
    if self.ruleChoiceInvRx:
        # Exclude by regex match on rule identifiers.
        identRxs = [re.compile(x, re.U) for x in self.ruleChoiceInvRx]
        for rule in rules:
            if (rule.ident
                and reduce(lambda s, x: s or x.search(rule.ident),
                           identRxs, False)
            ):
                if rule in srules:
                    srules.remove(rule)
                selectedRulesInv.add(rule.ident)
    if self.ruleChoiceInv or self.ruleChoiceInvRx:
        rules = list(srules)

    if self.ruleinfo:
        # Report how many rules were loaded, which environments applied,
        # and how many rules are active vs. disabled.
        ntot = len(rules)
        ndis = len([x for x in rules if x.disabled])
        nact = ntot - ndis
        totfmt = n_("@item:intext inserted below as %(tot)s",
                    "Loaded %(num)d rule",
                    "Loaded %(num)d rules",
                    num=ntot)
        if self.envOnly:
            envfmt = _("@item:intext inserted below as %(env)s",
                       "[only: %(envlist)s]",
                       envlist=format_item_list(envs))
        else:
            envfmt = _("@item:intext inserted below as %(env)s",
                       "[%(envlist)s]",
                       envlist=format_item_list(envs))
        actfmt = n_("@item:intext inserted below as %(act)s",
                    "%(num)d active",
                    "%(num)d active",
                    num=nact)
        disfmt = n_("@item:intext inserted below as %(dis)s",
                    "%(num)d disabled",
                    "%(num)d disabled",
                    num=ndis)
        subs = dict(tot=totfmt, env=envfmt, act=actfmt, dis=disfmt)
        if ndis and envs:
            report(_("@info:progress insertions from above",
                     "%(tot)s %(env)s (%(act)s, %(dis)s).",
                     **subs))
        elif ndis:
            report(_("@info:progress insertions from above",
                     "%(tot)s (%(act)s, %(dis)s).",
                     **subs))
        elif envs:
            report(_("@info:progress insertions from above",
                     "%(tot)s %(env)s.",
                     **subs))
        else:
            report(_("@info:progress insertions from above",
                     "%(tot)s.",
                     **subs))

        # List (or count) the explicitly selected/excluded rules.
        if selectedRules:
            selectedRules = selectedRules.difference(selectedRulesInv)
            n = len(selectedRules)
            if n <= 10:
                rlst = list(selectedRules)
                report(_("@info:progress",
                         "Selected rules: %(rulelist)s.",
                         rulelist=format_item_list(sorted(rlst))))
            else:
                report(n_("@info:progress",
                          "Selected %(num)d rule.",
                          "Selected %(num)d rules.",
                          num=n))
        elif selectedRulesInv:
            n = len(selectedRulesInv)
            if n <= 10:
                rlst = list(selectedRulesInv)
                report(_("@info:progress",
                         "Excluded rules: %(rulelist)s.",
                         rulelist=format_item_list(sorted(rlst))))
            else:
                report(n_("@info:progress",
                          "Excluded %(num)d rule.",
                          "Excluded %(num)d rules.",
                          num=n))

    # Collect all distinct filters from rules.
    ruleFilters = set()
    for rule in rules:
        if not rule.disabled:
            ruleFilters.add(rule.mfilter)
    if self.ruleinfo:
        nflt = len([x for x in ruleFilters if x is not None])
        if nflt:
            report(n_("@info:progress",
                      "Active rules define %(num)d distinct filter set.",
                      "Active rules define %(num)d distinct filter sets.",
                      num=nflt))

    return rules, ruleFilters
def process (self, msg, cat):
    """
    Send each (filtered) msgstr to the LanguageTool server and report
    any grammar errors found in the XML response.

    Raises SieveError if the server cannot be reached.
    """
    if msg.obsolete:
        return

    try:
        for msgstr in msg.msgstr:
            # Apply precheck filters.
            for pfilter, pfname in self.pfilters:
                try: # try as type F1A hook
                    msgstr = pfilter(msgstr)
                except TypeError:
                    try: # try as type F3* hook
                        msgstr = pfilter(msgstr, msg, cat)
                    except TypeError:
                        raise SieveError(
                            _("@info",
                              "Cannot execute filter '%(filt)s'.",
                              filt=pfname))

            # Query the LanguageTool HTTP API for this text.
            self.connection.request("GET", _REQUEST
                                    % (self.lang,
                                       urlencode({"text":msgstr.encode("UTF-8")})))
            response=self.connection.getresponse()
            if response:
                responseData=response.read()
                # Cheap pre-test before paying for XML parsing.
                if "error" in responseData:
                    dom=parseString(responseData)
                    for error in dom.getElementsByTagName("error"):
                        if error.getAttribute("ruleId") in self.disabledRules:
                            continue
                        self.nmatch+=1
                        report("-"*(len(msgstr)+8))
                        report(_("@info",
                                 "<bold>%(file)s:%(line)d(#%(entry)d)</bold>",
                                 file=cat.filename, line=msg.refline,
                                 entry=msg.refentry))
                        #TODO: create a report function in the right place
                        #TODO: color in red part of context that make the mistake
                        report(_("@info",
                                 "<bold>Context:</bold> %(snippet)s",
                                 snippet=error.getAttribute("context")))
                        report(_("@info",
                                 "(%(rule)s) <bold><red>==></red></bold> %(note)s",
                                 rule=error.getAttribute("ruleId"),
                                 note=error.getAttribute("msg")))
                        report("")
                        if self.lokalize:
                            repls = [_("@label", "Grammar errors:")]
                            repls.append(_(
                                "@info",
                                "<bold>%(file)s:%(line)d(#%(entry)d)</bold>",
                                file=cat.filename, line=msg.refline,
                                entry=msg.refentry
                            ))
                            repls.append(_(
                                "@info",
                                "(%(rule)s) <bold><red>==></red></bold> %(note)s",
                                rule=error.getAttribute("ruleId"),
                                note=error.getAttribute("msg")
                            ))
                            report_msg_to_lokalize(msg, cat, cjoin(repls, "\n"))
    except socket.error:
        raise SieveError(_("@info",
                           "Cannot connect to LanguageTool server. "
                           "Did you start it?"))
def process(self, msg, cat):
    """
    Spell-check all msgstr fields of a translated message and report
    unknown words, optionally with suggestions and Lokalize forwarding.
    """
    if not msg.translated:
        return

    failed_w_suggs = []
    for msgstr in msg.msgstr:
        # Skip message if explicitly requested.
        if flag_no_check_spell in manc_parse_flag_list(msg, "|"):
            continue

        # Apply precheck filters.
        for pfilter, pfname in self.pfilters:
            try: # try as type F1A hook
                msgstr = pfilter(msgstr)
            except TypeError:
                try: # try as type F3* hook
                    msgstr = pfilter(msgstr, msg, cat)
                except TypeError:
                    raise SieveError(
                        _("@info",
                          "Cannot execute filter '%(filt)s'.",
                          filt=pfname))

        # Split text into words.
        # TODO: See to use markup types somehow.
        words = proper_words(msgstr, True, cat.accelerator(), msg.format)

        # Eliminate from checking words matching the skip regex.
        if self.skip_rx:
            words = [x for x in words if not self.skip_rx.search(x)]

        # Eliminate from checking words explicitly listed as good.
        locally_ignored = manc_parse_list(msg, elist_well_spelled, ",")
        words = [x for x in words if x not in locally_ignored]

        for word in words:
            if not self.checker.check(word):
                failed = True
                self.unknown_words.add(word)

                if not self.words_only or self.lokalize:
                    suggs = self.checker.suggest(word)
                    incmp = False
                    if len(suggs) > 5: # do not put out too many words
                        suggs = suggs[:5]
                        incmp = True
                    failed_w_suggs.append((word, suggs))

                if not self.words_only:
                    if suggs:
                        fsuggs = format_item_list(suggs, incmp=incmp)
                        report_on_msg(
                            _("@info",
                              "Unknown word '%(word)s' "
                              "(suggestions: %(wordlist)s).",
                              word=word, wordlist=fsuggs),
                            msg, cat)
                    else:
                        report_on_msg(
                            _("@info",
                              "Unknown word '%(word)s'.",
                              word=word),
                            msg, cat)

    if self.lokalize and failed_w_suggs:
        repls = [_("@label", "Spelling errors:")]
        for word, suggs in failed_w_suggs:
            if suggs:
                # NOTE(review): ``incmp`` here is whatever value the last
                # checked word left behind, not the flag belonging to this
                # particular word — looks like a stale-variable reuse;
                # confirm against the complete source.
                fmtsuggs = format_item_list(suggs, incmp=incmp)
                repls.append(
                    _("@item",
                      "%(word)s (suggestions: %(wordlist)s)",
                      word=word, wordlist=fmtsuggs))
            else:
                repls.append("%s" % (word))
        report_msg_to_lokalize(msg, cat, cjoin(repls, "\n"))
def _read_propcons (self, fpath):
    """
    Read and compile property constraints from a constraint file.

    Each non-comment line has the form ``/keyrx/valrx/flags`` where the
    first character is the (non-alphanumeric) separator.  Returns a list
    of tuples (compiled key regex, string key regex, compiled value
    regex, string value regex, string of flags).

    Raises SieveError on a missing file or any malformed constraint.
    """
    if not os.path.isfile(fpath):
        raise SieveError(_("@info",
                           "Property constraint file '%(file)s' "
                           "does not exist.",
                           file=fpath))
    # Fix: close the file deterministically instead of leaking the handle.
    with open(fpath) as fh:
        lines = fh.read().decode("UTF-8").split("\n")
    if not lines[-1]:
        lines.pop()

    cmrx = re.compile(r"#.*") # strip trailing comments

    # Constraints collected as list of tuples:
    # (compiled key regex, string key regex,
    #  compiled value regex, string value regex,
    #  string of flags)
    propcons = []
    lno = 0

    def mkerr (problem):
        # Builds the SieveError message for the current line (reads lno
        # from the enclosing scope at call time).
        return _("@info",
                 "Invalid property map constraint "
                 "at %(file)s:%(line)d: %(snippet)s.",
                 file=fpath, line=lno, snippet=problem)

    known_flags = set(("i", "I", "t", "r"))
    for line in lines:
        lno += 1
        line = cmrx.sub("", line).strip()
        if not line:
            continue
        sep = line[0]
        if sep.isalnum():
            raise SieveError(mkerr(_("@item:intext",
                                     "alphanumeric separators "
                                     "not allowed")))
        lst = line.split(sep)
        if len(lst) < 4:
            raise SieveError(mkerr(_("@item:intext",
                                     "too few separators")))
        elif len(lst) > 4:
            raise SieveError(mkerr(_("@item:intext",
                                     "too many separators")))
        d1, keyrxstr, valrxstr, flags = lst
        unknown_flags = set(flags).difference(known_flags)
        if unknown_flags:
            fmtflags = format_item_list(sorted(unknown_flags), quoted=True)
            raise SieveError(mkerr(_("@item:intext",
                                     "unknown flags %(flaglist)s",
                                     flaglist=fmtflags)))
        rxs = []
        for rxstr, iflag in ((keyrxstr, "I"), (valrxstr, "i")):
            rxfls = re.U
            if iflag in flags:
                rxfls |= re.I
            # Anchor so the pattern must match the entire key/value.
            wrxstr = r"^(?:%s)$" % rxstr
            try:
                rx = re.compile(wrxstr, rxfls)
            # Fix: was a bare ``except:``; compilation failure is re.error.
            except re.error:
                raise SieveError(mkerr(_("@item:intext",
                                         "invalid regular expression "
                                         "'%(regex)s'",
                                         regex=rxstr)))
            rxs.append(rx)
        keyrx, valrx = rxs
        propcons.append((keyrx, keyrxstr, valrx, valrxstr, flags))

    return propcons
def process(self, msg, cat):
    """
    Spell-check all msgstr fields with Aspell, honoring ignored contexts,
    skip flags/regexes and locally listed good words; report failures
    directly, to an XML file, or to Lokalize as configured.
    """
    if not msg.translated:
        return

    id = 0 # Count msgstr plural forms
    failedSuggs = [] # pairs of wrong words and suggestions

    for msgstr in msg.msgstr:
        # Skip message with context in the ignoredContext list
        skip = False
        for context in self.ignoredContext:
            if context in (msg.msgctxt or u"").lower():
                skip = True
                break
            for comment in msg.auto_comment:
                if context in comment.lower():
                    skip = True
                    break
            if skip:
                break
        if skip:
            break

        # Skip message if explicitly requested.
        if flag_no_check_spell in manc_parse_flag_list(msg, "|"):
            continue

        # Apply precheck filters.
        for pfilter, pfname in self.pfilters:
            try: # try as type F1A hook
                msgstr = pfilter(msgstr)
            except TypeError:
                try: # try as type F3* hook
                    msgstr = pfilter(msgstr, msg, cat)
                except TypeError:
                    raise SieveError(
                        _("@info",
                          "Cannot execute filter '%(filt)s'.",
                          filt=pfname))

        # Split text into words.
        if not self.simsp:
            words = proper_words(msgstr, True, cat.accelerator(), msg.format)
        else:
            # NOTE: Temporary, remove when proper_words becomes smarter.
            words = msgstr.split()

        # Eliminate from checking words matching the skip regex.
        if self.skipRx:
            words = [x for x in words if not self.skipRx.search(x)]

        # Eliminate from checking words explicitly listed as good.
        locally_ignored = manc_parse_list(msg, elist_well_spelled, ",")
        words = [x for x in words if x not in locally_ignored]

        for word in words:
            # Encode word for Aspell.
            # NOTE(review): the encode call sits outside the
            # UnicodeEncodeError handler below, so an unencodable word
            # would propagate rather than warn — confirm intent.
            encodedWord = word.encode(self.encoding)
            spell = self.aspell.check(encodedWord)
            if spell is False:
                try:
                    self.nmatch += 1
                    if self.unknownWords is not None:
                        self.unknownWords.add(word)
                    else:
                        encodedSuggestions = self.aspell.suggest(
                            encodedWord)
                        suggestions = [
                            i.decode(self.encoding)
                            for i in encodedSuggestions
                        ]
                        failedSuggs.append((word, suggestions))
                        if self.xmlFile:
                            xmlError = spell_xml_error(
                                msg, cat, word, suggestions, id)
                            self.xmlFile.writelines(xmlError)
                        else:
                            spell_error(msg, cat, word, suggestions)
                except UnicodeEncodeError:
                    warning(
                        _("@info",
                          "Cannot encode word '%(word)s' in "
                          "selected encoding '%(enc)s'.",
                          word=word, enc=self.encoding))
        id += 1 # Increase msgstr id count

    if failedSuggs and self.lokalize:
        repls = [_("@label", "Spelling errors:")]
        for word, suggs in failedSuggs:
            if suggs:
                fmtsuggs = format_item_list(suggs)
                repls.append(
                    _("@item",
                      "%(word)s (suggestions: %(wordlist)s)",
                      word=word, wordlist=fmtsuggs))
            else:
                repls.append("%s" % (word))
        report_msg_to_lokalize(msg, cat, cjoin(repls, "\n"))
# NOTE(review): continuation fragment of the Aspell process_header method;
# the enclosing "if ckey not in self.aspells: ... if not self.suponly: try:"
# structure starts outside this chunk.
                self.aspells[ckey] = A.Aspell(self.aspellOptions.items())
            except A.AspellConfigError, e:
                raise SieveError(
                    _("@info",
                      "Aspell configuration error:\n%(msg)s",
                      msg=e))
            except A.AspellError, e:
                raise SieveError(
                    _("@info",
                      "Cannot initialize Aspell:\n%(msg)s",
                      msg=e))
        else:
            # Create simple internal checker that only checks against
            # internal supplemental dictionaries.
            personalDict = self.personalDicts[ckey]
            if not personalDict:
                raise SieveError(_("@info",
                                   "No supplemental dictionaries found."))
            self.aspells[ckey] = _QuasiSpell(personalDict, self.encoding)

        # Load list of contexts by which to ignore messages.
        self.ignoredContexts[ckey] = []
        ignoredContextFile = join(datadir(), "lang", clang, "spell",
                                  "ignoredContext")
        if isfile(ignoredContextFile):
            # Reads one lower-cased context substring per non-comment line.
            for line in open(ignoredContextFile, "r", "utf-8"):
                line = line.strip()
                if line.startswith("#") or line == "":
                    continue
                else:
                    self.ignoredContexts[ckey].append(line.lower())

    # Get language-dependent stuff.
def __init__(self, params):
    """
    Validate and prepare header-modification parameters: field settings,
    removal regexes, and title/copyright/license/author comment values.
    """
    # Parse field setting specifications.
    self.fields_values = []
    for field_value_str in (params.field or []):
        field_value = field_value_str.split(":", 1)
        if len(field_value) != 2:
            raise SieveError(
                _("@info",
                  "Invalid specification '%(spec)s' "
                  "of header field and value.",
                  spec=field_value_str))
        self.fields_values.append(field_value)

    # Set fields in reverse, so that 'after' and 'before' parameters
    # are followed by the order of appearance of fields in command line.
    if params.after or params.before:
        self.fields_values.reverse()

    # Prepare matching for field removal.
    if params.removerx is not None:
        rxs = []
        for rxstr in params.removerx:
            try:
                rx = re.compile(rxstr, re.U | re.I)
            except:
                raise SieveError(
                    _("@info",
                      "Invalid regular expression '%(regex)s' "
                      "for removing fields.",
                      regex=rxstr))
            rxs.append(rx)
        # Replace the raw strings with the compiled patterns in place.
        params.removerx = rxs

    # Check validity of comment values.
    # A title must not carry copyright/license/email content, which
    # belongs to the other comment types.
    for title in (params.title or []):
        if re.search(r"copyright|©|\(C\)|license|<.*?@.*?>",
                     title, re.I | re.U):
            raise SieveError(
                _("@info",
                  "Invalid value '%(val)s' for title comment "
                  "(it contains some elements appropriate "
                  "for other types of comments).",
                  val=title))
    if params.copyright is not None:
        if not re.search(r"copyright|©|\(C\)", params.copyright,
                         re.I | re.U):
            raise SieveError(
                _("@info",
                  "Invalid value '%(val)s' for copyright comment "
                  "(missing the word 'copyright'?).",
                  val=params.copyright))
    if params.license is not None:
        if not re.search(r"license", params.license, re.I):
            raise SieveError(
                _("@info",
                  "Invalid value '%(val)s' for license comment "
                  "(missing the word 'license'?).",
                  val=params.license))
    for author in (params.author or []):
        if not re.search(r"<.*?@.*?>", author):
            raise SieveError(
                _("@info",
                  "Invalid value '%(val)s' for author comment "
                  "(missing the email address?).",
                  val=author))

    self.p = params
def __init__ (self, params): self.nmatch = 0 # Number of match for finalize self.rules = [] # List of rules objects loaded in memory self.xmlFile = None # File handle to write XML output self.cacheFile = None # File handle to write XML cache self.cachePath = None # Path to cache file self.filename = "" # File name we are processing self.cached = False # Flag to indicate if process result is already is cache self.globalLang = params.lang self.globalEnvs = params.env self.envOnly = params.envonly self._rulesCache = {} self.accels = params.accel self.markup = params.markup self.ruleChoice = params.rule self.ruleChoiceRx = params.rulerx self.ruleChoiceInv = params.norule self.ruleChoiceInvRx = params.norulerx self.stat = params.stat self.showfmsg = params.showfmsg self.showmsg = params.showmsg self.lokalize = params.lokalize self.mark = params.mark self.byrule = params.byrule self.ruleinfo = params.ruleinfo self.branches = params.branch and set(params.branch) or None # Collect non-internal rule files. self.customRuleFiles = None if params.rfile or params.rdir: self.customRuleFiles = [] if params.rfile: self.customRuleFiles.extend(params.rfile) if params.rdir: for rdir in params.rdir: rfiles = collect_files_by_ext(rdir, "rules") self.customRuleFiles.extend(rfiles) # Also output in XML file ? if params.xml: xmlPath = params.xml if os.access(dirname(abspath(xmlPath)), os.W_OK): #TODO: create nice api to manage xml file and move it to rules.py self.xmlFile = open(xmlPath, "w", "utf-8") self.xmlFile.write('<?xml version="1.0" encoding="UTF-8"?>\n') self.xmlFile.write('<pos date="%s">\n' % strftime('%c').decode(getpreferredencoding())) else: warning(_("@info", "Cannot open file '%(file)s'. XML output disabled.", file=xmlPath)) if not exists(_CACHEDIR) and self.xmlFile: #Create cache dir (only if we want wml output) try: os.mkdir(_CACHEDIR) except IOError, e: raise SieveError(_("@info", "Cannot create cache directory '%(dir)s':\n" "%(msg)s", dir=_CACHEDIR, msg=e))
class Sieve (object):
    """
    Message-finding sieve: builds a combined matcher from command-line
    parameters and prepares optional regex replacement and filter hooks.
    """

    def __init__ (self, params):

        self.nmatch = 0

        self.p = params

        # Build matching function.
        # It takes as arguments: filtered message, message, catalog,
        # and highlight specification (which is filled on matches).

        def make_match_group (names, negatable=False, orlinked=False):
            # Builds one matcher expression over the given parameter
            # names; "n"-prefixed parameters are the negated variants.

            names_negs = [(x, False) for x in names]
            if negatable:
                names_negs.extend([(x, True) for x in names])

            matchers = []
            for name, neg in names_negs:
                nname = name
                if neg:
                    nname = "n" + name
                values = getattr(params, nname)
                if values is None: # parameter not given
                    continue
                if not isinstance(values, list):
                    values = [values]
                for value in values:
                    try:
                        if name == "fexpr":
                            m = make_msg_matcher(value, params)
                        else:
                            m = make_matcher(name, value, [], params, neg)
                    except ExprError, e:
                        raise SieveError(str_to_unicode(str(e)))
                    matchers.append(m)

            # Combine matchers with OR or AND short-circuit folding.
            if orlinked:
                expr = lambda *a: reduce(lambda s, m: s or m(*a),
                                         matchers, False)
            else:
                expr = lambda *a: reduce(lambda s, m: s and m(*a),
                                         matchers, True)
            return expr

        # - first matchers which are always AND
        expr_and = make_match_group([
            "transl", "obsol", "active", "plural", "maxchar",
            "lspan", "espan", "flag", "branch",
        ], negatable=True, orlinked=False)

        # - then matchers which can be AND or OR
        expr_andor = make_match_group([
            "msgctxt", "msgid", "msgstr", "comment", "fexpr",
        ], negatable=True, orlinked=self.p.or_match)

        # - all together
        self.matcher = lambda *a: expr_and(*a) and expr_andor(*a)

        # Prepare replacement.
        self.replrxs = []
        if self.p.replace is not None:
            if not self.p.msgstr:
                raise SieveError(
                    _("@info",
                      "Cannot perform replacement if match "
                      "on '%(field)s' is not given.",
                      field="msgstr"))
            rxflags = re.U
            if not self.p.case:
                rxflags |= re.I
            for rxstr in self.p.msgstr:
                self.replrxs.append(re.compile(rxstr, rxflags))

        # Resolve filtering hooks.
        self.pfilters = []
        for hreq in self.p.filter or []:
            self.pfilters.append(get_hook_ireq(hreq, abort=True))

        # Unless replacement or marking requested, no need to monitor/sync.
        if self.p.replace is None and not self.p.mark:
            self.caller_sync = False
            self.caller_monitored = False