def __init__(self, sparse_reader, check_options, dont_print=False):
    super().__init__(sparse_reader, check_options)
    self.grammar = grammalecte.GrammarChecker("fr")
    opts = check_options.get("grammalecte", {})
    self.setup_dictionnary(opts.get("dictionary"))
    self.whitelist = frozenset(opts.get("whitelist"))
    self.grammar.getSpellChecker().activateStorage()
    replacements = opts.get("replacements", {})
    before = self.prepare_replacements(replacements.get("before"))
    after = self.prepare_replacements(replacements.get("after"))
    # These replacements let us suppress known issues, especially with
    # formatting. There seems to be no API to dynamically add words to a
    # custom dictionary, so these replacements will do, as long as we
    # replace unknown words with similar words that exist in the dictionary.
    self.grammalecte_replacements_before = before
    self.grammalecte_replacements_after = after
    self.dont_print = dont_print
    self.all_the_errors = {}
    self.all_spells = {}
    self.spell_count = 0
    self.grammar_count = 0
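
# Illustrative sketch (not from the original source): the shape of the
# check_options mapping this constructor appears to expect, inferred from the
# keys it reads above. All concrete values below are hypothetical examples.
example_check_options = {
    "grammalecte": {
        "dictionary": "custom_words.txt",       # forwarded to setup_dictionnary()
        "whitelist": ["Grammalecte", "OCaml"],   # entries to leave unreported
        "replacements": {
            # substitutions applied before/after checking to mask known false positives
            "before": {"FooBar": "exemple"},
            "after": {},
        },
    },
}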
def find_errors(input_file, opts={}):
    """Read the file and run grammalecte on it"""
    with open(input_file, "r") as f:
        lines = f.readlines()
    border = opts.get("border")
    if not border or border == "":
        # No border, simply join text lines
        document_offset = 0
        raw_text = "".join(lines)
        debug("No border to detect")
    else:
        debug(str(border))  # May be None
        document_offset, raw_text = _compute_offset(lines, border)
        debug("Border found at {}".format(document_offset))
    # Clean up the text by redacting all matching patterns.
    for pattern in opts.get("filters", []):
        raw_text = _redact_text(re.compile(pattern), raw_text)
    debug(raw_text)
    text_input = raw_text.splitlines()
    text, lineset = txt.createParagraphWithLines(list(enumerate(text_input)))
    do_gramm = not opts.get("no_gramm", False)
    do_spell = not opts.get("no_spell", False)
    gramm_err = spell_err = []
    # Load grammalecte.
    gc = grammalecte.GrammarChecker("fr")
    # Compute grammar and spell check errors
    if do_gramm:
        gc.gce.setOption("apos", not opts.get("no_apos", False))
        gc.gce.setOption("nbsp", not opts.get("no_nbsp", False))
        gc.gce.setOption("esp", not opts.get("no_esp", False))
        gc.gce.setOption("tab", not opts.get("no_esp", False))
        gramm_err = gc.gce.parse(text, "FR", bDebug=False)
    if do_spell:
        spell_err = gc.oSpellChecker.parseParagraph(text, True)
    # Get columns and lines.
    gramm_err, spell_err = txt.convertToXY(gramm_err, spell_err, lineset)
    if do_gramm:
        final_errors = _prepare_gramm_errors(gramm_err, document_offset, text_input)
    else:
        final_errors = []
    if do_spell:
        final_errors += _prepare_spell_errors(spell_err, document_offset)
    return sorted(final_errors, key=itemgetter(2, 4))
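
# Illustrative usage sketch (assumption, not part of the original module):
# calling find_errors() as defined above on a hypothetical file. The private
# helpers it relies on (debug, _compute_offset, _redact_text,
# _prepare_gramm_errors, _prepare_spell_errors) and the re/grammalecte/txt
# imports are assumed to be defined elsewhere in the same module.
if __name__ == "__main__":
    for err in find_errors("notes.org", {"no_nbsp": True, "filters": [r"<<.+?>>"]}):
        print(err)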
import json
from collections import Counter

import grammalecte
from lxml import etree


def ocrquality(xml_file):
    list_error_gram = []
    list_error_spell = []
    list_unknown_word = []
    list_minus_word = []
    list_error_punct = 0
    xslt = etree.parse("encpos2txt.xsl")
    transform = etree.XSLT(xslt)
    text_file = transform(xml_file)
    text_file = str(text_file)
    oGrammarChecker = grammalecte.GrammarChecker("fr")
    # Get the file's errors as a JSON string
    result_error = oGrammarChecker.generateParagraphAsJSON(0, text_file, bEmptyIfNoErrors=True)
    data_store = json.loads(result_error)
    with open("GrammarCheck.json", "w") as f_write:
        json.dump(data_store, f_write)
    for error in data_store.get("lGrammarErrors"):
        # Add each grammar error's type to a list
        list_error_punct += 1
        list_error_gram.append(error.get("sType"))
    for error in data_store.get("lSpellingErrors"):
        # Add each spelling error's value to a list
        list_error_spell.append(error.get("sType"))
        if not error.get("sValue")[0].isupper():
            list_minus_word.append(error.get("sValue"))
        else:
            list_unknown_word.append(error.get("sValue"))
    # Turn each list into a dict to count the different error types
    dict_error_spell = dict(Counter(list_error_spell))
    dict_error_gram = dict(Counter(list_error_gram))
    dict_unknow_world = dict(Counter(list_unknown_word))
    dict_minus_world = dict(Counter(list_minus_word))
    # Drop the "xml" artifact; pop() avoids a KeyError when it is absent
    dict_minus_world.pop("xml", None)
    number_minus_world = sum(value for value in dict_minus_world.values())
    # Return a list with the various dictionaries as strings
    return [str(dict_error_spell), str(dict_unknow_world), str(dict_minus_world),
            str(number_minus_world), str(dict_error_gram)]
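
# Illustrative usage sketch (assumption): ocrquality() appears to expect an
# already-parsed lxml tree, since it feeds its argument straight to the XSLT
# transform, and it needs "encpos2txt.xsl" in the working directory. The
# input file name below is hypothetical.
if __name__ == "__main__":
    tree = etree.parse("ENCPOS_1900_01.xml")
    spell, unknown, lowercase, lowercase_count, grammar = ocrquality(tree)
    print("Unknown words:", unknown)
    print("Grammar error types:", grammar)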
import argparse
import json
import traceback
import time
import os
import concurrent.futures

from grammalecte.bottle import Bottle, run, request, response  #, template, static_file

import grammalecte
import grammalecte.text as txt
from grammalecte.graphspell.echo import echo


#### GRAMMAR CHECKER ####

oGrammarChecker = grammalecte.GrammarChecker("fr", "Server")
oSpellChecker = oGrammarChecker.getSpellChecker()
oTextFormatter = oGrammarChecker.getTextFormatter()
oGCE = oGrammarChecker.getGCEngine()


def parseText(sText, dOptions=None, bFormatText=False, sError=""):
    "parse <sText> and return errors in a JSON format"
    sJSON = '{ "program": "grammalecte-fr", "version": "' + oGCE.version + '", "lang": "' + oGCE.lang + '", "error": "' + sError + '", "data" : [\n'
    sDataJSON = ""
    for i, sParagraph in enumerate(txt.getParagraph(sText), 1):
        if bFormatText:
            sParagraph = oTextFormatter.formatText(sParagraph)
        sResult = oGrammarChecker.getParagraphErrorsAsJSON(
            i, sParagraph,
def main(files, opts={}):
    """Read the file and run grammalecte on it"""
    # Read input from stdin or first arg.
    text_input = [line for line in fileinput.input(files=files)]
    text, lineset = txt.createParagraphWithLines(list(enumerate(text_input)))
    do_gramm = ("no_gramm" not in opts or opts["no_gramm"] is False)
    do_spell = ("no_spell" not in opts or opts["no_spell"] is False)
    gramm_err = spell_err = []
    # Load grammalecte.
    gc = grammalecte.GrammarChecker("fr")
    # Compute grammar and spell check errors
    if do_gramm:
        gc.gce.setOption("apos", "no_apos" not in opts or opts["no_apos"] is False)
        gc.gce.setOption("nbsp", "no_nbsp" not in opts or opts["no_nbsp"] is False)
        gc.gce.setOption("esp", "no_esp" not in opts or opts["no_esp"] is False)
        gc.gce.setOption("tab", "no_esp" not in opts or opts["no_esp"] is False)
        gramm_err = gc.gce.parse(text, "FR", bDebug=False)
    if do_spell:
        spell_err = gc.oSpellChecker.parseParagraph(text, False)
    # Get columns and lines.
    gramm_err, spell_err = txt.convertToXY(gramm_err, spell_err, lineset)
    org_keywords = [
        "author", "caption", "category", "creator", "date", "email",
        "header", "keywords", "language", "name", "options", "title",
        "attr_.+"
    ]
    # Output
    if do_gramm:
        org_re = re.compile("^#\\+(?:{})\\:$".format("|".join(org_keywords)), re.IGNORECASE)
        for i in list(gramm_err):
            cur_line = text_input[i["nStartY"]]
            if i["sType"] == "esp":
                # Remove useless space warning for visual paragraph in
                # text modes
                next_line_no = i["nStartY"] + 1
                if next_line_no >= len(text_input):
                    # Weird, but maybe there is no blank line at the end
                    # of the file? (>= guards against indexing past the list)
                    next_line = ""
                else:
                    next_line = text_input[next_line_no].strip()
                if cur_line[i["nStartX"]] == "\n" and next_line == "":
                    continue
            elif i["sType"] == "nbsp":
                # Remove some unwanted nbsp warnings
                if cur_line[0:4] == "#-*-":
                    continue
                # The following line is not subject to an overflow
                # exception, even if i["nStartX"] + 1 > len(cur_line)
                m = org_re.match(cur_line[0:i["nStartX"] + 1])
                if m is not None and m.start() == 0:
                    continue
            print("grammaire|{}|{}|{}\n".format(
                i["nStartY"] + 1, i["nStartX"] + 1, i["sMessage"]))
    if do_spell:
        for i in list(spell_err):
            cur_line = text_input[i["nStartY"]]
            next_char_no = i["nStartX"] + 1
            org_re = re.compile("(?:{})\\:".format("|".join(org_keywords)), re.IGNORECASE)
            m = org_re.match(cur_line, i["nStartX"])
            if m is not None and m.start() == i["nStartX"]:
                continue
            print("orthographe|{}|{}|{}\n".format(
                i["nStartY"] + 1, i["nStartX"] + 1, "Mot absent du dictionnaire"))
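
# Illustrative call sketch (assumption): running this main() directly on one
# file. The file name and option flags are examples only; with an empty file
# list, fileinput.input() would fall back to reading stdin. The module-level
# imports (fileinput, re, grammalecte, grammalecte.text as txt) are assumed
# to be present.
if __name__ == "__main__":
    main(["chapter1.org"], {"no_nbsp": True})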
def main ():
    "launch the CLI (command line interface)"
    xParser = argparse.ArgumentParser()
    xParser.add_argument("-f", "--file", help="parse file (UTF-8 required!) [on Windows, -f is similar to -ff]", type=str)
    xParser.add_argument("-ff", "--file_to_file", help="parse file (UTF-8 required!) and create a result file (*.res.txt)", type=str)
    xParser.add_argument("-iff", "--interactive_file_to_file", help="parse file (UTF-8 required!) and create a result file (*.res.txt)", type=str)
    xParser.add_argument("-owe", "--only_when_errors", help="display results only when there are errors", action="store_true")
    xParser.add_argument("-j", "--json", help="generate list of errors in JSON (only with option --file or --file_to_file)", action="store_true")
    xParser.add_argument("-cl", "--concat_lines", help="concatenate lines not separated by an empty paragraph (only with option --file or --file_to_file)", action="store_true")
    xParser.add_argument("-tf", "--textformatter", help="auto-format text according to typographical rules (not with option --concat_lines)", action="store_true")
    xParser.add_argument("-tfo", "--textformatteronly", help="auto-format text and disable grammar checking (only with option --file or --file_to_file)", action="store_true")
    xParser.add_argument("-ctx", "--context", help="return errors with context (only with option --json)", action="store_true")
    xParser.add_argument("-wss", "--with_spell_sugg", help="add suggestions for spelling errors (only with option --file or --file_to_file)", action="store_true")
    xParser.add_argument("-pdi", "--personal_dict", help="load personal dictionary (JSON file)", type=str)
    xParser.add_argument("-w", "--width", help="width in characters (40 < width < 200; default: 100)", type=int, choices=range(40, 201, 10), default=100)
    xParser.add_argument("-lo", "--list_options", help="list options", action="store_true")
    xParser.add_argument("-lr", "--list_rules", nargs="?", help="list rules [regex pattern as filter]", const="*")
    xParser.add_argument("-sug", "--suggest", help="get suggestions list for given word", type=str)
    xParser.add_argument("-on", "--opt_on", nargs="+", help="activate options")
    xParser.add_argument("-off", "--opt_off", nargs="+", help="deactivate options")
    xParser.add_argument("-roff", "--rule_off", nargs="+", help="deactivate rules")
    xParser.add_argument("-d", "--debug", help="debugging mode (only in interactive mode)", action="store_true")
    xArgs = xParser.parse_args()
    oGrammarChecker = grammalecte.GrammarChecker("fr")
    oSpellChecker = oGrammarChecker.getSpellChecker()
    oLexicographer = oGrammarChecker.getLexicographer()
    oTextFormatter = oGrammarChecker.getTextFormatter()
    if xArgs.personal_dict:
        oJSON = loadDictionary(xArgs.personal_dict)
        if oJSON:
            oSpellChecker.setPersonalDictionary(oJSON)
    if not xArgs.json:
        echo("Python v" + sys.version)
        echo("Grammalecte v{}".format(oGrammarChecker.gce.version))
    # list options or rules
    if xArgs.list_options or xArgs.list_rules:
        if xArgs.list_options:
            oGrammarChecker.gce.displayOptions("fr")
        if xArgs.list_rules:
            oGrammarChecker.gce.displayRules(None if xArgs.list_rules == "*" else xArgs.list_rules)
        exit()
    # spell suggestions
    if xArgs.suggest:
        for lSugg in oSpellChecker.suggest(xArgs.suggest):
            if xArgs.json:
                sText = json.dumps({ "aSuggestions": lSugg }, ensure_ascii=False)
            else:
                sText = "Suggestions : " + " | ".join(lSugg)
            echo(sText)
        exit()
    # disable options
    if not xArgs.json:
        xArgs.context = False
    if xArgs.concat_lines:
        xArgs.textformatter = False
    # grammar options
    oGrammarChecker.gce.setOptions({"html": True, "latex": True})
    if xArgs.opt_on:
        oGrammarChecker.gce.setOptions({ opt:True for opt in xArgs.opt_on })
    if xArgs.opt_off:
        oGrammarChecker.gce.setOptions({ opt:False for opt in xArgs.opt_off })
    # disable grammar rules
    if xArgs.rule_off:
        for sRule in xArgs.rule_off:
            oGrammarChecker.gce.ignoreRule(sRule)
    if xArgs.file or xArgs.file_to_file:
        # file processing
        sFile = xArgs.file or xArgs.file_to_file
        hDst = open(sFile[:sFile.rfind(".")]+".res.txt", "w", encoding="utf-8", newline="\n") if xArgs.file_to_file or sys.platform == "win32" else None
        bComma = False
        if xArgs.json:
            output('{ "grammalecte": "'+oGrammarChecker.gce.version+'", "lang": "'+oGrammarChecker.gce.lang+'", "data" : [\n', hDst)
        for i, sText, lLineSet in generateParagraphFromFile(sFile, xArgs.concat_lines):
            if xArgs.textformatter or xArgs.textformatteronly:
                sText = oTextFormatter.formatText(sText)
            if xArgs.textformatteronly:
                output(sText, hDst)
                continue
            if xArgs.json:
                sText = oGrammarChecker.getParagraphErrorsAsJSON(i, sText, bContext=xArgs.context, bEmptyIfNoErrors=xArgs.only_when_errors,
                                                                 bSpellSugg=xArgs.with_spell_sugg, bReturnText=xArgs.textformatter, lLineSet=lLineSet)
            else:
                sText, _ = oGrammarChecker.getParagraphWithErrors(sText, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, nWidth=xArgs.width)
            if sText:
                if xArgs.json and bComma:
                    output(",\n", hDst)
                output(sText, hDst)
                bComma = True
            if hDst:
                echo("§ %d\r" % i, end="", flush=True)
        if xArgs.json:
            output("\n]}\n", hDst)
    elif xArgs.interactive_file_to_file:
        # file processing: interactive mode
        sFile = xArgs.interactive_file_to_file
        hDst = open(sFile[:sFile.rfind(".")]+".res.txt", "w", encoding="utf-8", newline="\n")
        for i, sText, lLineSet in generateParagraphFromFile(sFile, xArgs.concat_lines):
            if xArgs.textformatter:
                sText = oTextFormatter.formatText(sText)
            while True:
                sResult, lErrors = oGrammarChecker.getParagraphWithErrors(sText, bEmptyIfNoErrors=False, bSpellSugg=True, nWidth=xArgs.width)
                print("\n\n============================== Paragraph " + str(i) + " ==============================\n")
                echo(sResult)
                print("\n")
                vCommand = getCommand()
                if vCommand == "q":
                    # quit
                    hDst.close()
                    exit()
                elif vCommand == "n":
                    # next paragraph
                    hDst.write(sText)
                    break
                else:
                    nError, cAction, vSugg = vCommand
                    if 0 <= nError <= len(lErrors) - 1:
                        dErr = lErrors[nError]
                        if cAction == ">" and 0 <= vSugg <= len(dErr["aSuggestions"]) - 1:
                            sSugg = dErr["aSuggestions"][vSugg]
                            sText = sText[0:dErr["nStart"]] + sSugg + sText[dErr["nEnd"]:]
                        elif cAction == "=":
                            sText = sText[0:dErr["nStart"]] + vSugg + sText[dErr["nEnd"]:]
                        else:
                            print("Error. Action not possible.")
                    else:
                        print("Error. This error doesn’t exist.")
    else:
        # pseudo-console
        sInputText = "\n~==========~  Enter your text [/h /q]  ~==========~\n"
        sText = _getText(sInputText)
        while True:
            if sText.startswith("?"):
                for sWord in sText[1:].strip().split():
                    if sWord:
                        echo("* " + sWord)
                        for sMorph in oSpellChecker.getMorph(sWord):
                            echo("  {:<32} {}".format(sMorph, oLexicographer.formatTags(sMorph)))
            elif sText.startswith("!"):
                for sWord in sText[1:].strip().split():
                    if sWord:
                        for lSugg in oSpellChecker.suggest(sWord):
                            echo(" | ".join(lSugg))
            elif sText.startswith(">"):
                oSpellChecker.drawPath(sText[1:].strip())
            elif sText.startswith("="):
                sSearch = sText[1:].strip()
                if "=" in sSearch:
                    nCut = sSearch.find("=")
                    sFlexPattern = sSearch[0:nCut]
                    sTagsPattern = sSearch[nCut+1:]
                else:
                    sFlexPattern = sSearch
                    sTagsPattern = ""
                for aRes in oSpellChecker.select(sFlexPattern, sTagsPattern):
                    echo("{:<30} {:<30} {}".format(*aRes))
            elif sText.startswith("/o+ "):
                oGrammarChecker.gce.setOptions({ opt:True for opt in sText[3:].strip().split() if opt in oGrammarChecker.gce.getOptions() })
                echo("done")
            elif sText.startswith("/o- "):
                oGrammarChecker.gce.setOptions({ opt:False for opt in sText[3:].strip().split() if opt in oGrammarChecker.gce.getOptions() })
                echo("done")
            elif sText.startswith("/r- "):
                for sRule in sText[3:].strip().split():
                    oGrammarChecker.gce.ignoreRule(sRule)
                echo("done")
            elif sText.startswith("/r+ "):
                for sRule in sText[3:].strip().split():
                    oGrammarChecker.gce.reactivateRule(sRule)
                echo("done")
            elif sText in ("/debug", "/d"):
                xArgs.debug = not xArgs.debug
                echo("debug mode on" if xArgs.debug else "debug mode off")
            elif sText in ("/textformatter", "/tf"):
                xArgs.textformatter = not xArgs.textformatter
                # report the textformatter state (not the debug flag)
                echo("textformatter on" if xArgs.textformatter else "textformatter off")
            elif sText in ("/help", "/h"):
                echo(_HELP)
            elif sText in ("/lopt", "/lo"):
                oGrammarChecker.gce.displayOptions("fr")
            elif sText.startswith("/lr"):
                sText = sText.strip()
                sFilter = sText[sText.find(" "):].strip() if " " in sText else None
                oGrammarChecker.gce.displayRules(sFilter)
            elif sText in ("/quit", "/q"):
                break
            elif sText.startswith("/rl"):
                # reload (todo)
                pass
            elif sText.startswith("$"):
                for sParagraph in txt.getParagraph(sText[1:]):
                    if xArgs.textformatter:
                        sParagraph = oTextFormatter.formatText(sParagraph)
                    lParagraphErrors, lSentences = oGrammarChecker.gce.parse(sParagraph, bDebug=xArgs.debug, bFullInfo=True)
                    echo(txt.getReadableErrors(lParagraphErrors, xArgs.width))
                    for dSentence in lSentences:
                        echo("{nStart}:{nEnd}".format(**dSentence))
                        echo(" <" + dSentence["sSentence"] + ">")
                        for dToken in dSentence["lToken"]:
                            echo(" {0[nStart]:>3}:{0[nEnd]:<3} {1} {0[sType]:<14} {2} {0[sValue]:<16} {3:<10} {4}".format(
                                dToken,
                                "×" if dToken.get("bToRemove", False) else " ",
                                "!" if dToken["sType"] == "WORD" and not dToken.get("bValidToken", False) else " ",
                                " ".join(dToken.get("lMorph", "")),
                                "·".join(dToken.get("aTags", ""))))
                        echo(txt.getReadableErrors(dSentence["lGrammarErrors"], xArgs.width))
            else:
                for sParagraph in txt.getParagraph(sText):
                    if xArgs.textformatter:
                        sParagraph = oTextFormatter.formatText(sParagraph)
                    sRes, _ = oGrammarChecker.getParagraphWithErrors(sParagraph, bEmptyIfNoErrors=xArgs.only_when_errors, nWidth=xArgs.width, bDebug=xArgs.debug)
                    if sRes:
                        echo("\n" + sRes)
                    else:
                        echo("\nNo error found.")
            sText = _getText(sInputText)
def main ():
    xParser = argparse.ArgumentParser()
    xParser.add_argument("-f", "--file", help="parse file (UTF-8 required!) [on Windows, -f is similar to -ff]", type=str)
    xParser.add_argument("-ff", "--file_to_file", help="parse file (UTF-8 required!) and create a result file (*.res.txt)", type=str)
    xParser.add_argument("-owe", "--only_when_errors", help="display results only when there are errors", action="store_true")
    xParser.add_argument("-j", "--json", help="generate list of errors in JSON (only with option --file or --file_to_file)", action="store_true")
    xParser.add_argument("-cl", "--concat_lines", help="concatenate lines not separated by an empty paragraph (only with option --file or --file_to_file)", action="store_true")
    xParser.add_argument("-tf", "--textformatter", help="auto-format text according to typographical rules (not with option --concat_lines)", action="store_true")
    xParser.add_argument("-tfo", "--textformatteronly", help="auto-format text and disable grammar checking (only with option --file or --file_to_file)", action="store_true")
    xParser.add_argument("-ctx", "--context", help="return errors with context (only with option --json)", action="store_true")
    xParser.add_argument("-wss", "--with_spell_sugg", help="add suggestions for spelling errors (only with option --file or --file_to_file)", action="store_true")
    xParser.add_argument("-w", "--width", help="width in characters (40 < width < 200; default: 100)", type=int, choices=range(40, 201, 10), default=100)
    xParser.add_argument("-lo", "--list_options", help="list options", action="store_true")
    xParser.add_argument("-lr", "--list_rules", nargs="?", help="list rules [regex pattern as filter]", const="*")
    xParser.add_argument("-sug", "--suggest", help="get suggestions list for given word", type=str)
    xParser.add_argument("-on", "--opt_on", nargs="+", help="activate options")
    xParser.add_argument("-off", "--opt_off", nargs="+", help="deactivate options")
    xParser.add_argument("-roff", "--rule_off", nargs="+", help="deactivate rules")
    xParser.add_argument("-d", "--debug", help="debugging mode (only in interactive mode)", action="store_true")
    xArgs = xParser.parse_args()
    oGrammarChecker = grammalecte.GrammarChecker("fr")
    oSpellChecker = oGrammarChecker.getSpellChecker()
    oLexicographer = oGrammarChecker.getLexicographer()
    oTextFormatter = oGrammarChecker.getTextFormatter()
    if not xArgs.json:
        echo("Grammalecte v{}".format(oGrammarChecker.gce.version))
    # list options or rules
    if xArgs.list_options or xArgs.list_rules:
        if xArgs.list_options:
            oGrammarChecker.gce.displayOptions("fr")
        if xArgs.list_rules:
            oGrammarChecker.gce.displayRules(None if xArgs.list_rules == "*" else xArgs.list_rules)
        exit()
    # spell suggestions
    if xArgs.suggest:
        for lSugg in oSpellChecker.suggest(xArgs.suggest):
            if xArgs.json:
                sText = json.dumps({ "aSuggestions": lSugg }, ensure_ascii=False)
            else:
                sText = "Suggestions : " + " | ".join(lSugg)
            echo(sText)
        exit()
    # disable options
    if not xArgs.json:
        xArgs.context = False
    if xArgs.concat_lines:
        xArgs.textformatter = False
    # grammar options
    oGrammarChecker.gce.setOptions({"html": True, "latex": True})
    if xArgs.opt_on:
        oGrammarChecker.gce.setOptions({ opt:True for opt in xArgs.opt_on if opt in oGrammarChecker.gce.getOptions() })
    if xArgs.opt_off:
        oGrammarChecker.gce.setOptions({ opt:False for opt in xArgs.opt_off if opt in oGrammarChecker.gce.getOptions() })
    # disable grammar rules
    if xArgs.rule_off:
        for sRule in xArgs.rule_off:
            oGrammarChecker.gce.ignoreRule(sRule)
    sFile = xArgs.file or xArgs.file_to_file
    if sFile:
        # file processing
        hDst = open(sFile[:sFile.rfind(".")]+".res.txt", "w", encoding="utf-8", newline="\n") if xArgs.file_to_file or sys.platform == "win32" else None
        bComma = False
        if xArgs.json:
            output('{ "grammalecte": "'+oGrammarChecker.gce.version+'", "lang": "'+oGrammarChecker.gce.lang+'", "data" : [\n', hDst)
        for i, sText, lLineSet in generateParagraphFromFile(sFile, xArgs.concat_lines):
            if xArgs.textformatter or xArgs.textformatteronly:
                sText = oTextFormatter.formatText(sText)
            if xArgs.textformatteronly:
                output(sText, hDst)
                continue
            if xArgs.json:
                sText = oGrammarChecker.generateParagraphAsJSON(i, sText, bContext=xArgs.context, bEmptyIfNoErrors=xArgs.only_when_errors,
                                                                bSpellSugg=xArgs.with_spell_sugg, bReturnText=xArgs.textformatter, lLineSet=lLineSet)
            else:
                sText = oGrammarChecker.generateParagraph(sText, bEmptyIfNoErrors=xArgs.only_when_errors, bSpellSugg=xArgs.with_spell_sugg, nWidth=xArgs.width)
            if sText:
                if xArgs.json and bComma:
                    output(",\n", hDst)
                output(sText, hDst)
                bComma = True
            if hDst:
                echo("§ %d\r" % i, end="", flush=True)
        if xArgs.json:
            output("\n]}\n", hDst)
    else:
        # pseudo-console
        sInputText = "\n~==========~  Enter your text [/h /q]  ~==========~\n"
        sText = _getText(sInputText)
        while True:
            if sText.startswith("?"):
                for sWord in sText[1:].strip().split():
                    if sWord:
                        echo("* " + sWord)
                        for sMorph in oSpellChecker.getMorph(sWord):
                            echo("  {:<32} {}".format(sMorph, oLexicographer.formatTags(sMorph)))
            elif sText.startswith("!"):
                for sWord in sText[1:].strip().split():
                    if sWord:
                        for lSugg in oSpellChecker.suggest(sWord):
                            echo(" | ".join(lSugg))
            elif sText.startswith(">"):
                oSpellChecker.drawPath(sText[1:].strip())
            elif sText.startswith("="):
                for sRes in oSpellChecker.select(sText[1:].strip()):
                    echo(sRes)
            elif sText.startswith("/+ "):
                oGrammarChecker.gce.setOptions({ opt:True for opt in sText[3:].strip().split() if opt in oGrammarChecker.gce.getOptions() })
                echo("done")
            elif sText.startswith("/- "):
                oGrammarChecker.gce.setOptions({ opt:False for opt in sText[3:].strip().split() if opt in oGrammarChecker.gce.getOptions() })
                echo("done")
            elif sText.startswith("/-- "):
                for sRule in sText[3:].strip().split():
                    oGrammarChecker.gce.ignoreRule(sRule)
                echo("done")
            elif sText.startswith("/++ "):
                for sRule in sText[3:].strip().split():
                    oGrammarChecker.gce.reactivateRule(sRule)
                echo("done")
            elif sText == "/debug" or sText == "/d":
                xArgs.debug = not xArgs.debug
                echo("debug mode on" if xArgs.debug else "debug mode off")
            elif sText == "/textformatter" or sText == "/tf":
                xArgs.textformatter = not xArgs.textformatter
                # report the textformatter state (not the debug flag)
                echo("textformatter on" if xArgs.textformatter else "textformatter off")
            elif sText == "/help" or sText == "/h":
                echo(_HELP)
            elif sText == "/lopt" or sText == "/lo":
                oGrammarChecker.gce.displayOptions("fr")
            elif sText.startswith("/lr"):
                sText = sText.strip()
                sFilter = sText[sText.find(" "):].strip() if sText != "/lr" and sText != "/rules" else None
                oGrammarChecker.gce.displayRules(sFilter)
            elif sText == "/quit" or sText == "/q":
                break
            elif sText.startswith("/rl"):
                # reload (todo)
                pass
            else:
                for sParagraph in txt.getParagraph(sText):
                    # operate on the current paragraph, not on the whole input
                    if xArgs.textformatter:
                        sParagraph = oTextFormatter.formatText(sParagraph)
                    sRes = oGrammarChecker.generateParagraph(sParagraph, bEmptyIfNoErrors=xArgs.only_when_errors, nWidth=xArgs.width, bDebug=xArgs.debug)
                    if sRes:
                        echo("\n" + sRes)
                    else:
                        echo("\nNo error found.")
            sText = _getText(sInputText)