def parser (sText, oTokenizer, oDict, nWidth=100, bDebug=False, bEmptyIfNoErrors=False):
    "parse sText and return it as a formatted paragraph annotated with grammar and spelling errors"
    aGrammErrs = gce.parse(sText, "FR", bDebug)
    aSpellErrs = []
    for tToken in oTokenizer.genTokens(sText):
        # words unknown to the dictionary are reported as spelling errors
        if tToken.type == "WORD" and not oDict.isValidToken(tToken.value):
            aSpellErrs.append(tToken)
    if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs:
        return ""
    return txt.generateParagraph(sText, aGrammErrs, aSpellErrs, nWidth)
def generateText (iParagraph, sText, oTokenizer, oDict, bJSON, nWidth=100, bDebug=False, bEmptyIfNoErrors=False):
    "parse sText and return it as an annotated paragraph, or as a JSON fragment if bJSON is True"
    aGrammErrs = gce.parse(sText, "FR", bDebug)
    aSpellErrs = []
    for dToken in oTokenizer.genTokens(sText):
        if dToken['sType'] == "WORD" and not oDict.isValidToken(dToken['sValue']):
            aSpellErrs.append(dToken)
    if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs:
        return ""
    if not bJSON:
        return txt.generateParagraph(sText, aGrammErrs, aSpellErrs, nWidth)
    return " " + json.dumps({ "iParagraph": iParagraph, "lGrammarErrors": aGrammErrs, "lSpellingErrors": aSpellErrs }, ensure_ascii=False)
def generateText(sText, oTokenizer, oDict, bDebug=False, bEmptyIfNoErrors=False, nWidth=100):
    aGrammErrs, aSpellErrs = _getErrors(sText, oTokenizer, oDict, False, bDebug)
    if bEmptyIfNoErrors and not aGrammErrs and not aSpellErrs:
        return ""
    return txt.generateParagraph(sText, aGrammErrs, aSpellErrs, nWidth)
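
# NOTE: _getErrors() is called above but not defined in this excerpt. The sketch
# below is an assumption, not the original helper: it merely factors out the
# grammar + spelling collection already inlined in parser() and the earlier
# generateText(). The third argument (the literal False in the call above) is
# assumed to be a context flag; since its real handling is not shown here, the
# sketch accepts it but does not use it.
def _getErrors (sText, oTokenizer, oDict, bContext=False, bDebug=False):
    "hypothetical helper: return a tuple (grammar errors, spelling errors)"
    aGrammErrs = gce.parse(sText, "FR", bDebug)
    aSpellErrs = []
    for dToken in oTokenizer.genTokens(sText):
        if dToken['sType'] == "WORD" and not oDict.isValidToken(dToken['sValue']):
            aSpellErrs.append(dToken)
    return aGrammErrs, aSpellErrs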
def showResult (sText, res, bAutocorrect=False):
    "write sText to stdout, either annotated with its errors or auto-corrected if bAutocorrect is True"
    aGrammErrs = res[0]
    aSpellErrs = res[1]
    if bAutocorrect:
        sResult = sText
        # apply suggestions from the end of the paragraph backward,
        # so the offsets of earlier errors remain valid after each replacement
        aGrammErrs = sorted(aGrammErrs, key=lambda dGrammErr: -dGrammErr['nEnd'])
        for dGrammErr in aGrammErrs:
            if len(dGrammErr['aSuggestions']):
                sSuggestion = dGrammErr['aSuggestions'][0]
                sResult = sResult[0:dGrammErr['nStart']] + sSuggestion + sResult[dGrammErr['nEnd']:]
    else:
        sResult = txt.generateParagraph(sText, aGrammErrs, aSpellErrs)
    sys.stdout.write(sResult)
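
# Illustration (not part of the original module) of the reverse-order replacement
# used by showResult(): applying the later suggestion first leaves the start/end
# offsets of earlier errors untouched. The sample text and error records below are
# hypothetical and only mimic the shape of dGrammErr; no Grammalecte objects are needed.
def _demoReverseReplace ():
    sDemo = "the colour of the theatre"
    lDemoErrs = [
        { "nStart": 4, "nEnd": 10, "aSuggestions": ["color"] },
        { "nStart": 18, "nEnd": 25, "aSuggestions": ["theater"] },
    ]
    for dErr in sorted(lDemoErrs, key=lambda d: -d["nEnd"]):
        if dErr["aSuggestions"]:
            sDemo = sDemo[:dErr["nStart"]] + dErr["aSuggestions"][0] + sDemo[dErr["nEnd"]:]
    return sDemo  # -> "the color of the theater"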