import json

from grammarbot import GrammarBotClient


def checkGrammar(text):
    # Creating the client
    client = GrammarBotClient()
    # or, signup for an API Key to get higher usage limits here: https://www.grammarbot.io/
    #client = GrammarBotClient(api_key='my_api_key_here')  # GrammarBotClient(api_key=my_api_key_here)
    res = client.check(text)
    mistakes = []
    for mistake in res.matches:
        value = {}
        value['offset'] = mistake.replacement_offset
        value['length'] = mistake.replacement_length
        value['message'] = mistake.message
        value['replacements'] = mistake.replacements
        value['corrections'] = mistake.corrections
        value['rule'] = mistake.rule
        value['category'] = mistake.category
        mistakes.append(value)
    ret = mistakes
    #language = res.detected_language
    #result_is_incomplete = res.result_is_incomplete
    #num_error = len(res.matches)
    #ret = {'language': language, 'result_is_incomplete': result_is_incomplete, 'errors': num_error, 'mistakes': mistakes}
    return json.dumps(ret)
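# Usage sketch (added, not in the original; the sample sentence is
# illustrative): checkGrammar returns a JSON string, so decode it before
# reading the mistake fields.
if __name__ == '__main__':
    report = json.loads(checkGrammar("I cant remember how to go their"))
    for m in report:
        print(m['offset'], m['message'], m['replacements'][:3])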
from grammarbot import GrammarBotClient


def fix_tweet(text):
    # spell = SpellChecker()
    # list_text = list(text.split(" "))
    # misspelled = spell.unknown(list_text)
    # print(text)
    # for word in misspelled:
    #     print(word)
    #     text.replace(word, spell.correction(word))
    # print(text)
    # lang_tool = language_tool.LanguageTool("en-US")
    # matches = lang_tool.check(text)
    # tool = language_check.LanguageTool('en-US')
    # matches = tool.check(text)
    # print(language_check.correct(text, matches))
    # print(res.raw_json)
    NEWLINE = '\n'

    # Creating the client
    client = GrammarBotClient()
    # check the text, returns GrammarBotApiResponse object
    res = client.check(text)

    tweets_to_make = []
    correct_text = ""
    limit = ""

    # Construct tweet
    for match in res.matches:
        # Create messages
        if match.category == 'TYPOS':
            correct_text += "At the start of character number " + \
                str(match.replacement_offset) + " did you mean any of the following instead: " + \
                str(match.replacements[0:3]) + "."
        else:
            correct_text += match.message
        # Insert a new line after each recommendation
        correct_text += NEWLINE
        # Make sure it doesn't exceed the tweet character limit
        if (len(limit) + len(correct_text)) > 280:
            tweets_to_make.append(limit)
            limit = ""
        limit += correct_text
        correct_text = ""
    tweets_to_make.append(limit)
    return tweets_to_make
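# Usage sketch (added; the sample tweet text is invented): fix_tweet returns a
# list of reply chunks, each kept under Twitter's 280-character limit.
if __name__ == '__main__':
    for i, reply in enumerate(fix_tweet("I cant remember how to go their"), start=1):
        print("Reply {}:".format(i))
        print(reply)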
def grade_essay(essay_tuple, essay_list):
    raw_body = essay_tuple[1]
    citation_type = essay_tuple[2]
    client = GrammarBotClient()
    edited_body = ""
    cursor = 0
    citation_heading = ""
    if citation_type == "APA":
        citation_heading = "References"
    else:
        citation_heading = "Works Cited"
    if citation_heading not in raw_body:
        ret = "<p style=\"padding-top:1em\"><mark style=\"background-color:red;line-height:1.5em\">" + \
            "ERROR: No reference list/works cited header found (this may be due to a typo in the word \"References\" or the word \"Works Cited\"). Unable to mark essay." + \
            "</mark></p>" + raw_body
        return essay_tuple[0], ret
    body = check_citations(essay_tuple)
    body = body.split(citation_heading)
    raw_citations = "<p>" + citation_heading + "</p>" + body[-1]
    body = citation_heading.join(body[:-1])
    plagiarism_ret = check_plagiarism.delay(essay_tuple[3], essay_tuple[4], body, essay_list)
    plagiarism = plagiarism_ret.get()
    result = None
    if len(body) > 6000:
        result = client.check(body[:6000])
    else:
        result = client.check(body)
    for match in result.matches:
        # you also have access to match.category if you want
        offset = match.replacement_offset
        length = match.replacement_length
        edited_body += body[cursor:offset]
        snippet = body[offset:(offset + length)]
        # skip highlighting for markup fragments like "br" and "&emsp"
        if snippet != "br" and snippet != "&emsp":
            edited_body += "<mark style=\"background-color:red;\">" + snippet + "</mark>"
        else:
            # keep markup fragments in the output, just without highlighting
            edited_body += snippet
        cursor = offset + length
    # if cursor < text length, then add remaining text to new_text
    if cursor < len(body):
        edited_body += body[cursor:]
    if edited_body == "":
        edited_body = body
    edited_body = plagiarism + edited_body + raw_citations
    return essay_tuple[0], edited_body
def __init__(self, text, max_char_count=17, sensitivity=1):
    # Initialize checker with text and preprocess text by removing unwanted sentences
    # Sensitivity (0, 1): how strict you are with typos
    # Sensitivity of 1 is strictest, 0 is most lenient
    self.text = text
    self.max_char_count = max_char_count
    self.sensitivity = sensitivity
    self.client = GrammarBotClient()
    self.sentences = self.get_sentences()
    self.num_words = 0
    self.preprocess_text()
def error_counter(msg_body):
    tokens = nlp(msg_body)
    err_count = 0
    for sent in tokens.sents:
        x = sent.string.strip()
        #print(x)
        client = GrammarBotClient()
        res = client.check(x, 'en-GB')
        if len(res.matches) > 0:
            #print(res.matches)
            err_count += 1
    if err_count > 5:
        return "\n\nDue to high grammatical errors this job is probably fake"
    else:
        return "\n\nDue to low grammatical errors this job is probably real"
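# Usage sketch (added): error_counter expects a module-level spaCy pipeline
# named `nlp`; the job description below is made up for illustration.
# import spacy
# nlp = spacy.load('en')
# print(error_counter("We is hiring! The salary are very high. Apply noww."))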
def grammar_analysis(lines_of_text):
    client = GrammarBotClient(api_key='TODO: FILL IN API KEY')
    all_text = ''
    for txt in lines_of_text:
        all_text += (str(txt) + '/')
    # check the text, returns GrammarBotApiResponse object
    errors = client.check(all_text)
    # get error counts
    metadata = {}
    error_types = []
    for err in errors.matches:
        start = err.replacement_offset
        end = start + err.replacement_length
        mistake = all_text[start:end]
        # format suggestions
        suggestions = ''
        for suggestion in err.replacements:
            suggestions += suggestion + ', '
        metadata[mistake] = suggestions[:-2]  # take off last comma
        error_types.append(err.category)
    # get line numbers for help
    line_numbers = []
    for mistake in metadata.keys():
        for i, s in enumerate(lines_of_text):
            if mistake in s:
                line_numbers.append(i)
    type_counts = dict(Counter(error_types))
    type_counts_list = type_counts.items()
    total_errors = sum(type_counts.values())
    result_string = json.dumps(errors.raw_json, indent=4)
    mistake_info = []
    for i, mistake in enumerate(metadata.keys()):
        mistake_info.append((line_numbers[i], mistake, metadata[mistake]))
    flag = (total_errors / len(lines_of_text) > 1)
    return total_errors, type_counts_list, mistake_info, flag
def checkingCredibility(self):
    domain = ""
    nameList = self.website.split('.')
    if nameList[0] == "www":
        domain = nameList[2]
    else:
        domain = nameList[1]
    if domain.lower() in CREDIBLE_DOMAINS:
        return True
    else:
        client = GrammarBotClient()
        text = self.getText()
        errors = client.check(text, "en-US")
        # few grammar mistakes suggest the page is credible
        if len(errors.matches) < 4:
            return True
        else:
            return False
def initialize_api():
    """Initialize API Function

    Make sure to enter in a GrammarBot api key to run program
    """
    my_api_key = 'enter key here'  # replace with your GrammarBot API key
    client = GrammarBotClient(api_key=my_api_key)
    return client
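# Usage sketch (added): the returned client exposes check(); this only works
# once a real GrammarBot key replaces the placeholder above.
# client = initialize_api()
# res = client.check('I cant remember how to go their')
# print(len(res.matches), 'issues found')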
def grammar_checker(text):
    correct_texts = []  # collect possible ways of fixing text

    # spell checking & grammar checking with language_check module
    tool = language_check.LanguageTool('en-US')
    matches = tool.check(text)
    correct_texts.append(language_check.correct(text, matches))

    # spell checking & grammar checking with grammarbot module
    client = GrammarBotClient()
    res = client.check(text)
    for match in res.matches:
        for correction in match.corrections:
            correct_texts.append(correction)

    print("grammar check")
    for candidate in correct_texts:  # a distinct name so the parameter isn't shadowed
        print(candidate)
    return correct_texts
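# Usage sketch (added): each entry of the returned list is a candidate fix;
# language_check's fully corrected text is appended first, followed by
# GrammarBot's per-match corrections.
# candidates = grammar_checker("I cant remember how to go their")
# print(candidates[0])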
def output():
    client = GrammarBotClient()
    client = GrammarBotClient(api_key='AF5B9M2X')  # GrammarBotClient(api_key=my_api_key_here)
    res = client.check(text.get("1.0", "end-1c"))
    # GrammarBotApiResponse(matches=[GrammarBotMatch(offset=2, length=4, rule={'CANT'}, category={'TYPOS'}), GrammarBotMatch(offset=26, length=5, rule={'CONFUSION_RULE'}, category={'TYPOS'})])
    res.detected_language  # "en-US"
    res.result_is_incomplete  # False
    res.matches  # [GrammarBotMatch(offset=2, length=4, rule={'CANT'}, category={'TYPOS'}), GrammarBotMatch(offset=26, length=5, rule={'CONFUSION_RULE'}, category={'TYPOS'})]
    match0 = res.matches[0]  # GrammarBotMatch(offset=2, length=4, rule={'CANT'}, category={'TYPOS'})
    match0.replacement_offset  # 2
    match0.replacement_length  # 4
    match0.replacements  # ["can't", 'cannot']
    match0.corrections
    # res.raw_json
    text2 = tk.Text(root, width=40, height=30)
    text2.place(x=500, y=50)
    text2.insert(tk.END, match0.corrections[0])
class Grammar():
    def __init__(self):
        dotenv_path = os.path.join(os.path.dirname(__file__), '.env')
        load_dotenv(dotenv_path)
        rapidapi_key = os.environ.get('RAPIDAPI_KEY')
        self.client = GrammarBotClient(api_key=rapidapi_key)

    def check(self, text):
        message = ''
        res = self.client.check(text)
        for i, match in enumerate(res.matches):
            message += f'\n{i+1}. {match.message}, {match.replacements[:3]}\n'
        return message
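# Usage sketch (added): assumes a RAPIDAPI_KEY entry in the .env file that the
# constructor above loads.
# grammar = Grammar()
# print(grammar.check('I cant remember how to go their'))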
class GrammarChecker:
    def __init__(self):
        self.client = GrammarBotClient(api_key='KS9C5N3Y')

    def check(self, text, file=None):
        res = self.client.check(text)
        for match in res.matches:
            if file:
                file.write("Position {} \n Replacements {} \n Rule {} Category {} Type {}\n".format(
                    match.replacement_offset, match.replacements, match.rule, match.category, match.type))
            else:
                print("Position {} \n Replacements {} \n Rule {} Category {} Type {}\n".format(
                    match.replacement_offset, match.replacements, match.rule, match.category, match.type))
        return res
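# Usage sketch (added): check() prints each match to stdout by default, or
# writes it to any open file handle passed as `file`.
# checker = GrammarChecker()
# with open('report.txt', 'w') as fh:
#     checker.check('I cant remember how to go their', file=fh)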
# import the client library
from grammarbot import GrammarBotClient

#
# Creating the client
# ===================

client = GrammarBotClient()

# or, signup for an API Key to get higher usage limits here: https://www.grammarbot.io/
client = GrammarBotClient(api_key='my_api_key_here')  # GrammarBotClient(api_key=my_api_key_here)

# you can even set the base URI to a different server
# client = GrammarBotClient(base_uri='http://backup.grammarbot.io:80')

# Analyzing the text
# ==================
# There is only one method to perform the analysis, viz. GrammarBotClient.check
# method.

text = 'I cant remember how to go their'

# check the text, returns GrammarBotApiResponse object
res = client.check(text)
# GrammarBotApiResponse(matches=[GrammarBotMatch(offset=2, length=4, rule={'CANT'}, category={'TYPOS'}), GrammarBotMatch(offset=26, length=5, rule={'CONFUSION_RULE'}, category={'TYPOS'})])

# Inspecting the GrammarBotApiResponse object
# ===========================================

# check detected language
res.detected_language  # "en-US"

# check if the result is incomplete
res.result_is_incomplete  # False
def get_essay(request):
    if request.user.is_authenticated:
        if request.method == 'POST':
            form = EssayForm(request.POST)
            if form.is_valid():
                text_buffer = form.cleaned_data.get("essay_content").encode('utf-8')
                question_buffer = form.cleaned_data.get("question")
                result = model.calculate_score(text_buffer, question_buffer)
                from grammarbot import GrammarBotClient
                client = GrammarBotClient()
                sentences = text_buffer.split('.')
                content = ''
                for sentence in sentences:
                    res = client.check(sentence)
                    grm = []
                    counter = 0
                    sentence = sentence.replace("'", "\'")
                    #print res.matches
                    for each in res.matches:
                        grm.append([
                            each.replacement_offset, each.replacement_length,
                            each.message.replace('"', '\"').replace("'", '\"'),
                            each.category.replace('"', '\"')
                        ])
                    if len(grm) == 0:
                        content += sentence + '.'
                        continue
                    for i in range(len(grm)):
                        content += sentence[counter:grm[i][0]] + \
                            '<span class="mytooltip" style="color:white;background-color:#FB8C8C">' + \
                            sentence[grm[i][0]:grm[i][0] + grm[i][1]] + \
                            '<span class="mytooltiptext">' + \
                            grm[i][2].replace("'", "\'") + '</span>' + '</span>'
                        counter = grm[i][0] + grm[i][1]
                    # append the rest of the sentence after the last match
                    content += sentence[counter:]
                    content += '.'
                print(content)
                if result >= 0:
                    Essays.objects.create(question=question_buffer,
                                          essay_content=text_buffer,
                                          predicted_score=result)
                    result = round(result, 1)
                    return render(
                        request, 'main/result.html', {
                            'result': result,
                            'essay': json.dumps(content),
                            'grm': json.dumps(grm),
                            'original': json.dumps(text_buffer)
                        })
                else:
                    result = 0
                    Essays.objects.create(question=question_buffer,
                                          essay_content=text_buffer,
                                          predicted_score=result)
                    return render(
                        request, 'main/result.html', {
                            'result': 'Sorry, these questions are not ready to be scored by the system yet, but your submission will help the system develop.'
                        })
        else:
            form = EssayForm()
            return render(request, 'main/scoring.html', {'form': form})
    else:
        return render(request, 'main/home.html')
def writing(request, article_id):
    if request.method == "POST":
        form = WritingContent(request.POST)
        if form.is_valid():
            text = form.cleaned_data['text']
            text = text.strip("\n\t\r .,?!;")
            client = GrammarBotClient(api_key='KS9C5N3Y')
            res_js = client.check(text).raw_json
            tokens = text.count(" ") + 1
            sens = text.count(".") + text.count("?") + text.count("!") + text.count(";") + 1
            result = []
            cnt_spelling = 0
            cnt_grammar = 0
            warn_start = "<span style=\"background-color:yellow\">"
            error_start = "<span style=\"background-color:red\">"
            we_end = "</span>"
            conflict = 0
            cnt_error = 0
            for message in res_js['matches']:
                offset = message['offset'] + conflict
                leng = message['length']
                if message['rule']['issueType'] == 'non-conformance':
                    text = text[0:offset] + warn_start + text[offset:offset + leng] + we_end + text[offset + leng:]
                    conflict += len(warn_start) + len(we_end)  # account for the inserted markup
                    error = {}
                    error['id'] = cnt_error
                    error['id2'] = cnt_error * 1000
                    error['msg'] = message['message']
                    try:
                        error['dst'] = message['description']
                    except KeyError:
                        error['dst'] = ""
                    error['rep'] = message['replacements']
                    cnt_error += 1
                    result.append(error)
                elif message['rule']['issueType'].find("grammar") != -1 or message['rule']['issueType'].find("misspelling") != -1:
                    if message['rule']['issueType'].find("grammar") != -1:
                        cnt_grammar += 1
                    else:
                        cnt_spelling += 1
                    text = text[0:offset] + error_start + text[offset:offset + leng] + we_end + text[offset + leng:]
                    conflict += len(error_start) + len(we_end)  # account for the inserted markup
                    error = {}
                    error['id'] = cnt_error
                    error['id2'] = cnt_error * 1000
                    error['msg'] = message['message']
                    try:
                        error['dst'] = message['description']
                    except KeyError:
                        error['dst'] = ""
                    error['rep'] = message['replacements']
                    cnt_error += 1
                    result.append(error)
            # use true division: integer division would collapse the ratios to 0 or 1
            point = {
                'grammar': (1 - (cnt_grammar / sens)) * 100,
                'spelling': (1 - (cnt_spelling / tokens)) * 100,
                'total': int((1 - 0.5 * (cnt_grammar / sens) - 0.5 * (cnt_spelling / tokens)) * 100)
            }
            return render(request, 'writing_results.html', {
                'errors': result,
                'fixed': text,
                'point': point
            })
        else:
            conn = MongoClient()
            db = conn.MyProject
            collection = db.Writing
            records = collection.find()
            topics = list(records)
            context = {'topic': topics[article_id], 'form': WritingContent()}
            return render(request, 'writing.html', context)
    else:
        conn = MongoClient()
        db = conn.MyProject
        collection = db.Writing
        records = collection.find()
        topics = list(records)
        context = {'topic': topics[article_id], 'form': WritingContent()}
        return render(request, 'writing.html', context)
import pandas as pd
from flask import Flask, render_template, url_for, request
from flask_bootstrap import Bootstrap
from googletrans import Translator
from grammarbot import GrammarBotClient
# from similarity import model_similarity

app = Flask(__name__)
Bootstrap(app)

# function to modify string with style transfer according to the personality
translator = Translator()
checker = GrammarBotClient(api_key='KS9C5N3Y')


def paraphrased(in_text):
    phrased = []
    for i in ['ko', 'ja', 'el', 'fr', 'tl', 'ar', 'ht', 'af', 'sq', 'am']:
        par_text = translator.translate(in_text, dest=i).text
        phrased.append(translator.translate(par_text, dest='en').text.capitalize())
    t = [i for i in phrased if i.lower() != in_text.lower()]
    return "No possible phrases" if not list(set(t)) else list(set(t))


def grammar_check(alist):
    final_list = []
    #print(alist)
    for i in alist:
        x = [j.corrections for j in checker.check(i).matches]
        if len(x) < 1:
            final_list.append(i)
        else:
            final_list.append(x)
    return final_list
print("There are {} targeted sentences.".format(len(targeted_sentences))) # print(targeted_sentences[100]) wrong_sentences = [] for j in range(0, len(targeted_sentences)): wrong_sentences.append(targeted_sentences[j].replace(" she ", " her ")) print("There are {} wrong sentences.".format(len(wrong_sentences))) # print(wrong_sentences[100]) counter = 0 for j in range(1, 100): try: client = GrammarBotClient( api_key='AF5B9M2X') # GrammarBotClient(api_key=my_api_key_here) res = client.check( wrong_sentences[j] ) # GrammarBotApiResponse(matches=[GrammarBotMatch(offset=2, length=4, rule={'CANT'}, category={'TYPOS'}), GrammarBotMatch(offset=26, length=5, rule={'CONFUSION_RULE'}, category={'TYPOS'})]) match0 = res.matches[0] if match0.corrections[0] == targeted_sentences[j]: counter += 1 except IndexError: j = j + 1 print(counter) # client = GrammarBotClient(api_key='AF5B9M2X') # res = client.check(wrong_sentences[7]) # match0 = res.matches[0] # print(match0.corrections[0] ) # print(targeted_sentences[7])
class GrammarChecker:
    def __init__(self, text, max_char_count=17, sensitivity=1):
        # Initialize checker with text and preprocess text by removing unwanted sentences
        # Sensitivity (0, 1): how strict you are with typos
        # Sensitivity of 1 is strictest, 0 is most lenient
        self.text = text
        self.max_char_count = max_char_count
        self.sensitivity = sensitivity
        self.client = GrammarBotClient()
        self.sentences = self.get_sentences()
        self.num_words = 0
        self.preprocess_text()

    def get_sentences(self):
        # Returns sentences from text
        nlp = English()
        nlp.add_pipe(nlp.create_pipe('sentencizer'))  # updated
        doc = nlp(self.text)
        sentences = [sent.string.strip() for sent in doc.sents]
        return sentences

    def preprocess_text(self):
        # Remove sentences with words greater than max_char_count
        count = 0
        # iterate over a copy, since removing items from a list while
        # iterating over it skips elements
        for sent in list(self.sentences):
            for word in sent.split():
                if len(word) > self.max_char_count:
                    if sent in self.sentences:
                        self.sentences.remove(sent)
                    count += 1
        self.num_words = count
        self.text = ' '.join(sent for sent in self.sentences)

    def measure_grammar(self, sensitivity=1, ignore_doublespace=True, ignore_punctuation=True):
        # Return a measure of typo appearance in text
        res = self.client.check(self.text)
        matches = res.matches
        results = []
        num_typos = 0
        if ignore_doublespace and ignore_punctuation:
            for match in matches:
                if match.category != 'TYPOGRAPHY' and match.category != 'PUNCTUATION':
                    results.append(match)
            num_typos = len(results)
        elif ignore_doublespace:
            for match in matches:
                if match.category != 'TYPOGRAPHY':
                    results.append(match)
            num_typos = len(results)
        elif ignore_punctuation:
            for match in matches:
                if match.category != 'PUNCTUATION':
                    results.append(match)
            num_typos = len(results)
        num_sentences = len(self.sentences)
        # guard against division by zero when preprocessing removed no words
        measure = num_typos**1.5 * math.exp(num_typos - (0.25 * num_sentences)) * sensitivity / max(self.num_words, 1)
        return measure
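# Usage sketch (added; the sample text is invented): a lower measure means
# cleaner text, and `sensitivity` scales how harshly typos are penalized.
# checker = GrammarChecker('This sentence are wrong. This one is fine.')
# print(checker.measure_grammar(sensitivity=checker.sensitivity))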
def get_grammar(file):
    client = GrammarBotClient()
    return scoreAll(file, client)
import math
import re
import sys
#import language_check
import cosine_similarity as keywordVal
from grammarbot import GrammarBotClient

client = GrammarBotClient(api_key='AF5B9M2X')  #pip install grammarbot


def givVal():
    # KEYWORDS =>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    # TODO : Enhance this thing
    model_answer = sys.argv[1]
    #tool = language_check.LanguageTool('en-US')
    keywords = sys.argv[2]
    #print("Write answers specific")
    #answer = input()
    #print(sys.argv[1:])
    answer = sys.argv[3]
    #matches = tool.check(text)
    out_of = int(sys.argv[4])
    '''
    if (len(answer.split())) <= 3:
        return 0
    '''
    k = keywordVal.givKeywordsValue(model_answer, answer, out_of)
    res = client.check(answer)
    #print(len(res.matches))
#https://www.grammarbot.io/quickstart
from grammarbot import GrammarBotClient
from numpy.random import choice

# Creating the client
# ===================
client = GrammarBotClient()  #API key is optional
client = GrammarBotClient(api_key='KS9C5N3Y')
# ===================

RANDOM_CHANCE = 0.6


# Generates weights for use in numpy's random choice
# Output looks somewhat like this: [0.41957572502685286, 0.2517454350161117, 0.15104726100966703, 0.09062835660580021, 0.05437701396348013, 0.03262620837808808 ...]
# The output will always sum to 1
def generateWeights(size):
    weights = [RANDOM_CHANCE]
    for i in range(size - 1):
        weights.append(weights[i] * RANDOM_CHANCE)
    normalized = [float(i) / sum(weights) for i in weights]
    return normalized


# Calls the grammarbot api to correct text
def grammarCheck(text):
    if text is None or len(text) < 3:
        text = ""
        return ""
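# Quick demonstration (added, not part of the quickstart): the weights decay
# geometrically and are normalized, ready for numpy.random.choice's `p`.
if __name__ == '__main__':
    weights = generateWeights(5)
    print(weights, sum(weights))  # sums to (approximately) 1.0
    print(choice(['a', 'b', 'c', 'd', 'e'], p=weights))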
# -*- coding: utf-8 -*-
"""Untitled18.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1NC5k4CmrUhihDLYkQgiRyf0YSpxlLoz2
"""

from grammarbot import GrammarBotClient
import spacy

nlp = spacy.load('en')

text = input("Enter the body of the message: ")
tokens = nlp(text)
count = 0
for sent in tokens.sents:
    x = sent.string.strip()
    #print(x)
    client = GrammarBotClient()
    res = client.check(x, 'en-GB')
    if len(res.matches) > 0:
        #print(res.matches)
        count += 1
if count > 5:
    print("\n\nThis job is probably fake")
def __init__(self):
    self.client = GrammarBotClient(api_key='KS9C5N3Y')
    sanswer.append(ele[1])

print(snumber)
print('\n\n')
print("Answers array:")
print(sanswer)
print('\n\n')

from grammarbot import GrammarBotClient

totm = 0
for i in range(len(sanswer)):
    count = 0
    newm = marks[i]
    client = GrammarBotClient()
    client = GrammarBotClient(api_key='KS9C5N3Y')
    #client = GrammarBotClient(base_uri='http://backup.grammarbot.io:80')
    text = sanswer[i]
    print(text)
    res = client.check(text)
    match = res.matches
    for j in range(len(match)):
        match0 = match[j]
        if match0.category != 'TYPOGRAPHY':
            count += 1
    """print(match0)
    print("Replacement offset:\n")
    print(match0.replacement_offset)
def __init__(self):
    dotenv_path = os.path.join(os.path.dirname(__file__), '.env')
    load_dotenv(dotenv_path)
    rapidapi_key = os.environ.get('RAPIDAPI_KEY')
    self.client = GrammarBotClient(api_key=rapidapi_key)
import re
import time
import pandas as pd
import random
from transformers import pipeline, set_seed
from grammarbot import GrammarBotClient
from textblob import TextBlob

## PARAGRAPH-GENERATING MODEL

client = GrammarBotClient()  # create client beforehand


def load_model():
    '''
    Loads and returns a pre-trained GPT-2 text-generator model (https://huggingface.co/gpt2)

    Returns
    -------
    model : transformers.pipelines.TextGenerationPipeline
        The pre-trained GPT-2 model
    '''
    model = pipeline('text-generation', model='gpt2')
    set_seed(42)
    return model


def generate_story(input_text, model, max_length=150, use_narrative_hook=True):
    '''
    Returns a story generated using (i) a pre-trained GPT-2 model, and
def check_grammar_bot(text):
    client = GrammarBotClient()
    res = client.check(text, 'en-US')
    # True only when GrammarBot finds no issues in the text
    return len(res.matches) == 0
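# Usage sketch (added): results depend on the live API, so the outcomes below
# are only the likely ones.
# print(check_grammar_bot('This sentence is fine.'))           # likely True
# print(check_grammar_bot('I cant remember how to go their'))  # likely False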
def compute_spell_grammar_score(text):
    client = GrammarBotClient(api_key='PLEASE_INSERT_API_KEY_TO_HERE')
    text = text.strip("\n\t\r .,?!;")
    res_js = client.check(text).raw_json
    tokens = text.count(" ") + 1
    sens = text.count(".") + text.count("?") + text.count("!") + text.count(";") + 1
    result = []
    cnt_spelling = 0
    cnt_grammar = 0
    warn_start = "<span style=\"background-color:yellow\">"
    error_start = "<span style=\"background-color:red\">"
    we_end = "</span>"
    conflict = 0
    cnt_error = 0
    for message in res_js['matches']:
        offset = message['offset'] + conflict
        leng = message['length']
        if message['rule']['issueType'] == 'non-conformance':
            text = text[0:offset] + warn_start + text[offset:offset + leng] + we_end + text[offset + leng:]
            conflict += len(warn_start) + len(we_end)  # account for the inserted markup
            error = {}
            error['id'] = cnt_error
            error['id2'] = cnt_error * 1000
            error['offset'] = message['offset']
            error['length'] = message['length']
            error['msg'] = message['message']
            try:
                error['dst'] = message['description']
            except KeyError:
                error['dst'] = ""
            error['rep'] = message['replacements']
            cnt_error += 1
            result.append(error)
        elif message['rule']['issueType'].find("grammar") != -1 or message['rule']['issueType'].find("misspelling") != -1:
            if message['rule']['issueType'].find("grammar") != -1:
                cnt_grammar += 1
            else:
                cnt_spelling += 1
            text = text[0:offset] + error_start + text[offset:offset + leng] + we_end + text[offset + leng:]
            conflict += len(error_start) + len(we_end)  # account for the inserted markup
            error = {}
            error['id'] = cnt_error
            error['id2'] = cnt_error * 1000
            error['offset'] = message['offset']
            error['length'] = message['length']
            error['msg'] = message['message']
            try:
                error['dst'] = message['description']
            except KeyError:
                error['dst'] = ""
            error['rep'] = message['replacements']
            cnt_error += 1
            result.append(error)
    point = {
        'grammar': (1 - (cnt_grammar / sens)) * 100,
        'spelling': (1 - (cnt_spelling / tokens)) * 100,
        'total': int((1 - 0.5 * (cnt_grammar / sens) - 0.5 * (cnt_spelling / tokens)) * 100)
    }
    return point, result
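# Usage sketch (added): `point` holds the percentage scores and `result` the
# per-error details; requires a real API key in place of the placeholder above.
# point, errors = compute_spell_grammar_score('I cant remember how to go their')
# print(point['total'], 'out of 100,', len(errors), 'issues flagged')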
import sys
import os
import csv
import docx2txt as d2t
from docx import Document
from natsort import natsorted
from grammarbot import GrammarBotClient

client = GrammarBotClient(api_key=os.environ.get('GRAMMARBOT_API_KEY', 'KS9C5N3Y'))

text_filepath = 'C:/Users/Matt/Downloads/CW/TEST_REPLACE_2/'
root_cleaned_filepath = 'C:/Users/Matt/Downloads/CW/TEST_CHECK_2/'
blacklist = ['document_process.csv']

if not os.path.exists(root_cleaned_filepath):
    os.makedirs(root_cleaned_filepath)

for root, dirs, files in os.walk(text_filepath, topdown=True):
    # skip any iterations that don't result in a folder of files
    if not files:
        continue
    # get folder name
    hierarchy = root.split('/')
    folder = ''
    for direc in reversed(hierarchy):
        if direc != '':
            folder = direc
            break
    cleaned_filepath = root_cleaned_filepath + str(folder) + '/'
    if not os.path.exists(cleaned_filepath):
        os.makedirs(cleaned_filepath)
    # testing
    first = False
def bot_errors(line):
    client = GrammarBotClient()
    res = client.check(line)
    return len(res.matches)
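# Usage sketch (added; 'essay.txt' is a hypothetical input file): a plain
# per-line error count, handy for scoring text line by line.
# with open('essay.txt') as fh:
#     for line in fh:
#         print(bot_errors(line.strip()), line.strip())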