Example #1
def output_text(request):
    if request.method == 'GET':
        name = request.GET.get('q', '')  # default to an empty string so the checker never receives None
        tool = grammar_check.LanguageTool('en-GB')
        texts = name
        matches = tool.check(texts)
        name = grammar_check.correct(texts, matches)
        return render(request, 'index2.html', {'result': name})
def grammer_chek(sen):
    matches = tool.check(sen)
    if matches:
        print(sen, "Syntax_error")
        print("grammatic suggestion")
        print(grammar_check.correct(sen, matches))
    else:
        print(sen, "No_syntax_error")
Example #3
def interactive_decode():
    with tf.Session() as sess:
        # Create model and load parameters.
        model = create_model(sess, True)
        model.batch_size = 1  # We decode one sentence at a time.

        # Load vocabularies.
        en_vocab_path = os.path.join(FLAGS.data_dir, "vocab8000.from")
        fr_vocab_path = os.path.join(FLAGS.data_dir, "vocab8000.to")
        en_vocab, _ = data_utils.initialize_vocabulary(en_vocab_path)
        _, rev_fr_vocab = data_utils.initialize_vocabulary(fr_vocab_path)

        # Decode from standard input.
        sys.stdout.write("> ")
        sys.stdout.flush()
        sentence = sys.stdin.readline()
        while sentence:
            # Get token-ids for the input sentence.
            token_ids = data_utils.sentence_to_token_ids(
                tf.compat.as_bytes(sentence), en_vocab)
            # Which bucket does it belong to?
            bucket_id = len(_buckets) - 1
            for i, bucket in enumerate(_buckets):
                if bucket[0] >= len(token_ids):
                    bucket_id = i
                    break
            else:
                logging.warning("Sentence truncated: %s", sentence)

            # Get a 1-element batch to feed the sentence to the model.
            encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                {bucket_id: [(token_ids, [])]}, bucket_id)
            # Get output logits for the sentence.
            _, _, output_logits = model.step(sess, encoder_inputs,
                                             decoder_inputs, target_weights,
                                             bucket_id, True)
            # This is a greedy decoder - outputs are just argmaxes of output_logits.
            outputs = [
                int(np.argmax(logit, axis=1)) for logit in output_logits
            ]

            # If there is an EOS symbol in outputs, cut them at that point.
            if data_utils.EOS_ID in outputs:
                outputs = outputs[:outputs.index(data_utils.EOS_ID)]

            # Print out French sentence corresponding to outputs.
            text = " ".join(
                [tf.compat.as_str(rev_fr_vocab[output]) for output in outputs])
            matches = tool.check(text)
            if len(matches):
                print("original: " + text)
                text = grammar_check.correct(text, matches)
                print("corrected: " + text)
            print(text)
            print("> ", end="")
            sys.stdout.flush()
            sentence = sys.stdin.readline()
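The loop above reduces each decoder step's logits to a single token id and cuts the output at the first end-of-sentence marker. A self-contained sketch of that greedy step, with made-up logit values and an assumed EOS id of 2 standing in for data_utils.EOS_ID:

import numpy as np

EOS_ID = 2  # assumed id for illustration; the real value is data_utils.EOS_ID
# One (1, vocab_size) logits array per decoder step, as model.step() returns them.
output_logits = [np.array([[0.1, 0.2, 0.05, 0.9]]),  # argmax -> 3
                 np.array([[0.3, 0.1, 0.8, 0.2]]),   # argmax -> 2, the EOS id
                 np.array([[0.7, 0.1, 0.1, 0.1]])]   # dropped by the EOS cut

outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
if EOS_ID in outputs:
    outputs = outputs[:outputs.index(EOS_ID)]
print(outputs)  # [3]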
Example #4
def check_sentence(cont, sentence):
    '''Check one sentence and log a suggested correction when it differs.'''
    try:
        # Automatically apply suggestions to the text.
        matches = tool.check(sentence)
        suggestions = grammar_check.correct(sentence, matches)
        print cont, sentence

        if sentence != suggestions:
            logging.info('Sentence: {}'.format(sentence))
            logging.info('Suggestion: {} \n'.format(suggestions))
    except Exception as err:
        print err
def grammarCheckFile(filePath, checkpt):
	for filename in glob.glob(os.path.join(filePath, 'dialogue_a_' + checkpt)):
		newFileName = filename.replace(filePath +"/", "corrected_")
		newFile = open(filePath +"/" + newFileName, 'w')

		oldFile = open(filename, 'r')
		dialogues = oldFile.readlines()
		for i,line in enumerate(dialogues):
			if i % 2 == 0:
				newFile.write(line)
			else:
				line = line.encode('ascii', 'ignore')
				matches = tool.check(line)
				if len(matches):
					line = grammar_check.correct(line, matches)
				newFile.write(line)
		newFile.close()
Example #6
def decode(user_input, model, en_vocab, rev_fr_vocab, sess):
    # Decode from standard input.
    sys.stdout.write("> ")
    sys.stdout.flush()
    sentence = user_input
    # Get token-ids for the input sentence.
    token_ids = data_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence),
                                                 en_vocab)
    # Which bucket does it belong to?
    bucket_id = len(_buckets) - 1
    for i, bucket in enumerate(_buckets):
        if bucket[0] >= len(token_ids):
            bucket_id = i
            break
    else:
        logging.warning("Sentence truncated: %s", sentence)

    # Get a 1-element batch to feed the sentence to the model.
    encoder_inputs, decoder_inputs, target_weights = model.get_batch(
        {bucket_id: [(token_ids, [])]}, bucket_id)
    # Get output logits for the sentence.
    _, _, output_logits = model.step(sess, encoder_inputs, decoder_inputs,
                                     target_weights, bucket_id, True)

    # This is a greedy decoder - outputs are just argmaxes of output_logits.
    outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]

    # If there is an EOS symbol in outputs, cut them at that point.
    if data_utils.EOS_ID in outputs:
        outputs = outputs[:outputs.index(data_utils.EOS_ID)]

    # Print out corresponding outputs.
    output = " ".join(
        [tf.compat.as_str(rev_fr_vocab[output]) for output in outputs])
    matches = tool.check(output)
    if len(matches):
        print("original: " + output)
        output = grammar_check.correct(output, matches)
        print("corrected: " + output)
    print(output)
    return output
Example #7
def main():
    plotto = Plotto()
    plotto.generate()
    
    tool = grammar_check.LanguageTool('en-US')

    masterplot = plotto.masterplot.getPlot()
    plot_checked = grammar_check.correct(masterplot, tool.check(masterplot))
    
    print capitalize(plot_checked.lower()) + '\n'

    conflict = plotto.conflicts.getConflict()
    characters = plotto.characters.getCharacters()

    print conflict + '\n'
    print 'Characters: '
    
    for char in characters:
        print char.name + ", " + char.role

    print '\n'
Example #8
def handlePost(request):
	content = request.get_json(silent=True)
	print content
	if content.has_key('inputText'):
		# Do basic grammar correction.
		input_text = replaceContractions(content['inputText'])
		matches = tool.check(input_text)
		input_text = grammar_check.correct(input_text, matches)

		passive_sentences = classifySentences(input_text)

		alchemy_response = alchemyapi.keywords('text', input_text, {'sentiment': 1})
		keywords = alchemy_response['keywords']

		transformKeywords(keywords)
		return memeSwitch(input_text)
		# return jsonify(
		#	text=input_text,
		#	passive=passive_sentences
		# )
	else:
		return 'ERROR: Couldn\'t find a text key.'
Example #9
def handlePost(request):
    content = request.get_json(silent=True)
    print content
    if content.has_key('inputText'):
        # Do basic grammar correction.
        input_text = replaceContractions(content['inputText'])
        matches = tool.check(input_text)
        input_text = grammar_check.correct(input_text, matches)

        passive_sentences = classifySentences(input_text)

        alchemy_response = alchemyapi.keywords('text', input_text,
                                               {'sentiment': 1})
        keywords = alchemy_response['keywords']

        transformKeywords(keywords)
        return memeSwitch(input_text)
        # return jsonify(
        #	text=input_text,
        #	passive=passive_sentences
        # )
    else:
        return 'ERROR: Couldn\'t find a text key.'
Example #10
def main():
    plotto = Plotto()
    while (plotto.isPlotting()):
        plotto.menu()
        plotto.display()
    plotto.generate()

    tool = grammar_check.LanguageTool('en-US')

    masterplot = plotto.masterplot.getPlot()
    plot_checked = grammar_check.correct(masterplot, tool.check(masterplot))

    print capitalize(plot_checked.lower()) + '\n'

    conflict = plotto.conflicts.getConflict()
    characters = plotto.characters.getCharacters()

    print conflict + '\n'
    print 'Characters: '

    for char in characters:
        print char.name + ", " + char.role

    print '\n'
Example #11
			else:
				self.cache[key] = [w3]
				
	def generate_markov_text(self, size=10):
		seed = random.randint(0, self.word_size-3)
		seed_word, next_word = self.words[seed], self.words[seed+1]
		w1, w2 = seed_word, next_word
		gen_words = []
		for i in xrange(size):
			gen_words.append(w1)
			w1, w2 = w2, random.choice(self.cache[(w1, w2)])
		gen_words.append(w2)
		return ' '.join(gen_words)
			
fname = open('movielines20000_raw.txt')
markov = Markov(fname)
tool = grammar_check.LanguageTool('en-GB')

start_time = time.time()
for i in range(10):
	text = markov.generate_markov_text()
	print time.time()-start_time
	matches = tool.check(text)
	print time.time()-start_time
	print text
	print time.time()-start_time
	print grammar_check.correct(text, matches)
	print time.time()-start_time
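The snippet above begins partway through building the trigram cache, so the construction is only implied. A hedged sketch of how such a cache is typically filled, consistent with how generate_markov_text later reads cache[(w1, w2)] (the helper below is an assumption, not the original class code):

# Hypothetical reconstruction of the trigram cache the truncated class builds;
# generate_markov_text() draws the next word from cache[(w1, w2)].
def build_cache(words):
    cache = {}
    for w1, w2, w3 in zip(words, words[1:], words[2:]):
        key = (w1, w2)
        if key in cache:
            cache[key].append(w3)
        else:
            cache[key] = [w3]
    return cache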


Example #12
    def generate_response(self, sentence, object_name, detail_name, question,
                          p_score, f_score, u_score, s_score, detail_array):
        response = ""
        object_score = 0
        detail_score = 0
        statement_score = 0
        question_score = 0
        self.question = question
        pronoun_pool = []
        noun_pool = []
        adjectives_pool = []
        connector_pool = []
        verb_pool = []
        past_verb_pool = []
        simple_verb_pool = []
        VRB_pool = []
        rb_pool = []
        MD_pool = []
        WRB_pool = []
        WP_pool = []
        #multi-dimensional array
        word_bank = []
        if self.find_object_node(object_name):
            print("Previous object found!")
            object_score += 1

        for entry in self.justTAG(sentence):
            pos = entry[0]
            tag = entry[1]
            if tag == 'NN':  #NOUNS
                noun_pool.append(pos)
                continue
            elif tag == 'NNS':  #NOUNS PLURAL
                noun_pool.append(pos)
                continue
            elif tag == 'NNP':  #unidentified pronoun
                pronoun_pool.append(pos)
                continue
            elif tag == 'JJ':  #ADJECTIVES
                adjectives_pool.append(pos)
                continue
            elif tag == 'VBP':  #VERB
                verb_pool.append(pos)
                continue
            elif tag == 'VRB':
                if pos == "do" and question:
                    WP_pool.append(pos)
                else:
                    VRB_pool.append(pos)
                continue
            elif tag == 'VB':
                simple_verb_pool.append(pos)
                continue
            elif tag == 'VBD':
                past_verb_pool.append(pos)
                continue
            elif tag == 'VBG':
                verb_pool.append(pos)
                continue
            elif tag == 'VBZ':
                verb_pool.append(pos)
                continue
            elif tag == 'RB':  #Example : do you STILL like me ?
                rb_pool.append(pos)
                continue
            elif tag == 'MD':
                MD_pool.append(pos)
                continue
            elif tag == 'WRB':
                WRB_pool.append(pos)
                continue
            elif tag == 'WP':
                WP_pool.append(pos)
                continue
            else:
                continue
        print("PRONOUNS " + str(pronoun_pool))
        print("NOUNS " + str(noun_pool))
        print("ADJECTIVES " + str(adjectives_pool))
        print("VERBS " + str(verb_pool))
        print("SIMPLE VERBS" + str(simple_verb_pool))
        print("PAST VERBS " + str(past_verb_pool))
        print("RB " + str(rb_pool))
        print("VRB " + str(VRB_pool))
        print("WRB " + str(WRB_pool))
        print("MD" + str(MD_pool))
        print("WP " + str(WP_pool))
        existing_objects = dict()
        detail_count = 0
        line_num = 0
        detail_array = []
        noun_scores = dict()
        self.global_noun_pool = noun_pool
        for word in pronoun_pool:
            if self.find_object_node(word):
                total_score = 2
                detail_count, line_num, detail_array = self.check_object_details(
                    self.get_object_line(word))
                existing_objects[word] = detail_count
                if word == object_name:
                    total_score = total_score * 3
                total_score = total_score * detail_count
                noun_scores[word] = total_score
            else:
                continue
        for word in noun_pool:
            print('word:' + word)
            if self.find_object_node(word):
                total_score = 2
                detail_count, line_num, detail_array = self.check_object_details(
                    self.get_object_line(word))
                existing_objects[word] = detail_count
                if word == object_name:
                    total_score = total_score * 3
                total_score = total_score * detail_count
                noun_scores[word] = total_score
        print("Existing objects" + str(existing_objects))
        local_memory = self.get_local_memory(noun_pool)
        server_memory = self.get_server_memory()
        total_memory = self.get_total_memory(local_memory, server_memory)
        print('LOCAL MEMORY = ' + str(local_memory))
        print('SERVER MEMORY = ' + str(server_memory))
        print('TOTAL MEMORY = ' + str(total_memory))
        generated_word_bank = self.create_word_bank(total_memory)
        print('WORD BANK : ' + str(generated_word_bank))
        if question:
            #get all segments from wrb,md, wp
            banana_split = sentence.split(" ")
            i = len(banana_split)
            banana_max = 0
            target_question = ""
            for word in banana_split:
                if word in WRB_pool or word in MD_pool or word in WP_pool:
                    banana_max = i
                    target_question = word
                    break
            print("TARGET QUESTION : " + target_question)
            local_memory = self.get_local_memory(noun_pool)
            print('LOCAL MEMORY = ' + str(local_memory))
            return response
        elif not question:
            pronouns = []
            nouns = []
            verbs = []
            adjectives = []
            wrb = []
            final_word_bank = self.apply_weights(generated_word_bank, question,
                                                 sentence, object_name,
                                                 detail_name)
            #segment everything into question words, nouns and verbs
            question_starters = [
                'Did', 'How', 'When', 'Where', 'How', 'Can', 'Is', 'What',
                'Should', 'Could', 'Would'
            ]
            wp = ['Who', 'What']
            wdt = ['Which']
            wp_doll = ['Whose']
            wrb = ['Where', 'When', 'How']
            md = ['Can', 'Could', 'Will', 'Should', 'Would']
            q_connect = ['Was', 'Did', 'Is']
            q_connect_2 = ['An', 'A', 'It']

            q_past = []
            q_present = [wdt]
            q_future = [md]
            q_pro = [wp, wp_doll]
            q_where = [wrb]
            self_perspective = False
            reverse_perspective = False
            sentence_bank = []

            if 'I' in sentence.split(" "):
                self_perspective = True
                pronouns.append('I')
            if len(final_word_bank[0]) != 0:
                pronouns = final_word_bank[0]
                sentence_bank.append(pronouns)
            if len(final_word_bank[1]) != 0:
                nouns = final_word_bank[1]
                sentence_bank.append(nouns)
            if len(final_word_bank[2]) != 0:
                verbs = final_word_bank[2]
                sentence_bank.append(verbs)
            if len(final_word_bank[3]) != 0:
                adjectives = final_word_bank[3]
                sentence_bank.append(adjectives)
            if len(final_word_bank[6]) != 0:
                for verb in final_word_bank[6]:
                    verbs.append(WordNetLemmatizer().lemmatize(verb, 'v'))
                sentence_bank.append(verbs)
            if len(final_word_bank[7]) != 0:
                for verb in final_word_bank[7]:
                    verbs.append(WordNetLemmatizer().lemmatize(verb, 'v'))
                sentence_bank.append(verbs)
            if len(final_word_bank[9]) != 0:
                wrb = final_word_bank[9]
                sentence_bank.append(wrb)
            print(pronouns)
            print(verbs)
            print(nouns)
            print(adjectives)
            print(wrb)

            sentence_RAW = str(pronouns[0] + " " + verbs[0] + " " + nouns[0])
            matches = self.tool.check(sentence_RAW)
            print(matches)
            print(language_check.correct(sentence_RAW, matches))
            sentence_tier2 = []
            for i, question_starter in enumerate(question_starters):
                sentence_tier2.append(question_starters[i] + " " +
                                      sentence_RAW)
            print(sentence_tier2)
            tool = grammar_check.LanguageTool('en-GB')
            matches = tool.check(str(sentence_tier2[0]))
            print(matches)
            response = grammar_check.correct(str(sentence_tier2[0]), matches)
            print(self.get_sentence_tense(self.justTAG(sentence)))
            #assign gravity to each set
            #combine set gravities into composite gravities for noun verb pairs
            #combine set gravities into composite gravities for question noun verb triplets
            #filter out final gravity sequence
            return response
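The long elif chain above simply buckets each tagged word into a per-part-of-speech pool. A compact sketch of the same idea using nltk directly, covering only a subset of the tags handled above (assumes the punkt tokenizer and averaged-perceptron tagger data are available):

import nltk
from collections import defaultdict

sentence = "Do you still like the old movies"
# Map a few Penn Treebank tags to the pools used above (subset, for illustration only).
pool_for_tag = {'NN': 'nouns', 'NNS': 'nouns', 'NNP': 'pronouns',
                'JJ': 'adjectives', 'VBP': 'verbs', 'VBD': 'past_verbs',
                'RB': 'rb', 'MD': 'md', 'WRB': 'wrb', 'WP': 'wp'}

pools = defaultdict(list)
for word, tag in nltk.pos_tag(nltk.word_tokenize(sentence)):
    if tag in pool_for_tag:
        pools[pool_for_tag[tag]].append(word)
print(dict(pools))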
Example #13
def do_something(val):
    tool = grammar_check.LanguageTool('en-GB')
    texts = val
    matches = tool.check(texts)
    return grammar_check.correct(texts, matches)
Example #14
    def process_one_log(self, input_log, repo_info_topics):
        input_log = PreprocessManager.remove_non_ascii(input_log)
        # TODO : Do we need repo info?
        #repo_info_topics = PreprocessManager.remove_non_ascii(repo_info_topics)
        # Find the length
        # TODO : All the scores which are dependent on the length are not unbiased if not normalized! Check that
        length = len(PreprocessManager.get_raw_tokenized_text(input_log))

        # Find structural integrity.
        self.grammar_tool.enable_spellchecking()
        problematic_matches = self.grammar_tool.check(input_log)
        corrected_text = gc.correct(input_log, problematic_matches)
        degree_of_match = fuzz.ratio(input_log, corrected_text)
        structural_integrity_score = degree_of_match * (length - len(problematic_matches))

        # Check if topic is relevant
        # This is still in testing phase and not sure if it has a good impact on the final results.
        # Might be totally useless at times.
        sframe_data_for_topics = gl.SArray([PreprocessManager.get_word_counts(input_log)])
        # Add Associations here TODO: Make it proper
        associations = gl.SFrame({'word': ['fix', 'issue', 'implement', 'modify', 'changed', 'bug', 'error'],
                               'topic': [0, 0, 0, 0, 0, 0, 0]})

        topic_model = gl.topic_model.create(sframe_data_for_topics, associations=associations)

        # TODO : Add here the match with the description. Is that useful? Maybe Future work?

        #pred = topic_model.predict(sframe_data_for_topics, output_type='probability')
        topics = topic_model.get_topics()
        # The final score is the sum of all the topic 0 scores! As they were used in associations. Gives us relevance of being a commit message!
        topic_relevance_score = 0
        for i in xrange(0, len(topics)):
            curr = topics[i]
            topic_id = curr['topic']
            score_val = curr['score']
            if topic_id == 0:
                topic_relevance_score += score_val

        topic_relevance_score *= 100

        #print topics, topic_relevance_score

        # Check how much positivity
        log_dict = dict()
        log_dict['text'] = input_log
        positivity = self.senti_checker.predict_row(log_dict)
        positivity_score = 100 * positivity

        #print positivity_score

        # Spelling Goodness
        self.spell_master.set_text(input_log)
        error_words = list()
        for err in self.spell_master:
            error_words.append(err.word)
        spelling_integrity_score = length - len(error_words)

        #return all
        return length, structural_integrity_score, topic_relevance_score, positivity_score, spelling_integrity_score
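The structural-integrity score above multiplies a fuzzy similarity ratio by the token count minus the number of grammar matches. A minimal sketch of that arithmetic with made-up inputs, assuming fuzzywuzzy's fuzz.ratio as imported in the surrounding module:

from fuzzywuzzy import fuzz

# Made-up inputs standing in for the values computed above.
input_log = "fix issue with the the parser"
corrected_text = "fix issue with the parser"      # assumed result of gc.correct()
problematic_matches = ["one grammar match"]       # stand-in for the LanguageTool matches
length = len(input_log.split())                   # rough token count

degree_of_match = fuzz.ratio(input_log, corrected_text)  # 0-100 string similarity
structural_integrity_score = degree_of_match * (length - len(problematic_matches))
print(structural_integrity_score)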
Example #15
import os
import pytesseract
from PIL import Image
from autocorrect import spell
import grammar_check

text = pytesseract.image_to_string(Image.open("Bruce-Lee-Absorb.png"))

tool = grammar_check.LanguageTool('en-GB')
matches = tool.check(text)
ctext = grammar_check.correct(text, matches)
print ctext
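The spell import above is never used in this snippet. One hedged way it could slot in is a word-by-word spelling pass before the grammar check (this ignores punctuation handling and is purely illustrative):

# Hypothetical extension: apply the unused spell import word by word before the grammar pass.
spelled = " ".join(spell(word) for word in text.split())
matches = tool.check(spelled)
ctext = grammar_check.correct(spelled, matches)
print ctext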
Example #16
def webook():

    # endpoint for processing incoming messaging events
    #return "Hello world", 200
    data = request.get_json()
    log(data)  # you may not want to log every incoming message in production, but it's good for testing

    if data["object"] == "page":
        for entry in data["entry"]:
            for messaging_event in entry["messaging"]:
                if messaging_event.get("message"):  # someone sent us a message
                    sender_id = messaging_event["sender"]["id"]        # the facebook ID of the person sending you the message
                    if sender_id == u'1774667882802558':
                        log(sender_id)
                        return "ok", 200
                    recipient_id = messaging_event["recipient"]["id"]  # the recipient's ID, which should be your page's facebook ID
                    send_process(sender_id)
                    send_settings()
                    message_text = messaging_event["message"].get("text")
                    option = messaging_event["message"].get("quick_reply")
                    log(option)
                    log(grammarUserID)
                    if grammarUserID.get(sender_id) == 1:
                        grammarUserID[sender_id] = 0
                        log(grammarUserID)
                        tool = grammar_check.LanguageTool('en-US') 
                        tmp = tool.check(message_text)
                        outp = ''
                        log(tmp)
                        #for mis in tmp : 
                        #    outp += mis + '\n'
                        outp = grammar_check.correct(message_text, tmp)
                        send_message(sender_id, "Correct: " + outp)
                        return "ok", 200
                    if option is not None:
                        opt = messaging_event["message"]["quick_reply"]["payload"]
                        log(opt)
                        option_catch(opt,sender_id)
                    else:
                        if option is None:
                            option = messaging_event.get("messaging")
                        if option is not None:
                            opt = option[0]["postback"]["payload"]
                            option_catch(opt,sender_id)
                            return "ok", 200
                        if message_text is not None:
                            if message_text == 'Setting':
                                quick_replies = [["Volcabulary", "Vocab"], ["Grammar-Check","Gramma"],["Category", "Cate"]]
                                send_quickReplies(sender_id, "Which option do you choose?", quick_replies)
                            else:
                                send_Define(sender_id, message_text)

                    #else:
                        #messageType = messaging_event["message"].get("text")
                        #if messageType == 'audio':
                            #pass
                        #change to text

                if messaging_event.get("delivery"):  # delivery confirmation
                    pass

                if messaging_event.get("optin"):  # optin confirmation
                    pass

                if messaging_event.get("postback"):  # user clicked/tapped "postback" button in earlier message
                    pass
    return "ok", 200
Example #17
def webook():

    # endpoint for processing incoming messaging events
    #return "Hello world", 200
    data = request.get_json()
    log(
        data
    )  # you may not want to log every incoming message in production, but it's good for testing

    if data["object"] == "page":
        for entry in data["entry"]:
            for messaging_event in entry["messaging"]:
                if messaging_event.get("message"):  # someone sent us a message
                    sender_id = messaging_event["sender"][
                        "id"]  # the facebook ID of the person sending you the message
                    if sender_id == u'1774667882802558':
                        log(sender_id)
                        return "ok", 200
                    recipient_id = messaging_event["recipient"][
                        "id"]  # the recipient's ID, which should be your page's facebook ID
                    send_process(sender_id)
                    send_settings()
                    message_text = messaging_event["message"].get("text")
                    option = messaging_event["message"].get("quick_reply")
                    log(option)
                    log(grammarUserID)
                    if grammarUserID.get(sender_id) == 1:
                        grammarUserID[sender_id] = 0
                        log(grammarUserID)
                        tool = grammar_check.LanguageTool('en-US')
                        tmp = tool.check(message_text)
                        outp = ''
                        log(tmp)
                        #for mis in tmp :
                        #    outp += mis + '\n'
                        outp = grammar_check.correct(message_text, tmp)
                        send_message(sender_id, "Correct: " + outp)
                        return "ok", 200
                    if option is not None:
                        opt = messaging_event["message"]["quick_reply"][
                            "payload"]
                        log(opt)
                        option_catch(opt, sender_id)
                    else:
                        if option is None:
                            option = messaging_event.get("messaging")
                        if option is not None:
                            opt = option[0]["postback"]["payload"]
                            option_catch(opt, sender_id)
                            return "ok", 200
                        if message_text is not None:
                            if message_text == 'Setting':
                                quick_replies = [["Volcabulary", "Vocab"],
                                                 ["Grammar-Check", "Gramma"],
                                                 ["Category", "Cate"]]
                                send_quickReplies(
                                    sender_id, "Which option do you choose?",
                                    quick_replies)
                            else:
                                send_Define(sender_id, message_text)

                    #else:
                    #messageType = messaging_event["message"].get("text")
                    #if messageType == 'audio':
                    #pass
                    #change to text

                if messaging_event.get("delivery"):  # delivery confirmation
                    pass

                if messaging_event.get("optin"):  # optin confirmation
                    pass

                if messaging_event.get(
                        "postback"
                ):  # user clicked/tapped "postback" button in earlier message
                    pass
    return "ok", 200
                        requirement += " " + lemma
                    elif part.tag == "PREP":
                        if 'value' in part.attrib:
                            requirement += " " + part.attrib['value']
                    elif 'value' in part.attrib:
                        if part.attrib['value'] in extended_parts:
                            thematicrolepart = random.choice(
                                extended_parts[part.attrib['value']])
                            requirement += " " + thematicrolepart
                        else:
                            falserequirement = True
                            break
                    counter += 1

                if falserequirement:
                    break

                if len(requirement) > 7:
                    matches = tool.check(requirement)
                    corrected_req = grammar_check.correct(requirement, matches)
                    new_reqs_file.write(corrected_req)
                    new_reqs_file.write("\n")
                    print corrected_req
                    i += 1
    new_reqs_file.close()
else:
    print "Files with identified parts of requirements do not exist. Please make sure to run AnalyzeRequirements.py first"
'''
Constructing requirements ends here
'''
Example #19
def format_question(question):
    matches = tool.check(unicode(question))
    return grammar_check.correct(unicode(question), matches)
import grammar_check
tool = grammar_check.LanguageTool('en-US')
while True:
    text = raw_input("Enter  ")
    matches = tool.check(text)
    print matches
    print grammar_check.correct(text, matches)

Example #21
    def grammar(self, essay):
        with open(essay, 'r') as text:
            self.essay1 = text.read()
        tool = grammar_check.LanguageTool('en-GB')
        matches = tool.check(self.essay1)
        print(grammar_check.correct(self.essay1, matches))