Example #1
def main(self):
    # Parse the corpus, count n-grams, and render the word clouds.
    start = time.time()
    self.parse_and_count()
    save_counts_wordcloud.save_counts_wordcloud(self.unigram_count, self.bigram_count, self.trigram_count)
    print("parse, count, wordcloud time:", time.time() - start)

    # Drop the n-gram counters now that the word clouds have been saved.
    del self.unigram_count
    del self.bigram_count
    del self.trigram_count

    # Generate a sentence from the parsed data and report the timing.
    start = time.time()
    generate_sentence.generate_sentence(self.data)
    print("Generate sentence time:", time.time() - start)
Example #2
def findMain(text, chat):
    # Tag the input text with part-of-speech labels.
    word_pos = nltk.pos_tag(nltk.word_tokenize(text))
    mainWord = ''
    mainPOS = ''
    POS_Order = []

    # Treat the first noun (any NN* tag) as the main word of the message.
    for word, pos in word_pos:
        if 'NN' in pos:
            mainWord = word
            mainPOS = pos
            #send_message('Main word is ' + mainWord, chat)
            #send_message('Main POS is ' + mainPOS, chat)
            break

    # Collect the sequence of adjacent POS-tag pairs (bigrams).
    for i in range(len(word_pos) - 1):
        POS_Order.append((word_pos[i][1], word_pos[i + 1][1]))

    # Choose a POS sequence based on the observed tag pairs,
    # generate a sentence from it, and send it back to the chat.
    valu = find_pos.best_pos(POS_Order)
    PoS_seq = transitions.transition(valu)
    print(PoS_seq)
    sentence = generate_sentence(PoS_seq)
    #send_message(str(POS_Order), chat)
    #send_message(valu, chat)
    send_message(sentence, chat)

    return mainWord, mainPOS, POS_Order
Example #3
def just_get_it_working():
    # Answer CORS preflight requests with permissive headers.
    if request.method == 'OPTIONS':
        resp = app.make_default_options_response()
        resp.headers['Access-Control-Allow-Origin'] = '*'
        resp.headers['Access-Control-Allow-Headers'] = 'Content-Type'
        return resp

    # Generate one sentence per item in the posted JSON body.
    results = []
    for item in request.json:
        results.append({"text": generate_sentence(item)})

    #resp = jsonify(results)
    resp = make_response(dumps(results))
    resp.headers['Access-Control-Allow-Origin'] = '*'
    resp.headers['Access-Control-Allow-Headers'] = 'Content-Type'

    return resp
Example #4
def main(argv=sys.argv[1:]):
    parser = argparse.ArgumentParser()
    parser.add_argument('--target',
                        type=argparse.FileType('r'),
                        default=sys.stdin)
    parser.add_argument('--ngwords', default='NG_WORDS.txt')
    parser.add_argument('--endwords', default='END_WORDS.txt')
    parser.add_argument('--conf',
                        default='~/.config/mastodon/mstdn.jp.sub/user.txt')
    parser.add_argument('--interval', type=int, default=60)

    opts = parser.parse_args(argv)

    # Read the Mastodon access token from the config file.
    with open(os.path.expanduser(opts.conf)) as fp:
        token = fp.read().strip()

    load_ng_words(opts.ngwords)
    load_end_words(opts.endwords)

    model = load_model(opts.target)

    # time.sleep() returns None, so "not time.sleep(...)" is always True:
    # sleep for the interval, then loop forever, posting one sentence per pass.
    while not time.sleep(opts.interval):
        try:
            sentence = generate_sentence(model)
            if sentence:
                res = requests.post('https://mstdn.jp/api/v1/statuses/',
                                    headers={
                                        'Content-Type': 'application/json',
                                        'Authorization': f'Bearer {token}',
                                    },
                                    json={
                                        'status': sentence,
                                        'in_reply_to_id': None,
                                        'media_ids': None,
                                        'sensitive': None,
                                        'spoiler_text': None,
                                        'visibility': 'unlisted',
                                    })
                print(res, sentence)
        except SentenceGenerationError:
            pass
Example #5
# Main program
from loadpairs import loadpairs
from get_q_templs import get_q_templs
from gram_filter import gram_filter
from ngram import train
from json import dumps
from pymystem3 import Mystem
from generate_sentence import generate_sentence

ma = Mystem()

# Load the data pairs, build question templates from them,
# and keep only the questions that pass the grammar filter.
pairs = loadpairs(filename='data.json')
q_templs = get_q_templs(
    znanie=pairs,
    kolvo=5,
    form="Правда ли что %s %s %s?")  # "Is it true that %s %s %s?"
model = train(corpus='voprosi')
questions = gram_filter(q_templs, model)
#print(questions)

# Lemmatize each question with Mystem and generate a sentence from the lemmas.
for question in questions:
    kutokens = []
    for a in ma.analyze(question):
        try:
            kutokens.append(a['analysis'][0]['lex'])
        except (KeyError, IndexError):
            pass
    #print(kutokens)
    print(generate_sentence(model, kutokens))
Example #6
def index():
    return generate_sentence.generate_sentence(model, retries=100)
Example #7
def render_page():
    sentence = generate_sentence()
    return render_template('index.html', sentence=sentence)