def generate(corpus='./poetry/english_poems.txt', db='./graphs/small_graph.db', theme=''):
    """
    Generates an English poem.

    :param corpus: poetry corpus
    :param db: graph database
    :param theme: theme of the poem
    :return: poem
    """
    # Defaults match the word/tag graphs; the LSA graph uses its own scheme.
    tag_separator = '_'
    tags = ['NN', 'VB', 'JJ', 'NNP']
    if db.endswith('lsa.db'):
        # LSA graphs join word and tag with a backslash and use
        # lowercase coarse POS tags instead of Penn Treebank ones.
        tag_separator = '\\'
        tags = ['n', 'v', 'a']
    return generate_poem('en', corpus, db, tags, theme,
                         tag_separator=tag_separator, newline='&')
def index():
    """
    Flask view for the poem generator.

    POST: read up to four user-supplied lines from the form. If the first
    line is given, continue the user's poem; otherwise generate a fresh
    7-character poem from a random start word. Generation is retried a
    bounded number of times until every line has the expected length.
    GET: render the empty form.

    :return: rendered ``index.htm`` template
    """
    if request.method == 'POST':
        sentences_count = 4
        poem = ['' for _ in range(sentences_count)]
        # Bounded retries: the previous `while True` loop could spin forever
        # when the generator never produced lines of the right length
        # (the Python-2 version of this handler also caps attempts at 3).
        max_attempts = 3
        try:
            poem_1 = request.form['poem_1']
            poem_2 = request.form['poem_2']
            poem_3 = request.form['poem_3']
            poem_4 = request.form['poem_4']
            if poem_1:
                topic_id = random.randint(0, 10)  # TODO: derive topic from the input
                given_poem = [poem_1, poem_2, poem_3, poem_4]
                sentence_len = len(given_poem[0])
                for _ in range(max_attempts):
                    poem = generate_poem.generate_poem_with_poem4(
                        given_poem, collocations_v, collocations_h, words,
                        topic_words[topic_id], start_words)
                    print(poem)
                    if all(len(line) == sentence_len for line in poem[:4]):
                        break
                    # Retry with a freshly drawn topic, as before.
                    topic_id = random.randint(0, 10)
            else:
                topic_id = random.randint(0, 10)
                sentence_len = 7
                start_word = random.choice(start_words)
                print(start_word)
                for _ in range(max_attempts):
                    poem = generate_poem.generate_poem(
                        topic_id, sentence_len, sentences_count, start_word,
                        collocations_v, collocations_h, words,
                        topic_words[topic_id], start_words)
                    print(poem)
                    if all(len(line) == sentence_len for line in poem[:4]):
                        break
        except Exception as e:
            # Best-effort: log and render whatever we have instead of a 500.
            print(e)
        return render_template('index.htm', poem_1=poem[0], poem_2=poem[1],
                               poem_3=poem[2], poem_4=poem[3])
    return render_template('index.htm', g=g)
def index():
    """
    Flask view for the poem generator.

    POST: read up to four user-supplied lines from the form. If the first
    line is given, continue the user's poem; otherwise generate a fresh
    7-character poem from a random start word. Generation is retried a
    bounded number of times until every line has the expected length.
    GET: render the empty form.

    :return: rendered ``index.htm`` template
    """
    if request.method == 'POST':
        sentences_count = 4
        poem = ['' for _ in range(sentences_count)]
        # Bounded retries: the previous `while True` loop could spin forever
        # when the generator never produced lines of the right length
        # (the Python-2 version of this handler also caps attempts at 3).
        max_attempts = 3
        try:
            poem_1 = request.form['poem_1']
            poem_2 = request.form['poem_2']
            poem_3 = request.form['poem_3']
            poem_4 = request.form['poem_4']
            if poem_1:
                topic_id = random.randint(0, 10)  # TODO: derive topic from the input
                given_poem = [poem_1, poem_2, poem_3, poem_4]
                sentence_len = len(given_poem[0])
                for _ in range(max_attempts):
                    poem = generate_poem.generate_poem_with_poem4(
                        given_poem, collocations_v, collocations_h, words,
                        topic_words[topic_id], start_words)
                    print(poem)
                    if all(len(line) == sentence_len for line in poem[:4]):
                        break
                    # Retry with a freshly drawn topic, as before.
                    topic_id = random.randint(0, 10)
            else:
                topic_id = random.randint(0, 10)
                sentence_len = 7
                start_word = random.choice(start_words)
                print(start_word)
                for _ in range(max_attempts):
                    poem = generate_poem.generate_poem(
                        topic_id, sentence_len, sentences_count, start_word,
                        collocations_v, collocations_h, words,
                        topic_words[topic_id], start_words)
                    print(poem)
                    if all(len(line) == sentence_len for line in poem[:4]):
                        break
        except Exception as e:
            # Best-effort: log and render whatever we have instead of a 500.
            print(e)
        return render_template('index.htm', poem_1=poem[0], poem_2=poem[1],
                               poem_3=poem[2], poem_4=poem[3])
    return render_template('index.htm', g=g)
def generate(corpus='./poesie/poesie_francaise.txt', db='./graphs/french_pruned.db', theme=''):
    """
    Generates a French poem.

    :param corpus: poetry corpus
    :param db: graph database
    :param theme: theme of the poem
    :return: poem
    """
    # POS tags present in the French graph.
    pos_tags = ['NN', 'NNP', 'VB', 'JJ']
    # Character that joins word and tag in graph node labels.
    sep = '_'
    # Placeholder that stands in for newlines during text processing.
    line_break = ';'
    return generate_poem('fr', corpus, db, pos_tags, theme, sep, line_break)
def generate(corpus='./runoutta/runoutta_aakkosellinen.txt', db='./graphs/verkko.db', theme=''):
    """
    Generates a Finnish poem.

    :param corpus: poetry corpus
    :param db: graph database
    :param theme: theme of the poem
    :return: poem
    """
    # Morphological POS tags used by the Finnish graph.
    pos_tags = ['POS=NOUN', 'POS=VERB', 'POS=ADJECTIVE', 'POS=ADVERB']
    # '_' joins word and tag in graph labels; '*' stands in for newlines.
    return generate_poem('fi', corpus, db, pos_tags, theme,
                         tag_separator='_', newline='*')
def again_train(self, event):  # retrain and generate new poems
    """
    wx event handler: regenerate poems, score them against the user's
    keywords (``self.vv1``..``self.vv3``) expanded with word2vec-similar
    words, and write the best matches to ``./similar_output.txt``.

    :param event: wx button event (unused directly)
    """
    # Regenerate the raw poem output file first.
    generate_poem.generate_poem()
    kl = []          # individual poem lines read from poem_output.txt
    ml = []          # poems, re-assembled as two-line strings
    flag2 = 1        # number of keywords the user supplied (vv1 always counts)
    nm_dic = {self.vv1: 1}
    transitory = {}  # similar words -> similarity score
    for item in [self.vv2, self.vv3]:
        if item != '':
            flag2 += 1
            nm_dic[item] = 1
    # Expand each keyword with its 10 nearest word2vec neighbours.
    for key in nm_dic.keys():
        nm1_similar = w2v_model.most_similar(positive=[key], topn=10)
        for j in range(0, len(nm1_similar)):
            transitory[nm1_similar[j][0]] = nm1_similar[j][1]
    # Merged dict: exact keywords (weight 1) plus similar words (weight = similarity).
    nm1_dic = {**nm_dic, **transitory}
    poems = {}
    with open('./poem_output.txt', 'r') as f1:
        for line in f1.readlines():
            # Normalise to simplified Chinese before matching.
            line = zhconv.convert(line.strip('\n'), 'zh-cn')
            kl.append(line)
    while '' in kl:
        kl.remove('')
    # Pair consecutive lines: each poem is two lines joined by '\n'.
    i = 0
    while (i >= 0) & ((2 * i + 1) < len(kl)):
        ml.append(kl[2 * i] + '\n' + kl[2 * i + 1])
        i += 1
    cl = ml[:]
    # Score threshold depends on how many keywords were entered:
    # >= 1.7 for one keyword, >= 2.7 for two, >= 3.7 for three.
    for i in ml:
        score = 0
        for item in nm1_dic.keys():
            if item in i:
                score += nm1_dic[item]
        if score == 0:
            cl.remove(i)
        elif (flag2 == 1) & (score >= 1.7):
            poems[i] = score
        elif (flag2 == 2) & (score >= 2.7):
            poems[i] = score
        elif (flag2 == 3) & (score >= 3.7):
            poems[i] = score
    # Output must be sorted by score, best match first.
    with open('./similar_output.txt', 'w') as fileObject:
        for k in sorted(poems, key=poems.__getitem__, reverse=True):
            fileObject.write(k)
            fileObject.write('\n\n\n')
    # NOTE(review): a commented-out earlier implementation (exact keyword
    # matching without word2vec expansion) was removed from here.
    self.again_button.Bind(wx.EVT_BUTTON, MyDialog5(None).OnClick)
def find(self, event):
    """
    wx event handler: validate the keyword inputs, regenerate poems, score
    them against the keywords expanded with word2vec-similar words, and
    write the best matches to ``./similar_output.txt``.

    :param event: wx button event (unused directly)
    """
    v1 = self.nm1.GetValue()
    v2 = self.nm2.GetValue()
    v3 = self.nm3.GetValue()
    # NOTE(review): each MyDialog3(self) call constructs a brand-new dialog,
    # sets an attribute on it, and immediately discards the instance — this
    # does not appear to propagate v1..v3 anywhere persistent. Verify intent.
    MyDialog3(self).vv1 = MyDialog3(self).vv1.replace(
        MyDialog3(self).vv1, v1)
    MyDialog3(self).vv2 = MyDialog3(self).vv2.replace(
        MyDialog3(self).vv2, v2)
    MyDialog3(self).vv3 = MyDialog3(self).vv3.replace(
        MyDialog3(self).vv3, v3)
    # Validation: at least one keyword, each at most one character long.
    if (self.nm1.GetValue() == '') & (self.nm2.GetValue() == '') & (self.nm3.GetValue() == ''):
        wx.MessageBox('请输入关键字!', '提示')
    elif (len(self.nm1.GetValue()) > 1) | (len(
            self.nm2.GetValue()) > 1) | (len(self.nm3.GetValue()) > 1):
        wx.MessageBox('请输入一个关键字!', '提示')
    else:
        # Match the keywords against the generated output.
        generate_poem.generate_poem()
        kl = []      # individual poem lines read from poem_output.txt
        ml = []      # poems, re-assembled as two-line strings
        flag = 1     # number of keywords supplied (nm1 always counts)
        # Read all user input; build keyword dict plus semantically similar words.
        nm_dict = {self.nm1.GetValue(): 1}
        transitory = {}
        for item in [self.nm2.GetValue(), self.nm3.GetValue()]:
            if item != '':
                flag += 1
                nm_dict[item] = 1
        # Expand each keyword with its 10 nearest word2vec neighbours.
        for key in nm_dict.keys():
            nm1_similar = w2v_model.most_similar(positive=[key], topn=10)
            for j in range(0, len(nm1_similar)):
                transitory[nm1_similar[j][0]] = nm1_similar[j][1]
        nm1_dict = {**nm_dict, **transitory}
        # Build the poem list (two lines per poem).
        poems = {}
        with open('./poem_output.txt', 'r') as f1:
            for line in f1.readlines():
                # Normalise to simplified Chinese before matching.
                line = zhconv.convert(line.strip('\n'), 'zh-cn')
                kl.append(line)
        while '' in kl:
            kl.remove('')
        i = 0
        while (i >= 0) & ((2 * i + 1) < len(kl)):
            ml.append(kl[2 * i] + '\n' + kl[2 * i + 1])
            i += 1
        cl = ml[:]
        # Score threshold depends on how many keywords were entered:
        # >= 1.7 for one keyword, >= 2.7 for two, >= 3.7 for three.
        for i in ml:
            score = 0
            for item in nm1_dict.keys():
                if item in i:
                    score += nm1_dict[item]
            if score == 0:
                cl.remove(i)
            elif (flag == 1) & (score >= 1.7):
                poems[i] = score
            elif (flag == 2) & (score >= 2.7):
                poems[i] = score
            elif (flag == 3) & (score >= 3.7):
                poems[i] = score
        # Output must be sorted by score, best match first.
        with open('./similar_output.txt', 'w') as fileObject:
            for k in sorted(poems, key=poems.__getitem__, reverse=True):
                fileObject.write(k)
                fileObject.write('\n\n\n')
        wx.MessageBox('诗歌已生成!请再次点击确定输入', '提示')
    self.okButton.Bind(wx.EVT_BUTTON, MyDialog3(None).OnClick)
def index():
    """
    Flask view (Python 2): generate a four-line Chinese poem.

    POST: read up to four user-supplied lines. If the first line is given,
    continue the user's poem; otherwise generate a fresh 7-character poem
    from a random start word. Generation is retried up to 3 times until
    every line has the expected length; on failure an apology poem is shown.
    GET: render the empty form.

    :return: rendered ``index.htm`` template
    """
    if request.method == 'POST':
        sentences_count = 4
        poem = ['' for i in range(sentences_count)]
        poem_1 = request.form['poem_1']
        poem_2 = request.form['poem_2']
        poem_3 = request.form['poem_3']
        poem_4 = request.form['poem_4']
        if poem_1:
            # The user supplied their own line(s).
            # NOTE(review): a commented-out couplet-based variant using
            # ChineseCouplet (train/set_up on duilian/tangshi data files)
            # was removed from here.
            topic_id = random.randint(0, 9)  # TODO
            given_poem = [poem_1, poem_2, poem_3, poem_4]
            sentence_len = len(given_poem[0])
            flag = False
            # Bounded retries: keep regenerating until all 4 lines match
            # the length of the user's first line, at most 3 attempts.
            for tried_count in range(3):
                poem = generate_poem.generate_poem_with_poem4(
                    given_poem, collocations_v, collocations_h, words,
                    topic_words[topic_id])
                print '\n'.join(poem)
                flag = True
                for i in range(4):
                    if len(poem[i]) != sentence_len:
                        flag = False
                        break
                if flag:
                    break
            if not flag:
                # Fallback apology poem when generation keeps failing.
                poem = [u'系统暂无法生成', u'请您稍候再尝试', u'很抱歉带来困扰', u'感谢您的使用啦']
        else:
            # No user input: generate a fresh 7-character poem.
            topic_id = random.randint(0, 9)
            sentence_len = 7
            start_word = random.choice(start_words)
            flag = False
            for tried_count in range(3):
                poem = generate_poem.generate_poem(sentence_len, sentences_count, start_word,\
                    collocations_v, collocations_h, words, topic_words[topic_id])
                print(poem)
                flag = True
                for i in range(4):
                    if len(poem[i]) != sentence_len:
                        flag = False
                        break
                if flag:
                    break
            if not flag:
                # Fallback apology poem when generation keeps failing.
                poem = [u'系统暂无法生成', u'请您稍候再尝试', u'很抱歉带来困扰', u'感谢您的使用啦']
        return render_template('index.htm', poem_1=poem[0], poem_2=poem[1],
                               poem_3=poem[2], poem_4=poem[3])
    return render_template('index.htm', g=g)
def get_poem(image_file, reludir):
    """
    Generate a poem from an image.

    :param image_file: path to the input image
    :param reludir: directory argument forwarded to feature extraction
    :return: poem generated from the image's extracted features
    """
    features = extract_feature.extract_feature(image_file, reludir)
    return generate_poem.generate_poem(features)