# coding: utf-8
import sys
sys.path.append('..')
from rnnlm_gen import RnnlmGen
from dataset import ptb

# Load the PTB training corpus together with both vocabulary mappings.
corpus, word_to_id, id_to_word = ptb.load_data('train')
vocab_size = len(word_to_id)
corpus_size = len(corpus)

# Restore the LSTM language model trained in chapter 6.
model = RnnlmGen()
model.load_params('../ch06/Rnnlm.pkl')

# Seed word, plus tokens that must never be sampled during generation.
start_word = 'you'
start_id = word_to_id[start_word]
skip_words = ['N', '<unk>', '$']
skip_ids = [word_to_id[w] for w in skip_words]

# Sample a sequence of word ids and render it as readable text,
# turning each sentence terminator into a period + newline.
word_ids = model.generate(start_id, skip_ids)
txt = ' '.join(id_to_word[i] for i in word_ids)
txt = txt.replace(' <eos>', '.\n')
print(txt)
# coding: utf-8
import sys
sys.path.append('/home/hiromasa/deep-learning-from-scratch-2')
from rnnlm_gen import RnnlmGen
from dataset import ptb

# PTB training data: token-id corpus and word<->id lookup tables.
corpus, word_to_id, id_to_word = ptb.load_data('train')
vocab_size = len(word_to_id)
corpus_size = len(corpus)

# Generator model, initialized from weights trained in chapter 6.
model = RnnlmGen()
model.load_params('/home/hiromasa/deep-learning-from-scratch-2/ch06/Rnnlm.pkl')

# Configure the start word and the words to skip.
start_word = 'you'
start_id = word_to_id[start_word]
skip_words = ['N', '<unk>', '$']
skip_ids = [word_to_id[w] for w in skip_words]

# Text generation: map sampled ids back to words and punctuate.
word_ids = model.generate(start_id, skip_ids)
words = [id_to_word[i] for i in word_ids]
txt = ' '.join(words)
txt = txt.replace(' <eos>', '.\n')
print(txt)
import sys
sys.path.append('..')
from rnnlm_gen import RnnlmGen
from dataset import ptb

# Fetch the PTB training split and its vocabulary dictionaries.
corpus, word_to_id, id_to_word = ptb.load_data('train')
vocab_size = len(word_to_id)
corpus_size = len(corpus)

# Load the improved (BetterRnnlm) weights trained in chapter 6.
model = RnnlmGen()
model.load_params('../ch06/BetterRnnlm.pkl')

# Generation is seeded with one word; certain tokens are excluded.
start_word = 'you'
start_id = word_to_id[start_word]
skip_words = ['N', '<unk>', '$']
skip_ids = [word_to_id[w] for w in skip_words]

# Produce a word-id sequence, then convert it to display text.
word_ids = model.generate(start_id, skip_ids)
txt = ' '.join(id_to_word[wid] for wid in word_ids)
txt = txt.replace(' <eos>', '.\n')
print(txt)
# chap07/generate_text.py
import sys
sys.path.append('..')
from rnnlm_gen import RnnlmGen
from dataset import ptb

# PTB training corpus plus word<->id mappings.
corpus, word_to_id, id_to_word = ptb.load_data('train')
vocab_size = len(word_to_id)
corpus_size = len(corpus)

# Model weights are expected in the current directory.
model = RnnlmGen()
model.load_params('./Rnnlm.pkl')

# Set the start word and the words to skip during sampling.
start_word = 'you'
start_id = word_to_id[start_word]
skip_words = ['N', '<unk>', '$']
skip_ids = [word_to_id[w] for w in skip_words]

# Generate a sentence: sample ids, join as words, punctuate at <eos>.
word_ids = model.generate(start_id, skip_ids)
generated = [id_to_word[i] for i in word_ids]
txt = ' '.join(generated)
txt = txt.replace(' <eos>', '.\n')
print(txt)
import sys
sys.path.append('..')
from rnnlm_gen import RnnlmGen
from dataset import ptb

# Load PTB training data and its vocabulary lookups.
corpus, word_to_id, id_to_word = ptb.load_data('train')
vocab_size = len(word_to_id)
corpus_size = len(corpus)

# Restore pretrained weights from the chapter-6 directory.
model = RnnlmGen()
model.load_params('../Ch6/Rnnlm.pkl')

# Setting start word and skip word
start_word = 'you'
start_id = word_to_id[start_word]
skip_words = ['N', '<unk>', '$']
skip_ids = [word_to_id[w] for w in skip_words]

# Generating text
word_ids = model.generate(start_id, skip_ids)
txt = ' '.join(id_to_word[i] for i in word_ids)
txt = txt.replace(' <eos>', '.\n')
print(txt)
import sys
sys.path.append('..')
from rnnlm_gen import RnnlmGen
from dataset import ptb

# PTB training split: id corpus plus both vocabulary tables.
corpus, word_to_id, id_to_word = ptb.load_data('train')
vocab_size = len(word_to_id)
corpus_size = len(corpus)

# Pretrained weights (Windows-style relative path).
model = RnnlmGen()
model.load_params('..\\ch06\\Rnnlm.pkl')

# Configure the start word and the words to skip.
start_word = 'you'
start_id = word_to_id[start_word]
skip_words = ['N', '<unk>', '$']
skip_ids = [word_to_id[w] for w in skip_words]

# Text generation: sample ids, join as words, punctuate at <eos>.
word_ids = model.generate(start_id, skip_ids)
tokens = [id_to_word[i] for i in word_ids]
txt = ' '.join(tokens).replace(' <eos>', '.\n')
print(txt)
import sys sys.path.append("..") from rnnlm_gen import RnnlmGen from dataset import ptb corpus, word_to_id, id_to_word = ptb.load_data('train') vocab_size = len(word_to_id) corpus_size = len(corpus) model = RnnlmGen() model.load_params("../ch06/Rnnlm.pkl") # start文字とskip文字の設定 start_word = 'you' start_id = word_to_id[start_word] skip_words = ['N', '<unk>', '$'] skip_ids = [word_to_id[w] for w in skip_words] # 文章生成 word_ids = model.generate(start_id, skip_ids) txt = ' '.join([id_to_word[i] for i in word_ids]) txt = txt.replace("<eos>", ".\n") print(txt)
import train_simple_lstmlm
# Fix: RnnlmGen was used below but never imported, so the script crashed
# with NameError before generating anything. NOTE(review): assumed to live
# in a local rnnlm_gen module, as in the sibling scripts — confirm.
from rnnlm_gen import RnnlmGen

# Reuse the corpus and vocabulary built by the training script.
corpus = train_simple_lstmlm.corpus
word_to_id = train_simple_lstmlm.word_to_id
id_to_word = train_simple_lstmlm.id_to_word

vocab_size = len(word_to_id)
corpus_size = len(corpus)

# Rebuild the generator with the same dimensions used during training,
# then restore the saved weights.
model = RnnlmGen(vocab_size, train_simple_lstmlm.wordvec_size,
                 train_simple_lstmlm.hidden_size)
model.load_params("./pkl/SimpleLstmlm_epoch50.pkl")

# Configure the start word and the words to skip.
# NOTE(review): skip_words contains only the empty string — presumably a
# placeholder token in this corpus; verify '' is actually in word_to_id.
start_word = 'コロナウイルス'
start_id = word_to_id[start_word]
skip_words = ['']
skip_ids = [word_to_id[w] for w in skip_words]

# Text generation (the result is left in word_ids; the original script
# never rendered it — its rendering code was dead/commented out).
word_ids = model.generate(start_id, skip_ids)
import sys sys.path.append("..") from rnnlm_gen import RnnlmGen from dataset import ptb corpus, word_to_id, id_to_word = ptb.load_data('train') vocab_size = len(word_to_id) corpus_size = len(corpus) model = RnnlmGen() model.load_params( '../ch06_게이트가_추가된_RNN/Rnnlm.pkl') # 미리 학습된 가중치 불러와 성능 높임 # 시작 (start) 문자와 건너뛸 (skip) 문자 설정 start_word = 'you' start_id = word_to_id[start_word] skip_words = ['N', '<unk>', '$'] skip_ids = [word_to_id[w] for w in skip_words] # 문장 생성 word_ids = model.generate(start_id, skip_ids) txt = ' '.join([id_to_word[i] for i in word_ids]) txt = txt.replace('<eos>', '.\n') print(txt)