# coding: utf-8
import sys
sys.path.append('..')
from common.np import *
from rnnlm_gen import BetterRnnlmGen
from dataset import ptb

corpus, word_to_id, id_to_word = ptb.load_data('train')
vocab_size = len(word_to_id)
corpus_size = len(corpus)

model = BetterRnnlmGen()
model.load_params('../ch6/BetterRnnlm.pkl')

start_word = 'you'
start_id = word_to_id[start_word]
skip_words = ['N', '<unk>', '$']
skip_ids = [word_to_id[w] for w in skip_words]

word_ids = model.generate(start_id, skip_ids)
txt = ' '.join([id_to_word[i] for i in word_ids])
txt = txt.replace(' <eos>', '.\n')
print(txt)

model.reset_state()

start_words = 'the meaning of life is'
start_ids = [word_to_id[w] for w in start_words.split(' ')]
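# The prefix in start_ids above is otherwise unused in this file. A minimal
# sketch of how it could seed generation, assuming BetterRnnlmGen also exposes
# a predict() method (not shown here) that accepts a (1, 1) array of word IDs:
for x in start_ids[:-1]:
    x = np.array(x).reshape(1, 1)
    model.predict(x)  # prime the hidden state with each prefix word
word_ids = model.generate(start_ids[-1], skip_ids)
word_ids = start_ids[:-1] + word_ids
txt = ' '.join([id_to_word[i] for i in word_ids])
txt = txt.replace(' <eos>', '.\n')
print(txt)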
import sys sys.path.append("..") from rnnlm_gen import BetterRnnlmGen from dataset import ptb corpus, word_to_id, id_to_word = ptb.load_data('train') vocab_size = len(word_to_id) corpus_size = len(corpus) model = BetterRnnlmGen() model.load_params('../ch06_게이트가_추가된_RNN/BetterRnnlm.pkl') # 미리 학습된 가중치 불러와 성능 높임 # 시작 (start) 문자와 건너뛸 (skip) 문자 설정 start_word = 'you' start_id = word_to_id[start_word] skip_words = ['N', '<unk>', '$'] skip_ids = [word_to_id[w] for w in skip_words] # 문장 생성 word_ids = model.generate(start_id, skip_ids) txt = ' '.join([id_to_word[i] for i in word_ids]) txt = txt.replace('<eos>', '.\n') print(txt) model.reset_state() # not continue the previous sequences anymore, now you will start feeding new sequences. start_words = 'the meaning of life is' start_ids = [word_to_id[w] for w in start_words.split(' ')] for x in start_ids[:-1]: x = np.array(x).reshape(1, 1)
# coding: utf-8
import sys
sys.path.append(
    '/Users/ahjeong_park/Study/WegraLee/deep-learning-from-scratch-2')
from common.np import *
from rnnlm_gen import BetterRnnlmGen
from dataset import ptb

corpus, word_to_id, id_to_word = ptb.load_data('train')
vocab_size = len(word_to_id)
corpus_size = len(corpus)

model = BetterRnnlmGen()
model.load_params(
    '/Users/ahjeong_park/Study/Deep-Learning-from-Scratch-2/ch06/BetterRnnlm.pkl'
)

# Set the start word and the words to skip
start_word = 'you'
start_id = word_to_id[start_word]
skip_words = ['N', '<unk>', '$']
skip_ids = [word_to_id[w] for w in skip_words]

# Generate a sentence
word_ids = model.generate(start_id, skip_ids)
txt = ' '.join([id_to_word[i] for i in word_ids])
txt = txt.replace(' <eos>', '.\n')
print(txt)

model.reset_state()
import sys
sys.path.append('..')
from common import config
config.GPU = True
from common.np import *
from rnnlm_gen import BetterRnnlmGen
#import txt
import train_better_lstm
from tangoatume import preprocess

corpus, word_to_id, id_to_word = (train_better_lstm.corpus,
                                  train_better_lstm.word_to_id,
                                  train_better_lstm.id_to_word)
print(word_to_id)
vocab_size = len(word_to_id)
corpus_size = len(corpus)

model = BetterRnnlmGen()
model.load_params('BetterLstmlm.pkl')

# Set the start word and the words to skip
start_word = 'コロナウイルス'
start_id = word_to_id[start_word]
skip_words = ['']  # skip the empty-string token (assumes '' exists in this vocabulary)
skip_ids = [word_to_id[w] for w in skip_words]

# Generate a sentence
word_ids = model.generate(start_id, skip_ids)
text = ''.join([id_to_word[i] for i in word_ids])
text = text.replace('<eos>', '.\n')  # no leading space: words are joined without spaces above
print(text)

model.reset_state()
# coding: utf-8
import sys
sys.path.append('/home/hiromasa/deep-learning-from-scratch-2')
from common.np import *
from rnnlm_gen import BetterRnnlmGen
from dataset import ptb

corpus, word_to_id, id_to_word = ptb.load_data('train')
vocab_size = len(word_to_id)
corpus_size = len(corpus)

model = BetterRnnlmGen()
model.load_params(
    '/home/hiromasa/deep-learning-from-scratch-2/ch06/BetterRnnlm.pkl')

# Set the start word and the words to skip
start_word = 'you'
start_id = word_to_id[start_word]
skip_words = ['N', '<unk>', '$']
skip_ids = [word_to_id[w] for w in skip_words]

# Generate a sentence
word_ids = model.generate(start_id, skip_ids)
txt = ' '.join([id_to_word[i] for i in word_ids])
txt = txt.replace(' <eos>', '.\n')
print(txt)

model.reset_state()

start_words = 'the meaning of life is'
start_ids = [word_to_id[w] for w in start_words.split(' ')]
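# As in the first script, start_ids is otherwise unused here. A hedged sketch
# of the seeded generation, assuming a single-step predict() method exists:
for x in start_ids[:-1]:
    x = np.array(x).reshape(1, 1)
    model.predict(x)  # prime the hidden state with each prefix word
word_ids = model.generate(start_ids[-1], skip_ids)
word_ids = start_ids[:-1] + word_ids
txt = ' '.join([id_to_word[i] for i in word_ids])
txt = txt.replace(' <eos>', '.\n')
print(txt)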
import sys
sys.path.append('..')
from common.np import *
from rnnlm_gen import BetterRnnlmGen
from dataset import ptb

corpus, word_to_id, id_to_word = ptb.load_data('train')
vocab_size = len(word_to_id)
corpus_size = len(corpus)

model = BetterRnnlmGen()
model.load_params('..\\ch06\\BetterRnnlm.pkl')

# Set the start word and the words to skip
start_word = 'you'
start_id = word_to_id[start_word]
skip_words = ['N', '<unk>', '$']
skip_ids = [word_to_id[w] for w in skip_words]

# Generate a sentence
word_ids = model.generate(start_id, skip_ids)
txt = ' '.join([id_to_word[i] for i in word_ids])
txt = txt.replace(' <eos>', '.\n')
print(txt)

model.reset_state()

start_word = 'the meaning of life is'
start_ids = [word_to_id[w] for w in start_word.split(' ')]
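# Hedged sketch of how the prefix above could seed generation, assuming the
# same single-step predict() method as in the earlier variants:
for x in start_ids[:-1]:
    x = np.array(x).reshape(1, 1)
    model.predict(x)  # prime the hidden state with each prefix word
word_ids = model.generate(start_ids[-1], skip_ids)
word_ids = start_ids[:-1] + word_ids
txt = ' '.join([id_to_word[i] for i in word_ids])
txt = txt.replace(' <eos>', '.\n')
print(txt)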