# Text generation with a pretrained BetterRnnlm language model.
model = BetterRnnlmGen()
model.load_params('../ch6/BetterRnnlm.pkl')

# Seed word and token ids that must never be sampled.
seed_word = 'you'
seed_id = word_to_id[seed_word]
banned_ids = [word_to_id[w] for w in ['N', '<unk>', '$']]

# Sample a sequence of word ids and render it as text;
# each ' <eos>' marker becomes a sentence-ending period + newline.
sampled_ids = model.generate(seed_id, banned_ids)
text = ' '.join(id_to_word[i] for i in sampled_ids)
print(text.replace(' <eos>', '.\n'))

# Drop the hidden state: the next generation starts a fresh sequence.
model.reset_state()

prefix = 'the meaning of life is'
prefix_ids = [word_to_id[w] for w in prefix.split(' ')]

# Prime the model on every prefix word except the last one ...
for wid in prefix_ids[:-1]:
    model.predict(np.array(wid).reshape(1, 1))

# ... then continue generation from that last prefix word and
# prepend the prefix so the printed text reads as one sentence.
sampled_ids = model.generate(prefix_ids[-1], banned_ids)
all_ids = prefix_ids[:-1] + sampled_ids
text = ' '.join(id_to_word[i] for i in all_ids)
print('-' * 50)
print(text.replace(' <eos>', '.\n'))
# Text generation with a pretrained BetterRnnlm language model.
model = BetterRnnlmGen()
# Load pretrained weights so the generations are coherent.
model.load_params('../ch06_게이트가_추가된_RNN/BetterRnnlm.pkl')

# Start word, plus tokens that must be skipped during sampling.
start_word = 'you'
start_id = word_to_id[start_word]
skip_words = ['N', '<unk>', '$']
skip_ids = [word_to_id[w] for w in skip_words]

# Generate a word-id sequence from the start word and render it.
word_ids = model.generate(start_id, skip_ids)
txt = ' '.join([id_to_word[i] for i in word_ids])
# BUG FIX: replace ' <eos>' (with the leading space) so "word <eos>"
# becomes "word.\n" instead of "word .\n"; this also matches the
# second replace below.
txt = txt.replace(' <eos>', '.\n')
print(txt)

# Clear the hidden state: we stop continuing the previous sequence
# and start feeding a brand-new one.
model.reset_state()

start_words = 'the meaning of life is'
start_ids = [word_to_id[w] for w in start_words.split(' ')]
# Prime the model with every prefix word except the last.
for x in start_ids[:-1]:
    x = np.array(x).reshape(1, 1)
    model.predict(x)  # output shape: (1, 1, vocab_size)

# Continue generating from the final prefix word, then prepend the
# prefix so the printed text reads as one sentence.
word_ids = model.generate(start_ids[-1], skip_ids)
word_ids = start_ids[:-1] + word_ids
txt = ' '.join([id_to_word[i] for i in word_ids])
txt = txt.replace(' <eos>', '.\n')
print('-' * 50)
print(txt)