def main() -> None:
    (x_train, t_train), (x_test, t_test) = sequence.load_data('addition.txt')
    x_train, x_test = x_train[:, ::-1], x_test[:, ::-1]
    char_to_id, id_to_char = sequence.get_vocab()

    vocab_size = len(char_to_id)
    wordvec_size = 16
    hidden_size = 128
    batch_size = 128
    max_epoch = 25
    max_grad = 5.0

    model = PeekySeq2seq(vocab_size, wordvec_size, hidden_size)
    optimizer = Adam()
    trainer = Trainer(model, optimizer)

    acc_list = []
    for epoch in range(1, max_epoch + 1):
        trainer.fit(x_train, t_train, max_epoch=1,
                    batch_size=batch_size, max_grad=max_grad)

        correct_num = 0
        for i in range(len(x_test)):
            question, correct = x_test[[i]], t_test[[i]]
            verbose = i < 10
            correct_num += eval_seq2seq(model, question, correct,
                                        id_to_char, verbose)

        acc = float(correct_num) / len(x_test)
        acc_list.append(acc)
        print(f'val acc {acc * 100}%')

    print('DONE')
def main():
    (x_train, t_train), (x_test, t_test) = load_data('addition.txt', seed=1984)
    char_to_id, id_to_char = get_vocab()

    print(x_train.shape, t_train.shape)
    print(x_test.shape, t_test.shape)

    print(''.join([id_to_char[c] for c in x_train[0]]))
    print(''.join([id_to_char[c] for c in t_train[0]]))
def main():
    # Load the dataset
    (x_train, t_train), (x_test, t_test) = sequence.load_data('addition.txt')
    char_to_id, id_to_char = sequence.get_vocab()

    # Reversing the input sequence supposedly improves seq2seq accuracy...
    # a dubious-sounding trick, but it works (see the sketch after this function)
    is_reverse = True
    if is_reverse:
        x_train, x_test = x_train[:, ::-1], x_test[:, ::-1]

    # Hyperparameters
    vocab_size = len(char_to_id)
    wordvec_size = 16
    hidden_size = 128
    batch_size = 128
    max_epoch = 25
    max_grad = 5.0

    # Create the model / optimizer / trainer
    # model = Seq2seq(vocab_size, wordvec_size, hidden_size)
    model = PeekySeq2seq(vocab_size, wordvec_size, hidden_size)
    optimizer = Adam()
    trainer = Trainer(model, optimizer)

    acc_list = []
    for epoch in range(max_epoch):
        trainer.fit(x_train, t_train, max_epoch=1,
                    batch_size=batch_size, max_grad=max_grad)

        correct_num = 0
        for i in range(len(x_test)):
            question, correct = x_test[[i]], t_test[[i]]
            verbose = i < 10
            correct_num += eval_seq2seq(model, question, correct,
                                        id_to_char, verbose,
                                        is_reverse=is_reverse)

        acc = float(correct_num) / len(x_test)
        acc_list.append(acc)
        print(f'val acc {acc * 100}%')
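# Added illustration: a minimal sketch of what the x_train[:, ::-1] reversal
# above actually does to an encoded question. The toy vocabulary below is
# hypothetical and not the one built by sequence.get_vocab().
import numpy as np


def _reverse_demo() -> None:
    toy_chars = ['5', '7', '+', ' ']
    toy_id = {c: i for i, c in enumerate(toy_chars)}
    # one encoded question, padded to a fixed length: "57+5  "
    x = np.array([[toy_id[c] for c in '57+5  ']])
    x_rev = x[:, ::-1]  # flip each row along the time axis, as in the trick above
    print(''.join(toy_chars[i] for i in x_rev[0]))  # prints "  5+75"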
def main():
    # Load the data
    (x_train, t_train), (x_test, t_test) = sequence.load_data('date.txt')
    char_to_id, id_to_char = sequence.get_vocab()

    # Reverse the input sequences
    x_train, x_test = x_train[:, ::-1], x_test[:, ::-1]

    # Hyperparameters
    vocab_size = len(char_to_id)
    wordvec_size = 16
    hidden_size = 256
    batch_size = 128
    max_epoch = 10
    max_grad = 5.0

    model = AttentionSeq2seq(vocab_size, wordvec_size, hidden_size)
    optimizer = Adam()
    trainer = Trainer(model, optimizer)

    acc_list = []
    for epoch in range(max_epoch):
        trainer.fit(x_train, t_train, max_epoch=1,
                    batch_size=batch_size, max_grad=max_grad)

        correct_num = 0
        for i in range(len(x_test)):
            question, correct = x_test[[i]], t_test[[i]]
            verbose = i < 10
            correct_num += eval_seq2seq(model, question, correct,
                                        id_to_char, verbose, is_reverse=True)

        acc = float(correct_num) / len(x_test)
        acc_list.append(acc)
        print('val acc %.3f%%' % (acc * 100))
def main() -> None:
    (x_train, t_train), (x_test, t_test) = sequence.load_data('date.txt')
    char_to_id, id_to_char = sequence.get_vocab()
    x_train, x_test = x_train[:, ::-1], x_test[:, ::-1]

    vocab_size = len(char_to_id)
    wordvec_size = 16
    hidden_size = 256
    batch_size = 128
    max_epoch = 10
    max_grad = 5.0

    model = AttentionSeq2seq(vocab_size, wordvec_size, hidden_size)
    optimizer = Adam()
    trainer = Trainer(model, optimizer)

    acc_list = []
    for epoch in range(max_epoch):
        trainer.fit(x_train, t_train, max_epoch=1,
                    batch_size=batch_size, max_grad=max_grad)

        correct_num = 0
        for i in range(len(x_test)):
            question, correct = x_test[[i]], t_test[[i]]
            verbose = i < 10
            correct_num += eval_seq2seq(model, question, correct,
                                        id_to_char, verbose, is_reverse=True)

        acc = float(correct_num) / len(x_test)
        acc_list.append(acc)
        print(f"val acc {acc * 100}%")

    model.save_params()
    print("DONE")
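# Added sketch: reusing the parameters saved by model.save_params() above in a
# fresh run. This assumes the model class also exposes load_params() as the
# counterpart of save_params() and that it reads the default parameter file;
# both are assumptions about this repo's API, not confirmed by the code above.
def check_saved_model() -> None:
    (_, _), (x_test, t_test) = sequence.load_data('date.txt')
    char_to_id, id_to_char = sequence.get_vocab()
    x_test = x_test[:, ::-1]  # same input reversal as during training

    model = AttentionSeq2seq(len(char_to_id), 16, 256)  # sizes must match training
    model.load_params()  # assumed counterpart of save_params() above

    # spot-check the reloaded model on the first ten validation questions
    for i in range(10):
        eval_seq2seq(model, x_test[[i]], t_test[[i]], id_to_char,
                     True, is_reverse=True)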
class AttentionSeq2seq(Seq2seq):
    def __init__(self, vocab_size, wordvec_size, hidden_size):
        args = vocab_size, wordvec_size, hidden_size
        self.encoder = AttentionEncoder(*args)
        self.decoder = AttentionDecoder(*args)
        self.softmax = TimeSoftmaxWithLoss()

        self.params = self.encoder.params + self.decoder.params
        self.grads = self.encoder.grads + self.decoder.grads


if __name__ == "__main__":
    (x_train, t_train), (x_test, t_test) = sequence.load_data('date.txt')
    char_to_id, id_to_char = sequence.get_vocab()

    # Reverse the input sequences
    x_train, x_test = x_train[:, ::-1], x_test[:, ::-1]

    # Hyperparameters
    vocab_size = len(char_to_id)
    wordvec_size = 16
    hidden_size = 256
    batch_size = 128
    max_epoch = 4
    max_grad = 5.0

    model = AttentionSeq2seq(vocab_size, wordvec_size, hidden_size)
    optimizer = Adam()