Example No. 1
# Imports assumed to follow the deep-learning-from-scratch-2 repository layout.
import sys
sys.path.append('..')
from dataset import sequence
from common.optimizer import Adam
from common.trainer import Trainer
from common.util import eval_seq2seq
from peeky_seq2seq import PeekySeq2seq


def main() -> None:
    (x_train, t_train), (x_test, t_test) = sequence.load_data('addition.txt')
    x_train, x_test = x_train[:, ::-1], x_test[:, ::-1]

    char_to_id, id_to_char = sequence.get_vocab()

    vocab_size = len(char_to_id)
    wordvec_size = 16
    hidden_size = 128
    batch_size = 128
    max_epoch = 25
    max_grad = 5.0

    model = PeekySeq2seq(vocab_size, wordvec_size, hidden_size)
    optimizer = Adam()
    trainer = Trainer(model, optimizer)

    acc_list = []
    for epoch in range(1, max_epoch+1):
        trainer.fit(x_train, t_train, max_epoch=1, batch_size=batch_size, max_grad=max_grad)

        correct_num = 0
        for i in range(len(x_test)):
            question, correct = x_test[[i]], t_test[[i]]
            verbose = i < 10
            correct_num += eval_seq2seq(model, question, correct, id_to_char, verbose)
        
        acc = float(correct_num) / len(x_test)
        acc_list.append(acc)
        print(f'val acc {acc * 100:.3f}%')
    print('DONE')
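
To run the snippet as a script, a standard entry-point guard (not part of the original) can be appended:

if __name__ == '__main__':
    main()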
Example No. 2
def train_eval(x_train, x_test, is_peeky):
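    # Relies on module-level globals defined elsewhere: t_train, t_test, id_to_char,
    # vocab_size, wordvec_size, hidden_size, batch_size, max_epoch and max_grad.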
    if is_peeky:
        model = PeekySeq2seq(vocab_size, wordvec_size, hidden_size)
    else:
        model = Seq2seq(vocab_size, wordvec_size, hidden_size)
    optimizer = Adam()
    trainer = Trainer(model, optimizer)

    acc_list = []
    for epoch in range(max_epoch):
        trainer.fit(x_train,
                    t_train,
                    max_epoch=1,
                    batch_size=batch_size,
                    max_grad=max_grad)
        correct_num = 0
        for i in range(len(x_test)):
            question, correct = x_test[[i]], t_test[[i]]
            verbose = i < 10
            correct_num += eval_seq2seq(model, question, correct, id_to_char,
                                        verbose)
        acc = float(correct_num) / len(x_test)
        acc_list.append(acc)
        print('val acc %.3f%%' % (acc * 100))
    return acc_list
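
A minimal usage sketch for train_eval (assuming the imports from Example No. 1 plus `from seq2seq import Seq2seq`, and that this code sits at module level, since the function reads t_train, t_test, id_to_char and the hyperparameters as globals):

# hypothetical driver; names follow the snippets above
(x_train, t_train), (x_test, t_test) = sequence.load_data('addition.txt')
char_to_id, id_to_char = sequence.get_vocab()
x_train, x_test = x_train[:, ::-1], x_test[:, ::-1]  # reverse the inputs

vocab_size = len(char_to_id)
wordvec_size, hidden_size = 16, 128
batch_size, max_epoch, max_grad = 128, 25, 5.0

baseline_acc = train_eval(x_train, x_test, is_peeky=False)
peeky_acc = train_eval(x_train, x_test, is_peeky=True)
print('final acc  baseline: %.3f%%  peeky: %.3f%%'
      % (baseline_acc[-1] * 100, peeky_acc[-1] * 100))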
Example No. 3
def main():

    # load the dataset
    (x_train, t_train), (x_test, t_test) = sequence.load_data('addition.txt')
    char_to_id, id_to_char = sequence.get_vocab()

    # Reversing the input sequence supposedly improves seq2seq accuracy... dubious theory
    is_reverse = True
    if is_reverse:
        x_train, x_test = x_train[:, ::-1], x_test[:, ::-1]

    # hyperparameter settings
    vocab_size = len(char_to_id)
    wordvec_size = 16
    hidden_size = 128
    batch_size = 128
    max_epoch = 25
    max_grad = 5.0

    # create the model / optimizer / trainer
    # model = Seq2seq(vocab_size, wordvec_size, hidden_size)
    model = PeekySeq2seq(vocab_size, wordvec_size, hidden_size)
    optimizer = Adam()
    trainer = Trainer(model, optimizer)

    acc_list = []
    for epoch in range(max_epoch):
        trainer.fit(x_train,
                    t_train,
                    max_epoch=1,
                    batch_size=batch_size,
                    max_grad=max_grad)

        correct_num = 0
        for i in range(len(x_test)):
            question, correct = x_test[[i]], t_test[[i]]
            verbose = i < 10
            correct_num += eval_seq2seq(model, question, correct, id_to_char,
                                        verbose)

        acc = float(correct_num) / len(x_test)
        acc_list.append(acc)
        print(f'val acc {acc * 100:.3f}%')
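
main() only prints the per-epoch accuracy; a minimal sketch, assuming matplotlib is available, for visualising acc_list could be appended inside main() after the epoch loop:

    # appended inside main(), after the epoch loop (assumption, not part of the original)
    import matplotlib.pyplot as plt
    plt.plot(range(1, max_epoch + 1), [a * 100 for a in acc_list], marker='o')
    plt.xlabel('epoch')
    plt.ylabel('val accuracy (%)')
    plt.show()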
Example No. 4
# Imports assumed to follow the deep-learning-from-scratch-2 repository layout;
# corpus and word_to_id are assumed to have been loaded earlier in the script.
import time
from common.optimizer import Adam
from common.trainer import RnnlmTrainer
from common.util import eval_perplexity
from peeky_seq2seq import PeekySeq2seq

xs = corpus[:-1]
ts = corpus[1:]

# hyperparameter settings
vocab_size = len(word_to_id)
wordvec_size = 16
hidden_size = 128
batch_size = 1
max_epoch = 50
max_grad = 5.0
sample_size = 100
lr = 0.001
time_size = 35

# create the model
# NOTE: PeekySeq2seq is used here as a language model; this assumes it is
# compatible with RnnlmTrainer and implements reset_state().
model = PeekySeq2seq(vocab_size, wordvec_size, hidden_size)
optimizer = Adam()
trainer = RnnlmTrainer(model, optimizer)

# training
best_ppl = float('inf')
t1 = time.time()
for epoch in range(max_epoch):
    trainer.fit(xs, ts, max_epoch=1, batch_size=batch_size, max_grad=max_grad)

    model.reset_state()
    ppl = eval_perplexity(model, corpus)
    print('valid perplexity: ', ppl)

    if best_ppl > ppl:
        best_ppl = ppl