def __train(lr, weight_decay, epochs=50):
    network = MultiLayerNet(input_size=784, hidden_size_list=[100, 100, 100, 100, 100, 100],
                            output_size=10, weight_decay_lambda=weight_decay)
    trainer = Trainer(network, x_train, t_train, x_val, t_val,
                      epochs=epochs, mini_batch_size=100,
                      optimizer='sgd', optimizer_param={'lr': lr}, verbose=False)
    trainer.train()

    return trainer.test_acc_list, trainer.train_acc_list
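
A hedged sketch of how __train is typically driven: a random search over the learning rate and weight decay. It assumes x_train/t_train/x_val/t_val are already loaded; the search ranges below are illustrative, not from the source.

# Hypothetical driver for __train: random search over lr and weight decay.
import numpy as np

results = {}
for _ in range(20):
    weight_decay = 10 ** np.random.uniform(-8, -4)  # illustrative range
    lr = 10 ** np.random.uniform(-6, -2)            # illustrative range
    val_acc_list, train_acc_list = __train(lr, weight_decay, epochs=20)
    results['lr: %f, weight decay: %e' % (lr, weight_decay)] = val_acc_list[-1]

for key, val_acc in sorted(results.items(), key=lambda kv: kv[1], reverse=True)[:5]:
    print('val acc: %.3f | %s' % (val_acc, key))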
Example #2
def translate_test(index):
    print('source:')
    print(' '.join([i2w_source[i]
                    for i in test_source[index]][::-1]).strip(' pad'))
    print('target:')
    print(''.join([i2w_target[i] for i in test_target[index]]).strip('pad'))
    print('encoder-decoder output:')
    print(''.join([i2w_target[i]
                   for i in predict(test_source[index])]).strip('pad'))


def translate(sentence):
    sentence = list(map(lambda x: w2i_source[x], sentence.split()))
    sentence += [0] * (sentence_length_source - len(sentence))
    sentence.reverse()
    return ''.join([i2w_target[i] for i in predict(np.array([sentence]))])


x, y, loss = build_model()

# Create solver.
solver = S.Momentum(1e-2, momentum=0.9)
solver.set_parameters(nn.get_parameters())

trainer = Trainer(inputs=[x, y],
                  loss=loss,
                  metrics=dict(PPL=np.e**loss),
                  solver=solver)
trainer.run(train_data_iter, dev_data_iter, epochs=5, verbose=1)
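
The PPL metric above is simply perplexity, computed as e raised to the cross-entropy loss; a quick standalone check of that relationship with illustrative numbers:

import numpy as np

loss_value = np.log(100.0)  # cross-entropy of a model spread uniformly over 100 choices
print(np.e ** loss_value)   # ≈ 100.0, i.e. a perplexity of 100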
Example #3
import sys
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from deep_convnet import DeepConvNet
from common.trainer import Trainer

# Take the optimizer name from the command line; default to 'Adam'.
opt = sys.argv[1] if len(sys.argv) > 1 else 'Adam'
print(opt)

(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)

network = DeepConvNet()
trainer = Trainer(network,
                  x_train,
                  t_train,
                  x_test,
                  t_test,
                  epochs=20,
                  mini_batch_size=100,
                  optimizer=opt,
                  optimizer_param={},
                  evaluate_sample_num_per_epoch=1000,
                  verbose=True)
trainer.train()

# Save the parameters
network.save_params(opt + "_deep_convnet_params.pkl")
print("Saved Network Parameters!")
Example #4
window_size = 5  # assumed: not defined in this snippet (5 in the original script)
hidden_size = 100
batch_size = 100
max_epoch = 10

# reading data
corpus, word_to_id, id_to_word = ptb.load_data("train")
vocab_size = len(word_to_id)

contexts, target = create_contexts_target(corpus, window_size)
if config.GPU:
    contexts, target = to_gpu(contexts), to_gpu(target)

# generating models
model = CBOW(vocab_size, hidden_size, window_size, corpus)
optimizer = Adam()
trainer = Trainer(model, optimizer)

# start training
trainer.fit(contexts, target, max_epoch, batch_size)
trainer.plot()

# save data in order to use later
word_vecs = model.word_vecs
if config.GPU:
    word_vecs = to_cpu(word_vecs)
params = {}
params["word_vecs"] = word_vecs.astype(np.float64)
params["word_to_id"] = word_to_id
params["id_to_word"] = id_to_word
pkl_file = "cbow_params.pkl"
with open(pkl_file, "wb") as f:
    pickle.dump(params, f, -1)
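
A hedged sketch of how the saved cbow_params.pkl is typically consumed afterwards; common.util.most_similar and its signature are assumptions based on the same repository layout.

# Hypothetical follow-up: reload the parameters and query similar words.
import pickle
from common.util import most_similar  # assumed helper from the same repo

with open('cbow_params.pkl', 'rb') as f:
    params = pickle.load(f)

word_vecs = params['word_vecs']
word_to_id = params['word_to_id']
id_to_word = params['id_to_word']

for query in ['you', 'year']:
    most_similar(query, word_to_id, id_to_word, word_vecs, top=5)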
Example #5
import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from deep_convnet import DeepConvNet
from common.trainer import Trainer

(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)

# Reduce the data if training takes too long.
x_train, t_train = x_train[:5000], t_train[:5000]
x_test, t_test = x_test[:1000], t_test[:1000]

network = DeepConvNet()
trainer = Trainer(network,
                  x_train,
                  t_train,
                  x_test,
                  t_test,
                  epochs=1,
                  mini_batch_size=100,
                  optimizer='Adam',
                  optimizer_param={'lr': 0.001},
                  evaluate_sample_num_per_epoch=1000)
trainer.train()

# # Save the parameters
# network.save_params("deep_convnet_params.pkl")
# print("Saved Network Parameters!")

train_acc_list, test_acc_list = trainer.train_acc_list, trainer.test_acc_list

# Draw the graph ==========
markers = {'train': 'o', 'test': 's'}
x = np.arange(len(train_acc_list))
Example #6
import sys
sys.path.append('..')
from common.optimizer import SGD
from common.trainer import Trainer
from dataset import spiral
from ch1_neural_network.two_layer_net import TwoLayerNet


# hyperparameters
max_epoch = 300
batch_size = 30
hidden_size = 10
learning_rate = 1.0

x, t = spiral.load_data()
print(f'x: {x.shape}')
print(f't: {t.shape}')
model = TwoLayerNet(input_size=2, hidden_size=hidden_size, output_size=3)
optimizer = SGD(lr=learning_rate)

trainer = Trainer(model, optimizer)
trainer.fit(x, t, max_epoch, batch_size, eval_interval=10)
trainer.plot()
Example #7
is_reverse = True
if is_reverse:
    x_train, x_test = x_train[:, ::-1], x_test[:, ::-1]

vocab_size = len(char_to_id)
wordvec_size = 16
hidden_size = 128
batch_size = 128
max_epoch = 25
max_grad = 5.0

model = Seq2seq(vocab_size, wordvec_size, hidden_size)
# model = PeekySeq2seq(vocab_size, wordvec_size, hidden_size)

optimizer = Adam()
trainer = Trainer(model, optimizer)

acc_list = []
for epoch in range(max_epoch):
    trainer.fit(x_train,
                t_train,
                max_epoch=1,
                batch_size=batch_size,
                max_grad=max_grad)
    correct_num = 0

    for i in range(len(x_test)):
        question, correct = x_test[[i]], t_test[[i]]
        verbose = i < 10
        correct_num += eval_seq2seq(model, question, correct, id_to_char,
                                    verbose, is_reverse)
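
    # Hypothetical tail of the epoch loop (the snippet is cut off here): record
    # and print the test accuracy accumulated by eval_seq2seq above.
    acc = float(correct_num) / len(x_test)
    acc_list.append(acc)
    print('val acc %.3f%%' % (acc * 100))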
Example #8
File: train.py  Project: zju3dv/RVL-Dynamic
        valid_data = SevenScenes(train=False, **kwargs)
    elif configuration.dataset == 'RobotCar':
        train_data = RobotCar(train=True, **kwargs)
        valid_data = RobotCar(train=False, **kwargs)
    elif configuration.dataset == 'SenseTime':
        train_data = SenseTime(train=True, **kwargs)
        valid_data = SenseTime(train=False, **kwargs)
    else:
        raise NotImplementedError

    # Trainer
    print("Setup trainer...")
    pose_stats_file = osp.join(configuration.preprocessed_data_path,
                               configuration.scene, 'pose_stats.txt')

    trainer = Trainer(
        model=model,
        optimizer=optimizer,
        train_criterion=train_criterion,
        val_criterion=val_criterion,
        result_criterion=AbsoluteCriterion(),
        # config_name=args.config_file,
        configuration=configuration,
        experiment=configuration.experiment_name,
        train_dataset=train_data,
        val_dataset=valid_data,
        checkpoint_file=configuration.checkpoint,
        resume_optim=configuration.resume_optim,
        pose_stats_file=pose_stats_file)
    trainer.run()
Example #9
(x_train, t_train), (x_test, t_test) = load_cifar()

max_epochs = 20

if x_train.ndim == 3:
    in_dim = x_train.shape
else:
    in_dim = x_train[0].shape

network = CifarConvNet(in_dim)

trainer = Trainer(network,
                  x_train,
                  t_train,
                  x_test,
                  t_test,
                  epochs=max_epochs,
                  mini_batch_size=1024,
                  optimizer="AdaGrad",
                  optimizer_param={'lr': 0.1},
                  evaluate_sample_num_per_epoch=1000)
trainer.train()

# Save the parameters
network.save_params("deep_convnet_params.pkl")
print("Saved Network Parameters!")

# Plot the loss
markers = {'loss': 'o'}
x = np.arange(max_epochs)
plt.plot(x, trainer.train_loss_list, marker='o', label='loss', markevery=1)
plt.xlabel('epochs')

Example #10
    'n1': 64,
    'n2': 32,
    'channel': 3
},
                weight_init_std=0.01)

start_epoch_file = "./result/params_epoch_" + "{0:06d}".format(
    start_epoch) + ".pkl"
network.load_params(start_epoch_file)

trainer = Trainer(network,
                  x_train,
                  t_train,
                  x_test,
                  t_test,
                  epochs=max_epochs,
                  mini_batch_size=50,
                  optimizer='Adam',
                  optimizer_param={'lr': 0.01},
                  evaluate_sample_num_per_epoch=50,
                  start_epoch=start_epoch)
trainer.train()

# Save the parameters
network.save_params("./result/srcnn_params.pkl")
print("Saved Network Parameters!")

# Draw the graph
markers = {'train': 'o', 'test': 's'}
x = np.arange(max_epochs)
plt.plot(x, trainer.train_acc_list, marker='o', label='train', markevery=2)
Example #11
        if args.name is not None:
            with open("logs/{}.log".format(args.name), "a") as f:
                f.write(str(args))
                f.write("\nParameters : {}".format(n_parameters))
                if hasattr(model, "n_filters"):
                    f.write("\nFilters : {}".format(model.n_filters))
                else:
                    f.write("\nFilters : _ ")
                f.write("\n*******\n")
            print("-" * 80 + "\n")

        trainer1 = Trainer(device,
                           model,
                           dataset,
                           optimizer,
                           [CrossEntropy(), AlphaLoss()],
                           name=args.name,
                           topk=topk,
                           checkpointFreq=args.checkpoint_freq)
        trainer1.temperature = args.starting_temp
        trainer1.callbacks.append(AlphaCallback(args.alpha))
        if scheduler is not None:
            trainer1.callbacks.append(SchedulerCB(scheduler))

        trainer1.train(args.epochs)

        torch.save(model.state_dict(), args.name + ".model")

    else:  # args.resume is not None
        model.load_state_dict(torch.load(args.resume))
Example #12
x = nn.Variable((batch_size, sentence_length))
mask = get_mask(x)
t = nn.Variable((batch_size, sentence_length))

with nn.parameter_scope('embedding'):
    h = PF.embed(x, vocab_size, embedding_size) * mask
with nn.parameter_scope('lstm1'):
    h = lstm(h, hidden_size, mask=mask, return_sequences=True)
with nn.parameter_scope('lstm2'):
    h = lstm(h, hidden_size, mask=mask, return_sequences=True)
with nn.parameter_scope('output'):
    y = time_distributed(PF.affine)(h, vocab_size)

mask = F.sum(mask, axis=2)  # do not predict 'pad'.
entropy = time_distributed_softmax_cross_entropy(y, expand_dims(
    t, axis=-1)) * mask
# count = F.sum(mask, axis=1)
# loss = F.mean(F.div2(F.sum(entropy, axis=1), count))
loss = F.sum(entropy) / F.sum(mask)

# Create solver.
solver = S.Momentum(1e-2, momentum=0.9)
solver.set_parameters(nn.get_parameters())

trainer = Trainer(inputs=[x, t],
                  loss=loss,
                  metrics={'PPL': np.e**loss},
                  solver=solver)
trainer.run(train_data_iter, valid_data_iter, epochs=max_epoch)
Example #13
        for j in range(0, len(weight_decay)):
            for k in range(0, len(learning_rate)):
                network = MultiLayerNetExtend(
                    input_size=784,
                    hidden_size_list=[100, 100, 100, 100],
                    output_size=10,
                    activation='sigmoid',
                    weight_init_std='xavier',
                    weight_decay_lambda=weight_decay[j],
                    use_dropout=use_dropout,
                    dropout_ration=dropout_ratio[i])
                trainer = Trainer(network,
                                  x_train,
                                  t_train,
                                  x_test,
                                  t_test,
                                  epochs=5,
                                  mini_batch_size=500,
                                  optimizer='adam',
                                  optimizer_param={'lr': learning_rate[k]},
                                  verbose=False)
                trainer.train()
                test_acc = trainer.test_acc_list[-1]
                best_hp.append({
                    "test_acc": test_acc,
                    "dropout_ratio": dropout_ratio[i],
                    "weight_decay": weight_decay[j],
                    "learning_rate": learning_rate[k]
                })
                del network
                bar.update(25 * i + 5 * j + k)
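
A hedged follow-up: once the grid search finishes, best_hp can be ranked by the recorded accuracy (best_hp and the hyperparameter lists come from the snippet above).

# Hypothetical post-processing: print the five best hyperparameter combinations.
best_hp.sort(key=lambda hp: hp["test_acc"], reverse=True)
for hp in best_hp[:5]:
    print(hp)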
Example #14
File: train.py  Project: s801210/tibamedl
window_size = 5    # assumed: not defined in this snippet (5 in the original script)
hidden_size = 100  # assumed: not defined in this snippet (100 in the original script)
batch_size = 100
max_epoch = 10

# Load the data
corpus, word_to_id, id_to_word = ptb.load_data('train')
vocab_size = len(word_to_id)

contexts, target = create_contexts_target(corpus, window_size)
if config.GPU:
    contexts, target = to_gpu(contexts), to_gpu(target)

# Create the model
model = CBOW(vocab_size, hidden_size, window_size, corpus)
# model = SkipGram(vocab_size, hidden_size, window_size, corpus)
optimizer = Adam()
trainer = Trainer(model, optimizer)

# Start training
trainer.fit(contexts, target, max_epoch, batch_size)
trainer.plot()

# Save the data needed for later use
word_vecs = model.word_vecs
if config.GPU:
    word_vecs = to_cpu(word_vecs)
params = {}
params['word_vecs'] = word_vecs.astype(np.float16)
params['word_to_id'] = word_to_id
params['id_to_word'] = id_to_word
pkl_file = 'cbow_params.pkl'  # or 'skipgram_params.pkl'
with open(pkl_file, 'wb') as f:
    pickle.dump(params, f, -1)
Example #15
def cnn_constructor():
    """
    Referenced by https://github.com/oreilly-japan/deep-learning-from-scratch
    common modules referenced there too.
    """

    global network, classes, imsize

    (x_train, t_train), (x_test,
                         t_test), classes = dataset(image_dir="images",
                                                    test_percentage=10,
                                                    validation_percentage=10,
                                                    imsize=imsize)

    x_train = chenneling(x_train)
    x_test = chenneling(x_test)

    train_num = x_train.shape[0]
    test_num = x_test.shape[0]

    x_train, t_train = shuffle_dataset(x_train, t_train)
    x_test, t_test = shuffle_dataset(x_test, t_test)

    net_param = "cnn_params" + str(imsize) + ".pkl"
    if not os.path.exists("params/"):
        os.makedirs("params/")

    # build the convolutional neural network
    # x_train.shape[1:] returns channel, height, width
    network = ConvNet(input_dim=(x_train.shape[1:]),
                      conv_param={
                          'filter_num': 20,
                          'filter_size': 3,
                          'pad': 0,
                          'stride': 1
                      },
                      hidden_size=32,
                      output_size=classes,
                      weight_init_std=0.001)

    trainer = Trainer(network,
                      x_train,
                      t_train,
                      x_test,
                      t_test,
                      epochs=1,
                      mini_batch_size=FLAGS.batch_size,
                      optimizer='Adam',
                      optimizer_param={'lr': 0.001},
                      evaluate_sample_num_per_epoch=train_num)

    params_loaded = False
    if not os.path.exists("params/"):
        os.makedirs("params/")
    if (os.path.exists("params/" + net_param)):
        network.load_params("params/" + net_param)
        params_loaded = True
        print("\n* Loaded Network Parameters!  -  " + net_param)
    if ((FLAGS.train_epochs > 0) or (params_loaded == False)):
        if (FLAGS.train_epochs <= 0):
            FLAGS.train_epochs = 10
        # Training
        for ep in range(FLAGS.train_epochs):
            trainer.train()
            # Save parameters
            network.save_params("params/" + net_param)

            # plot graphs
            # Graph 1: Accuracy
            markers = {'train': 'o', 'test': 's', 'loss': 'd'}
            x1 = np.arange(len(trainer.train_acc_list))
            plt.clf()
            plt.plot(x1,
                     trainer.train_acc_list,
                     marker='o',
                     label='train',
                     markevery=1)
            plt.plot(x1,
                     trainer.test_acc_list,
                     marker='s',
                     label='test',
                     markevery=1)
            plt.xlabel("epochs")
            plt.ylabel("accuracy")
            plt.ylim(0, 1.1)
            plt.legend(loc='lower right')
            plt.title("Accuracy")
            now = datetime.now()
            filename = "params/" + now.strftime(
                '%Y%m%d_%H%M%S%f') + "_" + "ep" + ".png"
            plt.savefig(filename)
            #plt.show()

            # Graph 2: Loss
            x2 = np.arange(len(trainer.train_loss_list))
            plt.clf()
            plt.plot(x2,
                     trainer.train_loss_list,
                     marker='o',
                     label='loss',
                     markevery=1)
            plt.xlabel("iter")
            plt.ylabel("loss")
            plt.legend(loc='lower right')
            plt.title("Cross entropy loss")
            now = datetime.now()
            filename = "params/" + now.strftime(
                '%Y%m%d_%H%M%S%f') + "_" + "ep" + ".png"
            plt.savefig(filename)
            #plt.show()
        print("\n* Saved Network Parameters!  -  " + net_param)
Example #16

# Whether to use dropout, and the dropout ratio ===========
use_dropout = True
dropout_ratio = 0.2
# A random array the same shape as x is drawn; units whose value falls below dropout_ratio are ignored (dropped).
# ====================================
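
# A minimal standalone sketch of the dropout mask described in the comment above
# (modeled on the book's Dropout layer; an assumption, not this project's exact code).
import numpy as np

def dropout_forward(x, dropout_ratio=0.2, train_flg=True):
    if train_flg:
        # Draw a random array the same shape as x; keep units whose random value
        # exceeds dropout_ratio, zero out the rest.
        mask = np.random.rand(*x.shape) > dropout_ratio
        return x * mask
    # At test time, scale by the keep probability instead of masking.
    return x * (1.0 - dropout_ratio)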

network = MultiLayerNetExtend(input_size=784,
                              hidden_size_list=[100, 100, 100, 100, 100, 100],
                              output_size=10,
                              use_dropout=use_dropout,
                              dropout_ration=dropout_ratio)
trainer = Trainer(network,
                  x_train,
                  t_train,
                  x_test,
                  t_test,
                  epochs=301,
                  mini_batch_size=100,
                  optimizer='sgd',
                  optimizer_param={'lr': 0.01},
                  verbose=True)  # verbose prints intermediate results during training

trainer.train()  # start training!

train_acc_list, test_acc_list = trainer.train_acc_list, trainer.test_acc_list

# Draw the graph =======================
markers = {'train': 'o', 'test': 's'}
x = np.arange(len(train_acc_list))

plt.plot(x, train_acc_list, marker='o', label='train', markevery=10)
plt.plot(x, test_acc_list, marker='s', label='test', markevery=10)
Example #17
        y = time_distributed(PF.affine)(h, word_vocab_size)

    mask = F.sign(t) # do not predict 'pad'.
    entropy = time_distributed_softmax_cross_entropy(y, expand_dims(t, axis=-1)) * mask
    count = F.sum(mask, axis=1)
    loss = F.mean(F.div2(F.sum(entropy, axis=1), count))
    return x, t, loss

x, t, loss = build_model()

# Create solver.
solver = S.Momentum(1e-2, momentum=0.9)
solver.set_parameters(nn.get_parameters())

x, t, loss = build_model(train=True)
trainer = Trainer(inputs=[x, t],
                  loss=loss,
                  metrics={'PPL': np.e**loss},
                  solver=solver,
                  save_path='char-cnn-lstmlm')
trainer.run(train_data_iter, valid_data_iter, epochs=max_epoch)

for epoch in range(max_epoch):
    x, t, loss = build_model(train=True)
    trainer.update_variables(inputs=[x, t], loss=loss, metrics={'PPL': np.e**loss})
    trainer.run(train_data_iter, None, epochs=1, verbose=1)
    
    x, t, loss = build_model(train=False)
    trainer.update_variables(inputs=[x, t], loss=loss, metrics={'PPL': np.e**loss})
    trainer.evaluate(valid_data_iter, verbose=1)

# nn.load_parameters('char-cnn-lstm_best.h5')

# batch_size = 1
# sentence_length = 1
Example #18
                     'enable_bp_gradient_quantization': False
                 },
                 hidden_size_1=4096,
                 hidden_size_2=1000,
                 output_size=10,
                 enable_compensation_L2_regularization=True,
                 compensation_L2_regularization_lambda=0.1,
                 mini_batch_size=batch_size)
# ======================================= Network Configuration =======================================

trainer = Trainer(network,
                  x_train,
                  t_train,
                  x_test,
                  t_test,
                  epochs=max_epochs,
                  mini_batch_size=batch_size,
                  optimizer=optimizer,
                  optimizer_param={'lr': learning_rate},
                  evaluate_sample_num_per_epoch=evaluate_sample_num,
                  log_per_epoch=log_per_epoch,
                  verbose=True)
trainer.train(log_time=method_time_inspection)

# Draw figure
markers = {'train loss': '^', 'train acc': 'o', 'test acc': 's'}
x = np.arange(len(trainer.train_loss_list))
plt.plot(x,
         trainer.train_loss_list,
         marker='^',
         label='train loss',
         markevery=2)
Example #19
def global_average_pooling_1d(x, mask):
    count = F.sum(mask, axis=1)
    global_average_pooled = F.sum(x, axis=1) / count
    return global_average_pooled
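
# A standalone NumPy check of the same masked average with illustrative values:
# padded timesteps are zeroed upstream and excluded from the divisor, so only
# real positions contribute to the mean (independent of nnabla).
import numpy as np
h_np = np.array([[[1., 1.], [2., 2.], [3., 3.], [0., 0.]]])  # last step is padding (already zeroed)
mask_np = np.array([[[1.], [1.], [1.], [0.]]])
print(h_np.sum(axis=1) / mask_np.sum(axis=1))                # -> [[2. 2.]]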


x = nn.Variable((batch_size, max_len))
t = nn.Variable((batch_size, 1))
mask = get_mask(x)
with nn.parameter_scope('embedding'):
    h = time_distributed(PF.embed)(x, vocab_size, embedding_size) * mask
h = global_average_pooling_1d(h, mask)
with nn.parameter_scope('output'):
    y = F.sigmoid(PF.affine(h, 1))

accuracy = F.mean(F.equal(F.round(y), t))
loss = F.mean(F.binary_cross_entropy(y, t))

# Create solver.
solver = S.Adam()
solver.set_parameters(nn.get_parameters())

trainer = Trainer(inputs=[x, t],
                  loss=loss,
                  metrics={
                      'cross entropy': loss,
                      'accuracy': accuracy
                  },
                  solver=solver)
trainer.run(train_data_iter, dev_data_iter, epochs=5, verbose=1)
Example #20
    # Optimizer
    if configuration.learn_beta:
        param_list.append({
            'params': [
                train_criterion.beta, train_criterion.gamma,
                train_criterion.rel_beta, train_criterion.rel_gamma
            ]
        })

    if configuration.optimizer == 'adam':
        optimizer = optim.Adam(param_list,
                               lr=configuration.lr,
                               weight_decay=5e-4)

    # Data
    train_dataloader, valid_dataloader = get_mapnet_train_dataloader(
        configuration)

    # Trainer
    print("Setup trainer...")
    trainer = Trainer(model=model,
                      optimizer=optimizer,
                      configuration=configuration,
                      train_criterion=train_criterion,
                      val_criterion=train_criterion,
                      result_criterion=AbsoluteCriterion(),
                      train_dataloader=train_dataloader,
                      val_dataloader=valid_dataloader)
    trainer.run()
Example #21
network = SimpleConvNet(input_dim=(1, 28, 28),
                        conv_param={
                            "filter_num": 30,
                            "filter_size": 5,
                            "pad": 0,
                            "stride": 1
                        },
                        hidden_size=100,
                        output_size=10,
                        weight_init_std=0.01)

trainer = Trainer(network,
                  x_train,
                  t_train,
                  x_test,
                  t_test,
                  epochs=max_epochs,
                  mini_batch_size=100,
                  optimizer="Adam",
                  optimizer_param={"lr": 0.001},
                  evaluate_sample_num_per_epoch=1000)
trainer.train()

network.save_params("params.pkl")
print("Saved Network Parameters!")

markers = {"train": "o", "test": "s"}
x = np.arange(max_epochs)
plt.plot(x, trainer.train_acc_list, marker="o", label="train", markevery=2)
plt.plot(x, trainer.test_acc_list, marker="s", label="test", markevery=2)
plt.xlabel("epochs")
plt.ylabel("accuracy")
Example #22
t_test, t_train = preprocessing.divide_test_train(t_train, test_rate=0.1)

model = Transformer(vocab_size,
                    wordvec_size,
                    head_size,
                    num_heads=8,
                    num_encoders=1,
                    num_decoders=1)

if os.path.isfile("../pkl/myTransformer_params.pkl"):
    model.load_params("../pkl/myTransformer_params.pkl")

optimizer = Adam(lr=0.00001)
# optimizer = SGD(lr=0.00005)
# optimizer = RMSprop(lr=0.00005)
trainer = Trainer(model, optimizer)

acc_list = []
for epoch in range(max_epoch):
    trainer.fit(x_train,
                t_train,
                max_epoch=1,
                batch_size=batch_size,
                max_grad=max_grad,
                eval_interval=10)
    model.save_params('../pkl/myTransformer_params.pkl')

    correct_num = 0
    for i in range(len(x_test)):
        question, correct = x_test[[i]], t_test[[i]]
        verbose = i < 10
Example #23
max_epoch = 50
max_grad = 5.0

# shuffle
x, t = seq.shuffle(seed=1)

# K-fold cross-validation (test_size = 1/K)
K = 4

for i, (x_train, x_test, t_train, t_test) in enumerate(seq.cv_dataset_gen(x, t, K=K)):
    # Select the model (initialize its parameters)
    # model = Seq2seq(vocab_size, wordvec_size, hidden_size)
    model = PeekySeq2seq(vocab_size, wordvec_size, hidden_size)
    # model = AttentionSeq2seq(vocab_size, wordvec_size, hidden_size)
    optimizer = Adam()
    trainer = Trainer(model, optimizer)
    print("Cross Validation: iter", str(i+1))
    # Train
    trainer.fit(x_train, t_train, x_test, t_test,
                max_epoch=max_epoch,
                batch_size=batch_size,
                max_grad=max_grad)

    # Inference
    start_id = seq.start_id
    sample_size = seq.sample_size
    guess_train = model.generate(x_train, start_id, sample_size)
    guess_test = model.generate(x_test, start_id, sample_size)

    # Save result as csv
    suffix = "_cv" + str(i + 1)
Example #24
# Whether to use dropout, and the dropout ratio
use_dropout = False
dropout_ratio = 0.2
# ====================================================

network = MultiLayerNet(input_size=784,
                        hidden_size_list=[100, 100, 100, 100, 100, 100],
                        output_size=10,
                        use_dropout=use_dropout,
                        dropout_ration=dropout_ratio)
trainer = Trainer(network,
                  x_train,
                  t_train,
                  x_test,
                  t_test,
                  epochs=301,
                  mini_batch_size=100,
                  optimizer='sgd',
                  optimizer_param={'lr': 0.01},
                  verbose=True)
trainer.train()

train_acc_list, test_acc_list = trainer.train_acc_list, trainer.test_acc_list

# Draw Graph
markers = {'train': 'o', 'test': 's'}
x = np.arange(len(train_acc_list))
plt.plot(x, train_acc_list, marker='o', label='train', markevery=10)
plt.plot(x, test_acc_list, marker='s', label='test', markevery=10)
plt.xlabel("epochs")
plt.ylabel("accuracy")
Example #25
import sys
sys.path.append("..")
from common.trainer import Trainer
from common.optimizer import Adam
from simple_skip_gram import SimpleSkipGram
from common.util import preprocess, create_contexts_target, convert_one_hot

window_size = 1
hidden_size = 5
batch_size = 3
max_epoch = 1000

text = 'You say goodbye and I say hello .'
corpus, word_to_id, id_to_word = preprocess(text)

vocab_size = len(word_to_id)
contexts, target = create_contexts_target(corpus, window_size)
target = convert_one_hot(target, vocab_size)
contexts = convert_one_hot(contexts, vocab_size)

model = SimpleSkipGram(vocab_size, hidden_size)
optimizer = Adam()
trainer = Trainer(model, optimizer)

trainer.fit(contexts, target, max_epoch, batch_size)
trainer.plot(path='../output/03-train_skip_gram.png')

word_vecs = model.word_vecs
for word_id, word in id_to_word.items():
    print(word, word_vecs[word_id])
Example #26

(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)

# Reduce the number of training samples to reproduce overfitting
x_train = x_train[:300]
t_train = t_train[:300]

# Whether to use dropout, and the dropout ratio ========================
use_dropout = True  # set to False to disable dropout
dropout_ratio = 0.2
# ====================================================

network = MultiLayerNetExtend(input_size=784, hidden_size_list=[100, 100, 100, 100, 100, 100],
                              output_size=10, use_dropout=use_dropout, dropout_ration=dropout_ratio)
trainer = Trainer(network, x_train, t_train, x_test, t_test,
                  epochs=301, mini_batch_size=100,
                  optimizer='sgd', optimizer_param={'lr': 0.01}, verbose=True)
trainer.train()

train_acc_list, test_acc_list = trainer.train_acc_list, trainer.test_acc_list

# Draw the graph ==========
markers = {'train': 'o', 'test': 's'}
x = np.arange(len(train_acc_list))
plt.plot(x, train_acc_list, marker='o', label='train', markevery=10)
plt.plot(x, test_acc_list, marker='s', label='test', markevery=10)
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show()
Example #27
# coding: utf-8
import sys
sys.path.append('..')  # allow importing files from the parent directory
from common.optimizer import SGD
from common.trainer import Trainer
from dataset import spiral
from two_layer_net import TwoLayerNet

# Hyperparameter settings
max_epoch = 300
batch_size = 30
hidden_size = 10
learning_rate = 1.0

x, t = spiral.load_data()
model = TwoLayerNet(input_size=2, hidden_size=hidden_size, output_size=3)
optimizer = SGD(lr=learning_rate)

trainer = Trainer(model, optimizer)
trainer.fit(x, t, max_epoch, batch_size, eval_interval=10)
trainer.plot(path='../output/01-plot.png')