def __train(lr, weight_decay, epochs=50):
    # Train a 6-layer fully connected net on MNIST with the given learning rate and
    # weight-decay strength; returns the validation- and training-accuracy histories.
    network = MultiLayerNet(input_size=784, hidden_size_list=[100, 100, 100, 100, 100, 100],
                            output_size=10, weight_decay_lambda=weight_decay)
    trainer = Trainer(network, x_train, t_train, x_val, t_val,
                      epochs=epochs, mini_batch_size=100,
                      optimizer='sgd', optimizer_param={'lr': lr}, verbose=False)
    trainer.train()

    return trainer.test_acc_list, trainer.train_acc_list
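
# --- Usage sketch (not part of the original snippet) ---
# A helper like __train() is typically driven by a random hyperparameter search;
# the trial count and sampling ranges below are assumptions, and numpy is assumed
# to be imported as np, as in the other examples on this page.
optimization_trial = 100
results_val, results_train = {}, {}
for _ in range(optimization_trial):
    # Sample the weight-decay strength and learning rate on a log scale.
    weight_decay = 10 ** np.random.uniform(-8, -4)
    lr = 10 ** np.random.uniform(-6, -2)

    val_acc_list, train_acc_list = __train(lr, weight_decay)
    key = "lr:" + str(lr) + ", weight decay:" + str(weight_decay)
    results_val[key] = val_acc_list
    results_train[key] = train_acc_list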
Example No. 2
    def __init__(self):
        self.onl = Online()
        self.vis = Metric_Visualizer()
        self.trainer = Trainer(sess_type="online")
        self.serv = ExperienceServer(self.ob_callback, deserialize_obs(), 4)
        self.exp_path = self.trainer.get_exp_path()
        self.modelpath = self.trainer.get_model_path()
        self.modelname = self.trainer.get_model_name()
        m.patch()
Example No. 3
    def setUp(self):
        self.trainer = Trainer(None, None)
        self.dataset = DatasetwithCombinedInput(SimpleDataset(num=2, size=self.dataset_size))

        params = TrainerParams()
        params.batch_size = 10
        params.val_check_period = 0
        params.max_epochs = 1

        self.params = params

        tf.reset_default_graph()
Example No. 4
from nnet.Metric_Visualizer import Metric_Visualizer
from common.Trainer import Trainer
import os, pdb, glob

vis = Metric_Visualizer()
trainer = Trainer()
exp_path = trainer.get_exp_path()
listy = os.listdir(os.path.join(exp_path, 'data'))
for i in range(len(listy)):
    pkl_name = 'batch' + str(i)
    vis.vid_from_pklpath(os.path.join(exp_path, 'data', pkl_name),
                         0,
                         0,
                         show_steer=True,
                         units='rad',
                         live=True,
                         wk=0)
Example No. 5
        element_dict = {
            # alternatives: CapsEncoder(), ConvEncoder()
            'encoder': ConvEncoder(config.convs, DenseElement),
            # alternatives: DensePredict(), CapsPredict(), EmptyElementConfig()
            'predictor': EmptyElementConfig(),
        }
        super().__init__(modes_dict, 'classification', element_dict, config=config)


dataset = SimpleDataset(num=2)
save_folder = 'simple-test'
params = TrainerParams(0.001)
params.batch_size = 1
params.val_check_period = 0
params.max_epochs = 100
network_base = SimpleTestNetwork()
network = Network(network_base, *network_base.get_functions_for_trainer())

tf.reset_default_graph()

trainer = Trainer(network, dataset, params)

saver = CustomSaver(folders=[
    save_folder + '/classification', save_folder + '/classification/epoch'
])
trainer.train(saver, restore_from_epochend=True)
Example No. 6
# coding: utf-8

import sys
sys.path.append('../../')
import numpy as np
import matplotlib.pyplot as plt
from data.mnist import load_mnist
from DeepCNN import DeepCNN
from common.Trainer import Trainer

(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)

network = DeepCNN()
trainer = Trainer(network,
                  x_train,
                  t_train,
                  x_test,
                  t_test,
                  epochs=20,
                  mini_batch_size=100,
                  optimizer='Adam',
                  optimizer_param={'lr': 0.001},
                  evaluate_sample_num_per_epoch=1000)
trainer.train()

# Save the parameters
network.save_params("deep_convnet_params.pkl")
print("Saved Network Parameters!")
Example No. 7
# Dropout on/off and dropout-ratio settings ==========
use_dropout = True  # set to False to run without Dropout
dropout_ratio = 0.15
# ====================================================

network = MultiLayerNetExtend(input_size=784,
                              hidden_size_list=[100, 100, 100, 100, 100, 100],
                              output_size=10,
                              use_dropout=use_dropout,
                              # note: MultiLayerNetExtend spells this parameter 'dropout_ration'
                              dropout_ration=dropout_ratio)
trainer = Trainer(network,
                  x_train,
                  t_train,
                  x_test,
                  t_test,
                  epochs=601,
                  mini_batch_size=100,
                  optimizer='sgd',
                  optimizer_param={'lr': 0.01},
                  verbose=True)
trainer.train()

train_acc_list, test_acc_list = trainer.train_acc_list, trainer.test_acc_list

# Plot the graph ==========
markers = {'train': 'o', 'test': 's'}
x = np.arange(len(train_acc_list))
plt.plot(x, train_acc_list, marker=markers['train'], label='train', markevery=10)
plt.plot(x, test_acc_list, marker=markers['test'], label='test', markevery=10)
plt.xlabel("epochs")
plt.ylabel("accuracy")