def __train(lr, weight_decay, epocs=50):
    """Train a 6-hidden-layer MLP and report its accuracy histories.

    Args:
        lr: SGD learning rate.
        weight_decay: L2 regularization strength (weight decay lambda).
        epocs: number of training epochs (name kept for caller compatibility).

    Returns:
        Tuple ``(test_acc_list, train_acc_list)`` recorded by the trainer.
    """
    net = MultiLayerNet(input_size=784,
                        hidden_size_list=[100] * 6,
                        output_size=10,
                        weight_decay_lambda=weight_decay)
    runner = Trainer(net, x_train, t_train, x_val, t_val,
                     epochs=epocs, mini_batch_size=100,
                     optimizer='sgd', optimizer_param={'lr': lr},
                     verbose=False)
    runner.train()
    return runner.test_acc_list, runner.train_acc_list
 def __init__(self):
     # Wire up the SSIL server's collaborators.
     # NOTE(review): this fragment duplicates SSIL_server.__init__ later in
     # the file; it relies on Online, Metric_Visualizer, Trainer,
     # ExperienceServer, deserialize_obs and `m` being defined elsewhere.
     self.onl = Online()
     self.vis = Metric_Visualizer()
     self.trainer = Trainer(sess_type="online")
     # The server calls ob_callback for each incoming observation batch;
     # presumably 4 is a worker/connection count — TODO confirm.
     self.serv = ExperienceServer(self.ob_callback, deserialize_obs(), 4)
     # Cache trainer paths/names used when saving data and returning models.
     self.exp_path = self.trainer.get_exp_path()
     self.modelpath = self.trainer.get_model_path()
     self.modelname = self.trainer.get_model_name()
     m.patch()
示例#3
0
    def setUp(self):
        """Create a fresh trainer, dataset and default params before each test."""
        self.trainer = Trainer(None, None)

        simple = SimpleDataset(num = 2, size = self.dataset_size)
        self.dataset = DatasetwithCombinedInput(simple)

        trainer_params = TrainerParams()
        trainer_params.batch_size = 10
        trainer_params.val_check_period = 0
        trainer_params.max_epochs = 1
        self.params = trainer_params

        # Start every test from a clean TensorFlow graph.
        tf.reset_default_graph()
class SSIL_server(object):
    """
    Server-side class for Self Supervised Imitation Learning (runs on the
    server). Receives observation batches, persists and visualizes them,
    retrains the model, and ships the updated weights back to the client.
    """

    def __init__(self):
        # Collaborators for data handling, visualization and training.
        self.onl = Online()
        self.vis = Metric_Visualizer()
        self.trainer = Trainer(sess_type="online")
        self.serv = ExperienceServer(self.ob_callback, deserialize_obs(), 4)
        # Cache trainer paths/names used throughout the request cycle.
        self.exp_path = self.trainer.get_exp_path()
        self.modelpath = self.trainer.get_model_path()
        self.modelname = self.trainer.get_model_name()
        m.patch()

    def ob_callback(self, obs_array):
        """Handle one incoming observation batch; return the retrained model.

        Returns a single-element list holding the model file's raw bytes.
        """
        obs_array = transform_obsarray(obs_array, flipNonZero())

        # Persist the batch under <exp_path>/data as a pickle.
        data_dir = os.path.join(self.exp_path, 'data')
        pkl_name = self.onl.save_obsarray_to_pickle(obs_array, data_dir)

        # Render a live monitoring video with the steering overlay (radians).
        self.vis.vid_from_pklpath(os.path.join(data_dir, pkl_name),
                                  0,
                                  0,
                                  show_steer=True,
                                  units='rad',
                                  live=True)

        # Fine-tune on the freshly saved batch.
        self.trainer.train_model([pkl_name])

        # Read the retrained model back as raw bytes for the client.
        model_file = os.path.join(self.modelpath, "train_" + self.modelname)
        with open(model_file, 'rb') as binary_file:
            payload = bytes(binary_file.read())
        return [payload]

    def start_serv(self):
        """Run the experience server until it terminates."""
        self.serv.start()
        self.serv.join()
示例#5
0
from nnet.Metric_Visualizer import Metric_Visualizer
from common.Trainer import Trainer
import os, pdb, glob

# Replay every saved observation batch from the experiment's data folder
# as a live video with the steering overlay (angles in radians).
vis = Metric_Visualizer()
trainer = Trainer()
exp_path = trainer.get_exp_path()
data_dir = os.path.join(exp_path, 'data')
listy = os.listdir(data_dir)

# Pickles are named 'batch0', 'batch1', ... so index by position.
for batch_idx in range(len(listy)):
    vis.vid_from_pklpath(os.path.join(data_dir, 'batch' + str(batch_idx)),
                         0,
                         0,
                         show_steer=True,
                         units='rad',
                         live=True,
                         wk=0)
示例#6
0
        # Network elements for this configuration: a conv encoder feeding
        # dense elements, and an empty (pass-through) predictor stage.
        element_dict = {
            'encoder':
            ConvEncoder(config.convs,
                        DenseElement),  # alternatives: CapsEncoder(), ConvEncoder()
            'predictor': EmptyElementConfig(
            ),  # alternatives: DensePredict(), CapsPredict(), EmptyElementConfig()
        }
        # Delegate construction to the base class in 'classification' mode.
        super().__init__(modes_dict,
                         'classification',
                         element_dict,
                         config=config)


# Smoke-test training run: fit a tiny network on a two-example dataset
# for 100 epochs and checkpoint the results under 'simple-test'.
dataset = SimpleDataset(num=2)
save_folder = 'simple-test'

params = TrainerParams(0.001)
params.max_epochs = 100
params.batch_size = 1
# val_check_period = 0 — presumably disables periodic mid-epoch
# validation; confirm against TrainerParams.
params.val_check_period = 0

network_base = SimpleTestNetwork()
network = Network(network_base, *network_base.get_functions_for_trainer())

tf.reset_default_graph()

trainer = Trainer(network, dataset, params)

# Save both final-classification and per-epoch checkpoints.
checkpoint_dirs = [save_folder + '/classification',
                   save_folder + '/classification/epoch']
saver = CustomSaver(folders=checkpoint_dirs)
trainer.train(saver, restore_from_epochend=True)
示例#7
0
# coding: utf-8

import sys
sys.path.append('../../')
import numpy as np
import matplotlib.pyplot as plt
from data.mnist import load_mnist
from DeepCNN import DeepCNN
from common.Trainer import Trainer

# Load MNIST as images (flatten=False keeps the channel dimension) for the CNN.
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)

network = DeepCNN()

# Training hyper-parameters, gathered in one place.
fit_kwargs = dict(epochs=20,
                  mini_batch_size=100,
                  optimizer='Adam',
                  optimizer_param={'lr': 0.001},
                  evaluate_sample_num_per_epoch=1000)

trainer = Trainer(network, x_train, t_train, x_test, t_test, **fit_kwargs)
trainer.train()

# Persist the learned parameters.
network.save_params("deep_convnet_params.pkl")
print("Saved Network Parameters!")
示例#8
0
class TestLPElements(unittest.TestCase):
    """Unit tests for linear-programming network elements trained end-to-end."""

    # Number of examples per split in the synthetic dataset.
    dataset_size = 20

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Flip to True to see trainer output while debugging tests.
        self.print_train_output = False
        self.trainer = None

    def no_output(self, f):
        """Run `f` with stdout suppressed."""
        #sys.stdout = open(os.devnull, 'w') # hide messages
        with contextlib.redirect_stdout(None):
            f()
        #sys.stdout = sys.__stdout__ # restore output

    def control_output(self, f):
        """Run `f`, silencing stdout unless print_train_output is set."""
        if not self.print_train_output:
            self.no_output(f)
        else:
            f()

    def run_trainer(self, config = LpTestNetworkConfig()):
        """Build a network from `config` and run one full training pass.

        NOTE(review): mutable default argument — the same
        LpTestNetworkConfig instance is shared across every call that
        omits `config`; safe only while callers don't mutate it.
        """
        network_base = LpTestNetwork(config = config)
        network = Network(network_base, *network_base.get_functions_for_trainer())

        self.trainer.resetTrainerWith(network, self.dataset, self.params)

        saver = NoSaveSaver(allow_empty = True)
        self.trainer.train(saver, restore_from_epochend = True)
        return

    def get_config_with_mocked_element(self, f = None):
        """Return a config whose single element has a mocked step function.

        The default mock returns `y` unchanged, so the network's output
        equals the labels and accuracy should be perfect.
        """
        config = LpTestNetworkConfig()
        element = LinearProgrammingDenseElement(config.num_outputs)

        def step_mock(X, y, training):
            return y

        element.step = Mock(side_effect = step_mock if f is None else f)

        config.convs.convs = [element]
        return config



    ### TEST INITIALIZATION ###

    def setUp(self):
        """Fresh trainer, dataset, params and TF graph before each test."""
        self.trainer = Trainer(None, None)
        self.dataset = DatasetwithCombinedInput(SimpleDataset(num = 2, size = self.dataset_size))

        params = TrainerParams()
        params.batch_size = 10
        params.val_check_period = 0
        params.max_epochs = 1

        self.params = params

        tf.reset_default_graph()

    ### TEST CASES ###

    def test_step_called(self):
        """The element's step function must be invoked during training."""
        config = self.get_config_with_mocked_element()
        self.control_output(lambda: self.run_trainer(config))

        self.assertGreaterEqual(config.convs.convs[0].step.call_count, 1, 'Step function should be called at least once')

    def test_step_executed_for_every_example(self):
        """step must execute once per example across train and test splits."""
        self.num_ex = tf.Variable(0, dtype = tf.int16, trainable = False)
        self.result = -1

        def step_mock_acc(X, y, training):
            # Count one execution per example via a graph-side counter.
            num_ex_update = tf.assign(self.num_ex, self.num_ex + 1)
            with tf.control_dependencies([num_ex_update]):
                y = tf.identity(y) # using just return does not add update step to graph
                return y

        def save_num_ex():
            self.result = self.num_ex.eval()

        config = self.get_config_with_mocked_element(step_mock_acc)

        self.trainer.set_on_train_complete(save_num_ex)
        self.control_output(lambda: self.run_trainer(config))

        self.assertEqual(self.result, self.dataset_size * 2, 'Update should be executed for each training and testing example in dataset')

    def test_perfect_result(self):
        """Returning y from step must yield accuracy 1.0."""
        # to prove output of step can be compared to y
        self.test_acc = -1.0

        def save_acc(test_loss, test_acc):
            # test loss is large because y holds probabilities, not logits
            self.test_acc = test_acc

        self.trainer.set_on_test_complete(save_acc)
        config = self.get_config_with_mocked_element()
        self.control_output(lambda: self.run_trainer(config))
        np.testing.assert_allclose(self.test_acc, [1.0], rtol = 0.0, atol = 1e-6, err_msg = 'Accuracy should be 1.0 as long as element returns y as its output')

    def test_creates_out_layer(self):
        """Training must create an output layer sized to the label width."""
        config = LpTestNetworkConfig()
        self.control_output(lambda: self.run_trainer(config))

        layers = config.convs.convs[0].layers
        _, y = self.dataset.get_batch(self.dataset.get_dataset('train'), 0, 1)

        self.assertGreaterEqual(len(layers), 1, 'Should have at least one layer')
        self.assertEqual(len(layers[-1]), y.shape[-1], 'Number of elements in last layer should be equal to number of classes')

    def test_initializes_only_once(self):
        """A second init_out_layer call must not rebuild existing layers."""
        config = LpTestNetworkConfig()
        X, y = self.dataset.get_batch(self.dataset.get_dataset('train'), 0, 1)
        element = config.convs.convs[0]
        # Flatten one example to the (batch, features) shape init expects.
        X = np.reshape(X[0], [-1, np.multiply.reduce(X[0].shape[1:])])

        element.init_out_layer(X.shape, y.shape)
        # Tag a layer object so we can detect whether it survives re-init.
        element.layers[-1][0].test_attr = 'testing'

        self.assertTrue(element.initialized, 'Initialized flag should be set to True after initialization')

        element.init_out_layer(X.shape, y.shape)
        has_test_attr = hasattr(element.layers[-1][0], 'test_attr')
        self.assertTrue(has_test_attr, 'New attribute should be still present')
示例#9
0
# Dropout on/off and ratio settings ==================
use_dropout = True  # set to False to train without dropout
dropout_ratio = 0.15
# ====================================================

# 6-hidden-layer MLP with optional dropout.
# NOTE(review): the keyword is spelled 'dropout_ration' — presumably this
# matches MultiLayerNetExtend's (misspelled) parameter name; do not "fix"
# the spelling here without changing that class.
network = MultiLayerNetExtend(input_size=784,
                              hidden_size_list=[100, 100, 100, 100, 100, 100],
                              output_size=10,
                              use_dropout=use_dropout,
                              dropout_ration=dropout_ratio)
trainer = Trainer(network,
                  x_train,
                  t_train,
                  x_test,
                  t_test,
                  epochs=601,
                  mini_batch_size=100,
                  optimizer='sgd',
                  optimizer_param={'lr': 0.01},
                  verbose=True)
trainer.train()

train_acc_list, test_acc_list = trainer.train_acc_list, trainer.test_acc_list

# Plot train vs. test accuracy ==========
# NOTE(review): the `markers` dict is unused below — plt.plot passes the
# literals 'o'/'s' directly; consider markers['train']/markers['test'].
markers = {'train': 'o', 'test': 's'}
x = np.arange(len(train_acc_list))
plt.plot(x, train_acc_list, marker='o', label='train', markevery=10)
plt.plot(x, test_acc_list, marker='s', label='test', markevery=10)
plt.xlabel("epochs")
plt.ylabel("accuracy")