Example 1
def mlp():

    # build the MNIST dataset with a 5:1:1 train/valid/test split
    data = Mnist(train_valid_test_ratio=[5,1,1])

    # build a multilayer perceptron with one input unit per feature
    mlp = MLP(input_dim = data.feature_size())
    
    # stack a sigmoid hidden layer and a sigmoid output layer
    mlp.add_layer(Sigmoid(dim=100, name='h1_layer', W=None, b=None, dropout_below=None))
    mlp.add_layer(Sigmoid(dim=data.target_size(), name='output_layer', W=None, b=None, dropout_below=None))
    
    # training hyperparameters and early-stopping criteria
    learning_rule = LearningRule(max_col_norm = 0.1,
                                learning_rate = 0.01,
                                momentum = 0.1,
                                momentum_type = 'normal',
                                L1_lambda = None,
                                L2_lambda = None,
                                cost = Cost(type='mse'),
                                stopping_criteria = {'max_epoch' : 100, 
                                                    'epoch_look_back' : 3,
                                                    'cost' : Cost(type='error'), 
                                                    'percent_decrease' : 0.001}
                                )
    
    # configure what gets logged and saved during training
    log = Log(experiment_name = 'mnistest2',
            description = 'This experiment is to test the model',
            save_outputs = True,
            save_hyperparams = False,
            save_model = False,
            send_to_database = 'Database_Name.db')
    
    # bundle model, dataset, learning rule and log, then train
    train_object = TrainObject(model = mlp,
                                dataset = data,
                                learning_rule = learning_rule,
                                log = log)
    train_object.run()
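
The snippets on this page omit their imports. A minimal sketch of what Example 1 needs to run as a script, assuming module paths inferred from the class names (the actual smartNN package layout may differ):

# Hypothetical imports -- the module paths are assumptions inferred from
# the class names, not confirmed against the smartNN source tree.
from smartNN.datasets import Mnist
from smartNN.mlp import MLP
from smartNN.layer import Sigmoid
from smartNN.learning_rule import LearningRule
from smartNN.cost import Cost
from smartNN.log import Log
from smartNN.train_object import TrainObject

if __name__ == '__main__':
    mlp()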
Example 2
    def run(self):
        log = self.build_log()

        dataset = self.build_dataset()

        learning_rule = self.build_learning_rule()

        # unpickle the first pre-trained single-layer autoencoder
        with open(os.environ['smartNN_SAVE_PATH'] + '/log/' +
                self.state.hidden1.model_name + '/model.pkl', 'rb') as f:
            print('unpickling model: ' + self.state.hidden1.model_name)
            h1 = cPickle.load(f)

        # unpickle the second pre-trained single-layer autoencoder
        with open(os.environ['smartNN_SAVE_PATH'] + '/log/' +
                self.state.hidden2.model_name + '/model.pkl', 'rb') as f:
            print('unpickling model: ' + self.state.hidden2.model_name)
            h2 = cPickle.load(f)

        # stack the pre-trained layers: encoders h1 -> h2, decoders mirrored
        model = AutoEncoder(input_dim = dataset.feature_size(), rand_seed=self.state.model.rand_seed)
        model.add_encode_layer(h1.layers[0])
        model.add_encode_layer(h2.layers[0])
        model.add_decode_layer(h2.layers[1])
        model.add_decode_layer(h1.layers[1])

        train_obj = TrainObject(log = log,
                                dataset = dataset,
                                learning_rule = learning_rule,
                                model = model)
        train_obj.run()
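
Example 2 is Python 2 code (cPickle). Under Python 3 the unpickling step would use the pickle module instead; a minimal equivalent, assuming the same save-path layout:

import os
import pickle  # Python 3 replacement for cPickle

model_name = 'h1_model'  # placeholder for self.state.hidden1.model_name
model_path = os.path.join(os.environ['smartNN_SAVE_PATH'], 'log',
                          model_name, 'model.pkl')
with open(model_path, 'rb') as f:
    h1 = pickle.load(f)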
Example 3
    def run(self):
        log = self.build_log()
        dataset = self.build_dataset()

        learning_rule = self.build_learning_rule()
        model = self.build_model(dataset)
        train_obj = TrainObject(log = log,
                                dataset = dataset,
                                learning_rule = learning_rule,
                                model = model)
        train_obj.run()
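
Example 3 assumes a host class supplying the build_* factory methods. A hypothetical skeleton of such a class, with method bodies assembled from the constructors used elsewhere on this page (the class name and defaults are assumptions, not the actual smartNN experiment class):

class Experiment(object):
    # Hypothetical host class for the run() method above.

    def build_log(self):
        return Log(experiment_name='skeleton',
                   description='hypothetical experiment skeleton',
                   save_outputs=True,
                   save_hyperparams=True,
                   save_model=True,
                   send_to_database='Database_Name.db')

    def build_dataset(self):
        return Mnist(train_valid_test_ratio=[5, 1, 1])

    def build_learning_rule(self):
        return LearningRule(max_col_norm=None,
                            learning_rate=0.01,
                            momentum=0.1,
                            momentum_type='normal',
                            L1_lambda=None,
                            L2_lambda=None,
                            cost=Cost(type='mse'),
                            stopping_criteria={'max_epoch': 100,
                                               'cost': Cost(type='mse'),
                                               'epoch_look_back': 10,
                                               'percent_decrease': 0.001})

    def build_model(self, dataset):
        model = MLP(input_dim=dataset.feature_size())
        model.add_layer(Sigmoid(dim=dataset.target_size(), name='output_layer',
                                W=None, b=None, dropout_below=None))
        return model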
Example 4
def autoencoder():

    log = Log(experiment_name = 'AE',
            description = 'This experiment is about autoencoder',
            save_outputs = True,
            save_hyperparams = True,
            save_model = True,
            send_to_database = 'Database_Name.db')

    learning_rule = LearningRule(max_col_norm = None,
                            learning_rate = 0.01,
                            momentum = 0.1,
                            momentum_type = 'normal',
                            L1_lambda = None,
                            L2_lambda = None,
                            cost = Cost(type='mse'),
                            stopping_criteria = {'max_epoch' : 100,
                                                'cost' : Cost(type='mse'),
                                                'epoch_look_back' : 10,
                                                'percent_decrease' : 0.001}
                            )
    
    # build the dataset; swap Mnist for P276 to train on the P276 dataset
    data = Mnist(train_valid_test_ratio=[5,1,1])
    
    # for an autoencoder the targets must equal the inputs, so set y = X
    train = data.get_train()
    data.set_train(train.X, train.X)
    
    valid = data.get_valid()
    data.set_valid(valid.X, valid.X)
    
    test = data.get_test()
    data.set_test(test.X, test.X)
    
    # building autoencoder
    ae = AutoEncoder(input_dim = data.feature_size(), rand_seed=None)
    h1_layer = RELU(dim=100, name='h1_layer', W=None, b=None)
    
    # adding encoding layer
    ae.add_encode_layer(h1_layer)
    
    # adding decoding mirror layer
    ae.add_decode_layer(Sigmoid(dim=data.target_size(), name='output_layer', W=h1_layer.W.T, b=None))

    train_object = TrainObject(model = ae,
                                dataset = data,
                                learning_rule = learning_rule,
                                log = log)
                                
    train_object.run()
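
The decode layer above reuses the transposed encoder weights (W=h1_layer.W.T), i.e. tied weights: whatever the encoder maps down, the transpose maps back up. A quick shape check of that convention, assuming W is stored as input_dim x dim (which the W.T usage implies):

import numpy as np

# Assumed weight orientation: the encoder W maps 784 MNIST inputs to the
# 100 hidden units, so the tied decoder weight W.T maps the 100-unit code
# back to 784 outputs.
W = np.random.randn(784, 100)           # encoder weights, input_dim x dim
code = np.random.randn(1, 784).dot(W)   # 1 x 100 hidden pre-activation
recon = code.dot(W.T)                   # 1 x 784 reconstruction pre-activation
assert recon.shape == (1, 784)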
Example 5
    def run(self):

        # debug: print the type of the xmen entry in the experiment state
        print(type(self.state.xmen))

        parts = ['Laura_data_000.npy', 'Laura_data_010.npy', 'Laura_data_020.npy', 'Laura_data_030.npy',
                'Laura_data_001.npy','Laura_data_011.npy','Laura_data_021.npy','Laura_data_031.npy',
                'Laura_data_002.npy','Laura_data_012.npy','Laura_data_022.npy','Laura_data_032.npy',
                'Laura_data_003.npy','Laura_data_013.npy','Laura_data_023.npy','Laura_data_033.npy',
                'Laura_data_004.npy','Laura_data_014.npy','Laura_data_024.npy','Laura_data_034.npy',
                'Laura_data_005.npy','Laura_data_015.npy','Laura_data_025.npy','Laura_data_035.npy',
                'Laura_data_006.npy','Laura_data_016.npy','Laura_data_026.npy','Laura_data_036.npy',
                'Laura_data_007.npy','Laura_data_017.npy','Laura_data_027.npy','Laura_data_037.npy',
                'Laura_data_008.npy','Laura_data_018.npy','Laura_data_028.npy','Laura_data_038.npy',
                'Laura_data_009.npy','Laura_data_019.npy','Laura_data_029.npy','Laura_data_039.npy']

        # build everything and do an initial training run on the first part
        learning_rule = self.build_learning_rule()
        dataset = self.build_dataset(parts[0])
        model = self.build_two_hid_model(dataset.feature_size())
        parts.pop(0)  # the first part has now been consumed
        log = self.build_log()
        train_obj = TrainObject(log = log,
                               dataset = dataset,
                               learning_rule = learning_rule,
                               model = model)
        train_obj.run()

        for r in range(self.state.num_runs):
            for part in parts:
                log.log('run: %s of %s' % (r + 1, self.state.num_runs))
                log.log('part: %s of %s' % (part, parts))
                log.log('loading dataset..')
                dataset = self.build_dataset(part)
                train_obj.dataset = dataset
                print_mem_usage()
                train_obj.run()
                print_mem_usage()
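
The 40 hard-coded filenames in Example 5 follow a regular pattern (indices 000-039, listed in interleaved steps of 10). An equivalent way to build the same list in the same order:

# Generates the same 40 names in the same interleaved order as the
# hard-coded list above.
parts = ['Laura_data_%03d.npy' % (i + 10 * j)
         for i in range(10) for j in range(4)]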
Example 6
def stacked_autoencoder():

    name = 'Stacked_AE'

    #=====[ Train first layer of the stacked autoencoder ]=====#
    print('Start training First Layer of AutoEncoder')

    
    log = Log(experiment_name = name + '_layer1',
            description = 'This experiment is to test the model',
            save_outputs = True,
            save_hyperparams = True,
            save_model = True,
            send_to_database = 'Database_Name.db')
    
    learning_rule = LearningRule(max_col_norm = None,
                                learning_rate = 0.01,
                                momentum = 0.1,
                                momentum_type = 'normal',
                                L1_lambda = None,
                                L2_lambda = None,
                                cost = Cost(type='mse'),
                                stopping_criteria = {'max_epoch' : 3, 
                                                    'epoch_look_back' : 1,
                                                    'cost' : Cost(type='mse'), 
                                                    'percent_decrease' : 0.001}
                                )

    # as in Example 4, the autoencoder targets must equal the inputs
    data = Mnist()
                    
    train = data.get_train()
    data.set_train(train.X, train.X)
    
    valid = data.get_valid()
    data.set_valid(valid.X, valid.X)
    
    test = data.get_test()
    data.set_test(test.X, test.X)
    
    ae = AutoEncoder(input_dim = data.feature_size(), rand_seed=None)

    # RELU encode layer and its tied-weight mirror decode layer
    h1_layer = RELU(dim=500, name='h1_layer', W=None, b=None)
    ae.add_encode_layer(h1_layer)
    h1_mirror = RELU(dim = data.target_size(), name='h1_mirror', W=h1_layer.W.T, b=None)
    ae.add_decode_layer(h1_mirror)

    
    train_object = TrainObject(model = ae,
                                dataset = data,
                                learning_rule = learning_rule,
                                log = log)
                                
    train_object.run()
    
    #=====[ Train second layer of the stacked autoencoder ]=====#

    print('Start training Second Layer of AutoEncoder')
    
    log2 = Log(experiment_name = name + '_layer2',
            description = 'This experiment is to test the model',
            save_outputs = True,
            save_hyperparams = True,
            save_model = True,
            send_to_database = 'Database_Name.db')
    
    learning_rule = LearningRule(max_col_norm = None,
                            learning_rate = 0.01,
                            momentum = 0.1,
                            momentum_type = 'normal',
                            L1_lambda = None,
                            L2_lambda = None,
                            cost = Cost(type='mse'),
                            stopping_criteria = {'max_epoch' : 3, 
                                                'epoch_look_back' : 1,
                                                'cost' : Cost(type='mse'), 
                                                'percent_decrease' : 0.001}
                            )

    # forward-propagate (fprop) the data through the trained first-layer
    # encoder to get the reduced representations
    reduced_train_X = ae.encode(train.X)
    reduced_valid_X = ae.encode(valid.X)
    reduced_test_X = ae.encode(test.X)

    data.set_train(X=reduced_train_X, y=reduced_train_X)
    data.set_valid(X=reduced_valid_X, y=reduced_valid_X)
    data.set_test(X=reduced_test_X, y=reduced_test_X)
    
    # create a second autoencoder that takes the encoded outputs of the
    # first autoencoder as its inputs
    ae2 = AutoEncoder(input_dim = data.feature_size(), rand_seed=None)

    
    h2_layer = RELU(dim=100, name='h2_layer', W=None, b=None)
    ae2.add_encode_layer(h2_layer)
    
    h2_mirror = RELU(dim=h1_layer.dim, name='h2_mirror', W=h2_layer.W.T, b=None)
    ae2.add_decode_layer(h2_mirror)
    
              
    train_object = TrainObject(model = ae2,
                            dataset = data,
                            learning_rule = learning_rule,
                            log = log2)
    
    train_object.run()
    
    #=====[ Fine Tuning ]=====#
    print('Fine Tuning')

    log3 = Log(experiment_name = name + '_full',
            description = 'This experiment is to test the model',
            save_outputs = True,
            save_hyperparams = True,
            save_model = True,
            send_to_database = 'Database_Name.db')
    
    # reload the raw dataset for end-to-end fine-tuning of the full stack
    data = Mnist()
    
    train = data.get_train()
    data.set_train(train.X, train.X)
    
    valid = data.get_valid()
    data.set_valid(valid.X, valid.X)
    
    test = data.get_test()
    data.set_test(test.X, test.X)
    
    # assemble the full stacked autoencoder from the pre-trained layers
    ae3 = AutoEncoder(input_dim = data.feature_size(), rand_seed=None)
    ae3.add_encode_layer(h1_layer)
    ae3.add_encode_layer(h2_layer)
    ae3.add_decode_layer(h2_mirror)
    ae3.add_decode_layer(h1_mirror)

    train_object = TrainObject(model = ae3,
                            dataset = data,
                            learning_rule = learning_rule,
                            log = log3)
    
    train_object.run()
    print('Training Done')
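
After fine-tuning, the stacked autoencoder can serve as a feature extractor. A short sketch, assuming encode() runs the full encoder stack on a design matrix as in the layer-wise pre-training above:

# Hypothetical usage of the fine-tuned stack: map the raw inputs through
# both encode layers to the 100-dimensional codes.
codes = ae3.encode(data.get_train().X)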