Example #1
0
File: example.py — Project: hycis/smartNN
def unpickle_mlp(model):
    """Load a pickled MLP, push MNIST test digits through it, and save the
    original and reconstructed digit tilings side by side as JPEG images.

    ``model`` is the experiment directory name under the save path.
    NOTE(review): relies on module-level ``os``, ``NNdir`` and ``Mnist``
    defined elsewhere in this file — confirm they are in scope.
    """
    import cPickle
    from smartNN.utils.image import tile_raster_images
    from PIL.Image import fromarray
    from smartNN.datasets.preprocessor import GCN, Standardize

    pkl_path = os.environ['smartNN_SAVE_PATH'] + '/log/' + model + '/model.pkl'
    with open(pkl_path, 'rb') as f:
        mlp = cPickle.load(f)

    data = Mnist(train_valid_test_ratio=[5, 1, 1],
                 iter_class='SequentialSubsetIterator',
                 rng=None)
    test = data.get_test()
#     prep = Standardize()
#     prep = GCN(use_std = False)
#     test.X = prep.apply(test.X)

    # Tile the last 1000 test digits (28x28 each) into a 50x20 grid.
    orig_array = tile_raster_images(X=test.X[-1001:-1], img_shape=(28, 28),
                                    tile_shape=(50, 20), tile_spacing=(5, 5),
                                    scale_rows_to_unit_interval=True,
                                    output_pixel_vals=True)
    orig_im = fromarray(orig_array)
    orig_im.save(NNdir + '/save/images/' + model + '_orig.jpeg')
    print('orig image saved. Opening image..')
#     orig_im.show()

    # Forward-propagate the test set and tile the reconstructions the same way.
    new_X = mlp.fprop(test.X)
    new_array = tile_raster_images(X=new_X[-1001:-1], img_shape=(28, 28),
                                   tile_shape=(50, 20), tile_spacing=(0, 0),
                                   scale_rows_to_unit_interval=True,
                                   output_pixel_vals=True)
    new_im = fromarray(new_array)
    new_im.save(NNdir + '/save/images/' + model + '_reconstruct.jpeg')
    print('reconstruct image saved. Opening image..')
Example #2
0
File: example.py — Project: hycis/smartNN
def test_AE():
    """Chain two pre-trained stacked-autoencoder layers on the MNIST test set.

    Loads the pickled layer-1 model, strips its decode (mirror) layer so that
    ``fprop`` yields the hidden encoding, encodes the test inputs, then feeds
    that encoding through the pickled layer-2 model.

    Returns:
        The layer-2 model's output on the layer-1 encoding of the test set.
    """
    import cPickle

    AE1 = 'stacked_AE3_layer1_20140407_0142_53816454'
    AE2 = 'stacked_AE3_layer2_20140407_0144_52735085'

    data = Mnist(preprocessor = None,
                    binarize = False,
                    batch_size = 100,
                    num_batches = None,
                    train_ratio = 5,
                    valid_ratio = 1,
                    iter_class = 'SequentialSubsetIterator',
                    rng = None)

    with open(os.environ['smartNN_SAVE_PATH'] + '/' + AE1 + '/model.pkl', 'rb') as f:
        mlp1 = cPickle.load(f)

    # Drop the decode layer: fprop now stops at the hidden representation.
    mlp1.pop_layer(-1)
    reduced_test_X = mlp1.fprop(data.get_test().X)

    with open(os.environ['smartNN_SAVE_PATH'] + '/' + AE2 + '/model.pkl', 'rb') as f:
        mlp2 = cPickle.load(f)

    # Fixed: removed leftover debugging breakpoint (import pdb; pdb.set_trace())
    # that halted every run in the interactive debugger, and the unused
    # `model` name. Return the result so callers can inspect it.
    return mlp2.fprop(reduced_test_X)
Example #3
0
File: models.py — Project: hycis/smartNN
    def build_dataset(self, part=None):
        """Construct the dataset named in ``self.state.dataset`` and rewire
        every split to be an autoencoder task (targets == inputs).

        Args:
            part: split name; required only for 'Laura*' dataset types.

        Returns:
            The configured dataset (``None``-derived AttributeError if the
            dataset type matches no known prefix, as before).

        Raises:
            AssertionError: a 'Laura*' dataset is requested without ``part``.
        """
        preprocessor = None if self.state.dataset.preprocessor is None else \
                       getattr(preproc, self.state.dataset.preprocessor)()

        # Keyword arguments shared by every dataset constructor below —
        # previously duplicated three times.
        common = dict(train_valid_test_ratio = self.state.dataset.train_valid_test_ratio,
                      preprocessor = preprocessor,
                      batch_size = self.state.dataset.batch_size,
                      num_batches = self.state.dataset.num_batches,
                      iter_class = self.state.dataset.iter_class,
                      rng = self.state.dataset.rng)

        dataset = None
        if self.state.dataset.type == 'Mnist':
            dataset = Mnist(**common)

        elif self.state.dataset.type[:4] == 'P276':
            dataset = getattr(spec, self.state.dataset.type)(**common)

        elif self.state.dataset.type[:5] == 'Laura':
            assert part is not None, 'split name is required for Laura dataset'
            dataset = getattr(spec, self.state.dataset.type)(part = part, **common)

        # Autoencoder setup: each split's inputs become its own targets.
        for fetch, assign in ((dataset.get_train, dataset.set_train),
                              (dataset.get_valid, dataset.set_valid),
                              (dataset.get_test, dataset.set_test)):
            split = fetch()
            assign(split.X, split.X)

        return dataset
Example #4
0
File: test.py — Project: hycis/smartNN


import os
os.environ['smartNN_DATA_PATH'] = '/Volumes/Storage/Dropbox/CodingProjects/smartNN/data/'

from smartNN.datasets.mnist import Mnist
from smartNN.datasets.preprocessor import GCN

# Round-trip the MNIST training inputs through global contrast
# normalization: apply the preprocessor, then invert it.
mnist = Mnist()
train_split = mnist.get_train()
gcn = GCN()
normalized = gcn.apply(train_split.X)
recovered = gcn.invert(normalized)
Example #5
0
File: AE_example.py — Project: hycis/smartNN
def _make_log(experiment_name):
    """Build a Log with the settings shared by all three training phases."""
    return Log(experiment_name = experiment_name,
            description = 'This experiment is to test the model',
            save_outputs = True,
            save_hyperparams = True,
            save_model = True,
            send_to_database = 'Database_Name.db')


def _make_learning_rule():
    """Build the MSE learning rule shared by both pre-training phases."""
    return LearningRule(max_col_norm = None,
                        learning_rate = 0.01,
                        momentum = 0.1,
                        momentum_type = 'normal',
                        L1_lambda = None,
                        L2_lambda = None,
                        cost = Cost(type='mse'),
                        stopping_criteria = {'max_epoch' : 3,
                                            'epoch_look_back' : 1,
                                            'cost' : Cost(type='mse'),
                                            'percent_decrease' : 0.001}
                        )


def _set_identity_targets(data):
    """Make every split of *data* an autoencoder task (targets == inputs).

    Returns the (train, valid, test) split objects, whose ``.X`` still
    holds the original inputs.
    """
    train = data.get_train()
    data.set_train(train.X, train.X)
    valid = data.get_valid()
    data.set_valid(valid.X, valid.X)
    test = data.get_test()
    data.set_test(test.X, test.X)
    return train, valid, test


def stacked_autoencoder():
    """Greedily pre-train a two-layer stacked autoencoder on MNIST, then
    fine-tune the full encode/decode stack.

    Refactored: the three identical Log configurations, two identical
    LearningRule configurations, and twice-repeated identity-target setup
    are now shared helpers; behavior is unchanged. Also fixed the comment
    that mislabeled ``ae.encode`` calls as ``fprop``.
    """
    name = 'Stacked_AE'

    #=====[ Train First layer of stack autoencoder ]=====#
    print('Start training First Layer of AutoEncoder')

    log = _make_log(name + '_layer1')
    learning_rule = _make_learning_rule()

    data = Mnist()
    train, valid, test = _set_identity_targets(data)

    ae = AutoEncoder(input_dim = data.feature_size(), rand_seed=None)
    h1_layer = RELU(dim=500, name='h1_layer', W=None, b=None)
    ae.add_encode_layer(h1_layer)
    # Decoder mirrors the encoder with tied (transposed) weights.
    h1_mirror = RELU(dim = data.target_size(), name='h1_mirror', W=h1_layer.W.T, b=None)
    ae.add_decode_layer(h1_mirror)

    train_object = TrainObject(model = ae,
                                dataset = data,
                                learning_rule = learning_rule,
                                log = log)
    train_object.run()

    #=====[ Train Second Layer of autoencoder ]=====#
    print('Start training Second Layer of AutoEncoder')

    log2 = _make_log(name + '_layer2')
    learning_rule = _make_learning_rule()

    # Encode the raw splits with the trained first layer; layer 2 trains
    # on these reduced representations.
    reduced_train_X = ae.encode(train.X)
    reduced_valid_X = ae.encode(valid.X)
    reduced_test_X = ae.encode(test.X)

    data.set_train(X=reduced_train_X, y=reduced_train_X)
    data.set_valid(X=reduced_valid_X, y=reduced_valid_X)
    data.set_test(X=reduced_test_X, y=reduced_test_X)

    # A new autoencoder taking inputs from the encoded outputs of the first.
    ae2 = AutoEncoder(input_dim = data.feature_size(), rand_seed=None)
    h2_layer = RELU(dim=100, name='h2_layer', W=None, b=None)
    ae2.add_encode_layer(h2_layer)
    h2_mirror = RELU(dim=h1_layer.dim, name='h2_mirror', W=h2_layer.W.T, b=None)
    ae2.add_decode_layer(h2_mirror)

    train_object = TrainObject(model = ae2,
                            dataset = data,
                            learning_rule = learning_rule,
                            log = log2)
    train_object.run()

    #=====[ Fine Tuning ]=====#
    print('Fine Tuning')

    log3 = _make_log(name + '_full')

    # Fresh MNIST copy: the previous `data` now holds encoded features.
    # NOTE: fine-tuning reuses the layer-2 learning rule, as the original did.
    data = Mnist()
    _set_identity_targets(data)

    # Full stack reuses the pre-trained layers and their mirrors.
    ae3 = AutoEncoder(input_dim = data.feature_size(), rand_seed=None)
    ae3.add_encode_layer(h1_layer)
    ae3.add_encode_layer(h2_layer)
    ae3.add_decode_layer(h2_mirror)
    ae3.add_decode_layer(h1_mirror)

    train_object = TrainObject(model = ae3,
                            dataset = data,
                            learning_rule = learning_rule,
                            log = log3)
    train_object.run()
    print('Training Done')
Example #6
0
File: AE_example.py — Project: hycis/smartNN
def autoencoder():
    """Train a single hidden-layer autoencoder on MNIST: a RELU encoder and
    a Sigmoid decoder with tied (transposed) weights, under an MSE cost."""
    experiment_log = Log(experiment_name = 'AE',
            description = 'This experiment is about autoencoder',
            save_outputs = True,
            save_hyperparams = True,
            save_model = True,
            send_to_database = 'Database_Name.db')

    rule = LearningRule(max_col_norm = None,
                        learning_rate = 0.01,
                        momentum = 0.1,
                        momentum_type = 'normal',
                        L1_lambda = None,
                        L2_lambda = None,
                        cost = Cost(type='mse'),
                        stopping_criteria = {'max_epoch' : 100,
                                            'cost' : Cost(type='mse'),
                                            'epoch_look_back' : 10,
                                            'percent_decrease' : 0.001}
                        )

    # Building dataset; change to P276 to train on the P276 dataset.
    dataset = Mnist(train_valid_test_ratio=[5,1,1])

    # For an autoencoder the inputs and targets must be identical, so each
    # split's X becomes its own target.
    for fetch, assign in ((dataset.get_train, dataset.set_train),
                          (dataset.get_valid, dataset.set_valid),
                          (dataset.get_test, dataset.set_test)):
        split = fetch()
        assign(split.X, split.X)

    # Building the autoencoder: encode layer, then a mirror decode layer
    # sharing the encoder's transposed weights.
    model = AutoEncoder(input_dim = dataset.feature_size(), rand_seed=None)
    encoder = RELU(dim=100, name='h1_layer', W=None, b=None)
    model.add_encode_layer(encoder)
    model.add_decode_layer(Sigmoid(dim=dataset.target_size(), name='output_layer',
                                   W=encoder.W.T, b=None))

    trainer = TrainObject(model = model,
                          dataset = dataset,
                          learning_rule = rule,
                          log = experiment_log)
    trainer.run()