# Example 1
def train():
    """Train a small MLP classifier on MNIST, log the run, and print test accuracy."""

    # dataset: MNIST split train/valid/test in a 5:1:1 ratio
    mini_batch = 64
    mnist = Mnist(batch_size=mini_batch, train_valid_test_ratio=[5, 1, 1])

    # model: 784 -> 200 -> 100 -> 10 with ReLU activations, dropout and softmax output
    model = Sequential(input_var=T.matrix(), output_var=T.matrix())
    for layer in (Linear(prev_dim=28 * 28, this_dim=200),
                  RELU(),
                  Linear(prev_dim=200, this_dim=100),
                  RELU(),
                  Dropout(0.5),
                  Linear(prev_dim=100, this_dim=10),
                  Softmax()):
        model.add(layer)

    # decay the learning rate after every two passes over the training set
    batches_per_decay = int(mnist.train.X.shape[0] * 2 / mini_batch)
    learning_method = SGD(learning_rate=0.1,
                          momentum=0.9,
                          lr_decay_factor=0.9,
                          decay_batch=batches_per_decay)

    # logger: capture screen output, the best model, per-epoch error,
    # and record the hyperparameters into a sqlite database
    log = Log(
        experiment_name='MLP',
        description='This is a tutorial',
        save_outputs=True,
        save_model=True,
        save_epoch_error=True,
        save_to_database={
            'name': 'Example.sqlite3',
            'records': {
                'Batch_Size': mini_batch,
                'Learning_Rate': learning_method.learning_rate,
                'Momentum': learning_method.momentum
            }
        })

    # assemble everything and run the training loop
    train_object = TrainObject(model=model,
                               log=log,
                               dataset=mnist,
                               train_cost=mse,
                               valid_cost=error,
                               learning_method=learning_method,
                               stop_criteria={
                                   'max_epoch': 100,
                                   'epoch_look_back': 5,
                                   'percent_decrease': 0.01
                               })
    train_object.setup()
    train_object.run()

    # evaluate on the held-out test split
    test_split = mnist.get_test()
    predictions = np.argmax(model.fprop(test_split.X), axis=1)
    truth = np.argmax(test_split.y, axis=1)
    accuracy = np.equal(predictions, truth).astype('f4').sum() / len(truth)
    print('test accuracy:', accuracy)
# Example 2
def train():
    """
    Train a variational autoencoder on MNIST, following
    Auto-Encoding Variational Bayes, Kingma & Welling, arXiv:1312.6114.
    """

    # dataset: an autoencoder reconstructs its input, so targets equal inputs
    mnist = Mnist(batch_size=100, binary=False, train_valid_test_ratio=[5, 1, 1])
    mnist.set_train(X=mnist.get_train().X, y=mnist.get_train().X)
    mnist.set_valid(X=mnist.get_valid().X, y=mnist.get_valid().X)

    # model: a single VAE layer — 784 visible units, 200-unit bottleneck, 20 latent dims
    model = Sequential(input_var=T.matrix(), output_var=T.matrix())
    model.add(VariationalAutoencoder(input_dim=28 * 28, bottlenet_dim=200, z_dim=20))

    # SGD with momentum and step-wise learning-rate decay
    learning_method = SGD(learning_rate=0.0001,
                          momentum=0.9,
                          lr_decay_factor=0.9,
                          decay_batch=10000)

    # the SGVB bound serves as both the training and the validation cost
    train_object = TrainObject(model=model,
                               log=None,
                               dataset=mnist,
                               train_cost=SGVB_bin,
                               valid_cost=SGVB_bin,
                               learning_method=learning_method,
                               stop_criteria={'max_epoch': 10,
                                              'epoch_look_back': 5,
                                              'percent_decrease': 0.01})
    train_object.setup()
    train_object.run()
# Example 3
def train():
    """
    Variational autoencoder on MNIST (Auto-Encoding Variational Bayes,
    Diederik P Kingma & Max Welling, arXiv:1312.6114).
    """

    # build the dataset; reconstruction targets are the inputs themselves
    dataset = Mnist(batch_size=100, binary=False, train_valid_test_ratio=[5, 1, 1])
    dataset.set_train(X=dataset.get_train().X, y=dataset.get_train().X)
    dataset.set_valid(X=dataset.get_valid().X, y=dataset.get_valid().X)

    # build the model: one VAE layer mapping 784 inputs through a 200-unit
    # bottleneck into a 20-dimensional latent space
    vae = VariationalAutoencoder(input_dim=28 * 28, bottlenet_dim=200, z_dim=20)
    model = Sequential(input_var=T.matrix(), output_var=T.matrix())
    model.add(vae)

    # optimizer: momentum SGD with periodic learning-rate decay
    optimizer = SGD(learning_rate=0.0001,
                    momentum=0.9,
                    lr_decay_factor=0.9,
                    decay_batch=10000)

    # assemble the trainer; SGVB is used for both train and validation cost
    stop = {'max_epoch': 10, 'epoch_look_back': 5, 'percent_decrease': 0.01}
    trainer = TrainObject(model=model,
                          log=None,
                          dataset=dataset,
                          train_cost=SGVB_bin,
                          valid_cost=SGVB_bin,
                          learning_method=optimizer,
                          stop_criteria=stop)
    trainer.setup()
    trainer.run()
# Example 4
def train():
    """Train an MLP on MNIST, log the run to SQLite, and print test accuracy."""

    # build dataset: MNIST split train/valid/test 5:1:1
    batch_size = 64
    data = Mnist(batch_size=batch_size, train_valid_test_ratio=[5, 1, 1])

    # build model: 784 -> 200 -> 100 -> 10 with ReLU, dropout and softmax output
    model = Sequential(input_var=T.matrix(), output_var=T.matrix())
    model.add(Linear(prev_dim=28 * 28, this_dim=200))
    model.add(RELU())
    model.add(Linear(prev_dim=200, this_dim=100))
    model.add(RELU())
    model.add(Dropout(0.5))
    model.add(Linear(prev_dim=100, this_dim=10))
    model.add(Softmax())

    # build learning method; decay the learning rate every two epochs' worth of batches
    decay_batch = int(data.train.X.shape[0] * 2 / batch_size)
    learning_method = SGD(learning_rate=0.1, momentum=0.9,
                          lr_decay_factor=0.9, decay_batch=decay_batch)

    # build logger
    log = Log(experiment_name='MLP',
              description='This is a tutorial',
              save_outputs=True,       # log all the outputs from the screen
              save_model=True,         # save the best model
              save_epoch_error=True,   # log error at every epoch
              save_to_database={'name': 'Example.sqlite3',
                                'records': {'Batch_Size': batch_size,
                                            'Learning_Rate': learning_method.learning_rate,
                                            'Momentum': learning_method.momentum}}
             )  # end log

    # put everything into the train object
    train_object = TrainObject(model=model,
                               log=log,
                               dataset=data,
                               train_cost=mse,
                               valid_cost=error,
                               learning_method=learning_method,
                               stop_criteria={'max_epoch': 100,
                                              'epoch_look_back': 5,
                                              'percent_decrease': 0.01}
                               )
    # finally run the code
    train_object.setup()
    train_object.run()

    # evaluate on the test split
    ypred = model.fprop(data.get_test().X)
    ypred = np.argmax(ypred, axis=1)
    y = np.argmax(data.get_test().y, axis=1)
    accuracy = np.equal(ypred, y).astype('f4').sum() / len(y)
    # FIX: the original used a Python 2 print statement (a SyntaxError on
    # Python 3); the rest of this file uses the print() function.
    print('test accuracy:', accuracy)
# Example 5
def train():
    """Train a tied-weight denoising autoencoder on MNIST with Gaussian input noise."""

    # dataset: reconstruction task, so targets equal inputs
    mnist = Mnist(batch_size=64, train_valid_test_ratio=[5, 1, 1])
    mnist.set_train(X=mnist.get_train().X, y=mnist.get_train().X)
    mnist.set_valid(X=mnist.get_valid().X, y=mnist.get_valid().X)

    model = Sequential(input_var=T.matrix(), output_var=T.matrix())

    # encoder: Gaussian corruption, then 784 -> 200 -> 50
    model.add(Gaussian())
    enc_fc1 = Linear(prev_dim=28 * 28, this_dim=200)
    enc_fc2 = Linear(prev_dim=200, this_dim=50)
    model.add(enc_fc1)
    model.add(RELU())
    model.add(enc_fc2)
    model.add(Tanh())

    # decoder mirrors the encoder, reusing the transposed encoder weights (tied weights)
    dec_fc1 = Linear(prev_dim=50, this_dim=200, W=enc_fc2.W.T)
    dec_fc2 = Linear(prev_dim=200, this_dim=28 * 28, W=enc_fc1.W.T)
    model.add(dec_fc1)
    model.add(RELU())
    model.add(dec_fc2)
    model.add(Sigmoid())

    # AdaGrad with momentum and periodic learning-rate decay
    learning_method = AdaGrad(learning_rate=0.01,
                              momentum=0.9,
                              lr_decay_factor=0.9,
                              decay_batch=10000)

    # cross-entropy reconstruction cost for both training and validation
    train_object = TrainObject(model=model,
                               log=None,
                               dataset=mnist,
                               train_cost=entropy,
                               valid_cost=entropy,
                               learning_method=learning_method,
                               stop_criteria={'max_epoch': 10,
                                              'epoch_look_back': 5,
                                              'percent_decrease': 0.01})
    train_object.setup()
    train_object.run()
# Example 6
def train():
    """Denoising autoencoder on MNIST with Gaussian noise and tied encoder/decoder weights."""

    # build the dataset; autoencoder targets are the inputs themselves
    dataset = Mnist(batch_size=64, train_valid_test_ratio=[5, 1, 1])
    dataset.set_train(X=dataset.get_train().X, y=dataset.get_train().X)
    dataset.set_valid(X=dataset.get_valid().X, y=dataset.get_valid().X)

    # encoder layers: corrupt the input, then 784 -> 200 -> 50
    encoder_a = Linear(prev_dim=28 * 28, this_dim=200)
    encoder_b = Linear(prev_dim=200, this_dim=50)
    # decoder layers share the encoder weights, transposed (tied weights)
    decoder_a = Linear(prev_dim=50, this_dim=200, W=encoder_b.W.T)
    decoder_b = Linear(prev_dim=200, this_dim=28 * 28, W=encoder_a.W.T)

    model = Sequential()
    for layer in (Gaussian(input_var=T.matrix()),
                  encoder_a, RELU(),
                  encoder_b, Tanh(),
                  decoder_a, RELU(),
                  decoder_b, Sigmoid()):
        model.add(layer)

    # optimizer: AdaGrad with momentum and stepwise learning-rate decay
    optimizer = AdaGrad(learning_rate=0.01, momentum=0.9,
                        lr_decay_factor=0.9, decay_batch=10000)

    # cross-entropy reconstruction cost for training and validation
    trainer = TrainObject(model=model,
                          log=None,
                          dataset=dataset,
                          train_cost=entropy,
                          valid_cost=entropy,
                          learning_method=optimizer,
                          stop_criteria={'max_epoch': 10,
                                         'epoch_look_back': 5,
                                         'percent_decrease': 0.01})
    trainer.setup()
    trainer.run()
# Example 7
def train():
    """Train an MLP classifier on MNIST with AdaGrad and print test accuracy."""

    # build dataset: MNIST split train/valid/test 5:1:1
    data = Mnist(batch_size=64, train_valid_test_ratio=[5, 1, 1])

    # build model: 784 -> 200 -> 100 -> 10 with ReLU, dropout and softmax output
    model = Sequential()
    model.add(Linear(prev_dim=28 * 28, this_dim=200))
    model.add(RELU())
    model.add(Linear(prev_dim=200, this_dim=100))
    model.add(RELU())
    model.add(Dropout(0.5))
    model.add(Linear(prev_dim=100, this_dim=10))
    model.add(Softmax())

    # build learning method: AdaGrad with momentum and stepwise decay
    learning_method = AdaGrad(learning_rate=0.1, momentum=0.9,
                              lr_decay_factor=0.9, decay_batch=10000)

    # put everything into the train object
    train_object = TrainObject(model=model,
                               log=None,
                               dataset=data,
                               train_cost=mse,
                               valid_cost=error,
                               learning_method=learning_method,
                               stop_criteria={'max_epoch': 10,
                                              'epoch_look_back': 5,
                                              'percent_decrease': 0.01}
                               )
    # finally run the code
    train_object.setup()
    train_object.run()

    # evaluate on the test split
    ypred = model.fprop(data.get_test().X)
    ypred = np.argmax(ypred, axis=1)
    y = np.argmax(data.get_test().y, axis=1)
    # FIX: the original used a Python 2 print statement (a SyntaxError on
    # Python 3); the rest of this file uses the print() function.
    print('test accuracy:', accuracy_score(y, ypred))