Example #1

def main():
    import numpy as np
    import pycuda.autoinit
    from pycuda import gpuarray
    from skdata import toy
    from hebel import memory_pool
    from hebel.data_providers import BatchDataProvider
    from hebel.models import NeuralNetRegression
    from hebel.optimizers import SGD
    from hebel.parameter_updaters import SimpleSGDUpdate
    from hebel.monitors import SimpleProgressMonitor
    from hebel.schedulers import exponential_scheduler

    # Get data
    data_cpu, targets_cpu = toy.Boston().regression_task()
    data = gpuarray.to_gpu(data_cpu.astype(np.float32), allocator=memory_pool.allocate)
    targets = gpuarray.to_gpu(targets_cpu.astype(np.float32), allocator=memory_pool.allocate)
    data_provider = BatchDataProvider(data, targets)

    # Create model object
    model = NeuralNetRegression(n_in=data_cpu.shape[1], n_out=targets_cpu.shape[1],
                                layers=[100], activation_function='relu')
    
    # Create optimizer object
    optimizer = SGD(model, SimpleSGDUpdate, data_provider, data_provider,
                    progress_monitor=SimpleProgressMonitor(),
                    learning_rate_schedule=exponential_scheduler(.1, .9999),
                    early_stopping=True)
    optimizer.run(3000)
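    # Sketch (assumption): NeuralNetRegression inherits the test_error
    # method that the MNIST examples below call on NeuralNet, so the fit
    # can be checked on the same provider that was used for training.
    train_error = model.test_error(data_provider)
    print("Error on training set: %.4f" % train_error)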
Example #2
def test_relu(self):
    model = NeuralNet(n_in=self.D, n_out=self.n_out,
                      layers=[1000], activation_function='relu',
                      dropout=True)
    optimizer = SGD(model, SimpleSGDUpdate, self.train_data,
                    self.test_data,
                    learning_rate_schedule=exponential_scheduler(1., .99),
                    progress_monitor=SimpleProgressMonitor())
    optimizer.run(20)
    self.assertLess(optimizer.progress_monitor.train_error[-1][1],
                    optimizer.progress_monitor.train_error[0][1])
    del model, optimizer
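The method above assumes a unittest fixture that supplies self.D, self.n_out, self.train_data, and self.test_data. A minimal sketch of such a fixture, reusing the BatchDataProvider from Example #1; the class name and all sizes here are guesses:

import unittest
import numpy as np
import pycuda.autoinit
from pycuda import gpuarray
from hebel.data_providers import BatchDataProvider
from hebel.models import NeuralNet
from hebel.optimizers import SGD
from hebel.parameter_updaters import SimpleSGDUpdate
from hebel.monitors import SimpleProgressMonitor
from hebel.schedulers import exponential_scheduler

class NeuralNetTest(unittest.TestCase):  # hypothetical test class
    def setUp(self):
        self.D, self.n_out, N = 784, 10, 1000
        # Random inputs and one-hot targets, moved to the GPU
        X = np.random.rand(N, self.D).astype(np.float32)
        labels = np.random.randint(self.n_out, size=N)
        Y = np.eye(self.n_out, dtype=np.float32)[labels]
        self.train_data = BatchDataProvider(gpuarray.to_gpu(X), gpuarray.to_gpu(Y))
        self.test_data = BatchDataProvider(gpuarray.to_gpu(X), gpuarray.to_gpu(Y))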
Example #4

import pycuda.autoinit
from hebel.models import NeuralNet
from hebel.optimizers import SGD
from hebel.parameter_updaters import MomentumUpdate
from hebel.data_providers import MNISTDataProvider
from hebel.monitors import ProgressMonitor
from hebel.schedulers import exponential_scheduler, linear_scheduler_up

# Initialize data providers (batch_size of 100 is an assumption)
train_data = MNISTDataProvider('train', batch_size=100)
validation_data = MNISTDataProvider('val')
test_data = MNISTDataProvider('test')

D = train_data.D                        # Dimensionality of inputs 
K = 10                                  # Number of classes

# Create model object
model = NeuralNet(n_in=D, n_out=K,
                  layers=[1000, 500, 500],
                  activation_function='relu',
                  dropout=True)

# Create optimizer object
progress_monitor = ProgressMonitor(
    experiment_name='mnist',
    save_model_path='examples/mnist',
    save_interval=5,
    output_to_log=True)

optimizer = SGD(model, MomentumUpdate, train_data, validation_data,
                progress_monitor,
                learning_rate_schedule=exponential_scheduler(1., .995),
                momentum_schedule=linear_scheduler_up(.5, .9, 10))

# Run model
optimizer.run(100)

# Evaluate error on test set
test_error = model.test_error(test_data)
print("Error on test set: %.3f" % test_error)
Example #5

# Imports and MNIST data providers as in the previous example
K = 10  # Number of classes

# Create model object
model = NeuralNet(n_in=train_data.D,
                  n_out=K,
                  layers=[2000, 2000, 2000, 500],
                  activation_function='relu',
                  dropout=True,
                  input_dropout=0.2)

# Create optimizer object
progress_monitor = ProgressMonitor(experiment_name='mnist',
                                   save_model_path='examples/mnist',
                                   save_interval=5,
                                   output_to_log=True)

optimizer = SGD(model,
                MomentumUpdate,
                train_data,
                validation_data,
                progress_monitor,
                learning_rate_schedule=exponential_scheduler(5., .995),
                momentum_schedule=linear_scheduler_up(.1, .9, 100))

# Run model
optimizer.run(50)

# Evaluate error on test set
test_error = model.test_error(test_data)
print("Error on test set: %.3f" % test_error)
Example #6
# Imports, data providers, and K as in the previous examples
model = NeuralNet(
    n_in=train_data.D,
    n_out=K,
    layers=[2000, 2000, 2000, 500],
    activation_function="relu",
    dropout=True,
    input_dropout=0.2,
)

# Create optimizer object
progress_monitor = ProgressMonitor(
    experiment_name="mnist", save_model_path="examples/mnist", save_interval=5, output_to_log=True
)

optimizer = SGD(
    model,
    MomentumUpdate,
    train_data,
    validation_data,
    progress_monitor,
    learning_rate_schedule=exponential_scheduler(5.0, 0.995),
    momentum_schedule=linear_scheduler_up(0.1, 0.9, 100),
)

# Run model
optimizer.run(50)

# Evaluate error on test set
test_error = model.test_error(test_data)
print("Error on test set: %.3f" % test_error)