Example #1
def test_nesterov_momentum(self):
    # Train a small dropout network with Nesterov momentum for 20 epochs
    # and check that the training error decreased.
    model = NeuralNet(n_in=self.D, n_out=self.n_out,
                      layers=[100], activation_function='relu',
                      dropout=True)
    optimizer = SGD(model, NesterovMomentumUpdate, self.train_data,
                    self.test_data,
                    learning_rate_schedule=exponential_scheduler(1., .99),
                    momentum_schedule=linear_scheduler_up(.5, .9, 5),
                    progress_monitor=SimpleProgressMonitor())
    optimizer.run(20)
    self.assertLess(optimizer.progress_monitor.train_error[-1][1],
                    optimizer.progress_monitor.train_error[0][1])
    del model, optimizer  # drop references so GPU memory can be reclaimed
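
The two schedulers feed the optimizer one value per epoch. A minimal sketch of their apparent semantics, inferred from how they are called here (an assumption, not Hebel's actual implementation):

def exponential_scheduler(start, decay):
    # start, start * decay, start * decay ** 2, ...
    value = start
    while True:
        yield value
        value *= decay

def linear_scheduler_up(start, stop, n_steps):
    # Ramp linearly from start to stop over the first n_steps epochs,
    # then hold at stop.
    for step in range(n_steps):
        yield start + (stop - start) * step / float(max(n_steps - 1, 1))
    while True:
        yield stop

So exponential_scheduler(1., .99) decays the learning rate by 1% per epoch, and linear_scheduler_up(.5, .9, 5) warms the momentum up from 0.5 to 0.9 over the first five epochs.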
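The remaining examples are variants of the same MNIST training script and omit their setup. A plausible shared preamble, assuming Hebel's module layout (hebel.monitors and hebel.schedulers appear verbatim in Example #4 below; the other import paths and hebel.init follow Hebel's published MNIST example, so treat them as assumptions):

import hebel
from hebel.models import NeuralNet
from hebel.optimizers import SGD
from hebel.parameter_updaters import MomentumUpdate
from hebel.data_providers import MNISTDataProvider
from hebel.monitors import ProgressMonitor, SimpleProgressMonitor
from hebel.schedulers import exponential_scheduler, linear_scheduler_up

hebel.init()  # initialize the GPU context before building any model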
Example #2

# Initialize data providers
train_data = MNISTDataProvider('train', batch_size=100)
validation_data = MNISTDataProvider('val')
test_data = MNISTDataProvider('test')

D = train_data.D                        # Dimensionality of inputs
K = 10                                  # Number of classes

# Create model object
model = NeuralNet(n_in=D, n_out=K,
                  layers=[1000, 500, 500],
                  activation_function='relu',
                  dropout=True)

# Create optimizer object
progress_monitor = ProgressMonitor(
    experiment_name='mnist',
    save_model_path='examples/mnist',
    save_interval=5,
    output_to_log=True)

optimizer = SGD(model, MomentumUpdate, train_data, validation_data,
                progress_monitor,
                learning_rate_schedule=exponential_scheduler(1., .995),
                momentum_schedule=linear_scheduler_up(.5, .9, 10))

# Run model
optimizer.run(100)

# Evaluate error on test set
test_error = model.test_error(test_data)
print("Error on test set: %.3f" % test_error)
Example #3

# Initialize data providers
train_data = MNISTDataProvider('train', batch_size=100)
validation_data = MNISTDataProvider('val')
test_data = MNISTDataProvider('test')

K = 10  # Number of classes

# Create model object
model = NeuralNet(n_in=train_data.D,
                  n_out=K,
                  layers=[2000, 2000, 2000, 500],
                  activation_function='relu',
                  dropout=True,
                  input_dropout=0.2)

# Create optimizer object
progress_monitor = ProgressMonitor(experiment_name='mnist',
                                   save_model_path='examples/mnist',
                                   save_interval=5,
                                   output_to_log=True)

optimizer = SGD(model,
                MomentumUpdate,
                train_data,
                validation_data,
                progress_monitor,
                learning_rate_schedule=exponential_scheduler(5., .995),
                momentum_schedule=linear_scheduler_up(.1, .9, 100))

# Run model
optimizer.run(50)

# Evaluate error on test set
test_error = model.test_error(test_data)
print("Error on test set: %.3f" % test_error)
Example #4

from hebel.monitors import SimpleProgressMonitor
from hebel.schedulers import exponential_scheduler, linear_scheduler_up

# Initialize data providers
train_data = MNISTDataProvider('train', batch_size=100)
validation_data = MNISTDataProvider('val')
test_data = MNISTDataProvider('test')

D = train_data.D                        # Dimensionality of inputs
K = 10                                  # Number of classes

# Create model object
model = NeuralNet(n_in=D, n_out=K,
                  layers=[2000, 2000, 2000, 500],
                  activation_function='relu',
                  dropout=True, input_dropout=0.2)

# Create optimizer object
progress_monitor = SimpleProgressMonitor()

optimizer = SGD(model, MomentumUpdate, train_data, validation_data,
                progress_monitor,
                learning_rate_schedule=exponential_scheduler(5., .995),
                momentum_schedule=linear_scheduler_up(.1, .9, 100))

# Run model
optimizer.run(5)

# Evaluate error on test set
test_error = model.test_error(test_data)
print("Error on test set: %.3f" % test_error)
Example #5
# Initialize data providers
train_data = MNISTDataProvider('train', batch_size=100)
validation_data = MNISTDataProvider('val')
test_data = MNISTDataProvider('test')

D = train_data.D  # Dimensionality of inputs
K = 10  # Number of classes

# Create model object
model = NeuralNet(n_in=D,
                  n_out=K,
                  layers=[1000, 500, 500],
                  activation_function='relu',
                  dropout=True)

# Create optimizer object
progress_monitor = ProgressMonitor(experiment_name='mnist',
                                   save_model_path='examples/mnist',
                                   save_interval=5,
                                   output_to_log=True)

optimizer = SGD(model,
                MomentumUpdate,
                train_data,
                validation_data,
                progress_monitor,
                learning_rate_schedule=exponential_scheduler(1., .995),
                momentum_schedule=linear_scheduler_up(.5, .9, 10))

# Run model
optimizer.run(100)

# Evaluate error on test set
test_error = model.test_error(test_data)
print("Error on test set: %.3f" % test_error)
Example #6
# Initialize data providers
train_data = MNISTDataProvider('train', batch_size=100)
validation_data = MNISTDataProvider('val')
test_data = MNISTDataProvider('test')

K = 10  # Number of classes

# Create model object
model = NeuralNet(
    n_in=train_data.D,
    n_out=K,
    layers=[2000, 2000, 2000, 500],
    activation_function="relu",
    dropout=True,
    input_dropout=0.2,
)

# Create optimizer object
progress_monitor = ProgressMonitor(
    experiment_name="mnist", save_model_path="examples/mnist", save_interval=5, output_to_log=True
)

optimizer = SGD(
    model,
    MomentumUpdate,
    train_data,
    validation_data,
    progress_monitor,
    learning_rate_schedule=exponential_scheduler(5.0, 0.995),
    momentum_schedule=linear_scheduler_up(0.1, 0.9, 100),
)

# Run model
optimizer.run(50)

# Evaluate error on test set
test_error = model.test_error(test_data)
print("Error on test set: %.3f" % test_error)