Example #1
def test_relu(self):
    # Single hidden layer of 1000 ReLU units with dropout
    model = NeuralNet(n_in=self.D, n_out=self.n_out,
                      layers=[1000], activation_function='relu',
                      dropout=True)
    optimizer = SGD(model, SimpleSGDUpdate, self.train_data,
                    self.test_data,
                    learning_rate_schedule=exponential_scheduler(1., .99),
                    progress_monitor=SimpleProgressMonitor())
    optimizer.run(20)
    # After 20 epochs the training error should have dropped below
    # its value after the first epoch
    self.assertLess(optimizer.progress_monitor.train_error[-1][1],
                    optimizer.progress_monitor.train_error[0][1])
    del model, optimizer
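Note that test_relu is a test method: it assumes a surrounding test class that supplies self.D, self.n_out, self.train_data and self.test_data, plus module-level imports. A minimal sketch of that scaffolding, assuming a unittest.TestCase built on the MNIST data providers used in the examples below (the fixture is an assumption; only the attribute names come from the method above):

import unittest

import hebel
from hebel.models import NeuralNet
from hebel.optimizers import SGD
from hebel.parameter_updaters import SimpleSGDUpdate
from hebel.monitors import SimpleProgressMonitor
from hebel.schedulers import exponential_scheduler
from hebel.data_providers import MNISTDataProvider

hebel.init()


class TestNeuralNetMNIST(unittest.TestCase):
    # Hypothetical fixture providing the attributes test_relu relies on
    def setUp(self):
        self.train_data = MNISTDataProvider('train', batch_size=100)
        self.test_data = MNISTDataProvider('test')
        self.D = self.train_data.D  # input dimensionality (784 for MNIST)
        self.n_out = 10             # number of MNIST classes
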
Example #2

import hebel
from hebel.models import NeuralNet
from hebel.optimizers import SGD
from hebel.parameter_updaters import MomentumUpdate
from hebel.data_providers import MNISTDataProvider
from hebel.monitors import ProgressMonitor
from hebel.schedulers import exponential_scheduler, linear_scheduler_up

# Initialize Hebel (sets up the GPU context)
hebel.init()

# Initialize data providers
train_data = MNISTDataProvider('train', batch_size=100)
validation_data = MNISTDataProvider('val')
test_data = MNISTDataProvider('test')

D = train_data.D                        # Dimensionality of inputs 
K = 10                                  # Number of classes

# Create model object
model = NeuralNet(n_in=D, n_out=K,
                  layers=[1000, 500, 500],
                  activation_function='relu',
                  dropout=True)

# Create optimizer object
progress_monitor = ProgressMonitor(
    experiment_name='mnist',
    save_model_path='examples/mnist',
    save_interval=5,
    output_to_log=True)

optimizer = SGD(model, MomentumUpdate, train_data, validation_data,
                progress_monitor,
                learning_rate_schedule=exponential_scheduler(1., .995),
                momentum_schedule=linear_scheduler_up(.5, .9, 10))

# Run model
optimizer.run(100)
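The two schedulers control how the hyperparameters evolve from epoch to epoch: exponential_scheduler(1., .995) starts the learning rate at 1.0 and decays it by a factor of .995 each epoch, while linear_scheduler_up(.5, .9, 10) ramps the momentum from .5 up to .9 over the first 10 epochs and then holds it. A minimal sketch of the resulting values (the closed-form expressions are an assumption about the schedulers' semantics, not code from Hebel):

# Assumed semantics of the schedules passed to SGD above
for epoch in range(15):
    lr = 1.0 * .995 ** epoch                          # exponential decay
    momentum = min(.5 + (.9 - .5) * epoch / 10., .9)  # linear ramp, then flat
    print('epoch %2d: lr=%.4f, momentum=%.2f' % (epoch, lr, momentum))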
Example #3
import hebel
from hebel.models import NeuralNet
from hebel.optimizers import SGD
from hebel.parameter_updaters import MomentumUpdate
from hebel.data_providers import MNISTDataProvider
from hebel.monitors import SimpleProgressMonitor
from hebel.schedulers import exponential_scheduler, linear_scheduler_up

# Initialize Hebel (sets up the GPU context)
hebel.init()

# Initialize data providers
train_data = MNISTDataProvider('train', batch_size=100)
validation_data = MNISTDataProvider('val')
test_data = MNISTDataProvider('test')

D = train_data.D                        # Dimensionality of inputs 
K = 10                                  # Number of classes

# Create model object
model = NeuralNet(n_in=D, n_out=K,
                  layers=[1000, 500, 500],
                  activation_function='relu',
                  dropout=True)

# Create optimizer object
optimizer = SGD(model, MomentumUpdate, train_data, validation_data,
                learning_rate_schedule=exponential_scheduler(1., .995),
                momentum_schedule=linear_scheduler_up(.5, .9, 10),
                progress_monitor=SimpleProgressMonitor())

# Run model
optimizer.run(100)

# Evaluate error on the test set: sum misclassified examples over
# all batches, then normalize by the number of test cases
test_error = 0
for batch_data, batch_targets in test_data:
    test_error += model.test_error(batch_data, batch_targets, average=False)
test_error /= float(test_data.N)
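Because the loop accumulates raw misclassification counts (average=False) and then divides by the number of test cases, test_error ends up as the fraction of misclassified examples; a short follow-up for reporting it:

# Report the final test error as a percentage
print('Test error: %.2f%%' % (100 * test_error))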
Example #4

import hebel
from hebel.models import NeuralNet
from hebel.optimizers import SGD
from hebel.parameter_updaters import MomentumUpdate
from hebel.data_providers import MNISTDataProvider
from hebel.monitors import ProgressMonitor
from hebel.schedulers import exponential_scheduler, linear_scheduler_up

hebel.init(random_seed=0)

# Initialize data providers
train_data = MNISTDataProvider('train', batch_size=100)
validation_data = MNISTDataProvider('val')
test_data = MNISTDataProvider('test')

D = train_data.D  # Dimensionality of inputs
K = 10  # Number of classes

# Create model object
model = NeuralNet(n_in=D,
                  n_out=K,
                  layers=[2000, 2000, 2000, 500],
                  activation_function='relu',
                  dropout=True,
                  input_dropout=0.2)

# Create optimizer object
progress_monitor = ProgressMonitor(experiment_name='mnist',
                                   save_model_path='examples/mnist',
                                   save_interval=5,
                                   output_to_log=True)

optimizer = SGD(model,
                MomentumUpdate,
                train_data,
                validation_data,
                progress_monitor,
                learning_rate_schedule=exponential_scheduler(5., .995),