def test_relu(self):
    """Smoke-test SGD training of a one-hidden-layer ReLU net with dropout.

    Trains for 20 epochs on the fixture data and asserts that the
    training error recorded by the progress monitor decreased between
    the first and last epoch.
    """
    net = NeuralNet(n_in=self.D, n_out=self.n_out, layers=[1000],
                    activation_function='relu', dropout=True)
    trainer = SGD(net, SimpleSGDUpdate, self.train_data, self.test_data,
                  learning_rate_schedule=exponential_scheduler(1., .99),
                  progress_monitor=SimpleProgressMonitor())
    trainer.run(20)

    # train_error entries look like (epoch, error); compare first vs. last.
    error_log = trainer.progress_monitor.train_error
    final_error = error_log[-1][1]
    initial_error = error_log[0][1]
    self.assertLess(final_error, initial_error)

    # Drop references so GPU memory held by the model/optimizer is released.
    del net, trainer
from hebel.schedulers import exponential_scheduler, linear_scheduler_up

# Seed Hebel's RNG so the run is reproducible.
hebel.init(random_seed=0)

# Initialize data providers
train_data = MNISTDataProvider('train', batch_size=100)
validation_data = MNISTDataProvider('val')
test_data = MNISTDataProvider('test')

D = train_data.D   # Dimensionality of inputs
K = 10             # Number of classes

# Create model object
# ReLU MLP with four hidden layers; dropout on hidden units plus 20%
# dropout applied directly to the inputs.
model = NeuralNet(n_in=train_data.D, n_out=K,
                  layers=[2000, 2000, 2000, 500],
                  activation_function='relu',
                  dropout=True,
                  input_dropout=0.2)

# Create optimizer object
# Saves a model checkpoint every 5 epochs under examples/mnist and
# writes progress to a log file rather than stdout.
progress_monitor = ProgressMonitor(experiment_name='mnist',
                                   save_model_path='examples/mnist',
                                   save_interval=5,
                                   output_to_log=True)

# NOTE(review): this SGD(...) call is truncated in this chunk — the
# remaining keyword arguments and closing paren continue past the
# visible portion of the file.
optimizer = SGD(model, MomentumUpdate, train_data, validation_data,
                progress_monitor,
                learning_rate_schedule=exponential_scheduler(5., .995),