def test_simulate_lrs_batch_step(self, policy):
    lr_sch = LRScheduler(
        policy, base_lr=1, max_lr=5, step_size_up=4, step_every='batch')
    lrs = lr_sch.simulate(11, 1)
    expected = np.array([1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3])
    assert np.allclose(expected, lrs)

def test_lr_scheduler_record_epoch_step(
        self, classifier_module, classifier_data, policy, kwargs):
    epochs = 3
    scheduler = LRScheduler(policy, **kwargs)
    lrs = scheduler.simulate(epochs, initial_lr=123.)
    net = NeuralNetClassifier(
        classifier_module,
        max_epochs=epochs,
        lr=123.,
        callbacks=[('scheduler', scheduler)],
    )
    net.fit(*classifier_data)
    assert np.all(net.history[:, 'event_lr'] == lrs)

def test_lr_scheduler_record_batch_step(self, classifier_module, classifier_data):
    X, y = classifier_data
    batch_size = 128

    # step_every='batch' so the scheduler (and thus 'event_lr') is updated
    # once per training batch, matching the per-batch simulation below
    scheduler = LRScheduler(
        TorchCyclicLR,
        base_lr=1,
        max_lr=5,
        step_size_up=4,
        step_every='batch',
    )
    net = NeuralNetClassifier(
        classifier_module,
        max_epochs=1,
        lr=123.,
        batch_size=batch_size,
        callbacks=[('scheduler', scheduler)],
    )
    net.fit(X, y)
    new_lrs = scheduler.simulate(
        net.history[-1, 'train_batch_count'],
        initial_lr=123.,
    )
    assert np.all(net.history[-1, 'batches', :, 'event_lr'] == new_lrs)

def test_simulate_lrs_batch_step(self):
    lr_policy = LRScheduler(
        CyclicLR, base_lr=1, max_lr=5, step_size_up=4)
    lrs = lr_policy.simulate(11, 1)
    expected = np.array([1, 2, 3, 4, 5, 4, 3, 2, 1, 2, 3])
    assert np.allclose(expected, lrs)

def test_simulate_lrs_epoch_step(self):
    lr_policy = LRScheduler(StepLR, step_size=2)
    lrs = lr_policy.simulate(6, 1)
    expected = np.array([1.0, 1.0, 0.1, 0.1, 0.01, 0.01])
    assert np.allclose(expected, lrs)