    # the patch target here is an assumption inferred from the `lr_mock`
    # argument: torchbearer's StepLR callback wraps torch.optim.lr_scheduler.StepLR
    @patch('torch.optim.lr_scheduler.StepLR')
    def test_step_lr_positional_args(self, lr_mock):
        state = {torchbearer.OPTIMIZER: 'optimizer'}

        scheduler = StepLR(10, gamma=0.4, last_epoch=-4, step_on_batch='batch')
        scheduler.on_start(state)

        lr_mock.assert_called_once_with('optimizer', 10, gamma=0.4, last_epoch=-4)
        self.assertEqual(scheduler._step_on_batch, 'batch')

    @patch('torch.optim.lr_scheduler.StepLR')  # assumed patch target, as above
    def test_step_lr_keyword_args(self, lr_mock):
        state = {torchbearer.OPTIMIZER: 'optimizer', torchbearer.EPOCH: 0}

        scheduler = StepLR(step_size=10, gamma=0.4, step_on_batch=True)
        scheduler.on_start(state)

        lr_mock.assert_called_once_with('optimizer',
                                        step_size=10,
                                        gamma=0.4,
                                        last_epoch=-1)
        self.assertTrue(scheduler._step_on_batch)
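
# For reference: the two test methods above assume standard unittest scaffolding
# that is not shown in this excerpt. The names below are assumptions inferred
# from the calls used in the tests:
#
#     from unittest import TestCase
#     from unittest.mock import patch
#
#     import torchbearer
#     from torchbearer.callbacks import StepLR
#
#     class TestStepLR(TestCase):
#         ...  # the two decorated test methods above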

# SVHN example script. The imports, the transform and the train split below are
# reconstructed from the code that follows; ToTensor() is a minimal, assumed
# choice of transform.
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import SVHN

import torchbearer
from torchbearer import Trial
from torchbearer.callbacks import LiveLossPlot, StepLR

transform = transforms.ToTensor()

# load the SVHN splits ('extra' is used for validation below)
trainset = SVHN(".", split='train', download=True, transform=transform)
testset = SVHN(".", split='test', download=True, transform=transform)
valset = SVHN(".", split='extra', download=True, transform=transform)

# create data loaders
trainloader = DataLoader(trainset, batch_size=128, shuffle=True)
testloader = DataLoader(testset, batch_size=128, shuffle=False)
valloader = DataLoader(valset, batch_size=128, shuffle=False)
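
# SimpleCNN is used below but is not defined in this excerpt; the architecture
# here is an assumption: a minimal convolutional sketch for 32x32 RGB SVHN
# images with 10 output classes, not necessarily the original model.
class SimpleCNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, padding=1),   # 3x32x32 -> 16x32x32
            nn.ReLU(),
            nn.MaxPool2d(2),                              # -> 16x16x16
            nn.Conv2d(16, 32, kernel_size=3, padding=1),  # -> 32x16x16
            nn.ReLU(),
            nn.MaxPool2d(2),                              # -> 32x8x8
        )
        self.classifier = nn.Linear(32 * 8 * 8, 10)       # 10 digit classes

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 32*8*8)
        return self.classifier(x)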

# build the model
model = SimpleCNN()

# define the loss function and the optimiser
loss_function = nn.CrossEntropyLoss()
live_loss_plot = LiveLossPlot(draw_once=True)
optimiser = optim.Adam(model.parameters(), lr=0.001)
scheduler = StepLR(step_size=10, gamma=0.5)

device = "cuda:0" if torch.cuda.is_available() else "cpu"
trial = Trial(model, optimiser, loss_function, callbacks=[scheduler, live_loss_plot], metrics=['loss', 'accuracy']).to(device)
trial.with_generators(trainloader, val_generator=valloader, test_generator=testloader)
history = trial.run(verbose=1, epochs=30)

results = trial.evaluate(data_key=torchbearer.TEST_DATA)
print(results)
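
# A quick, self-contained sketch (illustrative only, not part of the original
# script) of what StepLR(step_size=10, gamma=0.5) does to the learning rate
# above: torchbearer's StepLR callback wraps torch.optim.lr_scheduler.StepLR,
# which multiplies the lr by gamma every step_size epochs.
demo_opt = optim.Adam([nn.Parameter(torch.zeros(1))], lr=0.001)
demo_sched = optim.lr_scheduler.StepLR(demo_opt, step_size=10, gamma=0.5)
for demo_epoch in range(30):
    demo_opt.step()      # optimiser step first avoids the PyTorch ordering warning
    demo_sched.step()    # decay the lr at the end of each 'epoch'
    if (demo_epoch + 1) % 10 == 0:
        print('epoch', demo_epoch + 1, 'lr', demo_opt.param_groups[0]['lr'])
# prints lr 0.0005 after epoch 10, 0.00025 after 20, 0.000125 after 30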

# build the model
model = SimpleCNN()

# define the loss function and the optimiser
loss_function = nn.CrossEntropyLoss()
live_loss_plot = LiveLossPlot(draw_once=True)