def test_poutyne():
    callback = PlotLossesPoutyne(outputs=(CheckOutput(), ))
    network = Network()
    optimizer = optim.Adam(params=network.parameters(), lr=0.001)
    loss_fn = nn.CrossEntropyLoss()

    train_dataloader = get_random_data()

    model = Model(network, optimizer, loss_fn)
    model.fit_generator(train_dataloader, epochs=2, callbacks=[callback])
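
Note: the fixtures used above (Network, get_random_data, CheckOutput, PlotLossesPoutyne) are not shown; PlotLossesPoutyne and CheckOutput appear to come from the livelossplot package and its test utilities. A minimal sketch of what the two missing local fixtures might look like (all definitions below are assumptions, not the original code):

import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset


class Network(nn.Module):
    """Tiny classifier, just enough to exercise the training loop."""

    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(4, 3)

    def forward(self, x):
        return self.fc(x)


def get_random_data(num_samples=32, batch_size=8):
    """Random features with integer class labels, wrapped in a DataLoader."""
    x = torch.rand(num_samples, 4)
    y = torch.randint(0, 3, (num_samples,))
    return DataLoader(TensorDataset(x, y), batch_size=batch_size)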
Example #2
class LRSchedulersTest(TestCase):
    batch_size = 20
    epochs = 10
    steps_per_epoch = 5

    def setUp(self):
        torch.manual_seed(42)
        self.pytorch_network = nn.Linear(1, 1)
        self.loss_function = nn.MSELoss()
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(),
                                         lr=1e-3)
        self.model = Model(self.pytorch_network, self.optimizer,
                           self.loss_function)
        self.train_gen = some_data_generator(20)
        self.valid_gen = some_data_generator(20)

    def test_lambda_lr_integration(self):
        my_lambda = lambda epoch: 0.95**epoch
        lambda_lr = LambdaLR(lr_lambda=[my_lambda])
        self._fit_with_callback_integration(lambda_lr)

    def test_step_lr_integration(self):
        step_lr = StepLR(step_size=3)
        self._fit_with_callback_integration(step_lr)

    def test_multistep_lr_integration(self):
        multistep_lr = MultiStepLR(milestones=[2, 5, 7])
        self._fit_with_callback_integration(multistep_lr)

    def test_exponential_lr_integration(self):
        exponential_lr = ExponentialLR(gamma=0.01)
        self._fit_with_callback_integration(exponential_lr)

    def test_cosine_annealing_lr_integration(self):
        cosine_annealing_lr = CosineAnnealingLR(T_max=8)
        self._fit_with_callback_integration(cosine_annealing_lr)

    def test_reduce_lr_on_plateau_integration(self):
        reduce_lr = ReduceLROnPlateau(monitor='loss', patience=3)
        self._fit_with_callback_integration(reduce_lr)

    def _fit_with_callback_integration(self, callback):
        self.model.fit_generator(
            self.train_gen,
            self.valid_gen,
            epochs=LRSchedulersTest.epochs,
            steps_per_epoch=LRSchedulersTest.steps_per_epoch,
            callbacks=[callback],
        )

    def test_exception_is_thrown_on_optimizer_argument(self):
        with self.assertRaises(ValueError):
            StepLR(self.optimizer, step_size=3)
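
Note: some_data_generator (and the near-identical some_data_tensor_generator used in later examples) is a test fixture that is not shown. A minimal sketch, assuming it simply yields random (x, y) tensor pairs forever for a 1-in/1-out regression:

import torch


def some_data_generator(batch_size):
    """Yield random (x, y) batches endlessly."""
    while True:
        x = torch.rand(batch_size, 1)
        y = torch.rand(batch_size, 1)
        yield x, y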
Example #3
    def test_correct_optim_calls_1_batch_per_step(self):
        train_generator = some_data_tensor_generator(ModelTest.batch_size)

        mocked_optimizer = some_mocked_optimizer()
        mocked_optim_model = Model(self.pytorch_network,
                                   mocked_optimizer,
                                   self.loss_function,
                                   batch_metrics=self.batch_metrics,
                                   epoch_metrics=self.epoch_metrics)
        mocked_optim_model.fit_generator(train_generator,
                                         None,
                                         epochs=1,
                                         steps_per_epoch=1,
                                         batches_per_step=1)

        self.assertEqual(1, mocked_optimizer.step.call_count)
        self.assertEqual(1, mocked_optimizer.zero_grad.call_count)
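
Note: some_mocked_optimizer is another fixture left out of the excerpt. A plausible stand-in built on unittest.mock, so that step() and zero_grad() call counts can be asserted (an assumption, not the original helper):

from unittest.mock import MagicMock


def some_mocked_optimizer():
    """Optimizer-shaped mock whose step()/zero_grad() call counts can be asserted."""
    optimizer = MagicMock()
    optimizer.param_groups = [{'lr': 1e-3, 'params': []}]  # minimal shape some callbacks inspect
    return optimizer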
Example #4
    def test_metrics_integration(self):
        num_steps = 10
        model = Model(self.pytorch_network,
                      self.optimizer,
                      self.loss_function,
                      batch_metrics=[F.mse_loss])
        train_generator = some_data_tensor_generator(ModelTest.batch_size)
        valid_generator = some_data_tensor_generator(ModelTest.batch_size)
        model.fit_generator(train_generator,
                            valid_generator,
                            epochs=ModelTest.epochs,
                            steps_per_epoch=ModelTest.steps_per_epoch,
                            validation_steps=ModelTest.steps_per_epoch,
                            callbacks=[self.mock_callback])
        generator = some_data_tensor_generator(ModelTest.batch_size)
        loss, mse = model.evaluate_generator(generator, steps=num_steps)
        self.assertEqual(type(loss), float)
        self.assertEqual(type(mse), float)
Example #5
    def test_correct_optim_calls__valid_n_batches_per_step(self):
        n_batches = 5
        items_per_batch = int(ModelTest.batch_size / n_batches)

        x = torch.rand(n_batches, items_per_batch, 1)
        y = torch.rand(n_batches, items_per_batch, 1)

        mocked_optimizer = some_mocked_optimizer()
        mocked_optim_model = Model(self.pytorch_network,
                                   mocked_optimizer,
                                   self.loss_function,
                                   batch_metrics=self.batch_metrics,
                                   epoch_metrics=self.epoch_metrics)
        mocked_optim_model.fit_generator(list(zip(x, y)),
                                         None,
                                         epochs=1,
                                         batches_per_step=n_batches)

        self.assertEqual(1, mocked_optimizer.step.call_count)
        self.assertEqual(1, mocked_optimizer.zero_grad.call_count)
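
Note: the two tests above verify that batches_per_step=n produces a single step()/zero_grad() pair for every n batches, i.e. gradient accumulation. A rough plain-PyTorch sketch of one accumulated step (uniform loss averaging here is an assumption; all names are illustrative):

import torch
from torch import nn


def accumulated_step(network, optimizer, loss_function, batches):
    """One optimizer step over several mini-batches (gradient accumulation)."""
    optimizer.zero_grad()
    for x, y in batches:
        loss = loss_function(network(x), y)
        (loss / len(batches)).backward()  # accumulate an averaged contribution per batch
    optimizer.step()


# Usage sketch: five (x, y) mini-batches feeding a 1-in/1-out linear model.
network = nn.Linear(1, 1)
optimizer = torch.optim.SGD(network.parameters(), lr=1e-3)
batches = [(torch.rand(4, 1), torch.rand(4, 1)) for _ in range(5)]
accumulated_step(network, optimizer, nn.MSELoss(), batches)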
Example #6
class BaseTensorBoardLoggerTest:
    SummaryWriter = None
    batch_size = 20
    lr = 1e-3
    num_epochs = 10

    def setUp(self):
        torch.manual_seed(42)
        self.pytorch_network = nn.Linear(1, 1)
        self.loss_function = nn.MSELoss()
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(), lr=BaseTensorBoardLoggerTest.lr)
        self.model = Model(self.pytorch_network, self.optimizer, self.loss_function)
        self.temp_dir_obj = TemporaryDirectory()
        # pylint: disable=not-callable
        self.writer = self.SummaryWriter(self.temp_dir_obj.name)
        self.writer.add_scalars = MagicMock()

    def tearDown(self):
        self.temp_dir_obj.cleanup()

    def test_logging(self):
        train_gen = some_data_generator(20)
        valid_gen = some_data_generator(20)
        logger = TensorBoardLogger(self.writer)
        history = self.model.fit_generator(
            train_gen, valid_gen, epochs=self.num_epochs, steps_per_epoch=5, callbacks=[logger]
        )
        self._test_logging(history)

    def test_multiple_learning_rates(self):
        train_gen = some_data_generator(20)
        valid_gen = some_data_generator(20)
        logger = TensorBoardLogger(self.writer)
        lrs = [BaseTensorBoardLoggerTest.lr, BaseTensorBoardLoggerTest.lr / 2]
        optimizer = torch.optim.SGD(
            [dict(params=[self.pytorch_network.weight], lr=lrs[0]), dict(params=[self.pytorch_network.bias], lr=lrs[1])]
        )
        model = Model(self.pytorch_network, optimizer, self.loss_function)
        history = model.fit_generator(
            train_gen, valid_gen, epochs=self.num_epochs, steps_per_epoch=5, callbacks=[logger]
        )
        self._test_logging(history, lrs=lrs)

    def _test_logging(self, history, lrs=None):
        if lrs is None:
            lrs = [BaseTensorBoardLoggerTest.lr]
        calls = []
        for h in history:
            calls.append(call('loss', {'loss': h['loss'], 'val_loss': h['val_loss']}, h['epoch']))
            if len(lrs) == 1:
                calls.append(call('lr', {'lr': self.lr}, h['epoch']))
            else:
                calls.append(call('lr', {f'lr_group_{i}': lr for i, lr in enumerate(lrs)}, h['epoch']))
        self.writer.add_scalars.assert_has_calls(calls, any_order=True)
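
Note: outside a test harness, the same callback is driven by a real SummaryWriter. A minimal usage sketch, assuming TensorBoardLogger is exported from the top-level poutyne package (as in recent versions) and using hypothetical data loaders and log directory:

import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.tensorboard import SummaryWriter
from poutyne import Model, TensorBoardLogger

network = nn.Linear(1, 1)
model = Model(network, torch.optim.SGD(network.parameters(), lr=1e-3), nn.MSELoss())

train_loader = DataLoader(TensorDataset(torch.rand(100, 1), torch.rand(100, 1)), batch_size=20)
valid_loader = DataLoader(TensorDataset(torch.rand(40, 1), torch.rand(40, 1)), batch_size=20)

writer = SummaryWriter('runs/poutyne_example')  # hypothetical log directory
model.fit_generator(train_loader, valid_loader, epochs=5, callbacks=[TensorBoardLogger(writer)])
writer.close()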
Example #7
    def test_multiple_learning_rates(self):
        train_gen = some_data_generator(20)
        valid_gen = some_data_generator(20)
        logger = self.CSVLogger(self.csv_filename)
        lrs = [BaseCSVLoggerTest.lr, BaseCSVLoggerTest.lr / 2]
        optimizer = torch.optim.SGD(
            [dict(params=[self.pytorch_network.weight], lr=lrs[0]), dict(params=[self.pytorch_network.bias], lr=lrs[1])]
        )
        model = Model(self.pytorch_network, optimizer, self.loss_function)
        history = model.fit_generator(
            train_gen, valid_gen, epochs=self.num_epochs, steps_per_epoch=5, callbacks=[logger]
        )
        self._test_logging(history, lrs=lrs)
Example #8
    def test_epoch_metrics_integration(self):
        model = Model(self.pytorch_network,
                      self.optimizer,
                      self.loss_function,
                      epoch_metrics=[SomeEpochMetric()])
        train_generator = some_data_tensor_generator(ModelTest.batch_size)
        valid_generator = some_data_tensor_generator(ModelTest.batch_size)
        logs = model.fit_generator(train_generator,
                                   valid_generator,
                                   epochs=1,
                                   steps_per_epoch=ModelTest.steps_per_epoch,
                                   validation_steps=ModelTest.steps_per_epoch)
        actual_value = logs[-1]['some_epoch_metric']
        val_actual_value = logs[-1]['val_some_epoch_metric']
        expected_value = 5
        self.assertEqual(val_actual_value, expected_value)
        self.assertEqual(actual_value, expected_value)
Example #9
class BaseTensorBoardLoggerTest:
    SummaryWriter = None
    batch_size = 20
    lr = 1e-3
    num_epochs = 10

    def setUp(self):
        torch.manual_seed(42)
        self.pytorch_network = nn.Linear(1, 1)
        self.loss_function = nn.MSELoss()
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(),
                                         lr=BaseTensorBoardLoggerTest.lr)
        self.model = Model(self.pytorch_network, self.optimizer,
                           self.loss_function)
        self.temp_dir_obj = TemporaryDirectory()
        # pylint: disable=not-callable
        self.writer = self.SummaryWriter(self.temp_dir_obj.name)
        self.writer.add_scalars = MagicMock()

    def tearDown(self):
        self.temp_dir_obj.cleanup()

    def test_logging(self):
        train_gen = some_data_generator(20)
        valid_gen = some_data_generator(20)
        logger = TensorBoardLogger(self.writer)
        history = self.model.fit_generator(train_gen,
                                           valid_gen,
                                           epochs=self.num_epochs,
                                           steps_per_epoch=5,
                                           callbacks=[logger])
        self._test_logging(history)

    def _test_logging(self, history):
        calls = list()
        for h in history:
            calls.append(
                call('loss', {
                    'loss': h['loss'],
                    'val_loss': h['val_loss']
                }, h['epoch']))
            calls.append(call('lr', {'lr': self.lr}, h['epoch']))
        self.writer.add_scalars.assert_has_calls(calls, any_order=True)
Example #10
class EarlyStoppingTest(TestCase):
    batch_size = 20

    def setUp(self):
        torch.manual_seed(42)
        self.pytorch_network = nn.Linear(1, 1)
        self.loss_function = nn.MSELoss()
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(), lr=1e-3)
        self.model = Model(self.pytorch_network, self.optimizer, self.loss_function)

    def test_integration(self):
        train_gen = some_data_generator(20)
        valid_gen = some_data_generator(20)
        earlystopper = EarlyStopping(monitor='val_loss', min_delta=0, patience=2, verbose=False)
        self.model.fit_generator(train_gen, valid_gen, epochs=10, steps_per_epoch=5, callbacks=[earlystopper])

    def test_early_stopping_patience_of_1(self):
        earlystopper = EarlyStopping(monitor='val_loss', min_delta=0, patience=1, verbose=False)

        val_losses = [8, 4, 5, 2]
        early_stop_epoch = 3
        self._test_early_stopping(earlystopper, val_losses, early_stop_epoch)

    def test_early_stopping_with_delta(self):
        earlystopper = EarlyStopping(monitor='val_loss', min_delta=3, patience=2, verbose=False)

        val_losses = [8, 4, 5, 2, 2]
        early_stop_epoch = 4
        self._test_early_stopping(earlystopper, val_losses, early_stop_epoch)

    def test_early_stopping_with_max(self):
        earlystopper = EarlyStopping(monitor='val_loss', mode='max', min_delta=0, patience=2, verbose=False)

        val_losses = [2, 8, 4, 5, 2]
        early_stop_epoch = 4
        self._test_early_stopping(earlystopper, val_losses, early_stop_epoch)

    def _test_early_stopping(self, earlystopper, val_losses, early_stop_epoch):
        generator = some_data_generator(EarlyStoppingTest.batch_size)

        self.model.stop_training = False

        earlystopper.set_params({'epochs': len(val_losses), 'steps': 1})
        earlystopper.set_model(self.model)
        earlystopper.on_train_begin({})
        for epoch, val_loss in enumerate(val_losses, 1):
            earlystopper.on_epoch_begin(epoch, {})
            earlystopper.on_train_batch_begin(1, {})
            loss = self._update_model(generator)
            earlystopper.on_train_batch_end(1, {'batch': 1, 'size': EarlyStoppingTest.batch_size, 'loss': loss})
            earlystopper.on_epoch_end(epoch, {'epoch': epoch, 'loss': loss, 'val_loss': val_loss})
            self.assertEqual(self.model.stop_training, epoch == early_stop_epoch)
            if epoch == early_stop_epoch:
                break

        earlystopper.on_train_end({})

    def _update_model(self, generator):
        self.pytorch_network.zero_grad()

        x, y = next(generator)
        pred_y = self.pytorch_network(x)
        loss = self.loss_function(pred_y, y)
        loss.backward()

        self.optimizer.step()

        return float(loss)
Example #11
class ModelTest(ModelFittingTestCase):
    # pylint: disable=too-many-public-methods

    def setUp(self):
        super().setUp()
        torch.manual_seed(42)
        self.pytorch_network = nn.Linear(1, 1)
        self.loss_function = nn.MSELoss()
        self.optimizer = torch.optim.Adam(self.pytorch_network.parameters(),
                                          lr=1e-3)
        self.batch_metrics = [
            some_batch_metric_1, ('custom_name', some_batch_metric_2),
            repeat_batch_metric, repeat_batch_metric
        ]
        self.batch_metrics_names = [
            'some_batch_metric_1', 'custom_name', 'repeat_batch_metric1',
            'repeat_batch_metric2'
        ]
        self.batch_metrics_values = [
            some_metric_1_value, some_metric_2_value,
            repeat_batch_metric_value, repeat_batch_metric_value
        ]
        self.epoch_metrics = [SomeConstantEpochMetric()]
        self.epoch_metrics_names = ['some_constant_epoch_metric']
        self.epoch_metrics_values = [some_constant_epoch_metric_value]

        self.model = Model(self.pytorch_network,
                           self.optimizer,
                           self.loss_function,
                           batch_metrics=self.batch_metrics,
                           epoch_metrics=self.epoch_metrics)

    def test_fitting_tensor_generator(self):
        train_generator = some_data_tensor_generator(ModelTest.batch_size)
        valid_generator = some_data_tensor_generator(ModelTest.batch_size)
        logs = self.model.fit_generator(
            train_generator,
            valid_generator,
            epochs=ModelTest.epochs,
            steps_per_epoch=ModelTest.steps_per_epoch,
            validation_steps=ModelTest.steps_per_epoch,
            callbacks=[self.mock_callback])
        params = {
            'epochs': ModelTest.epochs,
            'steps': ModelTest.steps_per_epoch,
            'valid_steps': ModelTest.steps_per_epoch
        }
        self._test_callbacks_train(params,
                                   logs,
                                   valid_steps=ModelTest.steps_per_epoch)

    def test_fitting_without_valid_generator(self):
        train_generator = some_data_tensor_generator(ModelTest.batch_size)
        logs = self.model.fit_generator(
            train_generator,
            None,
            epochs=ModelTest.epochs,
            steps_per_epoch=ModelTest.steps_per_epoch,
            callbacks=[self.mock_callback])
        params = {
            'epochs': ModelTest.epochs,
            'steps': ModelTest.steps_per_epoch
        }
        self._test_callbacks_train(params, logs, has_valid=False)

    def test_correct_optim_calls_1_batch_per_step(self):
        train_generator = some_data_tensor_generator(ModelTest.batch_size)

        mocked_optimizer = some_mocked_optimizer()
        mocked_optim_model = Model(self.pytorch_network,
                                   mocked_optimizer,
                                   self.loss_function,
                                   batch_metrics=self.batch_metrics,
                                   epoch_metrics=self.epoch_metrics)
        mocked_optim_model.fit_generator(train_generator,
                                         None,
                                         epochs=1,
                                         steps_per_epoch=1,
                                         batches_per_step=1)

        self.assertEqual(1, mocked_optimizer.step.call_count)
        self.assertEqual(1, mocked_optimizer.zero_grad.call_count)

    def test_correct_optim_calls__valid_n_batches_per_step(self):
        n_batches = 5
        items_per_batch = int(ModelTest.batch_size / n_batches)

        x = torch.rand(n_batches, items_per_batch, 1)
        y = torch.rand(n_batches, items_per_batch, 1)

        mocked_optimizer = some_mocked_optimizer()
        mocked_optim_model = Model(self.pytorch_network,
                                   mocked_optimizer,
                                   self.loss_function,
                                   batch_metrics=self.batch_metrics,
                                   epoch_metrics=self.epoch_metrics)
        mocked_optim_model.fit_generator(list(zip(x, y)),
                                         None,
                                         epochs=1,
                                         batches_per_step=n_batches)

        self.assertEqual(1, mocked_optimizer.step.call_count)
        self.assertEqual(1, mocked_optimizer.zero_grad.call_count)

    def test_fitting_generator_n_batches_per_step(self):
        total_batch_size = 6

        x = torch.rand(1, total_batch_size, 1)
        y = torch.rand(1, total_batch_size, 1)

        initial_params = self.model.get_weight_copies()

        self.model.fit_generator(list(zip(x, y)),
                                 None,
                                 epochs=1,
                                 batches_per_step=1)

        expected_params = self.model.get_weight_copies()

        for mini_batch_size in [1, 2, 5]:
            self.model.set_weights(initial_params)

            n_batches_per_step = int(total_batch_size / mini_batch_size)

            x.resize_((n_batches_per_step, mini_batch_size, 1))
            y.resize_((n_batches_per_step, mini_batch_size, 1))

            self.model.fit_generator(list(zip(x, y)),
                                     None,
                                     epochs=1,
                                     batches_per_step=n_batches_per_step)

            returned_params = self.model.get_weight_copies()

            self.assertEqual(returned_params.keys(), expected_params.keys())
            for k in expected_params.keys():
                np.testing.assert_almost_equal(returned_params[k].numpy(),
                                               expected_params[k].numpy(),
                                               decimal=4)

    def test_fitting_generator_n_batches_per_step_higher_than_num_batches(
            self):
        total_batch_size = 6

        x = torch.rand(1, total_batch_size, 1)
        y = torch.rand(1, total_batch_size, 1)

        initial_params = self.model.get_weight_copies()

        self.model.fit_generator(list(zip(x, y)),
                                 None,
                                 epochs=1,
                                 batches_per_step=1)

        expected_params = self.model.get_weight_copies()

        self.model.set_weights(initial_params)

        self.model.fit_generator(list(zip(x, y)),
                                 None,
                                 epochs=1,
                                 batches_per_step=2)

        returned_params = self.model.get_weight_copies()

        self.assertEqual(returned_params.keys(), expected_params.keys())
        for k in expected_params.keys():
            np.testing.assert_almost_equal(returned_params[k].numpy(),
                                           expected_params[k].numpy(),
                                           decimal=4)

    def test_fitting_generator_n_batches_per_step_uneven_batches(self):
        total_batch_size = 6

        x = torch.rand(1, total_batch_size, 1)
        y = torch.rand(1, total_batch_size, 1)

        initial_params = self.model.get_weight_copies()

        self.model.fit_generator(list(zip(x, y)),
                                 None,
                                 epochs=1,
                                 batches_per_step=1)

        expected_params = self.model.get_weight_copies()

        x.squeeze_(dim=0)
        y.squeeze_(dim=0)

        uneven_chunk_sizes = [4, 5]

        for chunk_size in uneven_chunk_sizes:
            self.model.set_weights(initial_params)

            splitted_x = x.split(chunk_size)
            splitted_y = y.split(chunk_size)

            n_batches_per_step = ceil(total_batch_size / chunk_size)

            self.model.fit_generator(list(zip(splitted_x, splitted_y)),
                                     None,
                                     epochs=1,
                                     batches_per_step=n_batches_per_step)

            returned_params = self.model.get_weight_copies()

            self.assertEqual(returned_params.keys(), expected_params.keys())
            for k in expected_params.keys():
                np.testing.assert_almost_equal(returned_params[k].numpy(),
                                               expected_params[k].numpy(),
                                               decimal=4)

    def test_fitting_ndarray_generator(self):
        train_generator = some_ndarray_generator(ModelTest.batch_size)
        valid_generator = some_ndarray_generator(ModelTest.batch_size)
        logs = self.model.fit_generator(
            train_generator,
            valid_generator,
            epochs=ModelTest.epochs,
            steps_per_epoch=ModelTest.steps_per_epoch,
            validation_steps=ModelTest.steps_per_epoch,
            callbacks=[self.mock_callback])
        params = {
            'epochs': ModelTest.epochs,
            'steps': ModelTest.steps_per_epoch,
            'valid_steps': ModelTest.steps_per_epoch
        }
        self._test_callbacks_train(params,
                                   logs,
                                   valid_steps=ModelTest.steps_per_epoch)

    def test_fitting_with_data_loader(self):
        train_real_steps_per_epoch = 30
        train_batch_size = ModelTest.batch_size
        train_final_batch_missing_samples = 7
        train_size = train_real_steps_per_epoch * train_batch_size - \
                     train_final_batch_missing_samples
        train_x = torch.rand(train_size, 1)
        train_y = torch.rand(train_size, 1)
        train_dataset = TensorDataset(train_x, train_y)
        train_generator = DataLoader(train_dataset, train_batch_size)

        valid_real_steps_per_epoch = 10
        valid_batch_size = 15
        valid_final_batch_missing_samples = 3
        valid_size = valid_real_steps_per_epoch * valid_batch_size - \
                     valid_final_batch_missing_samples
        valid_x = torch.rand(valid_size, 1)
        valid_y = torch.rand(valid_size, 1)
        valid_dataset = TensorDataset(valid_x, valid_y)
        valid_generator = DataLoader(valid_dataset, valid_batch_size)

        logs = self.model.fit_generator(train_generator,
                                        valid_generator,
                                        epochs=ModelTest.epochs,
                                        steps_per_epoch=None,
                                        validation_steps=None,
                                        callbacks=[self.mock_callback])
        params = {
            'epochs': ModelTest.epochs,
            'steps': train_real_steps_per_epoch,
            'valid_steps': valid_real_steps_per_epoch
        }
        self._test_callbacks_train(params, logs)

    def test_fitting_generator_calls(self):
        train_real_steps_per_epoch = 30
        train_batch_size = ModelTest.batch_size
        train_final_batch_missing_samples = 7
        train_size = train_real_steps_per_epoch * train_batch_size - \
                     train_final_batch_missing_samples
        train_x = torch.rand(train_size, 1)
        train_y = torch.rand(train_size, 1)
        train_dataset = TensorDataset(train_x, train_y)
        train_generator = DataLoader(train_dataset, train_batch_size)

        valid_real_steps_per_epoch = 10
        valid_batch_size = 15
        valid_final_batch_missing_samples = 3
        valid_size = valid_real_steps_per_epoch * valid_batch_size - \
                     valid_final_batch_missing_samples
        valid_x = torch.rand(valid_size, 1)
        valid_y = torch.rand(valid_size, 1)
        valid_dataset = TensorDataset(valid_x, valid_y)
        valid_generator = DataLoader(valid_dataset, valid_batch_size)

        mock_train_generator = IterableMock(train_generator)
        mock_valid_generator = IterableMock(valid_generator)
        self.model.fit_generator(mock_train_generator,
                                 mock_valid_generator,
                                 epochs=ModelTest.epochs)
        expected_train_calls = ['__len__'] + \
                               (['__iter__'] + ['__next__'] * train_real_steps_per_epoch) * ModelTest.epochs
        expected_valid_calls = ['__len__'] + \
                               (['__iter__'] + ['__next__'] * valid_real_steps_per_epoch) * ModelTest.epochs
        self.assertEqual(mock_train_generator.calls, expected_train_calls)
        self.assertEqual(mock_valid_generator.calls, expected_valid_calls)

    def test_fitting_generator_calls_with_longer_validation_set(self):
        train_real_steps_per_epoch = 30
        train_batch_size = ModelTest.batch_size
        train_final_batch_missing_samples = 7
        train_size = train_real_steps_per_epoch * train_batch_size - \
                     train_final_batch_missing_samples
        train_x = torch.rand(train_size, 1)
        train_y = torch.rand(train_size, 1)
        train_dataset = TensorDataset(train_x, train_y)
        train_generator = DataLoader(train_dataset, train_batch_size)

        valid_real_steps_per_epoch = 40
        valid_batch_size = 15
        valid_final_batch_missing_samples = 3
        valid_size = valid_real_steps_per_epoch * valid_batch_size - \
                     valid_final_batch_missing_samples
        valid_x = torch.rand(valid_size, 1)
        valid_y = torch.rand(valid_size, 1)
        valid_dataset = TensorDataset(valid_x, valid_y)
        valid_generator = DataLoader(valid_dataset, valid_batch_size)

        mock_train_generator = IterableMock(train_generator)
        mock_valid_generator = IterableMock(valid_generator)
        self.model.fit_generator(mock_train_generator,
                                 mock_valid_generator,
                                 epochs=ModelTest.epochs)
        expected_train_calls = ['__len__'] + \
                               (['__iter__'] + ['__next__'] * train_real_steps_per_epoch) * ModelTest.epochs
        expected_valid_calls = ['__len__'] + \
                               (['__iter__'] + ['__next__'] * valid_real_steps_per_epoch) * ModelTest.epochs
        self.assertEqual(mock_train_generator.calls, expected_train_calls)
        self.assertEqual(mock_valid_generator.calls, expected_valid_calls)

    def test_fitting_with_tensor(self):
        train_real_steps_per_epoch = 30
        train_batch_size = ModelTest.batch_size
        train_final_batch_missing_samples = 7
        train_size = train_real_steps_per_epoch * train_batch_size - \
                     train_final_batch_missing_samples
        train_x = torch.rand(train_size, 1)
        train_y = torch.rand(train_size, 1)

        valid_real_steps_per_epoch = 10
        # valid_batch_size will be the same as train_batch_size in the fit method.
        valid_batch_size = train_batch_size
        valid_final_batch_missing_samples = 3
        valid_size = valid_real_steps_per_epoch * valid_batch_size - \
                     valid_final_batch_missing_samples
        valid_x = torch.rand(valid_size, 1)
        valid_y = torch.rand(valid_size, 1)

        logs = self.model.fit(train_x,
                              train_y,
                              validation_data=(valid_x, valid_y),
                              epochs=ModelTest.epochs,
                              batch_size=train_batch_size,
                              steps_per_epoch=None,
                              validation_steps=None,
                              callbacks=[self.mock_callback])
        params = {
            'epochs': ModelTest.epochs,
            'steps': train_real_steps_per_epoch,
            'valid_steps': valid_real_steps_per_epoch
        }
        self._test_callbacks_train(params, logs)

    def test_fitting_with_np_array(self):
        train_real_steps_per_epoch = 30
        train_batch_size = ModelTest.batch_size
        train_final_batch_missing_samples = 7
        train_size = train_real_steps_per_epoch * train_batch_size - \
                     train_final_batch_missing_samples
        train_x = np.random.rand(train_size, 1).astype(np.float32)
        train_y = np.random.rand(train_size, 1).astype(np.float32)

        valid_real_steps_per_epoch = 10
        # valid_batch_size will be the same as train_batch_size in the fit method.
        valid_batch_size = train_batch_size
        valid_final_batch_missing_samples = 3
        valid_size = valid_real_steps_per_epoch * valid_batch_size - \
                     valid_final_batch_missing_samples
        valid_x = np.random.rand(valid_size, 1).astype(np.float32)
        valid_y = np.random.rand(valid_size, 1).astype(np.float32)

        logs = self.model.fit(train_x,
                              train_y,
                              validation_data=(valid_x, valid_y),
                              epochs=ModelTest.epochs,
                              batch_size=train_batch_size,
                              steps_per_epoch=None,
                              validation_steps=None,
                              callbacks=[self.mock_callback])
        params = {
            'epochs': ModelTest.epochs,
            'steps': train_real_steps_per_epoch,
            'valid_steps': valid_real_steps_per_epoch
        }
        self._test_callbacks_train(params, logs)

    def test_fitting_with_generator_with_len(self):
        train_real_steps_per_epoch = 30
        train_generator = SomeDataGeneratorWithLen(
            batch_size=ModelTest.batch_size,
            length=train_real_steps_per_epoch,
            num_missing_samples=7)
        valid_real_steps_per_epoch = 10
        valid_generator = SomeDataGeneratorWithLen(
            batch_size=15,
            length=valid_real_steps_per_epoch,
            num_missing_samples=3)
        logs = self.model.fit_generator(train_generator,
                                        valid_generator,
                                        epochs=ModelTest.epochs,
                                        steps_per_epoch=None,
                                        validation_steps=None,
                                        callbacks=[self.mock_callback])
        params = {
            'epochs': ModelTest.epochs,
            'steps': train_real_steps_per_epoch,
            'valid_steps': valid_real_steps_per_epoch
        }
        self._test_callbacks_train(params, logs)

    def test_fitting_with_generator_with_stop_iteration(self):
        train_real_steps_per_epoch = 30
        train_generator = SomeDataGeneratorUsingStopIteration(
            batch_size=ModelTest.batch_size, length=train_real_steps_per_epoch)
        valid_generator = SomeDataGeneratorUsingStopIteration(batch_size=15,
                                                              length=10)
        logs = self.model.fit_generator(train_generator,
                                        valid_generator,
                                        epochs=ModelTest.epochs,
                                        steps_per_epoch=None,
                                        validation_steps=None,
                                        callbacks=[self.mock_callback])
        params = {'epochs': ModelTest.epochs, 'steps': None}
        self._test_callbacks_train(params,
                                   logs,
                                   steps=train_real_steps_per_epoch)

    def test_tensor_train_on_batch(self):
        x = torch.rand(ModelTest.batch_size, 1)
        y = torch.rand(ModelTest.batch_size, 1)
        loss, metrics = self.model.train_on_batch(x, y)
        self.assertEqual(type(loss), float)
        self.assertEqual(type(metrics), np.ndarray)
        self.assertEqual(metrics.tolist(), self.batch_metrics_values)

    def test_train_on_batch_with_pred(self):
        x = torch.rand(ModelTest.batch_size, 1)
        y = torch.rand(ModelTest.batch_size, 1)
        loss, metrics, pred_y = self.model.train_on_batch(x,
                                                          y,
                                                          return_pred=True)
        self.assertEqual(type(loss), float)
        self.assertEqual(type(metrics), np.ndarray)
        self.assertEqual(metrics.tolist(), self.batch_metrics_values)
        self.assertEqual(pred_y.shape, (ModelTest.batch_size, 1))

    def test_ndarray_train_on_batch(self):
        x = np.random.rand(ModelTest.batch_size, 1).astype(np.float32)
        y = np.random.rand(ModelTest.batch_size, 1).astype(np.float32)
        loss, metrics = self.model.train_on_batch(x, y)
        self.assertEqual(type(loss), float)
        self.assertEqual(type(metrics), np.ndarray)
        self.assertEqual(metrics.tolist(), self.batch_metrics_values)

    def test_evaluate(self):
        x = torch.rand(ModelTest.evaluate_dataset_len, 1)
        y = torch.rand(ModelTest.evaluate_dataset_len, 1)
        loss, metrics = self.model.evaluate(x,
                                            y,
                                            batch_size=ModelTest.batch_size)
        self.assertEqual(type(loss), float)
        self.assertEqual(type(metrics), np.ndarray)
        self.assertEqual(metrics.tolist(),
                         self.batch_metrics_values + self.epoch_metrics_values)

    def test_evaluate_with_pred(self):
        x = torch.rand(ModelTest.evaluate_dataset_len, 1)
        y = torch.rand(ModelTest.evaluate_dataset_len, 1)
        # We also test the unpacking.
        _, _, pred_y = self.model.evaluate(x,
                                           y,
                                           batch_size=ModelTest.batch_size,
                                           return_pred=True)
        self.assertEqual(pred_y.shape, (ModelTest.evaluate_dataset_len, 1))

    def test_evaluate_with_callback(self):
        x = torch.rand(ModelTest.evaluate_dataset_len, 1)
        y = torch.rand(ModelTest.evaluate_dataset_len, 1)
        # We also test the unpacking.
        _, _, pred_y = self.model.evaluate(x,
                                           y,
                                           batch_size=ModelTest.batch_size,
                                           return_pred=True,
                                           callbacks=[self.mock_callback])
        self.assertEqual(pred_y.shape, (ModelTest.evaluate_dataset_len, 1))

    def test_evaluate_with_return_dict(self):
        x = torch.rand(ModelTest.evaluate_dataset_len, 1)
        y = torch.rand(ModelTest.evaluate_dataset_len, 1)
        logs = self.model.evaluate(x,
                                   y,
                                   batch_size=ModelTest.batch_size,
                                   return_dict_format=True)

        self._test_return_dict_logs(logs)

    def test_evaluate_with_np_array(self):
        x = np.random.rand(ModelTest.evaluate_dataset_len,
                           1).astype(np.float32)
        y = np.random.rand(ModelTest.evaluate_dataset_len,
                           1).astype(np.float32)
        loss, metrics, pred_y = self.model.evaluate(
            x, y, batch_size=ModelTest.batch_size, return_pred=True)
        self.assertEqual(type(loss), float)
        self.assertEqual(type(metrics), np.ndarray)
        self.assertEqual(metrics.tolist(),
                         self.batch_metrics_values + self.epoch_metrics_values)
        self.assertEqual(pred_y.shape, (ModelTest.evaluate_dataset_len, 1))

    def test_evaluate_data_loader(self):
        x = torch.rand(ModelTest.evaluate_dataset_len, 1)
        y = torch.rand(ModelTest.evaluate_dataset_len, 1)
        dataset = TensorDataset(x, y)
        generator = DataLoader(dataset, ModelTest.batch_size)
        loss, metrics, pred_y = self.model.evaluate_generator(generator,
                                                              return_pred=True)
        self.assertEqual(type(loss), float)
        self.assertEqual(type(metrics), np.ndarray)
        self.assertEqual(metrics.tolist(),
                         self.batch_metrics_values + self.epoch_metrics_values)
        self.assertEqual(pred_y.shape, (ModelTest.evaluate_dataset_len, 1))

    def test_evaluate_generator(self):
        num_steps = 10
        generator = some_data_tensor_generator(ModelTest.batch_size)
        loss, metrics, pred_y = self.model.evaluate_generator(generator,
                                                              steps=num_steps,
                                                              return_pred=True)
        self.assertEqual(type(loss), float)
        self.assertEqual(type(metrics), np.ndarray)
        self.assertEqual(metrics.tolist(),
                         self.batch_metrics_values + self.epoch_metrics_values)
        self.assertEqual(type(pred_y), np.ndarray)
        self.assertEqual(pred_y.shape, (num_steps * ModelTest.batch_size, 1))

    def test_evaluate_generator_with_stop_iteration(self):
        test_generator = SomeDataGeneratorUsingStopIteration(
            ModelTest.batch_size, 10)

        loss, _ = self.model.evaluate_generator(test_generator)

        self.assertEqual(type(loss), float)

    def test_evaluate_generator_with_callback(self):
        num_steps = 10
        generator = some_data_tensor_generator(ModelTest.batch_size)
        self.model.evaluate_generator(generator,
                                      steps=num_steps,
                                      callbacks=[self.mock_callback])

        params = {'steps': num_steps}
        self._test_callbacks_test(params)

    def test_evaluate_generator_with_return_dict(self):
        num_steps = 10
        generator = some_data_tensor_generator(ModelTest.batch_size)
        logs = self.model.evaluate_generator(generator,
                                             steps=num_steps,
                                             return_dict_format=True)

        self._test_return_dict_logs(logs)

    def test_evaluate_generator_with_ground_truth(self):
        num_steps = 10
        generator = some_data_tensor_generator(ModelTest.batch_size)
        loss, metrics, pred_y, true_y = self.model.evaluate_generator(
            generator,
            steps=num_steps,
            return_pred=True,
            return_ground_truth=True)
        self.assertEqual(type(loss), float)
        self.assertEqual(type(metrics), np.ndarray)
        self.assertEqual(metrics.tolist(),
                         self.batch_metrics_values + self.epoch_metrics_values)
        self.assertEqual(type(pred_y), np.ndarray)
        self.assertEqual(type(true_y), np.ndarray)
        self.assertEqual(pred_y.shape, (num_steps * ModelTest.batch_size, 1))
        self.assertEqual(true_y.shape, (num_steps * ModelTest.batch_size, 1))

    def test_evaluate_generator_with_no_concatenation(self):
        num_steps = 10
        generator = some_data_tensor_generator(ModelTest.batch_size)
        loss, metrics, pred_y, true_y = self.model.evaluate_generator(
            generator,
            steps=num_steps,
            return_pred=True,
            return_ground_truth=True,
            concatenate_returns=False)
        self.assertEqual(type(loss), float)
        self.assertEqual(type(metrics), np.ndarray)
        self.assertEqual(metrics.tolist(),
                         self.batch_metrics_values + self.epoch_metrics_values)

        self.assertEqual(type(pred_y), list)
        for pred in pred_y:
            self.assertEqual(type(pred), np.ndarray)
            self.assertEqual(pred.shape, (ModelTest.batch_size, 1))
        self.assertEqual(type(true_y), list)
        for true in true_y:
            self.assertEqual(type(true), np.ndarray)
            self.assertEqual(true.shape, (ModelTest.batch_size, 1))

    def test_evaluate_with_only_one_metric(self):
        model = Model(self.pytorch_network,
                      self.optimizer,
                      self.loss_function,
                      batch_metrics=self.batch_metrics[:1])
        x = torch.rand(ModelTest.evaluate_dataset_len, 1)
        y = torch.rand(ModelTest.evaluate_dataset_len, 1)
        loss, first_metric = model.evaluate(x,
                                            y,
                                            batch_size=ModelTest.batch_size)
        self.assertEqual(type(loss), float)
        self.assertEqual(type(first_metric), float)
        self.assertEqual(first_metric, some_metric_1_value)

    def test_metrics_integration(self):
        num_steps = 10
        model = Model(self.pytorch_network,
                      self.optimizer,
                      self.loss_function,
                      batch_metrics=[F.mse_loss])
        train_generator = some_data_tensor_generator(ModelTest.batch_size)
        valid_generator = some_data_tensor_generator(ModelTest.batch_size)
        model.fit_generator(train_generator,
                            valid_generator,
                            epochs=ModelTest.epochs,
                            steps_per_epoch=ModelTest.steps_per_epoch,
                            validation_steps=ModelTest.steps_per_epoch,
                            callbacks=[self.mock_callback])
        generator = some_data_tensor_generator(ModelTest.batch_size)
        loss, mse = model.evaluate_generator(generator, steps=num_steps)
        self.assertEqual(type(loss), float)
        self.assertEqual(type(mse), float)

    def test_epoch_metrics_integration(self):
        model = Model(self.pytorch_network,
                      self.optimizer,
                      self.loss_function,
                      epoch_metrics=[SomeEpochMetric()])
        train_generator = some_data_tensor_generator(ModelTest.batch_size)
        valid_generator = some_data_tensor_generator(ModelTest.batch_size)
        logs = model.fit_generator(train_generator,
                                   valid_generator,
                                   epochs=1,
                                   steps_per_epoch=ModelTest.steps_per_epoch,
                                   validation_steps=ModelTest.steps_per_epoch)
        actual_value = logs[-1]['some_epoch_metric']
        val_actual_value = logs[-1]['val_some_epoch_metric']
        expected_value = 5
        self.assertEqual(val_actual_value, expected_value)
        self.assertEqual(actual_value, expected_value)

    def test_evaluate_with_no_metric(self):
        model = Model(self.pytorch_network, self.optimizer, self.loss_function)
        x = torch.rand(ModelTest.evaluate_dataset_len, 1)
        y = torch.rand(ModelTest.evaluate_dataset_len, 1)
        loss = model.evaluate(x, y, batch_size=ModelTest.batch_size)
        self.assertEqual(type(loss), float)

    def test_tensor_evaluate_on_batch(self):
        x = torch.rand(ModelTest.batch_size, 1)
        y = torch.rand(ModelTest.batch_size, 1)
        loss, metrics = self.model.evaluate_on_batch(x, y)
        self.assertEqual(type(loss), float)
        self.assertEqual(type(metrics), np.ndarray)
        self.assertEqual(metrics.tolist(), self.batch_metrics_values)

    def test_evaluate_on_batch_with_pred(self):
        x = torch.rand(ModelTest.batch_size, 1)
        y = torch.rand(ModelTest.batch_size, 1)
        loss, metrics, pred_y = self.model.evaluate_on_batch(x,
                                                             y,
                                                             return_pred=True)
        self.assertEqual(type(loss), float)
        self.assertEqual(type(metrics), np.ndarray)
        self.assertEqual(metrics.tolist(), self.batch_metrics_values)
        self.assertEqual(pred_y.shape, (ModelTest.batch_size, 1))

    def test_ndarray_evaluate_on_batch(self):
        x = np.random.rand(ModelTest.batch_size, 1).astype(np.float32)
        y = np.random.rand(ModelTest.batch_size, 1).astype(np.float32)
        loss, metrics = self.model.evaluate_on_batch(x, y)
        self.assertEqual(type(loss), float)
        self.assertEqual(type(metrics), np.ndarray)
        self.assertEqual(metrics.tolist(), self.batch_metrics_values)

    def test_predict(self):
        x = torch.rand(ModelTest.evaluate_dataset_len, 1)
        pred_y = self.model.predict(x, batch_size=ModelTest.batch_size)
        self.assertEqual(pred_y.shape, (ModelTest.evaluate_dataset_len, 1))

    def test_predict_with_np_array(self):
        x = np.random.rand(ModelTest.evaluate_dataset_len,
                           1).astype(np.float32)
        pred_y = self.model.predict(x, batch_size=ModelTest.batch_size)
        self.assertEqual(type(pred_y), np.ndarray)
        self.assertEqual(pred_y.shape, (ModelTest.evaluate_dataset_len, 1))

    def test_predict_data_loader(self):
        x = torch.rand(ModelTest.evaluate_dataset_len, 1)
        generator = DataLoader(x, ModelTest.batch_size)
        pred_y = self.model.predict_generator(generator)
        self.assertEqual(type(pred_y), np.ndarray)
        self.assertEqual(pred_y.shape, (ModelTest.evaluate_dataset_len, 1))

    def test_predict_generator(self):
        num_steps = 10
        generator = some_data_tensor_generator(ModelTest.batch_size)
        generator = (x for x, _ in generator)
        pred_y = self.model.predict_generator(generator, steps=num_steps)
        self.assertEqual(type(pred_y), np.ndarray)
        self.assertEqual(pred_y.shape, (num_steps * ModelTest.batch_size, 1))

    def test_predict_generator_with_no_concatenation(self):
        num_steps = 10
        generator = some_data_tensor_generator(ModelTest.batch_size)
        generator = (x for x, _ in generator)
        pred_y = self.model.predict_generator(generator,
                                              steps=num_steps,
                                              concatenate_returns=False)
        self.assertEqual(type(pred_y), list)
        for pred in pred_y:
            self.assertEqual(type(pred), np.ndarray)
            self.assertEqual(pred.shape, (ModelTest.batch_size, 1))

    def test_tensor_predict_on_batch(self):
        x = torch.rand(ModelTest.batch_size, 1)
        pred_y = self.model.predict_on_batch(x)
        self.assertEqual(pred_y.shape, (ModelTest.batch_size, 1))

    def test_ndarray_predict_on_batch(self):
        x = np.random.rand(ModelTest.batch_size, 1).astype(np.float32)
        pred_y = self.model.predict_on_batch(x)
        self.assertEqual(pred_y.shape, (ModelTest.batch_size, 1))

    @skipIf(not torch.cuda.is_available(), "no gpu available")
    def test_cpu_cuda(self):
        train_generator = some_data_tensor_generator(ModelTest.batch_size)
        valid_generator = some_data_tensor_generator(ModelTest.batch_size)

        self._capture_output()

        with torch.cuda.device(ModelTest.cuda_device):
            self.model.cuda()
            self.model.fit_generator(
                train_generator,
                valid_generator,
                epochs=ModelTest.epochs,
                steps_per_epoch=ModelTest.steps_per_epoch,
                validation_steps=ModelTest.steps_per_epoch,
                callbacks=[self.mock_callback])

        # The context manager is also used here because of this bug:
        # https://github.com/pytorch/pytorch/issues/7320
        with torch.cuda.device(ModelTest.cuda_device):
            self.model.cuda(ModelTest.cuda_device)
            self._test_device(
                torch.device('cuda:' + str(ModelTest.cuda_device)))
            self.model.fit_generator(
                train_generator,
                valid_generator,
                epochs=ModelTest.epochs,
                steps_per_epoch=ModelTest.steps_per_epoch,
                validation_steps=ModelTest.steps_per_epoch,
                callbacks=[self.mock_callback])

            self.model.cpu()
            self._test_device(torch.device('cpu'))
            self.model.fit_generator(
                train_generator,
                valid_generator,
                epochs=ModelTest.epochs,
                steps_per_epoch=ModelTest.steps_per_epoch,
                validation_steps=ModelTest.steps_per_epoch,
                callbacks=[self.mock_callback])

            self.model.to(torch.device('cuda:' + str(ModelTest.cuda_device)))
            self._test_device(
                torch.device('cuda:' + str(ModelTest.cuda_device)))
            self.model.fit_generator(
                train_generator,
                valid_generator,
                epochs=ModelTest.epochs,
                steps_per_epoch=ModelTest.steps_per_epoch,
                validation_steps=ModelTest.steps_per_epoch,
                callbacks=[self.mock_callback])

            self.model.to(torch.device('cpu'))
            self._test_device(torch.device('cpu'))
            self.model.fit_generator(
                train_generator,
                valid_generator,
                epochs=ModelTest.epochs,
                steps_per_epoch=ModelTest.steps_per_epoch,
                validation_steps=ModelTest.steps_per_epoch,
                callbacks=[self.mock_callback])

    def test_get_batch_size(self):
        batch_size = ModelTest.batch_size
        x = np.random.rand(batch_size, 1).astype(np.float32)
        y = np.random.rand(batch_size, 1).astype(np.float32)

        batch_size2 = ModelTest.batch_size + 1
        x2 = np.random.rand(batch_size2, 1).astype(np.float32)
        y2 = np.random.rand(batch_size2, 1).astype(np.float32)

        other_batch_size = batch_size2 + 1

        inf_batch_size = self.model.get_batch_size(x, y)
        self.assertEqual(inf_batch_size, batch_size)

        inf_batch_size = self.model.get_batch_size(x2, y2)
        self.assertEqual(inf_batch_size, batch_size2)

        inf_batch_size = self.model.get_batch_size(x, y2)
        self.assertEqual(inf_batch_size, batch_size)

        inf_batch_size = self.model.get_batch_size(x2, y)
        self.assertEqual(inf_batch_size, batch_size2)

        inf_batch_size = self.model.get_batch_size((x, x2), y)
        self.assertEqual(inf_batch_size, batch_size)

        inf_batch_size = self.model.get_batch_size((x2, x), y)
        self.assertEqual(inf_batch_size, batch_size)

        inf_batch_size = self.model.get_batch_size((x, x2), (y, y2))
        self.assertEqual(inf_batch_size, batch_size)

        inf_batch_size = self.model.get_batch_size((x2, x), (y, y2))
        self.assertEqual(inf_batch_size, batch_size2)

        inf_batch_size = self.model.get_batch_size([x, x2], y)
        self.assertEqual(inf_batch_size, batch_size)

        inf_batch_size = self.model.get_batch_size([x2, x], y)
        self.assertEqual(inf_batch_size, batch_size)

        inf_batch_size = self.model.get_batch_size([x, x2], [y, y2])
        self.assertEqual(inf_batch_size, batch_size)

        inf_batch_size = self.model.get_batch_size([x2, x], [y, y2])
        self.assertEqual(inf_batch_size, batch_size2)

        inf_batch_size = self.model.get_batch_size(
            {
                'batch_size': other_batch_size,
                'x': x
            }, {'y': y})
        self.assertEqual(inf_batch_size, other_batch_size)

        inf_batch_size = self.model.get_batch_size({'x': x}, {
            'batch_size': other_batch_size,
            'y': y
        })
        self.assertEqual(inf_batch_size, other_batch_size)

        inf_batch_size = self.model.get_batch_size({'x': x}, {'y': y})
        self.assertEqual(inf_batch_size, batch_size)

        inf_batch_size = self.model.get_batch_size(
            OrderedDict([('x1', x), ('x2', x2)]), {'y': y})
        self.assertEqual(inf_batch_size, batch_size)

        inf_batch_size = self.model.get_batch_size(
            OrderedDict([('x1', x2), ('x2', x)]), {'y': y})
        self.assertEqual(inf_batch_size, batch_size2)

        inf_batch_size = self.model.get_batch_size([1, 2, 3], {'y': y})
        self.assertEqual(inf_batch_size, batch_size)

    def test_get_batch_size_warning(self):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            inf_batch_size = self.model.get_batch_size([1, 2, 3], [4, 5, 6])
            self.assertEqual(inf_batch_size, 1)
            self.assertEqual(len(w), 1)

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            warning_settings['batch_size'] = 'ignore'
            inf_batch_size = self.model.get_batch_size([1, 2, 3], [4, 5, 6])
            self.assertEqual(inf_batch_size, 1)
            self.assertEqual(len(w), 0)
Example #12
class BaseCSVGradientLoggerTest:
    # pylint: disable=not-callable,no-member
    CSVGradientLogger = None
    batch_size = 20
    num_epochs = 10

    def setUp(self):
        torch.manual_seed(42)
        self.pytorch_network = nn.Sequential(nn.Linear(1, 2), nn.Linear(2, 1))
        self.loss_function = nn.MSELoss()
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(),
                                         lr=1e-3)
        self.model = Model(self.pytorch_network, self.optimizer,
                           self.loss_function)
        self.temp_dir_obj = TemporaryDirectory()
        self.csv_filename = os.path.join(self.temp_dir_obj.name,
                                         'layer_{}.csv')

    def tearDown(self):
        self.temp_dir_obj.cleanup()

    def test_logging(self):
        train_gen = some_data_generator(self.batch_size)
        valid_gen = some_data_generator(self.batch_size)
        memgrad = MemoryGradientLogger()
        logger = self.CSVGradientLogger(self.csv_filename)
        self.model.fit_generator(train_gen,
                                 valid_gen,
                                 epochs=self.num_epochs,
                                 steps_per_epoch=5,
                                 callbacks=[memgrad, logger])
        self._test_logging(memgrad.history)

    def test_logging_append(self):
        train_gen = some_data_generator(self.batch_size)
        valid_gen = some_data_generator(self.batch_size)
        logger = self.CSVGradientLogger(self.csv_filename)
        memgrad = MemoryGradientLogger()
        self.model.fit_generator(train_gen,
                                 valid_gen,
                                 epochs=self.num_epochs,
                                 steps_per_epoch=5,
                                 callbacks=[memgrad, logger])
        memgrad2 = MemoryGradientLogger()
        logger = self.CSVGradientLogger(self.csv_filename, append=True)
        self.model.fit_generator(
            train_gen,
            valid_gen,
            epochs=20,
            steps_per_epoch=5,
            initial_epoch=self.num_epochs,
            callbacks=[memgrad2, logger],
        )
        history = {
            layer: stats1 + memgrad2.history[layer]
            for layer, stats1 in memgrad.history.items()
        }
        self._test_logging(history)

    def test_logging_overwrite(self):
        train_gen = some_data_generator(self.batch_size)
        valid_gen = some_data_generator(self.batch_size)
        logger = self.CSVGradientLogger(self.csv_filename)
        self.model.fit_generator(train_gen,
                                 valid_gen,
                                 epochs=self.num_epochs,
                                 steps_per_epoch=5,
                                 callbacks=[logger])
        memgrad = MemoryGradientLogger()
        logger = self.CSVGradientLogger(self.csv_filename, append=False)
        self.model.fit_generator(
            train_gen,
            valid_gen,
            epochs=20,
            steps_per_epoch=5,
            initial_epoch=self.num_epochs,
            callbacks=[memgrad, logger],
        )
        self._test_logging(memgrad.history)

    def _test_logging(self, history):
        for layer, stats in history.items():
            filename = self.csv_filename.format(layer)
            with open(filename, 'r', encoding='utf-8') as csvfile:
                reader = csv.DictReader(csvfile)
                rows = list(reader)
            self.assertEqual(len(rows), len(stats))
            for row, stats_entry in zip(rows, stats):
                self.assertEqual(row.keys(), stats_entry.keys())
                for k in row.keys():
                    self.assertAlmostEqual(float(row[k]), stats_entry[k])
Example #13
0
from poutyne import Model
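# network, device, train_loader, valid_loader, test_loader, num_epochs and callbacks
# are assumed to be defined earlier and are not shown in this snippet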

model = Model(network,
              'sgd',
              'cross_entropy',
              batch_metrics=['accuracy'],
              epoch_metrics=['f1'])
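# 'sgd', 'cross_entropy', 'accuracy' and 'f1' are string shortcuts that Poutyne resolves
# to the corresponding optimizer, loss function and built-in metrics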
model.to(device)

model.fit_generator(train_loader,
                    valid_loader,
                    epochs=num_epochs,
                    callbacks=callbacks)

test_loss, (test_acc, test_f1) = model.evaluate_generator(test_loader)
print(f'Test: Loss: {test_loss}, Accuracy: {test_acc}, F1: {test_f1}')
Example #14
0
class ModelTestMultiGPU(ModelFittingTestCase):
    # pylint: disable=too-many-public-methods

    def setUp(self):
        super().setUp()
        torch.manual_seed(42)
        self.pytorch_network = nn.Linear(1, 1)
        self.loss_function = nn.MSELoss()
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(),
                                         lr=1e-3)
        self.batch_metrics = [
            some_batch_metric_1, ('custom_name', some_batch_metric_2),
            repeat_batch_metric, repeat_batch_metric
        ]
        self.batch_metrics_names = [
            'some_batch_metric_1', 'custom_name', 'repeat_batch_metric1',
            'repeat_batch_metric2'
        ]
        self.batch_metrics_values = [
            some_metric_1_value, some_metric_2_value,
            repeat_batch_metric_value, repeat_batch_metric_value
        ]
        self.epoch_metrics = [SomeConstantEpochMetric()]
        self.epoch_metrics_names = ['some_constant_epoch_metric']
        self.epoch_metrics_values = [some_constant_epoch_metric_value]

        self.model = Model(self.pytorch_network,
                           self.optimizer,
                           self.loss_function,
                           batch_metrics=self.batch_metrics,
                           epoch_metrics=self.epoch_metrics)

        self.default_main_device = ModelTestMultiGPU.cuda_device

    def _test_multiple_gpu_mode(self, devices):
        if devices == "all":
            expected = torch.cuda.device_count()
        else:
            expected = len(devices)
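        # In multi-GPU mode, the model exposes one main device plus a list of other devices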
        self.assertEqual(len([self.model.device] + self.model.other_device),
                         expected)

    def _test_single_gpu_mode(self):
        self.assertIsNone(self.model.other_device)
        self.assertEqual(len([self.model.device]), 1)

    def test_back_and_forth_gpu_cpu_multi_gpus(self):
        devices = "all"
        train_generator = some_data_tensor_generator(
            ModelTestMultiGPU.batch_size)
        valid_generator = some_data_tensor_generator(
            ModelTestMultiGPU.batch_size)

        with torch.cuda.device(self.default_main_device):
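            # torch.cuda.device makes default_main_device the current CUDA device for the duration of the test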
            self.model.to(devices)
            self.model.fit_generator(
                train_generator,
                valid_generator,
                epochs=ModelTestMultiGPU.epochs,
                steps_per_epoch=ModelTestMultiGPU.steps_per_epoch,
                validation_steps=ModelTestMultiGPU.steps_per_epoch,
                callbacks=[self.mock_callback])
            self._test_device(
                torch.device('cuda:' + str(self.default_main_device)))
            self._test_multiple_gpu_mode(devices=devices)

            self.model.cpu()
            self.model.fit_generator(
                train_generator,
                valid_generator,
                epochs=ModelTestMultiGPU.epochs,
                steps_per_epoch=ModelTestMultiGPU.steps_per_epoch,
                validation_steps=ModelTestMultiGPU.steps_per_epoch,
                callbacks=[self.mock_callback])
            self._test_device(torch.device('cpu'))
            self._test_single_gpu_mode()

            self.model.to(devices)
            self.model.fit_generator(
                train_generator,
                valid_generator,
                epochs=ModelTestMultiGPU.epochs,
                steps_per_epoch=ModelTestMultiGPU.steps_per_epoch,
                validation_steps=ModelTestMultiGPU.steps_per_epoch,
                callbacks=[self.mock_callback])
            self._test_device(
                torch.device('cuda:' + str(ModelTestMultiGPU.cuda_device)))
            self._test_multiple_gpu_mode(devices=devices)

            self.model.to(torch.device('cpu'))
            self.model.fit_generator(
                train_generator,
                valid_generator,
                epochs=ModelTestMultiGPU.epochs,
                steps_per_epoch=ModelTestMultiGPU.steps_per_epoch,
                validation_steps=ModelTestMultiGPU.steps_per_epoch,
                callbacks=[self.mock_callback])
            self._test_device(torch.device('cpu'))
            self._test_single_gpu_mode()

    def test_back_and_forth_cuda_cpu_to_multi_gpus(self):
        devices = "all"
        train_generator = some_data_tensor_generator(
            ModelTestMultiGPU.batch_size)
        valid_generator = some_data_tensor_generator(
            ModelTestMultiGPU.batch_size)

        self.model.to(devices)
        self.model.fit_generator(
            train_generator,
            valid_generator,
            epochs=ModelTestMultiGPU.epochs,
            steps_per_epoch=ModelTestMultiGPU.steps_per_epoch,
            validation_steps=ModelTestMultiGPU.steps_per_epoch,
            callbacks=[self.mock_callback])

        with torch.cuda.device(self.default_main_device):
            self.model.cuda()
            self.model.fit_generator(
                train_generator,
                valid_generator,
                epochs=ModelTestMultiGPU.epochs,
                steps_per_epoch=ModelTestMultiGPU.steps_per_epoch,
                validation_steps=ModelTestMultiGPU.steps_per_epoch,
                callbacks=[self.mock_callback])
            self._test_device(
                torch.device('cuda:' + str(self.default_main_device)))
            self._test_single_gpu_mode()

            self.model.cpu()
            self.model.fit_generator(
                train_generator,
                valid_generator,
                epochs=ModelTestMultiGPU.epochs,
                steps_per_epoch=ModelTestMultiGPU.steps_per_epoch,
                validation_steps=ModelTestMultiGPU.steps_per_epoch,
                callbacks=[self.mock_callback])
            self._test_device(torch.device('cpu'))
            self._test_single_gpu_mode()

            self.model.to(devices)
            self.model.fit_generator(
                train_generator,
                valid_generator,
                epochs=ModelTestMultiGPU.epochs,
                steps_per_epoch=ModelTestMultiGPU.steps_per_epoch,
                validation_steps=ModelTestMultiGPU.steps_per_epoch,
                callbacks=[self.mock_callback])
            self._test_device(
                torch.device('cuda:' + str(ModelTestMultiGPU.cuda_device)))
            self._test_multiple_gpu_mode(devices=devices)

            self.model.cuda()
            self.model.fit_generator(
                train_generator,
                valid_generator,
                epochs=ModelTestMultiGPU.epochs,
                steps_per_epoch=ModelTestMultiGPU.steps_per_epoch,
                validation_steps=ModelTestMultiGPU.steps_per_epoch,
                callbacks=[self.mock_callback])
            self._test_device(
                torch.device('cuda:' + str(self.default_main_device)))
            self._test_single_gpu_mode()

            self.model.to(torch.device('cpu'))
            self.model.fit_generator(
                train_generator,
                valid_generator,
                epochs=ModelTestMultiGPU.epochs,
                steps_per_epoch=ModelTestMultiGPU.steps_per_epoch,
                validation_steps=ModelTestMultiGPU.steps_per_epoch,
                callbacks=[self.mock_callback])
            self._test_device(torch.device('cpu'))
            self._test_single_gpu_mode()

    def test_devices_settings(self):
        train_generator = some_data_tensor_generator(
            ModelTestMultiGPU.batch_size)
        valid_generator = some_data_tensor_generator(
            ModelTestMultiGPU.batch_size)

        devices = "all"
        self.model.to(devices)
        self.model.fit_generator(
            train_generator,
            valid_generator,
            epochs=ModelTestMultiGPU.epochs,
            steps_per_epoch=ModelTestMultiGPU.steps_per_epoch,
            validation_steps=ModelTestMultiGPU.steps_per_epoch,
            callbacks=[self.mock_callback])
        self._test_multiple_gpu_mode(devices=devices)

        devices = ["cuda:0", "cuda:1"]
        self.model.to(devices)
        self.model.fit_generator(
            train_generator,
            valid_generator,
            epochs=ModelTestMultiGPU.epochs,
            steps_per_epoch=ModelTestMultiGPU.steps_per_epoch,
            validation_steps=ModelTestMultiGPU.steps_per_epoch,
            callbacks=[self.mock_callback])
        self._test_multiple_gpu_mode(devices=devices)

        devices = [torch.device("cuda:0"), torch.device("cuda:1")]
        self.model.to(devices)
        self.model.fit_generator(
            train_generator,
            valid_generator,
            epochs=ModelTestMultiGPU.epochs,
            steps_per_epoch=ModelTestMultiGPU.steps_per_epoch,
            validation_steps=ModelTestMultiGPU.steps_per_epoch,
            callbacks=[self.mock_callback])
        self._test_multiple_gpu_mode(devices=devices)

        devices = ["cuda:0"]
        self.model.to(devices)
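        # With a single device in the list, the model falls back to single-GPU mode and other_device stays None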
        self.assertIsNone(self.model.other_device)
        self.model.fit_generator(
            train_generator,
            valid_generator,
            epochs=ModelTestMultiGPU.epochs,
            steps_per_epoch=ModelTestMultiGPU.steps_per_epoch,
            validation_steps=ModelTestMultiGPU.steps_per_epoch,
            callbacks=[self.mock_callback])
        self._test_device(torch.device('cuda:0'))
        self._test_single_gpu_mode()
Example #15
0
class ModelFittingTestCaseProgress(ModelFittingTestCase):
    # pylint: disable=too-many-public-methods
    num_steps = 5

    def setUp(self):
        super().setUp()
        self.train_generator = some_data_tensor_generator(ModelFittingTestCase.batch_size)
        self.valid_generator = some_data_tensor_generator(ModelFittingTestCase.batch_size)
        self.test_generator = some_data_tensor_generator(ModelFittingTestCase.batch_size)
        torch.manual_seed(42)
        self.pytorch_network = nn.Linear(1, 1)
        self.loss_function = nn.MSELoss()
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(), lr=1e-3)
        self.batch_metrics = [
            some_batch_metric_1, ('custom_name', some_batch_metric_2), repeat_batch_metric, repeat_batch_metric
        ]
        self.batch_metrics_names = [
            'some_batch_metric_1', 'custom_name', 'repeat_batch_metric1', 'repeat_batch_metric2'
        ]
        self.batch_metrics_values = [
            some_metric_1_value, some_metric_2_value, repeat_batch_metric_value, repeat_batch_metric_value
        ]
        self.epoch_metrics = [SomeConstantEpochMetric()]
        self.epoch_metrics_names = ['some_constant_epoch_metric']
        self.epoch_metrics_values = [some_constant_epoch_metric_value]

        self.model = Model(self.pytorch_network,
                           self.optimizer,
                           self.loss_function,
                           batch_metrics=self.batch_metrics,
                           epoch_metrics=self.epoch_metrics)

        self._capture_output()

    def assertStdoutContains(self, values):
        for value in values:
            self.assertIn(value, self.test_out.getvalue().strip())

    def assertStdoutNotContains(self, values):
        for value in values:
            self.assertNotIn(value, self.test_out.getvalue().strip())

    @skipIf(color is None, "Unable to import colorama")
    def test_fitting_with_default_coloring(self):
        _ = self.model.fit_generator(self.train_generator,
                                     self.valid_generator,
                                     epochs=ModelFittingTestCase.epochs,
                                     steps_per_epoch=ModelFittingTestCase.steps_per_epoch,
                                     validation_steps=ModelFittingTestCase.steps_per_epoch,
                                     callbacks=[self.mock_callback])

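        # "[32m", "[35m", etc. are the ANSI color escape sequences emitted when coloring is enabled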
        self.assertStdoutContains(["[32m", "[35m", "[36m", "[94m"])

    def test_fitting_with_progress_bar_show_epoch(self):
        _ = self.model.fit_generator(self.train_generator,
                                     self.valid_generator,
                                     epochs=ModelFittingTestCase.epochs,
                                     steps_per_epoch=ModelFittingTestCase.steps_per_epoch,
                                     validation_steps=ModelFittingTestCase.steps_per_epoch,
                                     callbacks=[self.mock_callback])

        self.assertStdoutContains(["Epoch", "1/5", "2/5"])

    def test_fitting_with_progress_bar_show_steps(self):
        _ = self.model.fit_generator(self.train_generator,
                                     self.valid_generator,
                                     epochs=ModelFittingTestCase.epochs,
                                     steps_per_epoch=ModelFittingTestCase.steps_per_epoch,
                                     validation_steps=ModelFittingTestCase.steps_per_epoch,
                                     callbacks=[self.mock_callback])

        self.assertStdoutContains(["steps", f"{ModelFittingTestCase.steps_per_epoch}"])

    def test_fitting_with_progress_bar_show_train_val_final_steps(self):
        _ = self.model.fit_generator(self.train_generator,
                                     self.valid_generator,
                                     epochs=ModelFittingTestCase.epochs,
                                     steps_per_epoch=ModelFittingTestCase.steps_per_epoch,
                                     validation_steps=ModelFittingTestCase.steps_per_epoch,
                                     callbacks=[self.mock_callback])

        self.assertStdoutContains(["Val steps", "Train steps"])

    def test_fitting_with_no_progress_bar_dont_show_epoch(self):
        _ = self.model.fit_generator(self.train_generator,
                                     self.valid_generator,
                                     epochs=ModelFittingTestCase.epochs,
                                     steps_per_epoch=ModelFittingTestCase.steps_per_epoch,
                                     validation_steps=ModelFittingTestCase.steps_per_epoch,
                                     callbacks=[self.mock_callback],
                                     verbose=False)

        self.assertStdoutNotContains(["Epoch", "1/5", "2/5"])

    @skipIf(color is None, "Unable to import colorama")
    def test_fitting_with_user_coloring(self):
        coloring = {
            "text_color": 'BLACK',
            "ratio_color": "BLACK",
            "metric_value_color": "BLACK",
            "time_color": "BLACK",
            "progress_bar_color": "BLACK"
        }
        _ = self.model.fit_generator(self.train_generator,
                                     self.valid_generator,
                                     epochs=ModelFittingTestCase.epochs,
                                     steps_per_epoch=ModelFittingTestCase.steps_per_epoch,
                                     validation_steps=ModelFittingTestCase.steps_per_epoch,
                                     callbacks=[self.mock_callback],
                                     progress_options=dict(coloring=coloring))

        self.assertStdoutContains(["[30m"])

    @skipIf(color is None, "Unable to import colorama")
    def test_fitting_with_user_partial_coloring(self):
        _ = self.model.fit_generator(self.train_generator,
                                     self.valid_generator,
                                     epochs=ModelFittingTestCase.epochs,
                                     steps_per_epoch=ModelFittingTestCase.steps_per_epoch,
                                     validation_steps=ModelFittingTestCase.steps_per_epoch,
                                     callbacks=[self.mock_callback],
                                     progress_options=dict(coloring={
                                         "text_color": 'BLACK',
                                         "ratio_color": "BLACK"
                                     }))

        self.assertStdoutContains(["[30m", "[32m", "[35m", "[94m"])

    def test_fitting_with_user_coloring_invalid(self):
        with self.assertRaises(KeyError):
            _ = self.model.fit_generator(self.train_generator,
                                         self.valid_generator,
                                         epochs=ModelFittingTestCase.epochs,
                                         steps_per_epoch=ModelFittingTestCase.steps_per_epoch,
                                         validation_steps=ModelFittingTestCase.steps_per_epoch,
                                         callbacks=[self.mock_callback],
                                         progress_options=dict(coloring={"invalid_name": 'A COLOR'}))

    def test_fitting_with_no_coloring(self):
        _ = self.model.fit_generator(self.train_generator,
                                     self.valid_generator,
                                     epochs=ModelFittingTestCase.epochs,
                                     steps_per_epoch=ModelFittingTestCase.steps_per_epoch,
                                     validation_steps=ModelFittingTestCase.steps_per_epoch,
                                     callbacks=[self.mock_callback],
                                     progress_options=dict(coloring=False))

        self.assertStdoutNotContains(["[32m", "[35m", "[36m", "[94m"])

    @skipIf(color is None, "Unable to import colorama")
    def test_fitting_with_progress_bar_default_color(self):
        _ = self.model.fit_generator(self.train_generator,
                                     self.valid_generator,
                                     epochs=ModelFittingTestCase.epochs,
                                     steps_per_epoch=ModelFittingTestCase.steps_per_epoch,
                                     validation_steps=ModelFittingTestCase.steps_per_epoch,
                                     callbacks=[self.mock_callback],
                                     progress_options=dict(coloring=True, progress_bar=True))

        self.assertStdoutContains(["%", "[32m", "[35m", "[36m", "[94m", "\u2588"])

    @skipIf(color is None, "Unable to import colorama")
    def test_fitting_with_progress_bar_user_color(self):
        coloring = {
            "text_color": 'BLACK',
            "ratio_color": "BLACK",
            "metric_value_color": "BLACK",
            "time_color": "BLACK",
            "progress_bar_color": "BLACK"
        }
        _ = self.model.fit_generator(self.train_generator,
                                     self.valid_generator,
                                     epochs=ModelFittingTestCase.epochs,
                                     steps_per_epoch=ModelFittingTestCase.steps_per_epoch,
                                     validation_steps=ModelFittingTestCase.steps_per_epoch,
                                     callbacks=[self.mock_callback],
                                     progress_options=dict(coloring=coloring, progress_bar=True))

        self.assertStdoutContains(["%", "[30m", "\u2588"])

    def test_fitting_with_progress_bar_no_color(self):
        _ = self.model.fit_generator(self.train_generator,
                                     self.valid_generator,
                                     epochs=ModelFittingTestCase.epochs,
                                     steps_per_epoch=ModelFittingTestCase.steps_per_epoch,
                                     validation_steps=ModelFittingTestCase.steps_per_epoch,
                                     callbacks=[self.mock_callback],
                                     progress_options=dict(coloring=False, progress_bar=True))

        self.assertStdoutContains(["%", "\u2588"])
        self.assertStdoutNotContains(["[32m", "[35m", "[36m", "[94m"])

    def test_fitting_with_no_progress_bar(self):
        _ = self.model.fit_generator(self.train_generator,
                                     self.valid_generator,
                                     epochs=ModelFittingTestCase.epochs,
                                     steps_per_epoch=ModelFittingTestCase.steps_per_epoch,
                                     validation_steps=ModelFittingTestCase.steps_per_epoch,
                                     callbacks=[self.mock_callback],
                                     progress_options=dict(coloring=False, progress_bar=False))

        self.assertStdoutNotContains(["%", "\u2588"])
        self.assertStdoutNotContains(["[32m", "[35m", "[36m", "[94m"])

    def test_progress_bar_with_step_is_none(self):
        train_generator = SomeDataGeneratorUsingStopIteration(ModelFittingTestCase.batch_size, 10)
        valid_generator = SomeDataGeneratorUsingStopIteration(ModelFittingTestCase.batch_size, 10)
        _ = self.model.fit_generator(train_generator,
                                     valid_generator,
                                     epochs=ModelFittingTestCase.epochs,
                                     progress_options=dict(coloring=False, progress_bar=True))

        self.assertStdoutContains(["s/step"])
        self.assertStdoutNotContains(["[32m", "[35m", "[36m", "[94m", "\u2588", "%"])

    def test_evaluate_without_progress_output(self):
        x = torch.rand(ModelFittingTestCase.evaluate_dataset_len, 1)
        y = torch.rand(ModelFittingTestCase.evaluate_dataset_len, 1)

        _, _ = self.model.evaluate(x, y, batch_size=ModelFittingTestCase.batch_size, verbose=False)

        self.assertStdoutNotContains(["[32m", "[35m", "[36m", "[94m"])

    @skipIf(color is None, "Unable to import colorama")
    def test_evaluate_with_default_coloring(self):
        x = torch.rand(ModelFittingTestCase.evaluate_dataset_len, 1)
        y = torch.rand(ModelFittingTestCase.evaluate_dataset_len, 1)

        _, _ = self.model.evaluate(x, y, batch_size=ModelFittingTestCase.batch_size)

        self.assertStdoutContains(["[32m", "[35m", "[36m", "[94m"])

    @skipIf(color is None, "Unable to import colorama")
    def test_evaluate_with_user_coloring(self):
        x = torch.rand(ModelFittingTestCase.evaluate_dataset_len, 1)
        y = torch.rand(ModelFittingTestCase.evaluate_dataset_len, 1)

        coloring = {
            "text_color": 'BLACK',
            "ratio_color": "BLACK",
            "metric_value_color": "BLACK",
            "time_color": "BLACK",
            "progress_bar_color": "BLACK"
        }

        _, _ = self.model.evaluate(x,
                                   y,
                                   batch_size=ModelFittingTestCase.batch_size,
                                   progress_options=dict(coloring=coloring))

        self.assertStdoutContains(["[30m"])

    @skipIf(color is None, "Unable to import colorama")
    def test_evaluate_with_user_partial_coloring(self):
        x = torch.rand(ModelFittingTestCase.evaluate_dataset_len, 1)
        y = torch.rand(ModelFittingTestCase.evaluate_dataset_len, 1)

        _, _ = self.model.evaluate(x,
                                   y,
                                   batch_size=ModelFittingTestCase.batch_size,
                                   progress_options=dict(coloring={
                                       "text_color": 'BLACK',
                                       "ratio_color": "BLACK"
                                   }))
        self.assertStdoutContains(["[30m", "[32m", "[35m", "[94m"])

    def test_evaluate_with_user_coloring_invalid(self):
        x = torch.rand(ModelFittingTestCase.evaluate_dataset_len, 1)
        y = torch.rand(ModelFittingTestCase.evaluate_dataset_len, 1)

        with self.assertRaises(KeyError):
            _, _ = self.model.evaluate(x,
                                       y,
                                       batch_size=ModelFittingTestCase.batch_size,
                                       progress_options=dict(coloring={"invalid_name": 'A COLOR'}))

    def test_evaluate_with_no_coloring(self):
        x = torch.rand(ModelFittingTestCase.evaluate_dataset_len, 1)
        y = torch.rand(ModelFittingTestCase.evaluate_dataset_len, 1)

        _, _ = self.model.evaluate(x,
                                   y,
                                   batch_size=ModelFittingTestCase.batch_size,
                                   progress_options=dict(coloring=False))

        self.assertStdoutNotContains(["[32m", "[35m", "[36m", "[94m"])

    @skipIf(color is None, "Unable to import colorama")
    def test_evaluate_with_progress_bar_default_coloring(self):
        x = torch.rand(ModelFittingTestCase.evaluate_dataset_len, 1)
        y = torch.rand(ModelFittingTestCase.evaluate_dataset_len, 1)

        _, _ = self.model.evaluate(x,
                                   y,
                                   batch_size=ModelFittingTestCase.batch_size,
                                   progress_options=dict(coloring=True, progress_bar=True))

        self.assertStdoutContains(["%", "[32m", "[35m", "[36m", "[94m", "\u2588"])

    @skipIf(color is None, "Unable to import colorama")
    def test_evaluate_with_progress_bar_user_coloring(self):
        x = torch.rand(ModelFittingTestCase.evaluate_dataset_len, 1)
        y = torch.rand(ModelFittingTestCase.evaluate_dataset_len, 1)

        coloring = {
            "text_color": 'BLACK',
            "ratio_color": "BLACK",
            "metric_value_color": "BLACK",
            "time_color": "BLACK",
            "progress_bar_color": "BLACK"
        }

        _, _ = self.model.evaluate(x,
                                   y,
                                   batch_size=ModelFittingTestCase.batch_size,
                                   progress_options=dict(coloring=coloring, progress_bar=True))

        self.assertStdoutContains(["%", "[30m", "\u2588"])

    @skipIf(color is None, "Unable to import colorama")
    def test_evaluate_with_progress_bar_user_no_color(self):
        x = torch.rand(ModelFittingTestCase.evaluate_dataset_len, 1)
        y = torch.rand(ModelFittingTestCase.evaluate_dataset_len, 1)

        _, _ = self.model.evaluate(x,
                                   y,
                                   batch_size=ModelFittingTestCase.batch_size,
                                   progress_options=dict(coloring=False, progress_bar=True))

        self.assertStdoutContains(["%", "\u2588"])
        self.assertStdoutNotContains(["[32m", "[35m", "[36m", "[94m"])

    def test_evaluate_with_no_progress_bar(self):
        x = torch.rand(ModelFittingTestCase.evaluate_dataset_len, 1)
        y = torch.rand(ModelFittingTestCase.evaluate_dataset_len, 1)

        _, _ = self.model.evaluate(x,
                                   y,
                                   batch_size=ModelFittingTestCase.batch_size,
                                   progress_options=dict(coloring=False, progress_bar=False))

        self.assertStdoutNotContains(["%", "\u2588"])
        self.assertStdoutNotContains(["[32m", "[35m", "[36m", "[94m"])

    def test_evaluate_data_loader_with_progress_bar_coloring(self):
        x = torch.rand(ModelFittingTestCase.evaluate_dataset_len, 1)
        y = torch.rand(ModelFittingTestCase.evaluate_dataset_len, 1)
        dataset = TensorDataset(x, y)
        generator = DataLoader(dataset, ModelFittingTestCase.batch_size)

        _, _ = self.model.evaluate_generator(generator, verbose=True)

        self.assertStdoutContains(["%", "[32m", "[35m", "[36m", "[94m", "\u2588"])

    def test_evaluate_generator_with_progress_bar_coloring(self):
        generator = some_data_tensor_generator(ModelFittingTestCase.batch_size)

        _, _ = self.model.evaluate_generator(generator, steps=ModelFittingTestCaseProgress.num_steps, verbose=True)

        self.assertStdoutContains(["%", "[32m", "[35m", "[36m", "[94m", "\u2588"])

    def test_evaluate_generator_with_callback_and_progress_bar_coloring(self):
        generator = some_data_tensor_generator(ModelFittingTestCase.batch_size)

        _, _ = self.model.evaluate_generator(generator,
                                             steps=ModelFittingTestCaseProgress.num_steps,
                                             callbacks=[self.mock_callback],
                                             verbose=True)

        self.assertStdoutContains(["%", "[32m", "[35m", "[36m", "[94m", "\u2588"])

    def test_fitting_complete_display_test_with_progress_bar_coloring(self):
        # we use the same color for all components for simplicity
        coloring = {
            "text_color": 'WHITE',
            "ratio_color": "WHITE",
            "metric_value_color": "WHITE",
            "time_color": "WHITE",
            "progress_bar_color": "WHITE"
        }
        _ = self.model.fit_generator(self.train_generator,
                                     self.valid_generator,
                                     epochs=1,
                                     steps_per_epoch=ModelFittingTestCaseProgress.num_steps,
                                     validation_steps=ModelFittingTestCaseProgress.num_steps,
                                     callbacks=[self.mock_callback],
                                     progress_options=dict(coloring=coloring, progress_bar=False))

        # Split the captured output into per-step updates
        steps_update = self.test_out.getvalue().strip().split("\r")

        # We don't validate the metric formatting here since it is tested elsewhere
        template_format = r".*Epoch:.*{}\/1.*\[37mStep:.*{}\/5.*{:6.2f}\%.*|{}|.*ETA:"
        epoch = 1
        # the 5 train steps
        for step, step_update in enumerate(steps_update[:ModelFittingTestCaseProgress.num_steps]):
            step += 1
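            # Expected bar: two block characters per completed step, padded with spaces to a width of 20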
            progress_bar = "\u2588" * step * 2 + " " * (20 - step * 2)
            regex_filled = template_format.format(epoch, step, step / ModelFittingTestCaseProgress.num_steps * 100,
                                                  progress_bar)
            self.assertRegex(step_update, regex_filled)

        # The 5 val steps
        for step, step_update in enumerate(steps_update[ModelFittingTestCaseProgress.num_steps:-1]):
            step += 1
            progress_bar = "\u2588" * step * 2 + " " * (20 - step * 2)
            regex_filled = template_format.format(epoch, step, step / ModelFittingTestCaseProgress.num_steps * 100,
                                                  progress_bar)
            self.assertRegex(step_update, regex_filled)

        # The last printed update uses a different template
        last_print_regex = r".*\[37mTrain steps:.*5.*Val steps:.*5.*[0-9]*\.[0-9][0-9]s"
        self.assertRegex(steps_update[-1], last_print_regex)

    def test_fitting_complete_display_test_with_progress_bar_no_coloring(self):
        _ = self.model.fit_generator(self.train_generator,
                                     self.valid_generator,
                                     epochs=1,
                                     steps_per_epoch=ModelFittingTestCaseProgress.num_steps,
                                     validation_steps=ModelFittingTestCaseProgress.num_steps,
                                     callbacks=[self.mock_callback],
                                     progress_options=dict(coloring=False, progress_bar=True))

        # Split the captured output into per-step updates
        steps_update = self.test_out.getvalue().strip().split("\r")

        # We don't validate the metric formatting here since it is tested elsewhere
        template_format = r".*Epoch:.*{}\/1.*Step:.*{}\/5.*{:6.2f}\%.*|{}|.*ETA:"
        epoch = 1
        # the 5 train steps
        for step, step_update in enumerate(steps_update[:ModelFittingTestCaseProgress.num_steps]):
            step += 1
            progress_bar = "\u2588" * step * 2 + " " * (20 - step * 2)
            regex_filled = template_format.format(epoch, step, step / ModelFittingTestCaseProgress.num_steps * 100,
                                                  progress_bar)
            self.assertRegex(step_update, regex_filled)

        # The 5 val steps
        for step, step_update in enumerate(steps_update[ModelFittingTestCaseProgress.num_steps:-1]):
            step += 1
            progress_bar = "\u2588" * step * 2 + " " * (20 - step * 2)
            regex_filled = template_format.format(epoch, step, step / ModelFittingTestCaseProgress.num_steps * 100,
                                                  progress_bar)
            self.assertRegex(step_update, regex_filled)

        # The last printed update uses a different template
        last_print_regex = r".*Train steps:.*5.*Val steps:.*5.*[0-9]*\.[0-9][0-9]s"
        self.assertRegex(steps_update[-1], last_print_regex)

    def test_fitting_complete_display_test_with_no_progress_bar_no_coloring(self):
        _ = self.model.fit_generator(self.train_generator,
                                     self.valid_generator,
                                     epochs=1,
                                     steps_per_epoch=ModelFittingTestCaseProgress.num_steps,
                                     validation_steps=ModelFittingTestCaseProgress.num_steps,
                                     callbacks=[self.mock_callback],
                                     progress_options=dict(coloring=False, progress_bar=False))

        # Split the captured output into per-step updates
        steps_update = self.test_out.getvalue().strip().split("\r")

        # We don't validate the metric formatting here since it is tested elsewhere
        template_format = r".*Epoch:.*{}\/1.*Step:.*{}\/5.*ETA:"
        epoch = 1
        # the 5 train steps
        for step, step_update in enumerate(steps_update[:ModelFittingTestCaseProgress.num_steps]):
            step += 1
            regex_filled = template_format.format(epoch, step, step / ModelFittingTestCaseProgress.num_steps * 100)
            self.assertRegex(step_update, regex_filled)

        # The 5 val steps
        for step, step_update in enumerate(steps_update[ModelFittingTestCaseProgress.num_steps:-1]):
            step += 1
            regex_filled = template_format.format(epoch, step, step / ModelFittingTestCaseProgress.num_steps * 100)
            self.assertRegex(step_update, regex_filled)

        # The last printed update uses a different template
        last_print_regex = r".*Train steps:.*5.*Val steps:.*5.*[0-9]*\.[0-9][0-9]s"
        self.assertRegex(steps_update[-1], last_print_regex)

    def test_evaluate_complete_display_test_with_progress_bar_coloring(self):
        # we use the same color for all components for simplicity
        coloring = {
            "text_color": 'WHITE',
            "ratio_color": "WHITE",
            "metric_value_color": "WHITE",
            "time_color": "WHITE",
            "progress_bar_color": "WHITE"
        }

        _, _ = self.model.evaluate_generator(self.test_generator,
                                             steps=ModelFittingTestCaseProgress.num_steps,
                                             callbacks=[self.mock_callback],
                                             verbose=True,
                                             progress_options=dict(coloring=coloring, progress_bar=True))

        # Split the captured output into per-step updates
        steps_update = self.test_out.getvalue().strip().split("\r")

        # We don't validate the metric formatting here since it is tested elsewhere
        template_format = r".*\[37mStep:.*{}\/5.*{:6.2f}\%.*|{}|.*ETA:"
        for step, step_update in enumerate(steps_update[:-1]):
            step += 1
            progress_bar = "\u2588" * step * 2 + " " * (20 - step * 2)
            regex_filled = template_format.format(step, step / ModelFittingTestCaseProgress.num_steps * 100,
                                                  progress_bar)
            self.assertRegex(step_update, regex_filled)

        # The last printed update uses a different template
        last_print_regex = r".*\[37mTest steps:.*5.*[0-9]*\.[0-9][0-9]s"
        self.assertRegex(steps_update[-1], last_print_regex)

    def test_evaluate_complete_display_test_with_progress_bar_no_coloring(self):
        _, _ = self.model.evaluate_generator(self.test_generator,
                                             steps=ModelFittingTestCaseProgress.num_steps,
                                             callbacks=[self.mock_callback],
                                             verbose=True,
                                             progress_options=dict(coloring=False, progress_bar=True))

        # Split the captured output into per-step updates
        steps_update = self.test_out.getvalue().strip().split("\r")

        # We don't validate the metric formatting here since it is tested elsewhere
        template_format = r".*Step:.*{}\/5.*{:6.2f}\%.*|{}|.*ETA:"
        for step, step_update in enumerate(steps_update[:-1]):
            step += 1
            progress_bar = "\u2588" * step * 2 + " " * (20 - step * 2)
            regex_filled = template_format.format(step, step / ModelFittingTestCaseProgress.num_steps * 100,
                                                  progress_bar)
            self.assertRegex(step_update, regex_filled)

        # The last printed update uses a different template
        last_print_regex = r".*Test steps:.*5.*[0-9]*\.[0-9][0-9]s"
        self.assertRegex(steps_update[-1], last_print_regex)

    def test_evaluate_complete_display_test_with_no_progress_bar_no_coloring(self):
        _, _ = self.model.evaluate_generator(self.test_generator,
                                             steps=ModelFittingTestCaseProgress.num_steps,
                                             callbacks=[self.mock_callback],
                                             verbose=True,
                                             progress_options=dict(coloring=False, progress_bar=False))

        # Split the captured output into per-step updates
        steps_update = self.test_out.getvalue().strip().split("\r")

        # We don't validate the metric formatting here since it is tested elsewhere
        template_format = r".*Step:.*{}\/5.*ETA:"
        for step, step_update in enumerate(steps_update[:-1]):
            step += 1
            regex_filled = template_format.format(step, step / ModelFittingTestCaseProgress.num_steps * 100)
            self.assertRegex(step_update, regex_filled)

        # The last printed update uses a different template
        last_print_regex = r".*Test steps:.*5.*[0-9]*\.[0-9][0-9]s"
        self.assertRegex(steps_update[-1], last_print_regex)
Example #16
0
class ModelCheckpointTest(TestCase):
    batch_size = 20

    def setUp(self):
        torch.manual_seed(42)
        self.pytorch_network = nn.Linear(1, 1)
        self.loss_function = nn.MSELoss()
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(),
                                         lr=1e-3)
        self.model = Model(self.pytorch_network, self.optimizer,
                           self.loss_function)
        self.temp_dir_obj = TemporaryDirectory()
        self.checkpoint_filename = os.path.join(self.temp_dir_obj.name,
                                                'my_checkpoint_{epoch}.ckpt')

    def tearDown(self):
        self.temp_dir_obj.cleanup()

    def test_integration(self):
        train_gen = some_data_generator(ModelCheckpointTest.batch_size)
        valid_gen = some_data_generator(ModelCheckpointTest.batch_size)
        checkpointer = ModelCheckpoint(self.checkpoint_filename,
                                       monitor='val_loss',
                                       verbose=True,
                                       save_best_only=True)
        self.model.fit_generator(train_gen,
                                 valid_gen,
                                 epochs=10,
                                 steps_per_epoch=5,
                                 callbacks=[checkpointer])

    def test_temporary_filename_arg(self):
        tmp_filename = os.path.join(self.temp_dir_obj.name,
                                    'my_checkpoint.tmp.ckpt')
        checkpoint_filename = os.path.join(self.temp_dir_obj.name,
                                           'my_checkpoint.ckpt')
        train_gen = some_data_generator(ModelCheckpointTest.batch_size)
        valid_gen = some_data_generator(ModelCheckpointTest.batch_size)
        checkpointer = ModelCheckpoint(checkpoint_filename,
                                       monitor='val_loss',
                                       verbose=True,
                                       period=1,
                                       temporary_filename=tmp_filename)
        self.model.fit_generator(train_gen,
                                 valid_gen,
                                 epochs=10,
                                 steps_per_epoch=5,
                                 callbacks=[checkpointer])
        self.assertFalse(os.path.isfile(tmp_filename))
        self.assertTrue(os.path.isfile(checkpoint_filename))

    def test_temporary_filename_arg_with_differing_checkpoint_filename(self):
        epochs = 10
        tmp_filename = os.path.join(self.temp_dir_obj.name,
                                    'my_checkpoint.tmp.ckpt')
        checkpoint_filename = os.path.join(self.temp_dir_obj.name,
                                           'my_checkpoint_{epoch}.ckpt')
        train_gen = some_data_generator(ModelCheckpointTest.batch_size)
        valid_gen = some_data_generator(ModelCheckpointTest.batch_size)
        checkpointer = ModelCheckpoint(checkpoint_filename,
                                       monitor='val_loss',
                                       verbose=True,
                                       period=1,
                                       temporary_filename=tmp_filename)
        self.model.fit_generator(train_gen,
                                 valid_gen,
                                 epochs=epochs,
                                 steps_per_epoch=5,
                                 callbacks=[checkpointer])
        self.assertFalse(os.path.isfile(tmp_filename))
        for i in range(1, epochs + 1):
            self.assertTrue(os.path.isfile(
                checkpoint_filename.format(epoch=i)))

    def test_non_atomic_write(self):
        checkpoint_filename = os.path.join(self.temp_dir_obj.name,
                                           'my_checkpoint.ckpt')
        train_gen = some_data_generator(ModelCheckpointTest.batch_size)
        valid_gen = some_data_generator(ModelCheckpointTest.batch_size)
        checkpointer = ModelCheckpoint(checkpoint_filename,
                                       monitor='val_loss',
                                       verbose=True,
                                       period=1,
                                       atomic_write=False)
        self.model.fit_generator(train_gen,
                                 valid_gen,
                                 epochs=10,
                                 steps_per_epoch=5,
                                 callbacks=[checkpointer])
        self.assertTrue(os.path.isfile(checkpoint_filename))

    def test_save_best_only(self):
        checkpointer = ModelCheckpoint(self.checkpoint_filename,
                                       monitor='val_loss',
                                       verbose=True,
                                       save_best_only=True)

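        # A checkpoint is expected only for epochs where val_loss improves on the best value so far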
        val_losses = [10, 3, 8, 5, 2]
        has_checkpoints = [True, True, False, False, True]
        self._test_checkpointer_with_val_losses(checkpointer, val_losses,
                                                has_checkpoints)

    def test_save_best_only_with_restore_best(self):
        checkpointer = ModelCheckpoint(self.checkpoint_filename,
                                       monitor='val_loss',
                                       verbose=True,
                                       save_best_only=True,
                                       restore_best=True)

        val_losses = [10, 3, 8, 5, 2]
        has_checkpoints = [True, True, False, False, True]
        self._test_checkpointer_with_val_losses(checkpointer, val_losses,
                                                has_checkpoints)

        self._test_restore_best(val_losses)

    def test_restore_best_without_save_best_only(self):
        with self.assertRaises(ValueError):
            ModelCheckpoint(self.checkpoint_filename,
                            monitor='val_loss',
                            verbose=True,
                            save_best_only=False,
                            restore_best=True)

        with self.assertRaises(ValueError):
            ModelCheckpoint(self.checkpoint_filename,
                            monitor='val_loss',
                            verbose=True,
                            restore_best=True)

    def test_save_best_only_with_max(self):
        checkpointer = ModelCheckpoint(self.checkpoint_filename,
                                       monitor='val_loss',
                                       mode='max',
                                       verbose=True,
                                       save_best_only=True)

        val_losses = [2, 3, 8, 5, 2]
        has_checkpoints = [True, True, True, False, False]
        self._test_checkpointer_with_val_losses(checkpointer, val_losses,
                                                has_checkpoints)

    def test_periodic_with_period_of_1(self):
        checkpointer = ModelCheckpoint(self.checkpoint_filename,
                                       monitor='val_loss',
                                       verbose=True,
                                       period=1,
                                       save_best_only=False)

        val_losses = [1] * 10
        has_checkpoints = [True] * 10
        self._test_checkpointer_with_val_losses(checkpointer, val_losses,
                                                has_checkpoints)

    def test_periodic_with_period_of_2(self):
        checkpointer = ModelCheckpoint(self.checkpoint_filename,
                                       monitor='val_loss',
                                       verbose=True,
                                       period=2,
                                       save_best_only=False)

        val_losses = [1] * 10
        has_checkpoints = [False, True] * 5
        self._test_checkpointer_with_val_losses(checkpointer, val_losses,
                                                has_checkpoints)

    def _test_checkpointer_with_val_losses(self, checkpointer, val_losses,
                                           has_checkpoints):
        generator = some_data_generator(ModelCheckpointTest.batch_size)

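        # Drive the callback manually with fabricated val_loss values to control exactly when checkpoints are written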
        checkpointer.set_params({'epochs': len(val_losses), 'steps': 1})
        checkpointer.set_model(self.model)
        checkpointer.on_train_begin({})
        for epoch, (val_loss, has_checkpoint) in enumerate(
                zip(val_losses, has_checkpoints), 1):
            checkpointer.on_epoch_begin(epoch, {})
            checkpointer.on_train_batch_begin(1, {})
            loss = self._update_model(generator)
            checkpointer.on_train_batch_end(1, {
                'batch': 1,
                'size': ModelCheckpointTest.batch_size,
                'loss': loss
            })
            checkpointer.on_epoch_end(epoch, {
                'epoch': epoch,
                'loss': loss,
                'val_loss': val_loss
            })
            filename = self.checkpoint_filename.format(epoch=epoch)
            self.assertEqual(has_checkpoint, os.path.isfile(filename))
        checkpointer.on_train_end({})

    def _update_model(self, generator):
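        # Run one manual training step so the network weights change from epoch to epoch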
        self.pytorch_network.zero_grad()

        x, y = next(generator)
        pred_y = self.pytorch_network(x)
        loss = self.loss_function(pred_y, y)
        loss.backward()

        self.optimizer.step()

        return float(loss)

    def _test_restore_best(self, val_losses):
        final_weights = torch_to_numpy(self.model.get_weight_copies())

        epoch = val_losses.index(min(val_losses)) + 1
        best_epoch_filename = self.checkpoint_filename.format(epoch=epoch)
        self.model.load_weights(best_epoch_filename)

        best_weights = torch_to_numpy(self.model.get_weight_copies())

        self.assertEqual(best_weights, final_weights)
Example #17
0
class OptimizerCheckpointTest(TestCase):
    batch_size = 20
    epochs = 10

    def setUp(self):
        torch.manual_seed(42)
        self.pytorch_network = nn.Linear(1, 1)
        self.loss_function = nn.MSELoss()
        self.optimizer = torch.optim.Adam(self.pytorch_network.parameters(),
                                          lr=1e-3)
        self.model = Model(self.pytorch_network, self.optimizer,
                           self.loss_function)
        self.temp_dir_obj = TemporaryDirectory()
        self.checkpoint_filename = os.path.join(self.temp_dir_obj.name,
                                                'my_checkpoint_{epoch}.optim')

    def tearDown(self):
        self.temp_dir_obj.cleanup()

    def test_integration(self):
        train_gen = some_data_generator(OptimizerCheckpointTest.batch_size)
        valid_gen = some_data_generator(OptimizerCheckpointTest.batch_size)
        checkpointer = OptimizerCheckpoint(self.checkpoint_filename, period=1)
        self.model.fit_generator(train_gen,
                                 valid_gen,
                                 epochs=OptimizerCheckpointTest.epochs,
                                 steps_per_epoch=5,
                                 callbacks=[checkpointer])

    def test_checkpoints(self):
        checkpointer = OptimizerCheckpoint(self.checkpoint_filename, period=1)
        self._test_checkpointer(checkpointer)

    def _test_checkpointer(self, checkpointer):
        optimizer_states = {}
        generator = some_data_generator(OptimizerCheckpointTest.batch_size)

        checkpointer.set_params({
            'epochs': OptimizerCheckpointTest.epochs,
            'steps': 1
        })
        checkpointer.set_model(self.model)
        checkpointer.on_train_begin({})
        for epoch in range(1, OptimizerCheckpointTest.epochs + 1):
            checkpointer.on_epoch_begin(epoch, {})
            checkpointer.on_train_batch_begin(1, {})
            loss = self._update_model(generator)
            checkpointer.on_train_batch_end(
                1, {
                    'batch': 1,
                    'size': OptimizerCheckpointTest.batch_size,
                    'loss': loss
                })
            checkpointer.on_epoch_end(epoch, {
                'epoch': epoch,
                'loss': loss,
                'val_loss': 1
            })
            filename = self.checkpoint_filename.format(epoch=epoch)
            self.assertTrue(os.path.isfile(filename))
            optimizer_states[epoch] = torch_to_numpy(
                self.optimizer.state_dict(), copy=True)
        checkpointer.on_train_end({})

        self._test_checkpoint(optimizer_states)

    def _update_model(self, generator):
        self.pytorch_network.zero_grad()

        x, y = next(generator)
        pred_y = self.pytorch_network(x)
        loss = self.loss_function(pred_y, y)
        loss.backward()

        self.optimizer.step()

        return float(loss)

    def _test_checkpoint(self, optimizer_states):
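        # Reload each per-epoch optimizer checkpoint and check it matches the state captured at that epoch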
        for epoch, epoch_optimizer_state in optimizer_states.items():
            filename = self.checkpoint_filename.format(epoch=epoch)
            self.model.load_optimizer_state(filename)
            saved_optimizer_state = torch_to_numpy(self.optimizer.state_dict())

            self.assertEqual(epoch_optimizer_state, saved_optimizer_state)
Example #18
0
class BaseCSVLoggerTest:
    # pylint: disable=not-callable,no-member
    CSVLogger = None
    batch_size = 20
    lr = 1e-3
    num_epochs = 10

    def setUp(self):
        torch.manual_seed(42)
        self.pytorch_network = nn.Linear(1, 1)
        self.loss_function = nn.MSELoss()
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(), lr=BaseCSVLoggerTest.lr)
        self.model = Model(self.pytorch_network, self.optimizer, self.loss_function)
        self.temp_dir_obj = TemporaryDirectory()
        self.csv_filename = os.path.join(self.temp_dir_obj.name, 'my_log.csv')

    def tearDown(self):
        self.temp_dir_obj.cleanup()

    def test_logging(self):
        train_gen = some_data_generator(20)
        valid_gen = some_data_generator(20)
        logger = self.CSVLogger(self.csv_filename)
        history = self.model.fit_generator(train_gen,
                                           valid_gen,
                                           epochs=self.num_epochs,
                                           steps_per_epoch=5,
                                           callbacks=[logger])
        self._test_logging(history)

    def test_logging_with_batch_granularity(self):
        train_gen = some_data_generator(20)
        valid_gen = some_data_generator(20)
        logger = self.CSVLogger(self.csv_filename, batch_granularity=True)
        history = History()
        self.model.fit_generator(train_gen,
                                 valid_gen,
                                 epochs=self.num_epochs,
                                 steps_per_epoch=5,
                                 callbacks=[logger, history])
        self._test_logging(history.history)

    def test_logging_append(self):
        train_gen = some_data_generator(20)
        valid_gen = some_data_generator(20)
        logger = self.CSVLogger(self.csv_filename)
        history = self.model.fit_generator(train_gen,
                                           valid_gen,
                                           epochs=self.num_epochs,
                                           steps_per_epoch=5,
                                           callbacks=[logger])
        logger = self.CSVLogger(self.csv_filename, append=True)
        history2 = self.model.fit_generator(train_gen,
                                            valid_gen,
                                            epochs=20,
                                            steps_per_epoch=5,
                                            initial_epoch=self.num_epochs,
                                            callbacks=[logger])
        self._test_logging(history + history2)

    def test_logging_overwrite(self):
        train_gen = some_data_generator(20)
        valid_gen = some_data_generator(20)
        logger = self.CSVLogger(self.csv_filename)
        self.model.fit_generator(train_gen, valid_gen, epochs=self.num_epochs, steps_per_epoch=5, callbacks=[logger])
        logger = self.CSVLogger(self.csv_filename, append=False)
        history = self.model.fit_generator(train_gen,
                                           valid_gen,
                                           epochs=20,
                                           steps_per_epoch=5,
                                           initial_epoch=self.num_epochs,
                                           callbacks=[logger])
        self._test_logging(history)

    def _test_logging(self, history):
        with open(self.csv_filename) as csvfile:
            reader = csv.DictReader(csvfile)
            rows = []
            for row in reader:
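                # Only rows with a non-empty epoch value are expected to report the learning rate;
                # the column is dropped before comparing against the history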
                if row['epoch'] != '':
                    self.assertAlmostEqual(float(row['lr']), BaseCSVLoggerTest.lr)
                del row['lr']
                rows.append(row)
        self.assertEqual(len(rows), len(history))
        for row, hist_entry in zip(rows, history):
            row = {k: v for k, v in row.items() if v != ''}
            self.assertEqual(row.keys(), hist_entry.keys())
            for k in row.keys():
                if isinstance(hist_entry[k], float):
                    self.assertAlmostEqual(float(row[k]), hist_entry[k])
                else:
                    self.assertEqual(str(row[k]), str(hist_entry[k]))
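
# A minimal usage sketch of the CSV logging exercised above, outside of the
# test harness. It assumes a recent Poutyne release where Model and CSVLogger
# are importable from the top-level package; the tiny data generator is purely
# illustrative.
import torch
import torch.nn as nn
from poutyne import Model, CSVLogger

def illustrative_data_generator(batch_size=20):
    while True:
        x = torch.rand(batch_size, 1)
        yield x, 2 * x + 1  # a simple linear target

network = nn.Linear(1, 1)
optimizer = torch.optim.SGD(network.parameters(), lr=1e-3)
model = Model(network, optimizer, nn.MSELoss())

logger = CSVLogger('my_log.csv')  # append=True would resume an existing file
model.fit_generator(illustrative_data_generator(),
                    illustrative_data_generator(),
                    epochs=5,
                    steps_per_epoch=5,
                    validation_steps=5,
                    callbacks=[logger])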
Example #19
class TensorBoardGradientTrackerTest(TestCase):
    batch_size = 20
    lr = 1e-3
    num_epochs = 10

    def setUp(self):
        torch.manual_seed(42)
        self.loss_function = nn.MSELoss()
        self.temp_dir_obj = TemporaryDirectory()
        # pylint: disable=not-callable
        self.writer = SummaryWriter(self.temp_dir_obj.name)
        self.writer.add_scalars = MagicMock()

    def tearDown(self):
        self.temp_dir_obj.cleanup()

    def test_tracking_one_layer_model(self):
        self.num_layer = 1
        self.pytorch_network = nn.Linear(1, 1)
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(),
                                         lr=self.lr)
        self.model = Model(self.pytorch_network, self.optimizer,
                           self.loss_function)

        keep_bias = False
        train_gen = some_data_generator(20)
        valid_gen = some_data_generator(20)
        tracker = TensorBoardGradientTracker(self.writer, keep_bias=keep_bias)
        self.model.fit_generator(train_gen,
                                 valid_gen,
                                 epochs=self.num_epochs,
                                 steps_per_epoch=5,
                                 callbacks=[tracker])
        self._test_tracking(keep_bias)

    def test_tracking_one_layer_model_with_bias(self):
        self.num_layer = 1
        self.pytorch_network = nn.Linear(1, 1)
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(),
                                         lr=self.lr)
        self.model = Model(self.pytorch_network, self.optimizer,
                           self.loss_function)

        keep_bias = True
        train_gen = some_data_generator(20)
        valid_gen = some_data_generator(20)
        tracker = TensorBoardGradientTracker(self.writer, keep_bias=keep_bias)
        self.model.fit_generator(train_gen,
                                 valid_gen,
                                 epochs=self.num_epochs,
                                 steps_per_epoch=5,
                                 callbacks=[tracker])
        self._test_tracking(keep_bias)

    def test_tracking_two_layers_model(self):
        self.num_layer = 2
        self.pytorch_network = nn.Sequential(nn.Linear(1, 1), nn.Linear(1, 1))
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(),
                                         lr=self.lr)
        self.model = Model(self.pytorch_network, self.optimizer,
                           self.loss_function)

        keep_bias = False
        train_gen = some_data_generator(20)
        valid_gen = some_data_generator(20)
        tracker = TensorBoardGradientTracker(self.writer, keep_bias=keep_bias)
        self.model.fit_generator(train_gen,
                                 valid_gen,
                                 epochs=self.num_epochs,
                                 steps_per_epoch=5,
                                 callbacks=[tracker])
        self._test_tracking(keep_bias)

    def test_tracking_two_layers_shallow_model(self):
        self.num_layer = 2
        self.pytorch_network = nn.Sequential(nn.Linear(1, 4), nn.Linear(4, 1))
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(),
                                         lr=self.lr)
        self.model = Model(self.pytorch_network, self.optimizer,
                           self.loss_function)

        keep_bias = False
        train_gen = some_data_generator(20)
        valid_gen = some_data_generator(20)
        tracker = TensorBoardGradientTracker(self.writer, keep_bias=keep_bias)
        self.model.fit_generator(train_gen,
                                 valid_gen,
                                 epochs=self.num_epochs,
                                 steps_per_epoch=5,
                                 callbacks=[tracker])
        self._test_tracking(keep_bias)

    def test_tracking_N_layers_model_with_bias(self):
        self.num_layer = 4
        self.pytorch_network = nn.Sequential(nn.Linear(1, 1), nn.Linear(1, 1),
                                             nn.Linear(1, 1), nn.Linear(1, 1))
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(),
                                         lr=self.lr)
        self.model = Model(self.pytorch_network, self.optimizer,
                           self.loss_function)

        keep_bias = True
        train_gen = some_data_generator(20)
        valid_gen = some_data_generator(20)
        tracker = TensorBoardGradientTracker(self.writer, keep_bias=keep_bias)
        self.model.fit_generator(train_gen,
                                 valid_gen,
                                 epochs=self.num_epochs,
                                 steps_per_epoch=5,
                                 callbacks=[tracker])
        self._test_tracking(keep_bias)

    def _test_tracking(self, keep_bias):
        expected_calls = []
        for epoch in range(1, self.num_epochs + 1):
            layer_names = [""]
            if self.num_layer > 1:
                layer_names = []
                for layer_idx in range(self.num_layer):
                    layer_names.append("{}.".format(layer_idx))
            for layer_name in layer_names:
                expected_calls.append(
                    call('gradient_distributions/{}weight'.format(layer_name),
                         {'mean': ANY}, epoch))
                expected_calls.append(
                    call('gradient_distributions/{}weight'.format(layer_name),
                         {'mean_std_dev_up': ANY}, epoch))
                expected_calls.append(
                    call('gradient_distributions/{}weight'.format(layer_name),
                         {'mean_std_dev_down': ANY}, epoch))
                expected_calls.append(
                    call('other_gradient_stats/{}weight'.format(layer_name),
                         {'min': ANY}, epoch))
                expected_calls.append(
                    call('other_gradient_stats/{}weight'.format(layer_name),
                         {'max': ANY}, epoch))

                if keep_bias:
                    expected_calls.append(
                        call(
                            'gradient_distributions/{}bias'.format(layer_name),
                            {'mean': ANY}, epoch))
                    expected_calls.append(
                        call(
                            'gradient_distributions/{}bias'.format(layer_name),
                            {'mean_std_dev_up': ANY}, epoch))
                    expected_calls.append(
                        call(
                            'gradient_distributions/{}bias'.format(layer_name),
                            {'mean_std_dev_down': ANY}, epoch))
                    expected_calls.append(
                        call('other_gradient_stats/{}bias'.format(layer_name),
                             {'min': ANY}, epoch))
                    expected_calls.append(
                        call('other_gradient_stats/{}bias'.format(layer_name),
                             {'max': ANY}, epoch))

        method_calls = self.writer.add_scalars.mock_calls
        self.assertEqual(len(method_calls), len(expected_calls))
        self.assertEqual(method_calls, expected_calls)

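# Sketch of the same wiring outside of a test: a real SummaryWriter plus the
# gradient tracker, passed to fit_generator through callbacks=[...] exactly as
# above. Importing TensorBoardGradientTracker from the top-level poutyne
# package is an assumption; the log directory is illustrative.
from torch.utils.tensorboard import SummaryWriter
from poutyne import TensorBoardGradientTracker

writer = SummaryWriter('runs/gradient_tracking')
tracker = TensorBoardGradientTracker(writer, keep_bias=True)
# model.fit_generator(train_gen, valid_gen, epochs=10, steps_per_epoch=5,
#                     callbacks=[tracker])
# writer.close()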
Example #20
class PeriodicSaveTest(TestCase):
    batch_size = 20

    def setUp(self):
        torch.manual_seed(42)
        self.pytorch_network = nn.Linear(1, 1)
        self.loss_function = nn.MSELoss()
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(),
                                         lr=1e-3)
        self.model = Model(self.pytorch_network, self.optimizer,
                           self.loss_function)
        self.temp_dir_obj = TemporaryDirectory()
        self.save_filename = os.path.join(self.temp_dir_obj.name,
                                          'my_checkpoint_{epoch}.ckpt')

    def tearDown(self):
        self.temp_dir_obj.cleanup()

    def test_integration(self):
        train_gen = some_data_generator(PeriodicSaveTest.batch_size)
        valid_gen = some_data_generator(PeriodicSaveTest.batch_size)
        saver = PeriodicEpochSave(self.save_filename,
                                  monitor='val_loss',
                                  verbose=True,
                                  save_best_only=True)
        self.model.fit_generator(train_gen,
                                 valid_gen,
                                 epochs=10,
                                 steps_per_epoch=5,
                                 callbacks=[saver])

    def test_integration_with_keep_only_last_best(self):
        train_gen = some_data_generator(PeriodicSaveTest.batch_size)
        valid_gen = some_data_generator(PeriodicSaveTest.batch_size)
        saver = PeriodicEpochSave(self.save_filename,
                                  monitor='val_loss',
                                  verbose=True,
                                  save_best_only=True,
                                  keep_only_last_best=True)
        self.model.fit_generator(train_gen,
                                 valid_gen,
                                 epochs=10,
                                 steps_per_epoch=5,
                                 callbacks=[saver])

    def test_temporary_filename_arg(self):
        tmp_filename = os.path.join(self.temp_dir_obj.name,
                                    'my_checkpoint.tmp.ckpt')
        save_filename = os.path.join(self.temp_dir_obj.name,
                                     'my_checkpoint.ckpt')
        train_gen = some_data_generator(PeriodicSaveTest.batch_size)
        valid_gen = some_data_generator(PeriodicSaveTest.batch_size)
        saver = PeriodicEpochSave(save_filename,
                                  monitor='val_loss',
                                  verbose=True,
                                  period=1,
                                  temporary_filename=tmp_filename)
        self.model.fit_generator(train_gen,
                                 valid_gen,
                                 epochs=10,
                                 steps_per_epoch=5,
                                 callbacks=[saver])
        self.assertFalse(os.path.isfile(tmp_filename))
        self.assertTrue(os.path.isfile(save_filename))

    def test_temporary_filename_arg_with_differing_save_filename(self):
        epochs = 10
        tmp_filename = os.path.join(self.temp_dir_obj.name,
                                    'my_checkpoint.tmp.ckpt')
        save_filename = os.path.join(self.temp_dir_obj.name,
                                     'my_checkpoint_{epoch}.ckpt')
        train_gen = some_data_generator(PeriodicSaveTest.batch_size)
        valid_gen = some_data_generator(PeriodicSaveTest.batch_size)
        saver = PeriodicEpochSave(save_filename,
                                  monitor='val_loss',
                                  verbose=True,
                                  period=1,
                                  temporary_filename=tmp_filename)
        self.model.fit_generator(train_gen,
                                 valid_gen,
                                 epochs=epochs,
                                 steps_per_epoch=5,
                                 callbacks=[saver])
        self.assertFalse(os.path.isfile(tmp_filename))
        for i in range(1, epochs + 1):
            self.assertTrue(os.path.isfile(save_filename.format(epoch=i)))

    def test_non_atomic_write(self):
        save_filename = os.path.join(self.temp_dir_obj.name,
                                     'my_checkpoint.ckpt')
        train_gen = some_data_generator(PeriodicSaveTest.batch_size)
        valid_gen = some_data_generator(PeriodicSaveTest.batch_size)
        saver = PeriodicEpochSave(save_filename,
                                  monitor='val_loss',
                                  verbose=True,
                                  period=1,
                                  atomic_write=False)
        self.model.fit_generator(train_gen,
                                 valid_gen,
                                 epochs=10,
                                 steps_per_epoch=5,
                                 callbacks=[saver])
        self.assertTrue(os.path.isfile(save_filename))

    def test_save_best_only(self):
        saver = PeriodicEpochSave(self.save_filename,
                                  monitor='val_loss',
                                  verbose=True,
                                  save_best_only=True)

        val_losses = [10, 3, 8, 5, 2]
        has_checkpoints = [True, True, False, False, True]
        self._test_saver_with_val_losses(saver, val_losses, has_checkpoints)

    def test_save_best_only_with_keep_only_last_best(self):
        saver = PeriodicEpochSave(self.save_filename,
                                  monitor='val_loss',
                                  verbose=True,
                                  save_best_only=True,
                                  keep_only_last_best=True)

        val_losses = [10, 3, 8, 5, 2]
        has_checkpoints = [True, True, False, False, True]
        self._test_saver_with_val_losses(saver,
                                         val_losses,
                                         has_checkpoints,
                                         keep_only_last_best=True)

    def test_save_best_only_with_max(self):
        saver = PeriodicEpochSave(self.save_filename,
                                  monitor='val_loss',
                                  mode='max',
                                  verbose=True,
                                  save_best_only=True)

        val_losses = [2, 3, 8, 5, 2]
        has_checkpoints = [True, True, True, False, False]
        self._test_saver_with_val_losses(saver, val_losses, has_checkpoints)

    def test_save_best_only_with_max_and_keep_only_last_best(self):
        saver = PeriodicEpochSave(
            self.save_filename,
            monitor='val_loss',
            mode='max',
            verbose=True,
            save_best_only=True,
            keep_only_last_best=True,
        )

        val_losses = [2, 3, 8, 5, 2]
        has_checkpoints = [True, True, True, False, False]
        self._test_saver_with_val_losses(saver,
                                         val_losses,
                                         has_checkpoints,
                                         keep_only_last_best=True)

    def test_periodic_with_period_of_1(self):
        saver = PeriodicEpochSave(self.save_filename,
                                  monitor='val_loss',
                                  verbose=True,
                                  period=1,
                                  save_best_only=False)

        val_losses = [1] * 10
        has_checkpoints = [True] * 10
        self._test_saver_with_val_losses(saver, val_losses, has_checkpoints)

    def test_periodic_with_period_of_2(self):
        saver = PeriodicEpochSave(self.save_filename,
                                  monitor='val_loss',
                                  verbose=True,
                                  period=2,
                                  save_best_only=False)

        val_losses = [1] * 10
        has_checkpoints = [False, True] * 5
        self._test_saver_with_val_losses(saver, val_losses, has_checkpoints)

    def test_keep_only_last_best_without_save_best_only(self):
        with self.assertRaises(ValueError):
            PeriodicEpochSave(self.save_filename,
                              monitor='val_loss',
                              verbose=True,
                              save_best_only=False,
                              keep_only_last_best=True)

        with self.assertRaises(ValueError):
            PeriodicEpochSave(self.save_filename,
                              monitor='val_loss',
                              verbose=True,
                              keep_only_last_best=True)

    def test_save_best_only_with_restore_best(self):
        checkpointer = PeriodicEpochSave(self.save_filename,
                                         monitor='val_loss',
                                         verbose=True,
                                         save_best_only=True,
                                         restore_best=True)

        val_losses = [10, 3, 8, 7, 2, 5]
        has_checkpoints = [True, True, False, False, True, False]
        self._test_saver_with_val_losses(checkpointer, val_losses,
                                         has_checkpoints)

        self.assertEqual(5, checkpointer.restored_epoch_number)
        self.assertEqual(5, checkpointer.last_saved_epoch_number)

    def test_restore_best_without_save_best_only(self):
        with self.assertRaises(ValueError):
            PeriodicEpochSave(self.save_filename,
                              monitor='val_loss',
                              verbose=True,
                              save_best_only=False,
                              restore_best=True)

        with self.assertRaises(ValueError):
            PeriodicEpochSave(self.save_filename,
                              monitor='val_loss',
                              verbose=True,
                              restore_best=True)

    def _test_saver_with_val_losses(self,
                                    saver,
                                    val_losses,
                                    has_checkpoints,
                                    keep_only_last_best=False):
        generator = some_data_generator(PeriodicSaveTest.batch_size)

        best_checkpoint_filenames = []
        saver.set_params({'epochs': len(val_losses), 'steps': 1})
        saver.set_model(self.model)
        saver.on_train_begin({})
        for epoch, (val_loss, has_checkpoint) in enumerate(
                zip(val_losses, has_checkpoints), 1):
            saver.on_epoch_begin(epoch, {})
            saver.on_train_batch_begin(1, {})
            loss = self._update_model(generator)
            saver.on_train_batch_end(1, {
                'batch': 1,
                'size': PeriodicSaveTest.batch_size,
                'loss': loss
            })
            saver.on_epoch_end(epoch, {
                'epoch': epoch,
                'loss': loss,
                'val_loss': val_loss
            })

            filename = self.save_filename.format(epoch=epoch)
            self.assertEqual(has_checkpoint, os.path.isfile(filename))
            if has_checkpoint:
                with open(filename, 'r', encoding='utf-8') as checkpoint_file:
                    self.assertEqual(f'{epoch}\n', checkpoint_file.read())
                best_checkpoint_filenames.append(os.path.realpath(filename))

        files = [
            os.path.realpath(os.path.join(self.temp_dir_obj.name, f))
            for f in os.listdir(self.temp_dir_obj.name)
        ]
        if keep_only_last_best:
            self.assertEqual(1, len(files))
            self.assertEqual(files[0], best_checkpoint_filenames[-1])
        else:
            best_checkpoint_filenames = set(best_checkpoint_filenames)
            self.assertEqual(len(best_checkpoint_filenames), len(files))
            self.assertEqual(best_checkpoint_filenames, set(files))

        saver.on_train_end({})

    def _update_model(self, generator):
        self.pytorch_network.zero_grad()

        x, y = next(generator)
        pred_y = self.pytorch_network(x)
        loss = self.loss_function(pred_y, y)
        loss.backward()

        self.optimizer.step()

        return float(loss)
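
# Outside of this test harness, the public Poutyne callback that plays the
# role of the PeriodicEpochSave helper exercised above is, to our reading,
# ModelCheckpoint; that mapping and the top-level import below are
# assumptions. The constructor arguments mirror the ones used in the tests.
from poutyne import ModelCheckpoint

saver = ModelCheckpoint('my_checkpoint_{epoch}.ckpt',
                        monitor='val_loss',
                        save_best_only=True,
                        restore_best=True,
                        verbose=True)
# model.fit_generator(train_gen, valid_gen, epochs=10, steps_per_epoch=5,
#                     callbacks=[saver])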
Example #21
class LambdaTest(ModelFittingTestCase):
    def setUp(self):
        super().setUp()
        torch.manual_seed(42)
        self.pytorch_network = nn.Linear(1, 1)
        self.loss_function = nn.MSELoss()
        self.optimizer = torch.optim.Adam(self.pytorch_network.parameters(),
                                          lr=1e-3)
        self.model = Model(self.pytorch_network, self.optimizer,
                           self.loss_function)

    def test_integration_zero_args(self):
        lambda_callback = LambdaCallback()

        train_generator = some_data_tensor_generator(LambdaTest.batch_size)
        valid_generator = some_data_tensor_generator(LambdaTest.batch_size)
        test_generator = some_data_tensor_generator(LambdaTest.batch_size)
        self.model.fit_generator(
            train_generator,
            valid_generator,
            epochs=LambdaTest.epochs,
            steps_per_epoch=LambdaTest.steps_per_epoch,
            validation_steps=LambdaTest.steps_per_epoch,
            callbacks=[lambda_callback],
        )

        num_steps = 10
        self.model.evaluate_generator(test_generator,
                                      steps=num_steps,
                                      callbacks=[lambda_callback])

    def test_with_only_on_epoch_end_arg(self):
        on_epoch_end = Mock()
        lambda_callback = LambdaCallback(on_epoch_end=on_epoch_end)

        train_generator = some_data_tensor_generator(LambdaTest.batch_size)
        valid_generator = some_data_tensor_generator(LambdaTest.batch_size)
        test_generator = some_data_tensor_generator(LambdaTest.batch_size)
        logs = self.model.fit_generator(
            train_generator,
            valid_generator,
            epochs=LambdaTest.epochs,
            steps_per_epoch=LambdaTest.steps_per_epoch,
            validation_steps=LambdaTest.steps_per_epoch,
            callbacks=[lambda_callback],
        )

        num_steps = 10
        self.model.evaluate_generator(test_generator,
                                      steps=num_steps,
                                      callbacks=[lambda_callback])

        expected_calls = [
            call(epoch_number, log)
            for epoch_number, log in enumerate(logs, 1)
        ]
        actual_calls = on_epoch_end.mock_calls
        self.assertEqual(len(expected_calls), len(actual_calls))
        self.assertEqual(expected_calls, actual_calls)

    def test_lambda_test_calls(self):
        lambda_callback, mock_calls = self._get_lambda_callback_with_mock_args()
        num_steps = 10
        generator = some_data_tensor_generator(LambdaTest.batch_size)
        self.model.evaluate_generator(
            generator,
            steps=num_steps,
            callbacks=[lambda_callback, self.mock_callback])

        expected_calls = self.mock_callback.method_calls[2:]
        actual_calls = mock_calls.method_calls
        self.assertEqual(len(expected_calls), len(actual_calls))
        self.assertEqual(expected_calls, actual_calls)

    def test_lambda_train_calls(self):
        lambda_callback, mock_calls = self._get_lambda_callback_with_mock_args()
        train_generator = some_data_tensor_generator(LambdaTest.batch_size)
        valid_generator = some_data_tensor_generator(LambdaTest.batch_size)
        self.model.fit_generator(
            train_generator,
            valid_generator,
            epochs=LambdaTest.epochs,
            steps_per_epoch=LambdaTest.steps_per_epoch,
            validation_steps=LambdaTest.steps_per_epoch,
            callbacks=[lambda_callback, self.mock_callback],
        )

        expected_calls = self.mock_callback.method_calls[2:]
        actual_calls = mock_calls.method_calls
        self.assertEqual(len(expected_calls), len(actual_calls))
        self.assertEqual(expected_calls, actual_calls)

    def _get_lambda_callback_with_mock_args(self):
        mock_callback = Mock(spec=Callback())
        lambda_callback = LambdaCallback(
            on_epoch_begin=mock_callback.on_epoch_begin,
            on_epoch_end=mock_callback.on_epoch_end,
            on_train_batch_begin=mock_callback.on_train_batch_begin,
            on_train_batch_end=mock_callback.on_train_batch_end,
            on_valid_batch_begin=mock_callback.on_valid_batch_begin,
            on_valid_batch_end=mock_callback.on_valid_batch_end,
            on_test_batch_begin=mock_callback.on_test_batch_begin,
            on_test_batch_end=mock_callback.on_test_batch_end,
            on_train_begin=mock_callback.on_train_begin,
            on_train_end=mock_callback.on_train_end,
            on_valid_begin=mock_callback.on_valid_begin,
            on_valid_end=mock_callback.on_valid_end,
            on_test_begin=mock_callback.on_test_begin,
            on_test_end=mock_callback.on_test_end,
            on_backward_end=mock_callback.on_backward_end,
        )
        return lambda_callback, mock_callback
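
# A short sketch of LambdaCallback in everyday use: ad hoc hooks without
# writing a Callback subclass. The on_epoch_end hook receives the epoch number
# and the logs dict, as asserted in test_with_only_on_epoch_end_arg above.
# Importing LambdaCallback from the top-level poutyne package is an assumption.
from poutyne import LambdaCallback

print_progress = LambdaCallback(
    on_epoch_end=lambda epoch_number, logs: print(f"epoch {epoch_number}: {logs}"),
    on_train_end=lambda logs: print("training finished"),
)
# model.fit_generator(train_gen, valid_gen, epochs=5, steps_per_epoch=5,
#                     callbacks=[print_progress])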
Example #22
class ModelMultiIOTest(ModelFittingTestCase):

    def setUp(self):
        super().setUp()
        torch.manual_seed(42)
        self.pytorch_network = MultiIOModel(num_input=2, num_output=2)
        self.loss_function = nn.MSELoss()
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(), lr=1e-3)

        self.model = Model(
            self.pytorch_network,
            self.optimizer,
            lambda y_pred, y_true: self.loss_function(y_pred[0], y_true[0]) + self.loss_function(y_pred[1], y_true[1]),
            batch_metrics=self.batch_metrics,
            epoch_metrics=self.epoch_metrics)

    def test_fitting_tensor_generator_multi_io(self):
        train_generator = some_data_tensor_generator_multi_io(ModelMultiIOTest.batch_size)
        valid_generator = some_data_tensor_generator_multi_io(ModelMultiIOTest.batch_size)
        logs = self.model.fit_generator(train_generator,
                                        valid_generator,
                                        epochs=ModelMultiIOTest.epochs,
                                        steps_per_epoch=ModelMultiIOTest.steps_per_epoch,
                                        validation_steps=ModelMultiIOTest.steps_per_epoch,
                                        callbacks=[self.mock_callback])
        params = {'epochs': ModelMultiIOTest.epochs, 'steps': ModelMultiIOTest.steps_per_epoch}
        self._test_callbacks_train(params, logs)

    def test_fitting_with_tensor_multi_io(self):
        train_real_steps_per_epoch = 30
        train_batch_size = ModelMultiIOTest.batch_size
        train_final_batch_missing_samples = 7
        train_size = train_real_steps_per_epoch * train_batch_size - \
                     train_final_batch_missing_samples
        train_x = (torch.rand(train_size, 1), torch.rand(train_size, 1))
        train_y = (torch.rand(train_size, 1), torch.rand(train_size, 1))

        valid_real_steps_per_epoch = 10
        # valid_batch_size will be the same as train_batch_size in the fit method.
        valid_batch_size = train_batch_size
        valid_final_batch_missing_samples = 3
        valid_size = valid_real_steps_per_epoch * valid_batch_size - \
                     valid_final_batch_missing_samples
        valid_x = (torch.rand(valid_size, 1), torch.rand(valid_size, 1))
        valid_y = (torch.rand(valid_size, 1), torch.rand(valid_size, 1))

        logs = self.model.fit(train_x,
                              train_y,
                              validation_data=(valid_x, valid_y),
                              epochs=ModelMultiIOTest.epochs,
                              batch_size=train_batch_size,
                              steps_per_epoch=None,
                              validation_steps=None,
                              callbacks=[self.mock_callback])
        params = {'epochs': ModelMultiIOTest.epochs, 'steps': train_real_steps_per_epoch}
        self._test_callbacks_train(params, logs)

    def test_tensor_train_on_batch_multi_io(self):
        x1 = torch.rand(ModelMultiIOTest.batch_size, 1)
        x2 = torch.rand(ModelMultiIOTest.batch_size, 1)
        y1 = torch.rand(ModelMultiIOTest.batch_size, 1)
        y2 = torch.rand(ModelMultiIOTest.batch_size, 1)
        loss = self.model.train_on_batch((x1, x2), (y1, y2))
        self.assertEqual(type(loss), float)

    def test_ndarray_train_on_batch_multi_io(self):
        x1 = np.random.rand(ModelMultiIOTest.batch_size, 1).astype(np.float32)
        x2 = np.random.rand(ModelMultiIOTest.batch_size, 1).astype(np.float32)
        y1 = np.random.rand(ModelMultiIOTest.batch_size, 1).astype(np.float32)
        y2 = np.random.rand(ModelMultiIOTest.batch_size, 1).astype(np.float32)
        loss = self.model.train_on_batch((x1, x2), (y1, y2))
        self.assertEqual(type(loss), float)

    def test_evaluate_with_pred_multi_io(self):
        x = (torch.rand(ModelMultiIOTest.evaluate_dataset_len, 1), torch.rand(ModelMultiIOTest.evaluate_dataset_len, 1))
        y = (torch.rand(ModelMultiIOTest.evaluate_dataset_len, 1), torch.rand(ModelMultiIOTest.evaluate_dataset_len, 1))
        # We also test the unpacking.
        _, pred_y = self.model.evaluate(x, y, batch_size=ModelMultiIOTest.batch_size, return_pred=True)
        for pred in pred_y:
            self.assertEqual(pred.shape, (ModelMultiIOTest.evaluate_dataset_len, 1))

    def test_tensor_evaluate_on_batch_multi_io(self):
        y = (torch.rand(ModelMultiIOTest.batch_size, 1), torch.rand(ModelMultiIOTest.batch_size, 1))
        x = (torch.rand(ModelMultiIOTest.batch_size, 1), torch.rand(ModelMultiIOTest.batch_size, 1))
        loss = self.model.evaluate_on_batch(x, y)
        self.assertEqual(type(loss), float)

    def test_predict_with_np_array_multi_io(self):
        x1 = np.random.rand(ModelMultiIOTest.evaluate_dataset_len, 1).astype(np.float32)
        x2 = np.random.rand(ModelMultiIOTest.evaluate_dataset_len, 1).astype(np.float32)
        x = (x1, x2)
        pred_y = self.model.predict(x, batch_size=ModelMultiIOTest.batch_size)
        for pred in pred_y:
            self.assertEqual(pred.shape, (ModelMultiIOTest.evaluate_dataset_len, 1))

    def test_predict_generator_multi_io(self):
        num_steps = 10
        generator = some_data_tensor_generator_multi_io(ModelMultiIOTest.batch_size)
        generator = (x for x, _ in generator)
        pred_y = self.model.predict_generator(generator, steps=num_steps)

        for pred in pred_y:
            self.assertEqual(pred.shape, (num_steps * ModelMultiIOTest.batch_size, 1))

    def test_tensor_predict_on_batch_multi_io(self):
        x = (torch.rand(ModelMultiIOTest.batch_size, 1), torch.rand(ModelMultiIOTest.batch_size, 1))
        pred_y = self.model.predict_on_batch(x)
        self._test_size_and_type_for_generator(pred_y, (ModelMultiIOTest.batch_size, 1))
Example #23
class NotificationCallbackTest(TestCase):
    epochs = 10
    steps_per_epoch = 5
    batch_size = 20
    lr = 0.01

    def setUp(self) -> None:
        super().setUp()
        self.notification_callback_mock = MagicMock()
        self.notificator_mock = MagicMock()

        self.train_generator = some_data_tensor_generator(NotificationCallbackTest.batch_size)
        self.valid_generator = some_data_tensor_generator(NotificationCallbackTest.batch_size)

        torch.manual_seed(42)
        self.pytorch_network = nn.Linear(1, 1)
        self.loss_function = nn.MSELoss()
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(), lr=NotificationCallbackTest.lr)

        self.batch_metrics = [
            some_batch_metric_1,
            ('custom_name', some_batch_metric_2),
            repeat_batch_metric,
            repeat_batch_metric,
        ]
        self.batch_metrics_names = [
            'some_batch_metric_1',
            'custom_name',
            'repeat_batch_metric1',
            'repeat_batch_metric2',
        ]
        self.batch_metrics_values = [
            some_metric_1_value,
            some_metric_2_value,
            repeat_batch_metric_value,
            repeat_batch_metric_value,
        ]
        self.epoch_metrics = [SomeConstantEpochMetric()]
        self.epoch_metrics_names = ['some_constant_epoch_metric']
        self.epoch_metrics_values = [some_constant_epoch_metric_value]

        self.model = Model(
            self.pytorch_network,
            self.optimizer,
            self.loss_function,
            batch_metrics=self.batch_metrics,
            epoch_metrics=self.epoch_metrics,
        )

    def test_givenANotificationCallback_whenTrainingLoop_thenSendNotification(self):
        notification_callback = NotificationCallback(notificator=self.notificator_mock)
        logs = self.model.fit_generator(
            self.train_generator,
            self.valid_generator,
            epochs=NotificationCallbackTest.epochs,
            steps_per_epoch=NotificationCallbackTest.steps_per_epoch,
            validation_steps=NotificationCallbackTest.steps_per_epoch,
            callbacks=[notification_callback],
        )

        self._test_notificator_call(logs)

    def test_givenANotificationCallbackWithExperimentName_whenTrainingLoop_thenSendNotificationWithExperimentName(self):
        a_experiment_name = "A experiment name"
        notification_callback = NotificationCallback(
            notificator=self.notificator_mock, experiment_name=a_experiment_name
        )
        logs = self.model.fit_generator(
            self.train_generator,
            self.valid_generator,
            epochs=NotificationCallbackTest.epochs,
            steps_per_epoch=NotificationCallbackTest.steps_per_epoch,
            validation_steps=NotificationCallbackTest.steps_per_epoch,
            callbacks=[notification_callback],
        )

        self._test_notificator_call(logs, experiment_name=a_experiment_name)

    def _test_notificator_call(self, logs: Dict, experiment_name=None):
        experiment_name_text = f" for {experiment_name}" if experiment_name is not None else ""
        call_list = []
        call_list.append(call.send_notification('', subject=f'Start of the training{experiment_name_text}.'))
        for batch_log in logs:
            formatted_log_data = " ".join([f"{key}: {value}\n" for key, value in batch_log.items()])
            call_list.append(
                call.send_notification(
                    f"Here the epoch metrics: \n{formatted_log_data}",
                    subject=f"Epoch {batch_log['epoch']} is done{experiment_name_text}.",
                )
            )
        call_list.append(call.send_notification('', subject=f'End of the training{experiment_name_text}.'))

        method_calls = self.notificator_mock.method_calls
        self.assertEqual(len(method_calls), len(call_list))
        self.assertEqual(method_calls, call_list)
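
# The notificator above is a MagicMock; the only interface NotificationCallback
# relies on, as asserted in _test_notificator_call, is a
# send_notification(message, subject=...) method. Below is a hypothetical
# stdout-based notificator illustrating that contract (the class name and its
# wiring are assumptions for illustration, not Poutyne API).
class PrintNotificator:
    """Toy notificator: prints the notification instead of sending it."""

    def send_notification(self, message, subject=None):
        print(f"[{subject}] {message}")

# notification_callback = NotificationCallback(notificator=PrintNotificator(),
#                                               experiment_name="demo run")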
Example #24
class DelayCallbackTest(TestCase):
    epochs = 10
    steps_per_epoch = 5
    batch_size = 20

    def setUp(self):
        torch.manual_seed(42)
        self.pytorch_network = nn.Linear(1, 1)
        self.loss_function = nn.MSELoss()
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(),
                                         lr=1e-3)
        self.model = Model(self.pytorch_network, self.optimizer,
                           self.loss_function)
        self.mock_callback = MagicMock(spec=Callback)
        self.delay_callback = DelayCallback(self.mock_callback)
        self.train_dict = {'loss': ANY, 'time': ANY}
        self.log_dict = {'loss': ANY, 'val_loss': ANY, 'time': ANY}

    def test_epoch_delay(self):
        epoch_delay = 4
        delay_callback = DelayCallback(self.mock_callback,
                                       epoch_delay=epoch_delay)
        train_generator = some_data_generator(DelayCallbackTest.batch_size)
        valid_generator = some_data_generator(DelayCallbackTest.batch_size)
        self.model.fit_generator(
            train_generator,
            valid_generator,
            epochs=DelayCallbackTest.epochs,
            steps_per_epoch=DelayCallbackTest.steps_per_epoch,
            validation_steps=DelayCallbackTest.steps_per_epoch,
            callbacks=[delay_callback])
        params = {
            'epochs': DelayCallbackTest.epochs,
            'steps': DelayCallbackTest.steps_per_epoch,
            'valid_steps': DelayCallbackTest.steps_per_epoch
        }

        call_list = []
        call_list.append(call.on_train_begin({}))
        for epoch in range(epoch_delay + 1, DelayCallbackTest.epochs + 1):
            call_list.append(call.on_epoch_begin(epoch, {}))
            for step in range(1, params['steps'] + 1):
                call_list.append(call.on_train_batch_begin(step, {}))
                call_list.append(call.on_backward_end(step))
                call_list.append(
                    call.on_train_batch_end(
                        step, {
                            'batch': step,
                            'size': DelayCallbackTest.batch_size,
                            **self.train_dict
                        }))
            call_list.append(
                call.on_epoch_end(epoch, {
                    'epoch': epoch,
                    **self.log_dict
                }))
        call_list.append(call.on_train_end({}))

        method_calls = self.mock_callback.method_calls
        self.assertIn(call.set_model(self.model), method_calls[:2])
        self.assertIn(call.set_params(params), method_calls[:2])

        self.assertEqual(len(method_calls), len(call_list) + 2)
        self.assertEqual(method_calls[2:], call_list)

    def test_batch_delay_in_middle_of_epoch(self):
        self._test_batch_delay(epoch_delay=5, batch_in_epoch_delay=3)

    def test_batch_delay_at_begin_of_epoch(self):
        self._test_batch_delay(epoch_delay=5, batch_in_epoch_delay=0)

    def test_batch_delay_when_no_delay(self):
        self._test_batch_delay(epoch_delay=0, batch_in_epoch_delay=0)

    def _test_batch_delay(self, epoch_delay, batch_in_epoch_delay):
        batch_delay = epoch_delay * DelayCallbackTest.steps_per_epoch + batch_in_epoch_delay
        delay_callback = DelayCallback(self.mock_callback,
                                       batch_delay=batch_delay)
        train_generator = some_data_generator(DelayCallbackTest.batch_size)
        valid_generator = some_data_generator(DelayCallbackTest.batch_size)
        self.model.fit_generator(
            train_generator,
            valid_generator,
            epochs=DelayCallbackTest.epochs,
            steps_per_epoch=DelayCallbackTest.steps_per_epoch,
            validation_steps=DelayCallbackTest.steps_per_epoch,
            callbacks=[delay_callback])
        params = {
            'epochs': DelayCallbackTest.epochs,
            'steps': DelayCallbackTest.steps_per_epoch,
            'valid_steps': DelayCallbackTest.steps_per_epoch
        }

        call_list = []
        call_list.append(call.on_train_begin({}))
        for epoch in range(epoch_delay + 1, DelayCallbackTest.epochs + 1):
            call_list.append(call.on_epoch_begin(epoch, {}))
            start_step = batch_in_epoch_delay + 1 if epoch == epoch_delay + 1 else 1
            for step in range(start_step, params['steps'] + 1):
                call_list.append(call.on_train_batch_begin(step, {}))
                call_list.append(call.on_backward_end(step))
                call_list.append(
                    call.on_train_batch_end(
                        step, {
                            'batch': step,
                            'size': DelayCallbackTest.batch_size,
                            **self.train_dict
                        }))
            call_list.append(
                call.on_epoch_end(epoch, {
                    'epoch': epoch,
                    **self.log_dict
                }))
        call_list.append(call.on_train_end({}))

        method_calls = self.mock_callback.method_calls
        self.assertIn(call.set_model(self.model), method_calls[:2])
        self.assertIn(call.set_params(params), method_calls[:2])

        self.assertEqual(len(method_calls), len(call_list) + 2)
        self.assertEqual(method_calls[2:], call_list)
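
# DelayCallback wraps another callback so it only starts firing after a given
# number of epochs or batches, which is what the assertions above verify. A
# minimal sketch (importing DelayCallback and ModelCheckpoint from the
# top-level poutyne package is an assumption; the wrapped callback and the
# filename are illustrative):
from poutyne import DelayCallback, ModelCheckpoint

# Skip checkpointing during the first 3 (typically noisy) epochs.
delayed_saver = DelayCallback(ModelCheckpoint('checkpoint_{epoch}.ckpt'), epoch_delay=3)
# model.fit_generator(train_gen, valid_gen, epochs=10, steps_per_epoch=5,
#                     callbacks=[delayed_saver])
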
class BestModelRestoreTest(TestCase):
    batch_size = 20

    def setUp(self):
        torch.manual_seed(42)
        self.pytorch_network = nn.Linear(1, 1)
        self.loss_function = nn.MSELoss()
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(), lr=1e-3)
        self.model = Model(self.pytorch_network, self.optimizer, self.loss_function)

    def test_integration(self):
        train_gen = some_data_generator(20)
        valid_gen = some_data_generator(20)
        model_restore = BestModelRestore(monitor='val_loss', verbose=True)
        self.model.fit_generator(train_gen, valid_gen, epochs=10, steps_per_epoch=5, callbacks=[model_restore])

    def test_basic_restore(self):
        model_restore = BestModelRestore(monitor='val_loss')

        val_losses = [3, 2, 8, 5, 4]
        best_epoch = 2
        self._test_restore_with_val_losses(model_restore, val_losses, best_epoch)

    def test_save_best_only_with_max(self):
        model_restore = BestModelRestore(monitor='val_loss', mode='max')

        val_losses = [3, 2, 8, 5, 4]
        best_epoch = 3
        self._test_restore_with_val_losses(model_restore, val_losses, best_epoch)

    def _test_restore_with_val_losses(self, checkpointer, val_losses, best_epoch):
        generator = some_data_generator(BestModelRestoreTest.batch_size)

        best_epoch_weights = None
        checkpointer.set_params({'epochs': len(val_losses), 'steps': 1})
        checkpointer.set_model(self.model)
        checkpointer.on_train_begin({})
        for epoch, val_loss in enumerate(val_losses, 1):
            checkpointer.on_epoch_begin(epoch, {})
            checkpointer.on_train_batch_begin(1, {})
            loss = self._update_model(generator)
            checkpointer.on_train_batch_end(1, {'batch': 1, 'size': BestModelRestoreTest.batch_size, 'loss': loss})
            checkpointer.on_epoch_end(epoch, {'epoch': epoch, 'loss': loss, 'val_loss': val_loss})
            if epoch == best_epoch:
                best_epoch_weights = torch_to_numpy(self.model.get_weight_copies())
        checkpointer.on_train_end({})

        final_weights = torch_to_numpy(self.model.get_weight_copies())
        self.assertEqual(best_epoch_weights, final_weights)

    def _update_model(self, generator):
        self.pytorch_network.zero_grad()

        x, y = next(generator)
        pred_y = self.pytorch_network(x)
        loss = self.loss_function(pred_y, y)
        loss.backward()

        self.optimizer.step()

        return float(loss)
Example #26
class ModelMultiDictIOTest(ModelFittingTestCase):

    def setUp(self):
        super().setUp()
        torch.manual_seed(42)
        self.pytorch_network = DictIOModel(['x1', 'x2'], ['y1', 'y2'])
        self.loss_function = dict_mse_loss
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(), lr=1e-3)

        self.model = Model(self.pytorch_network,
                           self.optimizer,
                           self.loss_function,
                           batch_metrics=self.batch_metrics,
                           epoch_metrics=self.epoch_metrics)

    def test_fitting_tensor_generator_multi_dict_io(self):
        train_generator = some_data_tensor_generator_dict_io(ModelMultiDictIOTest.batch_size)
        valid_generator = some_data_tensor_generator_dict_io(ModelMultiDictIOTest.batch_size)
        logs = self.model.fit_generator(train_generator,
                                        valid_generator,
                                        epochs=ModelMultiDictIOTest.epochs,
                                        steps_per_epoch=ModelMultiDictIOTest.steps_per_epoch,
                                        validation_steps=ModelMultiDictIOTest.steps_per_epoch,
                                        callbacks=[self.mock_callback])
        params = {
            'epochs': ModelMultiDictIOTest.epochs,
            'steps': ModelMultiDictIOTest.steps_per_epoch,
            'valid_steps': ModelMultiDictIOTest.steps_per_epoch
        }
        self._test_callbacks_train(params, logs, valid_steps=ModelMultiDictIOTest.steps_per_epoch)

    def test_tensor_train_on_batch_multi_dict_io(self):
        x, y = get_batch(ModelMultiDictIOTest.batch_size)
        loss = self.model.train_on_batch(x, y)
        self.assertEqual(type(loss), float)

    def test_train_on_batch_with_pred_multi_dict_io(self):
        x, y = get_batch(ModelMultiDictIOTest.batch_size)
        loss, pred_y = self.model.train_on_batch(x, y, return_pred=True)
        self.assertEqual(type(loss), float)
        for value in pred_y.values():
            self.assertEqual(value.shape, (ModelMultiDictIOTest.batch_size, 1))

    def test_ndarray_train_on_batch_multi_dict_io(self):
        x1 = np.random.rand(ModelMultiDictIOTest.batch_size, 1).astype(np.float32)
        x2 = np.random.rand(ModelMultiDictIOTest.batch_size, 1).astype(np.float32)
        y1 = np.random.rand(ModelMultiDictIOTest.batch_size, 1).astype(np.float32)
        y2 = np.random.rand(ModelMultiDictIOTest.batch_size, 1).astype(np.float32)
        x, y = dict(x1=x1, x2=x2), dict(y1=y1, y2=y2)
        loss = self.model.train_on_batch(x, y)
        self.assertEqual(type(loss), float)

    def test_evaluate_generator_multi_dict_io(self):
        num_steps = 10
        generator = some_data_tensor_generator_dict_io(ModelMultiDictIOTest.batch_size)
        loss, pred_y = self.model.evaluate_generator(generator, steps=num_steps, return_pred=True)
        self.assertEqual(type(loss), float)
        self._test_size_and_type_for_generator(pred_y, (num_steps * ModelMultiDictIOTest.batch_size, 1))

    def test_tensor_evaluate_on_batch_multi_dict_io(self):
        x, y = get_batch(ModelMultiDictIOTest.batch_size)
        loss = self.model.evaluate_on_batch(x, y)
        self.assertEqual(type(loss), float)

    def test_predict_generator_multi_dict_io(self):
        num_steps = 10
        generator = some_data_tensor_generator_dict_io(ModelMultiDictIOTest.batch_size)
        generator = (x for x, _ in generator)
        pred_y = self.model.predict_generator(generator, steps=num_steps)
        self._test_size_and_type_for_generator(pred_y, (num_steps * ModelMultiDictIOTest.batch_size, 1))

    def test_tensor_predict_on_batch_multi_dict_io(self):
        x1 = torch.rand(ModelMultiDictIOTest.batch_size, 1)
        x2 = torch.rand(ModelMultiDictIOTest.batch_size, 1)
        pred_y = self.model.predict_on_batch(dict(x1=x1, x2=x2))
        self._test_size_and_type_for_generator(pred_y, (ModelMultiDictIOTest.batch_size, 1))
class ModelMultiInputTest(ModelFittingTestCase):
    def setUp(self):
        super().setUp()
        torch.manual_seed(42)
        self.pytorch_network = MultiIOModel(num_input=1, num_output=1)
        self.loss_function = nn.MSELoss()
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(),
                                         lr=1e-3)

        self.model = Model(
            self.pytorch_network,
            self.optimizer,
            self.loss_function,
            batch_metrics=self.batch_metrics,
            epoch_metrics=self.epoch_metrics,
        )

    def test_fitting_tensor_generator_multi_input(self):
        train_generator = some_data_tensor_generator_multi_input(
            ModelMultiInputTest.batch_size)
        valid_generator = some_data_tensor_generator_multi_input(
            ModelMultiInputTest.batch_size)
        logs = self.model.fit_generator(
            train_generator,
            valid_generator,
            epochs=ModelMultiInputTest.epochs,
            steps_per_epoch=ModelMultiInputTest.steps_per_epoch,
            validation_steps=ModelMultiInputTest.steps_per_epoch,
            callbacks=[self.mock_callback],
        )
        params = {
            'epochs': ModelMultiInputTest.epochs,
            'steps': ModelMultiInputTest.steps_per_epoch,
            'valid_steps': ModelMultiInputTest.steps_per_epoch,
        }
        self._test_callbacks_train(
            params, logs, valid_steps=ModelMultiInputTest.steps_per_epoch)

    def test_fitting_with_tensor_multi_input(self):
        train_real_steps_per_epoch = 30
        train_batch_size = ModelMultiInputTest.batch_size
        train_final_batch_missing_samples = 7
        train_size = train_real_steps_per_epoch * train_batch_size - train_final_batch_missing_samples
        train_x = (torch.rand(train_size, 1), torch.rand(train_size, 1))
        train_y = torch.rand(train_size, 1)

        valid_real_steps_per_epoch = 10
        # valid_batch_size will be the same as train_batch_size in the fit method.
        valid_batch_size = train_batch_size
        valid_final_batch_missing_samples = 3
        valid_size = valid_real_steps_per_epoch * valid_batch_size - valid_final_batch_missing_samples
        valid_x = (torch.rand(valid_size, 1), torch.rand(valid_size, 1))
        valid_y = torch.rand(valid_size, 1)

        logs = self.model.fit(
            train_x,
            train_y,
            validation_data=(valid_x, valid_y),
            epochs=ModelMultiInputTest.epochs,
            batch_size=train_batch_size,
            steps_per_epoch=None,
            validation_steps=None,
            callbacks=[self.mock_callback],
        )
        params = {
            'epochs': ModelMultiInputTest.epochs,
            'steps': train_real_steps_per_epoch,
            'valid_steps': valid_real_steps_per_epoch,
        }
        self._test_callbacks_train(params, logs)

    def test_tensor_train_on_batch_multi_input(self):
        x1 = torch.rand(ModelMultiInputTest.batch_size, 1)
        x2 = torch.rand(ModelMultiInputTest.batch_size, 1)
        y = torch.rand(ModelMultiInputTest.batch_size, 1)
        loss = self.model.train_on_batch((x1, x2), y)
        self.assertEqual(type(loss), float)

    def test_train_on_batch_with_pred_multi_input(self):
        x1 = torch.rand(ModelMultiInputTest.batch_size, 1)
        x2 = torch.rand(ModelMultiInputTest.batch_size, 1)
        y = torch.rand(ModelMultiInputTest.batch_size, 1)
        loss, pred_y = self.model.train_on_batch((x1, x2), y, return_pred=True)
        self.assertEqual(type(loss), float)
        self.assertEqual(pred_y.shape, (ModelMultiInputTest.batch_size, 1))

    def test_ndarray_train_on_batch_multi_input(self):
        x1 = np.random.rand(ModelMultiInputTest.batch_size,
                            1).astype(np.float32)
        x2 = np.random.rand(ModelMultiInputTest.batch_size,
                            1).astype(np.float32)
        y = np.random.rand(ModelMultiInputTest.batch_size,
                           1).astype(np.float32)
        loss = self.model.train_on_batch((x1, x2), y)
        self.assertEqual(type(loss), float)

    def test_evaluate_multi_input(self):
        x = (
            torch.rand(ModelMultiInputTest.evaluate_dataset_len, 1),
            torch.rand(ModelMultiInputTest.evaluate_dataset_len, 1),
        )
        y = torch.rand(ModelMultiInputTest.evaluate_dataset_len, 1)
        loss = self.model.evaluate(x,
                                   y,
                                   batch_size=ModelMultiInputTest.batch_size)
        self.assertEqual(type(loss), float)

    def test_evaluate_with_pred_multi_input(self):
        x = (
            torch.rand(ModelMultiInputTest.evaluate_dataset_len, 1),
            torch.rand(ModelMultiInputTest.evaluate_dataset_len, 1),
        )
        y = torch.rand(ModelMultiInputTest.evaluate_dataset_len, 1)
        # We also test the unpacking.
        _, pred_y = self.model.evaluate(
            x, y, batch_size=ModelMultiInputTest.batch_size, return_pred=True)
        self.assertEqual(pred_y.shape,
                         (ModelMultiInputTest.evaluate_dataset_len, 1))

    def test_evaluate_with_np_array_multi_input(self):
        x1 = np.random.rand(ModelMultiInputTest.evaluate_dataset_len,
                            1).astype(np.float32)
        x2 = np.random.rand(ModelMultiInputTest.evaluate_dataset_len,
                            1).astype(np.float32)
        x = (x1, x2)
        y = np.random.rand(ModelMultiInputTest.evaluate_dataset_len,
                           1).astype(np.float32)
        loss, pred_y = self.model.evaluate(
            x, y, batch_size=ModelMultiInputTest.batch_size, return_pred=True)
        self.assertEqual(type(loss), float)
        self.assertEqual(pred_y.shape,
                         (ModelMultiInputTest.evaluate_dataset_len, 1))

    def test_evaluate_data_loader_multi_input(self):
        x1 = torch.rand(ModelMultiInputTest.evaluate_dataset_len, 1)
        x2 = torch.rand(ModelMultiInputTest.evaluate_dataset_len, 1)
        y = torch.rand(ModelMultiInputTest.evaluate_dataset_len, 1)
        dataset = TensorDataset((x1, x2), y)
        generator = DataLoader(dataset, ModelMultiInputTest.batch_size)
        loss, pred_y = self.model.evaluate_generator(generator,
                                                     return_pred=True)
        self.assertEqual(type(loss), float)
        self.assertEqual(pred_y.shape,
                         (ModelMultiInputTest.evaluate_dataset_len, 1))

    def test_evaluate_generator_multi_input(self):
        num_steps = 10
        generator = some_data_tensor_generator_multi_input(
            ModelMultiInputTest.batch_size)
        loss, pred_y = self.model.evaluate_generator(generator,
                                                     steps=num_steps,
                                                     return_pred=True)
        self.assertEqual(type(loss), float)
        self.assertEqual(pred_y.shape,
                         (num_steps * ModelMultiInputTest.batch_size, 1))

    def test_tensor_evaluate_on_batch_multi_input(self):
        x1 = torch.rand(ModelMultiInputTest.batch_size, 1)
        x2 = torch.rand(ModelMultiInputTest.batch_size, 1)
        y = torch.rand(ModelMultiInputTest.batch_size, 1)
        loss = self.model.evaluate_on_batch((x1, x2), y)
        self.assertEqual(type(loss), float)

    def test_predict_multi_input(self):
        x = (
            torch.rand(ModelMultiInputTest.evaluate_dataset_len, 1),
            torch.rand(ModelMultiInputTest.evaluate_dataset_len, 1),
        )
        pred_y = self.model.predict(x,
                                    batch_size=ModelMultiInputTest.batch_size)
        self.assertEqual(pred_y.shape,
                         (ModelMultiInputTest.evaluate_dataset_len, 1))

    def test_predict_with_np_array_multi_input(self):
        x1 = np.random.rand(ModelMultiInputTest.evaluate_dataset_len,
                            1).astype(np.float32)
        x2 = np.random.rand(ModelMultiInputTest.evaluate_dataset_len,
                            1).astype(np.float32)
        x = (x1, x2)
        pred_y = self.model.predict(x,
                                    batch_size=ModelMultiInputTest.batch_size)
        self.assertEqual(pred_y.shape,
                         (ModelMultiInputTest.evaluate_dataset_len, 1))

    def test_predict_generator_multi_input(self):
        num_steps = 10
        generator = some_data_tensor_generator_multi_input(
            ModelMultiInputTest.batch_size)
        generator = (x for x, _ in generator)
        pred_y = self.model.predict_generator(generator, steps=num_steps)
        self.assertEqual(type(pred_y), np.ndarray)
        self.assertEqual(pred_y.shape,
                         (num_steps * ModelMultiInputTest.batch_size, 1))

    def test_tensor_predict_on_batch_multi_input(self):
        x1 = torch.rand(ModelMultiInputTest.batch_size, 1)
        x2 = torch.rand(ModelMultiInputTest.batch_size, 1)
        pred_y = self.model.predict_on_batch((x1, x2))
        self.assertEqual(pred_y.shape, (ModelMultiInputTest.batch_size, 1))