Ejemplo n.º 1
0
    def setUp(self):
        """Build a tiny linear network and a Model wired with batch and epoch metrics."""
        super().setUp()
        torch.manual_seed(42)  # deterministic weights across runs
        self.pytorch_network = nn.Linear(1, 1)
        self.loss_function = nn.MSELoss()
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(), lr=1e-3)

        # Metric callables alongside the names and values the history is expected to report.
        self.batch_metrics = [
            some_batch_metric_1,
            ('custom_name', some_batch_metric_2),
            repeat_batch_metric,
            repeat_batch_metric,
        ]
        self.batch_metrics_names = [
            'some_batch_metric_1',
            'custom_name',
            'repeat_batch_metric1',
            'repeat_batch_metric2',
        ]
        self.batch_metrics_values = [
            some_metric_1_value,
            some_metric_2_value,
            repeat_batch_metric_value,
            repeat_batch_metric_value,
        ]
        self.epoch_metrics = [SomeConstantEpochMetric()]
        self.epoch_metrics_names = ['some_constant_epoch_metric']
        self.epoch_metrics_values = [some_constant_epoch_metric_value]

        self.model = Model(self.pytorch_network, self.optimizer, self.loss_function,
                           batch_metrics=self.batch_metrics,
                           epoch_metrics=self.epoch_metrics)
Ejemplo n.º 2
0
    def setUp(self):
        """Create single- and multi-input Models sharing one optimizer and loss."""
        torch.manual_seed(42)  # reproducible initial weights
        self.pytorch_module = nn.Linear(1, 1)
        self.loss_function = nn.MSELoss()
        self.optimizer = torch.optim.SGD(self.pytorch_module.parameters(),
                                         lr=1e-3)

        self.metrics = [some_metric_1, some_metric_2]
        self.metrics_names = ['some_metric_1', 'some_metric_2']
        self.metrics_values = [some_metric_1_value, some_metric_2_value]

        self.model = Model(self.pytorch_module, self.optimizer, self.loss_function,
                           metrics=self.metrics)
        self.multi_input_model = Model(MultiInputModel(), self.optimizer,
                                       self.loss_function, metrics=self.metrics)
        # Generic mock used to observe callback invocations.
        self.mock_callback = MagicMock()
Ejemplo n.º 3
0
 def test_evaluate_with_no_metric(self):
     """evaluate() without any metric returns just the loss as a plain float."""
     self.model = Model(self.pytorch_module, self.optimizer, self.loss_function)
     x, y = (torch.rand(ModelTest.evaluate_dataset_len, 1) for _ in range(2))
     loss = self.model.evaluate(x, y, batch_size=ModelTest.batch_size)
     self.assertEqual(type(loss), float)
Ejemplo n.º 4
0
    def test_epoch_metrics_with_name_with_multiple_names_returned_by_tensor(self):
        """A metric whose __name__ is a list of names yields one history entry per name."""
        metric_names = self.metric_names

        class EpochMetricWithName(ConstEpochMetric):
            # The metric advertises several names at once.
            __name__ = metric_names

        epoch_metric = EpochMetricWithName(torch.tensor(self.metric_values))
        model = Model(self.pytorch_network,
                      self.optimizer,
                      self.loss_function,
                      epoch_metrics=[epoch_metric])
        self._test_history(model, self.metric_names, self.metric_values)
Ejemplo n.º 5
0
 def test_repeated_epoch_metrics_handling(self):
     """Two epoch metrics with the same name get numbered suffixes in the history."""
     model = Model(self.pytorch_network, self.optimizer, self.loss_function,
                   epoch_metrics=[SomeMetricName(1), SomeMetricName(2)])
     self._test_history(model, ['some_metric_name1', 'some_metric_name2'], [1, 2])
Ejemplo n.º 6
0
 def test_batch_metrics_with_multiple_names_returned_by_list(self):
     """A batch metric returning a list maps positionally onto the supplied names."""
     batch_metric = get_const_batch_metric(list(self.metric_values))
     named_metric = (self.metric_names, batch_metric)
     model = Model(self.pytorch_network, self.optimizer, self.loss_function,
                   batch_metrics=[named_metric])
     self._test_history(model, self.metric_names, self.metric_values)
Ejemplo n.º 7
0
 def test_epoch_metrics_with_multiple_names_returned_by_tuple(self):
     """An epoch metric returning a tuple maps positionally onto the supplied names."""
     epoch_metric = ConstEpochMetric(tuple(self.metric_values))
     named_metric = (self.metric_names, epoch_metric)
     model = Model(self.pytorch_network, self.optimizer, self.loss_function,
                   epoch_metrics=[named_metric])
     self._test_history(model, self.metric_names, self.metric_values)
Ejemplo n.º 8
0
    def train(self,
              features: np.ndarray,
              labels: np.ndarray,
              validation_features: Optional[np.ndarray] = None,
              validation_labels: Optional[np.ndarray] = None,
              epochs: int = 50,
              shuffle: bool = True,
              batch_size: int = 32) -> None:
        """
        Trains the model by wrapping ``self.model`` in a pytoune ``Model``
        and fitting it on the given arrays.

        :param features: consists of padded training segments and dummy tokens
        :param labels: consists of labels to predict
        :param validation_features: consists of padded training segments and dummy tokens
        :param validation_labels: consists of labels to predict
        :param epochs: number of passes over the training data
        :param shuffle: NOTE(review): currently unused -- it is never forwarded
            to ``fit()``; confirm whether it should be passed through or removed
        :param batch_size: number of samples per training batch
        """

        # Wrap pytorch with pytoune (very keras-like) for easier training
        # TODO define adam manually with different learning rates
        net = Model(self.model,
                    optimizer='adam',
                    loss_function='cross_entropy',
                    metrics=['accuracy'])
        # Move the wrapped model to the GPU when one is available.
        if self.gpu_available:
            net = net.cuda()
        net.fit(features,
                labels,
                validation_x=validation_features,
                validation_y=validation_labels,
                batch_size=batch_size,
                epochs=epochs)
Ejemplo n.º 9
0
    def __init__(self,
                 directory,
                 network,
                 *,
                 device=None,
                 logging=True,
                 optimizer='sgd',
                 loss_function=None,
                 batch_metrics=None,
                 epoch_metrics=None,
                 monitor_metric=None,
                 monitor_mode=None,
                 task=None):
        """Set up the experiment directory, resolve defaults and build the inner Model."""
        self.directory = directory
        self.logging = logging

        # Only classification-like ('classif*') or regression-like ('reg*')
        # tasks are recognized.
        if task is not None and not task.startswith(('classif', 'reg')):
            raise ValueError("Invalid task '%s'" % task)

        batch_metrics = batch_metrics if batch_metrics is not None else []
        epoch_metrics = epoch_metrics if epoch_metrics is not None else []

        # Resolve task-dependent defaults for the loss, metrics and monitor.
        loss_function = self._get_loss_function(loss_function, network, task)
        batch_metrics = self._get_batch_metrics(batch_metrics, network, task)
        epoch_metrics = self._get_epoch_metrics(epoch_metrics, network, task)
        self._set_monitor(monitor_metric, monitor_mode, task)

        self.model = Model(network, optimizer, loss_function,
                           batch_metrics=batch_metrics,
                           epoch_metrics=epoch_metrics)
        if device is not None:
            self.model.to(device)

        # Every artifact the experiment writes lives under the experiment directory.
        path_attributes = [
            ('best_checkpoint_filename', Experiment.BEST_CHECKPOINT_FILENAME),
            ('best_checkpoint_tmp_filename', Experiment.BEST_CHECKPOINT_TMP_FILENAME),
            ('model_checkpoint_filename', Experiment.MODEL_CHECKPOINT_FILENAME),
            ('model_checkpoint_tmp_filename', Experiment.MODEL_CHECKPOINT_TMP_FILENAME),
            ('optimizer_checkpoint_filename', Experiment.OPTIMIZER_CHECKPOINT_FILENAME),
            ('optimizer_checkpoint_tmp_filename', Experiment.OPTIMIZER_CHECKPOINT_TMP_FILENAME),
            ('log_filename', Experiment.LOG_FILENAME),
            ('log_tmp_filename', Experiment.LOG_TMP_FILENAME),
            ('tensorboard_directory', Experiment.TENSORBOARD_DIRECTORY),
            ('epoch_filename', Experiment.EPOCH_FILENAME),
            ('epoch_tmp_filename', Experiment.EPOCH_TMP_FILENAME),
            ('lr_scheduler_filename', Experiment.LR_SCHEDULER_FILENAME),
            ('lr_scheduler_tmp_filename', Experiment.LR_SCHEDULER_TMP_FILENAME),
            ('test_log_filename', Experiment.TEST_LOG_FILENAME),
        ]
        for attribute, filename in path_attributes:
            setattr(self, attribute, self.get_path(filename))
Ejemplo n.º 10
0
 def setUp(self):
     """Prepare a minimal Model and a temp directory for checkpoint files."""
     torch.manual_seed(42)  # deterministic weights
     self.pytorch_network = nn.Linear(1, 1)
     self.loss_function = nn.MSELoss()
     self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(),
                                      lr=1e-3)
     self.model = Model(self.pytorch_network, self.optimizer,
                        self.loss_function)
     self.temp_dir_obj = TemporaryDirectory()
     # The '{epoch}' placeholder is filled in by the checkpoint callback.
     self.checkpoint_filename = os.path.join(self.temp_dir_obj.name,
                                             'my_checkpoint_{epoch}.ckpt')
Ejemplo n.º 11
0
 def setUp(self):
     """Minimal fixture: a one-weight linear network wrapped in a Model."""
     torch.manual_seed(42)  # reproducible initialization
     self.pytorch_module = nn.Linear(1, 1)
     self.loss_function = nn.MSELoss()
     self.optimizer = torch.optim.SGD(self.pytorch_module.parameters(), lr=1e-3)
     self.model = Model(self.pytorch_module, self.optimizer, self.loss_function)
Ejemplo n.º 12
0
 def test_evaluate_with_only_one_metric(self):
     """With a single batch metric, evaluate() returns (loss, metric) as floats."""
     model = Model(self.pytorch_network,
                   self.optimizer,
                   self.loss_function,
                   batch_metrics=self.batch_metrics[:1])
     x = torch.rand(ModelTest.evaluate_dataset_len, 1)
     y = torch.rand(ModelTest.evaluate_dataset_len, 1)
     loss, first_metric = model.evaluate(x, y, batch_size=ModelTest.batch_size)
     for value in (loss, first_metric):
         self.assertEqual(type(value), float)
     self.assertEqual(first_metric, some_metric_1_value)
Ejemplo n.º 13
0
 def test_batch_metrics_with_multiple_names_returned_by_dict(self):
     """A batch metric returning a dict reports each key as its own history column."""
     metric_dict = dict(zip(self.metric_names, self.metric_values))
     batch_metric = get_const_batch_metric(metric_dict)
     model = Model(self.pytorch_network, self.optimizer, self.loss_function,
                   batch_metrics=[(self.metric_names, batch_metric)])
     self._test_history(model, metric_dict.keys(), metric_dict.values())
Ejemplo n.º 14
0
 def setUp(self):
     """Build a model plus small train/validation data generators."""
     torch.manual_seed(42)  # deterministic weights
     self.pytorch_module = nn.Linear(1, 1)
     self.loss_function = nn.MSELoss()
     self.optimizer = torch.optim.SGD(self.pytorch_module.parameters(),
                                      lr=1e-3)
     self.model = Model(self.pytorch_module, self.optimizer,
                        self.loss_function)
     self.train_gen = some_data_generator(20)
     self.valid_gen = some_data_generator(20)
Ejemplo n.º 15
0
 def test_epoch_metrics_with_multiple_names_returned_by_tensor_on_gpu(self):
     """Multi-valued epoch metrics split into named entries even for CUDA tensors."""
     with torch.cuda.device(MetricsModelIntegrationTest.cuda_device):
         values_on_gpu = torch.tensor(self.metric_values).cuda()
         epoch_metric = ConstEpochMetric(values_on_gpu)
         model = Model(self.pytorch_network, self.optimizer, self.loss_function,
                       epoch_metrics=[(self.metric_names, epoch_metric)])
         model.cuda()
         self._test_history(model, self.metric_names, self.metric_values)
Ejemplo n.º 16
0
 def setUp(self):
     """Model fixture plus a temp directory holding the CSV log file."""
     torch.manual_seed(42)  # reproducible run
     self.pytorch_module = nn.Linear(1, 1)
     self.loss_function = nn.MSELoss()
     self.optimizer = torch.optim.SGD(self.pytorch_module.parameters(), lr=CSVLoggerTest.lr)
     self.model = Model(self.pytorch_module, self.optimizer, self.loss_function)
     self.temp_dir_obj = TemporaryDirectory()
     self.csv_filename = os.path.join(self.temp_dir_obj.name, 'my_log.csv')
Ejemplo n.º 17
0
 def setUp(self):
     """Wire a model with a delayed mock callback and the expected log dicts."""
     torch.manual_seed(42)  # deterministic weights
     self.pytorch_network = nn.Linear(1, 1)
     self.loss_function = nn.MSELoss()
     self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(),
                                      lr=1e-3)
     self.model = Model(self.pytorch_network, self.optimizer,
                        self.loss_function)
     self.mock_callback = MagicMock(spec=Callback)
     self.delay_callback = DelayCallback(self.mock_callback)
     # ANY lets assertions ignore the exact loss/time values.
     self.train_dict = {'loss': ANY, 'time': ANY}
     self.log_dict = {'loss': ANY, 'val_loss': ANY, 'time': ANY}
Ejemplo n.º 18
0
def main(rnn_type, n_layers, dataset, embedding, device, save_path):
    """Train an RNN text classifier with pretrained embeddings and report test metrics.

    :param rnn_type: RNN cell type passed to the RNN constructor (e.g. 'gru')
    :param n_layers: number of stacked RNN layers
    :param dataset: dataset identifier understood by dataset_factory
    :param embedding: embedding spec string whose suffix encodes the dimension,
        e.g. '...100d' -> 100
    :param device: device the model is moved to before training
    :param save_path: root directory where the run's artifacts are saved
    """
    train_iter, valid_iter, test_iter = dataset_factory(dataset, embedding=embedding)
    embedding_dim = int(embedding.split(".")[-1][:-1])  # strip trailing 'd'
    save_path = Path(save_path) / f"{rnn_type}_{n_layers}layer_{embedding_dim}"
    save_path.mkdir(parents=True, exist_ok=True)
    kwargs = dict(
        vocab_size=len(TEXT.vocab),
        embedding_dim=embedding_dim,
        hidden_dim=256,
        output_dim=1,
        n_layers=n_layers,
        dropout=0.5,
        pad_idx=TEXT.vocab.stoi[TEXT.pad_token],
        # Bug fix: was hard-coded to "gru", silently ignoring the rnn_type
        # argument even though the save path used it.
        rnn_type=rnn_type,
    )
    with open(save_path / "kwargs.json", "w") as kwargs_file:
        json.dump(kwargs, kwargs_file)

    # Initialize the embedding layer from the pretrained vectors, zeroing the
    # <unk> and <pad> rows so they carry no pretrained signal.
    pretrained_embeddings = TEXT.vocab.vectors
    network = RNN(**kwargs)
    network.embedding.weight.data.copy_(pretrained_embeddings)
    unk_idx = TEXT.vocab.stoi[TEXT.unk_token]
    pad_idx = TEXT.vocab.stoi[TEXT.pad_token]
    network.embedding.weight.data[unk_idx] = torch.zeros(embedding_dim)
    network.embedding.weight.data[pad_idx] = torch.zeros(embedding_dim)

    optimizer = torch.optim.Adam(network.parameters())
    model = Model(
        network=network,
        optimizer=optimizer,
        loss_function=custom_loss,
        batch_metrics=[acc],
    )
    model.to(device)

    # Keep only the best weights; restore them before evaluation.
    model.fit_generator(
        train_generator=train_iter,
        valid_generator=valid_iter,
        epochs=10,
        callbacks=[
            ModelCheckpoint(
                filename=str(save_path / "model.pkl"),
                save_best_only=True,
                restore_best=True,
            )
        ],
    )
    print(f"Model saved to {save_path}")
    # Bug fix: removed leftover debugger breakpoint __import__("pudb").set_trace()
    test_loss, test_acc, y_pred, y_true = model.evaluate_generator(
        generator=test_iter, return_pred=True, return_ground_truth=True
    )
    print(f"Test Loss: {test_loss:.4f}, Test Binary Accuracy: {test_acc:.4f}")
Ejemplo n.º 19
0
 def setUp(self):
     """Two-layer network fixture with a per-layer CSV filename template."""
     torch.manual_seed(42)  # deterministic weights
     self.pytorch_network = nn.Sequential(nn.Linear(1, 2), nn.Linear(2, 1))
     self.loss_function = nn.MSELoss()
     self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(), lr=1e-3)
     self.model = Model(self.pytorch_network, self.optimizer, self.loss_function)
     self.temp_dir_obj = TemporaryDirectory()
     # '{}' is filled in with the layer index by the logger.
     self.csv_filename = os.path.join(self.temp_dir_obj.name, 'layer_{}.csv')
Ejemplo n.º 20
0
 def setUp(self):
     """Model fixture plus a SummaryWriter whose add_scalars is mocked."""
     torch.manual_seed(42)  # reproducible run
     self.pytorch_module = nn.Linear(1, 1)
     self.loss_function = nn.MSELoss()
     self.optimizer = torch.optim.SGD(self.pytorch_module.parameters(), lr=BaseTensorBoardLoggerTest.lr)
     self.model = Model(self.pytorch_module, self.optimizer, self.loss_function)
     self.temp_dir_obj = TemporaryDirectory()
     self.writer = self.SummaryWriter(self.temp_dir_obj.name)
     # Capture logged scalars without touching disk.
     self.writer.add_scalars = MagicMock()
Ejemplo n.º 21
0
    def setUp(self):
        """Dict-in/dict-out network fixture sharing the base class's metrics."""
        super().setUp()
        torch.manual_seed(42)  # deterministic weights
        self.pytorch_network = DictIOModel(['x1', 'x2'], ['y1', 'y2'])
        self.loss_function = dict_mse_loss
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(),
                                         lr=1e-3)
        self.model = Model(self.pytorch_network, self.optimizer, self.loss_function,
                           batch_metrics=self.batch_metrics,
                           epoch_metrics=self.epoch_metrics)
Ejemplo n.º 22
0
    def setUp(self):
        """Multi-output network fixture whose loss sums per-output MSE."""
        super().setUp()
        torch.manual_seed(42)  # deterministic weights
        self.pytorch_network = MultiIOModel(num_input=1, num_output=2)
        self.loss_function = nn.MSELoss()
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(), lr=1e-3)

        def multi_output_loss(y_pred, y_true):
            # Sum the MSE of each of the two outputs.
            return self.loss_function(y_pred[0], y_true[0]) + self.loss_function(y_pred[1], y_true[1])

        self.model = Model(self.pytorch_network,
                           self.optimizer,
                           multi_output_loss,
                           batch_metrics=self.batch_metrics,
                           epoch_metrics=self.epoch_metrics)
Ejemplo n.º 23
0
            def __init__(self):
                """Pre-train spectral and image sub-networks, then freeze them
                as feature extractors feeding a concatenation head.
                """
                # global spectral_accuracy, image_accuracy
                super().__init__()
                # Train the spectral branch on its own first.
                # NOTE(review): `nn` here appears to be a network factory from
                # the enclosing scope, not torch.nn -- confirm.
                spectral_net = nn(np.shape(X_spectral_train)[-1], spectral_layers, spectral_dropout, material_count)
                opt = torch.optim.Adam(spectral_net.parameters(), lr=lr)
                spectral_model = Model(spectral_net, opt, 'cross_entropy', batch_metrics=['accuracy'])
                spectral_model.fit(X_spectral_train, y_train, epochs=spectral_epochs, batch_size=batch_size, verbose=False)

                # Train the image branch the same way, with its own optimizer.
                image_net = nn(np.shape(X_image_train)[-1], image_layers, image_dropout, material_count)
                opt = torch.optim.Adam(image_net.parameters(), lr=lr)
                image_model = Model(image_net, opt, 'cross_entropy', batch_metrics=['accuracy'])
                image_model.fit(X_image_train, y_train, epochs=image_epochs, batch_size=batch_size, verbose=False)

                # Disable dropout, remove last layer and freeze network
                self.trained_spectral_model = torch.nn.Sequential(*(list(spectral_net.children())[:-1]))
                for p in self.trained_spectral_model.parameters():
                    p.requires_grad = False
                self.trained_image_model = torch.nn.Sequential(*(list(image_net.children())[:-1]))
                for p in self.trained_image_model.parameters():
                    p.requires_grad = False

                # Head that consumes the concatenated frozen features.
                self.concat_net = nn(spectral_layers[-1] + image_layers[-1], layers, dropout, material_count, batchnorm=False)
Ejemplo n.º 24
0
    def setUp(self):
        """Single-input/single-output MultiIOModel fixture with shared metrics."""
        super().setUp()
        torch.manual_seed(42)  # deterministic weights
        self.pytorch_network = MultiIOModel(num_input=1, num_output=1)
        self.loss_function = nn.MSELoss()
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(), lr=1e-3)
        self.model = Model(self.pytorch_network, self.optimizer, self.loss_function,
                           batch_metrics=self.batch_metrics,
                           epoch_metrics=self.epoch_metrics)
Ejemplo n.º 25
0
    def test_correct_optim_calls_1_batch_per_step(self):
        """With batches_per_step=1, a single batch triggers exactly one
        optimizer step and one gradient reset."""
        train_generator = some_data_tensor_generator(ModelTest.batch_size)
        mocked_optimizer = some_mocked_optimizer()
        mocked_optim_model = Model(self.pytorch_network, mocked_optimizer,
                                   self.loss_function,
                                   batch_metrics=self.batch_metrics,
                                   epoch_metrics=self.epoch_metrics)
        mocked_optim_model.fit_generator(train_generator, None,
                                         epochs=1,
                                         steps_per_epoch=1,
                                         batches_per_step=1)

        self.assertEqual(1, mocked_optimizer.step.call_count)
        self.assertEqual(1, mocked_optimizer.zero_grad.call_count)
Ejemplo n.º 26
0
    def test_disable_batch_size_warning(self):
        """Batch-size inference warnings fire once per batch, and are fully
        silenced when warning_settings['batch_size'] is set to 'ignore'.
        """
        import warnings

        def tuple_generator(batch_size):
            # Yields ((x1, x2), (y1, y2)) batches of random data forever.
            while True:
                x1 = torch.rand(batch_size, 1)
                x2 = torch.rand(batch_size, 1)
                y1 = torch.rand(batch_size, 1)
                y2 = torch.rand(batch_size, 1)
                yield (x1, x2), (y1, y2)

        class TupleModule(nn.Module):
            def __init__(self):
                super().__init__()
                self.l1 = nn.Linear(1, 1)
                self.l2 = nn.Linear(1, 1)

            def forward(self, x):
                # pylint: disable=arguments-differ
                x1, x2 = x
                return self.l1(x1), self.l2(x2)

        def loss_function(y_pred, y_true):
            return F.mse_loss(y_pred[0], y_true[0]) + F.mse_loss(
                y_pred[1], y_true[1])

        pytorch_module = TupleModule()
        optimizer = torch.optim.SGD(pytorch_module.parameters(), lr=1e-3)
        model = Model(pytorch_module, optimizer, loss_function)

        train_generator = tuple_generator(ModelTest.batch_size)
        valid_generator = tuple_generator(ModelTest.batch_size)
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            model.fit_generator(train_generator,
                                valid_generator,
                                epochs=ModelTest.epochs,
                                steps_per_epoch=ModelTest.steps_per_epoch,
                                validation_steps=ModelTest.steps_per_epoch)
            # One warning per training batch and per validation batch.
            num_warnings = ModelTest.steps_per_epoch * 2 * ModelTest.epochs
            self.assertEqual(len(w), num_warnings)

        # Bug fix: the original mutated the global warning_settings without
        # restoring it, leaking the 'ignore' setting into every later test.
        had_previous_setting = 'batch_size' in warning_settings
        previous_setting = warning_settings.get('batch_size')
        try:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                warning_settings['batch_size'] = 'ignore'
                model.fit_generator(train_generator,
                                    valid_generator,
                                    epochs=ModelTest.epochs,
                                    steps_per_epoch=ModelTest.steps_per_epoch,
                                    validation_steps=ModelTest.steps_per_epoch)
                self.assertEqual(len(w), 0)
        finally:
            if had_previous_setting:
                warning_settings['batch_size'] = previous_setting
            else:
                del warning_settings['batch_size']
Ejemplo n.º 27
0
 def test_epoch_metrics_integration(self):
     """An epoch metric's value shows up in both train and validation logs."""
     model = Model(self.pytorch_network, self.optimizer, self.loss_function,
                   epoch_metrics=[SomeEpochMetric()])
     train_generator = some_data_tensor_generator(ModelTest.batch_size)
     valid_generator = some_data_tensor_generator(ModelTest.batch_size)
     logs = model.fit_generator(train_generator,
                                valid_generator,
                                epochs=1,
                                steps_per_epoch=ModelTest.steps_per_epoch,
                                validation_steps=ModelTest.steps_per_epoch)
     expected_value = 5
     last_log = logs[-1]
     self.assertEqual(last_log['val_some_epoch_metric'], expected_value)
     self.assertEqual(last_log['some_epoch_metric'], expected_value)
Ejemplo n.º 28
0
    def __init__(self,
                 directory,
                 module,
                 *,
                 device=None,
                 logging=True,
                 optimizer='sgd',
                 loss_function=None,
                 metrics=None,
                 monitor_metric=None,
                 monitor_mode=None,
                 type=None):
        """Set up the experiment directory and wrap the module in a Model.

        :param directory: root directory where all experiment artifacts are written
        :param module: the PyTorch module to train
        :param device: optional device the inner Model is moved to
        :param logging: whether logging is enabled
        :param optimizer: optimizer name or instance (default 'sgd')
        :param loss_function: loss, or None to derive a default from ``type``
        :param metrics: list of metrics (default: empty list)
        :param monitor_metric: metric monitored for checkpointing
        :param monitor_mode: mode ('min'/'max') of the monitored metric
        :param type: task type; must start with 'classif' or 'reg' when given.
            NOTE(review): shadows the builtin ``type``; kept as-is for keyword
            backward compatibility.
        :raises ValueError: if ``type`` is neither classification- nor
            regression-like
        """
        self.directory = directory
        self.logging = logging

        if type is not None and not type.startswith(
                'classif') and not type.startswith('reg'):
            raise ValueError("Invalid type '%s'" % type)

        # Bug fix: the signature used a mutable default argument (metrics=[]);
        # default to a fresh list here instead.
        metrics = [] if metrics is None else metrics

        loss_function = self._get_loss_function(loss_function, module, type)
        metrics = self._get_metrics(metrics, module, type)
        self._set_monitor(monitor_metric, monitor_mode, type)

        self.model = Model(module, optimizer, loss_function, metrics=metrics)
        if device is not None:
            self.model.to(device)

        def join_dir(filename):
            # Every artifact path lives under the experiment directory.
            return os.path.join(directory, filename)

        self.best_checkpoint_filename = join_dir(
            Experiment.BEST_CHECKPOINT_FILENAME)
        self.best_checkpoint_tmp_filename = join_dir(
            Experiment.BEST_CHECKPOINT_TMP_FILENAME)
        self.model_checkpoint_filename = join_dir(
            Experiment.MODEL_CHECKPOINT_FILENAME)
        self.model_checkpoint_tmp_filename = join_dir(
            Experiment.MODEL_CHECKPOINT_TMP_FILENAME)
        self.optimizer_checkpoint_filename = join_dir(
            Experiment.OPTIMIZER_CHECKPOINT_FILENAME)
        self.optimizer_checkpoint_tmp_filename = join_dir(
            Experiment.OPTIMIZER_CHECKPOINT_TMP_FILENAME)
        self.log_filename = join_dir(Experiment.LOG_FILENAME)
        self.tensorboard_directory = join_dir(Experiment.TENSORBOARD_DIRECTORY)
        self.epoch_filename = join_dir(Experiment.EPOCH_FILENAME)
        self.epoch_tmp_filename = join_dir(Experiment.EPOCH_TMP_FILENAME)
        self.lr_scheduler_filename = join_dir(Experiment.LR_SCHEDULER_FILENAME)
        self.lr_scheduler_tmp_filename = join_dir(
            Experiment.LR_SCHEDULER_TMP_FILENAME)
        self.test_log_filename = join_dir(Experiment.TEST_LOG_FILENAME)
Ejemplo n.º 29
0
    def setUp(self):
        """Dict-output network fixture whose loss sums MSE over both outputs."""
        super().setUp()
        torch.manual_seed(42)  # deterministic weights
        self.pytorch_network = DictOutputModel()
        self.loss_function = nn.MSELoss()
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(),
                                         lr=1e-3)

        def dict_output_loss(y_p, y_t):
            # 'out1'/'out2' pair up with the first/second targets.
            return self.loss_function(y_p['out1'], y_t[0]) + self.loss_function(y_p['out2'], y_t[1])

        self.model = Model(self.pytorch_network,
                           self.optimizer,
                           dict_output_loss,
                           batch_metrics=self.batch_metrics,
                           epoch_metrics=self.epoch_metrics)
Ejemplo n.º 30
0
 def test_metrics_integration(self):
     """A batch metric (F.mse_loss) trains alongside the loss and evaluates to a float."""
     num_steps = 10
     model = Model(self.pytorch_network, self.optimizer, self.loss_function,
                   batch_metrics=[F.mse_loss])
     train_generator = some_data_tensor_generator(ModelTest.batch_size)
     valid_generator = some_data_tensor_generator(ModelTest.batch_size)
     model.fit_generator(train_generator,
                         valid_generator,
                         epochs=ModelTest.epochs,
                         steps_per_epoch=ModelTest.steps_per_epoch,
                         validation_steps=ModelTest.steps_per_epoch,
                         callbacks=[self.mock_callback])
     evaluation_generator = some_data_tensor_generator(ModelTest.batch_size)
     loss, mse = model.evaluate_generator(evaluation_generator, steps=num_steps)
     for value in (loss, mse):
         self.assertEqual(type(value), float)