Example #1
def test_logging_append(self):
     train_gen = some_data_generator(self.batch_size)
     valid_gen = some_data_generator(self.batch_size)
     logger = self.CSVGradientLogger(self.csv_filename)
     memgrad = MemoryGradientLogger()
     self.model.fit_generator(train_gen,
                              valid_gen,
                              epochs=self.num_epochs,
                              steps_per_epoch=5,
                              callbacks=[memgrad, logger])
     # Resume with append=True; the new rows should extend the existing CSV.
     memgrad2 = MemoryGradientLogger()
     logger = self.CSVGradientLogger(self.csv_filename, append=True)
     self.model.fit_generator(
         train_gen,
         valid_gen,
         epochs=20,
         steps_per_epoch=5,
         initial_epoch=self.num_epochs,
         callbacks=[memgrad2, logger],
     )
     # Merge the two in-memory histories to compare against the appended CSV.
     history = {
         layer: stats1 + memgrad2.history[layer]
         for layer, stats1 in memgrad.history.items()
     }
     self._test_logging(history)
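Every snippet below relies on a some_data_generator helper that is not shown. A minimal sketch, assuming the 1-in/1-out regression networks these tests use (the real helper may differ):

import torch

def some_data_generator(batch_size):
    # Infinite stream of random (x, y) batches shaped for nn.Linear(1, 1).
    while True:
        x = torch.rand(batch_size, 1)
        y = torch.rand(batch_size, 1)
        yield x, y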
Example #2
 def test_integration(self):
     train_gen = some_data_generator(OptimizerCheckpointTest.batch_size)
     valid_gen = some_data_generator(OptimizerCheckpointTest.batch_size)
     checkpointer = OptimizerCheckpoint(self.checkpoint_filename, period=1)
     self.model.fit_generator(
         train_gen, valid_gen, epochs=OptimizerCheckpointTest.epochs, steps_per_epoch=5, callbacks=[checkpointer]
     )
Example #3
 def test_logging(self):
     train_gen = some_data_generator(20)
     valid_gen = some_data_generator(20)
     logger = self.CSVLogger(self.csv_filename)
     history = self.model.fit_generator(
         train_gen, valid_gen, epochs=self.num_epochs, steps_per_epoch=5, callbacks=[logger]
     )
     self._test_logging(history)
Example #4
 def test_logging(self):
     train_gen = some_data_generator(20)
     valid_gen = some_data_generator(20)
     logger = TensorBoardLogger(self.writer)
     history = self.model.fit_generator(
         train_gen, valid_gen, epochs=self.num_epochs, steps_per_epoch=5, callbacks=[logger]
     )
     self._test_logging(history)
Example #5
 def test_logging_with_batch_granularity(self):
     train_gen = some_data_generator(20)
     valid_gen = some_data_generator(20)
     logger = self.CSVLogger(self.csv_filename, batch_granularity=True)
     history = History()
     self.model.fit_generator(
         train_gen, valid_gen, epochs=self.num_epochs, steps_per_epoch=5, callbacks=[logger, history]
     )
     self._test_logging(history.history)
Example #6
 def setUp(self):
     torch.manual_seed(42)
     self.pytorch_network = nn.Linear(1, 1)
     self.loss_function = nn.MSELoss()
     self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(),
                                      lr=1e-3)
     self.model = Model(self.pytorch_network, self.optimizer,
                        self.loss_function)
     self.train_gen = some_data_generator(20)
     self.valid_gen = some_data_generator(20)
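This setUp fixture is the template shared by the tests above and below. A standalone sketch of the same setup, assuming a recent Poutyne where Model is importable from the top-level package:

import torch
import torch.nn as nn
from poutyne import Model  # older versions: from poutyne.framework import Model

torch.manual_seed(42)
network = nn.Linear(1, 1)
optimizer = torch.optim.SGD(network.parameters(), lr=1e-3)
model = Model(network, optimizer, nn.MSELoss())
model.fit_generator(some_data_generator(20), some_data_generator(20),
                    epochs=5, steps_per_epoch=5)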
Example #7
 def test_logging_overwrite(self):
     train_gen = some_data_generator(20)
     valid_gen = some_data_generator(20)
     logger = self.CSVLogger(self.csv_filename)
     self.model.fit_generator(train_gen, valid_gen, epochs=self.num_epochs, steps_per_epoch=5, callbacks=[logger])
     # Recreate the logger with append=False; the CSV should be overwritten.
     logger = self.CSVLogger(self.csv_filename, append=False)
     history = self.model.fit_generator(
         train_gen, valid_gen, epochs=20, steps_per_epoch=5, initial_epoch=self.num_epochs, callbacks=[logger]
     )
     self._test_logging(history)
Example #8
 def test_logging(self):
     train_gen = some_data_generator(self.batch_size)
     valid_gen = some_data_generator(self.batch_size)
     memgrad = MemoryGradientLogger()
     logger = self.CSVGradientLogger(self.csv_filename)
     self.model.fit_generator(train_gen,
                              valid_gen,
                              epochs=self.num_epochs,
                              steps_per_epoch=5,
                              callbacks=[memgrad, logger])
     self._test_logging(memgrad.history)
Example #9
 def test_integration(self):
     train_gen = some_data_generator(20)
     valid_gen = some_data_generator(20)
     earlystopper = EarlyStopping(monitor='val_loss',
                                  min_delta=0,
                                  patience=2,
                                  verbose=False)
     self.model.fit_generator(train_gen,
                              valid_gen,
                              epochs=10,
                              steps_per_epoch=5,
                              callbacks=[earlystopper])
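With patience=2 and min_delta=0, EarlyStopping requests a stop once val_loss has gone two consecutive epochs without improving. The core logic, sketched generically (the exact comparison may differ from Poutyne's implementation):

wait, best = 0, float('inf')
for val_loss in epoch_val_losses:  # hypothetical per-epoch metric values
    if val_loss < best - min_delta:
        best, wait = val_loss, 0
    else:
        wait += 1
        if wait >= patience:
            break  # i.e., set model.stop_training = True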
Example #10
 def test_reduce_lr_on_plateau_integration(self):
     train_gen = some_data_generator(OptimizerCheckpointTest.batch_size)
     valid_gen = some_data_generator(OptimizerCheckpointTest.batch_size)
     reduce_lr = ReduceLROnPlateau(monitor='loss', patience=3)
     checkpointer = LRSchedulerCheckpoint(reduce_lr,
                                          self.checkpoint_filename,
                                          period=1)
     self.model.fit_generator(train_gen,
                              valid_gen,
                              epochs=OptimizerCheckpointTest.epochs,
                              steps_per_epoch=5,
                              callbacks=[checkpointer])
Example #11
 def test_any_scheduler_integration(self):
     train_gen = some_data_generator(OptimizerCheckpointTest.batch_size)
     valid_gen = some_data_generator(OptimizerCheckpointTest.batch_size)
     lr_scheduler = ExponentialLR(gamma=0.01)
     checkpointer = LRSchedulerCheckpoint(lr_scheduler,
                                          self.checkpoint_filename,
                                          period=1)
     self.model.fit_generator(train_gen,
                              valid_gen,
                              epochs=OptimizerCheckpointTest.epochs,
                              steps_per_epoch=5,
                              callbacks=[checkpointer])
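Note that ReduceLROnPlateau and ExponentialLR above are Poutyne's scheduler callbacks, not the torch.optim.lr_scheduler classes: they take the scheduler's keyword arguments without an optimizer, which Poutyne binds when training starts. A minimal usage sketch, assuming a recent Poutyne:

from poutyne import ExponentialLR  # older versions: poutyne.framework

lr_scheduler = ExponentialLR(gamma=0.01)  # no optimizer argument here
model.fit_generator(train_gen, valid_gen, epochs=5, steps_per_epoch=5,
                    callbacks=[lr_scheduler])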
Example #12
    def test_tracking_N_layers_model_with_bias(self):
        self.num_layer = 4
        self.pytorch_network = nn.Sequential(nn.Linear(1, 1), nn.Linear(1, 1), nn.Linear(1, 1), nn.Linear(1, 1))
        self.optimizer = torch.optim.SGD(self.pytorch_network.parameters(), lr=self.lr)
        self.model = Model(self.pytorch_network, self.optimizer, self.loss_function)

        keep_bias = True
        train_gen = some_data_generator(20)
        valid_gen = some_data_generator(20)
        tracker = TensorBoardGradientTracker(self.writer, keep_bias=keep_bias)
        self.model.fit_generator(train_gen, valid_gen, epochs=self.num_epochs, steps_per_epoch=5, callbacks=[tracker])
        self._test_tracking(keep_bias)
Example #13
 def test_integration(self):
     train_gen = some_data_generator(PeriodicSaveTest.batch_size)
     valid_gen = some_data_generator(PeriodicSaveTest.batch_size)
     saver = PeriodicEpochSave(self.save_filename,
                               monitor='val_loss',
                               verbose=True,
                               save_best_only=True)
     self.model.fit_generator(train_gen,
                              valid_gen,
                              epochs=10,
                              steps_per_epoch=5,
                              callbacks=[saver])
Example #14
 def test_integration(self):
     train_gen = some_data_generator(ModelCheckpointTest.batch_size)
     valid_gen = some_data_generator(ModelCheckpointTest.batch_size)
     checkpointer = ModelCheckpoint(self.checkpoint_filename,
                                    monitor='val_loss',
                                    verbose=True,
                                    save_best_only=True)
     self.model.fit_generator(train_gen,
                              valid_gen,
                              epochs=10,
                              steps_per_epoch=5,
                              callbacks=[checkpointer])
Example #15
 def test_multiple_learning_rates(self):
     train_gen = some_data_generator(20)
     valid_gen = some_data_generator(20)
     logger = self.CSVLogger(self.csv_filename)
     lrs = [BaseCSVLoggerTest.lr, BaseCSVLoggerTest.lr / 2]
     optimizer = torch.optim.SGD(
         [dict(params=[self.pytorch_network.weight], lr=lrs[0]), dict(params=[self.pytorch_network.bias], lr=lrs[1])]
     )
     model = Model(self.pytorch_network, optimizer, self.loss_function)
     history = model.fit_generator(
         train_gen, valid_gen, epochs=self.num_epochs, steps_per_epoch=5, callbacks=[logger]
     )
     self._test_logging(history, lrs=lrs)
Example #16
 def test_non_atomic_write(self):
     checkpoint_filename = os.path.join(self.temp_dir_obj.name,
                                        'my_checkpoint.ckpt')
     train_gen = some_data_generator(ModelCheckpointTest.batch_size)
     valid_gen = some_data_generator(ModelCheckpointTest.batch_size)
     checkpointer = ModelCheckpoint(checkpoint_filename,
                                    monitor='val_loss',
                                    verbose=True,
                                    period=1,
                                    atomic_write=False)
     self.model.fit_generator(train_gen,
                              valid_gen,
                              epochs=10,
                              steps_per_epoch=5,
                              callbacks=[checkpointer])
     self.assertTrue(os.path.isfile(checkpoint_filename))
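The atomic_write option that this test disables exists to avoid corrupt checkpoints when the process dies mid-save. The underlying idea, sketched with plain file operations rather than Poutyne's actual implementation:

import os
import tempfile

def atomic_save(data: bytes, filename: str):
    # Write to a temporary file in the target directory, then rename it in.
    # os.replace is atomic on POSIX, so readers never see a partial file.
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(filename) or '.')
    with os.fdopen(fd, 'wb') as f:
        f.write(data)
    os.replace(tmp, filename)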
Example #17
    def _test_checkpointer_with_val_losses(self, checkpointer, val_losses,
                                           has_checkpoints):
        # Drive the callback hooks by hand with a scripted val_loss sequence.
        generator = some_data_generator(ModelCheckpointTest.batch_size)

        checkpointer.set_params({'epochs': len(val_losses), 'steps': 1})
        checkpointer.set_model(self.model)
        checkpointer.on_train_begin({})
        for epoch, (val_loss, has_checkpoint) in enumerate(
                zip(val_losses, has_checkpoints), 1):
            checkpointer.on_epoch_begin(epoch, {})
            checkpointer.on_train_batch_begin(1, {})
            loss = self._update_model(generator)
            checkpointer.on_train_batch_end(1, {
                'batch': 1,
                'size': ModelCheckpointTest.batch_size,
                'loss': loss
            })
            checkpointer.on_epoch_end(epoch, {
                'epoch': epoch,
                'loss': loss,
                'val_loss': val_loss
            })
            filename = self.checkpoint_filename.format(epoch=epoch)
            self.assertEqual(has_checkpoint, os.path.isfile(filename))
        checkpointer.on_train_end({})
Example #18
    def _test_checkpointer(self, checkpointer, lr_scheduler):
        scheduler_states = {}
        generator = some_data_generator(OptimizerCheckpointTest.batch_size)

        checkpointer.set_params({
            'epochs': OptimizerCheckpointTest.epochs,
            'steps': 1
        })
        checkpointer.set_model(self.model)
        checkpointer.on_train_begin({})
        for epoch in range(1, OptimizerCheckpointTest.epochs + 1):
            checkpointer.on_epoch_begin(epoch, {})
            checkpointer.on_train_batch_begin(1, {})
            loss = self._update_model(generator)
            checkpointer.on_train_batch_end(
                1, {
                    'batch': 1,
                    'size': OptimizerCheckpointTest.batch_size,
                    'loss': loss
                })
            checkpointer.on_epoch_end(epoch, {
                'epoch': epoch,
                'loss': loss,
                'val_loss': 1
            })
            filename = self.checkpoint_filename.format(epoch=epoch)
            self.assertTrue(os.path.isfile(filename))
            scheduler_states[epoch] = torch_to_numpy(
                lr_scheduler.scheduler.state_dict(), copy=True)
        checkpointer.on_train_end({})

        self._test_checkpoint(scheduler_states, lr_scheduler)
Example #19
    def _test_early_stopping(self, earlystopper, val_losses, early_stop_epoch):
        generator = some_data_generator(EarlyStoppingTest.batch_size)

        self.model.stop_training = False

        earlystopper.set_params({'epochs': len(val_losses), 'steps': 1})
        earlystopper.set_model(self.model)
        earlystopper.on_train_begin({})
        for epoch, val_loss in enumerate(val_losses, 1):
            earlystopper.on_epoch_begin(epoch, {})
            earlystopper.on_train_batch_begin(1, {})
            loss = self._update_model(generator)
            earlystopper.on_train_batch_end(1, {
                'batch': 1,
                'size': EarlyStoppingTest.batch_size,
                'loss': loss
            })
            earlystopper.on_epoch_end(epoch, {
                'epoch': epoch,
                'loss': loss,
                'val_loss': val_loss
            })
            self.assertEqual(self.model.stop_training,
                             epoch == early_stop_epoch)
            if epoch == early_stop_epoch:
                break

        earlystopper.on_train_end({})
Example #20
    def _test_batch_delay(self, epoch_delay, batch_in_epoch_delay):
        batch_delay = epoch_delay * DelayCallbackTest.steps_per_epoch + batch_in_epoch_delay
        delay_callback = DelayCallback(self.mock_callback,
                                       batch_delay=batch_delay)
        train_generator = some_data_generator(DelayCallbackTest.batch_size)
        valid_generator = some_data_generator(DelayCallbackTest.batch_size)
        self.model.fit_generator(
            train_generator,
            valid_generator,
            epochs=DelayCallbackTest.epochs,
            steps_per_epoch=DelayCallbackTest.steps_per_epoch,
            validation_steps=DelayCallbackTest.steps_per_epoch,
            callbacks=[delay_callback])
        params = {
            'epochs': DelayCallbackTest.epochs,
            'steps': DelayCallbackTest.steps_per_epoch,
            'valid_steps': DelayCallbackTest.steps_per_epoch
        }

        call_list = []
        call_list.append(call.on_train_begin({}))
        for epoch in range(epoch_delay + 1, DelayCallbackTest.epochs + 1):
            call_list.append(call.on_epoch_begin(epoch, {}))
            start_step = batch_in_epoch_delay + 1 if epoch == epoch_delay + 1 else 1
            for step in range(start_step, params['steps'] + 1):
                call_list.append(call.on_train_batch_begin(step, {}))
                call_list.append(call.on_backward_end(step))
                call_list.append(
                    call.on_train_batch_end(
                        step, {
                            'batch': step,
                            'size': DelayCallbackTest.batch_size,
                            **self.train_dict
                        }))
            call_list.append(
                call.on_epoch_end(epoch, {
                    'epoch': epoch,
                    **self.log_dict
                }))
        call_list.append(call.on_train_end({}))

        method_calls = self.mock_callback.method_calls
        self.assertIn(call.set_model(self.model), method_calls[:2])
        self.assertIn(call.set_params(params), method_calls[:2])

        self.assertEqual(len(method_calls), len(call_list) + 2)
        self.assertEqual(method_calls[2:], call_list)
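DelayCallback wraps another callback and suppresses its events until a given number of batches (or epochs) has passed, which is what the expected call_list above reconstructs. A conceptual sketch assuming Poutyne-like semantics, not the actual implementation:

class DelaySketch:
    def __init__(self, callback, batch_delay):
        self.callback = callback
        self.batch_delay = batch_delay
        self.seen_batches = 0

    def on_train_batch_end(self, batch, logs):
        self.seen_batches += 1
        if self.seen_batches > self.batch_delay:
            # Forward events only once the delay has elapsed.
            self.callback.on_train_batch_end(batch, logs)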
Example #21
 def test_temporary_filename_arg(self):
     tmp_filename = os.path.join(self.temp_dir_obj.name,
                                 'my_checkpoint.tmp.ckpt')
     save_filename = os.path.join(self.temp_dir_obj.name,
                                  'my_checkpoint.ckpt')
     train_gen = some_data_generator(PeriodicSaveTest.batch_size)
     valid_gen = some_data_generator(PeriodicSaveTest.batch_size)
     saver = PeriodicEpochSave(save_filename,
                               monitor='val_loss',
                               verbose=True,
                               period=1,
                               temporary_filename=tmp_filename)
     self.model.fit_generator(train_gen,
                              valid_gen,
                              epochs=10,
                              steps_per_epoch=5,
                              callbacks=[saver])
     self.assertFalse(os.path.isfile(tmp_filename))
     self.assertTrue(os.path.isfile(save_filename))
Example #22
 def test_logging_overwrite(self):
     train_gen = some_data_generator(self.batch_size)
     valid_gen = some_data_generator(self.batch_size)
     logger = self.CSVGradientLogger(self.csv_filename)
     self.model.fit_generator(train_gen,
                              valid_gen,
                              epochs=self.num_epochs,
                              steps_per_epoch=5,
                              callbacks=[logger])
     # Restart with append=False; only the second run's gradients should remain.
     memgrad = MemoryGradientLogger()
     logger = self.CSVGradientLogger(self.csv_filename, append=False)
     self.model.fit_generator(
         train_gen,
         valid_gen,
         epochs=20,
         steps_per_epoch=5,
         initial_epoch=self.num_epochs,
         callbacks=[memgrad, logger],
     )
     self._test_logging(memgrad.history)
Example #23
 def test_temporary_filename_arg_with_differing_save_filename(self):
     epochs = 10
     tmp_filename = os.path.join(self.temp_dir_obj.name,
                                 'my_checkpoint.tmp.ckpt')
     save_filename = os.path.join(self.temp_dir_obj.name,
                                  'my_checkpoint_{epoch}.ckpt')
     train_gen = some_data_generator(PeriodicSaveTest.batch_size)
     valid_gen = some_data_generator(PeriodicSaveTest.batch_size)
     saver = PeriodicEpochSave(save_filename,
                               monitor='val_loss',
                               verbose=True,
                               period=1,
                               temporary_filename=tmp_filename)
     self.model.fit_generator(train_gen,
                              valid_gen,
                              epochs=epochs,
                              steps_per_epoch=5,
                              callbacks=[saver])
     self.assertFalse(os.path.isfile(tmp_filename))
     for i in range(1, epochs + 1):
         self.assertTrue(os.path.isfile(save_filename.format(epoch=i)))
Example #24
    def _test_saver_with_val_losses(self,
                                    saver,
                                    val_losses,
                                    has_checkpoints,
                                    keep_only_last_best=False):
        generator = some_data_generator(PeriodicSaveTest.batch_size)

        best_checkpoint_filenames = []
        saver.set_params({'epochs': len(val_losses), 'steps': 1})
        saver.set_model(self.model)
        saver.on_train_begin({})
        for epoch, (val_loss, has_checkpoint) in enumerate(
                zip(val_losses, has_checkpoints), 1):
            saver.on_epoch_begin(epoch, {})
            saver.on_train_batch_begin(1, {})
            loss = self._update_model(generator)
            saver.on_train_batch_end(1, {
                'batch': 1,
                'size': PeriodicSaveTest.batch_size,
                'loss': loss
            })
            saver.on_epoch_end(epoch, {
                'epoch': epoch,
                'loss': loss,
                'val_loss': val_loss
            })

            filename = self.save_filename.format(epoch=epoch)
            self.assertEqual(has_checkpoint, os.path.isfile(filename))
            if has_checkpoint:
                with open(filename, 'r', encoding='utf-8') as f:
                    self.assertEqual(f'{epoch}\n', f.read())
                best_checkpoint_filenames.append(os.path.realpath(filename))

        files = [
            os.path.realpath(os.path.join(self.temp_dir_obj.name, f))
            for f in os.listdir(self.temp_dir_obj.name)
        ]
        if keep_only_last_best:
            self.assertEqual(1, len(files))
            self.assertEqual(files[0], best_checkpoint_filenames[-1])
        else:
            best_checkpoint_filenames = set(best_checkpoint_filenames)
            self.assertEqual(len(best_checkpoint_filenames), len(files))
            self.assertEqual(best_checkpoint_filenames, set(files))

        saver.on_train_end({})
Example #25
    def _test_restore_with_val_losses(self, checkpointer, val_losses, best_epoch):
        generator = some_data_generator(BestModelRestoreTest.batch_size)

        best_epoch_weights = None
        checkpointer.set_params({'epochs': len(val_losses), 'steps': 1})
        checkpointer.set_model(self.model)
        checkpointer.on_train_begin({})
        for epoch, val_loss in enumerate(val_losses, 1):
            checkpointer.on_epoch_begin(epoch, {})
            checkpointer.on_train_batch_begin(1, {})
            loss = self._update_model(generator)
            checkpointer.on_train_batch_end(1, {'batch': 1, 'size': BestModelRestoreTest.batch_size, 'loss': loss})
            checkpointer.on_epoch_end(epoch, {'epoch': epoch, 'loss': loss, 'val_loss': val_loss})
            if epoch == best_epoch:
                best_epoch_weights = torch_to_numpy(self.model.get_weight_copies())
        checkpointer.on_train_end({})

        final_weights = torch_to_numpy(self.model.get_weight_copies())
        self.assertEqual(best_epoch_weights, final_weights)
Example #26
 def test_integration(self):
     train_gen = some_data_generator(20)
     valid_gen = some_data_generator(20)
     model_restore = BestModelRestore(monitor='val_loss', verbose=True)
     self.model.fit_generator(train_gen, valid_gen, epochs=10, steps_per_epoch=5, callbacks=[model_restore])
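BestModelRestore keeps an in-memory copy of the weights whenever the monitored metric improves, then restores that copy when training ends; this is what _test_restore_with_val_losses checks above. A conceptual sketch, not Poutyne's actual code:

import copy

best_val, best_weights = float('inf'), None
for logs in per_epoch_logs:  # hypothetical sequence of epoch log dicts
    if logs['val_loss'] < best_val:
        best_val = logs['val_loss']
        best_weights = copy.deepcopy(network.state_dict())
# at on_train_end: restore the best weights
network.load_state_dict(best_weights)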