def run_model_test_without_loggers(trainer_options, model, min_acc: float = 0.50):
    reset_seed()

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # correct result and ok accuracy
    assert result == 1, 'amp + ddp model failed to complete'

    pretrained_model = load_model_from_checkpoint(
        trainer.logger,
        trainer.checkpoint_callback.best_model_path,
    )

    # test new model accuracy
    test_loaders = model.test_dataloader()
    if not isinstance(test_loaders, list):
        test_loaders = [test_loaders]

    for dataloader in test_loaders:
        run_prediction(dataloader, pretrained_model, min_acc=min_acc)

    if trainer.use_ddp:
        # on hpc this would work fine... but need to hack it for the purpose of the test
        trainer.model = pretrained_model
        trainer.optimizers, trainer.lr_schedulers = pretrained_model.configure_optimizers()
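# A minimal sketch of the `run_prediction` helper assumed above -- not the actual test
# utility, just an illustration of the accuracy check it is expected to perform. It
# assumes batches of `(x, y)` tensors and a model that returns class logits.
def run_prediction(dataloader, trained_model, min_acc: float = 0.50):
    # grab a single batch and run it through the trained model
    x, y = next(iter(dataloader))
    with torch.no_grad():
        y_hat = trained_model(x)

    # fraction of correct predictions must clear the threshold
    acc = (y_hat.argmax(dim=-1) == y).float().mean().item()
    assert acc >= min_acc, f'model accuracy {acc:.3f} is below the minimum {min_acc}'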
def test_running_test_pretrained_model_cpu(tmpdir):
    """Verify test() on pretrained model."""
    model = EvalModelTemplate()

    # logger file to get meta
    logger = tutils.get_default_logger(tmpdir)

    # logger file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    trainer_options = dict(
        progress_bar_refresh_rate=0,
        max_epochs=3,
        limit_train_batches=0.4,
        limit_val_batches=0.2,
        checkpoint_callback=checkpoint,
        logger=logger,
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # correct result and ok accuracy
    assert result == 1, 'training failed to complete'

    pretrained_model = tutils.load_model_from_checkpoint(
        logger, trainer.checkpoint_callback.dirpath, module_class=EvalModelTemplate
    )

    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)

    # test we have good test accuracy
    tutils.assert_ok_model_acc(new_trainer)
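# A minimal sketch of `tutils.assert_ok_model_acc`, assuming the test step logs a
# `test_acc` value that ends up in `trainer.callback_metrics`; the real helper may read
# a differently named key or a different metrics dict.
def assert_ok_model_acc(trainer, key: str = 'test_acc', thr: float = 0.50):
    acc = trainer.callback_metrics[key]
    assert acc > thr, f'model failed to reach expected {key}, got {acc}'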
def test_running_test_pretrained_model_distrib(tmpdir, backend):
    """Verify `test()` on pretrained model."""
    tutils.set_random_master_port()

    model = EvalModelTemplate()

    # exp file to get meta
    logger = tutils.get_default_logger(tmpdir)

    # exp file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    trainer_options = dict(
        progress_bar_refresh_rate=0,
        max_epochs=2,
        limit_train_batches=0.4,
        limit_val_batches=0.2,
        checkpoint_callback=checkpoint,
        logger=logger,
        gpus=[0, 1],
        distributed_backend=backend,
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    log.info(os.listdir(tutils.get_data_path(logger, path_dir=tmpdir)))

    # correct result and ok accuracy
    assert result == 1, 'training failed to complete'

    pretrained_model = tutils.load_model_from_checkpoint(
        logger, trainer.checkpoint_callback.dirpath, module_class=EvalModelTemplate
    )

    # run test set
    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)

    # test we have good test accuracy
    tutils.assert_ok_model_acc(new_trainer)

    dataloaders = model.test_dataloader()
    if not isinstance(dataloaders, list):
        dataloaders = [dataloaders]

    for dataloader in dataloaders:
        tpipes.run_prediction(dataloader, pretrained_model)
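# A minimal sketch of `tutils.set_random_master_port`: DDP workers rendezvous on the
# `MASTER_PORT` environment variable, so giving each test a random port avoids
# collisions when several distributed tests run on the same machine. The port range
# below is an assumption, not taken from the source.
import os
import random


def set_random_master_port():
    port = random.randint(12000, 19000)
    os.environ['MASTER_PORT'] = str(port)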
def run_model_test(trainer_options, model, on_gpu: bool = True, version=None, with_hpc: bool = True):
    reset_seed()
    save_dir = trainer_options['default_root_dir']

    # logger file to get meta
    logger = get_default_logger(save_dir, version=version)
    trainer_options.update(logger=logger)

    if 'checkpoint_callback' not in trainer_options:
        trainer_options.update(checkpoint_callback=True)

    trainer = Trainer(**trainer_options)
    initial_values = torch.tensor([torch.sum(torch.abs(x)) for x in model.parameters()])
    result = trainer.fit(model)
    post_train_values = torch.tensor([torch.sum(torch.abs(x)) for x in model.parameters()])

    assert result == 1, 'trainer failed'
    # Check that the model is actually changed post-training
    assert torch.norm(initial_values - post_train_values) > 0.1

    # test model loading
    pretrained_model = load_model_from_checkpoint(logger, trainer.checkpoint_callback.best_model_path)

    # test new model accuracy
    test_loaders = model.test_dataloader()
    if not isinstance(test_loaders, list):
        test_loaders = [test_loaders]

    for dataloader in test_loaders:
        run_prediction(dataloader, pretrained_model)

    if with_hpc:
        if trainer.use_ddp or trainer.use_ddp2:
            # on hpc this would work fine... but need to hack it for the purpose of the test
            trainer.model = pretrained_model
            trainer.optimizers, trainer.lr_schedulers, trainer.optimizer_frequencies = \
                trainer.init_optimizers(pretrained_model)

        # test HPC loading / saving
        trainer.checkpoint_connector.hpc_save(save_dir, logger)
        trainer.checkpoint_connector.hpc_load(save_dir, on_gpu=on_gpu)
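# A minimal sketch of the `load_model_from_checkpoint` helper used above, assuming the
# path passed in points at a single `.ckpt` file and that the module class exposes
# Lightning's standard `load_from_checkpoint` classmethod. The real helper may instead
# resolve a checkpoint directory via the logger.
def load_model_from_checkpoint(logger, checkpoint_path, module_class=EvalModelTemplate):
    trained_model = module_class.load_from_checkpoint(checkpoint_path)
    assert trained_model is not None, 'loading model failed'
    return trained_model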
def run_model_test(trainer_options, model, on_gpu: bool = True, version=None, with_hpc: bool = True):
    reset_seed()
    save_dir = trainer_options['default_root_dir']

    # logger file to get meta
    logger = get_default_logger(save_dir, version=version)
    trainer_options.update(logger=logger)

    if 'checkpoint_callback' not in trainer_options:
        # logger file to get weights
        checkpoint = init_checkpoint_callback(logger)
        trainer_options.update(checkpoint_callback=checkpoint)

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # correct result and ok accuracy
    assert result == 1, 'amp + ddp model failed to complete'

    # test model loading
    pretrained_model = load_model_from_checkpoint(logger, trainer.checkpoint_callback.dirpath)

    # test new model accuracy
    test_loaders = model.test_dataloader()
    if not isinstance(test_loaders, list):
        test_loaders = [test_loaders]

    for dataloader in test_loaders:
        run_prediction(dataloader, pretrained_model)

    if with_hpc:
        if trainer.use_ddp or trainer.use_ddp2:
            # on hpc this would work fine... but need to hack it for the purpose of the test
            trainer.model = pretrained_model
            trainer.optimizers, trainer.lr_schedulers, trainer.optimizer_frequencies = \
                trainer.init_optimizers(pretrained_model)

        # test HPC loading / saving
        trainer.hpc_save(save_dir, logger)
        trainer.hpc_load(save_dir, on_gpu=on_gpu)
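# Hypothetical usage sketch (not from the source): how a GPU test would typically drive
# `run_model_test`. The option values and test name are illustrative only.
def test_model_gpu_example(tmpdir):
    model = EvalModelTemplate()
    trainer_options = dict(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_train_batches=0.4,
        limit_val_batches=0.2,
        gpus=1,
    )
    run_model_test(trainer_options, model, on_gpu=True, with_hpc=False)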