def test_sync_reduce_ddp():
    """Make sure sync-reduce works with DDP"""
    tutils.reset_seed()
    tutils.set_random_master_port()

    worldsize = 2
    mp.spawn(_ddp_test_fn, args=(worldsize,), nprocs=worldsize)
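# ``_ddp_test_fn`` is defined elsewhere in the test module and is not shown in this
# section. The sketch below only illustrates the shape of such a spawned worker; the
# 'gloo' backend, the localhost master address, and the all-reduce payload are
# assumptions for illustration, not taken from the source.
def _example_ddp_worker(rank, worldsize):
    import os
    import torch
    import torch.distributed as dist

    # set_random_master_port() in the test above sets MASTER_PORT; the master
    # address defaults to localhost for this sketch.
    os.environ.setdefault('MASTER_ADDR', '127.0.0.1')
    dist.init_process_group('gloo', rank=rank, world_size=worldsize)

    # Each process contributes its rank; after the all-reduce every process
    # holds the same sum 0 + 1 + ... + (worldsize - 1).
    tensor = torch.tensor([float(rank)])
    dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
    assert tensor.item() == sum(range(worldsize))

    dist.destroy_process_group()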
def test_amp_gpu_ddp_slurm_managed(tmpdir):
    """Make sure DDP + AMP work."""
    # simulate setting slurm flags
    tutils.set_random_master_port()
    os.environ['SLURM_LOCALID'] = str(0)

    model = EvalModelTemplate(tutils.get_default_hparams())

    # exp file to get meta
    logger = tutils.get_default_logger(tmpdir)

    # exp file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    # fit model
    trainer = Trainer(
        max_epochs=1,
        gpus=[0],
        distributed_backend='ddp',
        precision=16,
        checkpoint_callback=checkpoint,
        logger=logger,
    )
    trainer.is_slurm_managing_tasks = True
    result = trainer.fit(model)

    # correct result and ok accuracy
    assert result == 1, 'amp + ddp model failed to complete'

    # test root model address
    assert trainer.resolve_root_node_address('abc') == 'abc'
    assert trainer.resolve_root_node_address('abc[23]') == 'abc23'
    assert trainer.resolve_root_node_address('abc[23-24]') == 'abc23'
    assert trainer.resolve_root_node_address('abc[23-24, 45-40, 40]') == 'abc23'
def run_test_from_config(trainer_options):
    """Trains the default model with the given config."""
    tutils.reset_seed()
    tutils.set_random_master_port()

    ckpt_path = trainer_options['default_root_dir']
    trainer_options['checkpoint_callback'] = ModelCheckpoint(ckpt_path)

    model, hparams = tutils.get_default_model()
    tutils.run_model_test(trainer_options, model, version=0, with_hpc=False)

    # Horovod should be initialized following training. If not, this will raise an exception.
    assert hvd.size() == 2
def test_running_test_pretrained_model_distrib(tmpdir, backend):
    """Verify `test()` on pretrained model."""
    tutils.reset_seed()
    tutils.set_random_master_port()

    hparams = tutils.get_default_hparams()
    model = LightningTestModel(hparams)

    # exp file to get meta
    logger = tutils.get_default_logger(tmpdir)

    # exp file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    trainer_options = dict(
        progress_bar_refresh_rate=0,
        max_epochs=2,
        train_percent_check=0.4,
        val_percent_check=0.2,
        checkpoint_callback=checkpoint,
        logger=logger,
        gpus=[0, 1],
        distributed_backend=backend,
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    log.info(os.listdir(tutils.get_data_path(logger, path_dir=tmpdir)))

    # correct result and ok accuracy
    assert result == 1, 'training failed to complete'
    pretrained_model = tutils.load_model(logger,
                                         trainer.checkpoint_callback.dirpath,
                                         module_class=LightningTestModel)

    # run test set
    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)

    # test we have good test accuracy
    tutils.assert_ok_model_acc(new_trainer)

    dataloaders = model.test_dataloader()
    if not isinstance(dataloaders, list):
        dataloaders = [dataloaders]

    for dataloader in dataloaders:
        tutils.run_prediction(dataloader, pretrained_model)
def test_multi_cpu_model_ddp(tmpdir):
    """Make sure DDP works on CPU (ddp_cpu backend)."""
    tutils.set_random_master_port()

    trainer_options = dict(
        default_root_dir=tmpdir,
        progress_bar_refresh_rate=0,
        max_epochs=1,
        train_percent_check=0.4,
        val_percent_check=0.2,
        gpus=None,
        num_processes=2,
        distributed_backend='ddp_cpu',
    )

    model = EvalModelTemplate()
    tutils.run_model_test(trainer_options, model, on_gpu=False)
def test_amp_gpu_ddp(tmpdir):
    """Make sure DDP + AMP work."""
    tutils.reset_seed()
    tutils.set_random_master_port()

    hparams = tutils.get_default_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(
        default_save_path=tmpdir,
        max_epochs=1,
        gpus=2,
        distributed_backend='ddp',
        precision=16,
    )

    tutils.run_model_test(trainer_options, model)
def test_multi_gpu_model_ddp2(tmpdir):
    """Make sure DDP2 works."""
    tutils.reset_seed()
    tutils.set_random_master_port()

    model, hparams = tutils.get_default_model()
    trainer_options = dict(
        default_save_path=tmpdir,
        max_epochs=1,
        train_percent_check=0.4,
        val_percent_check=0.2,
        gpus=2,
        weights_summary=None,
        distributed_backend='ddp2',
    )

    tutils.run_model_test(trainer_options, model)
def test_multi_gpu_model_ddp(tmpdir):
    """Make sure DDP works."""
    tutils.reset_seed()
    tutils.set_random_master_port()

    model, hparams = tutils.get_default_model()
    trainer_options = dict(
        default_save_path=tmpdir,
        show_progress_bar=False,
        max_epochs=1,
        train_percent_check=0.4,
        val_percent_check=0.2,
        gpus=[0, 1],
        distributed_backend='ddp',
    )

    tutils.run_model_test(trainer_options, model)
def run_test_from_config(trainer_options):
    """Trains the default model with the given config."""
    set_random_master_port()

    ckpt_path = trainer_options['default_root_dir']
    trainer_options.update(checkpoint_callback=ModelCheckpoint(ckpt_path))

    model = EvalModelTemplate()
    run_model_test(trainer_options, model, on_gpu=args.on_gpu, version=0, with_hpc=False)

    # Horovod should be initialized following training. If not, this will raise an exception.
    assert hvd.size() == 2

    if args.on_gpu:
        trainer = Trainer(gpus=1, distributed_backend='horovod', max_epochs=1)
        # Test the root_gpu property
        assert trainer.root_gpu == hvd.local_rank()
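# ``run_test_from_config`` above references a module-level ``args`` and asserts
# ``hvd.size() == 2``, which suggests it lives in a standalone script launched with
# two Horovod workers (e.g. via ``horovodrun -np 2 python <script>``). The driver
# below is only a hypothetical sketch of such a script; the argument names, the
# JSON encoding of the trainer options, and the layout are assumptions, not from
# the source.
if __name__ == "__main__":
    import argparse
    import json

    parser = argparse.ArgumentParser()
    parser.add_argument('--trainer-options', required=True,
                        help='JSON-encoded dict of Trainer keyword arguments')
    parser.add_argument('--on-gpu', action='store_true', default=False)
    args = parser.parse_args()

    # Every Horovod worker executes the same training config.
    run_test_from_config(json.loads(args.trainer_options))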
def test_amp_multi_gpu(tmpdir, backend):
    """Make sure DP/DDP + AMP work."""
    tutils.set_random_master_port()

    model = EvalModelTemplate(tutils.get_default_hparams())

    trainer_options = dict(
        default_root_dir=tmpdir,
        max_epochs=1,
        # gpus=2,
        gpus='0, 1',  # test init with gpu string
        distributed_backend=backend,
        precision=16,
    )

    # tutils.run_model_test(trainer_options, model)
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)
    assert result
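# The test above deliberately passes ``gpus`` as the string '0, 1'. The Trainer also
# accepts the GPU selection as an int (number of GPUs) or a list of device indices.
# The helper below is purely illustrative (the function name and 'dp' backend are
# assumptions, not from the source): on a two-GPU machine each call requests the
# first two GPUs, only the spelling of the argument differs.
def _gpu_argument_forms_example():
    Trainer(gpus=2, distributed_backend='dp')       # number of GPUs
    Trainer(gpus=[0, 1], distributed_backend='dp')  # explicit device indices
    Trainer(gpus='0, 1', distributed_backend='dp')  # string form, as tested above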
def test_amp_gpu_ddp_slurm_managed(tmpdir):
    """Make sure DDP + AMP work."""
    if not tutils.can_run_gpu_test():
        return

    tutils.reset_seed()

    # simulate setting slurm flags
    tutils.set_random_master_port()
    os.environ['SLURM_LOCALID'] = str(0)

    hparams = tutils.get_default_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(
        show_progress_bar=True,
        max_epochs=1,
        gpus=[0],
        distributed_backend='ddp',
        precision=16,
    )

    # exp file to get meta
    logger = tutils.get_default_testtube_logger(tmpdir, False)

    # exp file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    # add these to the trainer options
    trainer_options['checkpoint_callback'] = checkpoint
    trainer_options['logger'] = logger

    # fit model
    trainer = Trainer(**trainer_options)
    trainer.is_slurm_managing_tasks = True
    result = trainer.fit(model)

    # correct result and ok accuracy
    assert result == 1, 'amp + ddp model failed to complete'

    # test root model address
    assert trainer.resolve_root_node_address('abc') == 'abc'
    assert trainer.resolve_root_node_address('abc[23]') == 'abc23'
    assert trainer.resolve_root_node_address('abc[23-24]') == 'abc23'
    assert trainer.resolve_root_node_address('abc[23-24, 45-40, 40]') == 'abc23'
def test_ddp_all_dataloaders_passed_to_fit(tmpdir):
    """Make sure DDP works with dataloaders passed to fit()"""
    tutils.set_random_master_port()

    trainer_options = dict(
        default_root_dir=tmpdir,
        progress_bar_refresh_rate=0,
        max_epochs=1,
        train_percent_check=0.4,
        val_percent_check=0.2,
        gpus=[0, 1],
        distributed_backend='ddp',
    )

    model = EvalModelTemplate()
    fit_options = dict(train_dataloader=model.train_dataloader(),
                       val_dataloaders=model.val_dataloader())

    trainer = Trainer(**trainer_options)
    result = trainer.fit(model, **fit_options)
    assert result == 1, "DDP doesn't work with dataloaders passed to fit()."
def test_ddp_all_dataloaders_passed_to_fit(tmpdir):
    """Make sure DDP works with dataloaders passed to fit()"""
    tutils.reset_seed()
    tutils.set_random_master_port()

    model, hparams = tutils.get_default_model()

    trainer = Trainer(
        default_root_dir=tmpdir,
        progress_bar_refresh_rate=0,
        max_epochs=1,
        train_percent_check=0.4,
        val_percent_check=0.2,
        gpus=[0, 1],
        distributed_backend='ddp',
    )
    result = trainer.fit(model,
                         train_dataloader=model.train_dataloader(),
                         val_dataloaders=model.val_dataloader())
    assert result == 1, "DDP doesn't work with dataloaders passed to fit()."
def test_multi_gpu_model(tmpdir, backend):
    """Make sure DDP works."""
    tutils.set_random_master_port()

    trainer_options = dict(
        default_root_dir=tmpdir,
        max_epochs=1,
        train_percent_check=0.4,
        val_percent_check=0.2,
        gpus=[0, 1],
        distributed_backend=backend,
    )

    model = EvalModelTemplate()
    # tutils.run_model_test(trainer_options, model)
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)
    assert result

    # test memory helper functions
    memory.get_memory_profile('min_max')
def test_ddp_all_dataloaders_passed_to_fit(tmpdir):
    """Make sure DDP works with dataloaders passed to fit()"""
    tutils.reset_seed()
    tutils.set_random_master_port()

    model, hparams = tutils.get_default_model()

    trainer_options = dict(
        default_save_path=tmpdir,
        show_progress_bar=False,
        max_epochs=1,
        train_percent_check=0.4,
        val_percent_check=0.2,
        gpus=[0, 1],
        distributed_backend='ddp',
    )

    fit_options = dict(train_dataloader=model.train_dataloader(),
                       val_dataloaders=model.val_dataloader())

    trainer = Trainer(**trainer_options)
    result = trainer.fit(model, **fit_options)
    assert result == 1, "DDP doesn't work with dataloaders passed to fit()."
def test_multi_gpu_wandb(tmpdir, backend):
    """Make sure DP/DDP + AMP work with WandbLogger."""
    from pytorch_lightning.loggers import WandbLogger

    tutils.set_random_master_port()

    model = EvalModelTemplate()
    logger = WandbLogger(name='utest')

    trainer_options = dict(
        default_root_dir=tmpdir,
        max_epochs=1,
        gpus=2,
        distributed_backend=backend,
        precision=16,
        logger=logger,
    )

    # tutils.run_model_test(trainer_options, model)
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)
    assert result

    trainer.test(model)
def test_numpy_metric_ddp():
    tutils.reset_seed()
    tutils.set_random_master_port()

    world_size = 2
    mp.spawn(_ddp_test_numpy_metric, args=(world_size,), nprocs=world_size)