Code example #1
def test_testtube_pickle():
    """
    Verify that pickling a trainer containing a test tube logger works
    """
    tutils.reset_seed()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    save_dir = tutils.init_save_dir()

    logger = tutils.get_test_tube_logger(False)
    logger.log_hyperparams(hparams)
    logger.save()

    trainer_options = dict(max_nb_epochs=1,
                           train_percent_check=0.01,
                           logger=logger)

    trainer = Trainer(**trainer_options)
    pkl_bytes = pickle.dumps(trainer)
    trainer2 = pickle.loads(pkl_bytes)
    trainer2.logger.log_metrics({"acc": 1.0})

    tutils.clear_save_dir()
Code example #2
def test_mlflow_logger():
    """
    verify that basic functionality of mlflow logger works
    """
    tutils.reset_seed()

    try:
        from pytorch_lightning.logging import MLFlowLogger
    except ModuleNotFoundError:
        return

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    root_dir = os.path.dirname(os.path.realpath(__file__))
    mlflow_dir = os.path.join(root_dir, "mlruns")

    logger = MLFlowLogger("test", f"file://{mlflow_dir}")

    trainer_options = dict(max_nb_epochs=1,
                           train_percent_check=0.01,
                           logger=logger)

    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    print('result finished')
    assert result == 1, "Training failed"

    tutils.clear_save_dir()
Code example #3
def test_comet_logger(tmpdir, monkeypatch):
    """Verify that basic functionality of Comet.ml logger works."""

    # prevent comet logger from trying to print at exit, since
    # pytest's stdout/stderr redirection breaks it
    import atexit
    monkeypatch.setattr(atexit, "register", lambda _: None)

    tutils.reset_seed()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    comet_dir = os.path.join(tmpdir, "cometruns")

    # We test CometLogger in offline mode with local saves
    logger = CometLogger(
        save_dir=comet_dir,
        project_name="general",
        workspace="dummy-test",
    )

    trainer_options = dict(default_save_path=tmpdir,
                           max_epochs=1,
                           train_percent_check=0.01,
                           logger=logger)

    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    print('result finished')
    assert result == 1, "Training failed"
Code example #4
def test_wandb_pickle(tmpdir):
    """Verify that pickling trainer with wandb logger works."""
    tutils.reset_seed()

    wandb_dir = str(tmpdir)
    logger = WandbLogger(save_dir=wandb_dir, anonymous=True)
    assert logger is not None
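The docstring above promises a pickling check, but the body stops after constructing the logger. A hedged sketch of the round-trip it describes, modeled on the other pickle tests in this collection (the continuation and trainer options below are assumptions, not part of the original test):

    # hypothetical continuation: round-trip a trainer holding the wandb logger
    trainer = Trainer(max_epochs=1, logger=logger)
    pkl_bytes = pickle.dumps(trainer)
    trainer2 = pickle.loads(pkl_bytes)
    assert trainer2.logger is not None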
Code example #5
def test_simple_cpu():
    """
    Verify continue training session on CPU
    :return:
    """
    tutils.reset_seed()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    save_dir = tutils.init_save_dir()

    # logger file to get meta
    trainer_options = dict(
        max_nb_epochs=1,
        val_percent_check=0.1,
        train_percent_check=0.1,
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # training complete
    assert result == 1, 'cpu model failed to complete'

    tutils.clear_save_dir()
Code example #6
def test_mlflow_pickle():
    """
    verify that pickling trainer with mlflow logger works
    """
    tutils.reset_seed()

    try:
        from pytorch_lightning.logging import MLFlowLogger
    except ModuleNotFoundError:
        return

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    root_dir = os.path.dirname(os.path.realpath(__file__))
    mlflow_dir = os.path.join(root_dir, "mlruns")

    logger = MLFlowLogger("test", f"file://{mlflow_dir}")

    trainer_options = dict(max_nb_epochs=1, logger=logger)

    trainer = Trainer(**trainer_options)
    pkl_bytes = pickle.dumps(trainer)
    trainer2 = pickle.loads(pkl_bytes)
    trainer2.logger.log_metrics({"acc": 1.0})

    tutils.clear_save_dir()
Code example #7
def test_no_val_end_module(tmpdir):
    """Tests use case where trainer saves the model, and user loads it from tags independently."""
    tutils.reset_seed()

    class CurrentTestModel(LightningValidationStepMixin,
                           LightningTestModelBase):
        pass

    hparams = tutils.get_hparams()
    model = CurrentTestModel(hparams)

    # logger file to get meta
    logger = tutils.get_test_tube_logger(tmpdir, False)

    trainer_options = dict(max_epochs=1,
                           logger=logger,
                           checkpoint_callback=ModelCheckpoint(tmpdir))

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # training complete
    assert result == 1, 'training failed to complete'

    # save model
    new_weights_path = os.path.join(tmpdir, 'save_test.ckpt')
    trainer.save_checkpoint(new_weights_path)

    # load new model
    tags_path = tutils.get_data_path(logger, path_dir=tmpdir)
    tags_path = os.path.join(tags_path, 'meta_tags.csv')
    model_2 = LightningTestModel.load_from_metrics(
        weights_path=new_weights_path, tags_csv=tags_path)
    model_2.eval()
Code example #8
def test_multiple_test_dataloader(tmpdir):
    """Verify multiple test_dataloader."""
    tutils.reset_seed()

    class CurrentTestModel(LightningTestMultipleDataloadersMixin,
                           LightningTestModelBase):
        pass

    hparams = tutils.get_hparams()
    model = CurrentTestModel(hparams)

    # logger file to get meta
    trainer_options = dict(
        default_save_path=tmpdir,
        max_epochs=1,
        val_percent_check=0.1,
        train_percent_check=0.2,
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # verify there are 2 test loaders
    assert len(trainer.get_test_dataloaders()) == 2, \
        'Multiple test_dataloaders not initiated properly'

    # make sure predictions are good for each test set
    for dataloader in trainer.get_test_dataloaders():
        tutils.run_prediction(dataloader, trainer.model)

    # run the test method
    trainer.test()
Code example #9
def test_running_test_pretrained_model(tmpdir):
    """Verify test() on a pretrained model."""
    tutils.reset_seed()
    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    # logger file to get meta
    logger = tutils.get_test_tube_logger(tmpdir, False)

    # logger file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    trainer_options = dict(show_progress_bar=False,
                           max_num_epochs=4,
                           train_percent_check=0.4,
                           val_percent_check=0.2,
                           checkpoint_callback=checkpoint,
                           logger=logger)

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # correct result and ok accuracy
    assert result == 1, 'training failed to complete'
    pretrained_model = tutils.load_model(logger.experiment,
                                         trainer.checkpoint_callback.filepath,
                                         module_class=LightningTestModel)

    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)

    # test we have good test accuracy
    tutils.assert_ok_test_acc(new_trainer)
Code example #10
def test_load_model_from_checkpoint(tmpdir):
    """Verify loading a model from checkpoint and running test() on it."""
    tutils.reset_seed()
    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(
        show_progress_bar=False,
        max_num_epochs=1,
        train_percent_check=0.4,
        val_percent_check=0.2,
        checkpoint_callback=True,
        logger=False,
        default_save_path=tmpdir,
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # correct result and ok accuracy
    assert result == 1, 'training failed to complete'
    pretrained_model = LightningTestModel.load_from_checkpoint(
        os.path.join(trainer.checkpoint_callback.filepath,
                     "_ckpt_epoch_0.ckpt"))

    # test that hparams loaded correctly
    for k, v in vars(hparams).items():
        assert getattr(pretrained_model.hparams, k) == v

    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)

    # test we have good test accuracy
    tutils.assert_ok_test_acc(new_trainer)
Code example #11
def test_optimizer_return_options():
    tutils.reset_seed()

    trainer = Trainer()
    model, hparams = tutils.get_model()

    # single optimizer
    opt_a = torch.optim.Adam(model.parameters(), lr=0.002)
    opt_b = torch.optim.SGD(model.parameters(), lr=0.002)
    optim, lr_sched = trainer.init_optimizers(opt_a)
    assert len(optim) == 1 and len(lr_sched) == 0

    # opt tuple
    opts = (opt_a, opt_b)
    optim, lr_sched = trainer.init_optimizers(opts)
    assert len(optim) == 2 and optim[0] == opts[0] and optim[1] == opts[1]
    assert len(lr_sched) == 0

    # opt list
    opts = [opt_a, opt_b]
    optim, lr_sched = trainer.init_optimizers(opts)
    assert len(optim) == 2 and optim[0] == opts[0] and optim[1] == opts[1]
    assert len(lr_sched) == 0

    # opt tuple of lists
    opts = ([opt_a], ['lr_scheduler'])
    optim, lr_sched = trainer.init_optimizers(opts)
    assert len(optim) == 1 and len(lr_sched) == 1
    assert optim[0] == opts[0][0] and lr_sched[0] == 'lr_scheduler'
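For context, the option formats that `init_optimizers` accepts above mirror what a `LightningModule.configure_optimizers` hook may return. A minimal sketch, assuming standard `torch` and `pytorch_lightning` imports; the module, layer sizes, and scheduler choice are illustrative assumptions rather than part of the test:

import torch
import pytorch_lightning as pl


class OptimizerFormatsSketch(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(28 * 28, 10)

    def configure_optimizers(self):
        opt = torch.optim.Adam(self.parameters(), lr=0.002)
        sched = torch.optim.lr_scheduler.StepLR(opt, step_size=1)
        # a tuple of lists maps to one optimizer plus one scheduler,
        # matching the "opt tuple of lists" case asserted above
        return [opt], [sched]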
Code example #12
def test_ddp_sampler_error(tmpdir):
    """
    Make sure DDP + AMP work
    :return:
    """
    if not tutils.can_run_gpu_test():
        return

    tutils.reset_seed()
    tutils.set_random_master_port()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams, force_remove_distributed_sampler=True)

    logger = tutils.get_test_tube_logger(tmpdir, True)

    trainer = Trainer(
        logger=logger,
        show_progress_bar=False,
        max_nb_epochs=1,
        gpus=[0, 1],
        distributed_backend='ddp',
        use_amp=True
    )

    with pytest.warns(UserWarning):
        trainer.get_dataloaders(model)
Code example #13
def test_multi_gpu_model_dp(tmpdir):
    """
    Make sure DP works
    :return:
    """
    tutils.reset_seed()

    if not tutils.can_run_gpu_test():
        return

    model, hparams = tutils.get_model()
    trainer_options = dict(
        default_save_path=tmpdir,
        show_progress_bar=False,
        distributed_backend='dp',
        max_nb_epochs=1,
        train_percent_check=0.1,
        val_percent_check=0.1,
        gpus='-1'
    )

    tutils.run_model_test(trainer_options, model, hparams)

    # test memory helper functions
    memory.get_memory_profile('min_max')
Code example #14
def test_multiple_val_dataloader():
    """
    Verify multiple val_dataloader
    :return:
    """
    tutils.reset_seed()

    class CurrentTestModel(LightningValidationMultipleDataloadersMixin,
                           LightningTestModelBase):
        pass

    hparams = tutils.get_hparams()
    model = CurrentTestModel(hparams)

    # logger file to get meta
    trainer_options = dict(
        max_nb_epochs=1,
        val_percent_check=0.1,
        train_percent_check=1.0,
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # verify training completed
    assert result == 1

    # verify there are 2 val loaders
    assert len(trainer.get_val_dataloaders()) == 2, \
        'Multiple val_dataloaders not initiated properly'

    # make sure predictions are good for each val set
    for dataloader in trainer.get_val_dataloaders():
        tutils.run_prediction(dataloader, trainer.model)
Code example #15
def test_comet_logger(tmpdir):
    """Verify that basic functionality of Comet.ml logger works."""
    tutils.reset_seed()

    try:
        from pytorch_lightning.logging import CometLogger
    except ModuleNotFoundError:
        return

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    comet_dir = os.path.join(tmpdir, "cometruns")

    # We test CometLogger in offline mode with local saves
    logger = CometLogger(
        save_dir=comet_dir,
        project_name="general",
        workspace="dummy-test",
    )

    trainer_options = dict(max_num_epochs=1,
                           train_percent_check=0.01,
                           logger=logger)

    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    print('result finished')
    assert result == 1, "Training failed"
Code example #16
def test_wandb_pickle(tmpdir):
    """Verify that pickling trainer with wandb logger works."""
    tutils.reset_seed()

    from pytorch_lightning.logging import WandbLogger
    wandb_dir = str(tmpdir)
    logger = WandbLogger(save_dir=wandb_dir, anonymous=True)
Code example #17
def test_running_test_after_fitting(tmpdir):
    """Verify test() on fitted model."""
    tutils.reset_seed()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    # logger file to get meta
    logger = tutils.get_test_tube_logger(tmpdir, False)

    # logger file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    trainer_options = dict(default_save_path=tmpdir,
                           show_progress_bar=False,
                           max_epochs=4,
                           train_percent_check=0.4,
                           val_percent_check=0.2,
                           test_percent_check=0.2,
                           checkpoint_callback=checkpoint,
                           logger=logger)

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    assert result == 1, 'training failed to complete'

    trainer.test()

    # test we have good test accuracy
    tutils.assert_ok_model_acc(trainer)
Code example #18
def test_running_test_without_val(tmpdir):
    """Verify `test()` works on a model with no `val_loader`."""
    tutils.reset_seed()

    class CurrentTestModel(LightningTestMixin, LightningTestModelBase):
        pass

    hparams = tutils.get_hparams()
    model = CurrentTestModel(hparams)

    # logger file to get meta
    logger = tutils.get_test_tube_logger(tmpdir, False)

    # logger file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    trainer_options = dict(show_progress_bar=False,
                           max_epochs=1,
                           train_percent_check=0.4,
                           val_percent_check=0.2,
                           test_percent_check=0.2,
                           checkpoint_callback=checkpoint,
                           logger=logger)

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    assert result == 1, 'training failed to complete'

    trainer.test()

    # test we have good test accuracy
    tutils.assert_ok_model_acc(trainer)
Code example #19
def test_early_stopping_cpu_model(tmpdir):
    """Test each of the trainer options."""
    tutils.reset_seed()

    stopping = EarlyStopping(monitor='val_loss', min_delta=0.1)
    trainer_options = dict(
        default_save_path=tmpdir,
        min_epochs=2,
        early_stop_callback=stopping,
        gradient_clip_val=1.0,
        overfit_pct=0.20,
        track_grad_norm=2,
        print_nan_grads=True,
        show_progress_bar=True,
        logger=tutils.get_test_tube_logger(tmpdir),
        train_percent_check=0.1,
        val_percent_check=0.1,
    )

    model, hparams = tutils.get_model()
    tutils.run_model_test(trainer_options,
                          model,
                          on_gpu=False,
                          early_stop=True)

    # test freeze on cpu
    model.freeze()
    model.unfreeze()
Code example #20
def test_comet_pickle(tmpdir, monkeypatch):
    """Verify that pickling trainer with comet logger works."""

    # prevent comet logger from trying to print at exit, since
    # pytest's stdout/stderr redirection breaks it
    import atexit
    monkeypatch.setattr(atexit, "register", lambda _: None)

    tutils.reset_seed()

    try:
        from pytorch_lightning.logging import CometLogger
    except ModuleNotFoundError:
        return

    # hparams = tutils.get_hparams()
    # model = LightningTestModel(hparams)

    comet_dir = os.path.join(tmpdir, "cometruns")

    # We test CometLogger in offline mode with local saves
    logger = CometLogger(
        save_dir=comet_dir,
        project_name="general",
        workspace="dummy-test",
    )

    trainer_options = dict(default_save_path=tmpdir,
                           max_epochs=1,
                           logger=logger)

    trainer = Trainer(**trainer_options)
    pkl_bytes = pickle.dumps(trainer)
    trainer2 = pickle.loads(pkl_bytes)
    trainer2.logger.log_metrics({"acc": 1.0})
Code example #21
def test_mlflow_logger(tmpdir):
    """Verify that basic functionality of mlflow logger works."""
    tutils.reset_seed()

    try:
        from pytorch_lightning.logging import MLFlowLogger
    except ModuleNotFoundError:
        return

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    mlflow_dir = os.path.join(tmpdir, "mlruns")
    logger = MLFlowLogger("test",
                          tracking_uri=f"file:{os.sep * 2}{mlflow_dir}")

    trainer_options = dict(default_save_path=tmpdir,
                           max_epochs=1,
                           train_percent_check=0.01,
                           logger=logger)
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    print('result finished')
    assert result == 1, "Training failed"
Code example #22
def test_default_logger_callbacks_cpu_model(tmpdir):
    """
    Test each of the trainer options
    :return:
    """
    tutils.reset_seed()

    trainer_options = dict(default_save_path=tmpdir,
                           max_nb_epochs=1,
                           gradient_clip_val=1.0,
                           overfit_pct=0.20,
                           print_nan_grads=True,
                           show_progress_bar=False,
                           train_percent_check=0.01,
                           val_percent_check=0.01)

    model, hparams = tutils.get_model()
    tutils.run_model_test_no_loggers(trainer_options,
                                     model,
                                     hparams,
                                     on_gpu=False)

    # test freeze on cpu
    model.freeze()
    model.unfreeze()
Code example #23
def test_comet_pickle(tmpdir):
    """Verify that pickling trainer with comet logger works."""
    tutils.reset_seed()

    try:
        from pytorch_lightning.logging import CometLogger
    except ModuleNotFoundError:
        return

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    comet_dir = os.path.join(tmpdir, "cometruns")

    # We test CometLogger in offline mode with local saves
    logger = CometLogger(
        save_dir=comet_dir,
        project_name="general",
        workspace="dummy-test",
    )

    trainer_options = dict(max_num_epochs=1, logger=logger)

    trainer = Trainer(**trainer_options)
    pkl_bytes = pickle.dumps(trainer)
    trainer2 = pickle.loads(pkl_bytes)
    trainer2.logger.log_metrics({"acc": 1.0})
Code example #24
def test_cpu_restore_training(tmpdir):
    """Verify continue training session on CPU."""
    tutils.reset_seed()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    # logger file to get meta
    test_logger_version = 10
    logger = tutils.get_test_tube_logger(tmpdir, False, version=test_logger_version)

    trainer_options = dict(
        max_epochs=8,
        val_check_interval=0.50,
        val_percent_check=0.2,
        train_percent_check=0.2,
        logger=logger,
        checkpoint_callback=ModelCheckpoint(tmpdir, save_top_k=-1)
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)
    real_global_epoch = trainer.current_epoch

    # training complete
    assert result == 1, 'training failed to complete'

    # wipe-out trainer and model
    # retrain with not much data... this simulates picking training back up after slurm
    # we want to see if the weights come back correctly
    new_logger = tutils.get_test_tube_logger(tmpdir, False, version=test_logger_version)
    trainer_options = dict(
        max_epochs=2,
        val_check_interval=0.50,
        val_percent_check=0.2,
        train_percent_check=0.2,
        logger=new_logger,
        checkpoint_callback=ModelCheckpoint(tmpdir),
    )
    trainer = Trainer(**trainer_options)
    model = LightningTestModel(hparams)

    # set the epoch start hook so we can predict before the model does the full training
    def assert_good_acc():
        assert trainer.current_epoch == real_global_epoch
        assert trainer.current_epoch >= 0

        # if model and state loaded correctly, predictions will be good even though we
        # haven't trained with the new loaded model
        trainer.model.eval()
        for dataloader in trainer.get_val_dataloaders():
            tutils.run_prediction(dataloader, trainer.model)

    model.on_train_start = assert_good_acc

    # by calling fit again, we trigger training, loading weights from the cluster
    # and our hook to predict using current model before any more weight updates
    trainer.fit(model)
Code example #25
def test_model_freeze_unfreeze():
    tutils.reset_seed()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    model.freeze()
    model.unfreeze()
Code example #26
def test_wandb_logger(tmpdir):
    """Verify that basic functionality of wandb logger works."""
    tutils.reset_seed()

    from pytorch_lightning.logging import WandbLogger

    wandb_dir = os.path.join(tmpdir, "wandb")
    logger = WandbLogger(save_dir=wandb_dir, anonymous=True)
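This example only constructs the logger. A hedged sketch of wiring it into a short run, reusing the helpers from the surrounding examples (the continuation and option values are assumptions, not part of the original test):

    # hypothetical continuation: attach the logger to a short training run
    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)
    trainer = Trainer(default_save_path=str(tmpdir), max_epochs=1,
                      train_percent_check=0.01, logger=logger)
    trainer.fit(model)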
Code example #27
def test_model_saving_loading():
    """
    Tests use case where trainer saves the model, and user loads it from tags independently
    :return:
    """
    tutils.reset_seed()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    save_dir = tutils.init_save_dir()

    # logger file to get meta
    logger = tutils.get_test_tube_logger(False)

    trainer_options = dict(
        max_nb_epochs=1,
        logger=logger,
        checkpoint_callback=ModelCheckpoint(save_dir)
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # training complete
    assert result == 1, 'training failed to complete'

    # make a prediction
    for dataloader in model.test_dataloader():
        for batch in dataloader:
            break

    x, y = batch
    x = x.view(x.size(0), -1)

    # generate preds before saving model
    model.eval()
    pred_before_saving = model(x)

    # save model
    new_weights_path = os.path.join(save_dir, 'save_test.ckpt')
    trainer.save_checkpoint(new_weights_path)

    # load new model
    tags_path = logger.experiment.get_data_path(logger.experiment.name, logger.experiment.version)
    tags_path = os.path.join(tags_path, 'meta_tags.csv')
    model_2 = LightningTestModel.load_from_metrics(weights_path=new_weights_path,
                                                   tags_csv=tags_path)
    model_2.eval()

    # make prediction
    # assert that both predictions are the same
    new_pred = model_2(x)
    assert torch.all(torch.eq(pred_before_saving, new_pred)).item() == 1

    tutils.clear_save_dir()
Code example #28
def test_single_gpu_batch_parse():
    tutils.reset_seed()

    if not tutils.can_run_gpu_test():
        return

    trainer = Trainer()

    # batch is just a tensor
    batch = torch.rand(2, 3)
    batch = trainer.transfer_batch_to_gpu(batch, 0)
    assert batch.device.index == 0 and batch.type() == 'torch.cuda.FloatTensor'

    # tensor list
    batch = [torch.rand(2, 3), torch.rand(2, 3)]
    batch = trainer.transfer_batch_to_gpu(batch, 0)
    assert batch[0].device.index == 0 and batch[0].type() == 'torch.cuda.FloatTensor'
    assert batch[1].device.index == 0 and batch[1].type() == 'torch.cuda.FloatTensor'

    # tensor list of lists
    batch = [[torch.rand(2, 3), torch.rand(2, 3)]]
    batch = trainer.transfer_batch_to_gpu(batch, 0)
    assert batch[0][0].device.index == 0 and batch[0][0].type() == 'torch.cuda.FloatTensor'
    assert batch[0][1].device.index == 0 and batch[0][1].type() == 'torch.cuda.FloatTensor'

    # tensor dict
    batch = [{'a': torch.rand(2, 3), 'b': torch.rand(2, 3)}]
    batch = trainer.transfer_batch_to_gpu(batch, 0)
    assert batch[0]['a'].device.index == 0 and batch[0]['a'].type() == 'torch.cuda.FloatTensor'
    assert batch[0]['b'].device.index == 0 and batch[0]['b'].type() == 'torch.cuda.FloatTensor'

    # tuple of tensor list and list of tensor dict
    batch = ([torch.rand(2, 3) for _ in range(2)], [{
        'a': torch.rand(2, 3),
        'b': torch.rand(2, 3)
    } for _ in range(2)])
    batch = trainer.transfer_batch_to_gpu(batch, 0)
    assert batch[0][0].device.index == 0 and batch[0][0].type() == 'torch.cuda.FloatTensor'

    assert batch[1][0]['a'].device.index == 0
    assert batch[1][0]['a'].type() == 'torch.cuda.FloatTensor'

    assert batch[1][0]['b'].device.index == 0
    assert batch[1][0]['b'].type() == 'torch.cuda.FloatTensor'
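The repeated device asserts above can be condensed. The helper below is a hypothetical utility, not part of the test suite, that performs the same check recursively over an arbitrarily nested batch:

import torch


def all_on_gpu(batch, device_index=0):
    """Recursively check that every tensor in a nested batch is on the given CUDA device."""
    if torch.is_tensor(batch):
        return batch.is_cuda and batch.device.index == device_index
    if isinstance(batch, dict):
        return all(all_on_gpu(v, device_index) for v in batch.values())
    if isinstance(batch, (list, tuple)):
        return all(all_on_gpu(v, device_index) for v in batch)
    return True

# usage: assert all_on_gpu(trainer.transfer_batch_to_gpu(batch, 0))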
Code example #29
def test_running_test_pretrained_model_ddp():
    """Verify test() on pretrained model"""
    if not tutils.can_run_gpu_test():
        return

    tutils.reset_seed()
    tutils.set_random_master_port()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    save_dir = tutils.init_save_dir()

    # exp file to get meta
    logger = tutils.get_test_tube_logger(False)

    # exp file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    trainer_options = dict(
        show_progress_bar=False,
        max_nb_epochs=1,
        train_percent_check=0.4,
        val_percent_check=0.2,
        checkpoint_callback=checkpoint,
        logger=logger,
        gpus=[0, 1],
        distributed_backend='ddp'
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    exp = logger.experiment
    logging.info(os.listdir(exp.get_data_path(exp.name, exp.version)))

    # correct result and ok accuracy
    assert result == 1, 'training failed to complete'
    pretrained_model = tutils.load_model(logger.experiment,
                                         trainer.checkpoint_callback.filepath,
                                         module_class=LightningTestModel)

    # run test set
    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)

    for dataloader in model.test_dataloader():
        tutils.run_prediction(dataloader, pretrained_model)

    tutils.clear_save_dir()
Code example #30
def test_cpu_model(tmpdir):
    """Make sure model trains on CPU."""
    tutils.reset_seed()

    trainer_options = dict(default_save_path=tmpdir,
                           show_progress_bar=False,
                           logger=tutils.get_test_tube_logger(tmpdir),
                           max_epochs=1,
                           train_percent_check=0.4,
                           val_percent_check=0.4)

    model, hparams = tutils.get_model()

    tutils.run_model_test(trainer_options, model, on_gpu=False)