Example #1
def test_model_freeze_unfreeze():
    tutils.reset_seed()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    model.freeze()
    model.unfreeze()
Example #2
def test_model_saving_loading():
    """Test the case where the trainer saves the model and the user loads it back independently from the tags file."""
    reset_seed()

    hparams = get_hparams()
    model = LightningTestModel(hparams)

    save_dir = init_save_dir()

    # logger file to get meta
    logger = get_test_tube_logger(False)
    logger.log_hyperparams(hparams)
    logger.save()

    trainer_options = dict(max_nb_epochs=1,
                           logger=logger,
                           checkpoint_callback=ModelCheckpoint(save_dir))

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # training complete
    assert result == 1, 'training failed to complete'

    # grab a single batch from the test set to make a prediction
    for dataloader in model.test_dataloader():
        for batch in dataloader:
            break
        break

    x, y = batch
    x = x.view(x.size(0), -1)  # flatten each sample to a single feature vector

    # generate preds before saving model
    model.eval()
    pred_before_saving = model(x)

    # save model
    new_weights_path = os.path.join(save_dir, 'save_test.ckpt')
    trainer.save_checkpoint(new_weights_path)

    # load new model
    tags_path = logger.experiment.get_data_path(logger.experiment.name,
                                                logger.experiment.version)
    tags_path = os.path.join(tags_path, 'meta_tags.csv')
    model_2 = LightningTestModel.load_from_metrics(
        weights_path=new_weights_path, tags_csv=tags_path)
    model_2.eval()

    # make prediction
    # assert that both predictions are the same
    new_pred = model_2(x)
    assert torch.all(torch.eq(pred_before_saving, new_pred)).item() == 1

    clear_save_dir()
Example #3
def get_model(use_test_model=False):
    # set up model with these hyperparams
    hparams = get_hparams()

    if use_test_model:
        model = LightningTestModel(hparams)
    else:
        model = LightningTemplateModel(hparams)

    return model, hparams
Example #4
def get_model(use_test_model=False, lbfgs=False):
    # set up model with these hyperparams
    hparams = get_hparams()
    if lbfgs:
        setattr(hparams, 'optimizer_name', 'lbfgs')

    if use_test_model:
        model = LightningTestModel(hparams)
    else:
        model = LightningTemplateModel(hparams)

    return model, hparams
Example #5
def test_running_test_pretrained_model_ddp():
    """Verify test() on pretrained model"""
    if not can_run_gpu_test():
        return

    reset_seed()
    set_random_master_port()

    hparams = get_hparams()
    model = LightningTestModel(hparams)

    save_dir = init_save_dir()

    # exp file to get meta
    logger = get_test_tube_logger(False)

    # exp file to get weights
    checkpoint = init_checkpoint_callback(logger)

    trainer_options = dict(show_progress_bar=False,
                           max_nb_epochs=1,
                           train_percent_check=0.4,
                           val_percent_check=0.2,
                           checkpoint_callback=checkpoint,
                           logger=logger,
                           gpus=[0, 1],
                           distributed_backend='ddp')

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    exp = logger.experiment
    print(os.listdir(exp.get_data_path(exp.name, exp.version)))

    # correct result and ok accuracy
    assert result == 1, 'training failed to complete'
    pretrained_model = load_model(logger.experiment,
                                  save_dir,
                                  module_class=LightningTestModel)

    # run test set
    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)

    for dataloader in model.test_dataloader():
        run_prediction(dataloader, pretrained_model)

    # clean up
    clear_save_dir()
Example #6
def test_testtube_logger(tmpdir):
    """Verify that basic functionality of test tube logger works."""
    tutils.reset_seed()
    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    logger = tutils.get_test_tube_logger(tmpdir, False)

    trainer_options = dict(max_num_epochs=1,
                           train_percent_check=0.01,
                           logger=logger)

    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    assert result == 1, "Training failed"
Example #7
def test_running_test_pretrained_model_ddp(tmpdir):
    """Verify `test()` on pretrained model."""
    if not tutils.can_run_gpu_test():
        return

    tutils.reset_seed()
    tutils.set_random_master_port()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    # exp file to get meta
    logger = tutils.get_test_tube_logger(tmpdir, False)

    # exp file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    trainer_options = dict(
        show_progress_bar=False,
        max_epochs=1,
        train_percent_check=0.4,
        val_percent_check=0.2,
        checkpoint_callback=checkpoint,
        logger=logger,
        gpus=[0, 1],
        distributed_backend='ddp'
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    exp = logger.experiment
    logging.info(os.listdir(exp.get_data_path(exp.name, exp.version)))

    # correct result and ok accuracy
    assert result == 1, 'training failed to complete'
    pretrained_model = tutils.load_model(logger.experiment,
                                         trainer.checkpoint_callback.filepath,
                                         module_class=LightningTestModel)

    # run test set
    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)

    for dataloader in model.test_dataloader():
        tutils.run_prediction(dataloader, pretrained_model)
Example #8
def test_neptune_logger(tmpdir):
    """Verify that basic functionality of neptune logger works."""
    tutils.reset_seed()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)
    logger = NeptuneLogger(offline_mode=True)

    trainer_options = dict(default_save_path=tmpdir,
                           max_epochs=1,
                           train_percent_check=0.01,
                           logger=logger)
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    print('result finished')
    assert result == 1, "Training failed"
Example #9
def test_tensorboard_logger(tmpdir):
    """Verify that basic functionality of Tensorboard logger works."""

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    logger = TensorBoardLogger(save_dir=tmpdir, name="tensorboard_logger_test")

    trainer_options = dict(max_epochs=1,
                           train_percent_check=0.01,
                           logger=logger)

    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    print("result finished")
    assert result == 1, "Training failed"
Example #10
def test_running_test_pretrained_model_ddp():
    """Verify test() on pretrained model"""
    if not can_run_gpu_test():
        return

    hparams = get_hparams()
    model = LightningTestModel(hparams)

    save_dir = init_save_dir()

    # exp file to get meta
    exp = get_exp(False)
    exp.argparse(hparams)
    exp.save()

    # exp file to get weights
    checkpoint = ModelCheckpoint(save_dir)

    trainer_options = dict(show_progress_bar=False,
                           max_nb_epochs=1,
                           train_percent_check=0.4,
                           val_percent_check=0.2,
                           checkpoint_callback=checkpoint,
                           experiment=exp,
                           gpus=[0, 1],
                           distributed_backend='ddp')

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # correct result and ok accuracy
    assert result == 1, 'training failed to complete'
    pretrained_model = load_model(exp,
                                  save_dir,
                                  on_gpu=True,
                                  module_class=LightningTestModel)

    # run test set
    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)

    run_prediction(model.test_dataloader, pretrained_model)

    # clean up
    clear_save_dir()
Example #11
def test_testtube_pickle():
    """Verify that pickling a trainer containing a test tube logger works"""
    hparams = get_hparams()
    model = LightningTestModel(hparams)

    save_dir = init_save_dir()

    logger = get_test_tube_logger(False)
    logger.log_hyperparams(hparams)
    logger.save()

    trainer_options = dict(max_nb_epochs=1, logger=logger)

    trainer = Trainer(**trainer_options)
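    # round-trip the trainer through pickle and check the restored logger still works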
    pkl_bytes = pickle.dumps(trainer)
    trainer2 = pickle.loads(pkl_bytes)
    trainer2.logger.log_metrics({"acc": 1.0})
Example #12
def test_custom_logger(tmpdir):
    class CustomLogger(LightningLoggerBase):
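        # minimal stub logger that records whatever the Trainer passes to it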
        def __init__(self):
            super().__init__()
            self.hparams_logged = None
            self.metrics_logged = None
            self.finalized_status = None

        @rank_zero_only
        def log_hyperparams(self, params):
            self.hparams_logged = params

        @rank_zero_only
        def log_metrics(self, metrics, step_num):
            self.metrics_logged = metrics

        @rank_zero_only
        def finalize(self, status):
            self.finalized_status = status

        @property
        def name(self):
            return "name"

        @property
        def version(self):
            return "1"

    hparams = testing_utils.get_hparams()
    model = LightningTestModel(hparams)

    logger = CustomLogger()

    trainer_options = dict(
        max_nb_epochs=1,
        train_percent_check=0.01,
        logger=logger,
        default_save_path=tmpdir
    )

    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)
    assert result == 1, "Training failed"
    assert logger.hparams_logged == hparams
    assert logger.metrics_logged != {}
    assert logger.finalized_status == "success"
Example #13
def test_running_test_pretrained_model_dp():
    """Verify test() on a pretrained model."""
    tutils.reset_seed()

    if not tutils.can_run_gpu_test():
        return

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    save_dir = tutils.init_save_dir()

    # logger file to get meta
    logger = tutils.get_test_tube_logger(False)

    # logger file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    trainer_options = dict(
        show_progress_bar=True,
        max_nb_epochs=1,
        train_percent_check=0.4,
        val_percent_check=0.2,
        checkpoint_callback=checkpoint,
        logger=logger,
        gpus=[0, 1],
        distributed_backend='dp'
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # correct result and ok accuracy
    assert result == 1, 'training failed to complete'
    pretrained_model = tutils.load_model(logger.experiment,
                                         trainer.checkpoint_callback.filepath,
                                         module_class=LightningTestModel)

    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)

    # test we have good test accuracy
    tutils.assert_ok_test_acc(new_trainer)
    tutils.clear_save_dir()
Example #14
def test_amp_single_gpu(tmpdir):
    """Make sure DDP + AMP work."""
    tutils.reset_seed()

    if not tutils.can_run_gpu_test():
        return

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(default_save_path=tmpdir,
                           show_progress_bar=True,
                           max_epochs=1,
                           gpus=1,
                           distributed_backend='ddp',
                           use_amp=True)

    tutils.run_model_test(trainer_options, model)
Example #15
def test_amp_gpu_ddp_slurm_managed(tmpdir):
    """Make sure DDP + AMP work."""
    if not tutils.can_run_gpu_test():
        return

    tutils.reset_seed()

    # simulate setting slurm flags
    tutils.set_random_master_port()
    os.environ['SLURM_LOCALID'] = str(0)

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(show_progress_bar=True,
                           max_epochs=1,
                           gpus=[0],
                           distributed_backend='ddp',
                           use_amp=True)

    # exp file to get meta
    logger = tutils.get_test_tube_logger(tmpdir, False)

    # exp file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    # add these to the trainer options
    trainer_options['checkpoint_callback'] = checkpoint
    trainer_options['logger'] = logger

    # fit model
    trainer = Trainer(**trainer_options)
    trainer.is_slurm_managing_tasks = True
    result = trainer.fit(model)

    # correct result and ok accuracy
    assert result == 1, 'amp + ddp model failed to complete'

    # test root model address
    assert trainer.resolve_root_node_address('abc') == 'abc'
    assert trainer.resolve_root_node_address('abc[23]') == 'abc23'
    assert trainer.resolve_root_node_address('abc[23-24]') == 'abc23'
    assert trainer.resolve_root_node_address(
        'abc[23-24, 45-40, 40]') == 'abc23'
Example #16
def test_no_amp_single_gpu(tmpdir):
    """Make sure DDP + AMP work."""
    tutils.reset_seed()

    if not tutils.can_run_gpu_test():
        return

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(default_save_path=tmpdir,
                           show_progress_bar=True,
                           max_num_epochs=1,
                           gpus=1,
                           distributed_backend='dp',
                           use_amp=True)

    with pytest.raises((MisconfigurationException, ModuleNotFoundError)):
        tutils.run_model_test(trainer_options, model)
Example #17
def test_testtube_pickle(tmpdir):
    """Verify that pickling a trainer containing a test tube logger works."""
    tutils.reset_seed()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    logger = tutils.get_test_tube_logger(tmpdir, False)
    logger.log_hyperparams(hparams)
    logger.save()

    trainer_options = dict(max_num_epochs=1,
                           train_percent_check=0.01,
                           logger=logger)

    trainer = Trainer(**trainer_options)
    pkl_bytes = pickle.dumps(trainer)
    trainer2 = pickle.loads(pkl_bytes)
    trainer2.logger.log_metrics({"acc": 1.0})
Example #18
def test_amp_gpu_ddp():
    """
    Make sure DDP + AMP work
    :return:
    """
    if not can_run_gpu_test():
        return

    os.environ['MASTER_PORT'] = str(np.random.randint(12000, 19000, 1)[0])

    hparams = get_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(show_progress_bar=True,
                           max_nb_epochs=1,
                           gpus=2,
                           distributed_backend='ddp',
                           use_amp=True)

    run_gpu_model_test(trainer_options, model, hparams)
Example #19
def test_mlflow_logger(tmpdir):
    """Verify that basic functionality of mlflow logger works."""
    tutils.reset_seed()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    mlflow_dir = os.path.join(tmpdir, "mlruns")
    logger = MLFlowLogger("test", tracking_uri=f"file://{mlflow_dir}")

    trainer_options = dict(default_save_path=tmpdir,
                           max_epochs=1,
                           train_percent_check=0.01,
                           logger=logger)
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    print('result finished')
    assert result == 1, "Training failed"
Example #20
def test_testtube_logger():
    """verify that basic functionality of test tube logger works"""

    hparams = get_hparams()
    model = LightningTestModel(hparams)

    save_dir = init_save_dir()

    logger = get_test_tube_logger(False)
    logger.log_hyperparams(hparams)
    logger.save()

    trainer_options = dict(max_nb_epochs=1, logger=logger)

    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    assert result == 1, "Training failed"

    clear_save_dir()
Example #21
def test_simple_cpu(tmpdir):
    """Verify continue training session on CPU."""
    tutils.reset_seed()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    # logger file to get meta
    trainer_options = dict(
        default_save_path=tmpdir,
        max_epochs=1,
        val_percent_check=0.1,
        train_percent_check=0.1,
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # training complete
    assert result == 1, 'training failed to complete'
Example #22
def test_amp_gpu_ddp():
    """
    Make sure DDP + AMP work
    :return:
    """
    if not can_run_gpu_test():
        return

    reset_seed()
    set_random_master_port()

    hparams = get_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(show_progress_bar=True,
                           max_nb_epochs=1,
                           gpus=2,
                           distributed_backend='ddp',
                           use_amp=True)

    run_gpu_model_test(trainer_options, model, hparams)
Example #23
def test_running_test_pretrained_model():
    """Verify test() on a pretrained model."""
    hparams = get_hparams()
    model = LightningTestModel(hparams)

    save_dir = init_save_dir()

    # exp file to get meta
    exp = get_exp(False)
    exp.argparse(hparams)
    exp.save()

    # exp file to get weights
    checkpoint = ModelCheckpoint(save_dir)

    trainer_options = dict(
        show_progress_bar=False,
        max_nb_epochs=1,
        train_percent_check=0.4,
        val_percent_check=0.2,
        checkpoint_callback=checkpoint,
        experiment=exp
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # correct result and ok accuracy
    assert result == 1, 'training failed to complete'
    pretrained_model = load_model(
        exp, save_dir, on_gpu=False, module_class=LightningTestModel
    )

    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)

    # test we have good test accuracy
    assert_ok_test_acc(new_trainer)
    clear_save_dir()
Example #24
def test_load_model_from_checkpoint(tmpdir):
    """Verify loading a model back from a saved checkpoint."""
    tutils.reset_seed()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(
        show_progress_bar=False,
        max_epochs=2,
        train_percent_check=0.4,
        val_percent_check=0.2,
        checkpoint_callback=ModelCheckpoint(tmpdir, save_top_k=-1),
        logger=False,
        default_save_path=tmpdir,
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # correct result and ok accuracy
    assert result == 1, 'training failed to complete'

    # load last checkpoint
    last_checkpoint = os.path.join(trainer.checkpoint_callback.filepath,
                                   "_ckpt_epoch_1.ckpt")
    if not os.path.isfile(last_checkpoint):
        last_checkpoint = os.path.join(trainer.checkpoint_callback.filepath,
                                       "_ckpt_epoch_0.ckpt")
    pretrained_model = LightningTestModel.load_from_checkpoint(last_checkpoint)

    # test that hparams loaded correctly
    for k, v in vars(hparams).items():
        assert getattr(pretrained_model.hparams, k) == v

    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)

    # test we have good test accuracy
    tutils.assert_ok_model_acc(new_trainer)
Example #25
def test_testtube_logger():
    """
    verify that basic functionality of test tube logger works
    """
    reset_seed()
    hparams = testing_utils.get_hparams()
    model = LightningTestModel(hparams)

    save_dir = testing_utils.init_save_dir()

    logger = testing_utils.get_test_tube_logger(False)

    trainer_options = dict(max_nb_epochs=1,
                           train_percent_check=0.01,
                           logger=logger)

    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    assert result == 1, "Training failed"

    testing_utils.clear_save_dir()
Example #26
def test_amp_single_gpu():
    """
    Make sure DDP + AMP work
    :return:
    """
    testing_utils.reset_seed()

    if not testing_utils.can_run_gpu_test():
        return

    hparams = testing_utils.get_hparams()
    model = LightningTestModel(hparams)

    trainer_options = dict(
        show_progress_bar=True,
        max_nb_epochs=1,
        gpus=1,
        distributed_backend='ddp',
        use_amp=True
    )

    testing_utils.run_gpu_model_test(trainer_options, model, hparams)
Example #27
def test_running_test_pretrained_model():
    """Verify test() on a pretrained model."""
    reset_seed()
    hparams = get_hparams()
    model = LightningTestModel(hparams)

    save_dir = init_save_dir()

    # logger file to get meta
    logger = get_test_tube_logger(False)
    logger.log_hyperparams(hparams)
    logger.save()

    # logger file to get weights
    checkpoint = ModelCheckpoint(save_dir)

    trainer_options = dict(show_progress_bar=False,
                           max_nb_epochs=1,
                           train_percent_check=0.4,
                           val_percent_check=0.2,
                           checkpoint_callback=checkpoint,
                           logger=logger)

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # correct result and ok accuracy
    assert result == 1, 'training failed to complete'
    pretrained_model = load_model(logger.experiment,
                                  save_dir,
                                  module_class=LightningTestModel)

    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)

    # test we have good test accuracy
    assert_ok_test_acc(new_trainer)
    clear_save_dir()
Example #28
def test_running_test_pretrained_model(tmpdir):
    """Verify test() on a pretrained model."""
    tutils.reset_seed()
    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    save_dir = tmpdir

    # logger file to get meta
    logger = tutils.get_test_tube_logger(save_dir, False)

    # logger file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    trainer_options = dict(
        show_progress_bar=False,
        max_nb_epochs=4,
        train_percent_check=0.4,
        val_percent_check=0.2,
        checkpoint_callback=checkpoint,
        logger=logger
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # correct result and ok accuracy
    assert result == 1, 'training failed to complete'
    pretrained_model = tutils.load_model(
        logger.experiment, trainer.checkpoint_callback.filepath, module_class=LightningTestModel
    )

    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)

    # test we have good test accuracy
    tutils.assert_ok_test_acc(new_trainer)
Example #29
def test_mlflow_pickle(tmpdir):
    """Verify that pickling trainer with mlflow logger works."""
    tutils.reset_seed()

    try:
        from pytorch_lightning.logging import MLFlowLogger
    except ModuleNotFoundError:
        return

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    mlflow_dir = os.path.join(tmpdir, "mlruns")

    logger = MLFlowLogger("test", f"file://{mlflow_dir}")

    trainer_options = dict(max_num_epochs=1, logger=logger)

    trainer = Trainer(**trainer_options)
    pkl_bytes = pickle.dumps(trainer)
    trainer2 = pickle.loads(pkl_bytes)
    trainer2.logger.log_metrics({"acc": 1.0})
Example #30
def test_load_model_from_checkpoint():
    """Verify loading a model back from a saved checkpoint."""
    tutils.reset_seed()

    hparams = tutils.get_hparams()
    model = LightningTestModel(hparams)

    save_dir = tutils.init_save_dir()

    trainer_options = dict(
        show_progress_bar=False,
        max_nb_epochs=1,
        train_percent_check=0.4,
        val_percent_check=0.2,
        checkpoint_callback=True,
        logger=False,
        default_save_path=save_dir
    )

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # correct result and ok accuracy
    assert result == 1, 'training failed to complete'
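    # the default checkpoint callback names the first saved epoch `_ckpt_epoch_0.ckpt`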
    pretrained_model = LightningTestModel.load_from_checkpoint(
        os.path.join(trainer.checkpoint_callback.filepath, "_ckpt_epoch_0.ckpt")
    )

    # test that hparams loaded correctly
    for k, v in vars(hparams).items():
        assert getattr(pretrained_model.hparams, k) == v

    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)

    # test we have good test accuracy
    tutils.assert_ok_test_acc(new_trainer)
    tutils.clear_save_dir()