# Example #1
def test_gan(tmpdir):
    """Smoke-test GAN: a single fast_dev_run fit + test cycle."""
    reset_seed()

    gan = GAN(data_dir=tmpdir)
    dev_trainer = pl.Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    dev_trainer.fit(gan)
    dev_trainer.test(gan)
# Example #2
def test_ae(tmpdir):
    """Smoke-test AE: one fast_dev_run fit + test pass."""
    reset_seed()

    autoencoder = AE(batch_size=2, data_dir=tmpdir)
    dev_trainer = pl.Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    dev_trainer.fit(autoencoder)
    dev_trainer.test(autoencoder)
def test_closed_form_lwca_lr_with_nz_start_lr_nz_eta_min(tmpdir):
    """Step-wise LinearWarmupCosineAnnealingLR must agree with its
    closed-form counterpart when warmup_start_lr and eta_min are non-zero."""
    reset_seed()

    max_epochs = 115
    # identical hyperparameters for both scheduler variants
    sched_kwargs = dict(
        warmup_epochs=15,
        max_epochs=max_epochs,
        warmup_start_lr=0.009,
        eta_min=0.003,
    )

    helper = TestLRScheduler(base_lr=0.07, multiplier=32)
    stepwise = LinearWarmupCosineAnnealingLR(
        optimizer=helper.optimizer, **sched_kwargs
    )
    closed_form = LinearWarmupCosineAnnealingLR(
        optimizer=helper.closed_form_opt, **sched_kwargs
    )

    helper._test_against_closed_form(stepwise, closed_form, epochs=max_epochs)
# Example #4
def test_ae(tmpdir):
    """Smoke-test BasicAE via a single fast_dev_run fit/test cycle."""
    reset_seed()

    autoencoder = BasicAE()
    dev_trainer = pl.Trainer(fast_dev_run=True)
    dev_trainer.fit(autoencoder)
    dev_trainer.test()
def test_lars_momentum_greater_than_zero(tmpdir):
    """LARS must reject a negative momentum and accept a positive one.

    Only construction success/failure is under test, so the optimizer
    instances are no longer bound to an unused local (lint F841).
    """
    reset_seed()

    model = LitMNIST()
    with pytest.raises(ValueError, match='Invalid momentum.*'):
        LARS(model.parameters(), lr=0.003, momentum=-0.01)

    # a valid momentum must construct without raising
    LARS(model.parameters(), lr=0.003, momentum=0.01)
def test_lars_eta_greater_than_zero(tmpdir):
    """LARS must reject a negative eta (trust coefficient) and accept a positive one.

    Only construction success/failure is under test, so the optimizer
    instances are no longer bound to an unused local (lint F841).
    """
    reset_seed()

    model = LitMNIST()
    with pytest.raises(ValueError, match='Invalid LARS coefficient.*'):
        LARS(model.parameters(), lr=0.003, eta=-0.01)

    # a valid eta must construct without raising
    LARS(model.parameters(), lr=0.003, eta=0.01)
def test_lars_lr_greater_than_zero(tmpdir):
    """LARS must reject a negative learning rate and accept a positive one.

    Only construction success/failure is under test, so the optimizer
    instances are no longer bound to an unused local (lint F841).
    """
    reset_seed()

    model = LitMNIST()
    with pytest.raises(ValueError, match='Invalid learning rate.*'):
        LARS(model.parameters(), lr=-0.5)

    # a valid learning rate must construct without raising
    LARS(model.parameters(), lr=0.003)
def test_lars_weight_decay_greater_than_zero(tmpdir):
    """LARS must reject a negative weight_decay and accept a positive one.

    Only construction success/failure is under test, so the optimizer
    instances are no longer bound to an unused local (lint F841).
    """
    reset_seed()

    model = LitMNIST()
    with pytest.raises(ValueError, match='Invalid weight_decay.*'):
        LARS(model.parameters(), lr=0.003, weight_decay=-0.01)

    # a valid weight_decay must construct without raising
    LARS(model.parameters(), lr=0.003, weight_decay=0.01)
# Example #9
def test_amdim(tmpdir):
    """Overfit AMDIM (resnet18 encoder) on two batches; loss must be positive."""
    reset_seed()

    net = AMDIM(encoder='resnet18', data_dir=tmpdir, batch_size=2, online_ft=True)
    fit_trainer = pl.Trainer(max_epochs=1, overfit_batches=2, default_root_dir=tmpdir)
    fit_trainer.fit(net)

    assert fit_trainer.callback_metrics['loss'] > 0
# Example #10
def test_vae(tmpdir):
    """fast_dev_run VAE fit/test; the reported test_loss must be positive."""
    reset_seed()

    vae = VAE(batch_size=2, data_dir=tmpdir)
    dev_trainer = pl.Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    dev_trainer.fit(vae)
    test_results = dev_trainer.test(vae)[0]

    assert test_results['test_loss'] > 0, 'VAE failed'
# Example #11
def test_vae(tmpdir):
    """fast_dev_run BasicVAE fit/test; callback loss must be positive."""
    reset_seed()

    vae = BasicVAE()
    dev_trainer = pl.Trainer(fast_dev_run=True)
    dev_trainer.fit(vae)
    dev_trainer.test()

    assert dev_trainer.callback_metrics['loss'] > 0, 'VAE failed'
# Example #12
def test_vae(tmpdir):
    """fast_dev_run VAE fit/test; callback loss must be positive."""
    reset_seed()

    vae = VAE(batch_size=2, data_dir=tmpdir)
    dev_trainer = pl.Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    dev_trainer.fit(vae)
    dev_trainer.test(vae)

    assert dev_trainer.callback_metrics['loss'] > 0, 'VAE failed'
# Example #13
def test_simclr(tmpdir):
    """Overfit SimCLR on two CIFAR10 batches; loss must be positive."""
    reset_seed()

    dm = CIFAR10DataModule(tmpdir, num_workers=0)
    dm.train_transforms = SimCLRTrainDataTransform(32)
    dm.val_transforms = SimCLREvalDataTransform(32)

    net = SimCLR(batch_size=2, data_dir=tmpdir, datamodule=dm, online_ft=True)
    fit_trainer = pl.Trainer(max_epochs=1, overfit_batches=2, default_root_dir=tmpdir)
    fit_trainer.fit(net)

    assert fit_trainer.callback_metrics['loss'] > 0
# Example #14
def test_moco(tmpdir):
    """Overfit MocoV2 on two CIFAR10 batches with its LR-scheduler callback."""
    reset_seed()

    dm = CIFAR10DataModule(tmpdir, num_workers=0)
    dm.train_transforms = Moco2TrainCIFAR10Transforms()
    dm.val_transforms = Moco2EvalCIFAR10Transforms()

    net = MocoV2(batch_size=2, data_dir=tmpdir, datamodule=dm, online_ft=True)
    fit_trainer = pl.Trainer(
        max_epochs=1,
        overfit_batches=2,
        default_root_dir=tmpdir,
        callbacks=[MocoLRScheduler()],
    )
    fit_trainer.fit(net)

    assert fit_trainer.callback_metrics['loss'] > 0
# Example #15
def test_cpcv2(tmpdir):
    """Overfit CPCV2 (resnet18 encoder) on two CIFAR10 batches; loss positive."""
    reset_seed()

    dm = CIFAR10DataModule(data_dir=tmpdir, num_workers=0)
    dm.train_transforms = CPCTrainTransformsCIFAR10()
    dm.val_transforms = CPCEvalTransformsCIFAR10()

    net = CPCV2(
        encoder='resnet18',
        batch_size=2,
        data_dir=tmpdir,
        online_ft=True,
        datamodule=dm,
    )
    fit_trainer = pl.Trainer(max_epochs=1, overfit_batches=2, default_root_dir=tmpdir)
    fit_trainer.fit(net)

    assert fit_trainer.callback_metrics['loss'] > 0
def test_amdim(tmpdir):
    """fast_dev_run AMDIM; the progress-bar loss must be positive."""
    reset_seed()

    net = AMDIM(encoder='resnet18', data_dir=tmpdir, batch_size=2, online_ft=True)
    dev_trainer = pl.Trainer(fast_dev_run=True, max_epochs=1, default_root_dir=tmpdir)
    dev_trainer.fit(net)

    assert float(dev_trainer.progress_bar_dict['loss']) > 0
# Example #17
def test_mnist(tmpdir):
    """Train/test LitMNIST on ~1% of the data; final loss must be <= 2.0."""
    reset_seed()

    net = LitMNIST(data_dir=tmpdir)
    quick_trainer = pl.Trainer(
        max_epochs=1,
        limit_train_batches=0.01,
        limit_val_batches=0.01,
        limit_test_batches=0.01,
        default_root_dir=tmpdir,
    )
    quick_trainer.fit(net)
    quick_trainer.test(net)

    assert quick_trainer.callback_metrics['loss'] <= 2.0, 'mnist failed'
# Example #18
def test_basic_ae_encoder(tmpdir):
    """AEEncoder must map a (B, C, W, H) image batch to (B, latent_dim)."""
    reset_seed()

    hidden_dim, latent_dim = 128, 2
    width = height = 28
    batch_size, channels = 16, 1

    encoder = AEEncoder(hidden_dim, latent_dim, width, height)
    images = torch.randn(batch_size, channels, width, height)
    latents = encoder(images)

    assert latents.shape == (batch_size, latent_dim)
# Example #19
def test_mnist(tmpdir):
    """Smoke-test LitMNISTModel on ~1% of the data; loss must end <= 2.0.

    The ``*_percent_check`` Trainer arguments are deprecated in PyTorch
    Lightning; use the ``limit_*_batches`` equivalents, matching the other
    MNIST test in this file.
    """
    reset_seed()

    params = {'hidden_dim': 128, 'batch_size': 32, 'learning_rate': 0.001}
    model = LitMNISTModel(Namespace(**params))
    trainer = pl.Trainer(limit_train_batches=0.01,
                         limit_val_batches=0.01,
                         max_epochs=1,
                         limit_test_batches=0.01)
    trainer.fit(model)
    trainer.test()
    loss = trainer.callback_metrics['loss']

    assert loss <= 2.0, 'mnist failed'
def test_lwca_lr(tmpdir):
    """Compare LinearWarmupCosineAnnealingLR against a hand-built target
    schedule for two param groups (base lr and base lr * multiplier)."""
    reset_seed()

    warmup_start_lr = 0.0
    base_lr = 0.4
    eta_min = 0.0
    warmup_epochs = 6
    max_epochs = 15
    multiplier = 10

    cosine_epochs = max_epochs - warmup_epochs

    def expected_schedule(group_base_lr):
        # linear warmup followed by cosine annealing down to eta_min
        warmup = np.linspace(warmup_start_lr, group_base_lr, warmup_epochs)
        cosine = np.array([
            eta_min + 0.5 * (group_base_lr - eta_min) * (
                1 + math.cos(math.pi * t / cosine_epochs)
            ) for t in np.arange(cosine_epochs)
        ])
        return list(np.concatenate((warmup, cosine)))

    # one target schedule per param group
    targets = [
        expected_schedule(base_lr),
        expected_schedule(base_lr * multiplier),
    ]

    helper = TestLRScheduler(base_lr=base_lr, multiplier=multiplier)
    scheduler = LinearWarmupCosineAnnealingLR(
        optimizer=helper.optimizer,
        warmup_epochs=warmup_epochs,
        max_epochs=max_epochs,
        warmup_start_lr=warmup_start_lr,
        eta_min=eta_min,
    )

    helper._test_lr(scheduler, targets, epochs=max_epochs)
def test_simclr(tmpdir):
    """fast_dev_run SimCLR with a CIFAR10 datamodule; loss must be positive."""
    reset_seed()

    dm = CIFAR10DataModule(tmpdir, num_workers=0, batch_size=2)
    dm.train_transforms = SimCLRTrainDataTransform(32)
    dm.val_transforms = SimCLREvalDataTransform(32)

    net = SimCLR(batch_size=2, num_samples=dm.num_samples)
    dev_trainer = pl.Trainer(fast_dev_run=True, max_epochs=1, default_root_dir=tmpdir)
    dev_trainer.fit(net, dm)

    assert float(dev_trainer.progress_bar_dict['loss']) > 0
# Example #22
def test_byol(tmpdir):
    """Two quick BYOL steps; the progress-bar loss must stay under 1.0."""
    reset_seed()

    dm = CIFAR10DataModule(data_dir=tmpdir, num_workers=0, batch_size=2)
    dm.train_transforms = CPCTrainTransformsCIFAR10()
    dm.val_transforms = CPCEvalTransformsCIFAR10()

    net = BYOL(data_dir=tmpdir, batch_size=2, datamodule=dm)
    dev_trainer = pl.Trainer(
        fast_dev_run=True,
        max_epochs=1,
        max_steps=2,
        default_root_dir=tmpdir,
    )
    dev_trainer.fit(net)

    assert float(dev_trainer.progress_bar_dict['loss']) < 1.0
def test_moco(tmpdir):
    """fast_dev_run MocoV2 with its LR-scheduler callback; loss positive."""
    reset_seed()

    dm = CIFAR10DataModule(tmpdir, num_workers=0, batch_size=2)
    dm.train_transforms = Moco2TrainCIFAR10Transforms()
    dm.val_transforms = Moco2EvalCIFAR10Transforms()

    net = MocoV2(data_dir=tmpdir, batch_size=2, datamodule=dm, online_ft=True)
    dev_trainer = pl.Trainer(
        fast_dev_run=True,
        max_epochs=1,
        default_root_dir=tmpdir,
        callbacks=[MocoLRScheduler()],
    )
    dev_trainer.fit(net)

    assert float(dev_trainer.progress_bar_dict['loss']) > 0
# Example #24
def test_basic_vae_components(tmpdir):
    """Encoder must emit matching mu/sigma shapes; Decoder must restore the
    input's flattened shape."""
    reset_seed()

    hidden_dim, latent_dim = 128, 2
    width = height = 28
    batch_size, channels = 16, 1

    encoder = Encoder(hidden_dim, latent_dim, channels, width, height)
    images = torch.randn(batch_size, channels, width, height)
    mu, sigma = encoder(images)

    assert mu.shape == sigma.shape

    decoder = Decoder(hidden_dim, latent_dim, width, height, channels)
    reconstruction = decoder(mu)

    assert reconstruction.view(-1).shape == images.view(-1).shape
def test_linear_regression_model(tmpdir):
    """Fit LinearRegression on exactly-linear data and recover the weights.

    Data is generated as ``y = X @ [1, 2] + 3``. The original assertion used
    exact float equality (``coeffs[0] == 1``), which is brittle for
    gradient-trained parameters; compare with a small tolerance instead.
    """
    reset_seed()

    # --------------------
    # numpy data: y = X @ [1, 2] + 3
    # --------------------
    X = np.array([[1.0, 1], [1, 2], [2, 2], [2, 3]])
    y = np.dot(X, np.array([1.0, 2])) + 3
    y = y[:, np.newaxis]
    loader = DataLoader(SklearnDataset(X, y), batch_size=2)

    model = LinearRegression(input_dim=2, learning_rate=1.0)
    trainer = pl.Trainer(max_epochs=200,
                         default_root_dir=tmpdir,
                         progress_bar_refresh_rate=0)
    trainer.fit(model, loader, loader)

    coeffs = model.linear.weight.detach().numpy().flatten()
    assert len(coeffs) == 2
    # tolerance instead of exact equality: training converges approximately
    assert np.allclose(coeffs, [1.0, 2.0], atol=1e-3)
    trainer.test(model, loader)
# Example #26
def test_cpcv2(tmpdir):
    """fast_dev_run CPCV2; the reported val_nce metric must be positive."""
    reset_seed()

    dm = CIFAR10DataModule(data_dir=tmpdir, num_workers=0, batch_size=2)
    dm.train_transforms = CPCTrainTransformsCIFAR10()
    dm.val_transforms = CPCEvalTransformsCIFAR10()

    net = CPCV2(
        encoder='resnet18',
        data_dir=tmpdir,
        batch_size=2,
        online_ft=True,
        datamodule=dm,
    )
    dev_trainer = pl.Trainer(fast_dev_run=True, max_epochs=1, default_root_dir=tmpdir)
    dev_trainer.fit(net)

    assert dev_trainer.progress_bar_metrics['val_nce'] > 0
def test_dataloader(tmpdir):
    """SklearnDataModule must split X/y (or pass explicit val/test sets
    through untouched) in all four train/val/test configurations."""
    reset_seed()

    X = np.random.rand(5, 2)
    y = np.random.rand(5)
    x_val = np.random.rand(2, 2)
    y_val = np.random.rand(2)
    x_test = np.random.rand(1, 2)
    y_test = np.random.rand(1)

    shuffled_X, shuffled_y = sk_shuffle(X, y, random_state=1234)

    def all_loaders(dm):
        # convenience: the three dataloaders as one tuple
        return dm.train_dataloader(), dm.val_dataloader(), dm.test_dataloader()

    # train only: val and test carved out of X by split fractions
    train, val, test = all_loaders(
        SklearnDataModule(X=X, y=y, val_split=0.2, test_split=0.2, random_state=1234)
    )
    assert np.all(train.dataset.X == shuffled_X[2:])
    assert np.all(val.dataset.X == shuffled_X[0])
    assert np.all(test.dataset.X == shuffled_X[1])
    assert np.all(train.dataset.Y == shuffled_y[2:])

    # explicit val set; test still split from X
    train, val, test = all_loaders(
        SklearnDataModule(X=X, y=y, x_val=x_val, y_val=y_val,
                          test_split=0.2, random_state=1234)
    )
    assert np.all(train.dataset.X == shuffled_X[1:])
    assert np.all(val.dataset.X == x_val)
    assert np.all(test.dataset.X == shuffled_X[0])

    # explicit test set; val still split from X
    train, val, test = all_loaders(
        SklearnDataModule(X=X, y=y, x_test=x_test, y_test=y_test,
                          val_split=0.2, random_state=1234)
    )
    assert np.all(train.dataset.X == shuffled_X[1:])
    assert np.all(val.dataset.X == shuffled_X[0])
    assert np.all(test.dataset.X == x_test)

    # everything explicit: X is not split at all
    train, val, test = all_loaders(
        SklearnDataModule(X, y, x_val, y_val, x_test, y_test, random_state=1234)
    )
    assert np.all(train.dataset.X == shuffled_X)
    assert np.all(val.dataset.X == x_val)
    assert np.all(test.dataset.X == x_test)
# Example #28
def test_gan(tmpdir):
    # Smoke-test BasicGAN: one epoch on ~1% of the training data.
    # NOTE(review): `train_percent_check` is a deprecated Trainer argument in
    # newer PyTorch Lightning (renamed `limit_train_batches`) — confirm the
    # pinned version still accepts it.
    reset_seed()

    model = BasicGAN()
    trainer = pl.Trainer(train_percent_check=0.01, max_epochs=1)
    trainer.fit(model)