def main():
    """Train a 9-layer convolutional exponential-spline flow on MNIST.

    Builds the run configuration, the data loaders, the model, and the
    optimizer/scheduler pair, then hands everything to Experiment.run().
    """
    config = dict(
        name='9L Conv Exponential Spline MNIST (lr1e-3)',
        eval_epochs=1,
        sample_epochs=1,
        log_interval=100,
        lr=1e-3,
        num_layers=9,
        batch_size=100,
        modified_grad=False,
        add_recon_grad=False,
        sym_recon_grad=False,
        activation='Spline',
        recon_loss_weight=1.0,
        log_timing=True,
    )

    train_loader, val_loader, test_loader = load_data(
        batch_size=config['batch_size'])

    model = create_model(
        num_layers=config['num_layers'],
        sym_recon_grad=config['sym_recon_grad'],
        activation=config['activation'],
        recon_loss_weight=config['recon_loss_weight']).to('cuda')

    optimizer = optim.Adam(model.parameters(), lr=config['lr'],
                           betas=(0.9, 0.999))
    # gamma=1.0 keeps the learning rate constant; the scheduler is a no-op.
    scheduler = StepLR(optimizer, step_size=1, gamma=1.0)

    experiment = Experiment(model, train_loader, val_loader, test_loader,
                            optimizer, scheduler, **config)
    experiment.run()
def main():
    """Timing experiment for a fully-connected SNF on 28x28 MNIST-shaped data.

    Assembles the config, data, model, and optimizer, then runs the
    Experiment (which logs batch timings since 'log_timing' is True).
    """
    config = dict(
        name='Timing Experiment SNF FC',
        eval_epochs=1,
        sample_epochs=1,
        log_interval=10000,
        lr=1e-4,
        batch_size=100,
        modified_grad=True,
        add_recon_grad=True,
        sym_recon_grad=False,
        only_R_recon=False,
        actnorm=False,
        split_prior=False,
        activation='None',
        log_timing=True,
        epochs=100,
    )

    loaders = load_data(batch_size=config['batch_size'])
    train_loader, val_loader, test_loader = loaders

    model = create_model(data_size=(1, 28, 28), layer='fc').to('cuda')

    optimizer = optim.Adam(model.parameters(), lr=config['lr'],
                           betas=(0.9, 0.999))
    # Constant learning rate: step_size=1 with gamma=1.0 never decays.
    scheduler = StepLR(optimizer, step_size=1, gamma=1.0)

    experiment = Experiment(model, train_loader, val_loader, test_loader,
                            optimizer, scheduler, **config)
    experiment.run()
# Example #3
# 0
def run_timing_experiment(name, snf_name, config, sz, m, results):
    """Run a single timing experiment and record its batch-time statistics.

    Trains a model of layer type `m` on synthetic data of size `sz`, then
    appends the parameter count and the mean/std batch times to the
    `results[f'{m} {snf_name}']` entry (assumed to already exist with
    'n_params', 'mean', and 'std' lists).

    Returns the (mutated) `results` mapping for convenience.
    """
    train_loader, val_loader, test_loader = load_data(
        batch_size=config['batch_size'],
        im_size=sz,
        n_train=50_000,
        n_val=100,
        n_test=100)
    model = create_model(data_size=sz, layer=m).to('cuda')

    optimizer = optim.Adam(model.parameters(), lr=config['lr'],
                           betas=(0.9, 0.999))
    # gamma=1.0 -> no learning-rate decay during the timing run.
    scheduler = StepLR(optimizer, step_size=1, gamma=1.0)

    experiment = Experiment(model, train_loader, val_loader, test_loader,
                            optimizer, scheduler, **config)
    experiment.run()

    mean_time = experiment.summary['Batch Time Mean']
    std_time = experiment.summary['Batch Time Std']
    print(f"{name}: {mean_time} +/- {std_time}")

    entry = results[f'{m} {snf_name}']
    entry['n_params'].append(sz[0] * sz[1] * sz[2])
    entry['mean'].append(mean_time)
    entry['std'].append(std_time)

    return results
# Example #4
# 0
def main():
    """Train a 2-block Glow SNF (5x5 kernels) on MNIST with GECO-style recon.

    Wires together config, loaders, model, optimizer, and scheduler, then
    delegates the training loop to Experiment.run().
    """
    config = dict(
        name='2L-4K-16W Glow SNF (5x5 Kernel) MNIST w/ GECO',
        eval_epochs=1,
        sample_epochs=1,
        log_interval=100,
        lr=1e-3,
        num_blocks=2,
        block_size=4,
        width=16,
        batch_size=100,
        modified_grad=True,
        add_recon_grad=True,
        sym_recon_grad=False,
        actnorm=True,
        split_prior=True,
        activation='None',
        recon_loss_weight=1.0,
        recon_loss_lr=1e-3,
        recon_alpha=0.9,
        sample_true_inv=True,
        plot_recon=True,
        vis_epochs=1,
        log_timing=False,
        epochs=1000,
    )

    train_loader, val_loader, test_loader = load_data(
        data_aug=False, batch_size=config['batch_size'])

    model = create_model(num_blocks=config['num_blocks'],
                         block_size=config['block_size'],
                         width=config['width'],
                         sym_recon_grad=config['sym_recon_grad'],
                         actnorm=config['actnorm'],
                         split_prior=config['split_prior'],
                         recon_loss_weight=config['recon_loss_weight'],
                         recon_loss_lr=config['recon_loss_lr'],
                         recon_alpha=config['recon_alpha']).to('cuda')

    optimizer = optim.Adam(model.parameters(), lr=config['lr'],
                           betas=(0.9, 0.999))
    # No-op scheduler: gamma=1.0 leaves the learning rate untouched.
    scheduler = StepLR(optimizer, step_size=1, gamma=1.0)

    experiment = Experiment(model, train_loader, val_loader, test_loader,
                            optimizer, scheduler, **config)
    experiment.run()
# Example #5
# 0
def main():
    """Train a 3-block, 48-step exact Glow model on 32x32 ImageNet.

    Exact-gradient setup: modified/recon gradients are disabled and the
    reconstruction loss weight is zero.
    """
    config = dict(
        name='3L-48K Glow Exact Imagenet32',
        eval_epochs=1,
        sample_epochs=1,
        log_interval=100,
        lr=1e-3,
        num_blocks=3,
        block_size=48,
        batch_size=64,
        modified_grad=False,
        add_recon_grad=False,
        sym_recon_grad=False,
        actnorm=True,
        split_prior=True,
        activation='None',
        recon_loss_weight=0.0,
        sample_true_inv=False,
        plot_recon=False,
        grad_clip_norm=10_000,
        warmup_epochs=0,
    )

    train_loader, val_loader, test_loader = load_data(
        data_aug=False,
        resolution=32,
        data_dir='data/imagenet',
        batch_size=config['batch_size'])

    model = create_model(
        num_blocks=config['num_blocks'],
        block_size=config['block_size'],
        sym_recon_grad=config['sym_recon_grad'],
        actnorm=config['actnorm'],
        split_prior=config['split_prior'],
        recon_loss_weight=config['recon_loss_weight']).to('cuda')

    optimizer = optim.Adam(model.parameters(), lr=config['lr'],
                           betas=(0.9, 0.999))
    # Learning rate is held constant (gamma=1.0).
    scheduler = StepLR(optimizer, step_size=1, gamma=1.0)

    experiment = Experiment(model, train_loader, val_loader, test_loader,
                            optimizer, scheduler, **config)
    experiment.run()
def main():
    """Train a 2-block, 16-step Glow SNF (1x1) on MNIST with 100x recon loss.

    Same scaffolding as the other experiments: build config, loaders, model,
    optimizer/scheduler, then run the Experiment.
    """
    config = dict(
        name='2L-16K Glow SNF(1x1) recon 100x MNIST (lr1e-3)',
        eval_epochs=1,
        sample_epochs=1,
        log_interval=100,
        lr=1e-3,
        num_blocks=2,
        block_size=16,
        batch_size=100,
        modified_grad=True,
        add_recon_grad=True,
        sym_recon_grad=False,
        actnorm=True,
        split_prior=True,
        activation='None',
        recon_loss_weight=100.0,
        sample_true_inv=True,
        plot_recon=True,
    )

    train_loader, val_loader, test_loader = load_data(
        batch_size=config['batch_size'])

    model = create_model(
        num_blocks=config['num_blocks'],
        block_size=config['block_size'],
        sym_recon_grad=config['sym_recon_grad'],
        actnorm=config['actnorm'],
        split_prior=config['split_prior'],
        recon_loss_weight=config['recon_loss_weight']).to('cuda')

    optimizer = optim.Adam(model.parameters(), lr=config['lr'],
                           betas=(0.9, 0.999))
    # step_size=1 with gamma=1.0 means the LR never decays.
    scheduler = StepLR(optimizer, step_size=1, gamma=1.0)

    experiment = Experiment(model, train_loader, val_loader, test_loader,
                            optimizer, scheduler, **config)
    experiment.run()
        # NOTE(review): truncated fragment — the enclosing `def main():` and the
        # head of its `config` dict are missing (likely lost when these example
        # snippets were concatenated), so this span is not valid Python as-is.
        # The tail below mirrors the Imagenet32 example above (same load_data
        # arguments), but with a configurable StepLR; restore the missing
        # header before using it. TODO: confirm the intended config head.
        'warmup_epochs': 0,
        'step_epochs': 1,
        'step_gamma': 1.0,
    }

    train_loader, val_loader, test_loader = load_data(
        data_aug=False,
        resolution=32,
        data_dir='data/imagenet',
        batch_size=config['batch_size'])

    model = create_model(
        num_blocks=config['num_blocks'],
        block_size=config['block_size'],
        sym_recon_grad=config['sym_recon_grad'],
        actnorm=config['actnorm'],
        split_prior=config['split_prior'],
        recon_loss_weight=config['recon_loss_weight']).to('cuda')

    optimizer = optim.Adam(model.parameters(),
                           lr=config['lr'],
                           betas=(0.9, 0.999))
    # Unlike the other examples, the decay schedule is config-driven here.
    scheduler = StepLR(optimizer,
                       step_size=config['step_epochs'],
                       gamma=config['step_gamma'])

    experiment = Experiment(model, train_loader, val_loader, test_loader,
                            optimizer, scheduler, **config)

    experiment.run()