Code example #1
0
                    default=1000000,
                    help="Number of training iterations")
# Checkpointing cadence and replicate-run ID.
parser.add_argument('--iter_save', type=int, default=10000,
                    help="Save model every n iterations")
parser.add_argument('--run', type=int, default=0,
                    help="Run ID. In case you want to run replicates")
args = parser.parse_args()

# Build a unique model name such as "model=fsvae_run=0003".
model_name = '_'.join(
    fmt.format(val)
    for fmt, val in [('model={:s}', 'fsvae'), ('run={:04d}', args.run)])
pprint(vars(args))
print('Model name:', model_name)

# Pick GPU when available, load SVHN, and build the fully supervised VAE.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_loader, labeled_subset, test_set = ut.get_svhn_data(device)
fsvae = FSVAE(name=model_name).to(device)
writer = ut.prepare_writer(model_name, overwrite_existing=True)

# Run the fully supervised training loop.
train(model=fsvae, train_loader=train_loader, labeled_subset=labeled_subset,
      device=device, y_status='fullsup', tqdm=tqdm.tqdm, writer=writer,
      iter_max=args.iter_max, iter_save=args.iter_save)
Code example #2
0
def run(args, verbose=False):
    """Build, train, and/or evaluate the conditional car-load VAE2 model.

    Dispatches on ``args.mode``:
      * 'train'      -- fit on the training split, validating on 'val'.
      * 'val'/'test' -- load a checkpoint, save latents, evaluate bounds.
      * 'plot'       -- load a checkpoint and render sample images.
      * 'load'       -- just load the checkpoint and return the model.

    Args:
        args: parsed CLI namespace; fields used here include model, k, iw,
            var_pen, lr, num_epochs, run, hourly, batch, smooth, log_ev,
            warmup, z, finetune, lr_gamma, lr_every and mode.
        verbose: when True, pretty-print the args (and, in 'load' mode,
            the model).

    Returns:
        The constructed (and trained or checkpoint-loaded) model.
    """
    # Encode the key hyperparameters into a unique checkpoint name.
    layout = [
        ('{:s}', "vae2"),
        ('{:s}', args.model),
        # ('x{:02d}',  24 if args.hourly==1 else 96),
        # ('z{:02d}',  args.z),
        ('k{:02d}', args.k),
        ('iw{:02d}', args.iw),
        ('vp{:02d}', args.var_pen),
        ('lr{:.4f}', args.lr),
        ('epo{:03d}', args.num_epochs),
        ('run{:02d}', args.run)
    ]
    model_name = 'car' + '_'.join([t.format(v) for (t, v) in layout])
    if verbose: pprint(vars(args))
    print('Model name:', model_name)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Data location depends on the temporal resolution (hourly vs 15-min).
    # cloud alternative: root_dir = "../data/data15_final"
    root_dir = "../data/CS236/data60/split" if (
        args.hourly == 1) else "../data/CS236/data15_final"

    # Always build the train loader - its dataset carries the
    # normalization constants (shift_scale) reused for the other splits.
    train_loader = torch.utils.data.DataLoader(
        LoadDataset2(root_dir=root_dir,
                     mode='train',
                     shift_scale=None,
                     filter_ev=False,
                     log_car=(args.log_ev == 1),
                     smooth=args.smooth),
        batch_size=args.batch,
        shuffle=True,
    )
    shift_scale = train_loader.dataset.shift_scale

    # Load the pretrained "use" model whose latent code conditions this one.
    # NOTE(review): these settings are hardcoded to match one specific
    # pretrained checkpoint -- confirm before changing.
    use_model = run_vae2.main({
        "mode": 'load',
        "model": 'ff-s-dec',  # hardcode
        "lr": 0.01,  # hardcode
        "k": 1,  # hardcode
        "iw": 0,  # hardcode
        "num_epochs": 20,  # hardcode
        "var_pen": 1,  # hardcode
        "run": 1,  # hardcode
    })

    # Mixture prior (GMVAE2CAR) when k > 1, otherwise a plain VAE2CAR.
    # (A stray debug `print('36')` was removed here.)
    if args.k > 1:
        model = GMVAE2CAR(
            nn=args.model,
            name=model_name,
            z_dim=args.z,
            x_dim=24 if args.hourly == 1 else 96,
            c_dim=use_model.z_dim,
            warmup=(args.warmup == 1),
            var_pen=args.var_pen,
            use_model=use_model,
            k=args.k,
            y_dim=train_loader.dataset.dim_meta,
        ).to(device)
    else:
        model = VAE2CAR(
            nn=args.model,
            name=model_name,
            z_dim=args.z,
            x_dim=24 if args.hourly == 1 else 96,
            c_dim=use_model.z_dim,
            warmup=(args.warmup == 1),
            var_pen=args.var_pen,
            use_model=use_model,
            y_dim=train_loader.dataset.dim_meta,
        ).to(device)

    if args.mode == 'train':
        # Validation tensors for periodic evaluation during training.
        split_set = LoadDataset2(
            root_dir=root_dir,
            mode='val',
            shift_scale=shift_scale,
            filter_ev=False,
            log_car=(args.log_ev == 1),
            smooth=None,
        )
        val_set = {
            "x": torch.FloatTensor(split_set.car).to(device),
            "y": torch.FloatTensor(split_set.meta).to(device),
            "c": torch.FloatTensor(split_set.other).to(device),
        }
        _ = ut.prepare_writer(model_name, overwrite_existing=True)

        # Make sure not to train the first (pretrained "use") VAE unless
        # fine-tuning was explicitly requested.
        if not (args.finetune == 1):
            for p in model.use_model.parameters():
                p.requires_grad = False

        train2(
            model=model,
            train_loader=train_loader,
            val_set=val_set,
            tqdm=tqdm.tqdm,
            lr=args.lr,
            lr_gamma=args.lr_gamma,
            lr_milestone_every=args.lr_every,
            iw=args.iw,
            num_epochs=args.num_epochs,
            is_car_model=True,
        )

    else:
        # Every non-train mode starts from the saved checkpoint.
        ut.load_model_by_name(model, global_step=args.num_epochs)

    if args.mode in ['val', 'test']:
        model.set_to_eval()
        split_set = LoadDataset2(
            root_dir=root_dir,
            mode=args.mode,
            shift_scale=shift_scale,
            filter_ev=False,
            log_car=(args.log_ev == 1),
            smooth=None,
        )
        val_set = {
            "x": torch.FloatTensor(split_set.car).to(device),
            "y": torch.FloatTensor(split_set.meta).to(device),
            "c": torch.FloatTensor(split_set.other).to(device),
        }
        summaries = OrderedDict({
            'epoch': args.num_epochs,
            'loss': 0,
            'kl_z': 0,
            'rec_mse': 0,
            'rec_var': 0,
            'loss_type': 0,
            'lr': args.lr,
            'var_pen': model.var_pen,
        })

        ut.save_latent(model, val_set, mode=args.mode, is_car_model=True)

        # deepcopy so the evaluation cannot mutate our summary template.
        ut.evaluate_lower_bound2(model,
                                 val_set,
                                 run_iwae=True,
                                 mode=args.mode,
                                 repeats=10,
                                 summaries=copy.deepcopy(summaries))

    if args.mode == 'plot':
        make_image_load(model, shift_scale["car"], (args.log_ev == 1))
        # make_image_load_day(model, shift_scale["car"], (args.log_ev==1))
        make_image_load_z(model, shift_scale["car"], (args.log_ev == 1))
        make_image_load_z_use(model, shift_scale["car"], (args.log_ev == 1))

    if args.mode == 'load':
        if verbose: print(model)
    return model
Code example #3
0
def run(args, verbose=False):
    """Train, evaluate, plot, or load the (GM)VAE2 load model.

    The branch taken is selected by ``args.mode`` ('train', 'val',
    'test', 'plot' or 'load'); the model is constructed in every mode
    and is always returned.
    """
    # The checkpoint name encodes this run's key hyperparameters.
    name_parts = [
        '{:s}'.format("vae2"),
        '{:s}'.format(args.model),
        'k{:02d}'.format(args.k),
        'iw{:02d}'.format(args.iw),
        'vp{:02d}'.format(args.var_pen),
        'lr{:.4f}'.format(args.lr),
        'epo{:03d}'.format(args.num_epochs),
        'run{:02d}'.format(args.run),
    ]
    model_name = '_'.join(name_parts)
    if verbose:
        pprint(vars(args))
    print('Model name:', model_name)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Data directory depends on the temporal resolution of the series.
    if args.hourly == 1:
        root_dir = "../data/CS236/data60/split"
    else:
        root_dir = "../data/CS236/data15_final"

    # The train loader is always built: its dataset carries the
    # normalization constants (shift_scale) reused by the other splits.
    loader = torch.utils.data.DataLoader(
        LoadDataset2(
            root_dir=root_dir,
            mode='train',
            shift_scale=None,
            filter_ev=False,
            smooth=args.smooth,
        ),
        batch_size=args.batch,
        shuffle=True,
    )
    shift_scale = loader.dataset.shift_scale

    # Constructor arguments shared by both variants; the mixture model
    # (k > 1) additionally receives k.
    common = dict(
        nn=args.model,
        z_dim=args.z,
        name=model_name,
        x_dim=24 if args.hourly == 1 else 96,
        warmup=(args.warmup == 1),
        var_pen=args.var_pen,
        y_dim=loader.dataset.dim_meta,
    )
    if args.k > 1:
        model = GMVAE2(k=args.k, **common).to(device)
    else:
        model = VAE2(**common).to(device)

    def _eval_tensors(split):
        # Materialize one evaluation split as device tensors.
        ds = LoadDataset2(
            root_dir=root_dir,
            mode=split,
            shift_scale=shift_scale,
            filter_ev=False,
            smooth=None,
        )
        return {
            "x": torch.FloatTensor(ds.other).to(device),
            "y": torch.FloatTensor(ds.meta).to(device),
            "c": None,
        }

    if args.mode == 'train':
        val_set = _eval_tensors('val')
        # Writer currently unused; maybe switch to Tensorboard later.
        _ = ut.prepare_writer(model_name, overwrite_existing=True)
        train2(
            model=model,
            train_loader=loader,
            val_set=val_set,
            tqdm=tqdm.tqdm,
            lr=args.lr,
            lr_gamma=args.lr_gamma,
            lr_milestone_every=args.lr_every,
            iw=args.iw,
            num_epochs=args.num_epochs)
    else:
        # Every non-train mode starts from the saved checkpoint.
        ut.load_model_by_name(model, global_step=args.num_epochs)

    if args.mode in ('val', 'test'):
        model.set_to_eval()
        eval_set = _eval_tensors(args.mode)
        summaries = OrderedDict([
            ('epoch', args.num_epochs),
            ('loss', 0),
            ('kl_z', 0),
            ('rec_mse', 0),
            ('rec_var', 0),
            ('loss_type', 0),
            ('lr', args.lr),
            ('var_pen', model.var_pen),
        ])

        ut.save_latent(model, eval_set, mode=args.mode, is_car_model=False)

        ut.evaluate_lower_bound2(model,
                                 eval_set,
                                 run_iwae=True,
                                 mode=args.mode,
                                 repeats=10,
                                 summaries=summaries)

    if args.mode == 'plot':
        scale = shift_scale["other"]
        log_flag = (args.log_ev == 1)
        make_image_load(model, scale, log_flag)
        make_image_load_z(model, scale, log_flag)

    if args.mode == 'load' and verbose:
        print(model)
    return model
Code example #4
0
File: run_fsvae.py — Project: vukien95/cs236
                    help="Save model every n iterations")
# Replicate-run ID and train-vs-sample switch.
parser.add_argument('--run',
                    type=int,
                    default=0,
                    help="Run ID. In case you want to run replicates")
parser.add_argument('--sample', type=int, default=0, help="Sample")
args = parser.parse_args()

# Build a unique model name such as "model=fsvae_run=0000".
layout = [('model={:s}', 'fsvae'), ('run={:04d}', args.run)]
model_name = '_'.join([t.format(v) for (t, v) in layout])
pprint(vars(args))
print('Model name:', model_name)

# NOTE(review): execution is pinned to CPU. The original also computed a
# CUDA-aware device on the previous line but immediately overwrote it, so
# that dead assignment was removed; restore GPU selection if CUDA should
# be used here.
device = torch.device('cpu')
fsvae = FSVAE(name=model_name).to(device)
# Only overwrite an existing run directory when training (sample == 0).
writer = ut.prepare_writer(model_name, overwrite_existing=args.sample == 0)
if args.sample:
    with torch.no_grad():
        ut.load_model_by_name(fsvae, global_step=args.iter_max)
        images = fsvae.sample_clipped_x(20)
        save_image(images.view(200, 3, 32, 32), 'fsvae_sample.png', nrow=20)
else:
    train_loader, labeled_subset, test_set = ut.get_svhn_data(device)
    train(model=fsvae,
          train_loader=train_loader,
          labeled_subset=labeled_subset,
          device=device,
          y_status='fullsup',
          tqdm=tqdm.tqdm,
          writer=writer,