Example No. 1
import numpy as np
import torch

# Assumes project-local helpers: sample_mnist, sample_kmnist, sample_fmnist,
# sample_data_one_channel, likelihood_loss, plus a global `device`.
def compute_likelihoods(args, model, dset, weights_name):
    if dset == "mnist":
        dataset = sample_mnist(args.img_size)
    elif dset == "kmnist":
        dataset = sample_kmnist(args.img_size)
    elif dset == "fmnist":
        dataset = sample_fmnist(args.img_size)
    elif dset == "notmnist":
        dataset = sample_data_one_channel("notMNIST_large/", args.img_size)
    else:
        raise ValueError("Unknown dataset: {}".format(dset))

    n_bins = 2.**args.n_bits  # quantization levels per pixel

    log_likelihoods = []
    model.eval()
    for i, datapoint in enumerate(dataset):
        image, _ = datapoint
        image = image.to(device)

        # Add uniform dequantization noise in [0, 1/n_bins), as in Glow;
        # no gradients are needed for evaluation.
        with torch.no_grad():
            log_p, logdet, _ = model(image + torch.rand_like(image) / n_bins)
        logdet = logdet.mean()
        loss, log_p, log_det = likelihood_loss(log_p, logdet, args.img_size,
                                               n_bins)
        log_likelihoods.append(log_p.item() + log_det.item())

        print('#{} Loss: {}; logP: {}; logdet: {}'.format(
            i, loss.item(), log_p.item(), log_det.item()))

        # Periodically checkpoint partial results.
        if i % 20 == 0:
            np.save(dset + "_" + weights_name + "_likelihoods",
                    np.array(log_likelihoods))

    np.save(dset + "_" + weights_name + "_likelihoods",
            np.array(log_likelihoods))
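likelihood_loss is project-local and not shown in these examples. For reference, here is a minimal sketch of the bits-per-dimension objective it presumably implements, following the standard Glow convention; the function body, the 3-tuple return order, and the 3-channel default are assumptions, not the project's actual code.

from math import log

# Hypothetical sketch of likelihood_loss, assuming the Glow-style objective:
# discretized log-likelihood = log_p + logdet - n_pixel * log(n_bins),
# converted to bits per dimension by dividing by log(2) * n_pixel.
def likelihood_loss(log_p, logdet, image_size, n_bins, n_channels=3):
    n_pixel = image_size * image_size * n_channels
    ll = log_p + logdet - log(n_bins) * n_pixel
    return (
        (-ll / (log(2) * n_pixel)).mean(),      # loss (bits/dim)
        (log_p / (log(2) * n_pixel)).mean(),    # normalized log p(z)
        (logdet / (log(2) * n_pixel)).mean(),   # normalized log-determinant
    )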
Example No. 2
import glob
import os

import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.utils as tvutils
from tqdm import tqdm

# Assumes project-local helpers: utils, Network, sample_cifar10, calc_z_shapes,
# likelihood_loss, likelihood_loss_variance, plus globals `args` and `device`.
def main():
    utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
    print(args)

    # Basic setup: fix all RNG seeds for reproducibility.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    # GPU 2 is hard-coded as the primary device; DataParallel below uses 2-3.
    torch.cuda.set_device(2)
    cudnn.benchmark = True
    cudnn.enabled = True

    n_channels = 3                # CIFAR-10 images are RGB
    n_bins = 2.**args.n_bits      # quantization levels per pixel
    approx_samples = 4            # dequantization samples per image

    # Define model
    model_single = Network(n_channels,
                           args.n_flow,
                           args.n_block,
                           n_bins,
                           affine=args.affine,
                           conv_lu=not args.no_lu)
    model = nn.DataParallel(model_single, device_ids=[2, 3])
    model = model.to(device)

    optimizer = torch.optim.Adam(model.parameters(), args.learning_rate)
    dataset = iter(sample_cifar10(args.batch, args.img_size))

    # Draw a fixed set of latent codes for visualizing samples during training
    z_sample = []
    z_shapes = calc_z_shapes(n_channels, args.img_size, args.n_flow,
                             args.n_block)
    for z in z_shapes:
        z_new = torch.randn(args.n_sample, *z) * args.temp
        z_sample.append(z_new.to(device))

    with tqdm(range(args.iter)) as pbar:
        for i in pbar:
            # Training procedure
            model.train()

            # Get a random minibatch from the data queue, with replacement
            image, _ = next(dataset)
            image = image.cuda(non_blocking=True)
            # Repeat each image so the loss variance over dequantization
            # noise can be estimated from approx_samples draws.
            image = image.repeat(approx_samples, 1, 1, 1)

            # Add uniform dequantization noise in [0, 1/n_bins), as in Glow.
            log_p, logdet, _ = model(image + torch.rand_like(image) / n_bins)

            loss, _, _ = likelihood_loss(log_p, logdet, args.img_size, n_bins)

            # Penalize the variance of the loss across dequantization samples.
            loss_variance = likelihood_loss_variance(log_p, logdet,
                                                     args.img_size, n_bins,
                                                     approx_samples)

            loss = loss + loss_variance

            # Optimize model
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            pbar.set_description("Loss: {}".format(loss.item()))

            # Save generated samples
            if i % 100 == 0:
                with torch.no_grad():
                    tvutils.save_image(
                        model_single.reverse(z_sample).cpu().data,
                        "{}/samples/{}.png".format(args.save,
                                                   str(i + 1).zfill(6)),
                        normalize=False,
                        nrow=10,
                    )

            # Save checkpoint
            if i % 1000 == 0:
                model_single.genotype()  # derive the current genotype (result unused here)
                torch.save(
                    model.state_dict(),
                    "{}/checkpoint/model_{}.pt".format(args.save,
                                                       str(i + 1).zfill(6)))

            # Save latest weights
            utils.save(model, os.path.join(args.save, 'latest_weights.pt'))
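calc_z_shapes is likewise project-local. As an orientation aid, here is a sketch of the usual multi-scale Glow convention its signature matches: each block halves the spatial resolution and splits off a latent with doubled channel count, and the final block keeps the remaining 4x channels. Treat the body as an assumption, not the project's actual code.

# Hypothetical sketch of calc_z_shapes under the multi-scale Glow convention.
def calc_z_shapes(n_channel, input_size, n_flow, n_block):
    z_shapes = []
    for _ in range(n_block - 1):
        input_size //= 2
        n_channel *= 2
        z_shapes.append((n_channel, input_size, input_size))
    input_size //= 2
    z_shapes.append((n_channel * 4, input_size, input_size))
    return z_shapes

# e.g. calc_z_shapes(3, 32, n_flow, 4)
#   -> [(6, 16, 16), (12, 8, 8), (24, 4, 4), (96, 2, 2)],
# whose total dimensionality matches a 3x32x32 input.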
Example No. 3
import glob
import os
import pickle
import random

import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.utils as tvutils
from tqdm import tqdm

# Assumes project-local helpers: utils, SearchNetwork, EnsembleNetwork,
# sample_cifar10, calc_z_shapes, likelihood_loss, plus globals `args`, `device`.
def main():
    utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
    print(args)

    # Draw and log a fresh random seed for this run.
    seed = random.randint(1, 100000000)
    print(seed)

    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)

    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    cudnn.enabled = True

    n_channels = 3
    n_bins = 2.**args.n_bits

    # Define the search model and load its trained weights
    model = SearchNetwork(n_channels,
                          args.n_flow,
                          args.n_block,
                          n_bins,
                          affine=args.affine,
                          conv_lu=not args.no_lu)
    # Load the trained search network, then sample a concrete architecture.
    model = nn.DataParallel(model, [args.gpu])
    model.load_state_dict(
        torch.load("architecture.pt", map_location="cuda:{}".format(args.gpu)))
    model = model.module
    genotype = model.sample_architecture()

    # Persist the sampled genotype so the model can be rebuilt later.
    with open(args.save + '/genotype.pkl', 'wb') as fp:
        pickle.dump(genotype, fp)

    model_single = EnsembleNetwork(n_channels,
                                   args.n_flow,
                                   args.n_block,
                                   n_bins,
                                   genotype,
                                   affine=args.affine,
                                   conv_lu=not args.no_lu)
    model = model_single.to(device)

    optimizer = torch.optim.Adam(model.parameters(), args.learning_rate)

    dataset = iter(sample_cifar10(args.batch, args.img_size))

    # Draw a fixed set of latent codes for visualizing samples during training
    z_sample = []
    z_shapes = calc_z_shapes(n_channels, args.img_size, args.n_flow,
                             args.n_block)
    for z in z_shapes:
        z_new = torch.randn(args.n_sample, *z) * args.temp
        z_sample.append(z_new.to(device))

    with tqdm(range(args.iter)) as pbar:
        for i in pbar:
            # Training procedure
            model.train()

            # Get a random minibatch from the data queue, with replacement
            image, _ = next(dataset)
            image = image.cuda(non_blocking=True)

            # Add uniform dequantization noise in [0, 1/n_bins), as in Glow.
            log_p, logdet, _ = model(image + torch.rand_like(image) / n_bins)

            logdet = logdet.mean()
            loss, _, _ = likelihood_loss(log_p, logdet, args.img_size, n_bins)

            # Optimize model
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            pbar.set_description("Loss: {}".format(loss.item()))

            # Save generated samples
            if i % 100 == 0:
                with torch.no_grad():
                    tvutils.save_image(
                        model_single.reverse(z_sample).cpu().data,
                        "{}/samples/{}.png".format(args.save,
                                                   str(i + 1).zfill(6)),
                        normalize=False,
                        nrow=10,
                    )

            # Periodically save the latest weights
            if i % 1000 == 0:
                utils.save(model, os.path.join(args.save, 'latest_weights.pt'))
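The pickled genotype and the saved weights together are enough to rebuild the sampled model later. A hypothetical loading counterpart, assuming utils.save(model, path) writes model.state_dict() to path (as DARTS-style codebases do) and reusing the constructor arguments from above:

import os
import pickle

import torch

# Hypothetical reload of the artifacts saved by main() above.
with open(os.path.join(args.save, 'genotype.pkl'), 'rb') as fp:
    genotype = pickle.load(fp)

model = EnsembleNetwork(3, args.n_flow, args.n_block, 2.**args.n_bits,
                        genotype, affine=args.affine, conv_lu=not args.no_lu)
model.load_state_dict(
    torch.load(os.path.join(args.save, 'latest_weights.pt')))
model.eval()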