Example no. 1

import os

import numpy as np
import torch
from torchvision.utils import save_image

# get_args, get_dataloader, DCGAN and VAE are assumed to be this project's own
# helpers/models; their modules are not shown in this excerpt.

def main():
    """
    """
    args = get_args()
    np.random.seed(args.seed)
    torch.random.manual_seed(args.seed)

    gan = DCGAN()
    gan.load_model(dict_path=args.gan_model)

    vae = VAE()
    vae.load_model(dict_path=args.vae_model)

    # ----------------------------------------------------------------------------------

    # first, save some random samples from both models and from the original dataset
    samples_dir = os.path.join(args.out_dir, "visual_samples/")
    os.makedirs(samples_dir, exist_ok=True)

    # draw three 8x8 grids of images from each of the three sources
    for i in range(1, 4):
        # original SVHN dataset samples, un-normalized from [-1, 1] back to [0, 1]
        svhn_data_loader = get_dataloader("svhn_train", batch_size=64)
        orig_imgs, _ = next(iter(svhn_data_loader))
        save_image(orig_imgs * 0.5 + 0.5,
                   samples_dir + f"orig_image_grid{i}.png")
        # gan samples
        gan_imgs = gan.sample(num_images=64)
        save_image(gan_imgs, samples_dir + f"gan_image_grid{i}.png")
        # vae samples
        vae_imgs = vae.sample(num_images=64)
        save_image(vae_imgs, samples_dir + f"vae_image_grid{i}.png")
    # ----------------------------------------------------------------------------------

    # next, check whether each model has learned a disentangled representation in the latent space
    disentg_dir = os.path.join(args.out_dir, "disentangled_repr/")
    os.makedirs(disentg_dir, exist_ok=True)
    imgs_per_row = 12
    eps = 15  # perturbation added to one latent dimension at a time
    noise = torch.randn(imgs_per_row, 100)  # both models use a 100-dim latent space

    for tag, model in [("gan", gan), ("vae", vae)]:
        imgs_orig = model.sample(noise=noise)
        imgs_list = [imgs_orig, torch.zeros(imgs_per_row, 3, 32, 32)]  # black row as a visual separator
        interesting_dims = [14, 46, 51] if tag == "gan" else [12, 18, 70]
        # (presumably found by sweeping all dims, e.g. `for i in tqdm(range(100))`)
        for i in interesting_dims:
            noise_perturbed = noise.clone()
            noise_perturbed[:, i] += eps
            imgs_list.append(model.sample(noise=noise_perturbed))

        imgs_joined = torch.cat(imgs_list, dim=0)
        save_image(
            imgs_joined,
            disentg_dir +
            f"{tag}_disentang_3dims_seed{args.seed}_eps{eps}.png",
            nrow=imgs_per_row,
        )

    # ----------------------------------------------------------------------------------

    # Compare between interpolations in the data space and in the latent space
    interpolations_dir = os.path.join(args.out_dir, "interpolations/")
    os.makedirs(interpolations_dir, exist_ok=True)
    z = torch.randn(2, 100)  # two noises which will be interpolated
    alpha = torch.linspace(0.0, 1.0, 11)
    # outer products build an (11, 100) batch of latent interpolations:
    # row i equals alpha_i * z[0] + (1 - alpha_i) * z[1]
    z_interpolations = torch.ger(alpha, z[0]) + torch.ger(1 - alpha, z[1])
    alpha = alpha.view(-1, 1, 1, 1)  # reshape so alpha broadcasts across (C, H, W) images
    for tag, model in [("gan", gan), ("vae", vae)]:
        x = model.sample(noise=z)  # decode the two endpoint latents
        imgs_x_interpolations = alpha * x[0] + (1 - alpha) * x[1]  # blend in pixel space
        imgs_z_interpolations = model.sample(noise=z_interpolations)
        imgs_joined = torch.cat([imgs_x_interpolations, imgs_z_interpolations],
                                dim=0)
        save_image(
            imgs_joined,
            interpolations_dir + f"{tag}_interpolations_s{args.seed}.png",
            nrow=11,
        )
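
# NOTE: get_args is not defined in this excerpt. Below is a minimal sketch of
# what it might look like, inferred only from the attributes used in main()
# above; the flag names and defaults are assumptions, not the project's actual CLI.
def get_args():
    import argparse
    parser = argparse.ArgumentParser(
        description="Evaluate a trained DCGAN and a trained VAE on SVHN")
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--gan-model", required=True)
    parser.add_argument("--vae-model", required=True)
    parser.add_argument("--out-dir", default="out")
    return parser.parse_args()


if __name__ == "__main__":
    main()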
Example no. 2

# This fragment assumes earlier setup roughly like the following (names
# inferred from how they are used below; the values are placeholders):
#   models = [...]                      # names of trained DCGAN checkpoints
#   steps = 30                          # number of interpolation steps
#   start_seed = target_seed = None
#   linear_step_frames, spherical_step_frames = {}, {}
#   medium_dir = "..."                  # output root directory

for n, step_frames in zip(['linear', 'spherical'],
                          [linear_step_frames, spherical_step_frames]):

    for model in models:
        gan = DCGAN(name=model, reload=True)
        gan.load_weights(250)

        if start_seed is None:
            # draw both endpoints once so every model traverses the same pair of seeds
            start_seed, target_seed = gan.generate_random_noise(2)

        for s, val in enumerate(k / steps for k in range(steps + 1)):  # val sweeps 0.0 -> 1.0

            if n == 'spherical':
                seed = np.expand_dims(slerp(val, start_seed, target_seed),
                                      axis=0)
            elif n == 'linear':
                # straight-line interpolation between the two endpoint seeds
                seed = np.expand_dims((1 - val) * start_seed + val * target_seed,
                                      axis=0)

            if s not in step_frames:
                step_frames[s] = []

            # collect this model's frame for step s; frames from all models are
            # later concatenated side by side into one image per step
            step_frames[s].append(gan.sample(seed)[0])

    save_dir = os.path.join(medium_dir, 'traversal', n)
    os.makedirs(save_dir, exist_ok=True)

    for s in range(steps + 1):
        save_single_image(os.path.join(save_dir, f'{s}.png'),
                          np.concatenate(step_frames[s], axis=1))
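
# NOTE: slerp and save_single_image are project-local helpers not shown here.
# Below is a minimal sketch of the standard spherical linear interpolation that
# slerp presumably implements (an assumption, not necessarily this project's code):
def slerp(val, low, high):
    """Spherically interpolate between noise vectors low and high, val in [0, 1]."""
    low_norm = low / np.linalg.norm(low)
    high_norm = high / np.linalg.norm(high)
    omega = np.arccos(np.clip(np.dot(low_norm, high_norm), -1.0, 1.0))
    so = np.sin(omega)
    if so == 0:
        return (1.0 - val) * low + val * high  # endpoints colinear: fall back to lerp
    return np.sin((1.0 - val) * omega) / so * low + np.sin(val * omega) / so * high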