# Print Metrics
    print(f"\nEpoch: {epoch+1}/{epochs}:\
        \nTrain Loss = {train_loss}\
        \nVal Loss = {val_loss}")

################################################################################
################################## Save & Test #################################
################################################################################
# Generate Loss Graph
graphics.draw_loss(all_train_loss, all_val_loss, loss_output_path, mode="ae")

# Save Model
torch.save(model.state_dict(), model_output_path)
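
# A minimal reload sketch (an assumption, not part of the original script):
# the state dict saved above can be restored onto the same architecture
# using the map_location pattern that appears later in these examples.
restored_state = torch.load(model_output_path, map_location=device)
model.load_state_dict(restored_state)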

# Plot Animation Sample
fig, axis = graphics.make_grid(("Sample", sample), 4, 4)
plt.savefig(animation_sample_image_name)

# Create & Save Animation
Writer = animation.writers["ffmpeg"]
writer = Writer()
anim = graphics.make_animation(graphics.make_grid, all_samples)
anim.save(animation_output_path, writer=writer)
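
# Writer() above uses matplotlib's default settings; frame rate and bitrate
# can be pinned explicitly if the defaults prove unsuitable (illustrative
# values, not from the original script):
#     writer = Writer(fps=15, bitrate=1800)
#     anim.save(animation_output_path, writer=writer)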

model.eval()

# Evaluate on Test Images
# Save Generated Images & Calculate Metrics
# Testing Loop - Standard
all_mse = []
all_ssim = []
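
# A hedged sketch of the metric computation implied by the comments above
# (test_images / generated_images are hypothetical names; SSIM here comes
# from scikit-image, MSE is computed directly on the tensors):
# from skimage.metrics import structural_similarity
# for original, generated in zip(test_images, generated_images):
#     all_mse.append(float(((original - generated) ** 2).mean()))
#     all_ssim.append(structural_similarity(
#         original.permute(1, 2, 0).numpy(),
#         generated.permute(1, 2, 0).numpy(),
#         channel_axis=2, data_range=1.0))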
        output_dim=prior_output_dim,
    )
    model_path = os.path.join(model_prefix, prior_model_name, "model.pt")
    prior.load_state_dict(torch.load(model_path, map_location=device))
    prior.eval()

    ## Setup Data
    images_to_load, ids = pick_images(data_dir, num_images * 2)
    bases, fusees = get_images(data_dir, image_size, transform, images_to_load)
    fusions = get_fusion_images(fusion_dir, image_size, transform, ids)
    fusion_sample = torch.stack((bases, fusees, fusions),
                                dim=1).flatten(end_dim=1)
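
    # The stack/flatten above interleaves the three tensors row-wise:
    # (N, C, H, W) x 3 -> (N, 3, C, H, W) -> (3N, C, H, W), so consecutive
    # triples are (base, fusee, fusion) and a 3-wide grid shows one triple
    # per row.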

    ## Plot Original Images
    caption = "Original Images"
    fig, axis = graphics.make_grid((caption, fusion_sample), 4, 3)
    plt.savefig(os.path.join(output_dir, f"{identifier}_{caption}.png"))
    print(caption)

    ## Move Data to Device
    bases = bases.to(device)
    fusees = fusees.to(device)
    fusions = fusions.to(device)

    ## Plot Reconstructions of VQ-VAE
    caption = "Reconstructed Images"
    # Get Model Outputs
    with torch.no_grad():
        recon_bases = model(bases)[1].detach().cpu()
        recon_fusees = model(fusees)[1].detach().cpu()
        recon_fusions = model(fusions)[1].detach().cpu()
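
    # A sketch of the plotting step that would follow (hedged; it mirrors
    # the "Original Images" block above, using the caption set earlier):
    recon_sample = torch.stack((recon_bases, recon_fusees, recon_fusions),
                               dim=1).flatten(end_dim=1)
    fig, axis = graphics.make_grid((caption, recon_sample), 4, 3)
    plt.savefig(os.path.join(output_dir, f"{identifier}_{caption}.png"))
    print(caption)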
Example #3
################################################################################
################################## Save & Test #################################
################################################################################
# Generate Loss Graph
graphics.draw_loss(all_train_loss, all_val_loss, loss_output_path, mode="vae")
graphics.draw_loss(all_train_fusion_loss,
                   all_val_fusion_loss,
                   fusion_loss_output_path,
                   mode="vae")

# Save Model
torch.save(model.state_dict(), model_output_path)

# Plot Animation Sample
fig, axis = graphics.make_grid(("Sample", sample), 4, 4)
plt.savefig(animation_sample_image_name)
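
# Flatten fusion_sample's (base, fusee, fusion) triples into one flat list
# so the 3-wide grid below shows one triple per row.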
fusion_sample = [x for y in fusion_sample for x in y]
fig, axis = graphics.make_grid(("Fusion Sample", fusion_sample), 4, 3)
plt.savefig(fusion_animation_sample_image_name)

# Create & Save Animation
anim = graphics.make_animation(graphics.make_grid, all_samples, width=3)
fusion_anim = graphics.make_animation(graphics.make_grid,
                                      all_fusion_samples,
                                      width=3)
anim.save(animation_output_path)
fusion_anim.save(fusion_animation_output_path)

model.eval()
Example #4
            "discriminator_model_state_dict": netD.state_dict(),
            "discriminator_optimizer_state_dict": optimizerD.state_dict(),
            "discriminator_loss": all_discriminator_loss,
        },
        os.path.join(model_save_dir, f"epoch_{epoch}_model.pt"),
    )
    netG.eval()
    netD.eval()
    # Generate a batch of fake images
    with torch.no_grad():
        # Generate batch of latent vectors
        noise = torch.randn(batch_size, latent_dim, 1, 1, device=device)
        # Generate fake image batch with G
        generated = netG(noise).detach().cpu()
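        # no_grad + detach keep the sampling out of autograd, and .cpu()
        # moves the batch off the GPU so it can be plotted below.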
    # Plot fake images
    fig, axis = graphics.make_grid(("Test Sample", generated), 4, 4)
    plt.savefig(os.path.join(output_dir, f"epoch_{epoch}_test_sample_output.jpg"))

################################################################################
################################## Save & Test #################################
################################################################################

# Save output graphs
graphics.draw_loss(
    all_generator_loss, all_discriminator_loss, loss_output_path, mode="gan"
)

# Create & Save Animation
anim = graphics.make_gan_animation(training_samples)
anim.save(animation_output_path)
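
# A final export of just the generator weights could follow here (a hedged
# sketch; `generator_output_path` is a hypothetical name, mirroring the
# torch.save(model.state_dict(), ...) pattern used in the other examples):
# torch.save(netG.state_dict(), generator_output_path)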
Example #5
    # Print Metrics
    print(f"\nEpoch: {epoch+1}/{epochs}:"
          f"\nTrain Loss = {train_loss}"
          f"\nTrain Reconstruction Loss = {train_recon_loss}"
          f"\nTrain KL Divergence = {train_kl_d}"
          f"\nVal Loss = {val_loss}"
          f"\nVal Reconstruction Loss = {val_recon_loss}"
          f"\nVal KL Divergence = {val_kl_d}")

################################################################################
################################## Save & Test #################################
################################################################################
# Generate Loss Graph
graphics.draw_loss(all_train_loss, all_val_loss, loss_output_path, mode="vae")

# Save Model
torch.save(model.state_dict(), model_output_path)

# Plot Animation Sample
fig, axis = graphics.make_grid(("Sample", sample.detach().cpu()), 4, 4)
plt.savefig(animation_sample_image_name)
# Plot Fusion Animation Sample
fusion_sample = [x for y in fusion_sample.detach().cpu() for x in y]
fig, axis = graphics.make_grid(("Fusion Sample", fusion_sample), 4, 3)
plt.savefig(fusion_animation_sample_image_name)

# Create & Save Animation
anim = graphics.make_animation(graphics.make_grid, all_samples)
anim.save(animation_output_path)
# Create & Save Fusion Animation
fusion_anim = graphics.make_animation(graphics.make_grid,
                                      all_fusion_samples,
                                      width=3)
fusion_anim.save(fusion_animation_output_path)
Example #6
        "D": 32,
        "commitment_cost": 0.25
    },
}
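
# In VQ-VAE terms, D is the codebook embedding dimension and
# commitment_cost is the beta weight on the commitment term of the loss
# (0.25 is the value suggested in the original VQ-VAE paper).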

transform = data.image2tensor_resize(image_size)

images_to_load, ids = pick_images(base_dir, num_images * 2)
bases, fusees = get_images(base_dir, image_size, transform, images_to_load)
fusions = get_fusion_images(base_fusion_dir, image_size, transform, ids)

fusion_sample = torch.stack((bases, fusees, fusions), dim=1).flatten(end_dim=1)

# Show original base, fusee, and fusion images
caption = "base"
fig, axis = graphics.make_grid((caption, fusion_sample), 4, 3)
plt.savefig(os.path.join(output_dir, f"{caption}.png"))
print(caption)

bases = bases.to(device)
fusees = fusees.to(device)
for model_name, model_parameters in model_config.items():
    # Load Model
    model_path = os.path.join(model_prefix, model_name, "model.pt")
    temp_num_layers = num_layers

    model_type = "autoencoder"
    if "dual_input" in model_name:
        if "vae" in model_name:
            model_type = "dual_input_vae"
        else: