Example #1
                                         download=True,
                                         transform=transform)
train_loader = torch.utils.data.DataLoader(mnist_train,
                                           batch_size=batch_size,
                                           shuffle=True)
# mnist_test = torchvision.datasets.EMNIST('./EMNIST_data', train=False, download=True, transform=transform, split="letters")
# test_loader = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size,  shuffle=True)

# Load the pretrained conditional generator; its input layers are reused below
pretrained_generator = ConditionalGenerator()
pretrained_generator.load_state_dict(torch.load(pretrained_generator_filepath))

generator = Generator()
discriminator = Discriminator()
pretrained_discriminator = Discriminator()
pretrained_discriminator.load_state_dict(
    torch.load(pretrained_discriminator_filepath))

# Reuse the pretrained generator's input layers as the new generator's first deconv layers
generator.deconv1 = pretrained_generator.input_layer1
# generator.deconv1.requires_grad = False
generator.deconv2 = pretrained_generator.input_layer2
# generator.deconv2.requires_grad = False
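
# Note: assigning `requires_grad` on an nn.Module (as in the commented-out lines
# above) does not freeze its parameters. If the transferred layers are meant to
# stay fixed during training, the usual pattern would be something like:
#     for param in generator.deconv1.parameters():
#         param.requires_grad = False
#     for param in generator.deconv2.parameters():
#         param.requires_grad = False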

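# `get_random_params` is used below but not defined in this excerpt. A minimal
# sketch, assuming it draws `n` learning rates uniformly between `low` and `high`
# (the original helper may well sample log-uniformly instead):
import random

def get_random_params(low, high, n):
    # Return n values drawn uniformly at random between low and high
    return [random.uniform(low, high) for _ in range(n)]
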
if __name__ == "__main__":
    d_filename = "testD"
    g_filename = "testG"
    filename = "control"
    filenames = []
    num_epochs = 10
    random_lrs = get_random_params(.00002, .0002, 50)
    run_stats = []
    for lr in random_lrs:
Example #2
netG = Generator(nc, nz, ngf, ndf, ngpu).to(device)
netD = Discriminator(nc, nz, ngf, ndf, ngpu).to(device)

# Handle multi-gpu if desired
if (device.type == 'cuda') and (ngpu > 1):
    netG = nn.DataParallel(netG, list(range(ngpu)))
    netD = nn.DataParallel(netD, list(range(ngpu)))
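# Note: the checkpoint below is loaded after the DataParallel wrap, so when
# ngpu > 1 its state-dict keys are expected to carry the 'module.' prefix.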
    
# Print the model architectures
print(netG)
print(netD)

# Load the trained weights from the checkpoint (G and D state dicts stored as a list)
checkpoint = torch.load(args.checkpoint)
netG.load_state_dict(checkpoint['state_dict'][0])
netD.load_state_dict(checkpoint['state_dict'][1])
# Switch to evaluation mode so layers like BatchNorm and Dropout run deterministically
netG.eval()
netD.eval()

# Set random seeds for reproducibility
manualSeed = 777
torch.manual_seed(manualSeed)
np.random.seed(manualSeed)

# Create a batch of fixed latent vectors used to visualize the generator's output
fixed_noise = torch.randn(32, nz, 1, 1, device=device)

print("Starting inference...")

# Check how the generator is doing by saving G's output on fixed_noise
with torch.no_grad():
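    # The excerpt ends here; a minimal sketch of the missing body, assuming the
    # goal is simply to save G's output on fixed_noise (the torchvision import
    # and the output filename are assumptions, not from the original):
    fake = netG(fixed_noise).detach().cpu()
    torchvision.utils.save_image(fake, "fixed_noise_samples.png", normalize=True)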