Ejemplo n.º 1
0
    # Echo the D-head and Q-head architectures to stdout and append them to the
    # run's log file for later inspection. repr() is preferred over calling
    # the __repr__ dunder directly.
    print(netD)
    logFile.write(repr(netD) + '\n')
    #summary(netD, input_size=(1024, 4, 4), log_file=logFile) # Must set the right input size manually
    print(netQ)
    logFile.write(repr(netQ) + '\n')
    #summary(netQ, input_size=(1024, 4, 4), log_file=logFile) # Must set the right input size manually

# Loss for discrimination between real and fake images (binary cross-entropy).
criterionD = nn.BCELoss()
# Loss for the discrete latent code (categorical cross-entropy).
criterionQ_dis = nn.CrossEntropyLoss()
# Loss for the continuous latent code (Gaussian negative log-likelihood,
# project-defined helper).
criterionQ_con = NormalNLLLoss()

# Adam optimiser is used. optimD trains the shared discriminator body plus the
# D head; optimG trains the generator plus the Q head. Each side has its own
# learning rate; betas are shared.
optimD = optim.Adam([{'params': discriminator.parameters()}, {'params': netD.parameters()}],lr=params['learning_rate_D'], betas=(params['beta1'], params['beta2']))
optimG = optim.Adam([{'params': netG.parameters()}, {'params': netQ.parameters()}], lr=params['learning_rate_G'], betas=(params['beta1'], params['beta2']))

# Resume training: restore all four networks and both optimizers from a single
# checkpoint file when --load_path was supplied.
# NOTE(review): assumes the checkpoint dict contains all six keys below; a
# partial checkpoint would raise KeyError here — confirm against the save code.
if args.load_path:
    checkpoint = torch.load(args.load_path)
    netG.load_state_dict(checkpoint['netG'])
    discriminator.load_state_dict(checkpoint['discriminator'])
    netD.load_state_dict(checkpoint['netD'])
    netQ.load_state_dict(checkpoint['netQ'])
    optimD.load_state_dict(checkpoint['optimD'])
    optimG.load_state_dict(checkpoint['optimG'])

# Fixed noise vector reused across epochs so generated samples are comparable.
# (Comment dimensions are for MNIST.)
z = torch.randn(100, params['num_z'], 1, 1, device=device) # tensor of random numbers. 100 x 62 x 1 x 1
fixed_noise = z
Ejemplo n.º 2
0
# Q head: predicts the latent-code distribution from discriminator features
# (project-defined module); moved to the target device and weight-initialized.
netQ = QHead().to(device)
netQ.apply(weights_init)
print(netQ)

# Loss for discrimination between real and fake images (binary cross-entropy).
criterionD = nn.BCELoss()
# Loss for the discrete latent code (categorical cross-entropy).
criterionQ_dis = nn.CrossEntropyLoss()
# Loss for the continuous latent code (Gaussian negative log-likelihood,
# project-defined helper).
criterionQ_con = NormalNLLLoss()

# Adam optimiser is used. optimD trains the shared discriminator body plus the
# D head; optimG trains the generator plus the Q head, each with its own
# learning rate ('learning_rateD' / 'learning_rateG' keys here — note this
# example uses different param-key spelling than the others).
optimD = optim.Adam([{
    'params': discriminator.parameters()
}, {
    'params': netD.parameters()
}],
                    lr=params['learning_rateD'],
                    betas=(params['beta1'], params['beta2']))
optimG = optim.Adam([{
    'params': netG.parameters()
}, {
    'params': netQ.parameters()
}],
                    lr=params['learning_rateG'],
                    betas=(params['beta1'], params['beta2']))

# Fixed noise vector (100 samples) reused across epochs so generated samples
# are comparable between evaluations.
z = torch.randn(100, params['num_z'], 1, 1, device=device)
fixed_noise = z
if (params['num_dis_c'] != 0):
Ejemplo n.º 3
0
print(netD)

# Q head parameterized by the discrete-code dimension and the number of
# continuous codes (project-defined module).
netQ = QHead(params['dis_c_dim'], params['num_con_c']).to(device)

netQ.apply(weights_init)
print(netQ)

# Loss for discrimination between real and fake images (binary cross-entropy).
criterionD = nn.BCELoss()
# Loss for the discrete latent code (categorical cross-entropy).
criterionQ_dis = nn.CrossEntropyLoss()
# Loss for the continuous latent code (Gaussian negative log-likelihood,
# project-defined helper).
criterionQ_con = NormalNLLLoss()

# Adam optimiser is used; here both sides share a single 'learning_rate' key.
optimD = optim.Adam([{'params': discriminator.parameters()}, {'params': netD.parameters()}], lr=params['learning_rate'],
                    betas=(params['beta1'], params['beta2']))
optimG = optim.Adam([{'params': netG.parameters()}, {'params': netQ.parameters()}], lr=params['learning_rate'],
                    betas=(params['beta1'], params['beta2']))

# Fixed noise: one sample per (row, column) cell of a dis_c_dim x dis_c_dim
# evaluation grid.
# NOTE(review): bare `dis_c_dim` is not defined in this fragment — presumably
# bound to params['dis_c_dim'] earlier in the file; confirm.
dis_c_dim_squared = dis_c_dim * dis_c_dim
z = torch.randn(dis_c_dim_squared, params['num_z'], 1, 1, device=device)
fixed_noise = z
if (params['num_dis_c'] != 0):
    # Build one-hot discrete codes: idx = [0,0,...,1,1,...] so each category
    # appears dis_c_dim times, covering every cell of the evaluation grid.
    idx = np.arange(params['dis_c_dim']).repeat(dis_c_dim)
    dis_c = torch.zeros(dis_c_dim_squared, params['num_dis_c'], params['dis_c_dim'], device=device)
    for i in range(params['num_dis_c']):
        # Set the one-hot entry for every sample in discrete code i.
        dis_c[torch.arange(0, dis_c_dim_squared), i, idx] = 1.0

    # Flatten the per-code one-hots into a (N, num_dis_c*dis_c_dim, 1, 1)
    # tensor so it can be concatenated with the noise vector.
    dis_c = dis_c.view(dis_c_dim_squared, -1, 1, 1)