Example #1
0
plt.close('all')

# Initialise every sub-network: construct it, move it to the target device,
# apply the custom weight initialisation, and echo its architecture to stdout.
# (Create/init/print order per network is kept identical so the RNG stream
# and console output match the original.)
def _build(factory):
    net = factory().to(device)
    net.apply(weights_init)
    print(net)
    return net

netG = _build(Generator)
discriminator = _build(Discriminator)
netD = _build(DHead)
netQ = _build(QHead)

# BCE loss for telling real images from fake ones.
criterionD = nn.BCELoss()
# Cross-entropy loss for the discrete latent code.
criterionQ_dis = nn.CrossEntropyLoss()
# Negative log-likelihood loss for the continuous latent code.
criterionQ_con = NormalNLLLoss()

# Adam optimiser is used.
# NOTE(review): this statement is truncated in this excerpt — the parameter
# group list is not closed and the Adam keyword arguments (lr, betas) continue
# beyond the visible lines; see the complete form in the later example.
optimD = optim.Adam([{
    'params': discriminator.parameters()
}, {
    'params': netD.parameters()
Example #2
0
# Plot and save a 10x10 grid of training images for visual inspection.
sample_batch = next(iter(dataloader))
plt.figure(figsize=(10, 10))
plt.axis("off")
plt.imshow(np.transpose(vutils.make_grid(
    sample_batch[0].to(device)[:100], nrow=10, padding=2, normalize=True).cpu(), (1, 2, 0)))
plt.savefig('output/Training Images {}'.format(params['dataset']))
plt.close('all')

# Initialise the network: construct each sub-network on the target device and
# apply the custom weight initialisation.
netG = Generator().to(device)
netG.apply(weights_init)
discriminator = Discriminator().to(device)
discriminator.apply(weights_init)
netD = DHead().to(device)
netD.apply(weights_init)
netQ = QHead().to(device)
netQ.apply(weights_init)

# Print model summary to stdout and append it to the log file.
# NOTE(review): logFile is opened without a context manager; ensure it is
# closed after the final write (the close is not visible in this excerpt).
logFile = open("output/log.txt", "a")
if params['print_model_description']:
    print(netG)
    logFile.write(repr(netG) + '\n')  # repr() instead of the dunder __repr__() call
    # Sample one noise batch only to learn the generator's expected input
    # shape (drop the leading batch dimension for summary()).
    noise_shape = noise_sample(params['num_dis_c'], params['dis_c_dim'], params['num_con_c'], params['num_z'], params['batch_size'], device)[0].shape
    summary(netG, input_size=tuple(noise_shape[1:]), log_file=logFile)
    print(discriminator)
    logFile.write(repr(discriminator) + '\n')
    # A single dataset sample already has shape (C, H, W).
    dataset_shape = dataloader.dataset[0][0].shape
    summary(discriminator, input_size=tuple(dataset_shape), log_file=logFile)
    print(netD)
    logFile.write(repr(netD) + '\n')
Example #3
0
plt.close('all')

# Build each sub-network: move it to the target device, apply the custom
# weight initialisation, and echo its architecture to stdout.  Order of
# creation/initialisation/printing per network matches the original exactly.
def _init_net(factory):
    net = factory().to(device)
    net.apply(weights_init)
    print(net)
    return net

netG = _init_net(Generator)
discriminator = _init_net(Discriminator)
netD = _init_net(DHead)
netQ = _init_net(QHead)

# Real-vs-fake discrimination loss.
criterionD = nn.BCELoss()
# Loss for the discrete latent code.
criterionQ_dis = nn.CrossEntropyLoss()
# Loss for the continuous latent code.
criterionQ_con = NormalNLLLoss()

# Adam optimiser is used: discriminator + D-head share one optimiser,
# generator + Q-head share the other, with identical hyperparameters.
_adam_kwargs = dict(lr=params['learning_rate'], betas=(params['beta1'], params['beta2']))
optimD = optim.Adam([{'params': discriminator.parameters()}, {'params': netD.parameters()}], **_adam_kwargs)
optimG = optim.Adam([{'params': netG.parameters()}, {'params': netQ.parameters()}], **_adam_kwargs)

# Fixed Noise
Example #4
0
# Rebuild each sub-network on the target device and restore its trained
# weights from the checkpoint — no random re-initialisation here (the
# weights_init calls present during training are intentionally skipped).
def _restore(factory, key):
    net = factory().to(device)
    net.load_state_dict(state_dict1[key])
    print(net)
    return net

netG = _restore(Generator, 'netG')
discriminator = _restore(Discriminator, 'discriminator')
netD = _restore(DHead, 'netD')
netQ = _restore(QHead, 'netQ')

# Real-vs-fake discrimination loss.
criterionD = nn.BCELoss()
# Loss for the discrete latent code.
criterionQ_dis = nn.CrossEntropyLoss()
# Loss for the continuous latent code.
criterionQ_con = NormalNLLLoss()

# Adam optimiser is used: one optimiser for the discriminator + D-head,
# one for the generator + Q-head.
optimD = optim.Adam([{'params': discriminator.parameters()}, {'params': netD.parameters()}], lr=params['learning_rate'], betas=(params['beta1'], params['beta2']))
optimG = optim.Adam([{'params': netG.parameters()}, {'params': netQ.parameters()}], lr=params['learning_rate'], betas=(params['beta1'], params['beta2']))
Example #5
0
# Initialise the network.
# Generator input width = incompressible noise + one-hot discrete codes
# + continuous codes.
num_z_c = params['num_z'] + params['num_dis_c'] * params['dis_c_dim'] + params['num_con_c']

# Move a freshly constructed module to the device, apply the custom weight
# initialisation, and print its architecture.  Per-network ordering is
# unchanged from the original, so the RNG stream is identical.
def _prep(net):
    net = net.to(device)
    net.apply(weights_init)
    print(net)
    return net

netG = _prep(Generator(num_z_c))
discriminator = _prep(Discriminator())
netD = _prep(DHead())
netQ = _prep(QHead(params['dis_c_dim'], params['num_con_c']))

# Real-vs-fake discrimination loss.
criterionD = nn.BCELoss()
# Loss for the discrete latent code.
criterionQ_dis = nn.CrossEntropyLoss()
# Loss for the continuous latent code.
criterionQ_con = NormalNLLLoss()

# Adam optimiser is used.
optimD = optim.Adam([{'params': discriminator.parameters()}, {'params': netD.parameters()}], lr=params['learning_rate'],
                    betas=(params['beta1'], params['beta2']))
optimG = optim.Adam([{'params': netG.parameters()}, {'params': netQ.parameters()}], lr=params['learning_rate'],
# Loss weights and simulation count pulled from the config.
lambda_cdis = params['lambda_cdis']
lambda_ccon = params['lambda_ccon']
sim_num = params['sim_num']

temp_dim = params['dis_c_dim']

# Restore the trained models (generator, discriminator, heads) from the
# checkpoint.  Restoration order matches the original exactly.
def _load(net, key):
    net.load_state_dict(state_dict[key])
    return net

discriminator = _load(Discriminator().to(device), 'discriminator')

# Generator input width = incompressible noise + one-hot discrete codes
# + continuous codes.
num_z_c = (params['num_z']
           + params['num_dis_c'] * params['dis_c_dim']
           + params['num_con_c'])
netG = _load(Generator(num_z_c).to(device), 'netG')

netQ = _load(QHead(params['dis_c_dim'], params['num_con_c']).to(device), 'netQ')

netD = _load(DHead().to(device), 'netD')

# Real-vs-fake discrimination loss.
criterionD = nn.BCELoss()
# Loss for the discrete latent code.
criterionQ_dis = nn.CrossEntropyLoss()
# Loss for the continuous latent code.
criterionQ_con = NormalNLLLoss()


def res_loss(x, Gz):
    """Residual loss between a real sample x and a generated sample Gz.

    NOTE(review): the function body is truncated at the end of this excerpt;
    only the element-wise absolute difference is visible, so the final
    reduction (e.g. mean/sum) cannot be documented from here.
    """
    abs_sub = abs(x - Gz)