# Latent-code hyperparameters: 10-way categorical code, 2 continuous codes.
params['dis_c_dim'] = 10
params['num_con_c'] = 2

# Plot a 10x10 grid of the first 100 training images and save it to disk.
sample_batch = next(iter(dataloader))
plt.figure(figsize=(10, 10))
plt.axis("off")
plt.imshow(np.transpose(vutils.make_grid(
    sample_batch[0].to(device)[:100], nrow=10, padding=2,
    normalize=True).cpu(), (1, 2, 0)))
# NOTE(review): saved filename contains spaces and no extension — confirm
# this is intended (matplotlib will default to PNG).
plt.savefig('output/Training Images {}'.format(params['dataset']))
plt.close('all')

# Initialise the networks on the target device and apply the custom
# weight initialisation.  Creation/initialisation order is kept as-is:
# if weights_init draws from the global RNG, reordering would change
# the resulting weights.
netG = Generator().to(device)
netG.apply(weights_init)

discriminator = Discriminator().to(device)
discriminator.apply(weights_init)

netD = DHead().to(device)
netD.apply(weights_init)

netQ = QHead().to(device)
netQ.apply(weights_init)

# Append model descriptions/summaries to the log file.
# NOTE(review): logFile is opened here but never closed in this chunk —
# presumably closed later in the file; use a context manager if it is not.
logFile = open("output/log.txt", "a")
if params['print_model_description']:
    print(netG)
    # repr(netG) is the idiomatic spelling of netG.__repr__().
    logFile.write(repr(netG) + '\n')
    # Derive the generator's input shape from one sampled noise vector so
    # the summary matches the actual latent layout (dis_c + con_c + z).
    noise_shape = noise_sample(params['num_dis_c'], params['dis_c_dim'],
                               params['num_con_c'], params['num_z'],
                               params['batch_size'], device)[0].shape
    summary(netG,
            input_size=(noise_shape[1], noise_shape[2], noise_shape[3]),
            log_file=logFile)
    print(discriminator)
    logFile.write(repr(discriminator) + '\n')
# Display a 10x10 grid of the first 100 training images and save it to disk.
plt.axis("off")
plt.imshow(
    np.transpose(
        vutils.make_grid(sample_batch[0].to(device)[:100], nrow=10,
                         padding=2, normalize=True).cpu(), (1, 2, 0)))
# NOTE(review): filename has spaces and no extension — confirm intended.
plt.savefig('Training Images {}'.format(params['dataset']))
plt.close('all')

# Initialise the network.
# Each sub-network is moved to the target device, given the custom weight
# initialisation, and echoed to stdout.  Creation/initialisation order is
# kept exactly as written: if weights_init draws from the global RNG,
# reordering would change the resulting weights.
netG = Generator().to(device)
netG.apply(weights_init)
print(netG)

discriminator = Discriminator().to(device)
discriminator.apply(weights_init)
print(discriminator)

netD = DHead().to(device)
netD.apply(weights_init)
print(netD)

netQ = QHead().to(device)
netQ.apply(weights_init)
print(netQ)

# Loss for discrimination between real and fake images.
criterionD = nn.BCELoss()

# Loss for discrete latent code.
criterionQ_dis = nn.CrossEntropyLoss()
# Plot the training images. sample_batch = next(iter(dataloader)) plt.figure(figsize=(10, 10)) plt.axis("off") plt.imshow(np.transpose(vutils.make_grid( sample_batch[0].to(device)[ : 100], nrow=10, padding=2, normalize=True).cpu(), (1, 2, 0))) plt.savefig('Training Images {}'.format(params['dataset'])) plt.close('all') # Initialise the network. netG = Generator().to(device) netG.apply(weights_init) print(netG) discriminator = Discriminator().to(device) discriminator.apply(weights_init) print(discriminator) netD = DHead().to(device) netD.apply(weights_init) print(netD) netQ = QHead().to(device) netQ.apply(weights_init) print(netQ) # Loss for discrimination between real and fake images. criterionD = nn.BCELoss() # Loss for discrete latent code. criterionQ_dis = nn.CrossEntropyLoss()
# loading saved weights
# Set the device FIRST so the checkpoint tensors can be mapped onto it:
# without map_location, a checkpoint saved on CUDA fails to load on a
# CPU-only machine.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
state_dict1 = torch.load('checkpoint/model_final_FashionMNIST',
                         map_location=device)

# Get the 'params' dictionary from the loaded state_dict.
params1 = state_dict1['params']

# Rebuild each network and restore its weights.  weights_init is not
# needed here because load_state_dict overwrites every parameter.
netG = Generator().to(device)
netG.load_state_dict(state_dict1['netG'])
print(netG)

discriminator = Discriminator().to(device)
discriminator.load_state_dict(state_dict1['discriminator'])
print(discriminator)

netD = DHead().to(device)
netD.load_state_dict(state_dict1['netD'])
print(netD)

netQ = QHead().to(device)
netQ.load_state_dict(state_dict1['netQ'])
print(netQ)

# Loss for discrimination between real and fake images.
# plt.imshow(np.transpose(vutils.make_grid( # sample_batch[0].to(device)[: 100], nrow=10, padding=2, # normalize=True).cpu(), (1, 2, 0))) # plt.savefig('result/Training Images {}'.format(dataset)) # plt.close('all') # Initialise the network. netG = Generator().to(device) netG.apply(weights_init) print(netG) fe = FrontEnd().to(device) fe.apply(weights_init) print(fe) netD = Discriminator().to(device) netD.apply(weights_init) print(netD) netQ = Q().to(device) netQ.apply(weights_init) print(netQ) # Loss for discrimination between real and fake images. criterionD = nn.BCELoss() # Loss for discrete latent code. criterionQ_dis = nn.CrossEntropyLoss() # Loss for continuous latent code. criterionQ_con = NegativeLogLikelihoodNormalDist() # Adam optimiser is used.
# real_batch = next(iter(dataloader)) # plt.figure(figsize=(8, 8)) # plt.axis("off") # plt.title("Training Images") # plt.imshow(np.transpose( # vutils.make_grid(real_batch[0].to(device)[:64], padding=2, # normalize=True).cpu(), (1, 2, 0))) # plt.show() # Create the dataloader dataloader = get_data(dataset, batch_size, image_size, workers) # Create the generator netG = Generator().to(device) # Create the Discriminator netD = Discriminator().to(device) # Handle multi-gpu if desired if (device.type == 'cuda') and (ngpu > 1): netG = nn.DataParallel(netG, list(range(ngpu))) netD = nn.DataParallel(netD, list(range(ngpu))) # Print the model print(netG) print(netD) # Establish convention for real and fake labels during training real_label = 1. fake_label = 0. # Initialize BCELoss function criterion = nn.BCELoss()
# Dataset-specific latent configuration for MNIST; the commented values
# are alternative code sizes kept for reference.
if (params['dataset'] == 'MNIST'):
    params['num_z'] = 62
    params['num_dis_c'] = 1
    # params['dis_c_dim'] = 9
    # params['num_con_c'] = 2

# Loss-weighting coefficients read from the hyperparameter dict.
lambda_res = params['lambda_res']
lambda_disc = params['lambda_disc']
lambda_cdis = params['lambda_cdis']
lambda_ccon = params['lambda_ccon']
sim_num = params['sim_num']
temp_dim = params['dis_c_dim']

# restore models: generator, discriminator, netQ
discriminator = Discriminator().to(device)
discriminator.load_state_dict(state_dict['discriminator'])

# Total latent size: noise + one-hot discrete codes + continuous codes.
num_z_c = params['num_z'] + params['num_dis_c'] * params['dis_c_dim'] + params[
    'num_con_c']
netG = Generator(num_z_c).to(device)
netG.load_state_dict(state_dict['netG'])

netQ = QHead(params['dis_c_dim'], params['num_con_c']).to(device)
netQ.load_state_dict(state_dict['netQ'])

netD = DHead().to(device)
netD.load_state_dict(state_dict['netD'])

# Loss for discrimination between real and fake images.
criterionD = nn.BCELoss()