# Hyperparameters read from the config dict
print_every = config_d['print_every']
z_size = config_d['noise_dim']
n_critic = config_d['critic_iter']
lr = config_d['lr']
beta1 = config_d['beta1']
beta2 = config_d['beta2']

# Instantiate the discriminator (critic) and the generator
D = Discriminator()
G = Generator(z_size=z_size)
print(D)
print(G)

# Move models to GPU when one is available
if cuda:
    G.cuda()
    D.cuda()
    print('GPU available for training. Models moved to GPU')
else:
    print('Training on CPU.')

# Separate Adam optimizers for the discriminator and the generator
d_optimizer = optim.Adam(D.parameters(), lr=lr, betas=[beta1, beta2])
g_optimizer = optim.Adam(G.parameters(), lr=lr, betas=[beta1, beta2])

# Loss bookkeeping and the gradient-penalty weight
losses_train = []
losses_val = []
reg_lambda = config_d['gp_lambda']

# Batch size and number of batches in the training and validation sets
Nbatch = sdat_train.get_batch_size()
N_train_btot = sdat_train.get_Nbatches_tot()
N_val_btot = sdat_val.get_Nbatches_tot()
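The gp_lambda and critic_iter entries above indicate a WGAN-GP style objective, but this section does not show how the penalty weighted by reg_lambda is actually computed. The following is a minimal sketch of the usual gradient-penalty term, not taken from the project code: the helper name gradient_penalty, the random-interpolation scheme, and the assumption that D returns one critic score per sample are all illustrative.

import torch

def gradient_penalty(D, real, fake, reg_lambda, device):
    """WGAN-GP penalty on random interpolates between a real and a fake batch.
    Assumes D(x) returns a (batch,) or (batch, 1) tensor of critic scores."""
    batch_size = real.size(0)
    # One interpolation coefficient per sample, broadcast over the remaining dims
    eps = torch.rand(batch_size, *([1] * (real.dim() - 1)), device=device)
    interpolates = (eps * real + (1.0 - eps) * fake).requires_grad_(True)
    d_scores = D(interpolates)
    grads = torch.autograd.grad(
        outputs=d_scores,
        inputs=interpolates,
        grad_outputs=torch.ones_like(d_scores),
        create_graph=True,
        retain_graph=True,
    )[0]
    grads = grads.view(batch_size, -1)
    # Penalize deviation of the per-sample gradient norm from 1
    return reg_lambda * ((grads.norm(2, dim=1) - 1.0) ** 2).mean()

In a training loop this term would be added to the critic loss on each of the n_critic discriminator updates performed per generator update.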
# Dataset location and output paths for this run
# os.makedirs(save_path, exist_ok=True)
root_path = "/Users/yuming/OneDrive/sync/semester/ML/hw/project/dataset/"
data_path = root_path + "faces/"
save_path = root_path + "fake-faces/"
save_csv_loss_g = root_path + "csv/loss_g.csv"
save_csv_loss_d = root_path + "csv/loss_d.csv"

# Initialize generator and discriminator
generator = Generator(opt.latent_dim, img_shape)
discriminator = Discriminator(img_shape)

# Move models to GPU when available and pick the matching tensor type
cuda_enabled = torch.cuda.is_available()
if cuda_enabled:
    generator.cuda()
    discriminator.cuda()
    Tensor = torch.cuda.FloatTensor
else:
    Tensor = torch.FloatTensor
# Tensor = torch.cuda.FloatTensor if cuda_enabled else torch.FloatTensor

# Load the face images and wrap them in a shuffled DataLoader
image_raw_data = fetch_dataset(data_path)
data_loader = DataLoader(image_raw_data, batch_size=opt.batch_size, shuffle=True)

# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
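The cell above stops at the optimizer definitions. As a rough guide to how these objects fit together, here is a sketch of a single training iteration; it assumes data_loader yields plain image batches, that the generator accepts a (batch, latent_dim) noise tensor, and that discriminator ends in a sigmoid so a binary-cross-entropy adversarial loss applies. None of these choices is confirmed by the code above.

import torch
import torch.nn as nn

adversarial_loss = nn.BCELoss()  # assumed criterion; the project's actual loss is not shown

for imgs in data_loader:  # assumes the loader yields image batches directly
    real = imgs.type(Tensor)
    batch_size = real.size(0)
    valid = Tensor(batch_size, 1).fill_(1.0)  # target label for "real"
    fake = Tensor(batch_size, 1).fill_(0.0)   # target label for "fake"

    # Generator step: try to make generated images score as real
    optimizer_G.zero_grad()
    z = torch.randn(batch_size, opt.latent_dim, device=real.device)  # assumed noise shape
    gen_imgs = generator(z)
    g_loss = adversarial_loss(discriminator(gen_imgs), valid)
    g_loss.backward()
    optimizer_G.step()

    # Discriminator step: separate real images from detached generated ones
    optimizer_D.zero_grad()
    real_loss = adversarial_loss(discriminator(real), valid)
    fake_loss = adversarial_loss(discriminator(gen_imgs.detach()), fake)
    d_loss = 0.5 * (real_loss + fake_loss)
    d_loss.backward()
    optimizer_D.step()

Keeping the two updates symmetric like this means swapping in a different adversarial objective only changes adversarial_loss and the target tensors, not the optimizer wiring defined above.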