running_results['batch_sizes'] += batch_size

############################
# (1) Update G network: minimize the combined generator loss
############################
real_img = Variable(target)
if torch.cuda.is_available():
    real_img = real_img.cuda()
z = Variable(data)
if torch.cuda.is_available():
    z = z.cuda()

fake_img = netG(z)
fake_out = netD(fake_img).mean()
g_loss = generator_criterion(fake_out, fake_img, real_img)
netG.zero_grad()
g_loss.backward()
optimizerG.step()

############################
# (2) Update D network: maximize D(x) - 1 - D(G(z))
############################
real_out = netD(real_img).mean()
fake_out = netD(fake_img.detach()).mean()  # detach so the D loss does not backprop into netG
d_loss = 1 - real_out + fake_out
netD.zero_grad()
d_loss.backward(retain_graph=True)  # retain_graph kept from the original; not strictly needed since fake_img is detached
optimizerD.step()

running_results['g_loss'] += g_loss.item() * batch_size
running_results['d_loss'] += d_loss.item() * batch_size
running_results['d_score'] += real_out.item() * batch_size
running_results['g_score'] += fake_out.item() * batch_size
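The snippet above relies on a `generator_criterion` that is not defined here. In SRGAN-style training it is typically a combined generator loss (adversarial term pushing `D(G(z))` toward 1, a pixel reconstruction term, often a VGG perceptual term, and a total-variation regularizer). The class name `GeneratorLoss` and the weights below are illustrative assumptions, not the document's actual definition; the perceptual term is omitted to keep the sketch short.

import torch
import torch.nn as nn

class GeneratorLoss(nn.Module):
    """Minimal sketch of a combined generator loss (assumed structure)."""
    def __init__(self, adv_weight=1e-3, tv_weight=2e-8):
        super().__init__()
        self.mse = nn.MSELoss()
        self.adv_weight = adv_weight
        self.tv_weight = tv_weight

    def forward(self, fake_out, fake_img, real_img):
        # Adversarial term: encourage D(G(z)) to approach 1
        adversarial = torch.mean(1 - fake_out)
        # Pixel-wise reconstruction term
        image = self.mse(fake_img, real_img)
        # Total-variation regularizer for spatial smoothness (NCHW layout)
        tv = (torch.mean(torch.abs(fake_img[:, :, 1:, :] - fake_img[:, :, :-1, :])) +
              torch.mean(torch.abs(fake_img[:, :, :, 1:] - fake_img[:, :, :, :-1])))
        return image + self.adv_weight * adversarial + self.tv_weight * tv

generator_criterion = GeneratorLoss()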
plt.close()

list_loss = []
iteration = 0

print("Starting Training Loop...")
for epoch in range(0, max_epochs):
    for i, data in enumerate(dataloader, 0):
        # -----------------------------------------------------------
        # Initial batch
        data_A = data[0].to(device)
        data_B = data[1].to(device)
        real_batch_size = data_A.size(0)
        # -----------------------------------------------------------
        # Update network:
        net.zero_grad()
        fake_B = net(data_A)
        # Calculate loss
        loss = criterion(fake_B, data_B)
        # Update G
        loss.backward()
        optimizer.step()
        # -----------------------------------------------------------
        # Output training stats
        with torch.no_grad():
            if i % 50 == 0:
                print('[%d/%d][%2d/%d]\tLoss: %.4f' %