lossG.backward()
optimizer_G.step()

# ====================
# Save to tensorboard
# ====================
if (i == 0 or (i % opt.n_display_step == 0)):
    board_train.add_scalar('Generater/loss_G', lossG.item(), iterations)
    board_train.add_scalar('Discriminator/loss_D', lossD.item(), iterations)
    board_train.add_scalar('Discriminator/loss_D_real', lossD_real.item(), iterations)
    board_train.add_scalar('Discriminator/loss_D_fake', lossD_fake.item(), iterations)
    board_add_image(board_train, 'fake_image', gen_imgs, iterations)

    # Monitor training progress
    print("epoch={}, iters={}, loss_G={:.5f}, loss_D={:.5f}".format(
        epoch, iterations, lossG, lossD))

# ============
# Save images
# ============
generator.eval()
with torch.no_grad():
    fake_imgs = generator(z_fixed)
    save_image(fake_imgs[0],
               os.path.join(opt.dir_out, opt.exper_name) + "/fake_image_epoches{}_batch0.png".format(epoch),
               normalize=True)
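The snippet above relies on a fixed latent batch `z_fixed` and a `board_add_image` helper that are defined elsewhere in the script. As a rough, hypothetical sketch of what those pieces might look like (the latent size, batch size, and helper body are assumptions, not the author's code):

import torch
import torchvision

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Assumed: a fixed batch of latent vectors, kept constant so the saved samples
# are directly comparable from epoch to epoch (the 100-dim latent size is a guess).
z_fixed = torch.randn(64, 100, device=device)

def board_add_image(board, tag, img_tensor, step, n_max_images=32):
    # Tile a batch of generated images into a single grid and write it to TensorBoard.
    grid = torchvision.utils.make_grid(img_tensor[:n_max_images], normalize=True)
    board.add_image(tag, grid, step)

Keeping the latent batch fixed is what makes the saved images useful as a progress monitor: any change between epochs is due to the generator, not to new random noise.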
# Compute gradients
loss_G.backward()

# Update the weights with the configured optimizer, using the gradients computed by backward()
optimizer_G.step()

#====================================================
# Log training progress
#====================================================
if( step == 0 or ( step % args.n_display_step == 0 ) ):
    board_train.add_scalar('Generater/loss_G', loss_G.item(), iterations)
    board_train.add_scalar('Discriminator/loss_D', loss_D.item(), iterations)
    board_train.add_scalar('Discriminator/loss_D_real', loss_D_real.item(), iterations)
    board_train.add_scalar('Discriminator/loss_D_fake', loss_D_fake.item(), iterations)
    board_add_image(board_train, 'fake image', G_z, iterations)
    print( "epoch={}, iters={}, loss_G={:.5f}, loss_D={:.5f}".format(epoch, iterations, loss_G, loss_D) )

#====================================================
# Log test loss
#====================================================
if( step == 0 or ( step % args.n_display_test_step == 0 ) ):
    model_G.eval()
    model_D.eval()

    n_test_loop = 0
    test_iterations = 0
    loss_D_total = 0
    loss_D_real_total = 0
    loss_D_fake_total = 0
    loss_G_total = 0
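The evaluation block above only switches the models to eval mode and resets the running totals; the loop that fills them in is not shown here. A minimal sketch of how such an evaluation is usually completed, assuming a test DataLoader `dloader_test`, a second SummaryWriter `board_test`, and a stand-in helper `compute_losses()` (all of these names are assumptions, not the script's actual code):

with torch.no_grad():
    for (images, _) in dloader_test:
        images = images.to(device)

        # compute_losses() stands in for whatever adversarial loss computation
        # the training step uses for G and D on this batch.
        loss_G_test, loss_D_test, loss_D_real_test, loss_D_fake_test = \
            compute_losses(model_G, model_D, images)

        loss_G_total += loss_G_test.item()
        loss_D_total += loss_D_test.item()
        loss_D_real_total += loss_D_real_test.item()
        loss_D_fake_total += loss_D_fake_test.item()
        n_test_loop += 1

# Write the averaged test losses under a separate writer so that the train and
# test curves can be compared side by side in TensorBoard.
board_test.add_scalar('Generater/loss_G', loss_G_total / n_test_loop, iterations)
board_test.add_scalar('Discriminator/loss_D', loss_D_total / n_test_loop, iterations)
board_test.add_scalar('Discriminator/loss_D_real', loss_D_real_total / n_test_loop, iterations)
board_test.add_scalar('Discriminator/loss_D_fake', loss_D_fake_total / n_test_loop, iterations)

# Switch the models back to training mode before resuming the training loop
model_G.train()
model_D.train()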
# Compute gradients
loss.backward()

# Update the weights with the configured optimizer, using the gradients computed by backward()
optimizer.step()

#====================================================
# Log training progress
#====================================================
if (step == 0 or (step % args.n_display_step == 0)):
    board_train.add_scalar('Model/loss', loss.item(), step + 1)
    print("step={}, loss={:.5f}".format(step + 1, loss))
    board_add_image(board_train, 'input image', image, step + 1, n_max_images=4)
    #board_train.add_histogram('output[0]', output[0], step+1)
    #print( "step={}, targets=({}), output=({:.5f},{:.5f})".format(step+1, targets[0].item(), output[0,0], output[0,1]) )

#----------------------------------------------------
# Compute accuracy (per batch)
#----------------------------------------------------
# Take the label 0-9 with the highest probability as the predicted label.
# dim = 1 -> take the maximum along the column (class) dimension
# Returns : (Tensor, LongTensor)
_, predicts = torch.max(output.data, dim=1)
if (args.debug and n_print > 0):
    print("predicts.shape :", predicts.shape)
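From `predicts`, the per-batch accuracy is obtained by comparing against the ground-truth labels; a minimal sketch, assuming the label tensor is named `targets` as in the commented-out print above:

# Count how many predictions match the ground-truth labels in this batch
n_correct = (predicts == targets).sum().item()
batch_accuracy = n_correct / targets.size(0)
print("step={}, batch accuracy={:.4f}".format(step + 1, batch_accuracy))

Accumulating `n_correct` and the batch sizes over the whole epoch gives the epoch-level accuracy.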