#================================
# Optimizer setup
#================================
# Generator and discriminator each get their own Adam optimizer,
# sharing the same learning rate and beta schedule from the CLI args.
optimizer_G = optim.Adam(params=model_G.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))
optimizer_D = optim.Adam(params=model_D.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))

#================================
# Loss function setup
#================================
loss_l1_fn = nn.L1Loss()                                            # pixel-wise reconstruction loss
loss_vgg_fn = VGGLoss(device, n_channels=3)                         # perceptual loss on VGG features
loss_feat_fn = FeatureMatchingLoss(device, n_dis=2, n_layers_D=3)   # match D's intermediate features
loss_adv_fn = LSGANLoss(device)                                     # least-squares adversarial loss

#================================
# Model training
#================================
print("Starting Training Loop...")
n_print = 1
step = 0
for epoch in tqdm(range(args.n_epoches), desc="epoches"):
    for iter, inputs in enumerate(tqdm(dloader_train, desc="epoch={}".format(epoch))):
        model_G.train()
        model_D.train()

        # Skip the final mini-batch if it is smaller than batch_size
        # (it would cause shape mismatches in later computations).
        if inputs["image_s"].shape[0] != args.batch_size:
            # NOTE(review): source was truncated after this condition;
            # `continue` matches the stated intent of skipping the batch — confirm.
            continue
#================================
# Optimizer setup
#================================
# Generator and discriminator each get their own Adam optimizer,
# sharing the same learning rate and beta schedule from the CLI args.
optimizer_G = optim.Adam(params=model_G.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))
optimizer_D = optim.Adam(params=model_D.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))

#================================
# Loss function setup
#================================
loss_l1_fn = nn.L1Loss()                        # pixel-wise reconstruction loss
loss_vgg_fn = VGGLoss(device, n_channels=3)     # perceptual loss on VGG features
loss_adv_fn = LSGANLoss(device)                 # least-squares adversarial loss

#================================
# Model training
#================================
print("Starting Training Loop...")
n_print = 1
step = 0
for epoch in tqdm(range(args.n_epoches), desc="epoches"):
    for iter, inputs in enumerate(tqdm(dloader_train, desc="epoch={}".format(epoch))):
        model_G.train()
        model_D.train()

        # Skip the final mini-batch if it is smaller than batch_size
        # (it would cause shape mismatches in later computations).
        if inputs["pose_parse_onehot"].shape[0] != args.batch_size:
            # NOTE(review): source was truncated after this condition;
            # `continue` matches the stated intent of skipping the batch — confirm.
            continue
#================================
# Optimizer setup
#================================
# Generator and discriminator each get their own Adam optimizer,
# sharing the same learning rate and beta schedule from the CLI args.
optimizer_G = optim.Adam(params=model_G.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))
optimizer_D = optim.Adam(params=model_D.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))

#================================
# Loss function setup
#================================
loss_l1_fn = nn.L1Loss()                        # pixel-wise reconstruction loss
loss_vgg_fn = VGGLoss(device, n_channels=3)     # perceptual loss on VGG features
loss_adv_fn = LSGANLoss(device)                 # least-squares adversarial loss
loss_const_fn = nn.MSELoss()                    # consistency loss (MSE)

#================================
# Model training
#================================
# CutMix augmentation applied on every call (prob=1.0); a shared random seed
# presumably keeps paired inputs augmented identically — TODO confirm against CutMix impl.
cutmix_fn = CutMix(prob=1.0)
seed_cutmix = random.randint(0, 10000)

print("Starting Training Loop...")
n_print = 1
step = 0
for epoch in tqdm(range(args.n_epoches), desc="epoches"):
    for iter, inputs in enumerate(tqdm(dloader_train, desc="epoch={}".format(epoch))):
        model_G.train()
        # NOTE(review): source chunk is truncated here (mid-loop) —
        # the remainder of the training-step body is not visible.