            optimizer_G = optim.Adam(
                params=model_G.feature_regression.parameters(),
                lr=args.lr,
                betas=(args.beta1, args.beta2))
        else:
            optimizer_G = optim.Adam(params=model_G.parameters(),
                                     lr=args.lr,
                                     betas=(args.beta1, args.beta2))

    #================================
    # Set up the loss functions
    #================================
    loss_grid_fn = TransformedGridLoss(device=device,
                                       geometric_model=args.geometric_model)
    loss_l1_fn = nn.L1Loss()
    loss_vgg_fn = VGGLoss(device=device, layids=[4])

    #================================
    # Train the model
    #================================
    print("Starting Training Loop...")
    n_print = 1
    step = 0
    for epoch in tqdm(range(args.n_epoches), desc="epoches"):
        for iter, inputs in enumerate(
                tqdm(dloader_train, desc="epoch={}".format(epoch))):
            model_G.train()

            # Skip the final mini-batch if it is smaller than the batch size
            # (otherwise the later computations hit shape mismatches)
            if inputs["image_s"].shape[0] != args.batch_size:
                break
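
The snippet above truncates before the per-iteration update, so the following is a minimal sketch of how the three losses could be combined for a geometric-matching generator. The output signature of model_G, the image_t/theta_gt batch keys, and the unit loss weights are assumptions, not taken from the original source.

    def train_step_sketch(model_G, optimizer_G, loss_grid_fn, loss_l1_fn,
                          loss_vgg_fn, inputs, device):
        image_s = inputs["image_s"].to(device)
        image_t = inputs["image_t"].to(device)    # hypothetical target key
        theta_gt = inputs["theta_gt"].to(device)  # hypothetical ground-truth warp

        # Assumed forward signature: predicted warp parameters plus the
        # image warped by them.
        theta, warped = model_G(image_s)

        # Grid loss regularizes the predicted transform; L1 and VGG compare
        # the warped result against the target. Unit weights are assumptions.
        loss = loss_grid_fn(theta, theta_gt) \
               + loss_l1_fn(warped, image_t) \
               + loss_vgg_fn(warped, image_t)

        optimizer_G.zero_grad()
        loss.backward()
        optimizer_G.step()
        return loss.item()
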
Example #2
    #================================
    # Set up the optimizers
    #================================
    optimizer_G = optim.Adam(params=model_G.parameters(),
                             lr=args.lr,
                             betas=(args.beta1, args.beta2))
    optimizer_D = optim.Adam(params=model_D.parameters(),
                             lr=args.lr,
                             betas=(args.beta1, args.beta2))

    #================================
    # Set up the loss functions
    #================================
    loss_l1_fn = nn.L1Loss()
    loss_vgg_fn = VGGLoss(device, n_channels=3)
    loss_adv_fn = LSGANLoss(device)

    #================================
    # Train the model
    #================================
    print("Starting Training Loop...")
    n_print = 1
    step = 0
    for epoch in tqdm(range(args.n_epoches), desc="epoches"):
        for iter, inputs in enumerate(
                tqdm(dloader_train, desc="epoch={}".format(epoch))):
            model_G.train()
            model_D.train()

            # Skip the final mini-batch if it is smaller than the batch size
            # (otherwise the later computations hit shape mismatches)
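
Example #2 also truncates before the update step. Below is a minimal sketch of the usual LSGAN alternation with the losses set up above; the batch keys and the loss_adv_fn(pred, target_is_real) calling convention are assumptions, not taken from the original source.

    def gan_step_sketch(model_G, model_D, optimizer_G, optimizer_D,
                        loss_l1_fn, loss_vgg_fn, loss_adv_fn, inputs, device):
        real = inputs["image_t"].to(device)   # hypothetical target key
        cond = inputs["image_s"].to(device)   # hypothetical conditioning input
        fake = model_G(cond)

        # Discriminator update (LSGAN: push D(real) toward 1, D(fake) toward 0).
        # fake is detached so this step does not backpropagate into model_G.
        loss_D = loss_adv_fn(model_D(real), True) \
                 + loss_adv_fn(model_D(fake.detach()), False)
        optimizer_D.zero_grad()
        loss_D.backward()
        optimizer_D.step()

        # Generator update: adversarial term plus reconstruction terms.
        loss_G = loss_adv_fn(model_D(fake), True) \
                 + loss_l1_fn(fake, real) + loss_vgg_fn(fake, real)
        optimizer_G.zero_grad()
        loss_G.backward()
        optimizer_G.step()
        return loss_D.item(), loss_G.item()
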
Example #3

    #================================
    # Set up the optimizers
    #================================
    optimizer_G = optim.Adam(params=model_G.parameters(),
                             lr=args.lr,
                             betas=(args.beta1, args.beta2))
    optimizer_D = optim.Adam(params=model_D.parameters(),
                             lr=args.lr,
                             betas=(args.beta1, args.beta2))

    #================================
    # Set up the loss functions
    #================================
    if args.n_output_channels == 1:
        loss_l1_fn = nn.L1Loss()
        loss_vgg_fn = VGGLoss(device, n_channels=args.n_output_channels)
        loss_adv_fn = LSGANLoss(device)
    else:
        #loss_bce_fn = ParsingCrossEntropyLoss()
        loss_bce_fn = CrossEntropy2DLoss(device)
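
    # Usage sketch (assumption, not from the original source): CrossEntropy2DLoss
    # is taken to behave like nn.CrossEntropyLoss applied per pixel, i.e. it
    # consumes [B, n_output_channels, H, W] logits and a [B, H, W] integer
    # class map, e.g.:
    #   logits = model_G(image)                        # [B, n_output_channels, H, W]
    #   labels = inputs["parsing"].long().to(device)   # hypothetical label key
    #   loss = loss_bce_fn(logits, labels)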

    #================================
    # Get the predefined graph structures
    #================================
    adj_matrix_cihp_to_cihp, adj_matrix_pascal_to_pascal, adj_matrix_cihp_to_pascal = \
        get_graph_adj_matrix()
    adj_matrix_cihp_to_cihp = adj_matrix_cihp_to_cihp.to(device)
    adj_matrix_pascal_to_pascal = adj_matrix_pascal_to_pascal.to(device)
    adj_matrix_cihp_to_pascal = adj_matrix_cihp_to_pascal.to(device)
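
    # Sketch (assumption, not from the original source): these matrices
    # describe label graphs for the CIHP and PASCAL vocabularies plus a
    # cross-dataset mapping. A graph-convolution-style propagation step
    # would typically row-normalize the adjacency before mixing node
    # features, e.g.:
    #   deg = adj_matrix_cihp_to_cihp.sum(dim=-1, keepdim=True).clamp(min=1)
    #   adj_norm = adj_matrix_cihp_to_cihp / deg    # row-normalized adjacency
    #   node_feats = adj_norm @ node_feats          # propagate along graph edges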

    #================================