import os
from glob import glob

import torch

# ResidualGenerator, Discriminator and weights_init are assumed to be defined
# elsewhere in the project.
def build_model(config, from_style, to_style):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Two generators (A->B and B->A) and one discriminator per domain.
    generator_ab = ResidualGenerator(config.image_size,
                                     config.num_residual_blocks).to(device)
    generator_ba = ResidualGenerator(config.image_size,
                                     config.num_residual_blocks).to(device)
    discriminator_a = Discriminator(config.image_size).to(device)
    discriminator_b = Discriminator(config.image_size).to(device)

    # Locate checkpoint files from the previous epoch for this style pair.
    generator_ab_param = glob(
        os.path.join(config.checkpoint_dir, f"{from_style}2{to_style}",
                     f"generator_ab_{config.epoch-1}.pth"))
    generator_ba_param = glob(
        os.path.join(config.checkpoint_dir, f"{from_style}2{to_style}",
                     f"generator_ba_{config.epoch-1}.pth"))
    discriminator_a_param = glob(
        os.path.join(config.checkpoint_dir, f"{from_style}2{to_style}",
                     f"discriminator_a_{config.epoch-1}.pth"))
    discriminator_b_param = glob(
        os.path.join(config.checkpoint_dir, f"{from_style}2{to_style}",
                     f"discriminator_b_{config.epoch-1}.pth"))

    print(f"[*] Load checkpoint in {config.checkpoint_dir}")
    if not os.path.exists(
            os.path.join(config.checkpoint_dir, f"{from_style}2{to_style}")):
        os.makedirs(
            os.path.join(config.checkpoint_dir, f"{from_style}2{to_style}"))

    # Initialize weights from scratch if no checkpoint exists; otherwise load
    # the latest checkpoint found for each network.
    if len(
            os.listdir(
                os.path.join(config.checkpoint_dir,
                             f"{from_style}2{to_style}"))) == 0:
        print(f"[!] No checkpoint in {config.checkpoint_dir}")
        generator_ab.apply(weights_init)
        generator_ba.apply(weights_init)
        discriminator_a.apply(weights_init)
        discriminator_b.apply(weights_init)
    else:
        generator_ab.load_state_dict(
            torch.load(generator_ab_param[-1], map_location=device))
        generator_ba.load_state_dict(
            torch.load(generator_ba_param[-1], map_location=device))
        discriminator_a.load_state_dict(
            torch.load(discriminator_a_param[-1], map_location=device))
        discriminator_b.load_state_dict(
            torch.load(discriminator_b_param[-1], map_location=device))

    return generator_ab, generator_ba, discriminator_a, discriminator_b
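
A minimal usage sketch (not part of the original source): the config field names mirror those read by build_model above, while the concrete values, checkpoint path and style names are placeholder assumptions.

from argparse import Namespace

config = Namespace(image_size=256, num_residual_blocks=9,
                   checkpoint_dir="./checkpoints", epoch=10)

# The style names only affect the checkpoint sub-directory,
# e.g. "./checkpoints/monet2photo".
g_ab, g_ba, d_a, d_b = build_model(config, "monet", "photo")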
Example #2

import torch
import torch.nn as nn
import torch.optim as optim

# Encoder, Decoder, Discriminator, weights_init, Encoder_loss, Decoder_loss and
# the hyperparameters (ngpu, num, nc, ndf, ngf, lr, beta1) are assumed to be
# defined elsewhere in the project.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

netE = Encoder(ngpu, ndf, nc).to(device)
if (device.type == 'cuda') and (ngpu > 1):
    netE = nn.DataParallel(netE, list(range(ngpu)))
netE.apply(weights_init)

netG = Decoder(ngpu, num, ngf).to(device)
if (device.type == 'cuda') and (ngpu > 1):
    netG = nn.DataParallel(netG, list(range(ngpu)))
netG.apply(weights_init)

netD = Discriminator(ngpu, ndf, nc).to(device)
if (device.type == 'cuda') and (ngpu > 1):
    netD = nn.DataParallel(netD, list(range(ngpu)))
netD.apply(weights_init)

# Loss functions
criterionE = Encoder_loss
criterionG = Decoder_loss
criterionD = nn.BCELoss()

# Optimizers
optimizerE = optim.Adam(netE.parameters(), lr=lr, betas=(beta1, 0.999))
optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999))
# Target labels for the discriminator and per-network loss histories
real_label = 1
fake_label = 0
E_losses = []  # encoder losses
G_losses = []  # generator (decoder) losses
D_losses = []  # discriminator losses
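
A hedged sketch of one discriminator update using the objects defined above (not from the original source): it assumes a dataloader yielding image batches, that netG(netE(x)) produces a reconstructed image, and that netD ends in a sigmoid so criterionD (BCELoss) applies; the project-specific Encoder_loss/Decoder_loss updates are omitted.

for real_images, _ in dataloader:
    real_images = real_images.to(device)
    b_size = real_images.size(0)

    # Discriminator pass on real images.
    netD.zero_grad()
    label = torch.full((b_size,), real_label, dtype=torch.float, device=device)
    output = netD(real_images).view(-1)
    errD_real = criterionD(output, label)
    errD_real.backward()

    # Discriminator pass on reconstructions from the encoder/decoder pair.
    fake_images = netG(netE(real_images))
    label.fill_(fake_label)
    output = netD(fake_images.detach()).view(-1)
    errD_fake = criterionD(output, label)
    errD_fake.backward()
    optimizerD.step()

    D_losses.append((errD_real + errD_fake).item())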