Example #1
def verka_human(enc):
    # Mean per-landmark Euclidean error of the encoder's predicted
    # coordinates over the Human3.6M test set.
    sum_loss = 0
    n = len(LazyLoader.human36().test_dataset)
    for batch in LazyLoader.human36().test_loader:
        data = batch['A'].cuda()
        landmarks = batch["paired_B"].cuda()
        pred = enc(data)["mes"].coord
        # L2 distance per landmark [B, K], averaged over landmarks,
        # then summed over the batch.
        dist = (pred - landmarks).pow(2).sum(dim=2).sqrt()
        sum_loss += dist.mean(dim=1).sum().item()
    print("test brule_loss: ", sum_loss / n)
    return sum_loss / n
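
The quantity accumulated above is the mean per-landmark Euclidean distance per image, averaged over the whole test set. A minimal sketch of the same reduction on dummy tensors (made-up shapes: batch of 4, 32 landmarks, 2-D coordinates), independent of the repository code:

import torch

pred = torch.rand(4, 32, 2)       # stand-in for enc(data)["mes"].coord
landmarks = torch.rand(4, 32, 2)  # stand-in for batch["paired_B"]

dist = (pred - landmarks).pow(2).sum(dim=2).sqrt()  # [B, K] distances
per_image = dist.mean(dim=1)                        # mean over landmarks
print(per_image.sum().item())                       # batch contribution to sum_loss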
Example #2
# Heatmap discriminator restored from the checkpoint (its construction
# lies above the excerpt shown here) and moved to the GPU.
hm_discriminator.load_state_dict(weights["dh"])
hm_discriminator = hm_discriminator.cuda()

# Two GAN models: "tuda" (Russian for "there", heatmap -> image) and
# "obratno" ("back", image -> skeleton), each with its own StyleGAN loss
# and learning-rate pair.
gan_model_tuda = StyleGanModel[HeatmapToImage](enc_dec.generator,
                                               StyleGANLoss(discriminator_img),
                                               (0.001 / 4, 0.0015 / 4))
gan_model_obratno = StyleGanModel[HG_skeleton](hg,
                                               StyleGANLoss(hm_discriminator),
                                               (2e-5, 0.0015 / 4))

style_opt = optim.Adam(enc_dec.style_encoder.parameters(), lr=1e-5)

writer = SummaryWriter(f"{Paths.default.board()}/human{int(time.time())}")
WR.writer = writer

batch = next(LazyLoader.human36().loader_train_inf)
test_img = batch["A"].cuda()
test_landmarks = torch.clamp(
    next(LazyLoader.human_landmarks(args.data_path).loader_train_inf).cuda(),
    max=1)
test_hm = heatmapper.forward(test_landmarks).detach()
test_noise = mixing_noise(batch_size, 512, 0.9, device)

psp_loss = PSPLoss(id_lambda=0).cuda()
mes_loss = MesBceL2Loss(heatmapper, bce_coef=10000 / 2, l2_coef=100)

image_accumulator = Accumulator(enc_dec.generator, decay=0.99, write_every=100)
hm_accumulator = Accumulator(hg, decay=0.99, write_every=100)

for i in range(10000):
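
This example is cut off at the start of its training loop. The mixing_noise helper used above is also not shown in the excerpt; in StyleGAN2-style training code it conventionally returns either a single latent vector or a pair of latents for style mixing, chosen at random. A sketch under that assumption (the repository's actual implementation may differ):

import random
import torch

def mixing_noise(batch, latent_dim, prob, device):
    # With probability `prob`, draw two latents for style mixing;
    # otherwise a single latent, returned as a one-element list.
    if prob > 0 and random.random() < prob:
        return list(torch.randn(2, batch, latent_dim, device=device).unbind(0))
    return [torch.randn(batch, latent_dim, device=device)]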
Example #3
# Heatmap-to-image autoencoder and image discriminator, both restored
# from the checkpoint and moved to the GPU.
enc_dec = StyleGanAutoEncoder(
    hm_nc=measure_size, image_size=image_size).load_state_dict(weights).cuda()

discriminator_img = Discriminator(image_size)
discriminator_img.load_state_dict(weights['di'])
discriminator_img = discriminator_img.cuda()

gan_model_tuda = StyleGanModel[HeatmapToImage](enc_dec.generator,
                                               StyleGANLoss(discriminator_img),
                                               (0.001, 0.0015))

writer = SummaryWriter(f"{Paths.default.board()}/human_gan{int(time.time())}")
WR.writer = writer

batch = next(LazyLoader.human36(use_mask=True).loader_train_inf)
test_img = batch["A"].cuda()
test_landmarks = torch.clamp(batch["paired_B"].cuda(), max=1)
test_hm = heatmapper.forward(test_landmarks).detach()
test_noise = mixing_noise(batch_size, 512, 0.9, device)

image_accumulator = Accumulator(enc_dec.generator, decay=0.99, write_every=100)

mes_loss = MesBceL2Loss(heatmapper, bce_coef=10000 / 2, l2_coef=100)

for i in range(100000):

    WR.counter.update(i)

    batch = next(LazyLoader.human36(use_mask=True).loader_train_inf)
    real_img = batch["A"].cuda()
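
This example is likewise truncated inside its training loop. One piece shared by both training scripts is Accumulator(..., decay=0.99, write_every=100); the usual role of such a wrapper is an exponential moving average (EMA) of a network's weights, a standard StyleGAN training trick. A minimal sketch of that pattern, assuming (not confirmed by this excerpt) that the repository's Accumulator behaves this way:

import copy
import torch

class EmaAccumulator:
    # Hypothetical stand-in for the repository's Accumulator: keeps an
    # EMA copy of a module's parameters, refreshed every `write_every` calls.
    def __init__(self, module, decay=0.99, write_every=100):
        self.module = module
        self.ema = copy.deepcopy(module).eval()
        self.decay = decay
        self.write_every = write_every
        self.calls = 0

    def step(self):
        self.calls += 1
        if self.calls % self.write_every != 0:
            return
        with torch.no_grad():
            for p_ema, p in zip(self.ema.parameters(),
                                self.module.parameters()):
                p_ema.mul_(self.decay).add_(p, alpha=1 - self.decay)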